+/* Appends one OFPST_QUEUE reply entry to the message being built in
+ * 'cbdata', describing queue 'queue_id' of the port recorded in
+ * 'cbdata->ofport'.  Counters are taken from 'stats' and converted to
+ * network byte order. */
+static void
+put_queue_stats(struct queue_stats_cbdata *cbdata, uint32_t queue_id,
+                const struct netdev_queue_stats *stats)
+{
+    struct ofp_queue_stats *reply;
+
+    reply = append_ofp_stats_reply(sizeof *reply, cbdata->ofconn, &cbdata->msg);
+    memset(reply->pad, 0, sizeof reply->pad);
+    reply->port_no = htons(cbdata->ofport->opp.port_no);
+    reply->queue_id = htonl(queue_id);
+    reply->tx_packets = htonll(stats->tx_packets);
+    reply->tx_bytes = htonll(stats->tx_bytes);
+    reply->tx_errors = htonll(stats->tx_errors);
+}
+
+/* netdev_dump_queue_stats() callback: appends the statistics for one
+ * queue to the reply message carried in 'cbdata_' (which is really a
+ * struct queue_stats_cbdata *). */
+static void
+handle_queue_stats_dump_cb(uint32_t queue_id,
+                           struct netdev_queue_stats *stats,
+                           void *cbdata_)
+{
+    put_queue_stats(cbdata_, queue_id, stats);
+}
+
+/* Appends queue stats replies for 'port' to the message in 'cbdata':
+ * every queue on the port when 'queue_id' is OFPQ_ALL, otherwise just
+ * the named queue.  A queue whose stats cannot be read is silently
+ * omitted from the reply. */
+static void
+handle_queue_stats_for_port(struct ofport *port, uint32_t queue_id,
+                            struct queue_stats_cbdata *cbdata)
+{
+    cbdata->ofport = port;
+
+    if (queue_id != OFPQ_ALL) {
+        struct netdev_queue_stats stats;
+
+        if (!netdev_get_queue_stats(port->netdev, queue_id, &stats)) {
+            put_queue_stats(cbdata, queue_id, &stats);
+        }
+        return;
+    }
+
+    netdev_dump_queue_stats(port->netdev, handle_queue_stats_dump_cb, cbdata);
+}
+
+/* Handles an OFPST_QUEUE stats request: builds and sends a reply with
+ * per-queue transmit counters for the requested port (or every port,
+ * for OFPP_ALL) and queue (or every queue, for OFPQ_ALL).  Returns 0 on
+ * success or an OpenFlow error code for a malformed request or a port
+ * number outside the valid range. */
+static int
+handle_queue_stats_request(struct ofconn *ofconn,
+                           const struct ofp_stats_request *osr,
+                           size_t arg_size)
+{
+    struct ofproto *ofproto = ofconn->ofproto;
+    struct ofp_queue_stats_request *qsr;
+    struct queue_stats_cbdata cbdata;
+    struct ofport *port;
+    unsigned int port_no;
+    uint32_t queue_id;
+
+    if (arg_size != sizeof *qsr) {
+        return ofp_mkerr(OFPET_BAD_REQUEST, OFPBRC_BAD_LEN);
+    }
+    qsr = (struct ofp_queue_stats_request *) osr->body;
+
+    COVERAGE_INC(ofproto_queue_req);
+
+    port_no = ntohs(qsr->port_no);
+    queue_id = ntohl(qsr->queue_id);
+
+    cbdata.ofconn = ofconn;
+    cbdata.msg = start_ofp_stats_reply(osr, 128);
+
+    if (port_no == OFPP_ALL) {
+        HMAP_FOR_EACH (port, hmap_node, &ofproto->ports) {
+            handle_queue_stats_for_port(port, queue_id, &cbdata);
+        }
+    } else if (port_no >= ofproto->max_ports) {
+        /* Reserved or out-of-range port number: discard the reply
+         * started above and report the error. */
+        ofpbuf_delete(cbdata.msg);
+        return ofp_mkerr(OFPET_QUEUE_OP_FAILED, OFPQOFC_BAD_PORT);
+    } else {
+        port = get_port(ofproto, ofp_port_to_odp_port(port_no));
+        if (port) {
+            handle_queue_stats_for_port(port, queue_id, &cbdata);
+        }
+    }
+
+    queue_tx(cbdata.msg, ofconn, ofconn->reply_counter);
+    return 0;
+}
+
+/* Handles an OFPST_VENDOR stats request.  Only the Nicira (NX) vendor
+ * extension is recognized; the request is dispatched on its NXST_*
+ * subtype.  Returns 0 on success or an OpenFlow error code for a
+ * truncated, unknown-vendor, or unknown-subtype request. */
+static int
+handle_vendor_stats_request(struct ofconn *ofconn,
+ struct ofp_stats_request *osr, size_t arg_size)
+{
+ struct nicira_stats_msg *nsm;
+ struct ofpbuf b;
+ ovs_be32 vendor;
+
+ /* The vendor id occupies the first 32 bits of the request body. */
+ if (arg_size < 4) {
+ VLOG_WARN_RL(&rl, "truncated vendor stats request body");
+ return ofp_mkerr(OFPET_BAD_REQUEST, OFPBRC_BAD_LEN);
+ }
+
+ /* memcpy rather than a pointer cast: 'osr->body' may not be
+ * 32-bit aligned. */
+ memcpy(&vendor, osr->body, sizeof vendor);
+ if (vendor != htonl(NX_VENDOR_ID)) {
+ return ofp_mkerr(OFPET_BAD_REQUEST, OFPBRC_BAD_VENDOR);
+ }
+
+ /* Make sure the message is long enough for the Nicira header before
+ * reinterpreting it below. */
+ if (ntohs(osr->header.length) < sizeof(struct nicira_stats_msg)) {
+ VLOG_WARN_RL(&rl, "truncated Nicira stats request");
+ return ofp_mkerr(OFPET_BAD_REQUEST, OFPBRC_BAD_LEN);
+ }
+
+ /* Wrap the raw message in an ofpbuf for the subtype handlers.
+ * NOTE(review): only 'data' and 'size' are initialized; the other
+ * ofpbuf fields are left indeterminate -- confirm the NXST handlers
+ * never touch them (an initializer such as ofpbuf_use_const() would
+ * fill in the whole struct). */
+ nsm = (struct nicira_stats_msg *) osr;
+ b.data = nsm;
+ b.size = ntohs(nsm->header.length);
+ switch (ntohl(nsm->subtype)) {
+ case NXST_FLOW:
+ return handle_nxst_flow(ofconn, &b);
+
+ case NXST_AGGREGATE:
+ return handle_nxst_aggregate(ofconn, &b);
+
+ default:
+ return ofp_mkerr(OFPET_BAD_REQUEST, OFPBRC_BAD_SUBTYPE);
+ }
+}
+
+static int
+handle_stats_request(struct ofconn *ofconn, struct ofp_header *oh)
+{
+ struct ofp_stats_request *osr;
+ size_t arg_size;
+ int error;
+
+ error = check_ofp_message_array(oh, OFPT_STATS_REQUEST, sizeof *osr,
+ 1, &arg_size);
+ if (error) {
+ return error;
+ }
+ osr = (struct ofp_stats_request *) oh;
+
+ switch (ntohs(osr->type)) {
+ case OFPST_DESC:
+ return handle_desc_stats_request(ofconn, osr);
+
+ case OFPST_FLOW:
+ return handle_flow_stats_request(ofconn, osr, arg_size);
+
+ case OFPST_AGGREGATE:
+ return handle_aggregate_stats_request(ofconn, osr, arg_size);
+
+ case OFPST_TABLE:
+ return handle_table_stats_request(ofconn, osr);