+ cbdata.ofconn = ofconn;
+ cbdata.msg = start_ofp_stats_reply(osr, 128);
+
+ port_no = ntohs(qsr->port_no);
+ queue_id = ntohl(qsr->queue_id);
+ if (port_no == OFPP_ALL) {
+ HMAP_FOR_EACH (port, hmap_node, &ofproto->ports) {
+ handle_queue_stats_for_port(port, queue_id, &cbdata);
+ }
+ } else if (port_no < ofproto->max_ports) {
+ port = get_port(ofproto, ofp_port_to_odp_port(port_no));
+ if (port) {
+ handle_queue_stats_for_port(port, queue_id, &cbdata);
+ }
+ } else {
+ ofpbuf_delete(cbdata.msg);
+ return ofp_mkerr(OFPET_QUEUE_OP_FAILED, OFPQOFC_BAD_PORT);
+ }
+ queue_tx(cbdata.msg, ofconn, ofconn->reply_counter);
+
+ return 0;
+}
+
/* Handles an OFPST_VENDOR stats request whose 'arg_size' bytes of body follow
 * 'osr'.  Only the Nicira vendor extension is recognized; its requests are
 * dispatched by 32-bit subtype.  Returns 0 on success or an OpenFlow error
 * code as encoded by ofp_mkerr() on failure. */
static int
handle_vendor_stats_request(struct ofconn *ofconn,
                            struct ofp_stats_request *osr, size_t arg_size)
{
    struct nicira_stats_msg *nsm;
    struct ofpbuf b;
    ovs_be32 vendor;

    /* The body must contain at least the 32-bit vendor ID. */
    if (arg_size < 4) {
        VLOG_WARN_RL(&rl, "truncated vendor stats request body");
        return ofp_mkerr(OFPET_BAD_REQUEST, OFPBRC_BAD_LEN);
    }

    /* memcpy() rather than a pointer cast: 'osr->body' may not be suitably
     * aligned for a direct 32-bit load. */
    memcpy(&vendor, osr->body, sizeof vendor);
    if (vendor != htonl(NX_VENDOR_ID)) {
        return ofp_mkerr(OFPET_BAD_REQUEST, OFPBRC_BAD_VENDOR);
    }

    /* Re-check length against the larger Nicira stats header before reading
     * 'subtype' below. */
    if (ntohs(osr->header.length) < sizeof(struct nicira_stats_msg)) {
        VLOG_WARN_RL(&rl, "truncated Nicira stats request");
        return ofp_mkerr(OFPET_BAD_REQUEST, OFPBRC_BAD_LEN);
    }

    /* Wrap the full message in an ofpbuf for the subtype handlers.  Only
     * 'data' and 'size' are initialized here.
     * NOTE(review): assumes handle_nxst_flow()/handle_nxst_aggregate() read
     * only b.data and b.size -- confirm against their definitions. */
    nsm = (struct nicira_stats_msg *) osr;
    b.data = nsm;
    b.size = ntohs(nsm->header.length);
    switch (ntohl(nsm->subtype)) {
    case NXST_FLOW:
        return handle_nxst_flow(ofconn, &b);

    case NXST_AGGREGATE:
        return handle_nxst_aggregate(ofconn, &b);

    default:
        return ofp_mkerr(OFPET_BAD_REQUEST, OFPBRC_BAD_SUBTYPE);
    }
}
+
+static int
+handle_stats_request(struct ofconn *ofconn, struct ofp_header *oh)
+{
+ struct ofp_stats_request *osr;
+ size_t arg_size;
+ int error;
+
+ error = check_ofp_message_array(oh, OFPT_STATS_REQUEST, sizeof *osr,
+ 1, &arg_size);
+ if (error) {
+ return error;
+ }
+ osr = (struct ofp_stats_request *) oh;
+
+ switch (ntohs(osr->type)) {
+ case OFPST_DESC:
+ return handle_desc_stats_request(ofconn, osr);
+
+ case OFPST_FLOW:
+ return handle_flow_stats_request(ofconn, osr, arg_size);
+
+ case OFPST_AGGREGATE:
+ return handle_aggregate_stats_request(ofconn, osr, arg_size);
+
+ case OFPST_TABLE:
+ return handle_table_stats_request(ofconn, osr);
+
+ case OFPST_PORT:
+ return handle_port_stats_request(ofconn, osr, arg_size);
+
+ case OFPST_QUEUE:
+ return handle_queue_stats_request(ofconn, osr, arg_size);
+
+ case OFPST_VENDOR:
+ return handle_vendor_stats_request(ofconn, osr, arg_size);
+
+ default:
+ return ofp_mkerr(OFPET_BAD_REQUEST, OFPBRC_BAD_STAT);
+ }
+}
+
/* Converts a timestamp expressed as 'sec' seconds plus 'nsec' nanoseconds
 * into milliseconds.  A 'sec' of zero yields 0 regardless of 'nsec'. */
static long long int
msec_from_nsec(uint64_t sec, uint32_t nsec)
{
    if (!sec) {
        return 0;
    }
    return sec * 1000 + nsec / 1000000;
}
+
+static void
+facet_update_time(struct ofproto *ofproto, struct facet *facet,
+ const struct odp_flow_stats *stats)
+{
+ long long int used = msec_from_nsec(stats->used_sec, stats->used_nsec);
+ if (used > facet->used) {
+ facet->used = used;
+ if (used > facet->rule->used) {
+ facet->rule->used = used;
+ }
+ netflow_flow_update_time(ofproto->netflow, &facet->nf_flow, used);
+ }
+}
+
+/* Folds the statistics from 'stats' into the counters in 'facet'.
+ *
+ * Because of the meaning of a facet's counters, it only makes sense to do this
+ * if 'stats' are not tracked in the datapath, that is, if 'stats' represents a
+ * packet that was sent by hand or if it represents statistics that have been
+ * cleared out of the datapath. */
+static void
+facet_update_stats(struct ofproto *ofproto, struct facet *facet,
+ const struct odp_flow_stats *stats)
+{
+ if (stats->n_packets) {
+ facet_update_time(ofproto, facet, stats);
+ facet->packet_count += stats->n_packets;
+ facet->byte_count += stats->n_bytes;
+ netflow_flow_update_flags(&facet->nf_flow, stats->tcp_flags);
+ }
+}
+
/* Decoded form of an OpenFlow flow mod request. */
struct flow_mod {
    struct cls_rule cr;        /* Flow to match, with priority. */
    ovs_be64 cookie;           /* Controller-chosen cookie, passed to
                                * rule_create() (see add_flow()). */
    uint16_t command;          /* OFPFC_ADD, OFPFC_MODIFY, etc. */
    uint16_t idle_timeout;     /* Passed through to rule_create(). */
    uint16_t hard_timeout;     /* Passed through to rule_create(). */
    uint32_t buffer_id;        /* Buffered packet to apply to the new flow,
                                * or UINT32_MAX for none (see add_flow()). */
    uint16_t out_port;         /* NOTE(review): presumably an output-port
                                * filter for delete/modify commands -- confirm
                                * against the handlers that read it. */
    uint16_t flags;            /* OFPFF_* flags, e.g. OFPFF_CHECK_OVERLAP,
                                * OFPFF_SEND_FLOW_REM. */
    union ofp_action *actions; /* Actions; 'n_actions' elements.  Ownership
                                * not shown here -- TODO confirm who frees. */
    size_t n_actions;          /* Number of elements in 'actions'. */
};
+
+/* Implements OFPFC_ADD and the cases for OFPFC_MODIFY and OFPFC_MODIFY_STRICT
+ * in which no matching flow already exists in the flow table.
+ *
+ * Adds the flow specified by 'ofm', which is followed by 'n_actions'
+ * ofp_actions, to ofconn->ofproto's flow table. Returns 0 on success or an
+ * OpenFlow error code as encoded by ofp_mkerr() on failure.
+ *
+ * 'ofconn' is used to retrieve the packet buffer specified in ofm->buffer_id,
+ * if any. */
+static int
+add_flow(struct ofconn *ofconn, struct flow_mod *fm)
+{
+ struct ofproto *p = ofconn->ofproto;
+ struct ofpbuf *packet;
+ struct rule *rule;
+ uint16_t in_port;
+ int error;
+
+ if (fm->flags & OFPFF_CHECK_OVERLAP
+ && classifier_rule_overlaps(&p->cls, &fm->cr)) {
+ return ofp_mkerr(OFPET_FLOW_MOD_FAILED, OFPFMFC_OVERLAP);
+ }
+
+ error = 0;
+ if (fm->buffer_id != UINT32_MAX) {
+ error = pktbuf_retrieve(ofconn->pktbuf, fm->buffer_id,
+ &packet, &in_port);
+ } else {
+ packet = NULL;
+ in_port = UINT16_MAX;
+ }
+
+ rule = rule_create(&fm->cr, fm->actions, fm->n_actions,
+ fm->idle_timeout, fm->hard_timeout, fm->cookie,
+ fm->flags & OFPFF_SEND_FLOW_REM);
+ rule_insert(p, rule);
+ if (packet) {
+ rule_execute(p, rule, in_port, packet);
+ }
+ return error;
+}
+
+static struct rule *
+find_flow_strict(struct ofproto *p, const struct flow_mod *fm)
+{
+ return rule_from_cls_rule(classifier_find_rule_exactly(&p->cls, &fm->cr));
+}
+
+static int
+send_buffered_packet(struct ofconn *ofconn,
+ struct rule *rule, uint32_t buffer_id)
+{
+ struct ofpbuf *packet;
+ uint16_t in_port;
+ int error;
+
+ if (buffer_id == UINT32_MAX) {
+ return 0;