+ cbdata.ofconn = ofconn;
+ cbdata.msg = start_ofp_stats_reply(osr, 128);
+
+ port_no = ntohs(qsr->port_no);
+ queue_id = ntohl(qsr->queue_id);
+ if (port_no == OFPP_ALL) {
+ HMAP_FOR_EACH (port, hmap_node, &ofproto->ports) {
+ handle_queue_stats_for_port(port, queue_id, &cbdata);
+ }
+ } else if (port_no < ofproto->max_ports) {
+ port = get_port(ofproto, ofp_port_to_odp_port(port_no));
+ if (port) {
+ handle_queue_stats_for_port(port, queue_id, &cbdata);
+ }
+ } else {
+ ofpbuf_delete(cbdata.msg);
+ return ofp_mkerr(OFPET_QUEUE_OP_FAILED, OFPQOFC_BAD_PORT);
+ }
+ queue_tx(cbdata.msg, ofconn, ofconn->reply_counter);
+
+ return 0;
+}
+
/* Handles an OFPST_VENDOR stats request 'osr' whose body is 'arg_size' bytes
 * long.  Only the Nicira vendor extension (NX_VENDOR_ID) statistics subtypes
 * are understood.  Returns 0 on success or an OpenFlow error code as encoded
 * by ofp_mkerr() on failure. */
static int
handle_vendor_stats_request(struct ofconn *ofconn,
                            struct ofp_stats_request *osr, size_t arg_size)
{
    struct nicira_stats_msg *nsm;
    struct ofpbuf b;
    ovs_be32 vendor;

    /* The body must hold at least the 32-bit vendor ID. */
    if (arg_size < 4) {
        VLOG_WARN_RL(&rl, "truncated vendor stats request body");
        return ofp_mkerr(OFPET_BAD_REQUEST, OFPBRC_BAD_LEN);
    }

    /* Copy rather than cast: 'body' may not be suitably aligned for a
     * direct 32-bit load. */
    memcpy(&vendor, osr->body, sizeof vendor);
    if (vendor != htonl(NX_VENDOR_ID)) {
        return ofp_mkerr(OFPET_BAD_REQUEST, OFPBRC_BAD_VENDOR);
    }

    /* Beyond the vendor ID, a Nicira stats request must be big enough to
     * carry the full nicira_stats_msg header (including 'subtype'). */
    if (ntohs(osr->header.length) < sizeof(struct nicira_stats_msg)) {
        VLOG_WARN_RL(&rl, "truncated Nicira stats request");
        return ofp_mkerr(OFPET_BAD_REQUEST, OFPBRC_BAD_LEN);
    }

    nsm = (struct nicira_stats_msg *) osr;
    /* NOTE(review): only 'data' and 'size' of 'b' are initialized here;
     * presumably the NXST handlers read nothing else from the ofpbuf —
     * confirm against handle_nxst_flow()/handle_nxst_aggregate(). */
    b.data = nsm;
    b.size = ntohs(nsm->header.length);
    switch (ntohl(nsm->subtype)) {
    case NXST_FLOW:
        return handle_nxst_flow(ofconn, &b);

    case NXST_AGGREGATE:
        return handle_nxst_aggregate(ofconn, &b);

    default:
        return ofp_mkerr(OFPET_BAD_REQUEST, OFPBRC_BAD_SUBTYPE);
    }
}
+
+static int
+handle_stats_request(struct ofconn *ofconn, struct ofp_header *oh)
+{
+ struct ofp_stats_request *osr;
+ size_t arg_size;
+ int error;
+
+ error = check_ofp_message_array(oh, OFPT_STATS_REQUEST, sizeof *osr,
+ 1, &arg_size);
+ if (error) {
+ return error;
+ }
+ osr = (struct ofp_stats_request *) oh;
+
+ switch (ntohs(osr->type)) {
+ case OFPST_DESC:
+ return handle_desc_stats_request(ofconn, osr);
+
+ case OFPST_FLOW:
+ return handle_flow_stats_request(ofconn, osr, arg_size);
+
+ case OFPST_AGGREGATE:
+ return handle_aggregate_stats_request(ofconn, osr, arg_size);
+
+ case OFPST_TABLE:
+ return handle_table_stats_request(ofconn, osr);
+
+ case OFPST_PORT:
+ return handle_port_stats_request(ofconn, osr, arg_size);
+
+ case OFPST_QUEUE:
+ return handle_queue_stats_request(ofconn, osr, arg_size);
+
+ case OFPST_VENDOR:
+ return handle_vendor_stats_request(ofconn, osr, arg_size);
+
+ default:
+ return ofp_mkerr(OFPET_BAD_REQUEST, OFPBRC_BAD_STAT);
+ }
+}
+
/* Converts a (seconds, nanoseconds) timestamp into milliseconds.
 *
 * A 'sec' of 0 yields 0 regardless of 'nsec': a zero seconds field is kept
 * as the distinguished "never used" value. */
static long long int
msec_from_nsec(uint64_t sec, uint32_t nsec)
{
    if (!sec) {
        return 0;
    }
    return sec * 1000 + nsec / 1000000;
}
+
+static void
+facet_update_time(struct ofproto *ofproto, struct facet *facet,
+ const struct odp_flow_stats *stats)
+{
+ long long int used = msec_from_nsec(stats->used_sec, stats->used_nsec);
+ if (used > facet->used) {
+ facet->used = used;
+ if (used > facet->rule->used) {
+ facet->rule->used = used;
+ }
+ netflow_flow_update_time(ofproto->netflow, &facet->nf_flow, used);
+ }
+}
+
+/* Folds the statistics from 'stats' into the counters in 'facet'.
+ *
+ * Because of the meaning of a facet's counters, it only makes sense to do this
+ * if 'stats' are not tracked in the datapath, that is, if 'stats' represents a
+ * packet that was sent by hand or if it represents statistics that have been
+ * cleared out of the datapath. */
+static void
+facet_update_stats(struct ofproto *ofproto, struct facet *facet,
+ const struct odp_flow_stats *stats)
+{
+ if (stats->n_packets) {
+ facet_update_time(ofproto, facet, stats);
+ facet->packet_count += stats->n_packets;
+ facet->byte_count += stats->n_bytes;
+ netflow_flow_update_flags(&facet->nf_flow, stats->tcp_flags);
+ }
+}
+
/* Decoded form of an OpenFlow flow_mod request. */
struct flow_mod {
    struct cls_rule cr;         /* Flow to match. */
    ovs_be64 cookie;            /* Controller-issued identifier, stored in the
                                 * rule's flow_cookie. */
    uint16_t command;           /* One of OFPFC_*. */
    uint16_t idle_timeout;      /* Rule expiration: idle timeout. */
    uint16_t hard_timeout;      /* Rule expiration: hard timeout. */
    uint32_t buffer_id;         /* Buffered packet to apply, or UINT32_MAX for
                                 * none. */
    uint16_t out_port;          /* For deletes: OFPP_NONE or required output
                                 * port (host byte order; converted with
                                 * htons() before matching). */
    uint16_t flags;             /* OFPFF_* flags. */
    union ofp_action *actions;  /* Actions for matching packets. */
    size_t n_actions;           /* Number of elements in 'actions'. */
};
+
/* Implements OFPFC_ADD and the cases for OFPFC_MODIFY and OFPFC_MODIFY_STRICT
 * in which no matching flow already exists in the flow table.
 *
 * Adds the flow specified by 'fm' to ofconn->ofproto's flow table.  Returns 0
 * on success or an OpenFlow error code as encoded by ofp_mkerr() on failure.
 *
 * 'ofconn' is used to retrieve the packet buffer specified in fm->buffer_id,
 * if any.  Note that even when pktbuf_retrieve() fails, the flow is still
 * inserted; only the error code is returned to the caller. */
static int
add_flow(struct ofconn *ofconn, struct flow_mod *fm)
{
    struct ofproto *p = ofconn->ofproto;
    struct ofpbuf *packet;
    struct rule *rule;
    uint16_t in_port;
    int error;

    /* OFPFF_CHECK_OVERLAP: refuse to add a flow that overlaps an existing
     * one at the same priority. */
    if (fm->flags & OFPFF_CHECK_OVERLAP
        && classifier_rule_overlaps(&p->cls, &fm->cr)) {
        return ofp_mkerr(OFPET_FLOW_MOD_FAILED, OFPFMFC_OVERLAP);
    }

    error = 0;
    if (fm->buffer_id != UINT32_MAX) {
        /* NOTE(review): on failure 'packet' is presumably left null by
         * pktbuf_retrieve() so the rule_execute() below is skipped —
         * confirm pktbuf_retrieve()'s contract. */
        error = pktbuf_retrieve(ofconn->pktbuf, fm->buffer_id,
                                &packet, &in_port);
    } else {
        packet = NULL;
        in_port = UINT16_MAX;
    }

    rule = rule_create(&fm->cr, fm->actions, fm->n_actions,
                       fm->idle_timeout, fm->hard_timeout, fm->cookie,
                       fm->flags & OFPFF_SEND_FLOW_REM);
    rule_insert(p, rule);
    if (packet) {
        rule_execute(p, rule, in_port, packet);
    }
    return error;
}
+
+static struct rule *
+find_flow_strict(struct ofproto *p, const struct flow_mod *fm)
+{
+ return rule_from_cls_rule(classifier_find_rule_exactly(&p->cls, &fm->cr));
+}
+
+static int
+send_buffered_packet(struct ofconn *ofconn,
+ struct rule *rule, uint32_t buffer_id)
+{
+ struct ofpbuf *packet;
+ uint16_t in_port;
+ int error;
+
+ if (buffer_id == UINT32_MAX) {
+ return 0;
+ }
+
+ error = pktbuf_retrieve(ofconn->pktbuf, buffer_id, &packet, &in_port);
+ if (error) {
+ return error;
+ }
+
+ rule_execute(ofconn->ofproto, rule, in_port, packet);
+
+ return 0;
+}
+\f
+/* OFPFC_MODIFY and OFPFC_MODIFY_STRICT. */
+
/* Callback data formerly used while iterating flows for OFPFC_MODIFY.
 * NOTE(review): nothing in the visible code references this struct — the
 * modify path now iterates with cls_cursor directly.  Candidate for
 * removal; confirm there are no remaining references elsewhere. */
struct modify_flows_cbdata {
    struct ofproto *ofproto;    /* Bridge being modified. */
    const struct flow_mod *fm;  /* The flow_mod request being applied. */
    struct rule *match;         /* Last rule that matched, if any. */
};
+
+static int modify_flow(struct ofproto *, const struct flow_mod *,
+ struct rule *);
+
+/* Implements OFPFC_MODIFY. Returns 0 on success or an OpenFlow error code as
+ * encoded by ofp_mkerr() on failure.
+ *
+ * 'ofconn' is used to retrieve the packet buffer specified in ofm->buffer_id,
+ * if any. */
+static int
+modify_flows_loose(struct ofconn *ofconn, struct flow_mod *fm)
+{
+ struct ofproto *p = ofconn->ofproto;
+ struct rule *match = NULL;
+ struct cls_cursor cursor;
+ struct rule *rule;
+
+ cls_cursor_init(&cursor, &p->cls, &fm->cr);
+ CLS_CURSOR_FOR_EACH (rule, cr, &cursor) {
+ if (!rule_is_hidden(rule)) {
+ match = rule;
+ modify_flow(p, fm, rule);
+ }
+ }
+
+ if (match) {
+ /* This credits the packet to whichever flow happened to match last.
+ * That's weird. Maybe we should do a lookup for the flow that
+ * actually matches the packet? Who knows. */
+ send_buffered_packet(ofconn, match, fm->buffer_id);
+ return 0;
+ } else {
+ return add_flow(ofconn, fm);
+ }
+}
+
+/* Implements OFPFC_MODIFY_STRICT. Returns 0 on success or an OpenFlow error
+ * code as encoded by ofp_mkerr() on failure.
+ *
+ * 'ofconn' is used to retrieve the packet buffer specified in ofm->buffer_id,
+ * if any. */
+static int
+modify_flow_strict(struct ofconn *ofconn, struct flow_mod *fm)
+{
+ struct ofproto *p = ofconn->ofproto;
+ struct rule *rule = find_flow_strict(p, fm);
+ if (rule && !rule_is_hidden(rule)) {
+ modify_flow(p, fm, rule);
+ return send_buffered_packet(ofconn, rule, fm->buffer_id);
+ } else {
+ return add_flow(ofconn, fm);
+ }
+}
+
+/* Implements core of OFPFC_MODIFY and OFPFC_MODIFY_STRICT where 'rule' has
+ * been identified as a flow in 'p''s flow table to be modified, by changing
+ * the rule's actions to match those in 'ofm' (which is followed by 'n_actions'
+ * ofp_action[] structures). */
+static int
+modify_flow(struct ofproto *p, const struct flow_mod *fm, struct rule *rule)
+{
+ size_t actions_len = fm->n_actions * sizeof *rule->actions;
+
+ rule->flow_cookie = fm->cookie;
+
+ /* If the actions are the same, do nothing. */
+ if (fm->n_actions == rule->n_actions
+ && (!fm->n_actions
+ || !memcmp(fm->actions, rule->actions, actions_len))) {
+ return 0;
+ }
+
+ /* Replace actions. */
+ free(rule->actions);
+ rule->actions = fm->n_actions ? xmemdup(fm->actions, actions_len) : NULL;
+ rule->n_actions = fm->n_actions;
+
+ p->need_revalidate = true;
+
+ return 0;
+}
+\f
+/* OFPFC_DELETE implementation. */
+
+static void delete_flow(struct ofproto *, struct rule *, ovs_be16 out_port);
+
+/* Implements OFPFC_DELETE. */
+static void
+delete_flows_loose(struct ofproto *p, const struct flow_mod *fm)
+{
+ struct rule *rule, *next_rule;
+ struct cls_cursor cursor;
+
+ cls_cursor_init(&cursor, &p->cls, &fm->cr);
+ CLS_CURSOR_FOR_EACH_SAFE (rule, next_rule, cr, &cursor) {
+ delete_flow(p, rule, htons(fm->out_port));
+ }
+}
+
+/* Implements OFPFC_DELETE_STRICT. */
+static void
+delete_flow_strict(struct ofproto *p, struct flow_mod *fm)
+{
+ struct rule *rule = find_flow_strict(p, fm);
+ if (rule) {
+ delete_flow(p, rule, htons(fm->out_port));
+ }
+}
+
+/* Implements core of OFPFC_DELETE and OFPFC_DELETE_STRICT where 'rule' has
+ * been identified as a flow to delete from 'p''s flow table, by deleting the
+ * flow and sending out a OFPT_FLOW_REMOVED message to any interested
+ * controller.
+ *
+ * Will not delete 'rule' if it is hidden. Will delete 'rule' only if
+ * 'out_port' is htons(OFPP_NONE) or if 'rule' actually outputs to the
+ * specified 'out_port'. */
+static void
+delete_flow(struct ofproto *p, struct rule *rule, ovs_be16 out_port)
+{
+ if (rule_is_hidden(rule)) {
+ return;
+ }
+
+ if (out_port != htons(OFPP_NONE) && !rule_has_out_port(rule, out_port)) {
+ return;
+ }
+
+ rule_send_removed(p, rule, OFPRR_DELETE);
+ rule_remove(p, rule);
+}
+\f
+static int
+flow_mod_core(struct ofconn *ofconn, struct flow_mod *fm)
+{
+ struct ofproto *p = ofconn->ofproto;
+ int error;
+
+ error = reject_slave_controller(ofconn, "flow_mod");
+ if (error) {
+ return error;
+ }
+
+ error = validate_actions(fm->actions, fm->n_actions,
+ &fm->cr.flow, p->max_ports);
+ if (error) {
+ return error;
+ }
+
+ /* We do not support the emergency flow cache. It will hopefully
+ * get dropped from OpenFlow in the near future. */
+ if (fm->flags & OFPFF_EMERG) {
+ /* There isn't a good fit for an error code, so just state that the
+ * flow table is full. */
+ return ofp_mkerr(OFPET_FLOW_MOD_FAILED, OFPFMFC_ALL_TABLES_FULL);
+ }
+
+ switch (fm->command) {
+ case OFPFC_ADD:
+ return add_flow(ofconn, fm);