mirror = ofproto->mirrors[idx] = xzalloc(sizeof *mirror);
mirror->ofproto = ofproto;
mirror->idx = idx;
+ mirror->aux = aux;
mirror->out_vlan = -1;
mirror->name = NULL;
}
* N_BUCKETS buckets whose width is BUCKET_WIDTH msecs each. Each facet
* that is installed in the kernel gets dropped in the appropriate bucket.
* After the histogram has been built, we compute the cutoff so that only
- * the most-recently-used 1% of facets (but at least 1000 flows) are kept
- * cached. At least the most-recently-used bucket of facets is kept, so
- * actually an arbitrary number of facets can be kept in any given
- * expiration run (though the next run will delete most of those unless
- * they receive additional data).
+ * the most-recently-used 1% of facets (but at least
+ * ofproto->up.flow_eviction_threshold flows) are kept cached. At least
+ * the most-recently-used bucket of facets is kept, so actually an
+ * arbitrary number of facets can be kept in any given expiration run
+ * (though the next run will delete most of those unless they receive
+ * additional data).
*
* This requires a second pass through the facets, in addition to the pass
* made by update_stats(), because the former function never looks
int i;
total = hmap_count(&ofproto->facets);
- if (total <= 1000) {
+ if (total <= ofproto->up.flow_eviction_threshold) {
return N_BUCKETS * BUCKET_WIDTH;
}
subtotal = bucket = 0;
do {
subtotal += buckets[bucket++];
- } while (bucket < N_BUCKETS && subtotal < MAX(1000, total / 100));
+ } while (bucket < N_BUCKETS &&
+ subtotal < MAX(ofproto->up.flow_eviction_threshold, total / 100));
if (VLOG_IS_DBG_ENABLED()) {
struct ds s;
struct ofpbuf *packet)
{
if (actions_len == NLA_ALIGN(NLA_HDRLEN + sizeof(uint64_t))
- && odp_actions->nla_type == ODP_ACTION_ATTR_CONTROLLER) {
+ && odp_actions->nla_type == ODP_ACTION_ATTR_USERSPACE) {
/* As an optimization, avoid a round-trip from userspace to kernel to
* userspace. This also avoids possibly filling up kernel packet
* buffers along the way. */
static void do_xlate_actions(const union ofp_action *in, size_t n_in,
struct action_xlate_ctx *ctx);
-static bool xlate_normal(struct action_xlate_ctx *);
+static void xlate_normal(struct action_xlate_ctx *);
static void
commit_odp_actions(struct action_xlate_ctx *ctx)
break;
case OFPP_CONTROLLER:
commit_odp_actions(ctx);
- nl_msg_put_u64(ctx->odp_actions, ODP_ACTION_ATTR_CONTROLLER, max_len);
+ nl_msg_put_u64(ctx->odp_actions, ODP_ACTION_ATTR_USERSPACE, max_len);
break;
case OFPP_LOCAL:
add_output_action(ctx, OFPP_LOCAL);
/* Returns true if 'bundle' trunks VLAN 'vlan': 'bundle' must not be an
 * access port (bundle->vlan < 0) and 'vlan' must be enabled in its trunk
 * set. NOTE(review): the '+' lines of this hunk open-code the former
 * vlan_bitmap_contains() call, making the "null 'trunks' bitmap means
 * trunk every VLAN" convention explicit — presumably preserving the old
 * helper's semantics; confirm against lib/vlan-bitmap. */
static bool
ofbundle_trunks_vlan(const struct ofbundle *bundle, uint16_t vlan)
{
- return bundle->vlan < 0 && vlan_bitmap_contains(bundle->trunks, vlan);
+ return (bundle->vlan < 0
+ && (!bundle->trunks || bitmap_is_set(bundle->trunks, vlan)));
}
static bool
/* Returns true if mirror 'm' selects VLAN 'vlan' for mirroring.
 * NOTE(review): as in the ofbundle_trunks_vlan hunk of this patch, the
 * '+' line replaces vlan_bitmap_contains() with an explicit check where
 * a null 'vlans' bitmap means "mirror all VLANs" — assumed equivalent to
 * the old helper; TODO confirm. Also note 'vlan' is a signed int here:
 * callers are assumed to pass a valid 0..4095 value before indexing the
 * bitmap. */
static bool
vlan_is_mirrored(const struct ofmirror *m, int vlan)
{
- return vlan_bitmap_contains(m->vlans, vlan);
+ return !m->vlans || bitmap_is_set(m->vlans, vlan);
}
/* Returns true if a packet with Ethernet destination MAC 'dst' may be mirrored
return true;
}
-/* If the composed actions may be applied to any packet in the given 'flow',
- * returns true. Otherwise, the actions should only be applied to 'packet', or
- * not at all, if 'packet' was NULL. */
-static bool
+static void
xlate_normal(struct action_xlate_ctx *ctx)
{
struct ofbundle *in_bundle;
* of time where we could learn from a packet reflected on a bond and
* blackhole packets before the learning table is updated to reflect
* the correct port. */
- return false;
+ ctx->may_set_up_flow = false;
+ return;
} else {
out_bundle = OFBUNDLE_FLOOD;
}
if (in_bundle) {
compose_actions(ctx, vlan, in_bundle, out_bundle);
}
-
- return true;
}
\f
static bool