return 0;
}
-static int answer_query(struct sw_flow *flow, struct odp_flow __user *ufp)
+static int answer_query(struct sw_flow *flow, u32 query_flags,
+ struct odp_flow __user *ufp)
{
struct odp_flow_stats stats;
unsigned long int flags;
spin_lock_irqsave(&flow->lock, flags);
get_stats(flow, &stats);
+
+ if (query_flags & ODPFF_ZERO_TCP_FLAGS) {
+ flow->tcp_flags = 0;
+ }
spin_unlock_irqrestore(&flow->lock, flags);
if (__copy_to_user(&ufp->stats, &stats, sizeof(struct odp_flow_stats)))
* we get completely accurate stats, but that blows our performance,
* badly. */
dp->n_flows--;
- error = answer_query(flow, ufp);
+ error = answer_query(flow, uf.flags, ufp);
flow_deferred_free(flow);
error:
if (!flow)
error = __put_user(ENOENT, &ufp->stats.error);
else
- error = answer_query(flow, ufp);
+ error = answer_query(flow, 0, ufp);
if (error)
return -EFAULT;
}
if (__copy_to_user(&ufp->key, &flow->key, sizeof flow->key))
return -EFAULT;
- error = answer_query(flow, ufp);
+ error = answer_query(flow, 0, ufp);
if (error)
return error;
__u8 reserved; /* Pad to 64 bits. */
};
+/* Flags for ODP_FLOW. */
+#define ODPFF_ZERO_TCP_FLAGS (1 << 0) /* Zero the TCP flags. */
+
struct odp_flow {
struct odp_flow_stats stats;
struct odp_flow_key key;
union odp_action *actions;
__u32 n_actions;
+ __u32 flags; /* ODPFF_* flags, e.g. ODPFF_ZERO_TCP_FLAGS. NOTE(review): new field grows this userspace struct — confirm old binaries pass it zeroed. */
};
/* Flags for ODP_FLOW_PUT. */