stats->n_bytes = flow->byte_count;
stats->ip_tos = flow->ip_tos;
stats->tcp_flags = flow->tcp_flags;
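+	/* Zero means success; when a lookup fails, query_flows() reports
+	 * ENOENT through this same field. */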
+ stats->error = 0;
}
static void clear_stats(struct sw_flow *flow)
return put_actions(flow, ufp);
}
-static int del_or_query_flow(struct datapath *dp,
- struct odp_flow __user *ufp,
- unsigned int cmd)
+static int del_flow(struct datapath *dp, struct odp_flow __user *ufp)
{
struct dp_table *table = rcu_dereference(dp->table);
struct odp_flow uf;
if (!flow)
goto error;
-	if (cmd == ODP_FLOW_DEL) {
-		/* XXX redundant lookup */
-		error = dp_table_delete(table, flow);
-		if (error)
-			goto error;
+	/* XXX redundant lookup */
+	error = dp_table_delete(table, flow);
+	if (error)
+		goto error;

-		/* XXX These statistics might lose a few packets, since other
-		 * CPUs can be using this flow. We used to synchronize_rcu()
-		 * to make sure that we get completely accurate stats, but that
-		 * blows our performance, badly. */
-		dp->n_flows--;
-		error = answer_query(flow, ufp);
-		flow_deferred_free(flow);
-	} else {
-		error = answer_query(flow, ufp);
-	}
+	/* XXX These statistics might lose a few packets, since other CPUs can
+	 * be using this flow. We used to synchronize_rcu() to make sure that
+	 * we get completely accurate stats, but that blows our performance,
+	 * badly. */
+	dp->n_flows--;
+	error = answer_query(flow, ufp);
+	flow_deferred_free(flow);
error:
return error;
}
-static int query_multiple_flows(struct datapath *dp,
- const struct odp_flowvec *flowvec)
+static int query_flows(struct datapath *dp, const struct odp_flowvec *flowvec)
{
struct dp_table *table = rcu_dereference(dp->table);
int i;
flow = dp_table_lookup(table, &uf.key);
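+		/* A flow that is not in the table is reported through its own
+		 * stats.error field rather than by clearing its stats. */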
if (!flow)
- error = __clear_user(&ufp->stats, sizeof ufp->stats);
+ error = __put_user(ENOENT, &ufp->stats.error);
else
error = answer_query(flow, ufp);
if (error)
break;
case ODP_FLOW_DEL:
- case ODP_FLOW_GET:
- err = del_or_query_flow(dp, (struct odp_flow __user *)argp,
- cmd);
+ err = del_flow(dp, (struct odp_flow __user *)argp);
break;
- case ODP_FLOW_GET_MULTIPLE:
- err = do_flowvec_ioctl(dp, argp, query_multiple_flows);
+ case ODP_FLOW_GET:
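+		/* ODP_FLOW_GET is now a flowvec operation and replaces the
+		 * old ODP_FLOW_GET_MULTIPLE ioctl. */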
+ err = do_flowvec_ioctl(dp, argp, query_flows);
break;
case ODP_FLOW_LIST:
int
dpif_flow_get(const struct dpif *dpif, struct odp_flow *flow)
{
- COVERAGE_INC(dpif_flow_query);
+ struct odp_flowvec fv;
+ int error;
+
+ COVERAGE_INC(dpif_flow_get);
+
check_rw_odp_flow(flow);
- memset(&flow->stats, 0, sizeof flow->stats);
- return do_flow_ioctl(dpif, ODP_FLOW_GET, flow, "get flow", true);
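+    /* ODP_FLOW_GET now takes an odp_flowvec, so wrap the single flow in a
+     * one-element vector. */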
+ fv.flows = flow;
+ fv.n_flows = 1;
+ error = do_ioctl(dpif, ODP_FLOW_GET, "ODP_FLOW_GET", &fv);
+ if (!error) {
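+        /* The vector ioctl itself succeeded; a failure for this particular
+         * flow (such as ENOENT) comes back in its stats.error field. */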
+ error = flow->stats.error;
+ if (error) {
+ VLOG_WARN_RL(&error_rl, "%s: ioctl(ODP_FLOW_GET) failed (%s)",
+ dpif_name(dpif), strerror(error));
+ }
+ }
+ return error;
}
int
for (i = 0; i < n; i++) {
check_rw_odp_flow(&flows[i]);
}
- return do_ioctl(dpif, ODP_FLOW_GET_MULTIPLE, "ODP_FLOW_GET_MULTIPLE",
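+    /* The separate ODP_FLOW_GET_MULTIPLE ioctl is gone; ODP_FLOW_GET itself
+     * now operates on a vector of flows. */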
+ return do_ioctl(dpif, ODP_FLOW_GET, "ODP_FLOW_GET",
&fv);
}