X-Git-Url: https://pintos-os.org/cgi-bin/gitweb.cgi?a=blobdiff_plain;f=datapath%2Fvport.c;h=a6b686c85928c1ea389b447f8a997d1fa2a02310;hb=77912ae717298cb06544f1bebd9701d0b3c2f2f0;hp=bf6297e90a28e3eec6a1cb15c075a0093dda89d3;hpb=ff8d7a5e81625bbb13d33ca73888fc848b02db83;p=openvswitch diff --git a/datapath/vport.c b/datapath/vport.c index bf6297e9..a6b686c8 100644 --- a/datapath/vport.c +++ b/datapath/vport.c @@ -129,7 +129,7 @@ struct vport *vport_locate(const char *name) struct hlist_node *node; hlist_for_each_entry_rcu(vport, node, bucket, hash_node) - if (!strcmp(name, vport_get_name(vport))) + if (!strcmp(name, vport->ops->get_name(vport))) return vport; return NULL; @@ -159,7 +159,8 @@ static struct kobj_type brport_ktype = { * vport_priv(). vports that are no longer needed should be released with * vport_free(). */ -struct vport *vport_alloc(int priv_size, const struct vport_ops *ops, const struct vport_parms *parms) +struct vport *vport_alloc(int priv_size, const struct vport_ops *ops, + const struct vport_parms *parms) { struct vport *vport; size_t alloc_size; @@ -176,7 +177,7 @@ struct vport *vport_alloc(int priv_size, const struct vport_ops *ops, const stru vport->dp = parms->dp; vport->port_no = parms->port_no; - atomic_set(&vport->sflow_pool, 0); + vport->upcall_pid = parms->upcall_pid; vport->ops = ops; /* Initialize kobject for bridge. This will be added as @@ -184,13 +185,11 @@ struct vport *vport_alloc(int priv_size, const struct vport_ops *ops, const stru vport->kobj.kset = NULL; kobject_init(&vport->kobj, &brport_ktype); - if (vport->ops->flags & VPORT_F_GEN_STATS) { - vport->percpu_stats = alloc_percpu(struct vport_percpu_stats); - if (!vport->percpu_stats) - return ERR_PTR(-ENOMEM); + vport->percpu_stats = alloc_percpu(struct vport_percpu_stats); + if (!vport->percpu_stats) + return ERR_PTR(-ENOMEM); - spin_lock_init(&vport->stats_lock); - } + spin_lock_init(&vport->stats_lock); return vport; } @@ -207,8 +206,7 @@ struct vport *vport_alloc(int priv_size, const struct vport_ops *ops, const stru */ void vport_free(struct vport *vport) { - if (vport->ops->flags & VPORT_F_GEN_STATS) - free_percpu(vport->percpu_stats); + free_percpu(vport->percpu_stats); kobject_put(&vport->kobj); } @@ -238,7 +236,7 @@ struct vport *vport_add(const struct vport_parms *parms) } hlist_add_head_rcu(&vport->hash_node, - hash_bucket(vport_get_name(vport))); + hash_bucket(vport->ops->get_name(vport))); return vport; } } @@ -275,43 +273,13 @@ int vport_set_options(struct vport *vport, struct nlattr *options) * Detaches @vport from its datapath and destroys it. It is possible to fail * for reasons such as lack of memory. RTNL lock must be held. */ -int vport_del(struct vport *vport) +void vport_del(struct vport *vport) { ASSERT_RTNL(); hlist_del_rcu(&vport->hash_node); - return vport->ops->destroy(vport); -} - -/** - * vport_set_mtu - set device MTU (for kernel callers) - * - * @vport: vport on which to set MTU. - * @mtu: New MTU. - * - * Sets the MTU of the given device. Some devices may not support setting the - * MTU, in which case the result will always be -EOPNOTSUPP. RTNL lock must - * be held. 
- */ -int vport_set_mtu(struct vport *vport, int mtu) -{ - ASSERT_RTNL(); - - if (mtu < 68) - return -EINVAL; - - if (vport->ops->set_mtu) { - int ret; - - ret = vport->ops->set_mtu(vport, mtu); - - if (!ret && !is_internal_vport(vport)) - set_internal_devs_mtu(vport->dp); - - return ret; - } else - return -EOPNOTSUPP; + vport->ops->destroy(vport); } /** @@ -350,84 +318,13 @@ int vport_set_addr(struct vport *vport, const unsigned char *addr) * * Must be called with RTNL lock. */ -int vport_set_stats(struct vport *vport, struct rtnl_link_stats64 *stats) +void vport_set_stats(struct vport *vport, struct ovs_vport_stats *stats) { ASSERT_RTNL(); - if (vport->ops->flags & VPORT_F_GEN_STATS) { - spin_lock_bh(&vport->stats_lock); - vport->offset_stats = *stats; - spin_unlock_bh(&vport->stats_lock); - - return 0; - } else - return -EOPNOTSUPP; -} - -/** - * vport_get_name - retrieve device name - * - * @vport: vport from which to retrieve the name. - * - * Retrieves the name of the given device. Either RTNL lock or rcu_read_lock - * must be held for the entire duration that the name is in use. - */ -const char *vport_get_name(const struct vport *vport) -{ - return vport->ops->get_name(vport); -} - -/** - * vport_get_type - retrieve device type - * - * @vport: vport from which to retrieve the type. - * - * Retrieves the type of the given device. - */ -enum ovs_vport_type vport_get_type(const struct vport *vport) -{ - return vport->ops->type; -} - -/** - * vport_get_addr - retrieve device Ethernet address (for kernel callers) - * - * @vport: vport from which to retrieve the Ethernet address. - * - * Retrieves the Ethernet address of the given device. Either RTNL lock or - * rcu_read_lock must be held for the entire duration that the Ethernet address - * is in use. - */ -const unsigned char *vport_get_addr(const struct vport *vport) -{ - return vport->ops->get_addr(vport); -} - -/** - * vport_get_kobj - retrieve associated kobj - * - * @vport: vport from which to retrieve the associated kobj - * - * Retrieves the associated kobj or null if no kobj. The returned kobj is - * valid for as long as the vport exists. - */ -struct kobject *vport_get_kobj(const struct vport *vport) -{ - if (vport->ops->get_kobj) - return vport->ops->get_kobj(vport); - else - return NULL; -} - -static int vport_call_get_stats(struct vport *vport, struct rtnl_link_stats64 *stats) -{ - int err; - - rcu_read_lock(); - err = vport->ops->get_stats(vport, stats); - rcu_read_unlock(); - - return err; + spin_lock_bh(&vport->stats_lock); + vport->offset_stats = *stats; + spin_unlock_bh(&vport->stats_lock); } /** @@ -440,19 +337,20 @@ static int vport_call_get_stats(struct vport *vport, struct rtnl_link_stats64 *s * * Must be called with RTNL lock or rcu_read_lock. */ -int vport_get_stats(struct vport *vport, struct rtnl_link_stats64 *stats) +void vport_get_stats(struct vport *vport, struct ovs_vport_stats *stats) { int i; - if (!(vport->ops->flags & VPORT_F_GEN_STATS)) - return vport_call_get_stats(vport, stats); - /* We potentially have 3 sources of stats that need to be * combined: those we have collected (split into err_stats and * percpu_stats), offset_stats from set_stats(), and device - * error stats from get_stats() (for errors that happen + * error stats from netdev->get_stats() (for errors that happen * downstream and therefore aren't reported through our - * vport_record_error() function). */ + * vport_record_error() function). + * Stats from first two sources are merged and reported by ovs over + * OVS_VPORT_ATTR_STATS. 
+ * netdev-stats can be directly read over netlink-ioctl. + */ spin_lock_bh(&vport->stats_lock); @@ -465,35 +363,6 @@ int vport_get_stats(struct vport *vport, struct rtnl_link_stats64 *stats) spin_unlock_bh(&vport->stats_lock); - if (vport->ops->get_stats) { - struct rtnl_link_stats64 dev_stats; - int err; - - err = vport_call_get_stats(vport, &dev_stats); - if (err) - return err; - - stats->rx_errors += dev_stats.rx_errors; - stats->tx_errors += dev_stats.tx_errors; - stats->rx_dropped += dev_stats.rx_dropped; - stats->tx_dropped += dev_stats.tx_dropped; - stats->multicast += dev_stats.multicast; - stats->collisions += dev_stats.collisions; - stats->rx_length_errors += dev_stats.rx_length_errors; - stats->rx_over_errors += dev_stats.rx_over_errors; - stats->rx_crc_errors += dev_stats.rx_crc_errors; - stats->rx_frame_errors += dev_stats.rx_frame_errors; - stats->rx_fifo_errors += dev_stats.rx_fifo_errors; - stats->rx_missed_errors += dev_stats.rx_missed_errors; - stats->tx_aborted_errors += dev_stats.tx_aborted_errors; - stats->tx_carrier_errors += dev_stats.tx_carrier_errors; - stats->tx_fifo_errors += dev_stats.tx_fifo_errors; - stats->tx_heartbeat_errors += dev_stats.tx_heartbeat_errors; - stats->tx_window_errors += dev_stats.tx_window_errors; - stats->rx_compressed += dev_stats.rx_compressed; - stats->tx_compressed += dev_stats.tx_compressed; - } - for_each_possible_cpu(i) { const struct vport_percpu_stats *percpu_stats; struct vport_percpu_stats local_stats; @@ -511,85 +380,6 @@ int vport_get_stats(struct vport *vport, struct rtnl_link_stats64 *stats) stats->tx_bytes += local_stats.tx_bytes; stats->tx_packets += local_stats.tx_packets; } - - return 0; -} - -/** - * vport_get_flags - retrieve device flags - * - * @vport: vport from which to retrieve the flags - * - * Retrieves the flags of the given device. - * - * Must be called with RTNL lock or rcu_read_lock. - */ -unsigned vport_get_flags(const struct vport *vport) -{ - return vport->ops->get_dev_flags(vport); -} - -/** - * vport_get_flags - check whether device is running - * - * @vport: vport on which to check status. - * - * Checks whether the given device is running. - * - * Must be called with RTNL lock or rcu_read_lock. - */ -int vport_is_running(const struct vport *vport) -{ - return vport->ops->is_running(vport); -} - -/** - * vport_get_flags - retrieve device operating state - * - * @vport: vport from which to check status - * - * Retrieves the RFC2863 operstate of the given device. - * - * Must be called with RTNL lock or rcu_read_lock. - */ -unsigned char vport_get_operstate(const struct vport *vport) -{ - return vport->ops->get_operstate(vport); -} - -/** - * vport_get_ifindex - retrieve device system interface index - * - * @vport: vport from which to retrieve index - * - * Retrieves the system interface index of the given device or 0 if - * the device does not have one (in the case of virtual ports). - * Returns a negative index on error. - * - * Must be called with RTNL lock or rcu_read_lock. - */ -int vport_get_ifindex(const struct vport *vport) -{ - if (vport->ops->get_ifindex) - return vport->ops->get_ifindex(vport); - else - return 0; -} - -/** - * vport_get_mtu - retrieve device MTU - * - * @vport: vport from which to retrieve MTU - * - * Retrieves the MTU of the given device. Returns 0 if @vport does not have an - * MTU (as e.g. some tunnels do not). Either RTNL lock or rcu_read_lock must - * be held. 
- */ -int vport_get_mtu(const struct vport *vport) -{ - if (!vport->ops->get_mtu) - return 0; - return vport->ops->get_mtu(vport); } /** @@ -640,19 +430,14 @@ int vport_get_options(const struct vport *vport, struct sk_buff *skb) */ void vport_receive(struct vport *vport, struct sk_buff *skb) { - if (vport->ops->flags & VPORT_F_GEN_STATS) { - struct vport_percpu_stats *stats; - - local_bh_disable(); - stats = per_cpu_ptr(vport->percpu_stats, smp_processor_id()); + struct vport_percpu_stats *stats; - write_seqcount_begin(&stats->seqlock); - stats->rx_packets++; - stats->rx_bytes += skb->len; - write_seqcount_end(&stats->seqlock); + stats = per_cpu_ptr(vport->percpu_stats, smp_processor_id()); - local_bh_enable(); - } + write_seqcount_begin(&stats->seqlock); + stats->rx_packets++; + stats->rx_bytes += skb->len; + write_seqcount_end(&stats->seqlock); if (!(vport->ops->flags & VPORT_F_FLOW)) OVS_CB(skb)->flow = NULL; @@ -676,20 +461,16 @@ int vport_send(struct vport *vport, struct sk_buff *skb) { int sent = vport->ops->send(vport, skb); - if (vport->ops->flags & VPORT_F_GEN_STATS && sent > 0) { + if (likely(sent)) { struct vport_percpu_stats *stats; - local_bh_disable(); stats = per_cpu_ptr(vport->percpu_stats, smp_processor_id()); write_seqcount_begin(&stats->seqlock); stats->tx_packets++; stats->tx_bytes += sent; write_seqcount_end(&stats->seqlock); - - local_bh_enable(); } - return sent; } @@ -704,28 +485,25 @@ int vport_send(struct vport *vport, struct sk_buff *skb) */ void vport_record_error(struct vport *vport, enum vport_err_type err_type) { - if (vport->ops->flags & VPORT_F_GEN_STATS) { - - spin_lock_bh(&vport->stats_lock); + spin_lock(&vport->stats_lock); - switch (err_type) { - case VPORT_E_RX_DROPPED: - vport->err_stats.rx_dropped++; - break; + switch (err_type) { + case VPORT_E_RX_DROPPED: + vport->err_stats.rx_dropped++; + break; - case VPORT_E_RX_ERROR: - vport->err_stats.rx_errors++; - break; + case VPORT_E_RX_ERROR: + vport->err_stats.rx_errors++; + break; - case VPORT_E_TX_DROPPED: - vport->err_stats.tx_dropped++; - break; + case VPORT_E_TX_DROPPED: + vport->err_stats.tx_dropped++; + break; - case VPORT_E_TX_ERROR: - vport->err_stats.tx_errors++; - break; - }; + case VPORT_E_TX_ERROR: + vport->err_stats.tx_errors++; + break; + }; - spin_unlock_bh(&vport->stats_lock); - } + spin_unlock(&vport->stats_lock); }
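
Note (not part of the patch above): the hunks drop the thin vport_get_*() accessor wrappers and the VPORT_F_GEN_STATS conditionals. Every vport now gets percpu stats unconditionally, vport_del()/vport_set_stats()/vport_get_stats() return void, and the stats helpers operate on struct ovs_vport_stats rather than struct rtnl_link_stats64. Below is a minimal, illustrative sketch of what a kernel-side caller looks like after this change; example_dump_vport and the pr_info messages are hypothetical, and it assumes the declarations in datapath/vport.h as modified by the same change.

/* Illustrative sketch only, not part of the patch.  Caller must hold the
 * RTNL lock or rcu_read_lock, as required by the function comments above. */
static void example_dump_vport(struct vport *vport)
{
	struct ovs_vport_stats stats;

	/* The vport_get_name()/vport_get_type() wrappers were removed;
	 * callers now reach through the ops table directly. */
	pr_info("vport %s (type %d)\n",
		vport->ops->get_name(vport), (int)vport->ops->type);

	/* vport_get_stats() no longer returns an error code: every vport
	 * has percpu_stats after this patch, so it simply fills in the
	 * caller-supplied ovs_vport_stats. */
	vport_get_stats(vport, &stats);
	pr_info("  rx %llu packets, tx %llu packets\n",
		(unsigned long long)stats.rx_packets,
		(unsigned long long)stats.tx_packets);
}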