/*
 * Copyright (c) 2010, 2011 Nicira Networks.
 * Distributed under the terms of the GNU GPL version 2.
 *
 * Significant portions of this file may be copied from parts of the Linux
 * kernel, by Linus Torvalds and others.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/dcache.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/percpu.h>
#include <linux/rtnetlink.h>
#include <linux/compat.h>
#include <linux/version.h>

#include "vport.h"
#include "vport-internal_dev.h"
/* List of statically compiled vport implementations. Don't forget to also
 * add yours to the list at the bottom of vport.h. */
static const struct vport_ops *base_vport_ops_list[] = {
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)
static const struct vport_ops **vport_ops_list;
static int n_vport_types;

static struct hlist_head *dev_table;
#define VPORT_HASH_BUCKETS 1024
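/* Note: VPORT_HASH_BUCKETS must stay a power of two, because hash_bucket()
 * below reduces the hash with a bitwise AND against (VPORT_HASH_BUCKETS - 1)
 * rather than a modulo. */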
/* Both RTNL lock and vport_mutex need to be held when updating dev_table.
 *
 * If you use vport_locate and then perform some operations, you need to hold
 * one of these locks if you don't want the vport to be deleted out from under
 * you.
 *
 * If you get a reference to a vport through a datapath, it is protected
 * by RCU and you need to hold rcu_read_lock instead when reading.
 *
 * If multiple locks are taken, the hierarchy is RTNL first, then the
 * datapath lock, then the vport lock.
 */
static DEFINE_MUTEX(vport_mutex);
 * vport_lock - acquire vport lock
 * Acquire global vport lock. See above comment about locking requirements
 * and specific function definitions. May sleep.
void vport_lock(void)
	mutex_lock(&vport_mutex);
 * vport_unlock - release vport lock
 * Release lock acquired with vport_lock.
void vport_unlock(void)
	mutex_unlock(&vport_mutex);
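/* Illustrative sketch (not part of the original file): a caller that looks a
 * vport up by name takes the vport lock around both the lookup and the use
 * of the result, much like the vport_user_*() helpers below do.  The device
 * name here is hypothetical.
 *
 *	struct rtnl_link_stats64 stats;
 *	struct vport *vport;
 *	int err = -ENODEV;
 *
 *	vport_lock();
 *	vport = vport_locate("vport0");
 *	if (vport)
 *		err = vport_get_stats(vport, &stats);
 *	vport_unlock();
 */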
#define ASSERT_VPORT() \
	if (unlikely(!mutex_is_locked(&vport_mutex))) { \
		pr_err("vport lock not held at %s (%d)\n", \
		       __FILE__, __LINE__); \
 * vport_init - initialize vport subsystem
 * Called at module load time to initialize the vport subsystem and any
 * compiled-in vport types.
int vport_init(void)
	dev_table = kzalloc(VPORT_HASH_BUCKETS * sizeof(struct hlist_head),
			    GFP_KERNEL);

	vport_ops_list = kmalloc(ARRAY_SIZE(base_vport_ops_list) *
				 sizeof(struct vport_ops *), GFP_KERNEL);
	if (!vport_ops_list) {
		goto error_dev_table;

	for (i = 0; i < ARRAY_SIZE(base_vport_ops_list); i++) {
		const struct vport_ops *new_ops = base_vport_ops_list[i];

		if (new_ops->init)
			err = new_ops->init();

		if (!err)
			vport_ops_list[n_vport_types++] = new_ops;
		else if (new_ops->flags & VPORT_F_REQUIRED) {
static void vport_del_all(void)
	for (i = 0; i < VPORT_HASH_BUCKETS; i++) {
		struct hlist_head *bucket = &dev_table[i];
		struct hlist_node *node, *next;

		hlist_for_each_entry_safe(vport, node, next, bucket, hash_node)
 * vport_exit - shut down vport subsystem
 * Called at module exit time to shut down the vport subsystem and any
 * initialized vport types.
void vport_exit(void)
	for (i = 0; i < n_vport_types; i++) {
		if (vport_ops_list[i]->exit)
			vport_ops_list[i]->exit();
	}

	kfree(vport_ops_list);
 * vport_user_mod - modify existing vport device (for userspace callers)
 * @uport: New configuration for vport
 * Modifies an existing device with the specified configuration (which is
 * dependent on device type). This function is for userspace callers and
 * assumes no locks are held.
int vport_user_mod(const struct odp_port __user *uport)
	struct odp_port port;

	if (copy_from_user(&port, uport, sizeof(port)))
		return -EFAULT;

	port.devname[IFNAMSIZ - 1] = '\0';

	vport = vport_locate(port.devname);

	err = vport_mod(vport, &port);
 * vport_user_stats_get - retrieve device stats (for userspace callers)
 * @ustats_req: Stats request parameters.
 * Retrieves transmit, receive, and error stats for the given device. This
 * function is for userspace callers and assumes no locks are held.
int vport_user_stats_get(struct odp_vport_stats_req __user *ustats_req)
	struct odp_vport_stats_req stats_req;

	if (copy_from_user(&stats_req, ustats_req, sizeof(struct odp_vport_stats_req)))
		return -EFAULT;

	stats_req.devname[IFNAMSIZ - 1] = '\0';

	vport = vport_locate(stats_req.devname);

	err = vport_get_stats(vport, &stats_req.stats);

	if (copy_to_user(ustats_req, &stats_req, sizeof(struct odp_vport_stats_req)))
 * vport_user_stats_set - sets offset device stats (for userspace callers)
 * @ustats_req: Stats set parameters.
 * Provides a set of transmit, receive, and error stats to be added as an
 * offset to the collected data when stats are retrieved. Some devices may not
 * support setting the stats, in which case the result will always be
 * -EOPNOTSUPP. This function is for userspace callers and assumes no locks
 * are held.
int vport_user_stats_set(struct odp_vport_stats_req __user *ustats_req)
	struct odp_vport_stats_req stats_req;

	if (copy_from_user(&stats_req, ustats_req, sizeof(struct odp_vport_stats_req)))
		return -EFAULT;

	stats_req.devname[IFNAMSIZ - 1] = '\0';

	vport = vport_locate(stats_req.devname);

	err = vport_set_stats(vport, &stats_req.stats);
 * vport_user_ether_get - retrieve device Ethernet address (for userspace callers)
 * @uvport_ether: Ethernet address request parameters.
 * Retrieves the Ethernet address of the given device. This function is for
 * userspace callers and assumes no locks are held.
int vport_user_ether_get(struct odp_vport_ether __user *uvport_ether)
	struct odp_vport_ether vport_ether;

	if (copy_from_user(&vport_ether, uvport_ether, sizeof(struct odp_vport_ether)))
		return -EFAULT;

	vport_ether.devname[IFNAMSIZ - 1] = '\0';

	vport = vport_locate(vport_ether.devname);

	memcpy(vport_ether.ether_addr, vport_get_addr(vport), ETH_ALEN);

	if (copy_to_user(uvport_ether, &vport_ether, sizeof(struct odp_vport_ether)))
 * vport_user_ether_set - set device Ethernet address (for userspace callers)
 * @uvport_ether: Ethernet address request parameters.
 * Sets the Ethernet address of the given device. Some devices may not support
 * setting the Ethernet address, in which case the result will always be
 * -EOPNOTSUPP. This function is for userspace callers and assumes no locks
 * are held.
int vport_user_ether_set(struct odp_vport_ether __user *uvport_ether)
	struct odp_vport_ether vport_ether;

	if (copy_from_user(&vport_ether, uvport_ether, sizeof(struct odp_vport_ether)))
		return -EFAULT;

	vport_ether.devname[IFNAMSIZ - 1] = '\0';

	vport = vport_locate(vport_ether.devname);

	err = vport_set_addr(vport, vport_ether.ether_addr);
 * vport_user_mtu_get - retrieve device MTU (for userspace callers)
 * @uvport_mtu: MTU request parameters.
 * Retrieves the MTU of the given device. This function is for userspace
 * callers and assumes no locks are held.
int vport_user_mtu_get(struct odp_vport_mtu __user *uvport_mtu)
	struct odp_vport_mtu vport_mtu;

	if (copy_from_user(&vport_mtu, uvport_mtu, sizeof(struct odp_vport_mtu)))
		return -EFAULT;

	vport_mtu.devname[IFNAMSIZ - 1] = '\0';

	vport = vport_locate(vport_mtu.devname);

	vport_mtu.mtu = vport_get_mtu(vport);

	if (copy_to_user(uvport_mtu, &vport_mtu, sizeof(struct odp_vport_mtu)))
 * vport_user_mtu_set - set device MTU (for userspace callers)
 * @uvport_mtu: MTU request parameters.
 * Sets the MTU of the given device. Some devices may not support setting the
 * MTU, in which case the result will always be -EOPNOTSUPP. This function is
 * for userspace callers and assumes no locks are held.
int vport_user_mtu_set(struct odp_vport_mtu __user *uvport_mtu)
	struct odp_vport_mtu vport_mtu;

	if (copy_from_user(&vport_mtu, uvport_mtu, sizeof(struct odp_vport_mtu)))
		return -EFAULT;

	vport_mtu.devname[IFNAMSIZ - 1] = '\0';

	vport = vport_locate(vport_mtu.devname);

	err = vport_set_mtu(vport, vport_mtu.mtu);
static struct hlist_head *hash_bucket(const char *name)
	unsigned int hash = full_name_hash(name, strlen(name));
	return &dev_table[hash & (VPORT_HASH_BUCKETS - 1)];
 * vport_locate - find a port that has already been created
 * @name: name of port to find
 * Either RTNL or vport lock must be acquired before calling this function
 * and held while using the found port. See the locking comments at the
 * top of this file.
struct vport *vport_locate(const char *name)
	struct hlist_head *bucket = hash_bucket(name);
	struct hlist_node *node;

	if (unlikely(!mutex_is_locked(&vport_mutex) && !rtnl_is_locked())) {
		pr_err("neither RTNL nor vport lock held in vport_locate\n");

	hlist_for_each_entry(vport, node, bucket, hash_node)
		if (!strcmp(name, vport_get_name(vport)))
			return vport;
static void register_vport(struct vport *vport)
	hlist_add_head(&vport->hash_node, hash_bucket(vport_get_name(vport)));

static void unregister_vport(struct vport *vport)
	hlist_del(&vport->hash_node);

static void release_vport(struct kobject *kobj)
	struct vport *p = container_of(kobj, struct vport, kobj);

static struct kobj_type brport_ktype = {
	.sysfs_ops = &brport_sysfs_ops,
	.release = release_vport
 * vport_alloc - allocate and initialize new vport
 * @priv_size: Size of private data area to allocate.
 * @ops: vport device ops
 * Allocate and initialize a new vport defined by @ops. The vport will contain
 * a private data area of size @priv_size that can be accessed using
 * vport_priv(). vports that are no longer needed should be released with
 * vport_free().
struct vport *vport_alloc(int priv_size, const struct vport_ops *ops, const struct vport_parms *parms)
	alloc_size = sizeof(struct vport);
	alloc_size = ALIGN(alloc_size, VPORT_ALIGN);
	alloc_size += priv_size;

	vport = kzalloc(alloc_size, GFP_KERNEL);
	if (!vport)
		return ERR_PTR(-ENOMEM);

	vport->dp = parms->dp;
	vport->port_no = parms->port_no;
	atomic_set(&vport->sflow_pool, 0);

	/* Initialize kobject for bridge. This will be added as
	 * /sys/class/net/<devname>/brport later, if sysfs is enabled. */
	vport->kobj.kset = NULL;
	kobject_init(&vport->kobj, &brport_ktype);

	if (vport->ops->flags & VPORT_F_GEN_STATS) {
		vport->percpu_stats = alloc_percpu(struct vport_percpu_stats);
		if (!vport->percpu_stats)
			return ERR_PTR(-ENOMEM);

		spin_lock_init(&vport->stats_lock);
 * vport_free - uninitialize and free vport
 * @vport: vport to free
 * Frees a vport allocated with vport_alloc() when it is no longer needed.
void vport_free(struct vport *vport)
	if (vport->ops->flags & VPORT_F_GEN_STATS)
		free_percpu(vport->percpu_stats);

	kobject_put(&vport->kobj);
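/* Illustrative sketch (not part of the original file): a vport
 * implementation's create() callback typically wraps vport_alloc(), reserving
 * a private area and retrieving it with vport_priv() (declared in vport.h).
 * The "dummy" type, its private struct, and dummy_vport_ops are hypothetical;
 * a real implementation would also add its struct vport_ops to
 * base_vport_ops_list at the top of this file.
 *
 *	struct dummy_vport {
 *		unsigned char mac[ETH_ALEN];
 *	};
 *
 *	static struct vport *dummy_create(const struct vport_parms *parms)
 *	{
 *		struct vport *vport;
 *		struct dummy_vport *dummy;
 *
 *		vport = vport_alloc(sizeof(struct dummy_vport),
 *				    &dummy_vport_ops, parms);
 *		if (IS_ERR(vport))
 *			return vport;
 *
 *		dummy = vport_priv(vport);
 *		random_ether_addr(dummy->mac);
 *		return vport;
 *	}
 */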
 * vport_add - add vport device (for kernel callers)
 * @parms: Information about new vport.
 * Creates a new vport with the specified configuration (which is dependent on
 * device type) and attaches it to a datapath. Both RTNL and vport locks must
 * be held.
struct vport *vport_add(const struct vport_parms *parms)
	for (i = 0; i < n_vport_types; i++) {
		if (!strcmp(vport_ops_list[i]->type, parms->type)) {
			vport = vport_ops_list[i]->create(parms);
			err = PTR_ERR(vport);

			register_vport(vport);
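/* Illustrative sketch (not part of the original file): a kernel caller
 * creates a port by filling in struct vport_parms and calling vport_add()
 * with both RTNL and the vport lock held.  Only fields referenced in this
 * file (type, dp, port_no) are shown, the type string is just an example,
 * and dp/port_no are assumed to come from the caller.
 *
 *	struct vport_parms parms = {
 *		.type = "netdev",
 *		.dp = dp,
 *		.port_no = port_no,
 *	};
 *	struct vport *vport;
 *
 *	rtnl_lock();
 *	vport_lock();
 *	vport = vport_add(&parms);
 *	vport_unlock();
 *	rtnl_unlock();
 *	if (IS_ERR(vport))
 *		return PTR_ERR(vport);
 */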
 * vport_mod - modify existing vport device (for kernel callers)
 * @vport: vport to modify.
 * @port: New configuration.
 * Modifies an existing device with the specified configuration (which is
 * dependent on device type). Both RTNL and vport locks must be held.
int vport_mod(struct vport *vport, struct odp_port *port)
	if (vport->ops->modify)
		return vport->ops->modify(vport, port);
 * vport_del - delete existing vport device (for kernel callers)
 * @vport: vport to delete.
 * Detaches @vport from its datapath and destroys it. It is possible to fail
 * for reasons such as lack of memory. Both RTNL and vport locks must be held.
int vport_del(struct vport *vport)
	unregister_vport(vport);

	return vport->ops->destroy(vport);
 * vport_set_mtu - set device MTU (for kernel callers)
 * @vport: vport on which to set MTU.
 * @mtu: New MTU.
 * Sets the MTU of the given device. Some devices may not support setting the
 * MTU, in which case the result will always be -EOPNOTSUPP. RTNL lock must
 * be held.
int vport_set_mtu(struct vport *vport, int mtu)
	if (vport->ops->set_mtu) {
		int ret;

		ret = vport->ops->set_mtu(vport, mtu);

		if (!ret && !is_internal_vport(vport))
			set_internal_devs_mtu(vport->dp);
 * vport_set_addr - set device Ethernet address (for kernel callers)
 * @vport: vport on which to set Ethernet address.
 * @addr: New address.
 * Sets the Ethernet address of the given device. Some devices may not support
 * setting the Ethernet address, in which case the result will always be
 * -EOPNOTSUPP. RTNL lock must be held.
int vport_set_addr(struct vport *vport, const unsigned char *addr)
	if (!is_valid_ether_addr(addr))
		return -EADDRNOTAVAIL;

	if (vport->ops->set_addr)
		return vport->ops->set_addr(vport, addr);
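/* Illustrative sketch (not part of the original file): assigning a random
 * locally administered MAC address to a vport under RTNL.
 * random_ether_addr() always produces an address that passes
 * is_valid_ether_addr(), so the -EADDRNOTAVAIL check above cannot trigger.
 *
 *	unsigned char mac[ETH_ALEN];
 *	int err;
 *
 *	random_ether_addr(mac);
 *	err = vport_set_addr(vport, mac);
 *	if (err == -EOPNOTSUPP)
 *		pr_debug("%s does not support setting its address\n",
 *			 vport_get_name(vport));
 */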
 * vport_set_stats - sets offset device stats (for kernel callers)
 * @vport: vport on which to set stats
 * @stats: stats to set
 * Provides a set of transmit, receive, and error stats to be added as an
 * offset to the collected data when stats are retrieved. Some devices may not
 * support setting the stats, in which case the result will always be
 * -EOPNOTSUPP. RTNL lock must be held.
int vport_set_stats(struct vport *vport, struct rtnl_link_stats64 *stats)
	if (vport->ops->flags & VPORT_F_GEN_STATS) {
		spin_lock_bh(&vport->stats_lock);
		vport->offset_stats = *stats;
		spin_unlock_bh(&vport->stats_lock);

	} else if (vport->ops->set_stats)
		return vport->ops->set_stats(vport, stats);
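/* Illustrative sketch (not part of the original file): because the values set
 * here are used by vport_get_stats() as the starting offset, a caller can
 * carry counters over from an old device instance to a new one.  old_vport
 * and new_vport are hypothetical.
 *
 *	struct rtnl_link_stats64 saved;
 *
 *	if (!vport_get_stats(old_vport, &saved))
 *		vport_set_stats(new_vport, &saved);
 */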
 * vport_get_name - retrieve device name
 * @vport: vport from which to retrieve the name.
 * Retrieves the name of the given device. Either RTNL lock or rcu_read_lock
 * must be held for the entire duration that the name is in use.
const char *vport_get_name(const struct vport *vport)
	return vport->ops->get_name(vport);
 * vport_get_type - retrieve device type
 * @vport: vport from which to retrieve the type.
 * Retrieves the type of the given device. Either RTNL lock or rcu_read_lock
 * must be held for the entire duration that the type is in use.
const char *vport_get_type(const struct vport *vport)
	return vport->ops->type;
 * vport_get_addr - retrieve device Ethernet address (for kernel callers)
 * @vport: vport from which to retrieve the Ethernet address.
 * Retrieves the Ethernet address of the given device. Either RTNL lock or
 * rcu_read_lock must be held for the entire duration that the Ethernet
 * address is in use.
const unsigned char *vport_get_addr(const struct vport *vport)
	return vport->ops->get_addr(vport);
 * vport_get_kobj - retrieve associated kobj
 * @vport: vport from which to retrieve the associated kobj
 * Retrieves the associated kobj, or NULL if there is none. The returned kobj
 * is valid for as long as the vport exists.
struct kobject *vport_get_kobj(const struct vport *vport)
	if (vport->ops->get_kobj)
		return vport->ops->get_kobj(vport);
static int vport_call_get_stats(struct vport *vport, struct rtnl_link_stats64 *stats)
	err = vport->ops->get_stats(vport, stats);
 * vport_get_stats - retrieve device stats (for kernel callers)
 * @vport: vport from which to retrieve the stats
 * @stats: location to store stats
 * Retrieves transmit, receive, and error stats for the given device.
int vport_get_stats(struct vport *vport, struct rtnl_link_stats64 *stats)
	if (!(vport->ops->flags & VPORT_F_GEN_STATS))
		return vport_call_get_stats(vport, stats);

	/* We potentially have 3 sources of stats that need to be
	 * combined: those we have collected (split into err_stats and
	 * percpu_stats), offset_stats from set_stats(), and device
	 * error stats from get_stats() (for errors that happen
	 * downstream and therefore aren't reported through our
	 * vport_record_error() function). */

	spin_lock_bh(&vport->stats_lock);

	*stats = vport->offset_stats;

	stats->rx_errors += vport->err_stats.rx_errors;
	stats->tx_errors += vport->err_stats.tx_errors;
	stats->tx_dropped += vport->err_stats.tx_dropped;
	stats->rx_dropped += vport->err_stats.rx_dropped;

	spin_unlock_bh(&vport->stats_lock);
	if (vport->ops->get_stats) {
		struct rtnl_link_stats64 dev_stats;

		err = vport_call_get_stats(vport, &dev_stats);

		stats->rx_errors += dev_stats.rx_errors;
		stats->tx_errors += dev_stats.tx_errors;
		stats->rx_dropped += dev_stats.rx_dropped;
		stats->tx_dropped += dev_stats.tx_dropped;
		stats->multicast += dev_stats.multicast;
		stats->collisions += dev_stats.collisions;
		stats->rx_length_errors += dev_stats.rx_length_errors;
		stats->rx_over_errors += dev_stats.rx_over_errors;
		stats->rx_crc_errors += dev_stats.rx_crc_errors;
		stats->rx_frame_errors += dev_stats.rx_frame_errors;
		stats->rx_fifo_errors += dev_stats.rx_fifo_errors;
		stats->rx_missed_errors += dev_stats.rx_missed_errors;
		stats->tx_aborted_errors += dev_stats.tx_aborted_errors;
		stats->tx_carrier_errors += dev_stats.tx_carrier_errors;
		stats->tx_fifo_errors += dev_stats.tx_fifo_errors;
		stats->tx_heartbeat_errors += dev_stats.tx_heartbeat_errors;
		stats->tx_window_errors += dev_stats.tx_window_errors;
		stats->rx_compressed += dev_stats.rx_compressed;
		stats->tx_compressed += dev_stats.tx_compressed;
	for_each_possible_cpu(i) {
		const struct vport_percpu_stats *percpu_stats;
		struct vport_percpu_stats local_stats;

		percpu_stats = per_cpu_ptr(vport->percpu_stats, i);
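		/* Snapshot this CPU's counters without taking a lock: re-read
		 * if the seqcount shows that a writer (vport_receive() or
		 * vport_send() on that CPU) updated them mid-copy. */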
		do {
			seqcount = read_seqcount_begin(&percpu_stats->seqlock);
			local_stats = *percpu_stats;
		} while (read_seqcount_retry(&percpu_stats->seqlock, seqcount));

		stats->rx_bytes += local_stats.rx_bytes;
		stats->rx_packets += local_stats.rx_packets;
		stats->tx_bytes += local_stats.tx_bytes;
		stats->tx_packets += local_stats.tx_packets;
 * vport_get_flags - retrieve device flags
 * @vport: vport from which to retrieve the flags
 * Retrieves the flags of the given device. Either RTNL lock or rcu_read_lock
 * must be held.
unsigned vport_get_flags(const struct vport *vport)
	return vport->ops->get_dev_flags(vport);
 * vport_is_running - check whether device is running
 * @vport: vport on which to check status.
 * Checks whether the given device is running. Either RTNL lock or
 * rcu_read_lock must be held.
int vport_is_running(const struct vport *vport)
	return vport->ops->is_running(vport);
 * vport_get_operstate - retrieve device operating state
 * @vport: vport from which to check status
 * Retrieves the RFC2863 operstate of the given device. Either RTNL lock or
 * rcu_read_lock must be held.
unsigned char vport_get_operstate(const struct vport *vport)
	return vport->ops->get_operstate(vport);
 * vport_get_ifindex - retrieve device system interface index
 * @vport: vport from which to retrieve index
 * Retrieves the system interface index of the given device or 0 if
 * the device does not have one (in the case of virtual ports).
 * Returns a negative error code on failure. Either RTNL lock or
 * rcu_read_lock must be held.
int vport_get_ifindex(const struct vport *vport)
	if (vport->ops->get_ifindex)
		return vport->ops->get_ifindex(vport);
 * vport_get_iflink - retrieve device system link index
 * @vport: vport from which to retrieve index
 * Retrieves the system link index of the given device. The link is the index
 * of the interface on which the packet will actually be sent. In most cases
 * this is the same as the ifindex but may be different for tunnel devices.
 * Returns a negative error code on failure. Either RTNL lock or rcu_read_lock
 * must be held.
int vport_get_iflink(const struct vport *vport)
	if (vport->ops->get_iflink)
		return vport->ops->get_iflink(vport);

	/* If we don't have an iflink, use the ifindex. In most cases they
	 * are the same. */
	return vport_get_ifindex(vport);
 * vport_get_mtu - retrieve device MTU (for kernel callers)
 * @vport: vport from which to retrieve MTU
 * Retrieves the MTU of the given device. Either RTNL lock or rcu_read_lock
 * must be held.
int vport_get_mtu(const struct vport *vport)
	return vport->ops->get_mtu(vport);
 * vport_get_config - retrieve device configuration
 * @vport: vport from which to retrieve the configuration.
 * @config: buffer in which to store the config; must be at least
 * VPORT_CONFIG_SIZE bytes.
 * Retrieves the configuration of the given device. Either RTNL lock or
 * rcu_read_lock must be held.
void vport_get_config(const struct vport *vport, void *config)
	if (vport->ops->get_config)
		vport->ops->get_config(vport, config);
 * vport_receive - pass up received packet to the datapath for processing
 * @vport: vport that received the packet
 * @skb: skb that was received
 * Must be called with rcu_read_lock. The packet cannot be shared and
 * skb->data should point to the Ethernet header. The caller must have already
 * called compute_ip_summed() to initialize the checksumming fields.
void vport_receive(struct vport *vport, struct sk_buff *skb)
	if (vport->ops->flags & VPORT_F_GEN_STATS) {
		struct vport_percpu_stats *stats;

		stats = per_cpu_ptr(vport->percpu_stats, smp_processor_id());

		write_seqcount_begin(&stats->seqlock);
		stats->rx_packets++;
		stats->rx_bytes += skb->len;
		write_seqcount_end(&stats->seqlock);

	if (!(vport->ops->flags & VPORT_F_FLOW))
		OVS_CB(skb)->flow = NULL;

	if (!(vport->ops->flags & VPORT_F_TUN_ID))
		OVS_CB(skb)->tun_id = 0;

	dp_process_received_packet(vport, skb);
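/* Illustrative sketch (not part of the original file): a vport
 * implementation's receive path hands packets to the datapath roughly as
 * follows.  skb->data is assumed to point at the Ethernet header and the
 * checksum state to have been initialized via compute_ip_summed(), as the
 * comment above requires.  The function name is hypothetical.
 *
 *	static void dummy_rx(struct vport *vport, struct sk_buff *skb)
 *	{
 *		rcu_read_lock();
 *		vport_receive(vport, skb);
 *		rcu_read_unlock();
 *	}
 */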
static inline unsigned packet_length(const struct sk_buff *skb)
	unsigned length = skb->len - ETH_HLEN;

	if (skb->protocol == htons(ETH_P_8021Q))
		length -= VLAN_HLEN;

	return length;
 * vport_send - send a packet on a device
 * @vport: vport on which to send the packet
 * @skb: skb to send
 * Sends the given packet and returns the length of data sent. Either RTNL
 * lock or rcu_read_lock must be held.
int vport_send(struct vport *vport, struct sk_buff *skb)
	mtu = vport_get_mtu(vport);
	if (unlikely(packet_length(skb) > mtu && !skb_is_gso(skb))) {
		if (net_ratelimit())
			pr_warn("%s: dropped over-mtu packet: %d > %d\n",
				dp_name(vport->dp), packet_length(skb), mtu);

	sent = vport->ops->send(vport, skb);

	if (vport->ops->flags & VPORT_F_GEN_STATS && sent > 0) {
		struct vport_percpu_stats *stats;

		stats = per_cpu_ptr(vport->percpu_stats, smp_processor_id());

		write_seqcount_begin(&stats->seqlock);
		stats->tx_packets++;
		stats->tx_bytes += sent;
		write_seqcount_end(&stats->seqlock);

	vport_record_error(vport, VPORT_E_TX_DROPPED);
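/* Illustrative sketch (not part of the original file): a datapath caller
 * transmits on a vport it obtained under RCU protection.  vport_send()
 * returns the number of bytes sent and records drops and errors itself, so
 * the caller does not need to touch any counters.  How vport and skb are
 * obtained is left out here.
 *
 *	rcu_read_lock();
 *	sent = vport_send(vport, skb);
 *	rcu_read_unlock();
 */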
 * vport_record_error - indicate device error to generic stats layer
 * @vport: vport that encountered the error
 * @err_type: one of enum vport_err_type types to indicate the error type
 * If using the vport generic stats layer, indicate that an error of the given
 * type has occurred.
void vport_record_error(struct vport *vport, enum vport_err_type err_type)
	if (vport->ops->flags & VPORT_F_GEN_STATS) {
		spin_lock_bh(&vport->stats_lock);

		switch (err_type) {
		case VPORT_E_RX_DROPPED:
			vport->err_stats.rx_dropped++;
			break;

		case VPORT_E_RX_ERROR:
			vport->err_stats.rx_errors++;
			break;

		case VPORT_E_TX_DROPPED:
			vport->err_stats.tx_dropped++;
			break;

		case VPORT_E_TX_ERROR:
			vport->err_stats.tx_errors++;
			break;
		}

		spin_unlock_bh(&vport->stats_lock);
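/* Illustrative sketch (not part of the original file): a vport
 * implementation that has to drop a malformed received packet reports it
 * through the generic stats layer so that the drop shows up in
 * vport_get_stats().
 *
 *	if (unlikely(!pskb_may_pull(skb, ETH_HLEN))) {
 *		kfree_skb(skb);
 *		vport_record_error(vport, VPORT_E_RX_ERROR);
 *		return;
 *	}
 */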