/*
 * Copyright (c) 2010 Nicira Networks.
 * Distributed under the terms of the GNU GPL version 2.
 *
 * Significant portions of this file may be copied from parts of the Linux
 * kernel, by Linus Torvalds and others.
 */
#include <linux/dcache.h>
#include <linux/kernel.h>
#include <linux/list.h>

#include "datapath.h"
#include "vport.h"
#include "vport-generic.h"
struct device_config {
	struct rcu_head rcu;
	unsigned char eth_addr[ETH_ALEN];
	unsigned int mtu;
};

struct patch_vport {
	char name[IFNAMSIZ];
	/* Protected by RTNL lock. */
	char peer_name[IFNAMSIZ];
	struct hlist_node hash_node;
	/* Protected by RCU. */
	struct vport *peer;
	/* Protected by RCU. */
	struct device_config *devconf;
};
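/* Every patch vport is hashed into peer_table under the name of the peer it
 * is configured to connect to, so that update_peers() can fix up 'peer'
 * pointers as ports are attached and detached. */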
/* Protected by RTNL lock. */
static struct hlist_head *peer_table;
#define PEER_HASH_BUCKETS 256
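/* Returns the patch-specific private data embedded in a generic vport. */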
static inline struct patch_vport *patch_vport_priv(const struct vport *vport)
{
	return vport_priv(vport);
}
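/* RCU callback: frees a replaced device_config once no readers remain. */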
static void free_config(struct rcu_head *rcu)
{
	struct device_config *c = container_of(rcu, struct device_config, rcu);
	kfree(c);
}
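/* Publishes a new device_config and defers freeing the old one until after
 * an RCU grace period. */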
static void assign_config_rcu(struct vport *vport,
			      struct device_config *new_config)
{
	struct patch_vport *patch_vport = patch_vport_priv(vport);
	struct device_config *old_config;

	old_config = rcu_dereference(patch_vport->devconf);
	rcu_assign_pointer(patch_vport->devconf, new_config);
	call_rcu(&old_config->rcu, free_config);
}
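/* Maps a port name to its bucket in peer_table. */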
static struct hlist_head *hash_bucket(const char *name)
{
	unsigned int hash = full_name_hash(name, strlen(name));
	return &peer_table[hash & (PEER_HASH_BUCKETS - 1)];
}
static int patch_init(void)
{
	peer_table = kzalloc(PEER_HASH_BUCKETS * sizeof(struct hlist_head),
			     GFP_KERNEL);
	if (!peer_table)
		return -ENOMEM;
	return 0;
}
static void patch_exit(void)
{
	kfree(peer_table);
}
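/* Parses the port's configuration (the name of its peer) and, if the port is
 * already attached to a datapath, rehashes it and re-resolves its peer
 * pointer.  peer_name and peer_table are both protected by the RTNL lock. */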
static int set_config(struct vport *vport, const void *config)
{
	struct patch_vport *patch_vport = patch_vport_priv(vport);
	char peer_name[IFNAMSIZ];

	strlcpy(peer_name, config, IFNAMSIZ);

	/* A patch port cannot be its own peer. */
	if (!strcmp(patch_vport->name, peer_name))
		return -EINVAL;

	strcpy(patch_vport->peer_name, peer_name);

	if (vport_get_dp_port(vport)) {
		hlist_del(&patch_vport->hash_node);
		rcu_assign_pointer(patch_vport->peer, vport_locate(patch_vport->peer_name));
		hlist_add_head(&patch_vport->hash_node, hash_bucket(patch_vport->peer_name));
	}

	return 0;
}
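/* Allocates a new patch vport, applies its configuration, and gives it a
 * random Ethernet address and a large default MTU. */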
static struct vport *patch_create(const struct vport_parms *parms)
{
	struct vport *vport;
	struct patch_vport *patch_vport;
	int err;

	vport = vport_alloc(sizeof(struct patch_vport), &patch_vport_ops);
	if (IS_ERR(vport)) {
		err = PTR_ERR(vport);
		goto error;
	}
	patch_vport = patch_vport_priv(vport);
	strcpy(patch_vport->name, parms->name);

	err = set_config(vport, parms->config);
	if (err)
		goto error_free_vport;

	patch_vport->devconf = kmalloc(sizeof(struct device_config), GFP_KERNEL);
	if (!patch_vport->devconf) {
		err = -ENOMEM;
		goto error_free_vport;
	}
	vport_gen_rand_ether_addr(patch_vport->devconf->eth_addr);

	/* Make the default MTU fairly large so that it doesn't become the
	 * bottleneck on systems using jumbo frames. */
	patch_vport->devconf->mtu = 65535;
	return vport;

error_free_vport:
	vport_free(vport);
error:
	return ERR_PTR(err);
}
static int patch_modify(struct vport *vport, struct odp_port *port)
{
	return set_config(vport, port->config);
}
static int patch_destroy(struct vport *vport)
{
	struct patch_vport *patch_vport = patch_vport_priv(vport);

	kfree(patch_vport->devconf);
	vport_free(vport);
	return 0;
}
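/* Points every patch port whose configured peer name matches 'name' at
 * 'vport' (or at NULL when that port goes away). */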
static void update_peers(const char *name, struct vport *vport)
{
	struct hlist_head *bucket = hash_bucket(name);
	struct patch_vport *peer_vport;
	struct hlist_node *node;

	hlist_for_each_entry(peer_vport, node, bucket, hash_node)
		if (!strcmp(peer_vport->peer_name, name))
			rcu_assign_pointer(peer_vport->peer, vport);
}
static int patch_attach(struct vport *vport)
{
	struct patch_vport *patch_vport = patch_vport_priv(vport);

	hlist_add_head(&patch_vport->hash_node, hash_bucket(patch_vport->peer_name));
	rcu_assign_pointer(patch_vport->peer, vport_locate(patch_vport->peer_name));
	update_peers(patch_vport->name, vport);
	return 0;
}
static int patch_detach(struct vport *vport)
{
	struct patch_vport *patch_vport = patch_vport_priv(vport);

	update_peers(patch_vport->name, NULL);
	rcu_assign_pointer(patch_vport->peer, NULL);
	hlist_del(&patch_vport->hash_node);
	return 0;
}
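/* MTU and address changes copy devconf and publish the copy through
 * assign_config_rcu() so that concurrent readers never see a partially
 * updated structure. */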
static int patch_set_mtu(struct vport *vport, int mtu)
{
	struct patch_vport *patch_vport = patch_vport_priv(vport);
	struct device_config *devconf;

	devconf = kmemdup(patch_vport->devconf, sizeof(struct device_config), GFP_KERNEL);
	if (!devconf)
		return -ENOMEM;

	devconf->mtu = mtu;
	assign_config_rcu(vport, devconf);
	return 0;
}
static int patch_set_addr(struct vport *vport, const unsigned char *addr)
{
	struct patch_vport *patch_vport = patch_vport_priv(vport);
	struct device_config *devconf;

	devconf = kmemdup(patch_vport->devconf, sizeof(struct device_config), GFP_KERNEL);
	if (!devconf)
		return -ENOMEM;

	memcpy(devconf->eth_addr, addr, ETH_ALEN);
	assign_config_rcu(vport, devconf);
	return 0;
}
static const char *patch_get_name(const struct vport *vport)
{
	const struct patch_vport *patch_vport = patch_vport_priv(vport);
	return patch_vport->name;
}
static const unsigned char *patch_get_addr(const struct vport *vport)
{
	const struct patch_vport *patch_vport = patch_vport_priv(vport);
	return rcu_dereference(patch_vport->devconf)->eth_addr;
}
static int patch_get_mtu(const struct vport *vport)
{
	const struct patch_vport *patch_vport = patch_vport_priv(vport);
	return rcu_dereference(patch_vport->devconf)->mtu;
}
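/* Transmit path: a packet sent on a patch port is handed directly to the
 * peer port's receive path.  With no peer attached the packet is dropped
 * and the drop is recorded in the port's stats. */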
static int patch_send(struct vport *vport, struct sk_buff *skb)
{
	struct patch_vport *patch_vport = patch_vport_priv(vport);
	struct vport *peer = rcu_dereference(patch_vport->peer);
	int skb_len = skb->len;

	if (!peer) {
		kfree_skb(skb);
		vport_record_error(vport, VPORT_E_TX_DROPPED);
		return 0;
	}

	vport_receive(peer, skb);
	return skb_len;
}
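/* Vport operations for patch devices.  State queries with no patch-specific
 * behavior are filled in with the generic helpers from vport-generic.h. */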
const struct vport_ops patch_vport_ops = {
	.type = "patch",
	.flags = VPORT_F_GEN_STATS,
	.init = patch_init,
	.exit = patch_exit,
	.create = patch_create,
	.modify = patch_modify,
	.destroy = patch_destroy,
	.attach = patch_attach,
	.detach = patch_detach,
	.set_mtu = patch_set_mtu,
	.set_addr = patch_set_addr,
	.get_name = patch_get_name,
	.get_addr = patch_get_addr,
	.get_dev_flags = vport_gen_get_dev_flags,
	.is_running = vport_gen_is_running,
	.get_operstate = vport_gen_get_operstate,
	.get_mtu = patch_get_mtu,
	.send = patch_send,
};