2 * Copyright (c) 2010 Nicira Networks.
3 * Distributed under the terms of the GNU GPL version 2.
5 * Significant portions of this file may be copied from parts of the Linux
6 * kernel, by Linus Torvalds and others.
9 #include <linux/dcache.h>
10 #include <linux/kernel.h>
11 #include <linux/list.h>
15 #include "vport-generic.h"
17 struct device_config {
20 unsigned char eth_addr[ETH_ALEN];
27 /* Protected by RTNL lock. */
28 char peer_name[IFNAMSIZ];
29 struct hlist_node hash_node;
31 /* Protected by RCU. */
34 /* Protected by RCU. */
35 struct device_config *devconf;
/* Protected by RTNL lock. */
/* Hash table of attached patch vports, bucketed by the peer name each one
 * is waiting for; update_peers() scans a bucket to rewire peer pointers
 * when a vport with that name appears or disappears. */
static struct hlist_head *peer_table;
#define PEER_HASH_BUCKETS 256
/* Returns the patch-specific private area of @vport. */
static inline struct patch_vport *patch_vport_priv(const struct vport *vport)
{
	return vport_priv(vport);
}
48 static void free_config(struct rcu_head *rcu)
50 struct device_config *c = container_of(rcu, struct device_config, rcu);
54 static void assign_config_rcu(struct vport *vport,
55 struct device_config *new_config)
57 struct patch_vport *patch_vport = patch_vport_priv(vport);
58 struct device_config *old_config;
60 old_config = rcu_dereference(patch_vport->devconf);
61 rcu_assign_pointer(patch_vport->devconf, new_config);
62 call_rcu(&old_config->rcu, free_config);
65 static struct hlist_head *hash_bucket(const char *name)
67 unsigned int hash = full_name_hash(name, strlen(name));
68 return &peer_table[hash & (PEER_HASH_BUCKETS - 1)];
71 static int patch_init(void)
73 peer_table = kzalloc(PEER_HASH_BUCKETS * sizeof(struct hlist_head),
81 static void patch_exit(void)
86 static int set_config(struct vport *vport, const void __user *uconfig)
88 struct patch_vport *patch_vport = patch_vport_priv(vport);
89 char peer_name[IFNAMSIZ];
92 retval = strncpy_from_user(peer_name, uconfig, IFNAMSIZ);
95 else if (retval >= IFNAMSIZ)
98 if (!strcmp(patch_vport->name, peer_name))
101 strcpy(patch_vport->peer_name, peer_name);
103 if (vport_get_dp_port(vport)) {
104 hlist_del(&patch_vport->hash_node);
105 rcu_assign_pointer(patch_vport->peer, vport_locate(patch_vport->peer_name));
106 hlist_add_head(&patch_vport->hash_node, hash_bucket(patch_vport->peer_name));
112 static struct vport *patch_create(const char *name, const void __user *config)
115 struct patch_vport *patch_vport;
118 vport = vport_alloc(sizeof(struct patch_vport), &patch_vport_ops);
120 err = PTR_ERR(vport);
124 patch_vport = patch_vport_priv(vport);
126 strcpy(patch_vport->name, name);
128 err = set_config(vport, config);
130 goto error_free_vport;
132 patch_vport->devconf = kmalloc(sizeof(struct device_config), GFP_KERNEL);
133 if (!patch_vport->devconf) {
135 goto error_free_vport;
138 vport_gen_rand_ether_addr(patch_vport->devconf->eth_addr);
140 /* Make the default MTU fairly large so that it doesn't become the
141 * bottleneck on systems using jumbo frames. */
142 patch_vport->devconf->mtu = 65535;
152 static int patch_modify(struct vport *vport, const void __user *config)
154 return set_config(vport, config);
157 static int patch_destroy(struct vport *vport)
159 struct patch_vport *patch_vport = patch_vport_priv(vport);
161 kfree(patch_vport->devconf);
167 static void update_peers(const char *name, struct vport *vport)
169 struct hlist_head *bucket = hash_bucket(name);
170 struct patch_vport *peer_vport;
171 struct hlist_node *node;
173 hlist_for_each_entry(peer_vport, node, bucket, hash_node)
174 if (!strcmp(peer_vport->peer_name, name))
175 rcu_assign_pointer(peer_vport->peer, vport);
178 static int patch_attach(struct vport *vport)
180 struct patch_vport *patch_vport = patch_vport_priv(vport);
182 hlist_add_head(&patch_vport->hash_node, hash_bucket(patch_vport->peer_name));
184 rcu_assign_pointer(patch_vport->peer, vport_locate(patch_vport->peer_name));
185 update_peers(patch_vport->name, vport);
190 static int patch_detach(struct vport *vport)
192 struct patch_vport *patch_vport = patch_vport_priv(vport);
194 update_peers(patch_vport->name, NULL);
195 rcu_assign_pointer(patch_vport->peer, NULL);
197 hlist_del(&patch_vport->hash_node);
202 static int patch_set_mtu(struct vport *vport, int mtu)
204 struct patch_vport *patch_vport = patch_vport_priv(vport);
205 struct device_config *devconf;
207 devconf = kmemdup(patch_vport->devconf, sizeof(struct device_config), GFP_KERNEL);
212 assign_config_rcu(vport, devconf);
217 static int patch_set_addr(struct vport *vport, const unsigned char *addr)
219 struct patch_vport *patch_vport = patch_vport_priv(vport);
220 struct device_config *devconf;
222 devconf = kmemdup(patch_vport->devconf, sizeof(struct device_config), GFP_KERNEL);
226 memcpy(devconf->eth_addr, addr, ETH_ALEN);
227 assign_config_rcu(vport, devconf);
233 static const char *patch_get_name(const struct vport *vport)
235 const struct patch_vport *patch_vport = patch_vport_priv(vport);
236 return patch_vport->name;
239 static const unsigned char *patch_get_addr(const struct vport *vport)
241 const struct patch_vport *patch_vport = patch_vport_priv(vport);
242 return rcu_dereference(patch_vport->devconf)->eth_addr;
245 static int patch_get_mtu(const struct vport *vport)
247 const struct patch_vport *patch_vport = patch_vport_priv(vport);
248 return rcu_dereference(patch_vport->devconf)->mtu;
251 static int patch_send(struct vport *vport, struct sk_buff *skb)
253 struct patch_vport *patch_vport = patch_vport_priv(vport);
254 struct vport *peer = rcu_dereference(patch_vport->peer);
255 int skb_len = skb->len;
259 vport_record_error(vport, VPORT_E_TX_DROPPED);
264 vport_receive(peer, skb);
268 const struct vport_ops patch_vport_ops = {
270 .flags = VPORT_F_GEN_STATS,
273 .create = patch_create,
274 .modify = patch_modify,
275 .destroy = patch_destroy,
276 .attach = patch_attach,
277 .detach = patch_detach,
278 .set_mtu = patch_set_mtu,
279 .set_addr = patch_set_addr,
280 .get_name = patch_get_name,
281 .get_addr = patch_get_addr,
282 .get_dev_flags = vport_gen_get_dev_flags,
283 .is_running = vport_gen_is_running,
284 .get_operstate = vport_gen_get_operstate,
285 .get_mtu = patch_get_mtu,