/*
 * Copyright (c) 2008, 2009, 2010, 2011, 2012 Nicira Networks.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include "netlink-socket.h"

#include <sys/types.h>

#include "dynamic-string.h"
#include "netlink-protocol.h"
#include "poll-loop.h"
#include "socket-util.h"
VLOG_DEFINE_THIS_MODULE(netlink_socket);

COVERAGE_DEFINE(netlink_overflow);
COVERAGE_DEFINE(netlink_received);
COVERAGE_DEFINE(netlink_recv_jumbo);
COVERAGE_DEFINE(netlink_send);
COVERAGE_DEFINE(netlink_sent);
/* Linux header file confusion causes this to be undefined. */
#ifndef SOL_NETLINK
#define SOL_NETLINK 270
#endif
/* A single (bad) Netlink message can in theory dump out many, many log
 * messages, so the burst size is set quite high here to avoid missing useful
 * information.  Also, at high logging levels we log *all* Netlink messages. */
static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(60, 600);

static void log_nlmsg(const char *function, int error,
                      const void *message, size_t size, int protocol);
/* Netlink sockets. */

    unsigned int rcvbuf;        /* Receive buffer size (SO_RCVBUF). */
/* Compile-time limit on iovecs, so that we can allocate a maximum-size array
 * of iovecs on the stack. */

/* Maximum number of iovecs that may be passed to sendmsg, capped at a
 * minimum of _XOPEN_IOV_MAX (16) and a maximum of MAX_IOVS.
 *
 * Initialized by nl_sock_create(). */

static int nl_sock_cow__(struct nl_sock *);
/* Creates a new netlink socket for the given netlink 'protocol'
 * (NETLINK_ROUTE, NETLINK_GENERIC, ...).  Returns 0 and sets '*sockp' to the
 * new socket if successful, otherwise returns a positive errno value. */
nl_sock_create(int protocol, struct nl_sock **sockp)

    struct sockaddr_nl local, remote;

    int save_errno = errno;

    max_iovs = sysconf(_SC_UIO_MAXIOV);
    if (max_iovs < _XOPEN_IOV_MAX) {
        if (max_iovs == -1 && errno) {
            VLOG_WARN("sysconf(_SC_UIO_MAXIOV): %s", strerror(errno));
        }
        max_iovs = _XOPEN_IOV_MAX;
    } else if (max_iovs > MAX_IOVS) {
    sock = malloc(sizeof *sock);

    sock->fd = socket(AF_NETLINK, SOCK_RAW, protocol);
        VLOG_ERR("socket: %s", strerror(errno));
    sock->protocol = protocol;

    rcvbuf = 1024 * 1024;
    if (setsockopt(sock->fd, SOL_SOCKET, SO_RCVBUFFORCE,
                   &rcvbuf, sizeof rcvbuf)) {
        VLOG_WARN_RL(&rl, "setting %d-byte socket receive buffer failed (%s)",
                     rcvbuf, strerror(errno));
    }

    retval = get_socket_rcvbuf(sock->fd);

    sock->rcvbuf = retval;
    /* Connect to kernel (pid 0) as remote address. */
    memset(&remote, 0, sizeof remote);
    remote.nl_family = AF_NETLINK;
    if (connect(sock->fd, (struct sockaddr *) &remote, sizeof remote) < 0) {
        VLOG_ERR("connect(0): %s", strerror(errno));

    /* Obtain pid assigned by kernel. */
    local_size = sizeof local;
    if (getsockname(sock->fd, (struct sockaddr *) &local, &local_size) < 0) {
        VLOG_ERR("getsockname: %s", strerror(errno));

    if (local_size < sizeof local || local.nl_family != AF_NETLINK) {
        VLOG_ERR("getsockname returned bad Netlink name");

    sock->pid = local.nl_pid;
/* Creates a new netlink socket for the same protocol as 'src'.  Returns 0 and
 * sets '*sockp' to the new socket if successful, otherwise returns a positive
 * errno value. */
nl_sock_clone(const struct nl_sock *src, struct nl_sock **sockp)

    return nl_sock_create(src->protocol, sockp);
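
/* Illustrative usage sketch (not part of the original code): how a caller
 * might obtain and release a Generic Netlink socket with the functions above.
 * The function name is hypothetical and error handling is reduced to a single
 * check. */
static int
example_open_genl_socket(struct nl_sock **sockp)
{
    int error = nl_sock_create(NETLINK_GENERIC, sockp);
    if (error) {
        VLOG_ERR("could not create Netlink socket (%s)", strerror(error));
        return error;
    }
    /* ...use '*sockp' with nl_sock_send(), nl_sock_recv(), nl_sock_transact(),
     * etc.  When finished, release it with nl_sock_destroy(*sockp). */
    return 0;
}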
/* Destroys netlink socket 'sock'. */
nl_sock_destroy(struct nl_sock *sock)
/* Tries to add 'sock' as a listener for 'multicast_group'.  Returns 0 if
 * successful, otherwise a positive errno value.
 *
 * A socket that is subscribed to a multicast group that receives asynchronous
 * notifications must not be used for Netlink transactions or dumps, because
 * transactions and dumps can cause notifications to be lost.
 *
 * Multicast group numbers are always positive.
 *
 * It is not an error to attempt to join a multicast group to which a socket
 * already belongs. */
nl_sock_join_mcgroup(struct nl_sock *sock, unsigned int multicast_group)

    int error = nl_sock_cow__(sock);

    if (setsockopt(sock->fd, SOL_NETLINK, NETLINK_ADD_MEMBERSHIP,
                   &multicast_group, sizeof multicast_group) < 0) {
        VLOG_WARN("could not join multicast group %u (%s)",
                  multicast_group, strerror(errno));
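
/* Illustrative sketch (not part of the original code): subscribing a socket
 * to a multicast group so that it receives asynchronous notifications.  The
 * group number would typically come from nl_lookup_genl_mcgroup(), defined
 * later in this file; 'group' here is just a placeholder parameter. */
static int
example_subscribe(struct nl_sock *sock, unsigned int group)
{
    int error = nl_sock_join_mcgroup(sock, group);
    if (error) {
        VLOG_WARN("could not subscribe to group %u (%s)",
                  group, strerror(error));
    }
    /* Per the comment above, 'sock' should now be used only for receiving
     * notifications, not for transactions or dumps. */
    return error;
}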
/* Tries to make 'sock' stop listening to 'multicast_group'.  Returns 0 if
 * successful, otherwise a positive errno value.
 *
 * Multicast group numbers are always positive.
 *
 * It is not an error to attempt to leave a multicast group to which a socket
 * does not belong.
 *
 * On success, reading from 'sock' will still return any messages that were
 * received on 'multicast_group' before the group was left. */
nl_sock_leave_mcgroup(struct nl_sock *sock, unsigned int multicast_group)

    if (setsockopt(sock->fd, SOL_NETLINK, NETLINK_DROP_MEMBERSHIP,
                   &multicast_group, sizeof multicast_group) < 0) {
        VLOG_WARN("could not leave multicast group %u (%s)",
                  multicast_group, strerror(errno));
nl_sock_send__(struct nl_sock *sock, const struct ofpbuf *msg, bool wait)

    struct nlmsghdr *nlmsg = nl_msg_nlmsghdr(msg);

    nlmsg->nlmsg_len = msg->size;
    nlmsg->nlmsg_pid = sock->pid;

    do {
        retval = send(sock->fd, msg->data, msg->size, wait ? 0 : MSG_DONTWAIT);
        error = retval < 0 ? errno : 0;
    } while (error == EINTR);
    log_nlmsg(__func__, error, msg->data, msg->size, sock->protocol);

    COVERAGE_INC(netlink_sent);
/* Tries to send 'msg', which must contain a Netlink message, to the kernel on
 * 'sock'.  nlmsg_len in 'msg' will be finalized to match msg->size, and
 * nlmsg_pid will be set to 'sock''s pid, before the message is sent.
 *
 * Returns 0 if successful, otherwise a positive errno value.  If 'wait' is
 * true, then the send will wait until buffer space is ready; otherwise,
 * returns EAGAIN if the 'sock' send buffer is full. */
nl_sock_send(struct nl_sock *sock, const struct ofpbuf *msg, bool wait)

    int error = nl_sock_cow__(sock);

    return nl_sock_send__(sock, msg, wait);
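
/* Illustrative sketch (not part of the original code): building a Generic
 * Netlink request with the nl_msg_*() helpers and sending it without
 * blocking.  The family name is a placeholder.  EAGAIN means the send buffer
 * is full and the caller should retry later, e.g. after waiting with
 * nl_sock_wait(), defined further below. */
static int
example_send_nonblocking(struct nl_sock *sock)
{
    struct ofpbuf request;
    int error;

    ofpbuf_init(&request, 0);
    nl_msg_put_genlmsghdr(&request, 0, GENL_ID_CTRL, NLM_F_REQUEST,
                          CTRL_CMD_GETFAMILY, 1);
    nl_msg_put_string(&request, CTRL_ATTR_FAMILY_NAME, "example_family");
    error = nl_sock_send(sock, &request, false);
    ofpbuf_uninit(&request);
    return error;               /* 0, EAGAIN, or another positive errno. */
}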
/* This stress option is useful for testing that OVS properly tolerates
 * -ENOBUFS on Netlink sockets.  Such errors are unavoidable because they can
 * occur if the kernel cannot temporarily allocate enough GFP_ATOMIC memory to
 * reply to a request.  They can also occur if messages arrive on a multicast
 * channel faster than OVS can process them. */
    netlink_overflow, "simulate netlink socket receive buffer overflow",
nl_sock_recv__(struct nl_sock *sock, struct ofpbuf **bufp, bool wait)

    /* We can't accurately predict the size of the data to be received.  Most
     * received data will fit in a 2 kB buffer, so we allocate that much space.
     * In case the data is actually bigger than that, we make available enough
     * additional space to allow Netlink messages to be up to 64 kB long (a
     * reasonable figure since that's the maximum length of a Netlink
     * attribute). */
    enum { MAX_SIZE = 65536 };
    enum { HEAD_SIZE = 2048 };
    enum { TAIL_SIZE = MAX_SIZE - HEAD_SIZE };

    struct nlmsghdr *nlmsghdr;
    uint8_t tail[TAIL_SIZE];

    buf = ofpbuf_new(HEAD_SIZE);
    iov[0].iov_base = buf->data;
    iov[0].iov_len = HEAD_SIZE;
    iov[1].iov_base = tail;
    iov[1].iov_len = TAIL_SIZE;

    memset(&msg, 0, sizeof msg);

    do {
        retval = recvmsg(sock->fd, &msg, wait ? 0 : MSG_DONTWAIT);
    } while (retval < 0 && errno == EINTR);
    if (error == ENOBUFS) {
        /* Socket receive buffer overflow dropped one or more messages that
         * the kernel tried to send to us. */
        COVERAGE_INC(netlink_overflow);

    if (msg.msg_flags & MSG_TRUNC) {
        VLOG_ERR_RL(&rl, "truncated message (longer than %d bytes)", MAX_SIZE);

    ofpbuf_put_uninit(buf, MIN(retval, HEAD_SIZE));
    if (retval > HEAD_SIZE) {
        COVERAGE_INC(netlink_recv_jumbo);
        ofpbuf_put(buf, tail, retval - HEAD_SIZE);
    }

    nlmsghdr = buf->data;
    if (retval < sizeof *nlmsghdr
        || nlmsghdr->nlmsg_len < sizeof *nlmsghdr
        || nlmsghdr->nlmsg_len > retval) {
        VLOG_ERR_RL(&rl, "received invalid nlmsg (%zd bytes < %d)",
                    retval, NLMSG_HDRLEN);

    if (STRESS(netlink_overflow)) {

    log_nlmsg(__func__, 0, buf->data, buf->size, sock->protocol);
    COVERAGE_INC(netlink_received);
/* Tries to receive a netlink message from the kernel on 'sock'.  If
 * successful, stores the received message into '*bufp' and returns 0.  The
 * caller is responsible for destroying the message with ofpbuf_delete().  On
 * failure, returns a positive errno value and stores a null pointer into
 * '*bufp'.
 *
 * If 'wait' is true, nl_sock_recv waits for a message to be ready; otherwise,
 * returns EAGAIN if the 'sock' receive buffer is empty. */
nl_sock_recv(struct nl_sock *sock, struct ofpbuf **bufp, bool wait)

    int error = nl_sock_cow__(sock);

    return nl_sock_recv__(sock, bufp, wait);
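
/* Illustrative sketch (not part of the original code): draining every message
 * currently queued on a notification socket without blocking.  EAGAIN means
 * the queue is empty; ENOBUFS means the kernel dropped one or more messages
 * because the receive buffer overflowed, so the caller may need to
 * resynchronize its own state. */
static void
example_process_notifications(struct nl_sock *sock)
{
    for (;;) {
        struct ofpbuf *buf;
        int error = nl_sock_recv(sock, &buf, false);
        if (error == EAGAIN) {
            break;              /* Nothing left to read right now. */
        } else if (error == ENOBUFS) {
            VLOG_WARN_RL(&rl, "notification overflow");
            continue;
        } else if (error) {
            VLOG_ERR_RL(&rl, "receive error (%s)", strerror(error));
            break;
        }
        /* ...parse 'buf' with the nl_msg_*() / nl_attr_*() helpers... */
        ofpbuf_delete(buf);
    }
}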
find_nl_transaction_by_seq(struct nl_transaction **transactions, size_t n,
                           uint32_t seq)

    for (i = 0; i < n; i++) {
        struct nl_transaction *t = transactions[i];

        if (seq == nl_msg_nlmsghdr(t->request)->nlmsg_seq) {

nl_sock_record_errors__(struct nl_transaction **transactions, size_t n,
                        int error)

    for (i = 0; i < n; i++) {
        transactions[i]->error = error;
        transactions[i]->reply = NULL;
nl_sock_transact_multiple__(struct nl_sock *sock,
                            struct nl_transaction **transactions, size_t n,
                            size_t *done)

    struct iovec iovs[MAX_IOVS];

    for (i = 0; i < n; i++) {
        struct ofpbuf *request = transactions[i]->request;
        struct nlmsghdr *nlmsg = nl_msg_nlmsghdr(request);

        nlmsg->nlmsg_len = request->size;
        nlmsg->nlmsg_pid = sock->pid;

        /* Ensure that we get a reply even if the final request doesn't
         * ordinarily call for one. */
        nlmsg->nlmsg_flags |= NLM_F_ACK;

        iovs[i].iov_base = request->data;
        iovs[i].iov_len = request->size;
    }

    memset(&msg, 0, sizeof msg);

    do {
        error = sendmsg(sock->fd, &msg, 0) < 0 ? errno : 0;
    } while (error == EINTR);

    for (i = 0; i < n; i++) {
        struct ofpbuf *request = transactions[i]->request;

        log_nlmsg(__func__, error, request->data, request->size,
                  sock->protocol);
    }
    COVERAGE_ADD(netlink_sent, n);
        struct ofpbuf *reply;

        error = nl_sock_recv__(sock, &reply, true);

        i = find_nl_transaction_by_seq(transactions, n,
                                       nl_msg_nlmsghdr(reply)->nlmsg_seq);

            VLOG_DBG_RL(&rl, "ignoring unexpected seq %#"PRIx32,
                        nl_msg_nlmsghdr(reply)->nlmsg_seq);
            ofpbuf_delete(reply);

        nl_sock_record_errors__(transactions, i, 0);
        if (nl_msg_nlmsgerr(reply, &error)) {
            transactions[i]->reply = NULL;
            transactions[i]->error = error;

                VLOG_DBG_RL(&rl, "received NAK error=%d (%s)",
                            error, strerror(error));

            ofpbuf_delete(reply);

            transactions[i]->reply = reply;
            transactions[i]->error = 0;

        transactions += i + 1;
/* Sends the 'request' member of the 'n' transactions in 'transactions' to the
 * kernel, in order, and waits for responses to all of them.  Fills in the
 * 'error' member of each transaction with 0 if it was successful, otherwise
 * with a positive errno value.  'reply' will be NULL on error or if the
 * transaction was successful but had no reply beyond an indication of success.
 * For a successful transaction that did have a more detailed reply, 'reply'
 * will be set to the reply message.
 *
 * The caller is responsible for destroying each request and reply, and the
 * transactions array itself.
 *
 * Before sending each message, this function will finalize nlmsg_len in each
 * 'request' to match the ofpbuf's size, and set nlmsg_pid to 'sock''s pid.
 * NLM_F_ACK will be added to some requests' nlmsg_flags.
 *
 * Bare Netlink is an unreliable transport protocol.  This function layers
 * reliable delivery and reply semantics on top of bare Netlink.  See
 * nl_sock_transact() for some caveats. */
nl_sock_transact_multiple(struct nl_sock *sock,
                          struct nl_transaction **transactions, size_t n)

    error = nl_sock_cow__(sock);
    if (error) {
        nl_sock_record_errors__(transactions, n, error);
        return;
    }

    /* In theory, every request could have a 64 kB reply.  But the default and
     * maximum socket rcvbuf size with typical Dom0 memory sizes both tend to
     * be a bit below 128 kB, so that would only allow a single message in a
     * "batch".  So we assume that replies average (at most) 4 kB, which allows
     * a good deal of batching.
     *
     * In practice, most of the requests that we batch either have no reply at
     * all or a brief reply. */
    max_batch_count = MAX(sock->rcvbuf / 4096, 1);
    max_batch_count = MIN(max_batch_count, max_iovs);
        /* Batch up to 'max_batch_count' transactions.  But cap it at about a
         * page of requests total because big skbuffs are expensive to
         * allocate in the kernel. */
#if defined(PAGESIZE)
        enum { MAX_BATCH_BYTES = MAX(1, PAGESIZE - 512) };
#else
        enum { MAX_BATCH_BYTES = 4096 - 512 };
#endif
        bytes = transactions[0]->request->size;
        for (count = 1; count < n && count < max_batch_count; count++) {
            if (bytes + transactions[count]->request->size > MAX_BATCH_BYTES) {
                break;
            }
            bytes += transactions[count]->request->size;
        }

        error = nl_sock_transact_multiple__(sock, transactions, count, &done);
        transactions += done;

        if (error == ENOBUFS) {
            VLOG_DBG_RL(&rl, "receive buffer overflow, resending request");
        } else if (error) {
            VLOG_ERR_RL(&rl, "transaction error (%s)", strerror(error));
            nl_sock_record_errors__(transactions, n, error);
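
/* Illustrative sketch (not part of the original code): issuing two requests
 * as a single batch.  Each struct nl_transaction supplies a 'request' ofpbuf;
 * nl_sock_transact_multiple() fills in 'error' and, when there is a detailed
 * reply, 'reply'.  Building the request messages themselves is elided here. */
static void
example_transact_pair(struct nl_sock *sock,
                      struct ofpbuf *request_a, struct ofpbuf *request_b)
{
    struct nl_transaction a, b;
    struct nl_transaction *ops[2];
    size_t i;

    a.request = request_a;
    b.request = request_b;
    ops[0] = &a;
    ops[1] = &b;

    nl_sock_transact_multiple(sock, ops, 2);
    for (i = 0; i < 2; i++) {
        if (ops[i]->error) {
            VLOG_WARN("request %zu failed (%s)", i, strerror(ops[i]->error));
        }
        ofpbuf_delete(ops[i]->reply);   /* 'reply' may be NULL; that's OK. */
    }
}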
/* Sends 'request' to the kernel via 'sock' and waits for a response.  If
 * successful, returns 0.  On failure, returns a positive errno value.
 *
 * If 'replyp' is nonnull, then on success '*replyp' is set to the kernel's
 * reply, which the caller is responsible for freeing with ofpbuf_delete(), and
 * on failure '*replyp' is set to NULL.  If 'replyp' is null, then the kernel's
 * reply, if any, is discarded.
 *
 * nlmsg_len in 'msg' will be finalized to match msg->size, and nlmsg_pid will
 * be set to 'sock''s pid, before the message is sent.  NLM_F_ACK will be set
 * in nlmsg_flags.
 *
 * The caller is responsible for destroying 'request'.
 *
 * Bare Netlink is an unreliable transport protocol.  This function layers
 * reliable delivery and reply semantics on top of bare Netlink.
 *
 * In Netlink, sending a request to the kernel is reliable enough, because the
 * kernel will tell us if the message cannot be queued (and we will in that
 * case put it on the transmit queue and wait until it can be delivered).
 *
 * Receiving the reply is the real problem: if the socket buffer is full when
 * the kernel tries to send the reply, the reply will be dropped.  However, the
 * kernel sets a flag that a reply has been dropped.  The next call to recv
 * then returns ENOBUFS.  We can then re-send the request.
 *
 * Caveats:
 *
 *      1. Netlink depends on sequence numbers to match up requests and
 *         replies.  The sender of a request supplies a sequence number, and
 *         the reply echoes back that sequence number.
 *
 *         This is fine, but (1) some kernel netlink implementations are
 *         broken, in that they fail to echo sequence numbers and (2) this
 *         function will drop packets with non-matching sequence numbers, so
 *         that only a single request can be usefully transacted at a time.
 *
 *      2. Resending the request causes it to be re-executed, so the request
 *         needs to be idempotent. */
nl_sock_transact(struct nl_sock *sock, const struct ofpbuf *request,
                 struct ofpbuf **replyp)

    struct nl_transaction *transactionp;
    struct nl_transaction transaction;

    transaction.request = (struct ofpbuf *) request;
    transactionp = &transaction;
    nl_sock_transact_multiple(sock, &transactionp, 1);
    if (replyp) {
        *replyp = transaction.reply;
    } else {
        ofpbuf_delete(transaction.reply);
    }
    return transaction.error;
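
/* Illustrative sketch (not part of the original code): a complete
 * request/reply round trip with nl_sock_transact(), patterned on
 * do_lookup_genl_family() further below.  The family name is a placeholder. */
static int
example_transact(struct nl_sock *sock, struct ofpbuf **replyp)
{
    struct ofpbuf request;
    int error;

    ofpbuf_init(&request, 0);
    nl_msg_put_genlmsghdr(&request, 0, GENL_ID_CTRL, NLM_F_REQUEST,
                          CTRL_CMD_GETFAMILY, 1);
    nl_msg_put_string(&request, CTRL_ATTR_FAMILY_NAME, "example_family");
    error = nl_sock_transact(sock, &request, replyp);
    ofpbuf_uninit(&request);
    return error;               /* On success, the caller owns '*replyp'. */
}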
/* Drain all the messages currently in 'sock''s receive queue. */
nl_sock_drain(struct nl_sock *sock)

    int error = nl_sock_cow__(sock);

    return drain_rcvbuf(sock->fd);
/* The client is attempting some operation on 'sock'.  If 'sock' has an ongoing
 * dump operation, then replace 'sock''s fd with a new socket and hand 'sock''s
 * old fd over to the dump. */
nl_sock_cow__(struct nl_sock *sock)

    struct nl_sock *copy;

    error = nl_sock_clone(sock, &copy);

    sock->pid = copy->pid;

    sock->dump->sock = copy;
/* Starts a Netlink "dump" operation, by sending 'request' to the kernel via
 * 'sock', and initializes 'dump' to reflect the state of the operation.
 *
 * nlmsg_len in 'msg' will be finalized to match msg->size, and nlmsg_pid will
 * be set to 'sock''s pid, before the message is sent.  NLM_F_DUMP and
 * NLM_F_ACK will be set in nlmsg_flags.
 *
 * This Netlink socket library is designed to ensure that the dump is reliable
 * and that it will not interfere with other operations on 'sock', including
 * destroying or sending and receiving messages on 'sock'.  One corner case is
 * not handled:
 *
 *   - If 'sock' has been used to send a request (e.g. with nl_sock_send())
 *     whose response has not yet been received (e.g. with nl_sock_recv()).
 *     This is unusual: usually nl_sock_transact() is used to send a message
 *     and receive its reply all in one go.
 *
 * This function provides no status indication.  An error status for the entire
 * dump operation is provided when it is completed by calling nl_dump_done().
 *
 * The caller is responsible for destroying 'request'.
 *
 * The new 'dump' is independent of 'sock'.  'sock' and 'dump' may be destroyed
 * in either order. */
nl_dump_start(struct nl_dump *dump,
              struct nl_sock *sock, const struct ofpbuf *request)

    struct nlmsghdr *nlmsghdr = nl_msg_nlmsghdr(request);

    nlmsghdr->nlmsg_flags |= NLM_F_DUMP | NLM_F_ACK;
    dump->seq = nlmsghdr->nlmsg_seq;

        /* 'sock' already has an ongoing dump.  Clone the socket because
         * Netlink only allows one dump at a time. */
        dump->status = nl_sock_clone(sock, &dump->sock);

    dump->status = nl_sock_send__(sock, request, true);
/* Helper function for nl_dump_next(). */
nl_dump_recv(struct nl_dump *dump, struct ofpbuf **bufferp)

    struct nlmsghdr *nlmsghdr;
    struct ofpbuf *buffer;

    retval = nl_sock_recv__(dump->sock, bufferp, true);
    if (retval) {
        return retval == EINTR ? EAGAIN : retval;
    }

    nlmsghdr = nl_msg_nlmsghdr(buffer);
    if (dump->seq != nlmsghdr->nlmsg_seq) {
        VLOG_DBG_RL(&rl, "ignoring seq %#"PRIx32" != expected %#"PRIx32,
                    nlmsghdr->nlmsg_seq, dump->seq);

    if (nl_msg_nlmsgerr(buffer, &retval)) {
        VLOG_INFO_RL(&rl, "netlink dump request error (%s)",
                     strerror(retval));
        return retval && retval != EAGAIN ? retval : EPROTO;
/* Attempts to retrieve another reply from 'dump', which must have been
 * initialized with nl_dump_start().
 *
 * If successful, returns true and points 'reply->data' and 'reply->size' to
 * the message that was retrieved.  The caller must not modify 'reply' (because
 * it points into the middle of a larger buffer).
 *
 * On failure, returns false and sets 'reply->data' to NULL and 'reply->size'
 * to 0.  Failure might indicate an actual error or merely the end of replies.
 * An error status for the entire dump operation is provided when it is
 * completed by calling nl_dump_done(). */
nl_dump_next(struct nl_dump *dump, struct ofpbuf *reply)

    struct nlmsghdr *nlmsghdr;

    if (dump->buffer && !dump->buffer->size) {
        ofpbuf_delete(dump->buffer);

    while (!dump->buffer) {
        int retval = nl_dump_recv(dump, &dump->buffer);

            ofpbuf_delete(dump->buffer);

            if (retval != EAGAIN) {
                dump->status = retval;

    nlmsghdr = nl_msg_next(dump->buffer, reply);
    if (!nlmsghdr) {
        VLOG_WARN_RL(&rl, "netlink dump reply contains message fragment");
        dump->status = EPROTO;
    } else if (nlmsghdr->nlmsg_type == NLMSG_DONE) {
/* Completes Netlink dump operation 'dump', which must have been initialized
 * with nl_dump_start().  Returns 0 if the dump operation was error-free,
 * otherwise a positive errno value describing the problem. */
nl_dump_done(struct nl_dump *dump)

    /* Drain any remaining messages that the client didn't read.  Otherwise the
     * kernel will continue to queue them up and waste buffer space. */
    while (!dump->status) {
        struct ofpbuf reply;

        if (!nl_dump_next(dump, &reply)) {
            assert(dump->status);
        }
    }

    if (dump->sock->dump) {
        dump->sock->dump = NULL;
    } else {
        nl_sock_destroy(dump->sock);
    }

    ofpbuf_delete(dump->buffer);
    return dump->status == EOF ? 0 : dump->status;
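
/* Illustrative sketch (not part of the original code): a full dump cycle with
 * nl_dump_start(), nl_dump_next(), and nl_dump_done().  With NLM_F_DUMP set
 * by nl_dump_start(), a CTRL_CMD_GETFAMILY request should ask the kernel to
 * enumerate registered Generic Netlink families; each reply is examined in
 * the loop body. */
static int
example_dump_genl_families(struct nl_sock *sock)
{
    struct ofpbuf request, reply;
    struct nl_dump dump;

    ofpbuf_init(&request, 0);
    nl_msg_put_genlmsghdr(&request, 0, GENL_ID_CTRL, NLM_F_REQUEST,
                          CTRL_CMD_GETFAMILY, 1);
    nl_dump_start(&dump, sock, &request);
    ofpbuf_uninit(&request);

    while (nl_dump_next(&dump, &reply)) {
        /* 'reply.data' and 'reply.size' describe one reply message; do not
         * modify or free 'reply', since it points into a larger buffer. */
    }
    return nl_dump_done(&dump);
}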
/* Causes poll_block() to wake up when any of the specified 'events' (which is
 * an OR'd combination of POLLIN, POLLOUT, etc.) occur on 'sock'. */
nl_sock_wait(const struct nl_sock *sock, short int events)

    poll_fd_wait(sock->fd, events);
/* Returns the underlying fd for 'sock', for use in "poll()"-like operations
 * that can't use nl_sock_wait().
 *
 * It's a little tricky to use the returned fd correctly, because nl_sock does
 * "copy on write" to allow a single nl_sock to be used for notifications,
 * transactions, and dumps.  If 'sock' is used only for notifications and
 * transactions (and never for dump) then the usage is safe. */
nl_sock_fd(const struct nl_sock *sock)
/* Returns the PID associated with this socket. */
nl_sock_pid(const struct nl_sock *sock)
    struct hmap_node hmap_node;

static struct hmap genl_families = HMAP_INITIALIZER(&genl_families);

static const struct nl_policy family_policy[CTRL_ATTR_MAX + 1] = {
    [CTRL_ATTR_FAMILY_ID] = {.type = NL_A_U16},
    [CTRL_ATTR_MCAST_GROUPS] = {.type = NL_A_NESTED, .optional = true},
static struct genl_family *
find_genl_family_by_id(uint16_t id)

    struct genl_family *family;

    HMAP_FOR_EACH_IN_BUCKET (family, hmap_node, hash_int(id, 0),
                             &genl_families) {
        if (family->id == id) {

define_genl_family(uint16_t id, const char *name)

    struct genl_family *family = find_genl_family_by_id(id);

        if (!strcmp(family->name, name)) {

    family = xmalloc(sizeof *family);

    hmap_insert(&genl_families, &family->hmap_node, hash_int(id, 0));

    family->name = xstrdup(name);
genl_family_to_name(uint16_t id)

    if (id == GENL_ID_CTRL) {

        struct genl_family *family = find_genl_family_by_id(id);
        return family ? family->name : "unknown";
do_lookup_genl_family(const char *name, struct nlattr **attrs,
                      struct ofpbuf **replyp)

    struct nl_sock *sock;
    struct ofpbuf request, *reply;

    error = nl_sock_create(NETLINK_GENERIC, &sock);

    ofpbuf_init(&request, 0);
    nl_msg_put_genlmsghdr(&request, 0, GENL_ID_CTRL, NLM_F_REQUEST,
                          CTRL_CMD_GETFAMILY, 1);
    nl_msg_put_string(&request, CTRL_ATTR_FAMILY_NAME, name);
    error = nl_sock_transact(sock, &request, &reply);
    ofpbuf_uninit(&request);

        nl_sock_destroy(sock);

    if (!nl_policy_parse(reply, NLMSG_HDRLEN + GENL_HDRLEN,
                         family_policy, attrs, ARRAY_SIZE(family_policy))
        || nl_attr_get_u16(attrs[CTRL_ATTR_FAMILY_ID]) == 0) {
        nl_sock_destroy(sock);
        ofpbuf_delete(reply);

    nl_sock_destroy(sock);
/* Finds the multicast group called 'group_name' in genl family 'family_name'.
 * When successful, writes its result to 'multicast_group' and returns 0.
 * Otherwise, clears 'multicast_group' and returns a positive error code.
 *
 * Some kernels do not support looking up a multicast group with this function.
 * In this case, 'multicast_group' will be populated with 'fallback'. */
nl_lookup_genl_mcgroup(const char *family_name, const char *group_name,
                       unsigned int *multicast_group, unsigned int fallback)

    struct nlattr *family_attrs[ARRAY_SIZE(family_policy)];
    const struct nlattr *mc;
    struct ofpbuf *reply;

    *multicast_group = 0;
    error = do_lookup_genl_family(family_name, family_attrs, &reply);

    if (!family_attrs[CTRL_ATTR_MCAST_GROUPS]) {
        *multicast_group = fallback;
        VLOG_WARN("%s-%s: has no multicast group, using fallback %d",
                  family_name, group_name, *multicast_group);
    NL_NESTED_FOR_EACH (mc, left, family_attrs[CTRL_ATTR_MCAST_GROUPS]) {
        static const struct nl_policy mc_policy[] = {
            [CTRL_ATTR_MCAST_GRP_ID] = {.type = NL_A_U32},
            [CTRL_ATTR_MCAST_GRP_NAME] = {.type = NL_A_STRING},
        };

        struct nlattr *mc_attrs[ARRAY_SIZE(mc_policy)];
        const char *mc_name;

        if (!nl_parse_nested(mc, mc_policy, mc_attrs, ARRAY_SIZE(mc_policy))) {

        mc_name = nl_attr_get_string(mc_attrs[CTRL_ATTR_MCAST_GRP_NAME]);
        if (!strcmp(group_name, mc_name)) {
            *multicast_group =
                nl_attr_get_u32(mc_attrs[CTRL_ATTR_MCAST_GRP_ID]);

    ofpbuf_delete(reply);
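
/* Illustrative sketch (not part of the original code): resolving a multicast
 * group by name and subscribing to it.  The family name, group name, and
 * fallback group number are placeholders. */
static int
example_join_named_group(struct nl_sock *sock)
{
    unsigned int group;
    int error;

    error = nl_lookup_genl_mcgroup("example_family", "example_group",
                                   &group, 1 /* hypothetical fallback */);
    if (error) {
        return error;
    }
    return nl_sock_join_mcgroup(sock, group);
}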
/* If '*number' is 0, translates the given Generic Netlink family 'name' to a
 * number and stores it in '*number'.  If successful, returns 0 and the caller
 * may use '*number' as the family number.  On failure, returns a positive
 * errno value and '*number' caches the errno value. */
nl_lookup_genl_family(const char *name, int *number)

    struct nlattr *attrs[ARRAY_SIZE(family_policy)];
    struct ofpbuf *reply;

    error = do_lookup_genl_family(name, attrs, &reply);

        *number = nl_attr_get_u16(attrs[CTRL_ATTR_FAMILY_ID]);
        define_genl_family(*number, name);

    ofpbuf_delete(reply);

    assert(*number != 0);

    return *number > 0 ? 0 : -*number;
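
/* Illustrative sketch (not part of the original code): resolving a Generic
 * Netlink family name to its numeric id, caching the result in a static
 * variable as the interface above expects ('*number' starts out 0 and then
 * holds the cached result).  The family name is a placeholder. */
static int
example_lookup_family(int *familyp)
{
    static int family_number;   /* Starts at 0, then caches the lookup. */
    int error = nl_lookup_genl_family("example_family", &family_number);
    if (!error) {
        *familyp = family_number;
    }
    return error;
}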
nlmsghdr_to_string(const struct nlmsghdr *h, int protocol, struct ds *ds)

    static const struct nlmsg_flag flags[] = {
        { NLM_F_REQUEST, "REQUEST" },
        { NLM_F_MULTI, "MULTI" },
        { NLM_F_ACK, "ACK" },
        { NLM_F_ECHO, "ECHO" },
        { NLM_F_DUMP, "DUMP" },
        { NLM_F_ROOT, "ROOT" },
        { NLM_F_MATCH, "MATCH" },
        { NLM_F_ATOMIC, "ATOMIC" },
    };
    const struct nlmsg_flag *flag;
    uint16_t flags_left;

    ds_put_format(ds, "nl(len:%"PRIu32", type=%"PRIu16,
                  h->nlmsg_len, h->nlmsg_type);
    if (h->nlmsg_type == NLMSG_NOOP) {
        ds_put_cstr(ds, "(no-op)");
    } else if (h->nlmsg_type == NLMSG_ERROR) {
        ds_put_cstr(ds, "(error)");
    } else if (h->nlmsg_type == NLMSG_DONE) {
        ds_put_cstr(ds, "(done)");
    } else if (h->nlmsg_type == NLMSG_OVERRUN) {
        ds_put_cstr(ds, "(overrun)");
    } else if (h->nlmsg_type < NLMSG_MIN_TYPE) {
        ds_put_cstr(ds, "(reserved)");
    } else if (protocol == NETLINK_GENERIC) {
        ds_put_format(ds, "(%s)", genl_family_to_name(h->nlmsg_type));
    } else {
        ds_put_cstr(ds, "(family-defined)");
    }
    ds_put_format(ds, ", flags=%"PRIx16, h->nlmsg_flags);
    flags_left = h->nlmsg_flags;
    for (flag = flags; flag < &flags[ARRAY_SIZE(flags)]; flag++) {
        if ((flags_left & flag->bits) == flag->bits) {
            ds_put_format(ds, "[%s]", flag->name);
            flags_left &= ~flag->bits;
        }
    }

        ds_put_format(ds, "[OTHER:%"PRIx16"]", flags_left);

    ds_put_format(ds, ", seq=%"PRIx32", pid=%"PRIu32,
                  h->nlmsg_seq, h->nlmsg_pid);
nlmsg_to_string(const struct ofpbuf *buffer, int protocol)

    struct ds ds = DS_EMPTY_INITIALIZER;
    const struct nlmsghdr *h = ofpbuf_at(buffer, 0, NLMSG_HDRLEN);

        nlmsghdr_to_string(h, protocol, &ds);
        if (h->nlmsg_type == NLMSG_ERROR) {
            const struct nlmsgerr *e;
            e = ofpbuf_at(buffer, NLMSG_HDRLEN,
                          NLMSG_ALIGN(sizeof(struct nlmsgerr)));

                ds_put_format(&ds, " error(%d", e->error);

                    ds_put_format(&ds, "(%s)", strerror(-e->error));

                ds_put_cstr(&ds, ", in-reply-to(");
                nlmsghdr_to_string(&e->msg, protocol, &ds);
                ds_put_cstr(&ds, "))");

                ds_put_cstr(&ds, " error(truncated)");

        } else if (h->nlmsg_type == NLMSG_DONE) {
            int *error = ofpbuf_at(buffer, NLMSG_HDRLEN, sizeof *error);

                ds_put_format(&ds, " done(%d", *error);

                    ds_put_format(&ds, "(%s)", strerror(-*error));

                ds_put_cstr(&ds, ")");

                ds_put_cstr(&ds, " done(truncated)");

        } else if (protocol == NETLINK_GENERIC) {
            struct genlmsghdr *genl = nl_msg_genlmsghdr(buffer);

                ds_put_format(&ds, ",genl(cmd=%"PRIu8",version=%"PRIu8")",
                              genl->cmd, genl->version);

        ds_put_cstr(&ds, "nl(truncated)");
log_nlmsg(const char *function, int error,
          const void *message, size_t size, int protocol)

    struct ofpbuf buffer;

    if (!VLOG_IS_DBG_ENABLED()) {

    ofpbuf_use_const(&buffer, message, size);
    nlmsg = nlmsg_to_string(&buffer, protocol);
    VLOG_DBG_RL(&rl, "%s (%s): %s", function, strerror(error), nlmsg);