 * Copyright (c) 2008, 2009, 2010, 2011, 2012 Nicira, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
#include "netlink-socket.h"
#include <sys/types.h>
#include "dynamic-string.h"
#include "netlink-protocol.h"
#include "poll-loop.h"
#include "socket-util.h"
VLOG_DEFINE_THIS_MODULE(netlink_socket);

COVERAGE_DEFINE(netlink_overflow);
COVERAGE_DEFINE(netlink_received);
COVERAGE_DEFINE(netlink_recv_jumbo);
COVERAGE_DEFINE(netlink_send);
COVERAGE_DEFINE(netlink_sent);
/* Linux header file confusion causes this to be undefined. */
#define SOL_NETLINK 270
/* A single (bad) Netlink message can in theory dump out many, many log
 * messages, so the burst size is set quite high here to avoid missing useful
 * information. Also, at high logging levels we log *all* Netlink messages. */
static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(60, 600);
static uint32_t nl_sock_allocate_seq(struct nl_sock *, unsigned int n);
static void log_nlmsg(const char *function, int error,
                      const void *message, size_t size, int protocol);
/* Netlink sockets. */
    unsigned int rcvbuf;        /* Receive buffer size (SO_RCVBUF). */
/* Compile-time limit on iovecs, so that we can allocate a maximum-size array
 * of iovecs on the stack. */
/* Maximum number of iovecs that may be passed to sendmsg, capped at a
 * minimum of _XOPEN_IOV_MAX (16) and a maximum of MAX_IOVS.
 *
 * Initialized by nl_sock_create(). */
static int nl_sock_cow__(struct nl_sock *);

/* Creates a new netlink socket for the given netlink 'protocol'
 * (NETLINK_ROUTE, NETLINK_GENERIC, ...). Returns 0 and sets '*sockp' to the
 * new socket if successful, otherwise returns a positive errno value. */
nl_sock_create(int protocol, struct nl_sock **sockp)
    struct sockaddr_nl local, remote;
        int save_errno = errno;
        max_iovs = sysconf(_SC_UIO_MAXIOV);
        if (max_iovs < _XOPEN_IOV_MAX) {
            if (max_iovs == -1 && errno) {
                VLOG_WARN("sysconf(_SC_UIO_MAXIOV): %s", strerror(errno));
            max_iovs = _XOPEN_IOV_MAX;
        } else if (max_iovs > MAX_IOVS) {
    sock = malloc(sizeof *sock);
    sock->fd = socket(AF_NETLINK, SOCK_RAW, protocol);
        VLOG_ERR("socket: %s", strerror(errno));
    sock->protocol = protocol;
    rcvbuf = 1024 * 1024;
    if (setsockopt(sock->fd, SOL_SOCKET, SO_RCVBUFFORCE,
                   &rcvbuf, sizeof rcvbuf)) {
        VLOG_WARN_RL(&rl, "setting %d-byte socket receive buffer failed (%s)",
                     rcvbuf, strerror(errno));
    retval = get_socket_rcvbuf(sock->fd);
    sock->rcvbuf = retval;
    /* Connect to kernel (pid 0) as remote address. */
    memset(&remote, 0, sizeof remote);
    remote.nl_family = AF_NETLINK;
    if (connect(sock->fd, (struct sockaddr *) &remote, sizeof remote) < 0) {
        VLOG_ERR("connect(0): %s", strerror(errno));
    /* Obtain pid assigned by kernel. */
    local_size = sizeof local;
    if (getsockname(sock->fd, (struct sockaddr *) &local, &local_size) < 0) {
        VLOG_ERR("getsockname: %s", strerror(errno));
    if (local_size < sizeof local || local.nl_family != AF_NETLINK) {
        VLOG_ERR("getsockname returned bad Netlink name");
    sock->pid = local.nl_pid;
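
/* Illustrative sketch (not part of this file): the simplest lifecycle is to
 * create a socket for a protocol, use it, and destroy it:
 *
 *     struct nl_sock *sock;
 *     int error;
 *
 *     error = nl_sock_create(NETLINK_GENERIC, &sock);
 *     if (!error) {
 *         ...use 'sock', e.g. with nl_sock_transact() or nl_dump_start()...
 *         nl_sock_destroy(sock);
 *     }
 */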
/* Creates a new netlink socket for the same protocol as 'src'. Returns 0 and
 * sets '*sockp' to the new socket if successful, otherwise returns a positive
 * errno value. */
nl_sock_clone(const struct nl_sock *src, struct nl_sock **sockp)
    return nl_sock_create(src->protocol, sockp);

/* Destroys netlink socket 'sock'. */
nl_sock_destroy(struct nl_sock *sock)
/* Tries to add 'sock' as a listener for 'multicast_group'. Returns 0 if
 * successful, otherwise a positive errno value.
 *
 * A socket that is subscribed to a multicast group that receives asynchronous
 * notifications must not be used for Netlink transactions or dumps, because
 * transactions and dumps can cause notifications to be lost.
 *
 * Multicast group numbers are always positive.
 *
 * It is not an error to attempt to join a multicast group to which a socket
 * already belongs. */
nl_sock_join_mcgroup(struct nl_sock *sock, unsigned int multicast_group)
    int error = nl_sock_cow__(sock);
    if (setsockopt(sock->fd, SOL_NETLINK, NETLINK_ADD_MEMBERSHIP,
                   &multicast_group, sizeof multicast_group) < 0) {
        VLOG_WARN("could not join multicast group %u (%s)",
                  multicast_group, strerror(errno));
/* Tries to make 'sock' stop listening to 'multicast_group'. Returns 0 if
 * successful, otherwise a positive errno value.
 *
 * Multicast group numbers are always positive.
 *
 * It is not an error to attempt to leave a multicast group to which a socket
 * does not belong.
 *
 * On success, reading from 'sock' will still return any messages that were
 * received on 'multicast_group' before the group was left. */
nl_sock_leave_mcgroup(struct nl_sock *sock, unsigned int multicast_group)
    if (setsockopt(sock->fd, SOL_NETLINK, NETLINK_DROP_MEMBERSHIP,
                   &multicast_group, sizeof multicast_group) < 0) {
        VLOG_WARN("could not leave multicast group %u (%s)",
                  multicast_group, strerror(errno));
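
/* Illustrative sketch (not part of this file): the usual pattern for
 * asynchronous notifications is to dedicate a socket to the multicast group,
 * separate from any socket used for transactions or dumps, as the comment on
 * nl_sock_join_mcgroup() advises.  'group' would be a group number, e.g. from
 * nl_lookup_genl_mcgroup(), and process_notification() is a hypothetical
 * handler:
 *
 *     struct nl_sock *notify_sock;
 *     struct ofpbuf buf;
 *
 *     if (!nl_sock_create(NETLINK_GENERIC, &notify_sock)
 *         && !nl_sock_join_mcgroup(notify_sock, group)) {
 *         ofpbuf_init(&buf, 2048);
 *         while (!nl_sock_recv(notify_sock, &buf, false)) {
 *             process_notification(&buf);
 *         }
 *         ofpbuf_uninit(&buf);
 *     }
 */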
nl_sock_send__(struct nl_sock *sock, const struct ofpbuf *msg, bool wait)
    struct nlmsghdr *nlmsg = nl_msg_nlmsghdr(msg);
    nlmsg->nlmsg_len = msg->size;
    nlmsg->nlmsg_seq = nl_sock_allocate_seq(sock, 1);
    nlmsg->nlmsg_pid = sock->pid;
        retval = send(sock->fd, msg->data, msg->size, wait ? 0 : MSG_DONTWAIT);
        error = retval < 0 ? errno : 0;
    } while (error == EINTR);
    log_nlmsg(__func__, error, msg->data, msg->size, sock->protocol);
        COVERAGE_INC(netlink_sent);

/* Tries to send 'msg', which must contain a Netlink message, to the kernel on
 * 'sock'. nlmsg_len in 'msg' will be finalized to match msg->size, and
 * nlmsg_pid will be set to 'sock''s pid, before the message is sent.
 *
 * Returns 0 if successful, otherwise a positive errno value. If
 * 'wait' is true, then the send will wait until buffer space is ready;
 * otherwise, returns EAGAIN if the 'sock' send buffer is full. */
nl_sock_send(struct nl_sock *sock, const struct ofpbuf *msg, bool wait)
    int error = nl_sock_cow__(sock);
    return nl_sock_send__(sock, msg, wait);

/* This stress option is useful for testing that OVS properly tolerates
 * -ENOBUFS on Netlink sockets. Such errors are unavoidable because they can
 * occur if the kernel cannot temporarily allocate enough GFP_ATOMIC memory to
 * reply to a request. They can also occur if messages arrive on a multicast
 * channel faster than OVS can process them. */
    netlink_overflow, "simulate netlink socket receive buffer overflow",
nl_sock_recv__(struct nl_sock *sock, struct ofpbuf *buf, bool wait)
    /* We can't accurately predict the size of the data to be received. The
     * caller is supposed to have allocated enough space in 'buf' to handle the
     * "typical" case. To handle exceptions, we make available enough space in
     * 'tail' to allow Netlink messages to be up to 64 kB long (a reasonable
     * figure since that's the maximum length of a Netlink attribute). */
    struct nlmsghdr *nlmsghdr;
    assert(buf->allocated >= sizeof *nlmsghdr);
    iov[0].iov_base = buf->base;
    iov[0].iov_len = buf->allocated;
    iov[1].iov_base = tail;
    iov[1].iov_len = sizeof tail;
    memset(&msg, 0, sizeof msg);
        retval = recvmsg(sock->fd, &msg, wait ? 0 : MSG_DONTWAIT);
    } while (retval < 0 && errno == EINTR);
        if (error == ENOBUFS) {
            /* Socket receive buffer overflow dropped one or more messages that
             * the kernel tried to send to us. */
            COVERAGE_INC(netlink_overflow);
    if (msg.msg_flags & MSG_TRUNC) {
        VLOG_ERR_RL(&rl, "truncated message (longer than %zu bytes)",
    nlmsghdr = buf->data;
    if (retval < sizeof *nlmsghdr
        || nlmsghdr->nlmsg_len < sizeof *nlmsghdr
        || nlmsghdr->nlmsg_len > retval) {
        VLOG_ERR_RL(&rl, "received invalid nlmsg (%zd bytes < %zu)",
                    retval, sizeof *nlmsghdr);
    if (STRESS(netlink_overflow)) {
    buf->size = MIN(retval, buf->allocated);
    if (retval > buf->allocated) {
        COVERAGE_INC(netlink_recv_jumbo);
        ofpbuf_put(buf, tail, retval - buf->allocated);
    log_nlmsg(__func__, 0, buf->data, buf->size, sock->protocol);
    COVERAGE_INC(netlink_received);

/* Tries to receive a Netlink message from the kernel on 'sock' into 'buf'. If
 * 'wait' is true, waits for a message to be ready. Otherwise, fails with
 * EAGAIN if the 'sock' receive buffer is empty.
 *
 * The caller must have initialized 'buf' with an allocation of at least
 * NLMSG_HDRLEN bytes. For best performance, the caller should allocate enough
 * space for a "typical" message.
 *
 * On success, returns 0 and replaces 'buf''s previous content by the received
 * message. This function expands 'buf''s allocated memory, as necessary, to
 * hold the actual size of the received message.
 *
 * On failure, returns a positive errno value and clears 'buf' to zero length.
 * 'buf' retains its previous memory allocation.
 *
 * Regardless of success or failure, this function resets 'buf''s headroom to
 * 0. */
nl_sock_recv(struct nl_sock *sock, struct ofpbuf *buf, bool wait)
    int error = nl_sock_cow__(sock);
    return nl_sock_recv__(sock, buf, wait);
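
/* Illustrative sketch (not part of this file): nl_sock_send() and
 * nl_sock_recv() can be paired by hand, although nl_sock_transact() below is
 * usually preferable because it also handles ENOBUFS and sequence matching.
 * 'family' would be a Generic Netlink family number (see
 * nl_lookup_genl_family()) and MY_CMD is a hypothetical command:
 *
 *     struct ofpbuf request, reply;
 *     int error;
 *
 *     ofpbuf_init(&request, 0);
 *     nl_msg_put_genlmsghdr(&request, 0, family, NLM_F_REQUEST | NLM_F_ACK,
 *                           MY_CMD, 1);
 *     error = nl_sock_send(sock, &request, true);
 *     ofpbuf_uninit(&request);
 *
 *     ofpbuf_init(&reply, 4096);
 *     if (!error) {
 *         error = nl_sock_recv(sock, &reply, true);
 *     }
 *     ofpbuf_uninit(&reply);
 */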
nl_sock_record_errors__(struct nl_transaction **transactions, size_t n,
    for (i = 0; i < n; i++) {
        struct nl_transaction *txn = transactions[i];
            ofpbuf_clear(txn->reply);
nl_sock_transact_multiple__(struct nl_sock *sock,
                            struct nl_transaction **transactions, size_t n,
    uint64_t tmp_reply_stub[1024 / 8];
    struct nl_transaction tmp_txn;
    struct ofpbuf tmp_reply;
    struct iovec iovs[MAX_IOVS];
    base_seq = nl_sock_allocate_seq(sock, n);
    for (i = 0; i < n; i++) {
        struct nl_transaction *txn = transactions[i];
        struct nlmsghdr *nlmsg = nl_msg_nlmsghdr(txn->request);

        nlmsg->nlmsg_len = txn->request->size;
        nlmsg->nlmsg_seq = base_seq + i;
        nlmsg->nlmsg_pid = sock->pid;

        iovs[i].iov_base = txn->request->data;
        iovs[i].iov_len = txn->request->size;
    memset(&msg, 0, sizeof msg);
        error = sendmsg(sock->fd, &msg, 0) < 0 ? errno : 0;
    } while (error == EINTR);
    for (i = 0; i < n; i++) {
        struct nl_transaction *txn = transactions[i];
        log_nlmsg(__func__, error, txn->request->data, txn->request->size,
        COVERAGE_ADD(netlink_sent, n);
    ofpbuf_use_stub(&tmp_reply, tmp_reply_stub, sizeof tmp_reply_stub);
    tmp_txn.request = NULL;
    tmp_txn.reply = &tmp_reply;
        struct nl_transaction *buf_txn, *txn;
        /* Find a transaction whose buffer we can use for receiving a reply.
         * If no such transaction is left, use tmp_txn. */
        for (i = 0; i < n; i++) {
            if (transactions[i]->reply) {
                buf_txn = transactions[i];
        /* Receive a reply. */
        error = nl_sock_recv__(sock, buf_txn->reply, false);
            if (error == EAGAIN) {
                nl_sock_record_errors__(transactions, n, 0);
        /* Match the reply up with a transaction. */
        seq = nl_msg_nlmsghdr(buf_txn->reply)->nlmsg_seq;
        if (seq < base_seq || seq >= base_seq + n) {
            VLOG_DBG_RL(&rl, "ignoring unexpected seq %#"PRIx32, seq);
        txn = transactions[i];
        /* Fill in the results for 'txn'. */
        if (nl_msg_nlmsgerr(buf_txn->reply, &txn->error)) {
                ofpbuf_clear(txn->reply);
                VLOG_DBG_RL(&rl, "received NAK error=%d (%s)",
                            txn->error, strerror(txn->error));
            if (txn->reply && txn != buf_txn) {
                struct ofpbuf *reply = buf_txn->reply;
                buf_txn->reply = txn->reply;
        /* Fill in the results for transactions before 'txn'. (We have to do
         * this after the results for 'txn' itself because of the buffer swap
         * above.) */
        nl_sock_record_errors__(transactions, i, 0);
        transactions += i + 1;
    ofpbuf_uninit(&tmp_reply);

/* Sends the 'request' member of the 'n' transactions in 'transactions' on
 * 'sock', in order, and receives responses to all of them. Fills in the
 * 'error' member of each transaction with 0 if it was successful, otherwise
 * with a positive errno value. If 'reply' is nonnull, then it will be filled
 * with the reply if the message receives a detailed reply. In other cases,
 * i.e. where the request failed or had no reply beyond an indication of
 * success, 'reply' will be cleared if it is nonnull.
 *
 * The caller is responsible for destroying each request and reply, and the
 * transactions array itself.
 *
 * Before sending each message, this function will finalize nlmsg_len in each
 * 'request' to match the ofpbuf's size, set nlmsg_pid to 'sock''s pid, and
 * initialize nlmsg_seq.
 *
 * Bare Netlink is an unreliable transport protocol. This function layers
 * reliable delivery and reply semantics on top of bare Netlink. See
 * nl_sock_transact() for some caveats. */
nl_sock_transact_multiple(struct nl_sock *sock,
                          struct nl_transaction **transactions, size_t n)
    error = nl_sock_cow__(sock);
        nl_sock_record_errors__(transactions, n, error);
    /* In theory, every request could have a 64 kB reply. But the default and
     * maximum socket rcvbuf size with typical Dom0 memory sizes both tend to
     * be a bit below 128 kB, so that would only allow a single message in a
     * "batch". So we assume that replies average (at most) 4 kB, which allows
     * a good deal of batching.
     *
     * In practice, most of the requests that we batch either have no reply at
     * all or a brief reply. */
    max_batch_count = MAX(sock->rcvbuf / 4096, 1);
    max_batch_count = MIN(max_batch_count, max_iovs);
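    /* For example (illustrative only): with the 1 MB receive buffer requested
     * in nl_sock_create(), this works out to roughly 1048576 / 4096 = 256
     * transactions per batch, before the max_iovs cap is applied. */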
        /* Batch up to 'max_batch_count' transactions. But cap it at about a
         * page of requests total because big skbuffs are expensive to
         * allocate in the kernel. */
#if defined(PAGESIZE)
        enum { MAX_BATCH_BYTES = MAX(1, PAGESIZE - 512) };
#else
        enum { MAX_BATCH_BYTES = 4096 - 512 };
#endif
        bytes = transactions[0]->request->size;
        for (count = 1; count < n && count < max_batch_count; count++) {
            if (bytes + transactions[count]->request->size > MAX_BATCH_BYTES) {
            bytes += transactions[count]->request->size;
        error = nl_sock_transact_multiple__(sock, transactions, count, &done);
        transactions += done;
        if (error == ENOBUFS) {
            VLOG_DBG_RL(&rl, "receive buffer overflow, resending request");
            VLOG_ERR_RL(&rl, "transaction error (%s)", strerror(error));
            nl_sock_record_errors__(transactions, n, error);
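
/* Illustrative sketch (not part of this file): a caller batches requests by
 * filling in an array of struct nl_transaction and passing an array of
 * pointers to them.  make_request() is a hypothetical helper that returns a
 * heap-allocated struct ofpbuf *; setting 'reply' to NULL instead asks for
 * any reply to be discarded:
 *
 *     struct nl_transaction txns[2];
 *     struct nl_transaction *txnp[2];
 *     size_t i;
 *
 *     for (i = 0; i < 2; i++) {
 *         txns[i].request = make_request(i);
 *         txns[i].reply = ofpbuf_new(1024);
 *         txnp[i] = &txns[i];
 *     }
 *     nl_sock_transact_multiple(sock, txnp, 2);
 *     for (i = 0; i < 2; i++) {
 *         if (!txns[i].error) {
 *             (use txns[i].reply here)
 *         }
 *         ofpbuf_delete(txns[i].request);
 *         ofpbuf_delete(txns[i].reply);
 *     }
 */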

/* Sends 'request' to the kernel via 'sock' and waits for a response. If
 * successful, returns 0. On failure, returns a positive errno value.
 *
 * If 'replyp' is nonnull, then on success '*replyp' is set to the kernel's
 * reply, which the caller is responsible for freeing with ofpbuf_delete(), and
 * on failure '*replyp' is set to NULL. If 'replyp' is null, then the kernel's
 * reply, if any, is discarded.
 *
 * Before the message is sent, nlmsg_len in 'request' will be finalized to
 * match request->size, nlmsg_pid will be set to 'sock''s pid, and nlmsg_seq
 * will be initialized, and NLM_F_ACK will be set in nlmsg_flags.
 *
 * The caller is responsible for destroying 'request'.
 *
 * Bare Netlink is an unreliable transport protocol. This function layers
 * reliable delivery and reply semantics on top of bare Netlink.
 *
 * In Netlink, sending a request to the kernel is reliable enough, because the
 * kernel will tell us if the message cannot be queued (and we will in that
 * case put it on the transmit queue and wait until it can be delivered).
 *
 * Receiving the reply is the real problem: if the socket buffer is full when
 * the kernel tries to send the reply, the reply will be dropped. However, the
 * kernel sets a flag that a reply has been dropped. The next call to recv
 * then returns ENOBUFS. We can then re-send the request.
 *
 * Caveats:
 *
 *      1. Netlink depends on sequence numbers to match up requests and
 *         replies. The sender of a request supplies a sequence number, and
 *         the reply echoes back that sequence number.
 *
 *         This is fine, but (1) some kernel netlink implementations are
 *         broken, in that they fail to echo sequence numbers and (2) this
 *         function will drop packets with non-matching sequence numbers, so
 *         that only a single request can be usefully transacted at a time.
 *
 *      2. Resending the request causes it to be re-executed, so the request
 *         needs to be idempotent. */
nl_sock_transact(struct nl_sock *sock, const struct ofpbuf *request,
                 struct ofpbuf **replyp)
    struct nl_transaction *transactionp;
    struct nl_transaction transaction;
    transaction.request = (struct ofpbuf *) request;
    transaction.reply = replyp ? ofpbuf_new(1024) : NULL;
    transactionp = &transaction;
    nl_sock_transact_multiple(sock, &transactionp, 1);
        if (transaction.error) {
            ofpbuf_delete(transaction.reply);
            *replyp = transaction.reply;
    return transaction.error;
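
/* Illustrative sketch (not part of this file): a request/reply exchange with
 * nl_sock_transact(), essentially what do_lookup_genl_family() below does to
 * resolve a Generic Netlink family name:
 *
 *     struct nl_sock *sock;
 *     struct ofpbuf request, *reply;
 *     int error;
 *
 *     error = nl_sock_create(NETLINK_GENERIC, &sock);
 *     if (error) {
 *         return error;
 *     }
 *     ofpbuf_init(&request, 0);
 *     nl_msg_put_genlmsghdr(&request, 0, GENL_ID_CTRL, NLM_F_REQUEST,
 *                           CTRL_CMD_GETFAMILY, 1);
 *     nl_msg_put_string(&request, CTRL_ATTR_FAMILY_NAME, "nlctrl");
 *     error = nl_sock_transact(sock, &request, &reply);
 *     ofpbuf_uninit(&request);
 *     if (!error) {
 *         (parse 'reply', then ofpbuf_delete(reply))
 *     }
 *     nl_sock_destroy(sock);
 */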

/* Drain all the messages currently in 'sock''s receive queue. */
nl_sock_drain(struct nl_sock *sock)
    int error = nl_sock_cow__(sock);
    return drain_rcvbuf(sock->fd);

/* The client is attempting some operation on 'sock'. If 'sock' has an ongoing
 * dump operation, then replace 'sock''s fd with a new socket and hand 'sock''s
 * old fd over to the dump. */
nl_sock_cow__(struct nl_sock *sock)
    struct nl_sock *copy;
    error = nl_sock_clone(sock, &copy);
    sock->pid = copy->pid;
    sock->dump->sock = copy;

/* Starts a Netlink "dump" operation, by sending 'request' to the kernel via
 * 'sock', and initializes 'dump' to reflect the state of the operation.
 *
 * nlmsg_len in 'request' will be finalized to match request->size, and
 * nlmsg_pid will be set to 'sock''s pid, before the message is sent. NLM_F_DUMP
 * and NLM_F_ACK will be set in nlmsg_flags.
 *
 * This Netlink socket library is designed to ensure that the dump is reliable
 * and that it will not interfere with other operations on 'sock', including
 * destroying or sending and receiving messages on 'sock'. One corner case is:
 *
 *   - If 'sock' has been used to send a request (e.g. with nl_sock_send())
 *     whose response has not yet been received (e.g. with nl_sock_recv()).
 *     This is unusual: usually nl_sock_transact() is used to send a message
 *     and receive its reply all in one go.
 *
 * This function provides no status indication. An error status for the entire
 * dump operation is provided when it is completed by calling nl_dump_done().
 *
 * The caller is responsible for destroying 'request'.
 *
 * The new 'dump' is independent of 'sock'. 'sock' and 'dump' may be destroyed
 * in either order. */
nl_dump_start(struct nl_dump *dump,
              struct nl_sock *sock, const struct ofpbuf *request)
    ofpbuf_init(&dump->buffer, 4096);
        /* 'sock' already has an ongoing dump. Clone the socket because
         * Netlink only allows one dump at a time. */
        dump->status = nl_sock_clone(sock, &dump->sock);
    nl_msg_nlmsghdr(request)->nlmsg_flags |= NLM_F_DUMP | NLM_F_ACK;
    dump->status = nl_sock_send__(sock, request, true);
    dump->seq = nl_msg_nlmsghdr(request)->nlmsg_seq;

/* Helper function for nl_dump_next(). */
nl_dump_recv(struct nl_dump *dump)
    struct nlmsghdr *nlmsghdr;
    retval = nl_sock_recv__(dump->sock, &dump->buffer, true);
        return retval == EINTR ? EAGAIN : retval;
    nlmsghdr = nl_msg_nlmsghdr(&dump->buffer);
    if (dump->seq != nlmsghdr->nlmsg_seq) {
        VLOG_DBG_RL(&rl, "ignoring seq %#"PRIx32" != expected %#"PRIx32,
                    nlmsghdr->nlmsg_seq, dump->seq);
    if (nl_msg_nlmsgerr(&dump->buffer, &retval)) {
        VLOG_INFO_RL(&rl, "netlink dump request error (%s)",
        return retval && retval != EAGAIN ? retval : EPROTO;

/* Attempts to retrieve another reply from 'dump', which must have been
 * initialized with nl_dump_start().
 *
 * If successful, returns true and points 'reply->data' and 'reply->size' to
 * the message that was retrieved. The caller must not modify 'reply' (because
 * it points into the middle of a larger buffer).
 *
 * On failure, returns false and sets 'reply->data' to NULL and 'reply->size'
 * to 0. Failure might indicate an actual error or merely the end of replies.
 * An error status for the entire dump operation is provided when it is
 * completed by calling nl_dump_done(). */
nl_dump_next(struct nl_dump *dump, struct ofpbuf *reply)
    struct nlmsghdr *nlmsghdr;
    while (!dump->buffer.size) {
        int retval = nl_dump_recv(dump);
            ofpbuf_clear(&dump->buffer);
            if (retval != EAGAIN) {
                dump->status = retval;
    nlmsghdr = nl_msg_next(&dump->buffer, reply);
        VLOG_WARN_RL(&rl, "netlink dump reply contains message fragment");
        dump->status = EPROTO;
    } else if (nlmsghdr->nlmsg_type == NLMSG_DONE) {

/* Completes Netlink dump operation 'dump', which must have been initialized
 * with nl_dump_start(). Returns 0 if the dump operation was error-free,
 * otherwise a positive errno value describing the problem. */
nl_dump_done(struct nl_dump *dump)
    /* Drain any remaining messages that the client didn't read. Otherwise the
     * kernel will continue to queue them up and waste buffer space. */
    while (!dump->status) {
        if (!nl_dump_next(dump, &reply)) {
            assert(dump->status);
        if (dump->sock->dump) {
            dump->sock->dump = NULL;
            nl_sock_destroy(dump->sock);
    ofpbuf_uninit(&dump->buffer);
    return dump->status == EOF ? 0 : dump->status;
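
/* Illustrative sketch (not part of this file): the usual dump loop, assuming
 * 'request' already holds a dump request for 'sock' and was allocated with
 * ofpbuf_new(); process_record() is a hypothetical per-record handler:
 *
 *     struct nl_dump dump;
 *     struct ofpbuf reply;
 *     int error;
 *
 *     nl_dump_start(&dump, sock, request);
 *     ofpbuf_delete(request);
 *     while (nl_dump_next(&dump, &reply)) {
 *         (each 'reply' points into the dump's buffer; do not modify it)
 *         process_record(&reply);
 *     }
 *     error = nl_dump_done(&dump);
 */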

/* Causes poll_block() to wake up when any of the specified 'events' (which is
 * an OR'd combination of POLLIN, POLLOUT, etc.) occur on 'sock'. */
nl_sock_wait(const struct nl_sock *sock, short int events)
    poll_fd_wait(sock->fd, events);

/* Returns the underlying fd for 'sock', for use in "poll()"-like operations
 * that can't use nl_sock_wait().
 *
 * It's a little tricky to use the returned fd correctly, because nl_sock does
 * "copy on write" to allow a single nl_sock to be used for notifications,
 * transactions, and dumps. If 'sock' is used only for notifications and
 * transactions (and never for dump) then the usage is safe. */
nl_sock_fd(const struct nl_sock *sock)

/* Returns the PID associated with this socket. */
nl_sock_pid(const struct nl_sock *sock)
    struct hmap_node hmap_node;

static struct hmap genl_families = HMAP_INITIALIZER(&genl_families);

static const struct nl_policy family_policy[CTRL_ATTR_MAX + 1] = {
    [CTRL_ATTR_FAMILY_ID] = {.type = NL_A_U16},
    [CTRL_ATTR_MCAST_GROUPS] = {.type = NL_A_NESTED, .optional = true},
static struct genl_family *
find_genl_family_by_id(uint16_t id)
    struct genl_family *family;
    HMAP_FOR_EACH_IN_BUCKET (family, hmap_node, hash_int(id, 0),
        if (family->id == id) {
define_genl_family(uint16_t id, const char *name)
    struct genl_family *family = find_genl_family_by_id(id);
        if (!strcmp(family->name, name)) {
    family = xmalloc(sizeof *family);
    hmap_insert(&genl_families, &family->hmap_node, hash_int(id, 0));
    family->name = xstrdup(name);
genl_family_to_name(uint16_t id)
    if (id == GENL_ID_CTRL) {
        struct genl_family *family = find_genl_family_by_id(id);
        return family ? family->name : "unknown";
do_lookup_genl_family(const char *name, struct nlattr **attrs,
                      struct ofpbuf **replyp)
    struct nl_sock *sock;
    struct ofpbuf request, *reply;
    error = nl_sock_create(NETLINK_GENERIC, &sock);
    ofpbuf_init(&request, 0);
    nl_msg_put_genlmsghdr(&request, 0, GENL_ID_CTRL, NLM_F_REQUEST,
                          CTRL_CMD_GETFAMILY, 1);
    nl_msg_put_string(&request, CTRL_ATTR_FAMILY_NAME, name);
    error = nl_sock_transact(sock, &request, &reply);
    ofpbuf_uninit(&request);
        nl_sock_destroy(sock);
    if (!nl_policy_parse(reply, NLMSG_HDRLEN + GENL_HDRLEN,
                         family_policy, attrs, ARRAY_SIZE(family_policy))
        || nl_attr_get_u16(attrs[CTRL_ATTR_FAMILY_ID]) == 0) {
        nl_sock_destroy(sock);
        ofpbuf_delete(reply);
    nl_sock_destroy(sock);

/* Finds the multicast group called 'group_name' in genl family 'family_name'.
 * When successful, writes its result to 'multicast_group' and returns 0.
 * Otherwise, clears 'multicast_group' and returns a positive error code.
 *
 * Some kernels do not support looking up a multicast group with this function.
 * In this case, 'multicast_group' will be populated with 'fallback'. */
nl_lookup_genl_mcgroup(const char *family_name, const char *group_name,
                       unsigned int *multicast_group, unsigned int fallback)
    struct nlattr *family_attrs[ARRAY_SIZE(family_policy)];
    const struct nlattr *mc;
    struct ofpbuf *reply;
    *multicast_group = 0;
    error = do_lookup_genl_family(family_name, family_attrs, &reply);
    if (!family_attrs[CTRL_ATTR_MCAST_GROUPS]) {
        *multicast_group = fallback;
        VLOG_WARN("%s-%s: has no multicast group, using fallback %d",
                  family_name, group_name, *multicast_group);
    NL_NESTED_FOR_EACH (mc, left, family_attrs[CTRL_ATTR_MCAST_GROUPS]) {
        static const struct nl_policy mc_policy[] = {
            [CTRL_ATTR_MCAST_GRP_ID] = {.type = NL_A_U32},
            [CTRL_ATTR_MCAST_GRP_NAME] = {.type = NL_A_STRING},
        struct nlattr *mc_attrs[ARRAY_SIZE(mc_policy)];
        const char *mc_name;
        if (!nl_parse_nested(mc, mc_policy, mc_attrs, ARRAY_SIZE(mc_policy))) {
        mc_name = nl_attr_get_string(mc_attrs[CTRL_ATTR_MCAST_GRP_NAME]);
        if (!strcmp(group_name, mc_name)) {
                nl_attr_get_u32(mc_attrs[CTRL_ATTR_MCAST_GRP_ID]);
    ofpbuf_delete(reply);

/* If '*number' is 0, translates the given Generic Netlink family 'name' to a
 * number and stores it in '*number'. If successful, returns 0 and the caller
 * may use '*number' as the family number. On failure, returns a positive
 * errno value and '*number' caches the errno value. */
nl_lookup_genl_family(const char *name, int *number)
        struct nlattr *attrs[ARRAY_SIZE(family_policy)];
        struct ofpbuf *reply;
        error = do_lookup_genl_family(name, attrs, &reply);
            *number = nl_attr_get_u16(attrs[CTRL_ATTR_FAMILY_ID]);
            define_genl_family(*number, name);
        ofpbuf_delete(reply);
        assert(*number != 0);
    return *number > 0 ? 0 : -*number;
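
/* Illustrative sketch (not part of this file): resolving a Generic Netlink
 * family and one of its multicast groups by name.  The family and group names
 * here are placeholders:
 *
 *     static int family;
 *     unsigned int mcgroup;
 *     int error;
 *
 *     error = nl_lookup_genl_family("example_family", &family);
 *     if (!error) {
 *         error = nl_lookup_genl_mcgroup("example_family", "example_group",
 *                                        &mcgroup, 0);
 *     }
 *
 * Because nl_lookup_genl_family() caches its result in '*number', repeated
 * calls with the same static variable are cheap. */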
nl_sock_allocate_seq(struct nl_sock *sock, unsigned int n)
    uint32_t seq = sock->next_seq;
    sock->next_seq += n;
    /* Make it impossible for the next request for sequence numbers to wrap
     * around to 0. Start over with 1 to avoid ever using a sequence number of
     * 0, because the kernel uses sequence number 0 for notifications. */
    if (sock->next_seq >= UINT32_MAX / 2) {
nlmsghdr_to_string(const struct nlmsghdr *h, int protocol, struct ds *ds)
    static const struct nlmsg_flag flags[] = {
        { NLM_F_REQUEST, "REQUEST" },
        { NLM_F_MULTI, "MULTI" },
        { NLM_F_ACK, "ACK" },
        { NLM_F_ECHO, "ECHO" },
        { NLM_F_DUMP, "DUMP" },
        { NLM_F_ROOT, "ROOT" },
        { NLM_F_MATCH, "MATCH" },
        { NLM_F_ATOMIC, "ATOMIC" },
    const struct nlmsg_flag *flag;
    uint16_t flags_left;

    ds_put_format(ds, "nl(len:%"PRIu32", type=%"PRIu16,
                  h->nlmsg_len, h->nlmsg_type);
    if (h->nlmsg_type == NLMSG_NOOP) {
        ds_put_cstr(ds, "(no-op)");
    } else if (h->nlmsg_type == NLMSG_ERROR) {
        ds_put_cstr(ds, "(error)");
    } else if (h->nlmsg_type == NLMSG_DONE) {
        ds_put_cstr(ds, "(done)");
    } else if (h->nlmsg_type == NLMSG_OVERRUN) {
        ds_put_cstr(ds, "(overrun)");
    } else if (h->nlmsg_type < NLMSG_MIN_TYPE) {
        ds_put_cstr(ds, "(reserved)");
    } else if (protocol == NETLINK_GENERIC) {
        ds_put_format(ds, "(%s)", genl_family_to_name(h->nlmsg_type));
        ds_put_cstr(ds, "(family-defined)");
    ds_put_format(ds, ", flags=%"PRIx16, h->nlmsg_flags);
    flags_left = h->nlmsg_flags;
    for (flag = flags; flag < &flags[ARRAY_SIZE(flags)]; flag++) {
        if ((flags_left & flag->bits) == flag->bits) {
            ds_put_format(ds, "[%s]", flag->name);
            flags_left &= ~flag->bits;
        ds_put_format(ds, "[OTHER:%"PRIx16"]", flags_left);
    ds_put_format(ds, ", seq=%"PRIx32", pid=%"PRIu32,
                  h->nlmsg_seq, h->nlmsg_pid);
nlmsg_to_string(const struct ofpbuf *buffer, int protocol)
    struct ds ds = DS_EMPTY_INITIALIZER;
    const struct nlmsghdr *h = ofpbuf_at(buffer, 0, NLMSG_HDRLEN);
        nlmsghdr_to_string(h, protocol, &ds);
        if (h->nlmsg_type == NLMSG_ERROR) {
            const struct nlmsgerr *e;
            e = ofpbuf_at(buffer, NLMSG_HDRLEN,
                          NLMSG_ALIGN(sizeof(struct nlmsgerr)));
                ds_put_format(&ds, " error(%d", e->error);
                    ds_put_format(&ds, "(%s)", strerror(-e->error));
                ds_put_cstr(&ds, ", in-reply-to(");
                nlmsghdr_to_string(&e->msg, protocol, &ds);
                ds_put_cstr(&ds, "))");
                ds_put_cstr(&ds, " error(truncated)");
        } else if (h->nlmsg_type == NLMSG_DONE) {
            int *error = ofpbuf_at(buffer, NLMSG_HDRLEN, sizeof *error);
                ds_put_format(&ds, " done(%d", *error);
                    ds_put_format(&ds, "(%s)", strerror(-*error));
                ds_put_cstr(&ds, ")");
                ds_put_cstr(&ds, " done(truncated)");
        } else if (protocol == NETLINK_GENERIC) {
            struct genlmsghdr *genl = nl_msg_genlmsghdr(buffer);
                ds_put_format(&ds, ",genl(cmd=%"PRIu8",version=%"PRIu8")",
                              genl->cmd, genl->version);
        ds_put_cstr(&ds, "nl(truncated)");
log_nlmsg(const char *function, int error,
          const void *message, size_t size, int protocol)
    struct ofpbuf buffer;
    if (!VLOG_IS_DBG_ENABLED()) {
    ofpbuf_use_const(&buffer, message, size);
    nlmsg = nlmsg_to_string(&buffer, protocol);
    VLOG_DBG_RL(&rl, "%s (%s): %s", function, strerror(error), nlmsg);