X-Git-Url: https://pintos-os.org/cgi-bin/gitweb.cgi?a=blobdiff_plain;f=lib%2Fnetlink-socket.c;h=bc462353434ddc215cc48027eafcfd0aae130dbb;hb=b5d29991cc4722aec39c346c3f82291581e92aa0;hp=c9402fd9fa42943c64c24030e889161b83b1a188;hpb=2fe27d5ad27f3c7879ea696209bcf9702d9b7109;p=openvswitch diff --git a/lib/netlink-socket.c b/lib/netlink-socket.c index c9402fd9..bc462353 100644 --- a/lib/netlink-socket.c +++ b/lib/netlink-socket.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2008, 2009, 2010 Nicira Networks. + * Copyright (c) 2008, 2009, 2010, 2011 Nicira Networks. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -21,21 +21,26 @@ #include #include #include +#include #include #include "coverage.h" #include "dynamic-string.h" +#include "hash.h" +#include "hmap.h" #include "netlink.h" #include "netlink-protocol.h" #include "ofpbuf.h" #include "poll-loop.h" +#include "socket-util.h" #include "stress.h" +#include "util.h" #include "vlog.h" VLOG_DEFINE_THIS_MODULE(netlink_socket); COVERAGE_DEFINE(netlink_overflow); COVERAGE_DEFINE(netlink_received); -COVERAGE_DEFINE(netlink_recv_retry); +COVERAGE_DEFINE(netlink_recv_jumbo); COVERAGE_DEFINE(netlink_send); COVERAGE_DEFINE(netlink_sent); @@ -50,7 +55,7 @@ COVERAGE_DEFINE(netlink_sent); static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(60, 600); static void log_nlmsg(const char *function, int error, - const void *message, size_t size); + const void *message, size_t size, int protocol); /* Netlink sockets. */ @@ -58,30 +63,51 @@ struct nl_sock { int fd; uint32_t pid; + int protocol; + struct nl_dump *dump; + unsigned int rcvbuf; /* Receive buffer size (SO_RCVBUF). */ }; -static int alloc_pid(uint32_t *); -static void free_pid(uint32_t); +/* Compile-time limit on iovecs, so that we can allocate a maximum-size array + * of iovecs on the stack. */ +#define MAX_IOVS 128 + +/* Maximum number of iovecs that may be passed to sendmsg, capped at a + * minimum of _XOPEN_IOV_MAX (16) and a maximum of MAX_IOVS. + * + * Initialized by nl_sock_create(). */ +static int max_iovs; + +static int nl_sock_cow__(struct nl_sock *); /* Creates a new netlink socket for the given netlink 'protocol' * (NETLINK_ROUTE, NETLINK_GENERIC, ...). Returns 0 and sets '*sockp' to the - * new socket if successful, otherwise returns a positive errno value. - * - * If 'multicast_group' is nonzero, the new socket subscribes to the specified - * netlink multicast group. (A netlink socket may listen to an arbitrary - * number of multicast groups, but so far we only need one at a time.) - * - * Nonzero 'so_sndbuf' or 'so_rcvbuf' override the kernel default send or - * receive buffer size, respectively. - */ + * new socket if successful, otherwise returns a positive errno value. 
*/ int -nl_sock_create(int protocol, int multicast_group, - size_t so_sndbuf, size_t so_rcvbuf, struct nl_sock **sockp) +nl_sock_create(int protocol, struct nl_sock **sockp) { struct nl_sock *sock; struct sockaddr_nl local, remote; + socklen_t local_size; int retval = 0; + if (!max_iovs) { + int save_errno = errno; + errno = 0; + + max_iovs = sysconf(_SC_UIO_MAXIOV); + if (max_iovs < _XOPEN_IOV_MAX) { + if (max_iovs == -1 && errno) { + VLOG_WARN("sysconf(_SC_UIO_MAXIOV): %s", strerror(errno)); + } + max_iovs = _XOPEN_IOV_MAX; + } else if (max_iovs > MAX_IOVS) { + max_iovs = MAX_IOVS; + } + + errno = save_errno; + } + *sockp = NULL; sock = malloc(sizeof *sock); if (sock == NULL) { @@ -93,71 +119,41 @@ nl_sock_create(int protocol, int multicast_group, VLOG_ERR("fcntl: %s", strerror(errno)); goto error; } + sock->protocol = protocol; + sock->dump = NULL; - retval = alloc_pid(&sock->pid); - if (retval) { + retval = get_socket_rcvbuf(sock->fd); + if (retval < 0) { + retval = -retval; goto error; } + sock->rcvbuf = retval; - if (so_sndbuf != 0 - && setsockopt(sock->fd, SOL_SOCKET, SO_SNDBUF, - &so_sndbuf, sizeof so_sndbuf) < 0) { - VLOG_ERR("setsockopt(SO_SNDBUF,%zu): %s", so_sndbuf, strerror(errno)); - goto error_free_pid; - } - - if (so_rcvbuf != 0 - && setsockopt(sock->fd, SOL_SOCKET, SO_RCVBUF, - &so_rcvbuf, sizeof so_rcvbuf) < 0) { - VLOG_ERR("setsockopt(SO_RCVBUF,%zu): %s", so_rcvbuf, strerror(errno)); - goto error_free_pid; - } - - /* Bind local address as our selected pid. */ - memset(&local, 0, sizeof local); - local.nl_family = AF_NETLINK; - local.nl_pid = sock->pid; - if (multicast_group > 0 && multicast_group <= 32) { - /* This method of joining multicast groups is supported by old kernels, - * but it only allows 32 multicast groups per protocol. */ - local.nl_groups |= 1ul << (multicast_group - 1); - } - if (bind(sock->fd, (struct sockaddr *) &local, sizeof local) < 0) { - VLOG_ERR("bind(%"PRIu32"): %s", sock->pid, strerror(errno)); - goto error_free_pid; - } - - /* Bind remote address as the kernel (pid 0). */ + /* Connect to kernel (pid 0) as remote address. */ memset(&remote, 0, sizeof remote); remote.nl_family = AF_NETLINK; remote.nl_pid = 0; if (connect(sock->fd, (struct sockaddr *) &remote, sizeof remote) < 0) { VLOG_ERR("connect(0): %s", strerror(errno)); - goto error_free_pid; + goto error; } - /* Older kernel headers failed to define this macro. We want our programs - * to support the newer kernel features even if compiled with older - * headers, so define it ourselves in such a case. */ -#ifndef NETLINK_ADD_MEMBERSHIP -#define NETLINK_ADD_MEMBERSHIP 1 -#endif - - /* This method of joining multicast groups is only supported by newish - * kernels, but it allows for an arbitrary number of multicast groups. */ - if (multicast_group > 32 - && setsockopt(sock->fd, SOL_NETLINK, NETLINK_ADD_MEMBERSHIP, - &multicast_group, sizeof multicast_group) < 0) { - VLOG_ERR("setsockopt(NETLINK_ADD_MEMBERSHIP,%d): %s", - multicast_group, strerror(errno)); - goto error_free_pid; + /* Obtain pid assigned by kernel. 
*/ + local_size = sizeof local; + if (getsockname(sock->fd, (struct sockaddr *) &local, &local_size) < 0) { + VLOG_ERR("getsockname: %s", strerror(errno)); + goto error; + } + if (local_size < sizeof local || local.nl_family != AF_NETLINK) { + VLOG_ERR("getsockname returned bad Netlink name"); + retval = EINVAL; + goto error; } + sock->pid = local.nl_pid; *sockp = sock; return 0; -error_free_pid: - free_pid(sock->pid); error: if (retval == 0) { retval = errno; @@ -172,26 +168,81 @@ error: return retval; } +/* Creates a new netlink socket for the same protocol as 'src'. Returns 0 and + * sets '*sockp' to the new socket if successful, otherwise returns a positive + * errno value. */ +int +nl_sock_clone(const struct nl_sock *src, struct nl_sock **sockp) +{ + return nl_sock_create(src->protocol, sockp); +} + /* Destroys netlink socket 'sock'. */ void nl_sock_destroy(struct nl_sock *sock) { if (sock) { - close(sock->fd); - free_pid(sock->pid); - free(sock); + if (sock->dump) { + sock->dump = NULL; + } else { + close(sock->fd); + free(sock); + } } } -/* Tries to send 'msg', which must contain a Netlink message, to the kernel on - * 'sock'. nlmsg_len in 'msg' will be finalized to match msg->size, and - * nlmsg_pid will be set to 'sock''s pid, before the message is sent. +/* Tries to add 'sock' as a listener for 'multicast_group'. Returns 0 if + * successful, otherwise a positive errno value. * - * Returns 0 if successful, otherwise a positive errno value. If - * 'wait' is true, then the send will wait until buffer space is ready; - * otherwise, returns EAGAIN if the 'sock' send buffer is full. */ + * A socket that is subscribed to a multicast group that receives asynchronous + * notifications must not be used for Netlink transactions or dumps, because + * transactions and dumps can cause notifications to be lost. + * + * Multicast group numbers are always positive. + * + * It is not an error to attempt to join a multicast group to which a socket + * already belongs. */ int -nl_sock_send(struct nl_sock *sock, const struct ofpbuf *msg, bool wait) +nl_sock_join_mcgroup(struct nl_sock *sock, unsigned int multicast_group) +{ + int error = nl_sock_cow__(sock); + if (error) { + return error; + } + if (setsockopt(sock->fd, SOL_NETLINK, NETLINK_ADD_MEMBERSHIP, + &multicast_group, sizeof multicast_group) < 0) { + VLOG_WARN("could not join multicast group %u (%s)", + multicast_group, strerror(errno)); + return errno; + } + return 0; +} + +/* Tries to make 'sock' stop listening to 'multicast_group'. Returns 0 if + * successful, otherwise a positive errno value. + * + * Multicast group numbers are always positive. + * + * It is not an error to attempt to leave a multicast group to which a socket + * does not belong. + * + * On success, reading from 'sock' will still return any messages that were + * received on 'multicast_group' before the group was left. */ +int +nl_sock_leave_mcgroup(struct nl_sock *sock, unsigned int multicast_group) +{ + assert(!sock->dump); + if (setsockopt(sock->fd, SOL_NETLINK, NETLINK_DROP_MEMBERSHIP, + &multicast_group, sizeof multicast_group) < 0) { + VLOG_WARN("could not leave multicast group %u (%s)", + multicast_group, strerror(errno)); + return errno; + } + return 0; +} + +static int +nl_sock_send__(struct nl_sock *sock, const struct ofpbuf *msg, bool wait) { struct nlmsghdr *nlmsg = nl_msg_nlmsghdr(msg); int error; @@ -203,43 +254,28 @@ nl_sock_send(struct nl_sock *sock, const struct ofpbuf *msg, bool wait) retval = send(sock->fd, msg->data, msg->size, wait ? 
0 : MSG_DONTWAIT); error = retval < 0 ? errno : 0; } while (error == EINTR); - log_nlmsg(__func__, error, msg->data, msg->size); + log_nlmsg(__func__, error, msg->data, msg->size, sock->protocol); if (!error) { COVERAGE_INC(netlink_sent); } return error; } -/* Tries to send the 'n_iov' chunks of data in 'iov' to the kernel on 'sock' as - * a single Netlink message. (The message must be fully formed and not require - * finalization of its nlmsg_len or nlmsg_pid fields.) +/* Tries to send 'msg', which must contain a Netlink message, to the kernel on + * 'sock'. nlmsg_len in 'msg' will be finalized to match msg->size, and + * nlmsg_pid will be set to 'sock''s pid, before the message is sent. * - * Returns 0 if successful, otherwise a positive errno value. If 'wait' is - * true, then the send will wait until buffer space is ready; otherwise, - * returns EAGAIN if the 'sock' send buffer is full. */ + * Returns 0 if successful, otherwise a positive errno value. If + * 'wait' is true, then the send will wait until buffer space is ready; + * otherwise, returns EAGAIN if the 'sock' send buffer is full. */ int -nl_sock_sendv(struct nl_sock *sock, const struct iovec iov[], size_t n_iov, - bool wait) +nl_sock_send(struct nl_sock *sock, const struct ofpbuf *msg, bool wait) { - struct msghdr msg; - int error; - - COVERAGE_INC(netlink_send); - memset(&msg, 0, sizeof msg); - msg.msg_iov = (struct iovec *) iov; - msg.msg_iovlen = n_iov; - do { - int retval; - retval = sendmsg(sock->fd, &msg, wait ? 0 : MSG_DONTWAIT); - error = retval < 0 ? errno : 0; - } while (error == EINTR); - if (error != EAGAIN) { - log_nlmsg(__func__, error, iov[0].iov_base, iov[0].iov_len); - if (!error) { - COVERAGE_INC(netlink_sent); - } + int error = nl_sock_cow__(sock); + if (error) { + return error; } - return error; + return nl_sock_send__(sock, msg, wait); } /* This stress option is useful for testing that OVS properly tolerates @@ -251,84 +287,71 @@ STRESS_OPTION( netlink_overflow, "simulate netlink socket receive buffer overflow", 5, 1, -1, 100); -/* Tries to receive a netlink message from the kernel on 'sock'. If - * successful, stores the received message into '*bufp' and returns 0. The - * caller is responsible for destroying the message with ofpbuf_delete(). On - * failure, returns a positive errno value and stores a null pointer into - * '*bufp'. - * - * If 'wait' is true, nl_sock_recv waits for a message to be ready; otherwise, - * returns EAGAIN if the 'sock' receive buffer is empty. */ -int -nl_sock_recv(struct nl_sock *sock, struct ofpbuf **bufp, bool wait) +static int +nl_sock_recv__(struct nl_sock *sock, struct ofpbuf **bufp, bool wait) { - uint8_t tmp; - ssize_t bufsize = 2048; - ssize_t nbytes, nbytes2; - struct ofpbuf *buf; + /* We can't accurately predict the size of the data to be received. Most + * received data will fit in a 2 kB buffer, so we allocate that much space. + * In case the data is actually bigger than that, we make available enough + * additional space to allow Netlink messages to be up to 64 kB long (a + * reasonable figure since that's the maximum length of a Netlink + * attribute). 
*/ + enum { MAX_SIZE = 65536 }; + enum { HEAD_SIZE = 2048 }; + enum { TAIL_SIZE = MAX_SIZE - HEAD_SIZE }; + struct nlmsghdr *nlmsghdr; - struct iovec iov; - struct msghdr msg = { - .msg_name = NULL, - .msg_namelen = 0, - .msg_iov = &iov, - .msg_iovlen = 1, - .msg_control = NULL, - .msg_controllen = 0, - .msg_flags = 0 - }; + uint8_t tail[TAIL_SIZE]; + struct iovec iov[2]; + struct ofpbuf *buf; + struct msghdr msg; + ssize_t retval; - buf = ofpbuf_new(bufsize); *bufp = NULL; -try_again: - /* Attempt to read the message. We don't know the size of the data - * yet, so we take a guess at 2048. If we're wrong, we keep trying - * and doubling the buffer size each time. - */ - nlmsghdr = ofpbuf_put_uninit(buf, bufsize); - iov.iov_base = nlmsghdr; - iov.iov_len = bufsize; + buf = ofpbuf_new(HEAD_SIZE); + iov[0].iov_base = buf->data; + iov[0].iov_len = HEAD_SIZE; + iov[1].iov_base = tail; + iov[1].iov_len = TAIL_SIZE; + + memset(&msg, 0, sizeof msg); + msg.msg_iov = iov; + msg.msg_iovlen = 2; + do { - nbytes = recvmsg(sock->fd, &msg, (wait ? 0 : MSG_DONTWAIT) | MSG_PEEK); - } while (nbytes < 0 && errno == EINTR); - if (nbytes < 0) { + retval = recvmsg(sock->fd, &msg, wait ? 0 : MSG_DONTWAIT); + } while (retval < 0 && errno == EINTR); + + if (retval < 0) { + int error = errno; + if (error == ENOBUFS) { + /* Socket receive buffer overflow dropped one or more messages that + * the kernel tried to send to us. */ + COVERAGE_INC(netlink_overflow); + } ofpbuf_delete(buf); - return errno; + return error; } + if (msg.msg_flags & MSG_TRUNC) { - COVERAGE_INC(netlink_recv_retry); - bufsize *= 2; - ofpbuf_reinit(buf, bufsize); - goto try_again; + VLOG_ERR_RL(&rl, "truncated message (longer than %d bytes)", MAX_SIZE); + ofpbuf_delete(buf); + return E2BIG; } - buf->size = nbytes; - /* We successfully read the message, so recv again to clear the queue */ - iov.iov_base = &tmp; - iov.iov_len = 1; - do { - nbytes2 = recvmsg(sock->fd, &msg, MSG_DONTWAIT); - } while (nbytes2 < 0 && errno == EINTR); - if (nbytes2 < 0) { - if (errno == ENOBUFS) { - /* The kernel is notifying us that a message it tried to send to us - * was dropped. We have to pass this along to the caller in case - * it wants to retry a request. So kill the buffer, which we can - * re-read next time. */ - COVERAGE_INC(netlink_overflow); - ofpbuf_delete(buf); - return ENOBUFS; - } else { - VLOG_ERR_RL(&rl, "failed to remove nlmsg from socket: %s\n", - strerror(errno)); - } + ofpbuf_put_uninit(buf, MIN(retval, HEAD_SIZE)); + if (retval > HEAD_SIZE) { + COVERAGE_INC(netlink_recv_jumbo); + ofpbuf_put(buf, tail, retval - HEAD_SIZE); } - if (nbytes < sizeof *nlmsghdr + + nlmsghdr = buf->data; + if (retval < sizeof *nlmsghdr || nlmsghdr->nlmsg_len < sizeof *nlmsghdr - || nlmsghdr->nlmsg_len > nbytes) { + || nlmsghdr->nlmsg_len > retval) { VLOG_ERR_RL(&rl, "received invalid nlmsg (%zd bytes < %d)", - bufsize, NLMSG_HDRLEN); + retval, NLMSG_HDRLEN); ofpbuf_delete(buf); return EPROTO; } @@ -339,12 +362,226 @@ try_again: } *bufp = buf; - log_nlmsg(__func__, 0, buf->data, buf->size); + log_nlmsg(__func__, 0, buf->data, buf->size, sock->protocol); COVERAGE_INC(netlink_received); return 0; } +/* Tries to receive a netlink message from the kernel on 'sock'. If + * successful, stores the received message into '*bufp' and returns 0. The + * caller is responsible for destroying the message with ofpbuf_delete(). On + * failure, returns a positive errno value and stores a null pointer into + * '*bufp'. 
+ * + * If 'wait' is true, nl_sock_recv waits for a message to be ready; otherwise, + * returns EAGAIN if the 'sock' receive buffer is empty. */ +int +nl_sock_recv(struct nl_sock *sock, struct ofpbuf **bufp, bool wait) +{ + int error = nl_sock_cow__(sock); + if (error) { + return error; + } + return nl_sock_recv__(sock, bufp, wait); +} + +static int +find_nl_transaction_by_seq(struct nl_transaction **transactions, size_t n, + uint32_t seq) +{ + int i; + + for (i = 0; i < n; i++) { + struct nl_transaction *t = transactions[i]; + + if (seq == nl_msg_nlmsghdr(t->request)->nlmsg_seq) { + return i; + } + } + + return -1; +} + +static void +nl_sock_record_errors__(struct nl_transaction **transactions, size_t n, + int error) +{ + size_t i; + + for (i = 0; i < n; i++) { + transactions[i]->error = error; + transactions[i]->reply = NULL; + } +} + +static int +nl_sock_transact_multiple__(struct nl_sock *sock, + struct nl_transaction **transactions, size_t n, + size_t *done) +{ + struct iovec iovs[MAX_IOVS]; + struct msghdr msg; + int error; + int i; + + *done = 0; + for (i = 0; i < n; i++) { + struct ofpbuf *request = transactions[i]->request; + struct nlmsghdr *nlmsg = nl_msg_nlmsghdr(request); + + nlmsg->nlmsg_len = request->size; + nlmsg->nlmsg_pid = sock->pid; + if (i == n - 1) { + /* Ensure that we get a reply even if the final request doesn't + * ordinarily call for one. */ + nlmsg->nlmsg_flags |= NLM_F_ACK; + } + + iovs[i].iov_base = request->data; + iovs[i].iov_len = request->size; + } + + memset(&msg, 0, sizeof msg); + msg.msg_iov = iovs; + msg.msg_iovlen = n; + do { + error = sendmsg(sock->fd, &msg, 0) < 0 ? errno : 0; + } while (error == EINTR); + + for (i = 0; i < n; i++) { + struct ofpbuf *request = transactions[i]->request; + + log_nlmsg(__func__, error, request->data, request->size, + sock->protocol); + } + if (!error) { + COVERAGE_ADD(netlink_sent, n); + } + + if (error) { + return error; + } + + while (n > 0) { + struct ofpbuf *reply; + + error = nl_sock_recv__(sock, &reply, true); + if (error) { + return error; + } + + i = find_nl_transaction_by_seq(transactions, n, + nl_msg_nlmsghdr(reply)->nlmsg_seq); + if (i < 0) { + VLOG_DBG_RL(&rl, "ignoring unexpected seq %#"PRIx32, + nl_msg_nlmsghdr(reply)->nlmsg_seq); + ofpbuf_delete(reply); + continue; + } + + nl_sock_record_errors__(transactions, i, 0); + if (nl_msg_nlmsgerr(reply, &error)) { + transactions[i]->reply = NULL; + transactions[i]->error = error; + if (error) { + VLOG_DBG_RL(&rl, "received NAK error=%d (%s)", + error, strerror(error)); + } + ofpbuf_delete(reply); + } else { + transactions[i]->reply = reply; + transactions[i]->error = 0; + } + + *done += i + 1; + transactions += i + 1; + n -= i + 1; + } + + return 0; +} + +/* Sends the 'request' member of the 'n' transactions in 'transactions' to the + * kernel, in order, and waits for responses to all of them. Fills in the + * 'error' member of each transaction with 0 if it was successful, otherwise + * with a positive errno value. 'reply' will be NULL on error or if the + * transaction was successful but had no reply beyond an indication of success. + * For a successful transaction that did have a more detailed reply, 'reply' + * will be set to the reply message. + * + * The caller is responsible for destroying each request and reply, and the + * transactions array itself. + * + * Before sending each message, this function will finalize nlmsg_len in each + * 'request' to match the ofpbuf's size, and set nlmsg_pid to 'sock''s pid. 
+ * NLM_F_ACK will be added to some requests' nlmsg_flags. + * + * Bare Netlink is an unreliable transport protocol. This function layers + * reliable delivery and reply semantics on top of bare Netlink. See + * nl_sock_transact() for some caveats. + */ +void +nl_sock_transact_multiple(struct nl_sock *sock, + struct nl_transaction **transactions, size_t n) +{ + int max_batch_count; + int error; + + if (!n) { + return; + } + + error = nl_sock_cow__(sock); + if (error) { + nl_sock_record_errors__(transactions, n, error); + return; + } + + /* In theory, every request could have a 64 kB reply. But the default and + * maximum socket rcvbuf size with typical Dom0 memory sizes both tend to + * be a bit below 128 kB, so that would only allow a single message in a + * "batch". So we assume that replies average (at most) 4 kB, which allows + * a good deal of batching. + * + * In practice, most of the requests that we batch either have no reply at + * all or a brief reply. */ + max_batch_count = MAX(sock->rcvbuf / 4096, 1); + max_batch_count = MIN(max_batch_count, max_iovs); + + while (n > 0) { + size_t count, bytes; + size_t done; + + /* Batch up to 'max_batch_count' transactions. But cap it at about a + * page of requests total because big skbuffs are expensive to + * allocate in the kernel. */ +#if defined(PAGESIZE) + enum { MAX_BATCH_BYTES = MAX(1, PAGESIZE - 512) }; +#else + enum { MAX_BATCH_BYTES = 4096 - 512 }; +#endif + bytes = transactions[0]->request->size; + for (count = 1; count < n && count < max_batch_count; count++) { + if (bytes + transactions[count]->request->size > MAX_BATCH_BYTES) { + break; + } + bytes += transactions[count]->request->size; + } + + error = nl_sock_transact_multiple__(sock, transactions, count, &done); + transactions += done; + n -= done; + + if (error == ENOBUFS) { + VLOG_DBG_RL(&rl, "receive buffer overflow, resending request"); + } else if (error) { + VLOG_ERR_RL(&rl, "transaction error (%s)", strerror(error)); + nl_sock_record_errors__(transactions, n, error); + } + } +} + /* Sends 'request' to the kernel via 'sock' and waits for a response. If * successful, returns 0. On failure, returns a positive errno value. * @@ -386,67 +623,65 @@ try_again: * needs to be idempotent. */ int -nl_sock_transact(struct nl_sock *sock, - const struct ofpbuf *request, struct ofpbuf **replyp) +nl_sock_transact(struct nl_sock *sock, const struct ofpbuf *request, + struct ofpbuf **replyp) { - uint32_t seq = nl_msg_nlmsghdr(request)->nlmsg_seq; - struct nlmsghdr *nlmsghdr; - struct ofpbuf *reply; - int retval; + struct nl_transaction *transactionp; + struct nl_transaction transaction; + transaction.request = (struct ofpbuf *) request; + transactionp = &transaction; + nl_sock_transact_multiple(sock, &transactionp, 1); if (replyp) { - *replyp = NULL; + *replyp = transaction.reply; + } else { + ofpbuf_delete(transaction.reply); } + return transaction.error; +} - /* Ensure that we get a reply even if this message doesn't ordinarily call - * for one. */ - nl_msg_nlmsghdr(request)->nlmsg_flags |= NLM_F_ACK; - -send: - retval = nl_sock_send(sock, request, true); - if (retval) { - return retval; +/* Drain all the messages currently in 'sock''s receive queue. 
*/ +int +nl_sock_drain(struct nl_sock *sock) +{ + int error = nl_sock_cow__(sock); + if (error) { + return error; } + return drain_rcvbuf(sock->fd); +} -recv: - retval = nl_sock_recv(sock, &reply, true); - if (retval) { - if (retval == ENOBUFS) { - COVERAGE_INC(netlink_overflow); - VLOG_DBG_RL(&rl, "receive buffer overflow, resending request"); - goto send; - } else { - return retval; - } - } - nlmsghdr = nl_msg_nlmsghdr(reply); - if (seq != nlmsghdr->nlmsg_seq) { - VLOG_DBG_RL(&rl, "ignoring seq %"PRIu32" != expected %"PRIu32, - nl_msg_nlmsghdr(reply)->nlmsg_seq, seq); - ofpbuf_delete(reply); - goto recv; - } +/* The client is attempting some operation on 'sock'. If 'sock' has an ongoing + * dump operation, then replace 'sock''s fd with a new socket and hand 'sock''s + * old fd over to the dump. */ +static int +nl_sock_cow__(struct nl_sock *sock) +{ + struct nl_sock *copy; + uint32_t tmp_pid; + int tmp_fd; + int error; - /* If the reply is an error, discard the reply and return the error code. - * - * Except: if the reply is just an acknowledgement (error code of 0), and - * the caller is interested in the reply (replyp != NULL), pass the reply - * up to the caller. Otherwise the caller will get a return value of 0 - * and null '*replyp', which makes unwary callers likely to segfault. */ - if (nl_msg_nlmsgerr(reply, &retval) && (retval || !replyp)) { - ofpbuf_delete(reply); - if (retval) { - VLOG_DBG_RL(&rl, "received NAK error=%d (%s)", - retval, strerror(retval)); - } - return retval != EAGAIN ? retval : EPROTO; + if (!sock->dump) { + return 0; } - if (replyp) { - *replyp = reply; - } else { - ofpbuf_delete(reply); + error = nl_sock_clone(sock, ©); + if (error) { + return error; } + + tmp_fd = sock->fd; + sock->fd = copy->fd; + copy->fd = tmp_fd; + + tmp_pid = sock->pid; + sock->pid = copy->pid; + copy->pid = tmp_pid; + + sock->dump->sock = copy; + sock->dump = NULL; + return 0; } @@ -457,22 +692,23 @@ recv: * be set to 'sock''s pid, before the message is sent. NLM_F_DUMP and * NLM_F_ACK will be set in nlmsg_flags. * - * The properties of Netlink make dump operations reliable as long as all of - * the following are true: - * - * - At most a single dump is in progress at a time on a given nl_sock. + * This Netlink socket library is designed to ensure that the dump is reliable + * and that it will not interfere with other operations on 'sock', including + * destroying or sending and receiving messages on 'sock'. One corner case is + * not handled: * - * - The nl_sock is not subscribed to any multicast groups. - * - * - The nl_sock is not used to send any other messages before the dump - * operation is complete. + * - If 'sock' has been used to send a request (e.g. with nl_sock_send()) + * whose response has not yet been received (e.g. with nl_sock_recv()). + * This is unusual: usually nl_sock_transact() is used to send a message + * and receive its reply all in one go. * * This function provides no status indication. An error status for the entire * dump operation is provided when it is completed by calling nl_dump_done(). * - * The caller is responsible for destroying 'request'. The caller must not - * close 'sock' before it completes the dump operation (by calling - * nl_dump_done()). + * The caller is responsible for destroying 'request'. + * + * The new 'dump' is independent of 'sock'. 'sock' and 'dump' may be destroyed + * in either order. 
*/ void nl_dump_start(struct nl_dump *dump, @@ -481,9 +717,20 @@ nl_dump_start(struct nl_dump *dump, struct nlmsghdr *nlmsghdr = nl_msg_nlmsghdr(request); nlmsghdr->nlmsg_flags |= NLM_F_DUMP | NLM_F_ACK; dump->seq = nlmsghdr->nlmsg_seq; - dump->sock = sock; - dump->status = nl_sock_send(sock, request, true); dump->buffer = NULL; + if (sock->dump) { + /* 'sock' already has an ongoing dump. Clone the socket because + * Netlink only allows one dump at a time. */ + dump->status = nl_sock_clone(sock, &dump->sock); + if (dump->status) { + return; + } + } else { + sock->dump = dump; + dump->sock = sock; + dump->status = 0; + } + dump->status = nl_sock_send__(sock, request, true); } /* Helper function for nl_dump_next(). */ @@ -494,7 +741,7 @@ nl_dump_recv(struct nl_dump *dump, struct ofpbuf **bufferp) struct ofpbuf *buffer; int retval; - retval = nl_sock_recv(dump->sock, bufferp, true); + retval = nl_sock_recv__(dump->sock, bufferp, true); if (retval) { return retval == EINTR ? EAGAIN : retval; } @@ -502,7 +749,7 @@ nl_dump_recv(struct nl_dump *dump, struct ofpbuf **bufferp) nlmsghdr = nl_msg_nlmsghdr(buffer); if (dump->seq != nlmsghdr->nlmsg_seq) { - VLOG_DBG_RL(&rl, "ignoring seq %"PRIu32" != expected %"PRIu32, + VLOG_DBG_RL(&rl, "ignoring seq %#"PRIx32" != expected %#"PRIx32, nlmsghdr->nlmsg_seq, dump->seq); return EAGAIN; } @@ -583,6 +830,13 @@ nl_dump_done(struct nl_dump *dump) } } + if (dump->sock) { + if (dump->sock->dump) { + dump->sock->dump = NULL; + } else { + nl_sock_destroy(dump->sock); + } + } ofpbuf_delete(dump->buffer); return dump->status == EOF ? 0 : dump->status; } @@ -594,116 +848,210 @@ nl_sock_wait(const struct nl_sock *sock, short int events) { poll_fd_wait(sock->fd, events); } + +/* Returns the underlying fd for 'sock', for use in "poll()"-like operations + * that can't use nl_sock_wait(). + * + * It's a little tricky to use the returned fd correctly, because nl_sock does + * "copy on write" to allow a single nl_sock to be used for notifications, + * transactions, and dumps. If 'sock' is used only for notifications and + * transactions (and never for dump) then the usage is safe. */ +int +nl_sock_fd(const struct nl_sock *sock) +{ + return sock->fd; +} + +/* Returns the PID associated with this socket. */ +uint32_t +nl_sock_pid(const struct nl_sock *sock) +{ + return sock->pid; +} /* Miscellaneous. 
*/ +struct genl_family { + struct hmap_node hmap_node; + uint16_t id; + char *name; +}; + +static struct hmap genl_families = HMAP_INITIALIZER(&genl_families); + static const struct nl_policy family_policy[CTRL_ATTR_MAX + 1] = { [CTRL_ATTR_FAMILY_ID] = {.type = NL_A_U16}, + [CTRL_ATTR_MCAST_GROUPS] = {.type = NL_A_NESTED, .optional = true}, }; -static int do_lookup_genl_family(const char *name) +static struct genl_family * +find_genl_family_by_id(uint16_t id) +{ + struct genl_family *family; + + HMAP_FOR_EACH_IN_BUCKET (family, hmap_node, hash_int(id, 0), + &genl_families) { + if (family->id == id) { + return family; + } + } + return NULL; +} + +static void +define_genl_family(uint16_t id, const char *name) +{ + struct genl_family *family = find_genl_family_by_id(id); + + if (family) { + if (!strcmp(family->name, name)) { + return; + } + free(family->name); + } else { + family = xmalloc(sizeof *family); + family->id = id; + hmap_insert(&genl_families, &family->hmap_node, hash_int(id, 0)); + } + family->name = xstrdup(name); +} + +static const char * +genl_family_to_name(uint16_t id) +{ + if (id == GENL_ID_CTRL) { + return "control"; + } else { + struct genl_family *family = find_genl_family_by_id(id); + return family ? family->name : "unknown"; + } +} + +static int +do_lookup_genl_family(const char *name, struct nlattr **attrs, + struct ofpbuf **replyp) { struct nl_sock *sock; struct ofpbuf request, *reply; - struct nlattr *attrs[ARRAY_SIZE(family_policy)]; - int retval; + int error; - retval = nl_sock_create(NETLINK_GENERIC, 0, 0, 0, &sock); - if (retval) { - return -retval; + *replyp = NULL; + error = nl_sock_create(NETLINK_GENERIC, &sock); + if (error) { + return error; } ofpbuf_init(&request, 0); nl_msg_put_genlmsghdr(&request, 0, GENL_ID_CTRL, NLM_F_REQUEST, CTRL_CMD_GETFAMILY, 1); nl_msg_put_string(&request, CTRL_ATTR_FAMILY_NAME, name); - retval = nl_sock_transact(sock, &request, &reply); + error = nl_sock_transact(sock, &request, &reply); ofpbuf_uninit(&request); - if (retval) { + if (error) { nl_sock_destroy(sock); - return -retval; + return error; } if (!nl_policy_parse(reply, NLMSG_HDRLEN + GENL_HDRLEN, - family_policy, attrs, ARRAY_SIZE(family_policy))) { + family_policy, attrs, ARRAY_SIZE(family_policy)) + || nl_attr_get_u16(attrs[CTRL_ATTR_FAMILY_ID]) == 0) { nl_sock_destroy(sock); ofpbuf_delete(reply); - return -EPROTO; + return EPROTO; } - retval = nl_attr_get_u16(attrs[CTRL_ATTR_FAMILY_ID]); - if (retval == 0) { - retval = -EPROTO; - } nl_sock_destroy(sock); - ofpbuf_delete(reply); - return retval; + *replyp = reply; + return 0; } -/* If '*number' is 0, translates the given Generic Netlink family 'name' to a - * number and stores it in '*number'. If successful, returns 0 and the caller - * may use '*number' as the family number. On failure, returns a positive - * errno value and '*number' caches the errno value. */ +/* Finds the multicast group called 'group_name' in genl family 'family_name'. + * When successful, writes its result to 'multicast_group' and returns 0. + * Otherwise, clears 'multicast_group' and returns a positive error code. + * + * Some kernels do not support looking up a multicast group with this function. + * In this case, 'multicast_group' will be populated with 'fallback'. 
*/ int -nl_lookup_genl_family(const char *name, int *number) +nl_lookup_genl_mcgroup(const char *family_name, const char *group_name, + unsigned int *multicast_group, unsigned int fallback) { - if (*number == 0) { - *number = do_lookup_genl_family(name); - assert(*number != 0); + struct nlattr *family_attrs[ARRAY_SIZE(family_policy)]; + const struct nlattr *mc; + struct ofpbuf *reply; + unsigned int left; + int error; + + *multicast_group = 0; + error = do_lookup_genl_family(family_name, family_attrs, &reply); + if (error) { + return error; } - return *number > 0 ? 0 : -*number; -} - -/* Netlink PID. - * - * Every Netlink socket must be bound to a unique 32-bit PID. By convention, - * programs that have a single Netlink socket use their Unix process ID as PID, - * and programs with multiple Netlink sockets add a unique per-socket - * identifier in the bits above the Unix process ID. - * - * The kernel has Netlink PID 0. - */ -/* Parameters for how many bits in the PID should come from the Unix process ID - * and how many unique per-socket. */ -#define SOCKET_BITS 10 -#define MAX_SOCKETS (1u << SOCKET_BITS) + if (!family_attrs[CTRL_ATTR_MCAST_GROUPS]) { + *multicast_group = fallback; + VLOG_WARN("%s-%s: has no multicast group, using fallback %d", + family_name, group_name, *multicast_group); + error = 0; + goto exit; + } -#define PROCESS_BITS (32 - SOCKET_BITS) -#define MAX_PROCESSES (1u << PROCESS_BITS) -#define PROCESS_MASK ((uint32_t) (MAX_PROCESSES - 1)) + NL_NESTED_FOR_EACH (mc, left, family_attrs[CTRL_ATTR_MCAST_GROUPS]) { + static const struct nl_policy mc_policy[] = { + [CTRL_ATTR_MCAST_GRP_ID] = {.type = NL_A_U32}, + [CTRL_ATTR_MCAST_GRP_NAME] = {.type = NL_A_STRING}, + }; -/* Bit vector of unused socket identifiers. */ -static uint32_t avail_sockets[ROUND_UP(MAX_SOCKETS, 32)]; + struct nlattr *mc_attrs[ARRAY_SIZE(mc_policy)]; + const char *mc_name; -/* Allocates and returns a new Netlink PID. */ -static int -alloc_pid(uint32_t *pid) -{ - int i; + if (!nl_parse_nested(mc, mc_policy, mc_attrs, ARRAY_SIZE(mc_policy))) { + error = EPROTO; + goto exit; + } - for (i = 0; i < MAX_SOCKETS; i++) { - if ((avail_sockets[i / 32] & (1u << (i % 32))) == 0) { - avail_sockets[i / 32] |= 1u << (i % 32); - *pid = (getpid() & PROCESS_MASK) | (i << PROCESS_BITS); - return 0; + mc_name = nl_attr_get_string(mc_attrs[CTRL_ATTR_MCAST_GRP_NAME]); + if (!strcmp(group_name, mc_name)) { + *multicast_group = + nl_attr_get_u32(mc_attrs[CTRL_ATTR_MCAST_GRP_ID]); + error = 0; + goto exit; } } - VLOG_ERR("netlink pid space exhausted"); - return ENOBUFS; + error = EPROTO; + +exit: + ofpbuf_delete(reply); + return error; } -/* Makes the specified 'pid' available for reuse. */ -static void -free_pid(uint32_t pid) +/* If '*number' is 0, translates the given Generic Netlink family 'name' to a + * number and stores it in '*number'. If successful, returns 0 and the caller + * may use '*number' as the family number. On failure, returns a positive + * errno value and '*number' caches the errno value. 
*/ +int +nl_lookup_genl_family(const char *name, int *number) { - int sock = pid >> PROCESS_BITS; - assert(avail_sockets[sock / 32] & (1u << (sock % 32))); - avail_sockets[sock / 32] &= ~(1u << (sock % 32)); + if (*number == 0) { + struct nlattr *attrs[ARRAY_SIZE(family_policy)]; + struct ofpbuf *reply; + int error; + + error = do_lookup_genl_family(name, attrs, &reply); + if (!error) { + *number = nl_attr_get_u16(attrs[CTRL_ATTR_FAMILY_ID]); + define_genl_family(*number, name); + } else { + *number = -error; + } + ofpbuf_delete(reply); + + assert(*number != 0); + } + return *number > 0 ? 0 : -*number; } static void -nlmsghdr_to_string(const struct nlmsghdr *h, struct ds *ds) +nlmsghdr_to_string(const struct nlmsghdr *h, int protocol, struct ds *ds) { struct nlmsg_flag { unsigned int bits; @@ -734,6 +1082,8 @@ nlmsghdr_to_string(const struct nlmsghdr *h, struct ds *ds) ds_put_cstr(ds, "(overrun)"); } else if (h->nlmsg_type < NLMSG_MIN_TYPE) { ds_put_cstr(ds, "(reserved)"); + } else if (protocol == NETLINK_GENERIC) { + ds_put_format(ds, "(%s)", genl_family_to_name(h->nlmsg_type)); } else { ds_put_cstr(ds, "(family-defined)"); } @@ -748,19 +1098,17 @@ nlmsghdr_to_string(const struct nlmsghdr *h, struct ds *ds) if (flags_left) { ds_put_format(ds, "[OTHER:%"PRIx16"]", flags_left); } - ds_put_format(ds, ", seq=%"PRIx32", pid=%"PRIu32"(%d:%d))", - h->nlmsg_seq, h->nlmsg_pid, - (int) (h->nlmsg_pid & PROCESS_MASK), - (int) (h->nlmsg_pid >> PROCESS_BITS)); + ds_put_format(ds, ", seq=%"PRIx32", pid=%"PRIu32, + h->nlmsg_seq, h->nlmsg_pid); } static char * -nlmsg_to_string(const struct ofpbuf *buffer) +nlmsg_to_string(const struct ofpbuf *buffer, int protocol) { struct ds ds = DS_EMPTY_INITIALIZER; const struct nlmsghdr *h = ofpbuf_at(buffer, 0, NLMSG_HDRLEN); if (h) { - nlmsghdr_to_string(h, &ds); + nlmsghdr_to_string(h, protocol, &ds); if (h->nlmsg_type == NLMSG_ERROR) { const struct nlmsgerr *e; e = ofpbuf_at(buffer, NLMSG_HDRLEN, @@ -771,7 +1119,7 @@ nlmsg_to_string(const struct ofpbuf *buffer) ds_put_format(&ds, "(%s)", strerror(-e->error)); } ds_put_cstr(&ds, ", in-reply-to("); - nlmsghdr_to_string(&e->msg, &ds); + nlmsghdr_to_string(&e->msg, protocol, &ds); ds_put_cstr(&ds, "))"); } else { ds_put_cstr(&ds, " error(truncated)"); @@ -787,6 +1135,12 @@ nlmsg_to_string(const struct ofpbuf *buffer) } else { ds_put_cstr(&ds, " done(truncated)"); } + } else if (protocol == NETLINK_GENERIC) { + struct genlmsghdr *genl = nl_msg_genlmsghdr(buffer); + if (genl) { + ds_put_format(&ds, ",genl(cmd=%"PRIu8",version=%"PRIu8")", + genl->cmd, genl->version); + } } } else { ds_put_cstr(&ds, "nl(truncated)"); @@ -796,7 +1150,7 @@ nlmsg_to_string(const struct ofpbuf *buffer) static void log_nlmsg(const char *function, int error, - const void *message, size_t size) + const void *message, size_t size, int protocol) { struct ofpbuf buffer; char *nlmsg; @@ -806,7 +1160,7 @@ log_nlmsg(const char *function, int error, } ofpbuf_use_const(&buffer, message, size); - nlmsg = nlmsg_to_string(&buffer); + nlmsg = nlmsg_to_string(&buffer, protocol); VLOG_DBG_RL(&rl, "%s (%s): %s", function, strerror(error), nlmsg); free(nlmsg); }
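Usage sketch (illustrative only, not taken from the patch): a minimal example of driving the reworked socket API above, modeled on do_lookup_genl_family() in this file. It assumes the ofpbuf and nl_msg helpers from the rest of this library, abbreviates error handling, and uses the placeholder family name "example".

/* Sends one Generic Netlink request on a fresh socket and waits for the
 * reply.  nl_sock_transact() finalizes nlmsg_len and nlmsg_pid, resends the
 * request if the receive buffer overflows, and returns a positive errno
 * value on failure. */
static int
example_transact(void)
{
    struct ofpbuf request, *reply;
    struct nl_sock *sock;
    int error;

    error = nl_sock_create(NETLINK_GENERIC, &sock);
    if (error) {
        return error;
    }

    ofpbuf_init(&request, 0);
    nl_msg_put_genlmsghdr(&request, 0, GENL_ID_CTRL, NLM_F_REQUEST,
                          CTRL_CMD_GETFAMILY, 1);
    nl_msg_put_string(&request, CTRL_ATTR_FAMILY_NAME, "example");

    error = nl_sock_transact(sock, &request, &reply);
    ofpbuf_uninit(&request);

    if (!error) {
        /* ... examine the reply, e.g. with nl_policy_parse() ... */
        ofpbuf_delete(reply);
    }
    nl_sock_destroy(sock);
    return error;
}

For asynchronous notifications, the pattern suggested by the comments above is to call nl_sock_join_mcgroup() on a socket that is not also used for transactions or dumps, loop on nl_sock_recv(), and treat ENOBUFS as an indication that the receive buffer overflowed and notifications were dropped. For dumps, nl_dump_start() clones the socket when a dump is already in progress, so the socket and the dump remain independent as described in its comment.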