/*
 * Copyright (c) 2008, 2009 Nicira Networks.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <config.h>
#include "poll-loop.h"
#include <assert.h>
#include <poll.h>
#include <stdlib.h>
#include <string.h>
#include "backtrace.h"
#include "coverage.h"
#include "dynamic-string.h"
#include "list.h"
#include "timeval.h"
#include "util.h"
#include "vlog.h"
#define THIS_MODULE VLM_poll_loop
/* An event that will wake the following call to poll_block(). */
struct poll_waiter {
    /* Set when the waiter is created. */
    struct list node;           /* Element in global waiters list. */
    int fd;                     /* File descriptor. */
    short int events;           /* Events to wait for (POLLIN, POLLOUT). */
    poll_fd_func *function;     /* Callback function, if any, or null. */
    void *aux;                  /* Argument to callback function. */
    struct backtrace *backtrace; /* Optionally, event that created waiter. */

    /* Set only when poll_block() is called. */
    struct pollfd *pollfd;      /* Pointer to element of the pollfds array
                                   (null if added from a callback). */
};
/* All active poll waiters. */
static struct list waiters = LIST_INITIALIZER(&waiters);

/* Number of elements in the waiters list. */
static size_t n_waiters;

/* Max time to wait in next call to poll_block(), in milliseconds, or -1 to
 * wait forever. */
static int timeout = -1;

/* Backtrace of 'timeout''s registration, if debugging is enabled. */
static struct backtrace timeout_backtrace;

/* Callback currently running, to allow verifying that poll_cancel() is not
 * being called on a running callback. */
static struct poll_waiter *running_cb;

static struct poll_waiter *new_waiter(int fd, short int events);
/* Registers 'fd' as waiting for the specified 'events' (which should be POLLIN
 * or POLLOUT or POLLIN | POLLOUT).  The following call to poll_block() will
 * wake up when 'fd' becomes ready for one or more of the requested events.
 *
 * The event registration is one-shot: only the following call to poll_block()
 * is affected.  The event will need to be re-registered after poll_block() is
 * called if it is to persist. */
struct poll_waiter *
poll_fd_wait(int fd, short int events)
{
    COVERAGE_INC(poll_fd_wait);
    return new_waiter(fd, events);
}
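
/* Usage sketch (illustrative, not part of the original module; 'fd' and
 * do_work() are hypothetical names): because the registration is one-shot, a
 * typical caller re-arms its interest on every pass through its main loop,
 * doing whatever work is possible first and sleeping last:
 *
 *     for (;;) {
 *         do_work(fd);
 *         poll_fd_wait(fd, POLLIN);
 *         poll_block();
 *     }
 */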
/* Causes the following call to poll_block() to block for no more than 'msec'
 * milliseconds.  If 'msec' is nonpositive, the following call to poll_block()
 * will not block at all.
 *
 * The timer registration is one-shot: only the following call to poll_block()
 * is affected.  The timer will need to be re-registered after poll_block() is
 * called if it is to persist. */
void
poll_timer_wait(int msec)
{
    if (timeout < 0 || msec < timeout) {
        timeout = MAX(0, msec);
        if (VLOG_IS_DBG_ENABLED()) {
            backtrace_capture(&timeout_backtrace);
        }
    }
}
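
/* Usage sketch (illustrative; 'deadline' is a hypothetical absolute time in
 * milliseconds and time_msec() is assumed to return the current time in the
 * same units): since the timer is one-shot, a caller that wants to wake at a
 * fixed deadline re-registers the remaining interval before each poll_block():
 *
 *     long long int remaining = deadline - time_msec();
 *     poll_timer_wait(remaining > INT_MAX ? INT_MAX : (int) remaining);
 *     poll_block();
 */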
/* Causes the following call to poll_block() to wake up immediately, without
 * blocking. */
void
poll_immediate_wake(void)
{
    poll_timer_wait(0);
}
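
/* Usage sketch (illustrative; 'txq' and queue_is_empty() are hypothetical): a
 * module that already has buffered work, and therefore must not let the
 * process sleep, can force the next poll_block() to return at once:
 *
 *     if (!queue_is_empty(&txq)) {
 *         poll_immediate_wake();
 *     }
 */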
static void PRINTF_FORMAT(2, 3)
log_wakeup(const struct backtrace *backtrace, const char *format, ...)
{
    struct ds ds;
    va_list args;

    ds_init(&ds);
    va_start(args, format);
    ds_put_format_valist(&ds, format, args);
    va_end(args);

    if (backtrace) {
        int i;

        ds_put_char(&ds, ':');
        for (i = 0; i < backtrace->n_frames; i++) {
            ds_put_format(&ds, " 0x%x", backtrace->frames[i]);
        }
    }
    VLOG_DBG("%s", ds_cstr(&ds));
    ds_destroy(&ds);
}
/* Blocks until one or more of the events registered with poll_fd_wait()
 * occurs, or until the minimum duration registered with poll_timer_wait()
 * elapses, or not at all if poll_immediate_wake() has been called.
 *
 * Also executes any autonomous subroutines registered with poll_fd_callback(),
 * if their file descriptors have become ready. */
void
poll_block(void)
{
    static struct pollfd *pollfds;
    static size_t max_pollfds;

    struct poll_waiter *pw;
    struct list *node;
    int n_pollfds;
    int retval;

    if (max_pollfds < n_waiters) {
        max_pollfds = n_waiters;
        pollfds = xrealloc(pollfds, max_pollfds * sizeof *pollfds);
    }

    n_pollfds = 0;
    LIST_FOR_EACH (pw, struct poll_waiter, node, &waiters) {
        pw->pollfd = &pollfds[n_pollfds];
        pollfds[n_pollfds].fd = pw->fd;
        pollfds[n_pollfds].events = pw->events;
        pollfds[n_pollfds].revents = 0;
        n_pollfds++;
    }

    if (!timeout) {
        COVERAGE_INC(poll_zero_timeout);
    }
    retval = time_poll(pollfds, n_pollfds, timeout);
    if (retval < 0) {
        static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
        VLOG_ERR_RL(&rl, "poll: %s", strerror(-retval));
    } else if (!retval && VLOG_IS_DBG_ENABLED()) {
        log_wakeup(&timeout_backtrace, "%d-ms timeout", timeout);
    }

    for (node = waiters.next; node != &waiters; ) {
        pw = CONTAINER_OF(node, struct poll_waiter, node);
        node = node->next;
        if (!pw->pollfd || !pw->pollfd->revents) {
            /* Not ready: a plain poll_fd_wait() registration is one-shot, so
             * drop it; a poll_fd_callback() registration persists. */
            if (!pw->function) {
                poll_cancel(pw);
            }
        } else {
            if (VLOG_IS_DBG_ENABLED()) {
                log_wakeup(pw->backtrace, "%s%s%s%s%s on fd %d",
                           pw->pollfd->revents & POLLIN ? "[POLLIN]" : "",
                           pw->pollfd->revents & POLLOUT ? "[POLLOUT]" : "",
                           pw->pollfd->revents & POLLERR ? "[POLLERR]" : "",
                           pw->pollfd->revents & POLLHUP ? "[POLLHUP]" : "",
                           pw->pollfd->revents & POLLNVAL ? "[POLLNVAL]" : "",
                           pw->fd);
            }
            if (pw->function) {
                running_cb = pw;
                pw->function(pw->fd, pw->pollfd->revents, pw->aux);
                running_cb = NULL;
            }
            poll_cancel(pw);
        }
    }

    timeout = -1;
    timeout_backtrace.n_frames = 0;
}
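
/* Usage sketch (illustrative; module_a_run(), module_a_wait(), and so on are
 * hypothetical): the intended program structure is a main loop in which each
 * component first does whatever work it can without blocking ("run"), then
 * registers the events it needs via poll_fd_wait() or poll_timer_wait()
 * ("wait"), so that a single poll_block() sleeps until any of them is ready:
 *
 *     for (;;) {
 *         module_a_run();
 *         module_b_run();
 *         module_a_wait();
 *         module_b_wait();
 *         poll_block();
 *     }
 */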
/* Registers 'function' to be called with argument 'aux' by poll_block() when
 * 'fd' becomes ready for one of the events in 'events', which should be POLLIN
 * or POLLOUT or POLLIN | POLLOUT.
 *
 * The callback registration persists until the event actually occurs.  At that
 * point, it is automatically de-registered.  The callback function must
 * re-register the event by calling poll_fd_callback() again within the
 * callback, if it wants to be called back again later. */
struct poll_waiter *
poll_fd_callback(int fd, short int events, poll_fd_func *function, void *aux)
{
    struct poll_waiter *pw = new_waiter(fd, events);
    pw->function = function;
    pw->aux = aux;
    return pw;
}
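
/* Usage sketch (illustrative; my_readable_cb and struct my_state are
 * hypothetical, and the callback signature below is the one implied by the
 * call in poll_block()): a callback that wants to keep being invoked must
 * re-register itself each time it runs, since the registration is dropped
 * once the event fires:
 *
 *     static void
 *     my_readable_cb(int fd, short int revents, void *aux)
 *     {
 *         struct my_state *s = aux;
 *         ...consume data from 'fd' into 's'...
 *         poll_fd_callback(fd, POLLIN, my_readable_cb, s);
 *     }
 */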
/* Cancels the file descriptor event registered with poll_fd_wait() or
 * poll_fd_callback().  'pw' must be the struct poll_waiter returned by one of
 * those functions.
 *
 * An event registered with poll_fd_wait() may be canceled from its time of
 * registration until the next call to poll_block().  At that point, the event
 * is automatically canceled by the system and its poll_waiter is freed.
 *
 * An event registered with poll_fd_callback() may be canceled from its time of
 * registration until its callback is actually called.  At that point, the
 * event is automatically canceled by the system and its poll_waiter is
 * freed. */
void
poll_cancel(struct poll_waiter *pw)
{
    if (pw) {
        assert(pw != running_cb);
        list_remove(&pw->node);
        free(pw->backtrace);
        free(pw);
        n_waiters--;
    }
}
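
/* Usage sketch (illustrative; 'conn', flush_cb(), and connection_closed() are
 * hypothetical): a module that registered a callback but later stops caring
 * about the fd can cancel the registration before the callback fires:
 *
 *     struct poll_waiter *pw = poll_fd_callback(fd, POLLOUT, flush_cb, conn);
 *     ...
 *     if (connection_closed(conn)) {
 *         poll_cancel(pw);
 *         pw = NULL;
 *     }
 */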
/* Creates and returns a new poll_waiter for 'fd' and 'events'. */
static struct poll_waiter *
new_waiter(int fd, short int events)
{
    struct poll_waiter *waiter = xcalloc(1, sizeof *waiter);

    assert(fd >= 0);
    waiter->fd = fd;
    waiter->events = events;
    if (VLOG_IS_DBG_ENABLED()) {
        waiter->backtrace = xmalloc(sizeof *waiter->backtrace);
        backtrace_capture(waiter->backtrace);
    }
    list_push_back(&waiters, &waiter->node);
    n_waiters++;
    return waiter;
}