/*
 * Copyright (c) 2008, 2009 Nicira Networks.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
18 #include "poll-loop.h"
25 #include "backtrace.h"
27 #include "dynamic-string.h"
31 #define THIS_MODULE VLM_poll_loop
34 /* An event that will wake the following call to poll_block(). */
36 /* Set when the waiter is created. */
37 struct list node; /* Element in global waiters list. */
38 int fd; /* File descriptor. */
39 short int events; /* Events to wait for (POLLIN, POLLOUT). */
40 poll_fd_func *function; /* Callback function, if any, or null. */
41 void *aux; /* Argument to callback function. */
42 struct backtrace *backtrace; /* Optionally, event that created waiter. */
44 /* Set only when poll_block() is called. */
45 struct pollfd *pollfd; /* Pointer to element of the pollfds array
46 (null if added from a callback). */
49 /* All active poll waiters. */
50 static struct list waiters = LIST_INITIALIZER(&waiters);
52 /* Number of elements in the waiters list. */
53 static size_t n_waiters;
55 /* Max time to wait in next call to poll_block(), in milliseconds, or -1 to
57 static int timeout = -1;
59 /* Backtrace of 'timeout''s registration, if debugging is enabled. */
60 static struct backtrace timeout_backtrace;
62 /* Callback currently running, to allow verifying that poll_cancel() is not
63 * being called on a running callback. */
65 static struct poll_waiter *running_cb;
68 static struct poll_waiter *new_waiter(int fd, short int events);
/* Registers 'fd' as waiting for the specified 'events' (which should be POLLIN
 * or POLLOUT or POLLIN | POLLOUT).  The following call to poll_block() will
 * wake up when 'fd' becomes ready for one or more of the requested events.
 *
 * The event registration is one-shot: only the following call to poll_block()
 * is affected.  The event will need to be re-registered after poll_block() is
 * called if it is to persist.
 *
 * Returns the poll_waiter created, which the caller may pass to poll_cancel()
 * to cancel the registration before poll_block() runs. */
struct poll_waiter *
poll_fd_wait(int fd, short int events)
{
    COVERAGE_INC(poll_fd_wait);
    return new_waiter(fd, events);
}
84 /* Causes the following call to poll_block() to block for no more than 'msec'
85 * milliseconds. If 'msec' is nonpositive, the following call to poll_block()
86 * will not block at all.
88 * The timer registration is one-shot: only the following call to poll_block()
89 * is affected. The timer will need to be re-registered after poll_block() is
90 * called if it is to persist. */
92 poll_timer_wait(int msec)
94 if (timeout < 0 || msec < timeout) {
95 timeout = MAX(0, msec);
96 if (VLOG_IS_DBG_ENABLED()) {
97 backtrace_capture(&timeout_backtrace);
/* Causes the following call to poll_block() to wake up immediately, without
 * blocking. */
void
poll_immediate_wake(void)
{
    /* poll_timer_wait() documents that a nonpositive 'msec' makes the next
     * poll_block() not block at all, which is exactly what we want here. */
    poll_timer_wait(0);
}
110 static void PRINTF_FORMAT(2, 3)
111 log_wakeup(const struct backtrace *backtrace, const char *format, ...)
117 va_start(args, format);
118 ds_put_format_valist(&ds, format, args);
124 ds_put_char(&ds, ':');
125 for (i = 0; i < backtrace->n_frames; i++) {
126 ds_put_format(&ds, " 0x%"PRIxPTR, backtrace->frames[i]);
129 VLOG_DBG("%s", ds_cstr(&ds));
/* NOTE(review): this chunk is a truncated extraction -- each line carries a
 * stray original-line-number prefix, and many lines are missing entirely,
 * including poll_block()'s own signature, several local declarations, and
 * most of the waiter-dispatch loop.  Recover the full text from version
 * control before editing; the comments below annotate only what is visible. */
133 * Blocks until one or more of the events registered with poll_fd_wait()
134 * occurs, or until the minimum duration registered with poll_timer_wait()
135 * elapses, or not at all if poll_immediate_wake() has been called.
137 * Also executes any autonomous subroutines registered with poll_fd_callback(),
138 * if their file descriptors have become ready. */
/* 'pollfds' is a static scratch array reused across calls; it is grown (never
 * shrunk) to hold one struct pollfd per registered waiter. */
142 static struct pollfd *pollfds;
143 static size_t max_pollfds;
145 struct poll_waiter *pw;
/* Grow the scratch array if more waiters are registered than it can hold. */
151 if (max_pollfds < n_waiters) {
152 max_pollfds = n_waiters;
153 pollfds = xrealloc(pollfds, max_pollfds * sizeof *pollfds);
/* Fill in one pollfd per waiter and point each waiter at its slot, so the
 * dispatch loop below can read back the revents that poll reported. */
157 LIST_FOR_EACH (pw, struct poll_waiter, node, &waiters) {
158 pw->pollfd = &pollfds[n_pollfds];
159 pollfds[n_pollfds].fd = pw->fd;
160 pollfds[n_pollfds].events = pw->events;
161 pollfds[n_pollfds].revents = 0;
166 COVERAGE_INC(poll_zero_timeout);
/* time_poll() appears to return a negative errno value on error: the error
 * branch below logs strerror(-retval).  TODO(review): confirm against
 * time_poll()'s declaration. */
168 retval = time_poll(pollfds, n_pollfds, timeout);
170 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
171 VLOG_ERR_RL(&rl, "poll: %s", strerror(-retval));
/* retval == 0 means the timeout expired; log which registration set it. */
172 } else if (!retval && VLOG_IS_DBG_ENABLED()) {
173 log_wakeup(&timeout_backtrace, "%d-ms timeout", timeout);
/* Walk the waiters list manually (not LIST_FOR_EACH) because nodes are
 * removed during iteration; waiters whose pollfd is null were added from
 * inside a callback during this very call and are skipped. */
176 for (node = waiters.next; node != &waiters; ) {
177 pw = CONTAINER_OF(node, struct poll_waiter, node);
178 if (!pw->pollfd || !pw->pollfd->revents) {
184 if (VLOG_IS_DBG_ENABLED()) {
185 log_wakeup(pw->backtrace, "%s%s%s%s%s on fd %d",
186 pw->pollfd->revents & POLLIN ? "[POLLIN]" : "",
187 pw->pollfd->revents & POLLOUT ? "[POLLOUT]" : "",
188 pw->pollfd->revents & POLLERR ? "[POLLERR]" : "",
189 pw->pollfd->revents & POLLHUP ? "[POLLHUP]" : "",
190 pw->pollfd->revents & POLLNVAL ? "[POLLNVAL]" : "",
/* Invoke the callback registered via poll_fd_callback(), if any.  The
 * surrounding lines that set/clear 'running_cb' and unlink the waiter are
 * missing from this extraction. */
198 pw->function(pw->fd, pw->pollfd->revents, pw->aux);
/* Reset the one-shot timeout state for the next round of registrations. */
209 timeout_backtrace.n_frames = 0;
212 /* Registers 'function' to be called with argument 'aux' by poll_block() when
213 * 'fd' becomes ready for one of the events in 'events', which should be POLLIN
214 * or POLLOUT or POLLIN | POLLOUT.
216 * The callback registration persists until the event actually occurs. At that
217 * point, it is automatically de-registered. The callback function must
218 * re-register the event by calling poll_fd_callback() again within the
219 * callback, if it wants to be called back again later. */
221 poll_fd_callback(int fd, short int events, poll_fd_func *function, void *aux)
223 struct poll_waiter *pw = new_waiter(fd, events);
224 pw->function = function;
229 /* Cancels the file descriptor event registered with poll_fd_wait() or
230 * poll_fd_callback(). 'pw' must be the struct poll_waiter returned by one of
233 * An event registered with poll_fd_wait() may be canceled from its time of
234 * registration until the next call to poll_block(). At that point, the event
235 * is automatically canceled by the system and its poll_waiter is freed.
237 * An event registered with poll_fd_callback() may be canceled from its time of
238 * registration until its callback is actually called. At that point, the
239 * event is automatically canceled by the system and its poll_waiter is
242 poll_cancel(struct poll_waiter *pw)
245 assert(pw != running_cb);
246 list_remove(&pw->node);
253 /* Creates and returns a new poll_waiter for 'fd' and 'events'. */
254 static struct poll_waiter *
255 new_waiter(int fd, short int events)
257 struct poll_waiter *waiter = xcalloc(1, sizeof *waiter);
260 waiter->events = events;
261 if (VLOG_IS_DBG_ENABLED()) {
262 waiter->backtrace = xmalloc(sizeof *waiter->backtrace);
263 backtrace_capture(waiter->backtrace);
265 list_push_back(&waiters, &waiter->node);