/*
 * Copyright (c) 2008, 2009, 2010 Nicira Networks.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include "poll-loop.h"
#include <assert.h>
#include <errno.h>
#include <inttypes.h>
#include <limits.h>
#include <poll.h>
#include <stdarg.h>
#include <stdlib.h>
#include <string.h>
#include "backtrace.h"
#include "coverage.h"
#include "dynamic-string.h"
#include "fatal-signal.h"
#include "list.h"
#include "timeval.h"
#include "util.h"
#include "vlog.h"

#define THIS_MODULE VLM_poll_loop
35 /* An event that will wake the following call to poll_block(). */
37 /* Set when the waiter is created. */
38 struct list node; /* Element in global waiters list. */
39 int fd; /* File descriptor. */
40 short int events; /* Events to wait for (POLLIN, POLLOUT). */
41 struct backtrace *backtrace; /* Optionally, event that created waiter. */
43 /* Set only when poll_block() is called. */
44 struct pollfd *pollfd; /* Pointer to element of the pollfds array. */
47 /* All active poll waiters. */
48 static struct list waiters = LIST_INITIALIZER(&waiters);
50 /* Number of elements in the waiters list. */
51 static size_t n_waiters;
53 /* Max time to wait in next call to poll_block(), in milliseconds, or -1 to
55 static int timeout = -1;
57 /* Backtrace of 'timeout''s registration, if debugging is enabled. */
58 static struct backtrace timeout_backtrace;
60 static struct poll_waiter *new_waiter(int fd, short int events);
/* Registers 'fd' as waiting for the specified 'events' (which should be POLLIN
 * or POLLOUT or POLLIN | POLLOUT).  The following call to poll_block() will
 * wake up when 'fd' becomes ready for one or more of the requested events.
 *
 * The event registration is one-shot: only the following call to poll_block()
 * is affected.  The event will need to be re-registered after poll_block() is
 * called if it is to persist.
 *
 * Returns the poll_waiter, which the caller may pass to poll_cancel() to
 * withdraw the registration before the next poll_block(). */
struct poll_waiter *
poll_fd_wait(int fd, short int events)
{
    COVERAGE_INC(poll_fd_wait);
    return new_waiter(fd, events);
}
76 /* The caller must ensure that 'msec' is not negative. */
78 poll_timer_wait__(int msec)
80 if (timeout < 0 || msec < timeout) {
82 if (VLOG_IS_DBG_ENABLED()) {
83 backtrace_capture(&timeout_backtrace);
/* Causes the following call to poll_block() to block for no more than 'msec'
 * milliseconds.  If 'msec' is nonpositive, the following call to poll_block()
 * will not block at all.
 *
 * The timer registration is one-shot: only the following call to poll_block()
 * is affected.  The timer will need to be re-registered after poll_block() is
 * called if it is to persist. */
void
poll_timer_wait(long long int msec)
{
    /* Clamp 'msec' into the int range that poll() accepts: negative values
     * mean "wake immediately", huge values saturate at INT_MAX. */
    poll_timer_wait__(msec < 0 ? 0
                      : msec > INT_MAX ? INT_MAX
                      : msec);
}
/* Causes the following call to poll_block() to wake up immediately, without
 * blocking. */
void
poll_immediate_wake(void)
{
    /* A zero-ms timeout makes the next poll_block() return at once. */
    poll_timer_wait(0);
}
111 static void PRINTF_FORMAT(2, 3)
112 log_wakeup(const struct backtrace *backtrace, const char *format, ...)
118 va_start(args, format);
119 ds_put_format_valist(&ds, format, args);
125 ds_put_char(&ds, ':');
126 for (i = 0; i < backtrace->n_frames; i++) {
127 ds_put_format(&ds, " 0x%"PRIxPTR, backtrace->frames[i]);
130 VLOG_DBG("%s", ds_cstr(&ds));
134 /* Blocks until one or more of the events registered with poll_fd_wait()
135 * occurs, or until the minimum duration registered with poll_timer_wait()
136 * elapses, or not at all if poll_immediate_wake() has been called. */
140 static struct pollfd *pollfds;
141 static size_t max_pollfds;
143 struct poll_waiter *pw, *next;
147 /* Register fatal signal events before actually doing any real work for
151 if (max_pollfds < n_waiters) {
152 max_pollfds = n_waiters;
153 pollfds = xrealloc(pollfds, max_pollfds * sizeof *pollfds);
157 LIST_FOR_EACH (pw, struct poll_waiter, node, &waiters) {
158 pw->pollfd = &pollfds[n_pollfds];
159 pollfds[n_pollfds].fd = pw->fd;
160 pollfds[n_pollfds].events = pw->events;
161 pollfds[n_pollfds].revents = 0;
166 COVERAGE_INC(poll_zero_timeout);
168 retval = time_poll(pollfds, n_pollfds, timeout);
170 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
171 VLOG_ERR_RL(&rl, "poll: %s", strerror(-retval));
172 } else if (!retval && VLOG_IS_DBG_ENABLED()) {
173 log_wakeup(&timeout_backtrace, "%d-ms timeout", timeout);
176 LIST_FOR_EACH_SAFE (pw, next, struct poll_waiter, node, &waiters) {
177 if (pw->pollfd->revents && VLOG_IS_DBG_ENABLED()) {
178 log_wakeup(pw->backtrace, "%s%s%s%s%s on fd %d",
179 pw->pollfd->revents & POLLIN ? "[POLLIN]" : "",
180 pw->pollfd->revents & POLLOUT ? "[POLLOUT]" : "",
181 pw->pollfd->revents & POLLERR ? "[POLLERR]" : "",
182 pw->pollfd->revents & POLLHUP ? "[POLLHUP]" : "",
183 pw->pollfd->revents & POLLNVAL ? "[POLLNVAL]" : "",
190 timeout_backtrace.n_frames = 0;
192 /* Handle any pending signals before doing anything else. */
196 /* Cancels the file descriptor event registered with poll_fd_wait() using 'pw',
197 * the struct poll_waiter returned by that function.
199 * An event registered with poll_fd_wait() may be canceled from its time of
200 * registration until the next call to poll_block(). At that point, the event
201 * is automatically canceled by the system and its poll_waiter is freed. */
203 poll_cancel(struct poll_waiter *pw)
206 list_remove(&pw->node);
213 /* Creates and returns a new poll_waiter for 'fd' and 'events'. */
214 static struct poll_waiter *
215 new_waiter(int fd, short int events)
217 struct poll_waiter *waiter = xzalloc(sizeof *waiter);
220 waiter->events = events;
221 if (VLOG_IS_DBG_ENABLED()) {
222 waiter->backtrace = xmalloc(sizeof *waiter->backtrace);
223 backtrace_capture(waiter->backtrace);
225 list_push_back(&waiters, &waiter->node);