/*
- * Copyright (c) 2008, 2009, 2010 Nicira Networks.
+ * Copyright (c) 2008, 2009, 2010, 2011 Nicira Networks.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
#include <unistd.h>
#include "coverage.h"
#include "fatal-signal.h"
+#include "signals.h"
#include "util.h"
-
#include "vlog.h"
-#define THIS_MODULE VLM_timeval
+
+VLOG_DEFINE_THIS_MODULE(timeval);
/* The clock to use for measuring time intervals. This is CLOCK_MONOTONIC by
* preference, but on systems that don't have a monotonic clock we fall back
static time_t time_add(time_t, time_t);
static void block_sigalrm(sigset_t *);
static void unblock_sigalrm(const sigset_t *);
-static void log_poll_interval(long long int last_wakeup,
- const struct rusage *last_rusage);
+static void log_poll_interval(long long int last_wakeup);
+static struct rusage *get_recent_rusage(void);
+static void refresh_rusage(void);
/* Initializes the timetracking module.
*
sa.sa_handler = sigalrm_handler;
sigemptyset(&sa.sa_mask);
sa.sa_flags = flags;
- if (sigaction(SIGALRM, &sa, NULL)) {
- ovs_fatal(errno, "sigaction(SIGALRM) failed");
- }
+ xsigaction(SIGALRM, &sa, NULL);
}
/* Remove SA_RESTART from the flags for SIGALRM, so that any system call that
struct itimerspec itimer;
if (timer_create(monotonic_clock, NULL, &timer_id)) {
- ovs_fatal(errno, "timer_create failed");
+ VLOG_FATAL("timer_create failed (%s)", strerror(errno));
}
itimer.it_interval.tv_sec = 0;
itimer.it_value = itimer.it_interval;
if (timer_settime(timer_id, 0, &itimer, NULL)) {
- ovs_fatal(errno, "timer_settime failed");
+ VLOG_FATAL("timer_settime failed (%s)", strerror(errno));
}
}
time_poll(struct pollfd *pollfds, int n_pollfds, int timeout)
{
static long long int last_wakeup;
- static struct rusage last_rusage;
long long int start;
sigset_t oldsigs;
bool blocked;
int retval;
time_refresh();
- log_poll_interval(last_wakeup, &last_rusage);
+ log_poll_interval(last_wakeup);
coverage_clear();
start = time_msec();
blocked = false;
unblock_sigalrm(&oldsigs);
}
last_wakeup = time_msec();
- getrusage(RUSAGE_SELF, &last_rusage);
+ refresh_rusage();
return retval;
}
sigset_t sigalrm;
sigemptyset(&sigalrm);
sigaddset(&sigalrm, SIGALRM);
- if (sigprocmask(SIG_BLOCK, &sigalrm, oldsigs)) {
- ovs_fatal(errno, "sigprocmask");
- }
+ xsigprocmask(SIG_BLOCK, &sigalrm, oldsigs);
}
static void
unblock_sigalrm(const sigset_t *oldsigs)
{
- if (sigprocmask(SIG_SETMASK, oldsigs, NULL)) {
- ovs_fatal(errno, "sigprocmask");
- }
+ xsigprocmask(SIG_SETMASK, oldsigs, NULL);
}
long long int
return (long long int) tv->tv_sec * 1000 + tv->tv_usec / 1000;
}
+/* Stores the current wall-clock time of day in '*tv'.
+ *
+ * Unlike plain gettimeofday(), this wrapper checks the return value and
+ * reports failure via VLOG_FATAL (consistent with the other x-prefixed
+ * never-fail wrappers introduced in this change, e.g. xsigaction and
+ * xsigprocmask), so callers need not check for errors themselves. */
+void
+xgettimeofday(struct timeval *tv)
+{
+    if (gettimeofday(tv, NULL) == -1) {
+        VLOG_FATAL("gettimeofday failed (%s)", strerror(errno));
+    }
+}
+
static long long int
timeval_diff_msec(const struct timeval *a, const struct timeval *b)
{
}
static void
-log_poll_interval(long long int last_wakeup, const struct rusage *last_rusage)
+log_poll_interval(long long int last_wakeup)
{
static unsigned int mean_interval; /* In 16ths of a millisecond. */
static unsigned int n_samples;
now = time_msec();
interval = MIN(10000, now - last_wakeup) << 4;
- /* Warn if we took too much time between polls. */
- if (n_samples > 10 && interval > mean_interval * 8) {
+ /* Warn if we took too much time between polls: at least 50 ms and at least
+ * 8X the mean interval. */
+ if (n_samples > 10 && interval > mean_interval * 8 && interval > 50 * 16) {
+ const struct rusage *last_rusage = get_recent_rusage();
struct rusage rusage;
getrusage(RUSAGE_SELF, &rusage);
rusage.ru_nivcsw - last_rusage->ru_nivcsw);
}
- /* Care should be taken in the value chosen for logging. Depending
- * on the configuration, syslog can write changes synchronously,
- * which can cause the coverage messages to take longer to log
+ /* Care should be taken in the value chosen for logging. Depending
+ * on the configuration, syslog can write changes synchronously,
+ * which can cause the coverage messages to take longer to log
* than the processing delay that triggered it. */
coverage_log(VLL_INFO, true);
}
mean_interval = interval;
}
}
+\f
+/* CPU usage tracking. */
+
+/* One sample of cumulative CPU consumption, used in pairs (older/newer)
+ * to estimate recent CPU usage as a rate over a wall-clock interval. */
+struct cpu_usage {
+    long long int when;         /* Time that this sample was taken. */
+    unsigned long long int cpu; /* Total user+system CPU usage when sampled. */
+};
+
+static struct rusage recent_rusage;   /* Latest getrusage() snapshot, updated
+                                       * by refresh_rusage(). */
+static struct cpu_usage older = { LLONG_MIN, 0 }; /* Sample >= ~3 s old.
+                                                   * LLONG_MIN == "no sample
+                                                   * taken yet". */
+static struct cpu_usage newer = { LLONG_MIN, 0 }; /* Most recent sample. */
+static int cpu_usage = -1;    /* Estimated CPU usage in percent, or -1 if no
+                               * estimate is available yet. */
+
+/* Returns a pointer to the most recent getrusage() snapshot taken by
+ * refresh_rusage().  The returned data is only as fresh as the last call to
+ * refresh_rusage(); callers that need up-to-the-moment data should call
+ * getrusage() themselves. */
+static struct rusage *
+get_recent_rusage(void)
+{
+    return &recent_rusage;
+}
+
+/* Takes a fresh getrusage() snapshot and, at most once every 3 seconds,
+ * rotates the (older, newer) CPU samples and recomputes 'cpu_usage' as the
+ * percentage of one CPU consumed (user + system) over the interval between
+ * the two retained samples. */
+static void
+refresh_rusage(void)
+{
+    long long int now;
+
+    now = time_msec();
+    getrusage(RUSAGE_SELF, &recent_rusage);
+
+    /* Only rotate samples every 3 seconds so that the estimate covers a
+     * few seconds of history rather than a single poll interval. */
+    if (now >= newer.when + 3 * 1000) {
+        older = newer;
+        newer.when = now;
+        newer.cpu = (timeval_to_msec(&recent_rusage.ru_utime) +
+                     timeval_to_msec(&recent_rusage.ru_stime));
+
+        /* Need two valid samples ('older.when != LLONG_MIN') and a nonzero
+         * CPU delta to produce an estimate; otherwise report "unknown". */
+        if (older.when != LLONG_MIN && newer.cpu > older.cpu) {
+            unsigned int dividend = newer.cpu - older.cpu;
+            /* Dividing the wall-clock delta (ms) by 100 turns ms-of-CPU per
+             * ms-of-wall-clock directly into a percentage. */
+            unsigned int divisor = (newer.when - older.when) / 100;
+            cpu_usage = divisor > 0 ? dividend / divisor : -1;
+        } else {
+            cpu_usage = -1;
+        }
+    }
+}
+
+/* Returns an estimate of this process's CPU usage, as a percentage, over the
+ * past few seconds of wall-clock time.  Returns -1 if no estimate is available
+ * (which will happen if the process has not been running long enough to have
+ * an estimate, and can happen for other reasons as well).
+ *
+ * The value is recomputed by refresh_rusage(), which runs after each
+ * time_poll() wakeup, so it is cheap to call this function frequently. */
+int
+get_cpu_usage(void)
+{
+    return cpu_usage;
+}