+\f
+/* CPU usage tracking. */
+
/* One sample of this process's cumulative CPU usage, paired with the wall
 * time at which it was taken.  Two samples a few seconds apart are enough to
 * compute an average CPU percentage over that interval. */
struct cpu_usage {
    long long int when;         /* Time that this sample was taken, in msec. */
    unsigned long long int cpu; /* Total user+system CPU usage when sampled,
                                 * in msec (see refresh_rusage()). */
};

/* Most recent getrusage() result, refreshed by refresh_rusage(). */
static struct rusage recent_rusage;

/* The two most recent CPU usage samples.  'when' == LLONG_MIN marks a sample
 * slot that has not been filled yet. */
static struct cpu_usage older = { LLONG_MIN, 0 };
static struct cpu_usage newer = { LLONG_MIN, 0 };

/* Latest CPU usage estimate as a percentage, or -1 if none is available. */
static int cpu_usage = -1;
+
+static struct rusage *
+get_recent_rusage(void)
+{
+ return &recent_rusage;
+}
+
/* Refreshes 'recent_rusage' from the kernel and, at most once every 3
 * seconds, takes a new CPU usage sample and recomputes 'cpu_usage' from the
 * two most recent samples. */
static void
refresh_rusage(void)
{
    long long int now;

    now = time_msec();
    getrusage(RUSAGE_SELF, &recent_rusage);

    /* Only sample every 3 seconds so that 'cpu_usage' averages over an
     * interval long enough to be meaningful. */
    if (now >= newer.when + 3 * 1000) {
        older = newer;
        newer.when = now;
        /* Cumulative user + system CPU time, in milliseconds. */
        newer.cpu = (timeval_to_msec(&recent_rusage.ru_utime) +
                     timeval_to_msec(&recent_rusage.ru_stime));

        if (older.when != LLONG_MIN && newer.cpu > older.cpu) {
            /* Percentage = CPU msec consumed / (elapsed msec / 100).  The
             * deltas easily fit in 'unsigned int' for a few-second interval.
             * NOTE(review): a zero CPU delta takes the else branch and
             * reports -1 ("no estimate") rather than 0% because of the
             * strict '>' above -- confirm that is intentional. */
            unsigned int dividend = newer.cpu - older.cpu;
            unsigned int divisor = (newer.when - older.when) / 100;
            cpu_usage = divisor > 0 ? dividend / divisor : -1;
        } else {
            cpu_usage = -1;
        }
    }
}
+
/* Returns an estimate of this process's CPU usage, as a percentage, over the
 * past few seconds of wall-clock time.  Returns -1 if no estimate is available
 * (which will happen if the process has not been running long enough to have
 * an estimate, and can happen for other reasons as well).
 *
 * The estimate itself is maintained by refresh_rusage(); this function only
 * reports the cached value. */
int
get_cpu_usage(void)
{
    return cpu_usage;
}
+
/* If a "backtrace" unixctl request is pending and enough stack traces have
 * been collected by the SIGALRM handler, formats the traces and sends them
 * as the reply, then resets the collection state.
 *
 * SIGALRM is blocked while the shared trace state is read and torn down so
 * the signal handler never observes it half-dismantled. */
static void
trace_run(void)
{
#if HAVE_EXECINFO_H
    if (backtrace_conn && n_traces >= MAX_TRACES) {
        struct unixctl_conn *reply_conn = backtrace_conn;
        struct ds ds = DS_EMPTY_INITIALIZER;
        sigset_t oldsigs;
        size_t i;

        block_sigalrm(&oldsigs);

        for (i = 0; i < n_traces; i++) {
            struct trace *trace = &traces[i];
            char **frame_strs;
            size_t j;

            frame_strs = backtrace_symbols(trace->backtrace, trace->n_frames);

            ds_put_format(&ds, "Backtrace %zu\n", i + 1);
            if (frame_strs) {
                for (j = 0; j < trace->n_frames; j++) {
                    ds_put_format(&ds, "%s\n", frame_strs[j]);
                }
                /* backtrace_symbols() returns a single malloc()'d block;
                 * the individual strings must not be freed. */
                free(frame_strs);
            } else {
                /* backtrace_symbols() returns NULL on allocation failure;
                 * dereferencing it would crash.  Report and continue. */
                ds_put_cstr(&ds, "(backtrace_symbols failed)\n");
            }
            ds_put_cstr(&ds, "\n");
        }

        free(traces);
        traces = NULL;
        n_traces = 0;
        backtrace_conn = NULL;

        unblock_sigalrm(&oldsigs);

        unixctl_command_reply(reply_conn, ds_cstr(&ds));
        ds_destroy(&ds);
    }
#endif
}
+\f
+/* Unixctl interface. */
+
/* "time/stop" stops the monotonic time returned by e.g. time_msec() from
 * advancing, except due to later calls to "time/warp".
 *
 * Takes no arguments; always replies with success. */
static void
timeval_stop_cb(struct unixctl_conn *conn,
                int argc OVS_UNUSED, const char *argv[] OVS_UNUSED,
                void *aux OVS_UNUSED)
{
    time_stopped = true;
    unixctl_command_reply(conn, NULL);
}
+
+/* "time/warp MSECS" advances the current monotonic time by the specified
+ * number of milliseconds. Unless "time/stop" has also been executed, the
+ * monotonic clock continues to tick forward at the normal rate afterward.
+ *
+ * Does not affect wall clock readings. */
+static void
+timeval_warp_cb(struct unixctl_conn *conn,
+ int argc OVS_UNUSED, const char *argv[], void *aux OVS_UNUSED)
+{
+ struct timespec ts;
+ int msecs;
+
+ msecs = atoi(argv[1]);
+ if (msecs <= 0) {
+ unixctl_command_reply_error(conn, "invalid MSECS");
+ return;
+ }
+
+ ts.tv_sec = msecs / 1000;
+ ts.tv_nsec = (msecs % 1000) * 1000 * 1000;
+ timespec_add(&warp_offset, &warp_offset, &ts);
+ timespec_add(&monotonic_time, &monotonic_time, &ts);
+ unixctl_command_reply(conn, "warped");
+}
+
/* "backtrace" unixctl command: begins collecting up to MAX_TRACES stack
 * traces from the periodic SIGALRM handler.  The reply is sent later, from
 * trace_run(), once enough traces have accumulated.  Only one collection may
 * be in progress at a time. */
static void
backtrace_cb(struct unixctl_conn *conn,
             int argc OVS_UNUSED, const char *argv[] OVS_UNUSED,
             void *aux OVS_UNUSED)
{
    sigset_t oldsigs;

    /* This command is only useful when backtraces are supported and the
     * SIGALRM timer is actually running. */
    assert(HAVE_EXECINFO_H && CACHE_TIME);

    if (backtrace_conn) {
        unixctl_command_reply_error(conn, "In Use");
        return;
    }
    assert(!traces);

    /* Block SIGALRM while installing the trace buffer so the signal handler
     * never observes a half-initialized state. */
    block_sigalrm(&oldsigs);
    backtrace_conn = conn;
    traces = xmalloc(MAX_TRACES * sizeof *traces);
    n_traces = 0;
    unblock_sigalrm(&oldsigs);
}
+
+void
+timeval_dummy_register(void)
+{
+ unixctl_command_register("time/stop", "", 0, 0, timeval_stop_cb, NULL);
+ unixctl_command_register("time/warp", "MSECS", 1, 1,
+ timeval_warp_cb, NULL);
+}