+
+/* Returns an estimate of this process's CPU usage, as a percentage, over the
+ * past few seconds of wall-clock time. Returns -1 if no estimate is available
+ * (which will happen if the process has not been running long enough to have
+ * an estimate, and can happen for other reasons as well). */
+int
+get_cpu_usage(void)
+{
+    /* 'cpu_usage' is maintained elsewhere in this file -- presumably updated
+     * from the periodic timer tick; this is just a snapshot read. */
+    return cpu_usage;
+}
+
+/* Returns a hash of the backtrace stored in 'trace'.  Only the first
+ * 'n_frames' frame addresses participate in the hash, so two traces with
+ * identical frames hash equally regardless of any unused trailing slots. */
+static uint32_t
+hash_trace(struct trace *trace)
+{
+    return hash_bytes(trace->backtrace,
+                      trace->n_frames * sizeof *trace->backtrace, 0);
+}
+
+/* Searches 'trace_map' for a trace with a backtrace identical to 'key''s
+ * (same frame count and equal frame addresses).  Returns the stored trace if
+ * found, otherwise NULL. */
+static struct trace *
+trace_map_lookup(struct hmap *trace_map, struct trace *key)
+{
+    struct trace *value;
+
+    /* hash_trace() narrows the search to one bucket; memcmp() confirms an
+     * exact frame-for-frame match to rule out hash collisions. */
+    HMAP_FOR_EACH_WITH_HASH (value, node, hash_trace(key), trace_map) {
+        if (key->n_frames == value->n_frames
+            && !memcmp(key->backtrace, value->backtrace,
+                       key->n_frames * sizeof *key->backtrace)) {
+            return value;
+        }
+    }
+    return NULL;
+}
+
+/* Appends a string to 'ds' representing backtraces recorded at regular
+ * intervals in the recent past.  This information can be used to get a sense
+ * of what the process has been spending the majority of time doing.  Will
+ * omit any backtraces which have not occurred at least 'min_count' times. */
+void
+format_backtraces(struct ds *ds, size_t min_count)
+{
+    time_init();
+
+    if (HAVE_EXECINFO_H && CACHE_TIME) {
+        struct hmap trace_map = HMAP_INITIALIZER(&trace_map);
+        struct trace *trace, *next;
+        sigset_t oldsigs;
+        size_t i;
+
+        /* Keep the SIGALRM handler from mutating 'traces' underneath us
+         * while we aggregate them. */
+        block_sigalrm(&oldsigs);
+
+        /* Coalesce identical backtraces, counting occurrences of each.
+         * ('t' rather than 'trace' here, to avoid shadowing the outer
+         * 'trace' used by the loop below.) */
+        for (i = 0; i < MAX_TRACES; i++) {
+            struct trace *t = &traces[i];
+            struct trace *map_trace;
+
+            if (!t->n_frames) {
+                continue;
+            }
+
+            map_trace = trace_map_lookup(&trace_map, t);
+            if (map_trace) {
+                map_trace->count++;
+            } else {
+                hmap_insert(&trace_map, &t->node, hash_trace(t));
+                t->count = 1;
+            }
+        }
+
+        /* Emit each unique backtrace seen at least 'min_count' times. */
+        HMAP_FOR_EACH_SAFE (trace, next, node, &trace_map) {
+            char **frame_strs;
+            size_t j;
+
+            hmap_remove(&trace_map, &trace->node);
+
+            if (trace->count < min_count) {
+                continue;
+            }
+
+            /* backtrace_symbols() returns NULL on allocation failure; skip
+             * this trace rather than dereferencing a null array below. */
+            frame_strs = backtrace_symbols(trace->backtrace, trace->n_frames);
+            if (!frame_strs) {
+                continue;
+            }
+
+            ds_put_format(ds, "Count %zu\n", trace->count);
+            for (j = 0; j < trace->n_frames; j++) {
+                ds_put_format(ds, "%s\n", frame_strs[j]);
+            }
+            ds_put_cstr(ds, "\n");
+
+            /* backtrace_symbols() mallocs one block; the strings inside it
+             * must not be freed individually. */
+            free(frame_strs);
+        }
+        hmap_destroy(&trace_map);
+
+        ds_chomp(ds, '\n');
+        unblock_sigalrm(&oldsigs);
+    }
+}
+\f
+/* Unixctl interface. */
+
+/* "time/stop" stops the monotonic time returned by e.g. time_msec() from
+ * advancing, except due to later calls to "time/warp". */
+static void
+timeval_stop_cb(struct unixctl_conn *conn,
+                int argc OVS_UNUSED, const char *argv[] OVS_UNUSED,
+                void *aux OVS_UNUSED)
+{
+    /* Freeze the cached monotonic clock; "time/warp" can still advance it. */
+    time_stopped = true;
+    unixctl_command_reply(conn, NULL);
+}
+
+/* "time/warp MSECS" advances the current monotonic time by the specified
+ * number of milliseconds.  Unless "time/stop" has also been executed, the
+ * monotonic clock continues to tick forward at the normal rate afterward.
+ *
+ * Does not affect wall clock readings. */
+static void
+timeval_warp_cb(struct unixctl_conn *conn,
+                int argc OVS_UNUSED, const char *argv[], void *aux OVS_UNUSED)
+{
+    struct timespec ts;
+    char *end;
+    long msecs;
+
+    /* Use strtol() rather than atoi(): atoi() has undefined behavior on
+     * overflow and silently accepts trailing garbage (e.g. "10abc").
+     * Reject non-numeric input, non-positive values, and values that do not
+     * fit in an int. */
+    msecs = strtol(argv[1], &end, 10);
+    if (end == argv[1] || *end != '\0' || msecs <= 0
+        || msecs != (long) (int) msecs) {
+        unixctl_command_reply_error(conn, "invalid MSECS");
+        return;
+    }
+
+    ts.tv_sec = msecs / 1000;
+    ts.tv_nsec = (msecs % 1000) * 1000 * 1000;
+    /* Warp both the externally visible offset and the cached monotonic time
+     * so readers see the jump immediately. */
+    timespec_add(&warp_offset, &warp_offset, &ts);
+    timespec_add(&monotonic_time, &monotonic_time, &ts);
+    unixctl_command_reply(conn, "warped");
+}
+
+/* unixctl handler that replies with all recently recorded backtraces.
+ * Passing 0 as 'min_count' means no frequency filtering is applied. */
+static void
+backtrace_cb(struct unixctl_conn *conn,
+             int argc OVS_UNUSED, const char *argv[] OVS_UNUSED,
+             void *aux OVS_UNUSED)
+{
+    struct ds ds = DS_EMPTY_INITIALIZER;
+
+    /* This command is only registered when backtrace collection is compiled
+     * in and the cached clock (timer tick) is active. */
+    assert(HAVE_EXECINFO_H && CACHE_TIME);
+    format_backtraces(&ds, 0);
+    unixctl_command_reply(conn, ds_cstr(&ds));
+    ds_destroy(&ds);
+}
+
+/* Registers the "time/stop" and "time/warp" unixctl commands, which let
+ * tests freeze and advance the monotonic clock. */
+void
+timeval_dummy_register(void)
+{
+    unixctl_command_register("time/stop", "", 0, 0, timeval_stop_cb, NULL);
+    unixctl_command_register("time/warp", "MSECS", 1, 1,
+                             timeval_warp_cb, NULL);
+}