static long long kernel_ticks; /* # of timer ticks in kernel threads. */
static long long user_ticks; /* # of timer ticks in user programs. */
+/* Scheduling. */
+#define TIME_SLICE 4 /* # of timer ticks to give each thread. */
+static unsigned thread_ticks; /* # of timer ticks since last yield. */
+
static void kernel_thread (thread_func *, void *aux);
static void idle (void *aux UNUSED);
static struct thread *running_thread (void);
static struct thread *next_thread_to_run (void);
static void init_thread (struct thread *, const char *name, int priority);
-static bool is_thread (struct thread *);
+static bool is_thread (struct thread *) UNUSED;
static void *alloc_frame (struct thread *, size_t size);
static void schedule (void);
void schedule_tail (struct thread *prev);
{
ASSERT (intr_get_level () == INTR_OFF);
- lock_init (&tid_lock, "tid");
+ lock_init (&tid_lock);
list_init (&ready_list);
/* Set up a thread structure for the running thread. */
void
thread_start (void)
{
+ /* NOTE(review): the idle thread is created here at PRI_MAX --
+    confirm this is intended; later Pintos revisions create it at
+    the minimum priority so it never displaces real work. */
- thread_create ("idle", PRI_DEFAULT, idle, NULL);
+ thread_create ("idle", PRI_MAX, idle, NULL);
+ /* Enabling interrupts starts timer ticks, which drive
+    preemption via thread_tick(). */
  intr_enable ();
}
-/* Called by the timer interrupt handler at each timer tick to
- update statistics. */
+/* Called by the timer interrupt handler at each timer tick.
+ Thus, this function runs in an external interrupt context. */
void
thread_tick (void)
{
struct thread *t = thread_current ();
+
+ /* Update statistics. */
if (t == idle_thread)
idle_ticks++;
#ifdef USERPROG
#endif
else
kernel_ticks++;
+
+ /* Enforce preemption. */
+ if (++thread_ticks >= TIME_SLICE)
+ intr_yield_on_return ();
}
/* Prints thread statistics. */
{
return thread_current ()->priority;
}
+
+/* Sets the current thread's nice value to NICE.
+   Scheduler stub: body intentionally empty until the nice-based
+   scheduler is implemented. */
+void
+thread_set_nice (int nice UNUSED)
+{
+ /* Not yet implemented. */
+}
+
+/* Returns the current thread's nice value.
+   Stub: always returns 0 until implemented. */
+int
+thread_get_nice (void)
+{
+ /* Not yet implemented. */
+ return 0;
+}
+
+/* Returns 100 times the system load average.
+   Stub: always returns 0 until implemented. */
+int
+thread_get_load_avg (void)
+{
+ /* Not yet implemented. */
+ return 0;
+}
+
+/* Returns 100 times the current thread's recent_cpu value.
+   Stub: always returns 0 until implemented. */
+int
+thread_get_recent_cpu (void)
+{
+ /* Not yet implemented. */
+ return 0;
+}
\f
-/* Idle thread. Executes when no other thread is ready to run. */
+/* Idle thread. Executes when no other thread is ready to run.
+
+ The idle thread is initially put on the ready list by
+ thread_start(). It will be scheduled once initially, at which
+ point it initializes idle_thread and immediately blocks.
+ After that, the idle thread never appears in the ready list.
+ It is returned by next_thread_to_run() as a special case when
+ the ready list is empty. */
static void
idle (void *aux UNUSED)
{
+ /* Initialize idle_thread.
+
+ Until we run for the first time, idle_thread remains a null
+ pointer. That's okay because we know that, at that point,
+ the ready list has at least one element (the idle thread),
+ so next_thread_to_run() will not attempt to return the idle
+ thread. */
idle_thread = thread_current ();
for (;;)
one to occur, wasting as much as one clock tick worth of
time.
- See [IA32-v2a] "HLT", [IA32-v2b] "STI", and [IA32-v3] 7.7. */
+ See [IA32-v2a] "HLT", [IA32-v2b] "STI", and [IA32-v3a]
+ 7.11.1 "HLT Instruction". */
asm ("sti; hlt");
}
}
down to the start of a page. Because `struct thread' is
always at the beginning of a page and the stack pointer is
somewhere in the middle, this locates the curent thread. */
- asm ("mov %0, %%esp" : "=g" (esp));
+ asm ("mov %%esp, %0" : "=g" (esp));
return pg_round_down (esp);
}
/* Returns true if T appears to point to a valid thread. */
static bool
-is_thread (struct thread *t)
+is_thread (struct thread *t)
{
+  /* "Valid" here means: non-null, and the magic field still holds
+     THREAD_MAGIC -- presumably this detects stack overflow
+     clobbering the thread struct; confirm against the struct
+     thread definition. */
  return t != NULL && t->magic == THREAD_MAGIC;
}
/* Mark us as running. */
cur->status = THREAD_RUNNING;
+ /* Start new time slice. */
+ thread_ticks = 0;
+
#ifdef USERPROG
/* Activate the new address space. */
process_activate ();
/* If the thread we switched from is dying, destroy its struct
thread. This must happen late so that thread_exit() doesn't
- pull out the rug under itself. */
- if (prev != NULL && prev->status == THREAD_DYING)
+ pull out the rug under itself. (We don't free
+ initial_thread because its memory was not obtained via
+ palloc().) */
+ if (prev != NULL && prev->status == THREAD_DYING && prev != initial_thread)
{
ASSERT (prev != cur);
- if (prev != initial_thread)
- palloc_free_page (prev);
+ palloc_free_page (prev);
}
}