must be active, but we must be able to choose the 4.4@acronym{BSD}
scheduler
with the @option{-mlfqs} kernel option. Passing this
-option sets @code{enable_mlfqs}, declared in @file{threads/init.h}, to
+option sets @code{thread_mlfqs}, declared in @file{threads/thread.h}, to
true when the options are parsed by @func{parse_options}, which happens
midway through @func{main}.
}
/* Suspends execution for approximately MS milliseconds. */
-@@ -132,6 +158,16 @@ timer_interrupt (struct intr_frame *args
+@@ -132,6 +158,17 @@ timer_interrupt (struct intr_frame *args
{
ticks++;
thread_tick ();
+ if (ticks < t->wakeup_time)
+ break;
+ sema_up (&t->timer_sema);
++ thread_yield_to_higher_priority ();
+ list_pop_front (&wait_list);
+ }
}
- /* Enforce preemption. */
- if (++thread_ticks >= TIME_SLICE)
- intr_yield_on_return ();
-+ if (enable_mlfqs)
++ if (thread_mlfqs)
+ {
+ /* Update load average. */
+ if (timer_ticks () % TIMER_FREQ == 0)
+ /* Switch threads if time slice has expired. */
+ if (++thread_ticks >= TIME_SLICE)
+ {
-+ if (enable_mlfqs)
++ if (thread_mlfqs)
+ thread_recompute_priority (thread_current ());
+ intr_yield_on_return ();
+ }
/* Initialize thread. */
- init_thread (t, name, priority);
-+ init_thread (t, name, enable_mlfqs ? cur->priority : priority);
++ init_thread (t, name, thread_mlfqs ? cur->priority : priority);
tid = t->tid = allocate_tid ();
+ t->nice = cur->nice;
+ t->recent_cpu = cur->recent_cpu;
+thread_set_priority (int priority)
{
- thread_current ()->priority = new_priority;
-+ if (!enable_mlfqs)
++ if (!thread_mlfqs)
+ {
+ struct thread *t = thread_current ();
+
}
/* Returns the current thread's priority. */
-@@ -298,33 +386,93 @@ thread_get_priority (void)
+@@ -298,33 +386,98 @@ thread_get_priority (void)
/* Sets the current thread's nice value to NICE. */
void
+ int old_priority = t->priority;
+ int default_priority = t->normal_priority;
+ int donation = PRI_MIN;
-+ if (enable_mlfqs)
++ if (thread_mlfqs)
+ {
+ default_priority = PRI_MAX - fix_round (t->recent_cpu) / 4 - t->nice * 2;
+ if (default_priority < PRI_MIN)
+ thread_lower_priority, NULL),
+ struct thread, elem);
+ if (max->priority > cur->priority)
-+ thread_yield ();
++ {
++ if (intr_context ())
++ intr_yield_on_return ();
++ else
++ thread_yield ();
++ }
+ }
+ intr_set_level (old_level);
}
int i;
/* This test does not work with the MLFQS. */
- ASSERT (!enable_mlfqs);
+ ASSERT (!thread_mlfqs);
wake_time = timer_ticks () + 5 * TIMER_FREQ;
sema_init (&wait_sema, 0);
int i;
/* This test does not work with the MLFQS. */
- ASSERT (!enable_mlfqs);
+ ASSERT (!thread_mlfqs);
msg ("Creating %d threads to sleep %d times each.", thread_cnt, iterations);
msg ("Each thread sleeps 10 ticks each time.");
int i;
/* This test does not work with the MLFQS. */
- ASSERT (!enable_mlfqs);
+ ASSERT (!thread_mlfqs);
msg ("Creating %d threads to sleep %d times each.", thread_cnt, iterations);
msg ("Thread 0 sleeps 10 ticks each time,");
int64_t start_time;
struct lock lock;
- ASSERT (enable_mlfqs);
+ ASSERT (thread_mlfqs);
msg ("Main thread acquiring lock.");
lock_init (&lock);
int nice;
int i;
- ASSERT (enable_mlfqs);
+ ASSERT (thread_mlfqs);
ASSERT (thread_cnt <= MAX_THREAD_CNT);
ASSERT (nice_min >= -10);
ASSERT (nice_step >= 0);
int elapsed;
int load_avg;
- ASSERT (enable_mlfqs);
+ ASSERT (thread_mlfqs);
msg ("spinning for up to 45 seconds, please wait...");
{
int i;
- ASSERT (enable_mlfqs);
+ ASSERT (thread_mlfqs);
start_time = timer_ticks ();
msg ("Starting %d niced load threads...", THREAD_CNT);
{
int i;
- ASSERT (enable_mlfqs);
+ ASSERT (thread_mlfqs);
start_time = timer_ticks ();
msg ("Starting %d load threads...", THREAD_CNT);
int64_t start_time;
int last_elapsed = 0;
- ASSERT (enable_mlfqs);
+ ASSERT (thread_mlfqs);
msg ("Sleeping 10 seconds to allow recent_cpu to decay, please wait...");
start_time = timer_ticks ();
test_priority_change (void)
{
/* This test does not work with the MLFQS. */
- ASSERT (!enable_mlfqs);
+ ASSERT (!thread_mlfqs);
msg ("Creating a high-priority thread 2.");
thread_create ("thread 2", PRI_DEFAULT + 1, changing_thread, NULL);
int i;
/* This test does not work with the MLFQS. */
- ASSERT (!enable_mlfqs);
+ ASSERT (!thread_mlfqs);
lock_init (&lock);
cond_init (&condition);
struct lock lock;
/* This test does not work with the MLFQS. */
- ASSERT (!enable_mlfqs);
+ ASSERT (!thread_mlfqs);
/* Make sure our priority is the default. */
ASSERT (thread_get_priority () == PRI_DEFAULT);
struct lock a, b;
/* This test does not work with the MLFQS. */
- ASSERT (!enable_mlfqs);
+ ASSERT (!thread_mlfqs);
/* Make sure our priority is the default. */
ASSERT (thread_get_priority () == PRI_DEFAULT);
struct lock a, b;
/* This test does not work with the MLFQS. */
- ASSERT (!enable_mlfqs);
+ ASSERT (!thread_mlfqs);
/* Make sure our priority is the default. */
ASSERT (thread_get_priority () == PRI_DEFAULT);
struct locks locks;
/* This test does not work with the MLFQS. */
- ASSERT (!enable_mlfqs);
+ ASSERT (!thread_mlfqs);
/* Make sure our priority is the default. */
ASSERT (thread_get_priority () == PRI_DEFAULT);
struct lock lock;
/* This test does not work with the MLFQS. */
- ASSERT (!enable_mlfqs);
+ ASSERT (!thread_mlfqs);
/* Make sure our priority is the default. */
ASSERT (thread_get_priority () == PRI_DEFAULT);
struct lock_and_sema ls;
/* This test does not work with the MLFQS. */
- ASSERT (!enable_mlfqs);
+ ASSERT (!thread_mlfqs);
/* Make sure our priority is the default. */
ASSERT (thread_get_priority () == PRI_DEFAULT);
int i, cnt;
/* This test does not work with the MLFQS. */
- ASSERT (!enable_mlfqs);
+ ASSERT (!thread_mlfqs);
/* Make sure our priority is the default. */
ASSERT (thread_get_priority () == PRI_DEFAULT);
test_priority_preempt (void)
{
/* This test does not work with the MLFQS. */
- ASSERT (!enable_mlfqs);
+ ASSERT (!thread_mlfqs);
/* Make sure our priority is the default. */
ASSERT (thread_get_priority () == PRI_DEFAULT);
int i;
/* This test does not work with the MLFQS. */
- ASSERT (!enable_mlfqs);
+ ASSERT (!thread_mlfqs);
sema_init (&sema, 0);
thread_set_priority (PRI_MIN);
/* Page directory with kernel mappings only. */
uint32_t *base_page_dir;
-/* -mlfqs:
- If false (default), use round-robin scheduler.
- If true, use multi-level feedback queue scheduler. */
-bool enable_mlfqs;
-
#ifdef FILESYS
/* -f: Format the file system? */
static bool format_filesys;
else if (!strcmp (name, "-rs"))
random_init (atoi (value));
else if (!strcmp (name, "-mlfqs"))
- enable_mlfqs = true;
+ thread_mlfqs = true;
#ifdef USERPROG
else if (!strcmp (name, "-ul"))
user_page_limit = atoi (value);
/* Page directory with kernel mappings only. */
extern uint32_t *base_page_dir;
-/* -o mlfqs:
- If false (default), use round-robin scheduler.
- If true, use multi-level feedback queue scheduler. */
-extern bool enable_mlfqs;
-
/* -q: Power off when kernel tasks complete? */
extern bool power_off_when_done;
#define TIME_SLICE 4 /* # of timer ticks to give each thread. */
static unsigned thread_ticks; /* # of timer ticks since last yield. */
+/* If false (default), use round-robin scheduler.
+ If true, use multi-level feedback queue scheduler.
+ Controlled by the kernel command-line option "-mlfqs".
+ Note that the command line is not parsed until well after
+ thread_init() is called. */
+bool thread_mlfqs;
+
static void kernel_thread (thread_func *, void *aux);
static void idle (void *aux UNUSED);
After calling this function, be sure to initialize the page
allocator before trying to create any threads with
- thread_create(). */
+ thread_create().
+
+ The kernel command line is not parsed until *after* this
+ function returns, so that when this function runs,
+ thread_mlfqs is always false.
+
+ It is not safe to call thread_current() until this function
+ finishes. */
void
thread_init (void)
{
}
/* Starts preemptive thread scheduling by enabling interrupts.
- Also creates the idle thread. */
+ Also creates the idle thread.
+
+ By the time this function runs, thread_mlfqs has been properly
+ initialized to its final value. */
void
thread_start (void)
{
- thread_create ("idle", PRI_MIN, idle, NULL);
+ /* Create the idle thread with maximum priority. This ensures
+ that it will be scheduled soon after interrupts are enabled.
+ The idle thread will block almost immediately upon
+ scheduling, and subsequently it will never appear on the
+ ready list, so the priority here is otherwise
+ unimportant. */
+ struct semaphore idle_started;
+ sema_init (&idle_started, 0);
+ thread_create ("idle", PRI_MAX, idle, &idle_started);
+
+ /* Start preemptive thread scheduling. */
intr_enable ();
+
+ /* Wait for the idle thread to initialize idle_thread. */
+ sema_down (&idle_started);
}
/* Called by the timer interrupt handler at each timer tick.
ASSERT (!intr_context ());
old_level = intr_disable ();
- list_push_back (&ready_list, &cur->elem);
+ if (cur != idle_thread)
+ list_push_back (&ready_list, &cur->elem);
cur->status = THREAD_READY;
schedule ();
intr_set_level (old_level);
The idle thread is initially put on the ready list by
thread_start(). It will be scheduled once initially, at which
- point it initializes idle_thread and immediately blocks.
- After that, the idle thread never appears in the ready list.
- It is returned by next_thread_to_run() as a special case when
- the ready list is empty. */
+ point it initializes idle_thread, "up"s the semaphore passed
+ to it to enable thread_start() to continue, and immediately
+ blocks. After that, the idle thread never appears in the
+ ready list. It is returned by next_thread_to_run() as a
+ special case when the ready list is empty. */
static void
-idle (void *aux UNUSED)
+idle (void *idle_started_ UNUSED)
{
- /* Initialize idle_thread.
-
- Until we run for the first time, idle_thread remains a null
- pointer. That's okay because we know that, at that point,
- the ready list has at least one element (the idle thread),
- so next_thread_to_run() will not attempt to return the idle
- thread. */
+ struct semaphore *idle_started = idle_started_;
idle_thread = thread_current ();
+ sema_up (idle_started);
for (;;)
{
unsigned magic; /* Detects stack overflow. */
};
+/* If false (default), use round-robin scheduler.
+ If true, use multi-level feedback queue scheduler.
+ Controlled by the kernel command-line option "-mlfqs".
+ Note that the command line is not parsed until well after
+ thread_init() is called. */
+extern bool thread_mlfqs;
+
void thread_init (void);
void thread_start (void);