X-Git-Url: https://pintos-os.org/cgi-bin/gitweb.cgi?p=pintos-anon;a=blobdiff_plain;f=src%2Fthreads%2Fthread.c;h=d68c123fb2476420d09557f4f52d4a6fb12949f4;hp=5229aef34d013b5bfd8bcd9c09a2983637018dc9;hb=09fa5c11aff62bd728d4fe8de28a7abaf252fed3;hpb=3f521b71eaf97c531bfa671b3aa2b9b971a37499

diff --git a/src/threads/thread.c b/src/threads/thread.c
index 5229aef..d68c123 100644
--- a/src/threads/thread.c
+++ b/src/threads/thread.c
@@ -7,10 +7,10 @@
 #include "threads/flags.h"
 #include "threads/interrupt.h"
 #include "threads/intr-stubs.h"
-#include "threads/mmu.h"
 #include "threads/palloc.h"
 #include "threads/switch.h"
 #include "threads/synch.h"
+#include "threads/vaddr.h"
 #ifdef USERPROG
 #include "userprog/process.h"
 #endif
@@ -24,6 +24,10 @@
    that are ready to run but not actually running. */
 static struct list ready_list;
 
+/* List of all processes.  Processes are added to this list
+   when they are first scheduled and removed when they exit. */
+static struct list all_list;
+
 /* Idle thread. */
 static struct thread *idle_thread;
 
@@ -50,6 +54,11 @@ static long long user_ticks;    /* # of timer ticks in user programs. */
 #define TIME_SLICE 4            /* # of timer ticks to give each thread. */
 static unsigned thread_ticks;   /* # of timer ticks since last yield. */
 
+/* If false (default), use round-robin scheduler.
+   If true, use multi-level feedback queue scheduler.
+   Controlled by kernel command-line option "-o mlfqs". */
+bool thread_mlfqs;
+
 static void kernel_thread (thread_func *, void *aux);
 
 static void idle (void *aux UNUSED);
@@ -59,7 +68,7 @@ static void init_thread (struct thread *, const char *name, int priority);
 static bool is_thread (struct thread *) UNUSED;
 static void *alloc_frame (struct thread *, size_t size);
 static void schedule (void);
-void schedule_tail (struct thread *prev);
+void thread_schedule_tail (struct thread *prev);
 static tid_t allocate_tid (void);
 
 /* Initializes the threading system by transforming the code
@@ -71,7 +80,10 @@ static tid_t allocate_tid (void);
 
    After calling this function, be sure to initialize the page
    allocator before trying to create any threads with
-   thread_create(). */
+   thread_create().
+
+   It is not safe to call thread_current() until this function
+   finishes. */
 void
 thread_init (void)
 {
@@ -79,6 +91,7 @@ thread_init (void)
 
   lock_init (&tid_lock);
   list_init (&ready_list);
+  list_init (&all_list);
 
   /* Set up a thread structure for the running thread. */
   initial_thread = running_thread ();
@@ -92,8 +105,16 @@
 void
 thread_start (void)
 {
-  thread_create ("idle", PRI_MAX, idle, NULL);
+  /* Create the idle thread. */
+  struct semaphore idle_started;
+  sema_init (&idle_started, 0);
+  thread_create ("idle", PRI_MIN, idle, &idle_started);
+
+  /* Start preemptive thread scheduling. */
   intr_enable ();
+
+  /* Wait for the idle thread to initialize idle_thread. */
+  sema_down (&idle_started);
 }
 
 /* Called by the timer interrupt handler at each timer tick.
@@ -150,6 +171,7 @@ thread_create (const char *name, int priority,
   struct switch_entry_frame *ef;
   struct switch_threads_frame *sf;
   tid_t tid;
+  enum intr_level old_level;
 
   ASSERT (function != NULL);
 
@@ -162,6 +184,11 @@ thread_create (const char *name, int priority,
   init_thread (t, name, priority);
   tid = t->tid = allocate_tid ();
 
+  /* Prepare thread for first run by initializing its stack.
+     Do this atomically so intermediate values for the 'stack'
+     member cannot be observed. */
+  old_level = intr_disable ();
+
   /* Stack frame for kernel_thread(). */
   kf = alloc_frame (t, sizeof *kf);
   kf->eip = NULL;
@@ -175,6 +202,9 @@ thread_create (const char *name, int priority,
   /* Stack frame for switch_threads(). */
   sf = alloc_frame (t, sizeof *sf);
   sf->eip = switch_entry;
+  sf->ebp = 0;
+
+  intr_set_level (old_level);
 
   /* Add to run queue. */
   thread_unblock (t);
@@ -200,7 +230,12 @@ thread_block (void)
 
 /* Transitions a blocked thread T to the ready-to-run state.
    This is an error if T is not blocked.  (Use thread_yield() to
-   make the running thread ready.) */
+   make the running thread ready.)
+
+   This function does not preempt the running thread.  This can
+   be important: if the caller had disabled interrupts itself,
+   it may expect that it can atomically unblock a thread and
+   update other data. */
 void
 thread_unblock (struct thread *t)
 {
@@ -259,9 +294,11 @@ thread_exit (void)
   process_exit ();
 #endif
 
-  /* Just set our status to dying and schedule another process.
-     We will be destroyed during the call to schedule_tail(). */
+  /* Remove thread from all threads list, set our status to dying,
+     and schedule another process.  That process will destroy us
+     when it calls thread_schedule_tail(). */
   intr_disable ();
+  list_remove (&thread_current()->allelem);
   thread_current ()->status = THREAD_DYING;
   schedule ();
   NOT_REACHED ();
@@ -278,12 +315,30 @@ thread_yield (void)
   ASSERT (!intr_context ());
 
   old_level = intr_disable ();
-  list_push_back (&ready_list, &cur->elem);
+  if (cur != idle_thread)
+    list_push_back (&ready_list, &cur->elem);
   cur->status = THREAD_READY;
   schedule ();
   intr_set_level (old_level);
 }
 
+/* Invoke function 'func' on all threads, passing along 'aux'.
+   This function must be called with interrupts off. */
+void
+thread_foreach (thread_action_func *func, void *aux)
+{
+  struct list_elem *e;
+
+  ASSERT (intr_get_level () == INTR_OFF);
+
+  for (e = list_begin (&all_list); e != list_end (&all_list);
+       e = list_next (e))
+    {
+      struct thread *t = list_entry (e, struct thread, allelem);
+      func (t, aux);
+    }
+}
+
 /* Sets the current thread's priority to NEW_PRIORITY. */
 void
 thread_set_priority (int new_priority)
@@ -333,21 +388,17 @@ thread_get_recent_cpu (void)
 
    The idle thread is initially put on the ready list by
    thread_start().  It will be scheduled once initially, at which
-   point it initializes idle_thread and immediately blocks.
-   After that, the idle thread never appears in the ready list.
-   It is returned by next_thread_to_run() as a special case when
-   the ready list is empty. */
+   point it initializes idle_thread, "up"s the semaphore passed
+   to it to enable thread_start() to continue, and immediately
+   blocks.  After that, the idle thread never appears in the
+   ready list.  It is returned by next_thread_to_run() as a
+   special case when the ready list is empty. */
 static void
-idle (void *aux UNUSED)
+idle (void *idle_started_ UNUSED)
 {
-  /* Initialize idle_thread.
-
-     Until we run for the first time, idle_thread remains a null
-     pointer.  That's okay because we know that, at that point,
-     the ready list has at least one element (the idle thread),
-     so next_thread_to_run() will not attempt to return the idle
-     thread. */
+  struct semaphore *idle_started = idle_started_;
   idle_thread = thread_current ();
+  sema_up (idle_started);
 
   for (;;)
     {
@@ -365,8 +416,9 @@ idle (void *aux UNUSED)
          one to occur, wasting as much as one clock tick worth of
         time.
 
-         See [IA32-v2a] "HLT", [IA32-v2b] "STI", and [IA32-v3] 7.7. */
-      asm ("sti; hlt");
+         See [IA32-v2a] "HLT", [IA32-v2b] "STI", and [IA32-v3a]
+         7.11.1 "HLT Instruction". */
+      asm volatile ("sti; hlt" : : : "memory");
     }
 }
 
@@ -407,6 +459,8 @@ is_thread (struct thread *t)
 static void
 init_thread (struct thread *t, const char *name, int priority)
 {
+  enum intr_level old_level;
+
   ASSERT (t != NULL);
   ASSERT (PRI_MIN <= priority && priority <= PRI_MAX);
   ASSERT (name != NULL);
@@ -417,6 +471,10 @@ init_thread (struct thread *t, const char *name, int priority)
   t->stack = (uint8_t *) t + PGSIZE;
   t->priority = priority;
   t->magic = THREAD_MAGIC;
+
+  old_level = intr_disable ();
+  list_push_back (&all_list, &t->allelem);
+  intr_set_level (old_level);
 }
 
 /* Allocates a SIZE-byte frame at the top of thread T's stack and
@@ -463,7 +521,7 @@ next_thread_to_run (void)
    After this function and its caller returns, the thread switch
    is complete. */
 void
-schedule_tail (struct thread *prev)
+thread_schedule_tail (struct thread *prev)
 {
   struct thread *cur = running_thread ();
 
@@ -482,12 +540,13 @@
 
   /* If the thread we switched from is dying, destroy its struct
      thread.  This must happen late so that thread_exit() doesn't
-     pull out the rug under itself. */
-  if (prev != NULL && prev->status == THREAD_DYING)
+     pull out the rug under itself.  (We don't free
+     initial_thread because its memory was not obtained via
+     palloc().) */
+  if (prev != NULL && prev->status == THREAD_DYING && prev != initial_thread)
     {
       ASSERT (prev != cur);
-      if (prev != initial_thread)
-        palloc_free_page (prev);
+      palloc_free_page (prev);
     }
 }
 
@@ -496,8 +555,8 @@
    running to some other state.  This function finds another
   thread to run and switches to it.
 
-   It's not safe to call printf() until schedule_tail() has
-   completed. */
+   It's not safe to call printf() until thread_schedule_tail()
+   has completed. */
 static void
 schedule (void)
 {
@@ -511,7 +570,7 @@ schedule (void)
 
   if (cur != next)
     prev = switch_threads (cur, next);
-  schedule_tail (prev);
+  thread_schedule_tail (prev);
 }
 
 /* Returns a tid to use for a new thread. */