X-Git-Url: https://pintos-os.org/cgi-bin/gitweb.cgi?p=pintos-anon;a=blobdiff_plain;f=src%2Fthreads%2Fthread.c;h=1dc34adac7770f6bc53b5e5a1fb6b533ec0043f4;hp=01ce7d33227a19196762199f3c349bab4890b688;hb=49c19e58aa14fba779bfe331b1ebaba62d31dfa5;hpb=7d4e3dda080a47db88616f1c0d975f2091be47f1

diff --git a/src/threads/thread.c b/src/threads/thread.c
index 01ce7d3..1dc34ad 100644
--- a/src/threads/thread.c
+++ b/src/threads/thread.c
@@ -7,10 +7,10 @@
 #include "threads/flags.h"
 #include "threads/interrupt.h"
 #include "threads/intr-stubs.h"
-#include "threads/mmu.h"
 #include "threads/palloc.h"
 #include "threads/switch.h"
 #include "threads/synch.h"
+#include "threads/vaddr.h"
 #ifdef USERPROG
 #include "userprog/process.h"
 #endif
@@ -46,13 +46,24 @@
 static long long idle_ticks;    /* # of timer ticks spent idle. */
 static long long kernel_ticks;  /* # of timer ticks in kernel threads. */
 static long long user_ticks;    /* # of timer ticks in user programs. */
 
+/* Scheduling. */
+#define TIME_SLICE 4            /* # of timer ticks to give each thread. */
+static unsigned thread_ticks;   /* # of timer ticks since last yield. */
+
+/* If false (default), use round-robin scheduler.
+   If true, use multi-level feedback queue scheduler.
+   Controlled by kernel command-line option "-o mlfqs".
+   Note that the command line is not parsed until well after
+   thread_init() is called. */
+bool thread_mlfqs;
+
 static void kernel_thread (thread_func *, void *aux);
 static void idle (void *aux UNUSED);
 static struct thread *running_thread (void);
 static struct thread *next_thread_to_run (void);
 static void init_thread (struct thread *, const char *name, int priority);
-static bool is_thread (struct thread *);
+static bool is_thread (struct thread *) UNUSED;
 static void *alloc_frame (struct thread *, size_t size);
 static void schedule (void);
 void schedule_tail (struct thread *prev);
@@ -67,13 +78,20 @@ static tid_t allocate_tid (void);
 
    After calling this function, be sure to initialize the page
    allocator before trying to create any threads with
-   thread_create(). */
+   thread_create().
+
+   The kernel command line is not parsed until *after* this
+   function returns, so that when this function runs,
+   thread_mlfqs is always false.
+
+   It is not safe to call thread_current() until this function
+   finishes. */
 void
 thread_init (void)
 {
   ASSERT (intr_get_level () == INTR_OFF);
 
-  lock_init (&tid_lock, "tid");
+  lock_init (&tid_lock);
   list_init (&ready_list);
 
   /* Set up a thread structure for the running thread. */
@@ -84,20 +102,33 @@ thread_init (void)
 }
 
 /* Starts preemptive thread scheduling by enabling interrupts.
-   Also creates the idle thread. */
+   Also creates the idle thread.
+
+   By the time this function runs, thread_mlfqs has been properly
+   initialized to its final value. */
 void
 thread_start (void)
 {
-  thread_create ("idle", PRI_DEFAULT, idle, NULL);
+  /* Create the idle thread. */
+  struct semaphore idle_started;
+  sema_init (&idle_started, 0);
+  thread_create ("idle", PRI_MIN, idle, &idle_started);
+
+  /* Start preemptive thread scheduling. */
   intr_enable ();
+
+  /* Wait for the idle thread to initialize idle_thread. */
+  sema_down (&idle_started);
 }
 
-/* Called by the timer interrupt handler at each timer tick to
-   update statistics. */
+/* Called by the timer interrupt handler at each timer tick.
+   Thus, this function runs in an external interrupt context. */
void
 thread_tick (void)
 {
   struct thread *t = thread_current ();
+
+  /* Update statistics. */
   if (t == idle_thread)
     idle_ticks++;
 #ifdef USERPROG
@@ -106,6 +137,10 @@ thread_tick (void)
 #endif
   else
     kernel_ticks++;
+
+  /* Enforce preemption. */
+  if (++thread_ticks >= TIME_SLICE)
+    intr_yield_on_return ();
 }
 
 /* Prints thread statistics. */
@@ -118,13 +153,15 @@ thread_print_stats (void)
 
 /* Creates a new kernel thread named NAME with the given initial
    PRIORITY, which executes FUNCTION passing AUX as the argument,
-   and adds it to the ready queue.  If thread_start() has been
-   called, then the new thread may be scheduled before
-   thread_create() returns.  It could even exit before
-   thread_create() returns.  Use a semaphore or some other form
-   of synchronization if you need to ensure ordering.  Returns
-   the thread identifier for the new thread, or TID_ERROR if
-   creation fails.
+   and adds it to the ready queue.  Returns the thread identifier
+   for the new thread, or TID_ERROR if creation fails.
+
+   If thread_start() has been called, then the new thread may be
+   scheduled before thread_create() returns.  It could even exit
+   before thread_create() returns.  Contrariwise, the original
+   thread may run for any amount of time before the new thread is
+   scheduled.  Use a semaphore or some other form of
+   synchronization if you need to ensure ordering.
 
    The code provided sets the new thread's `priority' member to
    PRIORITY, but no actual priority scheduling is implemented.
@@ -266,28 +303,93 @@ thread_yield (void)
   ASSERT (!intr_context ());
 
   old_level = intr_disable ();
-  list_push_back (&ready_list, &cur->elem);
+  if (cur != idle_thread)
+    list_push_back (&ready_list, &cur->elem);
   cur->status = THREAD_READY;
   schedule ();
   intr_set_level (old_level);
 }
+
+/* Sets the current thread's priority to NEW_PRIORITY. */
+void
+thread_set_priority (int new_priority)
+{
+  thread_current ()->priority = new_priority;
+}
+
+/* Returns the current thread's priority. */
+int
+thread_get_priority (void)
+{
+  return thread_current ()->priority;
+}
+
+/* Sets the current thread's nice value to NICE. */
+void
+thread_set_nice (int nice UNUSED)
+{
+  /* Not yet implemented. */
+}
+
+/* Returns the current thread's nice value. */
+int
+thread_get_nice (void)
+{
+  /* Not yet implemented. */
+  return 0;
+}
+
+/* Returns 100 times the system load average. */
+int
+thread_get_load_avg (void)
+{
+  /* Not yet implemented. */
+  return 0;
+}
+
+/* Returns 100 times the current thread's recent_cpu value. */
+int
+thread_get_recent_cpu (void)
+{
+  /* Not yet implemented. */
+  return 0;
+}
 
-/* Idle thread.  Executes when no other thread is ready to run. */
+/* Idle thread.  Executes when no other thread is ready to run.
+
+   The idle thread is initially put on the ready list by
+   thread_start().  It will be scheduled once initially, at which
+   point it initializes idle_thread, "up"s the semaphore passed
+   to it to enable thread_start() to continue, and immediately
+   blocks.  After that, the idle thread never appears in the
+   ready list.  It is returned by next_thread_to_run() as a
+   special case when the ready list is empty. */
 static void
-idle (void *aux UNUSED)
+idle (void *idle_started_ UNUSED)
 {
+  struct semaphore *idle_started = idle_started_;
   idle_thread = thread_current ();
+  sema_up (idle_started);
 
   for (;;)
     {
       /* Let someone else run. */
       intr_disable ();
       thread_block ();
-      intr_enable ();
 
-      /* Use CPU `hlt' instruction to wait for interrupt.
-         See [IA32-v2a] "HLT" and [IA32-v3] 7.7. */
-      asm ("hlt");
+      /* Re-enable interrupts and wait for the next one.
+
+         The `sti' instruction disables interrupts until the
+         completion of the next instruction, so these two
+         instructions are executed atomically.  This atomicity is
+         important; otherwise, an interrupt could be handled
+         between re-enabling interrupts and waiting for the next
+         one to occur, wasting as much as one clock tick worth of
+         time.
+
+         See [IA32-v2a] "HLT", [IA32-v2b] "STI", and [IA32-v3a]
+         7.11.1 "HLT Instruction". */
+      asm volatile ("sti; hlt" : : : "memory");
     }
 }
 
@@ -312,13 +414,13 @@ running_thread (void)
      down to the start of a page.  Because `struct thread' is
      always at the beginning of a page and the stack pointer is
     somewhere in the middle, this locates the current thread. */
-  asm ("mov %0, %%esp" : "=g" (esp));
+  asm ("mov %%esp, %0" : "=g" (esp));
   return pg_round_down (esp);
 }
 
 /* Returns true if T appears to point to a valid thread. */
 static bool
-is_thread (struct thread *t)
+is_thread (struct thread *t) 
 {
   return t != NULL && t->magic == THREAD_MAGIC;
 }
@@ -393,6 +495,9 @@ schedule_tail (struct thread *prev)
   /* Mark us as running. */
   cur->status = THREAD_RUNNING;
 
+  /* Start new time slice. */
+  thread_ticks = 0;
+
 #ifdef USERPROG
   /* Activate the new address space. */
   process_activate ();
@@ -400,12 +505,13 @@
 
   /* If the thread we switched from is dying, destroy its struct
      thread.  This must happen late so that thread_exit() doesn't
-     pull the rug out from under itself. */
-  if (prev != NULL && prev->status == THREAD_DYING)
+     pull the rug out from under itself.  (We don't free
+     initial_thread because its memory was not obtained via
+     palloc().) */
+  if (prev != NULL && prev->status == THREAD_DYING && prev != initial_thread)
     {
       ASSERT (prev != cur);
-      if (prev != initial_thread)
-        palloc_free_page (prev);
+      palloc_free_page (prev);
     }
 }
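
Notes on the patch (the sketches below are illustrative user-space code,
not part of the patch or of Pintos).

The handshake added between thread_start() and idle() is a general
pattern: the starter blocks on a zero-initialized semaphore until the
new thread has published its identity, which is why idle_thread is
guaranteed to be set once sema_down() returns.  Here is a minimal
sketch of the same pattern using POSIX threads and semaphores as
stand-ins for Pintos's sema_init()/sema_up()/sema_down():

/* Starter/new-thread handshake, POSIX version.  Compile with
   -pthread.  Everything here is a stand-in; none of it is Pintos
   code. */
#include <pthread.h>
#include <semaphore.h>
#include <stdio.h>

static pthread_t idle_thread;        /* Published by the new thread. */

static void *
idle (void *idle_started_)
{
  sem_t *idle_started = idle_started_;
  idle_thread = pthread_self ();     /* Publish identity first... */
  sem_post (idle_started);           /* ...then wake the starter. */
  return NULL;
}

int
main (void)
{
  sem_t idle_started;
  pthread_t tid;

  sem_init (&idle_started, 0, 0);    /* Value 0: first wait blocks. */
  pthread_create (&tid, NULL, idle, &idle_started);

  /* Mirrors sema_down (&idle_started) in thread_start(): reading
     idle_thread is safe only after this returns. */
  sem_wait (&idle_started);
  printf ("idle_thread set correctly: %d\n",
          pthread_equal (idle_thread, tid) != 0);

  pthread_join (tid, NULL);
  sem_destroy (&idle_started);
  return 0;
}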
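
The preemption accounting is split across two hunks: thread_tick()
increments thread_ticks on every timer interrupt and requests a yield
once the count reaches TIME_SLICE, and schedule_tail() zeroes the
counter so each incoming thread starts a fresh slice.  A standalone
sketch of that round-robin accounting follows; tick_expires_slice() is
a hypothetical stand-in for the point where the patch calls
intr_yield_on_return() in interrupt context:

/* Simulated 4-tick round-robin slices.  Plain C, no Pintos calls. */
#include <stdbool.h>
#include <stdio.h>

#define TIME_SLICE 4            /* # of timer ticks per thread, as above. */
static unsigned thread_ticks;   /* # of timer ticks since last yield. */

/* Runs once per simulated timer tick, like thread_tick(); returns
   true when the running thread's slice has expired (where the patch
   would call intr_yield_on_return()). */
static bool
tick_expires_slice (void)
{
  return ++thread_ticks >= TIME_SLICE;
}

int
main (void)
{
  int running = 0;              /* Id of the simulated running thread. */

  for (int tick = 1; tick <= 12; tick++)
    if (tick_expires_slice ())
      {
        thread_ticks = 0;       /* As schedule_tail() does on a switch. */
        running = (running + 1) % 3;
        printf ("tick %2d: slice expired, now running thread %d\n",
                tick, running);
      }
  return 0;
}

Run, this preempts at ticks 4, 8, and 12, cycling through three
simulated threads.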
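
The one-line change in running_thread() fixes the AT&T-syntax operand
order so the asm reads %esp into `esp' rather than clobbering the
stack pointer.  The trick it serves deserves a worked example: each
struct thread sits at the base of the 4 kiB page that also holds its
kernel stack, so masking the low 12 bits off any stack address
recovers the thread.  A user-space sketch of pg_round_down() under
that layout (fake_thread and the aligned_alloc()'d "page" are
stand-ins, not Pintos structures):

/* Recovering a page-base struct from a pointer into the page. */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define PGSIZE 4096                /* Page size, as in Pintos. */

struct fake_thread                 /* Stands in for struct thread. */
  {
    unsigned magic;                /* Mirrors thread.h's overflow check. */
    char name[16];                 /* Thread name. */
  };

/* Rounds ADDR down to the nearest page boundary, like
   pg_round_down() in threads/vaddr.h. */
static void *
pg_round_down (const void *addr)
{
  return (void *) ((uintptr_t) addr & ~(uintptr_t) (PGSIZE - 1));
}

int
main (void)
{
  /* One page: the struct at its base, the "stack" above it. */
  uint8_t *page = aligned_alloc (PGSIZE, PGSIZE);
  if (page == NULL)
    return 1;

  struct fake_thread *t = (struct fake_thread *) page;
  strcpy (t->name, "main");

  /* Any address within the page, e.g. a simulated stack pointer,
     rounds down to the struct at the page's start. */
  uint8_t *fake_esp = page + PGSIZE - 64;
  struct fake_thread *found = pg_round_down (fake_esp);

  printf ("recovered thread: %s\n", found->name);   /* Prints "main". */
  free (page);
  return 0;
}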