1 diff -Nur ../../src/devices/timer.c src/devices/timer.c
2 --- ../../src/devices/timer.c 2008-08-27 08:04:38.000000000 -0400
3 +++ src/devices/timer.c 2008-08-27 12:42:33.000000000 -0400
5 Initialized by timer_calibrate(). */
6 static unsigned loops_per_tick;
8 +/* Threads waiting in timer_sleep(). */
9 +static struct list wait_list;
11 static intr_handler_func timer_interrupt;
12 static bool too_many_loops (unsigned loops);
13 static void busy_wait (int64_t loops);
15 outb (0x40, count >> 8);
17 intr_register_ext (0x20, timer_interrupt, "8254 Timer");
19 + list_init (&wait_list);
22 /* Calibrates loops_per_tick, used to implement brief delays. */
24 return timer_ticks () - then;
27 +/* Compares two threads based on their wake-up times. */
29 +compare_threads_by_wakeup_time (const struct list_elem *a_,
30 + const struct list_elem *b_,
33 + const struct thread *a = list_entry (a_, struct thread, timer_elem);
34 + const struct thread *b = list_entry (b_, struct thread, timer_elem);
36 + return a->wakeup_time < b->wakeup_time;
39 /* Sleeps for approximately TICKS timer ticks. Interrupts must
42 timer_sleep (int64_t ticks)
44 - int64_t start = timer_ticks ();
45 + struct thread *t = thread_current ();
47 + /* Schedule our wake-up time. */
48 + t->wakeup_time = timer_ticks () + ticks;
50 + /* Atomically insert the current thread into the wait list. */
51 ASSERT (intr_get_level () == INTR_ON);
52 - while (timer_elapsed (start) < ticks)
55 + list_insert_ordered (&wait_list, &t->timer_elem,
56 + compare_threads_by_wakeup_time, NULL);
60 + sema_down (&t->timer_sema);
63 /* Sleeps for approximately MS milliseconds. Interrupts must be
69 + while (!list_empty (&wait_list))
71 + struct thread *t = list_entry (list_front (&wait_list),
72 + struct thread, timer_elem);
73 + if (ticks < t->wakeup_time)
75 + sema_up (&t->timer_sema);
76 + thread_yield_to_higher_priority ();
77 + list_pop_front (&wait_list);
81 /* Returns true if LOOPS iterations waits for more than one timer
82 diff -Nur ../../src/tests/Make.tests src/tests/Make.tests
83 --- ../../src/tests/Make.tests 2007-01-11 22:07:10.000000000 -0500
84 +++ src/tests/Make.tests 2008-08-27 13:00:45.000000000 -0400
86 # Prevent an environment variable VERBOSE from surprising us.
89 +# Append the utils directory to PATH so we fall back to the pintos script
90 +# in the source tree unless the user has placed a different script in their PATH.
91 +export PATH := $(PATH):$(SRCDIR)/utils
93 TESTCMD = pintos -v -k -T $(TIMEOUT)
94 TESTCMD += $(SIMULATOR)
95 TESTCMD += $(PINTOSOPTS)
96 diff -Nur ../../src/threads/fixed-point.h src/threads/fixed-point.h
97 --- ../../src/threads/fixed-point.h 1969-12-31 19:00:00.000000000 -0500
98 +++ src/threads/fixed-point.h 2008-08-27 12:42:33.000000000 -0400
100 +#ifndef THREADS_FIXED_POINT_H
101 +#define THREADS_FIXED_POINT_H
106 +#define FIX_BITS 32 /* Total bits per fixed-point number. */
107 +#define FIX_P 16 /* Number of integer bits. */
108 +#define FIX_Q 16 /* Number of fractional bits. */
109 +#define FIX_F (1 << FIX_Q) /* pow(2, FIX_Q). */
111 +#define FIX_MIN_INT (-FIX_MAX_INT) /* Smallest representable integer. */
112 +#define FIX_MAX_INT ((1 << FIX_P) - 1) /* Largest representable integer. */
114 +/* A fixed-point number. */
121 +/* Returns a fixed-point number with F as its internal value. */
122 +static inline fixed_point_t
130 +/* Returns fixed-point number corresponding to integer N. */
131 +static inline fixed_point_t
134 + ASSERT (n >= FIX_MIN_INT && n <= FIX_MAX_INT);
135 + return __mk_fix (n * FIX_F);
138 +/* Returns fixed-point number corresponding to N divided by D. */
139 +static inline fixed_point_t
140 +fix_frac (int n, int d)
143 + ASSERT (n / d >= FIX_MIN_INT && n / d <= FIX_MAX_INT);
144 + return __mk_fix ((long long) n * FIX_F / d);
147 +/* Returns X rounded to the nearest integer. */
149 +fix_round (fixed_point_t x)
151 + return (x.f + FIX_F / 2) / FIX_F;
154 +/* Returns X truncated down to the nearest integer. */
156 +fix_trunc (fixed_point_t x)
158 + return x.f / FIX_F;
161 +/* Returns X + Y. */
162 +static inline fixed_point_t
163 +fix_add (fixed_point_t x, fixed_point_t y)
165 + return __mk_fix (x.f + y.f);
168 +/* Returns X - Y. */
169 +static inline fixed_point_t
170 +fix_sub (fixed_point_t x, fixed_point_t y)
172 + return __mk_fix (x.f - y.f);
175 +/* Returns X * Y. */
176 +static inline fixed_point_t
177 +fix_mul (fixed_point_t x, fixed_point_t y)
179 + return __mk_fix ((long long) x.f * y.f / FIX_F);
182 +/* Returns X * N. */
183 +static inline fixed_point_t
184 +fix_scale (fixed_point_t x, int n)
187 + return __mk_fix (x.f * n);
190 +/* Returns X / Y. */
191 +static inline fixed_point_t
192 +fix_div (fixed_point_t x, fixed_point_t y)
194 + return __mk_fix ((long long) x.f * FIX_F / y.f);
197 +/* Returns X / N. */
198 +static inline fixed_point_t
199 +fix_unscale (fixed_point_t x, int n)
202 + return __mk_fix (x.f / n);
205 +/* Returns 1 / X. */
206 +static inline fixed_point_t
207 +fix_inv (fixed_point_t x)
209 + return fix_div (fix_int (1), x);
212 +/* Returns -1 if X < Y, 0 if X == Y, 1 if X > Y. */
214 +fix_compare (fixed_point_t x, fixed_point_t y)
216 + return x.f < y.f ? -1 : x.f > y.f;
219 +#endif /* threads/fixed-point.h */
220 diff -Nur ../../src/threads/synch.c src/threads/synch.c
221 --- ../../src/threads/synch.c 2006-07-20 22:05:37.000000000 -0400
222 +++ src/threads/synch.c 2008-08-27 12:42:33.000000000 -0400
223 @@ -113,10 +113,28 @@
224 ASSERT (sema != NULL);
226 old_level = intr_disable ();
227 - if (!list_empty (&sema->waiters))
228 - thread_unblock (list_entry (list_pop_front (&sema->waiters),
229 - struct thread, elem));
231 + if (!list_empty (&sema->waiters))
233 + /* Find highest-priority waiting thread. */
234 + struct thread *max = list_entry (list_max (&sema->waiters,
235 + thread_lower_priority, NULL),
236 + struct thread, elem);
238 + /* Remove `max' from wait list and unblock. */
239 + list_remove (&max->elem);
240 + thread_unblock (max);
242 + /* Yield to a higher-priority thread, if we're running in a
243 + context where it makes sense to do so.
245 + Kind of a funny interaction with donation here.
246 + We only support donation for locks, and locks turn off
247 + interrupts before calling us, so we automatically don't
248 + do the yield here, delegating to lock_release(). */
249 + if (!intr_context () && old_level == INTR_ON)
250 + thread_yield_to_higher_priority ();
252 intr_set_level (old_level);
255 @@ -192,12 +210,33 @@
257 lock_acquire (struct lock *lock)
259 + enum intr_level old_level;
261 ASSERT (lock != NULL);
262 ASSERT (!intr_context ());
263 ASSERT (!lock_held_by_current_thread (lock));
265 + old_level = intr_disable ();
267 + if (lock->holder != NULL)
269 + /* Donate our priority to the thread holding the lock.
270 + First, update the data structures. */
271 + struct thread *donor = thread_current ();
272 + donor->want_lock = lock;
273 + donor->donee = lock->holder;
274 + list_push_back (&lock->holder->donors, &donor->donor_elem);
276 + /* Now implement the priority donation itself
277 + by recomputing the donee's priority
278 + and cascading the donation as far as necessary. */
279 + if (donor->donee != NULL)
280 + thread_recompute_priority (donor->donee);
283 sema_down (&lock->semaphore);
284 lock->holder = thread_current ();
285 + intr_set_level (old_level);
288 /* Tries to acquire LOCK and returns true if successful or false
289 @@ -228,11 +267,39 @@
291 lock_release (struct lock *lock)
293 + enum intr_level old_level;
294 + struct thread *t = thread_current ();
295 + struct list_elem *e;
297 ASSERT (lock != NULL);
298 ASSERT (lock_held_by_current_thread (lock));
300 + old_level = intr_disable ();
302 + /* Return donations to threads that want this lock. */
303 + for (e = list_begin (&t->donors); e != list_end (&t->donors); )
305 + struct thread *donor = list_entry (e, struct thread, donor_elem);
306 + if (donor->want_lock == lock)
308 + donor->donee = NULL;
309 + e = list_remove (e);
315 + /* Release lock. */
317 sema_up (&lock->semaphore);
319 + /* Recompute our priority based on our remaining donations,
320 + then yield to a higher-priority ready thread if one now
322 + thread_recompute_priority (t);
323 + thread_yield_to_higher_priority ();
325 + intr_set_level (old_level);
328 /* Returns true if the current thread holds LOCK, false
331 struct list_elem elem; /* List element. */
332 struct semaphore semaphore; /* This semaphore. */
333 + struct thread *thread; /* Thread. */
336 /* Initializes condition variable COND. A condition variable
337 @@ -295,12 +363,26 @@
338 ASSERT (lock_held_by_current_thread (lock));
340 sema_init (&waiter.semaphore, 0);
341 + waiter.thread = thread_current ();
342 list_push_back (&cond->waiters, &waiter.elem);
344 sema_down (&waiter.semaphore);
349 +semaphore_elem_lower_priority (const struct list_elem *a_,
350 + const struct list_elem *b_,
353 + const struct semaphore_elem *a
354 + = list_entry (a_, struct semaphore_elem, elem);
355 + const struct semaphore_elem *b
356 + = list_entry (b_, struct semaphore_elem, elem);
358 + return a->thread->priority < b->thread->priority;
361 /* If any threads are waiting on COND (protected by LOCK), then
362 this function signals one of them to wake up from its wait.
363 LOCK must be held before calling this function.
365 ASSERT (lock_held_by_current_thread (lock));
367 if (!list_empty (&cond->waiters))
368 - sema_up (&list_entry (list_pop_front (&cond->waiters),
369 - struct semaphore_elem, elem)->semaphore);
371 + struct list_elem *max
372 + = list_max (&cond->waiters, semaphore_elem_lower_priority, NULL);
374 + sema_up (&list_entry (max, struct semaphore_elem, elem)->semaphore);
378 /* Wakes up all threads, if any, waiting on COND (protected by
379 diff -Nur ../../src/threads/thread.c src/threads/thread.c
380 --- ../../src/threads/thread.c 2008-08-27 13:06:01.000000000 -0400
381 +++ src/threads/thread.c 2008-08-27 13:14:12.000000000 -0400
385 #include "threads/flags.h"
386 +#include "threads/init.h"
387 #include "threads/interrupt.h"
388 #include "threads/intr-stubs.h"
389 #include "threads/palloc.h"
390 #include "threads/switch.h"
391 #include "threads/synch.h"
392 +#include "devices/timer.h"
393 #include "threads/vaddr.h"
395 #include "userprog/process.h"
398 #define TIME_SLICE 4 /* # of timer ticks to give each thread. */
399 static unsigned thread_ticks; /* # of timer ticks since last yield. */
400 +static fixed_point_t load_avg; /* Load average. */
402 /* If false (default), use round-robin scheduler.
403 If true, use multi-level feedback queue scheduler.
405 lock_init (&tid_lock);
406 list_init (&ready_list);
407 list_init (&all_list);
408 + load_avg = fix_int (0);
410 /* Set up a thread structure for the running thread. */
411 initial_thread = running_thread ();
413 sema_down (&idle_started);
416 +/* Adjusts a thread's recent_cpu based on the load factor
417 + and recomputes its priority. */
419 +adjust_recent_cpu (struct thread *t, void *aux)
421 + fixed_point_t load_factor = *(fixed_point_t *)aux;
423 + t->recent_cpu = fix_add (fix_mul (load_factor, t->recent_cpu),
424 + fix_int (t->nice));
425 + thread_recompute_priority (t);
428 /* Called by the timer interrupt handler at each timer tick.
429 Thus, this function runs in an external interrupt context. */
435 - /* Enforce preemption. */
436 - if (++thread_ticks >= TIME_SLICE)
437 - intr_yield_on_return ();
440 + /* Update load average. */
441 + if (timer_ticks () % TIMER_FREQ == 0)
443 + size_t ready_threads = list_size (&ready_list);
444 + if (t != idle_thread)
447 + load_avg = fix_add (fix_mul (fix_frac (59, 60), load_avg),
448 + fix_mul (fix_frac (1, 60), fix_int (ready_threads)));
451 + /* Increment running process's recent_cpu. */
452 + if (t != idle_thread)
453 + t->recent_cpu = fix_add (t->recent_cpu, fix_int (1));
455 + /* Update recent_cpu and thread priorities once per second. */
456 + if (timer_ticks () % TIMER_FREQ == 0)
458 + fixed_point_t twice_load = fix_scale (load_avg, 2);
459 + fixed_point_t twice_load_plus_1 = fix_add (twice_load, fix_int (1));
460 + fixed_point_t load_factor = fix_div (twice_load, twice_load_plus_1);
462 + thread_foreach (adjust_recent_cpu, &load_factor);
466 + /* Switch threads if time slice has expired. */
467 + if (++thread_ticks >= TIME_SLICE)
470 + thread_recompute_priority (thread_current ());
471 + intr_yield_on_return ();
475 /* Prints thread statistics. */
476 @@ -166,12 +214,13 @@
477 thread_create (const char *name, int priority,
478 thread_func *function, void *aux)
480 + struct thread *cur = thread_current ();
482 struct kernel_thread_frame *kf;
483 struct switch_entry_frame *ef;
484 struct switch_threads_frame *sf;
486 enum intr_level old_level;
489 ASSERT (function != NULL);
494 /* Initialize thread. */
495 - init_thread (t, name, priority);
496 + init_thread (t, name, thread_mlfqs ? cur->priority : priority);
497 tid = t->tid = allocate_tid ();
498 + t->nice = cur->nice;
499 + t->recent_cpu = cur->recent_cpu;
501 /* Prepare thread for first run by initializing its stack.
502 Do this atomically so intermediate values for the 'stack'
505 /* Add to run queue. */
507 + if (priority > thread_get_priority ())
516 +/* Returns true if A has lower priority than B,
517 + within a list of threads. */
519 +thread_lower_priority (const struct list_elem *a_,
520 + const struct list_elem *b_,
523 + const struct thread *a = list_entry (a_, struct thread, elem);
524 + const struct thread *b = list_entry (b_, struct thread, elem);
526 + return a->priority < b->priority;
529 /* Transitions a blocked thread T to the ready-to-run state.
530 This is an error if T is not blocked. (Use thread_yield() to
531 make the running thread ready.)
532 @@ -339,11 +405,26 @@
536 -/* Sets the current thread's priority to NEW_PRIORITY. */
538 +recompute_priority_chain (void)
540 + enum intr_level old_level = intr_disable ();
541 + thread_recompute_priority (thread_current ());
542 + thread_yield_to_higher_priority ();
543 + intr_set_level (old_level);
546 +/* Sets the current thread's priority to PRIORITY. */
548 -thread_set_priority (int new_priority)
549 +thread_set_priority (int priority)
551 - thread_current ()->priority = new_priority;
554 + struct thread *t = thread_current ();
556 + t->normal_priority = priority;
557 + recompute_priority_chain ();
561 /* Returns the current thread's priority. */
562 @@ -355,33 +436,98 @@
564 /* Sets the current thread's nice value to NICE. */
566 -thread_set_nice (int nice UNUSED)
567 +thread_set_nice (int nice)
569 - /* Not yet implemented. */
570 + thread_current ()->nice = nice;
571 + recompute_priority_chain ();
574 /* Returns the current thread's nice value. */
576 thread_get_nice (void)
578 - /* Not yet implemented. */
580 + return thread_current ()->nice;
583 -/* Returns 100 times the system load average. */
585 thread_get_load_avg (void)
587 - /* Not yet implemented. */
590 + enum intr_level level = intr_disable ();
591 + load_avg_int = fix_round (fix_scale (load_avg, 100));
592 + intr_set_level (level);
593 + return load_avg_int;
596 -/* Returns 100 times the current thread's recent_cpu value. */
598 thread_get_recent_cpu (void)
600 - /* Not yet implemented. */
602 + int recent_cpu_int;
603 + enum intr_level level = intr_disable ();
604 + recent_cpu_int = fix_round (fix_scale (thread_current ()->recent_cpu, 100));
605 + intr_set_level (level);
606 + return recent_cpu_int;
609 +/* Returns true if thread A has lower priority than thread B,
610 + within a list of donors. */
612 +donated_lower_priority (const struct list_elem *a_,
613 + const struct list_elem *b_,
616 + const struct thread *a = list_entry (a_, struct thread, donor_elem);
617 + const struct thread *b = list_entry (b_, struct thread, donor_elem);
619 + return a->priority < b->priority;
622 +/* Recomputes T's priority in terms of its normal priority and
623 + its donors' priorities, if any,
624 + and cascades the donation as necessary. */
626 +thread_recompute_priority (struct thread *t)
628 + int old_priority = t->priority;
629 + int default_priority = t->normal_priority;
630 + int donation = PRI_MIN;
633 + default_priority = PRI_MAX - fix_round (t->recent_cpu) / 4 - t->nice * 2;
634 + if (default_priority < PRI_MIN)
635 + default_priority = PRI_MIN;
636 + else if (default_priority > PRI_MAX)
637 + default_priority = PRI_MAX;
639 + if (!list_empty (&t->donors))
640 + donation = list_entry (list_max (&t->donors, donated_lower_priority, NULL),
641 + struct thread, donor_elem)->priority;
642 + t->priority = donation > default_priority ? donation : default_priority;
643 + if (t->priority > old_priority && t->donee != NULL)
644 + thread_recompute_priority (t->donee);
647 +/* If the ready list contains a thread with a higher priority,
650 +thread_yield_to_higher_priority (void)
652 + enum intr_level old_level = intr_disable ();
653 + if (!list_empty (&ready_list))
655 + struct thread *cur = thread_current ();
656 + struct thread *max = list_entry (list_max (&ready_list,
657 + thread_lower_priority, NULL),
658 + struct thread, elem);
659 + if (max->priority > cur->priority)
661 + if (intr_context ())
662 + intr_yield_on_return ();
667 + intr_set_level (old_level);
670 /* Idle thread. Executes when no other thread is ready to run.
672 t->status = THREAD_BLOCKED;
673 strlcpy (t->name, name, sizeof t->name);
674 t->stack = (uint8_t *) t + PGSIZE;
675 - t->priority = priority;
676 + t->priority = t->normal_priority = priority;
677 t->magic = THREAD_MAGIC;
678 + sema_init (&t->timer_sema, 0);
679 + list_init (&t->donors);
680 list_push_back (&all_list, &t->allelem);
685 if (list_empty (&ready_list))
688 - return list_entry (list_pop_front (&ready_list), struct thread, elem);
692 + = list_entry (list_max (&ready_list, thread_lower_priority, NULL),
693 + struct thread, elem);
694 + list_remove (&max->elem);
699 /* Completes a thread switch by activating the new thread's page
700 diff -Nur ../../src/threads/thread.h src/threads/thread.h
701 --- ../../src/threads/thread.h 2008-08-27 08:45:26.000000000 -0400
702 +++ src/threads/thread.h 2008-08-27 12:45:31.000000000 -0400
707 +#include "threads/synch.h"
708 +#include "threads/fixed-point.h"
710 /* States in a thread's life cycle. */
713 enum thread_status status; /* Thread state. */
714 char name[16]; /* Name (for debugging purposes). */
715 uint8_t *stack; /* Saved stack pointer. */
716 - int priority; /* Priority. */
718 + /* Scheduler data. */
719 + int priority; /* Priority, including donations. */
720 + int normal_priority; /* Priority, without donations. */
721 + struct list donors; /* Threads donating priority to us. */
722 + struct list_elem donor_elem; /* Element in donors list. */
723 + struct thread *donee; /* Thread we're donating to. */
724 + struct lock *want_lock; /* Lock we're waiting to acquire. */
725 + int nice; /* Niceness. */
726 + fixed_point_t recent_cpu; /* Recent amount of CPU time. */
727 struct list_elem allelem; /* List element for all threads list. */
729 /* Shared between thread.c and synch.c. */
730 struct list_elem elem; /* List element. */
733 + int64_t wakeup_time; /* Time to wake this thread up. */
734 + struct list_elem timer_elem; /* Element in wait_list. */
735 + struct semaphore timer_sema; /* Semaphore. */
738 /* Owned by userprog/process.c. */
739 uint32_t *pagedir; /* Page directory. */
742 void thread_exit (void) NO_RETURN;
743 void thread_yield (void);
744 +void thread_yield_to_higher_priority (void);
745 +void thread_recompute_priority (struct thread *);
746 +bool thread_lower_priority (const struct list_elem *, const struct list_elem *,
749 /* Performs some operation on thread t, given auxiliary data AUX. */
750 typedef void thread_action_func (struct thread *t, void *aux);