1 diff -u src/devices/timer.c~ src/devices/timer.c
2 --- src/devices/timer.c~ 2005-05-24 15:52:43.000000000 -0700
3 +++ src/devices/timer.c 2005-05-26 15:19:20.000000000 -0700
4 @@ -23,6 +23,9 @@ static volatile int64_t ticks;
5 Initialized by timer_calibrate(). */
6 static unsigned loops_per_tick;
8 +/* Threads waiting in timer_sleep(). */
9 +static struct list wait_list;
11 static intr_handler_func timer_interrupt;
12 static bool too_many_loops (unsigned loops);
13 static void busy_wait (int64_t loops);
14 @@ -43,6 +46,8 @@ timer_init (void)
15 outb (0x40, count >> 8);
17 intr_register_ext (0x20, timer_interrupt, "8254 Timer");
19 + list_init (&wait_list);
22 /* Calibrates loops_per_tick, used to implement brief delays. */
23 @@ -87,15 +92,36 @@ timer_elapsed (int64_t then)
24 return timer_ticks () - then;
27 +/* Compares two threads based on their wake-up times. */
29 +compare_threads_by_wakeup_time (const struct list_elem *a_,
30 + const struct list_elem *b_,
33 + const struct thread *a = list_entry (a_, struct thread, timer_elem);
34 + const struct thread *b = list_entry (b_, struct thread, timer_elem);
36 + return a->wakeup_time < b->wakeup_time;
39 /* Suspends execution for approximately TICKS timer ticks. */
41 timer_sleep (int64_t ticks)
43 - int64_t start = timer_ticks ();
44 + struct thread *t = thread_current ();
46 + /* Schedule our wake-up time. */
47 + t->wakeup_time = timer_ticks () + ticks;
49 + /* Atomically insert the current thread into the wait list. */
50 ASSERT (intr_get_level () == INTR_ON);
51 - while (timer_elapsed (start) < ticks)
54 + list_insert_ordered (&wait_list, &t->timer_elem,
55 + compare_threads_by_wakeup_time, NULL);
59 + sema_down (&t->timer_sema);
62 /* Suspends execution for approximately MS milliseconds. */
63 @@ -132,6 +158,16 @@ timer_interrupt (struct intr_frame *args
68 + while (!list_empty (&wait_list))
70 + struct thread *t = list_entry (list_front (&wait_list),
71 + struct thread, timer_elem);
72 + if (ticks < t->wakeup_time)
74 + sema_up (&t->timer_sema);
75 + list_pop_front (&wait_list);
79 /* Returns true if LOOPS iterations waits for more than one timer
80 diff -u src/threads/fixed-point.h~ src/threads/fixed-point.h
81 --- src/threads/fixed-point.h~ 1969-12-31 16:00:00.000000000 -0800
82 +++ src/threads/fixed-point.h 2005-06-02 14:11:21.000000000 -0700
84 +#ifndef THREADS_FIXED_POINT_H
85 +#define THREADS_FIXED_POINT_H
90 +#define FIX_BITS 32 /* Total bits per fixed-point number. */
91 +#define FIX_P 16 /* Number of integer bits. */
92 +#define FIX_Q 16 /* Number of fractional bits. */
93 +#define FIX_F (1 << FIX_Q) /* pow(2, FIX_Q). */
95 +#define FIX_MIN_INT (-FIX_MAX_INT) /* Smallest representable integer. */
96 +#define FIX_MAX_INT ((1 << FIX_P) - 1) /* Largest representable integer. */
98 +/* A fixed-point number. */
105 +/* Returns a fixed-point number with F as its internal value. */
106 +static inline fixed_point_t
114 +/* Returns fixed-point number corresponding to integer N. */
115 +static inline fixed_point_t
118 + ASSERT (n >= FIX_MIN_INT && n <= FIX_MAX_INT);
119 + return __mk_fix (n * FIX_F);
122 +/* Returns fixed-point number corresponding to N divided by D. */
123 +static inline fixed_point_t
124 +fix_frac (int n, int d)
127 + ASSERT (n / d >= FIX_MIN_INT && n / d <= FIX_MAX_INT);
128 + return __mk_fix ((long long) n * FIX_F / d);
131 +/* Returns X rounded to the nearest integer. */
133 +fix_round (fixed_point_t x)
135 + return (x.f + FIX_F / 2) / FIX_F;
138 +/* Returns X truncated down to the nearest integer. */
140 +fix_trunc (fixed_point_t x)
142 + return x.f / FIX_F;
145 +/* Returns X + Y. */
146 +static inline fixed_point_t
147 +fix_add (fixed_point_t x, fixed_point_t y)
149 + return __mk_fix (x.f + y.f);
152 +/* Returns X - Y. */
153 +static inline fixed_point_t
154 +fix_sub (fixed_point_t x, fixed_point_t y)
156 + return __mk_fix (x.f - y.f);
159 +/* Returns X * Y. */
160 +static inline fixed_point_t
161 +fix_mul (fixed_point_t x, fixed_point_t y)
163 + return __mk_fix ((long long) x.f * y.f / FIX_F);
166 +/* Returns X * N. */
167 +static inline fixed_point_t
168 +fix_scale (fixed_point_t x, int n)
171 + return __mk_fix (x.f * n);
174 +/* Returns X / Y. */
175 +static inline fixed_point_t
176 +fix_div (fixed_point_t x, fixed_point_t y)
178 + return __mk_fix ((long long) x.f * FIX_F / y.f);
181 +/* Returns X / N. */
182 +static inline fixed_point_t
183 +fix_unscale (fixed_point_t x, int n)
186 + return __mk_fix (x.f / n);
189 +/* Returns 1 / X. */
190 +static inline fixed_point_t
191 +fix_inv (fixed_point_t x)
193 + return fix_div (fix_int (1), x);
196 +/* Returns -1 if X < Y, 0 if X == Y, 1 if X > Y. */
198 +fix_compare (fixed_point_t x, fixed_point_t y)
200 + return x.f < y.f ? -1 : x.f > y.f;
203 +#endif /* threads/fixed-point.h */
204 diff -u src/threads/synch.c~ src/threads/synch.c
205 --- src/threads/synch.c~ 2005-05-24 20:47:28.000000000 -0700
206 +++ src/threads/synch.c 2005-06-02 14:20:15.000000000 -0700
207 @@ -114,10 +114,28 @@ sema_up (struct semaphore *sema)
208 ASSERT (sema != NULL);
210 old_level = intr_disable ();
211 - if (!list_empty (&sema->waiters))
212 - thread_unblock (list_entry (list_pop_front (&sema->waiters),
213 - struct thread, elem));
215 + if (!list_empty (&sema->waiters))
217 + /* Find highest-priority waiting thread. */
218 + struct thread *max = list_entry (list_max (&sema->waiters,
219 + thread_lower_priority, NULL),
220 + struct thread, elem);
222 + /* Remove `max' from wait list and unblock. */
223 + list_remove (&max->elem);
224 + thread_unblock (max);
226 + /* Yield to a higher-priority thread, if we're running in a
227 + context where it makes sense to do so.
229 + Kind of a funny interaction with donation here.
230 + We only support donation for locks, and locks turn off
231 + interrupts before calling us, so we automatically don't
232 + do the yield here, delegating to lock_release(). */
233 + if (!intr_context () && old_level == INTR_ON)
234 + thread_yield_to_higher_priority ();
236 intr_set_level (old_level);
239 @@ -200,6 +218,23 @@ lock_acquire (struct lock *lock)
240 ASSERT (!lock_held_by_current_thread (lock));
242 old_level = intr_disable ();
244 + if (lock->holder != NULL)
246 + /* Donate our priority to the thread holding the lock.
247 + First, update the data structures. */
248 + struct thread *donor = thread_current ();
249 + donor->want_lock = lock;
250 + donor->donee = lock->holder;
251 + list_push_back (&lock->holder->donors, &donor->donor_elem);
253 + /* Now implement the priority donation itself
254 + by recomputing the donee's priority
255 + and cascading the donation as far as necessary. */
256 + if (donor->donee != NULL)
257 + thread_recompute_priority (donor->donee);
260 sema_down (&lock->semaphore);
261 lock->holder = thread_current ();
262 intr_set_level (old_level);
263 @@ -238,13 +273,37 @@ void
264 lock_release (struct lock *lock)
266 enum intr_level old_level;
267 + struct thread *t = thread_current ();
268 + struct list_elem *e;
270 ASSERT (lock != NULL);
271 ASSERT (lock_held_by_current_thread (lock));
273 old_level = intr_disable ();
275 + /* Return donations to threads that want this lock. */
276 + for (e = list_begin (&t->donors); e != list_end (&t->donors); )
278 + struct thread *donor = list_entry (e, struct thread, donor_elem);
279 + if (donor->want_lock == lock)
281 + donor->donee = NULL;
282 + e = list_remove (e);
288 + /* Release lock. */
290 sema_up (&lock->semaphore);
292 + /* Recompute our priority based on our remaining donations,
293 + then yield to a higher-priority ready thread if one now
295 + thread_recompute_priority (t);
296 + thread_yield_to_higher_priority ();
298 intr_set_level (old_level);
301 @@ -264,6 +323,7 @@ struct semaphore_elem
303 struct list_elem elem; /* List element. */
304 struct semaphore semaphore; /* This semaphore. */
305 + struct thread *thread; /* Thread. */
308 /* Initializes condition variable COND. A condition variable
309 @@ -308,12 +368,26 @@ cond_wait (struct condition *cond, struc
310 ASSERT (lock_held_by_current_thread (lock));
312 sema_init (&waiter.semaphore, 0);
313 + waiter.thread = thread_current ();
314 list_push_back (&cond->waiters, &waiter.elem);
316 sema_down (&waiter.semaphore);
321 +semaphore_elem_lower_priority (const struct list_elem *a_,
322 + const struct list_elem *b_,
325 + const struct semaphore_elem *a
326 + = list_entry (a_, struct semaphore_elem, elem);
327 + const struct semaphore_elem *b
328 + = list_entry (b_, struct semaphore_elem, elem);
330 + return a->thread->priority < b->thread->priority;
333 /* If any threads are waiting on COND (protected by LOCK), then
334 this function signals one of them to wake up from its wait.
335 LOCK must be held before calling this function.
336 @@ -330,8 +404,12 @@ cond_signal (struct condition *cond, str
337 ASSERT (lock_held_by_current_thread (lock));
339 if (!list_empty (&cond->waiters))
340 - sema_up (&list_entry (list_pop_front (&cond->waiters),
341 - struct semaphore_elem, elem)->semaphore);
343 + struct list_elem *max
344 + = list_max (&cond->waiters, semaphore_elem_lower_priority, NULL);
346 + sema_up (&list_entry (max, struct semaphore_elem, elem)->semaphore);
350 /* Wakes up all threads, if any, waiting on COND (protected by
351 diff -u src/threads/thread.c~ src/threads/thread.c
352 --- src/threads/thread.c~ 2005-06-02 14:35:12.000000000 -0700
353 +++ src/threads/thread.c 2005-06-02 14:55:56.000000000 -0700
357 #include "threads/flags.h"
358 +#include "threads/init.h"
359 #include "threads/interrupt.h"
360 #include "threads/intr-stubs.h"
361 #include "threads/mmu.h"
362 #include "threads/palloc.h"
363 #include "threads/switch.h"
364 #include "threads/synch.h"
365 +#include "devices/timer.h"
367 #include "userprog/process.h"
370 that are ready to run but not actually running. */
371 static struct list ready_list;
373 +/* List of all threads. */
374 +static struct list all_threads_list;
377 static struct thread *idle_thread;
379 @@ -49,6 +54,7 @@ static long long user_ticks; /* # of
381 #define TIME_SLICE 4 /* # of timer ticks to give each thread. */
382 static unsigned thread_ticks; /* # of timer ticks since last yield. */
383 +static fixed_point_t load_avg; /* Load average. */
385 static void kernel_thread (thread_func *, void *aux);
387 @@ -79,12 +85,15 @@ thread_init (void)
389 lock_init (&tid_lock);
390 list_init (&ready_list);
391 + list_init (&all_threads_list);
392 + load_avg = fix_int (0);
394 /* Set up a thread structure for the running thread. */
395 initial_thread = running_thread ();
396 init_thread (initial_thread, "main", PRI_DEFAULT);
397 initial_thread->status = THREAD_RUNNING;
398 initial_thread->tid = allocate_tid ();
399 + list_push_front (&all_threads_list, &initial_thread->all_elem);
402 /* Starts preemptive thread scheduling by enabling interrupts.
403 @@ -101,9 +110,48 @@ void
407 - /* Enforce preemption. */
408 - if (++thread_ticks >= TIME_SLICE)
409 - intr_yield_on_return ();
412 + /* Update load average. */
413 + if (timer_ticks () % TIMER_FREQ == 0)
415 + size_t ready_threads = list_size (&ready_list);
416 + if (t != idle_thread)
419 + load_avg = fix_add (fix_mul (fix_frac (59, 60), load_avg),
420 + fix_mul (fix_frac (1, 60), fix_int (ready_threads)));
423 + /* Increment running process's recent_cpu. */
424 + if (t != idle_thread)
425 + t->recent_cpu = fix_add (t->recent_cpu, fix_int (1));
427 + /* Update recent_cpu and thread priorities once per second. */
428 + if (timer_ticks () % TIMER_FREQ == 0)
430 + struct list_elem *e;
431 + fixed_point_t twice_load = fix_scale (load_avg, 2);
432 + fixed_point_t twice_load_plus_1 = fix_add (twice_load, fix_int (1));
433 + fixed_point_t load_factor = fix_div (twice_load, twice_load_plus_1);
434 + for (e = list_begin (&all_threads_list);
435 + e != list_end (&all_threads_list); e = list_next (e))
437 + struct thread *t = list_entry (e, struct thread, all_elem);
438 + t->recent_cpu = fix_add (fix_mul (load_factor, t->recent_cpu),
439 + fix_int (t->nice));
440 + thread_recompute_priority (t);
445 + /* Switch threads if time slice has expired. */
446 + if (++thread_ticks >= TIME_SLICE)
449 + thread_recompute_priority (thread_current ());
450 + intr_yield_on_return ();
454 /* Prints thread statistics. */
455 @@ -143,10 +191,12 @@ tid_t
456 thread_create (const char *name, int priority,
457 thread_func *function, void *aux)
459 + struct thread *cur = thread_current ();
461 struct kernel_thread_frame *kf;
462 struct switch_entry_frame *ef;
463 struct switch_threads_frame *sf;
464 + enum intr_level old_level;
467 ASSERT (function != NULL);
468 @@ -157,8 +207,10 @@ thread_create (const char *name, int pri
471 /* Initialize thread. */
472 - init_thread (t, name, priority);
473 + init_thread (t, name, enable_mlfqs ? cur->priority : priority);
474 tid = t->tid = allocate_tid ();
475 + t->nice = cur->nice;
476 + t->recent_cpu = cur->recent_cpu;
478 /* Stack frame for kernel_thread(). */
479 kf = alloc_frame (t, sizeof *kf);
480 @@ -174,8 +226,15 @@ thread_create (const char *name, int pri
481 sf = alloc_frame (t, sizeof *sf);
482 sf->eip = switch_entry;
484 + /* Add to list of all threads. */
485 + old_level = intr_disable ();
486 + list_push_front (&all_threads_list, &t->all_elem);
487 + intr_set_level (old_level);
489 /* Add to run queue. */
491 + if (priority > thread_get_priority ())
496 @@ -196,6 +255,19 @@ thread_block (void)
500 +/* Returns true if A has lower priority than B,
501 + within a list of threads. */
503 +thread_lower_priority (const struct list_elem *a_,
504 + const struct list_elem *b_,
507 + const struct thread *a = list_entry (a_, struct thread, elem);
508 + const struct thread *b = list_entry (b_, struct thread, elem);
510 + return a->priority < b->priority;
513 /* Transitions a blocked thread T to the ready-to-run state.
514 This is an error if T is not blocked. (Use thread_yield() to
515 make the running thread ready.) */
516 @@ -260,6 +332,7 @@ thread_exit (void)
517 /* Just set our status to dying and schedule another process.
518 We will be destroyed during the call to schedule_tail(). */
520 + list_remove (&thread_current ()->all_elem);
521 thread_current ()->status = THREAD_DYING;
524 @@ -282,11 +355,26 @@ thread_yield (void)
525 intr_set_level (old_level);
528 -/* Sets the current thread's priority to NEW_PRIORITY. */
530 +recompute_priority_chain (void)
532 + enum intr_level old_level = intr_disable ();
533 + thread_recompute_priority (thread_current ());
534 + thread_yield_to_higher_priority ();
535 + intr_set_level (old_level);
538 +/* Sets the current thread's priority to PRIORITY. */
540 -thread_set_priority (int new_priority)
541 +thread_set_priority (int priority)
543 - thread_current ()->priority = new_priority;
546 + struct thread *t = thread_current ();
548 + t->normal_priority = priority;
549 + recompute_priority_chain ();
553 /* Returns the current thread's priority. */
554 @@ -298,33 +386,93 @@ thread_get_priority (void)
556 /* Sets the current thread's nice value to NICE. */
558 -thread_set_nice (int nice UNUSED)
559 +thread_set_nice (int nice)
561 - /* Not yet implemented. */
562 + thread_current ()->nice = nice;
563 + recompute_priority_chain ();
566 /* Returns the current thread's nice value. */
568 thread_get_nice (void)
570 - /* Not yet implemented. */
572 + return thread_current ()->nice;
575 -/* Returns 100 times the system load average. */
577 thread_get_load_avg (void)
579 - /* Not yet implemented. */
582 + enum intr_level level = intr_disable ();
583 + load_avg_int = fix_round (fix_scale (load_avg, 100));
584 + intr_set_level (level);
585 + return load_avg_int;
588 -/* Returns 100 times the current thread's recent_cpu value. */
590 thread_get_recent_cpu (void)
592 - /* Not yet implemented. */
594 + int recent_cpu_int;
595 + enum intr_level level = intr_disable ();
596 + recent_cpu_int = fix_round (fix_scale (thread_current ()->recent_cpu, 100));
597 + intr_set_level (level);
598 + return recent_cpu_int;
601 +/* Returns true if thread A has lower priority than thread B,
602 + within a list of donors. */
604 +donated_lower_priority (const struct list_elem *a_,
605 + const struct list_elem *b_,
608 + const struct thread *a = list_entry (a_, struct thread, donor_elem);
609 + const struct thread *b = list_entry (b_, struct thread, donor_elem);
611 + return a->priority < b->priority;
614 +/* Recomputes T's priority in terms of its normal priority and
615 + its donors' priorities, if any,
616 + and cascades the donation as necessary. */
618 +thread_recompute_priority (struct thread *t)
620 + int old_priority = t->priority;
621 + int default_priority = t->normal_priority;
622 + int donation = PRI_MIN;
625 + default_priority = PRI_MAX - fix_round (t->recent_cpu) / 4 - t->nice * 2;
626 + if (default_priority < PRI_MIN)
627 + default_priority = PRI_MIN;
628 + else if (default_priority > PRI_MAX)
629 + default_priority = PRI_MAX;
631 + if (!list_empty (&t->donors))
632 + donation = list_entry (list_max (&t->donors, donated_lower_priority, NULL),
633 + struct thread, donor_elem)->priority;
634 + t->priority = donation > default_priority ? donation : default_priority;
635 + if (t->priority > old_priority && t->donee != NULL)
636 + thread_recompute_priority (t->donee);
639 +/* If the ready list contains a thread with a higher priority,
642 +thread_yield_to_higher_priority (void)
644 + enum intr_level old_level = intr_disable ();
645 + if (!list_empty (&ready_list))
647 + struct thread *cur = thread_current ();
648 + struct thread *max = list_entry (list_max (&ready_list,
649 + thread_lower_priority, NULL),
650 + struct thread, elem);
651 + if (max->priority > cur->priority)
654 + intr_set_level (old_level);
657 /* Idle thread. Executes when no other thread is ready to run. */
658 @@ -399,8 +547,10 @@ init_thread (struct thread *t, const cha
659 t->status = THREAD_BLOCKED;
660 strlcpy (t->name, name, sizeof t->name);
661 t->stack = (uint8_t *) t + PGSIZE;
662 - t->priority = priority;
663 + t->priority = t->normal_priority = priority;
664 t->magic = THREAD_MAGIC;
665 + sema_init (&t->timer_sema, 0);
666 + list_init (&t->donors);
669 /* Allocates a SIZE-byte frame at the top of thread T's stack and
670 @@ -426,8 +576,14 @@ next_thread_to_run (void)
672 if (list_empty (&ready_list))
675 - return list_entry (list_pop_front (&ready_list), struct thread, elem);
679 + = list_entry (list_max (&ready_list, thread_lower_priority, NULL),
680 + struct thread, elem);
681 + list_remove (&max->elem);
686 /* Completes a thread switch by activating the new thread's page
687 diff -u src/threads/thread.h~ src/threads/thread.h
688 --- src/threads/thread.h~ 2005-06-02 14:32:36.000000000 -0700
689 +++ src/threads/thread.h 2005-06-02 14:38:46.000000000 -0700
694 +#include "threads/synch.h"
695 +#include "threads/fixed-point.h"
697 /* States in a thread's life cycle. */
699 @@ -87,11 +89,26 @@ struct thread
700 enum thread_status status; /* Thread state. */
701 char name[16]; /* Name (for debugging purposes). */
702 uint8_t *stack; /* Saved stack pointer. */
703 - int priority; /* Priority. */
705 + /* Scheduler data. */
706 + int priority; /* Priority, including donations. */
707 + int normal_priority; /* Priority, without donations. */
708 + struct list donors; /* Threads donating priority to us. */
709 + struct list_elem donor_elem; /* Element in donors list. */
710 + struct thread *donee; /* Thread we're donating to. */
711 + struct lock *want_lock; /* Lock we're waiting to acquire. */
712 + int nice; /* Niceness. */
713 + fixed_point_t recent_cpu; /* Recent amount of CPU time. */
714 + struct list_elem all_elem; /* all_threads list element. */
716 /* Shared between thread.c and synch.c. */
717 struct list_elem elem; /* List element. */
720 + int64_t wakeup_time; /* Time to wake this thread up. */
721 + struct list_elem timer_elem; /* Element in timer_wait_list. */
722 + struct semaphore timer_sema; /* Semaphore. */
725 /* Owned by userprog/process.c. */
726 uint32_t *pagedir; /* Page directory. */
727 @@ -118,6 +135,10 @@ const char *thread_name (void);
729 void thread_exit (void) NO_RETURN;
730 void thread_yield (void);
731 +void thread_yield_to_higher_priority (void);
732 +void thread_recompute_priority (struct thread *);
733 +bool thread_lower_priority (const struct list_elem *, const struct list_elem *,
736 int thread_get_priority (void);
737 void thread_set_priority (int);