1 diff --git a/src/Makefile.build b/src/Makefile.build
2 index e997d27..1057023 100644
3 --- a/src/Makefile.build
4 +++ b/src/Makefile.build
5 @@ -62,7 +62,9 @@ userprog_SRC += userprog/gdt.c # GDT initialization.
6 userprog_SRC += userprog/tss.c # TSS management.
8 # No virtual memory code yet.
9 -#vm_SRC = vm/file.c # Some file.
15 filesys_SRC = filesys/filesys.c # Filesystem core.
16 diff --git a/src/devices/timer.c b/src/devices/timer.c
17 index 3ed10ad..1b14cdc 100644
18 --- a/src/devices/timer.c
19 +++ b/src/devices/timer.c
20 @@ -206,7 +206,6 @@ timer_interrupt (struct intr_frame *args UNUSED)
21 if (ticks < t->wakeup_time)
23 sema_up (&t->timer_sema);
24 - thread_yield_to_higher_priority ();
25 list_pop_front (&wait_list);
28 diff --git a/src/threads/fixed-point.h b/src/threads/fixed-point.h
29 deleted file mode 100644
30 index ca88f97..0000000
31 --- a/src/threads/fixed-point.h
34 -#ifndef THREADS_FIXED_POINT_H
35 -#define THREADS_FIXED_POINT_H
40 -#define FIX_BITS 32 /* Total bits per fixed-point number. */
41 -#define FIX_P 16 /* Number of integer bits. */
42 -#define FIX_Q 16 /* Number of fractional bits. */
43 -#define FIX_F (1 << FIX_Q) /* pow(2, FIX_Q). */
45 -#define FIX_MIN_INT (-FIX_MAX_INT) /* Smallest representable integer. */
46 -#define FIX_MAX_INT ((1 << FIX_P) - 1) /* Largest representable integer. */
48 -/* A fixed-point number. */
55 -/* Returns a fixed-point number with F as its internal value. */
56 -static inline fixed_point_t
64 -/* Returns fixed-point number corresponding to integer N. */
65 -static inline fixed_point_t
68 - ASSERT (n >= FIX_MIN_INT && n <= FIX_MAX_INT);
69 - return __mk_fix (n * FIX_F);
72 -/* Returns fixed-point number corresponding to N divided by D. */
73 -static inline fixed_point_t
74 -fix_frac (int n, int d)
77 - ASSERT (n / d >= FIX_MIN_INT && n / d <= FIX_MAX_INT);
78 - return __mk_fix ((long long) n * FIX_F / d);
81 -/* Returns X rounded to the nearest integer. */
83 -fix_round (fixed_point_t x)
85 - return (x.f + FIX_F / 2) / FIX_F;
88 -/* Returns X truncated down to the nearest integer. */
90 -fix_trunc (fixed_point_t x)
96 -static inline fixed_point_t
97 -fix_add (fixed_point_t x, fixed_point_t y)
99 - return __mk_fix (x.f + y.f);
102 -/* Returns X - Y. */
103 -static inline fixed_point_t
104 -fix_sub (fixed_point_t x, fixed_point_t y)
106 - return __mk_fix (x.f - y.f);
109 -/* Returns X * Y. */
110 -static inline fixed_point_t
111 -fix_mul (fixed_point_t x, fixed_point_t y)
113 - return __mk_fix ((long long) x.f * y.f / FIX_F);
116 -/* Returns X * N. */
117 -static inline fixed_point_t
118 -fix_scale (fixed_point_t x, int n)
121 - return __mk_fix (x.f * n);
124 -/* Returns X / Y. */
125 -static inline fixed_point_t
126 -fix_div (fixed_point_t x, fixed_point_t y)
128 - return __mk_fix ((long long) x.f * FIX_F / y.f);
131 -/* Returns X / N. */
132 -static inline fixed_point_t
133 -fix_unscale (fixed_point_t x, int n)
136 - return __mk_fix (x.f / n);
139 -/* Returns 1 / X. */
140 -static inline fixed_point_t
141 -fix_inv (fixed_point_t x)
143 - return fix_div (fix_int (1), x);
146 -/* Returns -1 if X < Y, 0 if X == Y, 1 if X > Y. */
148 -fix_compare (fixed_point_t x, fixed_point_t y)
150 - return x.f < y.f ? -1 : x.f > y.f;
153 -#endif /* threads/fixed-point.h */
154 diff --git a/src/threads/init.c b/src/threads/init.c
155 index cebec2c..9729de7 100644
156 --- a/src/threads/init.c
157 +++ b/src/threads/init.c
159 #include "filesys/filesys.h"
160 #include "filesys/fsutil.h"
162 +#include "vm/frame.h"
163 +#include "vm/swap.h"
165 /* Page directory with kernel mappings only. */
166 uint32_t *init_page_dir;
167 @@ -127,6 +129,9 @@ main (void)
168 filesys_init (format_filesys);
174 printf ("Boot complete.\n");
176 /* Run actions specified on kernel command line. */
177 diff --git a/src/threads/interrupt.c b/src/threads/interrupt.c
178 index e3b90dc..f897882 100644
179 --- a/src/threads/interrupt.c
180 +++ b/src/threads/interrupt.c
181 @@ -360,6 +360,8 @@ intr_handler (struct intr_frame *frame)
182 in_external_intr = true;
183 yield_on_return = false;
186 + thread_current ()->user_esp = frame->esp;
188 /* Invoke the interrupt's handler. */
189 handler = intr_handlers[frame->vec_no];
190 diff --git a/src/threads/synch.c b/src/threads/synch.c
191 index 53197bb..317c68a 100644
192 --- a/src/threads/synch.c
193 +++ b/src/threads/synch.c
194 @@ -113,28 +113,10 @@ sema_up (struct semaphore *sema)
195 ASSERT (sema != NULL);
197 old_level = intr_disable ();
199 if (!list_empty (&sema->waiters))
201 - /* Find highest-priority waiting thread. */
202 - struct thread *max = list_entry (list_max (&sema->waiters,
203 - thread_lower_priority, NULL),
204 - struct thread, elem);
206 - /* Remove `max' from wait list and unblock. */
207 - list_remove (&max->elem);
208 - thread_unblock (max);
210 - /* Yield to a higher-priority thread, if we're running in a
211 - context where it makes sense to do so.
213 - Kind of a funny interaction with donation here.
214 - We only support donation for locks, and locks turn off
215 - interrupts before calling us, so we automatically don't
216 - do the yield here, delegating to lock_release(). */
217 - if (!intr_context () && old_level == INTR_ON)
218 - thread_yield_to_higher_priority ();
220 + thread_unblock (list_entry (list_pop_front (&sema->waiters),
221 + struct thread, elem));
223 intr_set_level (old_level);
226 @@ -210,33 +192,12 @@ lock_init (struct lock *lock)
228 lock_acquire (struct lock *lock)
230 - enum intr_level old_level;
232 ASSERT (lock != NULL);
233 ASSERT (!intr_context ());
234 ASSERT (!lock_held_by_current_thread (lock));
236 - old_level = intr_disable ();
238 - if (lock->holder != NULL)
240 - /* Donate our priority to the thread holding the lock.
241 - First, update the data structures. */
242 - struct thread *donor = thread_current ();
243 - donor->want_lock = lock;
244 - donor->donee = lock->holder;
245 - list_push_back (&lock->holder->donors, &donor->donor_elem);
247 - /* Now implement the priority donation itself
248 - by recomputing the donee's priority
249 - and cascading the donation as far as necessary. */
250 - if (donor->donee != NULL)
251 - thread_recompute_priority (donor->donee);
254 sema_down (&lock->semaphore);
255 lock->holder = thread_current ();
256 - intr_set_level (old_level);
259 /* Tries to acquires LOCK and returns true if successful or false
260 @@ -267,39 +228,11 @@ lock_try_acquire (struct lock *lock)
262 lock_release (struct lock *lock)
264 - enum intr_level old_level;
265 - struct thread *t = thread_current ();
266 - struct list_elem *e;
268 ASSERT (lock != NULL);
269 ASSERT (lock_held_by_current_thread (lock));
271 - old_level = intr_disable ();
273 - /* Return donations to threads that want this lock. */
274 - for (e = list_begin (&t->donors); e != list_end (&t->donors); )
276 - struct thread *donor = list_entry (e, struct thread, donor_elem);
277 - if (donor->want_lock == lock)
279 - donor->donee = NULL;
280 - e = list_remove (e);
286 - /* Release lock. */
288 sema_up (&lock->semaphore);
290 - /* Recompute our priority based on our remaining donations,
291 - then yield to a higher-priority ready thread if one now
293 - thread_recompute_priority (t);
294 - thread_yield_to_higher_priority ();
296 - intr_set_level (old_level);
299 /* Returns true if the current thread holds LOCK, false
300 @@ -318,7 +251,6 @@ struct semaphore_elem
302 struct list_elem elem; /* List element. */
303 struct semaphore semaphore; /* This semaphore. */
304 - struct thread *thread; /* Thread. */
307 /* Initializes condition variable COND. A condition variable
308 @@ -363,26 +295,12 @@ cond_wait (struct condition *cond, struct lock *lock)
309 ASSERT (lock_held_by_current_thread (lock));
311 sema_init (&waiter.semaphore, 0);
312 - waiter.thread = thread_current ();
313 list_push_back (&cond->waiters, &waiter.elem);
315 sema_down (&waiter.semaphore);
320 -semaphore_elem_lower_priority (const struct list_elem *a_,
321 - const struct list_elem *b_,
324 - const struct semaphore_elem *a
325 - = list_entry (a_, struct semaphore_elem, elem);
326 - const struct semaphore_elem *b
327 - = list_entry (b_, struct semaphore_elem, elem);
329 - return a->thread->priority < b->thread->priority;
332 /* If any threads are waiting on COND (protected by LOCK), then
333 this function signals one of them to wake up from its wait.
334 LOCK must be held before calling this function.
335 @@ -399,12 +317,8 @@ cond_signal (struct condition *cond, struct lock *lock UNUSED)
336 ASSERT (lock_held_by_current_thread (lock));
338 if (!list_empty (&cond->waiters))
340 - struct list_elem *max
341 - = list_max (&cond->waiters, semaphore_elem_lower_priority, NULL);
343 - sema_up (&list_entry (max, struct semaphore_elem, elem)->semaphore);
345 + sema_up (&list_entry (list_pop_front (&cond->waiters),
346 + struct semaphore_elem, elem)->semaphore);
349 /* Wakes up all threads, if any, waiting on COND (protected by
350 diff --git a/src/threads/thread.c b/src/threads/thread.c
351 index 7c19894..31e2ba6 100644
352 --- a/src/threads/thread.c
353 +++ b/src/threads/thread.c
357 #include "threads/flags.h"
358 -#include "threads/init.h"
359 #include "threads/interrupt.h"
360 #include "threads/intr-stubs.h"
361 #include "threads/palloc.h"
362 #include "threads/switch.h"
363 #include "threads/synch.h"
364 -#include "devices/timer.h"
365 #include "threads/vaddr.h"
367 #include "userprog/process.h"
368 @@ -56,7 +54,6 @@ static long long user_ticks; /* # of timer ticks in user programs. */
370 #define TIME_SLICE 4 /* # of timer ticks to give each thread. */
371 static unsigned thread_ticks; /* # of timer ticks since last yield. */
372 -static fixed_point_t load_avg; /* Load average. */
374 /* If false (default), use round-robin scheduler.
375 If true, use multi-level feedback queue scheduler.
376 @@ -68,7 +65,8 @@ static void kernel_thread (thread_func *, void *aux);
377 static void idle (void *aux UNUSED);
378 static struct thread *running_thread (void);
379 static struct thread *next_thread_to_run (void);
380 -static void init_thread (struct thread *, const char *name, int priority);
381 +static void init_thread (struct thread *, const char *name, int priority,
383 static bool is_thread (struct thread *) UNUSED;
384 static void *alloc_frame (struct thread *, size_t size);
385 static void schedule (void);
386 @@ -96,13 +94,11 @@ thread_init (void)
387 lock_init (&tid_lock);
388 list_init (&ready_list);
389 list_init (&all_list);
390 - load_avg = fix_int (0);
392 /* Set up a thread structure for the running thread. */
393 initial_thread = running_thread ();
394 - init_thread (initial_thread, "main", PRI_DEFAULT);
395 + init_thread (initial_thread, "main", PRI_DEFAULT, 0);
396 initial_thread->status = THREAD_RUNNING;
397 - initial_thread->tid = allocate_tid ();
400 /* Starts preemptive thread scheduling by enabling interrupts.
401 @@ -122,18 +118,6 @@ thread_start (void)
402 sema_down (&idle_started);
405 -/* Adjust recent CPU of a thread based on load factor
406 - and recompute its priority. */
408 -adjust_recent_cpu (struct thread *t, void *aux)
410 - fixed_point_t load_factor = *(fixed_point_t *)aux;
412 - t->recent_cpu = fix_add (fix_mul (load_factor, t->recent_cpu),
413 - fix_int (t->nice));
414 - thread_recompute_priority (t);
417 /* Called by the timer interrupt handler at each timer tick.
418 Thus, this function runs in an external interrupt context. */
420 @@ -151,41 +135,9 @@ thread_tick (void)
426 - /* Update load average. */
427 - if (timer_ticks () % TIMER_FREQ == 0)
429 - size_t ready_threads = list_size (&ready_list);
430 - if (t != idle_thread)
433 - load_avg = fix_add (fix_mul (fix_frac (59, 60), load_avg),
434 - fix_mul (fix_frac (1, 60), fix_int (ready_threads)));
437 - /* Increment running process's recent_cpu. */
438 - if (t != idle_thread)
439 - t->recent_cpu = fix_add (t->recent_cpu, fix_int (1));
441 - /* Update recent_cpu and thread priorities once per second. */
442 - if (timer_ticks () % TIMER_FREQ == 0)
444 - fixed_point_t twice_load = fix_scale (load_avg, 2);
445 - fixed_point_t twice_load_plus_1 = fix_add (twice_load, fix_int (1));
446 - fixed_point_t load_factor = fix_div (twice_load, twice_load_plus_1);
448 - thread_foreach (adjust_recent_cpu, &load_factor);
452 - /* Switch threads if time slice has expired. */
453 - if (++thread_ticks >= TIME_SLICE)
456 - thread_recompute_priority (thread_current ());
457 - intr_yield_on_return ();
459 + /* Enforce preemption. */
460 + if (++thread_ticks >= TIME_SLICE)
461 + intr_yield_on_return ();
464 /* Prints thread statistics. */
465 @@ -215,7 +167,6 @@ tid_t
466 thread_create (const char *name, int priority,
467 thread_func *function, void *aux)
469 - struct thread *cur = thread_current ();
471 struct kernel_thread_frame *kf;
472 struct switch_entry_frame *ef;
473 @@ -230,10 +181,8 @@ thread_create (const char *name, int priority,
476 /* Initialize thread. */
477 - init_thread (t, name, thread_mlfqs ? cur->priority : priority);
478 - tid = t->tid = allocate_tid ();
479 - t->nice = cur->nice;
480 - t->recent_cpu = cur->recent_cpu;
481 + init_thread (t, name, priority, allocate_tid ());
484 /* Stack frame for kernel_thread(). */
485 kf = alloc_frame (t, sizeof *kf);
486 @@ -252,8 +201,6 @@ thread_create (const char *name, int priority,
488 /* Add to run queue. */
490 - if (priority > thread_get_priority ())
495 @@ -274,19 +221,6 @@ thread_block (void)
499 -/* Returns true if A has lower priority than B,
500 - within a list of threads. */
502 -thread_lower_priority (const struct list_elem *a_,
503 - const struct list_elem *b_,
506 - const struct thread *a = list_entry (a_, struct thread, elem);
507 - const struct thread *b = list_entry (b_, struct thread, elem);
509 - return a->priority < b->priority;
512 /* Transitions a blocked thread T to the ready-to-run state.
513 This is an error if T is not blocked. (Use thread_yield() to
514 make the running thread ready.)
515 @@ -399,26 +333,11 @@ thread_foreach (thread_action_func *func, void *aux)
520 -recompute_priority_chain (void)
522 - enum intr_level old_level = intr_disable ();
523 - thread_recompute_priority (thread_current ());
524 - thread_yield_to_higher_priority ();
525 - intr_set_level (old_level);
528 -/* Sets the current thread's priority to PRIORITY. */
529 +/* Sets the current thread's priority to NEW_PRIORITY. */
531 -thread_set_priority (int priority)
532 +thread_set_priority (int new_priority)
536 - struct thread *t = thread_current ();
538 - t->normal_priority = priority;
539 - recompute_priority_chain ();
541 + thread_current ()->priority = new_priority;
544 /* Returns the current thread's priority. */
545 @@ -430,98 +349,33 @@ thread_get_priority (void)
547 /* Sets the current thread's nice value to NICE. */
549 -thread_set_nice (int nice)
550 +thread_set_nice (int nice UNUSED)
552 - thread_current ()->nice = nice;
553 - recompute_priority_chain ();
554 + /* Not yet implemented. */
557 /* Returns the current thread's nice value. */
559 thread_get_nice (void)
561 - return thread_current ()->nice;
562 + /* Not yet implemented. */
566 +/* Returns 100 times the system load average. */
568 thread_get_load_avg (void)
571 - enum intr_level level = intr_disable ();
572 - load_avg_int = fix_round (fix_scale (load_avg, 100));
573 - intr_set_level (level);
574 - return load_avg_int;
575 + /* Not yet implemented. */
579 +/* Returns 100 times the current thread's recent_cpu value. */
581 thread_get_recent_cpu (void)
583 - int recent_cpu_int;
584 - enum intr_level level = intr_disable ();
585 - recent_cpu_int = fix_round (fix_scale (thread_current ()->recent_cpu, 100));
586 - intr_set_level (level);
587 - return recent_cpu_int;
590 -/* Returns true if thread A has lower priority than thread B,
591 - within a list of donors. */
593 -donated_lower_priority (const struct list_elem *a_,
594 - const struct list_elem *b_,
597 - const struct thread *a = list_entry (a_, struct thread, donor_elem);
598 - const struct thread *b = list_entry (b_, struct thread, donor_elem);
600 - return a->priority < b->priority;
603 -/* Recomputes T's priority in terms of its normal priority and
604 - its donors' priorities, if any,
605 - and cascades the donation as necessary. */
607 -thread_recompute_priority (struct thread *t)
609 - int old_priority = t->priority;
610 - int default_priority = t->normal_priority;
611 - int donation = PRI_MIN;
614 - default_priority = PRI_MAX - fix_round (t->recent_cpu) / 4 - t->nice * 2;
615 - if (default_priority < PRI_MIN)
616 - default_priority = PRI_MIN;
617 - else if (default_priority > PRI_MAX)
618 - default_priority = PRI_MAX;
620 - if (!list_empty (&t->donors))
621 - donation = list_entry (list_max (&t->donors, donated_lower_priority, NULL),
622 - struct thread, donor_elem)->priority;
623 - t->priority = donation > default_priority ? donation : default_priority;
624 - if (t->priority > old_priority && t->donee != NULL)
625 - thread_recompute_priority (t->donee);
628 -/* If the ready list contains a thread with a higher priority,
631 -thread_yield_to_higher_priority (void)
633 - enum intr_level old_level = intr_disable ();
634 - if (!list_empty (&ready_list))
636 - struct thread *cur = thread_current ();
637 - struct thread *max = list_entry (list_max (&ready_list,
638 - thread_lower_priority, NULL),
639 - struct thread, elem);
640 - if (max->priority > cur->priority)
642 - if (intr_context ())
643 - intr_yield_on_return ();
648 - intr_set_level (old_level);
649 + /* Not yet implemented. */
653 /* Idle thread. Executes when no other thread is ready to run.
654 @@ -597,7 +451,7 @@ is_thread (struct thread *t)
655 /* Does basic initialization of T as a blocked thread named
658 -init_thread (struct thread *t, const char *name, int priority)
659 +init_thread (struct thread *t, const char *name, int priority, tid_t tid)
661 enum intr_level old_level;
663 @@ -606,17 +460,22 @@ init_thread (struct thread *t, const char *name, int priority)
664 ASSERT (name != NULL);
666 memset (t, 0, sizeof *t);
668 t->status = THREAD_BLOCKED;
669 strlcpy (t->name, name, sizeof t->name);
670 t->stack = (uint8_t *) t + PGSIZE;
671 - t->priority = t->normal_priority = priority;
672 - list_init (&t->children);
673 + t->priority = priority;
675 t->wait_status = NULL;
676 + list_init (&t->children);
677 + sema_init (&t->timer_sema, 0);
680 + t->bin_file = NULL;
682 + list_init (&t->mappings);
684 t->magic = THREAD_MAGIC;
685 - sema_init (&t->timer_sema, 0);
686 - list_init (&t->donors);
687 old_level = intr_disable ();
688 list_push_back (&all_list, &t->allelem);
689 intr_set_level (old_level);
690 @@ -645,14 +504,8 @@ next_thread_to_run (void)
692 if (list_empty (&ready_list))
697 - = list_entry (list_max (&ready_list, thread_lower_priority, NULL),
698 - struct thread, elem);
699 - list_remove (&max->elem);
703 + return list_entry (list_pop_front (&ready_list), struct thread, elem);
706 /* Completes a thread switch by activating the new thread's page
707 diff --git a/src/threads/thread.h b/src/threads/thread.h
708 index 2c85d88..b9e7b0c 100644
709 --- a/src/threads/thread.h
710 +++ b/src/threads/thread.h
712 #define THREADS_THREAD_H
718 #include "threads/synch.h"
719 -#include "threads/fixed-point.h"
721 /* States in a thread's life cycle. */
723 @@ -89,19 +89,11 @@ struct thread
724 enum thread_status status; /* Thread state. */
725 char name[16]; /* Name (for debugging purposes). */
726 uint8_t *stack; /* Saved stack pointer. */
728 - /* Scheduler data. */
729 - int priority; /* Priority, including donations. */
730 - int normal_priority; /* Priority, without donations. */
731 - struct list donors; /* Threads donating priority to us. */
732 - struct list_elem donor_elem; /* Element in donors list. */
733 - struct thread *donee; /* Thread we're donating to. */
734 - struct lock *want_lock; /* Lock we're waiting to acquire. */
735 - int nice; /* Niceness. */
736 - fixed_point_t recent_cpu; /* Recent amount of CPU time. */
737 + int priority; /* Priority. */
738 struct list_elem allelem; /* List element for all threads list. */
740 /* Owned by process.c. */
741 + int exit_code; /* Exit code. */
742 struct wait_status *wait_status; /* This process's completion status. */
743 struct list children; /* Completion status of children. */
745 @@ -110,18 +102,19 @@ struct thread
748 int64_t wakeup_time; /* Time to wake this thread up. */
749 - struct list_elem timer_elem; /* Element in wait_list. */
750 + struct list_elem timer_elem; /* Element in timer_wait_list. */
751 struct semaphore timer_sema; /* Semaphore. */
755 /* Owned by userprog/process.c. */
756 uint32_t *pagedir; /* Page directory. */
758 - struct file *bin_file; /* Executable. */
759 + struct hash *pages; /* Page table. */
760 + struct file *bin_file; /* The binary executable. */
762 /* Owned by syscall.c. */
763 struct list fds; /* List of file descriptors. */
764 + struct list mappings; /* Memory-mapped files. */
765 int next_handle; /* Next handle value. */
766 + void *user_esp; /* User's stack pointer. */
768 /* Owned by thread.c. */
769 unsigned magic; /* Detects stack overflow. */
770 @@ -165,10 +158,6 @@ const char *thread_name (void);
772 void thread_exit (void) NO_RETURN;
773 void thread_yield (void);
774 -void thread_yield_to_higher_priority (void);
775 -void thread_recompute_priority (struct thread *);
776 -bool thread_lower_priority (const struct list_elem *, const struct list_elem *,
779 /* Performs some operation on thread t, given auxiliary data AUX. */
780 typedef void thread_action_func (struct thread *t, void *aux);
781 diff --git a/src/userprog/exception.c b/src/userprog/exception.c
782 index 3682478..9cfcf93 100644
783 --- a/src/userprog/exception.c
784 +++ b/src/userprog/exception.c
786 #include "userprog/gdt.h"
787 #include "threads/interrupt.h"
788 #include "threads/thread.h"
789 +#include "vm/page.h"
791 /* Number of page faults processed. */
792 static long long page_fault_cnt;
793 @@ -148,17 +149,14 @@ page_fault (struct intr_frame *f)
794 write = (f->error_code & PF_W) != 0;
795 user = (f->error_code & PF_U) != 0;
797 - /* Handle bad dereferences from system call implementations. */
799 + /* Allow the pager to try to handle it. */
800 + if (user && not_present)
802 - f->eip = (void (*) (void)) f->eax;
804 + if (!page_in (fault_addr))
809 - /* To implement virtual memory, delete the rest of the function
810 - body, and replace it with code that brings in the page to
811 - which fault_addr refers. */
812 printf ("Page fault at %p: %s error %s page in %s context.\n",
814 not_present ? "not present" : "rights violation",
815 diff --git a/src/userprog/pagedir.c b/src/userprog/pagedir.c
816 index a6a87b8..eed41b5 100644
817 --- a/src/userprog/pagedir.c
818 +++ b/src/userprog/pagedir.c
819 @@ -35,15 +35,7 @@ pagedir_destroy (uint32_t *pd)
820 ASSERT (pd != init_page_dir);
821 for (pde = pd; pde < pd + pd_no (PHYS_BASE); pde++)
824 - uint32_t *pt = pde_get_pt (*pde);
827 - for (pte = pt; pte < pt + PGSIZE / sizeof *pte; pte++)
829 - palloc_free_page (pte_get_page (*pte));
830 - palloc_free_page (pt);
832 + palloc_free_page (pde_get_pt (*pde));
833 palloc_free_page (pd);
836 diff --git a/src/userprog/process.c b/src/userprog/process.c
837 index 06ff27e..7a15814 100644
838 --- a/src/userprog/process.c
839 +++ b/src/userprog/process.c
841 #include "threads/palloc.h"
842 #include "threads/thread.h"
843 #include "threads/vaddr.h"
844 +#include "vm/page.h"
845 +#include "vm/frame.h"
847 static thread_func start_process NO_RETURN;
848 static bool load (const char *cmd_line, void (**eip) (void), void **esp);
849 @@ -58,7 +60,7 @@ process_execute (const char *file_name)
850 sema_down (&exec.load_done);
852 list_push_back (&thread_current ()->children, &exec.wait_status->elem);
858 @@ -95,7 +97,6 @@ start_process (void *exec_)
859 lock_init (&exec->wait_status->lock);
860 exec->wait_status->ref_cnt = 2;
861 exec->wait_status->tid = thread_current ()->tid;
862 - exec->wait_status->exit_code = -1;
863 sema_init (&exec->wait_status->dead, 0);
866 @@ -167,14 +168,13 @@ process_exit (void)
867 struct list_elem *e, *next;
870 - /* Close executable (and allow writes). */
871 - file_close (cur->bin_file);
872 + printf ("%s: exit(%d)\n", cur->name, cur->exit_code);
874 /* Notify parent that we're dead. */
875 if (cur->wait_status != NULL)
877 struct wait_status *cs = cur->wait_status;
878 - printf ("%s: exit(%d)\n", cur->name, cs->exit_code);
879 + cs->exit_code = cur->exit_code;
883 @@ -187,7 +187,13 @@ process_exit (void)
884 next = list_remove (e);
888 + /* Destroy the page hash table. */
891 + /* Close executable (and allow writes). */
892 + file_close (cur->bin_file);
894 /* Destroy the current process's page directory and switch back
895 to the kernel-only page directory. */
897 @@ -313,6 +319,12 @@ load (const char *cmd_line, void (**eip) (void), void **esp)
901 + /* Create page hash table. */
902 + t->pages = malloc (sizeof *t->pages);
903 + if (t->pages == NULL)
905 + hash_init (t->pages, page_hash, page_less, NULL);
907 /* Extract file_name from command line. */
908 while (*cmd_line == ' ')
910 @@ -328,7 +340,7 @@ load (const char *cmd_line, void (**eip) (void), void **esp)
911 printf ("load: %s: open failed\n", file_name);
914 - file_deny_write (file);
915 + file_deny_write (t->bin_file);
917 /* Read and verify executable header. */
918 if (file_read (file, &ehdr, sizeof ehdr) != sizeof ehdr
919 @@ -418,8 +430,6 @@ load (const char *cmd_line, void (**eip) (void), void **esp)
921 /* load() helpers. */
923 -static bool install_page (void *upage, void *kpage, bool writable);
925 /* Checks whether PHDR describes a valid, loadable segment in
926 FILE and returns true if so, false otherwise. */
928 @@ -487,38 +497,22 @@ load_segment (struct file *file, off_t ofs, uint8_t *upage,
929 ASSERT (pg_ofs (upage) == 0);
930 ASSERT (ofs % PGSIZE == 0);
932 - file_seek (file, ofs);
933 while (read_bytes > 0 || zero_bytes > 0)
935 - /* Calculate how to fill this page.
936 - We will read PAGE_READ_BYTES bytes from FILE
937 - and zero the final PAGE_ZERO_BYTES bytes. */
938 size_t page_read_bytes = read_bytes < PGSIZE ? read_bytes : PGSIZE;
939 size_t page_zero_bytes = PGSIZE - page_read_bytes;
941 - /* Get a page of memory. */
942 - uint8_t *kpage = palloc_get_page (PAL_USER);
944 + struct page *p = page_allocate (upage, !writable);
948 - /* Load this page. */
949 - if (file_read (file, kpage, page_read_bytes) != (int) page_read_bytes)
951 - palloc_free_page (kpage);
954 - memset (kpage + page_read_bytes, 0, page_zero_bytes);
956 - /* Add the page to the process's address space. */
957 - if (!install_page (upage, kpage, writable))
958 + if (page_read_bytes > 0)
960 - palloc_free_page (kpage);
963 + p->file_offset = ofs;
964 + p->file_bytes = page_read_bytes;
968 read_bytes -= page_read_bytes;
969 zero_bytes -= page_zero_bytes;
970 + ofs += page_read_bytes;
974 @@ -535,7 +529,7 @@ reverse (int argc, char **argv)
975 argv[argc - 1] = tmp;
980 /* Pushes the SIZE bytes in BUF onto the stack in KPAGE, whose
981 page-relative stack pointer is *OFS, and then adjusts *OFS
982 appropriately. The bytes pushed are rounded to a 32-bit
983 @@ -611,37 +605,19 @@ init_cmd_line (uint8_t *kpage, uint8_t *upage, const char *cmd_line,
985 setup_stack (const char *cmd_line, void **esp)
988 - bool success = false;
990 - kpage = palloc_get_page (PAL_USER | PAL_ZERO);
992 + struct page *page = page_allocate (((uint8_t *) PHYS_BASE) - PGSIZE, false);
995 - uint8_t *upage = ((uint8_t *) PHYS_BASE) - PGSIZE;
996 - if (install_page (upage, kpage, true))
997 - success = init_cmd_line (kpage, upage, cmd_line, esp);
999 - palloc_free_page (kpage);
1000 + page->frame = frame_alloc_and_lock (page);
1001 + if (page->frame != NULL)
1004 + page->read_only = false;
1005 + page->private = false;
1006 + ok = init_cmd_line (page->frame->base, page->addr, cmd_line, esp);
1007 + frame_unlock (page->frame);
1014 -/* Adds a mapping from user virtual address UPAGE to kernel
1015 - virtual address KPAGE to the page table.
1016 - If WRITABLE is true, the user process may modify the page;
1017 - otherwise, it is read-only.
1018 - UPAGE must not already be mapped.
1019 - KPAGE should probably be a page obtained from the user pool
1020 - with palloc_get_page().
1021 - Returns true on success, false if UPAGE is already mapped or
1022 - if memory allocation fails. */
1024 -install_page (void *upage, void *kpage, bool writable)
1026 - struct thread *t = thread_current ();
1028 - /* Verify that there's not already a page at that virtual
1029 - address, then map our page there. */
1030 - return (pagedir_get_page (t->pagedir, upage) == NULL
1031 - && pagedir_set_page (t->pagedir, upage, kpage, writable));
1034 diff --git a/src/userprog/syscall.c b/src/userprog/syscall.c
1035 index ef31316..e6be702 100644
1036 --- a/src/userprog/syscall.c
1037 +++ b/src/userprog/syscall.c
1039 #include "userprog/pagedir.h"
1040 #include "devices/input.h"
1041 #include "devices/shutdown.h"
1042 +#include "filesys/directory.h"
1043 #include "filesys/filesys.h"
1044 #include "filesys/file.h"
1045 #include "threads/interrupt.h"
1047 #include "threads/palloc.h"
1048 #include "threads/thread.h"
1049 #include "threads/vaddr.h"
1050 +#include "vm/page.h"
1053 static int sys_halt (void);
1054 @@ -28,11 +30,12 @@ static int sys_write (int handle, void *usrc_, unsigned size);
1055 static int sys_seek (int handle, unsigned position);
1056 static int sys_tell (int handle);
1057 static int sys_close (int handle);
1058 +static int sys_mmap (int handle, void *addr);
1059 +static int sys_munmap (int mapping);
1061 static void syscall_handler (struct intr_frame *);
1062 static void copy_in (void *, const void *, size_t);
1064 -/* Serializes file system operations. */
1066 static struct lock fs_lock;
1069 @@ -71,6 +74,8 @@ syscall_handler (struct intr_frame *f)
1070 {2, (syscall_function *) sys_seek},
1071 {1, (syscall_function *) sys_tell},
1072 {1, (syscall_function *) sys_close},
1073 + {2, (syscall_function *) sys_mmap},
1074 + {1, (syscall_function *) sys_munmap},
1077 const struct syscall *sc;
1078 @@ -93,39 +98,6 @@ syscall_handler (struct intr_frame *f)
1079 f->eax = sc->func (args[0], args[1], args[2]);
1082 -/* Returns true if UADDR is a valid, mapped user address,
1083 - false otherwise. */
1085 -verify_user (const void *uaddr)
1087 - return (uaddr < PHYS_BASE
1088 - && pagedir_get_page (thread_current ()->pagedir, uaddr) != NULL);
1091 -/* Copies a byte from user address USRC to kernel address DST.
1092 - USRC must be below PHYS_BASE.
1093 - Returns true if successful, false if a segfault occurred. */
1095 -get_user (uint8_t *dst, const uint8_t *usrc)
1098 - asm ("movl $1f, %%eax; movb %2, %%al; movb %%al, %0; 1:"
1099 - : "=m" (*dst), "=&a" (eax) : "m" (*usrc));
1103 -/* Writes BYTE to user address UDST.
1104 - UDST must be below PHYS_BASE.
1105 - Returns true if successful, false if a segfault occurred. */
1107 -put_user (uint8_t *udst, uint8_t byte)
1110 - asm ("movl $1f, %%eax; movb %b2, %0; 1:"
1111 - : "=m" (*udst), "=&a" (eax) : "q" (byte));
1115 /* Copies SIZE bytes from user address USRC to kernel address
1117 Call thread_exit() if any of the user accesses are invalid. */
1118 @@ -134,10 +106,22 @@ copy_in (void *dst_, const void *usrc_, size_t size)
1120 uint8_t *dst = dst_;
1121 const uint8_t *usrc = usrc_;
1123 - for (; size > 0; size--, dst++, usrc++)
1124 - if (usrc >= (uint8_t *) PHYS_BASE || !get_user (dst, usrc))
1129 + size_t chunk_size = PGSIZE - pg_ofs (usrc);
1130 + if (chunk_size > size)
1131 + chunk_size = size;
1133 + if (!page_lock (usrc, false))
1135 + memcpy (dst, usrc, chunk_size);
1136 + page_unlock (usrc);
1138 + dst += chunk_size;
1139 + usrc += chunk_size;
1140 + size -= chunk_size;
1144 /* Creates a copy of user string US in kernel memory
1145 @@ -149,25 +133,40 @@ static char *
1146 copy_in_string (const char *us)
1152 ks = palloc_get_page (0);
1156 - for (length = 0; length < PGSIZE; length++)
1161 - if (us >= (char *) PHYS_BASE || !get_user (ks + length, us++))
1162 + upage = pg_round_down (us);
1163 + if (!page_lock (upage, false))
1166 + for (; us < upage + PGSIZE; us++)
1168 - palloc_free_page (ks);
1170 + ks[length++] = *us;
1173 + page_unlock (upage);
1176 + else if (length >= PGSIZE)
1177 + goto too_long_error;
1180 - if (ks[length] == '\0')
1183 + page_unlock (upage);
1185 - ks[PGSIZE - 1] = '\0';
1189 + page_unlock (upage);
1191 + palloc_free_page (ks);
1195 /* Halt system call. */
1196 @@ -181,7 +180,7 @@ sys_halt (void)
1198 sys_exit (int exit_code)
1200 - thread_current ()->wait_status->exit_code = exit_code;
1201 + thread_current ()->exit_code = exit_code;
1205 @@ -192,7 +191,7 @@ sys_exec (const char *ufile)
1208 char *kfile = copy_in_string (ufile);
1211 lock_acquire (&fs_lock);
1212 tid = process_execute (kfile);
1213 lock_release (&fs_lock);
1214 @@ -215,11 +214,11 @@ sys_create (const char *ufile, unsigned initial_size)
1216 char *kfile = copy_in_string (ufile);
1220 lock_acquire (&fs_lock);
1221 ok = filesys_create (kfile, initial_size);
1222 lock_release (&fs_lock);
1225 palloc_free_page (kfile);
1228 @@ -231,16 +230,16 @@ sys_remove (const char *ufile)
1230 char *kfile = copy_in_string (ufile);
1234 lock_acquire (&fs_lock);
1235 ok = filesys_remove (kfile);
1236 lock_release (&fs_lock);
1239 palloc_free_page (kfile);
1245 /* A file descriptor, for binding a file handle to a file. */
1246 struct file_descriptor
1248 @@ -320,18 +319,7 @@ sys_read (int handle, void *udst_, unsigned size)
1249 struct file_descriptor *fd;
1252 - /* Handle keyboard reads. */
1253 - if (handle == STDIN_FILENO)
1255 - for (bytes_read = 0; (size_t) bytes_read < size; bytes_read++)
1256 - if (udst >= (uint8_t *) PHYS_BASE || !put_user (udst++, input_getc ()))
1258 - return bytes_read;
1261 - /* Handle all other reads. */
1262 fd = lookup_fd (handle);
1263 - lock_acquire (&fs_lock);
1266 /* How much to read into this page? */
1267 @@ -339,32 +327,49 @@ sys_read (int handle, void *udst_, unsigned size)
1268 size_t read_amt = size < page_left ? size : page_left;
1271 - /* Check that touching this page is okay. */
1272 - if (!verify_user (udst))
1273 + /* Read from file into page. */
1274 + if (handle != STDIN_FILENO)
1276 + if (!page_lock (udst, true))
1278 + lock_acquire (&fs_lock);
1279 + retval = file_read (fd->file, udst, read_amt);
1280 lock_release (&fs_lock);
1282 + page_unlock (udst);
1285 - /* Read from file into page. */
1286 - retval = file_read (fd->file, udst, read_amt);
1291 + for (i = 0; i < read_amt; i++)
1293 + char c = input_getc ();
1294 + if (!page_lock (udst, true))
1297 + page_unlock (udst);
1299 + bytes_read = read_amt;
1302 + /* Check success. */
1305 if (bytes_read == 0)
1309 - bytes_read += retval;
1311 - /* If it was a short read we're done. */
1312 - if (retval != (off_t) read_amt)
1314 + bytes_read += retval;
1315 + if (retval != (off_t) read_amt)
1317 + /* Short read, so we're done. */
1325 - lock_release (&fs_lock);
1329 @@ -381,7 +386,6 @@ sys_write (int handle, void *usrc_, unsigned size)
1330 if (handle != STDOUT_FILENO)
1331 fd = lookup_fd (handle);
1333 - lock_acquire (&fs_lock);
1336 /* How many bytes to write to this page?
1337 @@ -389,21 +393,21 @@ sys_write (int handle, void *usrc_, unsigned size)
1338 size_t write_amt = size < page_left ? size : page_left;
1341 - /* Check that we can touch this user page. */
1342 - if (!verify_user (usrc))
1344 - lock_release (&fs_lock);
1348 - /* Do the write. */
1349 + /* Write from page into file. */
1350 + if (!page_lock (usrc, false))
1352 + lock_acquire (&fs_lock);
1353 if (handle == STDOUT_FILENO)
1355 - putbuf (usrc, write_amt);
1356 + putbuf ((char *) usrc, write_amt);
1360 retval = file_write (fd->file, usrc, write_amt);
1361 + lock_release (&fs_lock);
1362 + page_unlock (usrc);
1364 + /* Handle return value. */
1367 if (bytes_written == 0)
1368 @@ -420,7 +424,6 @@ sys_write (int handle, void *usrc_, unsigned size)
1372 - lock_release (&fs_lock);
1374 return bytes_written;
1376 @@ -435,7 +438,7 @@ sys_seek (int handle, unsigned position)
1377 if ((off_t) position >= 0)
1378 file_seek (fd->file, position);
1379 lock_release (&fs_lock);
1385 @@ -449,7 +452,7 @@ sys_tell (int handle)
1386 lock_acquire (&fs_lock);
1387 position = file_tell (fd->file);
1388 lock_release (&fs_lock);
1394 @@ -465,8 +468,110 @@ sys_close (int handle)
1399 +/* Binds a mapping id to a region of memory and a file. */
1402 + struct list_elem elem; /* List element. */
1403 + int handle; /* Mapping id. */
1404 + struct file *file; /* File. */
1405 + uint8_t *base; /* Start of memory mapping. */
1406 + size_t page_cnt; /* Number of pages mapped. */
1409 +/* Returns the mapping associated with the given handle.
1410 + Terminates the process if HANDLE is not associated with a
1411 + memory mapping. */
1412 +static struct mapping *
1413 +lookup_mapping (int handle)
1415 + struct thread *cur = thread_current ();
1416 + struct list_elem *e;
1418 + for (e = list_begin (&cur->mappings); e != list_end (&cur->mappings);
1419 + e = list_next (e))
1421 + struct mapping *m = list_entry (e, struct mapping, elem);
1422 + if (m->handle == handle)
1429 +/* Removes mapping M from the virtual address space,
1430 + writing back any pages that have changed. */
1432 +unmap (struct mapping *m)
1434 + list_remove (&m->elem);
1435 + while (m->page_cnt-- > 0)
1437 + page_deallocate (m->base);
1438 + m->base += PGSIZE;
1440 + file_close (m->file);
1444 -/* On thread exit, close all open files. */
1445 +/* Mmap system call. */
1447 +sys_mmap (int handle, void *addr)
1449 + struct file_descriptor *fd = lookup_fd (handle);
1450 + struct mapping *m = malloc (sizeof *m);
1454 + if (m == NULL || addr == NULL || pg_ofs (addr) != 0)
1457 + m->handle = thread_current ()->next_handle++;
1458 + lock_acquire (&fs_lock);
1459 + m->file = file_reopen (fd->file);
1460 + lock_release (&fs_lock);
1461 + if (m->file == NULL)
1468 + list_push_front (&thread_current ()->mappings, &m->elem);
1471 + lock_acquire (&fs_lock);
1472 + length = file_length (m->file);
1473 + lock_release (&fs_lock);
1474 + while (length > 0)
1476 + struct page *p = page_allocate ((uint8_t *) addr + offset, false);
1482 + p->private = false;
1483 + p->file = m->file;
1484 + p->file_offset = offset;
1485 + p->file_bytes = length >= PGSIZE ? PGSIZE : length;
1486 + offset += p->file_bytes;
1487 + length -= p->file_bytes;
1494 +/* Munmap system call. */
1496 +sys_munmap (int mapping)
1498 + unmap (lookup_mapping (mapping));
1502 +/* On thread exit, close all open files and unmap all mappings. */
1506 @@ -475,12 +580,19 @@ syscall_exit (void)
1508 for (e = list_begin (&cur->fds); e != list_end (&cur->fds); e = next)
1510 - struct file_descriptor *fd;
1511 - fd = list_entry (e, struct file_descriptor, elem);
1512 + struct file_descriptor *fd = list_entry (e, struct file_descriptor, elem);
1513 next = list_next (e);
1514 lock_acquire (&fs_lock);
1515 file_close (fd->file);
1516 lock_release (&fs_lock);
1520 + for (e = list_begin (&cur->mappings); e != list_end (&cur->mappings);
1523 + struct mapping *m = list_entry (e, struct mapping, elem);
1524 + next = list_next (e);
1528 diff --git a/src/vm/frame.c b/src/vm/frame.c
1529 new file mode 100644
1530 index 0000000..ef55376
1532 +++ b/src/vm/frame.c
1534 +#include "vm/frame.h"
1536 +#include "vm/page.h"
1537 +#include "devices/timer.h"
1538 +#include "threads/init.h"
1539 +#include "threads/malloc.h"
1540 +#include "threads/palloc.h"
1541 +#include "threads/synch.h"
1542 +#include "threads/vaddr.h"
1544 +static struct frame *frames;
1545 +static size_t frame_cnt;
1547 +static struct lock scan_lock;
1548 +static size_t hand;
1550 +/* Initialize the frame manager. */
1556 + lock_init (&scan_lock);
1558 + frames = malloc (sizeof *frames * init_ram_pages);
1559 + if (frames == NULL)
1560 + PANIC ("out of memory allocating page frames");
1562 + while ((base = palloc_get_page (PAL_USER)) != NULL)
1564 + struct frame *f = &frames[frame_cnt++];
1565 + lock_init (&f->lock);
1571 +/* Tries to allocate and lock a frame for PAGE.
1572 + Returns the frame if successful, or a null pointer on failure. */
1573 +static struct frame *
1574 +try_frame_alloc_and_lock (struct page *page)
1578 + lock_acquire (&scan_lock);
1580 + /* Find a free frame. */
1581 + for (i = 0; i < frame_cnt; i++)
1583 + struct frame *f = &frames[i];
1584 + if (!lock_try_acquire (&f->lock))
1586 + if (f->page == NULL)
1589 + lock_release (&scan_lock);
1592 + lock_release (&f->lock);
1595 + /* No free frame. Find a frame to evict. */
1596 + for (i = 0; i < frame_cnt * 2; i++)
1598 + /* Get a frame. */
1599 + struct frame *f = &frames[hand];
1600 + if (++hand >= frame_cnt)
1603 + if (!lock_try_acquire (&f->lock))
1606 + if (f->page == NULL)
1609 + lock_release (&scan_lock);
1613 + if (page_accessed_recently (f->page))
1615 + lock_release (&f->lock);
1619 + lock_release (&scan_lock);
1621 + /* Evict this frame. */
1622 + if (!page_out (f->page))
1624 + lock_release (&f->lock);
1632 + lock_release (&scan_lock);
1637 +/* Tries really hard to allocate and lock a frame for PAGE.
1638 + Returns the frame if successful, or a null pointer on failure. */
1640 +frame_alloc_and_lock (struct page *page)
1644 + for (try = 0; try < 3; try++)
1646 + struct frame *f = try_frame_alloc_and_lock (page);
1649 + ASSERT (lock_held_by_current_thread (&f->lock));
1652 + timer_msleep (1000);
1658 +/* Locks P's frame into memory, if it has one.
1659 + Upon return, p->frame will not change until P is unlocked. */
1661 +frame_lock (struct page *p)
1663 + /* A frame can be asynchronously removed, but never inserted. */
1664 + struct frame *f = p->frame;
1667 + lock_acquire (&f->lock);
1668 + if (f != p->frame)
1670 + lock_release (&f->lock);
1671 + ASSERT (p->frame == NULL);
1676 +/* Releases frame F for use by another page.
1677 + F must be locked for use by the current process.
1678 + Any data in F is lost. */
1680 +frame_free (struct frame *f)
1682 + ASSERT (lock_held_by_current_thread (&f->lock));
1685 + lock_release (&f->lock);
1688 +/* Unlocks frame F, allowing it to be evicted.
1689 + F must be locked for use by the current process. */
1691 +frame_unlock (struct frame *f)
1693 + ASSERT (lock_held_by_current_thread (&f->lock));
1694 + lock_release (&f->lock);
1696 diff --git a/src/vm/frame.h b/src/vm/frame.h
1697 new file mode 100644
1698 index 0000000..496f623
1700 +++ b/src/vm/frame.h
1705 +#include <stdbool.h>
1706 +#include "threads/synch.h"
1708 +/* A physical frame. */
1711 + struct lock lock; /* Prevent simultaneous access. */
1712 + void *base; /* Kernel virtual base address. */
1713 + struct page *page; /* Mapped process page, if any. */
1716 +void frame_init (void);
1718 +struct frame *frame_alloc_and_lock (struct page *);
1719 +void frame_lock (struct page *);
1721 +void frame_free (struct frame *);
1722 +void frame_unlock (struct frame *);
1724 +#endif /* vm/frame.h */
1725 diff --git a/src/vm/page.c b/src/vm/page.c
1726 new file mode 100644
1727 index 0000000..f08bcf8
1731 +#include "vm/page.h"
1733 +#include <string.h>
1734 +#include "vm/frame.h"
1735 +#include "vm/swap.h"
1736 +#include "filesys/file.h"
1737 +#include "threads/malloc.h"
1738 +#include "threads/thread.h"
1739 +#include "userprog/pagedir.h"
1740 +#include "threads/vaddr.h"
1742 +/* Maximum size of process stack, in bytes. */
1743 +#define STACK_MAX (1024 * 1024)
1745 +/* Destroys a page, which must be in the current process's
1746 + page table. Used as a callback for hash_destroy(). */
1748 +destroy_page (struct hash_elem *p_, void *aux UNUSED)
1750 + struct page *p = hash_entry (p_, struct page, hash_elem);
1753 + frame_free (p->frame);
1757 +/* Destroys the current process's page table. */
1761 + struct hash *h = thread_current ()->pages;
1763 + hash_destroy (h, destroy_page);
1766 +/* Returns the page containing the given virtual ADDRESS,
1767 + or a null pointer if no such page exists.
1768 + Allocates stack pages as necessary. */
1769 +static struct page *
1770 +page_for_addr (const void *address)
1772 + if (address < PHYS_BASE)
1775 + struct hash_elem *e;
1777 + /* Find existing page. */
1778 + p.addr = (void *) pg_round_down (address);
1779 + e = hash_find (thread_current ()->pages, &p.hash_elem);
1781 + return hash_entry (e, struct page, hash_elem);
1783 + /* No page. Expand stack? */
1784 + if (address >= PHYS_BASE - STACK_MAX
1785 + && address >= thread_current ()->user_esp - 32)
1786 + return page_allocate ((void *) address, false);
1791 +/* Locks a frame for page P and pages it in.
1792 + Returns true if successful, false on failure. */
1794 +do_page_in (struct page *p)
1796 + /* Get a frame for the page. */
1797 + p->frame = frame_alloc_and_lock (p);
1798 + if (p->frame == NULL)
1801 + /* Copy data into the frame. */
1802 + if (p->sector != (block_sector_t) -1)
1804 + /* Get data from swap. */
1807 + else if (p->file != NULL)
1809 + /* Get data from file. */
1810 + off_t read_bytes = file_read_at (p->file, p->frame->base,
1811 + p->file_bytes, p->file_offset);
1812 + off_t zero_bytes = PGSIZE - read_bytes;
1813 + memset (p->frame->base + read_bytes, 0, zero_bytes);
1814 + if (read_bytes != p->file_bytes)
1815 + printf ("bytes read (%"PROTd") != bytes requested (%"PROTd")\n",
1816 + read_bytes, p->file_bytes);
1820 + /* Provide all-zero page. */
1821 + memset (p->frame->base, 0, PGSIZE);
1827 +/* Faults in the page containing FAULT_ADDR.
1828 + Returns true if successful, false on failure. */
1830 +page_in (void *fault_addr)
1835 + /* Can't handle page faults without a hash table. */
1836 + if (thread_current ()->pages == NULL)
1839 + p = page_for_addr (fault_addr);
1844 + if (p->frame == NULL)
1846 + if (!do_page_in (p))
1849 + ASSERT (lock_held_by_current_thread (&p->frame->lock));
1851 + /* Install frame into page table. */
1852 + success = pagedir_set_page (thread_current ()->pagedir, p->addr,
1853 + p->frame->base, !p->read_only);
1855 + /* Release frame. */
1856 + frame_unlock (p->frame);
1862 + P must have a locked frame.
1863 + Return true if successful, false on failure. */
1865 +page_out (struct page *p)
1870 + ASSERT (p->frame != NULL);
1871 + ASSERT (lock_held_by_current_thread (&p->frame->lock));
1873 + /* Mark page not present in page table, forcing accesses by the
1874 + process to fault. This must happen before checking the
1875 + dirty bit, to prevent a race with the process dirtying the
1877 + pagedir_clear_page (p->thread->pagedir, p->addr);
1879 + /* Has the frame been modified? */
1880 + dirty = pagedir_is_dirty (p->thread->pagedir, p->addr);
1882 + /* Write frame contents to disk if necessary. */
1883 + if (p->file != NULL)
1888 + ok = swap_out (p);
1890 + ok = file_write_at (p->file, p->frame->base, p->file_bytes,
1891 + p->file_offset) == p->file_bytes;
1897 + ok = swap_out (p);
1900 + //memset (p->frame->base, 0xcc, PGSIZE);
1906 +/* Returns true if page P's data has been accessed recently,
1908 + P must have a frame locked into memory. */
1910 +page_accessed_recently (struct page *p)
1912 + bool was_accessed;
1914 + ASSERT (p->frame != NULL);
1915 + ASSERT (lock_held_by_current_thread (&p->frame->lock));
1917 + was_accessed = pagedir_is_accessed (p->thread->pagedir, p->addr);
1919 + pagedir_set_accessed (p->thread->pagedir, p->addr, false);
1920 + return was_accessed;
1923 +/* Adds a mapping for user virtual address VADDR to the page hash
1924 + table. Fails if VADDR is already mapped or if memory
1925 + allocation fails. */
1927 +page_allocate (void *vaddr, bool read_only)
1929 + struct thread *t = thread_current ();
1930 + struct page *p = malloc (sizeof *p);
1933 + p->addr = pg_round_down (vaddr);
1935 + p->read_only = read_only;
1936 + p->private = !read_only;
1940 + p->sector = (block_sector_t) -1;
1943 + p->file_offset = 0;
1944 + p->file_bytes = 0;
1946 + p->thread = thread_current ();
1948 + if (hash_insert (t->pages, &p->hash_elem) != NULL)
1950 + /* Already mapped. */
1958 +/* Evicts the page containing address VADDR
1959 + and removes it from the page table. */
1961 +page_deallocate (void *vaddr)
1963 + struct page *p = page_for_addr (vaddr);
1964 + ASSERT (p != NULL);
1968 + struct frame *f = p->frame;
1969 + if (p->file && !p->private)
1973 + hash_delete (thread_current ()->pages, &p->hash_elem);
1977 +/* Returns a hash value for the page that E refers to. */
1979 +page_hash (const struct hash_elem *e, void *aux UNUSED)
1981 + const struct page *p = hash_entry (e, struct page, hash_elem);
1982 + return ((uintptr_t) p->addr) >> PGBITS;
1985 +/* Returns true if page A precedes page B. */
1987 +page_less (const struct hash_elem *a_, const struct hash_elem *b_,
1990 + const struct page *a = hash_entry (a_, struct page, hash_elem);
1991 + const struct page *b = hash_entry (b_, struct page, hash_elem);
1993 + return a->addr < b->addr;
1996 +/* Tries to lock the page containing ADDR into physical memory.
1997 + If WILL_WRITE is true, the page must be writeable;
1998 + otherwise it may be read-only.
1999 + Returns true if successful, false on failure. */
2001 +page_lock (const void *addr, bool will_write)
2003 + struct page *p = page_for_addr (addr);
2004 + if (p == NULL || (p->read_only && will_write))
2008 + if (p->frame == NULL)
2009 + return (do_page_in (p)
2010 + && pagedir_set_page (thread_current ()->pagedir, p->addr,
2011 + p->frame->base, !p->read_only));
2016 +/* Unlocks a page locked with page_lock(). */
2018 +page_unlock (const void *addr)
2020 + struct page *p = page_for_addr (addr);
2021 + ASSERT (p != NULL);
2022 + frame_unlock (p->frame);
2024 diff --git a/src/vm/page.h b/src/vm/page.h
2025 new file mode 100644
2026 index 0000000..b71b9da
2034 +#include "devices/block.h"
2035 +#include "filesys/off_t.h"
2036 +#include "threads/synch.h"
2038 +/* Virtual page. */
2041 + /* Immutable members. */
2042 + void *addr; /* User virtual address. */
2043 + bool read_only; /* Read-only page? */
2044 + struct thread *thread; /* Owning thread. */
2046 + /* Accessed only in owning process context. */
2047 + struct hash_elem hash_elem; /* struct thread `pages' hash element. */
2049 + /* Set only in owning process context with frame->frame_lock held.
2050 + Cleared only with scan_lock and frame->frame_lock held. */
2051 + struct frame *frame; /* Page frame. */
2053 + /* Swap information, protected by frame->frame_lock. */
2054 + block_sector_t sector; /* Starting sector of swap area, or -1. */
2056 + /* Memory-mapped file information, protected by frame->frame_lock. */
2057 + bool private; /* False to write back to file,
2058 + true to write back to swap. */
2059 + struct file *file; /* File. */
2060 + off_t file_offset; /* Offset in file. */
2061 + off_t file_bytes; /* Bytes to read/write, 1...PGSIZE. */
2064 +void page_exit (void);
2066 +struct page *page_allocate (void *, bool read_only);
2067 +void page_deallocate (void *vaddr);
2069 +bool page_in (void *fault_addr);
2070 +bool page_out (struct page *);
2071 +bool page_accessed_recently (struct page *);
2073 +bool page_lock (const void *, bool will_write);
2074 +void page_unlock (const void *);
2076 +hash_hash_func page_hash;
2077 +hash_less_func page_less;
2079 +#endif /* vm/page.h */
2080 diff --git a/src/vm/swap.c b/src/vm/swap.c
2081 new file mode 100644
2082 index 0000000..76fcf71
2086 +#include "vm/swap.h"
2087 +#include <bitmap.h>
2090 +#include "vm/frame.h"
2091 +#include "vm/page.h"
2092 +#include "threads/synch.h"
2093 +#include "threads/vaddr.h"
2095 +/* The swap device. */
2096 +static struct block *swap_device;
2098 +/* Used swap pages. */
2099 +static struct bitmap *swap_bitmap;
2101 +/* Protects swap_bitmap. */
2102 +static struct lock swap_lock;
2104 +/* Number of sectors per page. */
2105 +#define PAGE_SECTORS (PGSIZE / BLOCK_SECTOR_SIZE)
2107 +/* Sets up swap. */
2111 + swap_device = block_get_role (BLOCK_SWAP);
2112 + if (swap_device == NULL)
2114 + printf ("no swap device--swap disabled\n");
2115 + swap_bitmap = bitmap_create (0);
2118 + swap_bitmap = bitmap_create (block_size (swap_device)
2120 + if (swap_bitmap == NULL)
2121 + PANIC ("couldn't create swap bitmap");
2122 + lock_init (&swap_lock);
2125 +/* Swaps in page P, which must have a locked frame
2126 + (and be swapped out). */
2128 +swap_in (struct page *p)
2132 + ASSERT (p->frame != NULL);
2133 + ASSERT (lock_held_by_current_thread (&p->frame->lock));
2134 + ASSERT (p->sector != (block_sector_t) -1);
2136 + for (i = 0; i < PAGE_SECTORS; i++)
2137 + block_read (swap_device, p->sector + i,
2138 + p->frame->base + i * BLOCK_SECTOR_SIZE);
2139 + bitmap_reset (swap_bitmap, p->sector / PAGE_SECTORS);
2140 + p->sector = (block_sector_t) -1;
2143 +/* Swaps out page P, which must have a locked frame. */
2145 +swap_out (struct page *p)
2150 + ASSERT (p->frame != NULL);
2151 + ASSERT (lock_held_by_current_thread (&p->frame->lock));
2153 + lock_acquire (&swap_lock);
2154 + slot = bitmap_scan_and_flip (swap_bitmap, 0, 1, false);
2155 + lock_release (&swap_lock);
2156 + if (slot == BITMAP_ERROR)
2159 + p->sector = slot * PAGE_SECTORS;
2160 + for (i = 0; i < PAGE_SECTORS; i++)
2161 + block_write (swap_device, p->sector + i,
2162 + p->frame->base + i * BLOCK_SECTOR_SIZE);
2164 + p->private = false;
2166 + p->file_offset = 0;
2167 + p->file_bytes = 0;
2171 diff --git a/src/vm/swap.h b/src/vm/swap.h
2172 new file mode 100644
2173 index 0000000..34d5d51
2178 +#define VM_SWAP_H 1
2180 +#include <stdbool.h>
2183 +void swap_init (void);
2184 +void swap_in (struct page *);
2185 +bool swap_out (struct page *);
2187 +#endif /* vm/swap.h */