1 diff -u src/Makefile.build~ src/Makefile.build
2 --- src/Makefile.build~ 2005-06-16 21:50:20.000000000 -0700
3 +++ src/Makefile.build 2005-06-16 15:09:31.000000000 -0700
4 @@ -53,7 +53,9 @@ userprog_SRC += userprog/gdt.c # GDT in
5 userprog_SRC += userprog/tss.c # TSS management.
7 # No virtual memory code yet.
8 -#vm_SRC = vm/filename.c # Some file.
14 filesys_SRC = filesys/filesys.c # Filesystem core.
15 @@ -62,6 +64,7 @@ filesys_SRC += filesys/file.c # Files.
16 filesys_SRC += filesys/directory.c # Directories.
17 filesys_SRC += filesys/inode.c # File headers.
18 filesys_SRC += filesys/fsutil.c # Utilities.
19 +filesys_SRC += filesys/cache.c # Cache.
21 SOURCES = $(foreach dir,$(KERNEL_SUBDIRS),$($(dir)_SRC))
22 OBJECTS = $(patsubst %.c,%.o,$(patsubst %.S,%.o,$(SOURCES)))
23 diff -u src/devices/timer.c~ src/devices/timer.c
24 --- src/devices/timer.c~ 2005-06-15 15:21:01.000000000 -0700
25 +++ src/devices/timer.c 2005-06-16 15:09:31.000000000 -0700
26 @@ -23,6 +23,9 @@ static volatile int64_t ticks;
27 Initialized by timer_calibrate(). */
28 static unsigned loops_per_tick;
30 +/* Threads waiting in timer_sleep(). */
31 +static struct list wait_list;
33 static intr_handler_func timer_interrupt;
34 static bool too_many_loops (unsigned loops);
35 static void busy_wait (int64_t loops);
36 @@ -43,6 +46,8 @@ timer_init (void)
37 outb (0x40, count >> 8);
39 intr_register_ext (0x20, timer_interrupt, "8254 Timer");
41 + list_init (&wait_list);
44 /* Calibrates loops_per_tick, used to implement brief delays. */
45 @@ -87,15 +92,36 @@ timer_elapsed (int64_t then)
46 return timer_ticks () - then;
49 +/* Compares two threads based on their wake-up times. */
51 +compare_threads_by_wakeup_time (const struct list_elem *a_,
52 + const struct list_elem *b_,
55 + const struct thread *a = list_entry (a_, struct thread, timer_elem);
56 + const struct thread *b = list_entry (b_, struct thread, timer_elem);
58 + return a->wakeup_time < b->wakeup_time;
61 /* Suspends execution for approximately TICKS timer ticks. */
63 timer_sleep (int64_t ticks)
65 - int64_t start = timer_ticks ();
66 + struct thread *t = thread_current ();
68 + /* Schedule our wake-up time. */
69 + t->wakeup_time = timer_ticks () + ticks;
71 + /* Atomically insert the current thread into the wait list. */
72 ASSERT (intr_get_level () == INTR_ON);
73 - while (timer_elapsed (start) < ticks)
76 + list_insert_ordered (&wait_list, &t->timer_elem,
77 + compare_threads_by_wakeup_time, NULL);
81 + sema_down (&t->timer_sema);
84 /* Suspends execution for approximately MS milliseconds. */
85 @@ -132,6 +158,16 @@ timer_interrupt (struct intr_frame *args
90 + while (!list_empty (&wait_list))
92 + struct thread *t = list_entry (list_front (&wait_list),
93 + struct thread, timer_elem);
94 + if (ticks < t->wakeup_time)
96 + sema_up (&t->timer_sema);
97 + list_pop_front (&wait_list);
101 /* Returns true if LOOPS iterations waits for more than one timer
102 diff -u src/filesys/Make.vars~ src/filesys/Make.vars
103 --- src/filesys/Make.vars~ 2005-05-24 14:46:45.000000000 -0700
104 +++ src/filesys/Make.vars 2005-06-16 15:09:31.000000000 -0700
105 @@ -6,7 +6,7 @@ KERNEL_SUBDIRS = threads devices lib lib
106 GRADING_FILE = $(SRCDIR)/tests/filesys/Grading.no-vm
108 # Uncomment the lines below to enable VM.
109 -#os.dsk: DEFINES += -DVM
110 -#KERNEL_SUBDIRS += vm
111 -#TEST_SUBDIRS += tests/vm
112 -#GRADING_FILE = $(SRCDIR)/tests/filesys/Grading.with-vm
113 +os.dsk: DEFINES += -DVM
114 +KERNEL_SUBDIRS += vm
115 +TEST_SUBDIRS += tests/vm
116 +GRADING_FILE = $(SRCDIR)/tests/filesys/Grading.with-vm
117 diff -u src/filesys/cache.c~ src/filesys/cache.c
118 --- src/filesys/cache.c~ 1969-12-31 16:00:00.000000000 -0800
119 +++ src/filesys/cache.c 2005-06-16 15:09:31.000000000 -0700
121 +#include "filesys/cache.h"
124 +#include "filesys/filesys.h"
125 +#include "devices/disk.h"
126 +#include "devices/timer.h"
127 +#include "threads/malloc.h"
128 +#include "threads/synch.h"
129 +#include "threads/thread.h"
131 +#define INVALID_SECTOR ((disk_sector_t) -1)
133 +/* A cached block. */
136 + /* Locking to prevent eviction. */
137 + struct lock block_lock; /* Protects fields in group. */
138 + struct condition no_readers_or_writers; /* readers == 0 && writers == 0 */
139 + struct condition no_writers; /* writers == 0 */
140 + int readers, read_waiters; /* # of readers, # waiting to read. */
141 + int writers, write_waiters; /* # of writers (<= 1), # waiting to write. */
143 + /* Sector number. INVALID_SECTOR indicates a free cache block.
145 + Changing from free to allocated requires cache_sync.
147 + Changing from allocated to free requires block_lock, block
148 + must be up-to-date and not dirty, and no one may be
150 + disk_sector_t sector;
152 + /* Is data[] correct?
153 + Requires write lock or data_lock. */
156 + /* Does data[] need to be written back to disk?
157 + Valid only when up-to-date.
158 + Requires read lock or write lock or data_lock. */
162 + Access to data[] requires up-to-date and read or write lock.
163 + Bringing up-to-date requires write lock or data_lock. */
164 + struct lock data_lock; /* Protects fields in group. */
165 + uint8_t data[DISK_SECTOR_SIZE]; /* Disk data. */
169 +#define CACHE_CNT 64
170 +struct cache_block cache[CACHE_CNT];
174 + Required to allocate a cache block to a sector, to prevent a
175 + single sector being allocated two different cache blocks.
177 + Required to search the cache for a sector, to prevent the
178 + sector from being added while the search is ongoing.
181 +struct lock cache_sync;
183 +/* Cache eviction hand.
184 + Protected by cache_sync. */
185 +static int hand = 0;
187 +static void flushd_init (void);
188 +static void readaheadd_init (void);
189 +static void readaheadd_submit (disk_sector_t sector);
191 +/* Initializes cache. */
197 + lock_init (&cache_sync);
198 + for (i = 0; i < CACHE_CNT; i++)
200 + struct cache_block *b = &cache[i];
201 + lock_init (&b->block_lock);
202 + cond_init (&b->no_readers_or_writers);
203 + cond_init (&b->no_writers);
204 + b->readers = b->read_waiters = 0;
205 + b->writers = b->write_waiters = 0;
206 + b->sector = INVALID_SECTOR;
207 + lock_init (&b->data_lock);
211 + readaheadd_init ();
214 +/* Flushes cache to disk. */
220 + for (i = 0; i < CACHE_CNT; i++)
222 + struct cache_block *b = &cache[i];
223 + disk_sector_t sector;
225 + lock_acquire (&b->block_lock);
226 + sector = b->sector;
227 + lock_release (&b->block_lock);
229 + if (sector == INVALID_SECTOR)
232 + b = cache_lock (sector, EXCLUSIVE);
233 + if (b->up_to_date && b->dirty)
235 + disk_write (filesys_disk, b->sector, b->data);
242 +/* Locks the given SECTOR into the cache and returns the cache
244 + If TYPE is EXCLUSIVE, then the block returned will be locked
245 + only by the caller. The calling thread must not already
246 + have any lock on the block.
 247 + If TYPE is NON_EXCLUSIVE, then the block returned may be locked by
248 + any number of other callers. The calling thread may already
249 + have any number of non-exclusive locks on the block. */
250 +struct cache_block *
251 +cache_lock (disk_sector_t sector, enum lock_type type)
256 + lock_acquire (&cache_sync);
258 + /* Is the block already in-cache? */
259 + for (i = 0; i < CACHE_CNT; i++)
261 + /* Skip any blocks that don't hold SECTOR. */
262 + struct cache_block *b = &cache[i];
263 + lock_acquire (&b->block_lock);
264 + if (b->sector != sector)
266 + lock_release (&b->block_lock);
269 + lock_release (&cache_sync);
271 + /* Get read or write lock. */
272 + if (type == NON_EXCLUSIVE)
274 + /* Lock for read. */
276 + if (b->writers || b->write_waiters)
278 + cond_wait (&b->no_writers, &b->block_lock);
279 + } while (b->writers);
285 + /* Lock for write. */
286 + b->write_waiters++;
287 + if (b->readers || b->read_waiters || b->writers)
289 + cond_wait (&b->no_readers_or_writers, &b->block_lock);
290 + } while (b->readers || b->writers);
292 + b->write_waiters--;
294 + lock_release (&b->block_lock);
296 + /* Our sector should have been pinned in the cache while we
297 + were waiting. Make sure. */
298 + ASSERT (b->sector == sector);
303 + /* Not in cache. Find empty slot.
304 + We hold cache_sync. */
305 + for (i = 0; i < CACHE_CNT; i++)
307 + struct cache_block *b = &cache[i];
308 + lock_acquire (&b->block_lock);
309 + if (b->sector == INVALID_SECTOR)
311 + /* Drop block_lock, which is no longer needed because
312 + this is the only code that allocates free blocks,
313 + and we still have cache_sync.
315 + We can't drop cache_sync yet because someone else
316 + might try to allocate this same block (or read from
317 + it) while we're still initializing the block. */
318 + lock_release (&b->block_lock);
320 + b->sector = sector;
321 + b->up_to_date = false;
322 + ASSERT (b->readers == 0);
323 + ASSERT (b->writers == 0);
324 + if (type == NON_EXCLUSIVE)
328 + lock_release (&cache_sync);
331 + lock_release (&b->block_lock);
334 + /* No empty slots. Evict something.
335 + We hold cache_sync. */
336 + for (i = 0; i < CACHE_CNT; i++)
338 + struct cache_block *b = &cache[hand];
339 + if (++hand >= CACHE_CNT)
342 + /* Try to grab exclusive write access to block. */
343 + lock_acquire (&b->block_lock);
344 + if (b->readers || b->writers || b->read_waiters || b->write_waiters)
346 + lock_release (&b->block_lock);
350 + lock_release (&b->block_lock);
352 + lock_release (&cache_sync);
354 + /* Write block to disk if dirty. */
355 + if (b->up_to_date && b->dirty)
357 + disk_write (filesys_disk, b->sector, b->data);
361 + /* Remove block from cache, if possible: someone might have
362 + started waiting on it while the lock was released. */
363 + lock_acquire (&b->block_lock);
365 + if (!b->read_waiters && !b->write_waiters)
367 + /* No one is waiting for it, so we can free it. */
368 + b->sector = INVALID_SECTOR;
372 + /* There is a waiter. Give it the block. */
373 + if (b->read_waiters)
374 + cond_broadcast (&b->no_writers, &b->block_lock);
376 + cond_signal (&b->no_readers_or_writers, &b->block_lock);
378 + lock_release (&b->block_lock);
384 + /* Wait for cache contention to die down. */
385 + lock_release (&cache_sync);
386 + timer_sleep (1000);
390 +/* Bring block B up-to-date, by reading it from disk if
391 + necessary, and return a pointer to its data.
392 + The caller must have an exclusive or non-exclusive lock on
395 +cache_read (struct cache_block *b)
397 + lock_acquire (&b->data_lock);
398 + if (!b->up_to_date)
400 + disk_read (filesys_disk, b->sector, b->data);
401 + b->up_to_date = true;
404 + lock_release (&b->data_lock);
409 +/* Zero out block B, without reading it from disk, and return a
410 + pointer to the zeroed data.
411 + The caller must have an exclusive lock on B. */
413 +cache_zero (struct cache_block *b)
415 + ASSERT (b->writers);
416 + memset (b->data, 0, DISK_SECTOR_SIZE);
417 + b->up_to_date = true;
423 +/* Marks block B as dirty, so that it will be written back to
424 + disk before eviction.
425 + The caller must have a read or write lock on B,
426 + and B must be up-to-date. */
428 +cache_dirty (struct cache_block *b)
430 + ASSERT (b->up_to_date);
435 + If B is no longer locked by any thread, then it becomes a
436 + candidate for immediate eviction. */
438 +cache_unlock (struct cache_block *b)
440 + lock_acquire (&b->block_lock);
443 + ASSERT (b->writers == 0);
444 + if (--b->readers == 0)
445 + cond_signal (&b->no_readers_or_writers, &b->block_lock);
447 + else if (b->writers)
449 + ASSERT (b->readers == 0);
450 + ASSERT (b->writers == 1);
452 + if (b->read_waiters)
453 + cond_broadcast (&b->no_writers, &b->block_lock);
455 + cond_signal (&b->no_readers_or_writers, &b->block_lock);
459 + lock_release (&b->block_lock);
462 +/* If SECTOR is in the cache, evicts it immediately without
463 + writing it back to disk (even if dirty).
464 + The block must be entirely unused. */
466 +cache_free (disk_sector_t sector)
470 + lock_acquire (&cache_sync);
471 + for (i = 0; i < CACHE_CNT; i++)
473 + struct cache_block *b = &cache[i];
475 + lock_acquire (&b->block_lock);
476 + if (b->sector == sector)
478 + lock_release (&cache_sync);
480 + /* Only invalidate the block if it's unused. That
481 + should be the normal case, but it could be part of a
482 + read-ahead (in readaheadd()) or write-behind (in
484 + if (b->readers == 0 && b->read_waiters == 0
485 + && b->writers == 0 && b->write_waiters == 0)
486 + b->sector = INVALID_SECTOR;
488 + lock_release (&b->block_lock);
491 + lock_release (&b->block_lock);
493 + lock_release (&cache_sync);
497 +cache_readahead (disk_sector_t sector)
499 + readaheadd_submit (sector);
504 +static void flushd (void *aux);
506 +/* Initializes flush daemon. */
510 + thread_create ("flushd", PRI_MIN, flushd, NULL);
513 +/* Flush daemon thread. */
515 +flushd (void *aux UNUSED)
519 + timer_msleep (30 * 1000);
524 +/* A block to read ahead. */
525 +struct readahead_block
527 + struct list_elem list_elem; /* readahead_list element. */
528 + disk_sector_t sector; /* Sector to read. */
531 +/* Protects readahead_list.
532 + Monitor lock for readahead_list_nonempty. */
533 +static struct lock readahead_lock;
535 +/* Signaled when a block is added to readahead_list. */
536 +static struct condition readahead_list_nonempty;
538 +/* List of blocks for read-ahead. */
539 +static struct list readahead_list;
541 +static void readaheadd (void *aux);
543 +/* Initialize read-ahead daemon. */
545 +readaheadd_init (void)
547 + lock_init (&readahead_lock);
548 + cond_init (&readahead_list_nonempty);
549 + list_init (&readahead_list);
550 + thread_create ("readaheadd", PRI_MIN, readaheadd, NULL);
553 +/* Adds SECTOR to the read-ahead queue. */
555 +readaheadd_submit (disk_sector_t sector)
557 + /* Allocate readahead block. */
558 + struct readahead_block *block = malloc (sizeof *block);
561 + block->sector = sector;
563 + /* Add block to list. */
564 + lock_acquire (&readahead_lock);
565 + list_push_back (&readahead_list, &block->list_elem);
566 + cond_signal (&readahead_list_nonempty, &readahead_lock);
567 + lock_release (&readahead_lock);
570 +/* Read-ahead daemon. */
572 +readaheadd (void *aux UNUSED)
576 + struct readahead_block *ra_block;
577 + struct cache_block *cache_block;
579 + /* Get readahead block from list. */
580 + lock_acquire (&readahead_lock);
581 + while (list_empty (&readahead_list))
582 + cond_wait (&readahead_list_nonempty, &readahead_lock);
583 + ra_block = list_entry (list_pop_front (&readahead_list),
584 + struct readahead_block, list_elem);
585 + lock_release (&readahead_lock);
587 + /* Read block into cache. */
588 + cache_block = cache_lock (ra_block->sector, NON_EXCLUSIVE);
589 + cache_read (cache_block);
590 + cache_unlock (cache_block);
594 diff -u src/filesys/cache.h~ src/filesys/cache.h
595 --- src/filesys/cache.h~ 1969-12-31 16:00:00.000000000 -0800
596 +++ src/filesys/cache.h 2005-06-16 15:09:31.000000000 -0700
598 +#ifndef FILESYS_CACHE_H
599 +#define FILESYS_CACHE_H
601 +#include "devices/disk.h"
603 +/* Type of block lock. */
606 + NON_EXCLUSIVE, /* Any number of lockers. */
607 + EXCLUSIVE /* Only one locker. */
610 +void cache_init (void);
611 +void cache_flush (void);
612 +struct cache_block *cache_lock (disk_sector_t, enum lock_type);
613 +void *cache_read (struct cache_block *);
614 +void *cache_zero (struct cache_block *);
615 +void cache_dirty (struct cache_block *);
616 +void cache_unlock (struct cache_block *);
617 +void cache_free (disk_sector_t);
618 +void cache_readahead (disk_sector_t);
620 +#endif /* filesys/cache.h */
621 diff -u src/filesys/directory.c~ src/filesys/directory.c
622 --- src/filesys/directory.c~ 2005-06-16 21:50:20.000000000 -0700
623 +++ src/filesys/directory.c 2005-06-16 21:09:01.000000000 -0700
627 #include "filesys/filesys.h"
628 +#include "filesys/fsutil.h"
629 #include "filesys/inode.h"
630 #include "threads/malloc.h"
631 +#include "threads/synch.h"
636 + struct list_elem list_elem; /* open_dirs list element. */
637 + struct lock dir_lock; /* Protects inode. */
638 + int open_cnt; /* Number of openers. */
639 struct inode *inode; /* Backing store. */
642 @@ -20,15 +25,21 @@ struct dir_entry
643 bool in_use; /* In use or free? */
646 -/* Creates a directory with space for ENTRY_CNT entries in the
647 - given SECTOR. Returns true if successful, false on failure. */
649 -dir_create (disk_sector_t sector, size_t entry_cnt)
650 +/* List of all open directories. */
651 +static struct list open_dirs;
653 +/* Protects open_dirs additions or removals. */
654 +static struct lock open_dirs_lock;
656 +/* Initializes directory modules. */
660 - return inode_create (sector, entry_cnt * sizeof (struct dir_entry));
661 + list_init (&open_dirs);
662 + lock_init (&open_dirs_lock);
665 -/* Opens the directory in the given INODE, of which it takes
666 +/* Opens the directory for the given INODE, of which it takes
667 ownership, and sets *DIRP to the new directory or a null
668 pointer on failure. Return true if successful, false on
670 @@ -36,19 +47,46 @@ bool
671 dir_open (struct inode *inode, struct dir **dirp)
673 struct dir *dir = NULL;
674 + struct list_elem *e;
676 ASSERT (dirp != NULL);
679 + lock_acquire (&open_dirs_lock);
684 + /* Inode must refer to directory. */
685 + if (inode_get_type (inode) != DIR_INODE)
688 + /* Check whether this directory is already open. */
689 + for (e = list_begin (&open_dirs); e != list_end (&open_dirs);
692 - dir = malloc (sizeof *dir);
694 - dir->inode = inode;
695 + dir = list_entry (e, struct dir, list_elem);
696 + if (dir->inode == inode)
703 + /* Create new directory. */
704 + dir = calloc (1, sizeof *dir);
707 + list_push_front (&open_dirs, &dir->list_elem);
708 + lock_init (&dir->dir_lock);
710 + dir->inode = inode;
711 + inode_reopen (dir->inode);
717 - inode_close (inode);
718 + inode_close (inode);
719 + lock_release (&open_dirs_lock);
723 @@ -61,22 +99,34 @@ dir_open_root (struct dir **dirp)
724 return dir_open (inode_open (ROOT_DIR_SECTOR), dirp);
727 +/* Re-opens DIR and returns true. */
729 +dir_reopen (struct dir *dir)
735 /* Destroys DIR and frees associated resources. */
737 dir_close (struct dir *dir)
743 + lock_acquire (&open_dirs_lock);
744 + if (--dir->open_cnt == 0)
746 + list_remove (&dir->list_elem);
747 inode_close (dir->inode);
750 + lock_release (&open_dirs_lock);
753 /* Searches DIR for a file with the given NAME.
754 - If successful, returns true, sets *EP to the directory entry
755 - if EP is non-null, and sets *OFSP to the byte offset of the
756 - directory entry if OFSP is non-null.
757 - otherwise, returns false and ignores EP and OFSP. */
758 + If successful, returns the file's entry;
759 + otherwise, returns a null pointer. */
761 lookup (const struct dir *dir, const char *name,
762 struct dir_entry *ep, off_t *ofsp)
763 @@ -113,10 +163,12 @@ dir_lookup (const struct dir *dir, const
764 ASSERT (dir != NULL);
765 ASSERT (name != NULL);
767 + lock_acquire ((struct lock *) &dir->dir_lock);
768 if (lookup (dir, name, &e, NULL))
769 *inode = inode_open (e.inode_sector);
772 + lock_release ((struct lock *)&dir->dir_lock);
774 return *inode != NULL;
776 @@ -138,10 +190,11 @@ dir_add (struct dir *dir, const char *na
777 ASSERT (name != NULL);
779 /* Check NAME for validity. */
780 - if (*name == '\0' || strlen (name) > NAME_MAX)
781 + if (*name == '\0' || strchr (name, '/') || strlen (name) > NAME_MAX)
784 /* Check that NAME is not in use. */
785 + lock_acquire (&dir->dir_lock);
786 if (lookup (dir, name, NULL, NULL))
789 @@ -164,6 +217,7 @@ dir_add (struct dir *dir, const char *na
790 success = inode_write_at (dir->inode, &e, sizeof e, ofs) == sizeof e;
793 + lock_release (&dir->dir_lock);
797 @@ -182,12 +236,14 @@ dir_remove (struct dir *dir, const char
798 ASSERT (name != NULL);
800 /* Find directory entry. */
801 + lock_acquire (&dir->dir_lock);
802 if (!lookup (dir, name, &e, &ofs))
806 + /* Open inode and verify that it is not an in-use directory. */
807 inode = inode_open (e.inode_sector);
810 + || (inode_get_type (inode) == DIR_INODE && inode_open_cnt (inode) != 1))
813 /* Erase directory entry. */
814 @@ -201,6 +257,7 @@ dir_remove (struct dir *dir, const char
818 + lock_release (&dir->dir_lock);
822 @@ -211,8 +268,10 @@ dir_list (const struct dir *dir)
826 + lock_acquire ((struct lock *) &dir->dir_lock);
827 for (ofs = 0; inode_read_at (dir->inode, &e, sizeof e, ofs) == sizeof e;
830 printf ("%s\n", e.name);
831 + lock_release ((struct lock *) &dir->dir_lock);
833 diff -u src/filesys/directory.h~ src/filesys/directory.h
834 --- src/filesys/directory.h~ 2005-06-16 21:50:20.000000000 -0700
835 +++ src/filesys/directory.h 2005-06-16 15:09:31.000000000 -0700
840 -bool dir_create (disk_sector_t sector, size_t entry_cnt);
841 +void dir_init (void);
842 bool dir_open (struct inode *, struct dir **);
843 bool dir_open_root (struct dir **);
844 +bool dir_reopen (struct dir *);
845 void dir_close (struct dir *);
846 bool dir_lookup (const struct dir *, const char *name, struct inode **);
847 bool dir_add (struct dir *, const char *name, disk_sector_t);
848 diff -u src/filesys/file.c~ src/filesys/file.c
849 --- src/filesys/file.c~ 2005-06-16 21:50:20.000000000 -0700
850 +++ src/filesys/file.c 2005-06-16 21:02:34.000000000 -0700
852 #include "filesys/file.h"
854 +#include "filesys/directory.h"
855 #include "filesys/inode.h"
856 +#include "filesys/filesys.h"
857 #include "threads/malloc.h"
860 @@ -18,7 +20,7 @@ struct file *
861 file_open (struct inode *inode)
863 struct file *file = calloc (1, sizeof *file);
864 - if (inode != NULL && file != NULL)
865 + if (inode != NULL && file != NULL && inode_get_type (inode) == FILE_INODE)
869 diff -u src/filesys/filesys.c~ src/filesys/filesys.c
870 --- src/filesys/filesys.c~ 2005-06-16 21:50:20.000000000 -0700
871 +++ src/filesys/filesys.c 2005-06-16 21:03:07.000000000 -0700
876 +#include "filesys/cache.h"
877 #include "filesys/file.h"
878 #include "filesys/free-map.h"
879 #include "filesys/inode.h"
880 #include "filesys/directory.h"
881 #include "devices/disk.h"
882 +#include "threads/thread.h"
884 /* The disk that contains the filesystem. */
885 struct disk *filesys_disk;
886 @@ -23,6 +24,8 @@ filesys_init (bool format)
887 PANIC ("hd0:1 (hdb) not present, filesystem initialization failed");
895 @@ -37,6 +40,103 @@ void
902 +/* Extracts a file name part from *SRCP into PART,
903 + and updates *SRCP so that the next call will return the next
905 + Returns 1 if successful, 0 at end of string, -1 for a too-long
908 +get_next_part (char part[NAME_MAX], const char **srcp)
910 + const char *src = *srcp;
913 + /* Skip leading slashes.
914 + If it's all slashes, we're done. */
915 + while (*src == '/')
 920 + /* Copy up to NAME_MAX characters from SRC to DST.
921 + Add null terminator. */
922 + while (*src != '/' && *src != '\0')
924 + if (dst < part + NAME_MAX)
932 + /* Advance source pointer. */
937 +/* Resolves relative or absolute file NAME.
938 + Returns true if successful, false on failure.
939 + Stores the directory corresponding to the name into *DIRP,
940 + and the file name part into BASENAME. */
942 +resolve_name (const char *name,
943 + struct dir **dirp, char basename[NAME_MAX + 1])
945 + struct dir *dir = NULL;
946 + struct inode *inode;
948 + char part[NAME_MAX + 1], next_part[NAME_MAX + 1];
951 + /* Find starting directory. */
952 + if (name[0] == '/' || thread_current ()->wd == NULL)
954 + if (!dir_open_root (&dir))
959 + if (!dir_reopen (thread_current ()->wd))
961 + dir = thread_current ()->wd;
964 + /* Get first name part. */
966 + if (get_next_part (part, &cp) <= 0)
969 + /* As long as another part follows the current one,
970 + traverse down another directory. */
971 + while ((ok = get_next_part (next_part, &cp)) > 0)
973 + if (!dir_lookup (dir, part, &inode))
977 + if (!dir_open (inode, &dir))
980 + strlcpy (part, next_part, NAME_MAX + 1);
985 + /* Return our results. */
987 + strlcpy (basename, part, NAME_MAX + 1);
991 + /* Return failure. */
994 + basename[0] = '\0';
998 /* Creates a file named NAME with the given INITIAL_SIZE.
999 @@ -44,16 +144,17 @@ filesys_done (void)
1000 Fails if a file named NAME already exists,
1001 or if internal memory allocation fails. */
1003 -filesys_create (const char *name, off_t initial_size)
1004 +filesys_create (const char *name, off_t initial_size, enum inode_type type)
1007 + char basename[NAME_MAX + 1];
1008 disk_sector_t inode_sector = 0;
1009 - bool success = (dir_open_root (&dir)
1010 - && free_map_allocate (1, &inode_sector)
1011 - && inode_create (inode_sector, initial_size)
1012 - && dir_add (dir, name, inode_sector));
1013 - if (!success && inode_sector != 0)
1014 - free_map_release (inode_sector, 1);
1015 + bool success = (resolve_name (name, &dir, basename)
1016 + && free_map_allocate (&inode_sector)
1017 + && inode_create (inode_sector, initial_size, type)
1018 + && dir_add (dir, basename, inode_sector));
1019 + if (!success && inode_sector)
1020 + free_map_release (inode_sector);
1024 @@ -64,17 +165,18 @@ filesys_create (const char *name, off_t
1026 Fails if no file named NAME exists,
1027 or if an internal memory allocation fails. */
1030 filesys_open (const char *name)
1033 + char basename[NAME_MAX + 1];
1034 struct inode *inode = NULL;
1036 - if (dir_open_root (&dir))
1037 - dir_lookup (dir, name, &inode);
1038 + if (resolve_name (name, &dir, basename))
1039 + dir_lookup (dir, basename, &inode);
1042 - return file_open (inode);
1046 /* Deletes the file named NAME.
1047 @@ -85,13 +187,54 @@ bool
1048 filesys_remove (const char *name)
1050 struct dir *dir = NULL;
1051 - bool success = (dir_open_root (&dir)
1052 - && dir_remove (dir, name));
1053 + char basename[NAME_MAX + 1];
1054 + bool success = false;
1056 + if (resolve_name (name, &dir, basename))
1057 + success = dir_remove (dir, basename);
1063 +/* Change current directory to NAME.
1064 + Return true if successful, false on failure. */
1066 +filesys_chdir (const char *name)
1070 + /* Find new directory. */
1071 + if (*name == '\0')
1073 + else if (name[strspn (name, "/")] == '\0')
1075 + if (!dir_open_root (&dir))
1080 + char basename[NAME_MAX + 1];
1081 + struct inode *base_inode;
1082 + struct dir *base_dir;
1083 + if (!resolve_name (name, &dir, basename)
1084 + || !dir_lookup (dir, basename, &base_inode)
1085 + || !dir_open (base_inode, &base_dir))
1094 + /* Change current directory. */
1095 + dir_close (thread_current ()->wd);
1096 + thread_current ()->wd = dir;
1101 /* Prints a list of files in the filesystem to the system
1103 Returns true if successful, false on failure,
1104 @@ -99,13 +242,9 @@ filesys_remove (const char *name)
1108 - struct dir *dir = NULL;
1109 - bool success = dir_open_root (&dir);
1113 + dir_list (thread_current ()->wd);
1119 static void must_succeed_function (int, bool) NO_INLINE;
1120 @@ -128,8 +267,8 @@ filesys_self_test (void)
1122 /* Create file and check that it contains zeros
1123 throughout the created length. */
1124 - MUST_SUCCEED (filesys_create ("foo", sizeof s));
1125 - MUST_SUCCEED ((file = filesys_open ("foo")) != NULL);
1126 + MUST_SUCCEED (filesys_create ("foo", sizeof s, FILE_INODE));
1127 + MUST_SUCCEED ((file = file_open (filesys_open ("foo"))) != NULL);
1128 MUST_SUCCEED (file_read (file, s2, sizeof s2) == sizeof s2);
1129 MUST_SUCCEED (memcmp (s2, zeros, sizeof s) == 0);
1130 MUST_SUCCEED (file_tell (file) == sizeof s);
1131 @@ -137,7 +276,7 @@ filesys_self_test (void)
1134 /* Reopen file and write to it. */
1135 - MUST_SUCCEED ((file = filesys_open ("foo")) != NULL);
1136 + MUST_SUCCEED ((file = file_open (filesys_open ("foo"))) != NULL);
1137 MUST_SUCCEED (file_write (file, s, sizeof s) == sizeof s);
1138 MUST_SUCCEED (file_tell (file) == sizeof s);
1139 MUST_SUCCEED (file_length (file) == sizeof s);
1140 @@ -145,7 +284,7 @@ filesys_self_test (void)
1142 /* Reopen file and verify that it reads back correctly.
1143 Delete file while open to check proper semantics. */
1144 - MUST_SUCCEED ((file = filesys_open ("foo")) != NULL);
1145 + MUST_SUCCEED ((file = file_open (filesys_open ("foo"))) != NULL);
1146 MUST_SUCCEED (filesys_remove ("foo"));
1147 MUST_SUCCEED (filesys_open ("foo") == NULL);
1148 MUST_SUCCEED (file_read (file, s2, sizeof s) == sizeof s);
1149 @@ -172,9 +311,13 @@ static void
1152 printf ("Formatting filesystem...");
1154 + /* Set up free map. */
1156 - if (!dir_create (ROOT_DIR_SECTOR, 16))
1158 + /* Set up root directory. */
1159 + if (!inode_create (ROOT_DIR_SECTOR, 0, DIR_INODE))
1160 PANIC ("root directory creation failed");
1161 - free_map_close ();
1165 diff -u src/filesys/filesys.h~ src/filesys/filesys.h
1166 --- src/filesys/filesys.h~ 2005-06-16 21:50:20.000000000 -0700
1167 +++ src/filesys/filesys.h 2005-06-16 20:55:14.000000000 -0700
1170 #include <stdbool.h>
1171 #include "filesys/off_t.h"
1172 +#include "filesys/inode.h"
1174 /* Sectors of system file inodes. */
1175 #define FREE_MAP_SECTOR 0 /* Free map file inode sector. */
1176 @@ -13,8 +14,8 @@ extern struct disk *filesys_disk;
1178 void filesys_init (bool format);
1179 void filesys_done (void);
1180 -bool filesys_create (const char *name, off_t initial_size);
1181 -struct file *filesys_open (const char *name);
1182 +bool filesys_create (const char *name, off_t initial_size, enum inode_type);
1183 +struct inode *filesys_open (const char *name);
1184 bool filesys_remove (const char *name);
1185 bool filesys_chdir (const char *name);
1186 bool filesys_list (void);
1187 diff -u src/filesys/free-map.c~ src/filesys/free-map.c
1188 --- src/filesys/free-map.c~ 2005-06-16 21:50:20.000000000 -0700
1189 +++ src/filesys/free-map.c 2005-06-16 20:57:13.000000000 -0700
1192 #include "filesys/file.h"
1193 #include "filesys/filesys.h"
1194 -#include "filesys/inode.h"
1195 +#include "threads/synch.h"
1197 static struct file *free_map_file; /* Free map file. */
1198 static struct bitmap *free_map; /* Free map, one bit per disk sector. */
1199 +static struct lock free_map_lock; /* Mutual exclusion. */
1201 /* Initializes the free map. */
1203 free_map_init (void)
1205 + lock_init (&free_map_lock);
1207 free_map = bitmap_create (disk_size (filesys_disk));
1208 if (free_map == NULL)
1209 PANIC ("bitmap creation failed--disk is too large");
1210 @@ -19,33 +22,32 @@ free_map_init (void)
1211 bitmap_mark (free_map, ROOT_DIR_SECTOR);
1214 -/* Allocates CNT consecutive sectors from the free map and stores
1215 - the first into *SECTORP.
1216 - Returns true if successful, false if all sectors were
1217 +/* Allocates a sector from the free map and stores it into
1219 + Return true if successful, false if all sectors were
1222 -free_map_allocate (size_t cnt, disk_sector_t *sectorp)
1223 +free_map_allocate (disk_sector_t *sectorp)
1225 - disk_sector_t sector = bitmap_scan_and_flip (free_map, 0, cnt, false);
1226 - if (sector != BITMAP_ERROR
1227 - && free_map_file != NULL
1228 - && !bitmap_write (free_map, free_map_file))
1230 - bitmap_set_multiple (free_map, sector, cnt, false);
1231 - sector = BITMAP_ERROR;
1233 - if (sector != BITMAP_ERROR)
1236 + lock_acquire (&free_map_lock);
1237 + sector = bitmap_scan_and_flip (free_map, 0, 1, false);
1238 + lock_release (&free_map_lock);
1240 + if (sector != BITMAP_ERROR)
1242 return sector != BITMAP_ERROR;
1245 -/* Makes CNT sectors starting at SECTOR available for use. */
1246 +/* Makes SECTOR available for use. */
1248 -free_map_release (disk_sector_t sector, size_t cnt)
1249 +free_map_release (disk_sector_t sector)
1251 - ASSERT (bitmap_all (free_map, sector, cnt));
1252 - bitmap_set_multiple (free_map, sector, cnt, false);
1253 - bitmap_write (free_map, free_map_file);
1254 + lock_acquire (&free_map_lock);
1255 + ASSERT (bitmap_test (free_map, sector));
1256 + bitmap_reset (free_map, sector);
1257 + lock_release (&free_map_lock);
1260 /* Opens the free map file and reads it from disk. */
1261 @@ -63,6 +65,8 @@ free_map_open (void)
1263 free_map_close (void)
1265 + if (!bitmap_write (free_map, free_map_file))
1266 + PANIC ("can't write free map");
1267 file_close (free_map_file);
1270 @@ -72,7 +76,7 @@ void
1271 free_map_create (void)
1274 - if (!inode_create (FREE_MAP_SECTOR, bitmap_file_size (free_map)))
1275 + if (!inode_create (FREE_MAP_SECTOR, bitmap_file_size (free_map), FILE_INODE))
1276 PANIC ("free map creation failed");
1278 /* Write bitmap to file. */
1279 @@ -81,4 +85,5 @@ free_map_create (void)
1280 PANIC ("can't open free map");
1281 if (!bitmap_write (free_map, free_map_file))
1282 PANIC ("can't write free map");
1283 + file_close (free_map_file);
1285 diff -u src/filesys/free-map.h~ src/filesys/free-map.h
1286 --- src/filesys/free-map.h~ 2005-06-16 21:50:20.000000000 -0700
1287 +++ src/filesys/free-map.h 2005-06-16 20:48:04.000000000 -0700
1288 @@ -11,7 +11,7 @@ void free_map_create (void);
1289 void free_map_open (void);
1290 void free_map_close (void);
1292 -bool free_map_allocate (size_t, disk_sector_t *);
1293 -void free_map_release (disk_sector_t, size_t);
1294 +bool free_map_allocate (disk_sector_t *);
1295 +void free_map_release (disk_sector_t);
1297 #endif /* filesys/free-map.h */
1298 diff -u src/filesys/fsutil.c~ src/filesys/fsutil.c
1299 --- src/filesys/fsutil.c~ 2005-06-16 21:50:20.000000000 -0700
1300 +++ src/filesys/fsutil.c 2005-06-16 20:55:13.000000000 -0700
1301 @@ -30,7 +30,7 @@ fsutil_cat (char **argv)
1304 printf ("Printing '%s' to the console...\n", filename);
1305 - file = filesys_open (filename);
1306 + file = file_open (filesys_open (filename));
1308 PANIC ("%s: open failed", filename);
1309 buffer = palloc_get_page (PAL_ASSERT);
1310 @@ -102,9 +102,9 @@ fsutil_put (char **argv)
1311 PANIC ("%s: invalid file size %d", filename, size);
1313 /* Create destination file. */
1314 - if (!filesys_create (filename, size))
1315 + if (!filesys_create (filename, size, FILE_INODE))
1316 PANIC ("%s: create failed", filename);
1317 - dst = filesys_open (filename);
1318 + dst = file_open (filesys_open (filename));
1320 PANIC ("%s: open failed", filename);
1322 @@ -154,7 +154,7 @@ fsutil_get (char **argv)
1323 PANIC ("couldn't allocate buffer");
1325 /* Open source file. */
1326 - src = filesys_open (filename);
1327 + src = file_open (filesys_open (filename));
1329 PANIC ("%s: open failed", filename);
1330 size = file_length (src);
1331 diff -u src/filesys/inode.c~ src/filesys/inode.c
1332 --- src/filesys/inode.c~ 2005-06-16 21:50:20.000000000 -0700
1333 +++ src/filesys/inode.c 2005-06-16 21:11:24.000000000 -0700
1335 #include "filesys/inode.h"
1336 +#include <bitmap.h>
1342 +#include "filesys/cache.h"
1343 #include "filesys/filesys.h"
1344 #include "filesys/free-map.h"
1345 #include "threads/malloc.h"
1346 +#include "threads/synch.h"
1348 /* Identifies an inode. */
1349 #define INODE_MAGIC 0x494e4f44
1351 +#define DIRECT_CNT 123
1352 +#define INDIRECT_CNT 1
1353 +#define DBL_INDIRECT_CNT 1
1354 +#define SECTOR_CNT (DIRECT_CNT + INDIRECT_CNT + DBL_INDIRECT_CNT)
1356 +#define PTRS_PER_SECTOR ((off_t) (DISK_SECTOR_SIZE / sizeof (disk_sector_t)))
1357 +#define INODE_SPAN ((DIRECT_CNT \
1358 + + PTRS_PER_SECTOR * INDIRECT_CNT \
1359 + + PTRS_PER_SECTOR * PTRS_PER_SECTOR * DBL_INDIRECT_CNT) \
1360 + * DISK_SECTOR_SIZE)
1363 Must be exactly DISK_SECTOR_SIZE bytes long. */
1366 - disk_sector_t start; /* First data sector. */
1367 + disk_sector_t sectors[SECTOR_CNT]; /* Sectors. */
1368 + enum inode_type type; /* FILE_INODE or DIR_INODE. */
1369 off_t length; /* File size in bytes. */
1370 unsigned magic; /* Magic number. */
1371 - uint32_t unused[125]; /* Not used. */
1374 -/* Returns the number of sectors to allocate for an inode SIZE
1375 +/* Returns the number of sectors to allocate for an inode SIZE
1377 static inline size_t
1378 bytes_to_sectors (off_t size)
1379 @@ -35,33 +49,29 @@ struct inode
1380 disk_sector_t sector; /* Sector number of disk location. */
1381 int open_cnt; /* Number of openers. */
1382 bool removed; /* True if deleted, false otherwise. */
1384 + /* Denying writes. */
1385 + struct lock deny_write_lock; /* Protects members below. */
1386 + struct condition no_writers_cond; /* Signaled when no writers. */
1387 int deny_write_cnt; /* 0: writes ok, >0: deny writes. */
1388 - struct inode_disk data; /* Inode content. */
1389 + int writer_cnt; /* Number of writers. */
1392 -/* Returns the disk sector that contains byte offset POS within
1394 - Returns -1 if INODE does not contain data for a byte at offset
1396 -static disk_sector_t
1397 -byte_to_sector (const struct inode *inode, off_t pos)
1399 - ASSERT (inode != NULL);
1400 - if (pos < inode->data.length)
1401 - return inode->data.start + pos / DISK_SECTOR_SIZE;
1406 /* List of open inodes, so that opening a single inode twice
1407 returns the same `struct inode'. */
1408 static struct list open_inodes;
1410 +/* Controls access to open_inodes list. */
1411 +static struct lock open_inodes_lock;
1413 +static void deallocate_inode (const struct inode *);
1415 /* Initializes the inode module. */
1419 list_init (&open_inodes);
1420 + lock_init (&open_inodes_lock);
1423 /* Initializes an inode with LENGTH bytes of data and
1424 @@ -70,35 +80,32 @@ inode_init (void)
1425 Returns true if successful.
1426 Returns false if memory or disk allocation fails. */
1428 -inode_create (disk_sector_t sector, off_t length)
1429 +inode_create (disk_sector_t sector, off_t length, enum inode_type type)
1431 - struct inode_disk *disk_inode = NULL;
1432 - bool success = false;
1433 + struct cache_block *block;
1434 + struct inode_disk *disk_inode;
1437 ASSERT (length >= 0);
1439 + block = cache_lock (sector, EXCLUSIVE);
1440 ASSERT (sizeof *disk_inode == DISK_SECTOR_SIZE);
1441 + disk_inode = cache_zero (block);
1442 + disk_inode->type = type;
1443 + disk_inode->length = 0;
1444 + disk_inode->magic = INODE_MAGIC;
1445 + cache_dirty (block);
1446 + cache_unlock (block);
1448 - disk_inode = calloc (1, sizeof *disk_inode);
1449 - if (disk_inode != NULL)
1452 - size_t sectors = bytes_to_sectors (length);
1453 - disk_inode->length = length;
1454 - disk_inode->magic = INODE_MAGIC;
1455 - if (free_map_allocate (sectors, &disk_inode->start))
1457 - disk_write (filesys_disk, sector, disk_inode);
1460 - static char zeros[DISK_SECTOR_SIZE];
1463 - for (i = 0; i < sectors; i++)
1464 - disk_write (filesys_disk, disk_inode->start + i, zeros);
1468 - free (disk_inode);
1469 + struct inode *inode = inode_open (sector);
1470 + success = inode != NULL && inode_write_at (inode, "", 1, length - 1);
1471 + inode_close (inode);
1479 @@ -112,6 +119,7 @@ inode_open (disk_sector_t sector)
1480 struct inode *inode;
1482 /* Check whether this inode is already open. */
1483 + lock_acquire (&open_inodes_lock);
1484 for (e = list_begin (&open_inodes); e != list_end (&open_inodes);
1487 @@ -119,22 +127,26 @@ inode_open (disk_sector_t sector)
1488 if (inode->sector == sector)
1490 inode_reopen (inode);
1496 /* Allocate memory. */
1497 inode = malloc (sizeof *inode);
1503 list_push_front (&open_inodes, &inode->elem);
1504 inode->sector = sector;
1505 inode->open_cnt = 1;
1506 inode->deny_write_cnt = 0;
1507 + lock_init (&inode->deny_write_lock);
1508 + cond_init (&inode->no_writers_cond);
1509 inode->removed = false;
1510 - disk_read (filesys_disk, inode->sector, &inode->data);
1513 + lock_release (&open_inodes_lock);
1517 @@ -147,6 +159,17 @@ inode_reopen (struct inode *inode)
1521 +/* Returns the type of INODE. */
1523 +inode_get_type (const struct inode *inode)
1525 + struct cache_block *inode_block = cache_lock (inode->sector, NON_EXCLUSIVE);
1526 + struct inode_disk *disk_inode = cache_read (inode_block);
1527 + enum inode_type type = disk_inode->type;
1528 + cache_unlock (inode_block);
1532 /* Closes INODE and writes it to disk.
1533 If this was the last reference to INODE, frees its memory.
1534 If INODE was also a removed inode, frees its blocks. */
1535 @@ -158,21 +181,60 @@ inode_close (struct inode *inode)
1538 /* Release resources if this was the last opener. */
1539 + lock_acquire (&open_inodes_lock);
1540 if (--inode->open_cnt == 0)
1542 /* Remove from inode list and release lock. */
1543 list_remove (&inode->elem);
1544 + lock_release (&open_inodes_lock);
1546 /* Deallocate blocks if removed. */
1549 - free_map_release (inode->sector, 1);
1550 - free_map_release (inode->data.start,
1551 - bytes_to_sectors (inode->data.length));
1553 + deallocate_inode (inode);
1558 + lock_release (&open_inodes_lock);
1561 +/* Deallocates SECTOR and anything it points to recursively.
1562 + LEVEL is 2 if SECTOR is doubly indirect,
1563 + or 1 if SECTOR is indirect,
1564 + or 0 if SECTOR is a data sector. */
1566 +deallocate_recursive (disk_sector_t sector, int level)
1570 + struct cache_block *block = cache_lock (sector, EXCLUSIVE);
1571 + disk_sector_t *pointers = cache_read (block);
1573 + for (i = 0; i < PTRS_PER_SECTOR; i++)
1575 + deallocate_recursive (sector, level - 1);
1576 + cache_unlock (block);
1579 + cache_free (sector);
1580 + free_map_release (sector);
1583 +/* Deallocates the blocks allocated for INODE. */
1585 +deallocate_inode (const struct inode *inode)
1587 + struct cache_block *block = cache_lock (inode->sector, EXCLUSIVE);
1588 + struct inode_disk *disk_inode = cache_read (block);
1590 + for (i = 0; i < SECTOR_CNT; i++)
1591 + if (disk_inode->sectors[i])
1593 + int level = (i >= DIRECT_CNT) + (i >= DIRECT_CNT + INDIRECT_CNT);
1594 + deallocate_recursive (disk_inode->sectors[i], level);
1596 + cache_unlock (block);
1597 + deallocate_recursive (inode->sector, 0);
1600 /* Marks INODE to be deleted when it is closed by the last caller who
1601 @@ -181,6 +245,156 @@ inode_remove (struct inode *inode)
1602 inode->removed = true;
1605 +/* Translates SECTOR_IDX into a sequence of block indexes in
1606 + OFFSETS and sets *OFFSET_CNT to the number of offsets. */
1608 +calculate_indices (off_t sector_idx, size_t offsets[], size_t *offset_cnt)
1610 + /* Handle direct blocks. */
1611 + if (sector_idx < DIRECT_CNT)
1613 + offsets[0] = sector_idx;
1617 + sector_idx -= DIRECT_CNT;
1619 + /* Handle indirect blocks. */
1620 + if (sector_idx < PTRS_PER_SECTOR * INDIRECT_CNT)
1622 + offsets[0] = DIRECT_CNT + sector_idx / PTRS_PER_SECTOR;
1623 + offsets[1] = sector_idx % PTRS_PER_SECTOR;
1627 + sector_idx -= PTRS_PER_SECTOR * INDIRECT_CNT;
1629 + /* Handle doubly indirect blocks. */
1630 + if (sector_idx < DBL_INDIRECT_CNT * PTRS_PER_SECTOR * PTRS_PER_SECTOR)
1632 + offsets[0] = (DIRECT_CNT + INDIRECT_CNT
1633 + + sector_idx / (PTRS_PER_SECTOR * PTRS_PER_SECTOR));
1634 + offsets[1] = sector_idx / PTRS_PER_SECTOR;
1635 + offsets[2] = sector_idx % PTRS_PER_SECTOR;
1642 +/* Retrieves the data block for the given byte OFFSET in INODE,
1643 + setting *DATA_BLOCK to the block.
1644 + Returns true if successful, false on failure.
1645 + If ALLOCATE is false, then missing blocks will be successful
1646 + with *DATA_BLOCK set to a null pointer.
1647 + If ALLOCATE is true, then missing blocks will be allocated.
1648 + The block returned will be locked, normally non-exclusively,
1649 + but a newly allocated block will have an exclusive lock. */
1651 +get_data_block (struct inode *inode, off_t offset, bool allocate,
1652 + struct cache_block **data_block)
1654 + disk_sector_t this_level_sector;
1655 + size_t offsets[3];
1656 + size_t offset_cnt;
1659 + ASSERT (offset >= 0);
1661 + calculate_indices (offset / DISK_SECTOR_SIZE, offsets, &offset_cnt);
1663 + this_level_sector = inode->sector;
1666 + struct cache_block *this_level_block;
1667 + uint32_t *this_level_data;
1669 + struct cache_block *next_level_block;
1671 + /* Check whether the block for the next level is allocated. */
1672 + this_level_block = cache_lock (this_level_sector, NON_EXCLUSIVE);
1673 + this_level_data = cache_read (this_level_block);
1674 + if (this_level_data[offsets[level]] != 0)
1676 + /* Yes, it's allocated. Advance to next level. */
1677 + this_level_sector = this_level_data[offsets[level]];
1679 + if (++level == offset_cnt)
1681 + /* We hit the data block.
1683 + if ((level == 0 && offsets[level] + 1 < DIRECT_CNT)
1684 + || (level > 0 && offsets[level] + 1 < PTRS_PER_SECTOR))
1686 + uint32_t next_sector = this_level_data[offsets[level] + 1];
1687 + if (next_sector && next_sector < disk_size (filesys_disk))
1688 + cache_readahead (next_sector);
1690 + cache_unlock (this_level_block);
1692 + /* Return block. */
1693 + *data_block = cache_lock (this_level_sector, NON_EXCLUSIVE);
1696 + cache_unlock (this_level_block);
1699 + cache_unlock (this_level_block);
1701 + /* No block is allocated. Nothing is locked.
1702 + If we're not allocating new blocks, then this is
1703 + "success" (with all-zero data). */
1706 + *data_block = NULL;
1710 + /* We need to allocate a new block.
1711 + Grab an exclusive lock on this level's block so we can
1712 + insert the new block. */
1713 + this_level_block = cache_lock (this_level_sector, EXCLUSIVE);
1714 + this_level_data = cache_read (this_level_block);
1716 + /* Since we released this level's block, someone else might
1717 + have allocated the block in the meantime. Recheck. */
1718 + if (this_level_data[offsets[level]] != 0)
1720 + cache_unlock (this_level_block);
1724 + /* Allocate the new block. */
1725 + if (!free_map_allocate (&this_level_data[offsets[level]]))
1727 + cache_unlock (this_level_block);
1728 + *data_block = NULL;
1731 + cache_dirty (this_level_block);
1733 + /* Lock and clear the new block. */
1734 + next_level_block = cache_lock (this_level_data[offsets[level]],
1736 + cache_zero (next_level_block);
1738 + /* Release this level's block. No one else can access the
1739 + new block yet, because we have an exclusive lock on it. */
1740 + cache_unlock (this_level_block);
1742 + /* If this is the final level, then return the new block. */
1743 + if (level == offset_cnt - 1)
1745 + *data_block = next_level_block;
1749 + /* Otherwise, release the new block and go around again to
1750 + follow the new pointer. */
1751 + cache_unlock (next_level_block);
1755 /* Reads SIZE bytes from INODE into BUFFER, starting at position OFFSET.
1756 Returns the number of bytes actually read, which may be less
1757 than SIZE if an error occurs or end of file is reached. */
1758 @@ -189,13 +403,12 @@ inode_read_at (struct inode *inode, void
1760 uint8_t *buffer = buffer_;
1761 off_t bytes_read = 0;
1762 - uint8_t *bounce = NULL;
1766 - /* Disk sector to read, starting byte offset within sector. */
1767 - disk_sector_t sector_idx = byte_to_sector (inode, offset);
1768 + /* Sector to read, starting byte offset within sector, sector data. */
1769 int sector_ofs = offset % DISK_SECTOR_SIZE;
1770 + struct cache_block *block;
1772 /* Bytes left in inode, bytes left in sector, lesser of the two. */
1773 off_t inode_left = inode_length (inode) - offset;
1774 @@ -204,26 +417,16 @@ inode_read_at (struct inode *inode, void
1776 /* Number of bytes to actually copy out of this sector. */
1777 int chunk_size = size < min_left ? size : min_left;
1778 - if (chunk_size <= 0)
1779 + if (chunk_size <= 0 || !get_data_block (inode, offset, false, &block))
1782 - if (sector_ofs == 0 && chunk_size == DISK_SECTOR_SIZE)
1784 - /* Read full sector directly into caller's buffer. */
1785 - disk_read (filesys_disk, sector_idx, buffer + bytes_read);
1787 + if (block == NULL)
1788 + memset (buffer + bytes_read, 0, chunk_size);
1791 - /* Read sector into bounce buffer, then partially copy
1792 - into caller's buffer. */
1793 - if (bounce == NULL)
1795 - bounce = malloc (DISK_SECTOR_SIZE);
1796 - if (bounce == NULL)
1799 - disk_read (filesys_disk, sector_idx, bounce);
1800 - memcpy (buffer + bytes_read, bounce + sector_ofs, chunk_size);
1801 + const uint8_t *sector_data = cache_read (block);
1802 + memcpy (buffer + bytes_read, sector_data + sector_ofs, chunk_size);
1803 + cache_unlock (block);
1807 @@ -231,75 +434,84 @@ inode_read_at (struct inode *inode, void
1808 offset += chunk_size;
1809 bytes_read += chunk_size;
1816 +/* Extends INODE to be at least LENGTH bytes long. */
1818 +extend_file (struct inode *inode, off_t length)
1820 + if (length > inode_length (inode))
1822 + struct cache_block *inode_block = cache_lock (inode->sector, EXCLUSIVE);
1823 + struct inode_disk *disk_inode = cache_read (inode_block);
1824 + if (length > disk_inode->length)
1826 + disk_inode->length = length;
1827 + cache_dirty (inode_block);
1829 + cache_unlock (inode_block);
1833 /* Writes SIZE bytes from BUFFER into INODE, starting at OFFSET.
1834 Returns the number of bytes actually written, which may be
1835 less than SIZE if end of file is reached or an error occurs.
1836 - (Normally a write at end of file would extend the inode, but
1837 - growth is not yet implemented.) */
1838 + (A write at or past end of file extends the file, up to the
1839 + maximum inode span.) */
1841 inode_write_at (struct inode *inode, const void *buffer_, off_t size,
1844 const uint8_t *buffer = buffer_;
1845 off_t bytes_written = 0;
1846 - uint8_t *bounce = NULL;
1848 - if (inode->deny_write_cnt)
1850 + /* Don't write if writes are denied. */
1851 + lock_acquire (&inode->deny_write_lock);
1852 + if (inode->deny_write_cnt)
1854 + lock_release (&inode->deny_write_lock);
1857 + inode->writer_cnt++;
1858 + lock_release (&inode->deny_write_lock);
1862 - /* Sector to write, starting byte offset within sector. */
1863 - off_t sector_idx = byte_to_sector (inode, offset);
1864 + /* Sector to write, starting byte offset within sector, sector data. */
1865 int sector_ofs = offset % DISK_SECTOR_SIZE;
1866 + struct cache_block *block;
1867 + uint8_t *sector_data;
1869 - /* Bytes left in inode, bytes left in sector, lesser of the two. */
1870 - off_t inode_left = inode_length (inode) - offset;
1871 + /* Bytes to max inode size, bytes left in sector, lesser of the two. */
1872 + off_t inode_left = INODE_SPAN - offset;
1873 int sector_left = DISK_SECTOR_SIZE - sector_ofs;
1874 int min_left = inode_left < sector_left ? inode_left : sector_left;
1876 /* Number of bytes to actually write into this sector. */
1877 int chunk_size = size < min_left ? size : min_left;
1878 - if (chunk_size <= 0)
1881 - if (sector_ofs == 0 && chunk_size == DISK_SECTOR_SIZE)
1883 - /* Write full sector directly to disk. */
1884 - disk_write (filesys_disk, sector_idx, buffer + bytes_written);
1888 - /* We need a bounce buffer. */
1889 - if (bounce == NULL)
1891 - bounce = malloc (DISK_SECTOR_SIZE);
1892 - if (bounce == NULL)
1895 + if (chunk_size <= 0 || !get_data_block (inode, offset, true, &block))
1898 - /* If the sector contains data before or after the chunk
1899 - we're writing, then we need to read in the sector
1900 - first. Otherwise we start with a sector of all zeros. */
1901 - if (sector_ofs > 0 || chunk_size < sector_left)
1902 - disk_read (filesys_disk, sector_idx, bounce);
1904 - memset (bounce, 0, DISK_SECTOR_SIZE);
1905 - memcpy (bounce + sector_ofs, buffer + bytes_written, chunk_size);
1906 - disk_write (filesys_disk, sector_idx, bounce);
1908 + sector_data = cache_read (block);
1909 + memcpy (sector_data + sector_ofs, buffer + bytes_written, chunk_size);
1910 + cache_dirty (block);
1911 + cache_unlock (block);
1915 offset += chunk_size;
1916 bytes_written += chunk_size;
1920 + extend_file (inode, offset);
1922 + lock_acquire (&inode->deny_write_lock);
1923 + if (--inode->writer_cnt == 0)
1924 + cond_signal (&inode->no_writers_cond, &inode->deny_write_lock);
1925 + lock_release (&inode->deny_write_lock);
1927 return bytes_written;
1929 @@ -309,8 +521,12 @@ inode_write_at (struct inode *inode, con
1931 inode_deny_write (struct inode *inode)
1933 + lock_acquire (&inode->deny_write_lock);
1934 + while (inode->writer_cnt > 0)
1935 + cond_wait (&inode->no_writers_cond, &inode->deny_write_lock);
1936 + ASSERT (inode->deny_write_cnt < inode->open_cnt);
1937 inode->deny_write_cnt++;
1938 - ASSERT (inode->deny_write_cnt <= inode->open_cnt);
1939 + lock_release (&inode->deny_write_lock);
1942 /* Re-enables writes to INODE.
1943 @@ -319,14 +535,33 @@ inode_deny_write (struct inode *inode)
1945 inode_allow_write (struct inode *inode)
1947 + lock_acquire (&inode->deny_write_lock);
1948 ASSERT (inode->deny_write_cnt > 0);
1949 ASSERT (inode->deny_write_cnt <= inode->open_cnt);
1950 inode->deny_write_cnt--;
1951 + lock_release (&inode->deny_write_lock);
1954 /* Returns the length, in bytes, of INODE's data. */
1956 inode_length (const struct inode *inode)
1958 - return inode->data.length;
1959 + struct cache_block *inode_block = cache_lock (inode->sector, NON_EXCLUSIVE);
1960 + struct inode_disk *disk_inode = cache_read (inode_block);
1961 + off_t length = disk_inode->length;
1962 + cache_unlock (inode_block);
1966 +/* Returns the number of openers. */
1968 +inode_open_cnt (const struct inode *inode)
1972 + lock_acquire (&open_inodes_lock);
1973 + open_cnt = inode->open_cnt;
1974 + lock_release (&open_inodes_lock);
1978 diff -u src/filesys/inode.h~ src/filesys/inode.h
1979 --- src/filesys/inode.h~ 2005-06-16 21:50:20.000000000 -0700
1980 +++ src/filesys/inode.h 2005-06-16 20:53:47.000000000 -0700
1985 +/* Type of an inode. */
1988 + FILE_INODE, /* Ordinary file. */
1989 + DIR_INODE /* Directory. */
1992 void inode_init (void);
1993 -bool inode_create (disk_sector_t, off_t);
1994 +bool inode_create (disk_sector_t, off_t, enum inode_type);
1995 struct inode *inode_open (disk_sector_t);
1996 struct inode *inode_reopen (struct inode *);
1997 +enum inode_type inode_get_type (const struct inode *);
1998 void inode_close (struct inode *);
1999 void inode_remove (struct inode *);
2000 off_t inode_read_at (struct inode *, void *, off_t size, off_t offset);
2001 @@ -18,5 +26,6 @@ off_t inode_write_at (struct inode *, co
2002 void inode_deny_write (struct inode *);
2003 void inode_allow_write (struct inode *);
2004 off_t inode_length (const struct inode *);
2005 +int inode_open_cnt (const struct inode *);
2007 #endif /* filesys/inode.h */
2008 diff -u src/lib/kernel/bitmap.h~ src/lib/kernel/bitmap.h
2009 diff -u src/threads/init.c~ src/threads/init.c
2010 --- src/threads/init.c~ 2005-06-14 14:04:06.000000000 -0700
2011 +++ src/threads/init.c 2005-06-16 15:09:31.000000000 -0700
2013 #include "filesys/filesys.h"
2014 #include "filesys/fsutil.h"
2016 +#include "vm/frame.h"
2017 +#include "vm/swap.h"
2019 /* Amount of physical memory, in 4 kB pages. */
2021 @@ -131,6 +133,9 @@ main (void)
2022 filesys_init (format_filesys);
2028 printf ("Boot complete.\n");
2030 /* Run actions specified on kernel command line. */
2031 diff -u src/threads/interrupt.c~ src/threads/interrupt.c
2032 --- src/threads/interrupt.c~ 2005-06-15 15:22:43.000000000 -0700
2033 +++ src/threads/interrupt.c 2005-06-16 15:09:31.000000000 -0700
2034 @@ -343,6 +343,8 @@ intr_handler (struct intr_frame *frame)
2035 in_external_intr = true;
2036 yield_on_return = false;
2039 + thread_current ()->user_esp = frame->esp;
2041 /* Invoke the interrupt's handler.
2042 If there is no handler, invoke the unexpected interrupt
2043 diff -u src/threads/thread.c~ src/threads/thread.c
2044 --- src/threads/thread.c~ 2005-06-15 14:41:47.000000000 -0700
2045 +++ src/threads/thread.c 2005-06-16 15:09:31.000000000 -0700
2047 #include "threads/synch.h"
2049 #include "userprog/process.h"
2050 +#include "userprog/syscall.h"
2053 /* Random value for struct thread's `magic' member.
2054 @@ -55,7 +56,8 @@ static void kernel_thread (thread_func *
2055 static void idle (void *aux UNUSED);
2056 static struct thread *running_thread (void);
2057 static struct thread *next_thread_to_run (void);
2058 -static void init_thread (struct thread *, const char *name, int priority);
2059 +static void init_thread (struct thread *, const char *name, int priority,
2061 static bool is_thread (struct thread *) UNUSED;
2062 static void *alloc_frame (struct thread *, size_t size);
2063 static void schedule (void);
2064 @@ -82,9 +84,8 @@ thread_init (void)
2066 /* Set up a thread structure for the running thread. */
2067 initial_thread = running_thread ();
2068 - init_thread (initial_thread, "main", PRI_DEFAULT);
2069 + init_thread (initial_thread, "main", PRI_DEFAULT, 0);
2070 initial_thread->status = THREAD_RUNNING;
2071 - initial_thread->tid = allocate_tid ();
2074 /* Starts preemptive thread scheduling by enabling interrupts.
2075 @@ -158,8 +159,8 @@ thread_create (const char *name, int pri
2078 /* Initialize thread. */
2079 - init_thread (t, name, priority);
2080 - tid = t->tid = allocate_tid ();
2081 + init_thread (t, name, priority, allocate_tid ());
2084 /* Stack frame for kernel_thread(). */
2085 kf = alloc_frame (t, sizeof *kf);
2086 @@ -252,16 +253,19 @@ thread_tid (void)
2090 + struct thread *t = thread_current ();
2092 ASSERT (!intr_context ());
2100 /* Just set our status to dying and schedule another process.
2101 We will be destroyed during the call to schedule_tail(). */
2103 - thread_current ()->status = THREAD_DYING;
2104 + t->status = THREAD_DYING;
2108 @@ -390,17 +394,29 @@ is_thread (struct thread *t)
2109 /* Does basic initialization of T as a blocked thread named
2112 -init_thread (struct thread *t, const char *name, int priority)
2113 +init_thread (struct thread *t, const char *name, int priority, tid_t tid)
2116 ASSERT (PRI_MIN <= priority && priority <= PRI_MAX);
2117 ASSERT (name != NULL);
2119 memset (t, 0, sizeof *t);
2121 t->status = THREAD_BLOCKED;
2122 strlcpy (t->name, name, sizeof t->name);
2123 t->stack = (uint8_t *) t + PGSIZE;
2124 t->priority = priority;
2125 + t->exit_code = -1;
2126 + t->wait_status = NULL;
2127 + list_init (&t->children);
2128 + sema_init (&t->timer_sema, 0);
2129 + t->pagedir = NULL;
2131 + t->bin_file = NULL;
2132 + list_init (&t->fds);
2133 + list_init (&t->mappings);
2134 + t->next_handle = 2;
2136 t->magic = THREAD_MAGIC;
2139 diff -u src/threads/thread.h~ src/threads/thread.h
2140 --- src/threads/thread.h~ 2005-06-15 14:49:06.000000000 -0700
2141 +++ src/threads/thread.h 2005-06-16 15:09:31.000000000 -0700
2143 #define THREADS_THREAD_H
2149 +#include "threads/synch.h"
2151 /* States in a thread's life cycle. */
2153 @@ -89,18 +91,50 @@ struct thread
2154 uint8_t *stack; /* Saved stack pointer. */
2155 int priority; /* Priority. */
2157 + /* Owned by process.c. */
2158 + int exit_code; /* Exit code. */
2159 + struct wait_status *wait_status; /* This process's completion status. */
2160 + struct list children; /* Completion status of children. */
2162 /* Shared between thread.c and synch.c. */
2163 struct list_elem elem; /* List element. */
2166 + /* Alarm clock. */
2167 + int64_t wakeup_time; /* Time to wake this thread up. */
2168 + struct list_elem timer_elem; /* Element in timer_wait_list. */
2169 + struct semaphore timer_sema; /* Semaphore. */
2171 /* Owned by userprog/process.c. */
2172 uint32_t *pagedir; /* Page directory. */
2174 + struct hash *pages; /* Page table. */
2175 + struct file *bin_file; /* The binary executable. */
2177 + /* Owned by syscall.c. */
2178 + struct list fds; /* List of file descriptors. */
2179 + struct list mappings; /* Memory-mapped files. */
2180 + int next_handle; /* Next handle value. */
2181 + void *user_esp; /* User's stack pointer. */
2182 + struct dir *wd; /* Working directory. */
2184 /* Owned by thread.c. */
2185 unsigned magic; /* Detects stack overflow. */
2188 +/* Tracks the completion of a process.
2189 + Reference held by both the parent, in its `children' list,
2190 + and by the child, in its `wait_status' pointer. */
2193 + struct list_elem elem; /* `children' list element. */
2194 + struct lock lock; /* Protects ref_cnt. */
2195 + int ref_cnt; /* 2=child and parent both alive,
2196 + 1=either child or parent alive,
2197 + 0=child and parent both dead. */
2198 + tid_t tid; /* Child thread id. */
2199 + int exit_code; /* Child exit code, if dead. */
2200 + struct semaphore dead; /* 1=child alive, 0=child dead. */
2203 void thread_init (void);
2204 void thread_start (void);
2206 diff -u src/userprog/exception.c~ src/userprog/exception.c
2207 --- src/userprog/exception.c~ 2005-06-15 15:14:10.000000000 -0700
2208 +++ src/userprog/exception.c 2005-06-16 15:09:31.000000000 -0700
2210 #include "userprog/gdt.h"
2211 #include "threads/interrupt.h"
2212 #include "threads/thread.h"
2213 +#include "vm/page.h"
2215 /* Number of page faults processed. */
2216 static long long page_fault_cnt;
2217 @@ -153,9 +154,14 @@ page_fault (struct intr_frame *f)
2218 write = (f->error_code & PF_W) != 0;
2219 user = (f->error_code & PF_U) != 0;
2221 - /* To implement virtual memory, delete the rest of the function
2222 - body, and replace it with code that brings in the page to
2223 - which fault_addr refers. */
2224 + /* Allow the pager to try to handle it. */
2225 + if (user && not_present)
2227 + if (!page_in (fault_addr))
2232 printf ("Page fault at %p: %s error %s page in %s context.\n",
2234 not_present ? "not present" : "rights violation",
2235 diff -u src/userprog/pagedir.c~ src/userprog/pagedir.c
2236 --- src/userprog/pagedir.c~ 2005-06-14 13:16:22.000000000 -0700
2237 +++ src/userprog/pagedir.c 2005-06-16 15:09:31.000000000 -0700
2238 @@ -35,15 +35,7 @@ pagedir_destroy (uint32_t *pd)
2239 ASSERT (pd != base_page_dir);
2240 for (pde = pd; pde < pd + pd_no (PHYS_BASE); pde++)
2243 - uint32_t *pt = pde_get_pt (*pde);
2246 - for (pte = pt; pte < pt + PGSIZE / sizeof *pte; pte++)
2248 - palloc_free_page (pte_get_page (*pte));
2249 - palloc_free_page (pt);
2251 + palloc_free_page (pde_get_pt (*pde));
2252 palloc_free_page (pd);
2255 diff -u src/userprog/process.c~ src/userprog/process.c
2256 --- src/userprog/process.c~ 2005-06-14 13:09:39.000000000 -0700
2257 +++ src/userprog/process.c 2005-06-16 15:09:31.000000000 -0700
2259 #include "threads/init.h"
2260 #include "threads/interrupt.h"
2261 #include "threads/mmu.h"
2262 +#include "threads/malloc.h"
2263 #include "threads/palloc.h"
2264 #include "threads/thread.h"
2265 +#include "vm/page.h"
2266 +#include "vm/frame.h"
2268 static thread_func execute_thread NO_RETURN;
2269 -static bool load (const char *cmdline, void (**eip) (void), void **esp);
2270 +static bool load (const char *cmd_line, void (**eip) (void), void **esp);
2272 +/* Data structure shared between process_execute() in the
2273 + invoking thread and execute_thread() in the newly invoked
2277 + const char *filename; /* Program to load. */
2278 + struct semaphore load_done; /* "Up"ed when loading complete. */
2279 + struct wait_status *wait_status; /* Child process. */
2280 + struct dir *wd; /* Working directory. */
2281 + bool success; /* Program successfully loaded? */
2284 /* Starts a new thread running a user program loaded from
2285 FILENAME. The new thread may be scheduled (and may even exit)
2286 @@ -27,41 +42,82 @@ static bool load (const char *cmdline, v
2288 process_execute (const char *filename)
2291 + struct dir *wd = thread_current ()->wd;
2292 + struct exec_info exec;
2293 + char thread_name[16];
2297 - /* Make a copy of FILENAME.
2298 - Otherwise there's a race between the caller and load(). */
2299 - fn_copy = palloc_get_page (0);
2300 - if (fn_copy == NULL)
2301 + /* Initialize exec_info. */
2302 + exec.filename = filename;
2308 + else if (!dir_open_root (&exec.wd))
2310 - strlcpy (fn_copy, filename, PGSIZE);
2311 + sema_init (&exec.load_done, 0);
2313 /* Create a new thread to execute FILENAME. */
2314 - tid = thread_create (filename, PRI_DEFAULT, execute_thread, fn_copy);
2315 - if (tid == TID_ERROR)
2316 - palloc_free_page (fn_copy);
2317 + strlcpy (thread_name, filename, sizeof thread_name);
2318 + strtok_r (thread_name, " ", &save_ptr);
2319 + tid = thread_create (thread_name, PRI_DEFAULT, execute_thread, &exec);
2320 + if (tid != TID_ERROR)
2322 + sema_down (&exec.load_done);
2324 + list_push_back (&thread_current ()->children, &exec.wait_status->elem);
2328 + /* Don't close exec.wd; child process will have done so. */
2332 + dir_close (exec.wd);
2337 /* A thread function that loads a user process and starts it
2340 -execute_thread (void *filename_)
2341 +execute_thread (void *exec_)
2343 - char *filename = filename_;
2344 + struct exec_info *exec = exec_;
2345 struct intr_frame if_;
2348 + thread_current ()->wd = exec->wd;
2350 /* Initialize interrupt frame and load executable. */
2351 memset (&if_, 0, sizeof if_);
2352 if_.gs = if_.fs = if_.es = if_.ds = if_.ss = SEL_UDSEG;
2354 if_.eflags = FLAG_IF | FLAG_MBS;
2355 - success = load (filename, &if_.eip, &if_.esp);
2356 + success = load (exec->filename, &if_.eip, &if_.esp);
2358 + /* Allocate wait_status. */
2361 + exec->wait_status = thread_current ()->wait_status
2362 + = malloc (sizeof *exec->wait_status);
2363 + success = exec->wait_status != NULL;
2366 - /* If load failed, quit. */
2367 - palloc_free_page (filename);
2368 + /* Initialize wait_status. */
2371 + lock_init (&exec->wait_status->lock);
2372 + exec->wait_status->ref_cnt = 2;
2373 + exec->wait_status->tid = thread_current ()->tid;
2374 + sema_init (&exec->wait_status->dead, 0);
2377 + /* Notify parent thread and clean up. */
2378 + exec->success = success;
2379 + sema_up (&exec->load_done);
2383 @@ -75,18 +131,47 @@ execute_thread (void *filename_)
2387 +/* Releases one reference to CS and, if it is now unreferenced,
2390 +release_child (struct wait_status *cs)
2394 + lock_acquire (&cs->lock);
2395 + new_ref_cnt = --cs->ref_cnt;
2396 + lock_release (&cs->lock);
2398 + if (new_ref_cnt == 0)
2402 /* Waits for thread TID to die and returns its exit status. If
2403 it was terminated by the kernel (i.e. killed due to an
2404 exception), returns -1. If TID is invalid or if it was not a
2405 child of the calling process, or if process_wait() has already
2406 been successfully called for the given TID, returns -1
2407 - immediately, without waiting.
2409 - This function will be implemented in problem 2-2. For now, it
2411 + immediately, without waiting. */
2413 -process_wait (tid_t child_tid UNUSED)
2414 +process_wait (tid_t child_tid)
2416 + struct thread *cur = thread_current ();
2417 + struct list_elem *e;
2419 + for (e = list_begin (&cur->children); e != list_end (&cur->children);
2420 + e = list_next (e))
2422 + struct wait_status *cs = list_entry (e, struct wait_status, elem);
2423 + if (cs->tid == child_tid)
2427 + sema_down (&cs->dead);
2428 + exit_code = cs->exit_code;
2429 + release_child (cs);
2436 @@ -95,8 +180,35 @@ void
2439 struct thread *cur = thread_current ();
2440 + struct list_elem *e, *next;
2443 + printf ("%s: exit(%d)\n", cur->name, cur->exit_code);
2445 + /* Notify parent that we're dead. */
2446 + if (cur->wait_status != NULL)
2448 + struct wait_status *cs = cur->wait_status;
2449 + cs->exit_code = cur->exit_code;
2450 + sema_up (&cs->dead);
2451 + release_child (cs);
2454 + /* Free entries of children list. */
2455 + for (e = list_begin (&cur->children); e != list_end (&cur->children);
2458 + struct wait_status *cs = list_entry (e, struct wait_status, elem);
2459 + next = list_remove (e);
2460 + release_child (cs);
2463 + /* Destroy the page hash table. */
2466 + /* Close executable (and allow writes). */
2467 + file_close (cur->bin_file);
2469 /* Destroy the current process's page directory and switch back
2470 to the kernel-only page directory. */
2472 @@ -194,20 +306,22 @@ struct Elf32_Phdr
2473 #define PF_R 4 /* Readable. */
2475 static bool load_segment (struct file *, const struct Elf32_Phdr *);
2476 -static bool setup_stack (void **esp);
2477 +static bool setup_stack (const char *cmd_line, void **esp);
2479 /* Loads an ELF executable from FILENAME into the current thread.
2480 Stores the executable's entry point into *EIP
2481 and its initial stack pointer into *ESP.
2482 Returns true if successful, false otherwise. */
2484 -load (const char *filename, void (**eip) (void), void **esp)
2485 +load (const char *cmd_line, void (**eip) (void), void **esp)
2487 struct thread *t = thread_current ();
2488 + char filename[NAME_MAX + 2];
2489 struct Elf32_Ehdr ehdr;
2490 struct file *file = NULL;
2492 bool success = false;
2496 /* Allocate and activate page directory. */
2497 @@ -216,13 +330,28 @@ load (const char *filename, void (**eip)
2499 process_activate ();
2501 + /* Create page hash table. */
2502 + t->pages = malloc (sizeof *t->pages);
2503 + if (t->pages == NULL)
2505 + hash_init (t->pages, page_hash, page_less, NULL);
2507 + /* Extract filename from command line. */
2508 + while (*cmd_line == ' ')
2510 + strlcpy (filename, cmd_line, sizeof filename);
2511 + cp = strchr (filename, ' ');
2515 /* Open executable file. */
2516 - file = filesys_open (filename);
2517 + t->bin_file = file = file_open (filesys_open (filename));
2520 printf ("load: %s: open failed\n", filename);
2523 + file_deny_write (file);
2525 /* Read and verify executable header. */
2526 if (file_read (file, &ehdr, sizeof ehdr) != sizeof ehdr
2527 @@ -271,7 +400,7 @@ load (const char *filename, void (**eip)
2531 - if (!setup_stack (esp))
2532 + if (!setup_stack (cmd_line, esp))
2535 /* Start address. */
2536 @@ -280,15 +409,11 @@ load (const char *filename, void (**eip)
2540 - /* We arrive here whether the load is successful or not. */
2541 - file_close (file);
2545 /* load() helpers. */
2547 -static bool install_page (void *upage, void *kpage);
2549 /* Loads the segment described by PHDR from FILE into user
2550 address space. Return true if successful, false otherwise. */
2552 @@ -296,6 +421,7 @@ load_segment (struct file *file, const s
2554 void *start, *end; /* Page-rounded segment start and end. */
2555 uint8_t *upage; /* Iterator from start to end. */
2556 + off_t file_offset; /* Offset into file. */
2557 off_t filesz_left; /* Bytes left of file data (as opposed to
2558 zero-initialized bytes). */
2560 @@ -303,7 +429,7 @@ load_segment (struct file *file, const s
2561 commented out. You'll want to use it when implementing VM
2562 to decide whether to page the segment from its executable or
2564 - //bool read_only = (phdr->p_flags & PF_W) == 0;
2565 + bool read_only = (phdr->p_flags & PF_W) == 0;
2567 ASSERT (file != NULL);
2568 ASSERT (phdr != NULL);
2569 @@ -332,73 +458,129 @@ load_segment (struct file *file, const s
2573 - /* Load the segment page-by-page into memory. */
2574 + /* Add the segment page-by-page to the hash table. */
2575 filesz_left = phdr->p_filesz + (phdr->p_vaddr & PGMASK);
2576 - file_seek (file, ROUND_DOWN (phdr->p_offset, PGSIZE));
2577 + file_offset = ROUND_DOWN (phdr->p_offset, PGSIZE);
2578 for (upage = start; upage < (uint8_t *) end; upage += PGSIZE)
2580 - /* We want to read min(PGSIZE, filesz_left) bytes from the
2581 - file into the page and zero the rest. */
2582 - size_t read_bytes = filesz_left >= PGSIZE ? PGSIZE : filesz_left;
2583 - size_t zero_bytes = PGSIZE - read_bytes;
2584 - uint8_t *kpage = palloc_get_page (PAL_USER);
2585 - if (kpage == NULL)
2586 + struct page *p = page_allocate (upage, read_only);
2590 - /* Do the reading and zeroing. */
2591 - if (file_read (file, kpage, read_bytes) != (int) read_bytes)
2592 + if (filesz_left > 0)
2594 - palloc_free_page (kpage);
2597 - memset (kpage + read_bytes, 0, zero_bytes);
2598 - filesz_left -= read_bytes;
2600 - /* Add the page to the process's address space. */
2601 - if (!install_page (upage, kpage))
2603 - palloc_free_page (kpage);
2605 + size_t file_bytes = filesz_left >= PGSIZE ? PGSIZE : filesz_left;
2607 + p->file_offset = file_offset;
2608 + p->file_bytes = file_bytes;
2609 + filesz_left -= file_bytes;
2610 + file_offset += file_bytes;
2617 -/* Create a minimal stack by mapping a zeroed page at the top of
2618 - user virtual memory. */
2619 +/* Reverse the order of the ARGC pointers to char in ARGV. */
2621 +reverse (int argc, char **argv)
2623 + for (; argc > 1; argc -= 2, argv++)
2625 + char *tmp = argv[0];
2626 + argv[0] = argv[argc - 1];
2627 + argv[argc - 1] = tmp;
2631 +/* Pushes the SIZE bytes in BUF onto the stack in KPAGE, whose
2632 + page-relative stack pointer is *OFS, and then adjusts *OFS
2633 + appropriately. The bytes pushed are rounded to a 32-bit
2636 + If successful, returns a pointer to the newly pushed object.
2637 + On failure, returns a null pointer. */
2639 +push (uint8_t *kpage, size_t *ofs, const void *buf, size_t size)
2641 + size_t padsize = ROUND_UP (size, sizeof (uint32_t));
2642 + if (*ofs < padsize)
2646 + memcpy (kpage + *ofs + (padsize - size), buf, size);
2647 + return kpage + *ofs + (padsize - size);
2650 +/* Sets up command line arguments in KPAGE, which will be mapped
2651 + to UPAGE in user space. The command line arguments are taken
2652 + from CMD_LINE, separated by spaces. Sets *ESP to the initial
2653 + stack pointer for the process. */
2655 -setup_stack (void **esp)
2656 +init_cmd_line (uint8_t *kpage, uint8_t *upage, const char *cmd_line,
2660 - bool success = false;
2661 + size_t ofs = PGSIZE;
2662 + char *const null = NULL;
2663 + char *cmd_line_copy;
2664 + char *karg, *saveptr;
2668 + /* Push command line string. */
2669 + cmd_line_copy = push (kpage, &ofs, cmd_line, strlen (cmd_line) + 1);
2670 + if (cmd_line_copy == NULL)
2673 + if (push (kpage, &ofs, &null, sizeof null) == NULL)
2676 - kpage = palloc_get_page (PAL_USER | PAL_ZERO);
2677 - if (kpage != NULL)
2678 + /* Parse command line into arguments
2679 + and push them in reverse order. */
2681 + for (karg = strtok_r (cmd_line_copy, " ", &saveptr); karg != NULL;
2682 + karg = strtok_r (NULL, " ", &saveptr))
2684 - success = install_page (((uint8_t *) PHYS_BASE) - PGSIZE, kpage);
2688 - palloc_free_page (kpage);
2689 + char *uarg = upage + (karg - (char *) kpage);
2690 + if (push (kpage, &ofs, &uarg, sizeof uarg) == NULL)
2696 + /* Reverse the order of the command line arguments. */
2697 + argv = (char **) (upage + ofs);
2698 + reverse (argc, (char **) (kpage + ofs));
2700 + /* Push argv, argc, "return address". */
2701 + if (push (kpage, &ofs, &argv, sizeof argv) == NULL
2702 + || push (kpage, &ofs, &argc, sizeof argc) == NULL
2703 + || push (kpage, &ofs, &null, sizeof null) == NULL)
2706 + /* Set initial stack pointer. */
2707 + *esp = upage + ofs;
2711 -/* Adds a mapping from user virtual address UPAGE to kernel
2712 - virtual address KPAGE to the page table.
2713 - UPAGE must not already be mapped.
2714 - KPAGE should probably be a page obtained from the user pool
2715 - with palloc_get_page().
2716 - Returns true on success, false if UPAGE is already mapped or
2717 - if memory allocation fails. */
2718 +/* Create a minimal stack for T by mapping a page at the
2719 + top of user virtual memory. Fills in the page using CMD_LINE
2720 + and sets *ESP to the stack pointer. */
2722 -install_page (void *upage, void *kpage)
2723 +setup_stack (const char *cmd_line, void **esp)
2725 - struct thread *t = thread_current ();
2727 - /* Verify that there's not already a page at that virtual
2728 - address, then map our page there. */
2729 - return (pagedir_get_page (t->pagedir, upage) == NULL
2730 - && pagedir_set_page (t->pagedir, upage, kpage, true));
2731 + struct page *page = page_allocate (((uint8_t *) PHYS_BASE) - PGSIZE, false);
2734 + page->frame = frame_alloc_and_lock (page);
2735 + if (page->frame != NULL)
2738 + page->read_only = false;
2739 + page->private = false;
2740 + ok = init_cmd_line (page->frame->base, page->addr, cmd_line, esp);
2741 + frame_unlock (page->frame);
2747 diff -u src/userprog/syscall.c~ src/userprog/syscall.c
2748 --- src/userprog/syscall.c~ 2005-06-16 14:56:52.000000000 -0700
2749 +++ src/userprog/syscall.c 2005-06-16 15:09:31.000000000 -0700
2751 #include "userprog/syscall.h"
2753 +#include <string.h>
2754 #include <syscall-nr.h>
2755 +#include "userprog/process.h"
2756 +#include "userprog/pagedir.h"
2757 +#include "devices/kbd.h"
2758 +#include "filesys/directory.h"
2759 +#include "filesys/filesys.h"
2760 +#include "filesys/file.h"
2761 +#include "threads/init.h"
2762 #include "threads/interrupt.h"
2763 +#include "threads/malloc.h"
2764 +#include "threads/mmu.h"
2765 +#include "threads/palloc.h"
2766 #include "threads/thread.h"
2768 +#include "vm/page.h"
2771 +static int sys_halt (void);
2772 +static int sys_exit (int status);
2773 +static int sys_exec (const char *ufile);
2774 +static int sys_wait (tid_t);
2775 +static int sys_create (const char *ufile, unsigned initial_size);
2776 +static int sys_remove (const char *ufile);
2777 +static int sys_open (const char *ufile);
2778 +static int sys_filesize (int handle);
2779 +static int sys_read (int handle, void *udst_, unsigned size);
2780 +static int sys_write (int handle, void *usrc_, unsigned size);
2781 +static int sys_seek (int handle, unsigned position);
2782 +static int sys_tell (int handle);
2783 +static int sys_close (int handle);
2784 +static int sys_mmap (int handle, void *addr);
2785 +static int sys_munmap (int mapping);
2786 +static int sys_chdir (const char *udir);
2787 +static int sys_mkdir (const char *udir);
2788 +static int sys_lsdir (void);
2790 static void syscall_handler (struct intr_frame *);
2792 +static void copy_in (void *, const void *, size_t);
2797 intr_register_int (0x30, 3, INTR_ON, syscall_handler, "syscall");
2800 +/* System call handler. */
2802 +syscall_handler (struct intr_frame *f)
2804 + typedef int syscall_function (int, int, int);
2806 + /* A system call. */
2809 + size_t arg_cnt; /* Number of arguments. */
2810 + syscall_function *func; /* Implementation. */
2813 + /* Table of system calls. */
2814 + static const struct syscall syscall_table[] =
2816 + {0, (syscall_function *) sys_halt},
2817 + {1, (syscall_function *) sys_exit},
2818 + {1, (syscall_function *) sys_exec},
2819 + {1, (syscall_function *) sys_wait},
2820 + {2, (syscall_function *) sys_create},
2821 + {1, (syscall_function *) sys_remove},
2822 + {1, (syscall_function *) sys_open},
2823 + {1, (syscall_function *) sys_filesize},
2824 + {3, (syscall_function *) sys_read},
2825 + {3, (syscall_function *) sys_write},
2826 + {2, (syscall_function *) sys_seek},
2827 + {1, (syscall_function *) sys_tell},
2828 + {1, (syscall_function *) sys_close},
2829 + {2, (syscall_function *) sys_mmap},
2830 + {1, (syscall_function *) sys_munmap},
2831 + {1, (syscall_function *) sys_chdir},
2832 + {1, (syscall_function *) sys_mkdir},
2833 + {0, (syscall_function *) sys_lsdir},
2836 + const struct syscall *sc;
2840 + /* Get the system call. */
2841 + copy_in (&call_nr, f->esp, sizeof call_nr);
2842 + if (call_nr >= sizeof syscall_table / sizeof *syscall_table)
2844 + sc = syscall_table + call_nr;
2846 + /* Get the system call arguments. */
2847 + ASSERT (sc->arg_cnt <= sizeof args / sizeof *args);
2848 + memset (args, 0, sizeof args);
2849 + copy_in (args, (uint32_t *) f->esp + 1, sizeof *args * sc->arg_cnt);
2851 + /* Execute the system call,
2852 + and set the return value. */
2853 + f->eax = sc->func (args[0], args[1], args[2]);
2856 +/* Copies SIZE bytes from user address USRC to kernel address
2858 + Call thread_exit() if any of the user accesses are invalid. */
2860 -syscall_handler (struct intr_frame *f UNUSED)
2861 +copy_in (void *dst_, const void *usrc_, size_t size)
2863 + uint8_t *dst = dst_;
2864 + const uint8_t *usrc = usrc_;
2868 + size_t chunk_size = PGSIZE - pg_ofs (usrc);
2869 + if (chunk_size > size)
2870 + chunk_size = size;
2872 + if (!page_lock (usrc, false))
2874 + memcpy (dst, usrc, chunk_size);
2875 + page_unlock (usrc);
2877 + dst += chunk_size;
2878 + usrc += chunk_size;
2879 + size -= chunk_size;
2883 +/* Creates a copy of user string US in kernel memory
2884 + and returns it as a page that must be freed with
2885 + palloc_free_page().
2886 + Truncates the string at PGSIZE bytes in size.
2887 + Call thread_exit() if any of the user accesses are invalid. */
2889 +copy_in_string (const char *us)
2895 + ks = palloc_get_page (0);
2902 + upage = pg_round_down (us);
2903 + if (!page_lock (upage, false))
2906 + for (; us < upage + PGSIZE; us++)
2908 + ks[length++] = *us;
2911 + page_unlock (upage);
2914 + else if (length >= PGSIZE)
2915 + goto too_long_error;
2918 + page_unlock (upage);
2922 + page_unlock (upage);
2924 + palloc_free_page (ks);
2928 +/* Halt system call. */
2935 +/* Exit system call. */
2937 +sys_exit (int exit_code)
2939 + thread_current ()->exit_code = exit_code;
2944 +/* Exec system call. */
2946 +sys_exec (const char *ufile)
2949 + char *kfile = copy_in_string (ufile);
2951 + tid = process_execute (kfile);
2953 + palloc_free_page (kfile);
2958 +/* Wait system call. */
2960 +sys_wait (tid_t child)
2962 + return process_wait (child);
2965 +/* Create system call. */
2967 +sys_create (const char *ufile, unsigned initial_size)
2969 + char *kfile = copy_in_string (ufile);
2970 + bool ok = filesys_create (kfile, initial_size, FILE_INODE);
2971 + palloc_free_page (kfile);
2976 +/* Remove system call. */
2978 +sys_remove (const char *ufile)
2980 + char *kfile = copy_in_string (ufile);
2981 + bool ok = filesys_remove (kfile);
2982 + palloc_free_page (kfile);
2987 +/* A file descriptor, for binding a file handle to a file. */
2988 +struct file_descriptor
2990 + struct list_elem elem; /* List element. */
2991 + struct file *file; /* File. */
2992 + int handle; /* File handle. */
2995 +/* Open system call. */
2997 +sys_open (const char *ufile)
2999 + char *kfile = copy_in_string (ufile);
3000 + struct file_descriptor *fd;
3003 + fd = malloc (sizeof *fd);
3006 + fd->file = file_open (filesys_open (kfile));
3007 + if (fd->file != NULL)
3009 + struct thread *cur = thread_current ();
3010 + handle = fd->handle = cur->next_handle++;
3011 + list_push_front (&cur->fds, &fd->elem);
3017 + palloc_free_page (kfile);
3021 +/* Returns the file descriptor associated with the given handle.
3022 + Terminates the process if HANDLE is not associated with an
3024 +static struct file_descriptor *
3025 +lookup_fd (int handle)
3027 + struct thread *cur = thread_current ();
3028 + struct list_elem *e;
3030 + for (e = list_begin (&cur->fds); e != list_end (&cur->fds);
3031 + e = list_next (e))
3033 + struct file_descriptor *fd;
3034 + fd = list_entry (e, struct file_descriptor, elem);
3035 + if (fd->handle == handle)
3042 +/* Filesize system call. */
3044 +sys_filesize (int handle)
3046 + struct file_descriptor *fd = lookup_fd (handle);
3049 + size = file_length (fd->file);
3054 +/* Read system call. */
3056 +sys_read (int handle, void *udst_, unsigned size)
3058 + uint8_t *udst = udst_;
3059 + struct file_descriptor *fd;
3060 + int bytes_read = 0;
3062 + /* Look up file descriptor. */
3063 + if (handle != STDIN_FILENO)
3064 + fd = lookup_fd (handle);
3068 + /* How much to read into this page? */
3069 + size_t page_left = PGSIZE - pg_ofs (udst);
3070 + size_t read_amt = size < page_left ? size : page_left;
3073 + /* Check that touching this page is okay. */
3074 + if (!page_lock (udst, true))
3077 + /* Read from file into page. */
3078 + if (handle != STDIN_FILENO)
3080 + retval = file_read (fd->file, udst, read_amt);
3083 + if (bytes_read == 0)
3087 + bytes_read += retval;
3093 + for (i = 0; i < read_amt; i++)
3094 + udst[i] = kbd_getc ();
3095 + bytes_read = read_amt;
3098 + /* Release page. */
3099 + page_unlock (udst);
3101 + /* If it was a short read we're done. */
3102 + if (retval != (off_t) read_amt)
3110 + return bytes_read;
3113 +/* Write system call. */
3115 +sys_write (int handle, void *usrc_, unsigned size)
3117 - printf ("system call!\n");
3118 + uint8_t *usrc = usrc_;
3119 + struct file_descriptor *fd = NULL;
3120 + int bytes_written = 0;
3122 + /* Look up file descriptor. */
3123 + if (handle != STDOUT_FILENO)
3124 + fd = lookup_fd (handle);
3128 + /* How many bytes to write to this page? */
3129 + size_t page_left = PGSIZE - pg_ofs (usrc);
3130 + size_t write_amt = size < page_left ? size : page_left;
3133 + /* Check that we can touch this user page. */
3134 + if (!page_lock (usrc, false))
3137 + /* Do the write. */
3138 + if (handle == STDOUT_FILENO)
3140 + putbuf (usrc, write_amt);
3141 + retval = write_amt;
3144 + retval = file_write (fd->file, usrc, write_amt);
3146 + /* Release user page. */
3147 + page_unlock (usrc);
3149 + /* Handle return value. */
3152 + if (bytes_written == 0)
3153 + bytes_written = -1;
3156 + bytes_written += retval;
3158 + /* If it was a short write we're done. */
3159 + if (retval != (off_t) write_amt)
3167 + return bytes_written;
3170 +/* Seek system call. */
3172 +sys_seek (int handle, unsigned position)
3174 + if ((off_t) position >= 0)
3175 + file_seek (lookup_fd (handle)->file, position);
3179 +/* Tell system call. */
3181 +sys_tell (int handle)
3183 + return file_tell (lookup_fd (handle)->file);
3186 +/* Close system call. */
3188 +sys_close (int handle)
3190 + struct file_descriptor *fd = lookup_fd (handle);
3191 + file_close (fd->file);
3192 + list_remove (&fd->elem);
3197 +/* Binds a mapping id to a region of memory and a file. */
3200 + struct list_elem elem; /* List element. */
3201 + int handle; /* Mapping id. */
3202 + struct file *file; /* File. */
3203 + uint8_t *base; /* Start of memory mapping. */
3204 + size_t page_cnt; /* Number of pages mapped. */
3207 +/* Returns the mapping associated with the given handle.
3208 + Terminates the process if HANDLE is not associated with a
3209 + memory mapping. */
3210 +static struct mapping *
3211 +lookup_mapping (int handle)
3213 + struct thread *cur = thread_current ();
3214 + struct list_elem *e;
3216 + for (e = list_begin (&cur->mappings); e != list_end (&cur->mappings);
3217 + e = list_next (e))
3219 + struct mapping *m = list_entry (e, struct mapping, elem);
3220 + if (m->handle == handle)
3227 +/* Remove mapping M from the virtual address space,
3228 + writing back any pages that have changed. */
3230 +unmap (struct mapping *m)
3232 + list_remove (&m->elem);
3233 + while (m->page_cnt-- > 0)
3235 + page_deallocate (m->base);
3236 + m->base += PGSIZE;
3238 + file_close (m->file);
3242 +/* Mmap system call. */
3244 +sys_mmap (int handle, void *addr)
3246 + struct file_descriptor *fd = lookup_fd (handle);
3247 + struct mapping *m = malloc (sizeof *m);
3251 + if (m == NULL || addr == NULL || pg_ofs (addr) != 0)
3254 + m->handle = thread_current ()->next_handle++;
3255 + m->file = file_reopen (fd->file);
3256 + if (m->file == NULL)
3263 + list_push_front (&thread_current ()->mappings, &m->elem);
3266 + length = file_length (m->file);
3267 + while (length > 0)
3269 + struct page *p = page_allocate ((uint8_t *) addr + offset, false);
3275 + p->private = false;
3276 + p->file = m->file;
3277 + p->file_offset = offset;
3278 + p->file_bytes = length >= PGSIZE ? PGSIZE : length;
3279 + offset += p->file_bytes;
3280 + length -= p->file_bytes;
3287 +/* Munmap system call. */
3289 +sys_munmap (int mapping)
3291 + unmap (lookup_mapping (mapping));
3295 +/* Chdir system call. */
3297 +sys_chdir (const char *udir)
3299 + char *kdir = copy_in_string (udir);
3300 + bool ok = filesys_chdir (kdir);
3301 + palloc_free_page (kdir);
3305 +/* Mkdir system call. */
3307 +sys_mkdir (const char *udir)
3309 + char *kdir = copy_in_string (udir);
3310 + bool ok = filesys_create (kdir, 0, DIR_INODE);
3311 + palloc_free_page (kdir);
3316 +/* Lsdir system call. */
3320 + dir_list (thread_current ()->wd);
3324 +/* On thread exit, close all open files and unmap all mappings. */
3326 +syscall_exit (void)
3328 + struct thread *cur = thread_current ();
3329 + struct list_elem *e, *next;
3331 + for (e = list_begin (&cur->fds); e != list_end (&cur->fds); e = next)
3333 + struct file_descriptor *fd = list_entry (e, struct file_descriptor, elem);
3334 + next = list_next (e);
3335 + file_close (fd->file);
3339 + for (e = list_begin (&cur->mappings); e != list_end (&cur->mappings);
3342 + struct mapping *m = list_entry (e, struct mapping, elem);
3343 + next = list_next (e);
3347 + dir_close (cur->wd);
3349 diff -u src/userprog/syscall.h~ src/userprog/syscall.h
3350 --- src/userprog/syscall.h~ 2004-09-05 22:38:45.000000000 -0700
3351 +++ src/userprog/syscall.h 2005-06-16 15:09:31.000000000 -0700
3353 #define USERPROG_SYSCALL_H
3355 void syscall_init (void);
3356 +void syscall_exit (void);
3358 #endif /* userprog/syscall.h */
3359 diff -u src/vm/frame.c~ src/vm/frame.c
3360 --- src/vm/frame.c~ 1969-12-31 16:00:00.000000000 -0800
3361 +++ src/vm/frame.c 2005-06-16 15:09:31.000000000 -0700
3363 +#include "vm/frame.h"
3365 +#include "vm/page.h"
3366 +#include "devices/timer.h"
3367 +#include "threads/init.h"
3368 +#include "threads/malloc.h"
3369 +#include "threads/mmu.h"
3370 +#include "threads/palloc.h"
3371 +#include "threads/synch.h"
3373 +static struct frame *frames;
3374 +static size_t frame_cnt;
3376 +static struct lock scan_lock;
3377 +static size_t hand;
3379 +/* Initialize the frame manager. */
3385 + lock_init (&scan_lock);
3387 + frames = malloc (sizeof *frames * ram_pages);
3388 + if (frames == NULL)
3389 + PANIC ("out of memory allocating page frames");
3391 + while ((base = palloc_get_page (PAL_USER)) != NULL)
3393 + struct frame *f = &frames[frame_cnt++];
3394 + lock_init (&f->lock);
3400 +/* Tries to allocate and lock a frame for PAGE.
3401 + Returns the frame if successful, a null pointer on failure. */
3402 +static struct frame *
3403 +try_frame_alloc_and_lock (struct page *page)
3407 + lock_acquire (&scan_lock);
3409 + /* Find a free frame. */
3410 + for (i = 0; i < frame_cnt; i++)
3412 + struct frame *f = &frames[i];
3413 + if (!lock_try_acquire (&f->lock))
3415 + if (f->page == NULL)
3418 + lock_release (&scan_lock);
3421 + lock_release (&f->lock);
3424 + /* No free frame. Find a frame to evict. */
3425 + for (i = 0; i < frame_cnt * 2; i++)
3427 + /* Get a frame. */
3428 + struct frame *f = &frames[hand];
3429 + if (++hand >= frame_cnt)
3432 + if (!lock_try_acquire (&f->lock))
3435 + if (f->page == NULL)
3438 + lock_release (&scan_lock);
3442 + if (page_accessed_recently (f->page))
3444 + lock_release (&f->lock);
3448 + lock_release (&scan_lock);
3450 + /* Evict this frame. */
3451 + if (!page_out (f->page))
3453 + lock_release (&f->lock);
3461 + lock_release (&scan_lock);
3465 +/* Tries really hard to allocate and lock a frame for PAGE.
3466 + Returns the frame if successful, false on failure. */
3468 +frame_alloc_and_lock (struct page *page)
3472 + for (try = 0; try < 3; try++)
3474 + struct frame *f = try_frame_alloc_and_lock (page);
3477 + ASSERT (lock_held_by_current_thread (&f->lock));
3480 + timer_msleep (1000);
3486 +/* Locks P's frame into memory, if it has one.
3487 + Upon return, p->frame will not change until P is unlocked. */
3489 +frame_lock (struct page *p)
3491 + /* A frame can be asynchronously removed, but never inserted. */
3492 + struct frame *f = p->frame;
3495 + lock_acquire (&f->lock);
3496 + if (f != p->frame)
3498 + lock_release (&f->lock);
3499 + ASSERT (p->frame == NULL);
3504 +/* Releases frame F for use by another page.
3505 + F must be locked for use by the current process.
3506 + Any data in F is lost. */
3508 +frame_free (struct frame *f)
3510 + ASSERT (lock_held_by_current_thread (&f->lock));
3513 + lock_release (&f->lock);
3516 +/* Unlocks frame F, allowing it to be evicted.
3517 + F must be locked for use by the current process. */
3519 +frame_unlock (struct frame *f)
3521 + ASSERT (lock_held_by_current_thread (&f->lock));
3522 + lock_release (&f->lock);
3524 diff -u src/vm/frame.h~ src/vm/frame.h
3525 --- src/vm/frame.h~ 1969-12-31 16:00:00.000000000 -0800
3526 +++ src/vm/frame.h 2005-06-16 15:09:31.000000000 -0700
3531 +#include <stdbool.h>
3532 +#include "threads/synch.h"
3534 +/* A physical frame. */
3537 + struct lock lock; /* Prevent simultaneous access. */
3538 + void *base; /* Kernel virtual base address. */
3539 + struct page *page; /* Mapped process page, if any. */
3542 +void frame_init (void);
3544 +struct frame *frame_alloc_and_lock (struct page *);
3545 +void frame_lock (struct page *);
3547 +void frame_free (struct frame *);
3548 +void frame_unlock (struct frame *);
3550 +#endif /* vm/frame.h */
3551 diff -u src/vm/page.c~ src/vm/page.c
3552 --- src/vm/page.c~ 1969-12-31 16:00:00.000000000 -0800
3553 +++ src/vm/page.c 2005-06-16 15:09:31.000000000 -0700
3555 +#include "vm/page.h"
3557 +#include <string.h>
3558 +#include "vm/frame.h"
3559 +#include "vm/swap.h"
3560 +#include "filesys/file.h"
3561 +#include "threads/malloc.h"
3562 +#include "threads/mmu.h"
3563 +#include "threads/thread.h"
3564 +#include "userprog/pagedir.h"
3566 +/* Maximum size of process stack, in bytes. */
3567 +#define STACK_MAX (1024 * 1024)
3569 +/* Destroys a page, which must be in the current process's
3570 + page table. Used as a callback for hash_destroy(). */
3572 +destroy_page (struct hash_elem *p_, void *aux UNUSED)
3574 + struct page *p = hash_entry (p_, struct page, hash_elem);
3577 + frame_free (p->frame);
3582 +/* Destroys the current process's page table. */
3586 + struct hash *h = thread_current ()->pages;
3588 + hash_destroy (h, destroy_page);
3591 +/* Returns the page containing the given virtual ADDRESS,
3592 + or a null pointer if no such page exists.
3593 + Allocates stack pages as necessary. */
3594 +static struct page *
3595 +page_for_addr (const void *address)
3597 + if (address < PHYS_BASE)
3600 + struct hash_elem *e;
3602 + /* Find existing page. */
3603 + p.addr = (void *) pg_round_down (address);
3604 + e = hash_find (thread_current ()->pages, &p.hash_elem);
3606 + return hash_entry (e, struct page, hash_elem);
3608 + /* No page. Expand stack? */
3609 + if (address >= PHYS_BASE - STACK_MAX
3610 + && address >= thread_current ()->user_esp - 32)
3611 + return page_allocate ((void *) address, false);
3616 +/* Locks a frame for page P and pages it in.
3617 + Returns true if successful, false on failure. */
3619 +do_page_in (struct page *p)
3621 + /* Get a frame for the page. */
3622 + p->frame = frame_alloc_and_lock (p);
3623 + if (p->frame == NULL)
3626 + /* Copy data into the frame. */
3627 + if (p->sector != (disk_sector_t) -1)
3629 + /* Get data from swap. */
3632 + else if (p->file != NULL)
3634 + /* Get data from file. */
3635 + off_t read_bytes = file_read_at (p->file, p->frame->base,
3636 + p->file_bytes, p->file_offset);
3637 + off_t zero_bytes = PGSIZE - read_bytes;
3638 + memset (p->frame->base + read_bytes, 0, zero_bytes);
3639 + if (read_bytes != p->file_bytes)
3640 + printf ("bytes read (%"PROTd") != bytes requested (%"PROTd")\n",
3641 + read_bytes, p->file_bytes);
3645 + /* Provide all-zero page. */
3646 + memset (p->frame->base, 0, PGSIZE);
3652 +/* Faults in the page containing FAULT_ADDR.
3653 + Returns true if successful, false on failure. */
3655 +page_in (void *fault_addr)
3660 + /* Can't handle page faults without a hash table. */
3661 + if (thread_current ()->pages == NULL)
3664 + p = page_for_addr (fault_addr);
3669 + if (p->frame == NULL)
3671 + if (!do_page_in (p))
3674 + ASSERT (lock_held_by_current_thread (&p->frame->lock));
3676 + /* Install frame into page table. */
3677 + success = pagedir_set_page (thread_current ()->pagedir, p->addr,
3678 + p->frame->base, !p->read_only);
3680 + /* Release frame. */
3681 + frame_unlock (p->frame);
3687 + P must have a locked frame.
3688 + Return true if successful, false on failure. */
3690 +page_out (struct page *p)
3695 + ASSERT (p->frame != NULL);
3696 + ASSERT (lock_held_by_current_thread (&p->frame->lock));
3698 + /* Mark page not present in page table, forcing accesses by the
3699 + process to fault. This must happen before checking the
3700 + dirty bit, to prevent a race with the process dirtying the
3702 + pagedir_clear_page (p->thread->pagedir, p->addr);
3704 + /* Has the frame been modified? */
3705 + dirty = pagedir_is_dirty (p->thread->pagedir, p->addr);
3707 + /* Write frame contents to disk if necessary. */
3708 + if (p->file != NULL)
3713 + ok = swap_out (p);
3715 + ok = file_write_at (p->file, p->frame->base, p->file_bytes,
3716 + p->file_offset) == p->file_bytes;
3722 + ok = swap_out (p);
3725 + //memset (p->frame->base, 0xcc, PGSIZE);
3731 +/* Returns true if page P's data has been accessed recently,
3733 + P must have a frame locked into memory. */
3735 +page_accessed_recently (struct page *p)
3737 + bool was_accessed;
3739 + ASSERT (p->frame != NULL);
3740 + ASSERT (lock_held_by_current_thread (&p->frame->lock));
3742 + was_accessed = pagedir_is_accessed (p->thread->pagedir, p->addr);
3744 + pagedir_set_accessed (p->thread->pagedir, p->addr, false);
3745 + return was_accessed;
3748 +/* Adds a mapping for user virtual address VADDR to the page hash
3749 + table. Fails if VADDR is already mapped or if memory
3750 + allocation fails. */
3752 +page_allocate (void *vaddr, bool read_only)
3754 + struct thread *t = thread_current ();
3755 + struct page *p = malloc (sizeof *p);
3758 + p->addr = pg_round_down (vaddr);
3760 + p->read_only = read_only;
3761 + p->private = !read_only;
3765 + p->sector = (disk_sector_t) -1;
3768 + p->file_offset = 0;
3769 + p->file_bytes = 0;
3771 + p->thread = thread_current ();
3773 + if (hash_insert (t->pages, &p->hash_elem) != NULL)
3775 + /* Already mapped. */
3783 +/* Evicts the page containing address VADDR
3784 + and removes it from the page table. */
3786 +page_deallocate (void *vaddr)
3788 + struct page *p = page_for_addr (vaddr);
3789 + ASSERT (p != NULL);
3793 + struct frame *f = p->frame;
3794 + if (p->file && !p->private)
3798 + hash_delete (thread_current ()->pages, &p->hash_elem);
3802 +/* Returns a hash value for the page that E refers to. */
3804 +page_hash (const struct hash_elem *e, void *aux UNUSED)
3806 + const struct page *p = hash_entry (e, struct page, hash_elem);
3807 + return ((uintptr_t) p->addr) >> PGBITS;
3810 +/* Returns true if page A precedes page B. */
3812 +page_less (const struct hash_elem *a_, const struct hash_elem *b_,
3815 + const struct page *a = hash_entry (a_, struct page, hash_elem);
3816 + const struct page *b = hash_entry (b_, struct page, hash_elem);
3818 + return a->addr < b->addr;
3821 +/* Tries to lock the page containing ADDR into physical memory.
3822 + If WILL_WRITE is true, the page must be writeable;
3823 + otherwise it may be read-only.
3824 + Returns true if successful, false on failure. */
3826 +page_lock (const void *addr, bool will_write)
3828 + struct page *p = page_for_addr (addr);
3829 + if (p == NULL || (p->read_only && will_write))
3833 + if (p->frame == NULL)
3834 + return (do_page_in (p)
3835 + && pagedir_set_page (thread_current ()->pagedir, p->addr,
3836 + p->frame->base, !p->read_only));
3841 +/* Unlocks a page locked with page_lock(). */
3843 +page_unlock (const void *addr)
3845 + struct page *p = page_for_addr (addr);
3846 + ASSERT (p != NULL);
3847 + frame_unlock (p->frame);
3849 diff -u src/vm/page.h~ src/vm/page.h
3850 --- src/vm/page.h~ 1969-12-31 16:00:00.000000000 -0800
3851 +++ src/vm/page.h 2005-06-16 15:09:31.000000000 -0700
3857 +#include "devices/disk.h"
3858 +#include "filesys/off_t.h"
3859 +#include "threads/synch.h"
3861 +/* Virtual page. */
3864 + /* Immutable members. */
3865 + void *addr; /* User virtual address. */
3866 + bool read_only; /* Read-only page? */
3867 + struct thread *thread; /* Owning thread. */
3869 + /* Accessed only in owning process context. */
3870 + struct hash_elem hash_elem; /* struct thread `pages' hash element. */
3872 + /* Set only in owning process context with frame->lock held.
3873 + Cleared only with scan_lock and frame->lock held. */
3874 + struct frame *frame; /* Page frame. */
3876 + /* Swap information, protected by frame->lock. */
3877 + disk_sector_t sector; /* Starting sector of swap area, or -1. */
3879 + /* Memory-mapped file information, protected by frame->lock. */
3880 + bool private; /* False to write back to file,
3881 + true to write back to swap. */
3882 + struct file *file; /* File. */
3883 + off_t file_offset; /* Offset in file. */
3884 + off_t file_bytes; /* Bytes to read/write, 1...PGSIZE. */
3887 +void page_exit (void);
3889 +struct page *page_allocate (void *, bool read_only);
3890 +void page_deallocate (void *vaddr);
3892 +bool page_in (void *fault_addr);
3893 +bool page_out (struct page *);
3894 +bool page_accessed_recently (struct page *);
3896 +bool page_lock (const void *, bool will_write);
3897 +void page_unlock (const void *);
3899 +hash_hash_func page_hash;
3900 +hash_less_func page_less;
3902 +#endif /* vm/page.h */
3903 diff -u src/vm/swap.c~ src/vm/swap.c
3904 --- src/vm/swap.c~ 1969-12-31 16:00:00.000000000 -0800
3905 +++ src/vm/swap.c 2005-06-16 15:09:31.000000000 -0700
3907 +#include "vm/swap.h"
3908 +#include <bitmap.h>
3911 +#include "vm/frame.h"
3912 +#include "vm/page.h"
3913 +#include "devices/disk.h"
3914 +#include "threads/mmu.h"
3915 +#include "threads/synch.h"
3917 +/* The swap disk. */
3918 +static struct disk *swap_disk;
3920 +/* Used swap pages. */
3921 +static struct bitmap *swap_bitmap;
3923 +/* Protects swap_bitmap. */
3924 +static struct lock swap_lock;
3926 +/* Number of sectors per page. */
3927 +#define PAGE_SECTORS (PGSIZE / DISK_SECTOR_SIZE)
3929 +/* Sets up swap. */
3933 + swap_disk = disk_get (1, 1);
3934 + if (swap_disk == NULL)
3936 + printf ("no swap disk--swap disabled\n");
3937 + swap_bitmap = bitmap_create (0);
3940 + swap_bitmap = bitmap_create (disk_size (swap_disk) / PAGE_SECTORS);
3941 + if (swap_bitmap == NULL)
3942 + PANIC ("couldn't create swap bitmap");
3943 + lock_init (&swap_lock);
3946 +/* Swaps in page P, which must have a locked frame
3947 + (and be swapped out). */
3949 +swap_in (struct page *p)
3953 + ASSERT (p->frame != NULL);
3954 + ASSERT (lock_held_by_current_thread (&p->frame->lock));
3955 + ASSERT (p->sector != (disk_sector_t) -1);
3957 + for (i = 0; i < PAGE_SECTORS; i++)
3958 + disk_read (swap_disk, p->sector + i,
3959 + p->frame->base + i * DISK_SECTOR_SIZE);
3960 + bitmap_reset (swap_bitmap, p->sector / PAGE_SECTORS);
3961 + p->sector = (disk_sector_t) -1;
3964 +/* Swaps out page P, which must have a locked frame. */
3966 +swap_out (struct page *p)
3971 + ASSERT (p->frame != NULL);
3972 + ASSERT (lock_held_by_current_thread (&p->frame->lock));
3974 + lock_acquire (&swap_lock);
3975 + slot = bitmap_scan_and_flip (swap_bitmap, 0, 1, false);
3976 + lock_release (&swap_lock);
3977 + if (slot == BITMAP_ERROR)
3980 + p->sector = slot * PAGE_SECTORS;
3981 + for (i = 0; i < PAGE_SECTORS; i++)
3982 + disk_write (swap_disk, p->sector + i,
3983 + p->frame->base + i * DISK_SECTOR_SIZE);
3985 + p->private = false;
3987 + p->file_offset = 0;
3988 + p->file_bytes = 0;
3992 diff -u src/vm/swap.h~ src/vm/swap.h
3993 --- src/vm/swap.h~ 1969-12-31 16:00:00.000000000 -0800
3994 +++ src/vm/swap.h 2005-06-16 15:09:31.000000000 -0700
3997 +#define VM_SWAP_H 1
3999 +#include <stdbool.h>
4002 +void swap_init (void);
4003 +void swap_in (struct page *);
4004 +bool swap_out (struct page *);
4006 +#endif /* vm/swap.h */