From: Ben Pfaff Date: Thu, 26 Aug 2004 19:52:36 +0000 (+0000) Subject: Improve mmu.h. X-Git-Url: https://pintos-os.org/cgi-bin/gitweb.cgi?p=pintos-anon;a=commitdiff_plain;h=a98578bf3b6b5c946713654b404a886a7199dbee Improve mmu.h. --- diff --git a/src/threads/init.c b/src/threads/init.c index f7702cd..3eb45ad 100644 --- a/src/threads/init.c +++ b/src/threads/init.c @@ -57,8 +57,8 @@ main (void) /* Memory from the end of the kernel through the end of memory is free. Give it to the page allocator. */ - palloc_init ((void *) (KERN_BASE + kernel_pages * NBPG), - (void *) (PHYS_BASE + ram_pages * NBPG)); + palloc_init ((void *) (KERN_BASE + kernel_pages * PGSIZE), + (void *) (PHYS_BASE + ram_pages * PGSIZE)); paging_init (); gdt_init (); diff --git a/src/threads/malloc.c b/src/threads/malloc.c index ba39d1d..263651d 100644 --- a/src/threads/malloc.c +++ b/src/threads/malloc.c @@ -31,7 +31,7 @@ malloc_init (void) { size_t slot_size; - for (slot_size = 16; slot_size < NBPG; slot_size *= 2) + for (slot_size = 16; slot_size < PGSIZE; slot_size *= 2) { struct desc *d = &descs[desc_cnt++]; ASSERT (desc_cnt <= sizeof descs / sizeof *descs); @@ -44,7 +44,7 @@ malloc_init (void) static struct arena * slot_to_arena (struct slot *s) { - return (struct arena *) ((uint32_t) s & ~(NBPG - 1)); + return (struct arena *) ((uint32_t) s & ~(PGSIZE - 1)); } static void * @@ -87,7 +87,7 @@ malloc (size_t size) a->next = d->arenas; if (d->arenas != NULL) d->arenas->prev = a; - for (ofs = sizeof *a; ofs + d->slot_size <= NBPG; ofs += d->slot_size) + for (ofs = sizeof *a; ofs + d->slot_size <= PGSIZE; ofs += d->slot_size) { struct slot *s = (struct slot *) ((uint8_t *) a + ofs); s->next = d->free_list; diff --git a/src/threads/mmu.h b/src/threads/mmu.h index 10c2a92..87933f1 100644 --- a/src/threads/mmu.h +++ b/src/threads/mmu.h @@ -40,171 +40,76 @@ #ifndef __ASSEMBLER__ #include +#include "debug.h" #endif -// An Address: -// +--------10------+-------10-------+---------12----------+ -// | Page Directory | Page Table | Offset within Page | -// +----------------+----------------+---------------------+ - -#define PGSHIFT 12 /* LOG2(NBPG) */ -#define NBPG (1 << PGSHIFT) /* bytes/page */ - -/* Page tables (selected by VA[31:22] and indexed by VA[21:12]) */ -#define PGMASK (NBPG - 1) /* Mask for page offset. Terrible name! */ -#define PGOFS(va) ((va) & PGMASK) -/* Page number of virtual page in the virtual page table. */ -#define PGNO(va) ((uint32_t) (va) >> PGSHIFT) -/* Index of PTE for VA within the corresponding page table */ -#define PTENO(va) (((uint32_t) (va) >> PGSHIFT) & 0x3ff) -/* Round up to a page */ -#define PGROUNDUP(va) (((va) + PGMASK) & ~PGMASK) -/* Round down to a page */ -#define PGROUNDDOWN(va) ((va) & ~PGMASK) -/* Page directories (indexed by VA[31:22]) */ -#define PDSHIFT 22 /* LOG2(NBPD) */ -#define NBPD (1 << PDSHIFT) /* bytes/page dir */ -#define PDMASK (NBPD-1) /* byte offset into region mapped by - a page table */ -#define PDENO(va) ((uint32_t) (va) >> PDSHIFT) -/* Round up */ -#define PDROUNDUP(va) (((va) + PDMASK) & ~PDMASK) -/* Round down */ -#define PDROUNDDOWN(va) ((va) & ~PDMASK) - -/* At IOPHYSMEM (640K) there is a 384K hole for I/O. From the kernel, - * IOPHYSMEM can be addressed at KERNBASE + IOPHYSMEM. The hole ends - * at physical address EXTPHYSMEM. */ -#define IOPHYSMEM 0xa0000 -#define EXTPHYSMEM 0x100000 - - -/* - * Virtual memory map: Permissions - * kernel/user - * - * 4 Gig --------> +------------------------------+ - * | | RW/-- - * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - * : . 
: - * : . : - * : . : - * |~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~| RW/-- - * | | RW/-- - * | Physical Memory | RW/-- - * | | RW/-- - * KERNBASE -----> +------------------------------+ - * | Kernel Virtual Page Table | RW/-- NBPD - * VPT,KSTACKTOP--> +------------------------------+ --+ - * | Kernel Stack | RW/-- KSTKSIZE | - * | - - - - - - - - - - - - - - -| NBPD - * | Invalid memory | --/-- | - * ULIM ------> +------------------------------+ --+ - * | R/O User VPT | R-/R- NBPD - * UVPT ----> +------------------------------+ - * | R/O PPAGE | R-/R- NBPD - * UPPAGES ----> +------------------------------+ - * | R/O UENVS | R-/R- NBPD - * UTOP,UENVS -------> +------------------------------+ - * UXSTACKTOP -/ | user exception stack | RW/RW NBPG - * +------------------------------+ - * | Invalid memory | --/-- NBPG - * USTACKTOP ----> +------------------------------+ - * | normal user stack | RW/RW NBPG - * +------------------------------+ - * | | - * | | - * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - * . . - * . . - * . . - * |~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~| - * | | - * UTEXT -------> +------------------------------+ - * | | 2 * NBPD - * 0 ------------> +------------------------------+ - */ +#define MASK(SHIFT, CNT) (((1ul << (CNT)) - 1) << (SHIFT)) +/* Page offset (bits 0:11). */ +#define PGSHIFT 0 /* First offset bit. */ +#define PGBITS 12 /* Number of offset bits. */ +#define PGMASK MASK(PGSHIFT, PGBITS) +#define PGSIZE (1 << PGBITS) -#define PHYS_BASE 0xc0000000 /* All physical memory mapped here. */ -#define KERN_BASE 0xc0100000 /* Kernel loaded here. */ +/* Page table (bits 12:21). */ +#define PTSHIFT PGBITS /* First page table bit. */ +#define PTBITS 10 /* Number of page table bits. */ +#define PTMASK MASK(PTSHIFT, PTBITS) -/* Virtual page table. Last entry of all PDEs contains a pointer to - * the PD itself, thereby turning the PD into a page table which - * maps all PTEs over the last 4 Megs of the virtual address space */ -#define VPT (KERNBASE - NBPD) -#define KSTACKTOP VPT -#define KSTKSIZE (8 * NBPG) /* size of a kernel stack */ -#define ULIM (KSTACKTOP - NBPD) +/* Page directory (bits 22:31). */ +#define PDSHIFT (PTSHIFT + PTBITS) /* First page dir bit. */ +#define PDBITS 10 /* Number of page dir bits. */ +#define PDMASK MASK(PDSHIFT, PDBITS) -/* - * User read-only mappings! Anything below here til UTOP are readonly to user. - * They are global pages mapped in at env allocation time. - */ +#ifndef __ASSEMBLER__ +/* Offset within a page. */ +static inline unsigned pg_ofs (void *va) { return (uintptr_t) va & PGMASK; } -/* Same as VPT but read-only for users */ -#define UVPT (ULIM - NBPD) -/* Read-only copies of all ppage structures */ -#define UPPAGES (UVPT - NBPD) -/* Read only copy of the global env structures */ -#define UENVS (UPPAGES - NBPD) +/* Page number. */ +static inline uintptr_t pg_no (void *va) { return (uintptr_t) va >> PTSHIFT; } +/* Page table number. */ +static inline unsigned pt_no (void *va) { + return ((uintptr_t) va & PTMASK) >> PTSHIFT; +} +/* Page directory number. */ +static inline uintptr_t pd_no (void *va) { return (uintptr_t) va >> PDSHIFT; } -/* - * Top of user VM. User can manipulate VA from UTOP-1 and down! - */ -#define UTOP UENVS -#define UXSTACKTOP (UTOP) /* one page user exception stack */ -/* leave top page invalid to guard against exception stack overflow */ -#define USTACKTOP (UTOP - 2*NBPG) /* top of the normal user stack */ -#define UTEXT (2*NBPD) +/* Round up to nearest page boundary. 
*/ +static inline void *pg_round_up (void *va) { + return (void *) (((uintptr_t) va + PGSIZE - 1) & ~PGMASK); +} -/* Number of page tables for mapping physical memory at KERNBASE. - * (each PT contains 1K PTE's, for a total of 128 Megabytes mapped) */ -#define NPPT ((-KERNBASE)>>PDSHIFT) +/* Round down to nearest page boundary. */ +static inline void *pg_round_down (void *va) { + return (void *) ((uintptr_t) va & ~PGMASK); +} -#ifndef __ASSEMBLER__ -#include "debug.h" +#define PHYS_BASE ((void *) 0xc0000000) /* Physical memory mapped here. */ +#define KERN_BASE ((void *) 0xc0100000) /* Kernel loaded here. */ -/* Kernel virtual address at which physical address PADDR is - mapped. */ +/* Returns kernel virtual address at which physical address PADDR + is mapped. */ static inline void * -ptov (uint32_t paddr) +ptov (uintptr_t paddr) { - ASSERT (paddr < PHYS_BASE); + ASSERT ((void *) paddr < PHYS_BASE); return (void *) (paddr + PHYS_BASE); } -/* Physical address at which kernel virtual address VADDR is - mapped. */ -static inline uint32_t +/* Returns physical address at which kernel virtual address VADDR + is mapped. */ +static inline uintptr_t vtop (void *vaddr) { - ASSERT ((uint32_t) vaddr >= PHYS_BASE); + ASSERT (vaddr >= PHYS_BASE); - return (uint32_t) vaddr - PHYS_BASE; + return (uintptr_t) vaddr - (uintptr_t) PHYS_BASE; } #endif -#define PFM_NONE 0x0 /* No page faults expected. Must be a kernel bug */ -#define PFM_KILL 0x1 /* On fault kill user process. */ - - -/* - * Macros to build GDT entries in assembly. - */ -#define SEG_NULL \ - .word 0, 0; \ - .byte 0, 0, 0, 0 -#define SEG(type,base,lim) \ - .word ((lim)&0xffff), ((base)&0xffff); \ - .byte (((base)>>16)&0xff), (0x90|(type)), \ - (0xc0|(((lim)>>16)&0xf)), (((base)>>24)&0xff) - - - /* Page Table/Directory Entry flags * these are defined by the hardware */ diff --git a/src/threads/paging.c b/src/threads/paging.c index 190d4fc..9fd568c 100644 --- a/src/threads/paging.c +++ b/src/threads/paging.c @@ -11,7 +11,7 @@ static uint32_t *base_page_dir; static uint32_t make_pde (uint32_t *pagetab) { - ASSERT (PGOFS ((uintptr_t) pagetab) == 0); + ASSERT (pg_ofs (pagetab) == 0); return vtop (pagetab) | PG_U | PG_P | PG_W; } @@ -21,7 +21,7 @@ make_pte (uint32_t *page, bool writable) { uint32_t entry; - ASSERT (PGOFS ((uintptr_t) page) == 0); + ASSERT (pg_ofs (page) == 0); entry = vtop (page) | PG_U | PG_P; if (writable) @@ -34,7 +34,7 @@ pde_get_pagetab (uint32_t pde) { ASSERT (pde & PG_P); - return ptov (PGROUNDDOWN (pde)); + return ptov (pde & ~PGMASK); } static void * @@ -42,7 +42,7 @@ pte_get_page (uint32_t pte) { ASSERT (pte & PG_P); - return ptov (PGROUNDDOWN (pte)); + return ptov (pte & ~PGMASK); } /* Populates the base page directory and page table with the @@ -63,10 +63,10 @@ paging_init (void) pt = NULL; for (page = 0; page < ram_pages; page++) { - uintptr_t paddr = page * NBPG; + uintptr_t paddr = page * PGSIZE; void *vaddr = ptov (paddr); - size_t pde_idx = PDENO ((uintptr_t) vaddr); - size_t pte_idx = PTENO ((uintptr_t) vaddr); + size_t pde_idx = pd_no (vaddr); + size_t pte_idx = pt_no (vaddr); if (pd[pde_idx] == 0) { @@ -85,7 +85,7 @@ uint32_t * pagedir_create (void) { uint32_t *pd = palloc_get (0); - memcpy (pd, base_page_dir, NBPG); + memcpy (pd, base_page_dir, PGSIZE); return pd; } @@ -107,12 +107,12 @@ lookup_page (uint32_t *pagedir, void *upage, bool create) uint32_t *pde; ASSERT (pagedir != NULL); - ASSERT (PGOFS ((uintptr_t) upage) == 0); - ASSERT ((uintptr_t) upage < PHYS_BASE); + ASSERT (pg_ofs (upage) == 0); + ASSERT 
(upage < PHYS_BASE); /* Check for a page table for UPAGE. If one is missing, create one if requested. */ - pde = pagedir + PDENO ((uint32_t) upage); + pde = pagedir + pd_no (upage); if (*pde == 0) { if (create) @@ -129,7 +129,7 @@ lookup_page (uint32_t *pagedir, void *upage, bool create) /* Return the page table entry. */ pagetab = pde_get_pagetab (*pde); - return &pagetab[PTENO ((uintptr_t) upage)]; + return &pagetab[pt_no (upage)]; } bool @@ -138,7 +138,7 @@ pagedir_set_page (uint32_t *pagedir, void *upage, void *kpage, { uint32_t *pte; - ASSERT (PGOFS ((uintptr_t) kpage) == 0); + ASSERT (pg_ofs (kpage) == 0); pte = lookup_page (pagedir, upage, true); if (pte != NULL) @@ -168,7 +168,7 @@ pagedir_clear_page (uint32_t *pagedir, void *upage) static uint32_t * scan_pt (uint32_t *pt, unsigned pde_idx, unsigned pte_idx, void **upage) { - for (; pte_idx < NBPG / sizeof *pt; pte_idx++) + for (; pte_idx < PGSIZE / sizeof *pt; pte_idx++) { uint32_t pte = pt[pte_idx]; @@ -177,8 +177,7 @@ scan_pt (uint32_t *pt, unsigned pde_idx, unsigned pte_idx, void **upage) void *kpage = pte_get_page (pte); if (kpage != NULL) { - *upage = (void *) ((pde_idx << PDSHIFT) - | (pte_idx << PGSHIFT)); + *upage = (void *) ((pde_idx << PDSHIFT) | (pte_idx << PTSHIFT)); return kpage; } } @@ -190,7 +189,7 @@ scan_pt (uint32_t *pt, unsigned pde_idx, unsigned pte_idx, void **upage) static void * scan_pd (uint32_t *pd, unsigned pde_idx, void **upage) { - for (; pde_idx < PDENO (PHYS_BASE); pde_idx++) + for (; pde_idx < pd_no (PHYS_BASE); pde_idx++) { uint32_t pde = pd[pde_idx]; @@ -217,8 +216,8 @@ pagedir_next (uint32_t *pd, void **upage) unsigned pde_idx, pte_idx; void *kpage; - pde_idx = PDENO (*upage); - pte_idx = PTENO (*upage); + pde_idx = pd_no (*upage); + pte_idx = pt_no (*upage); kpage = scan_pt (pde_get_pagetab (pd[pde_idx]), pde_idx, pte_idx + 1, upage); if (kpage == NULL) diff --git a/src/threads/palloc.c b/src/threads/palloc.c index 0fe5f94..011816a 100644 --- a/src/threads/palloc.c +++ b/src/threads/palloc.c @@ -29,7 +29,7 @@ palloc_get (enum palloc_flags flags) if (free_pages == NULL && uninit_start < uninit_end) { palloc_free (uninit_start); - uninit_start += NBPG; + uninit_start += PGSIZE; } page = free_pages; @@ -37,7 +37,7 @@ palloc_get (enum palloc_flags flags) { free_pages = page->next; if (flags & PAL_ZERO) - memset (page, 0, NBPG); + memset (page, 0, PGSIZE); } else { @@ -52,9 +52,9 @@ void palloc_free (void *page_) { struct page *page = page_; - ASSERT((uintptr_t) page % NBPG == 0); + ASSERT((uintptr_t) page % PGSIZE == 0); #ifndef NDEBUG - memset (page, 0xcc, NBPG); + memset (page, 0xcc, PGSIZE); #endif page->next = free_pages; free_pages = page; diff --git a/src/threads/thread.c b/src/threads/thread.c index 82d33f0..9d75d18 100644 --- a/src/threads/thread.c +++ b/src/threads/thread.c @@ -40,11 +40,11 @@ thread_create (const char *name, void (*function) (void *aux), void *aux) if (t == NULL) return NULL; - memset (t, 0, NBPG); + memset (t, 0, PGSIZE); strlcpy (t->name, name, sizeof t->name); /* Set up stack. 
*/ - t->stack = (uint32_t *) ((uint8_t *) t + NBPG); + t->stack = (uint32_t *) ((uint8_t *) t + PGSIZE); *--t->stack = (uint32_t) aux; *--t->stack = (uint32_t) function; --t->stack; @@ -61,7 +61,7 @@ thread_create (const char *name, void (*function) (void *aux), void *aux) static struct thread * stack_to_thread (uint32_t *stack) { - return (struct thread *) ((uint32_t) (stack - 1) & ~((uint32_t) NBPG - 1)); + return (struct thread *) ((uint32_t) (stack - 1) & ~((uint32_t) PGSIZE - 1)); } struct thread * diff --git a/src/userprog/addrspace.c b/src/userprog/addrspace.c index 4e317c5..3837d4e 100644 --- a/src/userprog/addrspace.c +++ b/src/userprog/addrspace.c @@ -82,7 +82,7 @@ static bool load_segment (struct addrspace *as, struct file *file, const struct Elf32_Phdr *phdr) { - uintptr_t start, end; + void *start, *end; uint8_t *upage; off_t filesz_left; @@ -91,11 +91,11 @@ load_segment (struct addrspace *as, struct file *file, ASSERT (phdr != NULL); ASSERT (phdr->p_type == PT_LOAD); - /* p_offset and p_vaddr must be congruent modulo NBPG. */ - if (phdr->p_offset % NBPG != phdr->p_vaddr % NBPG) + /* p_offset and p_vaddr must be congruent modulo PGSIZE. */ + if (phdr->p_offset % PGSIZE != phdr->p_vaddr % PGSIZE) { printk ("%#08"PE32Ox" and %#08"PE32Ax" not congruent modulo %#x\n", - phdr->p_offset, phdr->p_vaddr, (unsigned) NBPG); + phdr->p_offset, phdr->p_vaddr, (unsigned) PGSIZE); return false; } @@ -108,8 +108,8 @@ load_segment (struct addrspace *as, struct file *file, } /* Validate virtual memory region to be mapped. */ - start = PGROUNDDOWN (phdr->p_vaddr); - end = PGROUNDUP (phdr->p_vaddr + phdr->p_memsz); + start = pg_round_down ((void *) phdr->p_vaddr); + end = pg_round_up ((void *) (phdr->p_vaddr + phdr->p_memsz)); if (start >= PHYS_BASE || end >= PHYS_BASE || end < start) { printk ("bad virtual region %08lx...%08lx\n", @@ -117,12 +117,12 @@ load_segment (struct addrspace *as, struct file *file, return false; } - filesz_left = phdr->p_filesz + (phdr->p_vaddr - start); - file_seek (file, ROUND_DOWN (phdr->p_offset, NBPG)); - for (upage = (uint8_t *) start; upage < (uint8_t *) end; upage += NBPG) + filesz_left = phdr->p_filesz + (phdr->p_vaddr & PGMASK); + file_seek (file, ROUND_DOWN (phdr->p_offset, PGSIZE)); + for (upage = start; upage < (uint8_t *) end; upage += PGSIZE) { - size_t read_bytes = filesz_left >= NBPG ? NBPG : filesz_left; - size_t zero_bytes = NBPG - read_bytes; + size_t read_bytes = filesz_left >= PGSIZE ? PGSIZE : filesz_left; + size_t zero_bytes = PGSIZE - read_bytes; uint8_t *kpage = palloc_get (0); if (kpage == NULL) return false;
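
Usage note (not part of the commit): the new inline helpers in mmu.h replace the old NBPG/PGOFS/PGNO/PTENO/PDENO macros one-for-one, which is why the hunks above are mostly mechanical renames. The sketch below is written only for illustration against the post-patch mmu.h; check_page_helpers() is a hypothetical function, and it assumes mmu.h (which now pulls in debug.h for ASSERT) is on the include path.

#include <stdint.h>
#include "mmu.h"

/* Hypothetical self-check of the new page helpers; for illustration
   only, not part of this commit. */
static void
check_page_helpers (void)
{
  /* 0xc01234ab: directory index 0x300, table index 0x123, offset 0x4ab. */
  void *va = (void *) 0xc01234ab;

  ASSERT (pd_no (va) == 0x300);
  ASSERT (pt_no (va) == 0x123);
  ASSERT (pg_ofs (va) == 0x4ab);

  /* Rounding moves to the boundaries of the enclosing page. */
  ASSERT (pg_round_down (va) == (void *) 0xc0123000);
  ASSERT (pg_round_up (va) == (void *) 0xc0124000);

  /* At or above PHYS_BASE, kernel virtual addresses map physical
     memory one-to-one, so vtop() and ptov() invert each other. */
  ASSERT (vtop (va) == (uintptr_t) va - (uintptr_t) PHYS_BASE);
  ASSERT (ptov (vtop (va)) == va);
}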