X-Git-Url: https://pintos-os.org/cgi-bin/gitweb.cgi?a=blobdiff_plain;f=src%2Fthreads%2Fmmu.h;h=a1f05ae7b489a5f61f0f6f2e5f36b9bd320f5afa;hb=76b07342aab9c426a0244e0b6b75ba50659a5cc9;hp=4e9340abe962242b01e43b1a03e264252c04910d;hpb=6916b246f3be8c72d6e77fd98c4a1447fd2c1de7;p=pintos-anon

diff --git a/src/threads/mmu.h b/src/threads/mmu.h
index 4e9340a..a1f05ae 100644
--- a/src/threads/mmu.h
+++ b/src/threads/mmu.h
@@ -1,56 +1,109 @@
 #ifndef THREADS_MMU_H
 #define THREADS_MMU_H
 
-#ifndef __ASSEMBLER__
 #include <debug.h>
 #include <stdint.h>
-#endif
+#include <stdbool.h>
 #include "threads/loader.h"
 
+/* Virtual to physical translation works like this on an x86:
+
+   - The top 10 bits of the virtual address (bits 22:32) are used
+     to index into the page directory.  If the PDE is marked
+     "present," the physical address of a page table is read from
+     the PDE thus obtained.  If the PDE is marked "not present"
+     then a page fault occurs.
+
+   - The next 10 bits of the virtual address (bits 12:22) are
+     used to index into the page table.  If the PTE is marked
+     "present," the physical address of a data page is read from
+     the PTE thus obtained.  If the PTE is marked "not present"
+     then a page fault occurs.
+
+   - The bottom 12 bits of the virtual address (bits 0:12) are
+     added to the data page's physical base address, producing
+     the final physical address.
+
+
+   32                    22                     12                      0
+   +--------------------------------------------------------------------+
+   | Page Directory Index |   Page Table Index   |     Page Offset      |
+   +--------------------------------------------------------------------+
+                |                    |                      |
+        _______/             _______/                ______/
+       /                    /                       /
+      /    Page Directory  /      Page Table       /    Data Page
+     /     .____________. /     .____________.    /   .____________.
+     |1,023|____________| |1,023|____________|    |   |____________|
+     |1,022|____________| |1,022|____________|    |   |____________|
+     |1,021|____________| |1,021|____________|    \__\|____________|
+     |1,020|____________| |1,020|____________|       /|____________|
+     |     |            | |     |            |        |            |
+     |     |            | \____\|            |_       |            |
+     |     |      .     |      /|      .     | \      |      .     |
+     \____\|      .     |_      |      .     |  |     |      .     |
+          /|      .     | \     |      .     |  |     |      .     |
+           |      .     |  |    |      .     |  |     |      .     |
+           |            |  |    |            |  |     |            |
+           |____________|  |    |____________|  |     |____________|
+          4|____________|  |   4|____________|  |     |____________|
+          3|____________|  |   3|____________|  |     |____________|
+          2|____________|  |   2|____________|  |     |____________|
+          1|____________|  |   1|____________|  |     |____________|
+          0|____________|  \__\0|____________|  \____\|____________|
+                               /                      /
+*/
+
 #define MASK(SHIFT, CNT) (((1ul << (CNT)) - 1) << (SHIFT))
 
-/* Page offset (bits 0:11). */
-#define PGSHIFT 0                  /* First offset bit. */
-#define PGBITS  12                 /* Number of offset bits. */
-#define PGMASK  MASK(PGSHIFT, PGBITS)
-#define PGSIZE  (1 << PGBITS)
+/* Page offset (bits 0:12). */
+#define PGSHIFT 0                          /* Index of first offset bit. */
+#define PGBITS  12                         /* Number of offset bits. */
+#define PGMASK  MASK(PGSHIFT, PGBITS)      /* Page offset bits (0:12). */
+#define PGSIZE  (1 << PGBITS)              /* Bytes in a page. */
 
-/* Page table (bits 12:21). */
-#define PTSHIFT PGBITS             /* First page table bit. */
-#define PTBITS  10                 /* Number of page table bits. */
-#define PTMASK  MASK(PTSHIFT, PTBITS)
+/* Page table (bits 12:22). */
+#define PTSHIFT PGBITS                     /* Index of first page table bit. */
+#define PTBITS  10                         /* Number of page table bits. */
+#define PTMASK  MASK(PTSHIFT, PTBITS)      /* Page table bits (12:22). */
+#define PTSPAN  (1 << PTBITS << PGBITS)    /* Bytes covered by a page table. */
 
-/* Page directory (bits 22:31). */
-#define PDSHIFT (PTSHIFT + PTBITS) /* First page dir bit. */
-#define PDBITS  10                 /* Number of page dir bits. */
-#define PDMASK  MASK(PDSHIFT, PDBITS)
+/* Page directory (bits 22:32). */
+#define PDSHIFT (PTSHIFT + PTBITS)         /* First page dir bit. */
+#define PDBITS  10                         /* Number of page dir bits. */
+#define PDMASK  MASK(PDSHIFT, PDBITS)      /* Page directory bits (22:32). */
 
-#ifndef __ASSEMBLER__
 /* Offset within a page. */
-static inline unsigned pg_ofs (void *va) { return (uintptr_t) va & PGMASK; }
-
-/* Page number. */
-static inline uintptr_t pg_no (void *va) { return (uintptr_t) va >> PTSHIFT; }
-
-/* Page table number. */
-static inline unsigned pt_no (void *va) {
-  return ((uintptr_t) va & PTMASK) >> PTSHIFT;
+static inline unsigned pg_ofs (const void *va) {
+  return (uintptr_t) va & PGMASK;
 }
 
-/* Page directory number. */
-static inline uintptr_t pd_no (void *va) { return (uintptr_t) va >> PDSHIFT; }
+/* Virtual page number. */
+static inline uintptr_t pg_no (const void *va) {
+  return (uintptr_t) va >> PTSHIFT;
+}
 
 /* Round up to nearest page boundary. */
-static inline void *pg_round_up (void *va) {
+static inline void *pg_round_up (const void *va) {
   return (void *) (((uintptr_t) va + PGSIZE - 1) & ~PGMASK);
 }
 
 /* Round down to nearest page boundary. */
-static inline void *pg_round_down (void *va) {
+static inline void *pg_round_down (const void *va) {
   return (void *) ((uintptr_t) va & ~PGMASK);
 }
 
+/* Base address of the 1:1 physical-to-virtual mapping.  Physical
+   memory is mapped starting at this virtual address.  Thus,
+   physical address 0 is accessible at PHYS_BASE, physical
+   address 0x1234 at (uint8_t *) PHYS_BASE + 0x1234, and so on.
+
+   This address also marks the end of user programs' address
+   space.  Up to this point in memory, user programs are allowed
+   to map whatever they like.  At this point and above, the
+   virtual address space belongs to the kernel. */
 #define PHYS_BASE ((void *) LOADER_PHYS_BASE)
 
 /* Returns kernel virtual address at which physical address PADDR
@@ -66,23 +119,82 @@ ptov (uintptr_t paddr)
 
 /* Returns physical address at which kernel virtual address
    VADDR is mapped. */
 static inline uintptr_t
-vtop (void *vaddr)
+vtop (const void *vaddr)
 {
   ASSERT (vaddr >= PHYS_BASE);
   return (uintptr_t) vaddr - (uintptr_t) PHYS_BASE;
 }
-#endif
+
+/* Page directories and page tables.
 
-/* Page Directory Entry (PDE) and Page Table Entry (PTE) flags. */
+   For more information see [IA32-v3] pages 3-23 to 3-28.
+
+   PDEs and PTEs share a common format:
+
+   32                                  12                        0
+   +------------------------------------+------------------------+
+   |          Physical Address          |         Flags          |
+   +------------------------------------+------------------------+
+
+   In a PDE, the physical address points to a page table.
+   In a PTE, the physical address points to a data or code page.
+   The important flags are listed below.
+   When a PDE or PTE is not "present", the other flags are
+   ignored.
+   A PDE or PTE that is initialized to 0 will be interpreted as
+   "not present", which is just fine. */
 #define PG_P 0x1               /* 1=present, 0=not present. */
 #define PG_W 0x2               /* 1=read/write, 0=read-only. */
 #define PG_U 0x4               /* 1=user/kernel, 0=kernel only. */
 #define PG_A 0x20              /* 1=accessed, 0=not accessed. */
-#define PG_D 0x40              /* 1=dirty, 0=not dirty. */
+#define PG_D 0x40              /* 1=dirty, 0=not dirty (PTEs only). */
 
-/* EFLAGS Register. */
-#define FLAG_MBS 0x00000002    /* Must be set. */
-#define FLAG_IF  0x00000200    /* Interrupt Flag. */
+/* Obtains page directory index from a virtual address. */
+static inline uintptr_t pd_no (const void *va) {
+  return (uintptr_t) va >> PDSHIFT;
+}
+
+/* Returns a PDE that points to page table PT. */
+static inline uint32_t pde_create (uint32_t *pt) {
+  ASSERT (pg_ofs (pt) == 0);
+  return vtop (pt) | PG_U | PG_P | PG_W;
+}
+
+/* Returns a pointer to the page table that page directory entry
+   PDE, which must be "present", points to. */
+static inline uint32_t *pde_get_pt (uint32_t pde) {
+  ASSERT (pde & PG_P);
+  return ptov (pde & ~PGMASK);
+}
+
+/* Obtains page table index from a virtual address. */
+static inline unsigned pt_no (void *va) {
+  return ((uintptr_t) va & PTMASK) >> PTSHIFT;
+}
+
+/* Returns a PTE that points to PAGE.
+   The PTE's page is readable.
+   If WRITABLE is true then it will be writable as well.
+   The page will be usable only by ring 0 code (the kernel). */
+static inline uint32_t pte_create_kernel (uint32_t *page, bool writable) {
+  ASSERT (pg_ofs (page) == 0);
+  return vtop (page) | PG_P | (writable ? PG_W : 0);
+}
+
+/* Returns a PTE that points to PAGE.
+   The PTE's page is readable.
+   If WRITABLE is true then it will be writable as well.
+   The page will be usable by both user and kernel code. */
+static inline uint32_t pte_create_user (uint32_t *page, bool writable) {
+  return pte_create_kernel (page, writable) | PG_U;
+}
+
+/* Returns a pointer to the page that page table entry PTE, which
+   must be "present", points to. */
+static inline void *pte_get_page (uint32_t pte) {
+  ASSERT (pte & PG_P);
+  return ptov (pte & ~PGMASK);
+}
 
 #endif /* threads/mmu.h */
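The inline functions added above compose into exactly the translation procedure that the header's opening comment describes: index the page directory with pd_no(), follow the PDE to its page table with pde_get_pt(), index that table with pt_no(), follow the PTE to its data page with pte_get_page(), and finally add pg_ofs().  The sketch below walks a page directory in software the same way the MMU would.  It is an illustration only, not part of mmu.h or of this commit; the function name lookup_page_sketch and the convention of returning a null pointer where the hardware would raise a page fault are assumptions made for the example.

#include <stddef.h>
#include <stdint.h>
#include "threads/mmu.h"

/* Hypothetical helper (not in mmu.h): returns the kernel virtual
   address that virtual address VA maps to under page directory PD,
   or a null pointer if the PDE or PTE is not present. */
static void *
lookup_page_sketch (uint32_t *pd, void *va)
{
  uint32_t pde = pd[pd_no (va)];        /* 1. Index the page directory. */
  if (!(pde & PG_P))
    return NULL;                        /* PDE not present: hardware would fault. */

  uint32_t *pt = pde_get_pt (pde);      /* Page table, as a kernel virtual address. */
  uint32_t pte = pt[pt_no (va)];        /* 2. Index the page table. */
  if (!(pte & PG_P))
    return NULL;                        /* PTE not present: hardware would fault. */

  uint8_t *page = pte_get_page (pte);   /* 3. Base of the data page. */
  return page + pg_ofs (va);            /* Add the low 12 offset bits. */
}

Going the other direction, pde_create() and pte_create_kernel()/pte_create_user() build entries out of a page table or page address plus the PG_* flags, which is the direction a kernel takes when it populates a page directory in the first place.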