#ifndef __ASSEMBLER__
#include <stdint.h>
+#include "debug.h"
#endif
-// An Address:
-// +--------10------+-------10-------+---------12----------+
-// | Page Directory | Page Table | Offset within Page |
-// +----------------+----------------+---------------------+
-
-#define PGSHIFT 12 /* LOG2(NBPG) */
-#define NBPG (1 << PGSHIFT) /* bytes/page */
-
-/* Page tables (selected by VA[31:22] and indexed by VA[21:12]) */
-#define PGMASK (NBPG - 1) /* Mask for page offset. Terrible name! */
-#define PGOFS(va) ((va) & PGMASK)
-/* Page number of virtual page in the virtual page table. */
-#define PGNO(va) ((uint32_t) (va) >> PGSHIFT)
-/* Index of PTE for VA within the corresponding page table */
-#define PTENO(va) (((uint32_t) (va) >> PGSHIFT) & 0x3ff)
-/* Round up to a page */
-#define PGROUNDUP(va) (((va) + PGMASK) & ~PGMASK)
-/* Round down to a page */
-#define PGROUNDDOWN(va) ((va) & ~PGMASK)
-/* Page directories (indexed by VA[31:22]) */
-#define PDSHIFT 22 /* LOG2(NBPD) */
-#define NBPD (1 << PDSHIFT) /* bytes/page dir */
-#define PDMASK (NBPD-1) /* byte offset into region mapped by
- a page table */
-#define PDENO(va) ((uint32_t) (va) >> PDSHIFT)
-/* Round up */
-#define PDROUNDUP(va) (((va) + PDMASK) & ~PDMASK)
-/* Round down */
-#define PDROUNDDOWN(va) ((va) & ~PDMASK)
-
-/* At IOPHYSMEM (640K) there is a 384K hole for I/O. From the kernel,
- * IOPHYSMEM can be addressed at KERNBASE + IOPHYSMEM. The hole ends
- * at physical address EXTPHYSMEM. */
-#define IOPHYSMEM 0xa0000
-#define EXTPHYSMEM 0x100000
-
-
-/*
- * Virtual memory map: Permissions
- * kernel/user
- *
- * 4 Gig --------> +------------------------------+
- * | | RW/--
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- * : . :
- * : . :
- * : . :
- * |~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~| RW/--
- * | | RW/--
- * | Physical Memory | RW/--
- * | | RW/--
- * KERNBASE -----> +------------------------------+
- * | Kernel Virtual Page Table | RW/-- NBPD
- * VPT,KSTACKTOP--> +------------------------------+ --+
- * | Kernel Stack | RW/-- KSTKSIZE |
- * | - - - - - - - - - - - - - - -| NBPD
- * | Invalid memory | --/-- |
- * ULIM ------> +------------------------------+ --+
- * | R/O User VPT | R-/R- NBPD
- * UVPT ----> +------------------------------+
- * | R/O PPAGE | R-/R- NBPD
- * UPPAGES ----> +------------------------------+
- * | R/O UENVS | R-/R- NBPD
- * UTOP,UENVS -------> +------------------------------+
- * UXSTACKTOP -/ | user exception stack | RW/RW NBPG
- * +------------------------------+
- * | Invalid memory | --/-- NBPG
- * USTACKTOP ----> +------------------------------+
- * | normal user stack | RW/RW NBPG
- * +------------------------------+
- * | |
- * | |
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- * . .
- * . .
- * . .
- * |~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~|
- * | |
- * UTEXT -------> +------------------------------+
- * | | 2 * NBPD
- * 0 ------------> +------------------------------+
- */
+/* Yields a bit mask of CNT one-bits starting at bit SHIFT,
+   e.g. MASK(12, 10) == 0x003ff000. */
+#define MASK(SHIFT, CNT) (((1ul << (CNT)) - 1) << (SHIFT))
+/* Page offset (bits 0:11). */
+#define PGSHIFT 0 /* First offset bit. */
+#define PGBITS 12 /* Number of offset bits. */
+#define PGMASK MASK(PGSHIFT, PGBITS) /* Offset bits (0:11). */
+#define PGSIZE (1 << PGBITS) /* Bytes in a page: 4,096. */
-#define PHYS_BASE 0xc0000000 /* All physical memory mapped here. */
-#define KERN_BASE 0xc0100000 /* Kernel loaded here. */
+/* Page table (bits 12:21). */
+#define PTSHIFT PGBITS /* First page table bit. */
+#define PTBITS 10 /* Number of page table bits. */
+#define PTMASK MASK(PTSHIFT, PTBITS) /* Page table bits (12:21). */
-/* Virtual page table. Last entry of all PDEs contains a pointer to
- * the PD itself, thereby turning the PD into a page table which
- * maps all PTEs over the last 4 Megs of the virtual address space */
-#define VPT (KERNBASE - NBPD)
-#define KSTACKTOP VPT
-#define KSTKSIZE (8 * NBPG) /* size of a kernel stack */
-#define ULIM (KSTACKTOP - NBPD)
+/* Page directory (bits 22:31). */
+#define PDSHIFT (PTSHIFT + PTBITS) /* First page dir bit. */
+#define PDBITS 10 /* Number of page dir bits. */
+#define PDMASK MASK(PDSHIFT, PDBITS) /* Page dir bits (22:31). */
-/*
- * User read-only mappings! Anything below here til UTOP are readonly to user.
- * They are global pages mapped in at env allocation time.
- */
+#ifndef __ASSEMBLER__
+/* Offset of VA within its page (the low PGBITS bits).
+   Const-qualified so it accepts pointers to const data too. */
+static inline unsigned pg_ofs (const void *va) { return (uintptr_t) va & PGMASK; }
-/* Same as VPT but read-only for users */
-#define UVPT (ULIM - NBPD)
-/* Read-only copies of all ppage structures */
-#define UPPAGES (UVPT - NBPD)
-/* Read only copy of the global env structures */
-#define UENVS (UPPAGES - NBPD)
+/* Page number of VA: the virtual address with the offset bits
+   shifted away. */
+static inline uintptr_t pg_no (const void *va) { return (uintptr_t) va >> PTSHIFT; }
+/* Page table index of VA (bits 12:21). */
+static inline unsigned pt_no (const void *va) {
+  return ((uintptr_t) va & PTMASK) >> PTSHIFT;
+}
+/* Page directory index of VA (bits 22:31). */
+static inline uintptr_t pd_no (const void *va) { return (uintptr_t) va >> PDSHIFT; }
-/*
- * Top of user VM. User can manipulate VA from UTOP-1 and down!
- */
-#define UTOP UENVS
-#define UXSTACKTOP (UTOP) /* one page user exception stack */
-/* leave top page invalid to guard against exception stack overflow */
-#define USTACKTOP (UTOP - 2*NBPG) /* top of the normal user stack */
-#define UTEXT (2*NBPD)
+/* Rounds VA up to the nearest page boundary.
+   Takes const void * so const pointers can be passed; the result
+   intentionally drops the qualifier, as callers expect void *. */
+static inline void *pg_round_up (const void *va) {
+  return (void *) (((uintptr_t) va + PGSIZE - 1) & ~PGMASK);
+}
-/* Number of page tables for mapping physical memory at KERNBASE.
- * (each PT contains 1K PTE's, for a total of 128 Megabytes mapped) */
-#define NPPT ((-KERNBASE)>>PDSHIFT)
+/* Rounds VA down to the nearest page boundary.
+   Takes const void * so const pointers can be passed; the result
+   intentionally drops the qualifier, as callers expect void *. */
+static inline void *pg_round_down (const void *va) {
+  return (void *) ((uintptr_t) va & ~PGMASK);
+}
-#ifndef __ASSEMBLER__
-#include "debug.h"
+#define PHYS_BASE ((void *) 0xc0000000) /* All physical memory mapped here. */
+#define KERN_BASE ((void *) 0xc0100000) /* Kernel loaded here, 1 MB past PHYS_BASE. */
-/* Kernel virtual address at which physical address PADDR is
-   mapped. */
+/* Returns the kernel virtual address at which physical address
+   PADDR is mapped. */
 static inline void *
-ptov (uint32_t paddr)
+ptov (uintptr_t paddr)
 {
-  ASSERT (paddr < PHYS_BASE);
+  ASSERT ((void *) paddr < PHYS_BASE);
-  return (void *) (paddr + PHYS_BASE);
+  /* PHYS_BASE is now a pointer; convert through uintptr_t so we
+     don't do arithmetic on void * (a GNU extension, not ISO C). */
+  return (void *) (paddr + (uintptr_t) PHYS_BASE);
 }
-/* Physical address at which kernel virtual address VADDR is
-   mapped. */
-static inline uint32_t
+/* Returns the physical address at which kernel virtual address
+   VADDR is mapped. */
+static inline uintptr_t
-vtop (void *vaddr)
+vtop (const void *vaddr)
 {
+  /* Only addresses at or above PHYS_BASE map physical memory. */
-  ASSERT ((uint32_t) vaddr >= PHYS_BASE);
+  ASSERT (vaddr >= PHYS_BASE);
-  return (uint32_t) vaddr - PHYS_BASE;
+  return (uintptr_t) vaddr - (uintptr_t) PHYS_BASE;
 }
#endif
-#define PFM_NONE 0x0 /* No page faults expected. Must be a kernel bug */
-#define PFM_KILL 0x1 /* On fault kill user process. */
-
-
-/*
- * Macros to build GDT entries in assembly.
- */
-#define SEG_NULL \
- .word 0, 0; \
- .byte 0, 0, 0, 0
-#define SEG(type,base,lim) \
- .word ((lim)&0xffff), ((base)&0xffff); \
- .byte (((base)>>16)&0xff), (0x90|(type)), \
- (0xc0|(((lim)>>16)&0xf)), (((base)>>24)&0xff)
-
-
-
/* Page Table/Directory Entry flags
* these are defined by the hardware
*/
+/* Returns a page-directory entry that points to page table
+   PAGETAB, marked present (PG_P), writable (PG_W), and
+   user-accessible (PG_U).  PAGETAB must be page-aligned, since
+   the low 12 bits of a PDE hold flags, not address bits. */
static uint32_t
make_pde (uint32_t *pagetab)
{
-  ASSERT (PGOFS ((uintptr_t) pagetab) == 0);
+  ASSERT (pg_ofs (pagetab) == 0);
  return vtop (pagetab) | PG_U | PG_P | PG_W;
}
{
uint32_t entry;
- ASSERT (PGOFS ((uintptr_t) page) == 0);
+ ASSERT (pg_ofs (page) == 0);
entry = vtop (page) | PG_U | PG_P;
if (writable)
{
ASSERT (pde & PG_P);
- return ptov (PGROUNDDOWN (pde));
+ return ptov (pde & ~PGMASK);
}
static void *
{
ASSERT (pte & PG_P);
- return ptov (PGROUNDDOWN (pte));
+ return ptov (pte & ~PGMASK);
}
/* Populates the base page directory and page table with the
pt = NULL;
for (page = 0; page < ram_pages; page++)
{
- uintptr_t paddr = page * NBPG;
+ uintptr_t paddr = page * PGSIZE;
void *vaddr = ptov (paddr);
- size_t pde_idx = PDENO ((uintptr_t) vaddr);
- size_t pte_idx = PTENO ((uintptr_t) vaddr);
+ size_t pde_idx = pd_no (vaddr);
+ size_t pte_idx = pt_no (vaddr);
if (pd[pde_idx] == 0)
{
pagedir_create (void)
{
uint32_t *pd = palloc_get (0);
- memcpy (pd, base_page_dir, NBPG);
+ memcpy (pd, base_page_dir, PGSIZE);
return pd;
}
uint32_t *pde;
ASSERT (pagedir != NULL);
- ASSERT (PGOFS ((uintptr_t) upage) == 0);
- ASSERT ((uintptr_t) upage < PHYS_BASE);
+ ASSERT (pg_ofs (upage) == 0);
+ ASSERT (upage < PHYS_BASE);
/* Check for a page table for UPAGE.
If one is missing, create one if requested. */
- pde = pagedir + PDENO ((uint32_t) upage);
+ pde = pagedir + pd_no (upage);
if (*pde == 0)
{
if (create)
/* Return the page table entry. */
pagetab = pde_get_pagetab (*pde);
- return &pagetab[PTENO ((uintptr_t) upage)];
+ return &pagetab[pt_no (upage)];
}
bool
{
uint32_t *pte;
- ASSERT (PGOFS ((uintptr_t) kpage) == 0);
+ ASSERT (pg_ofs (kpage) == 0);
pte = lookup_page (pagedir, upage, true);
if (pte != NULL)
static uint32_t *
scan_pt (uint32_t *pt, unsigned pde_idx, unsigned pte_idx, void **upage)
{
- for (; pte_idx < NBPG / sizeof *pt; pte_idx++)
+ for (; pte_idx < PGSIZE / sizeof *pt; pte_idx++)
{
uint32_t pte = pt[pte_idx];
void *kpage = pte_get_page (pte);
if (kpage != NULL)
{
- *upage = (void *) ((pde_idx << PDSHIFT)
- | (pte_idx << PGSHIFT));
+ *upage = (void *) ((pde_idx << PDSHIFT) | (pte_idx << PTSHIFT));
return kpage;
}
}
static void *
scan_pd (uint32_t *pd, unsigned pde_idx, void **upage)
{
- for (; pde_idx < PDENO (PHYS_BASE); pde_idx++)
+ for (; pde_idx < pd_no (PHYS_BASE); pde_idx++)
{
uint32_t pde = pd[pde_idx];
unsigned pde_idx, pte_idx;
void *kpage;
- pde_idx = PDENO (*upage);
- pte_idx = PTENO (*upage);
+ pde_idx = pd_no (*upage);
+ pte_idx = pt_no (*upage);
kpage = scan_pt (pde_get_pagetab (pd[pde_idx]),
pde_idx, pte_idx + 1, upage);
if (kpage == NULL)
load_segment (struct addrspace *as, struct file *file,
const struct Elf32_Phdr *phdr)
{
- uintptr_t start, end;
+ void *start, *end;
uint8_t *upage;
off_t filesz_left;
ASSERT (phdr != NULL);
ASSERT (phdr->p_type == PT_LOAD);
- /* p_offset and p_vaddr must be congruent modulo NBPG. */
- if (phdr->p_offset % NBPG != phdr->p_vaddr % NBPG)
+ /* p_offset and p_vaddr must be congruent modulo PGSIZE. */
+ if (phdr->p_offset % PGSIZE != phdr->p_vaddr % PGSIZE)
{
printk ("%#08"PE32Ox" and %#08"PE32Ax" not congruent modulo %#x\n",
- phdr->p_offset, phdr->p_vaddr, (unsigned) NBPG);
+ phdr->p_offset, phdr->p_vaddr, (unsigned) PGSIZE);
return false;
}
}
/* Validate virtual memory region to be mapped. */
- start = PGROUNDDOWN (phdr->p_vaddr);
- end = PGROUNDUP (phdr->p_vaddr + phdr->p_memsz);
+ start = pg_round_down ((void *) phdr->p_vaddr);
+ end = pg_round_up ((void *) (phdr->p_vaddr + phdr->p_memsz));
if (start >= PHYS_BASE || end >= PHYS_BASE || end < start)
{
printk ("bad virtual region %08lx...%08lx\n",
return false;
}
- filesz_left = phdr->p_filesz + (phdr->p_vaddr - start);
- file_seek (file, ROUND_DOWN (phdr->p_offset, NBPG));
- for (upage = (uint8_t *) start; upage < (uint8_t *) end; upage += NBPG)
+ filesz_left = phdr->p_filesz + (phdr->p_vaddr & PGMASK);
+ file_seek (file, ROUND_DOWN (phdr->p_offset, PGSIZE));
+ for (upage = start; upage < (uint8_t *) end; upage += PGSIZE)
{
- size_t read_bytes = filesz_left >= NBPG ? NBPG : filesz_left;
- size_t zero_bytes = NBPG - read_bytes;
+ size_t read_bytes = filesz_left >= PGSIZE ? PGSIZE : filesz_left;
+ size_t zero_bytes = PGSIZE - read_bytes;
uint8_t *kpage = palloc_get (0);
if (kpage == NULL)
return false;