#include "threads/palloc.h"
#include <bitmap.h>
#include <debug.h>
#include <round.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include "threads/init.h"
#include "threads/loader.h"
#include "threads/mmu.h"
#include "threads/synch.h"
#include "threads/vaddr.h"
/* Page allocator.  Hands out memory in page-size (or
   page-multiple) chunks.  See malloc.h for an allocator that
   hands out smaller chunks.

   System memory is divided into two "pools" called the kernel
   and user pools.  The user pool is for user (virtual) memory
   pages, the kernel pool for everything else.  The idea here is
   that the kernel needs to have memory for its own operations
   even if user processes are swapping like mad.

   By default, half of system RAM is given to the kernel pool and
   half to the user pool.  That should be huge overkill for the
   kernel pool, but that's just fine for demonstration purposes. */
32 size_t first_page; /* Page number of first page. */
33 size_t page_cnt; /* Number of pages. */
36 /* Tracks pages in use, with a lock protecting it. */
37 static struct bitmap *used_map;
38 static struct lock used_map_lock;
40 /* Two pools: one for kernel data, one for user pages. */
41 static struct pool kernel_pool, user_pool;
43 /* Maximum number of pages to put in user pool. */
44 size_t max_user_pages = SIZE_MAX;
46 static void init_pool (struct pool *pool, size_t first_page, size_t page_cnt,
48 static bool page_from_pool (const struct pool *, void *page);
50 /* Initializes the page allocator. */
54 /* used_map from 1 MB as long as necessary. */
55 size_t bitmap_start = 1024;
56 size_t bitmap_pages = DIV_ROUND_UP (bitmap_needed_bytes (ram_pages), PGSIZE);
58 /* Free space from the bitmap to the end of RAM. */
59 size_t free_start = bitmap_start + bitmap_pages;
60 size_t free_pages = ram_pages - free_start;
62 /* Kernel and user get half of free space each.
63 User space can be limited by max_user_pages. */
64 size_t half_free = free_pages / 2;
65 size_t kernel_pages = half_free;
66 size_t user_pages = half_free < max_user_pages ? half_free : max_user_pages;
68 used_map = bitmap_create_preallocated (ram_pages,
69 ptov (bitmap_start * PGSIZE),
70 bitmap_pages * PGSIZE);
71 init_pool (&kernel_pool, free_start, kernel_pages, "kernel pool");
72 init_pool (&user_pool, free_start + kernel_pages, user_pages, "use pool");
73 lock_init (&used_map_lock, "used_map");
76 /* Initializes POOL to start (named NAME) at physical page number
77 FIRST_PAGE and continue for PAGE_CNT pages. */
79 init_pool (struct pool *pool, size_t first_page, size_t page_cnt,
82 printf ("%zu pages available in %s.\n", page_cnt, name);
83 pool->first_page = first_page;
84 pool->page_cnt = page_cnt;
87 /* Obtains and returns a group of PAGE_CNT contiguous free pages.
88 If PAL_USER is set, the pages are obtained from the user pool,
89 otherwise from the kernel pool. If PAL_ZERO is set in FLAGS,
90 then the pages are filled with zeros. If too few pages are
91 available, returns a null pointer, unless PAL_ASSERT is set in
92 FLAGS, in which case the kernel panics. */
94 palloc_get_multiple (enum palloc_flags flags, size_t page_cnt)
103 pool = flags & PAL_USER ? &user_pool : &kernel_pool;
105 lock_acquire (&used_map_lock);
106 page_idx = bitmap_scan_and_flip (used_map,
107 pool->first_page, pool->page_cnt,
109 lock_release (&used_map_lock);
111 if (page_idx != BITMAP_ERROR)
112 pages = ptov (PGSIZE * page_idx);
118 if (flags & PAL_ZERO)
119 memset (pages, 0, PGSIZE * page_cnt);
123 if (flags & PAL_ASSERT)
124 PANIC ("palloc_get: out of pages");
130 /* Obtains and returns a single free page.
131 If PAL_USER is set, the page is obtained from the user pool,
132 otherwise from the kernel pool. If PAL_ZERO is set in FLAGS,
133 then the page is filled with zeros. If no pages are
134 available, returns a null pointer, unless PAL_ASSERT is set in
135 FLAGS, in which case the kernel panics. */
137 palloc_get_page (enum palloc_flags flags)
139 return palloc_get_multiple (flags, 1);
142 /* Frees the PAGE_CNT pages starting at PAGES. */
144 palloc_free_multiple (void *pages, size_t page_cnt)
149 ASSERT (pg_ofs (pages) == 0);
150 if (pages == NULL || page_cnt == 0)
153 if (page_from_pool (&kernel_pool, pages))
155 else if (page_from_pool (&user_pool, pages))
160 page_idx = vtop (pages) / PGSIZE;
163 memset (pages, 0xcc, PGSIZE * page_cnt);
166 ASSERT (bitmap_all (used_map, page_idx, page_cnt));
167 bitmap_set_multiple (used_map, page_idx, page_cnt, false);
/* Frees the single page at PAGE, which must have been obtained
   from palloc_get_page() (or palloc_get_multiple()). */
void
palloc_free_page (void *page)
{
  palloc_free_multiple (page, 1);
}
177 /* Returns true if PAGE was allocated from POOL,
180 page_from_pool (const struct pool *pool, void *page)
182 size_t phys_page_no = vtop (page) / PGSIZE;
184 return (phys_page_no >= pool->first_page
185 && phys_page_no < pool->first_page + pool->page_cnt);