#include "threads/palloc.h"
#include <bitmap.h>
#include <debug.h>
#include <inttypes.h>
#include <round.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include "threads/init.h"
#include "threads/loader.h"
#include "threads/mmu.h"
#include "threads/synch.h"

/* Page allocator.  Hands out memory in page-size (or
   page-multiple) chunks.  See malloc.h for an allocator that
   hands out smaller chunks.

   System memory is divided into two "pools" called the kernel
   and user pools.  The user pool is for user (virtual) memory
   pages, the kernel pool for everything else.  The idea here is
   that the kernel needs to have memory for its own operations
   even if user processes are swapping like mad.

   By default, half of system RAM is given to the kernel pool and
   half to the user pool.  That should be huge overkill for the
   kernel pool, but that's just fine for demonstration purposes. */
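
/* A sketch of typical use (variable names here are only
   illustrative):

     void *kpage = palloc_get_page (PAL_ZERO);
     void *upage = palloc_get_page (PAL_USER | PAL_ZERO);
     if (kpage == NULL || upage == NULL)
       ...handle the failed allocation...
     ...
     palloc_free_page (upage);
     palloc_free_page (kpage);

   Adding PAL_ASSERT to the flags makes an allocation failure
   panic the kernel instead of returning a null pointer. */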

/* A memory pool. */
struct pool
  {
    struct lock lock;                   /* Mutual exclusion. */
    struct bitmap *used_map;            /* Bitmap of free pages. */
    uint8_t *base;                      /* Base of pool. */
  };

/* Two pools: one for kernel data, one for user pages. */
static struct pool kernel_pool, user_pool;

/* Maximum number of pages to put in user pool. */
size_t user_page_limit = SIZE_MAX;

static void init_pool (struct pool *, void *base, size_t page_cnt,
                       const char *name);
static bool page_from_pool (const struct pool *, void *page);

/* Initializes the page allocator. */
void
palloc_init (void)
{
  /* End of the kernel as recorded by the linker. */
  extern char _end;

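  /* Free memory runs from just past the kernel image to the end
     of physical RAM; it is split between the two pools below. */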
  uint8_t *free_start = pg_round_up (&_end);
  uint8_t *free_end = ptov (ram_pages * PGSIZE);
  size_t free_pages = (free_end - free_start) / PGSIZE;
  size_t user_pages = free_pages / 2;
  size_t kernel_pages;

  if (user_pages > user_page_limit)
    user_pages = user_page_limit;
  kernel_pages = free_pages - user_pages;

  /* Give half of memory to kernel, half to user. */
  init_pool (&kernel_pool, free_start, kernel_pages, "kernel pool");
  init_pool (&user_pool, free_start + kernel_pages * PGSIZE,
             user_pages, "user pool");
}

/* Obtains and returns a group of PAGE_CNT contiguous free pages.
   If PAL_USER is set, the pages are obtained from the user pool,
   otherwise from the kernel pool.  If PAL_ZERO is set in FLAGS,
   then the pages are filled with zeros.  If too few pages are
   available, returns a null pointer, unless PAL_ASSERT is set in
   FLAGS, in which case the kernel panics. */
void *
palloc_get_multiple (enum palloc_flags flags, size_t page_cnt)
{
  struct pool *pool = flags & PAL_USER ? &user_pool : &kernel_pool;
  void *pages;
  size_t page_idx;

  if (page_cnt == 0)
    return NULL;

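  /* Find PAGE_CNT consecutive free pages and mark them in use.
     The search and the flip are a single bitmap_scan_and_flip
     call made under the pool's lock, so no two threads can claim
     the same pages. */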
  lock_acquire (&pool->lock);
  page_idx = bitmap_scan_and_flip (pool->used_map, 0, page_cnt, false);
  lock_release (&pool->lock);

  if (page_idx != BITMAP_ERROR)
    pages = pool->base + PGSIZE * page_idx;
  else
    pages = NULL;

  if (pages != NULL)
    {
      if (flags & PAL_ZERO)
        memset (pages, 0, PGSIZE * page_cnt);
    }
  else
    {
      if (flags & PAL_ASSERT)
        PANIC ("palloc_get: out of pages");
    }

  return pages;
}

/* Obtains and returns a single free page.
   If PAL_USER is set, the page is obtained from the user pool,
   otherwise from the kernel pool.  If PAL_ZERO is set in FLAGS,
   then the page is filled with zeros.  If no pages are
   available, returns a null pointer, unless PAL_ASSERT is set in
   FLAGS, in which case the kernel panics. */
void *
palloc_get_page (enum palloc_flags flags)
{
  return palloc_get_multiple (flags, 1);
}

/* Frees the PAGE_CNT pages starting at PAGES. */
void
palloc_free_multiple (void *pages, size_t page_cnt)
{
  struct pool *pool;
  size_t page_idx;

  ASSERT (pg_ofs (pages) == 0);
  if (pages == NULL || page_cnt == 0)
    return;

  if (page_from_pool (&kernel_pool, pages))
    pool = &kernel_pool;
  else if (page_from_pool (&user_pool, pages))
    pool = &user_pool;
  else
    NOT_REACHED ();

  page_idx = pg_no (pages) - pg_no (pool->base);

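  /* Filling freed pages with 0xcc makes use-after-free bugs far
     easier to spot in debug builds. */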
#ifndef NDEBUG
  memset (pages, 0xcc, PGSIZE * page_cnt);
#endif

  ASSERT (bitmap_all (pool->used_map, page_idx, page_cnt));
  bitmap_set_multiple (pool->used_map, page_idx, page_cnt, false);
}

/* Frees the page at PAGE. */
void
palloc_free_page (void *page)
{
  palloc_free_multiple (page, 1);
}

/* Initializes pool P as a pool of PAGE_CNT pages starting at
   BASE, naming it NAME for debugging purposes. */
static void
init_pool (struct pool *p, void *base, size_t page_cnt, const char *name)
{
  /* We'll put the pool's used_map at its base.
     Calculate the space needed for the bitmap
     and subtract it from the pool's size. */
  size_t bm_pages = DIV_ROUND_UP (bitmap_needed_bytes (page_cnt), PGSIZE);
  if (bm_pages > page_cnt)
    PANIC ("Not enough memory in %s for bitmap.", name);
  page_cnt -= bm_pages;

  printf ("%zu pages available in %s.\n", page_cnt, name);

  /* Initialize the pool. */
  lock_init (&p->lock, name);
  p->used_map = bitmap_create_preallocated (page_cnt, base,
                                            bm_pages * PGSIZE);
  p->base = base + bm_pages * PGSIZE;
}

/* Returns true if PAGE was allocated from POOL,
   false otherwise. */
static bool
page_from_pool (const struct pool *pool, void *page)
{
  size_t page_no = pg_no (page);
  size_t start_page = pg_no (pool->base);
  size_t end_page = start_page + bitmap_size (pool->used_map);

  return page_no >= start_page && page_no < end_page;
}