1 #include "threads/palloc.h"
10 #include "threads/init.h"
11 #include "threads/loader.h"
12 #include "threads/mmu.h"
13 #include "threads/synch.h"
15 /* Page allocator. Hands out memory in page-size (or
16 page-multiple) chunks. See malloc.h for an allocator that
17 hands out smaller chunks.
19 System memory is divided into two "pools" called the kernel
20 and user pools. The user pool is for user (virtual) memory
21 pages, the kernel pool for everything else. The idea here is
22 that the kernel needs to have memory for its own operations
23 even if user processes are swapping like mad.
25 By default, half of system RAM is given to the kernel pool and
26 half to the user pool. That should be huge overkill for the
27 kernel pool, but that's just fine for demonstration purposes. */
32 struct lock lock; /* Mutual exclusion. */
33 struct bitmap *used_map; /* Bitmap of free pages. */
34 uint8_t *base; /* Base of pool. */
37 /* Two pools: one for kernel data, one for user pages. */
38 struct pool kernel_pool, user_pool;
/* Maximum number of pages to put in user pool.
   SIZE_MAX means "no limit"; presumably lowered elsewhere (e.g. by a
   kernel command-line option) before palloc_init() runs — TODO confirm. */
size_t user_page_limit = SIZE_MAX;
/* Internal helpers; parameter list matches the definitions below. */
static void init_pool (struct pool *, void *base, size_t page_cnt,
                       const char *name);
static bool page_from_pool (const struct pool *, void *page);
47 /* Initializes the page allocator. */
51 /* End of the kernel as recorded by the linker.
56 uint8_t *free_start = pg_round_up (&_end);
57 uint8_t *free_end = ptov (ram_pages * PGSIZE);
58 size_t free_pages = (free_end - free_start) / PGSIZE;
59 size_t user_pages = free_pages / 2;
61 if (user_pages > user_page_limit)
62 user_pages = user_page_limit;
63 kernel_pages = free_pages - user_pages;
65 /* Give half of memory to kernel, half to user. */
66 init_pool (&kernel_pool, free_start, kernel_pages, "kernel pool");
67 init_pool (&user_pool, free_start + kernel_pages * PGSIZE,
68 user_pages, "user pool");
71 /* Obtains and returns a group of PAGE_CNT contiguous free pages.
72 If PAL_USER is set, the pages are obtained from the user pool,
73 otherwise from the kernel pool. If PAL_ZERO is set in FLAGS,
74 then the pages are filled with zeros. If too few pages are
75 available, returns a null pointer, unless PAL_ASSERT is set in
76 FLAGS, in which case the kernel panics. */
78 palloc_get_multiple (enum palloc_flags flags, size_t page_cnt)
80 struct pool *pool = flags & PAL_USER ? &user_pool : &kernel_pool;
87 lock_acquire (&pool->lock);
89 page_idx = bitmap_scan_and_flip (pool->used_map, 0, page_cnt, false);
90 if (page_idx != BITMAP_ERROR)
91 pages = pool->base + PGSIZE * page_idx;
98 memset (pages, 0, PGSIZE * page_cnt);
102 if (flags & PAL_ASSERT)
103 PANIC ("palloc_get: out of pages");
106 lock_release (&pool->lock);
111 /* Obtains and returns a single free page.
112 If PAL_USER is set, the page is obtained from the user pool,
113 otherwise from the kernel pool. If PAL_ZERO is set in FLAGS,
114 then the page is filled with zeros. If no pages are
115 available, returns a null pointer, unless PAL_ASSERT is set in
116 FLAGS, in which case the kernel panics. */
118 palloc_get_page (enum palloc_flags flags)
120 return palloc_get_multiple (flags, 1);
123 /* Frees the PAGE_CNT pages starting at PAGES. */
125 palloc_free_multiple (void *pages, size_t page_cnt)
130 ASSERT (pg_ofs (pages) == 0);
131 if (pages == NULL || page_cnt == 0)
134 if (page_from_pool (&kernel_pool, pages))
136 else if (page_from_pool (&user_pool, pages))
141 page_idx = pg_no (pages) - pg_no (pool->base);
144 memset (pages, 0xcc, PGSIZE * page_cnt);
147 lock_acquire (&pool->lock);
148 ASSERT (bitmap_all (pool->used_map, page_idx, page_idx + page_cnt));
149 bitmap_set_multiple (pool->used_map, page_idx, page_idx + page_cnt, false);
150 lock_release (&pool->lock);
/* Frees the page at PAGE.  Convenience wrapper around
   palloc_free_multiple() for the single-page case. */
void
palloc_free_page (void *page)
{
  palloc_free_multiple (page, 1);
}
160 /* Initializes pool P as starting at START and ending at END,
161 naming it NAME for debugging purposes. */
163 init_pool (struct pool *p, void *base, size_t page_cnt, const char *name)
165 /* We'll put the pool's used_map at its base.
166 Calculate the space needed for the bitmap
167 and subtract it from the pool's size. */
168 size_t bm_pages = DIV_ROUND_UP (bitmap_needed_bytes (page_cnt), PGSIZE);
169 if (bm_pages > page_cnt)
170 PANIC ("Not enough memory in %s for bitmap.", name);
171 page_cnt -= bm_pages;
173 printf ("%d pages available in %s.\n", page_cnt, name);
175 /* Initialize the pool. */
176 lock_init (&p->lock, name);
177 p->used_map = bitmap_create_preallocated (page_cnt, base,
179 p->base = base + bm_pages * PGSIZE;
182 /* Returns true if PAGE was allocated from POOL,
185 page_from_pool (const struct pool *pool, void *page)
187 size_t page_no = pg_no (page);
188 size_t start_page = pg_no (pool->base);
189 size_t end_page = start_page + bitmap_size (pool->used_map);
191 return page_no >= start_page && page_no < end_page;