#include "threads/malloc.h"
#include <debug.h>
#include <list.h>
+#include <round.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
   blocks, we remove all of the arena's blocks from the free list
   and give the arena back to the page allocator.

   We can't handle blocks bigger than 2 kB using this scheme,
   because they're too big to fit in a single page with a
   descriptor.  We handle those by allocating contiguous pages
   with the page allocator and sticking the allocation size at
   the beginning of the allocated block's arena header. */
/* Descriptor. */
struct desc
struct arena
{
unsigned magic; /* Always set to ARENA_MAGIC. */
- struct desc *desc; /* Owning descriptor. */
- size_t free_cnt; /* Number of free blocks. */
+ struct desc *desc; /* Owning descriptor, null for big block. */
+ size_t free_cnt; /* Free blocks; pages in big block. */
};
/* Free block. */
{
size_t block_size;
- for (block_size = 16; block_size < PGSIZE; block_size *= 2)
+ for (block_size = 16; block_size < PGSIZE / 2; block_size *= 2)
{
struct desc *d = &descs[desc_cnt++];
ASSERT (desc_cnt <= sizeof descs / sizeof *descs);
break;
if (d == descs + desc_cnt)
{
- printf ("malloc: %zu byte allocation too big\n", size);
- return NULL;
+ /* SIZE is too big for any descriptor.
+ Allocate enough pages to hold SIZE plus an arena. */
+ size_t page_cnt = DIV_ROUND_UP (size + sizeof *a, PGSIZE);
+ a = palloc_get_multiple (0, page_cnt);
+ if (a == NULL)
+ return NULL;
+
+ /* Initialize the arena to indicate a big block of PAGE_CNT
+ pages, and return it. */
+ a->magic = ARENA_MAGIC;
+ a->desc = NULL;
+ a->free_cnt = page_cnt;
+ return a + 1;
}
lock_acquire (&d->lock);
size_t i;
/* Allocate a page. */
- a = palloc_get (0);
+ a = palloc_get_page (0);
if (a == NULL)
{
lock_release (&d->lock);
a = block_to_arena (b);
d = a->desc;
+ if (d == NULL)
+ {
+ /* It's a big block. Free its pages. */
+ palloc_free_multiple (a, a->free_cnt);
+ return;
+ }
+
#ifndef NDEBUG
memset (b, 0xcd, d->block_size);
#endif
lock_acquire (&d->lock);
-
/* Add block to free list. */
list_push_front (&d->free_list, &b->free_elem);
struct block *b = arena_to_block (a, i);
list_remove (&b->free_elem);
}
- palloc_free (a);
+ palloc_free_page (a);
}
lock_release (&d->lock);
#include "threads/palloc.h"
#include <bitmap.h>
#include <debug.h>
#include <list.h>
#include <round.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include "threads/mmu.h"
#include "threads/synch.h"
/* Page allocator.  Hands out memory in page-size (or
   page-multiple) chunks.  See malloc.h for an allocator that
   hands out smaller chunks.

   System memory is divided into two "pools" called the kernel
   and user pools.  The user pool is for user (virtual) memory
   pages, the kernel pool for everything else.

   By default, half of system RAM is given to the kernel pool and
   half to the user pool.  That should be huge overkill for the
   kernel pool, but that's just fine for demonstration purposes. */
/* A memory pool. */
struct pool
{
struct lock lock; /* Mutual exclusion. */
+ struct bitmap *used_map; /* Bitmap of free pages. */
uint8_t *start, *end; /* Start and end of pool. */
- uint8_t *uninit; /* First page not yet in free_list. */
- struct list free_list; /* Free pages. */
};
/* Two pools: one for kernel data, one for user pages. */
init_pool (&user_pool, free_middle, free_end, "user pool");
}
-/* Obtains and returns a free page. If PAL_USER is set, the page
- is obtained from the user pool, otherwise from the kernel
- pool. If PAL_ZERO is set in FLAGS, then the page is filled
- with zeros. If no pages are available, returns a null
- pointer, unless PAL_ASSERT is set in FLAGS, in which case the
- kernel panics. */
+/* Obtains and returns a group of PAGE_CNT contiguous free pages.
+ If PAL_USER is set, the pages are obtained from the user pool,
+ otherwise from the kernel pool. If PAL_ZERO is set in FLAGS,
+ then the pages are filled with zeros. If too few pages are
+ available, returns a null pointer, unless PAL_ASSERT is set in
+ FLAGS, in which case the kernel panics. */
void *
-palloc_get (enum palloc_flags flags)
+palloc_get_multiple (enum palloc_flags flags, size_t page_cnt)
{
struct pool *pool = flags & PAL_USER ? &user_pool : &kernel_pool;
- struct page *page;
+ void *pages;
+ size_t page_idx;
+
+ if (page_cnt == 0)
+ return NULL;
lock_acquire (&pool->lock);
- /* If there's a page in the free list, take it.
- Otherwise, if there's a page not yet added to the free list,
- use it.
- Otherwise, we're out of memory. */
- if (!list_empty (&pool->free_list))
- page = list_entry (list_pop_front (&pool->free_list), struct page, elem);
- else if (pool->uninit < pool->end)
- {
- page = (struct page *) pool->uninit;
- pool->uninit += PGSIZE;
- }
+ page_idx = bitmap_scan_and_flip (pool->used_map, 0, page_cnt, false);
+ if (page_idx != BITMAP_ERROR)
+ pages = pool->start + PGSIZE * page_idx;
else
- page = NULL;
+ pages = NULL;
- if (page != NULL)
+ if (pages != NULL)
{
if (flags & PAL_ZERO)
- memset (page, 0, PGSIZE);
+ memset (pages, 0, PGSIZE * page_cnt);
}
else
{
lock_release (&pool->lock);
- return page;
+ return pages;
+}
+
+/* Obtains and returns a single free page.
+ If PAL_USER is set, the page is obtained from the user pool,
+ otherwise from the kernel pool. If PAL_ZERO is set in FLAGS,
+ then the page is filled with zeros. If no pages are
+ available, returns a null pointer, unless PAL_ASSERT is set in
+ FLAGS, in which case the kernel panics. */
+void *
+palloc_get_page (enum palloc_flags flags)
+{
+ return palloc_get_multiple (flags, 1);
}
-/* Frees PAGE. */
+/* Frees the PAGE_CNT pages starting at PAGES. */
void
-palloc_free (void *page_)
+palloc_free_multiple (void *pages, size_t page_cnt)
{
struct pool *pool;
- struct page *page = page_;
+ size_t page_idx;
+
+ ASSERT (pg_ofs (pages) == 0);
+ if (pages == NULL || page_cnt == 0)
+ return;
- ASSERT (page == pg_round_down (page));
- if (page_from_pool (&kernel_pool, page))
+ if (page_from_pool (&kernel_pool, pages))
pool = &kernel_pool;
- else if (page_from_pool (&user_pool, page))
+ else if (page_from_pool (&user_pool, pages))
pool = &user_pool;
else
- PANIC ("freeing invalid pointer");
+ NOT_REACHED ();
+
+ page_idx = pg_no (pages) - pg_no (pool->start);
+ ASSERT (pg_no (pages) + page_cnt <= pg_no (pool->end));
#ifndef NDEBUG
- memset (page, 0xcc, PGSIZE);
+ memset (pages, 0xcc, PGSIZE * page_cnt);
#endif
lock_acquire (&pool->lock);
- list_push_front (&pool->free_list, &page->elem);
+ ASSERT (bitmap_all (pool->used_map, page_idx, page_idx + page_cnt));
+ bitmap_set_multiple (pool->used_map, page_idx, page_idx + page_cnt, false);
lock_release (&pool->lock);
}
+/* Frees the page at PAGE. */
+void
+palloc_free_page (void *page)
+{
+ palloc_free_multiple (page, 1);
+}
+
/* Initializes pool P as starting at START and ending at END,
naming it NAME for debugging purposes. */
static void
init_pool (struct pool *p, void *start, void *end, const char *name)
{
+ size_t bitmap_size;
+ size_t page_cnt;
+
ASSERT (pg_ofs (start) == 0);
ASSERT (pg_ofs (end) == 0);
+ ASSERT (end > start);
- printf ("%d kB allocated for %s.\n",
- (PGSIZE / 1024) * (pg_no (end) - pg_no (start)), name);
+ page_cnt = pg_no (end) - pg_no (start);
+ printf ("%d kB allocated for %s.\n", (PGSIZE / 1024) * page_cnt, name);
lock_init (&p->lock, name);
- p->start = p->uninit = start;
+ bitmap_size = ROUND_UP (bitmap_needed_bytes (page_cnt), PGSIZE);
+ p->used_map = bitmap_create_preallocated (page_cnt, start, bitmap_size);
+ p->start = start + bitmap_size;
p->end = end;
- list_init (&p->free_list);
+ ASSERT (p->end > p->start);
}
/* Returns true if PAGE was allocated from POOL,
{
uint8_t *page = page_;
- return page >= pool->start && page < pool->uninit;
+ return page >= pool->start && page < pool->end;
}
/* Make a copy of FILENAME.
Otherwise there's a race between the caller and load(). */
- fn_copy = palloc_get (0);
+ fn_copy = palloc_get_page (0);
if (fn_copy == NULL)
return TID_ERROR;
strlcpy (fn_copy, filename, PGSIZE);
/* Create a new thread to execute FILENAME. */
tid = thread_create (filename, PRI_DEFAULT, execute_thread, fn_copy);
if (tid == TID_ERROR)
- palloc_free (fn_copy);
+ palloc_free_page (fn_copy);
return tid;
}
success = load (filename, &if_.eip, &if_.esp);
/* If load failed, quit. */
- palloc_free (filename);
+ palloc_free_page (filename);
if (!success)
thread_exit ();
file into the page and zero the rest. */
size_t read_bytes = filesz_left >= PGSIZE ? PGSIZE : filesz_left;
size_t zero_bytes = PGSIZE - read_bytes;
- uint8_t *kpage = palloc_get (PAL_USER);
+ uint8_t *kpage = palloc_get_page (PAL_USER);
if (kpage == NULL)
return false;
/* Do the reading and zeroing. */
if (file_read (file, kpage, read_bytes) != (int) read_bytes)
{
- palloc_free (kpage);
+ palloc_free_page (kpage);
return false;
}
memset (kpage + read_bytes, 0, zero_bytes);
/* Add the page to the process's address space. */
if (!install_page (upage, kpage))
{
- palloc_free (kpage);
+ palloc_free_page (kpage);
return false;
}
}
uint8_t *kpage;
bool success = false;
- kpage = palloc_get (PAL_USER | PAL_ZERO);
+ kpage = palloc_get_page (PAL_USER | PAL_ZERO);
if (kpage != NULL)
{
success = install_page (((uint8_t *) PHYS_BASE) - PGSIZE, kpage);
if (success)
*esp = PHYS_BASE;
else
- palloc_free (kpage);
+ palloc_free_page (kpage);
}
else
printf ("failed to allocate process stack\n");