X-Git-Url: https://pintos-os.org/cgi-bin/gitweb.cgi?a=blobdiff_plain;f=src%2Fthreads%2Fmalloc.c;h=9c83e3d438f780199166c3c92aa0893aa842f98f;hb=dcfc312e3cc930d418f86948ef062eb7e8fd283a;hp=9910bf433c7b1f2eac0a518de062a9a00ed7cafc;hpb=1bd8601ce81500fc1ddf6584cd07379eec65083e;p=pintos-anon

diff --git a/src/threads/malloc.c b/src/threads/malloc.c
index 9910bf4..9c83e3d 100644
--- a/src/threads/malloc.c
+++ b/src/threads/malloc.c
@@ -1,6 +1,7 @@
 #include "threads/malloc.h"
 #include <debug.h>
 #include <list.h>
+#include <round.h>
 #include <stdint.h>
 #include <stdio.h>
 #include <string.h>
@@ -27,10 +28,11 @@
    blocks, we remove all of the arena's blocks from the free list
    and give the arena back to the page allocator.

-   Major limitation: the largest block that can be allocated is
-   PGSIZE / 2, or 2 kB.  Use palloc_get() to allocate pages (4 kB
-   blocks).  You're on your own if you need more memory than
-   that. */
+   We can't handle blocks bigger than 2 kB using this scheme,
+   because they're too big to fit in a single page with a
+   descriptor.  We handle those by allocating contiguous pages
+   with the page allocator and sticking the allocation size at
+   the beginning of the allocated block's arena header. */

 /* Descriptor. */
 struct desc
@@ -48,8 +50,8 @@ struct desc
 struct arena 
   {
     unsigned magic;             /* Always set to ARENA_MAGIC. */
-    struct desc *desc;          /* Owning descriptor. */
-    size_t free_cnt;            /* Number of free blocks. */
+    struct desc *desc;          /* Owning descriptor, null for big block. */
+    size_t free_cnt;            /* Free blocks; pages in big block. */
   };

 /* Free block. */
@@ -71,7 +73,7 @@ malloc_init (void)
 {
   size_t block_size;

-  for (block_size = 16; block_size < PGSIZE; block_size *= 2)
+  for (block_size = 16; block_size < PGSIZE / 2; block_size *= 2)
     {
       struct desc *d = &descs[desc_cnt++];
       ASSERT (desc_cnt <= sizeof descs / sizeof *descs);
@@ -102,8 +104,19 @@ malloc (size_t size)
       break;
   if (d == descs + desc_cnt)
     {
-      printf ("malloc: %zu byte allocation too big\n", size);
-      return NULL;
+      /* SIZE is too big for any descriptor.
+         Allocate enough pages to hold SIZE plus an arena. */
+      size_t page_cnt = DIV_ROUND_UP (size + sizeof *a, PGSIZE);
+      a = palloc_get_multiple (0, page_cnt);
+      if (a == NULL)
+        return NULL;
+
+      /* Initialize the arena to indicate a big block of PAGE_CNT
+         pages, and return it. */
+      a->magic = ARENA_MAGIC;
+      a->desc = NULL;
+      a->free_cnt = page_cnt;
+      return a + 1;
     }

   lock_acquire (&d->lock);
@@ -114,7 +127,7 @@ malloc (size_t size)
       size_t i;

       /* Allocate a page. */
-      a = palloc_get (0);
+      a = palloc_get_page (0);
       if (a == NULL)
         {
           lock_release (&d->lock);
@@ -140,6 +153,19 @@ malloc (size_t size)
   return b;
 }

+/* Obtains and returns a new block of at least SIZE bytes.
+   Panics if memory is not available.  It is unacceptable for the
+   kernel to panic in normal operation, so this function should
+   only be used during kernel initialization. */
+void *
+xmalloc (size_t size)
+{
+  void *p = malloc (size);
+  if (p == NULL && size > 0)
+    PANIC ("memory exhausted");
+  return p;
+}
+
 /* Allocates and return A times B bytes initialized to zeroes.
    Returns a null pointer if memory is not available. */
 void *
@@ -148,7 +174,7 @@ calloc (size_t a, size_t b)
   void *p;
   size_t size;

-  /* Calculate block size. */
+  /* Calculate block size and make sure it fits in size_t. */
   size = a * b;
   if (size < a || size < b)
     return NULL;
@@ -161,6 +187,19 @@ calloc (size_t a, size_t b)
   return p;
 }

+/* Allocates and returns A times B bytes initialized to zeroes.
+   Panics if memory is not available.  It is unacceptable for the
+   kernel to panic in normal operation, so this function should
+   only be used during kernel initialization. */
+void *
+xcalloc (size_t a, size_t b)
+{
+  void *p = calloc (a, b);
+  if (p == NULL && a > 0 && b > 0)
+    PANIC ("memory exhausted");
+  return p;
+}
+
 /* Frees block P, which must have been previously allocated with
    malloc() or calloc(). */
 void
@@ -177,13 +216,19 @@ free (void *p)
   a = block_to_arena (b);
   d = a->desc;

+  if (d == NULL)
+    {
+      /* It's a big block.  Free its pages. */
+      palloc_free_multiple (a, a->free_cnt);
+      return;
+    }
+
 #ifndef NDEBUG
-  memset (b, 0xcd, d->block_size);
+  memset (b, 0xcc, d->block_size);
 #endif

   lock_acquire (&d->lock);

-
   /* Add block to free list. */
   list_push_front (&d->free_list, &b->free_elem);

@@ -198,7 +243,7 @@ free (void *p)
           struct block *b = arena_to_block (a, i);
           list_remove (&b->free_elem);
         }
-      palloc_free (a);
+      palloc_free_page (a);
     }

   lock_release (&d->lock);
@@ -209,8 +254,16 @@
 static struct arena *
 block_to_arena (struct block *b)
 {
   struct arena *a = pg_round_down (b);
+
+  /* Check that the arena is valid. */
   ASSERT (a != NULL);
   ASSERT (a->magic == ARENA_MAGIC);
+
+  /* Check that the block is properly aligned for the arena. */
+  ASSERT (a->desc == NULL
+          || (pg_ofs (b) - sizeof *a) % a->desc->block_size == 0);
+  ASSERT (a->desc != NULL || pg_ofs (b) == sizeof *a);
+
   return a;
 }
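
For context, here is a minimal sketch of how the new big-block path behaves from a caller's point of view. It assumes the Pintos kernel environment (threads/malloc.h, 4 kB pages); the 8 kB request size and the big_block_demo function are illustrative only, not part of the patch.

#include "threads/malloc.h"

/* Illustrative only: exercises the big-block path added by this
   patch.  Before the change, an 8 kB request exceeded the
   PGSIZE / 2 limit and malloc() failed with a "too big" message;
   now it falls through to palloc_get_multiple(), which grabs
   three contiguous pages here (8 kB of data plus the struct arena
   header, rounded up to whole pages) and returns the address just
   past the arena header. */
static void
big_block_demo (void)
{
  char *buf = malloc (8 * 1024);
  if (buf != NULL)
    {
      buf[0] = 1;       /* Use the block normally. */
      free (buf);       /* free() sees a->desc == NULL, so it hands the
                           arena's pages back via palloc_free_multiple(). */
    }
}

The new xmalloc() and xcalloc() wrappers cover boot-time allocations where a null return would be fatal anyway: they panic instead of returning NULL, and the patch's comments restrict them to kernel initialization.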