X-Git-Url: https://pintos-os.org/cgi-bin/gitweb.cgi?a=blobdiff_plain;f=src%2Fthreads%2Fmalloc.c;h=f6f803b9b6dc437a25876928679ab393176c086d;hb=fd2a5afa946474ba0839de0e9da238dbaecbd6a5;hp=9910bf433c7b1f2eac0a518de062a9a00ed7cafc;hpb=1bd8601ce81500fc1ddf6584cd07379eec65083e;p=pintos-anon

diff --git a/src/threads/malloc.c b/src/threads/malloc.c
index 9910bf4..f6f803b 100644
--- a/src/threads/malloc.c
+++ b/src/threads/malloc.c
@@ -1,12 +1,13 @@
 #include "threads/malloc.h"
 #include <debug.h>
 #include <list.h>
+#include <round.h>
 #include <stdint.h>
 #include <stdio.h>
 #include <string.h>
-#include "threads/mmu.h"
 #include "threads/palloc.h"
 #include "threads/synch.h"
+#include "threads/vaddr.h"
 
 /* A simple implementation of malloc().
 
@@ -27,10 +28,11 @@
    blocks, we remove all of the arena's blocks from the free list
    and give the arena back to the page allocator.
 
-   Major limitation: the largest block that can be allocated is
-   PGSIZE / 2, or 2 kB.  Use palloc_get() to allocate pages (4 kB
-   blocks).  You're on your own if you need more memory than
-   that. */
+   We can't handle blocks bigger than 2 kB using this scheme,
+   because they're too big to fit in a single page with a
+   descriptor.  We handle those by allocating contiguous pages
+   with the page allocator and sticking the allocation size at
+   the beginning of the allocated block's arena header. */
 
 /* Descriptor. */
 struct desc
@@ -48,14 +50,14 @@ struct desc
 struct arena 
   {
     unsigned magic;             /* Always set to ARENA_MAGIC. */
-    struct desc *desc;          /* Owning descriptor. */
-    size_t free_cnt;            /* Number of free blocks. */
+    struct desc *desc;          /* Owning descriptor, null for big block. */
+    size_t free_cnt;            /* Free blocks; pages in big block. */
   };
 
 /* Free block. */
 struct block 
   {
-    list_elem free_elem;        /* Free list element. */
+    struct list_elem free_elem; /* Free list element. */
   };
 
 /* Our set of descriptors. */
@@ -71,14 +73,14 @@ malloc_init (void) 
 {
   size_t block_size;
 
-  for (block_size = 16; block_size < PGSIZE; block_size *= 2)
+  for (block_size = 16; block_size < PGSIZE / 2; block_size *= 2)
     {
       struct desc *d = &descs[desc_cnt++];
       ASSERT (desc_cnt <= sizeof descs / sizeof *descs);
       d->block_size = block_size;
       d->blocks_per_arena = (PGSIZE - sizeof (struct arena)) / block_size;
       list_init (&d->free_list);
-      lock_init (&d->lock, "malloc");
+      lock_init (&d->lock);
     }
 }
 
@@ -102,8 +104,19 @@
       break;
   if (d == descs + desc_cnt) 
     {
-      printf ("malloc: %zu byte allocation too big\n", size);
-      return NULL;
+      /* SIZE is too big for any descriptor.
+         Allocate enough pages to hold SIZE plus an arena. */
+      size_t page_cnt = DIV_ROUND_UP (size + sizeof *a, PGSIZE);
+      a = palloc_get_multiple (0, page_cnt);
+      if (a == NULL)
+        return NULL;
+
+      /* Initialize the arena to indicate a big block of PAGE_CNT
+         pages, and return it. */
+      a->magic = ARENA_MAGIC;
+      a->desc = NULL;
+      a->free_cnt = page_cnt;
+      return a + 1;
     }
 
   lock_acquire (&d->lock);
@@ -114,7 +127,7 @@
       size_t i;
 
       /* Allocate a page. */
-      a = palloc_get (0);
+      a = palloc_get_page (0);
       if (a == NULL) 
         {
           lock_release (&d->lock);
@@ -148,7 +161,7 @@ calloc (size_t a, size_t b) 
   void *p;
   size_t size;
 
-  /* Calculate block size. */
+  /* Calculate block size and make sure it fits in size_t. */
   size = a * b;
   if (size < a || size < b)
     return NULL;
@@ -161,47 +174,93 @@ calloc (size_t a, size_t b) 
   return p;
 }
 
+/* Returns the number of bytes allocated for BLOCK. */
+static size_t
+block_size (void *block) 
+{
+  struct block *b = block;
+  struct arena *a = block_to_arena (b);
+  struct desc *d = a->desc;
+
+  return d != NULL ? d->block_size : PGSIZE * a->free_cnt - pg_ofs (block);
+}
+
+/* Attempts to resize OLD_BLOCK to NEW_SIZE bytes, possibly
+   moving it in the process.
+   If successful, returns the new block; on failure, returns a
+   null pointer.
+   A call with null OLD_BLOCK is equivalent to malloc(NEW_SIZE).
+   A call with zero NEW_SIZE is equivalent to free(OLD_BLOCK). */
+void *
+realloc (void *old_block, size_t new_size) 
+{
+  if (new_size == 0) 
+    {
+      free (old_block);
+      return NULL;
+    }
+  else 
+    {
+      void *new_block = malloc (new_size);
+      if (old_block != NULL && new_block != NULL)
+        {
+          size_t old_size = block_size (old_block);
+          size_t min_size = new_size < old_size ? new_size : old_size;
+          memcpy (new_block, old_block, min_size);
+          free (old_block);
+        }
+      return new_block;
+    }
+}
+
 /* Frees block P, which must have been previously allocated with
-   malloc() or calloc(). */
+   malloc(), calloc(), or realloc(). */
 void
 free (void *p) 
 {
-  struct block *b;
-  struct arena *a;
-  struct desc *d;
-
-  if (p == NULL)
-    return;
-
-  b = p;
-  a = block_to_arena (b);
-  d = a->desc;
+  if (p != NULL)
+    {
+      struct block *b = p;
+      struct arena *a = block_to_arena (b);
+      struct desc *d = a->desc;
+
+      if (d != NULL) 
+        {
+          /* It's a normal block.  We handle it here. */
 
 #ifndef NDEBUG
-  memset (b, 0xcd, d->block_size);
+          /* Clear the block to help detect use-after-free bugs. */
+          memset (b, 0xcc, d->block_size);
 #endif
 
-  lock_acquire (&d->lock);
+          lock_acquire (&d->lock);
+
+          /* Add block to free list. */
+          list_push_front (&d->free_list, &b->free_elem);
 
-  /* Add block to free list. */
-  list_push_front (&d->free_list, &b->free_elem);
+          /* If the arena is now entirely unused, free it. */
+          if (++a->free_cnt >= d->blocks_per_arena) 
+            {
+              size_t i;
 
-  /* If the arena is now entirely unused, free it. */
-  if (++a->free_cnt >= d->blocks_per_arena) 
-    {
-      size_t i;
+              ASSERT (a->free_cnt == d->blocks_per_arena);
+              for (i = 0; i < d->blocks_per_arena; i++) 
+                {
+                  struct block *b = arena_to_block (a, i);
+                  list_remove (&b->free_elem);
+                }
+              palloc_free_page (a);
+            }
 
-      ASSERT (a->free_cnt == d->blocks_per_arena);
-      for (i = 0; i < d->blocks_per_arena; i++) 
+          lock_release (&d->lock);
+        }
+      else
         {
-          struct block *b = arena_to_block (a, i);
-          list_remove (&b->free_elem);
+          /* It's a big block.  Free its pages. */
+          palloc_free_multiple (a, a->free_cnt);
+          return;
         }
-      palloc_free (a);
     }
-
-  lock_release (&d->lock);
 }
 
 /* Returns the arena that block B is inside. */
@@ -209,8 +268,16 @@
 static struct arena *
 block_to_arena (struct block *b)
 {
   struct arena *a = pg_round_down (b);
+
+  /* Check that the arena is valid. */
   ASSERT (a != NULL);
   ASSERT (a->magic == ARENA_MAGIC);
+
+  /* Check that the block is properly aligned for the arena. */
+  ASSERT (a->desc == NULL
+          || (pg_ofs (b) - sizeof *a) % a->desc->block_size == 0);
+  ASSERT (a->desc != NULL || pg_ofs (b) == sizeof *a);
+
   return a;
 }
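
The big-block bookkeeping added above is terse, so the following is a minimal standalone sketch of the same size arithmetic, written as ordinary user-space C rather than Pintos kernel code. PGSIZE and DIV_ROUND_UP are reproduced from the Pintos headers; struct demo_arena, demo_get_pages(), demo_big_malloc(), and demo_block_size() are hypothetical stand-ins for struct arena, palloc_get_multiple(), and the corresponding paths in malloc() and block_size(). It only shows how the page count is chosen for an oversized request and how the usable size is later recovered from the header.

#include <stdio.h>
#include <stdlib.h>

#define PGSIZE 4096                                /* Page size, as in Pintos. */
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d)) /* As in Pintos <round.h>. */

/* Cut-down arena header: for a "big block" the real struct arena
   stores a null descriptor pointer and the page count in free_cnt. */
struct demo_arena
  {
    void *desc;                 /* Null marks a big block. */
    size_t free_cnt;            /* Number of pages in the big block. */
  };

/* Hypothetical stand-in for palloc_get_multiple(): grabs PAGE_CNT
   contiguous, page-aligned pages from the host C library. */
static void *
demo_get_pages (size_t page_cnt)
{
  return aligned_alloc (PGSIZE, page_cnt * PGSIZE);
}

/* Big-block allocation, mirroring the new path in malloc(). */
static void *
demo_big_malloc (size_t size)
{
  size_t page_cnt = DIV_ROUND_UP (size + sizeof (struct demo_arena), PGSIZE);
  struct demo_arena *a = demo_get_pages (page_cnt);
  if (a == NULL)
    return NULL;
  a->desc = NULL;               /* No descriptor: this is a big block. */
  a->free_cnt = page_cnt;       /* Remember how many pages to free later. */
  return a + 1;                 /* User data starts right after the header. */
}

/* Usable size of a big block, mirroring block_size() in the diff:
   whole pages minus the header that sits at the start of the first page. */
static size_t
demo_block_size (void *block)
{
  struct demo_arena *a = (struct demo_arena *) block - 1;
  return PGSIZE * a->free_cnt - sizeof (struct demo_arena);
}

int
main (void)
{
  void *p = demo_big_malloc (3000);   /* > 2 kB, so it takes the big-block path. */
  if (p != NULL)
    {
      printf ("3000-byte request got %zu usable bytes\n", demo_block_size (p));
      free ((struct demo_arena *) p - 1);   /* Release header and data together. */
    }
  return 0;
}

Note that the real block_size() uses pg_ofs (block) instead of the header size; the two are equal only because a big block's arena header always sits at the very start of its first page, so the returned block begins at offset sizeof (struct arena) within that page.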