-#include "malloc.h"
+#include "threads/malloc.h"
+#include <debug.h>
+#include <list.h>
+#include <round.h>
#include <stdint.h>
-#include "debug.h"
-#include "lib.h"
-#include "list.h"
-#include "synch.h"
-#include "mmu.h"
-#include "palloc.h"
+#include <stdio.h>
+#include <string.h>
+#include "threads/mmu.h"
+#include "threads/palloc.h"
+#include "threads/synch.h"
/* A simple implementation of malloc().
blocks, we remove all of the arena's blocks from the free list
and give the arena back to the page allocator.
- Major limitation: the largest block that can be allocated is
- PGSIZE / 2, or 2 kB. Use palloc_get() to allocate pages (4 kB
- blocks). You're on your own if you need more memory than
- that. */
+ We can't handle blocks bigger than 2 kB using this scheme,
+ because they're too big to fit in a single page with a
+ descriptor. We handle those by allocating contiguous pages
+ with the page allocator and sticking the allocation size at
+ the beginning of the allocated block's arena header. */
/* Descriptor. */
struct desc
struct lock lock; /* Lock. */
};
+/* Magic number for detecting arena corruption. */
+#define ARENA_MAGIC 0x9a548eed
+
/* Arena. */
+/* An arena sits at the start of a page obtained from the page
+   allocator.  For small blocks, the page's remaining space is
+   carved into equal-size blocks laid out right after this header;
+   for a big block (desc == NULL), the user data begins at the
+   first byte past the header (arena + 1) and FREE_CNT records how
+   many contiguous pages to return to palloc on free(). */
struct arena
{
- struct desc *desc; /* Owning descriptor. */
- size_t free_cnt; /* Number of free blocks. */
+ unsigned magic; /* Always set to ARENA_MAGIC. */
+ struct desc *desc; /* Owning descriptor, null for big block. */
+ size_t free_cnt; /* Free blocks; pages in big block. */
};
/* Free block. */
};
/* Our set of descriptors. */
-static struct desc descs[16]; /* Descriptors. */
+static struct desc descs[10]; /* Descriptors. */
static size_t desc_cnt; /* Number of descriptors. */
static struct arena *block_to_arena (struct block *);
{
size_t block_size;
- for (block_size = 16; block_size < PGSIZE; block_size *= 2)
+ for (block_size = 16; block_size < PGSIZE / 2; block_size *= 2)
{
struct desc *d = &descs[desc_cnt++];
ASSERT (desc_cnt <= sizeof descs / sizeof *descs);
/* Find the smallest descriptor that satisfies a SIZE-byte
request. */
for (d = descs; d < descs + desc_cnt; d++)
- if (size < d->block_size)
+ if (d->block_size >= size)
break;
if (d == descs + desc_cnt)
{
- printk ("malloc: %zu byte allocation too big\n", size);
- return NULL;
+ /* SIZE is too big for any descriptor.
+ Allocate enough pages to hold SIZE plus an arena. */
+ size_t page_cnt = DIV_ROUND_UP (size + sizeof *a, PGSIZE);
+ a = palloc_get_multiple (0, page_cnt);
+ if (a == NULL)
+ return NULL;
+
+ /* Initialize the arena to indicate a big block of PAGE_CNT
+ pages, and return it. */
+ a->magic = ARENA_MAGIC;
+ a->desc = NULL;
+ a->free_cnt = page_cnt;
+ return a + 1;
}
lock_acquire (&d->lock);
size_t i;
/* Allocate a page. */
- a = palloc_get (0);
+ a = palloc_get_page (0);
if (a == NULL)
{
lock_release (&d->lock);
}
/* Initialize arena and add its blocks to the free list. */
+ a->magic = ARENA_MAGIC;
a->desc = d;
a->free_cnt = d->blocks_per_arena;
for (i = 0; i < d->blocks_per_arena; i++)
{
- b = arena_to_block (a, i);
+ struct block *b = arena_to_block (a, i);
list_push_back (&d->free_list, &b->free_elem);
}
}
void *p;
size_t size;
- /* Calculate block size. */
+ /* Calculate block size and make sure it fits in size_t. */
size = a * b;
if (size < a || size < b)
return NULL;
void
free (void *p)
{
- struct block *b = p;
- struct arena *a = block_to_arena (b);
- struct desc *d = a->desc;
+ struct block *b;
+ struct arena *a;
+ struct desc *d;
if (p == NULL)
return;
+ b = p;
+ a = block_to_arena (b);
+ d = a->desc;
+
+ if (d == NULL)
+ {
+ /* It's a big block. Free its pages. */
+ palloc_free_multiple (a, a->free_cnt);
+ return;
+ }
+
+#ifndef NDEBUG
+ memset (b, 0xcc, d->block_size);
+#endif
+
lock_acquire (&d->lock);
/* Add block to free list. */
struct block *b = arena_to_block (a, i);
list_remove (&b->free_elem);
}
- palloc_free (a);
+ palloc_free_page (a);
}
lock_release (&d->lock);
+/* Returns the arena that block B belongs to, sanity-checking the
+   arena header (magic number and block alignment) on the way. */
static struct arena *
block_to_arena (struct block *b)
{
- return pg_round_down (b);
+ /* The arena header always sits at the start of B's page. */
+ struct arena *a = pg_round_down (b);
+
+ /* Check that the arena is valid. */
+ ASSERT (a != NULL);
+ ASSERT (a->magic == ARENA_MAGIC);
+
+ /* Check that the block is properly aligned for the arena. */
+ ASSERT (a->desc == NULL
+ || (pg_ofs (b) - sizeof *a) % a->desc->block_size == 0);
+ /* A big block (desc == NULL) has exactly one block, located
+ immediately after the arena header. */
+ ASSERT (a->desc != NULL || pg_ofs (b) == sizeof *a);
+
+ return a;
}
/* Returns the IDX'th block (0-based) within arena A. */
static struct block *
arena_to_block (struct arena *a, size_t idx)
{
+ ASSERT (a != NULL);
+ ASSERT (a->magic == ARENA_MAGIC);
ASSERT (idx < a->desc->blocks_per_arena);
return (struct block *) ((uint8_t *) a
+ sizeof *a