#include <stdint.h>
#include <stdio.h>
#include <string.h>
-#include "threads/mmu.h"
#include "threads/palloc.h"
#include "threads/synch.h"
+#include "threads/vaddr.h"
/* A simple implementation of malloc().
Otherwise, a new page of memory, called an "arena", is
obtained from the page allocator (if none is available,
malloc() returns a null pointer). The new arena is divided
- into blocks, all of which are added to the descriptor's free
- list. Then we return one of the new blocks.
+ into blocks. The first block in the page is reserved for
+ arena bookkeeping data (struct arena). The remaining blocks
+ are added to the descriptor's free list. Then we return one
+ of the new blocks.
When we free a block, we add it to its descriptor's free list.
But if the arena that the block was in now has no in-use
/* Free block. */
struct block
  {
-    list_elem free_elem; /* Free list element. */
+    struct list_elem free_elem; /* Free list element. */
+    /* NOTE(review): this struct is stored inside the freed block's own
+       bytes, so the smallest block size must be at least
+       sizeof (struct list_elem) — TODO confirm against malloc_init(). */
  };
/* Our set of descriptors. */
-static struct desc descs[10]; /* Descriptors. */
+static struct desc descs[11]; /* Descriptors. */
static size_t desc_cnt; /* Number of descriptors. */
static struct arena *block_to_arena (struct block *);
{
size_t block_size;
- for (block_size = 16; block_size < PGSIZE / 2; block_size *= 2)
+ for (block_size = 16; block_size <= PGSIZE / 2; block_size *= 2)
{
struct desc *d = &descs[desc_cnt++];
ASSERT (desc_cnt <= sizeof descs / sizeof *descs);
+ ASSERT (block_size >= sizeof (struct arena));
d->block_size = block_size;
- d->blocks_per_arena = (PGSIZE - sizeof (struct arena)) / block_size;
+ d->blocks_per_arena = (PGSIZE / block_size) - 1;
list_init (&d->free_list);
- lock_init (&d->lock, "malloc");
+ lock_init (&d->lock);
}
}
/* Obtains and returns a new block of at least SIZE bytes.
- Returns a null pointer if memory is not available. */
+ Returns a null pointer if memory is not available.
+
+ If SIZE is PGSIZE / 2 or less, the returned block is
+ guaranteed to be aligned on the least power-of-2 boundary
+ greater than or equal to SIZE. (Hence, such a block never
+ crosses the boundary between two physical pages.) */
void *
malloc (size_t size)
{
void *p;
size_t size;
- /* Calculate block size. */
+ /* Calculate block size and make sure it fits in size_t. */
size = a * b;
if (size < a || size < b)
return NULL;
return p;
}
-/* Frees block P, which must have been previously allocated with
-   malloc() or calloc(). */
-void
-free (void *p)
+/* Returns the number of bytes allocated for BLOCK. If the
+   return value is PGSIZE / 2 or less, the block is also
+   guaranteed to be aligned to that power-of-2 boundary. */
+static size_t
+block_size (void *block)
{
-  struct block *b;
-  struct arena *a;
-  struct desc *d;
-
-  if (p == NULL)
-    return;
+  struct block *b = block;
+  struct arena *a = block_to_arena (b);
+  struct desc *d = a->desc;
-  b = p;
-  a = block_to_arena (b);
-  d = a->desc;
+  /* NOTE(review): for a big (multi-page) block, d == NULL and
+     a->free_cnt appears to hold the page count, so the usable size is
+     the whole span minus the arena header at the start of the first
+     page — TODO confirm free_cnt semantics for big blocks in malloc(). */
+  return d != NULL ? d->block_size : PGSIZE * a->free_cnt - pg_ofs (block);
+}
-  if (d == NULL)
+/* Attempts to resize OLD_BLOCK to NEW_SIZE bytes, possibly
+   moving it in the process.
+   If successful, returns the new block; on failure, returns a
+   null pointer.
+   A call with null OLD_BLOCK is equivalent to malloc(NEW_SIZE).
+   A call with zero NEW_SIZE is equivalent to free(OLD_BLOCK). */
+void *
+realloc (void *old_block, size_t new_size)
+{
+  if (new_size == 0)
    {
-    /* It's a big block. Free its pages. */
-    palloc_free_multiple (a, a->free_cnt);
-    return;
+      free (old_block);
+      return NULL;
    }
+  else
+    {
+      void *new_block = malloc (new_size);
+      if (old_block != NULL && new_block != NULL)
+        {
+          size_t old_size = block_size (old_block);
+          /* Copy only the bytes that fit: a shrinking realloc truncates. */
+          size_t min_size = new_size < old_size ? new_size : old_size;
+          memcpy (new_block, old_block, min_size);
+          free (old_block);
+        }
+      /* On allocation failure OLD_BLOCK is left untouched and still
+         owned by the caller, matching standard realloc() semantics. */
+      return new_block;
+    }
+}
+
+/* Frees block P, which must have been previously allocated with
+   malloc(), calloc(), or realloc(). */
+void
+free (void *p)
+{
+  if (p != NULL)
+    {
+      struct block *b = p;
+      struct arena *a = block_to_arena (b);
+      struct desc *d = a->desc;
+
+      if (d != NULL)
+        {
+          /* It's a normal block. We handle it here. */
#ifndef NDEBUG
-  memset (b, 0xcd, d->block_size);
+          /* Clear the block to help detect use-after-free bugs. */
+          memset (b, 0xcc, d->block_size);
#endif
-  lock_acquire (&d->lock);
+          lock_acquire (&d->lock);
-  /* Add block to free list. */
-  list_push_front (&d->free_list, &b->free_elem);
+          /* Add block to free list. */
+          list_push_front (&d->free_list, &b->free_elem);
-  /* If the arena is now entirely unused, free it. */
-  if (++a->free_cnt >= d->blocks_per_arena)
-    {
-      size_t i;
+          /* If the arena is now entirely unused, free it.  The ASSERT
+             below shows free_cnt can never exceed blocks_per_arena, so
+             the >= here is effectively ==. */
+          if (++a->free_cnt >= d->blocks_per_arena)
+            {
+              size_t i;
-      ASSERT (a->free_cnt == d->blocks_per_arena);
-      for (i = 0; i < d->blocks_per_arena; i++)
+              ASSERT (a->free_cnt == d->blocks_per_arena);
+              for (i = 0; i < d->blocks_per_arena; i++)
+                {
+                  /* NOTE(review): this inner 'b' intentionally shadows
+                     the freed block's 'b' above — easy to misread. */
+                  struct block *b = arena_to_block (a, i);
+                  list_remove (&b->free_elem);
+                }
+              palloc_free_page (a);
+            }
+
+          lock_release (&d->lock);
+        }
+      else
        {
-          struct block *b = arena_to_block (a, i);
-          list_remove (&b->free_elem);
+          /* It's a big block. Free its pages. */
+          palloc_free_multiple (a, a->free_cnt);
+          return;
        }
-      palloc_free_page (a);
    }
-
-  lock_release (&d->lock);
}
\f
/* Returns the arena that block B is inside. */
ASSERT (a->magic == ARENA_MAGIC);
/* Check that the block is properly aligned for the arena. */
- ASSERT (a->desc == NULL
- || (pg_ofs (b) - sizeof *a) % a->desc->block_size == 0);
- ASSERT (a->desc != NULL || pg_ofs (b) == sizeof *a);
+ ASSERT (a->desc != NULL
+ ? pg_ofs (b) % a->desc->block_size == 0
+ : pg_ofs (b) == sizeof *a);
+ ASSERT (pg_ofs (b) != 0);
return a;
}
ASSERT (a != NULL);
ASSERT (a->magic == ARENA_MAGIC);
ASSERT (idx < a->desc->blocks_per_arena);
- return (struct block *) ((uint8_t *) a
- + sizeof *a
- + idx * a->desc->block_size);
+ return (struct block *) ((uint8_t *) a + (idx + 1) * a->desc->block_size);
}