1 #include "threads/malloc.h"
8 #include "threads/palloc.h"
9 #include "threads/synch.h"
10 #include "threads/vaddr.h"
12 /* A simple implementation of malloc().
14 The size of each request, in bytes, is rounded up to a power
15 of 2 and assigned to the "descriptor" that manages blocks of
16 that size. The descriptor keeps a list of free blocks. If
17 the free list is nonempty, one of its blocks is used to
20 Otherwise, a new page of memory, called an "arena", is
21 obtained from the page allocator (if none is available,
22 malloc() returns a null pointer). The new arena is divided
23 into blocks. The first block in the page is reserved for
24 arena bookkeeping data (struct arena). The remaining blocks
25 are added to the descriptor's free list. Then we return one
28 When we free a block, we add it to its descriptor's free list.
29 But if the arena that the block was in now has no in-use
30 blocks, we remove all of the arena's blocks from the free list
31 and give the arena back to the page allocator.
33 We can't handle blocks bigger than 2 kB using this scheme,
34 because they're too big to fit in a single page with a
35 descriptor. We handle those by allocating contiguous pages
36 with the page allocator and sticking the allocation size at
37 the beginning of the allocated block's arena header. */
42 size_t block_size; /* Size of each element in bytes. */
43 size_t blocks_per_arena; /* Number of blocks in an arena. */
44 struct list free_list; /* List of free blocks. */
45 struct lock lock; /* Lock. */
48 /* Magic number for detecting arena corruption. */
49 #define ARENA_MAGIC 0x9a548eed
54 unsigned magic; /* Always set to ARENA_MAGIC. */
55 struct desc *desc; /* Owning descriptor, null for big block. */
56 size_t free_cnt; /* Free blocks; pages in big block. */
62 struct list_elem free_elem; /* Free list element. */
/* Our set of descriptors. */
static struct desc descs[11];   /* Descriptors. */
static size_t desc_cnt;         /* Number of descriptors. */

/* Address arithmetic between a block and its containing arena. */
static struct arena *block_to_arena (struct block *);
static struct block *arena_to_block (struct arena *, size_t idx);
72 /* Initializes the malloc() descriptors. */
78 for (block_size = 16; block_size <= PGSIZE / 2; block_size *= 2)
80 struct desc *d = &descs[desc_cnt++];
81 ASSERT (desc_cnt <= sizeof descs / sizeof *descs);
82 ASSERT (block_size >= sizeof (struct arena));
83 d->block_size = block_size;
84 d->blocks_per_arena = (PGSIZE / block_size) - 1;
85 list_init (&d->free_list);
90 /* Obtains and returns a new block of at least SIZE bytes.
91 Returns a null pointer if memory is not available.
93 If SIZE is PGSIZE / 2 or less, the returned block is
94 guaranteed to be aligned on the least power-of-2 boundary
95 greater than or equal to SIZE. (Hence, such a block never
96 crosses the boundary between two physical pages.) */
104 /* A null pointer satisfies a request for 0 bytes. */
108 /* Find the smallest descriptor that satisfies a SIZE-byte
110 for (d = descs; d < descs + desc_cnt; d++)
111 if (d->block_size >= size)
113 if (d == descs + desc_cnt)
115 /* SIZE is too big for any descriptor.
116 Allocate enough pages to hold SIZE plus an arena. */
117 size_t page_cnt = DIV_ROUND_UP (size + sizeof *a, PGSIZE);
118 a = palloc_get_multiple (0, page_cnt);
122 /* Initialize the arena to indicate a big block of PAGE_CNT
123 pages, and return it. */
124 a->magic = ARENA_MAGIC;
126 a->free_cnt = page_cnt;
130 lock_acquire (&d->lock);
132 /* If the free list is empty, create a new arena. */
133 if (list_empty (&d->free_list))
137 /* Allocate a page. */
138 a = palloc_get_page (0);
141 lock_release (&d->lock);
145 /* Initialize arena and add its blocks to the free list. */
146 a->magic = ARENA_MAGIC;
148 a->free_cnt = d->blocks_per_arena;
149 for (i = 0; i < d->blocks_per_arena; i++)
151 struct block *b = arena_to_block (a, i);
152 list_push_back (&d->free_list, &b->free_elem);
156 /* Get a block from free list and return it. */
157 b = list_entry (list_pop_front (&d->free_list), struct block, free_elem);
158 a = block_to_arena (b);
160 lock_release (&d->lock);
164 /* Allocates and return A times B bytes initialized to zeroes.
165 Returns a null pointer if memory is not available. */
167 calloc (size_t a, size_t b)
172 /* Calculate block size and make sure it fits in size_t. */
174 if (size < a || size < b)
177 /* Allocate and zero memory. */
185 /* Returns the number of bytes allocated for BLOCK. If the
186 return value is PGSIZE / 2 or less, the block is also
187 guaranteed to be aligned to that power-of-2 boundary. */
189 block_size (void *block)
191 struct block *b = block;
192 struct arena *a = block_to_arena (b);
193 struct desc *d = a->desc;
195 return d != NULL ? d->block_size : PGSIZE * a->free_cnt - pg_ofs (block);
198 /* Attempts to resize OLD_BLOCK to NEW_SIZE bytes, possibly
199 moving it in the process.
200 If successful, returns the new block; on failure, returns a
202 A call with null OLD_BLOCK is equivalent to malloc(NEW_SIZE).
203 A call with zero NEW_SIZE is equivalent to free(OLD_BLOCK). */
205 realloc (void *old_block, size_t new_size)
214 void *new_block = malloc (new_size);
215 if (old_block != NULL && new_block != NULL)
217 size_t old_size = block_size (old_block);
218 size_t min_size = new_size < old_size ? new_size : old_size;
219 memcpy (new_block, old_block, min_size);
226 /* Frees block P, which must have been previously allocated with
227 malloc(), calloc(), or realloc(). */
234 struct arena *a = block_to_arena (b);
235 struct desc *d = a->desc;
239 /* It's a normal block. We handle it here. */
242 /* Clear the block to help detect use-after-free bugs. */
243 memset (b, 0xcc, d->block_size);
246 lock_acquire (&d->lock);
248 /* Add block to free list. */
249 list_push_front (&d->free_list, &b->free_elem);
251 /* If the arena is now entirely unused, free it. */
252 if (++a->free_cnt >= d->blocks_per_arena)
256 ASSERT (a->free_cnt == d->blocks_per_arena);
257 for (i = 0; i < d->blocks_per_arena; i++)
259 struct block *b = arena_to_block (a, i);
260 list_remove (&b->free_elem);
262 palloc_free_page (a);
265 lock_release (&d->lock);
269 /* It's a big block. Free its pages. */
270 palloc_free_multiple (a, a->free_cnt);
276 /* Returns the arena that block B is inside. */
277 static struct arena *
278 block_to_arena (struct block *b)
280 struct arena *a = pg_round_down (b);
282 /* Check that the arena is valid. */
284 ASSERT (a->magic == ARENA_MAGIC);
286 /* Check that the block is properly aligned for the arena. */
287 ASSERT (a->desc != NULL
288 ? pg_ofs (b) % a->desc->block_size == 0
289 : pg_ofs (b) == sizeof *a);
290 ASSERT (pg_ofs (b) != 0);
295 /* Returns the (IDX - 1)'th block within arena A. */
296 static struct block *
297 arena_to_block (struct arena *a, size_t idx)
300 ASSERT (a->magic == ARENA_MAGIC);
301 ASSERT (idx < a->desc->blocks_per_arena);
302 return (struct block *) ((uint8_t *) a + (idx + 1) * a->desc->block_size);