1 #include "threads/malloc.h"
8 #include "threads/mmu.h"
9 #include "threads/palloc.h"
10 #include "threads/synch.h"
12 /* A simple implementation of malloc().
14 The size of each request, in bytes, is rounded up to a power
15 of 2 and assigned to the "descriptor" that manages blocks of
16 that size. The descriptor keeps a list of free blocks. If
17 the free list is nonempty, one of its blocks is used to
20 Otherwise, a new page of memory, called an "arena", is
21 obtained from the page allocator (if none is available,
22 malloc() returns a null pointer). The new arena is divided
23 into blocks, all of which are added to the descriptor's free
24 list. Then we return one of the new blocks.
26 When we free a block, we add it to its descriptor's free list.
27 But if the arena that the block was in now has no in-use
28 blocks, we remove all of the arena's blocks from the free list
29 and give the arena back to the page allocator.
31 We can't handle blocks bigger than 2 kB using this scheme,
32 because they're too big to fit in a single page with a
33 descriptor. We handle those by allocating contiguous pages
34 with the page allocator and sticking the allocation size at
35 the beginning of the allocated block's arena header. */
/* NOTE(review): the `struct desc {` header line appears elided from this
   listing; these four fields are the body of the per-size-class
   descriptor described in the file comment above. */
40 size_t block_size; /* Size of each element in bytes. */
41 size_t blocks_per_arena; /* Number of blocks in an arena. */
42 struct list free_list; /* List of free blocks. */
43 struct lock lock; /* Lock protecting this descriptor's free list. */
46 /* Magic number for detecting arena corruption. */
47 #define ARENA_MAGIC 0x9a548eed
/* NOTE(review): the `struct arena {` header line appears elided; these
   three fields form the header placed at the start of every page (or
   run of pages) handed out by this allocator. */
52 unsigned magic; /* Always set to ARENA_MAGIC. */
53 struct desc *desc; /* Owning descriptor, null for big block. */
54 size_t free_cnt; /* Free blocks; pages in big block. */
/* NOTE(review): the `struct block {` header appears elided; a free
   block stores only its free-list link (the user data area is reused
   for bookkeeping while the block is free). */
60 list_elem free_elem; /* Free list element. */
63 /* Our set of descriptors. */
64 static struct desc descs[10]; /* Descriptors. */
65 static size_t desc_cnt; /* Number of descriptors. */
/* Conversions between a block and the arena (page header) that owns it. */
67 static struct arena *block_to_arena (struct block *);
68 static struct block *arena_to_block (struct arena *, size_t idx);
70 /* Initializes the malloc() descriptors. */
/* NOTE(review): the function signature (presumably `void malloc_init
   (void)`), its braces, and the declaration of `block_size` appear
   elided from this listing. */
/* Build one descriptor per power-of-two size class, from 16 bytes up
   to just under half a page (with a 4 kB PGSIZE that is 16..1024 —
   TODO confirm PGSIZE). */
76 for (block_size = 16; block_size < PGSIZE / 2; block_size *= 2)
78 struct desc *d = &descs[desc_cnt++];
/* The loop must not create more size classes than descs[] can hold. */
79 ASSERT (desc_cnt <= sizeof descs / sizeof *descs);
80 d->block_size = block_size;
/* Each arena is a single page; its blocks share the page with the
   arena header, so only (PGSIZE - header) bytes are usable. */
81 d->blocks_per_arena = (PGSIZE - sizeof (struct arena)) / block_size;
82 list_init (&d->free_list);
83 lock_init (&d->lock, "malloc");
87 /* Obtains and returns a new block of at least SIZE bytes.
88 Returns a null pointer if memory is not available. */
/* NOTE(review): the `void *malloc (size_t size)` signature, local
   declarations, braces, and several `return` / NULL-check lines appear
   elided from this listing; the comments below flag the visible gaps. */
96 /* A null pointer satisfies a request for 0 bytes. */
100 /* Find the smallest descriptor that satisfies a SIZE-byte
102 for (d = descs; d < descs + desc_cnt; d++)
103 if (d->block_size >= size)
/* No descriptor is big enough: take the multi-page "big block" path. */
105 if (d == descs + desc_cnt)
107 /* SIZE is too big for any descriptor.
108 Allocate enough pages to hold SIZE plus an arena. */
109 size_t page_cnt = DIV_ROUND_UP (size + sizeof *a, PGSIZE);
110 a = palloc_get_multiple (0, page_cnt);
/* NOTE(review): the NULL check on `a` (return NULL on allocation
   failure) appears elided here. */
114 /* Initialize the arena to indicate a big block of PAGE_CNT
115 pages, and return it. */
116 a->magic = ARENA_MAGIC;
/* NOTE(review): `a->desc = NULL;` — the big-block marker that free()
   depends on (see struct arena's desc field) — appears elided here. */
118 a->free_cnt = page_cnt;
/* Small-block path: serialize access to this descriptor's free list. */
122 lock_acquire (&d->lock);
124 /* If the free list is empty, create a new arena. */
125 if (list_empty (&d->free_list))
129 /* Allocate a page. */
130 a = palloc_get_page (0);
/* On page-allocation failure, drop the lock (and, per the contract
   above, return NULL — that return line appears elided). */
133 lock_release (&d->lock);
137 /* Initialize arena and add its blocks to the free list. */
138 a->magic = ARENA_MAGIC;
/* NOTE(review): `a->desc = d;` appears elided here. */
140 a->free_cnt = d->blocks_per_arena;
141 for (i = 0; i < d->blocks_per_arena; i++)
143 struct block *b = arena_to_block (a, i);
144 list_push_back (&d->free_list, &b->free_elem);
148 /* Get a block from free list and return it. */
149 b = list_entry (list_pop_front (&d->free_list), struct block, free_elem);
150 a = block_to_arena (b);
/* NOTE(review): the matching decrement of `a->free_cnt` (free() later
   does `++a->free_cnt`) appears elided before the lock is released. */
152 lock_release (&d->lock);
156 /* Allocates and returns A times B bytes initialized to zeroes.
157 Returns a null pointer if memory is not available. */
/* NOTE(review): the `void *` return-type line appears elided above the
   signature, as do the body's braces, the declaration of `size`, and
   the `size = a * b;` line. */
159 calloc (size_t a, size_t b)
164 /* Calculate block size. */
/* Treats a wrapped product (smaller than either factor) as overflow.
   NOTE(review): this heuristic does not catch every possible
   multiplication overflow; a complete check would be
   `a != 0 && size / a != b`. */
166 if (size < a || size < b)
169 /* Allocate and zero memory. */
177 /* Frees block P, which must have been previously allocated with
178 malloc() or calloc(). */
/* NOTE(review): the `void free (void *p)` signature, the NULL-pointer
   early return, and the `d = a->desc` assignment / big-block test
   appear elided from this listing. */
190 a = block_to_arena (b);
/* A null descriptor marks a big block (see struct arena's desc field):
   all of its pages go straight back to the page allocator, and
   a->free_cnt holds the page count for big blocks. */
195 /* It's a big block. Free its pages. */
196 palloc_free_multiple (a, a->free_cnt);
/* Poison the freed block so use-after-free reads stand out. */
201 memset (b, 0xcd, d->block_size);
204 lock_acquire (&d->lock);
206 /* Add block to free list. */
207 list_push_front (&d->free_list, &b->free_elem);
209 /* If the arena is now entirely unused, free it. */
210 if (++a->free_cnt >= d->blocks_per_arena)
/* Every block must already be on the free list before the page can be
   reclaimed. */
214 ASSERT (a->free_cnt == d->blocks_per_arena);
/* Unlink all of this arena's blocks from the descriptor's free list... */
215 for (i = 0; i < d->blocks_per_arena; i++)
217 struct block *b = arena_to_block (a, i);
218 list_remove (&b->free_elem);
/* ...then hand the whole page back to the page allocator. */
220 palloc_free_page (a);
223 lock_release (&d->lock);
226 /* Returns the arena that block B is inside. */
227 static struct arena *
228 block_to_arena (struct block *b)
230 struct arena *a = pg_round_down (b);
232 ASSERT (a->magic == ARENA_MAGIC);
236 /* Returns the (IDX - 1)'th block within arena A. */
237 static struct block *
238 arena_to_block (struct arena *a, size_t idx)
241 ASSERT (a->magic == ARENA_MAGIC);
242 ASSERT (idx < a->desc->blocks_per_arena);
243 return (struct block *) ((uint8_t *) a
245 + idx * a->desc->block_size);