/* PSPP - a program for statistical analysis.
   Copyright (C) 2000 Free Software Foundation, Inc.

   This program is free software: you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation, either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>. */
#include <config.h>

#include <stdarg.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include <libpspp/assertion.h>
#include <libpspp/pool.h>

#include "xalloc.h"
#include "xvasprintf.h"
/* Fast, low-overhead memory block suballocator. */
struct pool
  {
    struct pool *parent;            /* Pool of which this pool is a subpool. */
    struct pool_block *blocks;      /* Blocks owned by the pool. */
    struct pool_gizmo *gizmos;      /* Other stuff owned by the pool. */
  };

/* Pool block. */
struct pool_block
  {
    struct pool_block *prev;
    struct pool_block *next;
    size_t ofs;
  };

/* Gizmo types. */
enum
  {
    POOL_GIZMO_MALLOC,
    POOL_GIZMO_FILE,
    POOL_GIZMO_SUBPOOL,
    POOL_GIZMO_REGISTERED,
  };

/* Pool routines can maintain objects (`gizmos') as well as doing
   suballocation.
   This structure is used to keep track of them. */
struct pool_gizmo
  {
    struct pool *pool;
    struct pool_gizmo *prev;
    struct pool_gizmo *next;

    long serial;                    /* Serial number. */
    int type;                       /* Type of this gizmo. */

    /* Type-dependent info. */
    union
      {
        FILE *file;                 /* POOL_GIZMO_FILE. */
        struct pool *subpool;       /* POOL_GIZMO_SUBPOOL. */

        /* POOL_GIZMO_REGISTERED. */
        struct
          {
            void (*free) (void *p);
            void *p;
          }
        registered;
      }
    p;
  };
/* Rounds X up to the next multiple of Y. */
#ifndef ROUND_UP
#define ROUND_UP(X, Y)                          \
        (((X) + ((Y) - 1)) / (Y) * (Y))
#endif
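/* For example, ROUND_UP (13, 8) expands to ((13 + 7) / 8) * 8, which
   evaluates to 16, and ROUND_UP (16, 8) remains 16.  (Illustrative note,
   not part of the original source.) */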
/* Types that provide typically useful alignment sizes. */
union align
  {
    void *op;
    void (*fp) (void);
    long l;
    double d;
  };

/* This should be the alignment size used by malloc().  The size of
   the union above is correct, if not optimal, in all known cases. */
#define ALIGN_SIZE sizeof (union align)
/* DISCRETE_BLOCKS may be declared as nonzero to prevent
   suballocation of blocks.  This is useful under memory
   debuggers like valgrind because it allows the source location
   of bugs to be more accurately pinpointed.

   On the other hand, if we're testing the library, then we want to
   test the library's real functionality, not its crippled, slow,
   simplified functionality. */
/*#define DISCRETE_BLOCKS 1*/
/* Size of each block allocated in the pool, in bytes.
   Should be at least 1k. */
#ifndef BLOCK_SIZE
#define BLOCK_SIZE 1024
#endif

/* Maximum size of a suballocated block.  Larger requests are passed
   through to pool_malloc() instead. */
#define MAX_SUBALLOC 64

/* Sizes of some structures with alignment padding included. */
#define POOL_BLOCK_SIZE ROUND_UP (sizeof (struct pool_block), ALIGN_SIZE)
#define POOL_GIZMO_SIZE ROUND_UP (sizeof (struct pool_gizmo), ALIGN_SIZE)
#define POOL_SIZE ROUND_UP (sizeof (struct pool), ALIGN_SIZE)
/* Serial number used to keep track of gizmos for mark/release. */
static long serial = 0;
static void add_gizmo (struct pool *, struct pool_gizmo *);
static void free_gizmo (struct pool_gizmo *);
static void free_all_gizmos (struct pool *pool);
static void delete_gizmo (struct pool *, struct pool_gizmo *);
static void check_gizmo (struct pool *, struct pool_gizmo *);
/* General routines. */

/* Creates and returns a new memory pool, which allows malloc()'d
   blocks to be suballocated in a time- and space-efficient manner.
   The entire contents of the memory pool are freed at once.

   In addition, other objects can be associated with a memory pool.
   These are released when the pool is destroyed. */
struct pool *
pool_create (void)
{
  struct pool_block *block;
  struct pool *pool;

  block = xmalloc (BLOCK_SIZE);
  block->prev = block->next = block;
  block->ofs = POOL_BLOCK_SIZE + POOL_SIZE;

  pool = (struct pool *) (((char *) block) + POOL_BLOCK_SIZE);
  pool->parent = NULL;
  pool->blocks = block;
  pool->gizmos = NULL;

  return pool;
}
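/* Illustrative usage sketch (not part of the original source): every
   allocation made from a pool is released by the single pool_destroy()
   call at the end.

     struct pool *pool = pool_create ();
     int *values = pool_nalloc (pool, 100, sizeof *values);
     char *name = pool_strdup (pool, "example");
     ...
     pool_destroy (pool);
*/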
/* Creates a pool, allocates a block STRUCT_SIZE bytes in
   length from it, stores the pool's address at offset
   POOL_MEMBER_OFFSET within the block, and returns the allocated
   block.

   Meant for use indirectly via pool_create_container(). */
void *
pool_create_at_offset (size_t struct_size, size_t pool_member_offset)
{
  struct pool *pool;
  char *struct_;

  assert (struct_size >= sizeof pool);
  assert (pool_member_offset <= struct_size - sizeof pool);

  pool = pool_create ();
  struct_ = pool_alloc (pool, struct_size);
  *(struct pool **) (struct_ + pool_member_offset) = pool;
  return struct_;
}
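/* Illustrative sketch of the container pattern (not part of the original
   source; it assumes the pool_create_container() macro declared in pool.h
   and a hypothetical struct dictionary):

     struct dictionary
       {
         struct pool *pool;
         ...
       };

     struct dictionary *d = pool_create_container (struct dictionary, pool);

   Destroying d->pool then frees the dictionary itself along with
   everything else later allocated from d->pool. */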
/* Destroy the specified pool, including all subpools. */
void
pool_destroy (struct pool *pool)
{
  if (pool == NULL)
    return;

  /* Remove this pool from its parent's list of gizmos. */
  if (pool->parent)
    delete_gizmo (pool->parent, (void *) (((char *) pool) + POOL_SIZE));

  free_all_gizmos (pool);

  /* Free all the memory. */
  {
    struct pool_block *cur, *next;

    pool->blocks->prev->next = NULL;
    for (cur = pool->blocks; cur; cur = next)
      {
        next = cur->next;
        free (cur);
      }
  }
}
/* Release all the memory and gizmos in POOL.
   Blocks are not given back with free() but kept for later
   allocations.  To give back memory, use a subpool instead. */
void
pool_clear (struct pool *pool)
{
  free_all_gizmos (pool);

  /* Zero out block sizes. */
  {
    struct pool_block *cur;

    cur = pool->blocks;
    do
      {
        cur->ofs = POOL_BLOCK_SIZE;
        if ((char *) cur + POOL_BLOCK_SIZE == (char *) pool)
          {
            cur->ofs += POOL_SIZE;
            if (pool->parent != NULL)
              cur->ofs += POOL_GIZMO_SIZE;
          }
        cur = cur->next;
      }
    while (cur != pool->blocks);
  }
}
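/* Illustrative sketch (not part of the original source; row_name and n_rows
   are hypothetical): pool_clear() lets a loop reuse the pool's blocks
   instead of freeing and reallocating them on every iteration.

     struct pool *scratch = pool_create ();
     for (size_t i = 0; i < n_rows; i++)
       {
         char *tmp = pool_strdup (scratch, row_name[i]);
         ...
         pool_clear (scratch);
       }
     pool_destroy (scratch);
*/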
/* Suballocation routines. */

/* Allocates a memory region AMT bytes in size from POOL and returns a
   pointer to the region's start.
   The region is properly aligned for storing any object. */
void *
pool_alloc (struct pool *pool, size_t amt)
{
  assert (pool != NULL);

  if (amt == 0)
    return NULL;

#ifndef DISCRETE_BLOCKS
  if (amt <= MAX_SUBALLOC)
    {
      /* If there is space in this block, take it. */
      struct pool_block *b = pool->blocks;
      b->ofs = ROUND_UP (b->ofs, ALIGN_SIZE);
      if (b->ofs + amt <= BLOCK_SIZE)
        {
          void *const p = ((char *) b) + b->ofs;
          b->ofs += amt;
          return p;
        }

      /* No space in this block, so we must make other
         arrangements. */
      if (b->next->ofs == 0)
        {
          /* The next block is empty.  Use it. */
          b = b->next;
          b->ofs = POOL_BLOCK_SIZE;
          if ((char *) b + POOL_BLOCK_SIZE == (char *) pool)
            b->ofs += POOL_SIZE;
        }
      else
        {
          /* Create a new block at the start of the list. */
          b = xmalloc (BLOCK_SIZE);
          b->next = pool->blocks;
          b->prev = pool->blocks->prev;
          b->ofs = POOL_BLOCK_SIZE;
          pool->blocks->prev->next = b;
          pool->blocks->prev = b;
        }
      pool->blocks = b;

      /* Allocate space from B. */
      b->ofs += amt;
      return ((char *) b) + b->ofs - amt;
    }
  else
#endif
    return pool_malloc (pool, amt);
}
/* Allocates a memory region AMT bytes in size from POOL and
   returns a pointer to the region's start.  The region is not
   necessarily aligned, so it is most suitable for storing
   strings. */
void *
pool_alloc_unaligned (struct pool *pool, size_t amt)
{
  assert (pool != NULL);

#ifndef DISCRETE_BLOCKS
  /* Strings need not be aligned on any boundary, but some
     operations may be more efficient when they are.  However,
     that's only going to help with reasonably long strings. */
  if (amt < ALIGN_SIZE)
    {
      if (amt == 0)
        return NULL;
      else
        {
          struct pool_block *const b = pool->blocks;

          if (b->ofs + amt <= BLOCK_SIZE)
            {
              void *p = ((char *) b) + b->ofs;
              b->ofs += amt;
              return p;
            }
        }
    }
#endif

  return pool_alloc (pool, amt);
}
/* Allocates a memory region N * S bytes in size from POOL and
   returns a pointer to the region's start.
   N must be nonnegative, S must be positive.
   Terminates the program if the memory cannot be obtained,
   including the case where N * S overflows the range of size_t. */
void *
pool_nalloc (struct pool *pool, size_t n, size_t s)
{
  if (xalloc_oversized (n, s))
    xalloc_die ();
  return pool_alloc (pool, n * s);
}
/* Allocates SIZE bytes in POOL, copies BUFFER into it, and
   returns the new copy. */
void *
pool_clone (struct pool *pool, const void *buffer, size_t size)
{
  void *block = pool_alloc (pool, size);
  memcpy (block, buffer, size);
  return block;
}
/* Allocates SIZE bytes of unaligned data in POOL, copies BUFFER
   into it, and returns the new copy. */
void *
pool_clone_unaligned (struct pool *pool, const void *buffer, size_t size)
{
  void *block = pool_alloc_unaligned (pool, size);
  memcpy (block, buffer, size);
  return block;
}
/* Duplicates null-terminated STRING, within POOL, and returns a
   pointer to the duplicate.  For use only with strings, because
   the returned pointer may not be aligned properly for other
   types. */
char *
pool_strdup (struct pool *pool, const char *string)
{
  return pool_clone_unaligned (pool, string, strlen (string) + 1);
}
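/* Illustrative sketch (not part of the original source; struct point and
   original_pt are hypothetical): copies made with pool_strdup() and
   pool_clone() live until the pool is destroyed, so callers need not free
   them individually.

     char *copy = pool_strdup (pool, "some text");
     struct point *pt = pool_clone (pool, &original_pt, sizeof original_pt);
*/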
/* Formats FORMAT with the given ARGS in memory allocated from
   POOL and returns the formatted string. */
char *
pool_vasprintf (struct pool *pool, const char *format, va_list args_)
{
  struct pool_block *b;
  va_list args;
  int needed, avail;
  char *s;

  va_copy (args, args_);
  b = pool->blocks;
  avail = BLOCK_SIZE - b->ofs;
  s = ((char *) b) + b->ofs;
  needed = vsnprintf (s, avail, format, args);
  va_end (args);

  if (needed >= 0)
    {
      if (needed < avail)
        {
          /* Success.  Reserve the space that was actually used. */
          b->ofs += needed + 1;
        }
      else
        {
          /* Failure, but now we know how much space is needed.
             Allocate that much and reformat. */
          s = pool_alloc (pool, needed + 1);

          va_copy (args, args_);
          vsprintf (s, format, args);
          va_end (args);
        }
    }
  else
    {
      /* Some old libc's returned -1 when the destination string
         was too short.  This should be uncommon these days and
         it's a rare case anyhow.  Use the easiest solution: punt
         to dynamic allocation. */
      va_copy (args, args_);
      s = xvasprintf (format, args);
      va_end (args);

      pool_register (pool, free, s);
    }

  return s;
}
/* Formats FORMAT in memory allocated from POOL
   and returns the formatted string. */
char *
pool_asprintf (struct pool *pool, const char *format, ...)
{
  va_list args;
  char *string;

  va_start (args, format);
  string = pool_vasprintf (pool, format, args);
  va_end (args);

  return string;
}
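/* Illustrative sketch (not part of the original source; row and n_rows are
   hypothetical size_t variables): the formatted string comes back already
   owned by the pool.

     char *msg = pool_asprintf (pool, "row %zu of %zu", row, n_rows);
*/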
/* Standard allocation routines. */

/* Allocates AMT bytes using malloc(), to be managed by POOL, and
   returns a pointer to the beginning of the block.
   If POOL is a null pointer, then allocates a normal memory block
   with xmalloc(). */
void *
pool_malloc (struct pool *pool, size_t amt)
{
  if (pool != NULL)
    {
      if (amt != 0)
        {
          struct pool_gizmo *g = xmalloc (amt + POOL_GIZMO_SIZE);
          g->type = POOL_GIZMO_MALLOC;
          add_gizmo (pool, g);

          return ((char *) g) + POOL_GIZMO_SIZE;
        }
      else
        return NULL;
    }
  else
    return xmalloc (amt);
}
/* Allocates and returns N elements of S bytes each, to be
   managed by POOL.
   If POOL is a null pointer, then allocates a normal memory block
   with malloc().
   N must be nonnegative, S must be positive.
   Terminates the program if the memory cannot be obtained,
   including the case where N * S overflows the range of size_t. */
void *
pool_nmalloc (struct pool *pool, size_t n, size_t s)
{
  if (xalloc_oversized (n, s))
    xalloc_die ();
  return pool_malloc (pool, n * s);
}
/* Allocates AMT bytes using malloc(), to be managed by POOL,
   zeros the block, and returns a pointer to the beginning of the
   block.
   If POOL is a null pointer, then allocates a normal memory block
   with xmalloc(). */
void *
pool_zalloc (struct pool *pool, size_t amt)
{
  void *p = pool_malloc (pool, amt);
  memset (p, 0, amt);
  return p;
}
/* Allocates and returns N elements of S bytes each, to be
   managed by POOL, and zeros the block.
   If POOL is a null pointer, then allocates a normal memory block
   with malloc().
   N must be nonnegative, S must be positive.
   Terminates the program if the memory cannot be obtained,
   including the case where N * S overflows the range of size_t. */
void *
pool_calloc (struct pool *pool, size_t n, size_t s)
{
  void *p = pool_nmalloc (pool, n, s);
  memset (p, 0, n * s);
  return p;
}
/* Changes the allocation size of the specified memory block P managed
   by POOL to AMT bytes and returns a pointer to the beginning of the
   block.
   If POOL is a null pointer, then the block is reallocated in the
   usual way with realloc(). */
void *
pool_realloc (struct pool *pool, void *p, size_t amt)
{
  if (pool != NULL)
    {
      if (p != NULL)
        {
          if (amt != 0)
            {
              struct pool_gizmo *g = (void *) (((char *) p) - POOL_GIZMO_SIZE);
              check_gizmo (pool, g);

              g = xrealloc (g, amt + POOL_GIZMO_SIZE);
              if (g->next)
                g->next->prev = g;
              if (g->prev)
                g->prev->next = g;
              else
                pool->gizmos = g;
              check_gizmo (pool, g);

              return ((char *) g) + POOL_GIZMO_SIZE;
            }
          else
            {
              pool_free (pool, p);
              return NULL;
            }
        }
      else
        return pool_malloc (pool, amt);
    }
  else
    return xrealloc (p, amt);
}
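/* Illustrative sketch (not part of the original source): a block obtained
   from pool_malloc() can be grown with pool_realloc(); the pool keeps
   tracking it under its new address.

     double *buf = pool_malloc (pool, 16 * sizeof *buf);
     ...
     buf = pool_realloc (pool, buf, 32 * sizeof *buf);
*/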
/* Changes the allocation size of the specified memory block P
   managed by POOL to N * S bytes and returns a pointer to the
   beginning of the block.
   N must be nonnegative, S must be positive.
   If POOL is a null pointer, then the block is reallocated in
   the usual way with xrealloc().
   Terminates the program if the memory cannot be obtained,
   including the case where N * S overflows the range of size_t. */
void *
pool_nrealloc (struct pool *pool, void *p, size_t n, size_t s)
{
  if (xalloc_oversized (n, s))
    xalloc_die ();
  return pool_realloc (pool, p, n * s);
}
/* If P is null, allocate a block of at least *PN such objects;
   otherwise, reallocate P so that it contains more than *PN
   objects each of S bytes.  *PN must be nonzero unless P is
   null, and S must be nonzero.  Set *PN to the new number of
   objects, and return the pointer to the new block.  *PN is
   never set to zero, and the returned pointer is never null.

   The block returned is managed by POOL.  If POOL is a null
   pointer, then the block is reallocated in the usual way with
   x2nrealloc().

   Terminates the program if the memory cannot be obtained,
   including the case where the memory required overflows the
   range of size_t.

   Repeated reallocations are guaranteed to make progress, either by
   allocating an initial block with a nonzero size, or by allocating a
   larger block.

   In the following implementation, nonzero sizes are doubled so that
   repeated reallocations have O(N log N) overall cost rather than
   O(N**2) cost, but the specification for this function does not
   guarantee that sizes are doubled.

   Here is an example of use:

     int *p = NULL;
     size_t used = 0;
     size_t allocated = 0;

     void
     append_int (int value)
       {
         if (used == allocated)
           p = pool_2nrealloc (pool, p, &allocated, sizeof *p);
         p[used++] = value;
       }

   This causes x2nrealloc to allocate a block of some nonzero size the
   first time it is called.

   To have finer-grained control over the initial size, set *PN to a
   nonzero value before calling this function with P == NULL.  For
   example:

     int *p = NULL;
     size_t used = 0;
     size_t allocated = 0;
     size_t allocated1 = 1000;

     void
     append_int (int value)
       {
         if (used == allocated)
           {
             p = pool_2nrealloc (pool, p, &allocated1, sizeof *p);
             allocated = allocated1;
           }
         p[used++] = value;
       }

   This function implementation is from gnulib. */
void *
pool_2nrealloc (struct pool *pool, void *p, size_t *pn, size_t s)
{
  size_t n = *pn;

  if (p == NULL)
    {
      if (n == 0)
        {
          /* The approximate size to use for initial small allocation
             requests, when the invoking code specifies an old size of
             zero.  64 bytes is the largest "small" request for the
             GNU C library malloc. */
          enum { DEFAULT_MXFAST = 64 };

          n = DEFAULT_MXFAST / s;
          n += !n;
        }
    }
  else
    {
      if (SIZE_MAX / 2 / s < n)
        xalloc_die ();
      n *= 2;
    }

  *pn = n;
  return pool_realloc (pool, p, n * s);
}
/* Frees block P managed by POOL.
   If POOL is a null pointer, then the block is freed as usual with
   free(). */
void
pool_free (struct pool *pool, void *p)
{
  if (pool != NULL && p != NULL)
    {
      struct pool_gizmo *g = (void *) (((char *) p) - POOL_GIZMO_SIZE);
      check_gizmo (pool, g);
      delete_gizmo (pool, g);
      free (g);
    }
  else
    free (p);
}
/* Gizmo allocations. */

/* Creates and returns a pool as a subpool of POOL.
   The subpool will be destroyed automatically when POOL is destroyed.
   It may also be destroyed explicitly in advance. */
struct pool *
pool_create_subpool (struct pool *pool)
{
  struct pool *subpool;
  struct pool_gizmo *g;

  assert (pool != NULL);
  subpool = pool_create ();
  subpool->parent = pool;

  g = (void *) (((char *) subpool->blocks) + subpool->blocks->ofs);
  subpool->blocks->ofs += POOL_GIZMO_SIZE;

  g->type = POOL_GIZMO_SUBPOOL;
  g->p.subpool = subpool;

  add_gizmo (pool, g);

  return subpool;
}
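/* Illustrative sketch (not part of the original source): a subpool's
   lifetime is nested inside its parent's, so per-phase allocations can be
   discarded early while the parent pool lives on.

     struct pool *session = pool_create ();
     struct pool *phase = pool_create_subpool (session);
     ...allocate temporary objects from phase...
     pool_destroy (phase);
     ...
     pool_destroy (session);

   Destroying session would also have destroyed phase automatically had it
   still existed. */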
/* Makes SUBPOOL a subpool of POOL.
   SUBPOOL must not already have a parent pool.
   The subpool will be destroyed automatically when POOL is destroyed.
   It may also be destroyed explicitly in advance. */
void
pool_add_subpool (struct pool *pool, struct pool *subpool)
{
  struct pool_gizmo *g;

  assert (pool != NULL);
  assert (subpool != NULL);
  assert (subpool->parent == NULL);

  g = pool_alloc (subpool, sizeof *g);
  g->type = POOL_GIZMO_SUBPOOL;
  g->p.subpool = subpool;
  add_gizmo (pool, g);

  subpool->parent = pool;
}
/* Opens file FILE_NAME with mode MODE and returns a handle to it
   if successful or a null pointer if not.
   The file will be closed automatically when POOL is destroyed, or it
   may be closed explicitly in advance using pool_fclose(), or
   detached from the pool with pool_detach_file(). */
FILE *
pool_fopen (struct pool *pool, const char *file_name, const char *mode)
{
  FILE *f;

  assert (pool && file_name && mode);
  f = fopen (file_name, mode);
  if (f != NULL)
    pool_attach_file (pool, f);

  return f;
}
/* Closes file FILE managed by POOL.
   Returns 0 if successful, EOF if an I/O error occurred. */
int
pool_fclose (struct pool *pool, FILE *file)
{
  assert (pool && file);
  pool_detach_file (pool, file);
  return fclose (file);
}
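/* Illustrative sketch (not part of the original source; file_name is a
   hypothetical variable): files opened through the pool are closed by
   pool_destroy() unless closed or detached earlier.

     FILE *f = pool_fopen (pool, file_name, "r");
     if (f != NULL)
       {
         ...read from f...
         pool_fclose (pool, f);
       }

   The explicit pool_fclose() is optional; pool_destroy() would close the
   file too. */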
/* Creates a temporary file with tmpfile() and returns a handle to it
   if successful or a null pointer if not.
   The file will be closed automatically when POOL is destroyed, or it
   may be closed explicitly in advance using pool_fclose(), or
   detached from the pool with pool_detach_file(). */
FILE *
pool_tmpfile (struct pool *pool)
{
  FILE *file = tmpfile ();
  if (file != NULL)
    pool_attach_file (pool, file);
  return file;
}
/* Attaches FILE to POOL.
   The file will be closed automatically when POOL is destroyed, or it
   may be closed explicitly in advance using pool_fclose(), or
   detached from the pool with pool_detach_file(). */
void
pool_attach_file (struct pool *pool, FILE *file)
{
  struct pool_gizmo *g = pool_alloc (pool, sizeof *g);
  g->type = POOL_GIZMO_FILE;
  g->p.file = file;
  add_gizmo (pool, g);
}
/* Detaches FILE from POOL. */
void
pool_detach_file (struct pool *pool, FILE *file)
{
  struct pool_gizmo *g;

  for (g = pool->gizmos; g; g = g->next)
    if (g->type == POOL_GIZMO_FILE && g->p.file == file)
      {
        delete_gizmo (pool, g);
        return;
      }
}
/* Registers FREE to be called with argument P.
   P should be unique among those registered in POOL so that it can be
   uniquely identified by pool_unregister().
   If not unregistered, FREE will be called with argument P when POOL
   is destroyed. */
void
pool_register (struct pool *pool, void (*free) (void *), void *p)
{
  assert (pool && free && p);

  {
    struct pool_gizmo *g = pool_alloc (pool, sizeof *g);
    g->type = POOL_GIZMO_REGISTERED;
    g->p.registered.free = free;
    g->p.registered.p = p;
    add_gizmo (pool, g);
  }
}
/* Unregisters previously registered P from POOL.
   Returns true only if P was found to be registered in POOL. */
bool
pool_unregister (struct pool *pool, void *p)
{
  assert (pool && p);

  {
    struct pool_gizmo *g;

    for (g = pool->gizmos; g; g = g->next)
      if (g->type == POOL_GIZMO_REGISTERED && g->p.registered.p == p)
        {
          delete_gizmo (pool, g);
          return true;
        }
  }

  return false;
}
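/* Illustrative sketch (not part of the original source; xstrdup is the
   gnulib allocation wrapper, but any allocator works): pool_register()
   ties an arbitrary resource to the pool's lifetime, and pool_unregister()
   hands ownership back to the caller.

     char *s = xstrdup ("owned elsewhere");
     pool_register (pool, free, s);
     ...
     if (pool_unregister (pool, s))
       free (s);
*/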
/* Partial freeing. */

/* Notes the state of POOL into MARK so that it may be restored
   by a call to pool_release(). */
void
pool_mark (struct pool *pool, struct pool_mark *mark)
{
  assert (pool && mark);

  mark->block = pool->blocks;
  mark->ofs = pool->blocks->ofs;

  mark->serial = serial;
}
/* Restores to POOL the state recorded in MARK.
   Emptied blocks are not given back with free() but kept for
   later allocations.  To get that behavior, use a subpool
   instead. */
void
pool_release (struct pool *pool, const struct pool_mark *mark)
{
  assert (pool && mark);

  {
    struct pool_gizmo *cur, *next;

    for (cur = pool->gizmos; cur && cur->serial >= mark->serial; cur = next)
      {
        next = cur->next;
        free_gizmo (cur);
      }

    if (cur != NULL)
      {
        cur->prev = NULL;
        pool->gizmos = cur;
      }
    else
      pool->gizmos = NULL;
  }

  {
    struct pool_block *cur;

    for (cur = pool->blocks; cur != mark->block; cur = cur->next)
      {
        cur->ofs = POOL_BLOCK_SIZE;
        if ((char *) cur + POOL_BLOCK_SIZE == (char *) pool)
          {
            cur->ofs += POOL_SIZE;
            if (pool->parent != NULL)
              cur->ofs += POOL_GIZMO_SIZE;
          }
      }
    pool->blocks = mark->block;
    pool->blocks->ofs = mark->ofs;
  }
}
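/* Illustrative sketch (not part of the original source): pool_mark() and
   pool_release() bracket a burst of temporary allocations without
   disturbing older ones.

     struct pool_mark mark;
     pool_mark (pool, &mark);
     char *tmp = pool_strdup (pool, "scratch");
     ...use tmp...
     pool_release (pool, &mark);

   After pool_release(), tmp is no longer valid, but allocations made
   before pool_mark() remain usable. */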
/* Private functions. */

/* Adds GIZMO at the beginning of POOL's gizmo list. */
static void
add_gizmo (struct pool *pool, struct pool_gizmo *gizmo)
{
  assert (pool && gizmo);

  gizmo->pool = pool;
  gizmo->next = pool->gizmos;
  gizmo->prev = NULL;
  if (pool->gizmos)
    pool->gizmos->prev = gizmo;
  pool->gizmos = gizmo;

  gizmo->serial = serial++;

  check_gizmo (pool, gizmo);
}
/* Removes GIZMO from POOL's gizmo list. */
static void
delete_gizmo (struct pool *pool, struct pool_gizmo *gizmo)
{
  assert (pool && gizmo);

  check_gizmo (pool, gizmo);
  if (gizmo->prev)
    gizmo->prev->next = gizmo->next;
  else
    pool->gizmos = gizmo->next;
  if (gizmo->next)
    gizmo->next->prev = gizmo->prev;
}
/* Frees any of GIZMO's internal state.
   GIZMO's data must not be referenced after calling this function. */
static void
free_gizmo (struct pool_gizmo *gizmo)
{
  assert (gizmo != NULL);

  switch (gizmo->type)
    {
    case POOL_GIZMO_MALLOC:
      free (gizmo);
      break;
    case POOL_GIZMO_FILE:
      fclose (gizmo->p.file);       /* Ignore errors. */
      break;
    case POOL_GIZMO_SUBPOOL:
      gizmo->p.subpool->parent = NULL;
      pool_destroy (gizmo->p.subpool);
      break;
    case POOL_GIZMO_REGISTERED:
      gizmo->p.registered.free (gizmo->p.registered.p);
      break;
    default:
      NOT_REACHED ();
    }
}
/* Free all the gizmos in POOL. */
static void
free_all_gizmos (struct pool *pool)
{
  struct pool_gizmo *cur, *next;

  for (cur = pool->gizmos; cur; cur = next)
    {
      next = cur->next;
      free_gizmo (cur);
    }
  pool->gizmos = NULL;
}
static void
check_gizmo (struct pool *p, struct pool_gizmo *g)
{
  assert (g->pool == p);
  assert (g->next == NULL || g->next->prev == g);
  assert ((g->prev != NULL && g->prev->next == g)
          || (g->prev == NULL && p->gizmos == g));
}