/* PSPP - a program for statistical analysis.
   Copyright (C) 2000 Free Software Foundation, Inc.

   This program is free software: you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation, either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>. */
#include <config.h>

#include <libpspp/pool.h>

#include <stdarg.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include <libpspp/assertion.h>

#include "xalloc.h"
#include "xvasprintf.h"
/* Fast, low-overhead memory block suballocator. */
struct pool
  {
    struct pool *parent;              /* Pool of which this pool is a subpool. */
    struct pool_block *blocks;        /* Blocks owned by the pool. */
    struct pool_gizmo *gizmos;        /* Other stuff owned by the pool. */
  };

/* Pool block. */
struct pool_block
  {
    struct pool_block *prev;
    struct pool_block *next;
    size_t ofs;                       /* Offset of first unused byte. */
  };

/* Gizmo types. */
enum
  {
    POOL_GIZMO_MALLOC,
    POOL_GIZMO_FILE,
    POOL_GIZMO_SUBPOOL,
    POOL_GIZMO_REGISTERED,
  };

/* Pool routines can maintain objects (`gizmos') as well as doing
   suballocation.
   This structure is used to keep track of them. */
struct pool_gizmo
  {
    struct pool *pool;                /* Pool that owns this gizmo. */
    struct pool_gizmo *prev;
    struct pool_gizmo *next;

    long serial;                      /* Serial number. */
    int type;                         /* Type of this gizmo. */

    /* Type-dependent info. */
    union
      {
        FILE *file;                   /* POOL_GIZMO_FILE. */
        struct pool *subpool;         /* POOL_GIZMO_SUBPOOL. */

        /* POOL_GIZMO_REGISTERED. */
        struct
          {
            void (*free) (void *p);
            void *p;
          }
        registered;
      }
    p;
  };
/* Rounds X up to the next multiple of Y. */
#define ROUND_UP(X, Y)                          \
        (((X) + ((Y) - 1)) / (Y) * (Y))
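/* For example, ROUND_UP (27, 8) is 32, and ROUND_UP (32, 8) is
   still 32. */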
/* Types that provide typically useful alignment sizes. */
union align
  {
    void *op;
    void (*fp) (void);
    long l;
    double d;
  };

/* This should be the alignment size used by malloc().  The size of
   the union above is correct, if not optimal, in all known cases. */
#define ALIGN_SIZE sizeof (union align)
/* DISCRETE_BLOCKS may be declared as nonzero to prevent
   suballocation of blocks.  This is useful under memory
   debuggers like valgrind because it allows the source location
   of bugs to be more accurately pinpointed.

   On the other hand, if we're testing the library, then we want to
   test the library's real functionality, not its crippled, slow,
   simplified functionality. */
/*#define DISCRETE_BLOCKS 1*/

/* Size of each block allocated in the pool, in bytes.
   Should be at least 1k. */
#define BLOCK_SIZE 1024

/* Sizes of some structures with alignment padding included. */
#define POOL_BLOCK_SIZE ROUND_UP (sizeof (struct pool_block), ALIGN_SIZE)
#define POOL_GIZMO_SIZE ROUND_UP (sizeof (struct pool_gizmo), ALIGN_SIZE)
#define POOL_SIZE ROUND_UP (sizeof (struct pool), ALIGN_SIZE)

/* Serial number used to keep track of gizmos for mark/release. */
static long serial = 0;

static void add_gizmo (struct pool *, struct pool_gizmo *);
static void free_gizmo (struct pool_gizmo *);
static void free_all_gizmos (struct pool *pool);
static void delete_gizmo (struct pool *, struct pool_gizmo *);
static void check_gizmo (struct pool *, struct pool_gizmo *);
/* General routines. */

/* Creates and returns a new memory pool, which allows malloc()'d
   blocks to be suballocated in a time- and space-efficient manner.
   The entire contents of the memory pool are freed at once.

   In addition, other objects can be associated with a memory pool.
   These are released when the pool is destroyed. */
  struct pool_block *block;

  block = xmalloc (BLOCK_SIZE);
  block->prev = block->next = block;
  block->ofs = POOL_BLOCK_SIZE + POOL_SIZE;

  pool = (struct pool *) (((char *) block) + POOL_BLOCK_SIZE);
  pool->blocks = block;
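/* Example of typical use (a sketch, not part of the library; it
   assumes the declarations in pool.h):

     struct pool *pool = pool_create ();
     int *counts = pool_nalloc (pool, 100, sizeof *counts);
     char *name = pool_strdup (pool, "example");
     ... use counts and name ...
     pool_destroy (pool);

   Destroying the pool frees counts, name, and everything else
   allocated from the pool, so none of it may be used afterward. */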
/* Creates a pool, allocates a block STRUCT_SIZE bytes in
   length from it, stores the pool's address at offset
   POOL_MEMBER_OFFSET within the block, and returns the allocated
   block.

   Meant for use indirectly via pool_create_container(). */
pool_create_at_offset (size_t struct_size, size_t pool_member_offset)
  assert (struct_size >= sizeof pool);
  assert (pool_member_offset <= struct_size - sizeof pool);

  pool = pool_create ();
  struct_ = pool_alloc (pool, struct_size);
  *(struct pool **) (struct_ + pool_member_offset) = pool;
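/* For illustration (a sketch, not part of the library; struct lexer
   and its members are made-up names), a caller can embed the pool
   inside the structure it allocates, so that destroying the pool
   frees the structure too:

     struct lexer
       {
         struct pool *pool;
         char *file_name;
       };

     struct lexer *lexer
       = pool_create_at_offset (sizeof *lexer,
                                offsetof (struct lexer, pool));
     lexer->file_name = pool_strdup (lexer->pool, "input.sps");
     ...
     pool_destroy (lexer->pool);

   offsetof() comes from <stddef.h>; the pool_create_container()
   interface mentioned above is the usual way to write the first
   call. */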
/* Destroy the specified pool, including all subpools. */
pool_destroy (struct pool *pool)

  /* Remove this pool from its parent's list of gizmos. */
  delete_gizmo (pool->parent, (void *) (((char *) pool) + POOL_SIZE));

  free_all_gizmos (pool);

  /* Free all the memory. */
  struct pool_block *cur, *next;

  pool->blocks->prev->next = NULL;
  for (cur = pool->blocks; cur; cur = next)

/* Release all the memory and gizmos in POOL.
   Blocks are not given back with free() but kept for later
   allocations.  To give back memory, use a subpool instead. */
pool_clear (struct pool *pool)

  free_all_gizmos (pool);

  /* Zero out block sizes. */
  struct pool_block *cur;

      cur->ofs = POOL_BLOCK_SIZE;
      if ((char *) cur + POOL_BLOCK_SIZE == (char *) pool)
          cur->ofs += POOL_SIZE;
          if (pool->parent != NULL)
            cur->ofs += POOL_GIZMO_SIZE;
  while (cur != pool->blocks);
/* Suballocation routines. */

/* Allocates a memory region AMT bytes in size from POOL and returns a
   pointer to the region's start.
   The region is properly aligned for storing any object. */
pool_alloc (struct pool *pool, size_t amt)
  assert (pool != NULL);

#ifndef DISCRETE_BLOCKS
  if (amt <= MAX_SUBALLOC)
      /* If there is space in this block, take it. */
      struct pool_block *b = pool->blocks;
      b->ofs = ROUND_UP (b->ofs, ALIGN_SIZE);
      if (b->ofs + amt <= BLOCK_SIZE)
          void *const p = ((char *) b) + b->ofs;

      /* No space in this block, so we must make other
         arrangements. */
      if (b->next->ofs == 0)
          /* The next block is empty.  Use it. */
          b->ofs = POOL_BLOCK_SIZE;
          if ((char *) b + POOL_BLOCK_SIZE == (char *) pool)

          /* Create a new block at the start of the list. */
          b = xmalloc (BLOCK_SIZE);
          b->next = pool->blocks;
          b->prev = pool->blocks->prev;
          b->ofs = POOL_BLOCK_SIZE;
          pool->blocks->prev->next = b;
          pool->blocks->prev = b;

      /* Allocate space from B. */
      return ((char *) b) + b->ofs - amt;

    return pool_malloc (pool, amt);
/* Allocates a memory region AMT bytes in size from POOL and
   returns a pointer to the region's start.  The region is not
   necessarily aligned, so it is most suitable for storing
   strings. */
pool_alloc_unaligned (struct pool *pool, size_t amt)
  assert (pool != NULL);

#ifndef DISCRETE_BLOCKS
  /* Strings need not be aligned on any boundary, but some
     operations may be more efficient when they are.  However,
     that's only going to help with reasonably long strings. */
  if (amt < ALIGN_SIZE)
      struct pool_block *const b = pool->blocks;

      if (b->ofs + amt <= BLOCK_SIZE)
          void *p = ((char *) b) + b->ofs;

  return pool_alloc (pool, amt);
/* Allocates a memory region N * S bytes in size from POOL and
   returns a pointer to the region's start.
   N must be nonnegative, S must be positive.
   Terminates the program if the memory cannot be obtained,
   including the case where N * S overflows the range of size_t. */
pool_nalloc (struct pool *pool, size_t n, size_t s)
  if (xalloc_oversized (n, s))
  return pool_alloc (pool, n * s);

/* Allocates SIZE bytes in POOL, copies BUFFER into it, and
   returns the new copy. */
pool_clone (struct pool *pool, const void *buffer, size_t size)
  void *block = pool_alloc (pool, size);
  memcpy (block, buffer, size);
/* Allocates SIZE bytes of unaligned data in POOL, copies BUFFER
   into it, and returns the new copy. */
pool_clone_unaligned (struct pool *pool, const void *buffer, size_t size)
  void *block = pool_alloc_unaligned (pool, size);
  memcpy (block, buffer, size);

/* Duplicates null-terminated STRING, within POOL, and returns a
   pointer to the duplicate.  For use only with strings, because
   the returned pointer may not be aligned properly for other
   types. */
pool_strdup (struct pool *pool, const char *string)
  return pool_clone_unaligned (pool, string, strlen (string) + 1);
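/* For example (a sketch, not part of the library):

     char *copy = pool_strdup (pool, "sample text");

   behaves like strdup ("sample text"), except that the copy is
   released when POOL is destroyed or cleared instead of by an
   explicit free(). */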
/* Formats FORMAT with the given ARGS in memory allocated from
   POOL and returns the formatted string. */
pool_vasprintf (struct pool *pool, const char *format, va_list args_)
  struct pool_block *b;

  va_copy (args, args_);
  avail = BLOCK_SIZE - b->ofs;
  s = ((char *) b) + b->ofs;
  needed = vsnprintf (s, avail, format, args);

          /* Success.  Reserve the space that was actually used. */
          b->ofs += needed + 1;

          /* Failure, but now we know how much space is needed.
             Allocate that much and reformat. */
          s = pool_alloc (pool, needed + 1);

          va_copy (args, args_);
          vsprintf (s, format, args);

      /* Some old libc's returned -1 when the destination string
         was too short.  This should be uncommon these days and
         it's a rare case anyhow.  Use the easiest solution: punt
         to dynamic allocation. */
      va_copy (args, args_);
      s = xvasprintf (format, args);

      pool_register (pool, free, s);

/* Formats FORMAT in memory allocated from POOL
   and returns the formatted string. */
pool_asprintf (struct pool *pool, const char *format, ...)
  va_start (args, format);
  string = pool_vasprintf (pool, format, args);
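/* For example (a sketch; the variable names are made up):

     char *message = pool_asprintf (pool, "%s, line %d",
                                    file_name, line_number);

   The formatted string lives until POOL is destroyed or cleared,
   whichever comes first. */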
/* Standard allocation routines. */

/* Allocates AMT bytes using malloc(), to be managed by POOL, and
   returns a pointer to the beginning of the block.
   If POOL is a null pointer, then allocates a normal memory block
   with malloc(). */
pool_malloc (struct pool *pool, size_t amt)
      struct pool_gizmo *g = xmalloc (amt + POOL_GIZMO_SIZE);
      g->type = POOL_GIZMO_MALLOC;

      return ((char *) g) + POOL_GIZMO_SIZE;

    return xmalloc (amt);
/* Allocates and returns N elements of S bytes each, to be
   managed by POOL.
   If POOL is a null pointer, then allocates a normal memory block
   with malloc().
   N must be nonnegative, S must be positive.
   Terminates the program if the memory cannot be obtained,
   including the case where N * S overflows the range of size_t. */
pool_nmalloc (struct pool *pool, size_t n, size_t s)
  if (xalloc_oversized (n, s))
  return pool_malloc (pool, n * s);
/* Allocates AMT bytes using malloc(), to be managed by POOL,
   zeros the block, and returns a pointer to the beginning of the
   block.
   If POOL is a null pointer, then allocates a normal memory block
   with malloc() and zeros it. */
pool_zalloc (struct pool *pool, size_t amt)
  void *p = pool_malloc (pool, amt);
/* Allocates and returns N elements of S bytes each, to be
   managed by POOL, and zeros the block.
   If POOL is a null pointer, then allocates a normal memory block
   with malloc() and zeros it.
   N must be nonnegative, S must be positive.
   Terminates the program if the memory cannot be obtained,
   including the case where N * S overflows the range of size_t. */
pool_calloc (struct pool *pool, size_t n, size_t s)
  void *p = pool_nmalloc (pool, n, s);
  memset (p, 0, n * s);
/* Changes the allocation size of the specified memory block P managed
   by POOL to AMT bytes and returns a pointer to the beginning of the
   block.
   If POOL is a null pointer, then the block is reallocated in the
   usual way with realloc(). */
pool_realloc (struct pool *pool, void *p, size_t amt)
      struct pool_gizmo *g = (void *) (((char *) p) - POOL_GIZMO_SIZE);
      check_gizmo (pool, g);

      g = xrealloc (g, amt + POOL_GIZMO_SIZE);
      check_gizmo (pool, g);

      return ((char *) g) + POOL_GIZMO_SIZE;

    return pool_malloc (pool, amt);

  return xrealloc (p, amt);
/* Changes the allocation size of the specified memory block P
   managed by POOL to N * S bytes and returns a pointer to the
   beginning of the block.
   N must be nonnegative, S must be positive.
   If POOL is a null pointer, then the block is reallocated in
   the usual way with xrealloc().
   Terminates the program if the memory cannot be obtained,
   including the case where N * S overflows the range of size_t. */
pool_nrealloc (struct pool *pool, void *p, size_t n, size_t s)
  if (xalloc_oversized (n, s))
  return pool_realloc (pool, p, n * s);
/* If P is null, allocate a block of at least *PN such objects;
   otherwise, reallocate P so that it contains more than *PN
   objects each of S bytes.  *PN must be nonzero unless P is
   null, and S must be nonzero.  Set *PN to the new number of
   objects, and return the pointer to the new block.  *PN is
   never set to zero, and the returned pointer is never null.

   The block returned is managed by POOL.  If POOL is a null
   pointer, then the block is reallocated in the usual way with
   xrealloc().

   Terminates the program if the memory cannot be obtained,
   including the case where the memory required overflows the
   range of size_t.

   Repeated reallocations are guaranteed to make progress, either by
   allocating an initial block with a nonzero size, or by allocating a
   larger block.

   In the following implementation, nonzero sizes are doubled so that
   repeated reallocations have O(N log N) overall cost rather than
   O(N**2) cost, but the specification for this function does not
   guarantee that sizes are doubled.

   Here is an example of use:

     struct pool *pool;
     int *p = NULL;
     size_t used = 0;
     size_t allocated = 0;

     void
     append_int (int value)
       {
         if (used == allocated)
           p = pool_2nrealloc (pool, p, &allocated, sizeof *p);
         p[used++] = value;
       }

   This causes pool_2nrealloc to allocate a block of some nonzero size
   the first time it is called.

   To have finer-grained control over the initial size, set *PN to a
   nonzero value before calling this function with P == NULL.  For
   example:

     struct pool *pool;
     int *p = NULL;
     size_t used = 0;
     size_t allocated = 0;
     size_t allocated1 = 1000;

     void
     append_int (int value)
       {
         if (used == allocated)
           {
             p = pool_2nrealloc (pool, p, &allocated1, sizeof *p);
             allocated = allocated1;
           }
         p[used++] = value;
       }

   This function implementation is from gnulib. */
pool_2nrealloc (struct pool *pool, void *p, size_t *pn, size_t s)
          /* The approximate size to use for initial small allocation
             requests, when the invoking code specifies an old size of
             zero.  64 bytes is the largest "small" request for the
             GNU C library malloc. */
          enum { DEFAULT_MXFAST = 64 };

          n = DEFAULT_MXFAST / s;

      if (SIZE_MAX / 2 / s < n)

  return pool_realloc (pool, p, n * s);
/* Frees block P managed by POOL.
   If POOL is a null pointer, then the block is freed as usual with
   free(). */
pool_free (struct pool *pool, void *p)
  if (pool != NULL && p != NULL)
      struct pool_gizmo *g = (void *) (((char *) p) - POOL_GIZMO_SIZE);
      check_gizmo (pool, g);
      delete_gizmo (pool, g);
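/* For example (a sketch, not part of the library): a block whose
   lifetime may end before the pool's can come from pool_malloc()
   and be released early with pool_free():

     char *buf = pool_malloc (pool, 4096);
     ... use buf ...
     pool_free (pool, buf);

   Calling pool_free() is optional; anything still owned by POOL is
   freed when the pool itself is destroyed.  Only blocks obtained
   from the pool_malloc() family should be passed to pool_free(). */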
/* Gizmo allocations. */

/* Creates and returns a pool as a subpool of POOL.
   The subpool will be destroyed automatically when POOL is destroyed.
   It may also be destroyed explicitly in advance. */
pool_create_subpool (struct pool *pool)
  struct pool *subpool;
  struct pool_gizmo *g;

  assert (pool != NULL);
  subpool = pool_create ();
  subpool->parent = pool;

  g = (void *) (((char *) subpool->blocks) + subpool->blocks->ofs);
  subpool->blocks->ofs += POOL_GIZMO_SIZE;

  g->type = POOL_GIZMO_SUBPOOL;
  g->p.subpool = subpool;

/* Makes SUBPOOL a subpool of POOL.
   SUBPOOL must not already have a parent pool.
   The subpool will be destroyed automatically when POOL is destroyed.
   It may also be destroyed explicitly in advance. */
pool_add_subpool (struct pool *pool, struct pool *subpool)
  struct pool_gizmo *g;

  assert (pool != NULL);
  assert (subpool != NULL);
  assert (subpool->parent == NULL);

  g = pool_alloc (subpool, sizeof *g);
  g->type = POOL_GIZMO_SUBPOOL;
  g->p.subpool = subpool;

  subpool->parent = pool;
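/* For example (a sketch, not part of the library): a subpool suits
   data with a shorter lifetime than its parent pool's:

     struct pool *subpool = pool_create_subpool (pool);
     char *scratch = pool_strdup (subpool, "temporary data");
     ... use scratch ...
     pool_destroy (subpool);

   Destroying the subpool gives its memory back immediately; a
   subpool that is never destroyed explicitly is destroyed along
   with its parent POOL. */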
/* Opens file FILE_NAME with mode MODE and returns a handle to it
   if successful or a null pointer if not.
   The file will be closed automatically when POOL is destroyed, or it
   may be closed explicitly in advance using pool_fclose(), or
   detached from the pool with pool_detach_file(). */
pool_fopen (struct pool *pool, const char *file_name, const char *mode)
  assert (pool && file_name && mode);
  f = fopen (file_name, mode);
    pool_attach_file (pool, f);

/* Closes file FILE managed by POOL.
   Returns 0 if successful, EOF if an I/O error occurred. */
pool_fclose (struct pool *pool, FILE *file)
  assert (pool && file);
  pool_detach_file (pool, file);
  return fclose (file);

/* Creates a temporary file with tmpfile() and returns a handle to it
   if successful or a null pointer if not.
   The file will be closed automatically when POOL is destroyed, or it
   may be closed explicitly in advance using pool_fclose(), or
   detached from the pool with pool_detach_file(). */
pool_tmpfile (struct pool *pool)
  FILE *file = tmpfile ();
    pool_attach_file (pool, file);
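/* For example (a sketch; "output.txt" is a made-up file name):

     FILE *f = pool_fopen (pool, "output.txt", "w");
     if (f != NULL)
       fputs ("hello\n", f);

   A file that is never passed to pool_fclose() or
   pool_detach_file() is closed when POOL is destroyed. */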
/* Attaches FILE to POOL.
   The file will be closed automatically when POOL is destroyed, or it
   may be closed explicitly in advance using pool_fclose(), or
   detached from the pool with pool_detach_file(). */
pool_attach_file (struct pool *pool, FILE *file)
  struct pool_gizmo *g = pool_alloc (pool, sizeof *g);
  g->type = POOL_GIZMO_FILE;

/* Detaches FILE from POOL. */
pool_detach_file (struct pool *pool, FILE *file)
  struct pool_gizmo *g;

  for (g = pool->gizmos; g; g = g->next)
    if (g->type == POOL_GIZMO_FILE && g->p.file == file)
        delete_gizmo (pool, g);
/* Registers FREE to be called with argument P.
   P should be unique among those registered in POOL so that it can be
   uniquely identified by pool_unregister().
   If not unregistered, FREE will be called with argument P when POOL
   is destroyed. */
pool_register (struct pool *pool, void (*free) (void *), void *p)
  assert (pool && free && p);

  struct pool_gizmo *g = pool_alloc (pool, sizeof *g);
  g->type = POOL_GIZMO_REGISTERED;
  g->p.registered.free = free;
  g->p.registered.p = p;
/* Unregisters previously registered P from POOL.
   Returns true only if P was found to be registered in POOL. */
pool_unregister (struct pool *pool, void *p)
  struct pool_gizmo *g;

  for (g = pool->gizmos; g; g = g->next)
    if (g->type == POOL_GIZMO_REGISTERED && g->p.registered.p == p)
        delete_gizmo (pool, g);
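/* For example (a sketch, not part of the library): an ordinary
   malloc()-style block can be handed to the pool the same way
   pool_vasprintf() does above:

     char *buf = xmalloc (1024);
     pool_register (pool, free, buf);
     ...
     if (pool_unregister (pool, buf))
       free (buf);

   Without the pool_unregister() call, POOL calls free (buf) itself
   when it is destroyed or cleared. */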
/* Partial freeing. */

/* Notes the state of POOL into MARK so that it may be restored
   by a call to pool_release(). */
pool_mark (struct pool *pool, struct pool_mark *mark)
  assert (pool && mark);

  mark->block = pool->blocks;
  mark->ofs = pool->blocks->ofs;

  mark->serial = serial;
/* Restores to POOL the state recorded in MARK.
   Emptied blocks are not given back with free() but kept for
   later allocations.  To get that behavior, use a subpool
   instead. */
pool_release (struct pool *pool, const struct pool_mark *mark)
  assert (pool && mark);

  struct pool_gizmo *cur, *next;

  for (cur = pool->gizmos; cur && cur->serial >= mark->serial; cur = next)

  struct pool_block *cur;

  for (cur = pool->blocks; cur != mark->block; cur = cur->next)
      cur->ofs = POOL_BLOCK_SIZE;
      if ((char *) cur + POOL_BLOCK_SIZE == (char *) pool)
          cur->ofs += POOL_SIZE;
          if (pool->parent != NULL)
            cur->ofs += POOL_GIZMO_SIZE;

  pool->blocks = mark->block;
  pool->blocks->ofs = mark->ofs;
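/* For example (a sketch, not part of the library; it assumes that
   struct pool_mark is defined completely in pool.h):

     struct pool_mark mark;

     pool_mark (pool, &mark);
     char *scratch = pool_strdup (pool, "temporary");
     ... use scratch ...
     pool_release (pool, &mark);

   After pool_release(), scratch must no longer be used, but
   allocations and gizmos created before pool_mark() are unaffected. */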
/* Private functions. */

/* Adds GIZMO at the beginning of POOL's gizmo list. */
add_gizmo (struct pool *pool, struct pool_gizmo *gizmo)
  assert (pool && gizmo);

  gizmo->next = pool->gizmos;
    pool->gizmos->prev = gizmo;
  pool->gizmos = gizmo;

  gizmo->serial = serial++;

  check_gizmo (pool, gizmo);

/* Removes GIZMO from POOL's gizmo list. */
delete_gizmo (struct pool *pool, struct pool_gizmo *gizmo)
  assert (pool && gizmo);

  check_gizmo (pool, gizmo);

    gizmo->prev->next = gizmo->next;
    pool->gizmos = gizmo->next;
    gizmo->next->prev = gizmo->prev;

/* Frees any of GIZMO's internal state.
   GIZMO's data must not be referenced after calling this function. */
free_gizmo (struct pool_gizmo *gizmo)
  assert (gizmo != NULL);

    case POOL_GIZMO_MALLOC:

    case POOL_GIZMO_FILE:
      fclose (gizmo->p.file);   /* Ignore errors. */

    case POOL_GIZMO_SUBPOOL:
      gizmo->p.subpool->parent = NULL;
      pool_destroy (gizmo->p.subpool);

    case POOL_GIZMO_REGISTERED:
      gizmo->p.registered.free (gizmo->p.registered.p);

/* Free all the gizmos in POOL. */
free_all_gizmos (struct pool *pool)
  struct pool_gizmo *cur, *next;

  for (cur = pool->gizmos; cur; cur = next)

check_gizmo (struct pool *p, struct pool_gizmo *g)
  assert (g->pool == p);
  assert (g->next == NULL || g->next->prev == g);
  assert ((g->prev != NULL && g->prev->next == g)
          || (g->prev == NULL && p->gizmos == g));