1 /* PSPP - computes sample statistics.
2 Copyright (C) 2000 Free Software Foundation, Inc.
4 This program is free software; you can redistribute it and/or
5 modify it under the terms of the GNU General Public License as
6 published by the Free Software Foundation; either version 2 of the
7 License, or (at your option) any later version.
9 This program is distributed in the hope that it will be useful, but
10 WITHOUT ANY WARRANTY; without even the implied warranty of
11 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12 General Public License for more details.
14 You should have received a copy of the GNU General Public License
15 along with this program; if not, write to the Free Software
16 Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
23 #include <libpspp/assertion.h>
28 /* Fast, low-overhead memory block suballocator. */
struct pool *parent; /* Pool of which this pool is a subpool. */
struct pool_block *blocks; /* Blocks owned by the pool. */
struct pool_gizmo *gizmos; /* Other stuff owned by the pool. */

/* Blocks form a circular doubly linked list
   (a new pool's single block points at itself; see pool_create). */
struct pool_block *prev; /* Previous block in pool's list. */
struct pool_block *next; /* Next block in pool's list. */

POOL_GIZMO_REGISTERED, /* Arbitrary pointer plus a free function. */

/* Pool routines can maintain objects (`gizmos') as well as doing
   This structure is used to keep track of them. */

struct pool_gizmo *prev; /* Previous gizmo in pool's list. */
struct pool_gizmo *next; /* Next gizmo in pool's list. */

long serial; /* Serial number. */
int type; /* Type of this gizmo. */

/* Type-dependent info. */

FILE *file; /* POOL_GIZMO_FILE. */
struct pool *subpool; /* POOL_GIZMO_SUBPOOL. */

/* POOL_GIZMO_REGISTERED. */
void (*free) (void *p); /* Function called to release P. */
/* Rounds X up to the next multiple of Y.
   Y must be nonzero; both arguments may be evaluated more than once,
   so avoid side effects in them. */
#define ROUND_UP(X, Y) \
        ((((X) + (Y) - 1) / (Y)) * (Y))
/* Types that provide typically useful alignment sizes. */

/* This should be the alignment size used by malloc(). The size of
   the union above is correct, if not optimal, in all known cases. */
#define ALIGN_SIZE sizeof (union align)

/* DISCRETE_BLOCKS may be declared as nonzero to prevent
   suballocation of blocks. This is useful under memory
   debuggers like Checker or valgrind because it allows the
   source location of bugs to be more accurately pinpointed.

   On the other hand, if we're testing the library, then we want to
   test the library's real functionality, not its crippled, slow,
   simplified functionality. */
/*#define DISCRETE_BLOCKS 1*/

/* Size of each block allocated in the pool, in bytes.
   Should be at least 1k. */
#define BLOCK_SIZE 1024

/* Sizes of some structures with alignment padding included.
   These act as header sizes: every block starts with a struct
   pool_block, the pool's first block also embeds the struct pool
   itself, and pool_malloc() regions are preceded by a gizmo. */
#define POOL_BLOCK_SIZE ROUND_UP (sizeof (struct pool_block), ALIGN_SIZE)
#define POOL_GIZMO_SIZE ROUND_UP (sizeof (struct pool_gizmo), ALIGN_SIZE)
#define POOL_SIZE ROUND_UP (sizeof (struct pool), ALIGN_SIZE)
/* Serial number used to keep track of gizmos for mark/release.
   Monotonically increasing; see add_gizmo() and pool_release(). */
static long serial = 0;

/* Prototypes for private helper functions. */
static void add_gizmo (struct pool *, struct pool_gizmo *);
static void free_gizmo (struct pool_gizmo *);
static void free_all_gizmos (struct pool *pool);
static void delete_gizmo (struct pool *, struct pool_gizmo *);
static void check_gizmo (struct pool *, struct pool_gizmo *);
/* General routines. */

/* Creates and returns a new memory pool, which allows malloc()'d
   blocks to be suballocated in a time- and space-efficient manner.
   The entire contents of the memory pool are freed at once.

   In addition, other objects can be associated with a memory pool.
   These are released when the pool is destroyed. */

struct pool_block *block;

/* The pool's first block doubles as home for the struct pool
   itself: block header first, then the pool struct, with
   suballocations starting past both. */
block = xmalloc (BLOCK_SIZE);
block->prev = block->next = block; /* Circular list of one block. */
block->ofs = POOL_BLOCK_SIZE + POOL_SIZE;

pool = (struct pool *) (((char *) block) + POOL_BLOCK_SIZE);

pool->blocks = block;
/* Creates a pool, allocates a block STRUCT_SIZE bytes in
   length from it, stores the pool's address at offset
   POOL_MEMBER_OFFSET within the block, and returns the allocated

   Meant for use indirectly via pool_create_container(). */
pool_create_at_offset (size_t struct_size, size_t pool_member_offset)

/* The pool pointer member must fit entirely within the structure. */
assert (struct_size >= sizeof pool);
assert (pool_member_offset <= struct_size - sizeof pool);

pool = pool_create ();
struct_ = pool_alloc (pool, struct_size);
/* Plant the pool's address inside the new structure so the
   container can later find and destroy its own pool. */
*(struct pool **) (struct_ + pool_member_offset) = pool;
/* Destroy the specified pool, including all subpools. */
pool_destroy (struct pool *pool)

/* Remove this pool from its parent's list of gizmos.
   (A subpool's linking gizmo sits immediately past the pool
   struct in its first block; see pool_create_subpool().) */
delete_gizmo (pool->parent, (void *) (((char *) pool) + POOL_SIZE));

free_all_gizmos (pool);

/* Free all the memory. */
struct pool_block *cur, *next;

/* Break the circular block list so the walk below terminates. */
pool->blocks->prev->next = NULL;
for (cur = pool->blocks; cur; cur = next)
/* Release all the memory and gizmos in POOL.
   Blocks are not given back with free() but kept for later
   allocations. To give back memory, use a subpool instead. */
pool_clear (struct pool *pool)

free_all_gizmos (pool);

/* Zero out block sizes. */
struct pool_block *cur;

/* Reset each block's offset to just past its header. The block
   that embeds the pool struct must also skip the pool itself and,
   in a subpool, the gizmo linking it to its parent. */
cur->ofs = POOL_BLOCK_SIZE;
if ((char *) cur + POOL_BLOCK_SIZE == (char *) pool)

cur->ofs += POOL_SIZE;
if (pool->parent != NULL)
cur->ofs += POOL_GIZMO_SIZE;

while (cur != pool->blocks);
/* Suballocation routines. */

/* Allocates a memory region AMT bytes in size from POOL and returns a
   pointer to the region's start.
   The region is properly aligned for storing any object. */
pool_alloc (struct pool *pool, size_t amt)

assert (pool != NULL);

#ifndef DISCRETE_BLOCKS
if (amt <= MAX_SUBALLOC)

/* If there is space in this block, take it. */
struct pool_block *b = pool->blocks;
b->ofs = ROUND_UP (b->ofs, ALIGN_SIZE); /* Keep returned region aligned. */
if (b->ofs + amt <= BLOCK_SIZE)

void *const p = ((char *) b) + b->ofs;

/* No space in this block, so we must make other
if (b->next->ofs == 0)

/* The next block is empty. Use it. */

b->ofs = POOL_BLOCK_SIZE;
/* The block embedding the pool struct must also skip it. */
if ((char *) b + POOL_BLOCK_SIZE == (char *) pool)

/* Create a new block at the start of the list. */
b = xmalloc (BLOCK_SIZE);
b->next = pool->blocks;
b->prev = pool->blocks->prev;
b->ofs = POOL_BLOCK_SIZE;
pool->blocks->prev->next = b;
pool->blocks->prev = b;

/* Allocate space from B. */

return ((char *) b) + b->ofs - amt;

/* Requests too big to suballocate (or DISCRETE_BLOCKS builds)
   get a dedicated malloc() tracked as a gizmo instead. */
return pool_malloc (pool, amt);
/* Allocates a memory region AMT bytes in size from POOL and
   returns a pointer to the region's start. The region is not
   necessarily aligned, so it is most suitable for storing
pool_alloc_unaligned (struct pool *pool, size_t amt)

assert (pool != NULL);

#ifndef DISCRETE_BLOCKS
/* Strings need not be aligned on any boundary, but some
   operations may be more efficient when they are. However,
   that's only going to help with reasonably long strings. */
if (amt < ALIGN_SIZE)

/* Take bytes straight from the current offset, skipping the
   ROUND_UP that pool_alloc() would apply. */
struct pool_block *const b = pool->blocks;

if (b->ofs + amt <= BLOCK_SIZE)

void *p = ((char *) b) + b->ofs;

/* Otherwise fall back to an ordinary aligned allocation. */
return pool_alloc (pool, amt);
/* Allocates a memory region N * S bytes in size from POOL and
   returns a pointer to the region's start.
   N must be nonnegative, S must be positive.
   Terminates the program if the memory cannot be obtained,
   including the case where N * S overflows the range of size_t. */
pool_nalloc (struct pool *pool, size_t n, size_t s)

/* Reject N * S before multiplying, so the product cannot wrap. */
if (xalloc_oversized (n, s))

return pool_alloc (pool, n * s);
/* Allocates SIZE bytes in POOL, copies BUFFER into it, and
   returns the new copy. The copy is aligned for any object. */
pool_clone (struct pool *pool, const void *buffer, size_t size)

void *block = pool_alloc (pool, size);
memcpy (block, buffer, size);
/* Allocates SIZE bytes of unaligned data in POOL, copies BUFFER
   into it, and returns the new copy. */
pool_clone_unaligned (struct pool *pool, const void *buffer, size_t size)

void *block = pool_alloc_unaligned (pool, size);
memcpy (block, buffer, size);
/* Duplicates null-terminated STRING, within POOL, and returns a
   pointer to the duplicate. For use only with strings, because
   the returned pointer may not be aligned properly for other
pool_strdup (struct pool *pool, const char *string)

/* The +1 copies the terminating null byte as well. */
return pool_clone_unaligned (pool, string, strlen (string) + 1);
/* Formats FORMAT with the given ARGS in memory allocated from
   POOL and returns the formatted string. */
pool_vasprintf (struct pool *pool, const char *format, va_list args_)

struct pool_block *b;

/* First, optimistically format straight into the free space
   left in the current block. */
va_copy (args, args_);

avail = BLOCK_SIZE - b->ofs;
s = ((char *) b) + b->ofs;
needed = vsnprintf (s, avail, format, args);

/* Success. Reserve the space that was actually used. */
b->ofs += needed + 1;

/* Failure, but now we know how much space is needed.
   Allocate that much and reformat. */
s = pool_alloc (pool, needed + 1);

va_copy (args, args_);
vsprintf (s, format, args);

/* Some old libc's returned -1 when the destination string
   was too short. This should be uncommon these days and
   it's a rare case anyhow. Use the easiest solution: punt
   to dynamic allocation. */
va_copy (args, args_);
s = xvasprintf (format, args);

/* Let the pool own the malloc()'d string so it is released
   when the pool is destroyed. */
pool_register (pool, free, s);
/* Formats FORMAT in memory allocated from POOL
   and returns the formatted string.
   Variadic convenience wrapper around pool_vasprintf(). */
pool_asprintf (struct pool *pool, const char *format, ...)

va_start (args, format);
string = pool_vasprintf (pool, format, args);
/* Standard allocation routines. */

/* Allocates AMT bytes using malloc(), to be managed by POOL, and
   returns a pointer to the beginning of the block.
   If POOL is a null pointer, then allocates a normal memory block
pool_malloc (struct pool *pool, size_t amt)

/* A gizmo header is placed immediately before the user region;
   the caller receives the address just past it. */
struct pool_gizmo *g = xmalloc (amt + POOL_GIZMO_SIZE);
g->type = POOL_GIZMO_MALLOC;

return ((char *) g) + POOL_GIZMO_SIZE;

/* No pool: behave like plain xmalloc(). */
return xmalloc (amt);
/* Allocates and returns N elements of S bytes each, to be
   If POOL is a null pointer, then allocates a normal memory block
   N must be nonnegative, S must be positive.
   Terminates the program if the memory cannot be obtained,
   including the case where N * S overflows the range of size_t. */
pool_nmalloc (struct pool *pool, size_t n, size_t s)

/* Reject N * S before multiplying, so the product cannot wrap. */
if (xalloc_oversized (n, s))

return pool_malloc (pool, n * s);
/* Changes the allocation size of the specified memory block P managed
   by POOL to AMT bytes and returns a pointer to the beginning of the
   If POOL is a null pointer, then the block is reallocated in the
   usual way with realloc(). */
pool_realloc (struct pool *pool, void *p, size_t amt)

/* Step back from P to the gizmo header that precedes it. */
struct pool_gizmo *g = (void *) (((char *) p) - POOL_GIZMO_SIZE);
check_gizmo (pool, g);

/* xrealloc() may move the gizmo, so the neighboring list links
   must be re-pointed at its new address afterward. */
g = xrealloc (g, amt + POOL_GIZMO_SIZE);

check_gizmo (pool, g);

return ((char *) g) + POOL_GIZMO_SIZE;

/* Null P: behaves as a fresh allocation. */
return pool_malloc (pool, amt);

return xrealloc (p, amt);
/* Changes the allocation size of the specified memory block P
   managed by POOL to N * S bytes and returns a pointer to the
   beginning of the block.
   N must be nonnegative, S must be positive.
   If POOL is a null pointer, then the block is reallocated in
   the usual way with xrealloc().
   Terminates the program if the memory cannot be obtained,
   including the case where N * S overflows the range of size_t. */
pool_nrealloc (struct pool *pool, void *p, size_t n, size_t s)

/* Reject N * S before multiplying, so the product cannot wrap. */
if (xalloc_oversized (n, s))

return pool_realloc (pool, p, n * s);
/* If P is null, allocate a block of at least *PN such objects;
   otherwise, reallocate P so that it contains more than *PN
   objects each of S bytes. *PN must be nonzero unless P is
   null, and S must be nonzero. Set *PN to the new number of
   objects, and return the pointer to the new block. *PN is
   never set to zero, and the returned pointer is never null.

   The block returned is managed by POOL. If POOL is a null
   pointer, then the block is reallocated in the usual way with

   Terminates the program if the memory cannot be obtained,
   including the case where the memory required overflows the

   Repeated reallocations are guaranteed to make progress, either by
   allocating an initial block with a nonzero size, or by allocating a

   In the following implementation, nonzero sizes are doubled so that
   repeated reallocations have O(N log N) overall cost rather than
   O(N**2) cost, but the specification for this function does not
   guarantee that sizes are doubled.

   Here is an example of use:

   size_t allocated = 0;

   append_int (int value)

   if (used == allocated)
   p = pool_2nrealloc (pool, p, &allocated, sizeof *p);

   This causes x2nrealloc to allocate a block of some nonzero size the
   first time it is called.

   To have finer-grained control over the initial size, set *PN to a
   nonzero value before calling this function with P == NULL. For

   size_t allocated = 0;
   size_t allocated1 = 1000;

   append_int (int value)

   if (used == allocated)

   p = pool_2nrealloc (pool, p, &allocated1, sizeof *p);
   allocated = allocated1;

   This function implementation is from gnulib. */
pool_2nrealloc (struct pool *pool, void *p, size_t *pn, size_t s)

/* The approximate size to use for initial small allocation
   requests, when the invoking code specifies an old size of
   zero. 64 bytes is the largest "small" request for the
   GNU C library malloc. */
enum { DEFAULT_MXFAST = 64 };

n = DEFAULT_MXFAST / s;

/* Growing would exceed SIZE_MAX: terminate via the overflow path. */
if (SIZE_MAX / 2 / s < n)

return pool_realloc (pool, p, n * s);
/* Frees block P managed by POOL.
   If POOL is a null pointer, then the block is freed as usual with
pool_free (struct pool *pool, void *p)

if (pool != NULL && p != NULL)

/* Recover the gizmo header preceding P and unlink it from
   the pool before the storage is released. */
struct pool_gizmo *g = (void *) (((char *) p) - POOL_GIZMO_SIZE);
check_gizmo (pool, g);
delete_gizmo (pool, g);
/* Gizmo allocations. */

/* Creates and returns a pool as a subpool of POOL.
   The subpool will be destroyed automatically when POOL is destroyed.
   It may also be destroyed explicitly in advance. */
pool_create_subpool (struct pool *pool)

struct pool *subpool;
struct pool_gizmo *g;

assert (pool != NULL);
subpool = pool_create ();
subpool->parent = pool;

/* Carve the linking gizmo out of the subpool's own first block,
   directly after the embedded pool struct; pool_destroy() relies
   on this fixed position to find and unlink it again. */
g = (void *) (((char *) subpool->blocks) + subpool->blocks->ofs);
subpool->blocks->ofs += POOL_GIZMO_SIZE;

g->type = POOL_GIZMO_SUBPOOL;
g->p.subpool = subpool;
/* Makes SUBPOOL a subpool of POOL.
   SUBPOOL must not already have a parent pool.
   The subpool will be destroyed automatically when POOL is destroyed.
   It may also be destroyed explicitly in advance. */
pool_add_subpool (struct pool *pool, struct pool *subpool)

struct pool_gizmo *g;

assert (pool != NULL);
assert (subpool != NULL);
assert (subpool->parent == NULL);

/* NOTE(review): unlike pool_create_subpool(), the linking gizmo is
   obtained with pool_alloc() here — confirm in the full source that
   it lands at the offset pool_destroy() expects. */
g = pool_alloc (subpool, sizeof *g);
g->type = POOL_GIZMO_SUBPOOL;
g->p.subpool = subpool;

subpool->parent = pool;
/* Opens file FILE_NAME with mode MODE and returns a handle to it
   if successful or a null pointer if not.
   The file will be closed automatically when POOL is destroyed, or it
   may be closed explicitly in advance using pool_fclose(), or
   detached from the pool with pool_detach_file(). */
pool_fopen (struct pool *pool, const char *file_name, const char *mode)

assert (pool && file_name && mode);
f = fopen (file_name, mode);

pool_attach_file (pool, f);
/* Closes file FILE managed by POOL.
   Returns 0 if successful, EOF if an I/O error occurred. */
pool_fclose (struct pool *pool, FILE *file)

assert (pool && file);
/* Detach first so the pool cannot later fclose() the same
   handle a second time when it is destroyed. */
pool_detach_file (pool, file);
return fclose (file);
/* Creates a temporary file with tmpfile() and returns a handle to it
   if successful or a null pointer if not.
   The file will be closed automatically when POOL is destroyed, or it
   may be closed explicitly in advance using pool_fclose(), or
   detached from the pool with pool_detach_file(). */
pool_tmpfile (struct pool *pool)

FILE *file = tmpfile ();

pool_attach_file (pool, file);
/* Attaches FILE to POOL.
   The file will be closed automatically when POOL is destroyed, or it
   may be closed explicitly in advance using pool_fclose(), or
   detached from the pool with pool_detach_file(). */
pool_attach_file (struct pool *pool, FILE *file)

/* Record the handle in a gizmo so free_gizmo() closes it later. */
struct pool_gizmo *g = pool_alloc (pool, sizeof *g);
g->type = POOL_GIZMO_FILE;
/* Detaches FILE from POOL, so the pool no longer owns (and will
   not close) the handle. */
pool_detach_file (struct pool *pool, FILE *file)

struct pool_gizmo *g;

/* Linear search for the gizmo that records FILE. */
for (g = pool->gizmos; g; g = g->next)
if (g->type == POOL_GIZMO_FILE && g->p.file == file)

delete_gizmo (pool, g);
/* Registers FREE to be called with argument P.
   P should be unique among those registered in POOL so that it can be
   uniquely identified by pool_unregister().
   If not unregistered, FREE will be called with argument P when POOL
pool_register (struct pool *pool, void (*free) (void *), void *p)

assert (pool && free && p);

/* Store the callback and its argument in a gizmo; free_gizmo()
   invokes FREE (P) when the pool is cleared or destroyed. */
struct pool_gizmo *g = pool_alloc (pool, sizeof *g);
g->type = POOL_GIZMO_REGISTERED;
g->p.registered.free = free;
g->p.registered.p = p;
/* Unregisters previously registered P from POOL.
   Returns true only if P was found to be registered in POOL. */
pool_unregister (struct pool *pool, void *p)

struct pool_gizmo *g;

/* Linear search by the registered pointer itself. */
for (g = pool->gizmos; g; g = g->next)
if (g->type == POOL_GIZMO_REGISTERED && g->p.registered.p == p)

delete_gizmo (pool, g);
/* Partial freeing. */

/* Notes the state of POOL into MARK so that it may be restored
   by a call to pool_release(). */
pool_mark (struct pool *pool, struct pool_mark *mark)

assert (pool && mark);

/* Remember the current block and its fill offset... */
mark->block = pool->blocks;
mark->ofs = pool->blocks->ofs;

/* ...and the next gizmo serial number, so gizmos created after
   this point can be recognized by pool_release(). */
mark->serial = serial;
/* Restores to POOL the state recorded in MARK.
   Emptied blocks are not given back with free() but kept for
   later allocations. To get that behavior, use a subpool
pool_release (struct pool *pool, const struct pool_mark *mark)

assert (pool && mark);

struct pool_gizmo *cur, *next;

/* Gizmos are kept newest-first (add_gizmo() prepends), so every
   gizmo created since the mark sits at the head of the list. */
for (cur = pool->gizmos; cur && cur->serial >= mark->serial; cur = next)

struct pool_block *cur;

/* Empty each block allocated after the mark, resetting its
   offset to just past the headers it contains. */
for (cur = pool->blocks; cur != mark->block; cur = cur->next)

cur->ofs = POOL_BLOCK_SIZE;
if ((char *) cur + POOL_BLOCK_SIZE == (char *) pool)

cur->ofs += POOL_SIZE;
if (pool->parent != NULL)
cur->ofs += POOL_GIZMO_SIZE;

/* Finally roll the current block back to the marked offset. */
pool->blocks = mark->block;
pool->blocks->ofs = mark->ofs;
/* Private functions. */

/* Adds GIZMO at the beginning of POOL's gizmo list. */
add_gizmo (struct pool *pool, struct pool_gizmo *gizmo)

assert (pool && gizmo);

/* Prepend, keeping the list ordered newest-first. */
gizmo->next = pool->gizmos;

pool->gizmos->prev = gizmo;
pool->gizmos = gizmo;

/* Monotonically increasing serials let pool_release() identify
   the gizmos created after a mark. */
gizmo->serial = serial++;

check_gizmo (pool, gizmo);
/* Removes GIZMO from POOL's gizmo list.
   Does not free GIZMO's contents; see free_gizmo() for that. */
delete_gizmo (struct pool *pool, struct pool_gizmo *gizmo)

assert (pool && gizmo);

check_gizmo (pool, gizmo);

gizmo->prev->next = gizmo->next;

/* Removing the head: advance the pool's list pointer. */
pool->gizmos = gizmo->next;

gizmo->next->prev = gizmo->prev;
/* Frees any of GIZMO's internal state.
   GIZMO's data must not be referenced after calling this function. */
free_gizmo (struct pool_gizmo *gizmo)

assert (gizmo != NULL);

case POOL_GIZMO_MALLOC:

case POOL_GIZMO_FILE:
fclose (gizmo->p.file); /* Ignore errors. */

case POOL_GIZMO_SUBPOOL:
/* Detach first so pool_destroy() does not try to unlink the
   subpool from this (already-dying) parent again. */
gizmo->p.subpool->parent = NULL;
pool_destroy (gizmo->p.subpool);

case POOL_GIZMO_REGISTERED:
gizmo->p.registered.free (gizmo->p.registered.p);
/* Free all the gizmos in POOL. */
free_all_gizmos (struct pool *pool)

struct pool_gizmo *cur, *next;

/* NEXT is captured before CUR is freed, since freeing CUR
   invalidates its links. */
for (cur = pool->gizmos; cur; cur = next)
/* Sanity-checks that gizmo G belongs to pool P and that its
   doubly linked list neighbors point back at it. */
check_gizmo (struct pool *p, struct pool_gizmo *g)

assert (g->pool == p);
assert (g->next == NULL || g->next->prev == g);
/* A gizmo with no predecessor must be the list head. */
assert ((g->prev != NULL && g->prev->next == g)
|| (g->prev == NULL && p->gizmos == g));