/* PSPP - computes sample statistics.
   Copyright (C) 2000 Free Software Foundation, Inc.
   Written by Ben Pfaff <blp@gnu.org>.

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
   02110-1301, USA. */
#include <config.h>

#include "pool.h"

#include <stdarg.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include <libpspp/assertion.h>

#include "xalloc.h"             /* gnulib's xmalloc and xrealloc. */
#include "xvasprintf.h"         /* gnulib's xvasprintf. */
/* Fast, low-overhead memory block suballocator. */
struct pool
  {
    struct pool *parent;           /* Pool of which this pool is a subpool. */
    struct pool_block *blocks;     /* Blocks owned by the pool. */
    struct pool_gizmo *gizmos;     /* Other stuff owned by the pool. */
  };

/* Pool block. */
struct pool_block
  {
    struct pool_block *prev;
    struct pool_block *next;
    size_t ofs;                    /* Offset of the first free byte. */
  };

/* Gizmo types. */
enum
  {
    POOL_GIZMO_MALLOC,
    POOL_GIZMO_FILE,
    POOL_GIZMO_SUBPOOL,
    POOL_GIZMO_REGISTERED,
  };
/* Pool routines can maintain objects (`gizmos') as well as doing
   suballocation.
   This structure is used to keep track of them. */
struct pool_gizmo
  {
    struct pool *pool;             /* Pool that owns this gizmo. */
    struct pool_gizmo *prev;
    struct pool_gizmo *next;

    long serial;                   /* Serial number. */
    int type;                      /* Type of this gizmo. */

    /* Type-dependent info. */
    union
      {
        FILE *file;                /* POOL_GIZMO_FILE. */
        struct pool *subpool;      /* POOL_GIZMO_SUBPOOL. */

        /* POOL_GIZMO_REGISTERED. */
        struct
          {
            void *p;
            void (*free) (void *p);
          }
        registered;
      }
    p;
  };
/* Rounds X up to the next multiple of Y. */
#define ROUND_UP(X, Y)                          \
        (((X) + ((Y) - 1)) / (Y) * (Y))
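/* For example, ROUND_UP (10, 8) evaluates to 16, and ROUND_UP (16, 8)
   stays 16: values already on a multiple of Y are left unchanged. */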
/* Types that provide typically useful alignment sizes. */
union align
  {
    void *op;           /* Object pointer. */
    void (*fp) (void);  /* Function pointer. */
    long l;             /* Integer. */
    double d;           /* Floating point. */
  };

/* This should be the alignment size used by malloc().  The size of
   the union above is correct, if not optimal, in all known cases. */
#define ALIGN_SIZE sizeof (union align)
/* DISCRETE_BLOCKS may be declared as nonzero to prevent
   suballocation of blocks.  This is useful under memory
   debuggers like Checker or valgrind because it allows the
   source location of bugs to be more accurately pinpointed.

   On the other hand, if we're testing the library, then we want to
   test the library's real functionality, not its crippled, slow,
   simplified functionality. */
/*#define DISCRETE_BLOCKS 1*/
/* Size of each block allocated in the pool, in bytes.
   Should be at least 1k. */
#define BLOCK_SIZE 1024
/* Sizes of some structures with alignment padding included. */
#define POOL_BLOCK_SIZE ROUND_UP (sizeof (struct pool_block), ALIGN_SIZE)
#define POOL_GIZMO_SIZE ROUND_UP (sizeof (struct pool_gizmo), ALIGN_SIZE)
#define POOL_SIZE ROUND_UP (sizeof (struct pool), ALIGN_SIZE)
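/* Layout note (derived from pool_create(), pool_clear(), and
   pool_create_subpool() below): a pool's first block carries the pool's
   own bookkeeping before any suballocations, so its initial offset is
   POOL_BLOCK_SIZE + POOL_SIZE, plus POOL_GIZMO_SIZE if the pool is a
   subpool:

     [struct pool_block][struct pool][subpool gizmo, if any][data...]

   Every other block reserves only POOL_BLOCK_SIZE at its start. */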
/* Serial number used to keep track of gizmos for mark/release. */
static long serial = 0;
static void add_gizmo (struct pool *, struct pool_gizmo *);
static void free_gizmo (struct pool_gizmo *);
static void free_all_gizmos (struct pool *pool);
static void delete_gizmo (struct pool *, struct pool_gizmo *);
static void check_gizmo (struct pool *, struct pool_gizmo *);
/* General routines. */

/* Creates and returns a new memory pool, which allows malloc()'d
   blocks to be suballocated in a time- and space-efficient manner.
   The entire contents of the memory pool are freed at once.

   In addition, other objects can be associated with a memory pool.
   These are released when the pool is destroyed. */
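/* A typical lifecycle, as a sketch (NAME and COUNTS are illustrative):

     struct pool *pool = pool_create ();
     char *name = pool_strdup (pool, "example");
     int *counts = pool_nalloc (pool, 16, sizeof *counts);
     ...use NAME and COUNTS...
     pool_destroy (pool);

   pool_destroy() frees NAME, COUNTS, and the pool itself; nothing
   allocated from the pool may be used afterward. */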
  struct pool_block *block;

  block = xmalloc (BLOCK_SIZE);
  block->prev = block->next = block;
  block->ofs = POOL_BLOCK_SIZE + POOL_SIZE;

  pool = (struct pool *) (((char *) block) + POOL_BLOCK_SIZE);
  pool->blocks = block;
/* Creates a pool, allocates a block STRUCT_SIZE bytes in
   length from it, stores the pool's address at offset
   POOL_MEMBER_OFFSET within the block, and returns the allocated
   block.

   Meant for use indirectly via pool_create_container(). */
pool_create_at_offset (size_t struct_size, size_t pool_member_offset)

  assert (struct_size >= sizeof pool);
  assert (pool_member_offset <= struct_size - sizeof pool);

  pool = pool_create ();
  struct_ = pool_alloc (pool, struct_size);
  *(struct pool **) (struct_ + pool_member_offset) = pool;
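/* For example (a sketch; pool_create_container() is the wrapper macro
   mentioned above, and struct dictionary is a hypothetical client):

     struct dictionary
       {
         struct pool *pool;
         ...other members...
       };

     struct dictionary *d = pool_create_container (struct dictionary, pool);

   Destroying D->pool then frees D itself along with everything else
   allocated from that pool. */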
/* Destroys the specified pool, including all subpools. */
pool_destroy (struct pool *pool)

  /* Remove this pool from its parent's list of gizmos. */
    delete_gizmo (pool->parent, (void *) (((char *) pool) + POOL_SIZE));

  free_all_gizmos (pool);

  /* Free all the memory. */
    struct pool_block *cur, *next;

    pool->blocks->prev->next = NULL;
    for (cur = pool->blocks; cur; cur = next)
/* Releases all the memory and gizmos in POOL.
   Blocks are not given back with free() but kept for later
   allocations.  To give back memory, use a subpool instead. */
pool_clear (struct pool *pool)

  free_all_gizmos (pool);

  /* Zero out block sizes. */
    struct pool_block *cur;

        cur->ofs = POOL_BLOCK_SIZE;
        if ((char *) cur + POOL_BLOCK_SIZE == (char *) pool)
            cur->ofs += POOL_SIZE;
            if (pool->parent != NULL)
              cur->ofs += POOL_GIZMO_SIZE;

    while (cur != pool->blocks);
/* Suballocation routines. */

/* Allocates a memory region AMT bytes in size from POOL and returns a
   pointer to the region's start.
   The region is properly aligned for storing any object. */
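/* For instance (a sketch; struct coord is hypothetical):

     struct coord { double x, y; };
     struct coord *pts = pool_alloc (pool, 8 * sizeof *pts);

   The result is aligned for any ordinary object type, so a struct like
   this can be stored in it directly. */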
pool_alloc (struct pool *pool, size_t amt)

  assert (pool != NULL);

#ifndef DISCRETE_BLOCKS
  if (amt <= MAX_SUBALLOC)
      /* If there is space in this block, take it. */
      struct pool_block *b = pool->blocks;
      b->ofs = ROUND_UP (b->ofs, ALIGN_SIZE);
      if (b->ofs + amt <= BLOCK_SIZE)
          void *const p = ((char *) b) + b->ofs;

      /* No space in this block, so we must make other
         arrangements. */
      if (b->next->ofs == 0)
          /* The next block is empty.  Use it. */
          b->ofs = POOL_BLOCK_SIZE;
          if ((char *) b + POOL_BLOCK_SIZE == (char *) pool)

          /* Create a new block at the start of the list. */
          b = xmalloc (BLOCK_SIZE);
          b->next = pool->blocks;
          b->prev = pool->blocks->prev;
          b->ofs = POOL_BLOCK_SIZE;
          pool->blocks->prev->next = b;
          pool->blocks->prev = b;

      /* Allocate space from B. */
      return ((char *) b) + b->ofs - amt;

  return pool_malloc (pool, amt);
/* Allocates a memory region AMT bytes in size from POOL and
   returns a pointer to the region's start.  The region is not
   necessarily aligned, so it is most suitable for storing
   strings. */
pool_alloc_unaligned (struct pool *pool, size_t amt)

  assert (pool != NULL);

#ifndef DISCRETE_BLOCKS
  /* Strings need not be aligned on any boundary, but some
     operations may be more efficient when they are.  However,
     that's only going to help with reasonably long strings. */
  if (amt < ALIGN_SIZE)
      struct pool_block *const b = pool->blocks;

      if (b->ofs + amt <= BLOCK_SIZE)
          void *p = ((char *) b) + b->ofs;

  return pool_alloc (pool, amt);
/* Allocates a memory region N * S bytes in size from POOL and
   returns a pointer to the region's start.
   N must be nonnegative, S must be positive.
   Terminates the program if the memory cannot be obtained,
   including the case where N * S overflows the range of size_t. */
pool_nalloc (struct pool *pool, size_t n, size_t s)

  if (xalloc_oversized (n, s))
    xalloc_die ();
  return pool_alloc (pool, n * s);
/* Allocates SIZE bytes in POOL, copies BUFFER into it, and
   returns the new copy. */
pool_clone (struct pool *pool, const void *buffer, size_t size)

  void *block = pool_alloc (pool, size);
  memcpy (block, buffer, size);

/* Allocates SIZE bytes of unaligned data in POOL, copies BUFFER
   into it, and returns the new copy. */
pool_clone_unaligned (struct pool *pool, const void *buffer, size_t size)

  void *block = pool_alloc_unaligned (pool, size);
  memcpy (block, buffer, size);
/* Duplicates null-terminated STRING, within POOL, and returns a
   pointer to the duplicate.  For use only with strings, because
   the returned pointer may not be aligned properly for other
   types. */
pool_strdup (struct pool *pool, const char *string)

  return pool_clone_unaligned (pool, string, strlen (string) + 1);
/* Formats FORMAT with the given ARGS in memory allocated from
   POOL and returns the formatted string. */
pool_vasprintf (struct pool *pool, const char *format, va_list args_)

  struct pool_block *b;

  va_copy (args, args_);

  avail = BLOCK_SIZE - b->ofs;
  s = ((char *) b) + b->ofs;
  needed = vsnprintf (s, avail, format, args);

      /* Success.  Reserve the space that was actually used. */
      b->ofs += needed + 1;

      /* Failure, but now we know how much space is needed.
         Allocate that much and reformat. */
      s = pool_alloc (pool, needed + 1);

      va_copy (args, args_);
      vsprintf (s, format, args);

      /* Some old libc's returned -1 when the destination string
         was too short.  This should be uncommon these days and
         it's a rare case anyhow.  Use the easiest solution: punt
         to dynamic allocation. */
      va_copy (args, args_);
      s = xvasprintf (format, args);

      pool_register (pool, free, s);
/* Formats FORMAT in memory allocated from POOL
   and returns the formatted string. */
pool_asprintf (struct pool *pool, const char *format, ...)

  va_start (args, format);
  string = pool_vasprintf (pool, format, args);
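/* String helpers in combination, as a sketch (TITLE, LINE, and N_ROWS
   are illustrative):

     char *title = pool_strdup (pool, "Report");
     char *line = pool_asprintf (pool, "%s: %d rows", title, n_rows);

   Both strings remain valid until the pool is cleared or destroyed. */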
/* Standard allocation routines. */

/* Allocates AMT bytes using malloc(), to be managed by POOL, and
   returns a pointer to the beginning of the block.
   If POOL is a null pointer, then allocates a normal memory block
   with xmalloc(). */
pool_malloc (struct pool *pool, size_t amt)

      struct pool_gizmo *g = xmalloc (amt + POOL_GIZMO_SIZE);
      g->type = POOL_GIZMO_MALLOC;

      return ((char *) g) + POOL_GIZMO_SIZE;

  return xmalloc (amt);
/* Allocates and returns N elements of S bytes each, to be
   managed by POOL.
   If POOL is a null pointer, then allocates a normal memory block
   with xmalloc().
   N must be nonnegative, S must be positive.
   Terminates the program if the memory cannot be obtained,
   including the case where N * S overflows the range of size_t. */
pool_nmalloc (struct pool *pool, size_t n, size_t s)

  if (xalloc_oversized (n, s))
    xalloc_die ();
  return pool_malloc (pool, n * s);
/* Changes the allocation size of the specified memory block P managed
   by POOL to AMT bytes and returns a pointer to the beginning of the
   block.
   If POOL is a null pointer, then the block is reallocated in the
   usual way with xrealloc(). */
pool_realloc (struct pool *pool, void *p, size_t amt)

          struct pool_gizmo *g = (void *) (((char *) p) - POOL_GIZMO_SIZE);
          check_gizmo (pool, g);

          g = xrealloc (g, amt + POOL_GIZMO_SIZE);

          check_gizmo (pool, g);

          return ((char *) g) + POOL_GIZMO_SIZE;

        return pool_malloc (pool, amt);

  return xrealloc (p, amt);
/* Changes the allocation size of the specified memory block P
   managed by POOL to N * S bytes and returns a pointer to the
   beginning of the block.
   N must be nonnegative, S must be positive.
   If POOL is a null pointer, then the block is reallocated in
   the usual way with xrealloc().
   Terminates the program if the memory cannot be obtained,
   including the case where N * S overflows the range of size_t. */
pool_nrealloc (struct pool *pool, void *p, size_t n, size_t s)

  if (xalloc_oversized (n, s))
    xalloc_die ();
  return pool_realloc (pool, p, n * s);
/* If P is null, allocate a block of at least *PN such objects;
   otherwise, reallocate P so that it contains more than *PN
   objects each of S bytes.  *PN must be nonzero unless P is
   null, and S must be nonzero.  Set *PN to the new number of
   objects, and return the pointer to the new block.  *PN is
   never set to zero, and the returned pointer is never null.

   The block returned is managed by POOL.  If POOL is a null
   pointer, then the block is reallocated in the usual way with
   xrealloc().

   Terminates the program if the memory cannot be obtained,
   including the case where the memory required overflows the
   range of size_t.

   Repeated reallocations are guaranteed to make progress, either by
   allocating an initial block with a nonzero size, or by allocating a
   larger block.

   In the following implementation, nonzero sizes are doubled so that
   repeated reallocations have O(N log N) overall cost rather than
   O(N**2) cost, but the specification for this function does not
   guarantee that sizes are doubled.

   Here is an example of use:

     int *p = NULL;
     size_t used = 0;
     size_t allocated = 0;

     void
     append_int (int value)
       {
         if (used == allocated)
           p = pool_2nrealloc (pool, p, &allocated, sizeof *p);
         p[used++] = value;
       }

   This causes x2nrealloc to allocate a block of some nonzero size the
   first time it is called.

   To have finer-grained control over the initial size, set *PN to a
   nonzero value before calling this function with P == NULL.  For
   example:

     int *p = NULL;
     size_t used = 0;
     size_t allocated = 0;
     size_t allocated1 = 1000;

     void
     append_int (int value)
       {
         if (used == allocated)
           {
             p = pool_2nrealloc (pool, p, &allocated1, sizeof *p);
             allocated = allocated1;
           }
         p[used++] = value;
       }

   This function implementation is from gnulib. */
pool_2nrealloc (struct pool *pool, void *p, size_t *pn, size_t s)

          /* The approximate size to use for initial small allocation
             requests, when the invoking code specifies an old size of
             zero.  64 bytes is the largest "small" request for the
             GNU C library malloc. */
          enum { DEFAULT_MXFAST = 64 };

          n = DEFAULT_MXFAST / s;

      if (SIZE_MAX / 2 / s < n)
        xalloc_die ();

  return pool_realloc (pool, p, n * s);
/* Frees block P managed by POOL.
   If POOL is a null pointer, then the block is freed as usual with
   free(). */
pool_free (struct pool *pool, void *p)

  if (pool != NULL && p != NULL)
      struct pool_gizmo *g = (void *) (((char *) p) - POOL_GIZMO_SIZE);
      check_gizmo (pool, g);
      delete_gizmo (pool, g);
/* Gizmo allocations. */

/* Creates and returns a pool as a subpool of POOL.
   The subpool will be destroyed automatically when POOL is destroyed.
   It may also be destroyed explicitly in advance. */
pool_create_subpool (struct pool *pool)

  struct pool *subpool;
  struct pool_gizmo *g;

  assert (pool != NULL);
  subpool = pool_create ();
  subpool->parent = pool;

  g = (void *) (((char *) subpool->blocks) + subpool->blocks->ofs);
  subpool->blocks->ofs += POOL_GIZMO_SIZE;

  g->type = POOL_GIZMO_SUBPOOL;
  g->p.subpool = subpool;
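/* Subpool use, as a sketch (SESSION and REQUEST are illustrative):

     struct pool *session = pool_create ();
     struct pool *request = pool_create_subpool (session);
     ...allocate per-request objects from REQUEST...
     pool_destroy (request);

   Destroying REQUEST returns its memory to the system immediately;
   destroying SESSION later would have destroyed REQUEST too.  This is
   the way to give memory back early, which pool_clear() does not do. */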
/* Makes SUBPOOL a subpool of POOL.
   SUBPOOL must not already have a parent pool.
   The subpool will be destroyed automatically when POOL is destroyed.
   It may also be destroyed explicitly in advance. */
pool_add_subpool (struct pool *pool, struct pool *subpool)

  struct pool_gizmo *g;

  assert (pool != NULL);
  assert (subpool != NULL);
  assert (subpool->parent == NULL);

  g = pool_alloc (subpool, sizeof *g);
  g->type = POOL_GIZMO_SUBPOOL;
  g->p.subpool = subpool;

  subpool->parent = pool;
/* Opens file FILE_NAME with mode MODE and returns a handle to it
   if successful or a null pointer if not.
   The file will be closed automatically when POOL is destroyed, or it
   may be closed explicitly in advance using pool_fclose(), or
   detached from the pool with pool_detach_file(). */
pool_fopen (struct pool *pool, const char *file_name, const char *mode)

  assert (pool && file_name && mode);
  f = fopen (file_name, mode);

/* Closes file FILE managed by POOL.
   Returns 0 if successful, EOF if an I/O error occurred. */
pool_fclose (struct pool *pool, FILE *file)

  assert (pool && file);
  pool_detach_file (pool, file);
  return fclose (file);
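/* File handling, as a sketch (FILE_NAME is illustrative):

     FILE *f = pool_fopen (pool, file_name, "r");
     if (f == NULL)
       ...handle the error...
     ...read from F...

   There is no need to close F explicitly: destroying the pool closes
   it.  Call pool_fclose() to close it earlier, or pool_detach_file()
   to take ownership away from the pool. */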
/* Creates a temporary file with tmpfile() and returns a handle to it
   if successful or a null pointer if not.
   The file will be closed automatically when POOL is destroyed, or it
   may be closed explicitly in advance using pool_fclose(), or
   detached from the pool with pool_detach_file(). */
pool_tmpfile (struct pool *pool)

  FILE *file = tmpfile ();
    pool_attach_file (pool, file);

/* Attaches FILE to POOL.
   The file will be closed automatically when POOL is destroyed, or it
   may be closed explicitly in advance using pool_fclose(), or
   detached from the pool with pool_detach_file(). */
pool_attach_file (struct pool *pool, FILE *file)

  struct pool_gizmo *g = pool_alloc (pool, sizeof *g);
  g->type = POOL_GIZMO_FILE;
/* Detaches FILE from POOL. */
pool_detach_file (struct pool *pool, FILE *file)

  struct pool_gizmo *g;

  for (g = pool->gizmos; g; g = g->next)
    if (g->type == POOL_GIZMO_FILE && g->p.file == file)
        delete_gizmo (pool, g);
/* Registers FREE to be called with argument P.
   P should be unique among those registered in POOL so that it can be
   uniquely identified by pool_unregister().
   If not unregistered, FREE will be called with argument P when POOL
   is destroyed. */
pool_register (struct pool *pool, void (*free) (void *), void *p)

  assert (pool && free && p);

    struct pool_gizmo *g = pool_alloc (pool, sizeof *g);
    g->type = POOL_GIZMO_REGISTERED;
    g->p.registered.free = free;
    g->p.registered.p = p;
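/* Registration ties an object obtained outside the pool to the pool's
   lifetime.  For example (a sketch, mirroring the xvasprintf() case in
   pool_vasprintf() above):

     char *s = xvasprintf (format, args);
     pool_register (pool, free, s);

   S is then freed when the pool is destroyed, unless pool_unregister()
   is called on it first. */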
/* Unregisters previously registered P from POOL.
   Returns true only if P was found to be registered in POOL. */
pool_unregister (struct pool *pool, void *p)

    struct pool_gizmo *g;

    for (g = pool->gizmos; g; g = g->next)
      if (g->type == POOL_GIZMO_REGISTERED && g->p.registered.p == p)
          delete_gizmo (pool, g);
/* Partial freeing. */

/* Notes the state of POOL into MARK so that it may be restored
   by a call to pool_release(). */
pool_mark (struct pool *pool, struct pool_mark *mark)

  assert (pool && mark);

  mark->block = pool->blocks;
  mark->ofs = pool->blocks->ofs;

  mark->serial = serial;
/* Restores to POOL the state recorded in MARK.
   Emptied blocks are not given back with free() but kept for
   later allocations.  To get that behavior, use a subpool
   instead. */
pool_release (struct pool *pool, const struct pool_mark *mark)

  assert (pool && mark);

    struct pool_gizmo *cur, *next;

    for (cur = pool->gizmos; cur && cur->serial >= mark->serial; cur = next)

    struct pool_block *cur;

    for (cur = pool->blocks; cur != mark->block; cur = cur->next)
        cur->ofs = POOL_BLOCK_SIZE;
        if ((char *) cur + POOL_BLOCK_SIZE == (char *) pool)
            cur->ofs += POOL_SIZE;
            if (pool->parent != NULL)
              cur->ofs += POOL_GIZMO_SIZE;

    pool->blocks = mark->block;
    pool->blocks->ofs = mark->ofs;
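/* Mark/release, as a sketch: bulk-free everything allocated after a
   mark without touching earlier allocations (SCRATCH is illustrative):

     struct pool_mark mark;
     pool_mark (pool, &mark);
     char *scratch = pool_alloc (pool, 4096);
     ...use SCRATCH...
     pool_release (pool, &mark);

   After pool_release(), SCRATCH is invalid.  Gizmos created after the
   mark (files, registrations, subpools) are freed too; earlier
   allocations and gizmos are untouched. */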
/* Private functions. */

/* Adds GIZMO at the beginning of POOL's gizmo list. */
add_gizmo (struct pool *pool, struct pool_gizmo *gizmo)

  assert (pool && gizmo);

  gizmo->next = pool->gizmos;
    pool->gizmos->prev = gizmo;
  pool->gizmos = gizmo;

  gizmo->serial = serial++;

  check_gizmo (pool, gizmo);
/* Removes GIZMO from POOL's gizmo list. */
delete_gizmo (struct pool *pool, struct pool_gizmo *gizmo)

  assert (pool && gizmo);

  check_gizmo (pool, gizmo);

    gizmo->prev->next = gizmo->next;
    pool->gizmos = gizmo->next;
    gizmo->next->prev = gizmo->prev;
/* Frees any of GIZMO's internal state.
   GIZMO's data must not be referenced after calling this function. */
free_gizmo (struct pool_gizmo *gizmo)

  assert (gizmo != NULL);

    case POOL_GIZMO_MALLOC:

    case POOL_GIZMO_FILE:
      fclose (gizmo->p.file);        /* Ignore errors. */

    case POOL_GIZMO_SUBPOOL:
      gizmo->p.subpool->parent = NULL;
      pool_destroy (gizmo->p.subpool);

    case POOL_GIZMO_REGISTERED:
      gizmo->p.registered.free (gizmo->p.registered.p);
/* Frees all the gizmos in POOL. */
free_all_gizmos (struct pool *pool)

  struct pool_gizmo *cur, *next;

  for (cur = pool->gizmos; cur; cur = next)
check_gizmo (struct pool *p, struct pool_gizmo *g)

  assert (g->pool == p);
  assert (g->next == NULL || g->next->prev == g);
  assert ((g->prev != NULL && g->prev->next == g)
          || (g->prev == NULL && p->gizmos == g));