/* PSPP - a program for statistical analysis.
   Copyright (C) 2000, 2010 Free Software Foundation, Inc.

   This program is free software: you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation, either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>. */

#include <config.h>

#include "libpspp/pool.h"

#include <stdarg.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include "libpspp/assertion.h"
#include "libpspp/message.h"
#include "libpspp/temp-file.h"
#include "libpspp/str.h"

#include "gl/xalloc.h"

/* Fast, low-overhead memory block suballocator. */
struct pool
  {
    struct pool *parent;              /* Pool of which this pool is a subpool. */
    struct pool_block *blocks;        /* Blocks owned by the pool. */
    struct pool_gizmo *gizmos;        /* Other stuff owned by the pool. */
  };

/* Pool block: one chunk of memory that suballocations are carved from. */
struct pool_block
  {
    struct pool_block *prev;
    struct pool_block *next;
    size_t ofs;                       /* Offset of the first unused byte. */
  };

/* Types of gizmos. */
enum
  {
    POOL_GIZMO_MALLOC,
    POOL_GIZMO_FILE,
    POOL_GIZMO_TEMP_FILE,
    POOL_GIZMO_SUBPOOL,
    POOL_GIZMO_REGISTERED,
  };

/* Pool routines can maintain objects (`gizmos') as well as doing
   suballocation.
   This structure is used to keep track of them. */
struct pool_gizmo
  {
    struct pool *pool;                /* Pool that owns this gizmo. */
    struct pool_gizmo *prev;
    struct pool_gizmo *next;

    long serial;                      /* Serial number. */
    int type;                         /* Type of this gizmo. */

    /* Type-dependent info. */
    union
      {
        FILE *file;                   /* POOL_GIZMO_FILE, POOL_GIZMO_TEMP_FILE. */
        struct pool *subpool;         /* POOL_GIZMO_SUBPOOL. */

        /* POOL_GIZMO_REGISTERED. */
        struct
          {
            void (*free) (void *p);
            void *p;
          }
        registered;
      }
    p;
  };

/* Rounds X up to the next multiple of Y. */
#define ROUND_UP(X, Y)                          \
        (((X) + ((Y) - 1)) / (Y) * (Y))

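/* For example (illustrative): ROUND_UP (37, 16) expands to
   ((37 + 15) / 16) * 16; the integer division truncates 52 / 16 to 3,
   so the result is 48, the smallest multiple of 16 that is >= 37. */
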
/* Types that provide typically useful alignment sizes. */
union align
  {
    void *op;
    void (*fp) (void);
    long l;
    double d;
  };

/* This should be the alignment size used by malloc().  The size of
   the union above is correct, if not optimal, in all known cases. */
#define ALIGN_SIZE sizeof (union align)

/* DISCRETE_BLOCKS may be declared as nonzero to prevent
   suballocation of blocks.  This is useful under memory
   debuggers like valgrind because it allows the source location
   of bugs to be more accurately pinpointed.

   On the other hand, if we're testing the library, then we want to
   test the library's real functionality, not its crippled, slow,
   simplified functionality. */
/*#define DISCRETE_BLOCKS 1*/

/* Size of each block allocated in the pool, in bytes.
   Should be at least 1k. */
#define BLOCK_SIZE 1024

/* Maximum size of a suballocated region, in bytes.  Larger requests
   are passed straight to pool_malloc() so that the tail of a block is
   not wasted on them. */
#define MAX_SUBALLOC 64

/* Sizes of some structures with alignment padding included. */
#define POOL_BLOCK_SIZE ROUND_UP (sizeof (struct pool_block), ALIGN_SIZE)
#define POOL_GIZMO_SIZE ROUND_UP (sizeof (struct pool_gizmo), ALIGN_SIZE)
#define POOL_SIZE ROUND_UP (sizeof (struct pool), ALIGN_SIZE)

/* Serial number used to keep track of gizmos for mark/release. */
static long serial = 0;

static void add_gizmo (struct pool *, struct pool_gizmo *);
static void free_gizmo (struct pool_gizmo *);
static void free_all_gizmos (struct pool *pool);
static void delete_gizmo (struct pool *, struct pool_gizmo *);
static void check_gizmo (struct pool *, struct pool_gizmo *);

/* General routines. */

/* Creates and returns a new memory pool, which allows malloc()'d
   blocks to be suballocated in a time- and space-efficient manner.
   The entire contents of the memory pool are freed at once.

   In addition, other objects can be associated with a memory pool.
   These are released when the pool is destroyed. */
struct pool *
pool_create (void)
{
  struct pool_block *block;
  struct pool *pool;

  block = xmalloc (BLOCK_SIZE);
  block->prev = block->next = block;
  block->ofs = POOL_BLOCK_SIZE + POOL_SIZE;

  pool = (struct pool *) (((char *) block) + POOL_BLOCK_SIZE);
  pool->parent = NULL;
  pool->blocks = block;
  pool->gizmos = NULL;

  return pool;
}

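/* A minimal usage sketch (illustrative only, not part of the library):
   everything suballocated below is released by the single call to
   pool_destroy(); the variable names are hypothetical.

     struct pool *pool = pool_create ();
     char *name = pool_strdup (pool, "example");
     int *counts = pool_nalloc (pool, 10, sizeof *counts);
     ...use name and counts...
     pool_destroy (pool);
*/
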
/* Creates a pool, allocates a block STRUCT_SIZE bytes in
   length from it, stores the pool's address at offset
   POOL_MEMBER_OFFSET within the block, and returns the allocated
   block.

   Meant for use indirectly via pool_create_container(). */
void *
pool_create_at_offset (size_t struct_size, size_t pool_member_offset)
{
  struct pool *pool;
  char *struct_;

  assert (struct_size >= sizeof pool);
  assert (pool_member_offset <= struct_size - sizeof pool);

  pool = pool_create ();
  struct_ = pool_alloc (pool, struct_size);
  *(struct pool **) (struct_ + pool_member_offset) = pool;
  return struct_;
}

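/* Illustrative sketch of direct use (normally reached through the
   pool_create_container() wrapper mentioned above); struct my_container
   is a hypothetical caller-defined type:

     struct my_container
       {
         struct pool *pool;
         int data;
       };

     struct my_container *c
       = pool_create_at_offset (sizeof *c,
                                offsetof (struct my_container, pool));
     ...use c->data, allocate more from c->pool...
     pool_destroy (c->pool);      (also frees C itself)
*/
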
/* Destroy the specified pool, including all subpools. */
void
pool_destroy (struct pool *pool)
{
  if (pool == NULL)
    return;

  /* Remove this pool from its parent's list of gizmos. */
  if (pool->parent)
    delete_gizmo (pool->parent, (void *) (((char *) pool) + POOL_SIZE));

  free_all_gizmos (pool);

  /* Free all the memory. */
  {
    struct pool_block *cur, *next;

    pool->blocks->prev->next = NULL;
    for (cur = pool->blocks; cur; cur = next)
      {
        next = cur->next;
        free (cur);
      }
  }
}

/* Release all the memory and gizmos in POOL.
   Blocks are not given back with free() but kept for later
   allocations.  To give back memory, use a subpool instead. */
void
pool_clear (struct pool *pool)
{
  free_all_gizmos (pool);

  /* Zero out block sizes. */
  {
    struct pool_block *cur;

    cur = pool->blocks;
    do
      {
        cur->ofs = POOL_BLOCK_SIZE;
        if ((char *) cur + POOL_BLOCK_SIZE == (char *) pool)
          {
            cur->ofs += POOL_SIZE;
            if (pool->parent != NULL)
              cur->ofs += POOL_GIZMO_SIZE;
          }
        cur = cur->next;
      }
    while (cur != pool->blocks);
  }
}

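/* Illustrative sketch: pool_clear() lets one pool be reused across
   iterations without returning its blocks to the system.  The loop,
   LINE, and parse_line() are hypothetical:

     struct pool *pool = pool_create ();
     while (...another line to read...)
       {
         pool_clear (pool);
         char *copy = pool_strdup (pool, line);
         parse_line (copy);
       }
     pool_destroy (pool);
*/
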
/* Suballocation routines. */

/* Allocates a memory region AMT bytes in size from POOL and returns a
   pointer to the region's start.
   The region is properly aligned for storing any object. */
void *
pool_alloc (struct pool *pool, size_t amt)
{
  assert (pool != NULL);

  if (amt == 0)
    return NULL;

#ifndef DISCRETE_BLOCKS
  if (amt <= MAX_SUBALLOC)
    {
      /* If there is space in this block, take it. */
      struct pool_block *b = pool->blocks;
      b->ofs = ROUND_UP (b->ofs, ALIGN_SIZE);
      if (b->ofs + amt <= BLOCK_SIZE)
        {
          void *const p = ((char *) b) + b->ofs;
          b->ofs += amt;
          return p;
        }

      /* No space in this block, so we must make other
         arrangements. */
      if (b->next->ofs == 0)
        {
          /* The next block is empty.  Use it. */
          b = b->next;
          b->ofs = POOL_BLOCK_SIZE;
          if ((char *) b + POOL_BLOCK_SIZE == (char *) pool)
            b->ofs += POOL_SIZE;
        }
      else
        {
          /* Create a new block at the start of the list. */
          b = xmalloc (BLOCK_SIZE);
          b->next = pool->blocks;
          b->prev = pool->blocks->prev;
          b->ofs = POOL_BLOCK_SIZE;
          pool->blocks->prev->next = b;
          pool->blocks->prev = b;
        }
      pool->blocks = b;

      /* Allocate space from B. */
      b->ofs += amt;
      return ((char *) b) + b->ofs - amt;
    }
  else
#endif
    return pool_malloc (pool, amt);
}

/* Allocates a memory region AMT bytes in size from POOL and
   returns a pointer to the region's start.  The region is not
   necessarily aligned, so it is most suitable for storing
   strings. */
void *
pool_alloc_unaligned (struct pool *pool, size_t amt)
{
  assert (pool != NULL);

#ifndef DISCRETE_BLOCKS
  /* Strings need not be aligned on any boundary, but some
     operations may be more efficient when they are.  However,
     that's only going to help with reasonably long strings. */
  if (amt < ALIGN_SIZE)
    {
      /* Take the space from the current block if it fits. */
      struct pool_block *const b = pool->blocks;

      if (b->ofs + amt <= BLOCK_SIZE)
        {
          void *p = ((char *) b) + b->ofs;
          b->ofs += amt;
          return p;
        }
    }
#endif

  return pool_alloc (pool, amt);
}

/* Allocates a memory region N * S bytes in size from POOL and
   returns a pointer to the region's start.
   N must be nonnegative, S must be positive.
   Terminates the program if the memory cannot be obtained,
   including the case where N * S overflows the range of size_t. */
void *
pool_nalloc (struct pool *pool, size_t n, size_t s)
{
  if (xalloc_oversized (n, s))
    xalloc_die ();
  return pool_alloc (pool, n * s);
}

/* Allocates SIZE bytes in POOL, copies BUFFER into it, and
   returns the new copy. */
void *
pool_clone (struct pool *pool, const void *buffer, size_t size)
{
  void *block = pool_alloc (pool, size);
  memcpy (block, buffer, size);
  return block;
}

/* Allocates SIZE bytes of unaligned data in POOL, copies BUFFER
   into it, and returns the new copy. */
void *
pool_clone_unaligned (struct pool *pool, const void *buffer, size_t size)
{
  void *block = pool_alloc_unaligned (pool, size);
  memcpy (block, buffer, size);
  return block;
}

/* Duplicates null-terminated STRING, within POOL, and returns a
   pointer to the duplicate.  For use only with strings, because
   the returned pointer may not be aligned properly for other
   types. */
char *
pool_strdup (struct pool *pool, const char *string)
{
  return pool_clone_unaligned (pool, string, strlen (string) + 1);
}

/* Duplicates the SIZE bytes of STRING, plus a trailing 0 byte,
   and returns a pointer to the duplicate.  For use only with
   strings, because the returned pointer may not be aligned
   properly for other types. */
char *
pool_strdup0 (struct pool *pool, const char *string, size_t size)
{
  char *new_string = pool_alloc_unaligned (pool, size + 1);
  memcpy (new_string, string, size);
  new_string[size] = '\0';
  return new_string;
}

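/* Illustrative: pool_strdup0() is handy for taking a null-terminated
   copy of a counted substring, e.g. the first N bytes of a longer
   buffer (BUFFER and N are hypothetical):

     char *prefix = pool_strdup0 (pool, buffer, n);
*/
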
/* Formats FORMAT with the given ARGS in memory allocated from
   POOL and returns the formatted string. */
char *
pool_vasprintf (struct pool *pool, const char *format, va_list args_)
{
  struct pool_block *b;
  va_list args;
  int needed, avail;
  char *s;

  va_copy (args, args_);
  b = pool->blocks;
  avail = BLOCK_SIZE - b->ofs;
  s = ((char *) b) + b->ofs;
  needed = vsnprintf (s, avail, format, args);
  va_end (args);

  if (needed >= 0)
    {
      if (needed < avail)
        {
          /* Success.  Reserve the space that was actually used. */
          b->ofs += needed + 1;
        }
      else
        {
          /* Failure, but now we know how much space is needed.
             Allocate that much and reformat. */
          s = pool_alloc (pool, needed + 1);

          va_copy (args, args_);
          vsprintf (s, format, args);
          va_end (args);
        }
    }
  else
    {
      /* Some old libc's returned -1 when the destination string
         was too short.  This should be uncommon these days and
         it's a rare case anyhow.  Use the easiest solution: punt
         to dynamic allocation. */
      va_copy (args, args_);
      s = xvasprintf (format, args);
      va_end (args);

      pool_register (pool, free, s);
    }

  return s;
}

/* Formats FORMAT in memory allocated from POOL
   and returns the formatted string. */
char *
pool_asprintf (struct pool *pool, const char *format, ...)
{
  va_list args;
  char *string;

  va_start (args, format);
  string = pool_vasprintf (pool, format, args);
  va_end (args);

  return string;
}

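/* Illustrative: formatting directly into pool storage; the result
   lives until the pool is destroyed or cleared (FILE_NAME and
   LINE_NUMBER are hypothetical):

     char *message = pool_asprintf (pool, "%s:%d: syntax error",
                                    file_name, line_number);
*/
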
/* Standard allocation routines. */

/* Allocates AMT bytes using malloc(), to be managed by POOL, and
   returns a pointer to the beginning of the block.
   If POOL is a null pointer, then allocates a normal memory block
   with xmalloc(). */
void *
pool_malloc (struct pool *pool, size_t amt)
{
  if (pool != NULL)
    {
      if (amt != 0)
        {
          struct pool_gizmo *g = xmalloc (amt + POOL_GIZMO_SIZE);
          g->type = POOL_GIZMO_MALLOC;
          add_gizmo (pool, g);

          return ((char *) g) + POOL_GIZMO_SIZE;
        }
      else
        return NULL;
    }
  else
    return xmalloc (amt);
}

/* Allocates and returns N elements of S bytes each, to be
   managed by POOL.
   If POOL is a null pointer, then allocates a normal memory block
   with xmalloc().
   N must be nonnegative, S must be positive.
   Terminates the program if the memory cannot be obtained,
   including the case where N * S overflows the range of size_t. */
void *
pool_nmalloc (struct pool *pool, size_t n, size_t s)
{
  if (xalloc_oversized (n, s))
    xalloc_die ();
  return pool_malloc (pool, n * s);
}

/* Allocates AMT bytes using malloc(), to be managed by POOL,
   zeros the block, and returns a pointer to the beginning of the
   block.
   If POOL is a null pointer, then allocates a normal memory block
   with xmalloc(). */
void *
pool_zalloc (struct pool *pool, size_t amt)
{
  void *p = pool_malloc (pool, amt);
  memset (p, 0, amt);
  return p;
}

/* Allocates and returns N elements of S bytes each, to be
   managed by POOL, and zeros the block.
   If POOL is a null pointer, then allocates a normal memory block
   with xmalloc().
   N must be nonnegative, S must be positive.
   Terminates the program if the memory cannot be obtained,
   including the case where N * S overflows the range of size_t. */
void *
pool_calloc (struct pool *pool, size_t n, size_t s)
{
  void *p = pool_nmalloc (pool, n, s);
  memset (p, 0, n * s);
  return p;
}

/* Changes the allocation size of the specified memory block P managed
   by POOL to AMT bytes and returns a pointer to the beginning of the
   block.
   If POOL is a null pointer, then the block is reallocated in the
   usual way with xrealloc(). */
void *
pool_realloc (struct pool *pool, void *p, size_t amt)
{
  if (pool != NULL)
    {
      if (p != NULL)
        {
          if (amt != 0)
            {
              struct pool_gizmo *g = (void *) (((char *) p) - POOL_GIZMO_SIZE);
              check_gizmo (pool, g);

              g = xrealloc (g, amt + POOL_GIZMO_SIZE);
              if (g->next)
                g->next->prev = g;
              if (g->prev)
                g->prev->next = g;
              else
                pool->gizmos = g;
              check_gizmo (pool, g);

              return ((char *) g) + POOL_GIZMO_SIZE;
            }
          else
            {
              pool_free (pool, p);
              return NULL;
            }
        }
      else
        return pool_malloc (pool, amt);
    }
  else
    return xrealloc (p, amt);
}

/* Changes the allocation size of the specified memory block P
   managed by POOL to N * S bytes and returns a pointer to the
   beginning of the block.
   N must be nonnegative, S must be positive.
   If POOL is a null pointer, then the block is reallocated in
   the usual way with xrealloc().
   Terminates the program if the memory cannot be obtained,
   including the case where N * S overflows the range of size_t. */
void *
pool_nrealloc (struct pool *pool, void *p, size_t n, size_t s)
{
  if (xalloc_oversized (n, s))
    xalloc_die ();
  return pool_realloc (pool, p, n * s);
}

/* If P is null, allocate a block of at least *PN such objects;
   otherwise, reallocate P so that it contains more than *PN
   objects each of S bytes.  *PN must be nonzero unless P is
   null, and S must be nonzero.  Set *PN to the new number of
   objects, and return the pointer to the new block.  *PN is
   never set to zero, and the returned pointer is never null.

   The block returned is managed by POOL.  If POOL is a null
   pointer, then the block is reallocated in the usual way with
   xrealloc().

   Terminates the program if the memory cannot be obtained,
   including the case where the memory required overflows the
   range of size_t.

   Repeated reallocations are guaranteed to make progress, either by
   allocating an initial block with a nonzero size, or by allocating a
   larger block.

   In the following implementation, nonzero sizes are doubled so that
   repeated reallocations have O(N log N) overall cost rather than
   O(N**2) cost, but the specification for this function does not
   guarantee that sizes are doubled.

   Here is an example of use:

     int *p = NULL;
     size_t used = 0;
     size_t allocated = 0;

     void
     append_int (int value)
       {
         if (used == allocated)
           p = pool_2nrealloc (pool, p, &allocated, sizeof *p);
         p[used++] = value;
       }

   This causes pool_2nrealloc to allocate a block of some nonzero size
   the first time it is called.

   To have finer-grained control over the initial size, set *PN to a
   nonzero value before calling this function with P == NULL.  For
   example:

     int *p = NULL;
     size_t used = 0;
     size_t allocated = 0;
     size_t allocated1 = 1000;

     void
     append_int (int value)
       {
         if (used == allocated)
           {
             p = pool_2nrealloc (pool, p, &allocated1, sizeof *p);
             allocated = allocated1;
           }
         p[used++] = value;
       }

   This function implementation is from gnulib. */
void *
pool_2nrealloc (struct pool *pool, void *p, size_t *pn, size_t s)
{
  size_t n = *pn;

  if (p == NULL)
    {
      if (n == 0)
        {
          /* The approximate size to use for initial small allocation
             requests, when the invoking code specifies an old size of
             zero.  64 bytes is the largest "small" request for the
             GNU C library malloc. */
          enum { DEFAULT_MXFAST = 64 };

          n = DEFAULT_MXFAST / s;
          n += !n;
        }
    }
  else
    {
      if (SIZE_MAX / 2 / s < n)
        xalloc_die ();
      n *= 2;
    }

  *pn = n;
  return pool_realloc (pool, p, n * s);
}

/* Frees block P managed by POOL.
   If POOL is a null pointer, then the block is freed as usual with
   free(). */
void
pool_free (struct pool *pool, void *p)
{
  if (pool != NULL && p != NULL)
    {
      struct pool_gizmo *g = (void *) (((char *) p) - POOL_GIZMO_SIZE);
      check_gizmo (pool, g);
      delete_gizmo (pool, g);
      free (g);
    }
  else
    free (p);
}

/* Gizmo allocations. */

/* Creates and returns a pool as a subpool of POOL.
   The subpool will be destroyed automatically when POOL is destroyed.
   It may also be destroyed explicitly in advance. */
struct pool *
pool_create_subpool (struct pool *pool)
{
  struct pool *subpool;
  struct pool_gizmo *g;

  assert (pool != NULL);
  subpool = pool_create ();
  subpool->parent = pool;

  /* The subpool's gizmo lives in the subpool's own first block, just
     past the pool structure itself. */
  g = (void *) (((char *) subpool->blocks) + subpool->blocks->ofs);
  subpool->blocks->ofs += POOL_GIZMO_SIZE;

  g->type = POOL_GIZMO_SUBPOOL;
  g->p.subpool = subpool;

  add_gizmo (pool, g);

  return subpool;
}

/* Makes SUBPOOL a subpool of POOL.
   SUBPOOL must not already have a parent pool.
   The subpool will be destroyed automatically when POOL is destroyed.
   It may also be destroyed explicitly in advance. */
void
pool_add_subpool (struct pool *pool, struct pool *subpool)
{
  struct pool_gizmo *g;

  assert (pool != NULL);
  assert (subpool != NULL);
  assert (subpool->parent == NULL);

  g = pool_alloc (subpool, sizeof *g);
  g->type = POOL_GIZMO_SUBPOOL;
  g->p.subpool = subpool;
  add_gizmo (pool, g);

  subpool->parent = pool;
}

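/* Illustrative sketch of subpool use: a subpool's contents can be
   freed early with pool_destroy(), or left to be cleaned up
   automatically when the parent pool is destroyed:

     struct pool *parent = pool_create ();
     struct pool *scratch = pool_create_subpool (parent);
     ...allocate temporaries from scratch...
     pool_destroy (scratch);       (gives the memory back now)
     ...
     pool_destroy (parent);        (would otherwise have destroyed scratch too)
*/
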
/* Opens file FILE_NAME with mode MODE and returns a handle to it
   if successful or a null pointer if not.
   The file will be closed automatically when POOL is destroyed, or it
   may be closed explicitly in advance using pool_fclose(), or
   detached from the pool with pool_detach_file(). */
FILE *
pool_fopen (struct pool *pool, const char *file_name, const char *mode)
{
  FILE *f;

  assert (pool && file_name && mode);
  f = fopen (file_name, mode);
  if (f != NULL)
    pool_attach_file (pool, f);

  return f;
}

/* Closes file FILE managed by POOL.
   Returns 0 if successful, EOF if an I/O error occurred. */
int
pool_fclose (struct pool *pool, FILE *file)
{
  assert (pool && file);
  pool_detach_file (pool, file);
  return fclose (file);
}

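/* Illustrative: a file opened through the pool is closed automatically
   at pool_destroy() time unless it is closed or detached first
   ("data.txt" is a hypothetical file name):

     FILE *f = pool_fopen (pool, "data.txt", "r");
     if (f != NULL)
       {
         ...read from f...
         pool_fclose (pool, f);    (optional; pool_destroy would close it)
       }
*/
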
/* Attaches FILE to POOL.
   The file will be closed automatically when POOL is destroyed, or it
   may be closed explicitly in advance using pool_fclose(), or
   detached from the pool with pool_detach_file(). */
void
pool_attach_file (struct pool *pool, FILE *file)
{
  struct pool_gizmo *g = pool_alloc (pool, sizeof *g);
  g->type = POOL_GIZMO_FILE;
  g->p.file = file;
  add_gizmo (pool, g);
}

/* Detaches FILE from POOL. */
void
pool_detach_file (struct pool *pool, FILE *file)
{
  struct pool_gizmo *g;

  for (g = pool->gizmos; g; g = g->next)
    if (g->type == POOL_GIZMO_FILE && g->p.file == file)
      {
        delete_gizmo (pool, g);
        return;
      }
}

/* Creates a temporary file with create_temp_file() and returns a handle to it
   if successful or a null pointer if not.
   The file will be closed automatically when POOL is destroyed, or it
   may be closed explicitly in advance using pool_fclose_temp_file(), or
   detached from the pool with pool_detach_temp_file(). */
FILE *
pool_create_temp_file (struct pool *pool)
{
  FILE *file = create_temp_file ();
  if (file != NULL)
    pool_attach_temp_file (pool, file);

  return file;
}

/* Closes file FILE managed by POOL.
   FILE must have been opened with create_temp_file(). */
void
pool_fclose_temp_file (struct pool *pool, FILE *file)
{
  assert (pool && file);
  pool_detach_temp_file (pool, file);
  close_temp_file (file);
}

/* Attaches FILE, which must have been opened with create_temp_file(), to POOL.
   The file will be closed automatically when POOL is destroyed, or it
   may be closed explicitly in advance using pool_fclose_temp_file(), or
   detached from the pool with pool_detach_temp_file(). */
void
pool_attach_temp_file (struct pool *pool, FILE *file)
{
  struct pool_gizmo *g = pool_alloc (pool, sizeof *g);
  g->type = POOL_GIZMO_TEMP_FILE;
  g->p.file = file;
  add_gizmo (pool, g);
}

/* Detaches FILE that was opened with create_temp_file() from POOL. */
void
pool_detach_temp_file (struct pool *pool, FILE *file)
{
  struct pool_gizmo *g;

  for (g = pool->gizmos; g; g = g->next)
    if (g->type == POOL_GIZMO_TEMP_FILE && g->p.file == file)
      {
        delete_gizmo (pool, g);
        return;
      }
}

/* Registers FREE to be called with argument P.
   P should be unique among those registered in POOL so that it can be
   uniquely identified by pool_unregister().
   If not unregistered, FREE will be called with argument P when POOL
   is destroyed. */
void
pool_register (struct pool *pool, void (*free) (void *), void *p)
{
  assert (pool && free && p);

  {
    struct pool_gizmo *g = pool_alloc (pool, sizeof *g);
    g->type = POOL_GIZMO_REGISTERED;
    g->p.registered.free = free;
    g->p.registered.p = p;
    add_gizmo (pool, g);
  }
}

/* Unregisters previously registered P from POOL.
   Returns true only if P was found to be registered in POOL. */
bool
pool_unregister (struct pool *pool, void *p)
{
  assert (pool && p);

  {
    struct pool_gizmo *g;

    for (g = pool->gizmos; g; g = g->next)
      if (g->type == POOL_GIZMO_REGISTERED && g->p.registered.p == p)
        {
          delete_gizmo (pool, g);
          return true;
        }
  }

  return false;
}

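/* Illustrative: pool_register() ties cleanup of memory that the pool
   did not allocate to the pool's lifetime, just as pool_vasprintf()
   does above for the xvasprintf() result:

     char *s = xstrdup ("not pool memory");
     pool_register (pool, free, s);
     ...
     if (pool_unregister (pool, s))   (only needed to free S manually)
       free (s);
*/
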
/* Partial freeing. */

/* Notes the state of POOL into MARK so that it may be restored
   by a call to pool_release(). */
void
pool_mark (struct pool *pool, struct pool_mark *mark)
{
  assert (pool && mark);

  mark->block = pool->blocks;
  mark->ofs = pool->blocks->ofs;

  mark->serial = serial;
}

/* Restores to POOL the state recorded in MARK.
   Emptied blocks are not given back with free() but kept for
   later allocations.  To get that behavior, use a subpool
   instead. */
void
pool_release (struct pool *pool, const struct pool_mark *mark)
{
  assert (pool && mark);

  /* Free the gizmos created since MARK was taken. */
  {
    struct pool_gizmo *cur, *next;

    for (cur = pool->gizmos; cur && cur->serial >= mark->serial; cur = next)
      {
        next = cur->next;
        free_gizmo (cur);
      }

    if (cur != NULL)
      {
        cur->prev = NULL;
        pool->gizmos = cur;
      }
    else
      pool->gizmos = NULL;
  }

  /* Reset the blocks allocated since MARK was taken. */
  {
    struct pool_block *cur;

    for (cur = pool->blocks; cur != mark->block; cur = cur->next)
      {
        cur->ofs = POOL_BLOCK_SIZE;
        if ((char *) cur + POOL_BLOCK_SIZE == (char *) pool)
          {
            cur->ofs += POOL_SIZE;
            if (pool->parent != NULL)
              cur->ofs += POOL_GIZMO_SIZE;
          }
      }

    pool->blocks = mark->block;
    pool->blocks->ofs = mark->ofs;
  }
}

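/* Illustrative: pool_mark()/pool_release() bracket a burst of
   temporary suballocations so they can be dropped without touching
   anything allocated earlier:

     struct pool_mark mark;
     pool_mark (pool, &mark);
     char *tmp = pool_strdup (pool, "scratch data");
     ...use tmp...
     pool_release (pool, &mark);   (TMP is no longer valid here)
*/
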
/* Private functions. */

/* Adds GIZMO at the beginning of POOL's gizmo list. */
static void
add_gizmo (struct pool *pool, struct pool_gizmo *gizmo)
{
  assert (pool && gizmo);

  gizmo->pool = pool;
  gizmo->next = pool->gizmos;
  gizmo->prev = NULL;
  if (pool->gizmos)
    pool->gizmos->prev = gizmo;
  pool->gizmos = gizmo;

  gizmo->serial = serial++;

  check_gizmo (pool, gizmo);
}

/* Removes GIZMO from POOL's gizmo list. */
static void
delete_gizmo (struct pool *pool, struct pool_gizmo *gizmo)
{
  assert (pool && gizmo);

  check_gizmo (pool, gizmo);
  if (gizmo->prev)
    gizmo->prev->next = gizmo->next;
  else
    pool->gizmos = gizmo->next;
  if (gizmo->next)
    gizmo->next->prev = gizmo->prev;
}

/* Frees any of GIZMO's internal state.
   GIZMO's data must not be referenced after calling this function. */
static void
free_gizmo (struct pool_gizmo *gizmo)
{
  assert (gizmo != NULL);

  switch (gizmo->type)
    {
    case POOL_GIZMO_MALLOC:
      free (gizmo);
      break;
    case POOL_GIZMO_FILE:
      fclose (gizmo->p.file);           /* Ignore errors. */
      break;
    case POOL_GIZMO_TEMP_FILE:
      close_temp_file (gizmo->p.file);  /* Ignore errors. */
      break;
    case POOL_GIZMO_SUBPOOL:
      gizmo->p.subpool->parent = NULL;
      pool_destroy (gizmo->p.subpool);
      break;
    case POOL_GIZMO_REGISTERED:
      gizmo->p.registered.free (gizmo->p.registered.p);
      break;
    default:
      NOT_REACHED ();
    }
}

/* Free all the gizmos in POOL. */
static void
free_all_gizmos (struct pool *pool)
{
  struct pool_gizmo *cur, *next;

  for (cur = pool->gizmos; cur; cur = next)
    {
      next = cur->next;
      free_gizmo (cur);
    }
  pool->gizmos = NULL;
}

/* Checks that GIZMO is properly linked into P's gizmo list. */
static void
check_gizmo (struct pool *p, struct pool_gizmo *g)
{
  assert (g->pool == p);
  assert (g->next == NULL || g->next->prev == g);
  assert ((g->prev != NULL && g->prev->next == g)
          || (g->prev == NULL && p->gizmos == g));
}