/* PSPP - a program for statistical analysis.
   Copyright (C) 2007, 2009, 2011 Free Software Foundation, Inc.

   This program is free software: you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation, either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>. */
/* Bitmap, implemented as a balanced binary tree. */

/* If you add routines in this file, please add a corresponding
   test to range-set-test.c.  This test program should achieve
   100% coverage of lines and branches in this code, as reported
   by "gcov -b". */
#include "libpspp/range-set.h"

#include <limits.h>
#include <stdlib.h>

#include "libpspp/assertion.h"
#include "libpspp/compiler.h"
#include "libpspp/pool.h"

#include "gl/minmax.h"
#include "gl/xalloc.h"
/* Internal helpers; see the definitions below for details. */
static int compare_range_set_nodes (const struct bt_node *,
                                    const struct bt_node *,
                                    const void *aux);
static void delete_node (struct range_set *, struct range_set_node *);
static struct range_set_node *delete_node_get_next (struct range_set *,
                                                    struct range_set_node *);
static struct range_set_node *find_node_le (const struct range_set *,
                                            unsigned long int position);
static struct range_set_node *insert_node (struct range_set *,
                                           unsigned long int start,
                                           unsigned long int end);
static void insert_just_before (struct range_set *,
                                unsigned long int start, unsigned long int end,
                                struct range_set_node *);
static void merge_node_with_successors (struct range_set *,
                                        struct range_set_node *);
static void destroy_pool (void *);
/* Creates and returns a new, empty range set. */
struct range_set *
range_set_create (void)
{
  return range_set_create_pool (NULL);
}
63 /* Creates and returns a new, empty range set in the given
66 range_set_create_pool (struct pool *pool)
68 struct range_set *rs = xmalloc (sizeof *rs);
71 pool_register (pool, destroy_pool, rs);
72 bt_init (&rs->bt, compare_range_set_nodes, NULL);
77 /* Creates and returns a clone of OLD range set in the given POOL
78 (which may be null). */
80 range_set_clone (const struct range_set *old, struct pool *pool)
82 struct range_set *new;
83 struct range_set_node *node;
85 new = range_set_create_pool (pool);
86 for (node = range_set_first__ (old); node != NULL;
87 node = range_set_next__ (old, node))
88 insert_node (new, node->start, node->end);
92 /* Destroys range set RS. */
94 range_set_destroy (struct range_set *rs)
99 pool_unregister (rs->pool, rs);
100 while (!range_set_is_empty (rs))
101 delete_node (rs, range_set_first__ (rs));
106 /* Inserts the region starting at START and extending for WIDTH
109 range_set_insert (struct range_set *rs,
110 unsigned long int start, unsigned long int width)
112 unsigned long int end = start + width;
113 struct range_set_node *node;
115 assert (width == 0 || start + width - 1 >= start);
120 /* Invalidate cache. */
123 node = find_node_le (rs, start);
126 if (start > node->end)
128 /* New region does not overlap NODE, but it might
129 overlap the next node. */
130 insert_just_before (rs, start, end, range_set_next__ (rs, node));
132 else if (end > node->end)
134 /* New region starts in the middle of NODE and
135 continues past its end, so extend NODE, then merge
136 it with any following node that it now potentially
139 merge_node_with_successors (rs, node);
143 /* New region is completely contained by NODE, so
144 there's nothing to do. */
149 /* New region starts before any existing region, but it
150 might overlap the first region. */
151 insert_just_before (rs, start, end, range_set_first__ (rs));
155 /* Deletes the region starting at START and extending for WIDTH
158 range_set_delete (struct range_set *rs,
159 unsigned long int start, unsigned long int width)
161 unsigned long int end = start + width;
162 struct range_set_node *node;
164 assert (width == 0 || start + width - 1 >= start);
169 /* Invalidate cache. */
172 node = find_node_le (rs, start);
174 node = range_set_first__ (rs);
176 while (node != NULL && end > node->start)
178 if (start <= node->start)
180 if (end >= node->end)
182 /* Region to delete covers entire node. */
183 node = delete_node_get_next (rs, node);
187 /* Region to delete covers only left part of node. */
192 else if (start < node->end)
196 /* Region to delete covers middle of the node, so
197 we have to split the node into two pieces. */
198 unsigned long int old_node_end = node->end;
200 insert_node (rs, end, old_node_end);
205 /* Region to delete covers only right part of
208 node = range_set_next__ (rs, node);
212 node = range_set_next__ (rs, node);
216 /* Scans RS for its first 1-bit and deletes up to REQUEST
217 contiguous 1-bits starting at that position. Unless RS is
218 completely empty, sets *START to the position of the first
219 1-bit deleted and *WIDTH to the number actually deleted, which
220 may be less than REQUEST if fewer contiguous 1-bits were
221 present, and returns true. If RS is completely empty, returns
224 range_set_allocate (struct range_set *rs, unsigned long int request,
225 unsigned long int *start, unsigned long int *width)
227 struct range_set_node *node;
228 unsigned long int node_width;
230 assert (request > 0);
232 node = range_set_first__ (rs);
235 node_width = node->end - node->start;
237 *start = node->start;
238 if (request < node_width)
241 node->start += request;
246 delete_node (rs, node);
254 /* Scans RS for and deletes the first contiguous run of REQUEST
255 1-bits. If successful, sets *START to the position of the
256 first 1-bit deleted and returns true If RS does not contain a
257 run of REQUEST or more contiguous 1-bits, returns false and
258 does not modify RS. */
260 range_set_allocate_fully (struct range_set *rs, unsigned long int request,
261 unsigned long int *start)
263 struct range_set_node *node;
265 assert (request > 0);
267 for (node = range_set_first__ (rs); node != NULL;
268 node = range_set_next__ (rs, node))
270 unsigned long int node_width = node->end - node->start;
271 if (node_width >= request)
273 *start = node->start;
274 if (node_width > request)
275 node->start += request;
277 delete_node (rs, node);
285 /* Returns true if there is a 1-bit at the given POSITION in RS,
288 range_set_contains (const struct range_set *rs_, unsigned long int position)
290 struct range_set *rs = CONST_CAST (struct range_set *, rs_);
291 if (position < rs->cache_end && position >= rs->cache_start)
292 return rs->cache_value;
295 struct range_set_node *node = find_node_le (rs, position);
298 if (position < node->end)
300 rs->cache_start = node->start;
301 rs->cache_end = node->end;
302 rs->cache_value = true;
307 struct range_set_node *next = range_set_next__ (rs, node);
308 rs->cache_start = node->end;
309 rs->cache_end = next != NULL ? next->start : ULONG_MAX;
310 rs->cache_value = false;
316 node = range_set_first__ (rs);
318 rs->cache_end = node != NULL ? node->start : ULONG_MAX;
319 rs->cache_value = false;
325 /* Returns the smallest position of a 1-bit greater than or
326 equal to START. Returns ULONG_MAX if there is no 1-bit with
327 position greater than or equal to START. */
329 range_set_scan (const struct range_set *rs_, unsigned long int start)
331 struct range_set *rs = CONST_CAST (struct range_set *, rs_);
332 unsigned long int retval = ULONG_MAX;
333 struct bt_node *bt_node;
335 if (start < rs->cache_end && start >= rs->cache_start && rs->cache_value)
337 bt_node = rs->bt.root;
338 while (bt_node != NULL)
340 struct range_set_node *node = range_set_node_from_bt__ (bt_node);
341 if (start < node->start)
343 retval = node->start;
344 bt_node = node->bt_node.down[0];
346 else if (start >= node->end)
347 bt_node = node->bt_node.down[1];
350 rs->cache_start = node->start;
351 rs->cache_end = node->end;
352 rs->cache_value = true;
359 /* Compares `range_set_node's A and B and returns a strcmp-like
362 compare_range_set_nodes (const struct bt_node *a_,
363 const struct bt_node *b_,
364 const void *aux UNUSED)
366 const struct range_set_node *a = range_set_node_from_bt__ (a_);
367 const struct range_set_node *b = range_set_node_from_bt__ (b_);
369 return a->start < b->start ? -1 : a->start > b->start;
372 /* Deletes NODE from RS and frees it. */
374 delete_node (struct range_set *rs, struct range_set_node *node)
376 bt_delete (&rs->bt, &node->bt_node);
/* Deletes NODE from RS, frees it, and returns the following
   node. */
static struct range_set_node *
delete_node_get_next (struct range_set *rs, struct range_set_node *node)
{
  struct range_set_node *next = range_set_next__ (rs, node);
  delete_node (rs, node);
  return next;
}
390 /* Returns the node in RS that would be just before POSITION if a
391 range_set_node with `start' as POSITION were inserted.
392 Returns a null pointer if POSITION is before any existing node
393 in RS. If POSITION would duplicate an existing region's
394 start, returns that region. */
395 static struct range_set_node *
396 find_node_le (const struct range_set *rs, unsigned long int position)
398 struct range_set_node tmp;
399 tmp.start = position;
400 return range_set_node_from_bt__ (bt_find_le (&rs->bt, &tmp.bt_node));
403 /* Creates a new region with the given START and END, inserts the
404 region into RS, and returns the new region. */
405 static struct range_set_node *
406 insert_node (struct range_set *rs,
407 unsigned long int start, unsigned long int end)
409 struct range_set_node *node = xmalloc (sizeof *node);
410 struct bt_node *dummy;
413 dummy = bt_insert (&rs->bt, &node->bt_node);
414 assert (dummy == NULL);
418 /* Inserts the region START...END (exclusive) into RS given that
419 the new region is "just before" NODE, that is, if NODE is
420 nonnull, the new region is known not to overlap any node
421 preceding NODE, and START precedes NODE's start; if NODE is
422 null, then the new region is known to follow any and all
423 regions already in RS. */
425 insert_just_before (struct range_set *rs,
426 unsigned long int start, unsigned long int end,
427 struct range_set_node *node)
429 assert (node == NULL || start < node->start);
430 if (node != NULL && end >= node->start)
432 /* New region overlaps NODE, so just extend NODE. */
437 merge_node_with_successors (rs, node);
442 /* New region does not overlap NODE. */
443 insert_node (rs, start, end);
447 /* Merges NODE in RS with its successors. */
449 merge_node_with_successors (struct range_set *rs, struct range_set_node *node)
451 struct range_set_node *next;
453 while ((next = range_set_next__ (rs, node)) != NULL
454 && node->end >= next->start)
456 if (next->end > node->end)
457 node->end = next->end;
458 delete_node (rs, next);
462 /* Destroys range set RS.
463 Helper function for range_set_create_pool. */
465 destroy_pool (void *rs_)
467 struct range_set *rs = rs_;
469 range_set_destroy (rs);