1 /* PSPP - a program for statistical analysis.
2 Copyright (C) 2009, 2010, 2011, 2013 Free Software Foundation, Inc.
4 This program is free software: you can redistribute it and/or modify
5 it under the terms of the GNU General Public License as published by
6 the Free Software Foundation, either version 3 of the License, or
7 (at your option) any later version.
9 This program is distributed in the hope that it will be useful,
10 but WITHOUT ANY WARRANTY; without even the implied warranty of
11 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 GNU General Public License for more details.
14 You should have received a copy of the GNU General Public License
15 along with this program. If not, see <http://www.gnu.org/licenses/>. */
24 #include "libpspp/assertion.h"
25 #include "libpspp/hash-functions.h"
26 #include "libpspp/hmap.h"
27 #include "output/render.h"
28 #include "output/table.h"
30 #include "gl/minmax.h"
31 #include "gl/xalloc.h"
33 /* This file uses TABLE_HORZ and TABLE_VERT enough to warrant abbreviating. */
37 /* A layout for rendering a specific table on a specific device.
39 May represent the layout of an entire table presented to
40 render_page_create(), or a rectangular subregion of a table broken out using
41 render_break_next() to allow a table to be broken across multiple pages. */
44 const struct render_params *params; /* Parameters of the target device. */
45 struct table *table; /* Table rendered. */
48 /* Local copies of table->n and table->h, for convenience. */
50 int h[TABLE_N_AXES][2];
52 /* cp[H] represents x positions within the table.
53 cp[H][0] = 0.
54 cp[H][1] = the width of the leftmost vertical rule.
55 cp[H][2] = cp[H][1] + the width of the leftmost column.
56 cp[H][3] = cp[H][2] + the width of the second-from-left vertical rule.
58 cp[H][2 * nc] = x position of the rightmost vertical rule.
59 cp[H][2 * nc + 1] = total table width including all rules.
61 Similarly, cp[V] represents y positions within the table.
62 cp[V][0] = 0.
63 cp[V][1] = the height of the topmost horizontal rule.
64 cp[V][2] = cp[V][1] + the height of the topmost row.
65 cp[V][3] = cp[V][2] + the height of the second-from-top horizontal rule.
67 cp[V][2 * nr] = y position of the bottommost horizontal rule.
68 cp[V][2 * nr + 1] = total table height including all rules.
70 Rules and columns can have width or height 0, in which case consecutive
71 values in this array are equal. */
72 int *cp[TABLE_N_AXES];
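/* Illustrative example (arbitrary widths): for a table with nc = 2 columns,
   every rule 1 pixel thick and the columns 10 and 20 pixels wide, cp[H]
   would contain { 0, 1, 11, 12, 32, 33 }: cp[H][0] = 0, cp[H][1] = 1 after
   the leftmost rule, cp[H][2] = 11 after the first column, cp[H][3] = 12
   after the middle rule, cp[H][2 * nc] = 32 at the rightmost rule, and
   cp[H][2 * nc + 1] = 33, the total width. */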
74 /* render_break_next() can break a table such that some cells are not fully
75 contained within a render_page. This will happen if a cell is too wide
76 or too tall to fit on a single page, or if a cell spans multiple rows or
77 columns and the page only includes some of those rows or columns.
79 This hash table contains "struct render_overflow"s that represent each
80 such cell that doesn't completely fit on this page.
82 Each overflow cell borders at least one header edge of the table and may
83 border more. (A single table cell that is so large that it fills the
84 entire page can overflow on all four sides!) */
85 struct hmap overflows;
87 /* If a single column (or row) is too wide (or tall) to fit on a page
88 reasonably, then render_break_next() will split a single row or column
89 across multiple render_pages. This member indicates when this has happened:
92 is_edge_cutoff[H][0] is true if pixels have been cut off the left side
93 of the leftmost column in this page, and false otherwise.
95 is_edge_cutoff[H][1] is true if pixels have been cut off the right side
96 of the rightmost column in this page, and false otherwise.
98 is_edge_cutoff[V][0] and is_edge_cutoff[V][1] are similar for the top
99 and bottom of the table.
101 The effect of is_edge_cutoff is to prevent rules along the edge in
102 question from being rendered.
104 When is_edge_cutoff is true for a given edge, the 'overflows' hmap will
105 contain a node for each cell along that edge. */
106 bool is_edge_cutoff[TABLE_N_AXES][2];
108 /* If part of a joined cell would be cut off by breaking a table along
109 'axis' at the rule with offset 'z' (where 0 <= z <= n[axis]), then
110 join_crossing[axis][z] is the thickness of the rule that would be cut off.
113 This is used to know to allocate extra space for breaking at such a
114 position, so that part of the cell's content is not lost.
116 This affects breaking a table only when headers are present. When
117 headers are not present, the rule's thickness is used for cell content,
118 so no part of the cell's content is lost (and in fact it is duplicated
119 across both pages). */
120 int *join_crossing[TABLE_N_AXES];
123 /* Returns the offset in struct render_page's cp[axis] array of the rule with
124 index RULE_IDX. That is, if RULE_IDX is 0, then the offset is that of the
125 leftmost or topmost rule; if RULE_IDX is 1, then the offset is that of the
126 next rule to the right (or below); and so on. */
128 rule_ofs (int rule_idx)
133 /* Returns the offset in struct render_page's cp[axis] array of the rule with
134 index RULE_IDX_R, which counts from the right side (or bottom) of the page
135 left (or up), according to whether AXIS is H or V, respectively. That is,
136 if RULE_IDX_R is 0, then the offset is that of the rightmost or bottommost
137 rule; if RULE_IDX_R is 1, then the offset is that of the next rule to the left
138 (or above); and so on. */
140 rule_ofs_r (const struct render_page *page, int axis, int rule_idx_r)
142 return (page->n[axis] - rule_idx_r) * 2;
145 /* Returns the offset in struct render_page's cp[axis] array of the cell with
146 index CELL_IDX. That is, if CELL_IDX is 0, then the offset is that of the
147 leftmost or topmost cell; if CELL_IDX is 1, then the offset is that of the
148 next cell to the right (or below); and so on. */
150 cell_ofs (int cell_idx)
152 return cell_idx * 2 + 1;
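/* Sketch of how these offsets interleave for a page with 3 columns
   (illustrative only): offsets 0, 2, 4, 6 in cp[H] are where rules 0..3
   start and offsets 1, 3, 5 are where cells 0..2 start, so that
   rule_ofs (1) == 2, cell_ofs (1) == 3, and rule_ofs_r (page, H, 0) == 6
   refers to the rightmost rule. */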
155 /* Returns the width of PAGE along AXIS from OFS0 to OFS1, exclusive. */
157 axis_width (const struct render_page *page, int axis, int ofs0, int ofs1)
159 return page->cp[axis][ofs1] - page->cp[axis][ofs0];
162 /* Returns the width of the headers in PAGE along AXIS. */
164 headers_width (const struct render_page *page, int axis)
166 int h0 = page->h[axis][0];
167 int w0 = axis_width (page, axis, rule_ofs (0), cell_ofs (h0));
168 int n = page->n[axis];
169 int h1 = page->h[axis][1];
170 int w1 = axis_width (page, axis, rule_ofs_r (page, axis, h1), cell_ofs (n));
174 /* Returns the width of cell X along AXIS in PAGE. */
176 cell_width (const struct render_page *page, int axis, int x)
178 return axis_width (page, axis, cell_ofs (x), cell_ofs (x) + 1);
181 /* Returns the width of cells X0 through X1, exclusive, along AXIS in PAGE. */
183 joined_width (const struct render_page *page, int axis, int x0, int x1)
185 return axis_width (page, axis, cell_ofs (x0), cell_ofs (x1) - 1);
188 /* Returns the width of the widest cell, excluding headers, along AXIS in PAGE. */
191 max_cell_width (const struct render_page *page, int axis)
193 int n = page->n[axis];
194 int x0 = page->h[axis][0];
195 int x1 = n - page->h[axis][1];
199 for (x = x0; x < x1; x++)
201 int w = cell_width (page, axis, x);
208 /* A cell that doesn't completely fit on the render_page. */
209 struct render_overflow
211 struct hmap_node node; /* In render_page's 'overflows' hmap. */
213 /* Occupied region of page.
215 d[H][0] is the leftmost column.
216 d[H][1] is the rightmost column, plus 1.
217 d[V][0] is the top row.
218 d[V][1] is the bottom row, plus 1.
220 The cell in its original table might occupy a larger region. This
221 member reflects the size of the cell in the current render_page, after
222 trimming off any rows or columns due to page-breaking. */
225 /* The space that has been trimmed off the cell:
227 overflow[H][0]: space trimmed off its left side.
228 overflow[H][1]: space trimmed off its right side.
229 overflow[V][0]: space trimmed off its top.
230 overflow[V][1]: space trimmed off its bottom.
232 During rendering, this information is used to position the rendered
233 portion of the cell within the available space.
235 When a cell is rendered, sometimes it is permitted to spill over into
236 space that is ordinarily reserved for rules. Either way, this space is
237 still included in overflow values.
239 Suppose, for example, that a cell that joins 2 columns has a width of 60
240 pixels and content "abcdef", that the 2 columns that it joins have
241 widths of 20 and 30 pixels, respectively, and that therefore the rule
242 between the two joined columns has a width of 10 (20 + 10 + 30 = 60).
243 It might render like this, if each character is 10x10, and showing a few
244 extra table cells for context:
252 If this render_page is broken at the rule that separates "gh" from
253 "ijk", then the page that contains the left side of the "abcdef" cell
254 will have overflow[H][1] of 10 + 30 = 40 for its portion of the cell,
255 and the page that contains the right side of the cell will have
256 overflow[H][0] of 20 + 10 = 30. The two resulting pages would look like
274 int overflow[TABLE_N_AXES][2];
277 /* Returns a hash value for (X,Y). */
279 hash_overflow (int x, int y)
281 return hash_int (x + (y << 16), 0);
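/* For example, hash_overflow (3, 5) hashes the packed value 3 + (5 << 16),
   i.e. x in the low 16 bits and y above them.  Larger coordinates simply
   cause more hash collisions, which find_overflow() below resolves by
   comparing the coordinates themselves. */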
284 /* Searches PAGE's set of render_overflow for one whose top-left cell is
285 (X,Y). Returns it, if there is one, otherwise a null pointer. */
286 static const struct render_overflow *
287 find_overflow (const struct render_page *page, int x, int y)
289 if (!hmap_is_empty (&page->overflows))
291 const struct render_overflow *of;
293 HMAP_FOR_EACH_WITH_HASH (of, struct render_overflow, node,
294 hash_overflow (x, y), &page->overflows)
295 if (x == of->d[H] && y == of->d[V])
302 /* Row or column dimensions. Used to figure the size of a table in
303 render_page_create() and discarded after that. */
306 /* Width without considering rows (or columns) that span more than one row (or column). */
310 /* Width taking spanned rows (or columns) into consideration. */
314 /* Modifies the 'width' members of the N elements of ROWS so that their sum,
315 when added to rule widths RULES[1] through RULES[N - 1] inclusive, is at least WIDTH. */
318 distribute_spanned_width (int width,
319 struct render_row *rows, const int *rules, int n)
325 /* Sum up the unspanned widths of the N rows for use as weights. */
327 for (x = 0; x < n; x++)
328 total_unspanned += rows[x].unspanned;
329 for (x = 0; x < n - 1; x++)
330 total_unspanned += rules[x + 1];
331 if (total_unspanned >= width)
334 /* The algorithm used here is based on the following description from HTML 4:
336 For cells that span multiple columns, a simple approach consists of
337 apportioning the min/max widths evenly to each of the constituent
338 columns. A slightly more complex approach is to use the min/max
339 widths of unspanned cells to weight how spanned widths are
340 apportioned. Experiments suggest that a blend of the two approaches
341 gives good results for a wide range of tables.
343 We blend the two approaches half-and-half, except that we cannot use the
344 unspanned weights when 'total_unspanned' is 0 (because that would cause a division by zero).
347 This implementation uses floating-point types and operators, but all the
348 values involved are integers. For integers smaller than 53 bits, this
349 should not lose any precision, and it should degrade gracefully for larger values.
352 The calculation we want to do is this:
354 w0 = width / n
355 w1 = width * (column's unspanned width) / (total unspanned width)
356 (column's width) = (w0 + w1) / 2
358 We implement it as a precise calculation in integers by multiplying w0 and
359 w1 by the common denominator of all three calculations (d), dividing that
360 out in the column width calculation, and then keeping the remainder for the next column.
363 (We actually compute the unspanned width of a column as twice the
364 unspanned width, plus the width of the rule on the left, plus the width of
365 the rule on the right. That way each rule contributes to both the cell on
366 its left and on its right.)
369 d1 = 2.0 * (total_unspanned > 0 ? total_unspanned : 1.0);
371 if (total_unspanned > 0)
374 for (x = 0; x < n; x++)
377 if (total_unspanned > 0)
379 double unspanned = rows[x].unspanned * 2.0;
381 unspanned += rules[x + 1];
383 unspanned += rules[x];
384 w += width * unspanned * d0;
387 rows[x].width = MAX (rows[x].width, w / d);
388 w -= rows[x].width * d;
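/* Worked example of the blend described above, with illustrative numbers and
   all rule widths taken as 0: a cell spanning n = 2 columns needs width = 60,
   and the columns' unspanned widths are 10 and 30, so total_unspanned = 40,
   which is less than 60.  The first column's share is
   (60/2 + 60*10/40) / 2 = 22.5 and the second's is (30 + 45) / 2 = 37.5; the
   shares sum to 60 exactly, which the integer arithmetic above reproduces by
   carrying the running remainder in 'w' from one column to the next. */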
392 /* Initializes PAGE->cp[AXIS] from the row widths in ROWS and the rule widths in RULES. */
395 accumulate_row_widths (const struct render_page *page, enum table_axis axis,
396 const struct render_row *rows, const int *rules)
398 int n = page->n[axis];
404 for (z = 0; z < n; z++)
406 cp[1] = cp[0] + rules[z];
407 cp[2] = cp[1] + rows[z].width;
410 cp[1] = cp[0] + rules[n];
413 /* Returns the sum of widths of the N ROWS and N+1 RULES. */
415 calculate_table_width (int n, const struct render_row *rows, int *rules)
421 for (x = 0; x < n; x++)
422 width += rows[x].width;
423 for (x = 0; x <= n; x++)
429 /* Rendering utility functions. */
431 /* Returns the line style to use for drawing a rule of the given TYPE. */
432 static enum render_line_style
433 rule_to_render_type (unsigned char type)
439 return RENDER_LINE_NONE;
441 return RENDER_LINE_SINGLE;
443 return RENDER_LINE_DOUBLE;
449 /* Returns the width of the rule in TABLE that is at offset Z along axis A, if
450 rendered with PARAMS. */
452 measure_rule (const struct render_params *params, const struct table *table,
453 enum table_axis a, int z)
455 enum table_axis b = !a;
460 /* Determine all types of rules that are present, as a bitmap in 'rules'
461 where rule type 't' is present if bit 2**t is set. */
464 for (d[b] = 0; d[b] < table->n[b]; d[b]++)
465 rules |= 1u << table_get_rule (table, a, d[H], d[V]);
467 /* Calculate maximum width of the rules that are present. */
469 if (rules & (1u << TAL_1)
470 || (z > 0 && z < table->n[a] && rules & (1u << TAL_GAP)))
471 width = params->line_widths[a][RENDER_LINE_SINGLE];
472 if (rules & (1u << TAL_2))
473 width = MAX (width, params->line_widths[a][RENDER_LINE_DOUBLE]);
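/* For instance, if the rules crossing this position are a mix of TAL_GAP and
   TAL_1, then 'rules' has the bits (1u << TAL_GAP) | (1u << TAL_1) set and
   the measured width is the device's single-line width; if any TAL_2 rule
   were also present, the width would grow to the double-line width whenever
   that is wider.  (Illustrative restatement of the bitmap logic above.) */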
477 /* Allocates and returns a new render_page using PARAMS and TABLE. Allocates
478 space for all of the members of the new page, but the caller must initialize
479 the 'cp' member itself. */
480 static struct render_page *
481 render_page_allocate (const struct render_params *params,
484 struct render_page *page;
487 page = xmalloc (sizeof *page);
488 page->params = params;
491 page->n[H] = table->n[H];
492 page->n[V] = table->n[V];
493 page->h[H][0] = table->h[H][0];
494 page->h[H][1] = table->h[H][1];
495 page->h[V][0] = table->h[V][0];
496 page->h[V][1] = table->h[V][1];
498 for (i = 0; i < TABLE_N_AXES; i++)
500 page->cp[i] = xmalloc ((2 * page->n[i] + 2) * sizeof *page->cp[i]);
501 page->join_crossing[i] = xzalloc ((page->n[i] + 1) * sizeof *page->join_crossing[i]);
504 hmap_init (&page->overflows);
505 memset (page->is_edge_cutoff, 0, sizeof page->is_edge_cutoff);
510 /* Allocates and returns a new render_page for PARAMS and TABLE, initializing
511 cp[H] in the new page from ROWS and RULES. The caller must still initialize cp[V]. */
513 static struct render_page *
514 create_page_with_exact_widths (const struct render_params *params,
516 const struct render_row *rows, int *rules)
518 struct render_page *page = render_page_allocate (params, table);
519 accumulate_row_widths (page, H, rows, rules);
523 /* Allocates and returns a new render_page for PARAMS and TABLE.
525 Initializes cp[H] in the new page by setting the width of each row 'i' to
526 somewhere between the minimum cell width ROWS_MIN[i].width and the maximum
527 ROWS_MAX[i].width. Sets the width of rules to those in RULES.
529 W_MIN is the sum of ROWS_MIN[].width.
531 W_MAX is the sum of ROWS_MAX[].width.
533 The caller must still initialize cp[V]. */
534 static struct render_page *
535 create_page_with_interpolated_widths (const struct render_params *params,
537 const struct render_row *rows_min,
538 const struct render_row *rows_max,
539 int w_min, int w_max, const int *rules)
541 /* This implementation uses floating-point types and operators, but all the
542 values involved are integers. For integers smaller than 53 bits, this
543 should not lose any precision, and it should degrade gracefully for larger values. */
545 const int n = table->n[H];
546 const double avail = params->size[H] - w_min;
547 const double wanted = w_max - w_min;
548 struct render_page *page;
555 page = render_page_allocate (params, table);
559 w = (int) wanted / 2;
560 for (x = 0; x < n; x++)
564 w += avail * (rows_max[x].width - rows_min[x].width);
568 cph[1] = cph[0] + rules[x];
569 cph[2] = cph[1] + rows_min[x].width + extra;
572 cph[1] = cph[0] + rules[n];
574 assert (page->cp[H][n * 2 + 1] == params->size[H]);
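/* Illustrative example of the interpolation above (arbitrary numbers): with
   a 100-pixel page, w_min = 80 and w_max = 120, avail = 20 and wanted = 40,
   so each column receives half of its (max - min) slack, up to the integer
   rounding carried in 'w'; a column with minimum width 30 and maximum width
   50 comes out 40 pixels wide, and the final widths total exactly
   params->size[H], as the assertion above checks. */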
580 set_join_crossings (struct render_page *page, enum table_axis axis,
581 const struct table_cell *cell, int *rules)
585 for (z = cell->d[axis][0] + 1; z <= cell->d[axis][1] - 1; z++)
586 page->join_crossing[axis][z] = rules[z];
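/* For example (illustrative), a cell spanning columns 2 through 4, so that
   cell->d[H] = {2, 5}, crosses the rules at offsets 3 and 4; the loop above
   then records rules[3] and rules[4] in join_crossing[H][3] and
   join_crossing[H][4]. */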
589 /* Creates and returns a new render_page for rendering TABLE on a device described by PARAMS.
592 The new render_page will be suitable for rendering on a device whose page
593 size is PARAMS->size, but the caller is responsible for actually breaking it
594 up to fit on such a device, using the render_break abstraction. */
596 render_page_create (const struct render_params *params,
597 const struct table *table_)
599 struct render_page *page;
602 struct render_row *columns[2];
603 struct render_row *rows;
605 int *rules[TABLE_N_AXES];
609 enum table_axis axis;
611 table = table_ref (table_);
612 nc = table_nc (table);
613 nr = table_nr (table);
615 /* Figure out rule widths. */
616 for (axis = 0; axis < TABLE_N_AXES; axis++)
618 int n = table->n[axis] + 1;
621 rules[axis] = xnmalloc (n, sizeof *rules[axis]);
622 for (z = 0; z < n; z++)
623 rules[axis][z] = measure_rule (params, table, axis, z);
626 /* Calculate minimum and maximum widths of cells that do not
627 span multiple columns. */
628 for (i = 0; i < 2; i++)
629 columns[i] = xzalloc (nc * sizeof *columns[i]);
630 for (y = 0; y < nr; y++)
631 for (x = 0; x < nc; )
633 struct table_cell cell;
635 table_get_cell (table, x, y, &cell);
636 if (y == cell.d[V][0] && table_cell_colspan (&cell) == 1)
641 params->measure_cell_width (params->aux, &cell, &w[MIN], &w[MAX]);
642 for (i = 0; i < 2; i++)
643 if (columns[i][x].unspanned < w[i])
644 columns[i][x].unspanned = w[i];
647 table_cell_free (&cell);
650 /* Distribute widths of spanned columns. */
651 for (i = 0; i < 2; i++)
652 for (x = 0; x < nc; x++)
653 columns[i][x].width = columns[i][x].unspanned;
654 for (y = 0; y < nr; y++)
655 for (x = 0; x < nc; )
657 struct table_cell cell;
659 table_get_cell (table, x, y, &cell);
660 if (y == cell.d[V][0] && table_cell_colspan (&cell) > 1)
664 params->measure_cell_width (params->aux, &cell, &w[MIN], &w[MAX]);
665 for (i = 0; i < 2; i++)
666 distribute_spanned_width (w[i], &columns[i][cell.d[H][0]],
667 rules[H], table_cell_colspan (&cell));
670 table_cell_free (&cell);
673 /* Decide final column widths. */
674 for (i = 0; i < 2; i++)
675 table_widths[i] = calculate_table_width (table_nc (table),
676 columns[i], rules[H]);
677 if (table_widths[MAX] <= params->size[H])
679 /* Fits even with maximum widths. Use them. */
680 page = create_page_with_exact_widths (params, table, columns[MAX],
683 else if (table_widths[MIN] <= params->size[H])
685 /* Fits with minimum widths, so distribute the leftover space. */
686 page = create_page_with_interpolated_widths (
687 params, table, columns[MIN], columns[MAX],
688 table_widths[MIN], table_widths[MAX], rules[H]);
692 /* Doesn't fit even with minimum widths. Assign minimums for now, and
693 later we can break it horizontally into multiple pages. */
694 page = create_page_with_exact_widths (params, table, columns[MIN],
698 /* Calculate heights of cells that do not span multiple rows. */
699 rows = xzalloc (nr * sizeof *rows);
700 for (y = 0; y < nr; y++)
702 for (x = 0; x < nc; )
704 struct render_row *r = &rows[y];
705 struct table_cell cell;
707 table_get_cell (table, x, y, &cell);
708 if (y == cell.d[V][0])
710 if (table_cell_rowspan (&cell) == 1)
712 int w = joined_width (page, H, cell.d[H][0], cell.d[H][1]);
713 int h = params->measure_cell_height (params->aux, &cell, w);
714 if (h > r->unspanned)
715 r->unspanned = r->width = h;
718 set_join_crossings (page, V, &cell, rules[V]);
720 if (table_cell_colspan (&cell) > 1)
721 set_join_crossings (page, H, &cell, rules[H]);
724 table_cell_free (&cell);
727 for (i = 0; i < 2; i++)
730 /* Distribute heights of spanned rows. */
731 for (y = 0; y < nr; y++)
732 for (x = 0; x < nc; )
734 struct table_cell cell;
736 table_get_cell (table, x, y, &cell);
737 if (y == cell.d[V][0] && table_cell_rowspan (&cell) > 1)
739 int w = joined_width (page, H, cell.d[H][0], cell.d[H][1]);
740 int h = params->measure_cell_height (params->aux, &cell, w);
741 distribute_spanned_width (h, &rows[cell.d[V][0]], rules[V],
742 table_cell_rowspan (&cell));
745 table_cell_free (&cell);
748 /* Decide final row heights. */
749 accumulate_row_widths (page, V, rows, rules[V]);
752 /* Measure headers. If they are "too big", get rid of them. */
753 for (axis = 0; axis < TABLE_N_AXES; axis++)
755 int hw = headers_width (page, axis);
756 if (hw * 2 >= page->params->size[axis]
757 || hw + max_cell_width (page, axis) > page->params->size[axis])
759 page->table = table_unshare (page->table);
760 page->table->h[axis][0] = page->table->h[axis][1] = 0;
761 page->h[axis][0] = page->h[axis][1] = 0;
771 /* Increases PAGE's reference count. */
773 render_page_ref (const struct render_page *page_)
775 struct render_page *page = CONST_CAST (struct render_page *, page_);
780 /* Decreases PAGE's reference count and destroys PAGE if this causes the
781 reference count to fall to zero. */
783 render_page_unref (struct render_page *page)
785 if (page != NULL && --page->ref_cnt == 0)
788 struct render_overflow *overflow, *next;
790 HMAP_FOR_EACH_SAFE (overflow, next, struct render_overflow, node,
793 hmap_destroy (&page->overflows);
795 table_unref (page->table);
797 for (i = 0; i < TABLE_N_AXES; ++i)
799 free (page->join_crossing[i]);
807 /* Returns the size of PAGE along AXIS. (This might be larger than the page
808 size specified in the parameters passed to render_page_create(). Use a
809 render_break to break up a render_page into page-sized chunks.) */
811 render_page_get_size (const struct render_page *page, enum table_axis axis)
813 return page->cp[axis][page->n[axis] * 2 + 1];
816 /* Drawing render_pages. */
818 static inline enum render_line_style
819 get_rule (const struct render_page *page, enum table_axis axis,
820 const int d[TABLE_N_AXES])
822 return rule_to_render_type (table_get_rule (page->table,
823 axis, d[H] / 2, d[V] / 2));
833 render_rule (const struct render_page *page, const int d[TABLE_N_AXES])
835 enum render_line_style styles[TABLE_N_AXES][2];
838 for (a = 0; a < TABLE_N_AXES; a++)
840 enum table_axis b = !a;
842 styles[a][0] = styles[a][1] = RENDER_LINE_NONE;
845 || (page->is_edge_cutoff[a][0] && d[a] == 0)
846 || (page->is_edge_cutoff[a][1] && d[a] == page->n[a] * 2))
857 styles[a][0] = get_rule (page, a, e);
860 if (d[b] / 2 < page->table->n[b])
861 styles[a][1] = get_rule (page, a, d);
864 styles[a][0] = styles[a][1] = get_rule (page, a, d);
867 if (styles[H][0] != RENDER_LINE_NONE || styles[H][1] != RENDER_LINE_NONE
868 || styles[V][0] != RENDER_LINE_NONE || styles[V][1] != RENDER_LINE_NONE)
870 int bb[TABLE_N_AXES][2];
872 bb[H][0] = page->cp[H][d[H]];
873 bb[H][1] = page->cp[H][d[H] + 1];
874 bb[V][0] = page->cp[V][d[V]];
875 bb[V][1] = page->cp[V][d[V] + 1];
876 page->params->draw_line (page->params->aux, bb, styles);
881 render_cell (const struct render_page *page, const struct table_cell *cell)
883 const struct render_overflow *of;
884 int bb[TABLE_N_AXES][2];
885 int clip[TABLE_N_AXES][2];
887 bb[H][0] = clip[H][0] = page->cp[H][cell->d[H][0] * 2 + 1];
888 bb[H][1] = clip[H][1] = page->cp[H][cell->d[H][1] * 2];
889 bb[V][0] = clip[V][0] = page->cp[V][cell->d[V][0] * 2 + 1];
890 bb[V][1] = clip[V][1] = page->cp[V][cell->d[V][1] * 2];
892 of = find_overflow (page, cell->d[H][0], cell->d[V][0]);
895 enum table_axis axis;
897 for (axis = 0; axis < TABLE_N_AXES; axis++)
899 if (of->overflow[axis][0])
901 bb[axis][0] -= of->overflow[axis][0];
902 if (cell->d[axis][0] == 0)
903 clip[axis][0] = page->cp[axis][cell->d[axis][0] * 2];
905 if (of->overflow[axis][1])
907 bb[axis][1] += of->overflow[axis][1];
908 if (cell->d[axis][1] == page->n[axis])
909 clip[axis][1] = page->cp[axis][cell->d[axis][1] * 2 + 1];
914 page->params->draw_cell (page->params->aux, cell, bb, clip);
917 /* Draws the cells of PAGE indicated in BB. */
919 render_page_draw_cells (const struct render_page *page,
920 int bb[TABLE_N_AXES][2])
924 for (y = bb[V][0]; y < bb[V][1]; y++)
925 for (x = bb[H][0]; x < bb[H][1]; )
926 if (is_rule (x) || is_rule (y))
931 render_rule (page, d);
936 struct table_cell cell;
938 table_get_cell (page->table, x / 2, y / 2, &cell);
939 if (y == bb[V][0] || y / 2 == cell.d[V][0])
940 render_cell (page, &cell);
941 x = rule_ofs (cell.d[H][1]);
942 table_cell_free (&cell);
946 /* Renders PAGE, by calling the 'draw_line' and 'draw_cell' functions from the
947 render_params provided to render_page_create(). */
949 render_page_draw (const struct render_page *page)
951 int bb[TABLE_N_AXES][2];
954 bb[H][1] = page->n[H] * 2 + 1;
956 bb[V][1] = page->n[V] * 2 + 1;
958 render_page_draw_cells (page, bb);
961 /* Returns the greatest value i, 0 <= i < n, such that cp[i] <= x0. */
963 get_clip_min_extent (int x0, const int cp[], int n)
972 int middle = low + (high - low) / 2;
974 if (cp[middle] <= x0)
986 /* Returns the least value i, 0 <= i < n, such that cp[i] >= x1. */
988 get_clip_max_extent (int x1, const int cp[], int n)
997 int middle = low + (high - low) / 2;
999 if (cp[middle] >= x1)
1000 best = high = middle;
1008 /* Renders the cells of PAGE that intersect (X,Y)-(X+W,Y+H), by calling the
1009 'draw_line' and 'draw_cell' functions from the render_params provided to
1010 render_page_create(). */
1012 render_page_draw_region (const struct render_page *page,
1013 int x, int y, int w, int h)
1015 int bb[TABLE_N_AXES][2];
1017 bb[H][0] = get_clip_min_extent (x, page->cp[H], page->n[H] * 2 + 1);
1018 bb[H][1] = get_clip_max_extent (x + w, page->cp[H], page->n[H] * 2 + 1);
1019 bb[V][0] = get_clip_min_extent (y, page->cp[V], page->n[V] * 2 + 1);
1020 bb[V][1] = get_clip_max_extent (y + h, page->cp[V], page->n[V] * 2 + 1);
1022 render_page_draw_cells (page, bb);
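/* Example of the clipping searches used above (arbitrary cp[] contents): for
   a 3-column page with cp[H] = {0, 1, 21, 22, 42, 43, 63, 64}, drawing the
   region from x = 30 to x + w = 50 gives get_clip_min_extent (30, cp, 7) == 3
   (cp[3] = 22 is the greatest entry <= 30) and
   get_clip_max_extent (50, cp, 7) == 6 (cp[6] = 63 is the least entry >= 50),
   so bb[H] covers the middle cell, the rule to its right, and the rightmost
   cell, which are exactly the regions intersecting the request. */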
1025 /* Breaking up tables to fit on a page. */
1027 static int needed_size (const struct render_break *, int cell);
1028 static bool cell_is_breakable (const struct render_break *, int cell);
1029 static struct render_page *render_page_select (const struct render_page *,
1034 /* Initializes render_break B for breaking PAGE along AXIS.
1036 Ownership of PAGE is transferred to B. The caller must use
1037 render_page_ref() if it needs to keep a copy of PAGE. */
1039 render_break_init (struct render_break *b, struct render_page *page,
1040 enum table_axis axis)
1044 b->cell = page->h[axis][0];
1046 b->hw = headers_width (page, axis);
1049 /* Initializes B as a render_break structure for which
1050 render_break_has_next() always returns false. */
1052 render_break_init_empty (struct render_break *b)
1055 b->axis = TABLE_HORZ;
1061 /* Frees B and unrefs the render_page that it owns. */
1063 render_break_destroy (struct render_break *b)
1067 render_page_unref (b->page);
1072 /* Returns true if B still has cells that are yet to be returned,
1073 false if all of B's page has been processed. */
1075 render_break_has_next (const struct render_break *b)
1077 const struct render_page *page = b->page;
1078 enum table_axis axis = b->axis;
1080 return page != NULL && b->cell < page->n[axis] - page->h[axis][1];
1083 /* Returns the minimum SIZE argument that, if passed to render_break_next(),
1084 will avoid a null return value (if cells are still left). */
1086 render_break_next_size (const struct render_break *b)
1088 const struct render_page *page = b->page;
1089 enum table_axis axis = b->axis;
1091 return (!render_break_has_next (b) ? 0
1092 : !cell_is_breakable (b, b->cell) ? needed_size (b, b->cell + 1)
1093 : b->hw + page->params->font_size[axis]);
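/* In other words (restating the expression above): if the next cell may not
   be broken, the caller must supply room for the whole of that cell plus the
   headers; if it may be broken, the headers plus one font height is enough
   to make progress. */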
1096 /* Returns a new render_page that is up to SIZE pixels wide along B's axis.
1097 Returns a null pointer if B has already been completely broken up, or if
1098 SIZE is too small to reasonably render any cells. The latter will never
1099 happen if SIZE is at least as large as the page size passed to
1100 render_page_create() along B's axis. */
1101 struct render_page *
1102 render_break_next (struct render_break *b, int size)
1104 const struct render_page *page = b->page;
1105 enum table_axis axis = b->axis;
1106 struct render_page *subpage;
1109 if (!render_break_has_next (b))
1113 for (cell = b->cell; cell < page->n[axis] - page->h[axis][1]; cell++)
1114 if (needed_size (b, cell + 1) > size)
1116 if (!cell_is_breakable (b, cell))
1118 if (cell == b->cell)
1122 pixel = (cell == b->cell
1123 ? b->pixel + size - b->hw
1124 : size - needed_size (b, cell));
1128 subpage = render_page_select (page, axis, b->cell, b->pixel,
1129 pixel ? cell + 1 : cell,
1130 pixel ? cell_width (page, axis, cell) - pixel
1137 /* Returns the width that would be required along B's axis to render a page
1138 from B's current position up to but not including CELL. */
1140 needed_size (const struct render_break *b, int cell)
1142 const struct render_page *page = b->page;
1143 enum table_axis axis = b->axis;
1146 size = joined_width (page, axis, b->cell, cell) + b->hw - b->pixel;
1147 if (page->h[axis][0] && page->h[axis][1])
1148 size += page->join_crossing[axis][b->cell];
1153 /* Returns true if CELL along B's axis may be broken across a page boundary.
1155 This is just a heuristic. Breaking cells across page boundaries can save
1156 space, but it looks ugly. */
1158 cell_is_breakable (const struct render_break *b, int cell)
1160 const struct render_page *page = b->page;
1161 enum table_axis axis = b->axis;
1163 return cell_width (page, axis, cell) > page->params->size[axis] / 2;
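/* For instance (illustrative sizes), on a device whose page is 500 pixels
   wide, a 300-pixel cell counts as breakable while a 200-pixel cell does
   not; render_break_next() will split the former across pages if necessary
   but defers the latter whole to the following page. */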
1166 /* render_page_select() and helpers. */
1168 struct render_page_selection
1170 const struct render_page *page; /* Page whose slice we are selecting. */
1171 struct render_page *subpage; /* New page under construction. */
1172 enum table_axis a; /* Axis of 'page' along which 'subpage' is a slice. */
1173 enum table_axis b; /* The opposite of 'a'. */
1174 int z0; /* First cell along 'a' being selected. */
1175 int z1; /* Last cell being selected, plus 1. */
1176 int p0; /* Number of pixels to trim off left side of z0. */
1177 int p1; /* Number of pixels to trim off right side of z1-1. */
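/* For example (illustrative), a selection with z0 = 2, z1 = 5, p0 = 0 and
   p1 = 8 takes cells 2 through 4 of 'page' (plus any headers on axis 'a');
   the last of those cells is trimmed by 8 pixels on its right (or bottom)
   side, which is how a cell too large for one page gets split across
   successive subpages. */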
1180 static void cell_to_subpage (struct render_page_selection *,
1181 const struct table_cell *,
1182 int subcell[TABLE_N_AXES]);
1183 static const struct render_overflow *find_overflow_for_cell (
1184 struct render_page_selection *, const struct table_cell *);
1185 static struct render_overflow *insert_overflow (struct render_page_selection *,
1186 const struct table_cell *);
1188 /* Creates and returns a new render_page whose contents are a subregion of
1189 PAGE's contents. The new render_page includes cells Z0 through Z1 along
1190 AXIS, plus any headers on AXIS.
1192 If P0 is nonzero, then it is a number of pixels to exclude from the left or
1193 top (according to AXIS) of cell Z0. Similarly, P1 is a number of pixels to
1194 exclude from the right or bottom of cell Z1 - 1. (P0 and P1 are used to
1195 render cells that are too large to fit on a single page.)
1197 The whole of axis !AXIS is included. (The caller may follow up with another
1198 call to render_page_select() to select on that axis as well.)
1201 The caller retains ownership of PAGE, which is not modified. */
1202 static struct render_page *
1203 render_page_select (const struct render_page *page, enum table_axis axis,
1204 int z0, int p0, int z1, int p1)
1206 struct render_page_selection s;
1207 enum table_axis a = axis;
1208 enum table_axis b = !a;
1209 struct render_page *subpage;
1210 struct render_overflow *ro;
1216 /* Optimize case where all of PAGE is selected by just incrementing the reference count. */
1218 if (z0 == page->h[a][0] && p0 == 0
1219 && z1 == page->n[a] - page->h[a][1] && p1 == 0)
1221 struct render_page *page_rw = CONST_CAST (struct render_page *, page);
1226 /* Allocate subpage. */
1227 subpage = render_page_allocate (page->params,
1228 table_select_slice (
1229 table_ref (page->table),
1232 /* An edge is cut off if it was cut off in PAGE or if we're trimming pixels
1233 off that side of the page and there are no headers. */
1234 subpage->is_edge_cutoff[a][0] =
1235 subpage->h[a][0] == 0 && (p0 || (z0 == 0 && page->is_edge_cutoff[a][0]));
1236 subpage->is_edge_cutoff[a][1] =
1237 subpage->h[a][1] == 0 && (p1 || (z1 == page->n[a]
1238 && page->is_edge_cutoff[a][1]));
1239 subpage->is_edge_cutoff[b][0] = page->is_edge_cutoff[b][0];
1240 subpage->is_edge_cutoff[b][1] = page->is_edge_cutoff[b][1];
1242 /* Select join crossings from PAGE into subpage. */
1243 jc = subpage->join_crossing[a];
1244 for (z = 0; z < page->h[a][0]; z++)
1245 *jc++ = page->join_crossing[a][z];
1246 for (z = z0; z <= z1; z++)
1247 *jc++ = page->join_crossing[a][z];
1248 for (z = page->n[a] - page->h[a][1]; z < page->n[a]; z++)
1249 *jc++ = page->join_crossing[a][z];
1250 assert (jc == &subpage->join_crossing[a][subpage->n[a] + 1]);
1252 memcpy (subpage->join_crossing[b], page->join_crossing[b],
1253 (subpage->n[b] + 1) * sizeof **subpage->join_crossing);
1255 /* Select widths from PAGE into subpage. */
1257 dcp = subpage->cp[a];
1259 for (z = 0; z <= rule_ofs (subpage->h[a][0]); z++, dcp++)
1260 dcp[1] = dcp[0] + (scp[z + 1] - scp[z]);
1261 for (z = cell_ofs (z0); z <= cell_ofs (z1 - 1); z++, dcp++)
1263 dcp[1] = dcp[0] + (scp[z + 1] - scp[z]);
1264 if (z == cell_ofs (z0))
1267 if (page->h[a][0] && page->h[a][1])
1268 dcp[1] += page->join_crossing[a][z / 2];
1270 if (z == cell_ofs (z1 - 1))
1273 for (z = rule_ofs_r (page, a, subpage->h[a][1]);
1274 z <= rule_ofs_r (page, a, 0); z++, dcp++)
1275 dcp[1] = dcp[0] + (scp[z + 1] - scp[z]);
1276 assert (dcp == &subpage->cp[a][2 * subpage->n[a] + 1]);
1278 for (z = 0; z < page->n[b] * 2 + 2; z++)
1279 subpage->cp[b][z] = page->cp[b][z];
1281 /* Add new overflows. */
1289 s.subpage = subpage;
1291 for (z = 0; z < page->n[b]; z++)
1293 struct table_cell cell;
1294 int d[TABLE_N_AXES];
1298 table_get_cell (page->table, d[H], d[V], &cell);
1299 if ((z == cell.d[b][0] && (p0 || cell.d[a][0] < z0))
1300 || (z == cell.d[b][1] - 1 && p1))
1302 ro = insert_overflow (&s, &cell);
1303 ro->overflow[a][0] += p0 + axis_width (page, a,
1304 cell_ofs (cell.d[a][0]),
1307 ro->overflow[a][1] += p1;
1308 if (page->h[a][0] && page->h[a][1])
1309 ro->overflow[a][0] -= page->join_crossing[a][cell.d[a][0] + 1];
1310 if (cell.d[a][1] > z1)
1311 ro->overflow[a][1] += axis_width (page, a, cell_ofs (z1),
1312 cell_ofs (cell.d[a][1]));
1314 table_cell_free (&cell);
1317 for (z = 0; z < page->n[b]; z++)
1319 struct table_cell cell;
1320 int d[TABLE_N_AXES];
1322 /* XXX need to handle p1 below */
1325 table_get_cell (page->table, d[H], d[V], &cell);
1326 if (z == cell.d[b][0] && cell.d[a][1] > z1
1327 && find_overflow_for_cell (&s, &cell) == NULL)
1329 ro = insert_overflow (&s, &cell);
1330 ro->overflow[a][1] += axis_width (page, a, cell_ofs (z1),
1331 cell_ofs (cell.d[a][1]));
1333 table_cell_free (&cell);
1336 /* Copy overflows from PAGE into subpage. */
1337 HMAP_FOR_EACH (ro, struct render_overflow, node, &page->overflows)
1339 struct table_cell cell;
1341 table_get_cell (page->table, ro->d[H], ro->d[V], &cell);
1342 if (cell.d[a][1] > z0 && cell.d[a][0] < z1
1343 && find_overflow_for_cell (&s, &cell) == NULL)
1344 insert_overflow (&s, &cell);
1345 table_cell_free (&cell);
1351 /* Given CELL, a table_cell within S->page, stores in SUBCELL the (x,y)
1352 coordinates of the top-left cell as it will appear in S->subpage.
1354 CELL must actually intersect the region of S->page that is being selected
1355 by render_page_select() or the results will not make any sense. */
1357 cell_to_subpage (struct render_page_selection *s,
1358 const struct table_cell *cell, int subcell[TABLE_N_AXES])
1360 enum table_axis a = s->a;
1361 enum table_axis b = s->b;
1362 int ha0 = s->subpage->h[a][0];
1364 subcell[a] = MAX (cell->d[a][0] - s->z0 + ha0, ha0);
1365 subcell[b] = cell->d[b][0];
1368 /* Given CELL, a table_cell within S->page, returns the render_overflow for
1369 that cell in S->subpage, if there is one, and a null pointer otherwise.
1371 CELL must actually intersect the region of S->page that is being selected
1372 by render_page_select() or the results will not make any sense. */
1373 static const struct render_overflow *
1374 find_overflow_for_cell (struct render_page_selection *s,
1375 const struct table_cell *cell)
1379 cell_to_subpage (s, cell, subcell);
1380 return find_overflow (s->subpage, subcell[H], subcell[V]);
1383 /* Given CELL, a table_cell within S->page, inserts a render_overflow for that
1384 cell in S->subpage (which must not already exist). Initializes the new
1385 render_overflow's 'overflow' member from the overflow for CELL in S->page, if there is one.
1388 CELL must actually intersect the region of S->page that is being selected
1389 by render_page_select() or the results will not make any sense. */
1390 static struct render_overflow *
1391 insert_overflow (struct render_page_selection *s,
1392 const struct table_cell *cell)
1394 const struct render_overflow *old;
1395 struct render_overflow *of;
1397 of = xzalloc (sizeof *of);
1398 cell_to_subpage (s, cell, of->d);
1399 hmap_insert (&s->subpage->overflows, &of->node,
1400 hash_overflow (of->d[H], of->d[V]));
1402 old = find_overflow (s->page, cell->d[H][0], cell->d[V][0]);
1404 memcpy (of->overflow, old->overflow, sizeof of->overflow);