1 /* PSPP - a program for statistical analysis.
2 Copyright (C) 2009, 2010, 2011, 2013, 2014 Free Software Foundation, Inc.
4 This program is free software: you can redistribute it and/or modify
5 it under the terms of the GNU General Public License as published by
6 the Free Software Foundation, either version 3 of the License, or
7 (at your option) any later version.
9 This program is distributed in the hope that it will be useful,
10 but WITHOUT ANY WARRANTY; without even the implied warranty of
11 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 GNU General Public License for more details.
14 You should have received a copy of the GNU General Public License
15 along with this program. If not, see <http://www.gnu.org/licenses/>. */
#include <math.h>
#include <stdbool.h>
#include <stdlib.h>
#include <string.h>

#include "libpspp/assertion.h"
#include "libpspp/hash-functions.h"
#include "libpspp/hmap.h"
#include "output/render.h"
#include "output/table-item.h"
#include "output/table.h"

#include "gl/minmax.h"
#include "gl/xalloc.h"
/* This file uses TABLE_HORZ and TABLE_VERT enough to warrant abbreviating. */
#define H TABLE_HORZ
#define V TABLE_VERT
39 /* A layout for rendering a specific table on a specific device.
41 May represent the layout of an entire table presented to
42 render_page_create(), or a rectangular subregion of a table broken out using
43 render_break_next() to allow a table to be broken across multiple pages.
45 A page's size is not limited to the size passed in as part of render_params.
46 render_pager breaks a render_page into smaller render_pages that will fit in
47 the available space. */
50 const struct render_params *params; /* Parameters of the target device. */
51 struct table *table; /* Table rendered. */
54 /* Local copies of table->n and table->h, for convenience. */
56 int h[TABLE_N_AXES][2];
58 /* cp[H] represents x positions within the table.
60 cp[H][1] = the width of the leftmost vertical rule.
61 cp[H][2] = cp[H][1] + the width of the leftmost column.
62 cp[H][3] = cp[H][2] + the width of the second-from-left vertical rule.
64 cp[H][2 * nc] = x position of the rightmost vertical rule.
65 cp[H][2 * nc + 1] = total table width including all rules.
67 Similarly, cp[V] represents y positions within the table.
69 cp[V][1] = the height of the topmost horizontal rule.
70 cp[V][2] = cp[V][1] + the height of the topmost row.
71 cp[V][3] = cp[V][2] + the height of the second-from-top horizontal rule.
73 cp[V][2 * nr] = y position of the bottommost horizontal rule.
74 cp[V][2 * nr + 1] = total table height including all rules.
76 Rules and columns can have width or height 0, in which case consecutive
77 values in this array are equal. */
78 int *cp[TABLE_N_AXES];
80 /* render_break_next() can break a table such that some cells are not fully
81 contained within a render_page. This will happen if a cell is too wide
82 or two tall to fit on a single page, or if a cell spans multiple rows or
83 columns and the page only includes some of those rows or columns.
85 This hash table contains "struct render_overflow"s that represents each
86 such cell that doesn't completely fit on this page.
88 Each overflow cell borders at least one header edge of the table and may
89 border more. (A single table cell that is so large that it fills the
90 entire page can overflow on all four sides!) */
91 struct hmap overflows;
93 /* If a single column (or row) is too wide (or tall) to fit on a page
94 reasonably, then render_break_next() will split a single row or column
95 across multiple render_pages. This member indicates when this has
98 is_edge_cutoff[H][0] is true if pixels have been cut off the left side
99 of the leftmost column in this page, and false otherwise.
101 is_edge_cutoff[H][1] is true if pixels have been cut off the right side
102 of the rightmost column in this page, and false otherwise.
104 is_edge_cutoff[V][0] and is_edge_cutoff[V][1] are similar for the top
105 and bottom of the table.
107 The effect of is_edge_cutoff is to prevent rules along the edge in
108 question from being rendered.
110 When is_edge_cutoff is true for a given edge, the 'overflows' hmap will
111 contain a node for each cell along that edge. */
112 bool is_edge_cutoff[TABLE_N_AXES][2];
114 /* If part of a joined cell would be cut off by breaking a table along
115 'axis' at the rule with offset 'z' (where 0 <= z <= n[axis]), then
116 join_crossing[axis][z] is the thickness of the rule that would be cut
119 This is used to know to allocate extra space for breaking at such a
120 position, so that part of the cell's content is not lost.
122 This affects breaking a table only when headers are present. When
123 headers are not present, the rule's thickness is used for cell content,
124 so no part of the cell's content is lost (and in fact it is duplicated
125 across both pages). */
126 int *join_crossing[TABLE_N_AXES];
129 static struct render_page *render_page_create (const struct render_params *,
130 const struct table *);
132 struct render_page *render_page_ref (const struct render_page *page_);
133 static void render_page_unref (struct render_page *);
/* Returns the offset in struct render_page's cp[axis] array of the rule with
   index RULE_IDX.  That is, if RULE_IDX is 0, then the offset is that of the
   leftmost or topmost rule; if RULE_IDX is 1, then the offset is that of the
   next rule to the right (or below); and so on. */
static int
rule_ofs (int rule_idx)
{
  /* Rules live at even offsets of cp[]; cells live at odd offsets. */
  return rule_idx * 2;
}
145 /* Returns the offset in struct render_page's cp[axis] array of the rule with
146 index RULE_IDX_R, which counts from the right side (or bottom) of the page
147 left (or up), according to whether AXIS is H or V, respectively. That is,
148 if RULE_IDX_R is 0, then the offset is that of the rightmost or bottommost
149 rule; if RULE_IDX is 1, then the offset is that of the next rule to the left
150 (or above); and so on. */
152 rule_ofs_r (const struct render_page *page, int axis, int rule_idx_r)
154 return (page->n[axis] - rule_idx_r) * 2;
/* Returns the offset in struct render_page's cp[axis] array of the cell with
   index CELL_IDX.  That is, if CELL_IDX is 0, then the offset is that of the
   leftmost or topmost cell; if CELL_IDX is 1, then the offset is that of the
   next cell to the right (or below); and so on. */
static int
cell_ofs (int cell_idx)
{
  return cell_idx * 2 + 1;
}
167 /* Returns the width of PAGE along AXIS from OFS0 to OFS1, exclusive. */
169 axis_width (const struct render_page *page, int axis, int ofs0, int ofs1)
171 return page->cp[axis][ofs1] - page->cp[axis][ofs0];
174 /* Returns the width of the headers in PAGE along AXIS. */
176 headers_width (const struct render_page *page, int axis)
178 int h0 = page->h[axis][0];
179 int w0 = axis_width (page, axis, rule_ofs (0), cell_ofs (h0));
180 int n = page->n[axis];
181 int h1 = page->h[axis][1];
182 int w1 = axis_width (page, axis, rule_ofs_r (page, axis, h1), cell_ofs (n));
/* Returns the width of cell X along AXIS in PAGE. */
static int
cell_width (const struct render_page *page, int axis, int x)
{
  return axis_width (page, axis, cell_ofs (x), cell_ofs (x) + 1);
}
/* Returns the width of rule X along AXIS in PAGE. */
static int
rule_width (const struct render_page *page, int axis, int x)
{
  return axis_width (page, axis, rule_ofs (x), rule_ofs (x) + 1);
}
/* Returns the width of rule X along AXIS in PAGE, where X counts from the
   right side (or bottom) of the page, as in rule_ofs_r(). */
static int
rule_width_r (const struct render_page *page, int axis, int x)
{
  int ofs = rule_ofs_r (page, axis, x);
  return axis_width (page, axis, ofs, ofs + 1);
}
/* Returns the width of cells X0 through X1, exclusive, along AXIS in PAGE,
   including the rules between them (but not the rules on the outside). */
static int
joined_width (const struct render_page *page, int axis, int x0, int x1)
{
  return axis_width (page, axis, cell_ofs (x0), cell_ofs (x1) - 1);
}
215 /* Returns the width of the widest cell, excluding headers, along AXIS in
218 max_cell_width (const struct render_page *page, int axis)
220 int n = page->n[axis];
221 int x0 = page->h[axis][0];
222 int x1 = n - page->h[axis][1];
226 for (x = x0; x < x1; x++)
228 int w = cell_width (page, axis, x);
235 /* A cell that doesn't completely fit on the render_page. */
236 struct render_overflow
238 struct hmap_node node; /* In render_page's 'overflows' hmap. */
240 /* Occupied region of page.
242 d[H][0] is the leftmost column.
243 d[H][1] is the rightmost column, plus 1.
244 d[V][0] is the top row.
245 d[V][1] is the bottom row, plus 1.
247 The cell in its original table might occupy a larger region. This
248 member reflects the size of the cell in the current render_page, after
249 trimming off any rows or columns due to page-breaking. */
252 /* The space that has been trimmed off the cell:
254 overflow[H][0]: space trimmed off its left side.
255 overflow[H][1]: space trimmed off its right side.
256 overflow[V][0]: space trimmed off its top.
257 overflow[V][1]: space trimmed off its bottom.
259 During rendering, this information is used to position the rendered
260 portion of the cell within the available space.
262 When a cell is rendered, sometimes it is permitted to spill over into
263 space that is ordinarily reserved for rules. Either way, this space is
264 still included in overflow values.
266 Suppose, for example, that a cell that joins 2 columns has a width of 60
267 pixels and content "abcdef", that the 2 columns that it joins have
268 widths of 20 and 30 pixels, respectively, and that therefore the rule
269 between the two joined columns has a width of 10 (20 + 10 + 30 = 60).
270 It might render like this, if each character is 10x10, and showing a few
271 extra table cells for context:
279 If this render_page is broken at the rule that separates "gh" from
280 "ijk", then the page that contains the left side of the "abcdef" cell
281 will have overflow[H][1] of 10 + 30 = 40 for its portion of the cell,
282 and the page that contains the right side of the cell will have
283 overflow[H][0] of 20 + 10 = 30. The two resulting pages would look like
301 int overflow[TABLE_N_AXES][2];
/* Returns a hash value for (X,Y). */
static unsigned int
hash_overflow (int x, int y)
{
  return hash_int (x + (y << 16), 0);
}
311 /* Searches PAGE's set of render_overflow for one whose top-left cell is
312 (X,Y). Returns it, if there is one, otherwise a null pointer. */
313 static const struct render_overflow *
314 find_overflow (const struct render_page *page, int x, int y)
316 if (!hmap_is_empty (&page->overflows))
318 const struct render_overflow *of;
320 HMAP_FOR_EACH_WITH_HASH (of, struct render_overflow, node,
321 hash_overflow (x, y), &page->overflows)
322 if (x == of->d[H] && y == of->d[V])
/* Row or column dimensions.  Used to figure the size of a table in
   render_page_create() and discarded after that. */
struct render_row
  {
    /* Width without considering rows (or columns) that span more than one row
       (or column). */
    int unspanned;

    /* Width taking spanned rows (or columns) into consideration. */
    int width;
  };
341 /* Modifies the 'width' members of the N elements of ROWS so that their sum,
342 when added to rule widths RULES[1] through RULES[N - 1] inclusive, is at
345 distribute_spanned_width (int width,
346 struct render_row *rows, const int *rules, int n)
352 /* Sum up the unspanned widths of the N rows for use as weights. */
354 for (x = 0; x < n; x++)
355 total_unspanned += rows[x].unspanned;
356 for (x = 0; x < n - 1; x++)
357 total_unspanned += rules[x + 1];
358 if (total_unspanned >= width)
361 /* The algorithm used here is based on the following description from HTML 4:
363 For cells that span multiple columns, a simple approach consists of
364 apportioning the min/max widths evenly to each of the constituent
365 columns. A slightly more complex approach is to use the min/max
366 widths of unspanned cells to weight how spanned widths are
367 apportioned. Experiments suggest that a blend of the two approaches
368 gives good results for a wide range of tables.
370 We blend the two approaches half-and-half, except that we cannot use the
371 unspanned weights when 'total_unspanned' is 0 (because that would cause a
374 This implementation uses floating-point types and operators, but all the
375 values involved are integers. For integers smaller than 53 bits, this
376 should not lose any precision, and it should degrade gracefully for larger
379 The calculation we want to do is this:
382 w1 = width * (column's unspanned width) / (total unspanned width)
383 (column's width) = (w0 + w1) / 2
385 We implement it as a precise calculation in integers by multiplying w0 and
386 w1 by the common denominator of all three calculations (d), dividing that
387 out in the column width calculation, and then keeping the remainder for
390 (We actually compute the unspanned width of a column as twice the
391 unspanned width, plus the width of the rule on the left, plus the width of
392 the rule on the right. That way each rule contributes to both the cell on
393 its left and on its right.)
396 d1 = 2.0 * (total_unspanned > 0 ? total_unspanned : 1.0);
398 if (total_unspanned > 0)
401 for (x = 0; x < n; x++)
404 if (total_unspanned > 0)
406 double unspanned = rows[x].unspanned * 2.0;
408 unspanned += rules[x + 1];
410 unspanned += rules[x];
411 w += width * unspanned * d0;
414 rows[x].width = MAX (rows[x].width, w / d);
415 w -= rows[x].width * d;
419 /* Initializes PAGE->cp[AXIS] from the row widths in ROWS and the rule widths
422 accumulate_row_widths (const struct render_page *page, enum table_axis axis,
423 const struct render_row *rows, const int *rules)
425 int n = page->n[axis];
431 for (z = 0; z < n; z++)
433 cp[1] = cp[0] + rules[z];
434 cp[2] = cp[1] + rows[z].width;
437 cp[1] = cp[0] + rules[n];
440 /* Returns the sum of widths of the N ROWS and N+1 RULES. */
442 calculate_table_width (int n, const struct render_row *rows, int *rules)
448 for (x = 0; x < n; x++)
449 width += rows[x].width;
450 for (x = 0; x <= n; x++)
456 /* Rendering utility functions. */
458 /* Returns the line style to use for drawing a rule of the given TYPE. */
459 static enum render_line_style
460 rule_to_render_type (unsigned char type)
466 return RENDER_LINE_NONE;
468 return RENDER_LINE_SINGLE;
470 return RENDER_LINE_DOUBLE;
476 /* Returns the width of the rule in TABLE that is at offset Z along axis A, if
477 rendered with PARAMS. */
479 measure_rule (const struct render_params *params, const struct table *table,
480 enum table_axis a, int z)
482 enum table_axis b = !a;
487 /* Determine all types of rules that are present, as a bitmap in 'rules'
488 where rule type 't' is present if bit 2**t is set. */
491 for (d[b] = 0; d[b] < table->n[b]; d[b]++)
492 rules |= 1u << table_get_rule (table, a, d[H], d[V]);
494 /* Calculate maximum width of the rules that are present. */
496 if (rules & (1u << TAL_1)
497 || (z > 0 && z < table->n[a] && rules & (1u << TAL_GAP)))
498 width = params->line_widths[a][RENDER_LINE_SINGLE];
499 if (rules & (1u << TAL_2))
500 width = MAX (width, params->line_widths[a][RENDER_LINE_DOUBLE]);
504 /* Allocates and returns a new render_page using PARAMS and TABLE. Allocates
505 space for all of the members of the new page, but the caller must initialize
506 the 'cp' member itself. */
507 static struct render_page *
508 render_page_allocate (const struct render_params *params,
511 struct render_page *page;
514 page = xmalloc (sizeof *page);
515 page->params = params;
518 page->n[H] = table->n[H];
519 page->n[V] = table->n[V];
520 page->h[H][0] = table->h[H][0];
521 page->h[H][1] = table->h[H][1];
522 page->h[V][0] = table->h[V][0];
523 page->h[V][1] = table->h[V][1];
525 for (i = 0; i < TABLE_N_AXES; i++)
527 page->cp[i] = xmalloc ((2 * page->n[i] + 2) * sizeof *page->cp[i]);
528 page->join_crossing[i] = xzalloc ((page->n[i] + 1) * sizeof *page->join_crossing[i]);
531 hmap_init (&page->overflows);
532 memset (page->is_edge_cutoff, 0, sizeof page->is_edge_cutoff);
537 /* Allocates and returns a new render_page for PARAMS and TABLE, initializing
538 cp[H] in the new page from ROWS and RULES. The caller must still initialize
540 static struct render_page *
541 create_page_with_exact_widths (const struct render_params *params,
543 const struct render_row *rows, int *rules)
545 struct render_page *page = render_page_allocate (params, table);
546 accumulate_row_widths (page, H, rows, rules);
550 /* Allocates and returns a new render_page for PARAMS and TABLE.
552 Initializes cp[H] in the new page by setting the width of each row 'i' to
553 somewhere between the minimum cell width ROW_MIN[i].width and the maximum
554 ROW_MAX[i].width. Sets the width of rules to those in RULES.
556 W_MIN is the sum of ROWS_MIN[].width.
558 W_MAX is the sum of ROWS_MAX[].width.
560 The caller must still initialize cp[V]. */
561 static struct render_page *
562 create_page_with_interpolated_widths (const struct render_params *params,
564 const struct render_row *rows_min,
565 const struct render_row *rows_max,
566 int w_min, int w_max, const int *rules)
568 /* This implementation uses floating-point types and operators, but all the
569 values involved are integers. For integers smaller than 53 bits, this
570 should not lose any precision, and it should degrade gracefully for larger
572 const int n = table->n[H];
573 const double avail = params->size[H] - w_min;
574 const double wanted = w_max - w_min;
575 struct render_page *page;
582 page = render_page_allocate (params, table);
586 w = (int) wanted / 2;
587 for (x = 0; x < n; x++)
591 w += avail * (rows_max[x].width - rows_min[x].width);
595 cph[1] = cph[0] + rules[x];
596 cph[2] = cph[1] + rows_min[x].width + extra;
599 cph[1] = cph[0] + rules[n];
601 assert (page->cp[H][n * 2 + 1] == params->size[H]);
607 set_join_crossings (struct render_page *page, enum table_axis axis,
608 const struct table_cell *cell, int *rules)
612 for (z = cell->d[axis][0] + 1; z <= cell->d[axis][1] - 1; z++)
613 page->join_crossing[axis][z] = rules[z];
616 /* Creates and returns a new render_page for rendering TABLE on a device
619 The new render_page will be suitable for rendering on a device whose page
620 size is PARAMS->size, but the caller is responsible for actually breaking it
621 up to fit on such a device, using the render_break abstraction. */
622 static struct render_page *
623 render_page_create (const struct render_params *params,
624 const struct table *table_)
626 struct render_page *page;
629 struct render_row *columns[2];
630 struct render_row *rows;
632 int *rules[TABLE_N_AXES];
636 enum table_axis axis;
638 table = table_ref (table_);
639 nc = table_nc (table);
640 nr = table_nr (table);
642 /* Figure out rule widths. */
643 for (axis = 0; axis < TABLE_N_AXES; axis++)
645 int n = table->n[axis] + 1;
648 rules[axis] = xnmalloc (n, sizeof *rules);
649 for (z = 0; z < n; z++)
650 rules[axis][z] = measure_rule (params, table, axis, z);
653 /* Calculate minimum and maximum widths of cells that do not
654 span multiple columns. */
655 for (i = 0; i < 2; i++)
656 columns[i] = xzalloc (nc * sizeof *columns[i]);
657 for (y = 0; y < nr; y++)
658 for (x = 0; x < nc; )
660 struct table_cell cell;
662 table_get_cell (table, x, y, &cell);
663 if (y == cell.d[V][0] && table_cell_colspan (&cell) == 1)
668 params->measure_cell_width (params->aux, &cell, &w[MIN], &w[MAX]);
669 for (i = 0; i < 2; i++)
670 if (columns[i][x].unspanned < w[i])
671 columns[i][x].unspanned = w[i];
674 table_cell_free (&cell);
677 /* Distribute widths of spanned columns. */
678 for (i = 0; i < 2; i++)
679 for (x = 0; x < nc; x++)
680 columns[i][x].width = columns[i][x].unspanned;
681 for (y = 0; y < nr; y++)
682 for (x = 0; x < nc; )
684 struct table_cell cell;
686 table_get_cell (table, x, y, &cell);
687 if (y == cell.d[V][0] && table_cell_colspan (&cell) > 1)
691 params->measure_cell_width (params->aux, &cell, &w[MIN], &w[MAX]);
692 for (i = 0; i < 2; i++)
693 distribute_spanned_width (w[i], &columns[i][cell.d[H][0]],
694 rules[H], table_cell_colspan (&cell));
697 table_cell_free (&cell);
700 /* Decide final column widths. */
701 for (i = 0; i < 2; i++)
702 table_widths[i] = calculate_table_width (table_nc (table),
703 columns[i], rules[H]);
704 if (table_widths[MAX] <= params->size[H])
706 /* Fits even with maximum widths. Use them. */
707 page = create_page_with_exact_widths (params, table, columns[MAX],
710 else if (table_widths[MIN] <= params->size[H])
712 /* Fits with minimum widths, so distribute the leftover space. */
713 page = create_page_with_interpolated_widths (
714 params, table, columns[MIN], columns[MAX],
715 table_widths[MIN], table_widths[MAX], rules[H]);
719 /* Doesn't fit even with minimum widths. Assign minimums for now, and
720 later we can break it horizontally into multiple pages. */
721 page = create_page_with_exact_widths (params, table, columns[MIN],
725 /* Calculate heights of cells that do not span multiple rows. */
726 rows = xzalloc (nr * sizeof *rows);
727 for (y = 0; y < nr; y++)
729 for (x = 0; x < nc; )
731 struct render_row *r = &rows[y];
732 struct table_cell cell;
734 table_get_cell (table, x, y, &cell);
735 if (y == cell.d[V][0])
737 if (table_cell_rowspan (&cell) == 1)
739 int w = joined_width (page, H, cell.d[H][0], cell.d[H][1]);
740 int h = params->measure_cell_height (params->aux, &cell, w);
741 if (h > r->unspanned)
742 r->unspanned = r->width = h;
745 set_join_crossings (page, V, &cell, rules[V]);
747 if (table_cell_colspan (&cell) > 1)
748 set_join_crossings (page, H, &cell, rules[H]);
751 table_cell_free (&cell);
754 for (i = 0; i < 2; i++)
757 /* Distribute heights of spanned rows. */
758 for (y = 0; y < nr; y++)
759 for (x = 0; x < nc; )
761 struct table_cell cell;
763 table_get_cell (table, x, y, &cell);
764 if (y == cell.d[V][0] && table_cell_rowspan (&cell) > 1)
766 int w = joined_width (page, H, cell.d[H][0], cell.d[H][1]);
767 int h = params->measure_cell_height (params->aux, &cell, w);
768 distribute_spanned_width (h, &rows[cell.d[V][0]], rules[V],
769 table_cell_rowspan (&cell));
772 table_cell_free (&cell);
775 /* Decide final row heights. */
776 accumulate_row_widths (page, V, rows, rules[V]);
779 /* Measure headers. If they are "too big", get rid of them. */
780 for (axis = 0; axis < TABLE_N_AXES; axis++)
782 int hw = headers_width (page, axis);
783 if (hw * 2 >= page->params->size[axis]
784 || hw + max_cell_width (page, axis) > page->params->size[axis])
786 page->table = table_unshare (page->table);
787 page->table->h[axis][0] = page->table->h[axis][1] = 0;
788 page->h[axis][0] = page->h[axis][1] = 0;
798 /* Increases PAGE's reference count. */
800 render_page_ref (const struct render_page *page_)
802 struct render_page *page = CONST_CAST (struct render_page *, page_);
807 /* Decreases PAGE's reference count and destroys PAGE if this causes the
808 reference count to fall to zero. */
810 render_page_unref (struct render_page *page)
812 if (page != NULL && --page->ref_cnt == 0)
815 struct render_overflow *overflow, *next;
817 HMAP_FOR_EACH_SAFE (overflow, next, struct render_overflow, node,
820 hmap_destroy (&page->overflows);
822 table_unref (page->table);
824 for (i = 0; i < TABLE_N_AXES; ++i)
826 free (page->join_crossing[i]);
834 /* Returns the size of PAGE along AXIS. (This might be larger than the page
835 size specified in the parameters passed to render_page_create(). Use a
836 render_break to break up a render_page into page-sized chunks.) */
838 render_page_get_size (const struct render_page *page, enum table_axis axis)
840 return page->cp[axis][page->n[axis] * 2 + 1];
844 render_page_get_best_breakpoint (const struct render_page *page, int height)
848 /* If there's no room for at least the top row and the rules above and below
849 it, don't include any of the table. */
850 if (page->cp[V][3] > height)
853 /* Otherwise include as many rows and rules as we can. */
854 for (y = 5; y <= 2 * page->n[V] + 1; y += 2)
855 if (page->cp[V][y] > height)
856 return page->cp[V][y - 2];
860 /* Drawing render_pages. */
862 static inline enum render_line_style
863 get_rule (const struct render_page *page, enum table_axis axis,
864 const int d[TABLE_N_AXES])
866 return rule_to_render_type (table_get_rule (page->table,
867 axis, d[H] / 2, d[V] / 2));
877 render_rule (const struct render_page *page, const int ofs[TABLE_N_AXES],
878 const int d[TABLE_N_AXES])
880 enum render_line_style styles[TABLE_N_AXES][2];
883 for (a = 0; a < TABLE_N_AXES; a++)
885 enum table_axis b = !a;
887 styles[a][0] = styles[a][1] = RENDER_LINE_NONE;
890 || (page->is_edge_cutoff[a][0] && d[a] == 0)
891 || (page->is_edge_cutoff[a][1] && d[a] == page->n[a] * 2))
902 styles[a][0] = get_rule (page, a, e);
905 if (d[b] / 2 < page->table->n[b])
906 styles[a][1] = get_rule (page, a, d);
909 styles[a][0] = styles[a][1] = get_rule (page, a, d);
912 if (styles[H][0] != RENDER_LINE_NONE || styles[H][1] != RENDER_LINE_NONE
913 || styles[V][0] != RENDER_LINE_NONE || styles[V][1] != RENDER_LINE_NONE)
915 int bb[TABLE_N_AXES][2];
917 bb[H][0] = ofs[H] + page->cp[H][d[H]];
918 bb[H][1] = ofs[H] + page->cp[H][d[H] + 1];
919 bb[V][0] = ofs[V] + page->cp[V][d[V]];
920 bb[V][1] = ofs[V] + page->cp[V][d[V] + 1];
921 page->params->draw_line (page->params->aux, bb, styles);
926 render_cell (const struct render_page *page, const int ofs[TABLE_N_AXES],
927 const struct table_cell *cell)
929 const struct render_overflow *of;
930 int bb[TABLE_N_AXES][2];
931 int clip[TABLE_N_AXES][2];
933 bb[H][0] = clip[H][0] = ofs[H] + page->cp[H][cell->d[H][0] * 2 + 1];
934 bb[H][1] = clip[H][1] = ofs[H] + page->cp[H][cell->d[H][1] * 2];
935 bb[V][0] = clip[V][0] = ofs[V] + page->cp[V][cell->d[V][0] * 2 + 1];
936 bb[V][1] = clip[V][1] = ofs[V] + page->cp[V][cell->d[V][1] * 2];
938 of = find_overflow (page, cell->d[H][0], cell->d[V][0]);
941 enum table_axis axis;
943 for (axis = 0; axis < TABLE_N_AXES; axis++)
945 if (of->overflow[axis][0])
947 bb[axis][0] -= of->overflow[axis][0];
948 if (cell->d[axis][0] == 0 && !page->is_edge_cutoff[axis][0])
949 clip[axis][0] = ofs[axis] + page->cp[axis][cell->d[axis][0] * 2];
951 if (of->overflow[axis][1])
953 bb[axis][1] += of->overflow[axis][1];
954 if (cell->d[axis][1] == page->n[axis] && !page->is_edge_cutoff[axis][1])
955 clip[axis][1] = ofs[axis] + page->cp[axis][cell->d[axis][1] * 2 + 1];
960 page->params->draw_cell (page->params->aux, cell, bb, clip);
963 /* Draws the cells of PAGE indicated in BB. */
965 render_page_draw_cells (const struct render_page *page,
966 int ofs[TABLE_N_AXES], int bb[TABLE_N_AXES][2])
970 for (y = bb[V][0]; y < bb[V][1]; y++)
971 for (x = bb[H][0]; x < bb[H][1]; )
972 if (is_rule (x) || is_rule (y))
977 render_rule (page, ofs, d);
982 struct table_cell cell;
984 table_get_cell (page->table, x / 2, y / 2, &cell);
985 if (y / 2 == bb[V][0] / 2 || y / 2 == cell.d[V][0])
986 render_cell (page, ofs, &cell);
987 x = rule_ofs (cell.d[H][1]);
988 table_cell_free (&cell);
992 /* Renders PAGE, by calling the 'draw_line' and 'draw_cell' functions from the
993 render_params provided to render_page_create(). */
995 render_page_draw (const struct render_page *page, int ofs[TABLE_N_AXES])
997 int bb[TABLE_N_AXES][2];
1000 bb[H][1] = page->n[H] * 2 + 1;
1002 bb[V][1] = page->n[V] * 2 + 1;
1004 render_page_draw_cells (page, ofs, bb);
/* Returns the greatest value i, 0 <= i < n, such that cp[i] <= x0.
   CP[] must be sorted in nondecreasing order; returns 0 when no entry
   satisfies the condition. */
static int
get_clip_min_extent (int x0, const int cp[], int n)
{
  int low, high, best;

  low = 0;
  high = n;
  best = 0;
  while (low < high)
    {
      int middle = low + (high - low) / 2;

      if (cp[middle] <= x0)
        {
          best = middle;
          low = middle + 1;
        }
      else
        high = middle;
    }

  return best;
}
/* Returns the least value i, 0 <= i < n, such that cp[i] >= x1.
   CP[] must be sorted in nondecreasing order.  When several consecutive
   entries are equal (zero-width rules or cells), the earliest one is
   returned. */
static int
get_clip_max_extent (int x1, const int cp[], int n)
{
  int low, high, best;

  low = 0;
  high = n;
  best = n;
  while (low < high)
    {
      int middle = low + (high - low) / 2;

      if (cp[middle] >= x1)
        best = high = middle;
      else
        low = middle + 1;
    }

  /* Back up over zero-width entries so we do not clip in the middle of a
     run of equal positions. */
  while (best > 0 && cp[best - 1] == cp[best])
    best--;

  return best;
}
1057 /* Renders the cells of PAGE that intersect (X,Y)-(X+W,Y+H), by calling the
1058 'draw_line' and 'draw_cell' functions from the render_params provided to
1059 render_page_create(). */
1061 render_page_draw_region (const struct render_page *page,
1062 int ofs[TABLE_N_AXES], int clip[TABLE_N_AXES][2])
1064 int bb[TABLE_N_AXES][2];
1066 bb[H][0] = get_clip_min_extent (clip[H][0], page->cp[H], page->n[H] * 2 + 1);
1067 bb[H][1] = get_clip_max_extent (clip[H][1], page->cp[H], page->n[H] * 2 + 1);
1068 bb[V][0] = get_clip_min_extent (clip[V][0], page->cp[V], page->n[V] * 2 + 1);
1069 bb[V][1] = get_clip_max_extent (clip[V][1], page->cp[V], page->n[V] * 2 + 1);
1071 render_page_draw_cells (page, ofs, bb);
1074 /* Breaking up tables to fit on a page. */
1076 /* An iterator for breaking render_pages into smaller chunks. */
1079 struct render_page *page; /* Page being broken up. */
1080 enum table_axis axis; /* Axis along which 'page' is being broken. */
1081 int z; /* Next cell along 'axis'. */
1082 int pixel; /* Pixel offset within cell 'z' (usually 0). */
1083 int hw; /* Width of headers of 'page' along 'axis'. */
1086 static int needed_size (const struct render_break *, int cell);
1087 static bool cell_is_breakable (const struct render_break *, int cell);
1088 static struct render_page *render_page_select (const struct render_page *,
1093 /* Initializes render_break B for breaking PAGE along AXIS. */
1095 render_break_init (struct render_break *b, const struct render_page *page,
1096 enum table_axis axis)
1098 b->page = render_page_ref (page);
1100 b->z = page->h[axis][0];
1102 b->hw = headers_width (page, axis);
1105 /* Initializes B as a render_break structure for which
1106 render_break_has_next() always returns false. */
1108 render_break_init_empty (struct render_break *b)
1111 b->axis = TABLE_HORZ;
1117 /* Frees B and unrefs the render_page that it owns. */
1119 render_break_destroy (struct render_break *b)
1123 render_page_unref (b->page);
1128 /* Returns true if B still has cells that are yet to be returned,
1129 false if all of B's page has been processed. */
1131 render_break_has_next (const struct render_break *b)
1133 const struct render_page *page = b->page;
1134 enum table_axis axis = b->axis;
1136 return page != NULL && b->z < page->n[axis] - page->h[axis][1];
1139 /* Returns a new render_page that is up to SIZE pixels wide along B's axis.
1140 Returns a null pointer if B has already been completely broken up, or if
1141 SIZE is too small to reasonably render any cells. The latter will never
1142 happen if SIZE is at least as large as the page size passed to
1143 render_page_create() along B's axis. */
/* NOTE(review): extraction dropped interior lines of this function (braces,
   some declarations and returns).  Comments below annotate only the visible
   fragments; confirm against the full source before editing. */
1144 static struct render_page *
1145 render_break_next (struct render_break *b, int size)
1147 const struct render_page *page = b->page;
1148 enum table_axis axis = b->axis;
1149 struct render_page *subpage;
/* Nothing left in this break: bail out (dropped line presumably returns
   NULL here — TODO confirm). */
1152 if (!render_break_has_next (b))
/* Accumulate body cells starting at b->z until the next cell would no
   longer fit in SIZE. */
1156 for (z = b->z; z < page->n[axis] - page->h[axis][1]; z++)
/* Total width needed to render cells [b->z, z] plus headers. */
1158 int needed = needed_size (b, z + 1);
/* Cell z does not fit whole; consider splitting it across the page edge. */
1161 if (cell_is_breakable (b, z))
1163 /* If there is no right header and we render a partial cell on
1164 the right side of the body, then we omit the rightmost rule of
1165 the body. Otherwise the rendering is deceptive because it
1166 looks like the whole cell is present instead of a partial
1169 This is similar to code for the left side in needed_size(). */
1170 int rule_allowance = (page->h[axis][1]
1172 : rule_width (page, axis, z));
1174 /* The amount that, if we added cell 'z', the rendering would
1175 overfill the allocated 'size'. */
1176 int overhang = needed - size - rule_allowance;
1178 /* The width of cell 'z'. */
1179 int cell_size = cell_width (page, axis, z);
1181 /* The amount trimmed off the left side of 'z',
1182 and the amount left to render. */
1183 int cell_ofs = z == b->z ? b->pixel : 0;
1184 int cell_left = cell_size - cell_ofs;
1186 /* A small but visible width. */
1187 int em = page->params->font_size[axis];
1189 /* If some of the cell remains to render,
1190 and there would still be some of the cell left afterward,
1191 then partially render that much of the cell. */
1192 pixel = (cell_left && cell_left > overhang
1193 ? cell_left - overhang + cell_ofs
1196 /* If there would be only a tiny amount of the cell left after
1197 rendering it partially, reduce the amount rendered slightly
1198 to make the output look a little better. */
1199 if (pixel + em > cell_size)
1200 pixel = MAX (pixel - em, 0);
1202 /* If we're breaking vertically, then consider whether the cells
1203 being broken have a better internal breakpoint than the exact
1204 number of pixels available, which might look bad e.g. because
1205 it breaks in the middle of a line of text. */
1206 if (axis == TABLE_VERT && page->params->adjust_break)
/* Walk the cells crossing row z; x advances by cell span (dropped
   line — TODO confirm). */
1210 for (x = 0; x < page->n[H]; )
1212 struct table_cell cell;
1216 table_get_cell (page->table, x, z, &cell);
1217 w = joined_width (page, H, cell.d[H][0], cell.d[H][1]);
/* Let the device pick a nicer break point at or below 'pixel'
   (e.g. at a text line boundary). */
1218 better_pixel = page->params->adjust_break (
1219 page->params->aux, &cell, w, pixel);
1221 table_cell_free (&cell);
1223 if (better_pixel < pixel)
/* Only accept a break strictly past what was already rendered. */
1225 if (better_pixel > (z == b->z ? b->pixel : 0))
1227 pixel = better_pixel;
1230 else if (better_pixel == 0 && z != b->z)
/* Nothing at all fits: cannot make progress (dropped line presumably
   returns NULL — TODO confirm). */
1243 if (z == b->z && !pixel)
/* Carve the chosen slice [b->z, z) (plus a partial cell of 'pixel'
   pixels when nonzero) out of PAGE. */
1246 subpage = render_page_select (page, axis, b->z, b->pixel,
1248 pixel ? cell_width (page, axis, z) - pixel
1255 /* Returns the width that would be required along B's axis to render a page
1256 from B's current position up to but not including CELL. */
/* NOTE(review): the "static int" return-type line and braces were dropped by
   extraction; only the visible body lines are annotated here. */
1258 needed_size (const struct render_break *b, int cell)
1260 const struct render_page *page = b->page;
1261 enum table_axis axis = b->axis;
1264 /* Width of left header not including its rightmost rule. */
1265 size = axis_width (page, axis, 0, rule_ofs (page->h[axis][0]));
1267 /* If we have a pixel offset and there is no left header, then we omit the
1268 leftmost rule of the body. Otherwise the rendering is deceptive because
1269 it looks like the whole cell is present instead of a partial cell.
1271 Otherwise (if there are headers) we will be merging two rules: the
1272 rightmost rule in the header and the leftmost rule in the body. We assume
1273 that the width of a merged rule is the larger of the widths of either rule
1275 if (b->pixel == 0 || page->h[axis][0])
1276 size += MAX (rule_width (page, axis, page->h[axis][0]),
1277 rule_width (page, axis, b->z));
1279 /* Width of body, minus any pixel offset in the leftmost cell. */
1280 size += joined_width (page, axis, b->z, cell) - b->pixel;
1282 /* Width of rightmost rule in body merged with leftmost rule in headers. */
1283 size += MAX (rule_width_r (page, axis, page->h[axis][1]),
1284 rule_width (page, axis, cell));
1286 /* Width of right header not including its leftmost rule. */
1287 size += axis_width (page, axis, rule_ofs_r (page, axis, page->h[axis][1]),
1288 rule_ofs_r (page, axis, 0));
1290 /* Join crossing. */
1291 if (page->h[axis][0] && page->h[axis][1])
1292 size += page->join_crossing[axis][b->z];
1297 /* Returns true if CELL along B's axis may be broken across a page boundary.
1299 This is just a heuristic. Breaking cells across page boundaries can save
1300 space, but it looks ugly. */
/* NOTE(review): the "static bool" line and braces were dropped by extraction. */
1302 cell_is_breakable (const struct render_break *b, int cell)
1304 const struct render_page *page = b->page;
1305 enum table_axis axis = b->axis;
/* Only cells at least as wide as the device's configured minimum break
   size are candidates for splitting. */
1307 return cell_width (page, axis, cell) >= page->params->min_break[axis];
/* NOTE(review): the struct header and some members (e.g. 'width') were
   dropped by extraction; field comments below are inferred from usage in
   the pager functions that follow. */
1315 struct render_page **pages;        /* Pages to render, in order. */
1316 size_t cur_page, n_pages;          /* Next page to break; count of pages. */
1317 struct render_break x_break;       /* Horizontal break of current page. */
1318 struct render_break y_break;       /* Vertical break of current x-slice. */
/* Renders TABLE with PARAMS and appends the resulting page to P->pages,
   growing the array (tracked via *ALLOCATED_PAGES) as needed.
   Takes ownership of TABLE (render_page_create consumes the reference). */
1322 render_pager_add_table (struct render_pager *p, struct table *table,
1323 const struct render_params *params,
1324 size_t *allocated_pages)
/* Double the array when full (gnulib x2nrealloc). */
1326 if (p->n_pages >= *allocated_pages)
1327 p->pages = x2nrealloc (p->pages, allocated_pages, sizeof *p->pages);
1328 p->pages[p->n_pages++] = render_page_create (params, table);
/* Begins breaking the next page in P: starts a horizontal break on
   p->pages[cur_page] (advancing cur_page) and resets the vertical break
   to empty so the next render_pager_has_next() call initializes it. */
1332 render_pager_start_page (struct render_pager *p)
1334 render_break_init (&p->x_break, p->pages[p->cur_page++], H);
1335 render_break_init_empty (&p->y_break);
1338 /* Creates and returns a new render_pager for rendering TABLE_ITEM on the
1339 device with the given PARAMS. */
/* NOTE(review): several lines were dropped by extraction (e.g. the argument
   lists continuing with &allocated_pages, a NULL-caption check, and the
   final "return p;") — TODO confirm against the full source. */
1340 struct render_pager *
1341 render_pager_create (const struct render_params *params,
1342 const struct table_item *table_item)
1344 struct render_pager *p;
1345 size_t allocated_pages = 0;
1346 const char *caption;
1348 p = xzalloc (sizeof *p);
1349 p->width = params->size[H];
/* The caption, if any, is rendered as its own one-cell table placed
   before the main table. */
1351 caption = table_item_get_caption (table_item);
1353 render_pager_add_table (p, table_from_string (TAB_LEFT, caption), params,
/* The pager takes its own reference to the item's table. */
1355 render_pager_add_table (p, table_ref (table_item_get_table (table_item)), params,
1358 render_pager_start_page (p);
/* Frees all resources held by P: both in-progress breaks and every page's
   reference.  (Dropped lines presumably guard against NULL P and free the
   pages array and P itself — TODO confirm.) */
1365 render_pager_destroy (struct render_pager *p)
1371 render_break_destroy (&p->x_break);
1372 render_break_destroy (&p->y_break);
1373 for (i = 0; i < p->n_pages; i++)
1374 render_page_unref (p->pages[i]);
1380 /* Returns true if P has content remaining to render, false if rendering is
/* Casts away const because advancing the internal break state is not
   observable to the caller (logically const). */
1383 render_pager_has_next (const struct render_pager *p_)
1385 struct render_pager *p = CONST_CAST (struct render_pager *, p_);
/* Advance until the vertical break has content: exhaust the current
   x-slice, then the current page, then move to the next page. */
1387 while (!render_break_has_next (&p->y_break))
1389 render_break_destroy (&p->y_break);
1390 if (!render_break_has_next (&p->x_break))
1392 render_break_destroy (&p->x_break);
/* All pages consumed: reset both breaks to empty so future calls
   return false cheaply (dropped line presumably returns false). */
1393 if (p->cur_page >= p->n_pages)
1395 render_break_init_empty (&p->x_break);
1396 render_break_init_empty (&p->y_break);
1399 render_pager_start_page (p);
/* Take the next horizontal slice of width p->width and start breaking
   it vertically. */
1402 render_break_init (&p->y_break,
1403 render_break_next (&p->x_break, p->width), V);
1408 /* Draws a chunk of content from P to fit in a space that has vertical size
1409 SPACE and the horizontal size specified in the render_params passed to
1410 render_page_create(). Returns the amount of space actually used by the
1411 rendered chunk, which will be 0 if SPACE is too small to render anything or
1412 if no content remains (use render_pager_has_next() to distinguish these
1415 render_pager_draw_next (struct render_pager *p, int space)
1417 int ofs[TABLE_N_AXES] = { 0, 0 };
/* Sentinel: no page drawn from yet on this call. */
1418 size_t start_page = SIZE_MAX;
1420 while (render_pager_has_next (p))
1422 struct render_page *page;
/* Stop rather than draw two chunks of the same page into one space
   (dropped line presumably breaks out of the loop — TODO confirm). */
1424 if (start_page == p->cur_page)
1426 start_page = p->cur_page;
/* Take as much of the current vertical break as fits in the space
   remaining below what we've already drawn. */
1428 page = render_break_next (&p->y_break, space - ofs[V]);
1432 render_page_draw (page, ofs);
1433 ofs[V] += render_page_get_size (page, V);
1434 render_page_unref (page);
1439 /* Draws all of P's content. */
/* Implemented as an unbounded region draw. */
1441 render_pager_draw (const struct render_pager *p)
1443 render_pager_draw_region (p, 0, 0, INT_MAX, INT_MAX);
1446 /* Draws the region of P's content that lies in the region (X,Y)-(X+W,Y+H).
1447 Some extra content might be drawn; the device should perform clipping as
1450 render_pager_draw_region (const struct render_pager *p,
1451 int x, int y, int w, int h)
1453 int ofs[TABLE_N_AXES] = { 0, 0 };
1454 int clip[TABLE_N_AXES][2];
/* Pages are stacked vertically; walk them, clipping each against the
   requested vertical band.  (Dropped lines presumably compute clip[H]
   and advance ofs[V] per page — TODO confirm.) */
1459 for (i = 0; i < p->n_pages; i++)
1461 const struct render_page *page = p->pages[i];
/* Intersection of [y, y+h) with this page's band, in page-local
   coordinates. */
1463 clip[V][0] = MAX (y, ofs[V]) - ofs[V];
1464 clip[V][1] = MIN (y + h, ofs[V] + render_page_get_size (page, V)) - ofs[V];
/* Only draw pages whose band actually intersects the region. */
1465 if (clip[V][1] > clip[V][0])
1466 render_page_draw_region (page, ofs, clip);
1470 /* Returns the size of P's content along AXIS; i.e. the content's width if AXIS
1471 is TABLE_HORZ and its length if AXIS is TABLE_VERT. */
1473 render_pager_get_size (const struct render_pager *p, enum table_axis axis)
1478 for (i = 0; i < p->n_pages; i++)
1480 int subsize = render_page_get_size (p->pages[i], axis);
/* Pages stack vertically: width is the max across pages, height is
   the sum. */
1481 size = axis == H ? MAX (size, subsize) : size + subsize;
/* Returns the best vertical breakpoint in P at or below HEIGHT, delegating
   to the page that HEIGHT falls within.  (Dropped lines presumably
   accumulate 'y' across earlier pages and return a fallback when HEIGHT
   exceeds the total — TODO confirm.) */
1488 render_pager_get_best_breakpoint (const struct render_pager *p, int height)
1493 for (i = 0; i < p->n_pages; i++)
1495 int size = render_page_get_size (p->pages[i], V);
/* HEIGHT lands inside this page: ask it for a good internal break and
   translate back to pager coordinates. */
1496 if (y + size >= height)
1497 return render_page_get_best_breakpoint (p->pages[i], height - y) + y;
1504 /* render_page_select() and helpers. */
/* State shared among render_page_select() and its helpers while carving a
   slice 'subpage' out of 'page' along axis 'a'. */
1506 struct render_page_selection
1508 const struct render_page *page; /* Page whose slice we are selecting. */
1509 struct render_page *subpage; /* New page under construction. */
1510 enum table_axis a; /* Axis of 'page' along which 'subpage' is a slice. */
1511 enum table_axis b; /* The opposite of 'a'. */
1512 int z0; /* First cell along 'a' being selected. */
1513 int z1; /* Last cell being selected, plus 1. */
1514 int p0; /* Number of pixels to trim off left side of z0. */
1515 int p1; /* Number of pixels to trim off right side of z1-1. */
1518 static void cell_to_subpage (struct render_page_selection *,
1519 const struct table_cell *,
1520 int subcell[TABLE_N_AXES]);
1521 static const struct render_overflow *find_overflow_for_cell (
1522 struct render_page_selection *, const struct table_cell *);
1523 static struct render_overflow *insert_overflow (struct render_page_selection *,
1524 const struct table_cell *);
1526 /* Creates and returns a new render_page whose contents are a subregion of
1527 PAGE's contents. The new render_page includes cells Z0 through Z1 along
1528 AXIS, plus any headers on AXIS.
1530 If P0 is nonzero, then it is a number of pixels to exclude from the left or
1531 top (according to AXIS) of cell Z0. Similarly, P1 is a number of pixels to
1532 exclude from the right or bottom of cell Z1 - 1. (P0 and P1 are used to
1533 render cells that are too large to fit on a single page.)
1535 The whole of axis !AXIS is included. (The caller may follow up with another
1536 call to render_page_select() to select on !AXIS to select on that axis as
1539 The caller retains ownership of PAGE, which is not modified. */
/* NOTE(review): extraction dropped many interior lines of this function
   (declarations, else branches, loop-advance statements, and the final
   return).  Comments annotate only the visible fragments. */
1540 static struct render_page *
1541 render_page_select (const struct render_page *page, enum table_axis axis,
1542 int z0, int p0, int z1, int p1)
1544 struct render_page_selection s;
1545 enum table_axis a = axis;
1546 enum table_axis b = !a;
1547 struct render_page *subpage;
1548 struct render_overflow *ro;
1554 /* Optimize case where all of PAGE is selected by just incrementing the
/* Whole page selected with no pixel trims: no new slice needed. */
1556 if (z0 == page->h[a][0] && p0 == 0
1557 && z1 == page->n[a] - page->h[a][1] && p1 == 0)
/* Const cast is safe: only the (logically-const) refcount is bumped. */
1559 struct render_page *page_rw = CONST_CAST (struct render_page *, page);
1564 /* Allocate subpage. */
/* The underlying table is sliced to cells [z0, z1) plus headers; the
   new page gets its own table reference. */
1565 subpage = render_page_allocate (page->params,
1566 table_select_slice (
1567 table_ref (page->table),
1570 /* An edge is cut off if it was cut off in PAGE or if we're trimming pixels
1571 off that side of the page and there are no headers. */
1572 subpage->is_edge_cutoff[a][0] =
1573 subpage->h[a][0] == 0 && (p0 || (z0 == 0 && page->is_edge_cutoff[a][0]));
1574 subpage->is_edge_cutoff[a][1] =
1575 subpage->h[a][1] == 0 && (p1 || (z1 == page->n[a]
1576 && page->is_edge_cutoff[a][1]));
1577 subpage->is_edge_cutoff[b][0] = page->is_edge_cutoff[b][0];
1578 subpage->is_edge_cutoff[b][1] = page->is_edge_cutoff[b][1];
1580 /* Select join crossings from PAGE into subpage. */
/* Concatenate: left header's crossings, the selected body range's, then
   the right header's. */
1581 jc = subpage->join_crossing[a];
1582 for (z = 0; z < page->h[a][0]; z++)
1583 *jc++ = page->join_crossing[a][z];
1584 for (z = z0; z <= z1; z++)
1585 *jc++ = page->join_crossing[a][z];
1586 for (z = page->n[a] - page->h[a][1]; z < page->n[a]; z++)
1587 *jc++ = page->join_crossing[a][z];
1588 assert (jc == &subpage->join_crossing[a][subpage->n[a] + 1]);
/* The other axis is copied wholesale. */
1590 memcpy (subpage->join_crossing[b], page->join_crossing[b],
1591 (subpage->n[b] + 1) * sizeof **subpage->join_crossing);
1593 /* Select widths from PAGE into subpage. */
/* dcp walks the destination cp[] array; scp (dropped declaration) is
   presumably the source page's cp[a] — TODO confirm. */
1595 dcp = subpage->cp[a];
/* Left header rules and cells. */
1597 for (z = 0; z <= rule_ofs (subpage->h[a][0]); z++, dcp++)
/* A cut-off edge contributes no rule width. */
1599 if (z == 0 && subpage->is_edge_cutoff[a][0])
1602 dcp[1] = dcp[0] + (scp[z + 1] - scp[z]);
/* Body cells/rules in the selected range. */
1604 for (z = cell_ofs (z0); z <= cell_ofs (z1 - 1); z++, dcp++)
1606 dcp[1] = dcp[0] + (scp[z + 1] - scp[z]);
/* First selected cell: subtract the p0 pixel trim (dropped line)
   and account for a header/body join crossing. */
1607 if (z == cell_ofs (z0))
1610 if (page->h[a][0] && page->h[a][1])
1611 dcp[1] += page->join_crossing[a][z / 2];
/* Last selected cell: subtract the p1 pixel trim (dropped line). */
1613 if (z == cell_ofs (z1 - 1))
/* Right header rules and cells. */
1616 for (z = rule_ofs_r (page, a, subpage->h[a][1]);
1617 z <= rule_ofs_r (page, a, 0); z++, dcp++)
1619 if (z == rule_ofs_r (page, a, 0) && subpage->is_edge_cutoff[a][1])
1622 dcp[1] = dcp[0] + (scp[z + 1] - scp[z]);
1624 assert (dcp == &subpage->cp[a][2 * subpage->n[a] + 1]);
/* The other axis's positions are copied unchanged. */
1626 for (z = 0; z < page->n[b] * 2 + 2; z++)
1627 subpage->cp[b][z] = page->cp[b][z];
1629 /* Add new overflows. */
1637 s.subpage = subpage;
/* Cells spilling past the left/top edge of the selection need overflow
   records so their content is positioned correctly. */
1639 if (!page->h[a][0] || z0 > page->h[a][0] || p0)
1640 for (z = 0; z < page->n[b]; )
1642 struct table_cell cell;
1643 int d[TABLE_N_AXES];
1650 table_get_cell (page->table, d[H], d[V], &cell);
/* Does the cell stick out past the selection start / end? */
1651 overflow0 = p0 || cell.d[a][0] < z0;
1652 overflow1 = cell.d[a][1] > z1 || (cell.d[a][1] == z1 && p1);
1653 if (overflow0 || overflow1)
1655 ro = insert_overflow (&s, &cell);
/* Pixels hidden before the selection start. */
1659 ro->overflow[a][0] += p0 + axis_width (
1660 page, a, cell_ofs (cell.d[a][0]), cell_ofs (z0));
1661 if (page->h[a][0] && page->h[a][1])
1662 ro->overflow[a][0] -= page->join_crossing[a][cell.d[a][0]
/* Pixels hidden past the selection end. */
1668 ro->overflow[a][1] += p1 + axis_width (
1669 page, a, cell_ofs (z1), cell_ofs (cell.d[a][1]));
1670 if (page->h[a][0] && page->h[a][1])
1671 ro->overflow[a][1] -= page->join_crossing[a][cell.d[a][1]];
1675 table_cell_free (&cell);
/* Likewise for cells spilling past the right/bottom edge, skipping cells
   already handled above. */
1678 if (!page->h[a][1] || z1 < page->n[a] - page->h[a][1] || p1)
1679 for (z = 0; z < page->n[b]; )
1681 struct table_cell cell;
1682 int d[TABLE_N_AXES];
1686 table_get_cell (page->table, d[H], d[V], &cell);
1687 if ((cell.d[a][1] > z1 || (cell.d[a][1] == z1 && p1))
1688 && find_overflow_for_cell (&s, &cell) == NULL)
1690 ro = insert_overflow (&s, &cell);
1691 ro->overflow[a][1] += p1 + axis_width (page, a, cell_ofs (z1),
1692 cell_ofs (cell.d[a][1]));
1695 table_cell_free (&cell);
1698 /* Copy overflows from PAGE into subpage. */
/* Pre-existing overflow records whose cells intersect the selection are
   carried over (insert_overflow merges the old amounts). */
1699 HMAP_FOR_EACH (ro, struct render_overflow, node, &page->overflows)
1701 struct table_cell cell;
1703 table_get_cell (page->table, ro->d[H], ro->d[V], &cell);
1704 if (cell.d[a][1] > z0 && cell.d[a][0] < z1
1705 && find_overflow_for_cell (&s, &cell) == NULL)
1706 insert_overflow (&s, &cell);
1707 table_cell_free (&cell);
1713 /* Given CELL, a table_cell within S->page, stores in SUBCELL the (x,y)
1714 coordinates of the top-left cell as it will appear in S->subpage.
1716 CELL must actually intersect the region of S->page that is being selected
1717 by render_page_select() or the results will not make any sense. */
1719 cell_to_subpage (struct render_page_selection *s,
1720 const struct table_cell *cell, int subcell[TABLE_N_AXES])
1722 enum table_axis a = s->a;
1723 enum table_axis b = s->b;
1724 int ha0 = s->subpage->h[a][0];
/* Shift along the selected axis by the selection origin, clamping to the
   first body cell (ha0) for cells that start inside the left header. */
1726 subcell[a] = MAX (cell->d[a][0] - s->z0 + ha0, ha0);
/* The other axis is unsliced, so its coordinate carries over directly. */
1727 subcell[b] = cell->d[b][0];
1730 /* Given CELL, a table_cell within S->page, returns the render_overflow for
1731 that cell in S->subpage, if there is one, and a null pointer otherwise.
1733 CELL must actually intersect the region of S->page that is being selected
1734 by render_page_select() or the results will not make any sense. */
1735 static const struct render_overflow *
1736 find_overflow_for_cell (struct render_page_selection *s,
1737 const struct table_cell *cell)
/* Translate the cell to subpage coordinates, then look it up in the
   subpage's overflow hash.  (Dropped line presumably declares
   'int subcell[TABLE_N_AXES];' — TODO confirm.) */
1741 cell_to_subpage (s, cell, subcell);
1742 return find_overflow (s->subpage, subcell[H], subcell[V]);
1745 /* Given CELL, a table_cell within S->page, inserts a render_overflow for that
1746 cell in S->subpage (which must not already exist). Initializes the new
1747 render_overflow's 'overflow' member from the overflow for CELL in S->page,
1750 CELL must actually intersect the region of S->page that is being selected
1751 by render_page_select() or the results will not make any sense. */
1752 static struct render_overflow *
1753 insert_overflow (struct render_page_selection *s,
1754 const struct table_cell *cell)
1756 const struct render_overflow *old;
1757 struct render_overflow *of;
1759 of = xzalloc (sizeof *of);
1760 cell_to_subpage (s, cell, of->d);
1761 hmap_insert (&s->subpage->overflows, &of->node,
1762 hash_overflow (of->d[H], of->d[V]));
1764 old = find_overflow (s->page, cell->d[H][0], cell->d[V][0]);
1766 memcpy (of->overflow, old->overflow, sizeof of->overflow);