1 /* PSPP - a program for statistical analysis.
2 Copyright (C) 2009, 2010, 2011, 2013, 2014 Free Software Foundation, Inc.
4 This program is free software: you can redistribute it and/or modify
5 it under the terms of the GNU General Public License as published by
6 the Free Software Foundation, either version 3 of the License, or
7 (at your option) any later version.
9 This program is distributed in the hope that it will be useful,
10 but WITHOUT ANY WARRANTY; without even the implied warranty of
11 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 GNU General Public License for more details.
14 You should have received a copy of the GNU General Public License
15 along with this program. If not, see <http://www.gnu.org/licenses/>. */
25 #include "libpspp/assertion.h"
26 #include "libpspp/hash-functions.h"
27 #include "libpspp/hmap.h"
28 #include "output/render.h"
29 #include "output/tab.h"
30 #include "output/table-item.h"
31 #include "output/table.h"
33 #include "gl/minmax.h"
34 #include "gl/xalloc.h"
36 /* This file uses TABLE_HORZ and TABLE_VERT enough to warrant abbreviating. */
40 /* A layout for rendering a specific table on a specific device.
42 May represent the layout of an entire table presented to
43 render_page_create(), or a rectangular subregion of a table broken out using
44 render_break_next() to allow a table to be broken across multiple pages.
46 A page's size is not limited to the size passed in as part of render_params.
47 render_pager breaks a render_page into smaller render_pages that will fit in
48 the available space. */
51 const struct render_params *params; /* Parameters of the target device. */
52 struct table *table; /* Table rendered. */
55 /* Local copies of table->n and table->h, for convenience. */
57 int h[TABLE_N_AXES][2];
59 /* cp[H] represents x positions within the table.
61 cp[H][1] = the width of the leftmost vertical rule.
62 cp[H][2] = cp[H][1] + the width of the leftmost column.
63 cp[H][3] = cp[H][2] + the width of the second-from-left vertical rule.
65 cp[H][2 * nc] = x position of the rightmost vertical rule.
66 cp[H][2 * nc + 1] = total table width including all rules.
68 Similarly, cp[V] represents y positions within the table.
70 cp[V][1] = the height of the topmost horizontal rule.
71 cp[V][2] = cp[V][1] + the height of the topmost row.
72 cp[V][3] = cp[V][2] + the height of the second-from-top horizontal rule.
74 cp[V][2 * nr] = y position of the bottommost horizontal rule.
75 cp[V][2 * nr + 1] = total table height including all rules.
77 Rules and columns can have width or height 0, in which case consecutive
78 values in this array are equal. */
79 int *cp[TABLE_N_AXES];
81 /* render_break_next() can break a table such that some cells are not fully
82 contained within a render_page. This will happen if a cell is too wide
83 or two tall to fit on a single page, or if a cell spans multiple rows or
84 columns and the page only includes some of those rows or columns.
86 This hash table contains "struct render_overflow"s that represents each
87 such cell that doesn't completely fit on this page.
89 Each overflow cell borders at least one header edge of the table and may
90 border more. (A single table cell that is so large that it fills the
91 entire page can overflow on all four sides!) */
92 struct hmap overflows;
94 /* Contains "struct render_footnote"s, one for each cell with one or more
97 'n_footnotes' is the number of footnotes in the table. There might be
98 more than hmap_count(&page->footnotes) because there can be more than
99 one footnote in a cell. */
100 struct hmap footnotes;
103 /* If a single column (or row) is too wide (or tall) to fit on a page
104 reasonably, then render_break_next() will split a single row or column
105 across multiple render_pages. This member indicates when this has
108 is_edge_cutoff[H][0] is true if pixels have been cut off the left side
109 of the leftmost column in this page, and false otherwise.
111 is_edge_cutoff[H][1] is true if pixels have been cut off the right side
112 of the rightmost column in this page, and false otherwise.
114 is_edge_cutoff[V][0] and is_edge_cutoff[V][1] are similar for the top
115 and bottom of the table.
117 The effect of is_edge_cutoff is to prevent rules along the edge in
118 question from being rendered.
120 When is_edge_cutoff is true for a given edge, the 'overflows' hmap will
121 contain a node for each cell along that edge. */
122 bool is_edge_cutoff[TABLE_N_AXES][2];
124 /* If part of a joined cell would be cut off by breaking a table along
125 'axis' at the rule with offset 'z' (where 0 <= z <= n[axis]), then
126 join_crossing[axis][z] is the thickness of the rule that would be cut
129 This is used to know to allocate extra space for breaking at such a
130 position, so that part of the cell's content is not lost.
132 This affects breaking a table only when headers are present. When
133 headers are not present, the rule's thickness is used for cell content,
134 so no part of the cell's content is lost (and in fact it is duplicated
135 across both pages). */
136 int *join_crossing[TABLE_N_AXES];
139 static struct render_page *render_page_create (const struct render_params *,
140 const struct table *);
142 struct render_page *render_page_ref (const struct render_page *page_);
143 static void render_page_unref (struct render_page *);
145 /* Returns the offset in struct render_page's cp[axis] array of the rule with
146 index RULE_IDX. That is, if RULE_IDX is 0, then the offset is that of the
147 leftmost or topmost rule; if RULE_IDX is 1, then the offset is that of the
148 next rule to the right (or below); and so on. */
150 rule_ofs (int rule_idx)
155 /* Returns the offset in struct render_page's cp[axis] array of the rule with
156 index RULE_IDX_R, which counts from the right side (or bottom) of the page
157 left (or up), according to whether AXIS is H or V, respectively. That is,
158 if RULE_IDX_R is 0, then the offset is that of the rightmost or bottommost
159 rule; if RULE_IDX is 1, then the offset is that of the next rule to the left
160 (or above); and so on. */
162 rule_ofs_r (const struct render_page *page, int axis, int rule_idx_r)
164 return (page->n[axis] - rule_idx_r) * 2;
167 /* Returns the offset in struct render_page's cp[axis] array of the cell with
168 index CELL_IDX. That is, if CELL_IDX is 0, then the offset is that of the
169 leftmost or topmost cell; if CELL_IDX is 1, then the offset is that of the
170 next cell to the right (or below); and so on. */
172 cell_ofs (int cell_idx)
174 return cell_idx * 2 + 1;
177 /* Returns the width of PAGE along AXIS from OFS0 to OFS1, exclusive. */
179 axis_width (const struct render_page *page, int axis, int ofs0, int ofs1)
181 return page->cp[axis][ofs1] - page->cp[axis][ofs0];
184 /* Returns the width of the headers in PAGE along AXIS. */
186 headers_width (const struct render_page *page, int axis)
188 int h0 = page->h[axis][0];
189 int w0 = axis_width (page, axis, rule_ofs (0), cell_ofs (h0));
190 int n = page->n[axis];
191 int h1 = page->h[axis][1];
192 int w1 = axis_width (page, axis, rule_ofs_r (page, axis, h1), cell_ofs (n));
196 /* Returns the width of cell X along AXIS in PAGE. */
198 cell_width (const struct render_page *page, int axis, int x)
200 return axis_width (page, axis, cell_ofs (x), cell_ofs (x) + 1);
203 /* Returns the width of rule X along AXIS in PAGE. */
205 rule_width (const struct render_page *page, int axis, int x)
207 return axis_width (page, axis, rule_ofs (x), rule_ofs (x) + 1);
210 /* Returns the width of rule X along AXIS in PAGE. */
212 rule_width_r (const struct render_page *page, int axis, int x)
214 int ofs = rule_ofs_r (page, axis, x);
215 return axis_width (page, axis, ofs, ofs + 1);
218 /* Returns the width of cells X0 through X1, exclusive, along AXIS in PAGE. */
220 joined_width (const struct render_page *page, int axis, int x0, int x1)
222 return axis_width (page, axis, cell_ofs (x0), cell_ofs (x1) - 1);
225 /* Returns the width of the widest cell, excluding headers, along AXIS in
228 max_cell_width (const struct render_page *page, int axis)
230 int n = page->n[axis];
231 int x0 = page->h[axis][0];
232 int x1 = n - page->h[axis][1];
236 for (x = x0; x < x1; x++)
238 int w = cell_width (page, axis, x);
245 /* A cell that doesn't completely fit on the render_page. */
246 struct render_overflow
248 struct hmap_node node; /* In render_page's 'overflows' hmap. */
250 /* Occupied region of page.
252 d[H][0] is the leftmost column.
253 d[H][1] is the rightmost column, plus 1.
254 d[V][0] is the top row.
255 d[V][1] is the bottom row, plus 1.
257 The cell in its original table might occupy a larger region. This
258 member reflects the size of the cell in the current render_page, after
259 trimming off any rows or columns due to page-breaking. */
262 /* The space that has been trimmed off the cell:
264 overflow[H][0]: space trimmed off its left side.
265 overflow[H][1]: space trimmed off its right side.
266 overflow[V][0]: space trimmed off its top.
267 overflow[V][1]: space trimmed off its bottom.
269 During rendering, this information is used to position the rendered
270 portion of the cell within the available space.
272 When a cell is rendered, sometimes it is permitted to spill over into
273 space that is ordinarily reserved for rules. Either way, this space is
274 still included in overflow values.
276 Suppose, for example, that a cell that joins 2 columns has a width of 60
277 pixels and content "abcdef", that the 2 columns that it joins have
278 widths of 20 and 30 pixels, respectively, and that therefore the rule
279 between the two joined columns has a width of 10 (20 + 10 + 30 = 60).
280 It might render like this, if each character is 10x10, and showing a few
281 extra table cells for context:
289 If this render_page is broken at the rule that separates "gh" from
290 "ijk", then the page that contains the left side of the "abcdef" cell
291 will have overflow[H][1] of 10 + 30 = 40 for its portion of the cell,
292 and the page that contains the right side of the cell will have
293 overflow[H][0] of 20 + 10 = 30. The two resulting pages would look like
311 int overflow[TABLE_N_AXES][2];
314 /* Returns a hash value for (,Y). */
316 hash_cell (int x, int y)
318 return hash_int (x + (y << 16), 0);
321 /* Searches PAGE's set of render_overflow for one whose top-left cell is
322 (X,Y). Returns it, if there is one, otherwise a null pointer. */
323 static const struct render_overflow *
324 find_overflow (const struct render_page *page, int x, int y)
326 if (!hmap_is_empty (&page->overflows))
328 const struct render_overflow *of;
330 HMAP_FOR_EACH_WITH_HASH (of, struct render_overflow, node,
331 hash_cell (x, y), &page->overflows)
332 if (x == of->d[H] && y == of->d[V])
340 struct render_footnote
342 struct hmap_node node;
344 /* The area of the table covered by the cell that has the footnote.
346 d[H][0] is the leftmost column.
347 d[H][1] is the rightmost column, plus 1.
348 d[V][0] is the top row.
349 d[V][1] is the bottom row, plus 1.
351 The cell in its original table might occupy a larger region. This
352 member reflects the size of the cell in the current render_page, after
353 trimming off any rows or columns due to page-breaking. */
354 int d[TABLE_N_AXES][2];
356 /* The index of the first footnote in the cell. */
361 count_footnotes (const struct table_cell *cell)
367 for (i = 0; i < cell->n_contents; i++)
368 n += cell->contents[i].n_footnotes;
373 find_footnote_idx (const struct table_cell *cell, const struct hmap *footnotes)
375 const struct render_footnote *f;
377 if (!count_footnotes (cell))
380 HMAP_FOR_EACH_WITH_HASH (f, struct render_footnote, node,
381 hash_cell (cell->d[H][0], cell->d[V][0]), footnotes)
382 if (f->d[H][0] == cell->d[H][0] && f->d[V][0] == cell->d[V][0])
388 /* Row or column dimensions. Used to figure the size of a table in
389 render_page_create() and discarded after that. */
392 /* Width without considering rows (or columns) that span more than one (or
396 /* Width taking spanned rows (or columns) into consideration. */
400 /* Modifies the 'width' members of the N elements of ROWS so that their sum,
401 when added to rule widths RULES[1] through RULES[N - 1] inclusive, is at
404 distribute_spanned_width (int width,
405 struct render_row *rows, const int *rules, int n)
411 /* Sum up the unspanned widths of the N rows for use as weights. */
413 for (x = 0; x < n; x++)
414 total_unspanned += rows[x].unspanned;
415 for (x = 0; x < n - 1; x++)
416 total_unspanned += rules[x + 1];
417 if (total_unspanned >= width)
420 /* The algorithm used here is based on the following description from HTML 4:
422 For cells that span multiple columns, a simple approach consists of
423 apportioning the min/max widths evenly to each of the constituent
424 columns. A slightly more complex approach is to use the min/max
425 widths of unspanned cells to weight how spanned widths are
426 apportioned. Experiments suggest that a blend of the two approaches
427 gives good results for a wide range of tables.
429 We blend the two approaches half-and-half, except that we cannot use the
430 unspanned weights when 'total_unspanned' is 0 (because that would cause a
433 This implementation uses floating-point types and operators, but all the
434 values involved are integers. For integers smaller than 53 bits, this
435 should not lose any precision, and it should degrade gracefully for larger
438 The calculation we want to do is this:
441 w1 = width * (column's unspanned width) / (total unspanned width)
442 (column's width) = (w0 + w1) / 2
444 We implement it as a precise calculation in integers by multiplying w0 and
445 w1 by the common denominator of all three calculations (d), dividing that
446 out in the column width calculation, and then keeping the remainder for
449 (We actually compute the unspanned width of a column as twice the
450 unspanned width, plus the width of the rule on the left, plus the width of
451 the rule on the right. That way each rule contributes to both the cell on
452 its left and on its right.)
455 d1 = 2.0 * (total_unspanned > 0 ? total_unspanned : 1.0);
457 if (total_unspanned > 0)
460 for (x = 0; x < n; x++)
463 if (total_unspanned > 0)
465 double unspanned = rows[x].unspanned * 2.0;
467 unspanned += rules[x + 1];
469 unspanned += rules[x];
470 w += width * unspanned * d0;
473 rows[x].width = MAX (rows[x].width, w / d);
474 w -= rows[x].width * d;
478 /* Initializes PAGE->cp[AXIS] from the row widths in ROWS and the rule widths
481 accumulate_row_widths (const struct render_page *page, enum table_axis axis,
482 const struct render_row *rows, const int *rules)
484 int n = page->n[axis];
490 for (z = 0; z < n; z++)
492 cp[1] = cp[0] + rules[z];
493 cp[2] = cp[1] + rows[z].width;
496 cp[1] = cp[0] + rules[n];
499 /* Returns the sum of widths of the N ROWS and N+1 RULES. */
501 calculate_table_width (int n, const struct render_row *rows, int *rules)
507 for (x = 0; x < n; x++)
508 width += rows[x].width;
509 for (x = 0; x <= n; x++)
515 /* Rendering utility functions. */
517 /* Returns the line style to use for drawing a rule of the given TYPE. */
518 static enum render_line_style
519 rule_to_render_type (unsigned char type)
525 return RENDER_LINE_NONE;
527 return RENDER_LINE_SINGLE;
529 return RENDER_LINE_DOUBLE;
535 /* Returns the width of the rule in TABLE that is at offset Z along axis A, if
536 rendered with PARAMS. */
538 measure_rule (const struct render_params *params, const struct table *table,
539 enum table_axis a, int z)
541 enum table_axis b = !a;
546 /* Determine all types of rules that are present, as a bitmap in 'rules'
547 where rule type 't' is present if bit 2**t is set. */
550 for (d[b] = 0; d[b] < table->n[b]; d[b]++)
551 rules |= 1u << table_get_rule (table, a, d[H], d[V]);
553 /* Calculate maximum width of the rules that are present. */
555 if (rules & (1u << TAL_1)
556 || (z > 0 && z < table->n[a] && rules & (1u << TAL_GAP)))
557 width = params->line_widths[a][RENDER_LINE_SINGLE];
558 if (rules & (1u << TAL_2))
559 width = MAX (width, params->line_widths[a][RENDER_LINE_DOUBLE]);
563 /* Allocates and returns a new render_page using PARAMS and TABLE. Allocates
564 space for all of the members of the new page, but the caller must initialize
565 the 'cp' member itself. */
566 static struct render_page *
567 render_page_allocate (const struct render_params *params,
570 struct render_page *page;
573 page = xmalloc (sizeof *page);
574 page->params = params;
577 page->n[H] = table->n[H];
578 page->n[V] = table->n[V];
579 page->h[H][0] = table->h[H][0];
580 page->h[H][1] = table->h[H][1];
581 page->h[V][0] = table->h[V][0];
582 page->h[V][1] = table->h[V][1];
584 for (i = 0; i < TABLE_N_AXES; i++)
586 page->cp[i] = xmalloc ((2 * page->n[i] + 2) * sizeof *page->cp[i]);
587 page->join_crossing[i] = xzalloc ((page->n[i] + 1) * sizeof *page->join_crossing[i]);
590 hmap_init (&page->overflows);
591 hmap_init (&page->footnotes);
592 page->n_footnotes = 0;
593 memset (page->is_edge_cutoff, 0, sizeof page->is_edge_cutoff);
598 /* Allocates and returns a new render_page for PARAMS and TABLE, initializing
599 cp[H] in the new page from ROWS and RULES. The caller must still initialize
601 static struct render_page *
602 create_page_with_exact_widths (const struct render_params *params,
604 const struct render_row *rows, int *rules)
606 struct render_page *page = render_page_allocate (params, table);
607 accumulate_row_widths (page, H, rows, rules);
611 /* Allocates and returns a new render_page for PARAMS and TABLE.
613 Initializes cp[H] in the new page by setting the width of each row 'i' to
614 somewhere between the minimum cell width ROW_MIN[i].width and the maximum
615 ROW_MAX[i].width. Sets the width of rules to those in RULES.
617 W_MIN is the sum of ROWS_MIN[].width.
619 W_MAX is the sum of ROWS_MAX[].width.
621 The caller must still initialize cp[V]. */
622 static struct render_page *
623 create_page_with_interpolated_widths (const struct render_params *params,
625 const struct render_row *rows_min,
626 const struct render_row *rows_max,
627 int w_min, int w_max, const int *rules)
629 /* This implementation uses floating-point types and operators, but all the
630 values involved are integers. For integers smaller than 53 bits, this
631 should not lose any precision, and it should degrade gracefully for larger
633 const int n = table->n[H];
634 const double avail = params->size[H] - w_min;
635 const double wanted = w_max - w_min;
636 struct render_page *page;
643 page = render_page_allocate (params, table);
647 w = (int) wanted / 2;
648 for (x = 0; x < n; x++)
652 w += avail * (rows_max[x].width - rows_min[x].width);
656 cph[1] = cph[0] + rules[x];
657 cph[2] = cph[1] + rows_min[x].width + extra;
660 cph[1] = cph[0] + rules[n];
662 assert (page->cp[H][n * 2 + 1] == params->size[H]);
668 set_join_crossings (struct render_page *page, enum table_axis axis,
669 const struct table_cell *cell, int *rules)
673 for (z = cell->d[axis][0] + 1; z <= cell->d[axis][1] - 1; z++)
674 page->join_crossing[axis][z] = rules[z];
677 /* Creates and returns a new render_page for rendering TABLE on a device
680 The new render_page will be suitable for rendering on a device whose page
681 size is PARAMS->size, but the caller is responsible for actually breaking it
682 up to fit on such a device, using the render_break abstraction. */
683 static struct render_page *
684 render_page_create (const struct render_params *params,
685 const struct table *table_)
687 struct render_page *page;
690 struct render_row *columns[2];
691 struct render_row *rows;
693 int *rules[TABLE_N_AXES];
694 struct hmap footnotes;
699 enum table_axis axis;
701 table = table_ref (table_);
702 nc = table_nc (table);
703 nr = table_nr (table);
705 /* Figure out rule widths. */
706 for (axis = 0; axis < TABLE_N_AXES; axis++)
708 int n = table->n[axis] + 1;
711 rules[axis] = xnmalloc (n, sizeof *rules);
712 for (z = 0; z < n; z++)
713 rules[axis][z] = measure_rule (params, table, axis, z);
716 /* Calculate minimum and maximum widths of cells that do not
717 span multiple columns. Assign footnote markers. */
718 hmap_init (&footnotes);
720 for (i = 0; i < 2; i++)
721 columns[i] = xzalloc (nc * sizeof *columns[i]);
722 for (y = 0; y < nr; y++)
723 for (x = 0; x < nc; )
725 struct table_cell cell;
727 table_get_cell (table, x, y, &cell);
728 if (y == cell.d[V][0])
732 if (table_cell_colspan (&cell) == 1)
737 params->measure_cell_width (params->aux, &cell, footnote_idx,
739 for (i = 0; i < 2; i++)
740 if (columns[i][x].unspanned < w[i])
741 columns[i][x].unspanned = w[i];
744 n = count_footnotes (&cell);
747 struct render_footnote *f = xmalloc (sizeof *f);
748 f->d[H][0] = cell.d[H][0];
749 f->d[H][1] = cell.d[H][1];
750 f->d[V][0] = cell.d[V][0];
751 f->d[V][1] = cell.d[V][1];
752 f->idx = footnote_idx;
753 hmap_insert (&footnotes, &f->node, hash_cell (x, y));
759 table_cell_free (&cell);
762 /* Distribute widths of spanned columns. */
763 for (i = 0; i < 2; i++)
764 for (x = 0; x < nc; x++)
765 columns[i][x].width = columns[i][x].unspanned;
766 for (y = 0; y < nr; y++)
767 for (x = 0; x < nc; )
769 struct table_cell cell;
771 table_get_cell (table, x, y, &cell);
772 if (y == cell.d[V][0] && table_cell_colspan (&cell) > 1)
776 params->measure_cell_width (params->aux, &cell,
777 find_footnote_idx (&cell, &footnotes),
779 for (i = 0; i < 2; i++)
780 distribute_spanned_width (w[i], &columns[i][cell.d[H][0]],
781 rules[H], table_cell_colspan (&cell));
784 table_cell_free (&cell);
787 /* Decide final column widths. */
788 for (i = 0; i < 2; i++)
789 table_widths[i] = calculate_table_width (table_nc (table),
790 columns[i], rules[H]);
791 if (table_widths[MAX] <= params->size[H])
793 /* Fits even with maximum widths. Use them. */
794 page = create_page_with_exact_widths (params, table, columns[MAX],
797 else if (table_widths[MIN] <= params->size[H])
799 /* Fits with minimum widths, so distribute the leftover space. */
800 page = create_page_with_interpolated_widths (
801 params, table, columns[MIN], columns[MAX],
802 table_widths[MIN], table_widths[MAX], rules[H]);
806 /* Doesn't fit even with minimum widths. Assign minimums for now, and
807 later we can break it horizontally into multiple pages. */
808 page = create_page_with_exact_widths (params, table, columns[MIN],
812 /* Calculate heights of cells that do not span multiple rows. */
813 rows = xzalloc (nr * sizeof *rows);
814 for (y = 0; y < nr; y++)
816 for (x = 0; x < nc; )
818 struct render_row *r = &rows[y];
819 struct table_cell cell;
821 table_get_cell (table, x, y, &cell);
822 if (y == cell.d[V][0])
824 if (table_cell_rowspan (&cell) == 1)
826 int w = joined_width (page, H, cell.d[H][0], cell.d[H][1]);
827 int h = params->measure_cell_height (
828 params->aux, &cell, find_footnote_idx (&cell, &footnotes), w);
829 if (h > r->unspanned)
830 r->unspanned = r->width = h;
833 set_join_crossings (page, V, &cell, rules[V]);
835 if (table_cell_colspan (&cell) > 1)
836 set_join_crossings (page, H, &cell, rules[H]);
839 table_cell_free (&cell);
842 for (i = 0; i < 2; i++)
845 /* Distribute heights of spanned rows. */
846 for (y = 0; y < nr; y++)
847 for (x = 0; x < nc; )
849 struct table_cell cell;
851 table_get_cell (table, x, y, &cell);
852 if (y == cell.d[V][0] && table_cell_rowspan (&cell) > 1)
854 int w = joined_width (page, H, cell.d[H][0], cell.d[H][1]);
855 int h = params->measure_cell_height (
856 params->aux, &cell, find_footnote_idx (&cell, &footnotes), w);
857 distribute_spanned_width (h, &rows[cell.d[V][0]], rules[V],
858 table_cell_rowspan (&cell));
861 table_cell_free (&cell);
864 /* Decide final row heights. */
865 accumulate_row_widths (page, V, rows, rules[V]);
868 /* Measure headers. If they are "too big", get rid of them. */
869 for (axis = 0; axis < TABLE_N_AXES; axis++)
871 int hw = headers_width (page, axis);
872 if (hw * 2 >= page->params->size[axis]
873 || hw + max_cell_width (page, axis) > page->params->size[axis])
875 page->table = table_unshare (page->table);
876 page->table->h[axis][0] = page->table->h[axis][1] = 0;
877 page->h[axis][0] = page->h[axis][1] = 0;
881 hmap_swap (&page->footnotes, &footnotes);
882 hmap_destroy (&footnotes);
883 page->n_footnotes = footnote_idx;
891 /* Increases PAGE's reference count. */
893 render_page_ref (const struct render_page *page_)
895 struct render_page *page = CONST_CAST (struct render_page *, page_);
900 /* Decreases PAGE's reference count and destroys PAGE if this causes the
901 reference count to fall to zero. */
903 render_page_unref (struct render_page *page)
905 if (page != NULL && --page->ref_cnt == 0)
908 struct render_overflow *overflow, *next;
910 HMAP_FOR_EACH_SAFE (overflow, next, struct render_overflow, node,
913 hmap_destroy (&page->overflows);
915 table_unref (page->table);
917 for (i = 0; i < TABLE_N_AXES; ++i)
919 free (page->join_crossing[i]);
927 /* Returns the size of PAGE along AXIS. (This might be larger than the page
928 size specified in the parameters passed to render_page_create(). Use a
929 render_break to break up a render_page into page-sized chunks.) */
931 render_page_get_size (const struct render_page *page, enum table_axis axis)
933 return page->cp[axis][page->n[axis] * 2 + 1];
936 /* If PAGE is smaller than SIZE along AXIS, inserts extra space to increase
939 render_page_stretch (struct render_page *page, enum table_axis axis,
942 int extra = size - render_page_get_size (page, axis);
945 int n = page->n[axis];
950 for (i = 2; i <= 2 * n + 1; i++)
958 page->cp[axis][i] += adjust;
964 render_page_get_best_breakpoint (const struct render_page *page, int height)
968 /* If there's no room for at least the top row and the rules above and below
969 it, don't include any of the table. */
970 if (page->cp[V][3] > height)
973 /* Otherwise include as many rows and rules as we can. */
974 for (y = 5; y <= 2 * page->n[V] + 1; y += 2)
975 if (page->cp[V][y] > height)
976 return page->cp[V][y - 2];
980 /* Drawing render_pages. */
982 static inline enum render_line_style
983 get_rule (const struct render_page *page, enum table_axis axis,
984 const int d[TABLE_N_AXES])
986 return rule_to_render_type (table_get_rule (page->table,
987 axis, d[H] / 2, d[V] / 2));
997 render_rule (const struct render_page *page, const int ofs[TABLE_N_AXES],
998 const int d[TABLE_N_AXES])
1000 enum render_line_style styles[TABLE_N_AXES][2];
1003 for (a = 0; a < TABLE_N_AXES; a++)
1005 enum table_axis b = !a;
1007 styles[a][0] = styles[a][1] = RENDER_LINE_NONE;
1010 || (page->is_edge_cutoff[a][0] && d[a] == 0)
1011 || (page->is_edge_cutoff[a][1] && d[a] == page->n[a] * 2))
1018 int e[TABLE_N_AXES];
1022 styles[a][0] = get_rule (page, a, e);
1025 if (d[b] / 2 < page->table->n[b])
1026 styles[a][1] = get_rule (page, a, d);
1029 styles[a][0] = styles[a][1] = get_rule (page, a, d);
1032 if (styles[H][0] != RENDER_LINE_NONE || styles[H][1] != RENDER_LINE_NONE
1033 || styles[V][0] != RENDER_LINE_NONE || styles[V][1] != RENDER_LINE_NONE)
1035 int bb[TABLE_N_AXES][2];
1037 bb[H][0] = ofs[H] + page->cp[H][d[H]];
1038 bb[H][1] = ofs[H] + page->cp[H][d[H] + 1];
1039 bb[V][0] = ofs[V] + page->cp[V][d[V]];
1040 bb[V][1] = ofs[V] + page->cp[V][d[V] + 1];
1041 page->params->draw_line (page->params->aux, bb, styles);
1046 render_cell (const struct render_page *page, const int ofs[TABLE_N_AXES],
1047 const struct table_cell *cell)
1049 const struct render_overflow *of;
1050 int bb[TABLE_N_AXES][2];
1051 int clip[TABLE_N_AXES][2];
1053 bb[H][0] = clip[H][0] = ofs[H] + page->cp[H][cell->d[H][0] * 2 + 1];
1054 bb[H][1] = clip[H][1] = ofs[H] + page->cp[H][cell->d[H][1] * 2];
1055 bb[V][0] = clip[V][0] = ofs[V] + page->cp[V][cell->d[V][0] * 2 + 1];
1056 bb[V][1] = clip[V][1] = ofs[V] + page->cp[V][cell->d[V][1] * 2];
1058 of = find_overflow (page, cell->d[H][0], cell->d[V][0]);
1061 enum table_axis axis;
1063 for (axis = 0; axis < TABLE_N_AXES; axis++)
1065 if (of->overflow[axis][0])
1067 bb[axis][0] -= of->overflow[axis][0];
1068 if (cell->d[axis][0] == 0 && !page->is_edge_cutoff[axis][0])
1069 clip[axis][0] = ofs[axis] + page->cp[axis][cell->d[axis][0] * 2];
1071 if (of->overflow[axis][1])
1073 bb[axis][1] += of->overflow[axis][1];
1074 if (cell->d[axis][1] == page->n[axis] && !page->is_edge_cutoff[axis][1])
1075 clip[axis][1] = ofs[axis] + page->cp[axis][cell->d[axis][1] * 2 + 1];
1080 page->params->draw_cell (page->params->aux, cell,
1081 find_footnote_idx (cell, &page->footnotes), bb, clip);
1084 /* Draws the cells of PAGE indicated in BB. */
1086 render_page_draw_cells (const struct render_page *page,
1087 int ofs[TABLE_N_AXES], int bb[TABLE_N_AXES][2])
1091 for (y = bb[V][0]; y < bb[V][1]; y++)
1092 for (x = bb[H][0]; x < bb[H][1]; )
1093 if (is_rule (x) || is_rule (y))
1095 int d[TABLE_N_AXES];
1098 render_rule (page, ofs, d);
1103 struct table_cell cell;
1105 table_get_cell (page->table, x / 2, y / 2, &cell);
1106 if (y / 2 == bb[V][0] / 2 || y / 2 == cell.d[V][0])
1107 render_cell (page, ofs, &cell);
1108 x = rule_ofs (cell.d[H][1]);
1109 table_cell_free (&cell);
1113 /* Renders PAGE, by calling the 'draw_line' and 'draw_cell' functions from the
1114 render_params provided to render_page_create(). */
1116 render_page_draw (const struct render_page *page, int ofs[TABLE_N_AXES])
1118 int bb[TABLE_N_AXES][2];
1121 bb[H][1] = page->n[H] * 2 + 1;
1123 bb[V][1] = page->n[V] * 2 + 1;
1125 render_page_draw_cells (page, ofs, bb);
1128 /* Returns the greatest value i, 0 <= i < n, such that cp[i] <= x0. */
1130 get_clip_min_extent (int x0, const int cp[], int n)
1132 int low, high, best;
1139 int middle = low + (high - low) / 2;
1141 if (cp[middle] <= x0)
1153 /* Returns the least value i, 0 <= i < n, such that cp[i] >= x1. */
1155 get_clip_max_extent (int x1, const int cp[], int n)
1157 int low, high, best;
1164 int middle = low + (high - low) / 2;
1166 if (cp[middle] >= x1)
1167 best = high = middle;
1172 while (best > 0 && cp[best - 1] == cp[best])
1178 /* Renders the cells of PAGE that intersect (X,Y)-(X+W,Y+H), by calling the
1179 'draw_line' and 'draw_cell' functions from the render_params provided to
1180 render_page_create(). */
1182 render_page_draw_region (const struct render_page *page,
1183 int ofs[TABLE_N_AXES], int clip[TABLE_N_AXES][2])
1185 int bb[TABLE_N_AXES][2];
1187 bb[H][0] = get_clip_min_extent (clip[H][0], page->cp[H], page->n[H] * 2 + 1);
1188 bb[H][1] = get_clip_max_extent (clip[H][1], page->cp[H], page->n[H] * 2 + 1);
1189 bb[V][0] = get_clip_min_extent (clip[V][0], page->cp[V], page->n[V] * 2 + 1);
1190 bb[V][1] = get_clip_max_extent (clip[V][1], page->cp[V], page->n[V] * 2 + 1);
1192 render_page_draw_cells (page, ofs, bb);
1195 /* Breaking up tables to fit on a page. */
1197 /* An iterator for breaking render_pages into smaller chunks. */
1200 struct render_page *page; /* Page being broken up. */
1201 enum table_axis axis; /* Axis along which 'page' is being broken. */
1202 int z; /* Next cell along 'axis'. */
1203 int pixel; /* Pixel offset within cell 'z' (usually 0). */
1204 int hw; /* Width of headers of 'page' along 'axis'. */
1207 static int needed_size (const struct render_break *, int cell);
1208 static bool cell_is_breakable (const struct render_break *, int cell);
1209 static struct render_page *render_page_select (const struct render_page *,
1214 /* Initializes render_break B for breaking PAGE along AXIS. */
1216 render_break_init (struct render_break *b, const struct render_page *page,
1217 enum table_axis axis)
1219 b->page = render_page_ref (page);
1221 b->z = page->h[axis][0];
1223 b->hw = headers_width (page, axis);
1226 /* Initializes B as a render_break structure for which
1227 render_break_has_next() always returns false. */
1229 render_break_init_empty (struct render_break *b)
1232 b->axis = TABLE_HORZ;
1238 /* Frees B and unrefs the render_page that it owns. */
1240 render_break_destroy (struct render_break *b)
1244 render_page_unref (b->page);
1249 /* Returns true if B still has cells that are yet to be returned,
1250 false if all of B's page has been processed. */
1252 render_break_has_next (const struct render_break *b)
1254 const struct render_page *page = b->page;
1255 enum table_axis axis = b->axis;
/* Done when 'z' has reached the trailing headers (the last h[axis][1]
   cells, which are re-rendered on every output page, never "consumed"). */
1257 return page != NULL && b->z < page->n[axis] - page->h[axis][1];
1260 /* Returns a new render_page that is up to SIZE pixels wide along B's axis.
1261 Returns a null pointer if B has already been completely broken up, or if
1262 SIZE is too small to reasonably render any cells. The latter will never
1263 happen if SIZE is at least as large as the page size passed to
1264 render_page_create() along B's axis. */
/* NOTE(review): many interior lines of this function (declarations of 'z'
   and 'pixel', braces, 'break'/'continue' statements) are elided from this
   excerpt; comments below annotate only the visible logic. */
1265 static struct render_page *
1266 render_break_next (struct render_break *b, int size)
1268 const struct render_page *page = b->page;
1269 enum table_axis axis = b->axis;
1270 struct render_page *subpage;
1273 if (!render_break_has_next (b))
/* Greedily extend the chunk one cell at a time until adding cell 'z' would
   exceed SIZE, then decide whether to break inside cell 'z'. */
1277 for (z = b->z; z < page->n[axis] - page->h[axis][1]; z++)
1279 int needed = needed_size (b, z + 1);
1282 if (cell_is_breakable (b, z))
1284 /* If there is no right header and we render a partial cell on
1285 the right side of the body, then we omit the rightmost rule of
1286 the body. Otherwise the rendering is deceptive because it
1287 looks like the whole cell is present instead of a partial
1290 This is similar to code for the left side in needed_size(). */
1291 int rule_allowance = (page->h[axis][1]
1293 : rule_width (page, axis, z));
1295 /* The amount that, if we added cell 'z', the rendering would
1296 overfill the allocated 'size'. */
1297 int overhang = needed - size - rule_allowance;
1299 /* The width of cell 'z'. */
1300 int cell_size = cell_width (page, axis, z);
1302 /* The amount trimmed off the left side of 'z',
1303 and the amount left to render. */
1304 int cell_ofs = z == b->z ? b->pixel : 0;
1305 int cell_left = cell_size - cell_ofs;
1307 /* A small but visible width. */
1308 int em = page->params->font_size[axis];
1310 /* If some of the cell remains to render,
1311 and there would still be some of the cell left afterward,
1312 then partially render that much of the cell. */
1313 pixel = (cell_left && cell_left > overhang
1314 ? cell_left - overhang + cell_ofs
1317 /* If there would be only a tiny amount of the cell left after
1318 rendering it partially, reduce the amount rendered slightly
1319 to make the output look a little better. */
1320 if (pixel + em > cell_size)
1321 pixel = MAX (pixel - em, 0);
1323 /* If we're breaking vertically, then consider whether the cells
1324 being broken have a better internal breakpoint than the exact
1325 number of pixels available, which might look bad e.g. because
1326 it breaks in the middle of a line of text. */
1327 if (axis == TABLE_VERT && page->params->adjust_break)
/* Walk each (possibly column-spanning) cell in the broken row and ask the
   device callback for a preferred break position within the cell. */
1331 for (x = 0; x < page->n[H]; )
1333 struct table_cell cell;
1337 table_get_cell (page->table, x, z, &cell);
1338 w = joined_width (page, H, cell.d[H][0], cell.d[H][1]);
1339 better_pixel = page->params->adjust_break (
1340 page->params->aux, &cell,
1341 find_footnote_idx (&cell, &page->footnotes), w, pixel);
1343 table_cell_free (&cell);
/* Only accept a smaller break position, and never move it at or before the
   portion of the cell already rendered on a previous chunk. */
1345 if (better_pixel < pixel)
1347 if (better_pixel > (z == b->z ? b->pixel : 0))
1349 pixel = better_pixel;
1352 else if (better_pixel == 0 && z != b->z)
/* Nothing at all fits: give up rather than emit an empty subpage. */
1365 if (z == b->z && !pixel)
/* Slice out [b->z, z) (plus a partial cell of 'pixel' pixels, if any) and
   advance the iterator state.  The remaining arguments are elided here. */
1368 subpage = render_page_select (page, axis, b->z, b->pixel,
1370 pixel ? cell_width (page, axis, z) - pixel
1377 /* Returns the width that would be required along B's axis to render a page
1378 from B's current position up to but not including CELL. */
/* The total is assembled as: left header + merged left rule + body span
   (minus any pixel offset already rendered) + merged right rule + right
   header + any join-crossing adjustment. */
1380 needed_size (const struct render_break *b, int cell)
1382 const struct render_page *page = b->page;
1383 enum table_axis axis = b->axis;
1386 /* Width of left header not including its rightmost rule. */
1387 size = axis_width (page, axis, 0, rule_ofs (page->h[axis][0]));
1389 /* If we have a pixel offset and there is no left header, then we omit the
1390 leftmost rule of the body. Otherwise the rendering is deceptive because
1391 it looks like the whole cell is present instead of a partial cell.
1393 Otherwise (if there are headers) we will be merging two rules: the
1394 rightmost rule in the header and the leftmost rule in the body. We assume
1395 that the width of a merged rule is the larger of the widths of either rule
1397 if (b->pixel == 0 || page->h[axis][0])
1398 size += MAX (rule_width (page, axis, page->h[axis][0]),
1399 rule_width (page, axis, b->z));
1401 /* Width of body, minus any pixel offset in the leftmost cell. */
1402 size += joined_width (page, axis, b->z, cell) - b->pixel;
1404 /* Width of rightmost rule in body merged with leftmost rule in headers. */
1405 size += MAX (rule_width_r (page, axis, page->h[axis][1]),
1406 rule_width (page, axis, cell));
1408 /* Width of right header not including its leftmost rule. */
1409 size += axis_width (page, axis, rule_ofs_r (page, axis, page->h[axis][1]),
1410 rule_ofs_r (page, axis, 0));
1412 /* Join crossing. */
1413 if (page->h[axis][0] && page->h[axis][1])
1414 size += page->join_crossing[axis][b->z];
1419 /* Returns true if CELL along B's axis may be broken across a page boundary.
1421 This is just a heuristic. Breaking cells across page boundaries can save
1422 space, but it looks ugly. */
1424 cell_is_breakable (const struct render_break *b, int cell)
1426 const struct render_page *page = b->page;
1427 enum table_axis axis = b->axis;
/* Only cells at least min_break pixels wide (a device-supplied threshold in
   render_params) are worth splitting across output pages. */
1429 return cell_width (page, axis, cell) >= page->params->min_break[axis];
/* NOTE(review): the "struct render_pager {" opener and some members (e.g.
   'cur_page', used by render_pager_start_page()) are elided from this
   excerpt; the lines below are the visible members. */
1436 const struct render_params *params;
/* The sequence of pages to render: title, body, caption, footnotes. */
1438 struct render_page **pages;
1439 size_t n_pages, allocated_pages;
/* Current breaking state: x_break slices the current page horizontally,
   y_break slices the current horizontal strip vertically. */
1442 struct render_break x_break;
1443 struct render_break y_break;
/* Lays out TABLE for P's device, appends the resulting page to P->pages
   (growing the array as needed), and returns it.  Ownership of TABLE passes
   to the new render_page. */
1446 static struct render_page *
1447 render_pager_add_table (struct render_pager *p, struct table *table)
1449 struct render_page *page;
1451 if (p->n_pages >= p->allocated_pages)
1452 p->pages = x2nrealloc (p->pages, &p->allocated_pages, sizeof *p->pages);
1453 page = p->pages[p->n_pages++] = render_page_create (p->params, table);
/* Begins breaking the next of P's pages: starts a horizontal break over it
   and resets the vertical break to empty (it is (re)initialized per
   horizontal strip in render_pager_has_next()). */
1458 render_pager_start_page (struct render_pager *p)
1460 render_break_init (&p->x_break, p->pages[p->cur_page++], H);
1461 render_break_init_empty (&p->y_break);
/* If BODY has any footnotes, builds a two-column table listing them (marker,
   text) and appends it to P as an additional page.  No-op otherwise. */
1465 add_footnote_page (struct render_pager *p, const struct render_page *body)
1467 const struct table *table = body->table;
1468 int nc = table_nc (table);
1469 int nr = table_nr (table);
1470 int footnote_idx = 0;
1471 struct tab_table *t;
1474 if (!body->n_footnotes)
1477 t = tab_create (2, body->n_footnotes)
1478 for (y = 0; y < nr; y++)
1479 for (x = 0; x < nc; )
1481 struct table_cell cell;
1483 table_get_cell (table, x, y, &cell);
/* Visit each cell only once, at its top-left row, so spanned cells do not
   contribute their footnotes multiple times. */
1484 if (y == cell.d[V][0])
1488 for (i = 0; i < cell.n_contents; i++)
1490 const struct cell_contents *cc = &cell.contents[i];
1493 for (j = 0; j < cc->n_footnotes; j++)
1495 const char *f = cc->footnotes[j];
/* Column 0 carries the footnote marker (via tab_footnote); column 1 the
   footnote text.  footnote_idx presumably advances in an elided line —
   TODO confirm against the full source. */
1497 tab_text (t, 0, footnote_idx, TAB_LEFT, "");
1498 tab_footnote (t, 0, footnote_idx, "(none)");
1499 tab_text (t, 1, footnote_idx, TAB_LEFT, f);
1505 table_cell_free (&cell);
1507 render_pager_add_table (p, &t->table);
1510 /* Creates and returns a new render_pager for rendering TABLE_ITEM on the
1511 device with the given PARAMS. */
1512 struct render_pager *
1513 render_pager_create (const struct render_params *params,
1514 const struct table_item *table_item)
1516 const char *caption = table_item_get_caption (table_item);
1517 const char *title = table_item_get_title (table_item);
1518 const struct render_page *body_page;
1519 struct render_page *title_page;
1520 struct render_pager *p;
1522 p = xzalloc (sizeof *p);
/* Pages are added in display order: optional title, body, optional caption,
   optional footnote listing. */
1527 ? render_pager_add_table (
1528 p, table_from_string (TAB_CENTER | TAB_EMPH, title))
1532 body_page = render_pager_add_table (p, table_ref (table_item_get_table (
/* Stretch the title to the body's width (capped at the device width) so the
   two line up when rendered. */
1534 if (title_page && body_page)
1536 int body_page_size = render_page_get_size (body_page, H);
1537 render_page_stretch (title_page, H,
1538 MIN (body_page_size, params->size[H]));
1543 render_pager_add_table (p, table_from_string (TAB_LEFT, caption));
1546 add_footnote_page (p, body_page);
1548 render_pager_start_page (p);
/* Releases all resources held by P: both break iterators, every page, and
   (in elided lines) the pages array and P itself. */
1555 render_pager_destroy (struct render_pager *p)
1561 render_break_destroy (&p->x_break);
1562 render_break_destroy (&p->y_break);
1563 for (i = 0; i < p->n_pages; i++)
1564 render_page_unref (p->pages[i]);
1570 /* Returns true if P has content remaining to render, false if rendering is
/* Despite the const parameter, this lazily advances P's break iterators to
   the next strip/page; the cast below makes that mutation explicit. */
1573 render_pager_has_next (const struct render_pager *p_)
1575 struct render_pager *p = CONST_CAST (struct render_pager *, p_);
/* Advance until the vertical break has content: exhaust the current
   vertical strip, then the current horizontal break, then move to the next
   page; when all pages are consumed, park both breaks in the empty state. */
1577 while (!render_break_has_next (&p->y_break))
1579 render_break_destroy (&p->y_break);
1580 if (!render_break_has_next (&p->x_break))
1582 render_break_destroy (&p->x_break);
1583 if (p->cur_page >= p->n_pages)
1585 render_break_init_empty (&p->x_break);
1586 render_break_init_empty (&p->y_break);
1589 render_pager_start_page (p);
/* Take the next full-device-width horizontal strip and break it vertically. */
1592 render_break_init (&p->y_break,
1593 render_break_next (&p->x_break, p->params->size[H]), V);
1598 /* Draws a chunk of content from P to fit in a space that has vertical size
1599 SPACE and the horizontal size specified in the render_params passed to
1600 render_page_create(). Returns the amount of space actually used by the
1601 rendered chunk, which will be 0 if SPACE is too small to render anything or
1602 if no content remains (use render_pager_has_next() to distinguish these
1605 render_pager_draw_next (struct render_pager *p, int space)
1607 int ofs[TABLE_N_AXES] = { 0, 0 };
1608 size_t start_page = SIZE_MAX;
1610 while (render_pager_has_next (p))
1612 struct render_page *page;
/* Guard against looping forever: if we come back around to the same page
   without having advanced, stop (elided lines presumably break here —
   TODO confirm against the full source). */
1614 if (start_page == p->cur_page)
1616 start_page = p->cur_page;
/* Take the next vertical chunk that fits in the space left below ofs[V]. */
1618 page = render_break_next (&p->y_break, space - ofs[V]);
1622 render_page_draw (page, ofs);
1623 ofs[V] += render_page_get_size (page, V);
1624 render_page_unref (page);
1629 /* Draws all of P's content. */
1631 render_pager_draw (const struct render_pager *p)
/* Equivalent to drawing an unbounded region starting at the origin. */
1633 render_pager_draw_region (p, 0, 0, INT_MAX, INT_MAX);
1636 /* Draws the region of P's content that lies in the region (X,Y)-(X+W,Y+H).
1637 Some extra content might be drawn; the device should perform clipping as
1640 render_pager_draw_region (const struct render_pager *p,
1641 int x, int y, int w, int h)
1643 int ofs[TABLE_N_AXES] = { 0, 0 };
1644 int clip[TABLE_N_AXES][2];
/* Pages are stacked vertically; walk them, clipping the requested region
   against each page's vertical extent (converted to page-local pixels).
   Horizontal clip setup and the ofs[V] advance are elided in this excerpt. */
1649 for (i = 0; i < p->n_pages; i++)
1651 const struct render_page *page = p->pages[i];
1652 int size = render_page_get_size (page, V);
1654 clip[V][0] = MAX (y, ofs[V]) - ofs[V];
1655 clip[V][1] = MIN (y + h, ofs[V] + size) - ofs[V];
1656 if (clip[V][1] > clip[V][0])
1657 render_page_draw_region (page, ofs, clip);
1663 /* Returns the size of P's content along AXIS; i.e. the content's width if AXIS
1664 is TABLE_VERT and its length if AXIS is TABLE_VERT. */
1666 render_pager_get_size (const struct render_pager *p, enum table_axis axis)
1671 for (i = 0; i < p->n_pages; i++)
1673 int subsize = render_page_get_size (p->pages[i], axis);
/* Pages stack vertically: width is the widest page, height is the sum. */
1674 size = axis == H ? MAX (size, subsize) : size + subsize;
/* Returns the best vertical breakpoint for P's content at or below HEIGHT
   pixels, by delegating to the page that HEIGHT falls within.  (The running
   'y' offset accumulation appears to be in elided lines — TODO confirm.) */
1681 render_pager_get_best_breakpoint (const struct render_pager *p, int height)
1686 for (i = 0; i < p->n_pages; i++)
1688 int size = render_page_get_size (p->pages[i], V);
1689 if (y + size >= height)
1690 return render_page_get_best_breakpoint (p->pages[i], height - y) + y;
1697 /* render_page_select() and helpers. */
/* Bundles the parameters of an in-progress render_page_select() call so the
   helper functions below need not take seven arguments apiece. */
1699 struct render_page_selection
1701 const struct render_page *page; /* Page whose slice we are selecting. */
1702 struct render_page *subpage; /* New page under construction. */
1703 enum table_axis a; /* Axis of 'page' along which 'subpage' is a slice. */
1704 enum table_axis b; /* The opposite of 'a'. */
1705 int z0; /* First cell along 'a' being selected. */
1706 int z1; /* Last cell being selected, plus 1. */
1707 int p0; /* Number of pixels to trim off left side of z0. */
1708 int p1; /* Number of pixels to trim off right side of z1-1. */
/* Helpers for translating cells and tracking overflow (partially rendered
   cell content) between 'page' and 'subpage'. */
1711 static void cell_to_subpage (struct render_page_selection *,
1712 const struct table_cell *,
1713 int subcell[TABLE_N_AXES]);
1714 static const struct render_overflow *find_overflow_for_cell (
1715 struct render_page_selection *, const struct table_cell *);
1716 static struct render_overflow *insert_overflow (struct render_page_selection *,
1717 const struct table_cell *);
1719 /* Creates and returns a new render_page whose contents are a subregion of
1720 PAGE's contents. The new render_page includes cells Z0 through Z1
1721 (exclusive) along AXIS, plus any headers on AXIS.
1723 If P0 is nonzero, then it is a number of pixels to exclude from the left or
1724 top (according to AXIS) of cell Z0. Similarly, P1 is a number of pixels to
1725 exclude from the right or bottom of cell Z1 - 1. (P0 and P1 are used to
1726 render cells that are too large to fit on a single page.)
1728 The whole of axis !AXIS is included. (The caller may follow up with another
1729 call to render_page_select() to select on !AXIS to select on that axis as
1732 The caller retains ownership of PAGE, which is not modified. */
/* NOTE(review): numerous interior lines (braces, declarations of 'z', 'jc',
   'scp', 'dcp', loop-advance statements, early return) are elided from this
   excerpt; comments below annotate only the visible logic. */
1733 static struct render_page *
1734 render_page_select (const struct render_page *page, enum table_axis axis,
1735 int z0, int p0, int z1, int p1)
1737 const struct render_footnote *f;
1738 struct render_page_selection s;
1739 enum table_axis a = axis;
1740 enum table_axis b = !a;
1741 struct render_page *subpage;
1742 struct render_overflow *ro;
1748 /* Optimize case where all of PAGE is selected by just incrementing the
1750 if (z0 == page->h[a][0] && p0 == 0
1751 && z1 == page->n[a] - page->h[a][1] && p1 == 0)
1753 struct render_page *page_rw = CONST_CAST (struct render_page *, page);
1758 /* Allocate subpage. */
1759 subpage = render_page_allocate (page->params,
1760 table_select_slice (
1761 table_ref (page->table),
1764 /* An edge is cut off if it was cut off in PAGE or if we're trimming pixels
1765 off that side of the page and there are no headers. */
1766 subpage->is_edge_cutoff[a][0] =
1767 subpage->h[a][0] == 0 && (p0 || (z0 == 0 && page->is_edge_cutoff[a][0]));
1768 subpage->is_edge_cutoff[a][1] =
1769 subpage->h[a][1] == 0 && (p1 || (z1 == page->n[a]
1770 && page->is_edge_cutoff[a][1]));
1771 subpage->is_edge_cutoff[b][0] = page->is_edge_cutoff[b][0];
1772 subpage->is_edge_cutoff[b][1] = page->is_edge_cutoff[b][1];
1774 /* Select join crossings from PAGE into subpage. */
/* Three runs: leading headers, selected body cells, trailing headers. */
1775 jc = subpage->join_crossing[a];
1776 for (z = 0; z < page->h[a][0]; z++)
1777 *jc++ = page->join_crossing[a][z];
1778 for (z = z0; z <= z1; z++)
1779 *jc++ = page->join_crossing[a][z];
1780 for (z = page->n[a] - page->h[a][1]; z < page->n[a]; z++)
1781 *jc++ = page->join_crossing[a][z];
1782 assert (jc == &subpage->join_crossing[a][subpage->n[a] + 1]);
1784 memcpy (subpage->join_crossing[b], page->join_crossing[b],
1785 (subpage->n[b] + 1) * sizeof **subpage->join_crossing);
1787 /* Select widths from PAGE into subpage. */
/* 'scp' presumably aliases page->cp[a] (source) and 'dcp' walks the
   destination; each step copies one rule-or-cell width as a delta.  The
   adjustments at cell_ofs(z0)/cell_ofs(z1-1) for p0/p1 are elided. */
1789 dcp = subpage->cp[a];
1791 for (z = 0; z <= rule_ofs (subpage->h[a][0]); z++, dcp++)
1793 if (z == 0 && subpage->is_edge_cutoff[a][0])
1796 dcp[1] = dcp[0] + (scp[z + 1] - scp[z]);
1798 for (z = cell_ofs (z0); z <= cell_ofs (z1 - 1); z++, dcp++)
1800 dcp[1] = dcp[0] + (scp[z + 1] - scp[z]);
1801 if (z == cell_ofs (z0))
1804 if (page->h[a][0] && page->h[a][1])
1805 dcp[1] += page->join_crossing[a][z / 2];
1807 if (z == cell_ofs (z1 - 1))
1810 for (z = rule_ofs_r (page, a, subpage->h[a][1]);
1811 z <= rule_ofs_r (page, a, 0); z++, dcp++)
1813 if (z == rule_ofs_r (page, a, 0) && subpage->is_edge_cutoff[a][1])
1816 dcp[1] = dcp[0] + (scp[z + 1] - scp[z]);
1818 assert (dcp == &subpage->cp[a][2 * subpage->n[a] + 1]);
/* The perpendicular axis is copied wholesale. */
1820 for (z = 0; z < page->n[b] * 2 + 2; z++)
1821 subpage->cp[b][z] = page->cp[b][z];
1823 /* Add new overflows. */
1831 s.subpage = subpage;
/* Cells spanning the leading cut edge become overflows: record how many
   pixels of each were trimmed on each side. */
1833 if (!page->h[a][0] || z0 > page->h[a][0] || p0)
1834 for (z = 0; z < page->n[b]; )
1836 struct table_cell cell;
1837 int d[TABLE_N_AXES];
1844 table_get_cell (page->table, d[H], d[V], &cell);
1845 overflow0 = p0 || cell.d[a][0] < z0;
1846 overflow1 = cell.d[a][1] > z1 || (cell.d[a][1] == z1 && p1);
1847 if (overflow0 || overflow1)
1849 ro = insert_overflow (&s, &cell);
1853 ro->overflow[a][0] += p0 + axis_width (
1854 page, a, cell_ofs (cell.d[a][0]), cell_ofs (z0));
1855 if (page->h[a][0] && page->h[a][1])
1856 ro->overflow[a][0] -= page->join_crossing[a][cell.d[a][0]
1862 ro->overflow[a][1] += p1 + axis_width (
1863 page, a, cell_ofs (z1), cell_ofs (cell.d[a][1]));
1864 if (page->h[a][0] && page->h[a][1])
1865 ro->overflow[a][1] -= page->join_crossing[a][cell.d[a][1]];
1869 table_cell_free (&cell);
/* Likewise for cells spanning the trailing cut edge that were not already
   recorded above. */
1872 if (!page->h[a][1] || z1 < page->n[a] - page->h[a][1] || p1)
1873 for (z = 0; z < page->n[b]; )
1875 struct table_cell cell;
1876 int d[TABLE_N_AXES];
1880 table_get_cell (page->table, d[H], d[V], &cell);
1881 if ((cell.d[a][1] > z1 || (cell.d[a][1] == z1 && p1))
1882 && find_overflow_for_cell (&s, &cell) == NULL)
1884 ro = insert_overflow (&s, &cell);
1885 ro->overflow[a][1] += p1 + axis_width (page, a, cell_ofs (z1),
1886 cell_ofs (cell.d[a][1]));
1889 table_cell_free (&cell);
1892 /* Copy overflows from PAGE into subpage. */
1893 HMAP_FOR_EACH (ro, struct render_overflow, node, &page->overflows)
1895 struct table_cell cell;
1897 table_get_cell (page->table, ro->d[H], ro->d[V], &cell);
1898 if (cell.d[a][1] > z0 && cell.d[a][0] < z1
1899 && find_overflow_for_cell (&s, &cell) == NULL)
1900 insert_overflow (&s, &cell);
1901 table_cell_free (&cell);
1904 /* Copy footnotes from PAGE into subpage. */
/* Keep each footnote whose anchor cell intersects [z0, z1), translating its
   coordinates into the subpage's cell space (headers shift the origin). */
1905 HMAP_FOR_EACH (f, struct render_footnote, node, &page->footnotes)
1906 if ((f->d[a][0] >= z0 && f->d[a][0] < z1)
1907 || (f->d[a][1] - 1 >= z0 && f->d[a][1] - 1 < z1))
1909 struct render_footnote *nf = xmalloc (sizeof *nf);
1910 nf->d[a][0] = MAX (z0, f->d[a][0]) - z0 + page->h[a][0];
1911 nf->d[a][1] = MIN (z1, f->d[a][1]) - z0 + page->h[a][0];
1912 nf->d[b][0] = f->d[b][0];
1913 nf->d[b][1] = f->d[b][1];
1915 hmap_insert (&subpage->footnotes, &nf->node,
1916 hash_cell (nf->d[H][0], nf->d[V][0]));
1922 /* Given CELL, a table_cell within S->page, stores in SUBCELL the (x,y)
1923 coordinates of the top-left cell as it will appear in S->subpage.
1925 CELL must actually intersect the region of S->page that is being selected
1926 by render_page_select() or the results will not make any sense. */
1928 cell_to_subpage (struct render_page_selection *s,
1929 const struct table_cell *cell, int subcell[TABLE_N_AXES])
1931 enum table_axis a = s->a;
1932 enum table_axis b = s->b;
1933 int ha0 = s->subpage->h[a][0];
/* Along the selected axis, shift by -z0 and re-add the header width; clamp
   to the header edge for cells that start before the selection.  The other
   axis is untouched by the selection. */
1935 subcell[a] = MAX (cell->d[a][0] - s->z0 + ha0, ha0);
1936 subcell[b] = cell->d[b][0];
1939 /* Given CELL, a table_cell within S->page, returns the render_overflow for
1940 that cell in S->subpage, if there is one, and a null pointer otherwise.
1942 CELL must actually intersect the region of S->page that is being selected
1943 by render_page_select() or the results will not make any sense. */
1944 static const struct render_overflow *
1945 find_overflow_for_cell (struct render_page_selection *s,
1946 const struct table_cell *cell)
/* Translate CELL into subpage coordinates, then look it up in the
   subpage's overflow hash table. */
1950 cell_to_subpage (s, cell, subcell);
1951 return find_overflow (s->subpage, subcell[H], subcell[V]);
1954 /* Given CELL, a table_cell within S->page, inserts a render_overflow for that
1955 cell in S->subpage (which must not already exist). Initializes the new
1956 render_overflow's 'overflow' member from the overflow for CELL in S->page,
1959 CELL must actually intersect the region of S->page that is being selected
1960 by render_page_select() or the results will not make any sense. */
1961 static struct render_overflow *
1962 insert_overflow (struct render_page_selection *s,
1963 const struct table_cell *cell)
1965 const struct render_overflow *old;
1966 struct render_overflow *of;
1968 of = xzalloc (sizeof *of);
1969 cell_to_subpage (s, cell, of->d);
1970 hmap_insert (&s->subpage->overflows, &of->node,
1971 hash_cell (of->d[H], of->d[V]));
1973 old = find_overflow (s->page, cell->d[H][0], cell->d[V][0]);
1975 memcpy (of->overflow, old->overflow, sizeof of->overflow);