1 /* PSPP - a program for statistical analysis.
2 Copyright (C) 2009, 2010, 2011, 2013, 2014, 2016 Free Software Foundation, Inc.
4 This program is free software: you can redistribute it and/or modify
5 it under the terms of the GNU General Public License as published by
6 the Free Software Foundation, either version 3 of the License, or
7 (at your option) any later version.
9 This program is distributed in the hope that it will be useful,
10 but WITHOUT ANY WARRANTY; without even the implied warranty of
11 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 GNU General Public License for more details.
14 You should have received a copy of the GNU General Public License
15 along with this program. If not, see <http://www.gnu.org/licenses/>. */
25 #include "libpspp/assertion.h"
26 #include "libpspp/hash-functions.h"
27 #include "libpspp/hmap.h"
28 #include "libpspp/pool.h"
29 #include "output/render.h"
30 #include "output/tab.h"
31 #include "output/table-item.h"
32 #include "output/table.h"
34 #include "gl/minmax.h"
35 #include "gl/xalloc.h"
38 #define _(msgid) gettext (msgid)
40 /* This file uses TABLE_HORZ and TABLE_VERT enough to warrant abbreviating. */
44 /* A layout for rendering a specific table on a specific device.
46 May represent the layout of an entire table presented to
47 render_page_create(), or a rectangular subregion of a table broken out using
48 render_break_next() to allow a table to be broken across multiple pages.
50 A page's size is not limited to the size passed in as part of render_params.
51 render_pager breaks a render_page into smaller render_pages that will fit in
52 the available space. */
55 const struct render_params *params; /* Parameters of the target device. */
56 struct table *table; /* Table rendered. */
59 /* Local copies of table->n and table->h, for convenience. */
61 int h[TABLE_N_AXES][2];
65 cp[H] represents x positions within the table.
67 cp[H][1] = the width of the leftmost vertical rule.
68 cp[H][2] = cp[H][1] + the width of the leftmost column.
69 cp[H][3] = cp[H][2] + the width of the second-from-left vertical rule.
71 cp[H][2 * nc] = x position of the rightmost vertical rule.
72 cp[H][2 * nc + 1] = total table width including all rules.
74 Similarly, cp[V] represents y positions within the table.
76 cp[V][1] = the height of the topmost horizontal rule.
77 cp[V][2] = cp[V][1] + the height of the topmost row.
78 cp[V][3] = cp[V][2] + the height of the second-from-top horizontal rule.
80 cp[V][2 * nr] = y position of the bottommost horizontal rule.
81 cp[V][2 * nr + 1] = total table height including all rules.
83 Rules and columns can have width or height 0, in which case consecutive
84 values in this array are equal. */
85 int *cp[TABLE_N_AXES];
87 /* render_break_next() can break a table such that some cells are not fully
88 contained within a render_page. This will happen if a cell is too wide
89 or too tall to fit on a single page, or if a cell spans multiple rows or
90 columns and the page only includes some of those rows or columns.
92 This hash table contains "struct render_overflow"s that represent each
93 such cell that doesn't completely fit on this page.
95 Each overflow cell borders at least one header edge of the table and may
96 border more. (A single table cell that is so large that it fills the
97 entire page can overflow on all four sides!) */
98 struct hmap overflows;
100 /* If a single column (or row) is too wide (or tall) to fit on a page
101 reasonably, then render_break_next() will split a single row or column
102 across multiple render_pages. This member indicates when this has
105 is_edge_cutoff[H][0] is true if pixels have been cut off the left side
106 of the leftmost column in this page, and false otherwise.
108 is_edge_cutoff[H][1] is true if pixels have been cut off the right side
109 of the rightmost column in this page, and false otherwise.
111 is_edge_cutoff[V][0] and is_edge_cutoff[V][1] are similar for the top
112 and bottom of the table.
114 The effect of is_edge_cutoff is to prevent rules along the edge in
115 question from being rendered.
117 When is_edge_cutoff is true for a given edge, the 'overflows' hmap will
118 contain a node for each cell along that edge. */
119 bool is_edge_cutoff[TABLE_N_AXES][2];
121 /* If part of a joined cell would be cut off by breaking a table along
122 'axis' at the rule with offset 'z' (where 0 <= z <= n[axis]), then
123 join_crossing[axis][z] is the thickness of the rule that would be cut
126 This is used to know to allocate extra space for breaking at such a
127 position, so that part of the cell's content is not lost.
129 This affects breaking a table only when headers are present. When
130 headers are not present, the rule's thickness is used for cell content,
131 so no part of the cell's content is lost (and in fact it is duplicated
132 across both pages). */
133 int *join_crossing[TABLE_N_AXES];
136 static struct render_page *render_page_create (const struct render_params *,
137 struct table *, int min_width);
139 struct render_page *render_page_ref (const struct render_page *page_);
140 static void render_page_unref (struct render_page *);
142 /* Returns the offset in struct render_page's cp[axis] array of the rule with
143 index RULE_IDX. That is, if RULE_IDX is 0, then the offset is that of the
144 leftmost or topmost rule; if RULE_IDX is 1, then the offset is that of the
145 next rule to the right (or below); and so on. */
static int
rule_ofs (int rule_idx)
{
  return rule_idx * 2;
}
152 /* Returns the offset in struct render_page's cp[axis] array of the rule with
153 index RULE_IDX_R, which counts from the right side (or bottom) of the page
154 left (or up), according to whether AXIS is H or V, respectively. That is,
155 if RULE_IDX_R is 0, then the offset is that of the rightmost or bottommost
156 rule; if RULE_IDX is 1, then the offset is that of the next rule to the left
157 (or above); and so on. */
159 rule_ofs_r (const struct render_page *page, int axis, int rule_idx_r)
161 return (page->n[axis] - rule_idx_r) * 2;
/* Returns the offset in struct render_page's cp[axis] array of the cell with
   index CELL_IDX.  That is, if CELL_IDX is 0, then the offset is that of the
   leftmost or topmost cell; if CELL_IDX is 1, then the offset is that of the
   next cell to the right (or below); and so on. */
static int
cell_ofs (int cell_idx)
{
  return cell_idx * 2 + 1;
}
174 /* Returns the width of PAGE along AXIS from OFS0 to OFS1, exclusive. */
176 axis_width (const struct render_page *page, int axis, int ofs0, int ofs1)
178 return page->cp[axis][ofs1] - page->cp[axis][ofs0];
181 /* Returns the width of the headers in PAGE along AXIS. */
183 headers_width (const struct render_page *page, int axis)
185 int h0 = page->h[axis][0];
186 int w0 = axis_width (page, axis, rule_ofs (0), cell_ofs (h0));
187 int n = page->n[axis];
188 int h1 = page->h[axis][1];
189 int w1 = axis_width (page, axis, rule_ofs_r (page, axis, h1), cell_ofs (n));
193 /* Returns the width of cell X along AXIS in PAGE. */
static int
cell_width (const struct render_page *page, int axis, int x)
{
  return axis_width (page, axis, cell_ofs (x), cell_ofs (x) + 1);
}
200 /* Returns the width of rule X along AXIS in PAGE. */
static int
rule_width (const struct render_page *page, int axis, int x)
{
  return axis_width (page, axis, rule_ofs (x), rule_ofs (x) + 1);
}
/* Returns the width of rule X along AXIS in PAGE, where X counts from the
   right side (or bottom) of the page. */
static int
rule_width_r (const struct render_page *page, int axis, int x)
{
  int ofs = rule_ofs_r (page, axis, x);
  return axis_width (page, axis, ofs, ofs + 1);
}
215 /* Returns the width of cells X0 through X1, exclusive, along AXIS in PAGE. */
static int
joined_width (const struct render_page *page, int axis, int x0, int x1)
{
  return axis_width (page, axis, cell_ofs (x0), cell_ofs (x1) - 1);
}
222 /* Returns the width of the widest cell, excluding headers, along AXIS in
225 max_cell_width (const struct render_page *page, int axis)
227 int n = page->n[axis];
228 int x0 = page->h[axis][0];
229 int x1 = n - page->h[axis][1];
232 for (int x = x0; x < x1; x++)
234 int w = cell_width (page, axis, x);
241 /* A cell that doesn't completely fit on the render_page. */
242 struct render_overflow
244 struct hmap_node node; /* In render_page's 'overflows' hmap. */
246 /* Occupied region of page.
248 d[H][0] is the leftmost column.
249 d[H][1] is the rightmost column, plus 1.
250 d[V][0] is the top row.
251 d[V][1] is the bottom row, plus 1.
253 The cell in its original table might occupy a larger region. This
254 member reflects the size of the cell in the current render_page, after
255 trimming off any rows or columns due to page-breaking. */
258 /* The space that has been trimmed off the cell:
260 overflow[H][0]: space trimmed off its left side.
261 overflow[H][1]: space trimmed off its right side.
262 overflow[V][0]: space trimmed off its top.
263 overflow[V][1]: space trimmed off its bottom.
265 During rendering, this information is used to position the rendered
266 portion of the cell within the available space.
268 When a cell is rendered, sometimes it is permitted to spill over into
269 space that is ordinarily reserved for rules. Either way, this space is
270 still included in overflow values.
272 Suppose, for example, that a cell that joins 2 columns has a width of 60
273 pixels and content "abcdef", that the 2 columns that it joins have
274 widths of 20 and 30 pixels, respectively, and that therefore the rule
275 between the two joined columns has a width of 10 (20 + 10 + 30 = 60).
276 It might render like this, if each character is 10x10, and showing a few
277 extra table cells for context:
285 If this render_page is broken at the rule that separates "gh" from
286 "ijk", then the page that contains the left side of the "abcdef" cell
287 will have overflow[H][1] of 10 + 30 = 40 for its portion of the cell,
288 and the page that contains the right side of the cell will have
289 overflow[H][0] of 20 + 10 = 30. The two resulting pages would look like
307 int overflow[TABLE_N_AXES][2];
/* Returns a hash value for the cell whose top-left corner is (X,Y). */
static unsigned int
hash_cell (int x, int y)
{
  return hash_int (x + (y << 16), 0);
}
317 /* Searches PAGE's set of render_overflow for one whose top-left cell is
318 (X,Y). Returns it, if there is one, otherwise a null pointer. */
319 static const struct render_overflow *
320 find_overflow (const struct render_page *page, int x, int y)
322 if (!hmap_is_empty (&page->overflows))
324 const struct render_overflow *of;
326 HMAP_FOR_EACH_WITH_HASH (of, struct render_overflow, node,
327 hash_cell (x, y), &page->overflows)
328 if (x == of->d[H] && y == of->d[V])
335 /* Row or column dimensions. Used to figure the size of a table in
336 render_page_create() and discarded after that. */
339 /* Width without considering rows (or columns) that span more than one (or
343 /* Width taking spanned rows (or columns) into consideration. */
347 /* Modifies the 'width' members of the N elements of ROWS so that their sum,
348 when added to rule widths RULES[1] through RULES[N - 1] inclusive, is at
351 distribute_spanned_width (int width,
352 struct render_row *rows, const int *rules, int n)
354 /* Sum up the unspanned widths of the N rows for use as weights. */
355 int total_unspanned = 0;
356 for (int x = 0; x < n; x++)
357 total_unspanned += rows[x].unspanned;
358 for (int x = 0; x < n - 1; x++)
359 total_unspanned += rules[x + 1];
360 if (total_unspanned >= width)
363 /* The algorithm used here is based on the following description from HTML 4:
365 For cells that span multiple columns, a simple approach consists of
366 apportioning the min/max widths evenly to each of the constituent
367 columns. A slightly more complex approach is to use the min/max
368 widths of unspanned cells to weight how spanned widths are
369 apportioned. Experiments suggest that a blend of the two approaches
370 gives good results for a wide range of tables.
372 We blend the two approaches half-and-half, except that we cannot use the
373 unspanned weights when 'total_unspanned' is 0 (because that would cause a
376 The calculation we want to do is this:
379 w1 = width * (column's unspanned width) / (total unspanned width)
380 (column's width) = (w0 + w1) / 2
382 We implement it as a precise calculation in integers by multiplying w0 and
383 w1 by the common denominator of all three calculations (d), dividing that
384 out in the column width calculation, and then keeping the remainder for
387 (We actually compute the unspanned width of a column as twice the
388 unspanned width, plus the width of the rule on the left, plus the width of
389 the rule on the right. That way each rule contributes to both the cell on
390 its left and on its right.)
392 long long int d0 = n;
393 long long int d1 = 2LL * MAX (total_unspanned, 1);
394 long long int d = d0 * d1;
395 if (total_unspanned > 0)
397 long long int w = d / 2;
398 for (int x = 0; x < n; x++)
401 if (total_unspanned > 0)
403 long long int unspanned = rows[x].unspanned * 2LL;
405 unspanned += rules[x + 1];
407 unspanned += rules[x];
408 w += width * unspanned * d0;
411 rows[x].width = MAX (rows[x].width, w / d);
412 w -= rows[x].width * d;
416 /* Initializes PAGE->cp[AXIS] from the row widths in ROWS and the rule widths
419 accumulate_row_widths (const struct render_page *page, enum table_axis axis,
420 const struct render_row *rows, const int *rules)
422 int n = page->n[axis];
423 int *cp = page->cp[axis];
425 for (int z = 0; z < n; z++)
427 cp[1] = cp[0] + rules[z];
428 cp[2] = cp[1] + rows[z].width;
431 cp[1] = cp[0] + rules[n];
434 /* Returns the sum of widths of the N ROWS and N+1 RULES. */
436 calculate_table_width (int n, const struct render_row *rows, int *rules)
439 for (int x = 0; x < n; x++)
440 width += rows[x].width;
441 for (int x = 0; x <= n; x++)
447 /* Rendering utility functions. */
449 /* Returns the line style to use for drawing a rule of the given TYPE. */
450 static enum render_line_style
451 rule_to_render_type (unsigned char type)
456 return RENDER_LINE_NONE;
458 return RENDER_LINE_SINGLE;
460 return RENDER_LINE_DASHED;
462 return RENDER_LINE_THICK;
464 return RENDER_LINE_THIN;
466 return RENDER_LINE_DOUBLE;
472 /* Returns the width of the rule in TABLE that is at offset Z along axis A, if
473 rendered with PARAMS. */
475 measure_rule (const struct render_params *params, const struct table *table,
476 enum table_axis a, int z)
478 enum table_axis b = !a;
480 /* Determine all types of rules that are present, as a bitmap in 'rules'
481 where rule type 't' is present if bit 2**t is set. */
482 struct cell_color color;
483 unsigned int rules = 0;
486 for (d[b] = 0; d[b] < table->n[b]; d[b]++)
487 rules |= 1u << table_get_rule (table, a, d[H], d[V], &color);
489 /* Turn off TAL_NONE because it has width 0 and we needn't bother. However,
490 if the device doesn't support margins, make sure that there is at least a
491 small gap between cells (but we don't need any at the left or right edge
493 if (rules & (1u << TAL_NONE))
495 rules &= ~(1u << TAL_NONE);
496 if (z > 0 && z < table->n[a] && !params->supports_margins && a == H)
497 rules |= 1u << TAL_SOLID;
500 /* Calculate maximum width of the rules that are present. */
502 for (size_t i = 0; i < TABLE_N_STROKES; i++)
503 if (rules & (1u << i))
504 width = MAX (width, params->line_widths[a][rule_to_render_type (i)]);
508 /* Allocates and returns a new render_page using PARAMS and TABLE. Allocates
509 space for all of the members of the new page, but the caller must initialize
510 the 'cp' member itself. */
511 static struct render_page *
512 render_page_allocate (const struct render_params *params,
515 struct render_page *page = xmalloc (sizeof *page);
516 page->params = params;
519 page->n[H] = table->n[H];
520 page->n[V] = table->n[V];
521 page->h[H][0] = table->h[H][0];
522 page->h[H][1] = table->h[H][1];
523 page->h[V][0] = table->h[V][0];
524 page->h[V][1] = table->h[V][1];
526 for (int i = 0; i < TABLE_N_AXES; i++)
528 page->cp[i] = xmalloc ((2 * page->n[i] + 2) * sizeof *page->cp[i]);
529 page->join_crossing[i] = xzalloc ((page->n[i] + 1)
530 * sizeof *page->join_crossing[i]);
533 hmap_init (&page->overflows);
534 memset (page->is_edge_cutoff, 0, sizeof page->is_edge_cutoff);
539 /* Allocates and returns a new render_page for PARAMS and TABLE, initializing
540 cp[H] in the new page from ROWS and RULES. The caller must still initialize
542 static struct render_page *
543 create_page_with_exact_widths (const struct render_params *params,
545 const struct render_row *rows, int *rules)
547 struct render_page *page = render_page_allocate (params, table);
548 accumulate_row_widths (page, H, rows, rules);
552 /* Allocates and returns a new render_page for PARAMS and TABLE.
554 Initializes cp[H] in the new page by setting the width of each row 'i' to
555 somewhere between the minimum cell width ROW_MIN[i].width and the maximum
556 ROW_MAX[i].width. Sets the width of rules to those in RULES.
558 W_MIN is the sum of ROWS_MIN[].width.
560 W_MAX is the sum of ROWS_MAX[].width.
562 The caller must still initialize cp[V]. */
563 static struct render_page *
564 create_page_with_interpolated_widths (const struct render_params *params,
566 const struct render_row *rows_min,
567 const struct render_row *rows_max,
568 int w_min, int w_max, const int *rules)
570 const int n = table->n[H];
571 const long long int avail = params->size[H] - w_min;
572 const long long int wanted = w_max - w_min;
576 struct render_page *page = render_page_allocate (params, table);
578 int *cph = page->cp[H];
580 long long int w = wanted / 2;
581 for (int x = 0; x < n; x++)
583 w += avail * (rows_max[x].width - rows_min[x].width);
584 int extra = w / wanted;
587 cph[1] = cph[0] + rules[x];
588 cph[2] = cph[1] + rows_min[x].width + extra;
591 cph[1] = cph[0] + rules[n];
593 assert (page->cp[H][n * 2 + 1] == params->size[H]);
599 set_join_crossings (struct render_page *page, enum table_axis axis,
600 const struct table_cell *cell, int *rules)
602 for (int z = cell->d[axis][0] + 1; z <= cell->d[axis][1] - 1; z++)
603 page->join_crossing[axis][z] = rules[z];
606 /* Creates and returns a new render_page for rendering TABLE on a device
609 The new render_page will be suitable for rendering on a device whose page
610 size is PARAMS->size, but the caller is responsible for actually breaking it
611 up to fit on such a device, using the render_break abstraction. */
612 static struct render_page *
613 render_page_create (const struct render_params *params, struct table *table,
618 int nc = table_nc (table);
619 int nr = table_nr (table);
621 /* Figure out rule widths. */
622 int *rules[TABLE_N_AXES];
623 for (enum table_axis axis = 0; axis < TABLE_N_AXES; axis++)
625 int n = table->n[axis] + 1;
627 rules[axis] = xnmalloc (n, sizeof *rules);
628 for (int z = 0; z < n; z++)
629 rules[axis][z] = measure_rule (params, table, axis, z);
632 /* Calculate minimum and maximum widths of cells that do not
633 span multiple columns. */
634 struct render_row *columns[2];
635 for (int i = 0; i < 2; i++)
636 columns[i] = xzalloc (nc * sizeof *columns[i]);
637 for (int y = 0; y < nr; y++)
638 for (int x = 0; x < nc; )
640 struct table_cell cell;
642 table_get_cell (table, x, y, &cell);
643 if (y == cell.d[V][0])
645 if (table_cell_colspan (&cell) == 1)
648 params->measure_cell_width (params->aux, &cell,
650 for (int i = 0; i < 2; i++)
651 if (columns[i][x].unspanned < w[i])
652 columns[i][x].unspanned = w[i];
656 table_cell_free (&cell);
659 /* Distribute widths of spanned columns. */
660 for (int i = 0; i < 2; i++)
661 for (int x = 0; x < nc; x++)
662 columns[i][x].width = columns[i][x].unspanned;
663 for (int y = 0; y < nr; y++)
664 for (int x = 0; x < nc; )
666 struct table_cell cell;
668 table_get_cell (table, x, y, &cell);
669 if (y == cell.d[V][0] && table_cell_colspan (&cell) > 1)
673 params->measure_cell_width (params->aux, &cell, &w[MIN], &w[MAX]);
674 for (int i = 0; i < 2; i++)
675 distribute_spanned_width (w[i], &columns[i][cell.d[H][0]],
676 rules[H], table_cell_colspan (&cell));
679 table_cell_free (&cell);
682 for (int i = 0; i < 2; i++)
683 distribute_spanned_width (min_width, &columns[i][0], rules[H], nc);
685 /* In pathological cases, spans can cause the minimum width of a column to
686 exceed the maximum width. This bollixes our interpolation algorithm
687 later, so fix it up. */
688 for (int i = 0; i < nc; i++)
689 if (columns[MIN][i].width > columns[MAX][i].width)
690 columns[MAX][i].width = columns[MIN][i].width;
692 /* Decide final column widths. */
694 for (int i = 0; i < 2; i++)
695 table_widths[i] = calculate_table_width (table_nc (table),
696 columns[i], rules[H]);
698 struct render_page *page;
699 if (table_widths[MAX] <= params->size[H])
701 /* Fits even with maximum widths. Use them. */
702 page = create_page_with_exact_widths (params, table, columns[MAX],
705 else if (table_widths[MIN] <= params->size[H])
707 /* Fits with minimum widths, so distribute the leftover space. */
708 page = create_page_with_interpolated_widths (
709 params, table, columns[MIN], columns[MAX],
710 table_widths[MIN], table_widths[MAX], rules[H]);
714 /* Doesn't fit even with minimum widths. Assign minimums for now, and
715 later we can break it horizontally into multiple pages. */
716 page = create_page_with_exact_widths (params, table, columns[MIN],
720 /* Calculate heights of cells that do not span multiple rows. */
721 struct render_row *rows = xzalloc (nr * sizeof *rows);
722 for (int y = 0; y < nr; y++)
723 for (int x = 0; x < nc; )
725 struct render_row *r = &rows[y];
726 struct table_cell cell;
728 table_get_cell (table, x, y, &cell);
729 if (y == cell.d[V][0])
731 if (table_cell_rowspan (&cell) == 1)
733 int w = joined_width (page, H, cell.d[H][0], cell.d[H][1]);
734 int h = params->measure_cell_height (params->aux, &cell, w);
735 if (h > r->unspanned)
736 r->unspanned = r->width = h;
739 set_join_crossings (page, V, &cell, rules[V]);
741 if (table_cell_colspan (&cell) > 1)
742 set_join_crossings (page, H, &cell, rules[H]);
745 table_cell_free (&cell);
747 for (int i = 0; i < 2; i++)
750 /* Distribute heights of spanned rows. */
751 for (int y = 0; y < nr; y++)
752 for (int x = 0; x < nc; )
754 struct table_cell cell;
756 table_get_cell (table, x, y, &cell);
757 if (y == cell.d[V][0] && table_cell_rowspan (&cell) > 1)
759 int w = joined_width (page, H, cell.d[H][0], cell.d[H][1]);
760 int h = params->measure_cell_height (params->aux, &cell, w);
761 distribute_spanned_width (h, &rows[cell.d[V][0]], rules[V],
762 table_cell_rowspan (&cell));
765 table_cell_free (&cell);
768 /* Decide final row heights. */
769 accumulate_row_widths (page, V, rows, rules[V]);
772 /* Measure headers. If they are "too big", get rid of them. */
773 for (enum table_axis axis = 0; axis < TABLE_N_AXES; axis++)
775 int hw = headers_width (page, axis);
776 if (hw * 2 >= page->params->size[axis]
777 || hw + max_cell_width (page, axis) > page->params->size[axis])
779 page->table = table_unshare (page->table);
780 page->table->h[axis][0] = page->table->h[axis][1] = 0;
781 page->h[axis][0] = page->h[axis][1] = 0;
791 /* Increases PAGE's reference count. */
793 render_page_ref (const struct render_page *page_)
795 struct render_page *page = CONST_CAST (struct render_page *, page_);
800 /* Decreases PAGE's reference count and destroys PAGE if this causes the
801 reference count to fall to zero. */
803 render_page_unref (struct render_page *page)
805 if (page != NULL && --page->ref_cnt == 0)
807 struct render_overflow *overflow, *next;
808 HMAP_FOR_EACH_SAFE (overflow, next, struct render_overflow, node,
811 hmap_destroy (&page->overflows);
813 table_unref (page->table);
815 for (int i = 0; i < TABLE_N_AXES; ++i)
817 free (page->join_crossing[i]);
825 /* Returns the size of PAGE along AXIS. (This might be larger than the page
826 size specified in the parameters passed to render_page_create(). Use a
827 render_break to break up a render_page into page-sized chunks.) */
829 render_page_get_size (const struct render_page *page, enum table_axis axis)
831 return page->cp[axis][page->n[axis] * 2 + 1];
835 render_page_get_best_breakpoint (const struct render_page *page, int height)
837 /* If there's no room for at least the top row and the rules above and below
838 it, don't include any of the table. */
839 if (page->cp[V][3] > height)
842 /* Otherwise include as many rows and rules as we can. */
843 for (int y = 5; y <= 2 * page->n[V] + 1; y += 2)
844 if (page->cp[V][y] > height)
845 return page->cp[V][y - 2];
849 /* Drawing render_pages. */
851 static inline enum render_line_style
852 get_rule (const struct render_page *page, enum table_axis axis,
853 const int d[TABLE_N_AXES], struct cell_color *color)
855 return rule_to_render_type (table_get_rule (page->table,
856 axis, d[H] / 2, d[V] / 2,
static bool
render_direction_rtl (void)
{
  /* TRANSLATORS: Do not translate this string.  If the script of your language
     reads from right to left (eg Persian, Arabic, Hebrew etc), then replace
     this string with "output-direction-rtl".  Otherwise either leave it
     untranslated or copy it verbatim. */
  const char *dir = _("output-direction-ltr");
  if (0 == strcmp ("output-direction-rtl", dir))
    return true;

  if (0 != strcmp ("output-direction-ltr", dir))
    fprintf (stderr, "This localisation has been incorrectly translated. "
             "Complain to the translator.\n");

  return false;
}
885 render_rule (const struct render_page *page, const int ofs[TABLE_N_AXES],
886 const int d[TABLE_N_AXES])
888 enum render_line_style styles[TABLE_N_AXES][2];
889 struct cell_color colors[TABLE_N_AXES][2];
891 for (enum table_axis a = 0; a < TABLE_N_AXES; a++)
893 enum table_axis b = !a;
895 styles[a][0] = styles[a][1] = RENDER_LINE_NONE;
898 || (page->is_edge_cutoff[a][0] && d[a] == 0)
899 || (page->is_edge_cutoff[a][1] && d[a] == page->n[a] * 2))
910 styles[a][0] = get_rule (page, a, e, &colors[a][0]);
913 if (d[b] / 2 < page->table->n[b])
914 styles[a][1] = get_rule (page, a, d, &colors[a][1]);
918 styles[a][0] = styles[a][1] = get_rule (page, a, d, &colors[a][0]);
919 colors[a][1] = colors[a][0];
923 if (styles[H][0] != RENDER_LINE_NONE || styles[H][1] != RENDER_LINE_NONE
924 || styles[V][0] != RENDER_LINE_NONE || styles[V][1] != RENDER_LINE_NONE)
926 int bb[TABLE_N_AXES][2];
928 bb[H][0] = ofs[H] + page->cp[H][d[H]];
929 bb[H][1] = ofs[H] + page->cp[H][d[H] + 1];
930 if (page->params->rtl)
933 bb[H][0] = render_page_get_size (page, H) - bb[H][1];
934 bb[H][1] = render_page_get_size (page, H) - temp;
936 bb[V][0] = ofs[V] + page->cp[V][d[V]];
937 bb[V][1] = ofs[V] + page->cp[V][d[V] + 1];
938 page->params->draw_line (page->params->aux, bb, styles, colors);
943 render_cell (const struct render_page *page, const int ofs[TABLE_N_AXES],
944 const struct table_cell *cell)
946 int bb[TABLE_N_AXES][2];
947 int clip[TABLE_N_AXES][2];
949 bb[H][0] = clip[H][0] = ofs[H] + page->cp[H][cell->d[H][0] * 2 + 1];
950 bb[H][1] = clip[H][1] = ofs[H] + page->cp[H][cell->d[H][1] * 2];
951 if (page->params->rtl)
954 bb[H][0] = clip[H][0] = render_page_get_size (page, H) - bb[H][1];
955 bb[H][1] = clip[H][1] = render_page_get_size (page, H) - temp;
957 bb[V][0] = clip[V][0] = ofs[V] + page->cp[V][cell->d[V][0] * 2 + 1];
958 bb[V][1] = clip[V][1] = ofs[V] + page->cp[V][cell->d[V][1] * 2];
960 enum table_valign valign = cell->style->cell_style.valign;
961 if (valign != TABLE_VALIGN_TOP)
963 int height = page->params->measure_cell_height (
964 page->params->aux, cell, bb[H][1] - bb[H][0]);
965 int extra = bb[V][1] - bb[V][0] - height;
968 if (valign == TABLE_VALIGN_CENTER)
974 const struct render_overflow *of = find_overflow (
975 page, cell->d[H][0], cell->d[V][0]);
977 for (enum table_axis axis = 0; axis < TABLE_N_AXES; axis++)
979 if (of->overflow[axis][0])
981 bb[axis][0] -= of->overflow[axis][0];
982 if (cell->d[axis][0] == 0 && !page->is_edge_cutoff[axis][0])
983 clip[axis][0] = ofs[axis] + page->cp[axis][cell->d[axis][0] * 2];
985 if (of->overflow[axis][1])
987 bb[axis][1] += of->overflow[axis][1];
988 if (cell->d[axis][1] == page->n[axis]
989 && !page->is_edge_cutoff[axis][1])
990 clip[axis][1] = ofs[axis] + page->cp[axis][cell->d[axis][1] * 2
995 int spill[TABLE_N_AXES][2];
996 for (enum table_axis axis = 0; axis < TABLE_N_AXES; axis++)
998 spill[axis][0] = rule_width (page, axis, cell->d[axis][0]) / 2;
999 spill[axis][1] = rule_width (page, axis, cell->d[axis][1]) / 2;
1002 int color_idx = (cell->d[V][0] < page->h[V][0]
1003 || page->n[V] - (cell->d[V][0] + 1) < page->h[V][1]
1005 : (cell->d[V][0] - page->h[V][0]) & 1);
1006 page->params->draw_cell (page->params->aux, cell, color_idx,
1010 /* Draws the cells of PAGE indicated in BB. */
1012 render_page_draw_cells (const struct render_page *page,
1013 int ofs[TABLE_N_AXES], int bb[TABLE_N_AXES][2])
1015 for (int y = bb[V][0]; y < bb[V][1]; y++)
1016 for (int x = bb[H][0]; x < bb[H][1]; )
1017 if (!is_rule (x) && !is_rule (y))
1019 struct table_cell cell;
1021 table_get_cell (page->table, x / 2, y / 2, &cell);
1022 if (y / 2 == bb[V][0] / 2 || y / 2 == cell.d[V][0])
1023 render_cell (page, ofs, &cell);
1024 x = rule_ofs (cell.d[H][1]);
1025 table_cell_free (&cell);
1030 for (int y = bb[V][0]; y < bb[V][1]; y++)
1031 for (int x = bb[H][0]; x < bb[H][1]; x++)
1032 if (is_rule (x) || is_rule (y))
1034 int d[TABLE_N_AXES];
1037 render_rule (page, ofs, d);
1041 /* Renders PAGE, by calling the 'draw_line' and 'draw_cell' functions from the
1042 render_params provided to render_page_create(). */
1044 render_page_draw (const struct render_page *page, int ofs[TABLE_N_AXES])
1046 int bb[TABLE_N_AXES][2];
1049 bb[H][1] = page->n[H] * 2 + 1;
1051 bb[V][1] = page->n[V] * 2 + 1;
1053 render_page_draw_cells (page, ofs, bb);
1056 /* Returns the greatest value i, 0 <= i < n, such that cp[i] <= x0. */
static int
get_clip_min_extent (int x0, const int cp[], int n)
{
  /* Binary search: CP[] is nondecreasing, so find the greatest index whose
     value is still <= X0. */
  int low = 0;
  int high = n;
  int best = 0;
  while (low < high)
    {
      int middle = low + (high - low) / 2;

      if (cp[middle] <= x0)
        {
          best = middle;
          low = middle + 1;
        }
      else
        high = middle;
    }

  return best;
}
1079 /* Returns the least value i, 0 <= i < n, such that cp[i] >= x1. */
static int
get_clip_max_extent (int x1, const int cp[], int n)
{
  /* Binary search for the least index whose value is >= X1. */
  int low = 0;
  int high = n;
  int best = n;
  while (low < high)
    {
      int middle = low + (high - low) / 2;

      if (cp[middle] >= x1)
        best = high = middle;
      else
        low = middle + 1;
    }

  /* Skip back over runs of equal values (zero-width rules or cells) so that
     the clip region starts at the first of them. */
  while (best > 0 && cp[best - 1] == cp[best])
    best--;

  return best;
}
1102 /* Renders the cells of PAGE that intersect (X,Y)-(X+W,Y+H), by calling the
1103 'draw_line' and 'draw_cell' functions from the render_params provided to
1104 render_page_create(). */
1106 render_page_draw_region (const struct render_page *page,
1107 int ofs[TABLE_N_AXES], int clip[TABLE_N_AXES][2])
1109 int bb[TABLE_N_AXES][2];
1111 bb[H][0] = get_clip_min_extent (clip[H][0], page->cp[H], page->n[H] * 2 + 1);
1112 bb[H][1] = get_clip_max_extent (clip[H][1], page->cp[H], page->n[H] * 2 + 1);
1113 bb[V][0] = get_clip_min_extent (clip[V][0], page->cp[V], page->n[V] * 2 + 1);
1114 bb[V][1] = get_clip_max_extent (clip[V][1], page->cp[V], page->n[V] * 2 + 1);
1116 render_page_draw_cells (page, ofs, bb);
1119 /* Breaking up tables to fit on a page. */
1121 /* An iterator for breaking render_pages into smaller chunks. */
1124 struct render_page *page; /* Page being broken up. */
1125 enum table_axis axis; /* Axis along which 'page' is being broken. */
1126 int z; /* Next cell along 'axis'. */
1127 int pixel; /* Pixel offset within cell 'z' (usually 0). */
1128 int hw; /* Width of headers of 'page' along 'axis'. */
1131 static int needed_size (const struct render_break *, int cell);
1132 static bool cell_is_breakable (const struct render_break *, int cell);
1133 static struct render_page *render_page_select (const struct render_page *,
1138 /* Initializes render_break B for breaking PAGE along AXIS.
1139 Takes ownership of PAGE. */
1141 render_break_init (struct render_break *b, struct render_page *page,
1142 enum table_axis axis)
1146 b->z = page->h[axis][0];
1148 b->hw = headers_width (page, axis);
1151 /* Initializes B as a render_break structure for which
1152 render_break_has_next() always returns false. */
1154 render_break_init_empty (struct render_break *b)
1157 b->axis = TABLE_HORZ;
1163 /* Frees B and unrefs the render_page that it owns. */
1165 render_break_destroy (struct render_break *b)
/* NOTE(review): b->page may be null for a break initialized with
   render_break_init_empty(); presumably render_page_unref() (or an omitted
   guard) tolerates that — confirm against full source. */
1169 render_page_unref (b->page);
1174 /* Returns true if B still has cells that are yet to be returned,
1175 false if all of B's page has been processed. */
1177 render_break_has_next (const struct render_break *b)
1179 const struct render_page *page = b->page;
1180 enum table_axis axis = b->axis;
/* Cells at index n - h[axis][1] and beyond belong to the trailing header,
   which is repeated on every chunk rather than iterated over, so they never
   count as "next". */
1182 return page != NULL && b->z < page->n[axis] - page->h[axis][1];
1185 /* Returns a new render_page that is up to SIZE pixels wide along B's axis.
1186 Returns a null pointer if B has already been completely broken up, or if
1187 SIZE is too small to reasonably render any cells. The latter will never
1188 happen if SIZE is at least as large as the page size passed to
1189 render_page_create() along B's axis. */
1190 static struct render_page *
1191 render_break_next (struct render_break *b, int size)
1193 const struct render_page *page = b->page;
1194 enum table_axis axis = b->axis;
1195 struct render_page *subpage;
1197 if (!render_break_has_next (b))
/* Greedily advance 'z' over body cells for as long as the chunk (headers
   included, see needed_size()) still fits in SIZE. */
1202 for (z = b->z; z < page->n[axis] - page->h[axis][1]; z++)
1204 int needed = needed_size (b, z + 1);
/* Cell 'z' does not fit whole; consider rendering part of it. */
1207 if (cell_is_breakable (b, z))
1209 /* If there is no right header and we render a partial cell on
1210 the right side of the body, then we omit the rightmost rule of
1211 the body. Otherwise the rendering is deceptive because it
1212 looks like the whole cell is present instead of a partial
1215 This is similar to code for the left side in needed_size(). */
1216 int rule_allowance = (page->h[axis][1]
1218 : rule_width (page, axis, z));
1220 /* The amount that, if we added cell 'z', the rendering would
1221 overfill the allocated 'size'. */
1222 int overhang = needed - size - rule_allowance;
1224 /* The width of cell 'z'. */
1225 int cell_size = cell_width (page, axis, z);
1227 /* The amount trimmed off the left side of 'z',
1228 and the amount left to render. */
1229 int cell_ofs = z == b->z ? b->pixel : 0;
1230 int cell_left = cell_size - cell_ofs;
1232 /* A small but visible width. */
1233 int em = page->params->font_size[axis];
1235 /* If some of the cell remains to render,
1236 and there would still be some of the cell left afterward,
1237 then partially render that much of the cell. */
1238 pixel = (cell_left && cell_left > overhang
1239 ? cell_left - overhang + cell_ofs
1242 /* If there would be only a tiny amount of the cell left after
1243 rendering it partially, reduce the amount rendered slightly
1244 to make the output look a little better. */
1245 if (pixel + em > cell_size)
1246 pixel = MAX (pixel - em, 0);
1248 /* If we're breaking vertically, then consider whether the cells
1249 being broken have a better internal breakpoint than the exact
1250 number of pixels available, which might look bad e.g. because
1251 it breaks in the middle of a line of text. */
1252 if (axis == TABLE_VERT && page->params->adjust_break)
1253 for (int x = 0; x < page->n[H]; )
1255 struct table_cell cell;
1257 table_get_cell (page->table, x, z, &cell);
1258 int w = joined_width (page, H, cell.d[H][0], cell.d[H][1]);
/* adjust_break is a device callback that may move the break point to a
   nicer position within the cell (e.g. between lines of text). */
1259 int better_pixel = page->params->adjust_break (
1260 page->params->aux, &cell, w, pixel);
1262 table_cell_free (&cell);
1264 if (better_pixel < pixel)
1266 if (better_pixel > (z == b->z ? b->pixel : 0))
1268 pixel = better_pixel;
1271 else if (better_pixel == 0 && z != b->z)
/* NOTE(review): if not even part of the first cell fits, the omitted lines
   presumably return NULL ("SIZE too small") — confirm against full source. */
1283 if (z == b->z && !pixel)
/* Build the chunk: cells b->z through z, trimming b->pixel off the leading
   edge and the unrendered remainder of cell 'z' off the trailing edge. */
1286 subpage = render_page_select (page, axis, b->z, b->pixel,
1288 pixel ? cell_width (page, axis, z) - pixel
1295 /* Returns the width that would be required along B's axis to render a page
1296 from B's current position up to but not including CELL. */
1298 needed_size (const struct render_break *b, int cell)
1300 const struct render_page *page = b->page;
1301 enum table_axis axis = b->axis;
1303 /* Width of left header not including its rightmost rule. */
1304 int size = axis_width (page, axis, 0, rule_ofs (page->h[axis][0]));
1306 /* If we have a pixel offset and there is no left header, then we omit the
1307 leftmost rule of the body. Otherwise the rendering is deceptive because
1308 it looks like the whole cell is present instead of a partial cell.
1310 Otherwise (if there are headers) we will be merging two rules: the
1311 rightmost rule in the header and the leftmost rule in the body. We assume
1312 that the width of a merged rule is the larger of the widths of either rule
1314 if (b->pixel == 0 || page->h[axis][0])
1315 size += MAX (rule_width (page, axis, page->h[axis][0]),
1316 rule_width (page, axis, b->z));
1318 /* Width of body, minus any pixel offset in the leftmost cell. */
1319 size += joined_width (page, axis, b->z, cell) - b->pixel;
1321 /* Width of rightmost rule in body merged with leftmost rule in headers. */
1322 size += MAX (rule_width_r (page, axis, page->h[axis][1]),
1323 rule_width (page, axis, cell));
1325 /* Width of right header not including its leftmost rule. */
1326 size += axis_width (page, axis, rule_ofs_r (page, axis, page->h[axis][1]),
1327 rule_ofs_r (page, axis, 0));
1329 /* Join crossing. */
/* Only relevant when headers exist on both sides of the axis; the extra
   width accounts for a rule crossing a cell joined across the break. */
1330 if (page->h[axis][0] && page->h[axis][1])
1331 size += page->join_crossing[axis][b->z];
1336 /* Returns true if CELL along B's axis may be broken across a page boundary.
1338 This is just a heuristic. Breaking cells across page boundaries can save
1339 space, but it looks ugly. */
1341 cell_is_breakable (const struct render_break *b, int cell)
1343 const struct render_page *page = b->page;
1344 enum table_axis axis = b->axis;
/* Only cells at least as wide as the device's per-axis minimum break size
   are allowed to be split. */
1346 return cell_width (page, axis, cell) >= page->params->min_break[axis];
/* Pager state: breaks a sequence of render_pages (title, layers, body,
   caption, footnotes) into device-sized chunks. */
1353 const struct render_params *params; /* Device parameters. */
1355 struct render_page **pages; /* Pages to render, in order. */
1356 size_t n_pages, allocated_pages; /* Used/allocated sizes of 'pages'. */
/* Break state for the page currently being emitted: first broken
   horizontally (x_break), then each horizontal strip vertically (y_break). */
1359 struct render_break x_break;
1360 struct render_break y_break;
/* Renders TABLE (at least MIN_WIDTH wide) into a new render_page and appends
   it to P's page list.  NOTE(review): 'table' appears to be consumed by
   render_page_create() — confirm ownership against full source. */
1363 static const struct render_page *
1364 render_pager_add_table (struct render_pager *p, struct table *table,
1367 if (p->n_pages >= p->allocated_pages)
1368 p->pages = x2nrealloc (p->pages, &p->allocated_pages, sizeof *p->pages);
1370 struct render_page *page = render_page_create (p->params, table, min_width);
1371 p->pages[p->n_pages++] = page;
/* Starts breaking up P's next page: initializes the horizontal break on a
   fresh reference to the page and leaves the vertical break empty until a
   horizontal strip is produced (see render_pager_has_next()). */
1376 render_pager_start_page (struct render_pager *p)
1378 render_break_init (&p->x_break, render_page_ref (p->pages[p->cur_page++]),
1380 render_break_init_empty (&p->y_break);
/* If ITEM has footnotes, appends to P a two-column table listing each
   footnote's marker ("a.", "b.", ...) and its content, carrying the
   footnote's style on both columns. */
1384 add_footnote_page (struct render_pager *p, const struct table_item *item)
1386 const struct footnote **f;
1387 size_t n_footnotes = table_collect_footnotes (item, &f);
1391 struct tab_table *t = tab_create (2, n_footnotes);
1393 for (size_t i = 0; i < n_footnotes; i++)
1396 tab_text_format (t, 0, i, TAB_LEFT, "%s.", f[i]->marker);
1397 tab_text (t, 1, i, TAB_LEFT, f[i]->content);
1400 tab_add_style (t, 0, i, f[i]->style);
1401 tab_add_style (t, 1, i, f[i]->style);
1404 render_pager_add_table (p, &t->table, 0);
/* Appends to P a one-cell page containing text item T (e.g. a title or
   caption) plus its footnote references, rendered at least MIN_WIDTH wide so
   it lines up with the table body. */
1410 add_text_page (struct render_pager *p, const struct table_item_text *t,
1416 struct tab_table *tab = tab_create (1, 1);
1417 tab_text (tab, 0, 0, 0, t->content);
1418 for (size_t i = 0; i < t->n_footnotes; i++)
1419 tab_add_footnote (tab, 0, 0, t->footnotes[i]);
/* NOTE(review): t->style may need to be non-null here, or area_style_clone()
   must tolerate null — confirm against full source. */
1421 tab->styles[0] = area_style_clone (tab->container, t->style);
1422 render_pager_add_table (p, &tab->table, min_width);
1425 /* Creates and returns a new render_pager for rendering TABLE_ITEM on the
1426 device with the given PARAMS. */
1427 struct render_pager *
1428 render_pager_create (const struct render_params *params,
1429 const struct table_item *table_item)
1431 const struct table *table = table_item_get_table (table_item);
1433 struct render_pager *p = xzalloc (sizeof *p);
/* Do a throwaway horizontal break of the body to learn how wide the first
   chunk will be, so the title and layers can be rendered to match. */
1436 struct render_page *page = render_page_create (params, table_ref (table), 0);
1437 struct render_break b;
1438 render_break_init (&b, page, H);
1439 struct render_page *subpage = render_break_next (&b, p->params->size[H]);
/* cp[H][2n+1] is the right edge of the last band, i.e. the chunk's width. */
1440 int title_width = subpage ? subpage->cp[H][2 * subpage->n[H] + 1] : 0;
1441 render_page_unref (subpage);
1442 render_break_destroy (&b);
/* Assemble pages in display order: title, layers, body, caption, footnotes. */
1445 add_text_page (p, table_item_get_title (table_item), title_width);
1448 add_text_page (p, table_item_get_layers (table_item), title_width);
1451 render_pager_add_table (p, table_ref (table_item_get_table (table_item)), 0);
1454 add_text_page (p, table_item_get_caption (table_item), 0);
1457 add_footnote_page (p, table_item);
1459 render_pager_start_page (p);
/* Destroys P, releasing its break state and unref'ing every page it holds. */
1466 render_pager_destroy (struct render_pager *p)
1470 render_break_destroy (&p->x_break);
1471 render_break_destroy (&p->y_break);
1472 for (size_t i = 0; i < p->n_pages; i++)
1473 render_page_unref (p->pages[i]);
1479 /* Returns true if P has content remaining to render, false if rendering is
/* Despite the const parameter, this advances P's break state as a side
   effect (hence the CONST_CAST): it refills y_break from x_break, and
   x_break from the next page, until content is found or pages run out. */
1482 render_pager_has_next (const struct render_pager *p_)
1484 struct render_pager *p = CONST_CAST (struct render_pager *, p_);
1486 while (!render_break_has_next (&p->y_break))
1488 render_break_destroy (&p->y_break);
1489 if (!render_break_has_next (&p->x_break))
1491 render_break_destroy (&p->x_break);
/* All pages consumed: leave both breaks empty so future calls return
   false immediately. */
1492 if (p->cur_page >= p->n_pages)
1494 render_break_init_empty (&p->x_break);
1495 render_break_init_empty (&p->y_break);
1498 render_pager_start_page (p);
/* Take the next full-width horizontal strip and break it vertically. */
1502 &p->y_break, render_break_next (&p->x_break, p->params->size[H]), V);
1507 /* Draws a chunk of content from P to fit in a space that has vertical size
1508 SPACE and the horizontal size specified in the render_params passed to
1509 render_page_create(). Returns the amount of space actually used by the
1510 rendered chunk, which will be 0 if SPACE is too small to render anything or
1511 if no content remains (use render_pager_has_next() to distinguish these
1514 render_pager_draw_next (struct render_pager *p, int space)
1516 int ofs[TABLE_N_AXES] = { 0, 0 };
/* start_page guards against an infinite loop: if we come back around to the
   same page without emitting anything, stop. */
1517 size_t start_page = SIZE_MAX;
1519 while (render_pager_has_next (p))
1521 if (start_page == p->cur_page)
1523 start_page = p->cur_page;
1525 struct render_page *page
1526 = render_break_next (&p->y_break, space - ofs[V]);
1530 render_page_draw (page, ofs);
/* Stack chunks vertically within the available SPACE. */
1531 ofs[V] += render_page_get_size (page, V);
1532 render_page_unref (page);
1537 /* Draws all of P's content. */
1539 render_pager_draw (const struct render_pager *p)
/* Equivalent to drawing an unbounded region. */
1541 render_pager_draw_region (p, 0, 0, INT_MAX, INT_MAX);
1544 /* Draws the region of P's content that lies in the region (X,Y)-(X+W,Y+H).
1545 Some extra content might be drawn; the device should perform clipping as
1548 render_pager_draw_region (const struct render_pager *p,
1549 int x, int y, int w, int h)
1551 int ofs[TABLE_N_AXES] = { 0, 0 };
1552 int clip[TABLE_N_AXES][2];
/* Walk the pages top to bottom, intersecting each page's vertical span with
   (y, y+h) and drawing only pages whose intersection is nonempty. */
1556 for (size_t i = 0; i < p->n_pages; i++)
1558 const struct render_page *page = p->pages[i];
1559 int size = render_page_get_size (page, V);
/* Clip coordinates are page-relative, hence the "- ofs[V]". */
1561 clip[V][0] = MAX (y, ofs[V]) - ofs[V];
1562 clip[V][1] = MIN (y + h, ofs[V] + size) - ofs[V];
1563 if (clip[V][1] > clip[V][0])
1564 render_page_draw_region (page, ofs, clip);
1570 /* Returns the size of P's content along AXIS; i.e. the content's width if AXIS
1571 is TABLE_HORZ and its length if AXIS is TABLE_VERT. */
1573 render_pager_get_size (const struct render_pager *p, enum table_axis axis)
1577 for (size_t i = 0; i < p->n_pages; i++)
1579 int subsize = render_page_get_size (p->pages[i], axis);
/* Pages stack vertically: width is the max across pages, height the sum. */
1580 size = axis == H ? MAX (size, subsize) : size + subsize;
/* Returns a good vertical breakpoint for P no greater than HEIGHT: finds the
   page that straddles HEIGHT and delegates to its per-page breakpoint
   search, offset back into pager coordinates. */
1587 render_pager_get_best_breakpoint (const struct render_pager *p, int height)
1592 for (i = 0; i < p->n_pages; i++)
1594 int size = render_page_get_size (p->pages[i], V);
1595 if (y + size >= height)
1596 return render_page_get_best_breakpoint (p->pages[i], height - y) + y;
1603 /* render_page_select() and helpers. */
/* Bundle of parameters describing one in-progress render_page_select()
   call, passed to the overflow helpers below. */
1605 struct render_page_selection
1607 const struct render_page *page; /* Page whose slice we are selecting. */
1608 struct render_page *subpage; /* New page under construction. */
1609 enum table_axis a; /* Axis of 'page' along which 'subpage' is a slice. */
1610 enum table_axis b; /* The opposite of 'a'. */
1611 int z0; /* First cell along 'a' being selected. */
1612 int z1; /* Last cell being selected, plus 1. */
1613 int p0; /* Number of pixels to trim off left side of z0. */
1614 int p1; /* Number of pixels to trim off right side of z1-1. */
1617 static void cell_to_subpage (struct render_page_selection *,
1618 const struct table_cell *,
1619 int subcell[TABLE_N_AXES]);
1620 static const struct render_overflow *find_overflow_for_cell (
1621 struct render_page_selection *, const struct table_cell *);
1622 static struct render_overflow *insert_overflow (struct render_page_selection *,
1623 const struct table_cell *);
1625 /* Creates and returns a new render_page whose contents are a subregion of
1626 PAGE's contents. The new render_page includes cells Z0 through Z1
1627 (exclusive) along AXIS, plus any headers on AXIS.
1629 If P0 is nonzero, then it is a number of pixels to exclude from the left or
1630 top (according to AXIS) of cell Z0. Similarly, P1 is a number of pixels to
1631 exclude from the right or bottom of cell Z1 - 1. (P0 and P1 are used to
1632 render cells that are too large to fit on a single page.)
1634 The whole of axis !AXIS is included. (The caller may follow up with another
1635 call to render_page_select() to select on !AXIS to select on that axis as
1638 The caller retains ownership of PAGE, which is not modified. */
1639 static struct render_page *
1640 render_page_select (const struct render_page *page, enum table_axis axis,
1641 int z0, int p0, int z1, int p1)
1643 enum table_axis a = axis;
1644 enum table_axis b = !a;
1646 /* Optimize case where all of PAGE is selected by just incrementing the
/* Fast path: the whole body is selected with no pixel trimming, so the
   existing page can be shared by reference instead of copied. */
1648 if (z0 == page->h[a][0] && p0 == 0
1649 && z1 == page->n[a] - page->h[a][1] && p1 == 0)
1651 struct render_page *page_rw = CONST_CAST (struct render_page *, page);
1656 /* Allocate subpage. */
1657 struct render_page *subpage = render_page_allocate (
1658 page->params, table_select_slice (table_ref (page->table),
1661 /* An edge is cut off if it was cut off in PAGE or if we're trimming pixels
1662 off that side of the page and there are no headers. */
1663 subpage->is_edge_cutoff[a][0] =
1664 subpage->h[a][0] == 0 && (p0 || (z0 == 0 && page->is_edge_cutoff[a][0]));
1665 subpage->is_edge_cutoff[a][1] =
1666 subpage->h[a][1] == 0 && (p1 || (z1 == page->n[a]
1667 && page->is_edge_cutoff[a][1]));
1668 subpage->is_edge_cutoff[b][0] = page->is_edge_cutoff[b][0];
1669 subpage->is_edge_cutoff[b][1] = page->is_edge_cutoff[b][1];
1671 /* Select join crossings from PAGE into subpage. */
/* Copy crossings for: leading header cells, the selected body range, and
   trailing header cells, in that order. */
1672 int *jc = subpage->join_crossing[a];
1673 for (int z = 0; z < page->h[a][0]; z++)
1674 *jc++ = page->join_crossing[a][z];
1675 for (int z = z0; z <= z1; z++)
1676 *jc++ = page->join_crossing[a][z];
1677 for (int z = page->n[a] - page->h[a][1]; z < page->n[a]; z++)
1678 *jc++ = page->join_crossing[a][z];
1679 assert (jc == &subpage->join_crossing[a][subpage->n[a] + 1]);
1681 memcpy (subpage->join_crossing[b], page->join_crossing[b],
1682 (subpage->n[b] + 1) * sizeof **subpage->join_crossing);
1684 /* Select widths from PAGE into subpage. */
/* Rebuild the subpage's cumulative-position array band by band: leading
   header bands, selected body bands (with pixel trims applied at the two
   ends), then trailing header bands. */
1685 int *scp = page->cp[a];
1686 int *dcp = subpage->cp[a];
1688 for (int z = 0; z <= rule_ofs (subpage->h[a][0]); z++, dcp++)
1690 if (z == 0 && subpage->is_edge_cutoff[a][0])
1693 dcp[1] = dcp[0] + (scp[z + 1] - scp[z]);
1695 for (int z = cell_ofs (z0); z <= cell_ofs (z1 - 1); z++, dcp++)
1697 dcp[1] = dcp[0] + (scp[z + 1] - scp[z]);
1698 if (z == cell_ofs (z0))
1701 if (page->h[a][0] && page->h[a][1])
1702 dcp[1] += page->join_crossing[a][z / 2];
1704 if (z == cell_ofs (z1 - 1))
1707 for (int z = rule_ofs_r (page, a, subpage->h[a][1]);
1708 z <= rule_ofs_r (page, a, 0); z++, dcp++)
1710 if (z == rule_ofs_r (page, a, 0) && subpage->is_edge_cutoff[a][1])
1713 dcp[1] = dcp[0] + (scp[z + 1] - scp[z]);
1715 assert (dcp == &subpage->cp[a][2 * subpage->n[a] + 1]);
1717 for (int z = 0; z < page->n[b] * 2 + 2; z++)
1718 subpage->cp[b][z] = page->cp[b][z];
1720 /* Add new overflows. */
1721 struct render_page_selection s = {
/* Scan cells along the left/top selected edge for cells that spill outside
   [z0,z1) or are pixel-trimmed; record how much of each is cut off. */
1732 if (!page->h[a][0] || z0 > page->h[a][0] || p0)
1733 for (int z = 0; z < page->n[b]; )
1735 int d[TABLE_N_AXES];
1739 struct table_cell cell;
1740 table_get_cell (page->table, d[H], d[V], &cell);
1741 bool overflow0 = p0 || cell.d[a][0] < z0;
1742 bool overflow1 = cell.d[a][1] > z1 || (cell.d[a][1] == z1 && p1);
1743 if (overflow0 || overflow1)
1745 struct render_overflow *ro = insert_overflow (&s, &cell);
1749 ro->overflow[a][0] += p0 + axis_width (
1750 page, a, cell_ofs (cell.d[a][0]), cell_ofs (z0));
1751 if (page->h[a][0] && page->h[a][1])
1752 ro->overflow[a][0] -= page->join_crossing[a][cell.d[a][0]
1758 ro->overflow[a][1] += p1 + axis_width (
1759 page, a, cell_ofs (z1), cell_ofs (cell.d[a][1]));
1760 if (page->h[a][0] && page->h[a][1])
1761 ro->overflow[a][1] -= page->join_crossing[a][cell.d[a][1]];
1765 table_cell_free (&cell);
/* Same scan for the right/bottom selected edge, skipping cells already
   handled above. */
1768 if (!page->h[a][1] || z1 < page->n[a] - page->h[a][1] || p1)
1769 for (int z = 0; z < page->n[b]; )
1771 int d[TABLE_N_AXES];
1775 struct table_cell cell;
1776 table_get_cell (page->table, d[H], d[V], &cell);
1777 if ((cell.d[a][1] > z1 || (cell.d[a][1] == z1 && p1))
1778 && find_overflow_for_cell (&s, &cell) == NULL)
1780 struct render_overflow *ro = insert_overflow (&s, &cell);
1781 ro->overflow[a][1] += p1 + axis_width (page, a, cell_ofs (z1),
1782 cell_ofs (cell.d[a][1]));
1785 table_cell_free (&cell);
1788 /* Copy overflows from PAGE into subpage. */
1789 struct render_overflow *ro;
1790 HMAP_FOR_EACH (ro, struct render_overflow, node, &page->overflows)
1792 struct table_cell cell;
1794 table_get_cell (page->table, ro->d[H], ro->d[V], &cell);
1795 if (cell.d[a][1] > z0 && cell.d[a][0] < z1
1796 && find_overflow_for_cell (&s, &cell) == NULL)
1797 insert_overflow (&s, &cell);
1798 table_cell_free (&cell);
1804 /* Given CELL, a table_cell within S->page, stores in SUBCELL the (x,y)
1805 coordinates of the top-left cell as it will appear in S->subpage.
1807 CELL must actually intersect the region of S->page that is being selected
1808 by render_page_select() or the results will not make any sense. */
1810 cell_to_subpage (struct render_page_selection *s,
1811 const struct table_cell *cell, int subcell[TABLE_N_AXES])
1813 enum table_axis a = s->a;
1814 enum table_axis b = s->b;
1815 int ha0 = s->subpage->h[a][0];
/* Along the sliced axis, shift by -z0 and re-add the header width; the MAX
   clamps cells that started before z0 to the first body position. */
1817 subcell[a] = MAX (cell->d[a][0] - s->z0 + ha0, ha0);
/* The other axis is passed through unchanged. */
1818 subcell[b] = cell->d[b][0];
1821 /* Given CELL, a table_cell within S->page, returns the render_overflow for
1822 that cell in S->subpage, if there is one, and a null pointer otherwise.
1824 CELL must actually intersect the region of S->page that is being selected
1825 by render_page_select() or the results will not make any sense. */
1826 static const struct render_overflow *
1827 find_overflow_for_cell (struct render_page_selection *s,
1828 const struct table_cell *cell)
/* Translate CELL into subpage coordinates, then look it up in the
   subpage's overflow map. */
1832 cell_to_subpage (s, cell, subcell);
1833 return find_overflow (s->subpage, subcell[H], subcell[V]);
1836 /* Given CELL, a table_cell within S->page, inserts a render_overflow for that
1837 cell in S->subpage (which must not already exist). Initializes the new
1838 render_overflow's 'overflow' member from the overflow for CELL in S->page,
1841 CELL must actually intersect the region of S->page that is being selected
1842 by render_page_select() or the results will not make any sense. */
1843 static struct render_overflow *
1844 insert_overflow (struct render_page_selection *s,
1845 const struct table_cell *cell)
1847 struct render_overflow *of = xzalloc (sizeof *of);
1848 cell_to_subpage (s, cell, of->d);
1849 hmap_insert (&s->subpage->overflows, &of->node,
1850 hash_cell (of->d[H], of->d[V]));
1852 const struct render_overflow *old
1853 = find_overflow (s->page, cell->d[H][0], cell->d[V][0]);
1855 memcpy (of->overflow, old->overflow, sizeof of->overflow);