1 /* PSPP - a program for statistical analysis.
2 Copyright (C) 2009, 2010, 2011, 2013, 2014, 2016 Free Software Foundation, Inc.
4 This program is free software: you can redistribute it and/or modify
5 it under the terms of the GNU General Public License as published by
6 the Free Software Foundation, either version 3 of the License, or
7 (at your option) any later version.
9 This program is distributed in the hope that it will be useful,
10 but WITHOUT ANY WARRANTY; without even the implied warranty of
11 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 GNU General Public License for more details.
14 You should have received a copy of the GNU General Public License
15 along with this program. If not, see <http://www.gnu.org/licenses/>. */
25 #include "libpspp/assertion.h"
26 #include "libpspp/hash-functions.h"
27 #include "libpspp/hmap.h"
28 #include "libpspp/pool.h"
29 #include "output/pivot-output.h"
30 #include "output/pivot-table.h"
31 #include "output/render.h"
32 #include "output/table.h"
34 #include "gl/minmax.h"
35 #include "gl/xalloc.h"
38 #define _(msgid) gettext (msgid)
40 /* This file uses TABLE_HORZ and TABLE_VERT enough to warrant abbreviating. */
44 /* A layout for rendering a specific table on a specific device.
46 May represent the layout of an entire table presented to
47 render_page_create(), or a rectangular subregion of a table broken out using
48 render_break_next() to allow a table to be broken across multiple pages.
50 A page's size is not limited to the size passed in as part of render_params.
51 render_pager breaks a render_page into smaller render_pages that will fit in
52 the available space. */
55 const struct render_params *params; /* Parameters of the target device. */
56 struct table *table; /* Table rendered. */
59 /* Region of 'table' to render.
61 The horizontal cells rendered are the leftmost h[H], then
64 The vertical cells rendered are the topmost h[V], then r[V][0]
67 n[H] = h[H] + (r[H][1] - r[H][0])
68 n[V] = h[V] + (r[V][1] - r[V][0])
71 int r[TABLE_N_AXES][2];
76 cp[H] represents x positions within the table.
78 cp[H][1] = the width of the leftmost vertical rule.
79 cp[H][2] = cp[H][1] + the width of the leftmost column.
80 cp[H][3] = cp[H][2] + the width of the second-from-left vertical rule.
82 cp[H][2 * n[H]] = x position of the rightmost vertical rule.
83 cp[H][2 * n[H] + 1] = total table width including all rules.
85 Similarly, cp[V] represents y positions within the table.
87 cp[V][1] = the height of the topmost horizontal rule.
88 cp[V][2] = cp[V][1] + the height of the topmost row.
89 cp[V][3] = cp[V][2] + the height of the second-from-top horizontal rule.
91 cp[V][2 * n[V]] = y position of the bottommost horizontal rule.
92 cp[V][2 * n[V] + 1] = total table height including all rules.
94 Rules and columns can have width or height 0, in which case consecutive
95 values in this array are equal. */
96 int *cp[TABLE_N_AXES];
98 /* render_break_next() can break a table such that some cells are not fully
99 contained within a render_page. This will happen if a cell is too wide
100 or too tall to fit on a single page, or if a cell spans multiple rows or
101 columns and the page only includes some of those rows or columns.
103 This hash table contains "struct render_overflow"s that represent each
104 such cell that doesn't completely fit on this page.
106 Each overflow cell borders at least one header edge of the table and may
107 border more. (A single table cell that is so large that it fills the
108 entire page can overflow on all four sides!) */
109 struct hmap overflows;
111 /* If a single column (or row) is too wide (or tall) to fit on a page
112 reasonably, then render_break_next() will split a single row or column
113 across multiple render_pages. This member indicates when this has
116 is_edge_cutoff[H][0] is true if pixels have been cut off the left side
117 of the leftmost column in this page, and false otherwise.
119 is_edge_cutoff[H][1] is true if pixels have been cut off the right side
120 of the rightmost column in this page, and false otherwise.
122 is_edge_cutoff[V][0] and is_edge_cutoff[V][1] are similar for the top
123 and bottom of the table.
125 The effect of is_edge_cutoff is to prevent rules along the edge in
126 question from being rendered.
128 When is_edge_cutoff is true for a given edge, the 'overflows' hmap will
129 contain a node for each cell along that edge. */
130 bool is_edge_cutoff[TABLE_N_AXES][2];
/* NOTE(review): this listing appears truncated -- the struct's opening line
   and its 'n', 'h', and 'ref_cnt' members (all referenced elsewhere in this
   file) are not visible here.  Confirm against upstream before editing. */
133 static struct render_page *render_page_create (const struct render_params *,
134 struct table *, int min_width);
136 struct render_page *render_page_ref (const struct render_page *page_);
137 static void render_page_unref (struct render_page *);
/* Returns the offset in struct render_page's cp[axis] array of the rule with
   index RULE_IDX.  That is, if RULE_IDX is 0, then the offset is that of the
   leftmost or topmost rule; if RULE_IDX is 1, then the offset is that of the
   next rule to the right (or below); and so on. */
static int
rule_ofs (int rule_idx)
{
  /* Rules occupy the even offsets of cp[]; cells occupy the odd ones
     (see the description of 'cp' in struct render_page). */
  return rule_idx * 2;
}
149 /* Returns the offset in struct render_page's cp[axis] array of the rule with
150 index RULE_IDX_R, which counts from the right side (or bottom) of the page
151 left (or up), according to whether AXIS is H or V, respectively. That is,
152 if RULE_IDX_R is 0, then the offset is that of the rightmost or bottommost
153 rule; if RULE_IDX is 1, then the offset is that of the next rule to the left
154 (or above); and so on. */
156 rule_ofs_r (const struct render_page *page, int axis, int rule_idx_r)
158 return (page->n[axis] - rule_idx_r) * 2;
/* Returns the offset in struct render_page's cp[axis] array of the cell with
   index CELL_IDX.  That is, if CELL_IDX is 0, then the offset is that of the
   leftmost or topmost cell; if CELL_IDX is 1, then the offset is that of the
   next cell to the right (or below); and so on. */
static int
cell_ofs (int cell_idx)
{
  /* Cells occupy the odd offsets of cp[]; rules occupy the even ones. */
  return cell_idx * 2 + 1;
}
171 /* Returns the width of PAGE along AXIS from OFS0 to OFS1, exclusive. */
173 axis_width (const struct render_page *page, int axis, int ofs0, int ofs1)
175 return page->cp[axis][ofs1] - page->cp[axis][ofs0];
178 /* Returns the total width of PAGE along AXIS. */
180 table_width (const struct render_page *page, int axis)
182 return page->cp[axis][2 * page->n[axis] + 1];
185 /* Returns the width of the headers in PAGE along AXIS. */
187 headers_width (const struct render_page *page, int axis)
189 return axis_width (page, axis, rule_ofs (0), cell_ofs (page->h[axis]));
/* Returns the width of cell X along AXIS in PAGE. */
static int
cell_width (const struct render_page *page, int axis, int x)
{
  return axis_width (page, axis, cell_ofs (x), cell_ofs (x) + 1);
}
/* Returns the width of rule X along AXIS in PAGE. */
static int
rule_width (const struct render_page *page, int axis, int x)
{
  return axis_width (page, axis, rule_ofs (x), rule_ofs (x) + 1);
}
/* Returns the width of rule X along AXIS in PAGE, where X counts from the
   right side (or bottom) of the page. */
static int
rule_width_r (const struct render_page *page, int axis, int x)
{
  int ofs = rule_ofs_r (page, axis, x);
  return axis_width (page, axis, ofs, ofs + 1);
}
/* Returns the width of cells X0 through X1, exclusive, along AXIS in PAGE,
   including the rules between them. */
static int
joined_width (const struct render_page *page, int axis, int x0, int x1)
{
  return axis_width (page, axis, cell_ofs (x0), cell_ofs (x1) - 1);
}
221 /* Returns the width of the widest cell, excluding headers, along AXIS in
224 max_cell_width (const struct render_page *page, int axis)
226 int x0 = page->h[axis];
227 int x1 = page->n[axis];
230 for (int x = x0; x < x1; x++)
232 int w = cell_width (page, axis, x);
239 /* A cell that doesn't completely fit on the render_page. */
240 struct render_overflow
242 struct hmap_node node; /* In render_page's 'overflows' hmap. */
244 /* Occupied region of page.
246 d[H][0] is the leftmost column.
247 d[H][1] is the rightmost column, plus 1.
248 d[V][0] is the top row.
249 d[V][1] is the bottom row, plus 1.
251 The cell in its original table might occupy a larger region. This
252 member reflects the size of the cell in the current render_page, after
253 trimming off any rows or columns due to page-breaking. */
/* NOTE(review): the declaration of the 'd' member described above (presumably
   'int d[TABLE_N_AXES][2];') is not visible in this listing -- confirm. */
256 /* The space that has been trimmed off the cell:
258 overflow[H][0]: space trimmed off its left side.
259 overflow[H][1]: space trimmed off its right side.
260 overflow[V][0]: space trimmed off its top.
261 overflow[V][1]: space trimmed off its bottom.
263 During rendering, this information is used to position the rendered
264 portion of the cell within the available space.
266 When a cell is rendered, sometimes it is permitted to spill over into
267 space that is ordinarily reserved for rules. Either way, this space is
268 still included in overflow values.
270 Suppose, for example, that a cell that joins 2 columns has a width of 60
271 pixels and content "abcdef", that the 2 columns that it joins have
272 widths of 20 and 30 pixels, respectively, and that therefore the rule
273 between the two joined columns has a width of 10 (20 + 10 + 30 = 60).
274 It might render like this, if each character is 10x10, and showing a few
275 extra table cells for context:
283 If this render_page is broken at the rule that separates "gh" from
284 "ijk", then the page that contains the left side of the "abcdef" cell
285 will have overflow[H][1] of 10 + 30 = 40 for its portion of the cell,
286 and the page that contains the right side of the cell will have
287 overflow[H][0] of 20 + 10 = 30. The two resulting pages would look like
305 int overflow[TABLE_N_AXES][2];
/* Returns a hash value for the cell whose top-left corner is (X,Y). */
static unsigned int
hash_cell (int x, int y)
{
  /* Pack both coordinates into a single int before hashing; adequate as long
     as tables have fewer than 2**16 columns, which holds in practice. */
  return hash_int (x + (y << 16), 0);
}
315 /* Searches PAGE's set of render_overflow for one whose top-left cell is
316 (X,Y). Returns it, if there is one, otherwise a null pointer. */
317 static const struct render_overflow *
318 find_overflow (const struct render_page *page, int x, int y)
320 if (!hmap_is_empty (&page->overflows))
322 const struct render_overflow *of;
324 HMAP_FOR_EACH_WITH_HASH (of, struct render_overflow, node,
325 hash_cell (x, y), &page->overflows)
326 if (x == of->d[H] && y == of->d[V])
333 /* Row or column dimensions. Used to figure the size of a table in
334 render_page_create() and discarded after that. */
/* NOTE(review): the struct's opening line and member declarations are not
   visible in this listing; later code accesses members named 'unspanned' and
   'width' on struct render_row -- confirm against upstream. */
337 /* Width without considering rows (or columns) that span more than one (or
341 /* Width taking spanned rows (or columns) into consideration. */
345 /* Modifies the 'width' members of the N elements of ROWS so that their sum,
346 when added to rule widths RULES[1] through RULES[N - 1] inclusive, is at
349 distribute_spanned_width (int width,
350 struct render_row *rows, const int *rules, int n)
352 /* Sum up the unspanned widths of the N rows for use as weights. */
353 int total_unspanned = 0;
354 for (int x = 0; x < n; x++)
355 total_unspanned += rows[x].unspanned;
356 for (int x = 0; x < n - 1; x++)
357 total_unspanned += rules[x + 1];
358 if (total_unspanned >= width)
361 /* The algorithm used here is based on the following description from HTML 4:
363 For cells that span multiple columns, a simple approach consists of
364 apportioning the min/max widths evenly to each of the constituent
365 columns. A slightly more complex approach is to use the min/max
366 widths of unspanned cells to weight how spanned widths are
367 apportioned. Experiments suggest that a blend of the two approaches
368 gives good results for a wide range of tables.
370 We blend the two approaches half-and-half, except that we cannot use the
371 unspanned weights when 'total_unspanned' is 0 (because that would cause a
374 The calculation we want to do is this:
377 w1 = width * (column's unspanned width) / (total unspanned width)
378 (column's width) = (w0 + w1) / 2
380 We implement it as a precise calculation in integers by multiplying w0 and
381 w1 by the common denominator of all three calculations (d), dividing that
382 out in the column width calculation, and then keeping the remainder for
385 (We actually compute the unspanned width of a column as twice the
386 unspanned width, plus the width of the rule on the left, plus the width of
387 the rule on the right. That way each rule contributes to both the cell on
388 its left and on its right.)
390 long long int d0 = n;
391 long long int d1 = 2LL * MAX (total_unspanned, 1);
392 long long int d = d0 * d1;
393 if (total_unspanned > 0)
395 long long int w = d / 2;
396 for (int x = 0; x < n; x++)
399 if (total_unspanned > 0)
401 long long int unspanned = rows[x].unspanned * 2LL;
403 unspanned += rules[x + 1];
405 unspanned += rules[x];
406 w += width * unspanned * d0;
409 rows[x].width = MAX (rows[x].width, w / d);
410 w -= rows[x].width * d;
414 /* Initializes PAGE->cp[AXIS] from the row widths in ROWS and the rule widths
417 accumulate_row_widths (const struct render_page *page, enum table_axis axis,
418 const struct render_row *rows, const int *rules)
420 int n = page->n[axis];
421 int *cp = page->cp[axis];
423 for (int z = 0; z < n; z++)
425 cp[1] = cp[0] + rules[z];
426 cp[2] = cp[1] + rows[z].width;
429 cp[1] = cp[0] + rules[n];
432 /* Returns the sum of widths of the N ROWS and N+1 RULES. */
434 calculate_table_width (int n, const struct render_row *rows, int *rules)
437 for (int x = 0; x < n; x++)
438 width += rows[x].width;
439 for (int x = 0; x <= n; x++)
445 /* Rendering utility functions. */
447 /* Returns the width of the rule in TABLE that is at offset Z along axis A, if
448 rendered with PARAMS. */
450 measure_rule (const struct render_params *params, const struct table *table,
451 enum table_axis a, int z)
453 enum table_axis b = !a;
455 /* Determine all types of rules that are present, as a bitmap in 'rules'
456 where rule type 't' is present if bit 2**t is set. */
457 unsigned int rules = 0;
460 for (d[b] = 0; d[b] < table->n[b]; d[b]++)
461 rules |= 1u << table_get_rule (table, a, d[H], d[V]).stroke;
463 /* Turn off TABLE_STROKE_NONE because it has width 0 and we needn't bother.
464 However, if the device doesn't support margins, make sure that there is at
465 least a small gap between cells (but we don't need any at the left or
466 right edge of the table). */
467 if (rules & (1u << TABLE_STROKE_NONE))
469 rules &= ~(1u << TABLE_STROKE_NONE);
470 if (z > 0 && z < table->n[a] && !params->supports_margins && a == H)
471 rules |= 1u << TABLE_STROKE_SOLID;
474 /* Calculate maximum width of the rules that are present. */
476 for (size_t i = 0; i < TABLE_N_STROKES; i++)
477 if (rules & (1u << i))
478 width = MAX (width, params->line_widths[i]);
482 /* Allocates and returns a new render_page using PARAMS and TABLE. Allocates
483 space for rendering a table with dimensions given in N, headers in H, and
484 content in R. The caller must initialize most of the members itself. */
485 static struct render_page *
486 render_page_allocate__ (const struct render_params *params,
/* NOTE(review): the 'struct table *table' parameter line and the initializers
   for .params, .table, and .ref_cnt (plus the closing brace and 'return page;')
   are not visible in this listing -- confirm against upstream. */
488 const int n[TABLE_N_AXES],
489 const int h[TABLE_N_AXES],
490 const int r[TABLE_N_AXES][2])
492 struct render_page *page = xmalloc (sizeof *page);
493 *page = (struct render_page) {
497 .n = { [H] = n[H], [V] = n[V] },
498 .h = { [H] = h[H], [V] = h[V] },
499 .r = { [H] = { r[H][0], r[H][1] }, [V] = { r[V][0], r[V][1] } },
500 .cp = { [H] = xcalloc (2 * n[H] + 2, sizeof *page->cp[H]),
501 [V] = xcalloc (2 * n[V] + 2, sizeof *page->cp[V]) },
502 .overflows = HMAP_INITIALIZER (page->overflows),
507 /* Allocates and returns a new render_page using PARAMS and TABLE. Allocates
508 space for all of the members of the new page, but the caller must initialize
509 the 'cp' member itself. */
510 static struct render_page *
511 render_page_allocate (const struct render_params *params, struct table *table)
/* Initially render the whole table body (everything after the headers). */
514 int r[TABLE_N_AXES][2];
515 for (enum table_axis a = 0; a < TABLE_N_AXES; a++)
518 r[a][0] = table->h[a];
519 r[a][1] = table->n[a];
/* NOTE(review): the declaration of 'h' used below is not visible in this
   listing; presumably it holds the header counts (table->h) -- confirm. */
521 return render_page_allocate__ (params, table, table->n, h, r);
524 /* Allocates and returns a new render_page for PARAMS and TABLE, initializing
525 cp[H] in the new page from ROWS and RULES. The caller must still initialize
527 static struct render_page *
528 create_page_with_exact_widths (const struct render_params *params,
530 const struct render_row *rows, int *rules)
532 struct render_page *page = render_page_allocate (params, table);
533 accumulate_row_widths (page, H, rows, rules);
537 /* Allocates and returns a new render_page for PARAMS and TABLE.
539 Initializes cp[H] in the new page by setting the width of each row 'i' to
540 somewhere between the minimum cell width ROW_MIN[i].width and the maximum
541 ROW_MAX[i].width. Sets the width of rules to those in RULES.
543 W_MIN is the sum of ROWS_MIN[].width.
545 W_MAX is the sum of ROWS_MAX[].width.
547 The caller must still initialize cp[V]. */
548 static struct render_page *
549 create_page_with_interpolated_widths (const struct render_params *params,
/* NOTE(review): the 'struct table *table' parameter line is not visible in
   this listing. */
551 const struct render_row *rows_min,
552 const struct render_row *rows_max,
553 int w_min, int w_max, const int *rules)
554 /* Interpolate each column between its minimum and maximum width so that the
   total comes out to exactly params->size[H]; 'w' carries the running
   remainder so rounding errors do not accumulate. */
555 const int n = table->n[H];
556 const long long int avail = params->size[H] - w_min;
557 const long long int wanted = w_max - w_min;
561 struct render_page *page = render_page_allocate (params, table);
563 int *cph = page->cp[H];
565 long long int w = wanted / 2;
566 for (int x = 0; x < n; x++)
568 w += avail * (rows_max[x].width - rows_min[x].width);
569 int extra = w / wanted;
/* NOTE(review): lines normalizing the remainder (w -= extra * wanted;),
   advancing cph by 2 each iteration, and returning 'page' are not visible in
   this listing -- confirm against upstream. */
572 cph[1] = cph[0] + rules[x];
573 cph[2] = cph[1] + rows_min[x].width + extra;
576 cph[1] = cph[0] + rules[n];
578 assert (page->cp[H][n * 2 + 1] == params->size[H]);
582 /* Maps a contiguous range of cells from a page to the underlying table along
583 the horizontal or vertical dimension. */
586 int p0; /* First ordinate in the page. */
587 int t0; /* First ordinate in the table. */
588 int n; /* Number of ordinates in page and table. */
591 /* Initializes M to a mapping from PAGE to PAGE->table along axis A. The
592 mapping includes ordinate Z (in PAGE). */
594 get_map (const struct render_page *page, enum table_axis a, int z,
/* NOTE(review): the header branch (mapping header ordinates, which come
   first on the page) and the initialization of m->p0 are not visible in this
   listing; only the body-region branch below survives. */
605 assert (z < page->n[a]);
607 m->t0 = page->r[a][0];
608 m->n = page->r[a][1] - page->r[a][0];
612 /* Initializes CELL with the contents of the table cell at column X and row Y
613 within PAGE. When CELL is no longer needed, the caller is responsible for
614 freeing it by calling table_cell_free(CELL).
616 The caller must ensure that CELL is destroyed before TABLE is unref'ed.
618 This is equivalent to table_get_cell(), except X and Y are in terms of the
619 page's rows and columns rather than the underlying table's. */
621 render_get_cell (const struct render_page *page, int x, int y,
622 struct table_cell *cell)
/* Translate page coordinates to table coordinates before the lookup... */
624 int d[TABLE_N_AXES] = { [H] = x, [V] = y };
625 struct map map[TABLE_N_AXES];
627 for (enum table_axis a = 0; a < TABLE_N_AXES; a++)
629 struct map *m = &map[a];
630 get_map (page, a, d[a], m);
631 d[a] += m->t0 - m->p0;
633 table_get_cell (page->table, d[H], d[V], cell);
/* ...then translate the cell's region back to page coordinates, clamping it
   to the mapped region so spans are trimmed to what this page shows. */
635 for (enum table_axis a = 0; a < TABLE_N_AXES; a++)
637 struct map *m = &map[a];
639 for (int i = 0; i < 2; i++)
640 cell->d[a][i] -= m->t0 - m->p0;
641 cell->d[a][0] = MAX (cell->d[a][0], m->p0);
642 cell->d[a][1] = MIN (cell->d[a][1], m->p0 + m->n);
/* Full-width cells stretch across every column of the page. */
645 if (cell->options & TABLE_CELL_FULL_WIDTH)
648 cell->d[H][1] = page->n[H];
652 /* Creates and returns a new render_page for rendering TABLE on a device
655 The new render_page will be suitable for rendering on a device whose page
656 size is PARAMS->size, but the caller is responsible for actually breaking it
657 up to fit on such a device, using the render_break abstraction. */
658 static struct render_page *
659 render_page_create (const struct render_params *params, struct table *table,
/* NOTE(review): the 'int min_width' parameter (named in the forward
   declaration above and used below) is on a line not visible in this
   listing. */
664 int nc = table->n[H];
665 int nr = table->n[V];
667 /* Figure out rule widths. */
668 int *rules[TABLE_N_AXES];
669 for (enum table_axis axis = 0; axis < TABLE_N_AXES; axis++)
671 int n = table->n[axis] + 1;
/* NOTE(review): 'sizeof *rules' is the size of an int *, not an int, so this
   over-allocates; it should be 'sizeof *rules[axis]'. */
673 rules[axis] = xnmalloc (n, sizeof *rules);
674 for (int z = 0; z < n; z++)
675 rules[axis][z] = measure_rule (params, table, axis, z);
678 /* Calculate minimum and maximum widths of cells that do not
679 span multiple columns. */
/* NOTE(review): columns[MIN]/columns[MAX] below imply an enum { MIN, MAX }
   pair of indices (0 and 1) defined earlier but not visible here. */
680 struct render_row *columns[2];
681 for (int i = 0; i < 2; i++)
682 columns[i] = xcalloc (nc, sizeof *columns[i]);
683 for (int y = 0; y < nr; y++)
684 for (int x = 0; x < nc;)
686 struct table_cell cell;
688 table_get_cell (table, x, y, &cell);
689 if (y == cell.d[V][0])
691 if (table_cell_colspan (&cell) == 1)
694 params->ops->measure_cell_width (params->aux, &cell,
696 for (int i = 0; i < 2; i++)
697 if (columns[i][x].unspanned < w[i])
698 columns[i][x].unspanned = w[i];
704 /* Distribute widths of spanned columns. */
705 for (int i = 0; i < 2; i++)
706 for (int x = 0; x < nc; x++)
707 columns[i][x].width = columns[i][x].unspanned;
708 for (int y = 0; y < nr; y++)
709 for (int x = 0; x < nc;)
711 struct table_cell cell;
713 table_get_cell (table, x, y, &cell);
714 if (y == cell.d[V][0] && table_cell_colspan (&cell) > 1)
718 params->ops->measure_cell_width (params->aux, &cell,
720 for (int i = 0; i < 2; i++)
721 distribute_spanned_width (w[i], &columns[i][cell.d[H][0]],
722 rules[H], table_cell_colspan (&cell));
/* Widen the table as a whole if the caller requested a minimum width. */
727 for (int i = 0; i < 2; i++)
728 distribute_spanned_width (min_width, &columns[i][0], rules[H], nc);
730 /* In pathological cases, spans can cause the minimum width of a column to
731 exceed the maximum width. This bollixes our interpolation algorithm
732 later, so fix it up. */
733 for (int i = 0; i < nc; i++)
734 if (columns[MIN][i].width > columns[MAX][i].width)
735 columns[MAX][i].width = columns[MIN][i].width;
737 /* Decide final column widths. */
739 for (int i = 0; i < 2; i++)
740 table_widths[i] = calculate_table_width (table->n[H],
741 columns[i], rules[H]);
743 struct render_page *page;
744 if (table_widths[MAX] <= params->size[H])
746 /* Fits even with maximum widths. Use them. */
747 page = create_page_with_exact_widths (params, table, columns[MAX],
750 else if (table_widths[MIN] <= params->size[H])
752 /* Fits with minimum widths, so distribute the leftover space. */
753 page = create_page_with_interpolated_widths (
754 params, table, columns[MIN], columns[MAX],
755 table_widths[MIN], table_widths[MAX], rules[H]);
759 /* Doesn't fit even with minimum widths. Assign minimums for now, and
760 later we can break it horizontally into multiple pages. */
761 page = create_page_with_exact_widths (params, table, columns[MIN],
765 /* Calculate heights of cells that do not span multiple rows. */
766 struct render_row *rows = XCALLOC (nr, struct render_row);
767 for (int y = 0; y < nr; y++)
768 for (int x = 0; x < nc;)
770 struct render_row *r = &rows[y];
771 struct table_cell cell;
773 render_get_cell (page, x, y, &cell);
774 if (y == cell.d[V][0] && table_cell_rowspan (&cell) == 1)
776 int w = joined_width (page, H, cell.d[H][0], cell.d[H][1]);
777 int h = params->ops->measure_cell_height (params->aux,
779 if (h > r->unspanned)
780 r->unspanned = r->width = h;
784 for (int i = 0; i < 2; i++)
787 /* Distribute heights of spanned rows. */
788 for (int y = 0; y < nr; y++)
789 for (int x = 0; x < nc;)
791 struct table_cell cell;
793 render_get_cell (page, x, y, &cell);
794 if (y == cell.d[V][0] && table_cell_rowspan (&cell) > 1)
796 int w = joined_width (page, H, cell.d[H][0], cell.d[H][1]);
797 int h = params->ops->measure_cell_height (params->aux, &cell, w);
798 distribute_spanned_width (h, &rows[cell.d[V][0]], rules[V],
799 table_cell_rowspan (&cell));
804 /* Decide final row heights. */
805 accumulate_row_widths (page, V, rows, rules[V]);
808 /* Measure headers. If they are "too big", get rid of them. */
809 for (enum table_axis axis = 0; axis < TABLE_N_AXES; axis++)
811 int hw = headers_width (page, axis);
812 if (hw * 2 >= page->params->size[axis]
813 || hw + max_cell_width (page, axis) > page->params->size[axis])
/* Dropping headers: fold them back into the rendered body region. */
816 page->r[axis][0] = 0;
817 page->r[axis][1] = page->n[axis];
827 /* Increases PAGE's reference count. */
829 render_page_ref (const struct render_page *page_)
831 struct render_page *page = CONST_CAST (struct render_page *, page_);
836 /* Decreases PAGE's reference count and destroys PAGE if this causes the
837 reference count to fall to zero. */
839 render_page_unref (struct render_page *page)
841 if (page != NULL && --page->ref_cnt == 0)
843 struct render_overflow *overflow, *next;
844 HMAP_FOR_EACH_SAFE (overflow, next, struct render_overflow, node,
847 hmap_destroy (&page->overflows);
849 table_unref (page->table);
851 for (int i = 0; i < TABLE_N_AXES; ++i)
858 /* Returns the size of PAGE along AXIS. (This might be larger than the page
859 size specified in the parameters passed to render_page_create(). Use a
860 render_break to break up a render_page into page-sized chunks.) */
862 render_page_get_size (const struct render_page *page, enum table_axis axis)
864 return page->cp[axis][page->n[axis] * 2 + 1];
868 render_page_get_best_breakpoint (const struct render_page *page, int height)
870 /* If there's no room for at least the top row and the rules above and below
871 it, don't include any of the table. */
872 if (page->cp[V][3] > height)
875 /* Otherwise include as many rows and rules as we can. */
876 for (int y = 5; y <= 2 * page->n[V] + 1; y += 2)
877 if (page->cp[V][y] > height)
878 return page->cp[V][y - 2];
882 /* Drawing render_pages. */
884 /* This is like table_get_rule() except that D is in terms of the page's rows
885 and column rather than the underlying table's. */
886 static struct table_border_style
887 get_rule (const struct render_page *page, enum table_axis axis,
888 const int d_[TABLE_N_AXES])
/* Convert cp[] offsets into cell/rule indexes along each axis. */
890 int d[TABLE_N_AXES] = { d_[0] / 2, d_[1] / 2 };
893 enum table_axis a = axis;
/* Along the break axis, header ordinates map directly; body ordinates are
   shifted into the rendered region r[a]. */
894 if (d[a] < page->h[a])
896 else if (d[a] <= page->n[a])
898 if (page->h[a] && d[a] == page->h[a])
900 d[a] += page->r[a][0] - page->h[a];
903 enum table_axis b = !axis;
905 get_map (page, b, d[b], &m);
908 struct table_border_style border
909 = table_get_rule (page->table, axis, d[H], d[V]);
/* At the header/body boundary, combine the header-edge rule with the body
   rule so the heavier stroke wins. */
913 struct table_border_style border2 = table_get_rule (page->table, axis,
915 border.stroke = table_stroke_combine (border.stroke, border2.stroke);
/* Returns true if output should be rendered right-to-left, as determined by
   the locale's translation of a marker string. */
static bool
render_direction_rtl (void)
{
  /* TRANSLATORS: Do not translate this string. If the script of your language
     reads from right to left (eg Persian, Arabic, Hebrew etc), then replace
     this string with "output-direction-rtl". Otherwise either leave it
     untranslated or copy it verbatim. */
  const char *dir = _("output-direction-ltr");
  if (0 == strcmp ("output-direction-rtl", dir))
    return true;

  /* Any other translation of the marker is a mistranslation. */
  if (0 != strcmp ("output-direction-ltr", dir))
    fprintf (stderr, "This localisation has been incorrectly translated. "
             "Complain to the translator.\n");

  return false;
}
/* Draws the rule at cp[] offsets D within PAGE, at pixel offset OFS, by
   resolving the border styles on each side of the rule junction and calling
   the device's draw_line function. */
945 render_rule (const struct render_page *page, const int ofs[TABLE_N_AXES],
946 const int d[TABLE_N_AXES])
948 const struct table_border_style none = { .stroke = TABLE_STROKE_NONE };
/* styles[a][0] and styles[a][1] are the rule styles on the two sides of the
   junction along axis 'a'. */
949 struct table_border_style styles[TABLE_N_AXES][2];
951 for (enum table_axis a = 0; a < TABLE_N_AXES; a++)
953 enum table_axis b = !a;
/* Suppress rules along edges where pixels were cut off mid-cell. */
956 || (page->is_edge_cutoff[a][0] && d[a] == 0)
957 || (page->is_edge_cutoff[a][1] && d[a] == page->n[a] * 2))
958 styles[a][0] = styles[a][1] = none;
959 else if (is_rule (d[b]))
967 styles[a][0] = get_rule (page, a, e);
972 if (d[b] / 2 < page->n[b])
973 styles[a][1] = get_rule (page, a, d);
978 styles[a][0] = styles[a][1] = get_rule (page, a, d);
/* Only call into the device if something is actually visible. */
981 if (styles[H][0].stroke != TABLE_STROKE_NONE
982 || styles[H][1].stroke != TABLE_STROKE_NONE
983 || styles[V][0].stroke != TABLE_STROKE_NONE
984 || styles[V][1].stroke != TABLE_STROKE_NONE)
986 int bb[TABLE_N_AXES][2];
988 bb[H][0] = ofs[H] + page->cp[H][d[H]];
989 bb[H][1] = ofs[H] + page->cp[H][d[H] + 1];
/* For right-to-left output, mirror the horizontal extent. */
990 if (page->params->rtl)
993 bb[H][0] = render_page_get_size (page, H) - bb[H][1];
994 bb[H][1] = render_page_get_size (page, H) - temp;
996 bb[V][0] = ofs[V] + page->cp[V][d[V]];
997 bb[V][1] = ofs[V] + page->cp[V][d[V] + 1];
998 page->params->ops->draw_line (page->params->aux, bb, styles);
/* Draws CELL within PAGE at pixel offset OFS by computing its bounding box
   and clip region (accounting for RTL mirroring, vertical alignment, and any
   overflow from page breaking) and calling the device's draw_cell function. */
1003 render_cell (const struct render_page *page, const int ofs[TABLE_N_AXES],
1004 const struct table_cell *cell)
/* Debug dump of the cell's region and value; disabled by default. */
1006 const bool debugging = false;
1010 if (cell->d[H][0] + 1 == cell->d[H][1])
1011 printf ("%d", cell->d[H][0]);
1013 printf ("%d-%d", cell->d[H][0], cell->d[H][1] - 1);
1015 if (cell->d[V][0] + 1 == cell->d[V][1])
1016 printf ("%d", cell->d[V][0]);
1018 printf ("%d-%d", cell->d[V][0], cell->d[V][1] - 1);
1020 char *value = pivot_value_to_string (cell->value, NULL);
1021 printf (": \"%s\"\n", value);
/* bb is where the cell is drawn; clip limits what may actually be painted. */
1025 int bb[TABLE_N_AXES][2];
1026 int clip[TABLE_N_AXES][2];
1028 bb[H][0] = clip[H][0] = ofs[H] + page->cp[H][cell->d[H][0] * 2 + 1];
1029 bb[H][1] = clip[H][1] = ofs[H] + page->cp[H][cell->d[H][1] * 2];
/* For right-to-left output, mirror the horizontal extents. */
1030 if (page->params->rtl)
1032 int temp = bb[H][0];
1033 bb[H][0] = clip[H][0] = render_page_get_size (page, H) - bb[H][1];
1034 bb[H][1] = clip[H][1] = render_page_get_size (page, H) - temp;
1036 bb[V][0] = clip[V][0] = ofs[V] + page->cp[V][cell->d[V][0] * 2 + 1];
1037 bb[V][1] = clip[V][1] = ofs[V] + page->cp[V][cell->d[V][1] * 2];
/* Vertical alignment: shift the content down by all (bottom) or half
   (center) of the leftover height. */
1039 enum table_valign valign = cell->cell_style->valign;
1040 int valign_offset = 0;
1041 if (valign != TABLE_VALIGN_TOP)
1043 int height = page->params->ops->measure_cell_height (
1044 page->params->aux, cell, bb[H][1] - bb[H][0]);
1045 int extra = bb[V][1] - bb[V][0] - height;
1048 if (valign == TABLE_VALIGN_CENTER)
1050 valign_offset += extra;
/* If this cell overflows the page, extend bb into the trimmed-off space and
   widen clip at uncut page edges so the content lines up across pages. */
1054 const struct render_overflow *of = find_overflow (
1055 page, cell->d[H][0], cell->d[V][0]);
1057 for (enum table_axis axis = 0; axis < TABLE_N_AXES; axis++)
1059 if (of->overflow[axis][0])
1061 bb[axis][0] -= of->overflow[axis][0];
1062 if (cell->d[axis][0] == 0 && !page->is_edge_cutoff[axis][0])
1063 clip[axis][0] = ofs[axis] + page->cp[axis][cell->d[axis][0] * 2];
1065 if (of->overflow[axis][1])
1067 bb[axis][1] += of->overflow[axis][1];
1068 if (cell->d[axis][1] == page->n[axis]
1069 && !page->is_edge_cutoff[axis][1])
1070 clip[axis][1] = ofs[axis] + page->cp[axis][cell->d[axis][1] * 2
/* Cells may spill halfway into the rules that surround them. */
1075 int spill[TABLE_N_AXES][2];
1076 for (enum table_axis axis = 0; axis < TABLE_N_AXES; axis++)
1078 spill[axis][0] = rule_width (page, axis, cell->d[axis][0]) / 2;
1079 spill[axis][1] = rule_width (page, axis, cell->d[axis][1]) / 2;
/* Alternate body-row colors; header rows get a fixed color index. */
1082 int color_idx = (cell->d[V][0] < page->h[V]
1084 : (cell->d[V][0] - page->h[V]) & 1);
1085 page->params->ops->draw_cell (page->params->aux, cell, color_idx,
1086 bb, valign_offset, spill, clip);
1089 /* Draws the cells of PAGE indicated in BB. */
1091 render_page_draw_cells (const struct render_page *page,
1092 int ofs[TABLE_N_AXES], int bb[TABLE_N_AXES][2])
/* First pass: draw cells.  Each cell is drawn once, from its top row (or the
   first row in BB for cells that start above it), and 'x' skips to the end
   of the cell's span. */
1094 for (int y = bb[V][0]; y < bb[V][1]; y++)
1095 for (int x = bb[H][0]; x < bb[H][1];)
1096 if (!is_rule (x) && !is_rule (y))
1098 struct table_cell cell;
1100 render_get_cell (page, x / 2, y / 2, &cell);
1101 if (y / 2 == bb[V][0] / 2 || y / 2 == cell.d[V][0])
1102 render_cell (page, ofs, &cell);
1103 x = rule_ofs (cell.d[H][1]);
/* Second pass: draw rules, after the cells so rules paint on top of any cell
   spill-over. */
1108 for (int y = bb[V][0]; y < bb[V][1]; y++)
1109 for (int x = bb[H][0]; x < bb[H][1]; x++)
1110 if (is_rule (x) || is_rule (y))
1112 int d[TABLE_N_AXES];
1115 render_rule (page, ofs, d);
1119 /* Renders PAGE, by calling the 'draw_line' and 'draw_cell' functions from the
1120 render_params provided to render_page_create(). */
1122 render_page_draw (const struct render_page *page, int ofs[TABLE_N_AXES])
1124 int bb[TABLE_N_AXES][2];
1127 bb[H][1] = page->n[H] * 2 + 1;
1129 bb[V][1] = page->n[V] * 2 + 1;
1131 render_page_draw_cells (page, ofs, bb);
/* Returns the greatest value i, 0 <= i < n, such that cp[i] <= x0, or 0 if
   there is no such i.  CP[] must be nondecreasing. */
static int
get_clip_min_extent (int x0, const int cp[], int n)
{
  /* Binary search for the last entry at or below X0. */
  int low = 0;
  int high = n;
  int best = 0;
  while (low < high)
    {
      int middle = low + (high - low) / 2;

      if (cp[middle] <= x0)
        {
          best = middle;
          low = middle + 1;
        }
      else
        high = middle;
    }

  return best;
}
/* Returns the least value i, 0 <= i <= n, such that cp[i] >= x1, backed up
   past any equal (zero-width) predecessors.  CP[] must be nondecreasing. */
static int
get_clip_max_extent (int x1, const int cp[], int n)
{
  /* Binary search for the first entry at or above X1. */
  int low = 0;
  int high = n;
  int best = n;
  while (low < high)
    {
      int middle = low + (high - low) / 2;

      if (cp[middle] >= x1)
        best = high = middle;
      else
        low = middle + 1;
    }

  /* Skip backward over zero-width entries so the clip region excludes empty
     trailing rules or cells. */
  while (best > 0 && cp[best - 1] == cp[best])
    best--;

  return best;
}
1180 /* Renders the cells of PAGE that intersect (X,Y)-(X+W,Y+H), by calling the
1181 'draw_line' and 'draw_cell' functions from the render_params provided to
1182 render_page_create(). */
1184 render_page_draw_region (const struct render_page *page,
1185 int ofs[TABLE_N_AXES], int clip[TABLE_N_AXES][2])
1187 int bb[TABLE_N_AXES][2];
/* Translate the pixel clip rectangle into a bounding box of rule/cell
   indexes by binary-searching the cumulative pixel offsets in page->cp[]. */
1189 bb[H][0] = get_clip_min_extent (clip[H][0], page->cp[H], page->n[H] * 2 + 1);
1190 bb[H][1] = get_clip_max_extent (clip[H][1], page->cp[H], page->n[H] * 2 + 1);
1191 bb[V][0] = get_clip_min_extent (clip[V][0], page->cp[V], page->n[V] * 2 + 1);
1192 bb[V][1] = get_clip_max_extent (clip[V][1], page->cp[V], page->n[V] * 2 + 1);
1194 render_page_draw_cells (page, ofs, bb);
1197 /* Breaking up tables to fit on a page. */
1199 /* An iterator for breaking render_pages into smaller chunks. */
1202 struct render_page *page; /* Page being broken up. */
1203 enum table_axis axis; /* Axis along which 'page' is being broken. */
1204 int z; /* Next cell along 'axis'. */
1205 int pixel; /* Pixel offset within cell 'z' (usually 0). */
1206 int hw; /* Width of headers of 'page' along 'axis'. */
/* Helpers used by render_break_next(), defined further below. */
1209 static int needed_size (const struct render_break *, int cell);
1210 static bool cell_is_breakable (const struct render_break *, int cell);
1211 static struct render_page *render_page_select (const struct render_page *,
1216 /* Initializes render_break B for breaking PAGE along AXIS.
1217 Takes ownership of PAGE. */
1219 render_break_init (struct render_break *b, struct render_page *page,
1220 enum table_axis axis)
/* Start iteration at the first non-header cell; cells before h[axis] are
   headers (presumably repeated on every chunk — see 'hw' below). */
1224 b->z = page->h[axis];
1226 b->hw = headers_width (page, axis);
1229 /* Initializes B as a render_break structure for which
1230 render_break_has_next() always returns false. */
1232 render_break_init_empty (struct render_break *b)
/* NOTE(review): the member assignment that makes render_break_has_next()
   return false (presumably b->page = NULL) is elided in this extract. */
1235 b->axis = TABLE_HORZ;
1241 /* Frees B and unrefs the render_page that it owns. */
1243 render_break_destroy (struct render_break *b)
1247 render_page_unref (b->page);
1252 /* Returns true if B still has cells that are yet to be returned,
1253 false if all of B's page has been processed. */
1255 render_break_has_next (const struct render_break *b)
1257 const struct render_page *page = b->page;
1258 enum table_axis axis = b->axis;
/* A null page (e.g. from render_break_init_empty()) never has more cells. */
1260 return page != NULL && b->z < page->n[axis];
1263 /* Returns a new render_page that is up to SIZE pixels wide along B's axis.
1264 Returns a null pointer if B has already been completely broken up, or if
1265 SIZE is too small to reasonably render any cells. The latter will never
1266 happen if SIZE is at least as large as the page size passed to
1267 render_page_create() along B's axis. */
1268 static struct render_page *
1269 render_break_next (struct render_break *b, int size)
1271 const struct render_page *page = b->page;
1272 enum table_axis axis = b->axis;
1273 struct render_page *subpage;
1275 if (!render_break_has_next (b))
/* Greedily extend the chunk cell by cell until it no longer fits in SIZE. */
1280 for (z = b->z; z < page->n[axis]; z++)
1282 int needed = needed_size (b, z + 1);
/* Cell z does not fit whole; consider rendering part of it. */
1285 if (cell_is_breakable (b, z))
1287 /* If there is no right header and we render a partial cell on
1288 the right side of the body, then we omit the rightmost rule of
1289 the body. Otherwise the rendering is deceptive because it
1290 looks like the whole cell is present instead of a partial
1293 This is similar to code for the left side in needed_size(). */
1294 int rule_allowance = rule_width (page, axis, z);
1296 /* The amount that, if we added cell 'z', the rendering would
1297 overfill the allocated 'size'. */
1298 int overhang = needed - size - rule_allowance;
1300 /* The width of cell 'z'. */
1301 int cell_size = cell_width (page, axis, z);
1303 /* The amount trimmed off the left side of 'z',
1304 and the amount left to render. */
1305 int cell_ofs = z == b->z ? b->pixel : 0;
1306 int cell_left = cell_size - cell_ofs;
1308 /* A small but visible width. */
1309 int em = page->params->font_size[axis];
1311 /* If some of the cell remains to render,
1312 and there would still be some of the cell left afterward,
1313 then partially render that much of the cell. */
1314 pixel = (cell_left && cell_left > overhang
1315 ? cell_left - overhang + cell_ofs
1318 /* If there would be only a tiny amount of the cell left after
1319 rendering it partially, reduce the amount rendered slightly
1320 to make the output look a little better. */
1321 if (pixel + em > cell_size)
1322 pixel = MAX (pixel - em, 0);
1324 /* If we're breaking vertically, then consider whether the cells
1325 being broken have a better internal breakpoint than the exact
1326 number of pixels available, which might look bad e.g. because
1327 it breaks in the middle of a line of text. */
1328 if (axis == TABLE_VERT && page->params->ops->adjust_break)
1329 for (int x = 0; x < page->n[H];)
1331 struct table_cell cell;
1333 render_get_cell (page, x, z, &cell);
1334 int w = joined_width (page, H, cell.d[H][0], cell.d[H][1]);
/* Let the device shrink the break position to a natural breakpoint
   (e.g. a line boundary) within this cell. */
1335 int better_pixel = page->params->ops->adjust_break (
1336 page->params->aux, &cell, w, pixel);
1339 if (better_pixel < pixel)
/* Accept the adjustment only if it still makes forward progress
   past any pixel offset already consumed in cell z. */
1341 if (better_pixel > (z == b->z ? b->pixel : 0))
1343 pixel = better_pixel;
1346 else if (better_pixel == 0 && z != b->z)
/* Nothing at all fits: neither a whole cell nor a partial one. */
1358 if (z == b->z && !pixel)
/* Carve the selected range [b->z, z] (plus partial-pixel trims) out of
   PAGE into a new subpage. */
1361 subpage = render_page_select (page, axis, b->z, b->pixel,
1363 pixel ? cell_width (page, axis, z) - pixel
1370 /* Returns the width that would be required along B's axis to render a page
1371 from B's current position up to but not including CELL. */
1373 needed_size (const struct render_break *b, int cell)
1375 const struct render_page *page = b->page;
1376 enum table_axis axis = b->axis;
1378 /* Width of left header not including its rightmost rule. */
1379 int size = axis_width (page, axis, 0, rule_ofs (page->h[axis]));
1381 /* If we have a pixel offset and there is no left header, then we omit the
1382 leftmost rule of the body. Otherwise the rendering is deceptive because
1383 it looks like the whole cell is present instead of a partial cell.
1385 Otherwise (if there are headers) we will be merging two rules: the
1386 rightmost rule in the header and the leftmost rule in the body. We assume
1387 that the width of a merged rule is the larger of the widths of either rule
1389 if (b->pixel == 0 || page->h[axis])
1390 size += MAX (rule_width (page, axis, page->h[axis]),
1391 rule_width (page, axis, b->z));
1393 /* Width of body, minus any pixel offset in the leftmost cell. */
1394 size += joined_width (page, axis, b->z, cell) - b->pixel;
1396 /* Width of rightmost rule in body merged with leftmost rule in headers. */
1397 size += MAX (rule_width_r (page, axis, 0), rule_width (page, axis, cell));
1402 /* Returns true if CELL along B's axis may be broken across a page boundary.
1404 This is just a heuristic. Breaking cells across page boundaries can save
1405 space, but it looks ugly. */
1407 cell_is_breakable (const struct render_break *b, int cell)
1409 const struct render_page *page = b->page;
1410 enum table_axis axis = b->axis;
/* Only cells at least min_break pixels wide are worth splitting. */
1412 return cell_width (page, axis, cell) >= page->params->min_break[axis];
/* Members of struct render_pager (the struct header itself is elided in
   this extract). */
1419 const struct render_params *params; /* Target device parameters. */
1422 struct render_page *page; /* The full page being paginated. */
/* Nested break iterators: x_break slices 'page' horizontally, and y_break
   slices each horizontal chunk vertically (see render_pager_has_next()). */
1424 struct render_break x_break;
1425 struct render_break y_break;
/* Resets P's break iterators to begin paginating P->page from the start:
   a fresh horizontal break and an empty vertical break (the first call to
   render_pager_has_next() will populate y_break). */
1429 render_pager_start_page (struct render_pager *p)
1431 render_break_init (&p->x_break, render_page_ref (p->page), H);
1432 render_break_init_empty (&p->y_break);
1435 /* Creates and returns a new render_pager for rendering PT on the device
1436 with the given PARAMS. */
1437 struct render_pager *
1438 render_pager_create (const struct render_params *params,
1439 const struct pivot_table *pt,
1440 const size_t *layer_indexes)
/* Presumably a null LAYER_INDEXES means "use the table's current layer";
   the guard condition is elided in this extract — TODO confirm. */
1443 layer_indexes = pt->current_layer;
1445 struct table *table = pivot_output_monolithic (pt, layer_indexes,
1448 /* Measure the table width and use it to determine the base scale. */
1449 struct render_page *page = render_page_create (params, table, 0);
1450 int width = table_width (page, H);
1452 if (width > params->size[H])
/* Table is too wide: either shrink it to fit (if the look and the device
   both allow scaling) ... */
1454 if (pt->look->shrink_to_fit[H] && params->ops->scale)
1455 scale = params->size[H] / (double) width;
/* ... or re-measure the width of just the first horizontal chunk. */
1458 struct render_break b;
1459 render_break_init (&b, render_page_ref (page), H);
1460 struct render_page *subpage
1461 = render_break_next (&b, params->size[H]);
/* cp[H][2*n+1] is the rightmost cumulative pixel offset, i.e. the
   chunk's total width. */
1462 width = subpage ? subpage->cp[H][2 * subpage->n[H] + 1] : 0;
1463 render_page_unref (subpage);
1464 render_break_destroy (&b);
1468 /* Create the pager. */
1469 struct render_pager *p = xmalloc (sizeof *p);
1470 *p = (struct render_pager) { .params = params, .scale = scale, .page = page };
1472 /* If we're shrinking tables to fit the page length, then adjust the scale
1475 XXX This will sometimes shrink more than needed, because adjusting the
1476 scale factor allows for cells to be "wider", which means that sometimes
1477 they won't break across as much vertical space, thus shrinking the table
1478 vertically more than the scale would imply. Shrinking only as much as
1479 necessary would require an iterative search. */
1480 if (pt->look->shrink_to_fit[V] && params->ops->scale)
1482 double height = table_width (p->page, V);
1483 if (height * p->scale >= params->size[V])
1484 p->scale *= params->size[V] / height;
1487 render_pager_start_page (p);
/* Releases all resources owned by P: both break iterators and the
   reference to the underlying render_page. */
1494 render_pager_destroy (struct render_pager *p)
1498 render_break_destroy (&p->x_break);
1499 render_break_destroy (&p->y_break);
1500 render_page_unref (p->page);
1505 /* Returns true if P has content remaining to render, false if rendering is
1508 render_pager_has_next (const struct render_pager *p_)
/* Logically const, but advancing the break iterators mutates internal
   state, hence the cast. */
1510 struct render_pager *p = CONST_CAST (struct render_pager *, p_);
/* Lazily refill the vertical break from the next horizontal chunk until
   either it has content or the horizontal break is exhausted too. */
1512 while (!render_break_has_next (&p->y_break))
1514 render_break_destroy (&p->y_break);
1515 if (!render_break_has_next (&p->x_break))
/* Fully exhausted: leave both breaks empty so future calls are cheap. */
1517 render_break_destroy (&p->x_break);
1518 render_break_init_empty (&p->x_break);
1519 render_break_init_empty (&p->y_break);
/* Take the next horizontal chunk (device width divided by the scale
   factor) and set it up for vertical breaking. */
1524 &p->y_break, render_break_next (&p->x_break,
1525 p->params->size[H] / p->scale), V);
1530 /* Draws a chunk of content from P to fit in a space that has vertical size
1531 SPACE and the horizontal size specified in the render_params passed to
1532 render_page_create(). Returns the amount of space actually used by the
1533 rendered chunk, which will be 0 if SPACE is too small to render anything or
1534 if no content remains (use render_pager_has_next() to distinguish these
1537 render_pager_draw_next (struct render_pager *p, int space)
/* Apply the shrink-to-fit scale on the device before drawing. */
1539 if (p->scale != 1.0)
1541 p->params->ops->scale (p->params->aux, p->scale);
1545 int ofs[TABLE_N_AXES] = { 0, 0 };
1547 if (render_pager_has_next (p))
/* Take the next vertical chunk that fits in the space left. */
1549 struct render_page *page
1550 = render_break_next (&p->y_break, space - ofs[V]);
1553 render_page_draw (page, ofs);
1554 ofs[V] += render_page_get_size (page, V);
1555 render_page_unref (page);
/* Presumably the scale is restored here; the call is elided in this
   extract — TODO confirm against full source. */
1559 if (p->scale != 1.0)
1565 /* Draws all of P's content. */
1567 render_pager_draw (const struct render_pager *p)
/* Drawing an unbounded region is equivalent to drawing everything. */
1569 render_pager_draw_region (p, 0, 0, INT_MAX, INT_MAX);
1572 /* Draws the region of P's content that lies in the region (X,Y)-(X+W,Y+H).
1573 Some extra content might be drawn; the device should perform clipping as
1576 render_pager_draw_region (const struct render_pager *p,
1577 int x, int y, int w, int h)
1579 int ofs[TABLE_N_AXES] = { 0, 0 };
1580 int clip[TABLE_N_AXES][2];
1584 int size = render_page_get_size (p->page, V);
/* Intersect the requested vertical span with the page and translate it
   into page-relative coordinates. */
1586 clip[V][0] = MAX (y, ofs[V]) - ofs[V];
1587 clip[V][1] = MIN (y + h, ofs[V] + size) - ofs[V];
/* Only draw if the intersection is nonempty. */
1588 if (clip[V][1] > clip[V][0])
1589 render_page_draw_region (p->page, ofs, clip);
1594 /* Returns the size of P's content along AXIS; i.e. the content's width if AXIS
1595 is TABLE_HORZ and its length if AXIS is TABLE_VERT. */
1597 render_pager_get_size (const struct render_pager *p, enum table_axis axis)
1599 return render_page_get_size (p->page, axis);
/* Returns a good vertical breakpoint for P's page within HEIGHT pixels.
   If the whole page fits, the elided branch presumably returns HEIGHT
   unchanged; otherwise defers to render_page_get_best_breakpoint(). */
1603 render_pager_get_best_breakpoint (const struct render_pager *p, int height)
1605 int size = render_page_get_size (p->page, V);
1606 return (size < height
1608 : render_page_get_best_breakpoint (p->page, height));
1611 /* render_page_select() and helpers. */
/* Bundles the parameters of one in-progress render_page_select() call so
   that the static helpers below can share them. */
1613 struct render_page_selection
1615 const struct render_page *page; /* Page whose slice we are selecting. */
1616 struct render_page *subpage; /* New page under construction. */
1617 enum table_axis a; /* Axis of 'page' along which 'subpage' is a slice. */
1618 enum table_axis b; /* The opposite of 'a'. */
1619 int z0; /* First cell along 'a' being selected. */
1620 int z1; /* Last cell being selected, plus 1. */
1621 int p0; /* Number of pixels to trim off left side of z0. */
1622 int p1; /* Number of pixels to trim off right side of z1-1. */
1625 static void cell_to_subpage (struct render_page_selection *,
1626 const struct table_cell *,
1627 int subcell[TABLE_N_AXES]);
1628 static const struct render_overflow *find_overflow_for_cell (
1629 struct render_page_selection *, const struct table_cell *);
1630 static struct render_overflow *insert_overflow (struct render_page_selection *,
1631 const struct table_cell *);
1633 /* Creates and returns a new render_page whose contents are a subregion of
1634 PAGE's contents. The new render_page includes cells Z0 through Z1
1635 (exclusive) along AXIS, plus any headers on AXIS.
1637 If P0 is nonzero, then it is a number of pixels to exclude from the left or
1638 top (according to AXIS) of cell Z0. Similarly, P1 is a number of pixels to
1639 exclude from the right or bottom of cell Z1 - 1. (P0 and P1 are used to
1640 render cells that are too large to fit on a single page.)
1642 The whole of axis !AXIS is included. (The caller may follow up with another
1643 call to render_page_select() to select on !AXIS to select on that axis as
1646 The caller retains ownership of PAGE, which is not modified. */
1647 static struct render_page *
1648 render_page_select (const struct render_page *page, enum table_axis axis,
1649 int z0, int p0, int z1, int p1)
1651 enum table_axis a = axis;
1652 enum table_axis b = !a;
1654 /* Optimize case where all of PAGE is selected by just incrementing the
1656 if (z0 == page->h[a] && p0 == 0 && z1 == page->n[a] && p1 == 0)
1658 struct render_page *page_rw = CONST_CAST (struct render_page *, page);
1663 /* Allocate subpage. */
/* trim[0]/trim[1]: cells dropped from the leading/trailing end of axis A. */
1664 int trim[2] = { z0 - page->h[a], page->n[a] - z1 };
1666 int n[TABLE_N_AXES] = { [H] = page->n[H], [V] = page->n[V] };
1667 n[a] -= trim[0] + trim[1];
1669 int r[TABLE_N_AXES][2];
1670 for (enum table_axis k = 0; k < TABLE_N_AXES; k++)
1672 r[k][0] = page->r[k][0];
1673 r[k][1] = page->r[k][1];
1678 struct render_page *subpage = render_page_allocate__ (
1679 page->params, table_ref (page->table), n, page->h, r);
1681 /* An edge is cut off if it was cut off in PAGE or if we're trimming pixels
1682 off that side of the page and there are no headers. */
1683 subpage->is_edge_cutoff[a][0] =
1684 subpage->h[a] == 0 && (p0 || (z0 == 0 && page->is_edge_cutoff[a][0]));
1685 subpage->is_edge_cutoff[a][1] =
1686 p1 || (z1 == page->n[a] && page->is_edge_cutoff[a][1]);
1687 subpage->is_edge_cutoff[b][0] = page->is_edge_cutoff[b][0];
1688 subpage->is_edge_cutoff[b][1] = page->is_edge_cutoff[b][1];
1690 /* Select widths from PAGE into subpage. */
1691 int *scp = page->cp[a];
1692 int *dcp = subpage->cp[a];
/* Copy header rule/cell widths, zeroing the first rule if that edge is
   cut off. */
1694 for (int z = 0; z <= rule_ofs (subpage->h[a]); z++, dcp++)
1696 int w = !z && subpage->is_edge_cutoff[a][0] ? 0 : scp[z + 1] - scp[z];
1697 dcp[1] = dcp[0] + w;
/* Copy body widths for cells z0..z1-1; the elided lines presumably adjust
   the first/last cell for the P0/P1 pixel trims — TODO confirm. */
1699 for (int z = cell_ofs (z0); z <= cell_ofs (z1 - 1); z++, dcp++)
1701 dcp[1] = dcp[0] + (scp[z + 1] - scp[z]);
1702 if (z == cell_ofs (z0))
1704 if (z == cell_ofs (z1 - 1))
/* Copy the trailing rule, zero-width if that edge is cut off. */
1707 for (int z = rule_ofs_r (page, a, 0);
1708 z <= rule_ofs_r (page, a, 0); z++, dcp++)
1710 if (z == rule_ofs_r (page, a, 0) && subpage->is_edge_cutoff[a][1])
1713 dcp[1] = dcp[0] + (scp[z + 1] - scp[z]);
/* Sanity check: we filled exactly the subpage's coordinate array. */
1715 assert (dcp == &subpage->cp[a][2 * subpage->n[a] + 1]);
/* Axis B is copied through unchanged. */
1717 for (int z = 0; z < page->n[b] * 2 + 2; z++)
1718 subpage->cp[b][z] = page->cp[b][z];
1720 /* Add new overflows. */
1721 struct render_page_selection s = {
/* Scan cells along the leading edge for content clipped off on side 0
   or side 1 of axis A, recording how many pixels of each cell fall
   outside the selection. */
1732 if (!page->h[a] || z0 > page->h[a] || p0)
1733 for (int z = 0; z < page->n[b];)
1735 int d[TABLE_N_AXES];
1739 struct table_cell cell;
1740 render_get_cell (page, d[H], d[V], &cell);
1741 bool overflow0 = p0 || cell.d[a][0] < z0;
1742 bool overflow1 = cell.d[a][1] > z1 || (cell.d[a][1] == z1 && p1);
1743 if (overflow0 || overflow1)
1745 struct render_overflow *ro = insert_overflow (&s, &cell);
1748 ro->overflow[a][0] += p0 + axis_width (
1749 page, a, cell_ofs (cell.d[a][0]), cell_ofs (z0));
1752 ro->overflow[a][1] += p1 + axis_width (
1753 page, a, cell_ofs (z1), cell_ofs (cell.d[a][1]));
/* Scan cells along the trailing edge for content clipped off on side 1,
   skipping cells already recorded above. */
1758 for (int z = 0; z < page->n[b];)
1760 int d[TABLE_N_AXES];
1764 struct table_cell cell;
1765 render_get_cell (page, d[H], d[V], &cell);
1766 if ((cell.d[a][1] > z1 || (cell.d[a][1] == z1 && p1))
1767 && find_overflow_for_cell (&s, &cell) == NULL)
1769 struct render_overflow *ro = insert_overflow (&s, &cell);
1770 ro->overflow[a][1] += p1 + axis_width (page, a, cell_ofs (z1),
1771 cell_ofs (cell.d[a][1]));
1776 /* Copy overflows from PAGE into subpage. */
1777 struct render_overflow *ro;
1778 HMAP_FOR_EACH (ro, struct render_overflow, node, &page->overflows)
1780 struct table_cell cell;
1782 table_get_cell (page->table, ro->d[H], ro->d[V], &cell);
/* Only overflows for cells that intersect the selected range carry over. */
1783 if (cell.d[a][1] > z0 && cell.d[a][0] < z1
1784 && find_overflow_for_cell (&s, &cell) == NULL)
1785 insert_overflow (&s, &cell);
1791 /* Given CELL, a table_cell within S->page, stores in SUBCELL the (x,y)
1792 coordinates of the top-left cell as it will appear in S->subpage.
1794 CELL must actually intersect the region of S->page that is being selected
1795 by render_page_select() or the results will not make any sense. */
1797 cell_to_subpage (struct render_page_selection *s,
1798 const struct table_cell *cell, int subcell[TABLE_N_AXES])
1800 enum table_axis a = s->a;
1801 enum table_axis b = s->b;
1802 int ha0 = s->subpage->h[a];
/* Shift axis-A coordinates by the selection origin (z0), clamping into the
   subpage body so a cell that starts before z0 maps to the first body cell;
   axis B passes through unchanged. */
1804 subcell[a] = MAX (cell->d[a][0] - s->z0 + ha0, ha0);
1805 subcell[b] = cell->d[b][0];
1808 /* Given CELL, a table_cell within S->page, returns the render_overflow for
1809 that cell in S->subpage, if there is one, and a null pointer otherwise.
1811 CELL must actually intersect the region of S->page that is being selected
1812 by render_page_select() or the results will not make any sense. */
1813 static const struct render_overflow *
1814 find_overflow_for_cell (struct render_page_selection *s,
1815 const struct table_cell *cell)
/* Translate to subpage coordinates, then look up in the subpage's map. */
1819 cell_to_subpage (s, cell, subcell);
1820 return find_overflow (s->subpage, subcell[H], subcell[V]);
1823 /* Given CELL, a table_cell within S->page, inserts a render_overflow for that
1824 cell in S->subpage (which must not already exist). Initializes the new
1825 render_overflow's 'overflow' member from the overflow for CELL in S->page,
1828 CELL must actually intersect the region of S->page that is being selected
1829 by render_page_select() or the results will not make any sense. */
1830 static struct render_overflow *
1831 insert_overflow (struct render_page_selection *s,
1832 const struct table_cell *cell)
1834 struct render_overflow *of = XZALLOC (struct render_overflow);
1835 cell_to_subpage (s, cell, of->d);
1836 hmap_insert (&s->subpage->overflows, &of->node,
1837 hash_cell (of->d[H], of->d[V]));
1839 const struct render_overflow *old
1840 = find_overflow (s->page, cell->d[H][0], cell->d[V][0]);
1842 memcpy (of->overflow, old->overflow, sizeof of->overflow);