1 /* PSPP - a program for statistical analysis.
2 Copyright (C) 2009, 2010, 2011, 2013, 2014, 2016 Free Software Foundation, Inc.
4 This program is free software: you can redistribute it and/or modify
5 it under the terms of the GNU General Public License as published by
6 the Free Software Foundation, either version 3 of the License, or
7 (at your option) any later version.
9 This program is distributed in the hope that it will be useful,
10 but WITHOUT ANY WARRANTY; without even the implied warranty of
11 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 GNU General Public License for more details.
14 You should have received a copy of the GNU General Public License
15 along with this program. If not, see <http://www.gnu.org/licenses/>. */
25 #include "libpspp/assertion.h"
26 #include "libpspp/hash-functions.h"
27 #include "libpspp/hmap.h"
28 #include "libpspp/pool.h"
29 #include "output/pivot-output.h"
30 #include "output/pivot-table.h"
31 #include "output/render.h"
32 #include "output/table.h"
34 #include "gl/minmax.h"
35 #include "gl/xalloc.h"
38 #define _(msgid) gettext (msgid)
40 /* This file uses TABLE_HORZ and TABLE_VERT enough to warrant abbreviating. */
44 /* A layout for rendering a specific table on a specific device.
46 May represent the layout of an entire table presented to
47 render_page_create(), or a rectangular subregion of a table broken out using
48 render_break_next() to allow a table to be broken across multiple pages.
50 A page's size is not limited to the size passed in as part of render_params.
51 render_pager breaks a render_page into smaller render_pages that will fit in
52 the available space. */
/* NOTE(review): the 'struct render_page {' opener and some members (e.g. the
   'n[TABLE_N_AXES]' dimensions and the reference count used by
   render_page_ref/render_page_unref) are not visible in this excerpt. */
55 const struct render_params *params; /* Parameters of the target device. */
56 struct table *table; /* Table rendered. */
59 /* Region of 'table' to render.
61 The horizontal cells rendered are the leftmost h[H][0], then
62 r[H][0] through r[H][1], exclusive, then the rightmost h[H][1].
64 The vertical cells rendered are the topmost h[V][0], then r[V][0]
65 through r[V][1], exclusive, then the bottommost h[V][1].
67 n[H] = h[H][0] + (r[H][1] - r[H][0]) + h[H][1]
68 n[V] = h[V][0] + (r[V][1] - r[V][0]) + h[V][1]
70 int h[TABLE_N_AXES][2];
71 int r[TABLE_N_AXES][2];
76 cp[H] represents x positions within the table.
78 cp[H][1] = the width of the leftmost vertical rule.
79 cp[H][2] = cp[H][1] + the width of the leftmost column.
80 cp[H][3] = cp[H][2] + the width of the second-from-left vertical rule.
82 cp[H][2 * n[H]] = x position of the rightmost vertical rule.
83 cp[H][2 * n[H] + 1] = total table width including all rules.
85 Similarly, cp[V] represents y positions within the table.
87 cp[V][1] = the height of the topmost horizontal rule.
88 cp[V][2] = cp[V][1] + the height of the topmost row.
89 cp[V][3] = cp[V][2] + the height of the second-from-top horizontal rule.
91 cp[V][2 * n[V]] = y position of the bottommost horizontal rule.
92 cp[V][2 * n[V] + 1] = total table height including all rules.
94 Rules and columns can have width or height 0, in which case consecutive
95 values in this array are equal. */
96 int *cp[TABLE_N_AXES];
98 /* render_break_next() can break a table such that some cells are not fully
99 contained within a render_page. This will happen if a cell is too wide
100 or too tall to fit on a single page, or if a cell spans multiple rows or
101 columns and the page only includes some of those rows or columns.
103 This hash table contains "struct render_overflow"s that represent each
104 such cell that doesn't completely fit on this page.
106 Each overflow cell borders at least one header edge of the table and may
107 border more. (A single table cell that is so large that it fills the
108 entire page can overflow on all four sides!) */
109 struct hmap overflows;
111 /* If a single column (or row) is too wide (or tall) to fit on a page
112 reasonably, then render_break_next() will split a single row or column
113 across multiple render_pages. This member indicates when this has
116 is_edge_cutoff[H][0] is true if pixels have been cut off the left side
117 of the leftmost column in this page, and false otherwise.
119 is_edge_cutoff[H][1] is true if pixels have been cut off the right side
120 of the rightmost column in this page, and false otherwise.
122 is_edge_cutoff[V][0] and is_edge_cutoff[V][1] are similar for the top
123 and bottom of the table.
125 The effect of is_edge_cutoff is to prevent rules along the edge in
126 question from being rendered.
128 When is_edge_cutoff is true for a given edge, the 'overflows' hmap will
129 contain a node for each cell along that edge. */
130 bool is_edge_cutoff[TABLE_N_AXES][2];
132 /* If part of a joined cell would be cut off by breaking a table along
133 'axis' at the rule with offset 'z' (where 0 <= z <= n[axis]), then
134 join_crossing[axis][z] is the thickness of the rule that would be cut
137 This is used to know to allocate extra space for breaking at such a
138 position, so that part of the cell's content is not lost.
140 This affects breaking a table only when headers are present. When
141 headers are not present, the rule's thickness is used for cell content,
142 so no part of the cell's content is lost (and in fact it is duplicated
143 across both pages). */
144 int *join_crossing[TABLE_N_AXES];
/* Forward declarations for functions defined later in this file. */
147 static struct render_page *render_page_create (const struct render_params *,
148 struct table *, int min_width);
150 struct render_page *render_page_ref (const struct render_page *page_);
151 static void render_page_unref (struct render_page *);
153 /* Returns the offset in struct render_page's cp[axis] array of the rule with
154 index RULE_IDX. That is, if RULE_IDX is 0, then the offset is that of the
155 leftmost or topmost rule; if RULE_IDX is 1, then the offset is that of the
156 next rule to the right (or below); and so on. */
158 rule_ofs (int rule_idx)
/* NOTE(review): return type and body are elided in this excerpt; by symmetry
   with rule_ofs_r() this presumably returns rule_idx * 2 — verify against the
   full source. */
163 /* Returns the offset in struct render_page's cp[axis] array of the rule with
164 index RULE_IDX_R, which counts from the right side (or bottom) of the page
165 left (or up), according to whether AXIS is H or V, respectively. That is,
166 if RULE_IDX_R is 0, then the offset is that of the rightmost or bottommost
167 rule; if RULE_IDX is 1, then the offset is that of the next rule to the left
168 (or above); and so on. */
170 rule_ofs_r (const struct render_page *page, int axis, int rule_idx_r)
/* Rule i (from the left) lives at cp offset 2*i; counting RULE_IDX_R rules
   back from rule n[axis] gives the offset below. */
172 return (page->n[axis] - rule_idx_r) * 2;
175 /* Returns the offset in struct render_page's cp[axis] array of the cell with
176 index CELL_IDX. That is, if CELL_IDX is 0, then the offset is that of the
177 leftmost or topmost cell; if CELL_IDX is 1, then the offset is that of the
178 next cell to the right (or below); and so on. */
180 cell_ofs (int cell_idx)
/* Cells interleave with rules in cp[]: rule 0, cell 0, rule 1, cell 1, …
   so cell i is at odd offset 2*i + 1. */
182 return cell_idx * 2 + 1;
185 /* Returns the width of PAGE along AXIS from OFS0 to OFS1, exclusive. */
187 axis_width (const struct render_page *page, int axis, int ofs0, int ofs1)
/* cp[] holds cumulative positions, so a span's width is a simple difference. */
189 return page->cp[axis][ofs1] - page->cp[axis][ofs0];
192 /* Returns the total width of PAGE along AXIS. */
194 table_width (const struct render_page *page, int axis)
/* cp[axis][2 * n + 1] is the last cumulative position: the total extent
   including every rule (see the cp[] member comment in struct render_page). */
196 return page->cp[axis][2 * page->n[axis] + 1];
199 /* Returns the width of the headers in PAGE along AXIS. */
201 headers_width (const struct render_page *page, int axis)
/* w0: width of the leading headers (leftmost/topmost h[axis][0] cells plus
   the rules up to and including the rule after them). */
203 int h0 = page->h[axis][0];
204 int w0 = axis_width (page, axis, rule_ofs (0), cell_ofs (h0));
/* w1: width of the trailing headers (rightmost/bottommost h[axis][1] cells). */
205 int n = page->n[axis];
206 int h1 = page->h[axis][1];
207 int w1 = axis_width (page, axis, rule_ofs_r (page, axis, h1), cell_ofs (n));
/* NOTE(review): the return statement (presumably w0 + w1) is elided in this
   excerpt — verify against the full source. */
211 /* Returns the width of cell X along AXIS in PAGE. */
213 cell_width (const struct render_page *page, int axis, int x)
/* A cell occupies exactly one cp[] interval starting at cell_ofs (x). */
215 return axis_width (page, axis, cell_ofs (x), cell_ofs (x) + 1);
218 /* Returns the width of rule X along AXIS in PAGE. */
220 rule_width (const struct render_page *page, int axis, int x)
/* A rule occupies exactly one cp[] interval starting at rule_ofs (x). */
222 return axis_width (page, axis, rule_ofs (x), rule_ofs (x) + 1);
225 /* Returns the width of rule X, counting from the right side (or bottom),
   along AXIS in PAGE. */
227 rule_width_r (const struct render_page *page, int axis, int x)
229 int ofs = rule_ofs_r (page, axis, x);
230 return axis_width (page, axis, ofs, ofs + 1);
233 /* Returns the width of cells X0 through X1, exclusive, along AXIS in PAGE. */
235 joined_width (const struct render_page *page, int axis, int x0, int x1)
/* cell_ofs (x1) - 1 is the offset just past cell X1 - 1, so the span covers
   the joined cells and the interior rules between them, but not the rules on
   either side of the joined region. */
237 return axis_width (page, axis, cell_ofs (x0), cell_ofs (x1) - 1);
240 /* Returns the width of the widest cell, excluding headers, along AXIS in
243 max_cell_width (const struct render_page *page, int axis)
/* [x0, x1) is the non-header (body) range of cells along AXIS. */
245 int n = page->n[axis];
246 int x0 = page->h[axis][0];
247 int x1 = n - page->h[axis][1];
250 for (int x = x0; x < x1; x++)
252 int w = cell_width (page, axis, x);
/* NOTE(review): the maximum-accumulation and return statements are elided in
   this excerpt — verify against the full source. */
259 /* A cell that doesn't completely fit on the render_page. */
260 struct render_overflow
262 struct hmap_node node; /* In render_page's 'overflows' hmap. */
264 /* Occupied region of page.
266 d[H][0] is the leftmost column.
267 d[H][1] is the rightmost column, plus 1.
268 d[V][0] is the top row.
269 d[V][1] is the bottom row, plus 1.
271 The cell in its original table might occupy a larger region. This
272 member reflects the size of the cell in the current render_page, after
273 trimming off any rows or columns due to page-breaking. */
/* NOTE(review): the declaration of the 'd' member described above is elided
   in this excerpt. */
276 /* The space that has been trimmed off the cell:
278 overflow[H][0]: space trimmed off its left side.
279 overflow[H][1]: space trimmed off its right side.
280 overflow[V][0]: space trimmed off its top.
281 overflow[V][1]: space trimmed off its bottom.
283 During rendering, this information is used to position the rendered
284 portion of the cell within the available space.
286 When a cell is rendered, sometimes it is permitted to spill over into
287 space that is ordinarily reserved for rules. Either way, this space is
288 still included in overflow values.
290 Suppose, for example, that a cell that joins 2 columns has a width of 60
291 pixels and content "abcdef", that the 2 columns that it joins have
292 widths of 20 and 30 pixels, respectively, and that therefore the rule
293 between the two joined columns has a width of 10 (20 + 10 + 30 = 60).
294 It might render like this, if each character is 10x10, and showing a few
295 extra table cells for context:
303 If this render_page is broken at the rule that separates "gh" from
304 "ijk", then the page that contains the left side of the "abcdef" cell
305 will have overflow[H][1] of 10 + 30 = 40 for its portion of the cell,
306 and the page that contains the right side of the cell will have
307 overflow[H][0] of 20 + 10 = 30. The two resulting pages would look like
325 int overflow[TABLE_N_AXES][2];
328 /* Returns a hash value for (X,Y). */
330 hash_cell (int x, int y)
/* Packs both ordinates into one int before hashing; fine for tables with
   fewer than 2**16 rows. */
332 return hash_int (x + (y << 16), 0);
335 /* Searches PAGE's set of render_overflow for one whose top-left cell is
336 (X,Y). Returns it, if there is one, otherwise a null pointer. */
337 static const struct render_overflow *
338 find_overflow (const struct render_page *page, int x, int y)
/* Fast path: most pages have no overflow cells at all. */
340 if (!hmap_is_empty (&page->overflows))
342 const struct render_overflow *of;
344 HMAP_FOR_EACH_WITH_HASH (of, struct render_overflow, node,
345 hash_cell (x, y), &page->overflows)
/* NOTE(review): struct render_overflow's comment describes 'd' as a
   [TABLE_N_AXES][2] region, which suggests this comparison should read
   of->d[H][0] / of->d[V][0]; the excerpt may have dropped the "[0]"s —
   verify against the full source. */
346 if (x == of->d[H] && y == of->d[V])
353 /* Row or column dimensions. Used to figure the size of a table in
354 render_page_create() and discarded after that. */
/* NOTE(review): the 'struct render_row {' opener and the 'unspanned' and
   'width' member declarations are elided in this excerpt; only their
   descriptive comments remain. */
357 /* Width without considering rows (or columns) that span more than one (or
361 /* Width taking spanned rows (or columns) into consideration. */
365 /* Modifies the 'width' members of the N elements of ROWS so that their sum,
366 when added to rule widths RULES[1] through RULES[N - 1] inclusive, is at
/* NOTE(review): interior lines (e.g. the early 'return' after the
   total_unspanned check, and some closing braces) are elided in this
   excerpt; consult the full source before modifying. */
369 distribute_spanned_width (int width,
370 struct render_row *rows, const int *rules, int n)
372 /* Sum up the unspanned widths of the N rows for use as weights. */
373 int total_unspanned = 0;
374 for (int x = 0; x < n; x++)
375 total_unspanned += rows[x].unspanned;
/* Interior rules (RULES[1] .. RULES[N-1]) also absorb part of the spanned
   width, so count them in the total as well. */
376 for (int x = 0; x < n - 1; x++)
377 total_unspanned += rules[x + 1];
/* Nothing to distribute if the unspanned widths already cover WIDTH. */
378 if (total_unspanned >= width)
381 /* The algorithm used here is based on the following description from HTML 4:
383 For cells that span multiple columns, a simple approach consists of
384 apportioning the min/max widths evenly to each of the constituent
385 columns. A slightly more complex approach is to use the min/max
386 widths of unspanned cells to weight how spanned widths are
387 apportioned. Experiments suggest that a blend of the two approaches
388 gives good results for a wide range of tables.
390 We blend the two approaches half-and-half, except that we cannot use the
391 unspanned weights when 'total_unspanned' is 0 (because that would cause a
394 The calculation we want to do is this:
397 w1 = width * (column's unspanned width) / (total unspanned width)
398 (column's width) = (w0 + w1) / 2
400 We implement it as a precise calculation in integers by multiplying w0 and
401 w1 by the common denominator of all three calculations (d), dividing that
402 out in the column width calculation, and then keeping the remainder for
405 (We actually compute the unspanned width of a column as twice the
406 unspanned width, plus the width of the rule on the left, plus the width of
407 the rule on the right. That way each rule contributes to both the cell on
408 its left and on its right.)
410 long long int d0 = n;
411 long long int d1 = 2LL * MAX (total_unspanned, 1);
412 long long int d = d0 * d1;
413 if (total_unspanned > 0)
/* Start at d/2 so that the integer division below rounds to nearest. */
415 long long int w = d / 2;
416 for (int x = 0; x < n; x++)
419 if (total_unspanned > 0)
421 long long int unspanned = rows[x].unspanned * 2LL;
423 unspanned += rules[x + 1];
425 unspanned += rules[x];
426 w += width * unspanned * d0;
/* Never shrink a row below its existing width; carry the remainder of the
   division forward so the total comes out exact. */
429 rows[x].width = MAX (rows[x].width, w / d);
430 w -= rows[x].width * d;
434 /* Initializes PAGE->cp[AXIS] from the row widths in ROWS and the rule widths
437 accumulate_row_widths (const struct render_page *page, enum table_axis axis,
438 const struct render_row *rows, const int *rules)
440 int n = page->n[axis];
441 int *cp = page->cp[axis];
/* Walk the alternating rule/row sequence, accumulating cumulative positions.
   NOTE(review): the statements that initialize cp[0] and advance 'cp' between
   iterations are elided in this excerpt — the relative indices [1]/[2] below
   only make sense with that advancing pointer; consult the full source. */
443 for (int z = 0; z < n; z++)
445 cp[1] = cp[0] + rules[z];
446 cp[2] = cp[1] + rows[z].width;
/* Final (rightmost/bottommost) rule. */
449 cp[1] = cp[0] + rules[n];
452 /* Returns the sum of widths of the N ROWS and N+1 RULES. */
454 calculate_table_width (int n, const struct render_row *rows, int *rules)
/* NOTE(review): the 'width' accumulator declaration, the rule summation
   statement, and the return are elided in this excerpt. */
457 for (int x = 0; x < n; x++)
458 width += rows[x].width;
459 for (int x = 0; x <= n; x++)
465 /* Rendering utility functions. */
467 /* Returns the width of the rule in TABLE that is at offset Z along axis A, if
468 rendered with PARAMS. */
470 measure_rule (const struct render_params *params, const struct table *table,
471 enum table_axis a, int z)
/* b is the perpendicular axis: we scan every rule segment crossing Z. */
473 enum table_axis b = !a;
475 /* Determine all types of rules that are present, as a bitmap in 'rules'
476 where rule type 't' is present if bit 2**t is set. */
477 unsigned int rules = 0;
/* NOTE(review): the declaration of 'd' (and the assignment d[a] = z) is
   elided in this excerpt. */
480 for (d[b] = 0; d[b] < table->n[b]; d[b]++)
481 rules |= 1u << table_get_rule (table, a, d[H], d[V]).stroke;
483 /* Turn off TABLE_STROKE_NONE because it has width 0 and we needn't bother.
484 However, if the device doesn't support margins, make sure that there is at
485 least a small gap between cells (but we don't need any at the left or
486 right edge of the table). */
487 if (rules & (1u << TABLE_STROKE_NONE))
489 rules &= ~(1u << TABLE_STROKE_NONE);
490 if (z > 0 && z < table->n[a] && !params->supports_margins && a == H)
491 rules |= 1u << TABLE_STROKE_SOLID;
493 /* Calculate maximum width of the rules that are present. */
496 for (size_t i = 0; i < TABLE_N_STROKES; i++)
497 if (rules & (1u << i))
498 width = MAX (width, params->line_widths[i]);
502 /* Allocates and returns a new render_page using PARAMS and TABLE. Allocates
503 space for rendering a table with dimensions given in N. The caller must
504 initialize most of the members itself. */
505 static struct render_page *
506 render_page_allocate__ (const struct render_params *params,
507 struct table *table, int n[TABLE_N_AXES])
509 struct render_page *page = xmalloc (sizeof *page);
510 page->params = params;
/* NOTE(review): assignments of page->table, page->n, and the reference count
   are elided in this excerpt. */
516 for (int i = 0; i < TABLE_N_AXES; i++)
/* cp[] needs 2 * n + 2 slots: alternating rule/cell positions plus the final
   total-extent entry (see struct render_page's cp[] comment). */
518 page->cp[i] = xcalloc ((2 * n[i] + 2) , sizeof *page->cp[i]);
/* One join_crossing entry per rule (n + 1 rules along each axis). */
519 page->join_crossing[i] = xcalloc ((n[i] + 1) , sizeof *page->join_crossing[i]);
522 hmap_init (&page->overflows);
523 memset (page->is_edge_cutoff, 0, sizeof page->is_edge_cutoff);
528 /* Allocates and returns a new render_page using PARAMS and TABLE. Allocates
529 space for all of the members of the new page, but the caller must initialize
530 the 'cp' member itself. */
531 static struct render_page *
532 render_page_allocate (const struct render_params *params, struct table *table)
534 struct render_page *page = render_page_allocate__ (params, table, table->n);
/* Initially the page covers the whole table: headers taken straight from
   TABLE, body region r[] spanning everything between the headers. */
535 for (enum table_axis a = 0; a < TABLE_N_AXES; a++)
537 page->h[a][0] = table->h[a][0];
538 page->h[a][1] = table->h[a][1];
539 page->r[a][0] = table->h[a][0];
540 page->r[a][1] = table->n[a] - table->h[a][1];
545 /* Allocates and returns a new render_page for PARAMS and TABLE, initializing
546 cp[H] in the new page from ROWS and RULES. The caller must still initialize
548 static struct render_page *
549 create_page_with_exact_widths (const struct render_params *params,
551 const struct render_row *rows, int *rules)
553 struct render_page *page = render_page_allocate (params, table);
/* Column widths are taken exactly as given — no interpolation. */
554 accumulate_row_widths (page, H, rows, rules);
558 /* Allocates and returns a new render_page for PARAMS and TABLE.
560 Initializes cp[H] in the new page by setting the width of each row 'i' to
561 somewhere between the minimum cell width ROW_MIN[i].width and the maximum
562 ROW_MAX[i].width. Sets the width of rules to those in RULES.
564 W_MIN is the sum of ROWS_MIN[].width.
566 W_MAX is the sum of ROWS_MAX[].width.
568 The caller must still initialize cp[V]. */
569 static struct render_page *
570 create_page_with_interpolated_widths (const struct render_params *params,
572 const struct render_row *rows_min,
573 const struct render_row *rows_max,
574 int w_min, int w_max, const int *rules)
/* Distribute the leftover space (page width minus minimum table width) to
   columns in proportion to each column's max-min difference, using exact
   integer arithmetic with a running remainder in 'w'. */
576 const int n = table->n[H];
577 const long long int avail = params->size[H] - w_min;
578 const long long int wanted = w_max - w_min;
582 struct render_page *page = render_page_allocate (params, table);
584 int *cph = page->cp[H];
/* Start at wanted/2 so that each division below rounds to nearest. */
586 long long int w = wanted / 2;
587 for (int x = 0; x < n; x++)
589 w += avail * (rows_max[x].width - rows_min[x].width);
590 int extra = w / wanted;
/* NOTE(review): the statements initializing cph[0], subtracting the consumed
   amount from 'w', and advancing 'cph' are elided in this excerpt. */
593 cph[1] = cph[0] + rules[x];
594 cph[2] = cph[1] + rows_min[x].width + extra;
597 cph[1] = cph[0] + rules[n];
/* The remainders must cancel out exactly so the table fills the page. */
599 assert (page->cp[H][n * 2 + 1] == params->size[H]);
/* Records, for every interior rule that CELL spans along AXIS, the rule's
   thickness from RULES into PAGE->join_crossing[AXIS], so that page breaks at
   those rules can reserve the extra space (see struct render_page). */
604 set_join_crossings (struct render_page *page, enum table_axis axis,
605 const struct table_cell *cell, int *rules)
606 for (int z = cell->d[axis][0] + 1; z <= cell->d[axis][1] - 1; z++)
608 page->join_crossing[axis][z] = rules[z];
611 /* Maps a contiguous range of cells from a page to the underlying table along
612 the horizontal or vertical dimension. */
615 int p0; /* First ordinate in the page. */
616 int t0; /* First ordinate in the table. */
617 int n; /* Number of ordinates in page and table. */
620 /* Initializes M to a mapping from PAGE to PAGE->table along axis A. The
621 mapping includes ordinate Z (in PAGE). */
623 get_map (const struct render_page *page, enum table_axis a, int z,
/* Case 1: Z falls in the leading headers, which map to the start of the
   table. NOTE(review): the assignments of m->p0 and m->t0 for this branch
   are elided in this excerpt (presumably both 0). */
626 if (z < page->h[a][0])
630 m->n = page->h[a][0];
/* Case 2: Z falls in the body region, which maps to r[a][0..1] in the
   table. */
632 else if (z < page->n[a] - page->h[a][1])
634 m->p0 = page->h[a][0];
635 m->t0 = page->r[a][0];
636 m->n = page->r[a][1] - page->r[a][0];
/* Case 3: Z falls in the trailing headers, which map to the end of the
   table. */
640 m->p0 = page->n[a] - page->h[a][1];
641 m->t0 = page->table->n[a] - page->table->h[a][1];
642 m->n = page->h[a][1];
646 /* Initializes CELL with the contents of the table cell at column X and row Y
647 within PAGE. When CELL is no longer needed, the caller is responsible for
648 freeing it by calling table_cell_free(CELL).
650 The caller must ensure that CELL is destroyed before TABLE is unref'ed.
652 This is equivalent to table_get_cell(), except X and Y are in terms of the
653 page's rows and columns rather than the underlying table's. */
655 render_get_cell (const struct render_page *page, int x, int y,
656 struct table_cell *cell)
658 int d[TABLE_N_AXES] = { [H] = x, [V] = y };
659 struct map map[TABLE_N_AXES];
/* Translate page coordinates to table coordinates along each axis. */
661 for (enum table_axis a = 0; a < TABLE_N_AXES; a++)
663 struct map *m = &map[a];
664 get_map (page, a, d[a], m);
665 d[a] += m->t0 - m->p0;
667 table_get_cell (page->table, d[H], d[V], cell);
/* Translate the cell's extent back to page coordinates and clamp it to the
   region of the table that this page actually shows. */
669 for (enum table_axis a = 0; a < TABLE_N_AXES; a++)
671 struct map *m = &map[a];
673 for (int i = 0; i < 2; i++)
674 cell->d[a][i] -= m->t0 - m->p0;
675 cell->d[a][0] = MAX (cell->d[a][0], m->p0);
676 cell->d[a][1] = MIN (cell->d[a][1], m->p0 + m->n);
680 /* Creates and returns a new render_page for rendering TABLE on a device
683 The new render_page will be suitable for rendering on a device whose page
684 size is PARAMS->size, but the caller is responsible for actually breaking it
685 up to fit on such a device, using the render_break abstraction. */
686 static struct render_page *
687 render_page_create (const struct render_params *params, struct table *table,
/* NOTE(review): this excerpt elides a number of interior lines (braces,
   some declarations such as 'int w[2]' and 'table_widths[2]', loop-advance
   statements, frees, and the final return); consult the full source before
   modifying. Overall flow: measure rules, compute min/max column widths,
   choose final column widths against the page width, then compute row
   heights and finally discard oversized headers. */
692 int nc = table->n[H];
693 int nr = table->n[V];
695 /* Figure out rule widths. */
696 int *rules[TABLE_N_AXES];
697 for (enum table_axis axis = 0; axis < TABLE_N_AXES; axis++)
699 int n = table->n[axis] + 1;
701 rules[axis] = xnmalloc (n, sizeof *rules);
702 for (int z = 0; z < n; z++)
703 rules[axis][z] = measure_rule (params, table, axis, z);
706 /* Calculate minimum and maximum widths of cells that do not
707 span multiple columns. */
708 struct render_row *columns[2];
709 for (int i = 0; i < 2; i++)
710 columns[i] = xcalloc (nc, sizeof *columns[i]);
711 for (int y = 0; y < nr; y++)
712 for (int x = 0; x < nc;)
714 struct table_cell cell;
716 table_get_cell (table, x, y, &cell);
/* Only measure each cell once, at its top-left corner. */
717 if (y == cell.d[V][0])
719 if (table_cell_colspan (&cell) == 1)
722 params->ops->measure_cell_width (params->aux, &cell,
724 for (int i = 0; i < 2; i++)
725 if (columns[i][x].unspanned < w[i])
726 columns[i][x].unspanned = w[i];
732 /* Distribute widths of spanned columns. */
733 for (int i = 0; i < 2; i++)
734 for (int x = 0; x < nc; x++)
735 columns[i][x].width = columns[i][x].unspanned;
736 for (int y = 0; y < nr; y++)
737 for (int x = 0; x < nc;)
739 struct table_cell cell;
741 table_get_cell (table, x, y, &cell);
742 if (y == cell.d[V][0] && table_cell_colspan (&cell) > 1)
746 params->ops->measure_cell_width (params->aux, &cell,
748 for (int i = 0; i < 2; i++)
749 distribute_spanned_width (w[i], &columns[i][cell.d[H][0]],
750 rules[H], table_cell_colspan (&cell));
/* Widen the whole table to at least MIN_WIDTH if requested. */
755 for (int i = 0; i < 2; i++)
756 distribute_spanned_width (min_width, &columns[i][0], rules[H], nc);
758 /* In pathological cases, spans can cause the minimum width of a column to
759 exceed the maximum width. This bollixes our interpolation algorithm
760 later, so fix it up. */
761 for (int i = 0; i < nc; i++)
762 if (columns[MIN][i].width > columns[MAX][i].width)
763 columns[MAX][i].width = columns[MIN][i].width;
765 /* Decide final column widths. */
767 for (int i = 0; i < 2; i++)
768 table_widths[i] = calculate_table_width (table->n[H],
769 columns[i], rules[H]);
771 struct render_page *page;
772 if (table_widths[MAX] <= params->size[H])
774 /* Fits even with maximum widths. Use them. */
775 page = create_page_with_exact_widths (params, table, columns[MAX],
778 else if (table_widths[MIN] <= params->size[H])
780 /* Fits with minimum widths, so distribute the leftover space. */
781 page = create_page_with_interpolated_widths (
782 params, table, columns[MIN], columns[MAX],
783 table_widths[MIN], table_widths[MAX], rules[H]);
787 /* Doesn't fit even with minimum widths. Assign minimums for now, and
788 later we can break it horizontally into multiple pages. */
789 page = create_page_with_exact_widths (params, table, columns[MIN],
793 /* Calculate heights of cells that do not span multiple rows. */
794 struct render_row *rows = XCALLOC (nr, struct render_row);
795 for (int y = 0; y < nr; y++)
796 for (int x = 0; x < nc;)
798 struct render_row *r = &rows[y];
799 struct table_cell cell;
801 render_get_cell (page, x, y, &cell);
802 if (y == cell.d[V][0])
804 if (table_cell_rowspan (&cell) == 1)
/* Height depends on the (now final) joined width of the cell. */
806 int w = joined_width (page, H, cell.d[H][0], cell.d[H][1]);
807 int h = params->ops->measure_cell_height (params->aux,
809 if (h > r->unspanned)
810 r->unspanned = r->width = h;
/* Record rule thicknesses crossed by multi-row/column cells so that page
   breaks there can reserve extra space. */
813 set_join_crossings (page, V, &cell, rules[V]);
815 if (table_cell_colspan (&cell) > 1)
816 set_join_crossings (page, H, &cell, rules[H]);
820 for (int i = 0; i < 2; i++)
823 /* Distribute heights of spanned rows. */
824 for (int y = 0; y < nr; y++)
825 for (int x = 0; x < nc;)
827 struct table_cell cell;
829 render_get_cell (page, x, y, &cell);
830 if (y == cell.d[V][0] && table_cell_rowspan (&cell) > 1)
832 int w = joined_width (page, H, cell.d[H][0], cell.d[H][1]);
833 int h = params->ops->measure_cell_height (params->aux, &cell, w);
834 distribute_spanned_width (h, &rows[cell.d[V][0]], rules[V],
835 table_cell_rowspan (&cell));
840 /* Decide final row heights. */
841 accumulate_row_widths (page, V, rows, rules[V]);
844 /* Measure headers. If they are "too big", get rid of them. */
845 for (enum table_axis axis = 0; axis < TABLE_N_AXES; axis++)
847 int hw = headers_width (page, axis);
848 if (hw * 2 >= page->params->size[axis]
849 || hw + max_cell_width (page, axis) > page->params->size[axis])
851 page->h[axis][0] = page->h[axis][1] = 0;
852 page->r[axis][0] = 0;
853 page->r[axis][1] = page->n[axis];
863 /* Increases PAGE's reference count. */
865 render_page_ref (const struct render_page *page_)
/* CONST_CAST is safe: the reference count is conceptually mutable even on a
   page exposed to callers as const. */
867 struct render_page *page = CONST_CAST (struct render_page *, page_);
872 /* Decreases PAGE's reference count and destroys PAGE if this causes the
873 reference count to fall to zero. */
875 render_page_unref (struct render_page *page)
/* Null PAGE is tolerated, like free(). */
877 if (page != NULL && --page->ref_cnt == 0)
879 struct render_overflow *overflow, *next;
/* Free every overflow node before destroying the hmap itself.
   NOTE(review): the loop body freeing 'overflow' is elided in this
   excerpt. */
880 HMAP_FOR_EACH_SAFE (overflow, next, struct render_overflow, node,
883 hmap_destroy (&page->overflows);
885 table_unref (page->table);
887 for (int i = 0; i < TABLE_N_AXES; ++i)
889 free (page->join_crossing[i]);
897 /* Returns the size of PAGE along AXIS. (This might be larger than the page
898 size specified in the parameters passed to render_page_create(). Use a
899 render_break to break up a render_page into page-sized chunks.) */
901 render_page_get_size (const struct render_page *page, enum table_axis axis)
/* Last cp[] entry holds the total extent including all rules. */
903 return page->cp[axis][page->n[axis] * 2 + 1];
/* Returns the largest vertical position, not exceeding HEIGHT, at which PAGE
   can be cleanly broken after a whole number of rows. */
907 render_page_get_best_breakpoint (const struct render_page *page, int height)
909 /* If there's no room for at least the top row and the rules above and below
910 it, don't include any of the table. */
/* cp[V][3] is the bottom of the first row's lower rule. */
911 if (page->cp[V][3] > height)
914 /* Otherwise include as many rows and rules as we can. */
/* Odd cp[] indices 5, 7, … are the bottoms of successive rules; back up two
   slots (one row + one rule) when one doesn't fit.
   NOTE(review): the fallback return for when everything fits is elided in
   this excerpt. */
915 for (int y = 5; y <= 2 * page->n[V] + 1; y += 2)
916 if (page->cp[V][y] > height)
917 return page->cp[V][y - 2];
921 /* Drawing render_pages. */
923 /* This is like table_get_rule() except that D is in terms of the page's rows
924 and column rather than the underlying table's. */
925 static struct table_border_style
926 get_rule (const struct render_page *page, enum table_axis axis,
927 const int d_[TABLE_N_AXES])
/* D_ is in doubled (rule/cell interleaved) coordinates; halve to get
   rule indices. */
929 int d[TABLE_N_AXES] = { d_[0] / 2, d_[1] / 2 };
/* Translate the ordinate along AXIS from page to table coordinates,
   accounting for the leading headers, body region, and trailing headers.
   NOTE(review): several lines are elided in this excerpt, including the
   declaration/use of 'd2' (apparently a second table ordinate used to
   combine a header-boundary rule with the adjacent body rule). */
932 enum table_axis a = axis;
933 if (d[a] < page->h[a][0])
935 else if (d[a] <= page->n[a] - page->h[a][1])
937 if (page->h[a][0] && d[a] == page->h[a][0])
939 else if (page->h[a][1] && d[a] == page->n[a] - page->h[a][1])
940 d2 = page->table->n[a] - page->h[a][1];
941 d[a] += page->r[a][0] - page->h[a][0];
944 d[a] += ((page->table->n[a] - page->table->h[a][1])
945 - (page->n[a] - page->h[a][1]));
/* Translate the perpendicular ordinate via the page-to-table map. */
947 enum table_axis b = !axis;
949 get_map (page, b, d[b], &m);
952 struct table_border_style border
953 = table_get_rule (page->table, axis, d[H], d[V]);
/* At a header boundary, merge the header-side rule with the body-side one. */
957 struct table_border_style border2 = table_get_rule (page->table, axis,
959 border.stroke = table_stroke_combine (border.stroke, border2.stroke);
/* Returns true if the current locale's translation requests right-to-left
   output, false for left-to-right (with a warning on stderr for any other
   translation of the marker string). */
971 render_direction_rtl (void)
973 /* TRANSLATORS: Do not translate this string. If the script of your language
974 reads from right to left (eg Persian, Arabic, Hebrew etc), then replace
975 this string with "output-direction-rtl". Otherwise either leave it
976 untranslated or copy it verbatim. */
977 const char *dir = _("output-direction-ltr");
978 if (0 == strcmp ("output-direction-rtl", dir))
/* NOTE(review): the 'return true;' / 'return false;' statements are elided
   in this excerpt. */
981 if (0 != strcmp ("output-direction-ltr", dir))
982 fprintf (stderr, "This localisation has been incorrectly translated. "
983 "Complain to the translator.\n");
/* Draws the rule at doubled coordinates D within PAGE, shifted by OFS, by
   computing the four stroke styles that meet at that point and handing them
   to the device's draw_line callback. */
989 render_rule (const struct render_page *page, const int ofs[TABLE_N_AXES],
990 const int d[TABLE_N_AXES])
992 const struct table_border_style none = { .stroke = TABLE_STROKE_NONE };
/* styles[a][0] is the half of the rule before the crossing point along axis
   a, styles[a][1] the half after it. */
993 struct table_border_style styles[TABLE_N_AXES][2];
995 for (enum table_axis a = 0; a < TABLE_N_AXES; a++)
997 enum table_axis b = !a;
/* Suppress rules along an edge that was cut off mid-cell by a page break.
   NOTE(review): the first condition of this 'if' is elided in this
   excerpt. */
1000 || (page->is_edge_cutoff[a][0] && d[a] == 0)
1001 || (page->is_edge_cutoff[a][1] && d[a] == page->n[a] * 2))
1002 styles[a][0] = styles[a][1] = none;
/* At a rule/rule crossing, look up each half separately. */
1003 else if (is_rule (d[b]))
1007 int e[TABLE_N_AXES];
/* NOTE(review): the initialization of 'e' (presumably D with the ordinate
   along b stepped back) and the guard before this lookup are elided. */
1011 styles[a][0] = get_rule (page, a, e);
1014 styles[a][0] = none;
1016 if (d[b] / 2 < page->n[b])
1017 styles[a][1] = get_rule (page, a, d);
1019 styles[a][1] = none;
/* Mid-cell along axis b: both halves share one style. */
1022 styles[a][0] = styles[a][1] = get_rule (page, a, d);
/* Only call into the device if anything is actually drawn. */
1025 if (styles[H][0].stroke != TABLE_STROKE_NONE
1026 || styles[H][1].stroke != TABLE_STROKE_NONE
1027 || styles[V][0].stroke != TABLE_STROKE_NONE
1028 || styles[V][1].stroke != TABLE_STROKE_NONE)
1030 int bb[TABLE_N_AXES][2];
1032 bb[H][0] = ofs[H] + page->cp[H][d[H]];
1033 bb[H][1] = ofs[H] + page->cp[H][d[H] + 1];
/* Mirror the bounding box horizontally for right-to-left locales. */
1034 if (page->params->rtl)
1036 int temp = bb[H][0];
1037 bb[H][0] = render_page_get_size (page, H) - bb[H][1];
1038 bb[H][1] = render_page_get_size (page, H) - temp;
1040 bb[V][0] = ofs[V] + page->cp[V][d[V]];
1041 bb[V][1] = ofs[V] + page->cp[V][d[V] + 1];
1042 page->params->ops->draw_line (page->params->aux, bb, styles);
/* Draws CELL within PAGE at offset OFS, computing its bounding box and clip
   rectangle (adjusted for overflow from page breaks and for RTL mirroring)
   and handing them to the device's draw_cell callback. */
1047 render_cell (const struct render_page *page, const int ofs[TABLE_N_AXES],
1048 const struct table_cell *cell)
/* Optional debug dump of the cell's extent and contents; disabled by
   default. */
1050 const bool debugging = false;
1054 if (cell->d[H][0] + 1 == cell->d[H][1])
1055 printf ("%d", cell->d[H][0]);
1057 printf ("%d-%d", cell->d[H][0], cell->d[H][1] - 1);
1059 if (cell->d[V][0] + 1 == cell->d[V][1])
1060 printf ("%d", cell->d[V][0]);
1062 printf ("%d-%d", cell->d[V][0], cell->d[V][1] - 1);
1064 char *value = pivot_value_to_string (cell->value, NULL);
1065 printf (": \"%s\"\n", value);
1069 int bb[TABLE_N_AXES][2];
1070 int clip[TABLE_N_AXES][2];
/* Bounding box spans from just after the rule left of the cell to just
   before the rule right of it (hence the *2+1 / *2 cp[] offsets). */
1072 bb[H][0] = clip[H][0] = ofs[H] + page->cp[H][cell->d[H][0] * 2 + 1];
1073 bb[H][1] = clip[H][1] = ofs[H] + page->cp[H][cell->d[H][1] * 2];
/* Mirror horizontally for right-to-left locales. */
1074 if (page->params->rtl)
1076 int temp = bb[H][0];
1077 bb[H][0] = clip[H][0] = render_page_get_size (page, H) - bb[H][1];
1078 bb[H][1] = clip[H][1] = render_page_get_size (page, H) - temp;
1080 bb[V][0] = clip[V][0] = ofs[V] + page->cp[V][cell->d[V][0] * 2 + 1];
1081 bb[V][1] = clip[V][1] = ofs[V] + page->cp[V][cell->d[V][1] * 2];
/* Compute the vertical offset needed for middle/bottom alignment.
   NOTE(review): the lines guarding against negative 'extra' and halving it
   for TABLE_VALIGN_CENTER are elided in this excerpt. */
1083 enum table_valign valign = cell->cell_style->valign;
1084 int valign_offset = 0;
1085 if (valign != TABLE_VALIGN_TOP)
1087 int height = page->params->ops->measure_cell_height (
1088 page->params->aux, cell, bb[H][1] - bb[H][0]);
1089 int extra = bb[V][1] - bb[V][0] - height;
1092 if (valign == TABLE_VALIGN_CENTER)
1094 valign_offset += extra;
/* If this cell was trimmed by a page break, widen the bounding box by the
   trimmed amounts and, where the cell touches a non-cutoff page edge, let
   the clip region extend into the rule space. */
1098 const struct render_overflow *of = find_overflow (
1099 page, cell->d[H][0], cell->d[V][0]);
1101 for (enum table_axis axis = 0; axis < TABLE_N_AXES; axis++)
1103 if (of->overflow[axis][0])
1105 bb[axis][0] -= of->overflow[axis][0];
1106 if (cell->d[axis][0] == 0 && !page->is_edge_cutoff[axis][0])
1107 clip[axis][0] = ofs[axis] + page->cp[axis][cell->d[axis][0] * 2];
1109 if (of->overflow[axis][1])
1111 bb[axis][1] += of->overflow[axis][1];
1112 if (cell->d[axis][1] == page->n[axis]
1113 && !page->is_edge_cutoff[axis][1])
1114 clip[axis][1] = ofs[axis] + page->cp[axis][cell->d[axis][1] * 2
/* How far the cell may spill into the surrounding rules: half of each
   adjacent rule's width. */
1119 int spill[TABLE_N_AXES][2];
1120 for (enum table_axis axis = 0; axis < TABLE_N_AXES; axis++)
1122 spill[axis][0] = rule_width (page, axis, cell->d[axis][0]) / 2;
1123 spill[axis][1] = rule_width (page, axis, cell->d[axis][1]) / 2;
/* Alternate body-row colors; header rows get a fixed index.
   NOTE(review): the middle line of this conditional expression is elided in
   this excerpt. */
1126 int color_idx = (cell->d[V][0] < page->h[V][0]
1127 || page->n[V] - (cell->d[V][0] + 1) < page->h[V][1]
1129 : (cell->d[V][0] - page->h[V][0]) & 1);
1130 page->params->ops->draw_cell (page->params->aux, cell, color_idx,
1131 bb, valign_offset, spill, clip);
1134 /* Draws the cells of PAGE indicated in BB. */
1136 render_page_draw_cells (const struct render_page *page,
1137 int ofs[TABLE_N_AXES], int bb[TABLE_N_AXES][2])
/* BB is in "rule/cell" coordinates: even positions are rules, odd positions
   are cells, so a page with n cells on an axis has 2n + 1 positions.  OFS is
   a pixel offset added to everything drawn.

   First pass: draw the cells.  'x' has no increment in the loop header
   because it jumps to the rule past each cell's right edge (rule_ofs), so a
   cell spanning several columns is visited only once per row band. */
1139 for (int y = bb[V][0]; y < bb[V][1]; y++)
1140 for (int x = bb[H][0]; x < bb[H][1];)
1141 if (!is_rule (x) && !is_rule (y))
1143 struct table_cell cell;
1145 render_get_cell (page, x / 2, y / 2, &cell);
/* Draw a cell only from its topmost visible row (either the top of BB or
   the cell's own top row), so a vertically spanned cell is drawn once,
   not once per row it covers. */
1146 if (y / 2 == bb[V][0] / 2 || y / 2 == cell.d[V][0])
1147 render_cell (page, ofs, &cell);
1148 x = rule_ofs (cell.d[H][1]);
/* Second pass: draw the rules, which occupy every position in BB where
   either coordinate is even. */
1153 for (int y = bb[V][0]; y < bb[V][1]; y++)
1154 for (int x = bb[H][0]; x < bb[H][1]; x++)
1155 if (is_rule (x) || is_rule (y))
1157 int d[TABLE_N_AXES];
1160 render_rule (page, ofs, d);
1164 /* Renders PAGE, by calling the 'draw_line' and 'draw_cell' functions from the
1165 render_params provided to render_page_create(). */
1167 render_page_draw (const struct render_page *page, int ofs[TABLE_N_AXES])
1169 int bb[TABLE_N_AXES][2];
/* Cover the whole page: 2n + 1 rule/cell positions along each axis.
   (bb[H][0] and bb[V][0] are presumably set to 0 on lines not shown in
   this listing — TODO confirm against the full source.) */
1172 bb[H][1] = page->n[H] * 2 + 1;
1174 bb[V][1] = page->n[V] * 2 + 1;
1176 render_page_draw_cells (page, ofs, bb);
1179 /* Returns the greatest value i, 0 <= i < n, such that cp[i] <= x0. */
1181 get_clip_min_extent (int x0, const int cp[], int n)
/* Binary search over the nondecreasing coordinate array CP[]; 'low'/'high'
   bookkeeping is on lines not shown in this listing. */
1188 int middle = low + (high - low) / 2;
1190 if (cp[middle] <= x0)
1202 /* Returns the least value i, 0 <= i < n, such that cp[i] >= x1. */
1204 get_clip_max_extent (int x1, const int cp[], int n)
/* Binary search for the first CP[] entry >= X1. */
1211 int middle = low + (high - low) / 2;
1213 if (cp[middle] >= x1)
1214 best = high = middle;
/* CP[] may contain runs of equal values (zero-width positions); back up to
   the first index in such a run so the caller gets the smallest valid i. */
1219 while (best > 0 && cp[best - 1] == cp[best])
1225 /* Renders the cells of PAGE that intersect (X,Y)-(X+W,Y+H), by calling the
1226 'draw_line' and 'draw_cell' functions from the render_params provided to
1227 render_page_create(). */
1229 render_page_draw_region (const struct render_page *page,
1230 int ofs[TABLE_N_AXES], int clip[TABLE_N_AXES][2])
1232 int bb[TABLE_N_AXES][2];
/* Translate the pixel rectangle CLIP into the range of rule/cell positions
   (indices into page->cp[]) that intersect it, then draw only those. */
1234 bb[H][0] = get_clip_min_extent (clip[H][0], page->cp[H], page->n[H] * 2 + 1);
1235 bb[H][1] = get_clip_max_extent (clip[H][1], page->cp[H], page->n[H] * 2 + 1);
1236 bb[V][0] = get_clip_min_extent (clip[V][0], page->cp[V], page->n[V] * 2 + 1);
1237 bb[V][1] = get_clip_max_extent (clip[V][1], page->cp[V], page->n[V] * 2 + 1);
1239 render_page_draw_cells (page, ofs, bb);
1242 /* Breaking up tables to fit on a page. */
1244 /* An iterator for breaking render_pages into smaller chunks.

   'z' and 'pixel' together record the exact resume point: the next chunk
   starts at cell 'z', and the first 'pixel' pixels of that cell (nonzero only
   when a cell was split across chunks) were already emitted previously. */
1247 struct render_page *page; /* Page being broken up. */
1248 enum table_axis axis; /* Axis along which 'page' is being broken. */
1249 int z; /* Next cell along 'axis'. */
1250 int pixel; /* Pixel offset within cell 'z' (usually 0). */
1251 int hw; /* Width of headers of 'page' along 'axis'. */
1254 static int needed_size (const struct render_break *, int cell);
1255 static bool cell_is_breakable (const struct render_break *, int cell);
1256 static struct render_page *render_page_select (const struct render_page *,
1261 /* Initializes render_break B for breaking PAGE along AXIS.
1262 Takes ownership of PAGE. */
1264 render_break_init (struct render_break *b, struct render_page *page,
1265 enum table_axis axis)
/* Start iteration at the first body cell, i.e. just past the leading
   headers along AXIS. */
1269 b->z = page->h[axis][0];
1271 b->hw = headers_width (page, axis);
1274 /* Initializes B as a render_break structure for which
1275 render_break_has_next() always returns false. */
1277 render_break_init_empty (struct render_break *b)
/* A null 'page' is what makes render_break_has_next() return false;
   the remaining members are presumably cleared on lines not shown here. */
1280 b->axis = TABLE_HORZ;
1286 /* Frees B and unrefs the render_page that it owns.
   Safe on a break initialized with render_break_init_empty(). */
1288 render_break_destroy (struct render_break *b)
1292 render_page_unref (b->page);
1297 /* Returns true if B still has cells that are yet to be returned,
1298 false if all of B's page has been processed. */
1300 render_break_has_next (const struct render_break *b)
1302 const struct render_page *page = b->page;
1303 enum table_axis axis = b->axis;
/* More remains while the cursor 'z' is still before the trailing headers;
   an "empty" break has a null page and always reports false. */
1305 return page != NULL && b->z < page->n[axis] - page->h[axis][1];
1308 /* Returns a new render_page that is up to SIZE pixels wide along B's axis.
1309 Returns a null pointer if B has already been completely broken up, or if
1310 SIZE is too small to reasonably render any cells. The latter will never
1311 happen if SIZE is at least as large as the page size passed to
1312 render_page_create() along B's axis. */
1313 static struct render_page *
1314 render_break_next (struct render_break *b, int size)
1316 const struct render_page *page = b->page;
1317 enum table_axis axis = b->axis;
1318 struct render_page *subpage;
1320 if (!render_break_has_next (b))
/* Scan forward from the resume point, greedily extending the chunk one cell
   at a time while needed_size() says it still fits in SIZE.  When the next
   whole cell does not fit, consider splitting it mid-cell below. */
1325 for (z = b->z; z < page->n[axis] - page->h[axis][1]; z++)
1327 int needed = needed_size (b, z + 1);
1330 if (cell_is_breakable (b, z))
1332 /* If there is no right header and we render a partial cell on
1333 the right side of the body, then we omit the rightmost rule of
1334 the body. Otherwise the rendering is deceptive because it
1335 looks like the whole cell is present instead of a partial
1338 This is similar to code for the left side in needed_size(). */
1339 int rule_allowance = (page->h[axis][1]
1341 : rule_width (page, axis, z));
1343 /* The amount that, if we added cell 'z', the rendering would
1344 overfill the allocated 'size'. */
1345 int overhang = needed - size - rule_allowance;
1347 /* The width of cell 'z'. */
1348 int cell_size = cell_width (page, axis, z);
1350 /* The amount trimmed off the left side of 'z',
1351 and the amount left to render. */
1352 int cell_ofs = z == b->z ? b->pixel : 0;
1353 int cell_left = cell_size - cell_ofs;
1355 /* A small but visible width. */
1356 int em = page->params->font_size[axis];
1358 /* If some of the cell remains to render,
1359 and there would still be some of the cell left afterward,
1360 then partially render that much of the cell. */
1361 pixel = (cell_left && cell_left > overhang
1362 ? cell_left - overhang + cell_ofs
1365 /* If there would be only a tiny amount of the cell left after
1366 rendering it partially, reduce the amount rendered slightly
1367 to make the output look a little better. */
1368 if (pixel + em > cell_size)
1369 pixel = MAX (pixel - em, 0);
1371 /* If we're breaking vertically, then consider whether the cells
1372 being broken have a better internal breakpoint than the exact
1373 number of pixels available, which might look bad e.g. because
1374 it breaks in the middle of a line of text. */
1375 if (axis == TABLE_VERT && page->params->ops->adjust_break)
1376 for (int x = 0; x < page->n[H];)
1378 struct table_cell cell;
1380 render_get_cell (page, x, z, &cell);
1381 int w = joined_width (page, H, cell.d[H][0], cell.d[H][1]);
/* Let the driver (e.g. the text layout engine) nudge the break point
   down to a cleaner spot within this cell, such as a line boundary. */
1382 int better_pixel = page->params->ops->adjust_break (
1383 page->params->aux, &cell, w, pixel);
1386 if (better_pixel < pixel)
1388 if (better_pixel > (z == b->z ? b->pixel : 0))
1390 pixel = better_pixel;
1393 else if (better_pixel == 0 && z != b->z)
/* Nothing fits at all: refuse rather than emit an empty chunk. */
1405 if (z == b->z && !pixel)
/* Slice out [b->z, z) (plus 'pixel' of cell z, if splitting) together
   with the headers, and advance the resume point for the next call. */
1408 subpage = render_page_select (page, axis, b->z, b->pixel,
1410 pixel ? cell_width (page, axis, z) - pixel
1417 /* Returns the width that would be required along B's axis to render a page
1418 from B's current position up to but not including CELL. */
1420 needed_size (const struct render_break *b, int cell)
1422 const struct render_page *page = b->page;
1423 enum table_axis axis = b->axis;
1425 /* Width of left header not including its rightmost rule. */
1426 int size = axis_width (page, axis, 0, rule_ofs (page->h[axis][0]));
1428 /* If we have a pixel offset and there is no left header, then we omit the
1429 leftmost rule of the body. Otherwise the rendering is deceptive because
1430 it looks like the whole cell is present instead of a partial cell.
1432 Otherwise (if there are headers) we will be merging two rules: the
1433 rightmost rule in the header and the leftmost rule in the body. We assume
1434 that the width of a merged rule is the larger of the widths of either rule
1436 if (b->pixel == 0 || page->h[axis][0])
1437 size += MAX (rule_width (page, axis, page->h[axis][0]),
1438 rule_width (page, axis, b->z));
1440 /* Width of body, minus any pixel offset in the leftmost cell. */
1441 size += joined_width (page, axis, b->z, cell) - b->pixel;
1443 /* Width of rightmost rule in body merged with leftmost rule in headers. */
1444 size += MAX (rule_width_r (page, axis, page->h[axis][1]),
1445 rule_width (page, axis, cell));
1447 /* Width of right header not including its leftmost rule. */
1448 size += axis_width (page, axis, rule_ofs_r (page, axis, page->h[axis][1]),
1449 rule_ofs_r (page, axis, 0));
1451 /* Join crossing.  Only applies when headers exist on both sides of the
1452 body along this axis. */
1452 if (page->h[axis][0] && page->h[axis][1])
1453 size += page->join_crossing[axis][b->z];
1458 /* Returns true if CELL along B's axis may be broken across a page boundary.
1460 This is just a heuristic. Breaking cells across page boundaries can save
1461 space, but it looks ugly. */
1463 cell_is_breakable (const struct render_break *b, int cell)
1465 const struct render_page *page = b->page;
1466 enum table_axis axis = b->axis;
/* Only cells at least as wide as the device's configured minimum break
   width are worth splitting. */
1468 return cell_width (page, axis, cell) >= page->params->min_break[axis];
1475 const struct render_params *params; /* Parameters of the target device. */
1478 /* An array of "render_page"s to be rendered, in order, vertically. There
1479 may be up to 5 pages, for the pivot table's title, layers, body,
1480 captions, and footnotes. */
1481 struct render_page *pages[5];
/* Breaks for the chunk currently being produced: first horizontally
   (x_break) across the current page, then vertically (y_break) within
   each horizontal slice. */
1485 struct render_break x_break;
1486 struct render_break y_break;
/* Renders TABLE at MIN_WIDTH (or wider) and appends the result to P's page
   list.  Takes ownership of TABLE (passed on to render_page_create). */
1490 render_pager_add_table (struct render_pager *p, struct table *table,
1494 p->pages[p->n_pages++] = render_page_create (p->params, table, min_width);
/* Begins breaking up P's next page: takes a reference to it for the
   horizontal break iterator and resets the vertical break to empty (it is
   filled in lazily by render_pager_has_next()). */
1498 render_pager_start_page (struct render_pager *p)
1500 render_break_init (&p->x_break, render_page_ref (p->pages[p->cur_page++]),
1502 render_break_init_empty (&p->y_break);
1505 /* Creates and returns a new render_pager for rendering PT on the device
1506 with the given PARAMS.  LAYER_INDEXES selects the pivot table layer to
   render; a null pointer means the table's current layer. */
1507 struct render_pager *
1508 render_pager_create (const struct render_params *params,
1509 const struct pivot_table *pt,
1510 const size_t *layer_indexes)
1513 layer_indexes = pt->current_layer;
/* Split PT into up to five component tables, rendered top to bottom. */
1515 struct table *title, *layers, *body, *caption, *footnotes;
1516 pivot_output (pt, layer_indexes, params->printing,
1517 &title, &layers, &body, &caption, &footnotes, NULL, NULL);
1519 /* Figure out the width of the body of the table. Use this to determine the
1521 struct render_page *body_page = render_page_create (params, body, 0);
1522 int body_width = table_width (body_page, H);
1524 if (body_width > params->size[H])
/* Body is too wide for the device: either scale it down to fit (when the
   look requests it and the device can scale) ... */
1526 if (pt->look->shrink_to_fit[H] && params->ops->scale)
1527 scale = params->size[H] / (double) body_width;
/* ... or find how wide the first broken-off slice would be and use that
   as the width to which title/layers are rendered. */
1530 struct render_break b;
1531 render_break_init (&b, render_page_ref (body_page), H);
1532 struct render_page *subpage
1533 = render_break_next (&b, params->size[H]);
1534 body_width = subpage ? subpage->cp[H][2 * subpage->n[H] + 1] : 0;
1535 render_page_unref (subpage);
1536 render_break_destroy (&b);
1540 /* Create the pager.  Title and layers are forced to the body's width so
   the stacked tables line up. */
1541 struct render_pager *p = xmalloc (sizeof *p);
1542 *p = (struct render_pager) { .params = params, .scale = scale };
1543 render_pager_add_table (p, title, body_width);
1544 render_pager_add_table (p, layers, body_width);
1545 p->pages[p->n_pages++] = body_page;
1546 render_pager_add_table (p, caption, 0);
1547 render_pager_add_table (p, footnotes, 0);
1548 assert (p->n_pages <= sizeof p->pages / sizeof *p->pages);
1550 /* If we're shrinking tables to fit the page length, then adjust the scale
1553 XXX This will sometimes shrink more than needed, because adjusting the
1554 scale factor allows for cells to be "wider", which means that sometimes
1555 they won't break across as much vertical space, thus shrinking the table
1556 vertically more than the scale would imply. Shrinking only as much as
1557 necessary would require an iterative search. */
1558 if (pt->look->shrink_to_fit[V] && params->ops->scale)
1560 int total_height = 0;
1561 for (size_t i = 0; i < p->n_pages; i++)
1562 total_height += table_width (p->pages[i], V);
1563 if (total_height * p->scale >= params->size[V])
1564 p->scale *= params->size[V] / (double) total_height;
1567 render_pager_start_page (p);
/* Destroys P, releasing both break iterators and every page it owns.
   (Presumably also frees P itself on lines not shown in this listing —
   TODO confirm against the full source.) */
1574 render_pager_destroy (struct render_pager *p)
1578 render_break_destroy (&p->x_break);
1579 render_break_destroy (&p->y_break);
1580 for (size_t i = 0; i < p->n_pages; i++)
1581 render_page_unref (p->pages[i]);
1586 /* Returns true if P has content remaining to render, false if rendering is
   complete.  Despite the const parameter, this lazily advances P's internal
   break iterators (hence the CONST_CAST). */
1589 render_pager_has_next (const struct render_pager *p_)
1591 struct render_pager *p = CONST_CAST (struct render_pager *, p_);
/* Refill the vertical break from the next horizontal slice; when the
   horizontal break is exhausted too, move on to the next page, or stop by
   leaving both breaks empty. */
1593 while (!render_break_has_next (&p->y_break))
1595 render_break_destroy (&p->y_break);
1596 if (!render_break_has_next (&p->x_break))
1598 render_break_destroy (&p->x_break);
1599 if (p->cur_page >= p->n_pages)
1601 render_break_init_empty (&p->x_break);
1602 render_break_init_empty (&p->y_break);
1605 render_pager_start_page (p);
/* Horizontal slices are cut at the device width divided by the scale,
   since scaled output shrinks the slice back down to the device width. */
1609 &p->y_break, render_break_next (&p->x_break,
1610 p->params->size[H] / p->scale), V);
1615 /* Draws a chunk of content from P to fit in a space that has vertical size
1616 SPACE and the horizontal size specified in the render_params passed to
1617 render_page_create(). Returns the amount of space actually used by the
1618 rendered chunk, which will be 0 if SPACE is too small to render anything or
1619 if no content remains (use render_pager_has_next() to distinguish these
1622 render_pager_draw_next (struct render_pager *p, int space)
/* Apply the shrink-to-fit scale on the device before drawing (and undo it
   afterward, below). */
1624 if (p->scale != 1.0)
1626 p->params->ops->scale (p->params->aux, p->scale);
1630 int ofs[TABLE_N_AXES] = { 0, 0 };
1631 size_t start_page = SIZE_MAX;
/* Stack vertically as many chunks as fit in SPACE.  'start_page' guards
   against looping forever without progress: stop if we come back around to
   the same page we started this call on. */
1633 while (render_pager_has_next (p))
1635 if (start_page == p->cur_page)
1637 start_page = p->cur_page;
1639 struct render_page *page
1640 = render_break_next (&p->y_break, space - ofs[V]);
1644 render_page_draw (page, ofs);
1645 ofs[V] += render_page_get_size (page, V);
1646 render_page_unref (page);
1649 if (p->scale != 1.0)
1655 /* Draws all of P's content at once, without pagination, by delegating to
   render_pager_draw_region() with an effectively unbounded region. */
1657 render_pager_draw (const struct render_pager *p)
1659 render_pager_draw_region (p, 0, 0, INT_MAX, INT_MAX);
1662 /* Draws the region of P's content that lies in the region (X,Y)-(X+W,Y+H).
1663 Some extra content might be drawn; the device should perform clipping as
   necessary. */
1666 render_pager_draw_region (const struct render_pager *p,
1667 int x, int y, int w, int h)
1669 int ofs[TABLE_N_AXES] = { 0, 0 };
1670 int clip[TABLE_N_AXES][2];
/* Walk the pages stacked top to bottom; for each one, intersect the
   requested region with the page's vertical extent and draw only pages
   whose intersection is nonempty.  Clip coordinates are made page-relative
   by subtracting the running vertical offset. */
1674 for (size_t i = 0; i < p->n_pages; i++)
1676 const struct render_page *page = p->pages[i];
1677 int size = render_page_get_size (page, V);
1679 clip[V][0] = MAX (y, ofs[V]) - ofs[V];
1680 clip[V][1] = MIN (y + h, ofs[V] + size) - ofs[V];
1681 if (clip[V][1] > clip[V][0])
1682 render_page_draw_region (page, ofs, clip);
1688 /* Returns the size of P's content along AXIS; i.e. the content's width if AXIS
1689 is TABLE_HORZ and its length if AXIS is TABLE_VERT. */
1691 render_pager_get_size (const struct render_pager *p, enum table_axis axis)
/* Pages stack vertically, so the width is the maximum over pages while the
   height is the sum. */
1695 for (size_t i = 0; i < p->n_pages; i++)
1697 int subsize = render_page_get_size (p->pages[i], axis);
1698 size = axis == H ? MAX (size, subsize) : size + subsize;
/* Returns the best vertical breakpoint for P's content that is no greater
   than HEIGHT, by delegating to the page that HEIGHT falls within and
   converting its page-relative answer back to pager coordinates. */
1705 render_pager_get_best_breakpoint (const struct render_pager *p, int height)
1710 for (i = 0; i < p->n_pages; i++)
1712 int size = render_page_get_size (p->pages[i], V);
/* 'y' accumulates the heights of the pages already passed (its update is
   on a line not shown in this listing). */
1713 if (y + size >= height)
1714 return render_page_get_best_breakpoint (p->pages[i], height - y) + y;
1721 /* render_page_select() and helpers. */
/* Bundles the parameters of one in-progress render_page_select() call so
   the overflow helpers below don't each need seven arguments. */
1723 struct render_page_selection
1725 const struct render_page *page; /* Page whose slice we are selecting. */
1726 struct render_page *subpage; /* New page under construction. */
1727 enum table_axis a; /* Axis of 'page' along which 'subpage' is a slice. */
1728 enum table_axis b; /* The opposite of 'a'. */
1729 int z0; /* First cell along 'a' being selected. */
1730 int z1; /* Last cell being selected, plus 1. */
1731 int p0; /* Number of pixels to trim off left side of z0. */
1732 int p1; /* Number of pixels to trim off right side of z1-1. */
1735 static void cell_to_subpage (struct render_page_selection *,
1736 const struct table_cell *,
1737 int subcell[TABLE_N_AXES]);
1738 static const struct render_overflow *find_overflow_for_cell (
1739 struct render_page_selection *, const struct table_cell *);
1740 static struct render_overflow *insert_overflow (struct render_page_selection *,
1741 const struct table_cell *);
1743 /* Creates and returns a new render_page whose contents are a subregion of
1744 PAGE's contents. The new render_page includes cells Z0 through Z1
1745 (exclusive) along AXIS, plus any headers on AXIS.
1747 If P0 is nonzero, then it is a number of pixels to exclude from the left or
1748 top (according to AXIS) of cell Z0. Similarly, P1 is a number of pixels to
1749 exclude from the right or bottom of cell Z1 - 1. (P0 and P1 are used to
1750 render cells that are too large to fit on a single page.)
1752 The whole of axis !AXIS is included. (The caller may follow up with another
1753 call to render_page_select() to select on !AXIS to select on that axis as
1756 The caller retains ownership of PAGE, which is not modified. */
1757 static struct render_page *
1758 render_page_select (const struct render_page *page, enum table_axis axis,
1759 int z0, int p0, int z1, int p1)
1761 enum table_axis a = axis;
1762 enum table_axis b = !a;
1764 /* Optimize case where all of PAGE is selected by just incrementing the
1766 if (z0 == page->h[a][0] && p0 == 0
1767 && z1 == page->n[a] - page->h[a][1] && p1 == 0)
1769 struct render_page *page_rw = CONST_CAST (struct render_page *, page);
1774 /* Allocate subpage.  'trim' counts the body cells dropped from the leading
   and trailing ends along 'a'; headers are always kept. */
1775 int trim[2] = { z0 - page->h[a][0], (page->n[a] - page->h[a][1]) - z1 };
1776 int n[TABLE_N_AXES] = { [H] = page->n[H], [V] = page->n[V] };
1777 n[a] -= trim[0] + trim[1];
1778 struct render_page *subpage = render_page_allocate__ (
1779 page->params, table_ref (page->table), n);
1780 for (enum table_axis k = 0; k < TABLE_N_AXES; k++)
1782 subpage->h[k][0] = page->h[k][0];
1783 subpage->h[k][1] = page->h[k][1];
1784 subpage->r[k][0] = page->r[k][0];
1785 subpage->r[k][1] = page->r[k][1];
1787 subpage->r[a][0] += trim[0];
1788 subpage->r[a][1] -= trim[1];
1790 /* An edge is cut off if it was cut off in PAGE or if we're trimming pixels
1791 off that side of the page and there are no headers. */
1792 subpage->is_edge_cutoff[a][0] =
1793 subpage->h[a][0] == 0 && (p0 || (z0 == 0 && page->is_edge_cutoff[a][0]));
1794 subpage->is_edge_cutoff[a][1] =
1795 subpage->h[a][1] == 0 && (p1 || (z1 == page->n[a]
1796 && page->is_edge_cutoff[a][1]));
1797 subpage->is_edge_cutoff[b][0] = page->is_edge_cutoff[b][0];
1798 subpage->is_edge_cutoff[b][1] = page->is_edge_cutoff[b][1];
1800 /* Select join crossings from PAGE into subpage: leading headers, then the
   selected body range, then trailing headers. */
1801 int *jc = subpage->join_crossing[a];
1802 for (int z = 0; z < page->h[a][0]; z++)
1803 *jc++ = page->join_crossing[a][z];
1804 for (int z = z0; z <= z1; z++)
1805 *jc++ = page->join_crossing[a][z];
1806 for (int z = page->n[a] - page->h[a][1]; z < page->n[a]; z++)
1807 *jc++ = page->join_crossing[a][z];
1808 assert (jc == &subpage->join_crossing[a][subpage->n[a] + 1]);
1810 memcpy (subpage->join_crossing[b], page->join_crossing[b],
1811 (subpage->n[b] + 1) * sizeof **subpage->join_crossing);
1813 /* Select widths from PAGE into subpage.  'cp' holds cumulative pixel
   positions, so each destination entry is built from the previous one plus
   the corresponding source span's width. */
1814 int *scp = page->cp[a];
1815 int *dcp = subpage->cp[a];
1817 for (int z = 0; z <= rule_ofs (subpage->h[a][0]); z++, dcp++)
/* A cut-off leading edge renders its outermost rule with zero width. */
1819 int w = !z && subpage->is_edge_cutoff[a][0] ? 0 : scp[z + 1] - scp[z];
1820 dcp[1] = dcp[0] + w;
1822 for (int z = cell_ofs (z0); z <= cell_ofs (z1 - 1); z++, dcp++)
1824 dcp[1] = dcp[0] + (scp[z + 1] - scp[z]);
1825 if (z == cell_ofs (z0))
1828 if (page->h[a][0] && page->h[a][1])
1829 dcp[1] += page->join_crossing[a][z / 2];
1831 if (z == cell_ofs (z1 - 1))
1834 for (int z = rule_ofs_r (page, a, subpage->h[a][1]);
1835 z <= rule_ofs_r (page, a, 0); z++, dcp++)
1837 if (z == rule_ofs_r (page, a, 0) && subpage->is_edge_cutoff[a][1])
1840 dcp[1] = dcp[0] + (scp[z + 1] - scp[z]);
1842 assert (dcp == &subpage->cp[a][2 * subpage->n[a] + 1]);
1844 for (int z = 0; z < page->n[b] * 2 + 2; z++)
1845 subpage->cp[b][z] = page->cp[b][z];
1847 /* Add new overflows.  Cells straddling the selection's leading edge (or
   trimmed by P0) get overflow records so their content can be re-drawn
   shifted on the subpage. */
1848 struct render_page_selection s = {
1859 if (!page->h[a][0] || z0 > page->h[a][0] || p0)
1860 for (int z = 0; z < page->n[b];)
1862 int d[TABLE_N_AXES];
1866 struct table_cell cell;
1867 render_get_cell (page, d[H], d[V], &cell);
1868 bool overflow0 = p0 || cell.d[a][0] < z0;
1869 bool overflow1 = cell.d[a][1] > z1 || (cell.d[a][1] == z1 && p1);
1870 if (overflow0 || overflow1)
1872 struct render_overflow *ro = insert_overflow (&s, &cell);
1876 ro->overflow[a][0] += p0 + axis_width (
1877 page, a, cell_ofs (cell.d[a][0]), cell_ofs (z0));
1878 if (page->h[a][0] && page->h[a][1])
1879 ro->overflow[a][0] -= page->join_crossing[a][cell.d[a][0]
1885 ro->overflow[a][1] += p1 + axis_width (
1886 page, a, cell_ofs (z1), cell_ofs (cell.d[a][1]));
1887 if (page->h[a][0] && page->h[a][1])
1888 ro->overflow[a][1] -= page->join_crossing[a][cell.d[a][1]];
/* Likewise for cells straddling the trailing edge (or trimmed by P1) that
   the leading-edge pass above did not already record. */
1894 if (!page->h[a][1] || z1 < page->n[a] - page->h[a][1] || p1)
1895 for (int z = 0; z < page->n[b];)
1897 int d[TABLE_N_AXES];
1901 struct table_cell cell;
1902 render_get_cell (page, d[H], d[V], &cell);
1903 if ((cell.d[a][1] > z1 || (cell.d[a][1] == z1 && p1))
1904 && find_overflow_for_cell (&s, &cell) == NULL)
1906 struct render_overflow *ro = insert_overflow (&s, &cell);
1907 ro->overflow[a][1] += p1 + axis_width (page, a, cell_ofs (z1),
1908 cell_ofs (cell.d[a][1]));
1913 /* Copy overflows from PAGE into subpage. */
1914 struct render_overflow *ro;
1915 HMAP_FOR_EACH (ro, struct render_overflow, node, &page->overflows)
1917 struct table_cell cell;
1919 table_get_cell (page->table, ro->d[H], ro->d[V], &cell);
1920 if (cell.d[a][1] > z0 && cell.d[a][0] < z1
1921 && find_overflow_for_cell (&s, &cell) == NULL)
1922 insert_overflow (&s, &cell);
1928 /* Given CELL, a table_cell within S->page, stores in SUBCELL the (x,y)
1929 coordinates of the top-left cell as it will appear in S->subpage.
1931 CELL must actually intersect the region of S->page that is being selected
1932 by render_page_select() or the results will not make any sense. */
1934 cell_to_subpage (struct render_page_selection *s,
1935 const struct table_cell *cell, int subcell[TABLE_N_AXES])
1937 enum table_axis a = s->a;
1938 enum table_axis b = s->b;
1939 int ha0 = s->subpage->h[a][0];
/* Shift by the selection's start; clamp to the first body cell (ha0) for a
   cell that begins before the selected region.  The cross axis is copied
   unchanged since the whole of that axis is selected. */
1941 subcell[a] = MAX (cell->d[a][0] - s->z0 + ha0, ha0);
1942 subcell[b] = cell->d[b][0];
1945 /* Given CELL, a table_cell within S->page, returns the render_overflow for
1946 that cell in S->subpage, if there is one, and a null pointer otherwise.
1948 CELL must actually intersect the region of S->page that is being selected
1949 by render_page_select() or the results will not make any sense. */
1950 static const struct render_overflow *
1951 find_overflow_for_cell (struct render_page_selection *s,
1952 const struct table_cell *cell)
/* Translate CELL's coordinates into subpage space, then look it up in the
   subpage's overflow map. */
1956 cell_to_subpage (s, cell, subcell);
1957 return find_overflow (s->subpage, subcell[H], subcell[V]);
1960 /* Given CELL, a table_cell within S->page, inserts a render_overflow for that
1961 cell in S->subpage (which must not already exist). Initializes the new
1962 render_overflow's 'overflow' member from the overflow for CELL in S->page,
1965 CELL must actually intersect the region of S->page that is being selected
1966 by render_page_select() or the results will not make any sense. */
1967 static struct render_overflow *
1968 insert_overflow (struct render_page_selection *s,
1969 const struct table_cell *cell)
1971 struct render_overflow *of = XZALLOC (struct render_overflow);
1972 cell_to_subpage (s, cell, of->d);
1973 hmap_insert (&s->subpage->overflows, &of->node,
1974 hash_cell (of->d[H], of->d[V]));
1976 const struct render_overflow *old
1977 = find_overflow (s->page, cell->d[H][0], cell->d[V][0]);
1979 memcpy (of->overflow, old->overflow, sizeof of->overflow);