1 /* PSPP - a program for statistical analysis.
2 Copyright (C) 2009, 2010, 2011, 2013, 2014, 2016 Free Software Foundation, Inc.
4 This program is free software: you can redistribute it and/or modify
5 it under the terms of the GNU General Public License as published by
6 the Free Software Foundation, either version 3 of the License, or
7 (at your option) any later version.
9 This program is distributed in the hope that it will be useful,
10 but WITHOUT ANY WARRANTY; without even the implied warranty of
11 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 GNU General Public License for more details.
14 You should have received a copy of the GNU General Public License
15 along with this program. If not, see <http://www.gnu.org/licenses/>. */
#include <assert.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include "libpspp/assertion.h"
#include "libpspp/cast.h"
#include "libpspp/hash-functions.h"
#include "libpspp/hmap.h"
#include "libpspp/pool.h"
#include "output/pivot-output.h"
#include "output/pivot-table.h"
#include "output/render.h"
#include "output/table.h"

#include "gl/minmax.h"
#include "gl/xalloc.h"
38 #define _(msgid) gettext (msgid)
40 /* This file uses TABLE_HORZ and TABLE_VERT enough to warrant abbreviating. */
44 /* A layout for rendering a specific table on a specific device.
46 May represent the layout of an entire table presented to
47 render_page_create(), or a rectangular subregion of a table broken out using
48 render_break_next() to allow a table to be broken across multiple pages.
50 A page's size is not limited to the size passed in as part of render_params.
51 render_pager breaks a render_page into smaller render_pages that will fit in
52 the available space. */
55 const struct render_params *params; /* Parameters of the target device. */
56 struct table *table; /* Table rendered. */
59 /* Region of 'table' to render.
61 The horizontal cells rendered are the leftmost h[H][0], then
62 r[H][0] through r[H][1], exclusive, then the rightmost h[H][1].
64 The vertical cells rendered are the topmost h[V][0], then r[V][0]
65 through r[V][1], exclusive, then the bottommost h[V][1].
67 n[H] = h[H][0] + (r[H][1] - r[H][0]) + h[H][1]
68 n[V] = h[V][0] + (r[V][1] - r[V][0]) + h[V][1]
70 int h[TABLE_N_AXES][2];
71 int r[TABLE_N_AXES][2];
76 cp[H] represents x positions within the table.
78 cp[H][1] = the width of the leftmost vertical rule.
79 cp[H][2] = cp[H][1] + the width of the leftmost column.
80 cp[H][3] = cp[H][2] + the width of the second-from-left vertical rule.
82 cp[H][2 * n[H]] = x position of the rightmost vertical rule.
83 cp[H][2 * n[H] + 1] = total table width including all rules.
85 Similarly, cp[V] represents y positions within the table.
87 cp[V][1] = the height of the topmost horizontal rule.
88 cp[V][2] = cp[V][1] + the height of the topmost row.
89 cp[V][3] = cp[V][2] + the height of the second-from-top horizontal rule.
91 cp[V][2 * n[V]] = y position of the bottommost horizontal rule.
92 cp[V][2 * n[V] + 1] = total table height including all rules.
94 Rules and columns can have width or height 0, in which case consecutive
95 values in this array are equal. */
96 int *cp[TABLE_N_AXES];
98 /* render_break_next() can break a table such that some cells are not fully
99 contained within a render_page. This will happen if a cell is too wide
100 or two tall to fit on a single page, or if a cell spans multiple rows or
101 columns and the page only includes some of those rows or columns.
103 This hash table contains "struct render_overflow"s that represents each
104 such cell that doesn't completely fit on this page.
106 Each overflow cell borders at least one header edge of the table and may
107 border more. (A single table cell that is so large that it fills the
108 entire page can overflow on all four sides!) */
109 struct hmap overflows;
111 /* If a single column (or row) is too wide (or tall) to fit on a page
112 reasonably, then render_break_next() will split a single row or column
113 across multiple render_pages. This member indicates when this has
116 is_edge_cutoff[H][0] is true if pixels have been cut off the left side
117 of the leftmost column in this page, and false otherwise.
119 is_edge_cutoff[H][1] is true if pixels have been cut off the right side
120 of the rightmost column in this page, and false otherwise.
122 is_edge_cutoff[V][0] and is_edge_cutoff[V][1] are similar for the top
123 and bottom of the table.
125 The effect of is_edge_cutoff is to prevent rules along the edge in
126 question from being rendered.
128 When is_edge_cutoff is true for a given edge, the 'overflows' hmap will
129 contain a node for each cell along that edge. */
130 bool is_edge_cutoff[TABLE_N_AXES][2];
132 /* If part of a joined cell would be cut off by breaking a table along
133 'axis' at the rule with offset 'z' (where 0 <= z <= n[axis]), then
134 join_crossing[axis][z] is the thickness of the rule that would be cut
137 This is used to know to allocate extra space for breaking at such a
138 position, so that part of the cell's content is not lost.
140 This affects breaking a table only when headers are present. When
141 headers are not present, the rule's thickness is used for cell content,
142 so no part of the cell's content is lost (and in fact it is duplicated
143 across both pages). */
144 int *join_crossing[TABLE_N_AXES];
147 static struct render_page *render_page_create (const struct render_params *,
148 struct table *, int min_width);
150 struct render_page *render_page_ref (const struct render_page *page_);
151 static void render_page_unref (struct render_page *);
153 /* Returns the offset in struct render_page's cp[axis] array of the rule with
154 index RULE_IDX. That is, if RULE_IDX is 0, then the offset is that of the
155 leftmost or topmost rule; if RULE_IDX is 1, then the offset is that of the
156 next rule to the right (or below); and so on. */
158 rule_ofs (int rule_idx)
163 /* Returns the offset in struct render_page's cp[axis] array of the rule with
164 index RULE_IDX_R, which counts from the right side (or bottom) of the page
165 left (or up), according to whether AXIS is H or V, respectively. That is,
166 if RULE_IDX_R is 0, then the offset is that of the rightmost or bottommost
167 rule; if RULE_IDX is 1, then the offset is that of the next rule to the left
168 (or above); and so on. */
170 rule_ofs_r (const struct render_page *page, int axis, int rule_idx_r)
172 return (page->n[axis] - rule_idx_r) * 2;
175 /* Returns the offset in struct render_page's cp[axis] array of the cell with
176 index CELL_IDX. That is, if CELL_IDX is 0, then the offset is that of the
177 leftmost or topmost cell; if CELL_IDX is 1, then the offset is that of the
178 next cell to the right (or below); and so on. */
180 cell_ofs (int cell_idx)
182 return cell_idx * 2 + 1;
185 /* Returns the width of PAGE along AXIS from OFS0 to OFS1, exclusive. */
187 axis_width (const struct render_page *page, int axis, int ofs0, int ofs1)
189 return page->cp[axis][ofs1] - page->cp[axis][ofs0];
192 /* Returns the total width of PAGE along AXIS. */
194 table_width (const struct render_page *page, int axis)
196 return page->cp[axis][2 * page->n[axis] + 1];
199 /* Returns the width of the headers in PAGE along AXIS. */
201 headers_width (const struct render_page *page, int axis)
203 int h0 = page->h[axis][0];
204 int w0 = axis_width (page, axis, rule_ofs (0), cell_ofs (h0));
205 int n = page->n[axis];
206 int h1 = page->h[axis][1];
207 int w1 = axis_width (page, axis, rule_ofs_r (page, axis, h1), cell_ofs (n));
211 /* Returns the width of cell X along AXIS in PAGE. */
213 cell_width (const struct render_page *page, int axis, int x)
215 return axis_width (page, axis, cell_ofs (x), cell_ofs (x) + 1);
218 /* Returns the width of rule X along AXIS in PAGE. */
220 rule_width (const struct render_page *page, int axis, int x)
222 return axis_width (page, axis, rule_ofs (x), rule_ofs (x) + 1);
225 /* Returns the width of rule X along AXIS in PAGE. */
227 rule_width_r (const struct render_page *page, int axis, int x)
229 int ofs = rule_ofs_r (page, axis, x);
230 return axis_width (page, axis, ofs, ofs + 1);
233 /* Returns the width of cells X0 through X1, exclusive, along AXIS in PAGE. */
235 joined_width (const struct render_page *page, int axis, int x0, int x1)
237 return axis_width (page, axis, cell_ofs (x0), cell_ofs (x1) - 1);
240 /* Returns the width of the widest cell, excluding headers, along AXIS in
243 max_cell_width (const struct render_page *page, int axis)
245 int n = page->n[axis];
246 int x0 = page->h[axis][0];
247 int x1 = n - page->h[axis][1];
250 for (int x = x0; x < x1; x++)
252 int w = cell_width (page, axis, x);
259 /* A cell that doesn't completely fit on the render_page. */
260 struct render_overflow
262 struct hmap_node node; /* In render_page's 'overflows' hmap. */
264 /* Occupied region of page.
266 d[H][0] is the leftmost column.
267 d[H][1] is the rightmost column, plus 1.
268 d[V][0] is the top row.
269 d[V][1] is the bottom row, plus 1.
271 The cell in its original table might occupy a larger region. This
272 member reflects the size of the cell in the current render_page, after
273 trimming off any rows or columns due to page-breaking. */
276 /* The space that has been trimmed off the cell:
278 overflow[H][0]: space trimmed off its left side.
279 overflow[H][1]: space trimmed off its right side.
280 overflow[V][0]: space trimmed off its top.
281 overflow[V][1]: space trimmed off its bottom.
283 During rendering, this information is used to position the rendered
284 portion of the cell within the available space.
286 When a cell is rendered, sometimes it is permitted to spill over into
287 space that is ordinarily reserved for rules. Either way, this space is
288 still included in overflow values.
290 Suppose, for example, that a cell that joins 2 columns has a width of 60
291 pixels and content "abcdef", that the 2 columns that it joins have
292 widths of 20 and 30 pixels, respectively, and that therefore the rule
293 between the two joined columns has a width of 10 (20 + 10 + 30 = 60).
294 It might render like this, if each character is 10x10, and showing a few
295 extra table cells for context:
303 If this render_page is broken at the rule that separates "gh" from
304 "ijk", then the page that contains the left side of the "abcdef" cell
305 will have overflow[H][1] of 10 + 30 = 40 for its portion of the cell,
306 and the page that contains the right side of the cell will have
307 overflow[H][0] of 20 + 10 = 30. The two resulting pages would look like
325 int overflow[TABLE_N_AXES][2];
328 /* Returns a hash value for (,Y). */
330 hash_cell (int x, int y)
332 return hash_int (x + (y << 16), 0);
335 /* Searches PAGE's set of render_overflow for one whose top-left cell is
336 (X,Y). Returns it, if there is one, otherwise a null pointer. */
337 static const struct render_overflow *
338 find_overflow (const struct render_page *page, int x, int y)
340 if (!hmap_is_empty (&page->overflows))
342 const struct render_overflow *of;
344 HMAP_FOR_EACH_WITH_HASH (of, struct render_overflow, node,
345 hash_cell (x, y), &page->overflows)
346 if (x == of->d[H] && y == of->d[V])
353 /* Row or column dimensions. Used to figure the size of a table in
354 render_page_create() and discarded after that. */
357 /* Width without considering rows (or columns) that span more than one (or
361 /* Width taking spanned rows (or columns) into consideration. */
365 /* Modifies the 'width' members of the N elements of ROWS so that their sum,
366 when added to rule widths RULES[1] through RULES[N - 1] inclusive, is at
369 distribute_spanned_width (int width,
370 struct render_row *rows, const int *rules, int n)
372 /* Sum up the unspanned widths of the N rows for use as weights. */
373 int total_unspanned = 0;
374 for (int x = 0; x < n; x++)
375 total_unspanned += rows[x].unspanned;
376 for (int x = 0; x < n - 1; x++)
377 total_unspanned += rules[x + 1];
378 if (total_unspanned >= width)
381 /* The algorithm used here is based on the following description from HTML 4:
383 For cells that span multiple columns, a simple approach consists of
384 apportioning the min/max widths evenly to each of the constituent
385 columns. A slightly more complex approach is to use the min/max
386 widths of unspanned cells to weight how spanned widths are
387 apportioned. Experiments suggest that a blend of the two approaches
388 gives good results for a wide range of tables.
390 We blend the two approaches half-and-half, except that we cannot use the
391 unspanned weights when 'total_unspanned' is 0 (because that would cause a
394 The calculation we want to do is this:
397 w1 = width * (column's unspanned width) / (total unspanned width)
398 (column's width) = (w0 + w1) / 2
400 We implement it as a precise calculation in integers by multiplying w0 and
401 w1 by the common denominator of all three calculations (d), dividing that
402 out in the column width calculation, and then keeping the remainder for
405 (We actually compute the unspanned width of a column as twice the
406 unspanned width, plus the width of the rule on the left, plus the width of
407 the rule on the right. That way each rule contributes to both the cell on
408 its left and on its right.)
410 long long int d0 = n;
411 long long int d1 = 2LL * MAX (total_unspanned, 1);
412 long long int d = d0 * d1;
413 if (total_unspanned > 0)
415 long long int w = d / 2;
416 for (int x = 0; x < n; x++)
419 if (total_unspanned > 0)
421 long long int unspanned = rows[x].unspanned * 2LL;
423 unspanned += rules[x + 1];
425 unspanned += rules[x];
426 w += width * unspanned * d0;
429 rows[x].width = MAX (rows[x].width, w / d);
430 w -= rows[x].width * d;
434 /* Initializes PAGE->cp[AXIS] from the row widths in ROWS and the rule widths
437 accumulate_row_widths (const struct render_page *page, enum table_axis axis,
438 const struct render_row *rows, const int *rules)
440 int n = page->n[axis];
441 int *cp = page->cp[axis];
443 for (int z = 0; z < n; z++)
445 cp[1] = cp[0] + rules[z];
446 cp[2] = cp[1] + rows[z].width;
449 cp[1] = cp[0] + rules[n];
452 /* Returns the sum of widths of the N ROWS and N+1 RULES. */
454 calculate_table_width (int n, const struct render_row *rows, int *rules)
457 for (int x = 0; x < n; x++)
458 width += rows[x].width;
459 for (int x = 0; x <= n; x++)
465 /* Rendering utility functions. */
467 /* Returns the line style to use for drawing a rule of the given TYPE. */
468 static enum render_line_style
469 rule_to_render_type (unsigned char type)
473 case TABLE_STROKE_NONE:
474 return RENDER_LINE_NONE;
475 case TABLE_STROKE_SOLID:
476 return RENDER_LINE_SINGLE;
477 case TABLE_STROKE_DASHED:
478 return RENDER_LINE_DASHED;
479 case TABLE_STROKE_THICK:
480 return RENDER_LINE_THICK;
481 case TABLE_STROKE_THIN:
482 return RENDER_LINE_THIN;
483 case TABLE_STROKE_DOUBLE:
484 return RENDER_LINE_DOUBLE;
490 /* Returns the width of the rule in TABLE that is at offset Z along axis A, if
491 rendered with PARAMS. */
493 measure_rule (const struct render_params *params, const struct table *table,
494 enum table_axis a, int z)
496 enum table_axis b = !a;
498 /* Determine all types of rules that are present, as a bitmap in 'rules'
499 where rule type 't' is present if bit 2**t is set. */
500 struct cell_color color;
501 unsigned int rules = 0;
504 for (d[b] = 0; d[b] < table->n[b]; d[b]++)
505 rules |= 1u << table_get_rule (table, a, d[H], d[V], &color);
507 /* Turn off TABLE_STROKE_NONE because it has width 0 and we needn't bother.
508 However, if the device doesn't support margins, make sure that there is at
509 least a small gap between cells (but we don't need any at the left or
510 right edge of the table). */
511 if (rules & (1u << TABLE_STROKE_NONE))
513 rules &= ~(1u << TABLE_STROKE_NONE);
514 if (z > 0 && z < table->n[a] && !params->supports_margins && a == H)
515 rules |= 1u << TABLE_STROKE_SOLID;
518 /* Calculate maximum width of the rules that are present. */
520 for (size_t i = 0; i < TABLE_N_STROKES; i++)
521 if (rules & (1u << i))
522 width = MAX (width, params->line_widths[rule_to_render_type (i)]);
526 /* Allocates and returns a new render_page using PARAMS and TABLE. Allocates
527 space for rendering a table with dimensions given in N. The caller must
528 initialize most of the members itself. */
529 static struct render_page *
530 render_page_allocate__ (const struct render_params *params,
531 struct table *table, int n[TABLE_N_AXES])
533 struct render_page *page = xmalloc (sizeof *page);
534 page->params = params;
540 for (int i = 0; i < TABLE_N_AXES; i++)
542 page->cp[i] = xmalloc ((2 * n[i] + 2) * sizeof *page->cp[i]);
543 page->join_crossing[i] = xzalloc ((n[i] + 1)
544 * sizeof *page->join_crossing[i]);
547 hmap_init (&page->overflows);
548 memset (page->is_edge_cutoff, 0, sizeof page->is_edge_cutoff);
553 /* Allocates and returns a new render_page using PARAMS and TABLE. Allocates
554 space for all of the members of the new page, but the caller must initialize
555 the 'cp' member itself. */
556 static struct render_page *
557 render_page_allocate (const struct render_params *params, struct table *table)
559 struct render_page *page = render_page_allocate__ (params, table, table->n);
560 for (enum table_axis a = 0; a < TABLE_N_AXES; a++)
562 page->h[a][0] = table->h[a][0];
563 page->h[a][1] = table->h[a][1];
564 page->r[a][0] = table->h[a][0];
565 page->r[a][1] = table->n[a] - table->h[a][1];
570 /* Allocates and returns a new render_page for PARAMS and TABLE, initializing
571 cp[H] in the new page from ROWS and RULES. The caller must still initialize
573 static struct render_page *
574 create_page_with_exact_widths (const struct render_params *params,
576 const struct render_row *rows, int *rules)
578 struct render_page *page = render_page_allocate (params, table);
579 accumulate_row_widths (page, H, rows, rules);
583 /* Allocates and returns a new render_page for PARAMS and TABLE.
585 Initializes cp[H] in the new page by setting the width of each row 'i' to
586 somewhere between the minimum cell width ROW_MIN[i].width and the maximum
587 ROW_MAX[i].width. Sets the width of rules to those in RULES.
589 W_MIN is the sum of ROWS_MIN[].width.
591 W_MAX is the sum of ROWS_MAX[].width.
593 The caller must still initialize cp[V]. */
594 static struct render_page *
595 create_page_with_interpolated_widths (const struct render_params *params,
597 const struct render_row *rows_min,
598 const struct render_row *rows_max,
599 int w_min, int w_max, const int *rules)
601 const int n = table->n[H];
602 const long long int avail = params->size[H] - w_min;
603 const long long int wanted = w_max - w_min;
607 struct render_page *page = render_page_allocate (params, table);
609 int *cph = page->cp[H];
611 long long int w = wanted / 2;
612 for (int x = 0; x < n; x++)
614 w += avail * (rows_max[x].width - rows_min[x].width);
615 int extra = w / wanted;
618 cph[1] = cph[0] + rules[x];
619 cph[2] = cph[1] + rows_min[x].width + extra;
622 cph[1] = cph[0] + rules[n];
624 assert (page->cp[H][n * 2 + 1] == params->size[H]);
629 set_join_crossings (struct render_page *page, enum table_axis axis,
630 const struct table_cell *cell, int *rules)
632 for (int z = cell->d[axis][0] + 1; z <= cell->d[axis][1] - 1; z++)
633 page->join_crossing[axis][z] = rules[z];
636 /* Maps a contiguous range of cells from a page to the underlying table along
637 the horizpntal or vertical dimension. */
640 int p0; /* First ordinate in the page. */
641 int t0; /* First ordinate in the table. */
642 int n; /* Number of ordinates in page and table. */
645 /* Initializes M to a mapping from PAGE to PAGE->table along axis A. The
646 mapping includes ordinate Z (in PAGE). */
648 get_map (const struct render_page *page, enum table_axis a, int z,
651 if (z < page->h[a][0])
655 m->n = page->h[a][0];
657 else if (z < page->n[a] - page->h[a][1])
659 m->p0 = page->h[a][0];
660 m->t0 = page->r[a][0];
661 m->n = page->r[a][1] - page->r[a][0];
665 m->p0 = page->n[a] - page->h[a][1];
666 m->t0 = page->table->n[a] - page->table->h[a][1];
667 m->n = page->h[a][1];
671 /* Initializes CELL with the contents of the table cell at column X and row Y
672 within PAGE. When CELL is no longer needed, the caller is responsible for
673 freeing it by calling table_cell_free(CELL).
675 The caller must ensure that CELL is destroyed before TABLE is unref'ed.
677 This is equivalent to table_get_cell(), except X and Y are in terms of the
678 page's rows and columns rather than the underlying table's. */
680 render_get_cell (const struct render_page *page, int x, int y,
681 struct table_cell *cell)
683 int d[TABLE_N_AXES] = { [H] = x, [V] = y };
684 struct map map[TABLE_N_AXES];
686 for (enum table_axis a = 0; a < TABLE_N_AXES; a++)
688 struct map *m = &map[a];
689 get_map (page, a, d[a], m);
690 d[a] += m->t0 - m->p0;
692 table_get_cell (page->table, d[H], d[V], cell);
694 for (enum table_axis a = 0; a < TABLE_N_AXES; a++)
696 struct map *m = &map[a];
698 for (int i = 0; i < 2; i++)
699 cell->d[a][i] -= m->t0 - m->p0;
700 cell->d[a][0] = MAX (cell->d[a][0], m->p0);
701 cell->d[a][1] = MIN (cell->d[a][1], m->p0 + m->n);
705 /* Creates and returns a new render_page for rendering TABLE on a device
708 The new render_page will be suitable for rendering on a device whose page
709 size is PARAMS->size, but the caller is responsible for actually breaking it
710 up to fit on such a device, using the render_break abstraction. */
711 static struct render_page *
712 render_page_create (const struct render_params *params, struct table *table,
717 int nc = table->n[H];
718 int nr = table->n[V];
720 /* Figure out rule widths. */
721 int *rules[TABLE_N_AXES];
722 for (enum table_axis axis = 0; axis < TABLE_N_AXES; axis++)
724 int n = table->n[axis] + 1;
726 rules[axis] = xnmalloc (n, sizeof *rules);
727 for (int z = 0; z < n; z++)
728 rules[axis][z] = measure_rule (params, table, axis, z);
731 /* Calculate minimum and maximum widths of cells that do not
732 span multiple columns. */
733 struct render_row *columns[2];
734 for (int i = 0; i < 2; i++)
735 columns[i] = xzalloc (nc * sizeof *columns[i]);
736 for (int y = 0; y < nr; y++)
737 for (int x = 0; x < nc;)
739 struct table_cell cell;
741 table_get_cell (table, x, y, &cell);
742 if (y == cell.d[V][0])
744 if (table_cell_colspan (&cell) == 1)
747 params->ops->measure_cell_width (params->aux, &cell,
749 for (int i = 0; i < 2; i++)
750 if (columns[i][x].unspanned < w[i])
751 columns[i][x].unspanned = w[i];
757 /* Distribute widths of spanned columns. */
758 for (int i = 0; i < 2; i++)
759 for (int x = 0; x < nc; x++)
760 columns[i][x].width = columns[i][x].unspanned;
761 for (int y = 0; y < nr; y++)
762 for (int x = 0; x < nc;)
764 struct table_cell cell;
766 table_get_cell (table, x, y, &cell);
767 if (y == cell.d[V][0] && table_cell_colspan (&cell) > 1)
771 params->ops->measure_cell_width (params->aux, &cell,
773 for (int i = 0; i < 2; i++)
774 distribute_spanned_width (w[i], &columns[i][cell.d[H][0]],
775 rules[H], table_cell_colspan (&cell));
780 for (int i = 0; i < 2; i++)
781 distribute_spanned_width (min_width, &columns[i][0], rules[H], nc);
783 /* In pathological cases, spans can cause the minimum width of a column to
784 exceed the maximum width. This bollixes our interpolation algorithm
785 later, so fix it up. */
786 for (int i = 0; i < nc; i++)
787 if (columns[MIN][i].width > columns[MAX][i].width)
788 columns[MAX][i].width = columns[MIN][i].width;
790 /* Decide final column widths. */
792 for (int i = 0; i < 2; i++)
793 table_widths[i] = calculate_table_width (table->n[H],
794 columns[i], rules[H]);
796 struct render_page *page;
797 if (table_widths[MAX] <= params->size[H])
799 /* Fits even with maximum widths. Use them. */
800 page = create_page_with_exact_widths (params, table, columns[MAX],
803 else if (table_widths[MIN] <= params->size[H])
805 /* Fits with minimum widths, so distribute the leftover space. */
806 page = create_page_with_interpolated_widths (
807 params, table, columns[MIN], columns[MAX],
808 table_widths[MIN], table_widths[MAX], rules[H]);
812 /* Doesn't fit even with minimum widths. Assign minimums for now, and
813 later we can break it horizontally into multiple pages. */
814 page = create_page_with_exact_widths (params, table, columns[MIN],
818 /* Calculate heights of cells that do not span multiple rows. */
819 struct render_row *rows = xzalloc (nr * sizeof *rows);
820 for (int y = 0; y < nr; y++)
821 for (int x = 0; x < nc;)
823 struct render_row *r = &rows[y];
824 struct table_cell cell;
826 render_get_cell (page, x, y, &cell);
827 if (y == cell.d[V][0])
829 if (table_cell_rowspan (&cell) == 1)
831 int w = joined_width (page, H, cell.d[H][0], cell.d[H][1]);
832 int h = params->ops->measure_cell_height (params->aux,
834 if (h > r->unspanned)
835 r->unspanned = r->width = h;
838 set_join_crossings (page, V, &cell, rules[V]);
840 if (table_cell_colspan (&cell) > 1)
841 set_join_crossings (page, H, &cell, rules[H]);
845 for (int i = 0; i < 2; i++)
848 /* Distribute heights of spanned rows. */
849 for (int y = 0; y < nr; y++)
850 for (int x = 0; x < nc;)
852 struct table_cell cell;
854 render_get_cell (page, x, y, &cell);
855 if (y == cell.d[V][0] && table_cell_rowspan (&cell) > 1)
857 int w = joined_width (page, H, cell.d[H][0], cell.d[H][1]);
858 int h = params->ops->measure_cell_height (params->aux, &cell, w);
859 distribute_spanned_width (h, &rows[cell.d[V][0]], rules[V],
860 table_cell_rowspan (&cell));
865 /* Decide final row heights. */
866 accumulate_row_widths (page, V, rows, rules[V]);
869 /* Measure headers. If they are "too big", get rid of them. */
870 for (enum table_axis axis = 0; axis < TABLE_N_AXES; axis++)
872 int hw = headers_width (page, axis);
873 if (hw * 2 >= page->params->size[axis]
874 || hw + max_cell_width (page, axis) > page->params->size[axis])
876 page->h[axis][0] = page->h[axis][1] = 0;
877 page->r[axis][0] = 0;
878 page->r[axis][1] = page->n[axis];
888 /* Increases PAGE's reference count. */
890 render_page_ref (const struct render_page *page_)
892 struct render_page *page = CONST_CAST (struct render_page *, page_);
897 /* Decreases PAGE's reference count and destroys PAGE if this causes the
898 reference count to fall to zero. */
900 render_page_unref (struct render_page *page)
902 if (page != NULL && --page->ref_cnt == 0)
904 struct render_overflow *overflow, *next;
905 HMAP_FOR_EACH_SAFE (overflow, next, struct render_overflow, node,
908 hmap_destroy (&page->overflows);
910 table_unref (page->table);
912 for (int i = 0; i < TABLE_N_AXES; ++i)
914 free (page->join_crossing[i]);
922 /* Returns the size of PAGE along AXIS. (This might be larger than the page
923 size specified in the parameters passed to render_page_create(). Use a
924 render_break to break up a render_page into page-sized chunks.) */
926 render_page_get_size (const struct render_page *page, enum table_axis axis)
928 return page->cp[axis][page->n[axis] * 2 + 1];
932 render_page_get_best_breakpoint (const struct render_page *page, int height)
934 /* If there's no room for at least the top row and the rules above and below
935 it, don't include any of the table. */
936 if (page->cp[V][3] > height)
939 /* Otherwise include as many rows and rules as we can. */
940 for (int y = 5; y <= 2 * page->n[V] + 1; y += 2)
941 if (page->cp[V][y] > height)
942 return page->cp[V][y - 2];
946 /* Drawing render_pages. */
948 /* This is like table_get_rule() except:
950 - D is in terms of the page's rows and column rather than the underlying
953 - The result is in the form of a render_line_style. */
954 static enum render_line_style
955 get_rule (const struct render_page *page, enum table_axis axis,
956 const int d_[TABLE_N_AXES], struct cell_color *color)
958 int d[TABLE_N_AXES] = { d_[0] / 2, d_[1] / 2 };
961 enum table_axis a = axis;
962 if (d[a] < page->h[a][0])
964 else if (d[a] <= page->n[a] - page->h[a][1])
966 if (page->h[a][0] && d[a] == page->h[a][0])
968 else if (page->h[a][1] && d[a] == page->n[a] - page->h[a][1])
969 d2 = page->table->n[a] - page->h[a][1];
970 d[a] += page->r[a][0] - page->h[a][0];
973 d[a] += ((page->table->n[a] - page->table->h[a][1])
974 - (page->n[a] - page->h[a][1]));
976 enum table_axis b = !axis;
978 get_map (page, b, d[b], &m);
981 int r = table_get_rule (page->table, axis, d[H], d[V], color);
985 int r2 = table_get_rule (page->table, axis, d[H], d[V], color);
986 r = table_stroke_combine (r, r2);
988 return rule_to_render_type (r);
998 render_direction_rtl (void)
1000 /* TRANSLATORS: Do not translate this string. If the script of your language
1001 reads from right to left (eg Persian, Arabic, Hebrew etc), then replace
1002 this string with "output-direction-rtl". Otherwise either leave it
1003 untranslated or copy it verbatim. */
1004 const char *dir = _("output-direction-ltr");
1005 if (0 == strcmp ("output-direction-rtl", dir))
1008 if (0 != strcmp ("output-direction-ltr", dir))
1009 fprintf (stderr, "This localisation has been incorrectly translated. "
1010 "Complain to the translator.\n");
1016 render_rule (const struct render_page *page, const int ofs[TABLE_N_AXES],
1017 const int d[TABLE_N_AXES])
1019 enum render_line_style styles[TABLE_N_AXES][2];
1020 struct cell_color colors[TABLE_N_AXES][2];
1022 for (enum table_axis a = 0; a < TABLE_N_AXES; a++)
1024 enum table_axis b = !a;
1026 styles[a][0] = styles[a][1] = RENDER_LINE_NONE;
1029 || (page->is_edge_cutoff[a][0] && d[a] == 0)
1030 || (page->is_edge_cutoff[a][1] && d[a] == page->n[a] * 2))
1037 int e[TABLE_N_AXES];
1041 styles[a][0] = get_rule (page, a, e, &colors[a][0]);
1044 if (d[b] / 2 < page->n[b])
1045 styles[a][1] = get_rule (page, a, d, &colors[a][1]);
1049 styles[a][0] = styles[a][1] = get_rule (page, a, d, &colors[a][0]);
1050 colors[a][1] = colors[a][0];
1054 if (styles[H][0] != RENDER_LINE_NONE || styles[H][1] != RENDER_LINE_NONE
1055 || styles[V][0] != RENDER_LINE_NONE || styles[V][1] != RENDER_LINE_NONE)
1057 int bb[TABLE_N_AXES][2];
1059 bb[H][0] = ofs[H] + page->cp[H][d[H]];
1060 bb[H][1] = ofs[H] + page->cp[H][d[H] + 1];
1061 if (page->params->rtl)
1063 int temp = bb[H][0];
1064 bb[H][0] = render_page_get_size (page, H) - bb[H][1];
1065 bb[H][1] = render_page_get_size (page, H) - temp;
1067 bb[V][0] = ofs[V] + page->cp[V][d[V]];
1068 bb[V][1] = ofs[V] + page->cp[V][d[V] + 1];
1069 page->params->ops->draw_line (page->params->aux, bb, styles, colors);
/* Renders CELL, whose page-relative position is offset by OFS within PAGE,
   by computing its bounding box ('bb') and clip rectangle ('clip') and
   handing them to the device's draw_cell hook. */
1074 render_cell (const struct render_page *page, const int ofs[TABLE_N_AXES],
1075 const struct table_cell *cell)
1077 int bb[TABLE_N_AXES][2];
1078 int clip[TABLE_N_AXES][2];
/* 'cp' interleaves rule and cell coordinates, so a cell's left edge is at
   index d*2+1 and its right edge at index d*2 of the following cell. */
1080 bb[H][0] = clip[H][0] = ofs[H] + page->cp[H][cell->d[H][0] * 2 + 1];
1081 bb[H][1] = clip[H][1] = ofs[H] + page->cp[H][cell->d[H][1] * 2];
/* Mirror the horizontal coordinates for right-to-left rendering. */
1082 if (page->params->rtl)
1084 int temp = bb[H][0];
1085 bb[H][0] = clip[H][0] = render_page_get_size (page, H) - bb[H][1];
1086 bb[H][1] = clip[H][1] = render_page_get_size (page, H) - temp;
1088 bb[V][0] = clip[V][0] = ofs[V] + page->cp[V][cell->d[V][0] * 2 + 1];
1089 bb[V][1] = clip[V][1] = ofs[V] + page->cp[V][cell->d[V][1] * 2];
/* For center or bottom vertical alignment, measure the cell's rendered
   height at this width and shift its content down by part or all of the
   leftover vertical space. */
1091 enum table_valign valign = cell->cell_style->valign;
1092 int valign_offset = 0;
1093 if (valign != TABLE_VALIGN_TOP)
1095 int height = page->params->ops->measure_cell_height (
1096 page->params->aux, cell, bb[H][1] - bb[H][0]);
1097 int extra = bb[V][1] - bb[V][0] - height;
1100 if (valign == TABLE_VALIGN_CENTER)
1102 valign_offset += extra;
/* If the cell was broken across pages, grow 'bb' by the amount recorded
   as overflowing onto the neighboring pages, and widen 'clip' out to the
   page edge where the content is allowed to run off. */
1106 const struct render_overflow *of = find_overflow (
1107 page, cell->d[H][0], cell->d[V][0]);
1109 for (enum table_axis axis = 0; axis < TABLE_N_AXES; axis++)
1111 if (of->overflow[axis][0])
1113 bb[axis][0] -= of->overflow[axis][0];
1114 if (cell->d[axis][0] == 0 && !page->is_edge_cutoff[axis][0])
1115 clip[axis][0] = ofs[axis] + page->cp[axis][cell->d[axis][0] * 2];
1117 if (of->overflow[axis][1])
1119 bb[axis][1] += of->overflow[axis][1];
1120 if (cell->d[axis][1] == page->n[axis]
1121 && !page->is_edge_cutoff[axis][1])
1122 clip[axis][1] = ofs[axis] + page->cp[axis][cell->d[axis][1] * 2
/* Half of each bounding rule's width "spills" into the cell's margin. */
1127 int spill[TABLE_N_AXES][2];
1128 for (enum table_axis axis = 0; axis < TABLE_N_AXES; axis++)
1130 spill[axis][0] = rule_width (page, axis, cell->d[axis][0]) / 2;
1131 spill[axis][1] = rule_width (page, axis, cell->d[axis][1]) / 2;
/* Alternate-row coloring: header rows (top or bottom) are treated
   specially; body rows alternate by their offset past the top header. */
1134 int color_idx = (cell->d[V][0] < page->h[V][0]
1135 || page->n[V] - (cell->d[V][0] + 1) < page->h[V][1]
1137 : (cell->d[V][0] - page->h[V][0]) & 1);
1138 page->params->ops->draw_cell (page->params->aux, cell, color_idx,
1139 bb, valign_offset, spill, clip);
1142 /* Draws the cells of PAGE indicated in BB. */
1144 render_page_draw_cells (const struct render_page *page,
1145 int ofs[TABLE_N_AXES], int bb[TABLE_N_AXES][2])
/* First pass: draw cell contents.  Even indexes in BB's coordinate space
   are rules and odd indexes are cells, hence the is_rule() tests and the
   x/2, y/2 conversions to cell coordinates. */
1147 for (int y = bb[V][0]; y < bb[V][1]; y++)
1148 for (int x = bb[H][0]; x < bb[H][1];)
1149 if (!is_rule (x) && !is_rule (y))
1151 struct table_cell cell;
1153 render_get_cell (page, x / 2, y / 2, &cell);
/* Draw a spanned cell only once: from its own top row, or from the first
   row of BB if the cell starts above BB.  Then skip X past the cell's
   full horizontal extent. */
1154 if (y / 2 == bb[V][0] / 2 || y / 2 == cell.d[V][0])
1155 render_cell (page, ofs, &cell);
1156 x = rule_ofs (cell.d[H][1]);
/* Second pass: draw the rules at every rule coordinate within BB. */
1161 for (int y = bb[V][0]; y < bb[V][1]; y++)
1162 for (int x = bb[H][0]; x < bb[H][1]; x++)
1163 if (is_rule (x) || is_rule (y))
1165 int d[TABLE_N_AXES];
1168 render_rule (page, ofs, d);
1172 /* Renders PAGE, by calling the 'draw_line' and 'draw_cell' functions from the
1173 render_params provided to render_page_create(). */
1175 render_page_draw (const struct render_page *page, int ofs[TABLE_N_AXES])
1177 int bb[TABLE_N_AXES][2];
/* Cover the whole page: all 2n+1 rule and cell positions on each axis. */
1180 bb[H][1] = page->n[H] * 2 + 1;
1182 bb[V][1] = page->n[V] * 2 + 1;
1184 render_page_draw_cells (page, ofs, bb);
1187 /* Returns the greatest value i, 0 <= i < n, such that cp[i] <= x0. */
1189 get_clip_min_extent (int x0, const int cp[], int n)
/* Binary search over the sorted coordinate array CP. */
1196 int middle = low + (high - low) / 2;
1198 if (cp[middle] <= x0)
1210 /* Returns the least value i, 0 <= i < n, such that cp[i] >= x1. */
1212 get_clip_max_extent (int x1, const int cp[], int n)
/* Binary search over the sorted coordinate array CP. */
1219 int middle = low + (high - low) / 2;
1221 if (cp[middle] >= x1)
1222 best = high = middle;
/* Step back over zero-width positions so that the result is the first of
   any run of equal coordinates. */
1227 while (best > 0 && cp[best - 1] == cp[best])
1233 /* Renders the cells of PAGE that intersect (X,Y)-(X+W,Y+H), by calling the
1234 'draw_line' and 'draw_cell' functions from the render_params provided to
1235 render_page_create(). */
1237 render_page_draw_region (const struct render_page *page,
1238 int ofs[TABLE_N_AXES], int clip[TABLE_N_AXES][2])
1240 int bb[TABLE_N_AXES][2];
/* Convert CLIP, which is in pixels, into BB, a range of rule/cell
   positions, by searching the page's coordinate arrays. */
1242 bb[H][0] = get_clip_min_extent (clip[H][0], page->cp[H], page->n[H] * 2 + 1);
1243 bb[H][1] = get_clip_max_extent (clip[H][1], page->cp[H], page->n[H] * 2 + 1);
1244 bb[V][0] = get_clip_min_extent (clip[V][0], page->cp[V], page->n[V] * 2 + 1);
1245 bb[V][1] = get_clip_max_extent (clip[V][1], page->cp[V], page->n[V] * 2 + 1);
1247 render_page_draw_cells (page, ofs, bb);
1250 /* Breaking up tables to fit on a page. */
1252 /* An iterator for breaking render_pages into smaller chunks. */
1255 struct render_page *page; /* Page being broken up. */
1256 enum table_axis axis; /* Axis along which 'page' is being broken. */
1257 int z; /* Next cell along 'axis'. */
1258 int pixel; /* Pixel offset within cell 'z' (usually 0). */
/* 'pixel' is nonzero when the previous chunk ended partway through cell
   'z'; see render_break_next(). */
1259 int hw; /* Width of headers of 'page' along 'axis'. */
1262 static int needed_size (const struct render_break *, int cell);
1263 static bool cell_is_breakable (const struct render_break *, int cell);
1264 static struct render_page *render_page_select (const struct render_page *,
1269 /* Initializes render_break B for breaking PAGE along AXIS.
1270 Takes ownership of PAGE. */
1272 render_break_init (struct render_break *b, struct render_page *page,
1273 enum table_axis axis)
/* Breaking starts at the first body cell, just past the leading headers. */
1277 b->z = page->h[axis][0];
1279 b->hw = headers_width (page, axis);
1282 /* Initializes B as a render_break structure for which
1283 render_break_has_next() always returns false. */
1285 render_break_init_empty (struct render_break *b)
/* The axis value is arbitrary here; the break owns no page. */
1288 b->axis = TABLE_HORZ;
1294 /* Frees B and unrefs the render_page that it owns. */
1296 render_break_destroy (struct render_break *b)
1300 render_page_unref (b->page);
1305 /* Returns true if B still has cells that are yet to be returned,
1306 false if all of B's page has been processed. */
1308 render_break_has_next (const struct render_break *b)
1310 const struct render_page *page = b->page;
1311 enum table_axis axis = b->axis;
/* The trailing headers (h[axis][1]) are not part of the body, so iteration
   stops when 'z' reaches them.  An empty break has a null page. */
1313 return page != NULL && b->z < page->n[axis] - page->h[axis][1];
1316 /* Returns a new render_page that is up to SIZE pixels wide along B's axis.
1317 Returns a null pointer if B has already been completely broken up, or if
1318 SIZE is too small to reasonably render any cells. The latter will never
1319 happen if SIZE is at least as large as the page size passed to
1320 render_page_create() along B's axis. */
1321 static struct render_page *
1322 render_break_next (struct render_break *b, int size)
1324 const struct render_page *page = b->page;
1325 enum table_axis axis = b->axis;
1326 struct render_page *subpage;
1328 if (!render_break_has_next (b))
/* Scan forward from the current position to find how many whole body
   cells fit in SIZE, possibly splitting the last one if it is breakable. */
1333 for (z = b->z; z < page->n[axis] - page->h[axis][1]; z++)
1335 int needed = needed_size (b, z + 1);
1338 if (cell_is_breakable (b, z))
1340 /* If there is no right header and we render a partial cell on
1341 the right side of the body, then we omit the rightmost rule of
1342 the body. Otherwise the rendering is deceptive because it
1343 looks like the whole cell is present instead of a partial
1346 This is similar to code for the left side in needed_size(). */
1347 int rule_allowance = (page->h[axis][1]
1349 : rule_width (page, axis, z));
1351 /* The amount that, if we added cell 'z', the rendering would
1352 overfill the allocated 'size'. */
1353 int overhang = needed - size - rule_allowance;
1355 /* The width of cell 'z'. */
1356 int cell_size = cell_width (page, axis, z);
1358 /* The amount trimmed off the left side of 'z',
1359 and the amount left to render. */
1360 int cell_ofs = z == b->z ? b->pixel : 0;
1361 int cell_left = cell_size - cell_ofs;
1363 /* A small but visible width. */
1364 int em = page->params->font_size[axis];
1366 /* If some of the cell remains to render,
1367 and there would still be some of the cell left afterward,
1368 then partially render that much of the cell. */
1369 pixel = (cell_left && cell_left > overhang
1370 ? cell_left - overhang + cell_ofs
1373 /* If there would be only a tiny amount of the cell left after
1374 rendering it partially, reduce the amount rendered slightly
1375 to make the output look a little better. */
1376 if (pixel + em > cell_size)
1377 pixel = MAX (pixel - em, 0);
1379 /* If we're breaking vertically, then consider whether the cells
1380 being broken have a better internal breakpoint than the exact
1381 number of pixels available, which might look bad e.g. because
1382 it breaks in the middle of a line of text. */
1383 if (axis == TABLE_VERT && page->params->ops->adjust_break)
/* Ask the device about every cell that intersects row 'z', since any of
   them might prefer an earlier breakpoint. */
1384 for (int x = 0; x < page->n[H];)
1386 struct table_cell cell;
1388 render_get_cell (page, x, z, &cell);
1389 int w = joined_width (page, H, cell.d[H][0], cell.d[H][1]);
1390 int better_pixel = page->params->ops->adjust_break (
1391 page->params->aux, &cell, w, pixel);
/* Only accept an adjustment that moves the break earlier but still past
   the point where this chunk started. */
1394 if (better_pixel < pixel)
1396 if (better_pixel > (z == b->z ? b->pixel : 0))
1398 pixel = better_pixel;
1401 else if (better_pixel == 0 && z != b->z)
/* Nothing fits: not even a partial slice of the first cell. */
1413 if (z == b->z && !pixel)
/* Carve out the selected range [b->z, z) plus any partial cell as a new
   subpage, trimming b->pixel off the near side and the unrendered
   remainder of cell 'z' off the far side. */
1416 subpage = render_page_select (page, axis, b->z, b->pixel,
1418 pixel ? cell_width (page, axis, z) - pixel
1425 /* Returns the width that would be required along B's axis to render a page
1426 from B's current position up to but not including CELL.
1427
1428 ("Left" and "right" here are relative to B's axis: when breaking
1429 vertically they mean top and bottom.) */
1428 needed_size (const struct render_break *b, int cell)
1430 const struct render_page *page = b->page;
1431 enum table_axis axis = b->axis;
1433 /* Width of left header not including its rightmost rule. */
1434 int size = axis_width (page, axis, 0, rule_ofs (page->h[axis][0]));
1436 /* If we have a pixel offset and there is no left header, then we omit the
1437 leftmost rule of the body. Otherwise the rendering is deceptive because
1438 it looks like the whole cell is present instead of a partial cell.
1440 Otherwise (if there are headers) we will be merging two rules: the
1441 rightmost rule in the header and the leftmost rule in the body. We assume
1442 that the width of a merged rule is the larger of the widths of either rule
1444 if (b->pixel == 0 || page->h[axis][0])
1445 size += MAX (rule_width (page, axis, page->h[axis][0]),
1446 rule_width (page, axis, b->z))
1448 /* Width of body, minus any pixel offset in the leftmost cell. */
1449 size += joined_width (page, axis, b->z, cell) - b->pixel;
1451 /* Width of rightmost rule in body merged with leftmost rule in headers. */
1452 size += MAX (rule_width_r (page, axis, page->h[axis][1]),
1453 rule_width (page, axis, cell));
1455 /* Width of right header not including its leftmost rule. */
1456 size += axis_width (page, axis, rule_ofs_r (page, axis, page->h[axis][1]),
1457 rule_ofs_r (page, axis, 0));
1459 /* Join crossing. */
1460 if (page->h[axis][0] && page->h[axis][1])
1461 size += page->join_crossing[axis][b->z];
1466 /* Returns true if CELL along B's axis may be broken across a page boundary.
1468 This is just a heuristic. Breaking cells across page boundaries can save
1469 space, but it looks ugly. */
1471 cell_is_breakable (const struct render_break *b, int cell)
1473 const struct render_page *page = b->page;
1474 enum table_axis axis = b->axis;
/* Only cells at least as wide as the device's minimum break width are
   considered breakable. */
1476 return cell_width (page, axis, cell) >= page->params->min_break[axis];
1483 const struct render_params *params;
1486 /* An array of "render_page"s to be rendered, in order, vertically. There
1487 may be up to 5 pages, for the pivot table's title, layers, body,
1488 captions, and footnotes. */
1489 struct render_page *pages[5];
/* Current breaking state: 'x_break' splits the page being rendered into
   horizontal chunks, and 'y_break' splits each horizontal chunk
   vertically; see render_pager_has_next(). */
1493 struct render_break x_break;
1494 struct render_break y_break;
/* Appends a render_page for TABLE, laid out at least MIN_WIDTH wide, to
   P's array of pages.  Takes ownership of TABLE. */
1498 render_pager_add_table (struct render_pager *p, struct table *table,
1502 p->pages[p->n_pages++] = render_page_create (p->params, table, min_width);
/* Begins breaking up P's next page: starts a horizontal break on it and
   leaves the vertical break empty so that render_pager_has_next() will
   fill it from the first horizontal chunk. */
1506 render_pager_start_page (struct render_pager *p)
1508 render_break_init (&p->x_break, render_page_ref (p->pages[p->cur_page++]),
1510 render_break_init_empty (&p->y_break);
1513 /* Creates and returns a new render_pager for rendering PT on the device
1514 with the given PARAMS. */
1515 struct render_pager *
1516 render_pager_create (const struct render_params *params,
1517 const struct pivot_table *pt,
1518 const size_t *layer_indexes)
/* Fall back to the pivot table's current layer when the caller does not
   supply layer indexes. */
1521 layer_indexes = pt->current_layer;
1523 struct table *title, *layers, *body, *caption, *footnotes;
1524 pivot_output (pt, layer_indexes, params->printing,
1525 &title, &layers, &body, &caption, &footnotes, NULL, NULL);
1527 /* Figure out the width of the body of the table. Use this to determine the
1529 struct render_page *body_page = render_page_create (params, body, 0);
1530 int body_width = table_width (body_page, H);
1532 if (body_width > params->size[H])
/* The body is too wide: either scale it down to fit (if the look and the
   device both allow scaling) or measure how wide the first horizontal
   chunk would be after breaking. */
1534 if (pt->look->shrink_to_fit[H] && params->ops->scale)
1535 scale = params->size[H] / (double) body_width;
1538 struct render_break b;
1539 render_break_init (&b, render_page_ref (body_page), H);
1540 struct render_page *subpage
1541 = render_break_next (&b, params->size[H]);
1542 body_width = subpage ? subpage->cp[H][2 * subpage->n[H] + 1] : 0;
1543 render_page_unref (subpage);
1544 render_break_destroy (&b);
1548 /* Create the pager. */
1549 struct render_pager *p = xmalloc (sizeof *p);
1550 *p = (struct render_pager) { .params = params, .scale = scale };
/* Title and layers are laid out at least as wide as the body so that the
   stacked pages line up; caption and footnotes take their natural width. */
1551 render_pager_add_table (p, title, body_width);
1552 render_pager_add_table (p, layers, body_width);
1553 p->pages[p->n_pages++] = body_page;
1554 render_pager_add_table (p, caption, 0);
1555 render_pager_add_table (p, footnotes, 0);
1556 assert (p->n_pages <= sizeof p->pages / sizeof *p->pages);
1558 /* If we're shrinking tables to fit the page length, then adjust the scale
1561 XXX This will sometimes shrink more than needed, because adjusting the
1562 scale factor allows for cells to be "wider", which means that sometimes
1563 they won't break across as much vertical space, thus shrinking the table
1564 vertically more than the scale would imply. Shrinking only as much as
1565 necessary would require an iterative search. */
1566 if (pt->look->shrink_to_fit[V] && params->ops->scale)
1568 int total_height = 0;
1569 for (size_t i = 0; i < p->n_pages; i++)
1570 total_height += table_width (p->pages[i], V);
1571 if (total_height * p->scale >= params->size[V])
1572 p->scale *= params->size[V] / (double) total_height;
1575 render_pager_start_page (p);
/* Frees P and everything it owns: both break iterators and every page. */
1582 render_pager_destroy (struct render_pager *p)
1586 render_break_destroy (&p->x_break);
1587 render_break_destroy (&p->y_break);
1588 for (size_t i = 0; i < p->n_pages; i++)
1589 render_page_unref (p->pages[i]);
1594 /* Returns true if P has content remaining to render, false if rendering is
/* Although P is logically const to the caller, advancing to the next
   chunk mutates the internal breaking state, hence the const cast. */
1597 render_pager_has_next (const struct render_pager *p_)
1599 struct render_pager *p = CONST_CAST (struct render_pager *, p_);
/* Refill the vertical break from the next horizontal chunk; when the
   horizontal break is also exhausted, move on to the next page, or leave
   both breaks empty when no pages remain. */
1601 while (!render_break_has_next (&p->y_break))
1603 render_break_destroy (&p->y_break);
1604 if (!render_break_has_next (&p->x_break))
1606 render_break_destroy (&p->x_break);
1607 if (p->cur_page >= p->n_pages)
1609 render_break_init_empty (&p->x_break);
1610 render_break_init_empty (&p->y_break);
1613 render_pager_start_page (p);
/* The available width is divided by the scale factor, since everything
   drawn will later be scaled down by that factor. */
1617 &p->y_break, render_break_next (&p->x_break,
1618 p->params->size[H] / p->scale), V);
1623 /* Draws a chunk of content from P to fit in a space that has vertical size
1624 SPACE and the horizontal size specified in the render_params passed to
1625 render_page_create(). Returns the amount of space actually used by the
1626 rendered chunk, which will be 0 if SPACE is too small to render anything or
1627 if no content remains (use render_pager_has_next() to distinguish these
/* Tell the device the scale factor before drawing, if scaling is in use. */
1630 render_pager_draw_next (struct render_pager *p, int space)
1632 if (p->scale != 1.0)
1634 p->params->ops->scale (p->params->aux, p->scale);
1638 int ofs[TABLE_N_AXES] = { 0, 0 };
1639 size_t start_page = SIZE_MAX;
1641 while (render_pager_has_next (p))
/* Detect lack of progress: coming back around to the page we started on
   without having consumed any content means nothing more fits in SPACE. */
1643 if (start_page == p->cur_page)
1645 start_page = p->cur_page;
/* Break off a vertical chunk that fits in the remaining space, draw it,
   and advance the vertical offset past it. */
1647 struct render_page *page
1648 = render_break_next (&p->y_break, space - ofs[V]);
1652 render_page_draw (page, ofs);
1653 ofs[V] += render_page_get_size (page, V);
1654 render_page_unref (page);
1657 if (p->scale != 1.0)
1663 /* Draws all of P's content. */
1665 render_pager_draw (const struct render_pager *p)
/* Draw everything by asking for an effectively unbounded region. */
1667 render_pager_draw_region (p, 0, 0, INT_MAX, INT_MAX);
1670 /* Draws the region of P's content that lies in the region (X,Y)-(X+W,Y+H).
1671 Some extra content might be drawn; the device should perform clipping as
1674 render_pager_draw_region (const struct render_pager *p,
1675 int x, int y, int w, int h)
1677 int ofs[TABLE_N_AXES] = { 0, 0 };
1678 int clip[TABLE_N_AXES][2];
/* The pages are stacked vertically; for each one, intersect its vertical
   extent with the requested region and draw any nonempty intersection. */
1682 for (size_t i = 0; i < p->n_pages; i++)
1684 const struct render_page *page = p->pages[i];
1685 int size = render_page_get_size (page, V);
1687 clip[V][0] = MAX (y, ofs[V]) - ofs[V];
1688 clip[V][1] = MIN (y + h, ofs[V] + size) - ofs[V];
1689 if (clip[V][1] > clip[V][0])
1690 render_page_draw_region (page, ofs, clip);
1696 /* Returns the size of P's content along AXIS; i.e. the content's width if AXIS
1697 is TABLE_HORZ and its length if AXIS is TABLE_VERT. */
1699 render_pager_get_size (const struct render_pager *p, enum table_axis axis)
/* Pages are stacked vertically, so the width is the widest page and the
   height is the sum of the pages' heights. */
1703 for (size_t i = 0; i < p->n_pages; i++)
1705 int subsize = render_page_get_size (p->pages[i], axis);
1706 size = axis == H ? MAX (size, subsize) : size + subsize;
/* Returns a good vertical breakpoint no greater than HEIGHT for P's
   stacked content, by locating the page that straddles HEIGHT and
   delegating to that page's own breakpoint search. */
1713 render_pager_get_best_breakpoint (const struct render_pager *p, int height)
1718 for (i = 0; i < p->n_pages; i++)
1720 int size = render_page_get_size (p->pages[i], V);
1721 if (y + size >= height)
1722 return render_page_get_best_breakpoint (p->pages[i], height - y) + y;
1729 /* render_page_select() and helpers. */
/* Shared state for one render_page_select() operation. */
1731 struct render_page_selection
1733 const struct render_page *page; /* Page whose slice we are selecting. */
1734 struct render_page *subpage; /* New page under construction. */
1735 enum table_axis a; /* Axis of 'page' along which 'subpage' is a slice. */
1736 enum table_axis b; /* The opposite of 'a'. */
1737 int z0; /* First cell along 'a' being selected. */
1738 int z1; /* Last cell being selected, plus 1. */
1739 int p0; /* Number of pixels to trim off left side of z0. */
1740 int p1; /* Number of pixels to trim off right side of z1-1. */
1743 static void cell_to_subpage (struct render_page_selection *,
1744 const struct table_cell *,
1745 int subcell[TABLE_N_AXES]);
1746 static const struct render_overflow *find_overflow_for_cell (
1747 struct render_page_selection *, const struct table_cell *);
1748 static struct render_overflow *insert_overflow (struct render_page_selection *,
1749 const struct table_cell *);
1751 /* Creates and returns a new render_page whose contents are a subregion of
1752 PAGE's contents. The new render_page includes cells Z0 through Z1
1753 (exclusive) along AXIS, plus any headers on AXIS.
1755 If P0 is nonzero, then it is a number of pixels to exclude from the left or
1756 top (according to AXIS) of cell Z0. Similarly, P1 is a number of pixels to
1757 exclude from the right or bottom of cell Z1 - 1. (P0 and P1 are used to
1758 render cells that are too large to fit on a single page.)
1760 The whole of axis !AXIS is included. (The caller may follow up with another
1761 call to render_page_select() to select on !AXIS to select on that axis as
1764 The caller retains ownership of PAGE, which is not modified. */
1765 static struct render_page *
1766 render_page_select (const struct render_page *page, enum table_axis axis,
1767 int z0, int p0, int z1, int p1)
1769 enum table_axis a = axis;
1770 enum table_axis b = !a;
1772 /* Optimize case where all of PAGE is selected by just incrementing the
1774 if (z0 == page->h[a][0] && p0 == 0
1775 && z1 == page->n[a] - page->h[a][1] && p1 == 0)
1777 struct render_page *page_rw = CONST_CAST (struct render_page *, page);
1782 /* Allocate subpage. */
/* 'trim' counts the body cells dropped at each end of axis A; the headers
   on both ends are always kept. */
1783 int trim[2] = { z0 - page->h[a][0], (page->n[a] - page->h[a][1]) - z1 };
1784 int n[TABLE_N_AXES] = { [H] = page->n[H], [V] = page->n[V] };
1785 n[a] -= trim[0] + trim[1];
1786 struct render_page *subpage = render_page_allocate__ (
1787 page->params, table_ref (page->table), n);
1788 for (enum table_axis k = 0; k < TABLE_N_AXES; k++)
1790 subpage->h[k][0] = page->h[k][0];
1791 subpage->h[k][1] = page->h[k][1];
1792 subpage->r[k][0] = page->r[k][0];
1793 subpage->r[k][1] = page->r[k][1];
1795 subpage->r[a][0] += trim[0];
1796 subpage->r[a][1] -= trim[1];
1798 /* An edge is cut off if it was cut off in PAGE or if we're trimming pixels
1799 off that side of the page and there are no headers. */
1800 subpage->is_edge_cutoff[a][0] =
1801 subpage->h[a][0] == 0 && (p0 || (z0 == 0 && page->is_edge_cutoff[a][0]));
1802 subpage->is_edge_cutoff[a][1] =
1803 subpage->h[a][1] == 0 && (p1 || (z1 == page->n[a]
1804 && page->is_edge_cutoff[a][1]));
1805 subpage->is_edge_cutoff[b][0] = page->is_edge_cutoff[b][0];
1806 subpage->is_edge_cutoff[b][1] = page->is_edge_cutoff[b][1];
1808 /* Select join crossings from PAGE into subpage. */
/* Copy the crossings for the leading headers, the selected body range,
   and the trailing headers, in that order. */
1809 int *jc = subpage->join_crossing[a];
1810 for (int z = 0; z < page->h[a][0]; z++)
1811 *jc++ = page->join_crossing[a][z];
1812 for (int z = z0; z <= z1; z++)
1813 *jc++ = page->join_crossing[a][z];
1814 for (int z = page->n[a] - page->h[a][1]; z < page->n[a]; z++)
1815 *jc++ = page->join_crossing[a][z];
1816 assert (jc == &subpage->join_crossing[a][subpage->n[a] + 1]);
1818 memcpy (subpage->join_crossing[b], page->join_crossing[b],
1819 (subpage->n[b] + 1) * sizeof **subpage->join_crossing);
1821 /* Select widths from PAGE into subpage. */
/* 'scp' walks PAGE's coordinates; 'dcp' builds subpage's cumulative
   coordinates from successive widths (scp[z+1] - scp[z]). */
1822 int *scp = page->cp[a];
1823 int *dcp = subpage->cp[a];
/* Leading headers, with the leading edge zeroed if it is cut off. */
1825 for (int z = 0; z <= rule_ofs (subpage->h[a][0]); z++, dcp++)
1827 int w = !z && subpage->is_edge_cutoff[a][0] ? 0 : scp[z + 1] - scp[z];
1828 dcp[1] = dcp[0] + w;
/* Selected body cells and rules, trimming P0/P1 pixels off the first and
   last selected cells. */
1830 for (int z = cell_ofs (z0); z <= cell_ofs (z1 - 1); z++, dcp++)
1832 dcp[1] = dcp[0] + (scp[z + 1] - scp[z]);
1833 if (z == cell_ofs (z0))
1836 if (page->h[a][0] && page->h[a][1])
1837 dcp[1] += page->join_crossing[a][z / 2];
1839 if (z == cell_ofs (z1 - 1))
/* Trailing headers, with the trailing edge zeroed if it is cut off. */
1842 for (int z = rule_ofs_r (page, a, subpage->h[a][1]);
1843 z <= rule_ofs_r (page, a, 0); z++, dcp++)
1845 if (z == rule_ofs_r (page, a, 0) && subpage->is_edge_cutoff[a][1])
1848 dcp[1] = dcp[0] + (scp[z + 1] - scp[z]);
1850 assert (dcp == &subpage->cp[a][2 * subpage->n[a] + 1]);
/* Axis B is selected in full, so its coordinates copy over unchanged. */
1852 for (int z = 0; z < page->n[b] * 2 + 2; z++)
1853 subpage->cp[b][z] = page->cp[b][z];
1855 /* Add new overflows. */
1856 struct render_page_selection s = {
/* Record overflow for cells cut at the leading (Z0/P0) edge of the
   selection, walking the cells along axis B at that edge. */
1867 if (!page->h[a][0] || z0 > page->h[a][0] || p0)
1868 for (int z = 0; z < page->n[b];)
1870 int d[TABLE_N_AXES];
1874 struct table_cell cell;
1875 render_get_cell (page, d[H], d[V], &cell);
1876 bool overflow0 = p0 || cell.d[a][0] < z0;
1877 bool overflow1 = cell.d[a][1] > z1 || (cell.d[a][1] == z1 && p1);
1878 if (overflow0 || overflow1)
1880 struct render_overflow *ro = insert_overflow (&s, &cell);
/* The leading overflow is P0 plus the part of the cell that lies before
   Z0, corrected for any join crossing counted in that width. */
1884 ro->overflow[a][0] += p0 + axis_width (
1885 page, a, cell_ofs (cell.d[a][0]), cell_ofs (z0));
1886 if (page->h[a][0] && page->h[a][1])
1887 ro->overflow[a][0] -= page->join_crossing[a][cell.d[a][0]
1893 ro->overflow[a][1] += p1 + axis_width (
1894 page, a, cell_ofs (z1), cell_ofs (cell.d[a][1]));
1895 if (page->h[a][0] && page->h[a][1])
1896 ro->overflow[a][1] -= page->join_crossing[a][cell.d[a][1]];
/* Record overflow for cells cut at the trailing (Z1/P1) edge that were
   not already handled above. */
1902 if (!page->h[a][1] || z1 < page->n[a] - page->h[a][1] || p1)
1903 for (int z = 0; z < page->n[b];)
1905 int d[TABLE_N_AXES];
1909 struct table_cell cell;
1910 render_get_cell (page, d[H], d[V], &cell);
1911 if ((cell.d[a][1] > z1 || (cell.d[a][1] == z1 && p1))
1912 && find_overflow_for_cell (&s, &cell) == NULL)
1914 struct render_overflow *ro = insert_overflow (&s, &cell);
1915 ro->overflow[a][1] += p1 + axis_width (page, a, cell_ofs (z1),
1916 cell_ofs (cell.d[a][1]));
1921 /* Copy overflows from PAGE into subpage. */
1922 struct render_overflow *ro;
1923 HMAP_FOR_EACH (ro, struct render_overflow, node, &page->overflows)
1925 struct table_cell cell;
1927 table_get_cell (page->table, ro->d[H], ro->d[V], &cell);
1928 if (cell.d[a][1] > z0 && cell.d[a][0] < z1
1929 && find_overflow_for_cell (&s, &cell) == NULL)
1930 insert_overflow (&s, &cell);
1936 /* Given CELL, a table_cell within S->page, stores in SUBCELL the (x,y)
1937 coordinates of the top-left cell as it will appear in S->subpage.
1939 CELL must actually intersect the region of S->page that is being selected
1940 by render_page_select() or the results will not make any sense. */
1942 cell_to_subpage (struct render_page_selection *s,
1943 const struct table_cell *cell, int subcell[TABLE_N_AXES])
1945 enum table_axis a = s->a;
1946 enum table_axis b = s->b;
1947 int ha0 = s->subpage->h[a][0];
/* Shift along the selected axis by the start of the selection; the MAX
   clamps cells that begin before Z0 to the first body position.  The
   other axis is selected whole, so its coordinate is unchanged. */
1949 subcell[a] = MAX (cell->d[a][0] - s->z0 + ha0, ha0);
1950 subcell[b] = cell->d[b][0];
1953 /* Given CELL, a table_cell within S->page, returns the render_overflow for
1954 that cell in S->subpage, if there is one, and a null pointer otherwise.
1956 CELL must actually intersect the region of S->page that is being selected
1957 by render_page_select() or the results will not make any sense. */
1958 static const struct render_overflow *
1959 find_overflow_for_cell (struct render_page_selection *s,
1960 const struct table_cell *cell)
/* Translate the cell into subpage coordinates, then look it up in the
   subpage's overflow map. */
1964 cell_to_subpage (s, cell, subcell);
1965 return find_overflow (s->subpage, subcell[H], subcell[V]);
1968 /* Given CELL, a table_cell within S->page, inserts a render_overflow for that
1969 cell in S->subpage (which must not already exist). Initializes the new
1970 render_overflow's 'overflow' member from the overflow for CELL in S->page,
1973 CELL must actually intersect the region of S->page that is being selected
1974 by render_page_select() or the results will not make any sense. */
1975 static struct render_overflow *
1976 insert_overflow (struct render_page_selection *s,
1977 const struct table_cell *cell)
1979 struct render_overflow *of = xzalloc (sizeof *of);
1980 cell_to_subpage (s, cell, of->d);
1981 hmap_insert (&s->subpage->overflows, &of->node,
1982 hash_cell (of->d[H], of->d[V]));
1984 const struct render_overflow *old
1985 = find_overflow (s->page, cell->d[H][0], cell->d[V][0]);
1987 memcpy (of->overflow, old->overflow, sizeof of->overflow);