1 /* PSPP - a program for statistical analysis.
2 Copyright (C) 2009, 2010, 2011, 2013, 2014, 2016 Free Software Foundation, Inc.
4 This program is free software: you can redistribute it and/or modify
5 it under the terms of the GNU General Public License as published by
6 the Free Software Foundation, either version 3 of the License, or
7 (at your option) any later version.
9 This program is distributed in the hope that it will be useful,
10 but WITHOUT ANY WARRANTY; without even the implied warranty of
11 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 GNU General Public License for more details.
14 You should have received a copy of the GNU General Public License
15 along with this program. If not, see <http://www.gnu.org/licenses/>. */
25 #include "libpspp/assertion.h"
26 #include "libpspp/hash-functions.h"
27 #include "libpspp/hmap.h"
28 #include "libpspp/pool.h"
29 #include "output/pivot-output.h"
30 #include "output/pivot-table.h"
31 #include "output/render.h"
32 #include "output/table-item.h"
33 #include "output/table.h"
35 #include "gl/minmax.h"
36 #include "gl/xalloc.h"
39 #define _(msgid) gettext (msgid)
41 /* This file uses TABLE_HORZ and TABLE_VERT enough to warrant abbreviating. */
45 /* A layout for rendering a specific table on a specific device.
47 May represent the layout of an entire table presented to
48 render_page_create(), or a rectangular subregion of a table broken out using
49 render_break_next() to allow a table to be broken across multiple pages.
51 A page's size is not limited to the size passed in as part of render_params.
52 render_pager breaks a render_page into smaller render_pages that will fit in
53 the available space. */
56 const struct render_params *params; /* Parameters of the target device. */
57 struct table *table; /* Table rendered. */
60 /* Region of 'table' to render.
62 The horizontal cells rendered are the leftmost h[H][0], then
63 r[H][0] through r[H][1], exclusive, then the rightmost h[H][1].
65 The vertical cells rendered are the topmost h[V][0], then r[V][0]
66 through r[V][1], exclusive, then the bottommost h[V][1].
68 n[H] = h[H][0] + (r[H][1] - r[H][0]) + h[H][1]
69 n[V] = h[V][0] + (r[V][1] - r[V][0]) + h[V][1]
71 int h[TABLE_N_AXES][2];
72 int r[TABLE_N_AXES][2];
77 cp[H] represents x positions within the table.
79 cp[H][1] = the width of the leftmost vertical rule.
80 cp[H][2] = cp[H][1] + the width of the leftmost column.
81 cp[H][3] = cp[H][2] + the width of the second-from-left vertical rule.
83 cp[H][2 * n[H]] = x position of the rightmost vertical rule.
84 cp[H][2 * n[H] + 1] = total table width including all rules.
86 Similarly, cp[V] represents y positions within the table.
88 cp[V][1] = the height of the topmost horizontal rule.
89 cp[V][2] = cp[V][1] + the height of the topmost row.
90 cp[V][3] = cp[V][2] + the height of the second-from-top horizontal rule.
92 cp[V][2 * n[V]] = y position of the bottommost horizontal rule.
93 cp[V][2 * n[V] + 1] = total table height including all rules.
95 Rules and columns can have width or height 0, in which case consecutive
96 values in this array are equal. */
97 int *cp[TABLE_N_AXES];
99 /* render_break_next() can break a table such that some cells are not fully
100 contained within a render_page. This will happen if a cell is too wide
101 or two tall to fit on a single page, or if a cell spans multiple rows or
102 columns and the page only includes some of those rows or columns.
104 This hash table contains "struct render_overflow"s that represents each
105 such cell that doesn't completely fit on this page.
107 Each overflow cell borders at least one header edge of the table and may
108 border more. (A single table cell that is so large that it fills the
109 entire page can overflow on all four sides!) */
110 struct hmap overflows;
112 /* If a single column (or row) is too wide (or tall) to fit on a page
113 reasonably, then render_break_next() will split a single row or column
114 across multiple render_pages. This member indicates when this has
117 is_edge_cutoff[H][0] is true if pixels have been cut off the left side
118 of the leftmost column in this page, and false otherwise.
120 is_edge_cutoff[H][1] is true if pixels have been cut off the right side
121 of the rightmost column in this page, and false otherwise.
123 is_edge_cutoff[V][0] and is_edge_cutoff[V][1] are similar for the top
124 and bottom of the table.
126 The effect of is_edge_cutoff is to prevent rules along the edge in
127 question from being rendered.
129 When is_edge_cutoff is true for a given edge, the 'overflows' hmap will
130 contain a node for each cell along that edge. */
131 bool is_edge_cutoff[TABLE_N_AXES][2];
133 /* If part of a joined cell would be cut off by breaking a table along
134 'axis' at the rule with offset 'z' (where 0 <= z <= n[axis]), then
135 join_crossing[axis][z] is the thickness of the rule that would be cut
138 This is used to know to allocate extra space for breaking at such a
139 position, so that part of the cell's content is not lost.
141 This affects breaking a table only when headers are present. When
142 headers are not present, the rule's thickness is used for cell content,
143 so no part of the cell's content is lost (and in fact it is duplicated
144 across both pages). */
145 int *join_crossing[TABLE_N_AXES];
148 static struct render_page *render_page_create (const struct render_params *,
149 struct table *, int min_width);
151 struct render_page *render_page_ref (const struct render_page *page_);
152 static void render_page_unref (struct render_page *);
/* Returns the offset in struct render_page's cp[axis] array of the rule with
   index RULE_IDX.  That is, if RULE_IDX is 0, then the offset is that of the
   leftmost or topmost rule; if RULE_IDX is 1, then the offset is that of the
   next rule to the right (or below); and so on.

   Rules live at even offsets in cp[]; cells live at the odd offsets. */
static int
rule_ofs (int rule_idx)
{
  return rule_idx * 2;
}
164 /* Returns the offset in struct render_page's cp[axis] array of the rule with
165 index RULE_IDX_R, which counts from the right side (or bottom) of the page
166 left (or up), according to whether AXIS is H or V, respectively. That is,
167 if RULE_IDX_R is 0, then the offset is that of the rightmost or bottommost
168 rule; if RULE_IDX is 1, then the offset is that of the next rule to the left
169 (or above); and so on. */
171 rule_ofs_r (const struct render_page *page, int axis, int rule_idx_r)
173 return (page->n[axis] - rule_idx_r) * 2;
/* Returns the offset in struct render_page's cp[axis] array of the cell with
   index CELL_IDX.  That is, if CELL_IDX is 0, then the offset is that of the
   leftmost or topmost cell; if CELL_IDX is 1, then the offset is that of the
   next cell to the right (or below); and so on.

   Cells live at odd offsets in cp[]; rules live at the even offsets. */
static int
cell_ofs (int cell_idx)
{
  return cell_idx * 2 + 1;
}
186 /* Returns the width of PAGE along AXIS from OFS0 to OFS1, exclusive. */
188 axis_width (const struct render_page *page, int axis, int ofs0, int ofs1)
190 return page->cp[axis][ofs1] - page->cp[axis][ofs0];
193 /* Returns the total width of PAGE along AXIS. */
195 table_width (const struct render_page *page, int axis)
197 return page->cp[axis][2 * page->n[axis] + 1];
200 /* Returns the width of the headers in PAGE along AXIS. */
202 headers_width (const struct render_page *page, int axis)
204 int h0 = page->h[axis][0];
205 int w0 = axis_width (page, axis, rule_ofs (0), cell_ofs (h0));
206 int n = page->n[axis];
207 int h1 = page->h[axis][1];
208 int w1 = axis_width (page, axis, rule_ofs_r (page, axis, h1), cell_ofs (n));
/* Returns the width of cell X along AXIS in PAGE, excluding the rules on
   either side of it. */
static int
cell_width (const struct render_page *page, int axis, int x)
{
  return axis_width (page, axis, cell_ofs (x), cell_ofs (x) + 1);
}
/* Returns the width of rule X along AXIS in PAGE, counting from the left or
   top edge. */
static int
rule_width (const struct render_page *page, int axis, int x)
{
  return axis_width (page, axis, rule_ofs (x), rule_ofs (x) + 1);
}
/* Returns the width of rule X along AXIS in PAGE, counting from the right or
   bottom edge. */
static int
rule_width_r (const struct render_page *page, int axis, int x)
{
  int ofs = rule_ofs_r (page, axis, x);
  return axis_width (page, axis, ofs, ofs + 1);
}
/* Returns the width of cells X0 through X1, exclusive, along AXIS in PAGE,
   including the interior rules between them but not the rules outside the
   span. */
static int
joined_width (const struct render_page *page, int axis, int x0, int x1)
{
  return axis_width (page, axis, cell_ofs (x0), cell_ofs (x1) - 1);
}
241 /* Returns the width of the widest cell, excluding headers, along AXIS in
244 max_cell_width (const struct render_page *page, int axis)
246 int n = page->n[axis];
247 int x0 = page->h[axis][0];
248 int x1 = n - page->h[axis][1];
251 for (int x = x0; x < x1; x++)
253 int w = cell_width (page, axis, x);
260 /* A cell that doesn't completely fit on the render_page. */
261 struct render_overflow
263 struct hmap_node node; /* In render_page's 'overflows' hmap. */
265 /* Occupied region of page.
267 d[H][0] is the leftmost column.
268 d[H][1] is the rightmost column, plus 1.
269 d[V][0] is the top row.
270 d[V][1] is the bottom row, plus 1.
272 The cell in its original table might occupy a larger region. This
273 member reflects the size of the cell in the current render_page, after
274 trimming off any rows or columns due to page-breaking. */
277 /* The space that has been trimmed off the cell:
279 overflow[H][0]: space trimmed off its left side.
280 overflow[H][1]: space trimmed off its right side.
281 overflow[V][0]: space trimmed off its top.
282 overflow[V][1]: space trimmed off its bottom.
284 During rendering, this information is used to position the rendered
285 portion of the cell within the available space.
287 When a cell is rendered, sometimes it is permitted to spill over into
288 space that is ordinarily reserved for rules. Either way, this space is
289 still included in overflow values.
291 Suppose, for example, that a cell that joins 2 columns has a width of 60
292 pixels and content "abcdef", that the 2 columns that it joins have
293 widths of 20 and 30 pixels, respectively, and that therefore the rule
294 between the two joined columns has a width of 10 (20 + 10 + 30 = 60).
295 It might render like this, if each character is 10x10, and showing a few
296 extra table cells for context:
304 If this render_page is broken at the rule that separates "gh" from
305 "ijk", then the page that contains the left side of the "abcdef" cell
306 will have overflow[H][1] of 10 + 30 = 40 for its portion of the cell,
307 and the page that contains the right side of the cell will have
308 overflow[H][0] of 20 + 10 = 30. The two resulting pages would look like
326 int overflow[TABLE_N_AXES][2];
/* Returns a hash value for the cell at position (X, Y). */
static unsigned int
hash_cell (int x, int y)
{
  /* Pack both coordinates into one int; cell indices are far below 2**16 in
     practice, so collisions from the shift are not a concern. */
  return hash_int (x + (y << 16), 0);
}
336 /* Searches PAGE's set of render_overflow for one whose top-left cell is
337 (X,Y). Returns it, if there is one, otherwise a null pointer. */
338 static const struct render_overflow *
339 find_overflow (const struct render_page *page, int x, int y)
341 if (!hmap_is_empty (&page->overflows))
343 const struct render_overflow *of;
345 HMAP_FOR_EACH_WITH_HASH (of, struct render_overflow, node,
346 hash_cell (x, y), &page->overflows)
347 if (x == of->d[H] && y == of->d[V])
/* Row or column dimensions.  Used to figure the size of a table in
   render_page_create() and discarded after that. */
struct render_row
  {
    /* Width without considering rows (or columns) that span more than one row
       (or column). */
    int unspanned;

    /* Width taking spanned rows (or columns) into consideration. */
    int width;
  };
366 /* Modifies the 'width' members of the N elements of ROWS so that their sum,
367 when added to rule widths RULES[1] through RULES[N - 1] inclusive, is at
370 distribute_spanned_width (int width,
371 struct render_row *rows, const int *rules, int n)
373 /* Sum up the unspanned widths of the N rows for use as weights. */
374 int total_unspanned = 0;
375 for (int x = 0; x < n; x++)
376 total_unspanned += rows[x].unspanned;
377 for (int x = 0; x < n - 1; x++)
378 total_unspanned += rules[x + 1];
379 if (total_unspanned >= width)
382 /* The algorithm used here is based on the following description from HTML 4:
384 For cells that span multiple columns, a simple approach consists of
385 apportioning the min/max widths evenly to each of the constituent
386 columns. A slightly more complex approach is to use the min/max
387 widths of unspanned cells to weight how spanned widths are
388 apportioned. Experiments suggest that a blend of the two approaches
389 gives good results for a wide range of tables.
391 We blend the two approaches half-and-half, except that we cannot use the
392 unspanned weights when 'total_unspanned' is 0 (because that would cause a
395 The calculation we want to do is this:
398 w1 = width * (column's unspanned width) / (total unspanned width)
399 (column's width) = (w0 + w1) / 2
401 We implement it as a precise calculation in integers by multiplying w0 and
402 w1 by the common denominator of all three calculations (d), dividing that
403 out in the column width calculation, and then keeping the remainder for
406 (We actually compute the unspanned width of a column as twice the
407 unspanned width, plus the width of the rule on the left, plus the width of
408 the rule on the right. That way each rule contributes to both the cell on
409 its left and on its right.)
411 long long int d0 = n;
412 long long int d1 = 2LL * MAX (total_unspanned, 1);
413 long long int d = d0 * d1;
414 if (total_unspanned > 0)
416 long long int w = d / 2;
417 for (int x = 0; x < n; x++)
420 if (total_unspanned > 0)
422 long long int unspanned = rows[x].unspanned * 2LL;
424 unspanned += rules[x + 1];
426 unspanned += rules[x];
427 w += width * unspanned * d0;
430 rows[x].width = MAX (rows[x].width, w / d);
431 w -= rows[x].width * d;
435 /* Initializes PAGE->cp[AXIS] from the row widths in ROWS and the rule widths
438 accumulate_row_widths (const struct render_page *page, enum table_axis axis,
439 const struct render_row *rows, const int *rules)
441 int n = page->n[axis];
442 int *cp = page->cp[axis];
444 for (int z = 0; z < n; z++)
446 cp[1] = cp[0] + rules[z];
447 cp[2] = cp[1] + rows[z].width;
450 cp[1] = cp[0] + rules[n];
453 /* Returns the sum of widths of the N ROWS and N+1 RULES. */
455 calculate_table_width (int n, const struct render_row *rows, int *rules)
458 for (int x = 0; x < n; x++)
459 width += rows[x].width;
460 for (int x = 0; x <= n; x++)
466 /* Rendering utility functions. */
468 /* Returns the line style to use for drawing a rule of the given TYPE. */
469 static enum render_line_style
470 rule_to_render_type (unsigned char type)
474 case TABLE_STROKE_NONE:
475 return RENDER_LINE_NONE;
476 case TABLE_STROKE_SOLID:
477 return RENDER_LINE_SINGLE;
478 case TABLE_STROKE_DASHED:
479 return RENDER_LINE_DASHED;
480 case TABLE_STROKE_THICK:
481 return RENDER_LINE_THICK;
482 case TABLE_STROKE_THIN:
483 return RENDER_LINE_THIN;
484 case TABLE_STROKE_DOUBLE:
485 return RENDER_LINE_DOUBLE;
491 /* Returns the width of the rule in TABLE that is at offset Z along axis A, if
492 rendered with PARAMS. */
494 measure_rule (const struct render_params *params, const struct table *table,
495 enum table_axis a, int z)
497 enum table_axis b = !a;
499 /* Determine all types of rules that are present, as a bitmap in 'rules'
500 where rule type 't' is present if bit 2**t is set. */
501 struct cell_color color;
502 unsigned int rules = 0;
505 for (d[b] = 0; d[b] < table->n[b]; d[b]++)
506 rules |= 1u << table_get_rule (table, a, d[H], d[V], &color);
508 /* Turn off TABLE_STROKE_NONE because it has width 0 and we needn't bother.
509 However, if the device doesn't support margins, make sure that there is at
510 least a small gap between cells (but we don't need any at the left or
511 right edge of the table). */
512 if (rules & (1u << TABLE_STROKE_NONE))
514 rules &= ~(1u << TABLE_STROKE_NONE);
515 if (z > 0 && z < table->n[a] && !params->supports_margins && a == H)
516 rules |= 1u << TABLE_STROKE_SOLID;
519 /* Calculate maximum width of the rules that are present. */
521 for (size_t i = 0; i < TABLE_N_STROKES; i++)
522 if (rules & (1u << i))
523 width = MAX (width, params->line_widths[rule_to_render_type (i)]);
527 /* Allocates and returns a new render_page using PARAMS and TABLE. Allocates
528 space for rendering a table with dimensions given in N. The caller must
529 initialize most of the members itself. */
530 static struct render_page *
531 render_page_allocate__ (const struct render_params *params,
532 struct table *table, int n[TABLE_N_AXES])
534 struct render_page *page = xmalloc (sizeof *page);
535 page->params = params;
541 for (int i = 0; i < TABLE_N_AXES; i++)
543 page->cp[i] = xmalloc ((2 * n[i] + 2) * sizeof *page->cp[i]);
544 page->join_crossing[i] = xzalloc ((n[i] + 1)
545 * sizeof *page->join_crossing[i]);
548 hmap_init (&page->overflows);
549 memset (page->is_edge_cutoff, 0, sizeof page->is_edge_cutoff);
554 /* Allocates and returns a new render_page using PARAMS and TABLE. Allocates
555 space for all of the members of the new page, but the caller must initialize
556 the 'cp' member itself. */
557 static struct render_page *
558 render_page_allocate (const struct render_params *params, struct table *table)
560 struct render_page *page = render_page_allocate__ (params, table, table->n);
561 for (enum table_axis a = 0; a < TABLE_N_AXES; a++)
563 page->h[a][0] = table->h[a][0];
564 page->h[a][1] = table->h[a][1];
565 page->r[a][0] = table->h[a][0];
566 page->r[a][1] = table->n[a] - table->h[a][1];
571 /* Allocates and returns a new render_page for PARAMS and TABLE, initializing
572 cp[H] in the new page from ROWS and RULES. The caller must still initialize
574 static struct render_page *
575 create_page_with_exact_widths (const struct render_params *params,
577 const struct render_row *rows, int *rules)
579 struct render_page *page = render_page_allocate (params, table);
580 accumulate_row_widths (page, H, rows, rules);
584 /* Allocates and returns a new render_page for PARAMS and TABLE.
586 Initializes cp[H] in the new page by setting the width of each row 'i' to
587 somewhere between the minimum cell width ROW_MIN[i].width and the maximum
588 ROW_MAX[i].width. Sets the width of rules to those in RULES.
590 W_MIN is the sum of ROWS_MIN[].width.
592 W_MAX is the sum of ROWS_MAX[].width.
594 The caller must still initialize cp[V]. */
595 static struct render_page *
596 create_page_with_interpolated_widths (const struct render_params *params,
598 const struct render_row *rows_min,
599 const struct render_row *rows_max,
600 int w_min, int w_max, const int *rules)
602 const int n = table->n[H];
603 const long long int avail = params->size[H] - w_min;
604 const long long int wanted = w_max - w_min;
608 struct render_page *page = render_page_allocate (params, table);
610 int *cph = page->cp[H];
612 long long int w = wanted / 2;
613 for (int x = 0; x < n; x++)
615 w += avail * (rows_max[x].width - rows_min[x].width);
616 int extra = w / wanted;
619 cph[1] = cph[0] + rules[x];
620 cph[2] = cph[1] + rows_min[x].width + extra;
623 cph[1] = cph[0] + rules[n];
625 assert (page->cp[H][n * 2 + 1] == params->size[H]);
630 set_join_crossings (struct render_page *page, enum table_axis axis,
631 const struct table_cell *cell, int *rules)
633 for (int z = cell->d[axis][0] + 1; z <= cell->d[axis][1] - 1; z++)
634 page->join_crossing[axis][z] = rules[z];
/* Maps a contiguous range of cells from a page to the underlying table along
   the horizontal or vertical dimension. */
struct map
  {
    int p0;                     /* First ordinate in the page. */
    int t0;                     /* First ordinate in the table. */
    int n;                      /* Number of ordinates in page and table. */
  };
646 /* Initializes M to a mapping from PAGE to PAGE->table along axis A. The
647 mapping includes ordinate Z (in PAGE). */
649 get_map (const struct render_page *page, enum table_axis a, int z,
652 if (z < page->h[a][0])
656 m->n = page->h[a][0];
658 else if (z < page->n[a] - page->h[a][1])
660 m->p0 = page->h[a][0];
661 m->t0 = page->r[a][0];
662 m->n = page->r[a][1] - page->r[a][0];
666 m->p0 = page->n[a] - page->h[a][1];
667 m->t0 = page->table->n[a] - page->table->h[a][1];
668 m->n = page->h[a][1];
672 /* Initializes CELL with the contents of the table cell at column X and row Y
673 within PAGE. When CELL is no longer needed, the caller is responsible for
674 freeing it by calling table_cell_free(CELL).
676 The caller must ensure that CELL is destroyed before TABLE is unref'ed.
678 This is equivalent to table_get_cell(), except X and Y are in terms of the
679 page's rows and columns rather than the underlying table's. */
681 render_get_cell (const struct render_page *page, int x, int y,
682 struct table_cell *cell)
684 int d[TABLE_N_AXES] = { [H] = x, [V] = y };
685 struct map map[TABLE_N_AXES];
687 for (enum table_axis a = 0; a < TABLE_N_AXES; a++)
689 struct map *m = &map[a];
690 get_map (page, a, d[a], m);
691 d[a] += m->t0 - m->p0;
693 table_get_cell (page->table, d[H], d[V], cell);
695 for (enum table_axis a = 0; a < TABLE_N_AXES; a++)
697 struct map *m = &map[a];
699 for (int i = 0; i < 2; i++)
700 cell->d[a][i] -= m->t0 - m->p0;
701 cell->d[a][0] = MAX (cell->d[a][0], m->p0);
702 cell->d[a][1] = MIN (cell->d[a][1], m->p0 + m->n);
706 /* Creates and returns a new render_page for rendering TABLE on a device
709 The new render_page will be suitable for rendering on a device whose page
710 size is PARAMS->size, but the caller is responsible for actually breaking it
711 up to fit on such a device, using the render_break abstraction. */
712 static struct render_page *
713 render_page_create (const struct render_params *params, struct table *table,
718 int nc = table->n[H];
719 int nr = table->n[V];
721 /* Figure out rule widths. */
722 int *rules[TABLE_N_AXES];
723 for (enum table_axis axis = 0; axis < TABLE_N_AXES; axis++)
725 int n = table->n[axis] + 1;
727 rules[axis] = xnmalloc (n, sizeof *rules);
728 for (int z = 0; z < n; z++)
729 rules[axis][z] = measure_rule (params, table, axis, z);
732 /* Calculate minimum and maximum widths of cells that do not
733 span multiple columns. */
734 struct render_row *columns[2];
735 for (int i = 0; i < 2; i++)
736 columns[i] = xzalloc (nc * sizeof *columns[i]);
737 for (int y = 0; y < nr; y++)
738 for (int x = 0; x < nc;)
740 struct table_cell cell;
742 table_get_cell (table, x, y, &cell);
743 if (y == cell.d[V][0])
745 if (table_cell_colspan (&cell) == 1)
748 params->ops->measure_cell_width (params->aux, &cell,
750 for (int i = 0; i < 2; i++)
751 if (columns[i][x].unspanned < w[i])
752 columns[i][x].unspanned = w[i];
758 /* Distribute widths of spanned columns. */
759 for (int i = 0; i < 2; i++)
760 for (int x = 0; x < nc; x++)
761 columns[i][x].width = columns[i][x].unspanned;
762 for (int y = 0; y < nr; y++)
763 for (int x = 0; x < nc;)
765 struct table_cell cell;
767 table_get_cell (table, x, y, &cell);
768 if (y == cell.d[V][0] && table_cell_colspan (&cell) > 1)
772 params->ops->measure_cell_width (params->aux, &cell,
774 for (int i = 0; i < 2; i++)
775 distribute_spanned_width (w[i], &columns[i][cell.d[H][0]],
776 rules[H], table_cell_colspan (&cell));
781 for (int i = 0; i < 2; i++)
782 distribute_spanned_width (min_width, &columns[i][0], rules[H], nc);
784 /* In pathological cases, spans can cause the minimum width of a column to
785 exceed the maximum width. This bollixes our interpolation algorithm
786 later, so fix it up. */
787 for (int i = 0; i < nc; i++)
788 if (columns[MIN][i].width > columns[MAX][i].width)
789 columns[MAX][i].width = columns[MIN][i].width;
791 /* Decide final column widths. */
793 for (int i = 0; i < 2; i++)
794 table_widths[i] = calculate_table_width (table->n[H],
795 columns[i], rules[H]);
797 struct render_page *page;
798 if (table_widths[MAX] <= params->size[H])
800 /* Fits even with maximum widths. Use them. */
801 page = create_page_with_exact_widths (params, table, columns[MAX],
804 else if (table_widths[MIN] <= params->size[H])
806 /* Fits with minimum widths, so distribute the leftover space. */
807 page = create_page_with_interpolated_widths (
808 params, table, columns[MIN], columns[MAX],
809 table_widths[MIN], table_widths[MAX], rules[H]);
813 /* Doesn't fit even with minimum widths. Assign minimums for now, and
814 later we can break it horizontally into multiple pages. */
815 page = create_page_with_exact_widths (params, table, columns[MIN],
819 /* Calculate heights of cells that do not span multiple rows. */
820 struct render_row *rows = xzalloc (nr * sizeof *rows);
821 for (int y = 0; y < nr; y++)
822 for (int x = 0; x < nc;)
824 struct render_row *r = &rows[y];
825 struct table_cell cell;
827 render_get_cell (page, x, y, &cell);
828 if (y == cell.d[V][0])
830 if (table_cell_rowspan (&cell) == 1)
832 int w = joined_width (page, H, cell.d[H][0], cell.d[H][1]);
833 int h = params->ops->measure_cell_height (params->aux,
835 if (h > r->unspanned)
836 r->unspanned = r->width = h;
839 set_join_crossings (page, V, &cell, rules[V]);
841 if (table_cell_colspan (&cell) > 1)
842 set_join_crossings (page, H, &cell, rules[H]);
846 for (int i = 0; i < 2; i++)
849 /* Distribute heights of spanned rows. */
850 for (int y = 0; y < nr; y++)
851 for (int x = 0; x < nc;)
853 struct table_cell cell;
855 render_get_cell (page, x, y, &cell);
856 if (y == cell.d[V][0] && table_cell_rowspan (&cell) > 1)
858 int w = joined_width (page, H, cell.d[H][0], cell.d[H][1]);
859 int h = params->ops->measure_cell_height (params->aux, &cell, w);
860 distribute_spanned_width (h, &rows[cell.d[V][0]], rules[V],
861 table_cell_rowspan (&cell));
866 /* Decide final row heights. */
867 accumulate_row_widths (page, V, rows, rules[V]);
870 /* Measure headers. If they are "too big", get rid of them. */
871 for (enum table_axis axis = 0; axis < TABLE_N_AXES; axis++)
873 int hw = headers_width (page, axis);
874 if (hw * 2 >= page->params->size[axis]
875 || hw + max_cell_width (page, axis) > page->params->size[axis])
877 page->h[axis][0] = page->h[axis][1] = 0;
878 page->r[axis][0] = 0;
879 page->r[axis][1] = page->n[axis];
889 /* Increases PAGE's reference count. */
891 render_page_ref (const struct render_page *page_)
893 struct render_page *page = CONST_CAST (struct render_page *, page_);
898 /* Decreases PAGE's reference count and destroys PAGE if this causes the
899 reference count to fall to zero. */
901 render_page_unref (struct render_page *page)
903 if (page != NULL && --page->ref_cnt == 0)
905 struct render_overflow *overflow, *next;
906 HMAP_FOR_EACH_SAFE (overflow, next, struct render_overflow, node,
909 hmap_destroy (&page->overflows);
911 table_unref (page->table);
913 for (int i = 0; i < TABLE_N_AXES; ++i)
915 free (page->join_crossing[i]);
923 /* Returns the size of PAGE along AXIS. (This might be larger than the page
924 size specified in the parameters passed to render_page_create(). Use a
925 render_break to break up a render_page into page-sized chunks.) */
927 render_page_get_size (const struct render_page *page, enum table_axis axis)
929 return page->cp[axis][page->n[axis] * 2 + 1];
933 render_page_get_best_breakpoint (const struct render_page *page, int height)
935 /* If there's no room for at least the top row and the rules above and below
936 it, don't include any of the table. */
937 if (page->cp[V][3] > height)
940 /* Otherwise include as many rows and rules as we can. */
941 for (int y = 5; y <= 2 * page->n[V] + 1; y += 2)
942 if (page->cp[V][y] > height)
943 return page->cp[V][y - 2];
947 /* Drawing render_pages. */
949 /* This is like table_get_rule() except:
951 - D is in terms of the page's rows and column rather than the underlying
954 - The result is in the form of a render_line_style. */
955 static enum render_line_style
956 get_rule (const struct render_page *page, enum table_axis axis,
957 const int d_[TABLE_N_AXES], struct cell_color *color)
959 int d[TABLE_N_AXES] = { d_[0] / 2, d_[1] / 2 };
962 enum table_axis a = axis;
963 if (d[a] < page->h[a][0])
965 else if (d[a] <= page->n[a] - page->h[a][1])
967 if (page->h[a][0] && d[a] == page->h[a][0])
969 else if (page->h[a][1] && d[a] == page->n[a] - page->h[a][1])
970 d2 = page->table->n[a] - page->h[a][1];
971 d[a] += page->r[a][0] - page->h[a][0];
974 d[a] += ((page->table->n[a] - page->table->h[a][1])
975 - (page->n[a] - page->h[a][1]));
977 enum table_axis b = !axis;
979 get_map (page, b, d[b], &m);
982 int r = table_get_rule (page->table, axis, d[H], d[V], color);
986 int r2 = table_get_rule (page->table, axis, d[H], d[V], color);
987 r = table_stroke_combine (r, r2);
989 return rule_to_render_type (r);
/* Returns true if the current locale renders output right-to-left, as
   determined by a specially translatable marker string. */
static bool
render_direction_rtl (void)
{
  /* TRANSLATORS: Do not translate this string.  If the script of your language
     reads from right to left (eg Persian, Arabic, Hebrew etc), then replace
     this string with "output-direction-rtl".  Otherwise either leave it
     untranslated or copy it verbatim. */
  const char *dir = _("output-direction-ltr");
  if (0 == strcmp ("output-direction-rtl", dir))
    return true;

  if (0 != strcmp ("output-direction-ltr", dir))
    fprintf (stderr, "This localisation has been incorrectly translated. "
             "Complain to the translator.\n");

  return false;
}
1017 render_rule (const struct render_page *page, const int ofs[TABLE_N_AXES],
1018 const int d[TABLE_N_AXES])
1020 enum render_line_style styles[TABLE_N_AXES][2];
1021 struct cell_color colors[TABLE_N_AXES][2];
1023 for (enum table_axis a = 0; a < TABLE_N_AXES; a++)
1025 enum table_axis b = !a;
1027 styles[a][0] = styles[a][1] = RENDER_LINE_NONE;
1030 || (page->is_edge_cutoff[a][0] && d[a] == 0)
1031 || (page->is_edge_cutoff[a][1] && d[a] == page->n[a] * 2))
1038 int e[TABLE_N_AXES];
1042 styles[a][0] = get_rule (page, a, e, &colors[a][0]);
1045 if (d[b] / 2 < page->n[b])
1046 styles[a][1] = get_rule (page, a, d, &colors[a][1]);
1050 styles[a][0] = styles[a][1] = get_rule (page, a, d, &colors[a][0]);
1051 colors[a][1] = colors[a][0];
1055 if (styles[H][0] != RENDER_LINE_NONE || styles[H][1] != RENDER_LINE_NONE
1056 || styles[V][0] != RENDER_LINE_NONE || styles[V][1] != RENDER_LINE_NONE)
1058 int bb[TABLE_N_AXES][2];
1060 bb[H][0] = ofs[H] + page->cp[H][d[H]];
1061 bb[H][1] = ofs[H] + page->cp[H][d[H] + 1];
1062 if (page->params->rtl)
1064 int temp = bb[H][0];
1065 bb[H][0] = render_page_get_size (page, H) - bb[H][1];
1066 bb[H][1] = render_page_get_size (page, H) - temp;
1068 bb[V][0] = ofs[V] + page->cp[V][d[V]];
1069 bb[V][1] = ofs[V] + page->cp[V][d[V] + 1];
1070 page->params->ops->draw_line (page->params->aux, bb, styles, colors);
/* Draws CELL from PAGE at pixel offset OFS by computing the cell's bounding
   box 'bb' and clip region 'clip' and then invoking the device's draw_cell
   callback.  Handles right-to-left mirroring, vertical alignment, and
   overflow from cells broken across pages. */
1075 render_cell (const struct render_page *page, const int ofs[TABLE_N_AXES],
1076 const struct table_cell *cell)
1078 int bb[TABLE_N_AXES][2];
1079 int clip[TABLE_N_AXES][2];
/* Horizontal extent in 'cp' coordinates: from just inside the cell's left
   rule to just before its right rule (odd indexes are cells, even indexes
   are rules). */
1081 bb[H][0] = clip[H][0] = ofs[H] + page->cp[H][cell->d[H][0] * 2 + 1];
1082 bb[H][1] = clip[H][1] = ofs[H] + page->cp[H][cell->d[H][1] * 2];
/* In right-to-left mode, mirror the horizontal coordinates about the page
   width. */
1083 if (page->params->rtl)
1085 int temp = bb[H][0];
1086 bb[H][0] = clip[H][0] = render_page_get_size (page, H) - bb[H][1];
1087 bb[H][1] = clip[H][1] = render_page_get_size (page, H) - temp;
1089 bb[V][0] = clip[V][0] = ofs[V] + page->cp[V][cell->d[V][0] * 2 + 1];
1090 bb[V][1] = clip[V][1] = ofs[V] + page->cp[V][cell->d[V][1] * 2];
/* For non-top vertical alignment, measure the rendered height and shift the
   content down by a share of the leftover space (full for bottom; the
   halving for center is in elided lines -- confirm against full source). */
1092 enum table_valign valign = cell->cell_style->valign;
1093 int valign_offset = 0;
1094 if (valign != TABLE_VALIGN_TOP)
1096 int height = page->params->ops->measure_cell_height (
1097 page->params->aux, cell, bb[H][1] - bb[H][0]);
1098 int extra = bb[V][1] - bb[V][0] - height;
1101 if (valign == TABLE_VALIGN_CENTER)
1103 valign_offset += extra;
/* Expand 'bb' (and sometimes 'clip') for content that overflows this cell
   because the cell was split across page boundaries. */
1107 const struct render_overflow *of = find_overflow (
1108 page, cell->d[H][0], cell->d[V][0]);
1110 for (enum table_axis axis = 0; axis < TABLE_N_AXES; axis++)
1112 if (of->overflow[axis][0])
1114 bb[axis][0] -= of->overflow[axis][0];
1115 if (cell->d[axis][0] == 0 && !page->is_edge_cutoff[axis][0])
1116 clip[axis][0] = ofs[axis] + page->cp[axis][cell->d[axis][0] * 2];
1118 if (of->overflow[axis][1])
1120 bb[axis][1] += of->overflow[axis][1];
1121 if (cell->d[axis][1] == page->n[axis]
1122 && !page->is_edge_cutoff[axis][1])
1123 clip[axis][1] = ofs[axis] + page->cp[axis][cell->d[axis][1] * 2
/* Let the cell's background spill halfway into each surrounding rule. */
1128 int spill[TABLE_N_AXES][2];
1129 for (enum table_axis axis = 0; axis < TABLE_N_AXES; axis++)
1131 spill[axis][0] = rule_width (page, axis, cell->d[axis][0]) / 2;
1132 spill[axis][1] = rule_width (page, axis, cell->d[axis][1]) / 2;
/* Pick a color index: header rows get one value, body rows alternate by row
   parity.  NOTE(review): part of this conditional is elided in this view --
   confirm against the full source. */
1135 int color_idx = (cell->d[V][0] < page->h[V][0]
1136 || page->n[V] - (cell->d[V][0] + 1) < page->h[V][1]
1138 : (cell->d[V][0] - page->h[V][0]) & 1);
1139 page->params->ops->draw_cell (page->params->aux, cell, color_idx,
1140 bb, valign_offset, spill, clip);
1143 /* Draws the cells of PAGE indicated in BB. */
1145 render_page_draw_cells (const struct render_page *page,
1146 int ofs[TABLE_N_AXES], int bb[TABLE_N_AXES][2])
/* First pass: draw the cells.  BB is in 'cp' coordinates, where even values
   denote rules and odd values denote cells. */
1148 for (int y = bb[V][0]; y < bb[V][1]; y++)
1149 for (int x = bb[H][0]; x < bb[H][1];)
1150 if (!is_rule (x) && !is_rule (y))
1152 struct table_cell cell;
1154 render_get_cell (page, x / 2, y / 2, &cell);
/* Draw a spanned cell only once: on the cell's own first row, or on the
   first row that falls within BB. */
1155 if (y / 2 == bb[V][0] / 2 || y / 2 == cell.d[V][0])
1156 render_cell (page, ofs, &cell);
/* Skip past the cell's full horizontal span. */
1157 x = rule_ofs (cell.d[H][1]);
/* Second pass: draw the rules.  NOTE(review): the loop body that fills in
   'd' before the render_rule call is elided in this view. */
1162 for (int y = bb[V][0]; y < bb[V][1]; y++)
1163 for (int x = bb[H][0]; x < bb[H][1]; x++)
1164 if (is_rule (x) || is_rule (y))
1166 int d[TABLE_N_AXES];
1169 render_rule (page, ofs, d);
1173 /* Renders PAGE, by calling the 'draw_line' and 'draw_cell' functions from the
1174 render_params provided to render_page_create(). */
1176 render_page_draw (const struct render_page *page, int ofs[TABLE_N_AXES])
1178 int bb[TABLE_N_AXES][2];
/* Cover the whole page: n cells plus n+1 rules along each axis gives
   2n + 1 positions in 'cp' coordinates. */
1181 bb[H][1] = page->n[H] * 2 + 1;
1183 bb[V][1] = page->n[V] * 2 + 1;
1185 render_page_draw_cells (page, ofs, bb);
1188 /* Returns the greatest value i, 0 <= i < n, such that cp[i] <= x0. */
/* Binary search; assumes CP[] is sorted in nondecreasing order.  NOTE: the
   search initialization and the branch bodies are elided in this view. */
1190 get_clip_min_extent (int x0, const int cp[], int n)
1197 int middle = low + (high - low) / 2;
1199 if (cp[middle] <= x0)
1211 /* Returns the least value i, 0 <= i < n, such that cp[i] >= x1. */
/* Binary search; assumes CP[] is sorted in nondecreasing order.  NOTE: the
   search initialization and parts of the loop are elided in this view. */
1213 get_clip_max_extent (int x1, const int cp[], int n)
1220 int middle = low + (high - low) / 2;
1222 if (cp[middle] >= x1)
1223 best = high = middle;
/* Back up over zero-width entries so the result is the first of any run of
   equal values in CP[]. */
1228 while (best > 0 && cp[best - 1] == cp[best])
1234 /* Renders the cells of PAGE that intersect (X,Y)-(X+W,Y+H), by calling the
1235 'draw_line' and 'draw_cell' functions from the render_params provided to
1236 render_page_create(). */
1238 render_page_draw_region (const struct render_page *page,
1239 int ofs[TABLE_N_AXES], int clip[TABLE_N_AXES][2])
1241 int bb[TABLE_N_AXES][2];
/* Convert the pixel clip rectangle into a range of cell/rule indexes in
   'cp' coordinates, then draw everything in that range. */
1243 bb[H][0] = get_clip_min_extent (clip[H][0], page->cp[H], page->n[H] * 2 + 1);
1244 bb[H][1] = get_clip_max_extent (clip[H][1], page->cp[H], page->n[H] * 2 + 1);
1245 bb[V][0] = get_clip_min_extent (clip[V][0], page->cp[V], page->n[V] * 2 + 1);
1246 bb[V][1] = get_clip_max_extent (clip[V][1], page->cp[V], page->n[V] * 2 + 1);
1248 render_page_draw_cells (page, ofs, bb);
1251 /* Breaking up tables to fit on a page. */
1253 /* An iterator for breaking render_pages into smaller chunks. */
/* (The 'struct render_break {' opener is elided in this view.)  The break
   owns a reference to 'page'; render_break_destroy() releases it. */
1256 struct render_page *page; /* Page being broken up. */
1257 enum table_axis axis; /* Axis along which 'page' is being broken. */
1258 int z; /* Next cell along 'axis'. */
1259 int pixel; /* Pixel offset within cell 'z' (usually 0). */
1260 int hw; /* Width of headers of 'page' along 'axis'. */
/* Helpers defined further below. */
1263 static int needed_size (const struct render_break *, int cell);
1264 static bool cell_is_breakable (const struct render_break *, int cell);
1265 static struct render_page *render_page_select (const struct render_page *,
1270 /* Initializes render_break B for breaking PAGE along AXIS.
1271 Takes ownership of PAGE. */
1273 render_break_init (struct render_break *b, struct render_page *page,
1274 enum table_axis axis)
/* Start iteration at the first body cell, just past the leading headers. */
1278 b->z = page->h[axis][0];
1280 b->hw = headers_width (page, axis);
1283 /* Initializes B as a render_break structure for which
1284 render_break_has_next() always returns false. */
1286 render_break_init_empty (struct render_break *b)
/* render_break_has_next() is false whenever 'page' is null, so presumably
   the elided lines here clear 'page' -- confirm against the full source. */
1289 b->axis = TABLE_HORZ;
1295 /* Frees B and unrefs the render_page that it owns. */
1297 render_break_destroy (struct render_break *b)
/* Releases the reference taken by render_break_init(). */
1301 render_page_unref (b->page);
1306 /* Returns true if B still has cells that are yet to be returned,
1307 false if all of B's page has been processed. */
1309 render_break_has_next (const struct render_break *b)
1311 const struct render_page *page = b->page;
1312 enum table_axis axis = b->axis;
/* Done when there is no page at all, or when 'z' has advanced into the
   trailing headers. */
1314 return page != NULL && b->z < page->n[axis] - page->h[axis][1];
1317 /* Returns a new render_page that is up to SIZE pixels wide along B's axis.
1318 Returns a null pointer if B has already been completely broken up, or if
1319 SIZE is too small to reasonably render any cells. The latter will never
1320 happen if SIZE is at least as large as the page size passed to
1321 render_page_create() along B's axis. */
1322 static struct render_page *
1323 render_break_next (struct render_break *b, int size)
1325 const struct render_page *page = b->page;
1326 enum table_axis axis = b->axis;
1327 struct render_page *subpage;
1329 if (!render_break_has_next (b))
/* Scan forward from the current position, accumulating cells until adding
   one more would exceed SIZE.  NOTE(review): several loop-control and
   variable-declaration lines are elided in this view. */
1334 for (z = b->z; z < page->n[axis] - page->h[axis][1]; z++)
1336 int needed = needed_size (b, z + 1);
1339 if (cell_is_breakable (b, z))
1341 /* If there is no right header and we render a partial cell on
1342 the right side of the body, then we omit the rightmost rule of
1343 the body. Otherwise the rendering is deceptive because it
1344 looks like the whole cell is present instead of a partial
1347 This is similar to code for the left side in needed_size(). */
1348 int rule_allowance = (page->h[axis][1]
1350 : rule_width (page, axis, z));
1352 /* The amount that, if we added cell 'z', the rendering would
1353 overfill the allocated 'size'. */
1354 int overhang = needed - size - rule_allowance;
1356 /* The width of cell 'z'. */
1357 int cell_size = cell_width (page, axis, z);
1359 /* The amount trimmed off the left side of 'z',
1360 and the amount left to render. */
1361 int cell_ofs = z == b->z ? b->pixel : 0;
1362 int cell_left = cell_size - cell_ofs;
1364 /* A small but visible width. */
1365 int em = page->params->font_size[axis];
1367 /* If some of the cell remains to render,
1368 and there would still be some of the cell left afterward,
1369 then partially render that much of the cell. */
1370 pixel = (cell_left && cell_left > overhang
1371 ? cell_left - overhang + cell_ofs
1374 /* If there would be only a tiny amount of the cell left after
1375 rendering it partially, reduce the amount rendered slightly
1376 to make the output look a little better. */
1377 if (pixel + em > cell_size)
1378 pixel = MAX (pixel - em, 0);
1380 /* If we're breaking vertically, then consider whether the cells
1381 being broken have a better internal breakpoint than the exact
1382 number of pixels available, which might look bad e.g. because
1383 it breaks in the middle of a line of text. */
1384 if (axis == TABLE_VERT && page->params->ops->adjust_break)
1385 for (int x = 0; x < page->n[H];)
1387 struct table_cell cell;
1389 render_get_cell (page, x, z, &cell);
1390 int w = joined_width (page, H, cell.d[H][0], cell.d[H][1]);
/* Ask the device for a breakpoint, no larger than 'pixel', that falls on a
   natural boundary within this cell. */
1391 int better_pixel = page->params->ops->adjust_break (
1392 page->params->aux, &cell, w, pixel);
1395 if (better_pixel < pixel)
/* Accept the adjusted breakpoint only if it still makes progress past any
   part of the cell already rendered. */
1397 if (better_pixel > (z == b->z ? b->pixel : 0))
1399 pixel = better_pixel;
1402 else if (better_pixel == 0 && z != b->z)
/* Not even a partial cell fits; presumably the elided body returns NULL --
   confirm against the full source. */
1414 if (z == b->z && !pixel)
/* Extract the subpage covering cells b->z up to (and possibly partially
   including) 'z', then advance the iterator (advancement lines elided). */
1417 subpage = render_page_select (page, axis, b->z, b->pixel,
1419 pixel ? cell_width (page, axis, z) - pixel
1426 /* Returns the width that would be required along B's axis to render a page
1427 from B's current position up to but not including CELL. */
/* Helper for render_break_next(): sums headers, body, rules, and join
   crossings. */
1429 needed_size (const struct render_break *b, int cell)
1431 const struct render_page *page = b->page;
1432 enum table_axis axis = b->axis;
1434 /* Width of left header not including its rightmost rule. */
1435 int size = axis_width (page, axis, 0, rule_ofs (page->h[axis][0]));
1437 /* If we have a pixel offset and there is no left header, then we omit the
1438 leftmost rule of the body. Otherwise the rendering is deceptive because
1439 it looks like the whole cell is present instead of a partial cell.
1441 Otherwise (if there are headers) we will be merging two rules: the
1442 rightmost rule in the header and the leftmost rule in the body. We assume
1443 that the width of a merged rule is the larger of the widths of either rule
1445 if (b->pixel == 0 || page->h[axis][0])
1446 size += MAX (rule_width (page, axis, page->h[axis][0]),
1447 rule_width (page, axis, b->z));
1449 /* Width of body, minus any pixel offset in the leftmost cell. */
1450 size += joined_width (page, axis, b->z, cell) - b->pixel;
1452 /* Width of rightmost rule in body merged with leftmost rule in headers. */
1453 size += MAX (rule_width_r (page, axis, page->h[axis][1]),
1454 rule_width (page, axis, cell));
1456 /* Width of right header not including its leftmost rule. */
1457 size += axis_width (page, axis, rule_ofs_r (page, axis, page->h[axis][1]),
1458 rule_ofs_r (page, axis, 0));
1460 /* Join crossing. */
/* Only applies when headers exist on both sides of the body along AXIS. */
1461 if (page->h[axis][0] && page->h[axis][1])
1462 size += page->join_crossing[axis][b->z];
1467 /* Returns true if CELL along B's axis may be broken across a page boundary.
1469 This is just a heuristic. Breaking cells across page boundaries can save
1470 space, but it looks ugly. */
1472 cell_is_breakable (const struct render_break *b, int cell)
1474 const struct render_page *page = b->page;
1475 enum table_axis axis = b->axis;
/* 'min_break' is the device-specified minimum size worth splitting. */
1477 return cell_width (page, axis, cell) >= page->params->min_break[axis];
/* (The 'struct render_pager {' opener is elided in this view.) */
1484 const struct render_params *params; /* Parameters of the target device. */
1487 /* An array of "render_page"s to be rendered, in order, vertically. There
1488 may be up to 5 pages, for the pivot table's title, layers, body,
1489 captions, and footnotes. */
1490 struct render_page *pages[5];
/* Break iterators for the page currently being emitted: x_break slices it
   horizontally, y_break slices each horizontal chunk vertically. */
1494 struct render_break x_break;
1495 struct render_break y_break;
/* Renders TABLE into a new render_page at least MIN_WIDTH wide and appends
   it to P's page array.  NOTE(review): part of the signature and any guard
   (e.g. for an empty table) are elided in this view. */
1499 render_pager_add_table (struct render_pager *p, struct table *table,
1503 p->pages[p->n_pages++] = render_page_create (p->params, table, min_width);
/* Begins breaking P's next page: initializes x_break over a fresh reference
   to the current page (advancing cur_page) and leaves y_break empty, to be
   filled in by render_pager_has_next(). */
1507 render_pager_start_page (struct render_pager *p)
1509 render_break_init (&p->x_break, render_page_ref (p->pages[p->cur_page++]),
1511 render_break_init_empty (&p->y_break);
1514 /* Creates and returns a new render_pager for rendering TABLE_ITEM on the
1515 device with the given PARAMS. */
1516 struct render_pager *
1517 render_pager_create (const struct render_params *params,
1518 const struct table_item *table_item,
1519 const size_t *layer_indexes)
1521 const struct pivot_table *pt = table_item->pt;
/* Fall back to the table's current layer when the caller passes none (the
   guarding condition is elided in this view). */
1523 layer_indexes = pt->current_layer;
1525 struct table *title, *layers, *body, *caption, *footnotes;
1526 pivot_output (pt, layer_indexes, params->printing,
1527 &title, &layers, &body, &caption, &footnotes, NULL, NULL);
1529 /* Figure out the width of the body of the table. Use this to determine the
1531 struct render_page *body_page = render_page_create (params, body, 0);
1532 int body_width = table_width (body_page, H);
1534 if (body_width > params->size[H])
/* Either shrink horizontally to fit, when the look allows it and the device
   can scale... */
1537 && table_item->pt->look->shrink_to_fit[H]
1538 && params->ops->scale)
1539 scale = params->size[H] / (double) body_width;
/* ...or measure how wide the first horizontal chunk of the body will be, so
   the title and layers can be sized to match it. */
1542 struct render_break b;
1543 render_break_init (&b, render_page_ref (body_page), H);
1544 struct render_page *subpage
1545 = render_break_next (&b, params->size[H]);
1546 body_width = subpage ? subpage->cp[H][2 * subpage->n[H] + 1] : 0;
1547 render_page_unref (subpage);
1548 render_break_destroy (&b);
1552 /* Create the pager. */
1553 struct render_pager *p = xmalloc (sizeof *p);
1554 *p = (struct render_pager) { .params = params, .scale = scale };
/* Pages stack vertically: title, layers, body, caption, footnotes. */
1555 render_pager_add_table (p, title, body_width);
1556 render_pager_add_table (p, layers, body_width);
1557 p->pages[p->n_pages++] = body_page;
1558 render_pager_add_table (p, caption, 0);
1559 render_pager_add_table (p, footnotes, 0);
1560 assert (p->n_pages <= sizeof p->pages / sizeof *p->pages);
1562 /* If we're shrinking tables to fit the page length, then adjust the scale
1565 XXX This will sometimes shrink more than needed, because adjusting the
1566 scale factor allows for cells to be "wider", which means that sometimes
1567 they won't break across as much vertical space, thus shrinking the table
1568 vertically more than the scale would imply. Shrinking only as much as
1569 necessary would require an iterative search. */
1571 && table_item->pt->look->shrink_to_fit[V]
1572 && params->ops->scale)
1574 int total_height = 0;
1575 for (size_t i = 0; i < p->n_pages; i++)
1576 total_height += table_width (p->pages[i], V);
1577 if (total_height * p->scale >= params->size[V])
1578 p->scale *= params->size[V] / (double) total_height;
1581 render_pager_start_page (p);
/* Destroys P: tears down both break iterators and unrefs every page.
   NOTE(review): the null check and the final free of P itself are elided in
   this view -- confirm against the full source. */
1588 render_pager_destroy (struct render_pager *p)
1592 render_break_destroy (&p->x_break);
1593 render_break_destroy (&p->y_break);
1594 for (size_t i = 0; i < p->n_pages; i++)
1595 render_page_unref (p->pages[i]);
1600 /* Returns true if P has content remaining to render, false if rendering is
/* Casting away const: this "query" advances P's internal break iterators as
   a side effect. */
1603 render_pager_has_next (const struct render_pager *p_)
1605 struct render_pager *p = CONST_CAST (struct render_pager *, p_);
/* Refill the vertical break from the horizontal break, moving on to the
   next page whenever the horizontal break is exhausted. */
1607 while (!render_break_has_next (&p->y_break))
1609 render_break_destroy (&p->y_break);
1610 if (!render_break_has_next (&p->x_break))
1612 render_break_destroy (&p->x_break);
1613 if (p->cur_page >= p->n_pages)
/* All pages consumed: leave empty breaks so this function stays false
   (the return is elided in this view). */
1615 render_break_init_empty (&p->x_break);
1616 render_break_init_empty (&p->y_break);
1619 render_pager_start_page (p);
/* Re-initialize y_break from the next horizontal chunk.  NOTE(review): the
   start of this call is elided in this view. */
1623 &p->y_break, render_break_next (&p->x_break,
1624 p->params->size[H] / p->scale), V);
1629 /* Draws a chunk of content from P to fit in a space that has vertical size
1630 SPACE and the horizontal size specified in the render_params passed to
1631 render_page_create(). Returns the amount of space actually used by the
1632 rendered chunk, which will be 0 if SPACE is too small to render anything or
1633 if no content remains (use render_pager_has_next() to distinguish these
1636 render_pager_draw_next (struct render_pager *p, int space)
/* Tell the device to apply the scale factor for this chunk; it is undone
   after drawing (see the matching check below). */
1638 if (p->scale != 1.0)
1640 p->params->ops->scale (p->params->aux, p->scale);
1644 int ofs[TABLE_N_AXES] = { 0, 0 };
1645 size_t start_page = SIZE_MAX;
1647 while (render_pager_has_next (p))
/* Guard against looping forever on a page that cannot make progress (the
   loop-exit body is elided in this view). */
1649 if (start_page == p->cur_page)
1651 start_page = p->cur_page;
/* Take the next vertical slice that fits in the space remaining, draw it,
   and advance the vertical offset past it. */
1653 struct render_page *page
1654 = render_break_next (&p->y_break, space - ofs[V]);
1658 render_page_draw (page, ofs);
1659 ofs[V] += render_page_get_size (page, V);
1660 render_page_unref (page);
/* Undo the scaling applied above; the scaled return value is elided in this
   view. */
1663 if (p->scale != 1.0)
1669 /* Draws all of P's content. */
1671 render_pager_draw (const struct render_pager *p)
/* An unbounded region covers everything. */
1673 render_pager_draw_region (p, 0, 0, INT_MAX, INT_MAX);
1676 /* Draws the region of P's content that lies in the region (X,Y)-(X+W,Y+H).
1677 Some extra content might be drawn; the device should perform clipping as
1680 render_pager_draw_region (const struct render_pager *p,
1681 int x, int y, int w, int h)
1683 int ofs[TABLE_N_AXES] = { 0, 0 };
1684 int clip[TABLE_N_AXES][2];
/* Walk the vertically stacked pages, intersecting each with the requested
   vertical range.  NOTE(review): the horizontal clip setup and the ofs[V]
   advancement are elided in this view. */
1688 for (size_t i = 0; i < p->n_pages; i++)
1690 const struct render_page *page = p->pages[i];
1691 int size = render_page_get_size (page, V);
/* Clip coordinates are relative to this page's own origin. */
1693 clip[V][0] = MAX (y, ofs[V]) - ofs[V];
1694 clip[V][1] = MIN (y + h, ofs[V] + size) - ofs[V];
1695 if (clip[V][1] > clip[V][0])
1696 render_page_draw_region (page, ofs, clip);
1702 /* Returns the size of P's content along AXIS; i.e. the content's width if AXIS
1703 is TABLE_HORZ and its length if AXIS is TABLE_VERT. */
1705 render_pager_get_size (const struct render_pager *p, enum table_axis axis)
1709 for (size_t i = 0; i < p->n_pages; i++)
1711 int subsize = render_page_get_size (p->pages[i], axis);
/* Pages stack vertically, so widths combine by max and heights by sum. */
1712 size = axis == H ? MAX (size, subsize) : size + subsize;
/* Returns a good vertical breakpoint for P at or near HEIGHT by delegating
   to the page that straddles HEIGHT.  NOTE(review): the declarations of 'i'
   and 'y' and the fallback return are elided in this view. */
1719 render_pager_get_best_breakpoint (const struct render_pager *p, int height)
/* 'y' accumulates the heights of the pages above the current one. */
1724 for (i = 0; i < p->n_pages; i++)
1726 int size = render_page_get_size (p->pages[i], V);
1727 if (y + size >= height)
1728 return render_page_get_best_breakpoint (p->pages[i], height - y) + y;
1735 /* render_page_select() and helpers. */
/* Bundles the state shared between render_page_select() and the helper
   functions declared below it. */
1737 struct render_page_selection
1739 const struct render_page *page; /* Page whose slice we are selecting. */
1740 struct render_page *subpage; /* New page under construction. */
1741 enum table_axis a; /* Axis of 'page' along which 'subpage' is a slice. */
1742 enum table_axis b; /* The opposite of 'a'. */
1743 int z0; /* First cell along 'a' being selected. */
1744 int z1; /* Last cell being selected, plus 1. */
1745 int p0; /* Number of pixels to trim off left side of z0. */
1746 int p1; /* Number of pixels to trim off right side of z1-1. */
/* Helpers for render_page_select(), defined at the end of the file. */
1749 static void cell_to_subpage (struct render_page_selection *,
1750 const struct table_cell *,
1751 int subcell[TABLE_N_AXES]);
1752 static const struct render_overflow *find_overflow_for_cell (
1753 struct render_page_selection *, const struct table_cell *);
1754 static struct render_overflow *insert_overflow (struct render_page_selection *,
1755 const struct table_cell *);
1757 /* Creates and returns a new render_page whose contents are a subregion of
1758 PAGE's contents. The new render_page includes cells Z0 through Z1
1759 (exclusive) along AXIS, plus any headers on AXIS.
1761 If P0 is nonzero, then it is a number of pixels to exclude from the left or
1762 top (according to AXIS) of cell Z0. Similarly, P1 is a number of pixels to
1763 exclude from the right or bottom of cell Z1 - 1. (P0 and P1 are used to
1764 render cells that are too large to fit on a single page.)
1766 The whole of axis !AXIS is included. (The caller may follow up with another
1767 call to render_page_select() to select on !AXIS to select on that axis as
1770 The caller retains ownership of PAGE, which is not modified. */
1771 static struct render_page *
1772 render_page_select (const struct render_page *page, enum table_axis axis,
1773 int z0, int p0, int z1, int p1)
1775 enum table_axis a = axis;
1776 enum table_axis b = !a;
1778 /* Optimize case where all of PAGE is selected by just incrementing the
1780 if (z0 == page->h[a][0] && p0 == 0
1781 && z1 == page->n[a] - page->h[a][1] && p1 == 0)
/* Casting away const is safe here: taking a reference only bumps a count. */
1783 struct render_page *page_rw = CONST_CAST (struct render_page *, page);
1788 /* Allocate subpage. */
1789 int trim[2] = { z0 - page->h[a][0], (page->n[a] - page->h[a][1]) - z1 };
1790 int n[TABLE_N_AXES] = { [H] = page->n[H], [V] = page->n[V] };
1791 n[a] -= trim[0] + trim[1];
1792 struct render_page *subpage = render_page_allocate__ (
1793 page->params, table_ref (page->table), n);
/* Headers carry over unchanged; the body region along 'a' shrinks by the
   amount trimmed from each side. */
1794 for (enum table_axis k = 0; k < TABLE_N_AXES; k++)
1796 subpage->h[k][0] = page->h[k][0];
1797 subpage->h[k][1] = page->h[k][1];
1798 subpage->r[k][0] = page->r[k][0];
1799 subpage->r[k][1] = page->r[k][1];
1801 subpage->r[a][0] += trim[0];
1802 subpage->r[a][1] -= trim[1];
1804 /* An edge is cut off if it was cut off in PAGE or if we're trimming pixels
1805 off that side of the page and there are no headers. */
1806 subpage->is_edge_cutoff[a][0] =
1807 subpage->h[a][0] == 0 && (p0 || (z0 == 0 && page->is_edge_cutoff[a][0]));
1808 subpage->is_edge_cutoff[a][1] =
1809 subpage->h[a][1] == 0 && (p1 || (z1 == page->n[a]
1810 && page->is_edge_cutoff[a][1]));
1811 subpage->is_edge_cutoff[b][0] = page->is_edge_cutoff[b][0];
1812 subpage->is_edge_cutoff[b][1] = page->is_edge_cutoff[b][1];
1814 /* Select join crossings from PAGE into subpage. */
/* Three runs: leading headers, selected body cells, trailing headers. */
1815 int *jc = subpage->join_crossing[a];
1816 for (int z = 0; z < page->h[a][0]; z++)
1817 *jc++ = page->join_crossing[a][z];
1818 for (int z = z0; z <= z1; z++)
1819 *jc++ = page->join_crossing[a][z];
1820 for (int z = page->n[a] - page->h[a][1]; z < page->n[a]; z++)
1821 *jc++ = page->join_crossing[a][z];
1822 assert (jc == &subpage->join_crossing[a][subpage->n[a] + 1]);
1824 memcpy (subpage->join_crossing[b], page->join_crossing[b],
1825 (subpage->n[b] + 1) * sizeof **subpage->join_crossing);
1827 /* Select widths from PAGE into subpage. */
1828 int *scp = page->cp[a];
1829 int *dcp = subpage->cp[a];
/* Leading headers: a cut-off leading edge contributes zero width. */
1831 for (int z = 0; z <= rule_ofs (subpage->h[a][0]); z++, dcp++)
1833 int w = !z && subpage->is_edge_cutoff[a][0] ? 0 : scp[z + 1] - scp[z];
1834 dcp[1] = dcp[0] + w;
/* Body cells z0..z1-1.  NOTE(review): the pixel-trim adjustments for the
   first and last cells are partly elided in this view. */
1836 for (int z = cell_ofs (z0); z <= cell_ofs (z1 - 1); z++, dcp++)
1838 dcp[1] = dcp[0] + (scp[z + 1] - scp[z]);
1839 if (z == cell_ofs (z0))
1842 if (page->h[a][0] && page->h[a][1])
1843 dcp[1] += page->join_crossing[a][z / 2];
1845 if (z == cell_ofs (z1 - 1))
/* Trailing headers, mirroring the leading-header logic. */
1848 for (int z = rule_ofs_r (page, a, subpage->h[a][1]);
1849 z <= rule_ofs_r (page, a, 0); z++, dcp++)
1851 if (z == rule_ofs_r (page, a, 0) && subpage->is_edge_cutoff[a][1])
1854 dcp[1] = dcp[0] + (scp[z + 1] - scp[z]);
1856 assert (dcp == &subpage->cp[a][2 * subpage->n[a] + 1]);
/* The cross axis is copied whole. */
1858 for (int z = 0; z < page->n[b] * 2 + 2; z++)
1859 subpage->cp[b][z] = page->cp[b][z];
1861 /* Add new overflows. */
/* (The designated initializers for 's' are elided in this view.) */
1862 struct render_page_selection s = {
/* For each cell crossing the leading edge of the selection, record how many
   pixels of it lie outside the subpage. */
1873 if (!page->h[a][0] || z0 > page->h[a][0] || p0)
1874 for (int z = 0; z < page->n[b];)
1876 int d[TABLE_N_AXES];
1880 struct table_cell cell;
1881 render_get_cell (page, d[H], d[V], &cell);
1882 bool overflow0 = p0 || cell.d[a][0] < z0;
1883 bool overflow1 = cell.d[a][1] > z1 || (cell.d[a][1] == z1 && p1);
1884 if (overflow0 || overflow1)
1886 struct render_overflow *ro = insert_overflow (&s, &cell);
1890 ro->overflow[a][0] += p0 + axis_width (
1891 page, a, cell_ofs (cell.d[a][0]), cell_ofs (z0));
1892 if (page->h[a][0] && page->h[a][1])
1893 ro->overflow[a][0] -= page->join_crossing[a][cell.d[a][0]
1899 ro->overflow[a][1] += p1 + axis_width (
1900 page, a, cell_ofs (z1), cell_ofs (cell.d[a][1]));
1901 if (page->h[a][0] && page->h[a][1])
1902 ro->overflow[a][1] -= page->join_crossing[a][cell.d[a][1]];
/* Likewise for cells crossing the trailing edge of the selection that were
   not already handled above. */
1908 if (!page->h[a][1] || z1 < page->n[a] - page->h[a][1] || p1)
1909 for (int z = 0; z < page->n[b];)
1911 int d[TABLE_N_AXES];
1915 struct table_cell cell;
1916 render_get_cell (page, d[H], d[V], &cell);
1917 if ((cell.d[a][1] > z1 || (cell.d[a][1] == z1 && p1))
1918 && find_overflow_for_cell (&s, &cell) == NULL)
1920 struct render_overflow *ro = insert_overflow (&s, &cell);
1921 ro->overflow[a][1] += p1 + axis_width (page, a, cell_ofs (z1),
1922 cell_ofs (cell.d[a][1]));
1927 /* Copy overflows from PAGE into subpage. */
1928 struct render_overflow *ro;
1929 HMAP_FOR_EACH (ro, struct render_overflow, node, &page->overflows)
1931 struct table_cell cell;
1933 table_get_cell (page->table, ro->d[H], ro->d[V], &cell);
1934 if (cell.d[a][1] > z0 && cell.d[a][0] < z1
1935 && find_overflow_for_cell (&s, &cell) == NULL)
1936 insert_overflow (&s, &cell);
1942 /* Given CELL, a table_cell within S->page, stores in SUBCELL the (x,y)
1943 coordinates of the top-left cell as it will appear in S->subpage.
1945 CELL must actually intersect the region of S->page that is being selected
1946 by render_page_select() or the results will not make any sense. */
1948 cell_to_subpage (struct render_page_selection *s,
1949 const struct table_cell *cell, int subcell[TABLE_N_AXES])
1951 enum table_axis a = s->a;
1952 enum table_axis b = s->b;
1953 int ha0 = s->subpage->h[a][0];
/* Along the selected axis, shift by the selection origin but never into the
   header region; the cross axis is unchanged. */
1955 subcell[a] = MAX (cell->d[a][0] - s->z0 + ha0, ha0);
1956 subcell[b] = cell->d[b][0];
1959 /* Given CELL, a table_cell within S->page, returns the render_overflow for
1960 that cell in S->subpage, if there is one, and a null pointer otherwise.
1962 CELL must actually intersect the region of S->page that is being selected
1963 by render_page_select() or the results will not make any sense. */
1964 static const struct render_overflow *
1965 find_overflow_for_cell (struct render_page_selection *s,
1966 const struct table_cell *cell)
/* Translate the cell into subpage coordinates, then look it up there.
   (The declaration of 'subcell' is elided in this view.) */
1970 cell_to_subpage (s, cell, subcell);
1971 return find_overflow (s->subpage, subcell[H], subcell[V]);
1974 /* Given CELL, a table_cell within S->page, inserts a render_overflow for that
1975 cell in S->subpage (which must not already exist). Initializes the new
1976 render_overflow's 'overflow' member from the overflow for CELL in S->page,
1979 CELL must actually intersect the region of S->page that is being selected
1980 by render_page_select() or the results will not make any sense. */
1981 static struct render_overflow *
1982 insert_overflow (struct render_page_selection *s,
1983 const struct table_cell *cell)
1985 struct render_overflow *of = xzalloc (sizeof *of);
1986 cell_to_subpage (s, cell, of->d);
1987 hmap_insert (&s->subpage->overflows, &of->node,
1988 hash_cell (of->d[H], of->d[V]));
1990 const struct render_overflow *old
1991 = find_overflow (s->page, cell->d[H][0], cell->d[V][0]);
1993 memcpy (of->overflow, old->overflow, sizeof of->overflow);