1 /* PSPP - a program for statistical analysis.
2 Copyright (C) 2009, 2010, 2011, 2013, 2014, 2016 Free Software Foundation, Inc.
4 This program is free software: you can redistribute it and/or modify
5 it under the terms of the GNU General Public License as published by
6 the Free Software Foundation, either version 3 of the License, or
7 (at your option) any later version.
9 This program is distributed in the hope that it will be useful,
10 but WITHOUT ANY WARRANTY; without even the implied warranty of
11 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 GNU General Public License for more details.
14 You should have received a copy of the GNU General Public License
15 along with this program. If not, see <http://www.gnu.org/licenses/>. */
25 #include "libpspp/assertion.h"
26 #include "libpspp/hash-functions.h"
27 #include "libpspp/hmap.h"
28 #include "libpspp/pool.h"
29 #include "output/pivot-output.h"
30 #include "output/pivot-table.h"
31 #include "output/render.h"
32 #include "output/table.h"
34 #include "gl/minmax.h"
35 #include "gl/xalloc.h"
38 #define _(msgid) gettext (msgid)
40 /* This file uses TABLE_HORZ and TABLE_VERT enough to warrant abbreviating. */
44 /* A layout for rendering a specific table on a specific device.
46 May represent the layout of an entire table presented to
47 render_page_create(), or a rectangular subregion of a table broken out using
48 render_break_next() to allow a table to be broken across multiple pages.
50 A page's size is not limited to the size passed in as part of render_params.
51 render_pager breaks a render_page into smaller render_pages that will fit in
52 the available space. */
55 const struct render_params *params; /* Parameters of the target device. */
56 struct table *table; /* Table rendered. */
59 /* Region of 'table' to render.
61 The horizontal cells rendered are the leftmost h[H], then
64 The vertical cells rendered are the topmost h[V], then r[V][0]
67 n[H] = h[H] + (r[H][1] - r[H][0])
68 n[V] = h[V] + (r[V][1] - r[V][0])
71 int r[TABLE_N_AXES][2];
76 cp[H] represents x positions within the table.
78 cp[H][1] = the width of the leftmost vertical rule.
79 cp[H][2] = cp[H][1] + the width of the leftmost column.
80 cp[H][3] = cp[H][2] + the width of the second-from-left vertical rule.
82 cp[H][2 * n[H]] = x position of the rightmost vertical rule.
83 cp[H][2 * n[H] + 1] = total table width including all rules.
85 Similarly, cp[V] represents y positions within the table.
87 cp[V][1] = the height of the topmost horizontal rule.
88 cp[V][2] = cp[V][1] + the height of the topmost row.
89 cp[V][3] = cp[V][2] + the height of the second-from-top horizontal rule.
91 cp[V][2 * n[V]] = y position of the bottommost horizontal rule.
92 cp[V][2 * n[V] + 1] = total table height including all rules.
94 Rules and columns can have width or height 0, in which case consecutive
95 values in this array are equal. */
96 int *cp[TABLE_N_AXES];
98 /* render_break_next() can break a table such that some cells are not fully
99 contained within a render_page. This will happen if a cell is too wide
100 or too tall to fit on a single page, or if a cell spans multiple rows or
101 columns and the page only includes some of those rows or columns.
103 This hash table contains "struct render_overflow"s that represent each
104 such cell that doesn't completely fit on this page.
106 Each overflow cell borders at least one header edge of the table and may
107 border more. (A single table cell that is so large that it fills the
108 entire page can overflow on all four sides!) */
109 struct hmap overflows;
111 /* If a single column (or row) is too wide (or tall) to fit on a page
112 reasonably, then render_break_next() will split a single row or column
113 across multiple render_pages. This member indicates when this has
116 is_edge_cutoff[H][0] is true if pixels have been cut off the left side
117 of the leftmost column in this page, and false otherwise.
119 is_edge_cutoff[H][1] is true if pixels have been cut off the right side
120 of the rightmost column in this page, and false otherwise.
122 is_edge_cutoff[V][0] and is_edge_cutoff[V][1] are similar for the top
123 and bottom of the table.
125 The effect of is_edge_cutoff is to prevent rules along the edge in
126 question from being rendered.
128 When is_edge_cutoff is true for a given edge, the 'overflows' hmap will
129 contain a node for each cell along that edge. */
130 bool is_edge_cutoff[TABLE_N_AXES][2];
132 /* Minimum and maximum widths of columns based on headings.
134 For this purpose, a table has the following three regions:
136 +------------------+-------------------------------------------------+
137 | | column headings |
138 | +-------------------------------------------------+
141 | row headings | data |
144 +------------------+-------------------------------------------------+
146 - width_ranges[TABLE_HORZ] controls the minimum and maximum width that
147 columns in the column headings will be based on the column headings
148 themselves. That is, these columns will have width at least
149 width_ranges[TABLE_HORZ][0] wide, and no more than
150 width_ranges[TABLE_HORZ][1] unless the data requires it.
152 - width_ranges[TABLE_VERT] controls the minimum and maximum width that
153 columns in the corner and row headings will be based on the corner and
154 row headings themselves. That is, these columns will have width at
155 least width_ranges[TABLE_VERT][0] wide, and no more than
156 width_ranges[TABLE_VERT][1]. (The corner and row headings don't have
157 data in their columns so data can't affect their widths.)
159 int width_ranges[TABLE_N_AXES][2];
162 static struct render_page *render_page_create (const struct render_params *,
164 const struct pivot_table_look *);
166 struct render_page *render_page_ref (const struct render_page *page_);
167 static void render_page_unref (struct render_page *);
/* Returns the offset in struct render_page's cp[axis] array of the rule with
   index RULE_IDX.  That is, if RULE_IDX is 0, then the offset is that of the
   leftmost or topmost rule; if RULE_IDX is 1, then the offset is that of the
   next rule to the right (or below); and so on. */
static int
rule_ofs (int rule_idx)
{
  /* Rules occupy the even slots of cp[]; cells occupy the odd slots. */
  return rule_idx * 2;
}
179 /* Returns the offset in struct render_page's cp[axis] array of the rule with
180 index RULE_IDX_R, which counts from the right side (or bottom) of the page
181 left (or up), according to whether AXIS is H or V, respectively. That is,
182 if RULE_IDX_R is 0, then the offset is that of the rightmost or bottommost
183 rule; if RULE_IDX is 1, then the offset is that of the next rule to the left
184 (or above); and so on. */
186 rule_ofs_r (const struct render_page *page, int axis, int rule_idx_r)
188 return (page->n[axis] - rule_idx_r) * 2;
/* Returns the offset in struct render_page's cp[axis] array of the cell with
   index CELL_IDX.  That is, if CELL_IDX is 0, then the offset is that of the
   leftmost or topmost cell; if CELL_IDX is 1, then the offset is that of the
   next cell to the right (or below); and so on. */
static int
cell_ofs (int cell_idx)
{
  /* Cells occupy the odd slots of cp[]; rules occupy the even slots. */
  return cell_idx * 2 + 1;
}
201 /* Returns the width of PAGE along AXIS from OFS0 to OFS1, exclusive. */
203 axis_width (const struct render_page *page, int axis, int ofs0, int ofs1)
205 return page->cp[axis][ofs1] - page->cp[axis][ofs0];
208 /* Returns the total width of PAGE along AXIS. */
210 table_width (const struct render_page *page, int axis)
212 return page->cp[axis][2 * page->n[axis] + 1];
215 /* Returns the width of the headers in PAGE along AXIS. */
217 headers_width (const struct render_page *page, int axis)
219 return axis_width (page, axis, rule_ofs (0), cell_ofs (page->h[axis]));
/* Returns the width of cell X along AXIS in PAGE. */
static int
cell_width (const struct render_page *page, int axis, int x)
{
  return axis_width (page, axis, cell_ofs (x), cell_ofs (x) + 1);
}
/* Returns the width of rule X along AXIS in PAGE. */
static int
rule_width (const struct render_page *page, int axis, int x)
{
  return axis_width (page, axis, rule_ofs (x), rule_ofs (x) + 1);
}
/* Returns the width of rule X along AXIS in PAGE, where X counts from the
   right side (or bottom) of the page rather than from the left (or top). */
static int
rule_width_r (const struct render_page *page, int axis, int x)
{
  int ofs = rule_ofs_r (page, axis, x);
  return axis_width (page, axis, ofs, ofs + 1);
}
/* Returns the width of cells X0 through X1, exclusive, along AXIS in PAGE,
   including the rules between them (but not the rules on the outside). */
static int
joined_width (const struct render_page *page, int axis, int x0, int x1)
{
  return axis_width (page, axis, cell_ofs (x0), cell_ofs (x1) - 1);
}
251 /* Returns the width of the widest cell, excluding headers, along AXIS in
254 max_cell_width (const struct render_page *page, int axis)
256 int x0 = page->h[axis];
257 int x1 = page->n[axis];
260 for (int x = x0; x < x1; x++)
262 int w = cell_width (page, axis, x);
269 /* A cell that doesn't completely fit on the render_page. */
270 struct render_overflow
272 struct hmap_node node; /* In render_page's 'overflows' hmap. */
274 /* Occupied region of page.
276 d[H][0] is the leftmost column.
277 d[H][1] is the rightmost column, plus 1.
278 d[V][0] is the top row.
279 d[V][1] is the bottom row, plus 1.
281 The cell in its original table might occupy a larger region. This
282 member reflects the size of the cell in the current render_page, after
283 trimming off any rows or columns due to page-breaking. */
286 /* The space that has been trimmed off the cell:
288 overflow[H][0]: space trimmed off its left side.
289 overflow[H][1]: space trimmed off its right side.
290 overflow[V][0]: space trimmed off its top.
291 overflow[V][1]: space trimmed off its bottom.
293 During rendering, this information is used to position the rendered
294 portion of the cell within the available space.
296 When a cell is rendered, sometimes it is permitted to spill over into
297 space that is ordinarily reserved for rules. Either way, this space is
298 still included in overflow values.
300 Suppose, for example, that a cell that joins 2 columns has a width of 60
301 pixels and content "abcdef", that the 2 columns that it joins have
302 widths of 20 and 30 pixels, respectively, and that therefore the rule
303 between the two joined columns has a width of 10 (20 + 10 + 30 = 60).
304 It might render like this, if each character is 10x10, and showing a few
305 extra table cells for context:
313 If this render_page is broken at the rule that separates "gh" from
314 "ijk", then the page that contains the left side of the "abcdef" cell
315 will have overflow[H][1] of 10 + 30 = 40 for its portion of the cell,
316 and the page that contains the right side of the cell will have
317 overflow[H][0] of 20 + 10 = 30. The two resulting pages would look like
335 int overflow[TABLE_N_AXES][2];
/* Returns a hash value for cell (X,Y). */
static unsigned int
hash_cell (int x, int y)
{
  /* Pack both coordinates into one int; tables are far smaller than 2**16
     in practice so collisions from the packing are not a concern. */
  return hash_int (x + (y << 16), 0);
}
345 /* Searches PAGE's set of render_overflow for one whose top-left cell is
346 (X,Y). Returns it, if there is one, otherwise a null pointer. */
347 static const struct render_overflow *
348 find_overflow (const struct render_page *page, int x, int y)
350 if (!hmap_is_empty (&page->overflows))
352 const struct render_overflow *of;
354 HMAP_FOR_EACH_WITH_HASH (of, struct render_overflow, node,
355 hash_cell (x, y), &page->overflows)
356 if (x == of->d[H] && y == of->d[V])
/* Row or column dimensions.  Used to figure the size of a table in
   render_page_create() and discarded after that. */
struct render_row
  {
    /* Width without considering rows (or columns) that span more than one (or
       column). */
    int unspanned;

    /* Width taking spanned rows (or columns) into consideration. */
    int width;
  };
375 /* Modifies the 'width' members of the N elements of ROWS so that their sum,
376 when added to rule widths RULES[1] through RULES[N - 1] inclusive, is at
379 distribute_spanned_width (int width,
380 struct render_row *rows, const int *rules, int n)
382 /* Sum up the unspanned widths of the N rows for use as weights. */
383 int total_unspanned = 0;
384 for (int x = 0; x < n; x++)
385 total_unspanned += rows[x].unspanned;
386 for (int x = 0; x < n - 1; x++)
387 total_unspanned += rules[x + 1];
388 if (total_unspanned >= width)
391 /* The algorithm used here is based on the following description from HTML 4:
393 For cells that span multiple columns, a simple approach consists of
394 apportioning the min/max widths evenly to each of the constituent
395 columns. A slightly more complex approach is to use the min/max
396 widths of unspanned cells to weight how spanned widths are
397 apportioned. Experiments suggest that a blend of the two approaches
398 gives good results for a wide range of tables.
400 We blend the two approaches half-and-half, except that we cannot use the
401 unspanned weights when 'total_unspanned' is 0 (because that would cause a
404 The calculation we want to do is this:
407 w1 = width * (column's unspanned width) / (total unspanned width)
408 (column's width) = (w0 + w1) / 2
410 We implement it as a precise calculation in integers by multiplying w0 and
411 w1 by the common denominator of all three calculations (d), dividing that
412 out in the column width calculation, and then keeping the remainder for
415 (We actually compute the unspanned width of a column as twice the
416 unspanned width, plus the width of the rule on the left, plus the width of
417 the rule on the right. That way each rule contributes to both the cell on
418 its left and on its right.)
420 long long int d0 = n;
421 long long int d1 = 2LL * MAX (total_unspanned, 1);
422 long long int d = d0 * d1;
423 if (total_unspanned > 0)
425 long long int w = d / 2;
426 for (int x = 0; x < n; x++)
429 if (total_unspanned > 0)
431 long long int unspanned = rows[x].unspanned * 2LL;
433 unspanned += rules[x + 1];
435 unspanned += rules[x];
436 w += width * unspanned * d0;
439 rows[x].width = MAX (rows[x].width, w / d);
440 w -= rows[x].width * d;
444 /* Initializes PAGE->cp[AXIS] from the row widths in ROWS and the rule widths
447 accumulate_row_widths (const struct render_page *page, enum table_axis axis,
448 const struct render_row *rows, const int *rules)
450 int n = page->n[axis];
451 int *cp = page->cp[axis];
453 for (int z = 0; z < n; z++)
455 cp[1] = cp[0] + rules[z];
456 cp[2] = cp[1] + rows[z].width;
459 cp[1] = cp[0] + rules[n];
462 /* Returns the sum of widths of the N ROWS and N+1 RULES. */
464 calculate_table_width (int n, const struct render_row *rows, int *rules)
467 for (int x = 0; x < n; x++)
468 width += rows[x].width;
469 for (int x = 0; x <= n; x++)
475 /* Rendering utility functions. */
477 /* Returns the width of the rule in TABLE that is at offset Z along axis A, if
478 rendered with PARAMS. */
480 measure_rule (const struct render_params *params, const struct table *table,
481 enum table_axis a, int z)
483 enum table_axis b = !a;
485 /* Determine all types of rules that are present, as a bitmap in 'rules'
486 where rule type 't' is present if bit 2**t is set. */
487 unsigned int rules = 0;
490 for (d[b] = 0; d[b] < table->n[b]; d[b]++)
491 rules |= 1u << table_get_rule (table, a, d[H], d[V]).stroke;
493 /* Turn off TABLE_STROKE_NONE because it has width 0 and we needn't bother.
494 However, if the device doesn't support margins, make sure that there is at
495 least a small gap between cells (but we don't need any at the left or
496 right edge of the table). */
497 if (rules & (1u << TABLE_STROKE_NONE))
499 rules &= ~(1u << TABLE_STROKE_NONE);
500 if (z > 0 && z < table->n[a] && !params->supports_margins && a == H)
501 rules |= 1u << TABLE_STROKE_SOLID;
504 /* Calculate maximum width of the rules that are present. */
506 for (size_t i = 0; i < TABLE_N_STROKES; i++)
507 if (rules & (1u << i))
508 width = MAX (width, params->line_widths[i]);
512 /* Allocates and returns a new render_page using PARAMS and TABLE. Allocates
513 space for rendering a table with dimensions given in N, headers in H, and
514 content in R. The caller must initialize most of the members itself. */
515 static struct render_page *
516 render_page_allocate__ (const struct render_params *params,
518 const int n[TABLE_N_AXES],
519 const int h[TABLE_N_AXES],
520 const int r[TABLE_N_AXES][2])
522 struct render_page *page = xmalloc (sizeof *page);
523 *page = (struct render_page) {
527 .n = { [H] = n[H], [V] = n[V] },
528 .h = { [H] = h[H], [V] = h[V] },
529 .r = { [H] = { r[H][0], r[H][1] }, [V] = { r[V][0], r[V][1] } },
530 .cp = { [H] = xcalloc (2 * n[H] + 2, sizeof *page->cp[H]),
531 [V] = xcalloc (2 * n[V] + 2, sizeof *page->cp[V]) },
532 .overflows = HMAP_INITIALIZER (page->overflows),
537 /* Allocates and returns a new render_page using PARAMS and TABLE. Allocates
538 space for all of the members of the new page, but the caller must initialize
539 the 'cp' member itself. */
540 static struct render_page *
541 render_page_allocate (const struct render_params *params, struct table *table)
544 int r[TABLE_N_AXES][2];
545 for (enum table_axis a = 0; a < TABLE_N_AXES; a++)
548 r[a][0] = table->h[a];
549 r[a][1] = table->n[a];
551 return render_page_allocate__ (params, table, table->n, h, r);
554 /* Allocates and returns a new render_page for PARAMS and TABLE, initializing
555 cp[H] in the new page from ROWS and RULES. The caller must still initialize
557 static struct render_page *
558 create_page_with_exact_widths (const struct render_params *params,
560 const struct render_row *rows, int *rules)
562 struct render_page *page = render_page_allocate (params, table);
563 accumulate_row_widths (page, H, rows, rules);
567 /* Allocates and returns a new render_page for PARAMS and TABLE.
569 Initializes cp[H] in the new page by setting the width of each row 'i' to
570 somewhere between the minimum cell width ROW_MIN[i].width and the maximum
571 ROW_MAX[i].width. Sets the width of rules to those in RULES.
573 W_MIN is the sum of ROWS_MIN[].width.
575 W_MAX is the sum of ROWS_MAX[].width.
577 The caller must still initialize cp[V]. */
578 static struct render_page *
579 create_page_with_interpolated_widths (const struct render_params *params,
581 const struct render_row *rows_min,
582 const struct render_row *rows_max,
583 int w_min, int w_max, const int *rules)
585 const int n = table->n[H];
586 const long long int avail = params->size[H] - w_min;
587 const long long int wanted = w_max - w_min;
591 struct render_page *page = render_page_allocate (params, table);
593 int *cph = page->cp[H];
595 long long int w = wanted / 2;
596 for (int x = 0; x < n; x++)
598 w += avail * (rows_max[x].width - rows_min[x].width);
599 int extra = w / wanted;
602 cph[1] = cph[0] + rules[x];
603 cph[2] = cph[1] + rows_min[x].width + extra;
606 cph[1] = cph[0] + rules[n];
608 assert (page->cp[H][n * 2 + 1] == params->size[H]);
/* Maps a contiguous range of cells from a page to the underlying table along
   the horizontal or vertical dimension. */
struct map
  {
    int p0;                     /* First ordinate in the page. */
    int t0;                     /* First ordinate in the table. */
    int n;                      /* Number of ordinates in page and table. */
  };
621 /* Initializes M to a mapping from PAGE to PAGE->table along axis A. The
622 mapping includes ordinate Z (in PAGE). */
624 get_map (const struct render_page *page, enum table_axis a, int z,
635 assert (z < page->n[a]);
637 m->t0 = page->r[a][0];
638 m->n = page->r[a][1] - page->r[a][0];
642 /* Initializes CELL with the contents of the table cell at column X and row Y
643 within PAGE. When CELL is no longer needed, the caller is responsible for
644 freeing it by calling table_cell_free(CELL).
646 The caller must ensure that CELL is destroyed before TABLE is unref'ed.
648 This is equivalent to table_get_cell(), except X and Y are in terms of the
649 page's rows and columns rather than the underlying table's. */
651 render_get_cell (const struct render_page *page, int x, int y,
652 struct table_cell *cell)
654 int d[TABLE_N_AXES] = { [H] = x, [V] = y };
655 struct map map[TABLE_N_AXES];
657 for (enum table_axis a = 0; a < TABLE_N_AXES; a++)
659 struct map *m = &map[a];
660 get_map (page, a, d[a], m);
661 d[a] += m->t0 - m->p0;
663 table_get_cell (page->table, d[H], d[V], cell);
665 for (enum table_axis a = 0; a < TABLE_N_AXES; a++)
667 struct map *m = &map[a];
669 for (int i = 0; i < 2; i++)
670 cell->d[a][i] -= m->t0 - m->p0;
671 cell->d[a][0] = MAX (cell->d[a][0], m->p0);
672 cell->d[a][1] = MIN (cell->d[a][1], m->p0 + m->n);
675 if (cell->options & TABLE_CELL_FULL_WIDTH)
678 cell->d[H][1] = page->n[H];
682 /* Creates and returns a new render_page for rendering TABLE with the given
683 LOOK on a device described by PARAMS.
685 The new render_page will be suitable for rendering on a device whose page
686 size is PARAMS->size, but the caller is responsible for actually breaking it
687 up to fit on such a device, using the render_break abstraction. */
688 static struct render_page *
689 render_page_create (const struct render_params *params, struct table *table,
690 const struct pivot_table_look *look)
694 int nc = table->n[H];
695 int nr = table->n[V];
697 /* Figure out rule widths. */
698 int *rules[TABLE_N_AXES];
699 for (enum table_axis axis = 0; axis < TABLE_N_AXES; axis++)
701 int n = table->n[axis] + 1;
703 rules[axis] = xnmalloc (n, sizeof *rules);
704 for (int z = 0; z < n; z++)
705 rules[axis][z] = measure_rule (params, table, axis, z);
708 int col_heading_width_range[2];
709 int row_heading_width_range[2];
710 for (int i = 0; i < 2; i++)
711 col_heading_width_range[i] = look->col_heading_width_range[i] * params->px_size;
712 for (int i = 0; i < 2; i++)
713 row_heading_width_range[i] = look->row_heading_width_range[i] * params->px_size;
715 /* Calculate minimum and maximum widths of cells that do not
716 span multiple columns. */
717 struct render_row *columns[2];
718 for (int i = 0; i < 2; i++)
719 columns[i] = xcalloc (nc, sizeof *columns[i]);
720 for (int y = 0; y < nr; y++)
721 for (int x = 0; x < nc;)
723 struct table_cell cell;
725 table_get_cell (table, x, y, &cell);
726 if (y == cell.d[V][0]
727 && table_cell_colspan (&cell) == 1)
730 params->ops->measure_cell_width (params->aux, &cell,
733 if (cell.options & TABLE_CELL_FULL_WIDTH)
735 int rule_width = rules[H][0] + rules[H][table->n[H]];
736 w[MIN] = w[MAX] = MAX (0, w[MIN] - rule_width);
741 const int *wr = (x < table->h[H] ? row_heading_width_range
742 : y < table->h[V] ? col_heading_width_range
752 else if (w[1] > wr[1])
761 for (int i = 0; i < 2; i++)
762 if (columns[i][x].unspanned < w[i])
763 columns[i][x].unspanned = w[i];
768 /* Distribute widths of spanned columns. */
769 for (int i = 0; i < 2; i++)
770 for (int x = 0; x < nc; x++)
771 columns[i][x].width = columns[i][x].unspanned;
772 for (int y = 0; y < nr; y++)
773 for (int x = 0; x < nc;)
775 struct table_cell cell;
777 table_get_cell (table, x, y, &cell);
778 if (y == cell.d[V][0]
779 && table_cell_colspan (&cell) > 1
780 && !(cell.options & TABLE_CELL_FULL_WIDTH))
784 params->ops->measure_cell_width (params->aux, &cell,
786 if (cell.options & TABLE_CELL_FULL_WIDTH)
788 int rule_width = rules[H][0] + rules[H][table->n[H]];
789 w[MIN] = w[MAX] = MAX (0, w[MIN] - rule_width);
791 for (int i = 0; i < 2; i++)
792 distribute_spanned_width (w[i],
793 &columns[i][cell.d[H][0]],
794 &rules[H][cell.d[H][0]],
795 table_cell_colspan (&cell));
800 /* In pathological cases, spans can cause the minimum width of a column to
801 exceed the maximum width. This bollixes our interpolation algorithm
802 later, so fix it up. */
803 for (int i = 0; i < nc; i++)
804 if (columns[MIN][i].width > columns[MAX][i].width)
805 columns[MAX][i].width = columns[MIN][i].width;
807 /* Decide final column widths. */
809 for (int i = 0; i < 2; i++)
810 table_widths[i] = calculate_table_width (table->n[H],
811 columns[i], rules[H]);
813 struct render_page *page;
814 if (table_widths[MAX] <= params->size[H])
816 /* Fits even with maximum widths. Use them. */
817 page = create_page_with_exact_widths (params, table, columns[MAX],
820 else if (table_widths[MIN] <= params->size[H])
822 /* Fits with minimum widths, so distribute the leftover space. */
823 page = create_page_with_interpolated_widths (
824 params, table, columns[MIN], columns[MAX],
825 table_widths[MIN], table_widths[MAX], rules[H]);
829 /* Doesn't fit even with minimum widths. Assign minimums for now, and
830 later we can break it horizontally into multiple pages. */
831 page = create_page_with_exact_widths (params, table, columns[MIN],
835 /* Calculate heights of cells that do not span multiple rows. */
836 struct render_row *rows = XCALLOC (nr, struct render_row);
837 for (int y = 0; y < nr; y++)
838 for (int x = 0; x < nc;)
840 struct render_row *r = &rows[y];
841 struct table_cell cell;
843 render_get_cell (page, x, y, &cell);
844 if (y == cell.d[V][0] && table_cell_rowspan (&cell) == 1)
846 int w = joined_width (page, H, cell.d[H][0], cell.d[H][1]);
847 if (cell.options & TABLE_CELL_FULL_WIDTH)
848 w += rules[H][0] + rules[H][table->n[H]];
849 int h = params->ops->measure_cell_height (params->aux,
851 if (h > r->unspanned)
852 r->unspanned = r->width = h;
856 for (int i = 0; i < 2; i++)
859 /* Distribute heights of spanned rows. */
860 for (int y = 0; y < nr; y++)
861 for (int x = 0; x < nc;)
863 struct table_cell cell;
865 render_get_cell (page, x, y, &cell);
866 if (y == cell.d[V][0] && table_cell_rowspan (&cell) > 1)
868 int w = joined_width (page, H, cell.d[H][0], cell.d[H][1]);
869 if (cell.options & TABLE_CELL_FULL_WIDTH)
870 w += rules[H][0] + rules[H][table->n[H]];
871 int h = params->ops->measure_cell_height (params->aux, &cell, w);
872 distribute_spanned_width (h,
874 &rules[V][cell.d[V][0]],
875 table_cell_rowspan (&cell));
880 /* Decide final row heights. */
881 accumulate_row_widths (page, V, rows, rules[V]);
884 /* Measure headers. If they are "too big", get rid of them. */
885 for (enum table_axis axis = 0; axis < TABLE_N_AXES; axis++)
887 int hw = headers_width (page, axis);
888 if (hw * 2 >= page->params->size[axis]
889 || hw + max_cell_width (page, axis) > page->params->size[axis])
892 page->r[axis][0] = 0;
893 page->r[axis][1] = page->n[axis];
903 /* Increases PAGE's reference count. */
905 render_page_ref (const struct render_page *page_)
907 struct render_page *page = CONST_CAST (struct render_page *, page_);
912 /* Decreases PAGE's reference count and destroys PAGE if this causes the
913 reference count to fall to zero. */
915 render_page_unref (struct render_page *page)
917 if (page != NULL && --page->ref_cnt == 0)
919 struct render_overflow *overflow, *next;
920 HMAP_FOR_EACH_SAFE (overflow, next, struct render_overflow, node,
923 hmap_destroy (&page->overflows);
925 table_unref (page->table);
927 for (int i = 0; i < TABLE_N_AXES; ++i)
934 /* Returns the size of PAGE along AXIS. (This might be larger than the page
935 size specified in the parameters passed to render_page_create(). Use a
936 render_break to break up a render_page into page-sized chunks.) */
938 render_page_get_size (const struct render_page *page, enum table_axis axis)
940 return page->cp[axis][page->n[axis] * 2 + 1];
944 render_page_get_best_breakpoint (const struct render_page *page, int height)
946 /* If there's no room for at least the top row and the rules above and below
947 it, don't include any of the table. */
948 if (page->cp[V][3] > height)
951 /* Otherwise include as many rows and rules as we can. */
952 for (int y = 5; y <= 2 * page->n[V] + 1; y += 2)
953 if (page->cp[V][y] > height)
954 return page->cp[V][y - 2];
958 /* Drawing render_pages. */
960 /* This is like table_get_rule() except that D is in terms of the page's rows
961 and column rather than the underlying table's. */
962 static struct table_border_style
963 get_rule (const struct render_page *page, enum table_axis axis,
964 const int d_[TABLE_N_AXES])
966 int d[TABLE_N_AXES] = { d_[0] / 2, d_[1] / 2 };
969 enum table_axis a = axis;
970 if (d[a] < page->h[a])
972 else if (d[a] <= page->n[a])
974 if (page->h[a] && d[a] == page->h[a])
976 d[a] += page->r[a][0] - page->h[a];
979 enum table_axis b = !axis;
981 get_map (page, b, d[b], &m);
984 struct table_border_style border
985 = table_get_rule (page->table, axis, d[H], d[V]);
989 struct table_border_style border2 = table_get_rule (page->table, axis,
991 border.stroke = table_stroke_combine (border.stroke, border2.stroke);
/* Returns true if the current locale's output should be laid out right to
   left, false for left to right. */
static bool
render_direction_rtl (void)
{
  /* TRANSLATORS: Do not translate this string.  If the script of your language
     reads from right to left (eg Persian, Arabic, Hebrew etc), then replace
     this string with "output-direction-rtl".  Otherwise either leave it
     untranslated or copy it verbatim. */
  const char *dir = _("output-direction-ltr");
  if (0 == strcmp ("output-direction-rtl", dir))
    return true;

  if (0 != strcmp ("output-direction-ltr", dir))
    fprintf (stderr, "This localisation has been incorrectly translated. "
             "Complain to the translator.\n");

  return false;
}
1021 render_rule (const struct render_page *page, const int ofs[TABLE_N_AXES],
1022 const int d[TABLE_N_AXES])
1024 const struct table_border_style none = { .stroke = TABLE_STROKE_NONE };
1025 struct table_border_style styles[TABLE_N_AXES][2];
1027 for (enum table_axis a = 0; a < TABLE_N_AXES; a++)
1029 enum table_axis b = !a;
1032 || (page->is_edge_cutoff[a][0] && d[a] == 0)
1033 || (page->is_edge_cutoff[a][1] && d[a] == page->n[a] * 2))
1034 styles[a][0] = styles[a][1] = none;
1035 else if (is_rule (d[b]))
1039 int e[TABLE_N_AXES];
1043 styles[a][0] = get_rule (page, a, e);
1046 styles[a][0] = none;
1048 if (d[b] / 2 < page->n[b])
1049 styles[a][1] = get_rule (page, a, d);
1051 styles[a][1] = none;
1054 styles[a][0] = styles[a][1] = get_rule (page, a, d);
1057 if (styles[H][0].stroke != TABLE_STROKE_NONE
1058 || styles[H][1].stroke != TABLE_STROKE_NONE
1059 || styles[V][0].stroke != TABLE_STROKE_NONE
1060 || styles[V][1].stroke != TABLE_STROKE_NONE)
1062 int bb[TABLE_N_AXES][2];
1064 bb[H][0] = ofs[H] + page->cp[H][d[H]];
1065 bb[H][1] = ofs[H] + page->cp[H][d[H] + 1];
1066 if (page->params->rtl)
1068 int temp = bb[H][0];
1069 bb[H][0] = render_page_get_size (page, H) - bb[H][1];
1070 bb[H][1] = render_page_get_size (page, H) - temp;
1072 bb[V][0] = ofs[V] + page->cp[V][d[V]];
1073 bb[V][1] = ofs[V] + page->cp[V][d[V] + 1];
1074 page->params->ops->draw_line (page->params->aux, bb, styles);
/* Draws CELL of PAGE at offset OFS by invoking the device's draw_cell
   operation.  Accounts for RTL mirroring, full-width cells, vertical
   alignment, overflow of cells broken across pages, and rule spill.
   NOTE(review): this listing elides some original lines (braces, else
   arms, the halving of 'extra' for centering), so read with care. */
1079 render_cell (const struct render_page *page, const int ofs[TABLE_N_AXES],
1080 const struct table_cell *cell)
/* Compile-time switch for the debug trace below. */
1082 const bool debugging = false;
/* Debug trace: print the cell's column range, row range, and value. */
1086 if (cell->d[H][0] + 1 == cell->d[H][1])
1087 printf ("%d", cell->d[H][0]);
1089 printf ("%d-%d", cell->d[H][0], cell->d[H][1] - 1);
1091 if (cell->d[V][0] + 1 == cell->d[V][1])
1092 printf ("%d", cell->d[V][0]);
1094 printf ("%d-%d", cell->d[V][0], cell->d[V][1] - 1);
1096 char *value = pivot_value_to_string (cell->value, NULL);
1097 printf (": \"%s\"\n", value);
/* Bounding box of the cell's content, in device coordinates. */
1101 int bb[TABLE_N_AXES][2];
1103 if (!(cell->options & TABLE_CELL_FULL_WIDTH))
/* Ordinary cell: the box excludes the adjoining rules (odd/even 'cp'
   indexing — presumably rules at even, cells at odd indices; confirm
   against the cp[] definition earlier in the file). */
1105 bb[H][0] = ofs[H] + page->cp[H][cell->d[H][0] * 2 + 1];
1106 bb[H][1] = ofs[H] + page->cp[H][cell->d[H][1] * 2];
1107 if (page->params->rtl)
/* Mirror the horizontal box across the page for right-to-left output. */
1109 int temp = bb[H][0];
1110 bb[H][0] = render_page_get_size (page, H) - bb[H][1];
1111 bb[H][1] = render_page_get_size (page, H) - temp;
/* Full-width cell: the box extends across the rules as well.
   (The matching bb[H][0] assignment is elided from this listing.) */
1117 bb[H][1] = ofs[H] + page->cp[H][cell->d[H][1] * 2 + 1];
1119 bb[V][0] = ofs[V] + page->cp[V][cell->d[V][0] * 2 + 1];
1120 bb[V][1] = ofs[V] + page->cp[V][cell->d[V][1] * 2];
/* Clip region starts equal to the bounding box; the overflow handling
   below may widen it toward uncut page edges. */
1122 int clip[TABLE_N_AXES][2] = {
1123 [H] = { bb[H][0], bb[H][1] },
1124 [V] = { bb[V][0], bb[V][1] },
/* Vertical alignment: measure the content height and push it down by
   some or all of the leftover space. */
1127 enum table_valign valign = cell->cell_style->valign;
1128 int valign_offset = 0;
1129 if (valign != TABLE_VALIGN_TOP)
1131 int height = page->params->ops->measure_cell_height (
1132 page->params->aux, cell, bb[H][1] - bb[H][0]);
1133 int extra = bb[V][1] - bb[V][0] - height;
/* NOTE(review): for TABLE_VALIGN_CENTER, 'extra' is presumably halved
   on an elided line before being added. */
1136 if (valign == TABLE_VALIGN_CENTER)
1138 valign_offset += extra;
/* If this cell overflowed from an adjoining page, grow the bounding box
   by the overflow amounts so the content lines up across pages. */
1142 const struct render_overflow *of = find_overflow (
1143 page, cell->d[H][0], cell->d[V][0]);
1145 for (enum table_axis axis = 0; axis < TABLE_N_AXES; axis++)
1147 if (of->overflow[axis][0])
1149 bb[axis][0] -= of->overflow[axis][0];
/* At an uncut page edge, let the clip region extend to the rule. */
1150 if (cell->d[axis][0] == 0 && !page->is_edge_cutoff[axis][0])
1151 clip[axis][0] = ofs[axis] + page->cp[axis][cell->d[axis][0] * 2];
1153 if (of->overflow[axis][1])
1155 bb[axis][1] += of->overflow[axis][1];
1156 if (cell->d[axis][1] == page->n[axis]
1157 && !page->is_edge_cutoff[axis][1])
1158 clip[axis][1] = ofs[axis] + page->cp[axis][cell->d[axis][1] * 2
/* A cell may spill into half of each adjoining rule's width. */
1163 int spill[TABLE_N_AXES][2];
1164 for (enum table_axis axis = 0; axis < TABLE_N_AXES; axis++)
1166 spill[axis][0] = rule_width (page, axis, cell->d[axis][0]) / 2;
1167 spill[axis][1] = rule_width (page, axis, cell->d[axis][1]) / 2;
/* Body rows alternate color index by parity of the row number past the
   headers; the header-row value is on an elided line. */
1170 int color_idx = (cell->d[V][0] < page->h[V]
1172 : (cell->d[V][0] - page->h[V]) & 1);
1173 page->params->ops->draw_cell (page->params->aux, cell, color_idx,
1174 bb, valign_offset, spill, clip);
1177 /* Draws the cells of PAGE indicated in BB. */
1179 render_page_draw_cells (const struct render_page *page,
1180 int ofs[TABLE_N_AXES], int bb[TABLE_N_AXES][2])
/* First pass: draw cells.  A cell is drawn once, from its top row (or the
   first visible row), and 'x' skips to just past the cell's right edge so
   spanned cells are not revisited. */
1182 for (int y = bb[V][0]; y < bb[V][1]; y++)
1183 for (int x = bb[H][0]; x < bb[H][1];)
1184 if (!is_rule (x) && !is_rule (y))
1186 struct table_cell cell;
1188 render_get_cell (page, x / 2, y / 2, &cell);
1189 if (y / 2 == bb[V][0] / 2 || y / 2 == cell.d[V][0])
1190 render_cell (page, ofs, &cell);
1191 x = rule_ofs (cell.d[H][1]);
/* Second pass: draw the rules.  NOTE(review): the line that fills d[]
   before render_rule() is elided from this listing. */
1196 for (int y = bb[V][0]; y < bb[V][1]; y++)
1197 for (int x = bb[H][0]; x < bb[H][1]; x++)
1198 if (is_rule (x) || is_rule (y))
1200 int d[TABLE_N_AXES];
1203 render_rule (page, ofs, d);
1207 /* Renders PAGE, by calling the 'draw_line' and 'draw_cell' functions from the
1208 render_params provided to render_page_create(). */
1210 render_page_draw (const struct render_page *page, int ofs[TABLE_N_AXES])
/* Draw the whole page: the bounding box covers all n*2+1 rule and cell
   positions on each axis.  (The bb[.][0] = 0 assignments are on lines
   elided from this listing.) */
1212 int bb[TABLE_N_AXES][2];
1215 bb[H][1] = page->n[H] * 2 + 1;
1217 bb[V][1] = page->n[V] * 2 + 1;
1219 render_page_draw_cells (page, ofs, bb);
1222 /* Returns the greatest value i, 0 <= i < n, such that cp[i] <= x0. */
1224 get_clip_min_extent (int x0, const int cp[], int n)
/* Binary search over the monotonically nondecreasing cp[] array.
   NOTE(review): the low/high initialization and narrowing steps are
   elided from this listing. */
1231 int middle = low + (high - low) / 2;
1233 if (cp[middle] <= x0)
1245 /* Returns the least value i, 0 <= i < n, such that cp[i] >= x1. */
1247 get_clip_max_extent (int x1, const int cp[], int n)
/* Binary search, keeping the best (least) index seen that satisfies
   cp[i] >= x1. */
1254 int middle = low + (high - low) / 2;
1256 if (cp[middle] >= x1)
1257 best = high = middle;
/* Step back over zero-width entries so equal coordinates map to the
   earliest index. */
1262 while (best > 0 && cp[best - 1] == cp[best])
1268 /* Renders the cells of PAGE that intersect (X,Y)-(X+W,Y+H), by calling the
1269 'draw_line' and 'draw_cell' functions from the render_params provided to
1270 render_page_create(). */
1272 render_page_draw_region (const struct render_page *page,
1273 int ofs[TABLE_N_AXES], int clip[TABLE_N_AXES][2])
/* Convert the pixel clip rectangle into a range of rule/cell indexes on
   each axis, then draw just those. */
1275 int bb[TABLE_N_AXES][2];
1277 bb[H][0] = get_clip_min_extent (clip[H][0], page->cp[H], page->n[H] * 2 + 1);
1278 bb[H][1] = get_clip_max_extent (clip[H][1], page->cp[H], page->n[H] * 2 + 1);
1279 bb[V][0] = get_clip_min_extent (clip[V][0], page->cp[V], page->n[V] * 2 + 1);
1280 bb[V][1] = get_clip_max_extent (clip[V][1], page->cp[V], page->n[V] * 2 + 1);
1282 render_page_draw_cells (page, ofs, bb);
1285 /* Breaking up tables to fit on a page. */
1287 /* An iterator for breaking render_pages into smaller chunks. */
1290 struct render_page *page; /* Page being broken up. */
1291 enum table_axis axis; /* Axis along which 'page' is being broken. */
1292 int z; /* Next cell along 'axis'. */
1293 int pixel; /* Pixel offset within cell 'z' (usually 0). */
1294 int hw; /* Width of headers of 'page' along 'axis'. */
/* Helpers used by render_break_next(), defined further below. */
1297 static int needed_size (const struct render_break *, int cell);
1298 static bool cell_is_breakable (const struct render_break *, int cell);
1299 static struct render_page *render_page_select (const struct render_page *,
1304 /* Initializes render_break B for breaking PAGE along AXIS.
1305 Takes ownership of PAGE. */
1307 render_break_init (struct render_break *b, struct render_page *page,
1308 enum table_axis axis)
/* Iteration begins at the first body cell, just past the headers. */
1312 b->z = page->h[axis];
/* Cache the total header width along AXIS for later size computations. */
1314 b->hw = headers_width (page, axis);
1317 /* Initializes B as a render_break structure for which
1318 render_break_has_next() always returns false. */
1320 render_break_init_empty (struct render_break *b)
/* render_break_has_next() checks b->page first, so a null page (set on
   an elided line) makes B permanently exhausted; 'axis' is just a
   well-defined placeholder. */
1323 b->axis = TABLE_HORZ;
1329 /* Frees B and unrefs the render_page that it owns. */
1331 render_break_destroy (struct render_break *b)
/* Releases this iterator's reference; safe because render_break_init()
   took ownership of the page. */
1335 render_page_unref (b->page);
1340 /* Returns true if B still has cells that are yet to be returned,
1341 false if all of B's page has been processed. */
1343 render_break_has_next (const struct render_break *b)
1345 const struct render_page *page = b->page;
1346 enum table_axis axis = b->axis;
/* A null page (see render_break_init_empty()) or an index past the last
   cell means iteration is over. */
1348 return page != NULL && b->z < page->n[axis];
1351 /* Returns a new render_page that is up to SIZE pixels wide along B's axis.
1352 Returns a null pointer if B has already been completely broken up, or if
1353 SIZE is too small to reasonably render any cells. The latter will never
1354 happen if SIZE is at least as large as the page size passed to
1355 render_page_create() along B's axis. */
1356 static struct render_page *
1357 render_break_next (struct render_break *b, int size)
1359 const struct render_page *page = b->page;
1360 enum table_axis axis = b->axis;
1361 struct render_page *subpage;
1363 if (!render_break_has_next (b))
/* Advance 'z' as far as fits in SIZE.  NOTE(review): the declarations of
   'z'/'pixel' and the early loop exit when 'needed' exceeds SIZE are on
   lines elided from this listing. */
1368 for (z = b->z; z < page->n[axis]; z++)
1370 int needed = needed_size (b, z + 1);
/* Cell 'z' does not fit whole; if it is breakable, render part of it. */
1373 if (cell_is_breakable (b, z))
1375 /* If there is no right header and we render a partial cell on
1376 the right side of the body, then we omit the rightmost rule of
1377 the body. Otherwise the rendering is deceptive because it
1378 looks like the whole cell is present instead of a partial
1381 This is similar to code for the left side in needed_size(). */
1382 int rule_allowance = rule_width (page, axis, z);
1384 /* The amount that, if we added cell 'z', the rendering would
1385 overfill the allocated 'size'. */
1386 int overhang = needed - size - rule_allowance;
1388 /* The width of cell 'z'. */
1389 int cell_size = cell_width (page, axis, z);
1391 /* The amount trimmed off the left side of 'z',
1392 and the amount left to render. */
1393 int cell_ofs = z == b->z ? b->pixel : 0;
1394 int cell_left = cell_size - cell_ofs;
1396 /* A small but visible width. */
1397 int em = page->params->font_size[axis];
1399 /* If some of the cell remains to render,
1400 and there would still be some of the cell left afterward,
1401 then partially render that much of the cell. */
1402 pixel = (cell_left && cell_left > overhang
1403 ? cell_left - overhang + cell_ofs
1406 /* If there would be only a tiny amount of the cell left after
1407 rendering it partially, reduce the amount rendered slightly
1408 to make the output look a little better. */
1409 if (pixel + em > cell_size)
1410 pixel = MAX (pixel - em, 0);
1412 /* If we're breaking vertically, then consider whether the cells
1413 being broken have a better internal breakpoint than the exact
1414 number of pixels available, which might look bad e.g. because
1415 it breaks in the middle of a line of text. */
1416 if (axis == TABLE_VERT && page->params->ops->adjust_break)
1417 for (int x = 0; x < page->n[H];)
1419 struct table_cell cell;
1421 render_get_cell (page, x, z, &cell);
1422 int w = joined_width (page, H, cell.d[H][0], cell.d[H][1]);
1423 int better_pixel = page->params->ops->adjust_break (
1424 page->params->aux, &cell, w, pixel);
/* Only ever move the break upward (smaller 'pixel'), and never back
   before the part already rendered on a previous chunk. */
1427 if (better_pixel < pixel)
1429 if (better_pixel > (z == b->z ? b->pixel : 0))
1431 pixel = better_pixel;
1434 else if (better_pixel == 0 && z != b->z)
/* Nothing at all fits: report failure rather than an empty page. */
1446 if (z == b->z && !pixel)
/* Slice out [b->z, z) (plus any partial cell) as the returned subpage.
   NOTE(review): the advancement of b->z/b->pixel afterward is elided. */
1449 subpage = render_page_select (page, axis, b->z, b->pixel,
1451 pixel ? cell_width (page, axis, z) - pixel
1458 /* Returns the width that would be required along B's axis to render a page
1459 from B's current position up to but not including CELL. */
1461 needed_size (const struct render_break *b, int cell)
1463 const struct render_page *page = b->page;
1464 enum table_axis axis = b->axis;
1466 /* Width of left header not including its rightmost rule. */
1467 int size = axis_width (page, axis, 0, rule_ofs (page->h[axis]));
1469 /* If we have a pixel offset and there is no left header, then we omit the
1470 leftmost rule of the body. Otherwise the rendering is deceptive because
1471 it looks like the whole cell is present instead of a partial cell.
1473 Otherwise (if there are headers) we will be merging two rules: the
1474 rightmost rule in the header and the leftmost rule in the body. We assume
1475 that the width of a merged rule is the larger of the widths of either rule
1477 if (b->pixel == 0 || page->h[axis])
1478 size += MAX (rule_width (page, axis, page->h[axis]),
1479 rule_width (page, axis, b->z))
1481 /* Width of body, minus any pixel offset in the leftmost cell. */
1482 size += joined_width (page, axis, b->z, cell) - b->pixel;
1484 /* Width of rightmost rule in body merged with leftmost rule in headers. */
1485 size += MAX (rule_width_r (page, axis, 0), rule_width (page, axis, cell));
/* NOTE(review): the final 'return size;' is on a line elided from this
   listing. */
1490 /* Returns true if CELL along B's axis may be broken across a page boundary.
1492 This is just a heuristic. Breaking cells across page boundaries can save
1493 space, but it looks ugly. */
1495 cell_is_breakable (const struct render_break *b, int cell)
1497 const struct render_page *page = b->page;
1498 enum table_axis axis = b->axis;
/* Only cells at least as wide as the device's minimum break width are
   eligible to be split. */
1500 return cell_width (page, axis, cell) >= page->params->min_break[axis];
/* Fields of struct render_pager (the struct's opening line is elided
   from this listing). */
1507 const struct render_params *params; /* Parameters of the target device. */
1510 struct render_page *page; /* Render_page of the table being paged. */
1512 struct render_break x_break; /* Breaks 'page' horizontally (see render_pager_start_page). */
1513 struct render_break y_break; /* Breaks each horizontal chunk vertically. */
/* Begins breaking P's page: sets up the horizontal break iterator and an
   empty vertical one (render_pager_has_next() refills 'y_break' from
   'x_break' on demand). */
1517 render_pager_start_page (struct render_pager *p)
1519 render_break_init (&p->x_break, render_page_ref (p->page), H);
1520 render_break_init_empty (&p->y_break);
1523 /* Creates and returns a new render_pager for rendering PT on the device
1524 with the given PARAMS. */
1525 struct render_pager *
1526 render_pager_create (const struct render_params *params,
1527 const struct pivot_table *pt,
1528 const size_t *layer_indexes)
/* A null LAYER_INDEXES means "use the pivot table's current layer". */
1531 layer_indexes = pt->current_layer;
1533 struct table *table = pivot_output_monolithic (pt, layer_indexes,
1536 /* Measure the table width and use it to determine the base scale. */
1537 struct render_page *page = render_page_create (params, table, pt->look);
1538 int width = table_width (page, H);
/* NOTE(review): the 'double scale = 1.0;' initialization is on a line
   elided from this listing. */
1540 if (width > params->size[H])
/* Table is too wide: either scale it down (if the look and the device
   both allow it) ... */
1542 if (pt->look->shrink_to_fit[H] && params->ops->scale)
1543 scale = params->size[H] / (double) width;
/* ... or break it horizontally and remeasure the first chunk's width. */
1546 struct render_break b;
1547 render_break_init (&b, render_page_ref (page), H);
1548 struct render_page *subpage
1549 = render_break_next (&b, params->size[H]);
1550 width = subpage ? subpage->cp[H][2 * subpage->n[H] + 1] : 0;
1551 render_page_unref (subpage);
1552 render_break_destroy (&b);
1556 /* Create the pager. */
1557 struct render_pager *p = xmalloc (sizeof *p);
1558 *p = (struct render_pager) { .params = params, .scale = scale, .page = page };
1560 /* If we're shrinking tables to fit the page length, then adjust the scale
1563 XXX This will sometimes shrink more than needed, because adjusting the
1564 scale factor allows for cells to be "wider", which means that sometimes
1565 they won't break across as much vertical space, thus shrinking the table
1566 vertically more than the scale would imply. Shrinking only as much as
1567 necessary would require an iterative search. */
1568 if (pt->look->shrink_to_fit[V] && params->ops->scale)
1570 double height = table_width (p->page, V);
1571 if (height * p->scale >= params->size[V])
1572 p->scale *= params->size[V] / height;
/* Prime the break iterators for the first page. */
1575 render_pager_start_page (p);
/* Frees P and everything it owns: both break iterators and the page
   reference taken at creation time.  (Null-check of P, if any, is on
   an elided line.) */
1582 render_pager_destroy (struct render_pager *p)
1586 render_break_destroy (&p->x_break);
1587 render_break_destroy (&p->y_break);
1588 render_page_unref (p->page);
1593 /* Returns true if P has content remaining to render, false if rendering is
/* Logically const, but advances the break iterators internally, hence
   the CONST_CAST below. */
1596 render_pager_has_next (const struct render_pager *p_)
1598 struct render_pager *p = CONST_CAST (struct render_pager *, p_);
/* Refill the vertical iterator from the next horizontal chunk until it
   has content or the horizontal iterator is also exhausted. */
1600 while (!render_break_has_next (&p->y_break))
1602 render_break_destroy (&p->y_break);
1603 if (!render_break_has_next (&p->x_break))
/* Completely done: leave both iterators validly empty and (on an
   elided line) return false. */
1605 render_break_destroy (&p->x_break);
1606 render_break_init_empty (&p->x_break);
1607 render_break_init_empty (&p->y_break);
/* Take the next horizontal chunk, sized for the (possibly scaled)
   device width, and start breaking it vertically. */
1612 &p->y_break, render_break_next (&p->x_break,
1613 p->params->size[H] / p->scale), V);
1618 /* Draws a chunk of content from P to fit in a space that has vertical size
1619 SPACE and the horizontal size specified in the render_params passed to
1620 render_page_create(). Returns the amount of space actually used by the
1621 rendered chunk, which will be 0 if SPACE is too small to render anything or
1622 if no content remains (use render_pager_has_next() to distinguish these
1625 render_pager_draw_next (struct render_pager *p, int space)
/* Apply the shrink-to-fit scale to the device before drawing; it is
   undone again below.  (The adjustment of 'space' for the scale, if any,
   is on elided lines.) */
1627 if (p->scale != 1.0)
1629 p->params->ops->scale (p->params->aux, p->scale);
1633 int ofs[TABLE_N_AXES] = { 0, 0 };
1635 if (render_pager_has_next (p))
1637 struct render_page *page
1638 = render_break_next (&p->y_break, space - ofs[V]);
1641 render_page_draw (page, ofs);
/* Accumulate the vertical space consumed; this is presumably the
   function's return value (return statement elided). */
1642 ofs[V] += render_page_get_size (page, V);
1643 render_page_unref (page);
/* Restore the device's unit scale (the ops->scale(aux, 1.0) call is on
   an elided line). */
1647 if (p->scale != 1.0)
1653 /* Draws all of P's content. */
1655 render_pager_draw (const struct render_pager *p)
/* Delegate to the region renderer with an effectively unbounded region. */
1657 render_pager_draw_region (p, 0, 0, INT_MAX, INT_MAX);
1660 /* Draws the region of P's content that lies in the region (X,Y)-(X+W,Y+H).
1661 Some extra content might be drawn; the device should perform clipping as
1664 render_pager_draw_region (const struct render_pager *p,
1665 int x, int y, int w, int h)
1667 int ofs[TABLE_N_AXES] = { 0, 0 };
1668 int clip[TABLE_N_AXES][2];
/* NOTE(review): the horizontal clip[H][*] computation and any loop
   advancing ofs[V] are on lines elided from this listing. */
1672 int size = render_page_get_size (p->page, V);
/* Intersect the requested vertical range with this page's extent,
   translated into page-relative coordinates. */
1674 clip[V][0] = MAX (y, ofs[V]) - ofs[V];
1675 clip[V][1] = MIN (y + h, ofs[V] + size) - ofs[V];
1676 if (clip[V][1] > clip[V][0])
1677 render_page_draw_region (p->page, ofs, clip);
1682 /* Returns the size of P's content along AXIS; i.e. the content's width if AXIS
1683 is TABLE_HORZ and its length if AXIS is TABLE_VERT. */
1685 render_pager_get_size (const struct render_pager *p, enum table_axis axis)
/* Simple delegation to the underlying page's size. */
1687 return render_page_get_size (p->page, axis);
/* Returns a good vertical breakpoint for P's page within HEIGHT pixels.
   If the whole page fits, the elided true-branch presumably returns
   'size' (or HEIGHT) directly; otherwise asks the page for its best
   internal breakpoint. */
1691 render_pager_get_best_breakpoint (const struct render_pager *p, int height)
1693 int size = render_page_get_size (p->page, V);
1694 return (size < height
1696 : render_page_get_best_breakpoint (p->page, height));
1699 /* render_page_select() and helpers. */
/* Bookkeeping shared by render_page_select() and its helpers while a
   slice of 'page' is copied into 'subpage'. */
1701 struct render_page_selection
1703 const struct render_page *page; /* Page whose slice we are selecting. */
1704 struct render_page *subpage; /* New page under construction. */
1705 enum table_axis a; /* Axis of 'page' along which 'subpage' is a slice. */
1706 enum table_axis b; /* The opposite of 'a'. */
1707 int z0; /* First cell along 'a' being selected. */
1708 int z1; /* Last cell being selected, plus 1. */
1709 int p0; /* Number of pixels to trim off left side of z0. */
1710 int p1; /* Number of pixels to trim off right side of z1-1. */
/* Forward declarations for the helpers defined after render_page_select(). */
1713 static void cell_to_subpage (struct render_page_selection *,
1714 const struct table_cell *,
1715 int subcell[TABLE_N_AXES]);
1716 static const struct render_overflow *find_overflow_for_cell (
1717 struct render_page_selection *, const struct table_cell *);
1718 static struct render_overflow *insert_overflow (struct render_page_selection *,
1719 const struct table_cell *);
1721 /* Creates and returns a new render_page whose contents are a subregion of
1722 PAGE's contents. The new render_page includes cells Z0 through Z1
1723 (exclusive) along AXIS, plus any headers on AXIS.
1725 If P0 is nonzero, then it is a number of pixels to exclude from the left or
1726 top (according to AXIS) of cell Z0. Similarly, P1 is a number of pixels to
1727 exclude from the right or bottom of cell Z1 - 1. (P0 and P1 are used to
1728 render cells that are too large to fit on a single page.)
1730 The whole of axis !AXIS is included. (The caller may follow up with another
1731 call to render_page_select() to select on !AXIS to select on that axis as
1734 The caller retains ownership of PAGE, which is not modified. */
1735 static struct render_page *
1736 render_page_select (const struct render_page *page, enum table_axis axis,
1737 int z0, int p0, int z1, int p1)
1739 enum table_axis a = axis;
1740 enum table_axis b = !a;
1742 /* Optimize case where all of PAGE is selected by just incrementing the
1744 if (z0 == page->h[a] && p0 == 0 && z1 == page->n[a] && p1 == 0)
/* CONST_CAST is safe here: only the reference count is touched. */
1746 struct render_page *page_rw = CONST_CAST (struct render_page *, page);
1751 /* Allocate subpage. */
/* trim[0]/trim[1]: cells dropped from the leading/trailing end of 'a'. */
1752 int trim[2] = { z0 - page->h[a], page->n[a] - z1 };
1754 int n[TABLE_N_AXES] = { [H] = page->n[H], [V] = page->n[V] };
1755 n[a] -= trim[0] + trim[1];
/* Copy PAGE's table region, adjusted for the trimmed cells.
   NOTE(review): the r[a][*] adjustment lines are elided here. */
1757 int r[TABLE_N_AXES][2];
1758 for (enum table_axis k = 0; k < TABLE_N_AXES; k++)
1760 r[k][0] = page->r[k][0];
1761 r[k][1] = page->r[k][1];
1766 struct render_page *subpage = render_page_allocate__ (
1767 page->params, table_ref (page->table), n, page->h, r);
1769 /* An edge is cut off if it was cut off in PAGE or if we're trimming pixels
1770 off that side of the page and there are no headers. */
1771 subpage->is_edge_cutoff[a][0] =
1772 subpage->h[a] == 0 && (p0 || (z0 == 0 && page->is_edge_cutoff[a][0]));
1773 subpage->is_edge_cutoff[a][1] =
1774 p1 || (z1 == page->n[a] && page->is_edge_cutoff[a][1]);
1775 subpage->is_edge_cutoff[b][0] = page->is_edge_cutoff[b][0];
1776 subpage->is_edge_cutoff[b][1] = page->is_edge_cutoff[b][1];
1778 /* Select widths from PAGE into subpage. */
1779 int *scp = page->cp[a];
1780 int *dcp = subpage->cp[a];
/* Copy header widths; a cut-off leading edge gets a zero-width rule. */
1782 for (int z = 0; z <= rule_ofs (subpage->h[a]); z++, dcp++)
1784 int w = !z && subpage->is_edge_cutoff[a][0] ? 0 : scp[z + 1] - scp[z];
1785 dcp[1] = dcp[0] + w;
/* Copy body cell widths; the first and last cells are then narrowed by
   p0/p1 on lines elided from this listing. */
1787 for (int z = cell_ofs (z0); z <= cell_ofs (z1 - 1); z++, dcp++)
1789 dcp[1] = dcp[0] + (scp[z + 1] - scp[z]);
1790 if (z == cell_ofs (z0))
1792 if (z == cell_ofs (z1 - 1))
/* Copy the final (rightmost) rule, zero-width if that edge is cut off. */
1795 for (int z = rule_ofs_r (page, a, 0);
1796 z <= rule_ofs_r (page, a, 0); z++, dcp++)
1798 if (z == rule_ofs_r (page, a, 0) && subpage->is_edge_cutoff[a][1])
1801 dcp[1] = dcp[0] + (scp[z + 1] - scp[z]);
1803 assert (dcp == &subpage->cp[a][2 * subpage->n[a] + 1]);
/* The cross axis 'b' is copied wholesale. */
1805 for (int z = 0; z < page->n[b] * 2 + 2; z++)
1806 subpage->cp[b][z] = page->cp[b][z];
1808 /* Add new overflows. */
1809 struct render_page_selection s = {
/* Record overflow for cells straddling the left/top edge of the slice.
   NOTE(review): the d[] setup and loop-advance lines are elided. */
1820 if (!page->h[a] || z0 > page->h[a] || p0)
1821 for (int z = 0; z < page->n[b];)
1823 int d[TABLE_N_AXES];
1827 struct table_cell cell;
1828 render_get_cell (page, d[H], d[V], &cell);
1829 bool overflow0 = p0 || cell.d[a][0] < z0;
1830 bool overflow1 = cell.d[a][1] > z1 || (cell.d[a][1] == z1 && p1);
1831 if (overflow0 || overflow1)
1833 struct render_overflow *ro = insert_overflow (&s, &cell);
/* Leading overflow: trimmed pixels plus the width of the cut-off part. */
1836 ro->overflow[a][0] += p0 + axis_width (
1837 page, a, cell_ofs (cell.d[a][0]), cell_ofs (z0));
/* Trailing overflow, analogously. */
1840 ro->overflow[a][1] += p1 + axis_width (
1841 page, a, cell_ofs (z1), cell_ofs (cell.d[a][1]));
/* Record overflow for cells straddling only the right/bottom edge. */
1846 for (int z = 0; z < page->n[b];)
1848 int d[TABLE_N_AXES];
1852 struct table_cell cell;
1853 render_get_cell (page, d[H], d[V], &cell);
1854 if ((cell.d[a][1] > z1 || (cell.d[a][1] == z1 && p1))
1855 && find_overflow_for_cell (&s, &cell) == NULL)
1857 struct render_overflow *ro = insert_overflow (&s, &cell);
1858 ro->overflow[a][1] += p1 + axis_width (page, a, cell_ofs (z1),
1859 cell_ofs (cell.d[a][1]));
1864 /* Copy overflows from PAGE into subpage. */
1865 struct render_overflow *ro;
1866 HMAP_FOR_EACH (ro, struct render_overflow, node, &page->overflows)
1868 struct table_cell cell;
1870 table_get_cell (page->table, ro->d[H], ro->d[V], &cell);
1871 if (cell.d[a][1] > z0 && cell.d[a][0] < z1
1872 && find_overflow_for_cell (&s, &cell) == NULL)
1873 insert_overflow (&s, &cell);
1879 /* Given CELL, a table_cell within S->page, stores in SUBCELL the (x,y)
1880 coordinates of the top-left cell as it will appear in S->subpage.
1882 CELL must actually intersect the region of S->page that is being selected
1883 by render_page_select() or the results will not make any sense. */
1885 cell_to_subpage (struct render_page_selection *s,
1886 const struct table_cell *cell, int subcell[TABLE_N_AXES])
1888 enum table_axis a = s->a;
1889 enum table_axis b = s->b;
1890 int ha0 = s->subpage->h[a];
/* Along the sliced axis, shift by the slice origin but clamp into the
   subpage's header region; the cross axis is unchanged. */
1892 subcell[a] = MAX (cell->d[a][0] - s->z0 + ha0, ha0);
1893 subcell[b] = cell->d[b][0];
1896 /* Given CELL, a table_cell within S->page, returns the render_overflow for
1897 that cell in S->subpage, if there is one, and a null pointer otherwise.
1899 CELL must actually intersect the region of S->page that is being selected
1900 by render_page_select() or the results will not make any sense. */
1901 static const struct render_overflow *
1902 find_overflow_for_cell (struct render_page_selection *s,
1903 const struct table_cell *cell)
/* Translate CELL to subpage coordinates (the subcell[] declaration is on
   an elided line), then look it up in the subpage's overflow map. */
1907 cell_to_subpage (s, cell, subcell);
1908 return find_overflow (s->subpage, subcell[H], subcell[V]);
1911 /* Given CELL, a table_cell within S->page, inserts a render_overflow for that
1912 cell in S->subpage (which must not already exist). Initializes the new
1913 render_overflow's 'overflow' member from the overflow for CELL in S->page,
1916 CELL must actually intersect the region of S->page that is being selected
1917 by render_page_select() or the results will not make any sense. */
1918 static struct render_overflow *
1919 insert_overflow (struct render_page_selection *s,
1920 const struct table_cell *cell)
1922 struct render_overflow *of = XZALLOC (struct render_overflow);
1923 cell_to_subpage (s, cell, of->d);
1924 hmap_insert (&s->subpage->overflows, &of->node,
1925 hash_cell (of->d[H], of->d[V]));
1927 const struct render_overflow *old
1928 = find_overflow (s->page, cell->d[H][0], cell->d[V][0]);
1930 memcpy (of->overflow, old->overflow, sizeof of->overflow);