1 /* PSPP - a program for statistical analysis.
2 Copyright (C) 2009, 2010, 2011, 2013, 2014, 2016 Free Software Foundation, Inc.
4 This program is free software: you can redistribute it and/or modify
5 it under the terms of the GNU General Public License as published by
6 the Free Software Foundation, either version 3 of the License, or
7 (at your option) any later version.
9 This program is distributed in the hope that it will be useful,
10 but WITHOUT ANY WARRANTY; without even the implied warranty of
11 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 GNU General Public License for more details.
14 You should have received a copy of the GNU General Public License
15 along with this program. If not, see <http://www.gnu.org/licenses/>. */
25 #include "libpspp/assertion.h"
26 #include "libpspp/hash-functions.h"
27 #include "libpspp/hmap.h"
28 #include "output/render.h"
29 #include "output/tab.h"
30 #include "output/table-item.h"
31 #include "output/table.h"
33 #include "gl/minmax.h"
34 #include "gl/xalloc.h"
/* Shorthand for gettext message translation, per GNU convention. */
37 #define _(msgid) gettext (msgid)
39 /* This file uses TABLE_HORZ and TABLE_VERT enough to warrant abbreviating. */
43 /* A layout for rendering a specific table on a specific device.
45 May represent the layout of an entire table presented to
46 render_page_create(), or a rectangular subregion of a table broken out using
47 render_break_next() to allow a table to be broken across multiple pages.
49 A page's size is not limited to the size passed in as part of render_params.
50 render_pager breaks a render_page into smaller render_pages that will fit in
51 the available space. */
/* NOTE(review): the embedded original line numbers jump (43, 45, 49, ...),
   so this listing has lost lines -- most importantly the
   "struct render_page {" opener, the local copy of table->n, the reference
   count member, and the closing "};".  Recover them before compiling. */
54 const struct render_params *params; /* Parameters of the target device. */
55 struct table *table; /* Table rendered. */
58 /* Local copies of table->n and table->h, for convenience. */
60 int h[TABLE_N_AXES][2];
62 /* cp[H] represents x positions within the table.
64 cp[H][1] = the width of the leftmost vertical rule.
65 cp[H][2] = cp[H][1] + the width of the leftmost column.
66 cp[H][3] = cp[H][2] + the width of the second-from-left vertical rule.
68 cp[H][2 * nc] = x position of the rightmost vertical rule.
69 cp[H][2 * nc + 1] = total table width including all rules.
71 Similarly, cp[V] represents y positions within the table.
73 cp[V][1] = the height of the topmost horizontal rule.
74 cp[V][2] = cp[V][1] + the height of the topmost row.
75 cp[V][3] = cp[V][2] + the height of the second-from-top horizontal rule.
77 cp[V][2 * nr] = y position of the bottommost horizontal rule.
78 cp[V][2 * nr + 1] = total table height including all rules.
80 Rules and columns can have width or height 0, in which case consecutive
81 values in this array are equal. */
82 int *cp[TABLE_N_AXES];
84 /* render_break_next() can break a table such that some cells are not fully
85 contained within a render_page. This will happen if a cell is too wide
86 or too tall to fit on a single page, or if a cell spans multiple rows or
87 columns and the page only includes some of those rows or columns.
89 This hash table contains "struct render_overflow"s that represents each
90 such cell that doesn't completely fit on this page.
92 Each overflow cell borders at least one header edge of the table and may
93 border more. (A single table cell that is so large that it fills the
94 entire page can overflow on all four sides!) */
95 struct hmap overflows;
97 /* If a single column (or row) is too wide (or tall) to fit on a page
98 reasonably, then render_break_next() will split a single row or column
99 across multiple render_pages. This member indicates when this has
102 is_edge_cutoff[H][0] is true if pixels have been cut off the left side
103 of the leftmost column in this page, and false otherwise.
105 is_edge_cutoff[H][1] is true if pixels have been cut off the right side
106 of the rightmost column in this page, and false otherwise.
108 is_edge_cutoff[V][0] and is_edge_cutoff[V][1] are similar for the top
109 and bottom of the table.
111 The effect of is_edge_cutoff is to prevent rules along the edge in
112 question from being rendered.
114 When is_edge_cutoff is true for a given edge, the 'overflows' hmap will
115 contain a node for each cell along that edge. */
116 bool is_edge_cutoff[TABLE_N_AXES][2];
118 /* If part of a joined cell would be cut off by breaking a table along
119 'axis' at the rule with offset 'z' (where 0 <= z <= n[axis]), then
120 join_crossing[axis][z] is the thickness of the rule that would be cut
123 This is used to know to allocate extra space for breaking at such a
124 position, so that part of the cell's content is not lost.
126 This affects breaking a table only when headers are present. When
127 headers are not present, the rule's thickness is used for cell content,
128 so no part of the cell's content is lost (and in fact it is duplicated
129 across both pages). */
130 int *join_crossing[TABLE_N_AXES];
/* Forward declarations for functions defined later in this file. */
133 static struct render_page *render_page_create (const struct render_params *,
136 struct render_page *render_page_ref (const struct render_page *page_);
137 static void render_page_unref (struct render_page *);
/* Returns the offset in struct render_page's cp[axis] array of the rule with
   index RULE_IDX.  That is, if RULE_IDX is 0, then the offset is that of the
   leftmost or topmost rule; if RULE_IDX is 1, then the offset is that of the
   next rule to the right (or below); and so on. */
static int
rule_ofs (int rule_idx)
{
  /* Rules and cells alternate in cp[]: rule i lives at even offset 2*i. */
  return rule_idx * 2;
}
149 /* Returns the offset in struct render_page's cp[axis] array of the rule with
150 index RULE_IDX_R, which counts from the right side (or bottom) of the page
151 left (or up), according to whether AXIS is H or V, respectively. That is,
152 if RULE_IDX_R is 0, then the offset is that of the rightmost or bottommost
153 rule; if RULE_IDX is 1, then the offset is that of the next rule to the left
154 (or above); and so on. */
156 rule_ofs_r (const struct render_page *page, int axis, int rule_idx_r)
158 return (page->n[axis] - rule_idx_r) * 2;
/* Returns the offset in struct render_page's cp[axis] array of the cell with
   index CELL_IDX.  That is, if CELL_IDX is 0, then the offset is that of the
   leftmost or topmost cell; if CELL_IDX is 1, then the offset is that of the
   next cell to the right (or below); and so on. */
static int
cell_ofs (int cell_idx)
{
  /* Cells occupy the odd offsets of cp[], between the rules. */
  return cell_idx * 2 + 1;
}
171 /* Returns the width of PAGE along AXIS from OFS0 to OFS1, exclusive. */
173 axis_width (const struct render_page *page, int axis, int ofs0, int ofs1)
175 return page->cp[axis][ofs1] - page->cp[axis][ofs0];
178 /* Returns the width of the headers in PAGE along AXIS. */
180 headers_width (const struct render_page *page, int axis)
182 int h0 = page->h[axis][0];
183 int w0 = axis_width (page, axis, rule_ofs (0), cell_ofs (h0));
184 int n = page->n[axis];
185 int h1 = page->h[axis][1];
186 int w1 = axis_width (page, axis, rule_ofs_r (page, axis, h1), cell_ofs (n));
/* Returns the width of cell X along AXIS in PAGE. */
static int
cell_width (const struct render_page *page, int axis, int x)
{
  return axis_width (page, axis, cell_ofs (x), cell_ofs (x) + 1);
}
/* Returns the width of rule X along AXIS in PAGE, counting from the
   left (or top). */
static int
rule_width (const struct render_page *page, int axis, int x)
{
  return axis_width (page, axis, rule_ofs (x), rule_ofs (x) + 1);
}
/* Returns the width of rule X along AXIS in PAGE, counting from the
   right (or bottom). */
static int
rule_width_r (const struct render_page *page, int axis, int x)
{
  int ofs = rule_ofs_r (page, axis, x);
  return axis_width (page, axis, ofs, ofs + 1);
}
/* Returns the width of cells X0 through X1, exclusive, along AXIS in PAGE,
   including the widths of any rules that lie between them. */
static int
joined_width (const struct render_page *page, int axis, int x0, int x1)
{
  /* cell_ofs (x1) - 1 is the offset of the rule just before cell x1, so the
     trailing rule is excluded but interior rules are included. */
  return axis_width (page, axis, cell_ofs (x0), cell_ofs (x1) - 1);
}
219 /* Returns the width of the widest cell, excluding headers, along AXIS in
222 max_cell_width (const struct render_page *page, int axis)
224 int n = page->n[axis];
225 int x0 = page->h[axis][0];
226 int x1 = n - page->h[axis][1];
230 for (x = x0; x < x1; x++)
232 int w = cell_width (page, axis, x);
239 /* A cell that doesn't completely fit on the render_page. */
240 struct render_overflow
242 struct hmap_node node; /* In render_page's 'overflows' hmap. */
244 /* Occupied region of page.
246 d[H][0] is the leftmost column.
247 d[H][1] is the rightmost column, plus 1.
248 d[V][0] is the top row.
249 d[V][1] is the bottom row, plus 1.
251 The cell in its original table might occupy a larger region. This
252 member reflects the size of the cell in the current render_page, after
253 trimming off any rows or columns due to page-breaking. */
256 /* The space that has been trimmed off the cell:
258 overflow[H][0]: space trimmed off its left side.
259 overflow[H][1]: space trimmed off its right side.
260 overflow[V][0]: space trimmed off its top.
261 overflow[V][1]: space trimmed off its bottom.
263 During rendering, this information is used to position the rendered
264 portion of the cell within the available space.
266 When a cell is rendered, sometimes it is permitted to spill over into
267 space that is ordinarily reserved for rules. Either way, this space is
268 still included in overflow values.
270 Suppose, for example, that a cell that joins 2 columns has a width of 60
271 pixels and content "abcdef", that the 2 columns that it joins have
272 widths of 20 and 30 pixels, respectively, and that therefore the rule
273 between the two joined columns has a width of 10 (20 + 10 + 30 = 60).
274 It might render like this, if each character is 10x10, and showing a few
275 extra table cells for context:
283 If this render_page is broken at the rule that separates "gh" from
284 "ijk", then the page that contains the left side of the "abcdef" cell
285 will have overflow[H][1] of 10 + 30 = 40 for its portion of the cell,
286 and the page that contains the right side of the cell will have
287 overflow[H][0] of 20 + 10 = 30. The two resulting pages would look like
305 int overflow[TABLE_N_AXES][2];
/* Returns a hash value for the cell whose top-left corner is (X,Y). */
static unsigned int
hash_cell (int x, int y)
{
  /* Pack both coordinates into one int before hashing; fine as long as
     coordinates stay below 2**16, which table dimensions do in practice. */
  return hash_int (x + (y << 16), 0);
}
315 /* Searches PAGE's set of render_overflow for one whose top-left cell is
316 (X,Y). Returns it, if there is one, otherwise a null pointer. */
317 static const struct render_overflow *
318 find_overflow (const struct render_page *page, int x, int y)
320 if (!hmap_is_empty (&page->overflows))
322 const struct render_overflow *of;
324 HMAP_FOR_EACH_WITH_HASH (of, struct render_overflow, node,
325 hash_cell (x, y), &page->overflows)
326 if (x == of->d[H] && y == of->d[V])
/* Row or column dimensions.  Used to figure the size of a table in
   render_page_create() and discarded after that. */
struct render_row
  {
    /* Width without considering rows (or columns) that span more than one
       row (or column). */
    int unspanned;

    /* Width taking spanned rows (or columns) into consideration. */
    int width;
  };
345 /* Modifies the 'width' members of the N elements of ROWS so that their sum,
346 when added to rule widths RULES[1] through RULES[N - 1] inclusive, is at
/* NOTE(review): the embedded original line numbers jump here (347-348,
   351, 359-360, ...), so this listing has lost lines of this function --
   including the return type, braces, and the early "return;" that should
   follow the 'total_unspanned >= width' test.  Code kept verbatim. */
349 distribute_spanned_width (int width,
350 struct render_row *rows, const int *rules, int n)
352 /* Sum up the unspanned widths of the N rows for use as weights. */
353 int total_unspanned = 0;
354 for (int x = 0; x < n; x++)
355 total_unspanned += rows[x].unspanned;
356 for (int x = 0; x < n - 1; x++)
357 total_unspanned += rules[x + 1];
358 if (total_unspanned >= width)
361 /* The algorithm used here is based on the following description from HTML 4:
363 For cells that span multiple columns, a simple approach consists of
364 apportioning the min/max widths evenly to each of the constituent
365 columns. A slightly more complex approach is to use the min/max
366 widths of unspanned cells to weight how spanned widths are
367 apportioned. Experiments suggest that a blend of the two approaches
368 gives good results for a wide range of tables.
370 We blend the two approaches half-and-half, except that we cannot use the
371 unspanned weights when 'total_unspanned' is 0 (because that would cause a
374 The calculation we want to do is this:
377 w1 = width * (column's unspanned width) / (total unspanned width)
378 (column's width) = (w0 + w1) / 2
380 We implement it as a precise calculation in integers by multiplying w0 and
381 w1 by the common denominator of all three calculations (d), dividing that
382 out in the column width calculation, and then keeping the remainder for
385 (We actually compute the unspanned width of a column as twice the
386 unspanned width, plus the width of the rule on the left, plus the width of
387 the rule on the right. That way each rule contributes to both the cell on
388 its left and on its right.)
390 long long int d0 = n;
391 long long int d1 = 2LL * MAX (total_unspanned, 1);
392 long long int d = d0 * d1;
393 if (total_unspanned > 0)
395 long long int w = d / 2;
396 for (int x = 0; x < n; x++)
399 if (total_unspanned > 0)
401 long long int unspanned = rows[x].unspanned * 2LL;
403 unspanned += rules[x + 1];
405 unspanned += rules[x];
406 w += width * unspanned * d0;
409 rows[x].width = MAX (rows[x].width, w / d);
410 w -= rows[x].width * d;
414 /* Initializes PAGE->cp[AXIS] from the row widths in ROWS and the rule widths
/* NOTE(review): lines are missing from this listing (return type, braces,
   the 'cp' pointer initialization, and its advancement between loop
   iterations); code kept verbatim.  The visible logic alternately adds a
   rule width and a row width to build the cumulative-position array. */
417 accumulate_row_widths (const struct render_page *page, enum table_axis axis,
418 const struct render_row *rows, const int *rules)
420 int n = page->n[axis];
426 for (z = 0; z < n; z++)
428 cp[1] = cp[0] + rules[z];
429 cp[2] = cp[1] + rows[z].width;
432 cp[1] = cp[0] + rules[n];
435 /* Returns the sum of widths of the N ROWS and N+1 RULES. */
/* NOTE(review): the return type, braces, the 'width' accumulator
   declaration, the body of the second loop, and the final 'return width;'
   are missing from this listing; code kept verbatim. */
437 calculate_table_width (int n, const struct render_row *rows, int *rules)
443 for (x = 0; x < n; x++)
444 width += rows[x].width;
445 for (x = 0; x <= n; x++)
453 /* Returns the line style to use for drawing a rule of the given TYPE. */
454 static enum render_line_style
455 rule_to_render_type (unsigned char type)
460 return RENDER_LINE_NONE;
462 return RENDER_LINE_SINGLE;
464 return RENDER_LINE_DOUBLE;
470 /* Returns the width of the rule in TABLE that is at offset Z along axis A, if
471 rendered with PARAMS. */
/* NOTE(review): lines are missing from this listing -- the return type, the
   declarations of 'd', 'rules', and 'width', the assignment d[a] = z, and
   the final 'return width;'.  Code kept verbatim. */
473 measure_rule (const struct render_params *params, const struct table *table,
474 enum table_axis a, int z)
476 enum table_axis b = !a;
481 /* Determine all types of rules that are present, as a bitmap in 'rules'
482 where rule type 't' is present if bit 2**t is set. */
485 for (d[b] = 0; d[b] < table->n[b]; d[b]++)
486 rules |= 1u << table_get_rule (table, a, d[H], d[V]);
488 /* Turn off TAL_NONE because it has width 0 and we needn't bother. However,
489 if the device doesn't support margins, make sure that there is at least a
490 small gap between cells (but we don't need any at the left or right edge
492 if (rules & (1u << TAL_0))
494 rules &= ~(1u << TAL_0);
495 if (z > 0 && z < table->n[a] && !params->supports_margins && a == H)
496 rules |= 1u << TAL_1;
499 /* Calculate maximum width of the rules that are present. */
501 if (rules & (1u << TAL_1)
502 || (z > 0 && z < table->n[a] && rules & (1u << TAL_0)))
503 width = params->line_widths[a][RENDER_LINE_SINGLE];
504 if (rules & (1u << TAL_2))
505 width = MAX (width, params->line_widths[a][RENDER_LINE_DOUBLE]);
509 /* Allocates and returns a new render_page using PARAMS and TABLE. Allocates
510 space for all of the members of the new page, but the caller must initialize
511 the 'cp' member itself. */
512 static struct render_page *
513 render_page_allocate (const struct render_params *params,
/* NOTE(review): lines are missing from this listing -- presumably the
   'struct table *table' parameter, the statement taking a reference on the
   table, the reference-count initialization, and the final 'return page;'.
   Confirm against upstream.  Code kept verbatim. */
516 struct render_page *page;
519 page = xmalloc (sizeof *page);
520 page->params = params;
523 page->n[H] = table->n[H];
524 page->n[V] = table->n[V];
525 page->h[H][0] = table->h[H][0];
526 page->h[H][1] = table->h[H][1];
527 page->h[V][0] = table->h[V][0];
528 page->h[V][1] = table->h[V][1];
530 for (i = 0; i < TABLE_N_AXES; i++)
532 page->cp[i] = xmalloc ((2 * page->n[i] + 2) * sizeof *page->cp[i]);
533 page->join_crossing[i] = xzalloc ((page->n[i] + 1) * sizeof *page->join_crossing[i]);
536 hmap_init (&page->overflows);
537 memset (page->is_edge_cutoff, 0, sizeof page->is_edge_cutoff);
542 /* Allocates and returns a new render_page for PARAMS and TABLE, initializing
543 cp[H] in the new page from ROWS and RULES. The caller must still initialize
/* NOTE(review): the end of this sentence (presumably "cp[V]"), the
   'struct table *table' parameter line, and the final 'return page;' are
   missing from this listing; code kept verbatim. */
545 static struct render_page *
546 create_page_with_exact_widths (const struct render_params *params,
548 const struct render_row *rows, int *rules)
550 struct render_page *page = render_page_allocate (params, table);
551 accumulate_row_widths (page, H, rows, rules);
555 /* Allocates and returns a new render_page for PARAMS and TABLE.
557 Initializes cp[H] in the new page by setting the width of each row 'i' to
558 somewhere between the minimum cell width ROW_MIN[i].width and the maximum
559 ROW_MAX[i].width. Sets the width of rules to those in RULES.
561 W_MIN is the sum of ROWS_MIN[].width.
563 W_MAX is the sum of ROWS_MAX[].width.
565 The caller must still initialize cp[V]. */
566 static struct render_page *
567 create_page_with_interpolated_widths (const struct render_params *params,
/* NOTE(review): lines are missing from this listing -- presumably the
   'struct table *table' parameter, the 'w -= extra * wanted;' remainder
   update and 'cph += 2;' advance inside the loop, and the final
   'return page;'.  The visible logic distributes the leftover space
   ('avail') in proportion to each column's min-to-max growth. */
569 const struct render_row *rows_min,
570 const struct render_row *rows_max,
571 int w_min, int w_max, const int *rules)
573 const int n = table->n[H];
574 const long long int avail = params->size[H] - w_min;
575 const long long int wanted = w_max - w_min;
579 struct render_page *page = render_page_allocate (params, table);
581 int *cph = page->cp[H];
583 long long int w = wanted / 2;
584 for (int x = 0; x < n; x++)
586 w += avail * (rows_max[x].width - rows_min[x].width);
587 int extra = w / wanted;
590 cph[1] = cph[0] + rules[x];
591 cph[2] = cph[1] + rows_min[x].width + extra;
594 cph[1] = cph[0] + rules[n];
596 assert (page->cp[H][n * 2 + 1] == params->size[H]);
602 set_join_crossings (struct render_page *page, enum table_axis axis,
603 const struct table_cell *cell, int *rules)
607 for (z = cell->d[axis][0] + 1; z <= cell->d[axis][1] - 1; z++)
608 page->join_crossing[axis][z] = rules[z];
611 /* Creates and returns a new render_page for rendering TABLE on a device
614 The new render_page will be suitable for rendering on a device whose page
615 size is PARAMS->size, but the caller is responsible for actually breaking it
616 up to fit on such a device, using the render_break abstraction. */
617 static struct render_page *
618 render_page_create (const struct render_params *params, struct table *table)
/* NOTE(review): the embedded original line numbers jump throughout this
   function, so many lines are missing from this listing (local variable
   declarations such as nc, nr, x, y, i, w[2], table_widths[2]; braces; the
   frees of the temporary arrays; and the final 'return page;').  Code kept
   verbatim.  The visible structure is: measure rule widths, measure
   unspanned column widths, distribute spanned column widths, pick final
   column widths by fitting against params->size[H], then repeat the
   process for row heights, and finally drop headers that are too big. */
620 struct render_page *page;
622 struct render_row *columns[2];
623 struct render_row *rows;
625 int *rules[TABLE_N_AXES];
629 enum table_axis axis;
631 nc = table_nc (table);
632 nr = table_nr (table);
634 /* Figure out rule widths. */
635 for (axis = 0; axis < TABLE_N_AXES; axis++)
637 int n = table->n[axis] + 1;
640 rules[axis] = xnmalloc (n, sizeof *rules);
641 for (z = 0; z < n; z++)
642 rules[axis][z] = measure_rule (params, table, axis, z);
645 /* Calculate minimum and maximum widths of cells that do not
646 span multiple columns. */
647 for (i = 0; i < 2; i++)
648 columns[i] = xzalloc (nc * sizeof *columns[i]);
649 for (y = 0; y < nr; y++)
650 for (x = 0; x < nc; )
652 struct table_cell cell;
654 table_get_cell (table, x, y, &cell);
655 if (y == cell.d[V][0])
657 if (table_cell_colspan (&cell) == 1)
662 params->measure_cell_width (params->aux, &cell,
664 for (i = 0; i < 2; i++)
665 if (columns[i][x].unspanned < w[i])
666 columns[i][x].unspanned = w[i];
670 table_cell_free (&cell);
673 /* Distribute widths of spanned columns. */
674 for (i = 0; i < 2; i++)
675 for (x = 0; x < nc; x++)
676 columns[i][x].width = columns[i][x].unspanned;
677 for (y = 0; y < nr; y++)
678 for (x = 0; x < nc; )
680 struct table_cell cell;
682 table_get_cell (table, x, y, &cell);
683 if (y == cell.d[V][0] && table_cell_colspan (&cell) > 1)
687 params->measure_cell_width (params->aux, &cell, &w[MIN], &w[MAX]);
688 for (i = 0; i < 2; i++)
689 distribute_spanned_width (w[i], &columns[i][cell.d[H][0]],
690 rules[H], table_cell_colspan (&cell));
693 table_cell_free (&cell);
696 /* In pathological cases, spans can cause the minimum width of a column to
697 exceed the maximum width. This bollixes our interpolation algorithm
698 later, so fix it up. */
699 for (i = 0; i < nc; i++)
700 if (columns[MIN][i].width > columns[MAX][i].width)
701 columns[MAX][i].width = columns[MIN][i].width;
703 /* Decide final column widths. */
704 for (i = 0; i < 2; i++)
705 table_widths[i] = calculate_table_width (table_nc (table),
706 columns[i], rules[H]);
707 if (table_widths[MAX] <= params->size[H])
709 /* Fits even with maximum widths. Use them. */
710 page = create_page_with_exact_widths (params, table, columns[MAX],
713 else if (table_widths[MIN] <= params->size[H])
715 /* Fits with minimum widths, so distribute the leftover space. */
716 page = create_page_with_interpolated_widths (
717 params, table, columns[MIN], columns[MAX],
718 table_widths[MIN], table_widths[MAX], rules[H]);
722 /* Doesn't fit even with minimum widths. Assign minimums for now, and
723 later we can break it horizontally into multiple pages. */
724 page = create_page_with_exact_widths (params, table, columns[MIN],
728 /* Calculate heights of cells that do not span multiple rows. */
729 rows = xzalloc (nr * sizeof *rows);
730 for (y = 0; y < nr; y++)
732 for (x = 0; x < nc; )
734 struct render_row *r = &rows[y];
735 struct table_cell cell;
737 table_get_cell (table, x, y, &cell);
738 if (y == cell.d[V][0])
740 if (table_cell_rowspan (&cell) == 1)
742 int w = joined_width (page, H, cell.d[H][0], cell.d[H][1]);
743 int h = params->measure_cell_height (params->aux, &cell, w);
744 if (h > r->unspanned)
745 r->unspanned = r->width = h;
748 set_join_crossings (page, V, &cell, rules[V]);
750 if (table_cell_colspan (&cell) > 1)
751 set_join_crossings (page, H, &cell, rules[H]);
754 table_cell_free (&cell);
757 for (i = 0; i < 2; i++)
760 /* Distribute heights of spanned rows. */
761 for (y = 0; y < nr; y++)
762 for (x = 0; x < nc; )
764 struct table_cell cell;
766 table_get_cell (table, x, y, &cell);
767 if (y == cell.d[V][0] && table_cell_rowspan (&cell) > 1)
769 int w = joined_width (page, H, cell.d[H][0], cell.d[H][1]);
770 int h = params->measure_cell_height (params->aux, &cell, w);
771 distribute_spanned_width (h, &rows[cell.d[V][0]], rules[V],
772 table_cell_rowspan (&cell));
775 table_cell_free (&cell);
778 /* Decide final row heights. */
779 accumulate_row_widths (page, V, rows, rules[V]);
782 /* Measure headers. If they are "too big", get rid of them. */
783 for (axis = 0; axis < TABLE_N_AXES; axis++)
785 int hw = headers_width (page, axis);
786 if (hw * 2 >= page->params->size[axis]
787 || hw + max_cell_width (page, axis) > page->params->size[axis])
789 page->table = table_unshare (page->table);
790 page->table->h[axis][0] = page->table->h[axis][1] = 0;
791 page->h[axis][0] = page->h[axis][1] = 0;
801 /* Increases PAGE's reference count. */
/* NOTE(review): the return type line and the body that increments the
   reference count and returns the page are missing from this listing;
   code kept verbatim.  The CONST_CAST is safe because refcounting does
   not change the page's logical contents. */
803 render_page_ref (const struct render_page *page_)
805 struct render_page *page = CONST_CAST (struct render_page *, page_);
810 /* Decreases PAGE's reference count and destroys PAGE if this causes the
811 reference count to fall to zero. */
/* NOTE(review): lines are missing from this listing -- the return type,
   the hmap iteration body that frees each overflow node, the free of
   page->cp[i], and the final free of the page itself.  Code kept
   verbatim. */
813 render_page_unref (struct render_page *page)
815 if (page != NULL && --page->ref_cnt == 0)
818 struct render_overflow *overflow, *next;
820 HMAP_FOR_EACH_SAFE (overflow, next, struct render_overflow, node,
823 hmap_destroy (&page->overflows);
825 table_unref (page->table);
827 for (i = 0; i < TABLE_N_AXES; ++i)
829 free (page->join_crossing[i]);
837 /* Returns the size of PAGE along AXIS. (This might be larger than the page
838 size specified in the parameters passed to render_page_create(). Use a
839 render_break to break up a render_page into page-sized chunks.) */
841 render_page_get_size (const struct render_page *page, enum table_axis axis)
843 return page->cp[axis][page->n[axis] * 2 + 1];
/* Returns the best vertical breakpoint for PAGE that is no more than HEIGHT
   pixels tall.  NOTE(review): the return type line, the 'return 0;' after
   the first test, and the fallback return when everything fits are missing
   from this listing; code kept verbatim. */
847 render_page_get_best_breakpoint (const struct render_page *page, int height)
851 /* If there's no room for at least the top row and the rules above and below
852 it, don't include any of the table. */
853 if (page->cp[V][3] > height)
856 /* Otherwise include as many rows and rules as we can. */
857 for (y = 5; y <= 2 * page->n[V] + 1; y += 2)
858 if (page->cp[V][y] > height)
859 return page->cp[V][y - 2];
863 /* Drawing render_pages. */
865 static inline enum render_line_style
866 get_rule (const struct render_page *page, enum table_axis axis,
867 const int d[TABLE_N_AXES])
869 return rule_to_render_type (table_get_rule (page->table,
870 axis, d[H] / 2, d[V] / 2));
/* Returns true if the current locale's output direction is right-to-left,
   as signaled by the translation of the magic "output-direction-ltr"
   string; false otherwise.  Warns on stderr about a translation that is
   neither of the two recognized values. */
static bool
render_direction_rtl (void)
{
  /* TRANSLATORS: Do not translate this string.  If the script of your language
     reads from right to left (eg Persian, Arabic, Hebrew etc), then replace
     this string with "output-direction-rtl".  Otherwise either leave it
     untranslated or copy it verbatim. */
  const char *dir = _("output-direction-ltr");
  if ( 0 == strcmp ("output-direction-rtl", dir))
    return true;

  if ( 0 != strcmp ("output-direction-ltr", dir))
    fprintf (stderr, "This localisation has been incorrectly translated. Complain to the translator.\n");

  return false;
}
/* Draws the rule at cp-offset D, shifted by OFS, in PAGE by calling the
   device's draw_line callback.  NOTE(review): the leading comment, return
   type, and several branches of the per-axis style-selection logic
   (including the construction of 'e' used at the line numbered 922) are
   missing from this listing; code kept verbatim. */
897 render_rule (const struct render_page *page, const int ofs[TABLE_N_AXES],
898 const int d[TABLE_N_AXES])
900 enum render_line_style styles[TABLE_N_AXES][2];
903 for (a = 0; a < TABLE_N_AXES; a++)
905 enum table_axis b = !a;
907 styles[a][0] = styles[a][1] = RENDER_LINE_NONE;
910 || (page->is_edge_cutoff[a][0] && d[a] == 0)
911 || (page->is_edge_cutoff[a][1] && d[a] == page->n[a] * 2))
922 styles[a][0] = get_rule (page, a, e);
925 if (d[b] / 2 < page->table->n[b])
926 styles[a][1] = get_rule (page, a, d);
929 styles[a][0] = styles[a][1] = get_rule (page, a, d);
932 if (styles[H][0] != RENDER_LINE_NONE || styles[H][1] != RENDER_LINE_NONE
933 || styles[V][0] != RENDER_LINE_NONE || styles[V][1] != RENDER_LINE_NONE)
935 int bb[TABLE_N_AXES][2];
937 bb[H][0] = ofs[H] + page->cp[H][d[H]];
938 bb[H][1] = ofs[H] + page->cp[H][d[H] + 1];
939 if (render_direction_rtl ())
942 bb[H][0] = render_page_get_size (page, H) - bb[H][1];
943 bb[H][1] = render_page_get_size (page, H) - temp;
945 bb[V][0] = ofs[V] + page->cp[V][d[V]];
946 bb[V][1] = ofs[V] + page->cp[V][d[V] + 1];
947 page->params->draw_line (page->params->aux, bb, styles);
/* Draws CELL, shifted by OFS, in PAGE by computing its bounding box 'bb'
   and clip region 'clip' and calling the device's draw_cell callback.
   NOTE(review): the leading comment, return type, the 'int temp = bb[H][0];'
   before the RTL mirroring, and the braces around the overflow-adjustment
   section are missing from this listing; code kept verbatim. */
952 render_cell (const struct render_page *page, const int ofs[TABLE_N_AXES],
953 const struct table_cell *cell)
955 const struct render_overflow *of;
956 int bb[TABLE_N_AXES][2];
957 int clip[TABLE_N_AXES][2];
959 bb[H][0] = clip[H][0] = ofs[H] + page->cp[H][cell->d[H][0] * 2 + 1];
960 bb[H][1] = clip[H][1] = ofs[H] + page->cp[H][cell->d[H][1] * 2];
961 if (render_direction_rtl ())
964 bb[H][0] = clip[H][0] = render_page_get_size (page, H) - bb[H][1];
965 bb[H][1] = clip[H][1] = render_page_get_size (page, H) - temp;
967 bb[V][0] = clip[V][0] = ofs[V] + page->cp[V][cell->d[V][0] * 2 + 1];
968 bb[V][1] = clip[V][1] = ofs[V] + page->cp[V][cell->d[V][1] * 2];
970 of = find_overflow (page, cell->d[H][0], cell->d[V][0]);
973 enum table_axis axis;
975 for (axis = 0; axis < TABLE_N_AXES; axis++)
977 if (of->overflow[axis][0])
979 bb[axis][0] -= of->overflow[axis][0];
980 if (cell->d[axis][0] == 0 && !page->is_edge_cutoff[axis][0])
981 clip[axis][0] = ofs[axis] + page->cp[axis][cell->d[axis][0] * 2];
983 if (of->overflow[axis][1])
985 bb[axis][1] += of->overflow[axis][1];
986 if (cell->d[axis][1] == page->n[axis] && !page->is_edge_cutoff[axis][1])
987 clip[axis][1] = ofs[axis] + page->cp[axis][cell->d[axis][1] * 2 + 1];
992 page->params->draw_cell (page->params->aux, cell, bb, clip);
995 /* Draws the cells of PAGE indicated in BB. */
/* NOTE(review): lines are missing from this listing -- the return type,
   the declarations of x and y, the assignments filling d[] before calling
   render_rule, the 'x++' advance on the rule branch, and braces.  Code
   kept verbatim.  'is_rule' (even cp-offsets) distinguishes rules from
   cells in the interleaved coordinate system. */
997 render_page_draw_cells (const struct render_page *page,
998 int ofs[TABLE_N_AXES], int bb[TABLE_N_AXES][2])
1002 for (y = bb[V][0]; y < bb[V][1]; y++)
1003 for (x = bb[H][0]; x < bb[H][1]; )
1004 if (is_rule (x) || is_rule (y))
1006 int d[TABLE_N_AXES];
1009 render_rule (page, ofs, d);
1014 struct table_cell cell;
1016 table_get_cell (page->table, x / 2, y / 2, &cell);
1017 if (y / 2 == bb[V][0] / 2 || y / 2 == cell.d[V][0])
1018 render_cell (page, ofs, &cell);
1019 x = rule_ofs (cell.d[H][1]);
1020 table_cell_free (&cell);
1024 /* Renders PAGE, by calling the 'draw_line' and 'draw_cell' functions from the
1025 render_params provided to render_page_create(). */
/* NOTE(review): the return type and the zero-initialization of bb[H][0]
   and bb[V][0] are missing from this listing; code kept verbatim.  The
   bounding box spans the whole page in interleaved rule/cell offsets. */
1027 render_page_draw (const struct render_page *page, int ofs[TABLE_N_AXES])
1029 int bb[TABLE_N_AXES][2];
1032 bb[H][1] = page->n[H] * 2 + 1;
1034 bb[V][1] = page->n[V] * 2 + 1;
1036 render_page_draw_cells (page, ofs, bb);
1039 /* Returns the greatest value i, 0 <= i < n, such that cp[i] <= x0. */
/* NOTE(review): both binary searches below are missing lines in this
   listing -- return types, loop structure, the low/high updates, and the
   final 'return best;'.  Code kept verbatim. */
1041 get_clip_min_extent (int x0, const int cp[], int n)
1043 int low, high, best;
1050 int middle = low + (high - low) / 2;
1052 if (cp[middle] <= x0)
1064 /* Returns the least value i, 0 <= i < n, such that cp[i] >= x1. */
1066 get_clip_max_extent (int x1, const int cp[], int n)
1068 int low, high, best;
1075 int middle = low + (high - low) / 2;
1077 if (cp[middle] >= x1)
1078 best = high = middle;
1083 while (best > 0 && cp[best - 1] == cp[best])
1089 /* Renders the cells of PAGE that intersect (X,Y)-(X+W,Y+H), by calling the
1090 'draw_line' and 'draw_cell' functions from the render_params provided to
1091 render_page_create(). */
1093 render_page_draw_region (const struct render_page *page,
1094 int ofs[TABLE_N_AXES], int clip[TABLE_N_AXES][2])
1096 int bb[TABLE_N_AXES][2];
1098 bb[H][0] = get_clip_min_extent (clip[H][0], page->cp[H], page->n[H] * 2 + 1);
1099 bb[H][1] = get_clip_max_extent (clip[H][1], page->cp[H], page->n[H] * 2 + 1);
1100 bb[V][0] = get_clip_min_extent (clip[V][0], page->cp[V], page->n[V] * 2 + 1);
1101 bb[V][1] = get_clip_max_extent (clip[V][1], page->cp[V], page->n[V] * 2 + 1);
1103 render_page_draw_cells (page, ofs, bb);
1106 /* Breaking up tables to fit on a page. */
1108 /* An iterator for breaking render_pages into smaller chunks. */
/* NOTE(review): the "struct render_break {" opener, the closing "};", and
   the remainder of the render_page_select declaration are missing from
   this listing; code kept verbatim. */
1111 struct render_page *page; /* Page being broken up. */
1112 enum table_axis axis; /* Axis along which 'page' is being broken. */
1113 int z; /* Next cell along 'axis'. */
1114 int pixel; /* Pixel offset within cell 'z' (usually 0). */
1115 int hw; /* Width of headers of 'page' along 'axis'. */
1118 static int needed_size (const struct render_break *, int cell);
1119 static bool cell_is_breakable (const struct render_break *, int cell);
1120 static struct render_page *render_page_select (const struct render_page *,
1120 static struct render_page *render_page_select (const struct render_page *,
1125 /* Initializes render_break B for breaking PAGE along AXIS.
1126 Takes ownership of PAGE. */
1128 render_break_init (struct render_break *b, struct render_page *page,
1129 enum table_axis axis)
1133 b->z = page->h[axis][0];
1135 b->hw = headers_width (page, axis);
1138 /* Initializes B as a render_break structure for which
1139 render_break_has_next() always returns false. */
/* NOTE(review): the return type line and the other member assignments
   (notably setting b->page to a null pointer, which is what makes
   render_break_has_next() return false) are missing from this listing;
   code kept verbatim. */
1141 render_break_init_empty (struct render_break *b)
1144 b->axis = TABLE_HORZ;
1150 /* Frees B and unrefs the render_page that it owns. */
/* NOTE(review): the return type line, braces, and the null check on B
   (if any) are missing from this listing; code kept verbatim. */
1152 render_break_destroy (struct render_break *b)
1156 render_page_unref (b->page);
1161 /* Returns true if B still has cells that are yet to be returned,
1162 false if all of B's page has been processed. */
1164 render_break_has_next (const struct render_break *b)
1166 const struct render_page *page = b->page;
1167 enum table_axis axis = b->axis;
1169 return page != NULL && b->z < page->n[axis] - page->h[axis][1];
1172 /* Returns a new render_page that is up to SIZE pixels wide along B's axis.
1173 Returns a null pointer if B has already been completely broken up, or if
1174 SIZE is too small to reasonably render any cells. The latter will never
1175 happen if SIZE is at least as large as the page size passed to
1176 render_page_create() along B's axis. */
1177 static struct render_page *
1178 render_break_next (struct render_break *b, int size)
1180 const struct render_page *page = b->page;
1181 enum table_axis axis = b->axis;
1182 struct render_page *subpage;
/* Nothing left to break off? */
1185 if (!render_break_has_next (b))
/* Scan forward from the current position for the furthest cell boundary
   that still fits within SIZE.  (needed_size() accounts for headers,
   rules, and any pixel offset into the first cell.) */
1189 for (z = b->z; z < page->n[axis] - page->h[axis][1]; z++)
1191 int needed = needed_size (b, z + 1);
/* Cell z does not fit whole; if the device allows breaking this cell,
   compute how many pixels of it to render on this subpage. */
1194 if (cell_is_breakable (b, z))
1196 /* If there is no right header and we render a partial cell on
1197 the right side of the body, then we omit the rightmost rule of
1198 the body. Otherwise the rendering is deceptive because it
1199 looks like the whole cell is present instead of a partial
1202 This is similar to code for the left side in needed_size(). */
1203 int rule_allowance = (page->h[axis][1]
1205 : rule_width (page, axis, z))
1207 /* The amount that, if we added cell 'z', the rendering would
1208 overfill the allocated 'size'. */
1209 int overhang = needed - size - rule_allowance;
1211 /* The width of cell 'z'. */
1212 int cell_size = cell_width (page, axis, z);
1214 /* The amount trimmed off the left side of 'z',
1215 and the amount left to render. */
1216 int cell_ofs = z == b->z ? b->pixel : 0;
1217 int cell_left = cell_size - cell_ofs;
1219 /* A small but visible width. */
1220 int em = page->params->font_size[axis];
1222 /* If some of the cell remains to render,
1223 and there would still be some of the cell left afterward,
1224 then partially render that much of the cell. */
1225 pixel = (cell_left && cell_left > overhang
1226 ? cell_left - overhang + cell_ofs
1229 /* If there would be only a tiny amount of the cell left after
1230 rendering it partially, reduce the amount rendered slightly
1231 to make the output look a little better. */
1232 if (pixel + em > cell_size)
1233 pixel = MAX (pixel - em, 0);
1235 /* If we're breaking vertically, then consider whether the cells
1236 being broken have a better internal breakpoint than the exact
1237 number of pixels available, which might look bad e.g. because
1238 it breaks in the middle of a line of text. */
1239 if (axis == TABLE_VERT && page->params->adjust_break)
/* Consult the device about each cell that crosses break row z,
   possibly lowering 'pixel' to a nicer internal breakpoint. */
1243 for (x = 0; x < page->n[H]; )
1245 struct table_cell cell;
1249 table_get_cell (page->table, x, z, &cell);
1250 w = joined_width (page, H, cell.d[H][0], cell.d[H][1]);
1251 better_pixel = page->params->adjust_break (
1252 page->params->aux, &cell, w, pixel);
1254 table_cell_free (&cell);
1256 if (better_pixel < pixel)
/* Never move the breakpoint back before material already
   emitted for this cell on a previous subpage. */
1258 if (better_pixel > (z == b->z ? b->pixel : 0))
1260 pixel = better_pixel;
1263 else if (better_pixel == 0 && z != b->z)
/* Not even a partial first cell fits: SIZE is too small, so produce no
   subpage.  (The return on failure is on a line elided here.) */
1276 if (z == b->z && !pixel)
/* Cut out cells [b->z, z) plus 'pixel' pixels of cell z (if any).
   NOTE(review): advancing b->z/b->pixel past the returned region happens
   on lines elided from this extract — confirm against the full source. */
1279 subpage = render_page_select (page, axis, b->z, b->pixel,
1281 pixel ? cell_width (page, axis, z) - pixel
1288 /* Returns the width that would be required along B's axis to render a page
1289 from B's current position up to but not including CELL.
 
   The total is: left header + merged left rule + body (minus any pixel
   offset into the first cell) + merged right rule + right header, plus a
   join-crossing adjustment when both headers are present. */
1291 needed_size (const struct render_break *b, int cell)
1293 const struct render_page *page = b->page;
1294 enum table_axis axis = b->axis;
1297 /* Width of left header not including its rightmost rule. */
1298 size = axis_width (page, axis, 0, rule_ofs (page->h[axis][0]));
1300 /* If we have a pixel offset and there is no left header, then we omit the
1301 leftmost rule of the body. Otherwise the rendering is deceptive because
1302 it looks like the whole cell is present instead of a partial cell.
1304 Otherwise (if there are headers) we will be merging two rules: the
1305 rightmost rule in the header and the leftmost rule in the body. We assume
1306 that the width of a merged rule is the larger of the widths of either rule
1308 if (b->pixel == 0 || page->h[axis][0])
1309 size += MAX (rule_width (page, axis, page->h[axis][0]),
1310 rule_width (page, axis, b->z));
1312 /* Width of body, minus any pixel offset in the leftmost cell. */
1313 size += joined_width (page, axis, b->z, cell) - b->pixel;
1315 /* Width of rightmost rule in body merged with leftmost rule in headers. */
1316 size += MAX (rule_width_r (page, axis, page->h[axis][1]),
1317 rule_width (page, axis, cell));
1319 /* Width of right header not including its leftmost rule. */
1320 size += axis_width (page, axis, rule_ofs_r (page, axis, page->h[axis][1]),
1321 rule_ofs_r (page, axis, 0));
1323 /* Join crossing. */
1324 if (page->h[axis][0] && page->h[axis][1])
1325 size += page->join_crossing[axis][b->z];
1330 /* Returns true if CELL along B's axis may be broken across a page boundary.
1332 This is just a heuristic. Breaking cells across page boundaries can save
1333 space, but it looks ugly. */
1335 cell_is_breakable (const struct render_break *b, int cell)
1337 const struct render_page *page = b->page;
1338 enum table_axis axis = b->axis;
/* Only cells at least as wide as the device-specified minimum are worth
   breaking; smaller ones are always kept whole. */
1340 return cell_width (page, axis, cell) >= page->params->min_break[axis];
/* State for breaking a sequence of render_pages into device-sized chunks:
   first horizontally (x_break), then each horizontal strip vertically
   (y_break). */
1347 const struct render_params *params;
/* The pages to render, in order (see render_pager_create(): title, table
   body, caption, footnotes).  'allocated_pages' is the array capacity. */
1349 struct render_page **pages;
1350 size_t n_pages, allocated_pages;
/* Current breaking state for the page being consumed. */
1353 struct render_break x_break;
1354 struct render_break y_break;
/* Appends a render_page rendering TABLE to P's page array, growing the
   array as needed, and returns the new page.
   NOTE(review): presumably takes ownership of TABLE, since callers pass
   table_ref()/freshly created tables — confirm against render_page_create's
   contract. */
1357 static const struct render_page *
1358 render_pager_add_table (struct render_pager *p, struct table *table)
1360 struct render_page *page;
/* Double the capacity (x2nrealloc) when the array is full. */
1362 if (p->n_pages >= p->allocated_pages)
1363 p->pages = x2nrealloc (p->pages, &p->allocated_pages, sizeof *p->pages);
1364 page = p->pages[p->n_pages++] = render_page_create (p->params, table);
/* Begins breaking the next of P's pages: starts a horizontal break over a
   fresh reference to that page and leaves the vertical break empty so that
   render_pager_has_next() will pull in the first horizontal strip. */
1369 render_pager_start_page (struct render_pager *p)
1371 render_break_init (&p->x_break, render_page_ref (p->pages[p->cur_page++]),
1373 render_break_init_empty (&p->y_break);
/* Appends to P a page listing all of ITEM's footnotes, one per row.
   NOTE(review): the guard for the zero-footnote case and the free of 'f'
   fall on lines elided from this extract — confirm against the full
   source. */
1377 add_footnote_page (struct render_pager *p, const struct table_item *item)
1379 const struct footnote **f;
1380 size_t n_footnotes = table_collect_footnotes (item, &f);
/* Two columns: footnote marker, then its content. */
1384 struct tab_table *t = tab_create (2, n_footnotes);
1386 for (size_t i = 0; i < n_footnotes; i++)
/* Column 0 holds only the footnote marker, attached to an empty cell. */
1389 tab_text (t, 0, i, TAB_LEFT, "");
1390 tab_add_footnote (t, 0, i, f[i]);
1391 tab_text (t, 1, i, TAB_LEFT, f[i]->content);
1393 render_pager_add_table (p, &t->table);
/* Appends to P a one-cell page containing T's text together with T's
   footnote markers.  Used for a table's title and caption.
   NOTE(review): the null-T early return appears to be on a line elided
   from this extract — confirm against the full source. */
1399 add_text_page (struct render_pager *p, const struct table_item_text *t)
1404 struct tab_table *tab = tab_create (1, 1);
1405 tab_text (tab, 0, 0, TAB_LEFT, t->content);
1406 for (size_t i = 0; i < t->n_footnotes; i++)
1407 tab_add_footnote (tab, 0, 0, t->footnotes[i]);
1408 render_pager_add_table (p, &tab->table);
1411 /* Creates and returns a new render_pager for rendering TABLE_ITEM on the
1412 device with the given PARAMS. */
1413 struct render_pager *
1414 render_pager_create (const struct render_params *params,
1415 const struct table_item *table_item)
1417 struct render_pager *p;
1419 p = xzalloc (sizeof *p);
/* Assemble the sub-pages in display order: title, the table itself,
   caption, then the collected footnotes. */
1423 add_text_page (p, table_item_get_title (table_item));
1426 render_pager_add_table (p, table_ref (table_item_get_table (table_item)));
1429 add_text_page (p, table_item_get_caption (table_item));
1432 add_footnote_page (p, table_item);
/* Prime the break state on the first page. */
1434 render_pager_start_page (p);
/* Destroys P, releasing its break state and every page it owns. */
1441 render_pager_destroy (struct render_pager *p)
1447 render_break_destroy (&p->x_break);
1448 render_break_destroy (&p->y_break);
/* Drop our reference to each page before freeing the array. */
1449 for (i = 0; i < p->n_pages; i++)
1450 render_page_unref (p->pages[i]);
1456 /* Returns true if P has content remaining to render, false if rendering is
1459 render_pager_has_next (const struct render_pager *p_)
/* Cast away const: refilling the break state is logically const from the
   caller's point of view (it only advances internal iteration state). */
1461 struct render_pager *p = CONST_CAST (struct render_pager *, p_);
/* Refill y_break from the next horizontal strip — and x_break from the
   next page — until some content is available or the pages run out. */
1463 while (!render_break_has_next (&p->y_break))
1465 render_break_destroy (&p->y_break);
1466 if (!render_break_has_next (&p->x_break))
1468 render_break_destroy (&p->x_break);
1469 if (p->cur_page >= p->n_pages)
/* Everything rendered: leave both breaks validly empty and report
   (on a line elided here) that nothing remains. */
1471 render_break_init_empty (&p->x_break);
1472 render_break_init_empty (&p->y_break);
1475 render_pager_start_page (p);
/* Take the next full-device-width strip from x_break and set up to
   break it vertically. */
1478 render_break_init (&p->y_break,
1479 render_break_next (&p->x_break, p->params->size[H]), V);
1484 /* Draws a chunk of content from P to fit in a space that has vertical size
1485 SPACE and the horizontal size specified in the render_params passed to
1486 render_page_create(). Returns the amount of space actually used by the
1487 rendered chunk, which will be 0 if SPACE is too small to render anything or
1488 if no content remains (use render_pager_has_next() to distinguish these
1491 render_pager_draw_next (struct render_pager *p, int space)
/* ofs[V] accumulates the vertical space consumed so far in this call. */
1493 int ofs[TABLE_N_AXES] = { 0, 0 };
1494 size_t start_page = SIZE_MAX;
1496 while (render_pager_has_next (p))
1498 struct render_page *page;
/* If we come back around to the page we started on without finishing
   it, stop — presumably to avoid looping forever when SPACE is too
   small to make progress (the break is on a line elided here). */
1500 if (start_page == p->cur_page)
1502 start_page = p->cur_page;
/* Take the next vertical chunk that fits in the remaining space. */
1504 page = render_break_next (&p->y_break, space - ofs[V]);
1508 render_page_draw (page, ofs);
1509 ofs[V] += render_page_get_size (page, V);
1510 render_page_unref (page);
1515 /* Draws all of P's content. */
1517 render_pager_draw (const struct render_pager *p)
/* Equivalent to drawing an unbounded region starting at the origin. */
1519 render_pager_draw_region (p, 0, 0, INT_MAX, INT_MAX);
1522 /* Draws the region of P's content that lies in the region (X,Y)-(X+W,Y+H).
1523 Some extra content might be drawn; the device should perform clipping as
1526 render_pager_draw_region (const struct render_pager *p,
1527 int x, int y, int w, int h)
1529 int ofs[TABLE_N_AXES] = { 0, 0 };
1530 int clip[TABLE_N_AXES][2];
/* The pages are stacked vertically; walk them, intersecting each with the
   requested vertical range.  (Setting clip[H] and advancing ofs[V] by each
   page's height happen on lines elided from this extract.) */
1535 for (i = 0; i < p->n_pages; i++)
1537 const struct render_page *page = p->pages[i];
1538 int size = render_page_get_size (page, V);
/* Vertical clip for this page, expressed in page-local coordinates. */
1540 clip[V][0] = MAX (y, ofs[V]) - ofs[V];
1541 clip[V][1] = MIN (y + h, ofs[V] + size) - ofs[V];
/* Only draw pages that actually intersect the region. */
1542 if (clip[V][1] > clip[V][0])
1543 render_page_draw_region (page, ofs, clip);
1549 /* Returns the size of P's content along AXIS; i.e. the content's width if AXIS
1550 is TABLE_HORZ and its length if AXIS is TABLE_VERT. */
1552 render_pager_get_size (const struct render_pager *p, enum table_axis axis)
1557 for (i = 0; i < p->n_pages; i++)
1559 int subsize = render_page_get_size (p->pages[i], axis);
/* Pages stack vertically, so the width is the widest page and the
   height is the sum of the pages' heights. */
1560 size = axis == H ? MAX (size, subsize) : size + subsize;
/* Returns the best vertical breakpoint for P's content at or below HEIGHT,
   delegating to the page that contains that height.
   NOTE(review): the accumulation of 'y' across earlier pages happens on
   lines elided from this extract — confirm against the full source. */
1567 render_pager_get_best_breakpoint (const struct render_pager *p, int height)
1572 for (i = 0; i < p->n_pages; i++)
1574 int size = render_page_get_size (p->pages[i], V);
/* HEIGHT falls within this page: ask it for its best internal
   breakpoint, translated back to pager coordinates. */
1575 if (y + size >= height)
1576 return render_page_get_best_breakpoint (p->pages[i], height - y) + y;
1583 /* render_page_select() and helpers. */
/* Bundles the parameters of one render_page_select() call so the helper
   functions below can share them. */
1585 struct render_page_selection
1587 const struct render_page *page; /* Page whose slice we are selecting. */
1588 struct render_page *subpage; /* New page under construction. */
1589 enum table_axis a; /* Axis of 'page' along which 'subpage' is a slice. */
1590 enum table_axis b; /* The opposite of 'a'. */
1591 int z0; /* First cell along 'a' being selected. */
1592 int z1; /* Last cell being selected, plus 1. */
1593 int p0; /* Number of pixels to trim off left side of z0. */
1594 int p1; /* Number of pixels to trim off right side of z1-1. */
/* Helpers for render_page_select(), defined further below. */
1597 static void cell_to_subpage (struct render_page_selection *,
1598 const struct table_cell *,
1599 int subcell[TABLE_N_AXES]);
1600 static const struct render_overflow *find_overflow_for_cell (
1601 struct render_page_selection *, const struct table_cell *);
1602 static struct render_overflow *insert_overflow (struct render_page_selection *,
1603 const struct table_cell *);
1605 /* Creates and returns a new render_page whose contents are a subregion of
1606 PAGE's contents. The new render_page includes cells Z0 through Z1
1607 (exclusive) along AXIS, plus any headers on AXIS.
1609 If P0 is nonzero, then it is a number of pixels to exclude from the left or
1610 top (according to AXIS) of cell Z0. Similarly, P1 is a number of pixels to
1611 exclude from the right or bottom of cell Z1 - 1. (P0 and P1 are used to
1612 render cells that are too large to fit on a single page.)
1614 The whole of axis !AXIS is included. (The caller may follow up with another
1615 call to render_page_select() to select on !AXIS to select on that axis as
1618 The caller retains ownership of PAGE, which is not modified. */
1619 static struct render_page *
1620 render_page_select (const struct render_page *page, enum table_axis axis,
1621 int z0, int p0, int z1, int p1)
1623 struct render_page_selection s;
1624 enum table_axis a = axis;
1625 enum table_axis b = !a;
1626 struct render_page *subpage;
1627 struct render_overflow *ro;
1633 /* Optimize case where all of PAGE is selected by just incrementing the
1635 if (z0 == page->h[a][0] && p0 == 0
1636 && z1 == page->n[a] - page->h[a][1] && p1 == 0)
/* Casting away const is safe here: only the reference count changes. */
1638 struct render_page *page_rw = CONST_CAST (struct render_page *, page);
1643 /* Allocate subpage. */
1644 subpage = render_page_allocate (page->params,
1645 table_select_slice (
1646 table_ref (page->table),
1649 /* An edge is cut off if it was cut off in PAGE or if we're trimming pixels
1650 off that side of the page and there are no headers. */
1651 subpage->is_edge_cutoff[a][0] =
1652 subpage->h[a][0] == 0 && (p0 || (z0 == 0 && page->is_edge_cutoff[a][0]));
1653 subpage->is_edge_cutoff[a][1] =
1654 subpage->h[a][1] == 0 && (p1 || (z1 == page->n[a]
1655 && page->is_edge_cutoff[a][1]));
1656 subpage->is_edge_cutoff[b][0] = page->is_edge_cutoff[b][0];
1657 subpage->is_edge_cutoff[b][1] = page->is_edge_cutoff[b][1];
1659 /* Select join crossings from PAGE into subpage. */
/* Three runs along axis 'a': the leading header, the selected body cells
   [z0, z1], then the trailing header. */
1660 jc = subpage->join_crossing[a];
1661 for (z = 0; z < page->h[a][0]; z++)
1662 *jc++ = page->join_crossing[a][z];
1663 for (z = z0; z <= z1; z++)
1664 *jc++ = page->join_crossing[a][z];
1665 for (z = page->n[a] - page->h[a][1]; z < page->n[a]; z++)
1666 *jc++ = page->join_crossing[a][z];
/* We should have filled exactly n[a] + 1 entries. */
1667 assert (jc == &subpage->join_crossing[a][subpage->n[a] + 1]);
/* The other axis is copied whole. */
1669 memcpy (subpage->join_crossing[b], page->join_crossing[b],
1670 (subpage->n[b] + 1) * sizeof **subpage->join_crossing);
1672 /* Select widths from PAGE into subpage. */
/* 'scp' (elided declaration) walks PAGE's cumulative positions; 'dcp'
   rebuilds subpage->cp[a] as a running total, segment by segment. */
1674 dcp = subpage->cp[a];
/* Leading header rules and cells. */
1676 for (z = 0; z <= rule_ofs (subpage->h[a][0]); z++, dcp++)
/* A cut-off leading edge contributes no rule width. */
1678 if (z == 0 && subpage->is_edge_cutoff[a][0])
1681 dcp[1] = dcp[0] + (scp[z + 1] - scp[z]);
/* Selected body cells, trimming P0/P1 pixels off the ends. */
1683 for (z = cell_ofs (z0); z <= cell_ofs (z1 - 1); z++, dcp++)
1685 dcp[1] = dcp[0] + (scp[z + 1] - scp[z]);
1686 if (z == cell_ofs (z0))
/* With headers on both sides, the first body cell also absorbs its
   join crossing. */
1689 if (page->h[a][0] && page->h[a][1])
1690 dcp[1] += page->join_crossing[a][z / 2];
1692 if (z == cell_ofs (z1 - 1))
/* Trailing header rules and cells. */
1695 for (z = rule_ofs_r (page, a, subpage->h[a][1]);
1696 z <= rule_ofs_r (page, a, 0); z++, dcp++)
/* A cut-off trailing edge contributes no rule width. */
1698 if (z == rule_ofs_r (page, a, 0) && subpage->is_edge_cutoff[a][1])
1701 dcp[1] = dcp[0] + (scp[z + 1] - scp[z]);
1703 assert (dcp == &subpage->cp[a][2 * subpage->n[a] + 1]);
/* Axis 'b' positions carry over unchanged. */
1705 for (z = 0; z < page->n[b] * 2 + 2; z++)
1706 subpage->cp[b][z] = page->cp[b][z];
1708 /* Add new overflows. */
1716 s.subpage = subpage;
/* First pass: cells along the left/top edge of the selection that were
   cut by Z0/P0 (or already overflowed) get overflow records. */
1718 if (!page->h[a][0] || z0 > page->h[a][0] || p0)
1719 for (z = 0; z < page->n[b]; )
1721 struct table_cell cell;
1722 int d[TABLE_N_AXES];
1729 table_get_cell (page->table, d[H], d[V], &cell);
1730 overflow0 = p0 || cell.d[a][0] < z0;
1731 overflow1 = cell.d[a][1] > z1 || (cell.d[a][1] == z1 && p1);
1732 if (overflow0 || overflow1)
1734 ro = insert_overflow (&s, &cell);
/* Amount of the cell hidden before the selection starts. */
1738 ro->overflow[a][0] += p0 + axis_width (
1739 page, a, cell_ofs (cell.d[a][0]), cell_ofs (z0));
1740 if (page->h[a][0] && page->h[a][1])
1741 ro->overflow[a][0] -= page->join_crossing[a][cell.d[a][0]
/* Amount of the cell hidden past the selection's end. */
1747 ro->overflow[a][1] += p1 + axis_width (
1748 page, a, cell_ofs (z1), cell_ofs (cell.d[a][1]));
1749 if (page->h[a][0] && page->h[a][1])
1750 ro->overflow[a][1] -= page->join_crossing[a][cell.d[a][1]];
1754 table_cell_free (&cell);
/* Second pass: cells cut at the right/bottom edge by Z1/P1 that were not
   already handled above. */
1757 if (!page->h[a][1] || z1 < page->n[a] - page->h[a][1] || p1)
1758 for (z = 0; z < page->n[b]; )
1760 struct table_cell cell;
1761 int d[TABLE_N_AXES];
1765 table_get_cell (page->table, d[H], d[V], &cell);
1766 if ((cell.d[a][1] > z1 || (cell.d[a][1] == z1 && p1))
1767 && find_overflow_for_cell (&s, &cell) == NULL)
1769 ro = insert_overflow (&s, &cell);
1770 ro->overflow[a][1] += p1 + axis_width (page, a, cell_ofs (z1),
1771 cell_ofs (cell.d[a][1]));
1774 table_cell_free (&cell);
1777 /* Copy overflows from PAGE into subpage. */
1778 HMAP_FOR_EACH (ro, struct render_overflow, node, &page->overflows)
1780 struct table_cell cell;
1782 table_get_cell (page->table, ro->d[H], ro->d[V], &cell);
/* Only overflows for cells that intersect the selection survive. */
1783 if (cell.d[a][1] > z0 && cell.d[a][0] < z1
1784 && find_overflow_for_cell (&s, &cell) == NULL)
1785 insert_overflow (&s, &cell);
1786 table_cell_free (&cell);
1792 /* Given CELL, a table_cell within S->page, stores in SUBCELL the (x,y)
1793 coordinates of the top-left cell as it will appear in S->subpage.
1795 CELL must actually intersect the region of S->page that is being selected
1796 by render_page_select() or the results will not make any sense. */
1798 cell_to_subpage (struct render_page_selection *s,
1799 const struct table_cell *cell, int subcell[TABLE_N_AXES])
1801 enum table_axis a = s->a;
1802 enum table_axis b = s->b;
1803 int ha0 = s->subpage->h[a][0];
/* Shift along the sliced axis by the selection start, then clamp to the
   first cell past the leading header: a cell that begins before z0
   (overflowing into the selection) maps to that first body cell. */
1805 subcell[a] = MAX (cell->d[a][0] - s->z0 + ha0, ha0);
/* The other axis is selected whole, so its coordinate is unchanged. */
1806 subcell[b] = cell->d[b][0];
1809 /* Given CELL, a table_cell within S->page, returns the render_overflow for
1810 that cell in S->subpage, if there is one, and a null pointer otherwise.
1812 CELL must actually intersect the region of S->page that is being selected
1813 by render_page_select() or the results will not make any sense. */
1814 static const struct render_overflow *
1815 find_overflow_for_cell (struct render_page_selection *s,
1816 const struct table_cell *cell)
/* Translate to subpage coordinates, then look up by that position. */
1820 cell_to_subpage (s, cell, subcell);
1821 return find_overflow (s->subpage, subcell[H], subcell[V]);
1824 /* Given CELL, a table_cell within S->page, inserts a render_overflow for that
1825 cell in S->subpage (which must not already exist). Initializes the new
1826 render_overflow's 'overflow' member from the overflow for CELL in S->page,
1829 CELL must actually intersect the region of S->page that is being selected
1830 by render_page_select() or the results will not make any sense. */
1831 static struct render_overflow *
1832 insert_overflow (struct render_page_selection *s,
1833 const struct table_cell *cell)
1835 const struct render_overflow *old;
1836 struct render_overflow *of;
1838 of = xzalloc (sizeof *of);
1839 cell_to_subpage (s, cell, of->d);
1840 hmap_insert (&s->subpage->overflows, &of->node,
1841 hash_cell (of->d[H], of->d[V]));
1843 old = find_overflow (s->page, cell->d[H][0], cell->d[V][0]);
1845 memcpy (of->overflow, old->overflow, sizeof of->overflow);