1 /* PSPP - a program for statistical analysis.
2 Copyright (C) 2009, 2010, 2011, 2013, 2014 Free Software Foundation, Inc.
4 This program is free software: you can redistribute it and/or modify
5 it under the terms of the GNU General Public License as published by
6 the Free Software Foundation, either version 3 of the License, or
7 (at your option) any later version.
9 This program is distributed in the hope that it will be useful,
10 but WITHOUT ANY WARRANTY; without even the implied warranty of
11 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 GNU General Public License for more details.
14 You should have received a copy of the GNU General Public License
15 along with this program. If not, see <http://www.gnu.org/licenses/>. */
#include <config.h>

#include "output/render.h"

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include "libpspp/assertion.h"
#include "libpspp/cast.h"
#include "libpspp/hash-functions.h"
#include "libpspp/hmap.h"
#include "output/table.h"

#include "gl/minmax.h"
#include "gl/xalloc.h"
/* This file uses TABLE_HORZ and TABLE_VERT enough to warrant abbreviating. */
#define H TABLE_HORZ
#define V TABLE_VERT
37 /* A layout for rendering a specific table on a specific device.
39 May represent the layout of an entire table presented to
40 render_page_create(), or a rectangular subregion of a table broken out using
41 render_break_next() to allow a table to be broken across multiple pages. */
44 const struct render_params *params; /* Parameters of the target device. */
45 struct table *table; /* Table rendered. */
48 /* Local copies of table->n and table->h, for convenience. */
50 int h[TABLE_N_AXES][2];
52 /* cp[H] represents x positions within the table.
54 cp[H][1] = the width of the leftmost vertical rule.
55 cp[H][2] = cp[H][1] + the width of the leftmost column.
56 cp[H][3] = cp[H][2] + the width of the second-from-left vertical rule.
58 cp[H][2 * nc] = x position of the rightmost vertical rule.
59 cp[H][2 * nc + 1] = total table width including all rules.
61 Similarly, cp[V] represents y positions within the table.
63 cp[V][1] = the height of the topmost horizontal rule.
64 cp[V][2] = cp[V][1] + the height of the topmost row.
65 cp[V][3] = cp[V][2] + the height of the second-from-top horizontal rule.
67 cp[V][2 * nr] = y position of the bottommost horizontal rule.
68 cp[V][2 * nr + 1] = total table height including all rules.
70 Rules and columns can have width or height 0, in which case consecutive
71 values in this array are equal. */
72 int *cp[TABLE_N_AXES];
74 /* render_break_next() can break a table such that some cells are not fully
75 contained within a render_page. This will happen if a cell is too wide
76 or two tall to fit on a single page, or if a cell spans multiple rows or
77 columns and the page only includes some of those rows or columns.
79 This hash table contains "struct render_overflow"s that represents each
80 such cell that doesn't completely fit on this page.
82 Each overflow cell borders at least one header edge of the table and may
83 border more. (A single table cell that is so large that it fills the
84 entire page can overflow on all four sides!) */
85 struct hmap overflows;
87 /* If a single column (or row) is too wide (or tall) to fit on a page
88 reasonably, then render_break_next() will split a single row or column
89 across multiple render_pages. This member indicates when this has
92 is_edge_cutoff[H][0] is true if pixels have been cut off the left side
93 of the leftmost column in this page, and false otherwise.
95 is_edge_cutoff[H][1] is true if pixels have been cut off the right side
96 of the rightmost column in this page, and false otherwise.
98 is_edge_cutoff[V][0] and is_edge_cutoff[V][1] are similar for the top
99 and bottom of the table.
101 The effect of is_edge_cutoff is to prevent rules along the edge in
102 question from being rendered.
104 When is_edge_cutoff is true for a given edge, the 'overflows' hmap will
105 contain a node for each cell along that edge. */
106 bool is_edge_cutoff[TABLE_N_AXES][2];
108 /* If part of a joined cell would be cut off by breaking a table along
109 'axis' at the rule with offset 'z' (where 0 <= z <= n[axis]), then
110 join_crossing[axis][z] is the thickness of the rule that would be cut
113 This is used to know to allocate extra space for breaking at such a
114 position, so that part of the cell's content is not lost.
116 This affects breaking a table only when headers are present. When
117 headers are not present, the rule's thickness is used for cell content,
118 so no part of the cell's content is lost (and in fact it is duplicated
119 across both pages). */
120 int *join_crossing[TABLE_N_AXES];
/* Returns the offset in struct render_page's cp[axis] array of the rule with
   index RULE_IDX.  That is, if RULE_IDX is 0, then the offset is that of the
   leftmost or topmost rule; if RULE_IDX is 1, then the offset is that of the
   next rule to the right (or below); and so on. */
static int
rule_ofs (int rule_idx)
{
  /* Rules and cells alternate in cp[]; rules occupy the even offsets. */
  return rule_idx * 2;
}
133 /* Returns the offset in struct render_page's cp[axis] array of the rule with
134 index RULE_IDX_R, which counts from the right side (or bottom) of the page
135 left (or up), according to whether AXIS is H or V, respectively. That is,
136 if RULE_IDX_R is 0, then the offset is that of the rightmost or bottommost
137 rule; if RULE_IDX is 1, then the offset is that of the next rule to the left
138 (or above); and so on. */
140 rule_ofs_r (const struct render_page *page, int axis, int rule_idx_r)
142 return (page->n[axis] - rule_idx_r) * 2;
/* Returns the offset in struct render_page's cp[axis] array of the cell with
   index CELL_IDX.  That is, if CELL_IDX is 0, then the offset is that of the
   leftmost or topmost cell; if CELL_IDX is 1, then the offset is that of the
   next cell to the right (or below); and so on. */
static int
cell_ofs (int cell_idx)
{
  /* Rules and cells alternate in cp[]; cells occupy the odd offsets. */
  return cell_idx * 2 + 1;
}
155 /* Returns the width of PAGE along AXIS from OFS0 to OFS1, exclusive. */
157 axis_width (const struct render_page *page, int axis, int ofs0, int ofs1)
159 return page->cp[axis][ofs1] - page->cp[axis][ofs0];
162 /* Returns the width of the headers in PAGE along AXIS. */
164 headers_width (const struct render_page *page, int axis)
166 int h0 = page->h[axis][0];
167 int w0 = axis_width (page, axis, rule_ofs (0), cell_ofs (h0));
168 int n = page->n[axis];
169 int h1 = page->h[axis][1];
170 int w1 = axis_width (page, axis, rule_ofs_r (page, axis, h1), cell_ofs (n));
/* Returns the width of cell X along AXIS in PAGE. */
static int
cell_width (const struct render_page *page, int axis, int x)
{
  return axis_width (page, axis, cell_ofs (x), cell_ofs (x) + 1);
}
/* Returns the width of rule X along AXIS in PAGE, counting from the left
   (or top). */
static int
rule_width (const struct render_page *page, int axis, int x)
{
  return axis_width (page, axis, rule_ofs (x), rule_ofs (x) + 1);
}
/* Returns the width of rule X along AXIS in PAGE, where X counts from the
   right (or bottom) edge, as for rule_ofs_r().  (The original comment was a
   copy-paste of rule_width()'s; this one reflects the "_r" semantics.) */
static int
rule_width_r (const struct render_page *page, int axis, int x)
{
  int ofs = rule_ofs_r (page, axis, x);
  return axis_width (page, axis, ofs, ofs + 1);
}
/* Returns the width of cells X0 through X1, exclusive, along AXIS in PAGE,
   including the rules between them but not the rules on the outside. */
static int
joined_width (const struct render_page *page, int axis, int x0, int x1)
{
  return axis_width (page, axis, cell_ofs (x0), cell_ofs (x1) - 1);
}
203 /* Returns the width of the widest cell, excluding headers, along AXIS in
206 max_cell_width (const struct render_page *page, int axis)
208 int n = page->n[axis];
209 int x0 = page->h[axis][0];
210 int x1 = n - page->h[axis][1];
214 for (x = x0; x < x1; x++)
216 int w = cell_width (page, axis, x);
223 /* A cell that doesn't completely fit on the render_page. */
224 struct render_overflow
226 struct hmap_node node; /* In render_page's 'overflows' hmap. */
228 /* Occupied region of page.
230 d[H][0] is the leftmost column.
231 d[H][1] is the rightmost column, plus 1.
232 d[V][0] is the top row.
233 d[V][1] is the bottom row, plus 1.
235 The cell in its original table might occupy a larger region. This
236 member reflects the size of the cell in the current render_page, after
237 trimming off any rows or columns due to page-breaking. */
240 /* The space that has been trimmed off the cell:
242 overflow[H][0]: space trimmed off its left side.
243 overflow[H][1]: space trimmed off its right side.
244 overflow[V][0]: space trimmed off its top.
245 overflow[V][1]: space trimmed off its bottom.
247 During rendering, this information is used to position the rendered
248 portion of the cell within the available space.
250 When a cell is rendered, sometimes it is permitted to spill over into
251 space that is ordinarily reserved for rules. Either way, this space is
252 still included in overflow values.
254 Suppose, for example, that a cell that joins 2 columns has a width of 60
255 pixels and content "abcdef", that the 2 columns that it joins have
256 widths of 20 and 30 pixels, respectively, and that therefore the rule
257 between the two joined columns has a width of 10 (20 + 10 + 30 = 60).
258 It might render like this, if each character is 10x10, and showing a few
259 extra table cells for context:
267 If this render_page is broken at the rule that separates "gh" from
268 "ijk", then the page that contains the left side of the "abcdef" cell
269 will have overflow[H][1] of 10 + 30 = 40 for its portion of the cell,
270 and the page that contains the right side of the cell will have
271 overflow[H][0] of 20 + 10 = 30. The two resulting pages would look like
289 int overflow[TABLE_N_AXES][2];
/* Returns a hash value for (X,Y). */
static unsigned int
hash_overflow (int x, int y)
{
  /* Pack the pair into one int; fine for tables narrower than 2**16 cells. */
  return hash_int (x + (y << 16), 0);
}
299 /* Searches PAGE's set of render_overflow for one whose top-left cell is
300 (X,Y). Returns it, if there is one, otherwise a null pointer. */
301 static const struct render_overflow *
302 find_overflow (const struct render_page *page, int x, int y)
304 if (!hmap_is_empty (&page->overflows))
306 const struct render_overflow *of;
308 HMAP_FOR_EACH_WITH_HASH (of, struct render_overflow, node,
309 hash_overflow (x, y), &page->overflows)
310 if (x == of->d[H] && y == of->d[V])
/* Row or column dimensions.  Used to figure the size of a table in
   render_page_create() and discarded after that. */
struct render_row
  {
    /* Width without considering rows (or columns) that span more than one (or
       column). */
    int unspanned;

    /* Width taking spanned rows (or columns) into consideration. */
    int width;
  };
329 /* Modifies the 'width' members of the N elements of ROWS so that their sum,
330 when added to rule widths RULES[1] through RULES[N - 1] inclusive, is at
333 distribute_spanned_width (int width,
334 struct render_row *rows, const int *rules, int n)
340 /* Sum up the unspanned widths of the N rows for use as weights. */
342 for (x = 0; x < n; x++)
343 total_unspanned += rows[x].unspanned;
344 for (x = 0; x < n - 1; x++)
345 total_unspanned += rules[x + 1];
346 if (total_unspanned >= width)
349 /* The algorithm used here is based on the following description from HTML 4:
351 For cells that span multiple columns, a simple approach consists of
352 apportioning the min/max widths evenly to each of the constituent
353 columns. A slightly more complex approach is to use the min/max
354 widths of unspanned cells to weight how spanned widths are
355 apportioned. Experiments suggest that a blend of the two approaches
356 gives good results for a wide range of tables.
358 We blend the two approaches half-and-half, except that we cannot use the
359 unspanned weights when 'total_unspanned' is 0 (because that would cause a
362 This implementation uses floating-point types and operators, but all the
363 values involved are integers. For integers smaller than 53 bits, this
364 should not lose any precision, and it should degrade gracefully for larger
367 The calculation we want to do is this:
370 w1 = width * (column's unspanned width) / (total unspanned width)
371 (column's width) = (w0 + w1) / 2
373 We implement it as a precise calculation in integers by multiplying w0 and
374 w1 by the common denominator of all three calculations (d), dividing that
375 out in the column width calculation, and then keeping the remainder for
378 (We actually compute the unspanned width of a column as twice the
379 unspanned width, plus the width of the rule on the left, plus the width of
380 the rule on the right. That way each rule contributes to both the cell on
381 its left and on its right.)
384 d1 = 2.0 * (total_unspanned > 0 ? total_unspanned : 1.0);
386 if (total_unspanned > 0)
389 for (x = 0; x < n; x++)
392 if (total_unspanned > 0)
394 double unspanned = rows[x].unspanned * 2.0;
396 unspanned += rules[x + 1];
398 unspanned += rules[x];
399 w += width * unspanned * d0;
402 rows[x].width = MAX (rows[x].width, w / d);
403 w -= rows[x].width * d;
407 /* Initializes PAGE->cp[AXIS] from the row widths in ROWS and the rule widths
410 accumulate_row_widths (const struct render_page *page, enum table_axis axis,
411 const struct render_row *rows, const int *rules)
413 int n = page->n[axis];
419 for (z = 0; z < n; z++)
421 cp[1] = cp[0] + rules[z];
422 cp[2] = cp[1] + rows[z].width;
425 cp[1] = cp[0] + rules[n];
428 /* Returns the sum of widths of the N ROWS and N+1 RULES. */
430 calculate_table_width (int n, const struct render_row *rows, int *rules)
436 for (x = 0; x < n; x++)
437 width += rows[x].width;
438 for (x = 0; x <= n; x++)
444 /* Rendering utility functions. */
446 /* Returns the line style to use for drawing a rule of the given TYPE. */
447 static enum render_line_style
448 rule_to_render_type (unsigned char type)
454 return RENDER_LINE_NONE;
456 return RENDER_LINE_SINGLE;
458 return RENDER_LINE_DOUBLE;
464 /* Returns the width of the rule in TABLE that is at offset Z along axis A, if
465 rendered with PARAMS. */
467 measure_rule (const struct render_params *params, const struct table *table,
468 enum table_axis a, int z)
470 enum table_axis b = !a;
475 /* Determine all types of rules that are present, as a bitmap in 'rules'
476 where rule type 't' is present if bit 2**t is set. */
479 for (d[b] = 0; d[b] < table->n[b]; d[b]++)
480 rules |= 1u << table_get_rule (table, a, d[H], d[V]);
482 /* Calculate maximum width of the rules that are present. */
484 if (rules & (1u << TAL_1)
485 || (z > 0 && z < table->n[a] && rules & (1u << TAL_GAP)))
486 width = params->line_widths[a][RENDER_LINE_SINGLE];
487 if (rules & (1u << TAL_2))
488 width = MAX (width, params->line_widths[a][RENDER_LINE_DOUBLE]);
492 /* Allocates and returns a new render_page using PARAMS and TABLE. Allocates
493 space for all of the members of the new page, but the caller must initialize
494 the 'cp' member itself. */
495 static struct render_page *
496 render_page_allocate (const struct render_params *params,
499 struct render_page *page;
502 page = xmalloc (sizeof *page);
503 page->params = params;
506 page->n[H] = table->n[H];
507 page->n[V] = table->n[V];
508 page->h[H][0] = table->h[H][0];
509 page->h[H][1] = table->h[H][1];
510 page->h[V][0] = table->h[V][0];
511 page->h[V][1] = table->h[V][1];
513 for (i = 0; i < TABLE_N_AXES; i++)
515 page->cp[i] = xmalloc ((2 * page->n[i] + 2) * sizeof *page->cp[i]);
516 page->join_crossing[i] = xzalloc ((page->n[i] + 1) * sizeof *page->join_crossing[i]);
519 hmap_init (&page->overflows);
520 memset (page->is_edge_cutoff, 0, sizeof page->is_edge_cutoff);
525 /* Allocates and returns a new render_page for PARAMS and TABLE, initializing
526 cp[H] in the new page from ROWS and RULES. The caller must still initialize
528 static struct render_page *
529 create_page_with_exact_widths (const struct render_params *params,
531 const struct render_row *rows, int *rules)
533 struct render_page *page = render_page_allocate (params, table);
534 accumulate_row_widths (page, H, rows, rules);
538 /* Allocates and returns a new render_page for PARAMS and TABLE.
540 Initializes cp[H] in the new page by setting the width of each row 'i' to
541 somewhere between the minimum cell width ROW_MIN[i].width and the maximum
542 ROW_MAX[i].width. Sets the width of rules to those in RULES.
544 W_MIN is the sum of ROWS_MIN[].width.
546 W_MAX is the sum of ROWS_MAX[].width.
548 The caller must still initialize cp[V]. */
549 static struct render_page *
550 create_page_with_interpolated_widths (const struct render_params *params,
552 const struct render_row *rows_min,
553 const struct render_row *rows_max,
554 int w_min, int w_max, const int *rules)
556 /* This implementation uses floating-point types and operators, but all the
557 values involved are integers. For integers smaller than 53 bits, this
558 should not lose any precision, and it should degrade gracefully for larger
560 const int n = table->n[H];
561 const double avail = params->size[H] - w_min;
562 const double wanted = w_max - w_min;
563 struct render_page *page;
570 page = render_page_allocate (params, table);
574 w = (int) wanted / 2;
575 for (x = 0; x < n; x++)
579 w += avail * (rows_max[x].width - rows_min[x].width);
583 cph[1] = cph[0] + rules[x];
584 cph[2] = cph[1] + rows_min[x].width + extra;
587 cph[1] = cph[0] + rules[n];
589 assert (page->cp[H][n * 2 + 1] == params->size[H]);
595 set_join_crossings (struct render_page *page, enum table_axis axis,
596 const struct table_cell *cell, int *rules)
600 for (z = cell->d[axis][0] + 1; z <= cell->d[axis][1] - 1; z++)
601 page->join_crossing[axis][z] = rules[z];
604 /* Creates and returns a new render_page for rendering TABLE on a device
607 The new render_page will be suitable for rendering on a device whose page
608 size is PARAMS->size, but the caller is responsible for actually breaking it
609 up to fit on such a device, using the render_break abstraction. */
611 render_page_create (const struct render_params *params,
612 const struct table *table_)
614 struct render_page *page;
617 struct render_row *columns[2];
618 struct render_row *rows;
620 int *rules[TABLE_N_AXES];
624 enum table_axis axis;
626 table = table_ref (table_);
627 nc = table_nc (table);
628 nr = table_nr (table);
630 /* Figure out rule widths. */
631 for (axis = 0; axis < TABLE_N_AXES; axis++)
633 int n = table->n[axis] + 1;
636 rules[axis] = xnmalloc (n, sizeof *rules);
637 for (z = 0; z < n; z++)
638 rules[axis][z] = measure_rule (params, table, axis, z);
641 /* Calculate minimum and maximum widths of cells that do not
642 span multiple columns. */
643 for (i = 0; i < 2; i++)
644 columns[i] = xzalloc (nc * sizeof *columns[i]);
645 for (y = 0; y < nr; y++)
646 for (x = 0; x < nc; )
648 struct table_cell cell;
650 table_get_cell (table, x, y, &cell);
651 if (y == cell.d[V][0] && table_cell_colspan (&cell) == 1)
656 params->measure_cell_width (params->aux, &cell, &w[MIN], &w[MAX]);
657 for (i = 0; i < 2; i++)
658 if (columns[i][x].unspanned < w[i])
659 columns[i][x].unspanned = w[i];
662 table_cell_free (&cell);
665 /* Distribute widths of spanned columns. */
666 for (i = 0; i < 2; i++)
667 for (x = 0; x < nc; x++)
668 columns[i][x].width = columns[i][x].unspanned;
669 for (y = 0; y < nr; y++)
670 for (x = 0; x < nc; )
672 struct table_cell cell;
674 table_get_cell (table, x, y, &cell);
675 if (y == cell.d[V][0] && table_cell_colspan (&cell) > 1)
679 params->measure_cell_width (params->aux, &cell, &w[MIN], &w[MAX]);
680 for (i = 0; i < 2; i++)
681 distribute_spanned_width (w[i], &columns[i][cell.d[H][0]],
682 rules[H], table_cell_colspan (&cell));
685 table_cell_free (&cell);
688 /* Decide final column widths. */
689 for (i = 0; i < 2; i++)
690 table_widths[i] = calculate_table_width (table_nc (table),
691 columns[i], rules[H]);
692 if (table_widths[MAX] <= params->size[H])
694 /* Fits even with maximum widths. Use them. */
695 page = create_page_with_exact_widths (params, table, columns[MAX],
698 else if (table_widths[MIN] <= params->size[H])
700 /* Fits with minimum widths, so distribute the leftover space. */
701 page = create_page_with_interpolated_widths (
702 params, table, columns[MIN], columns[MAX],
703 table_widths[MIN], table_widths[MAX], rules[H]);
707 /* Doesn't fit even with minimum widths. Assign minimums for now, and
708 later we can break it horizontally into multiple pages. */
709 page = create_page_with_exact_widths (params, table, columns[MIN],
713 /* Calculate heights of cells that do not span multiple rows. */
714 rows = xzalloc (nr * sizeof *rows);
715 for (y = 0; y < nr; y++)
717 for (x = 0; x < nc; )
719 struct render_row *r = &rows[y];
720 struct table_cell cell;
722 table_get_cell (table, x, y, &cell);
723 if (y == cell.d[V][0])
725 if (table_cell_rowspan (&cell) == 1)
727 int w = joined_width (page, H, cell.d[H][0], cell.d[H][1]);
728 int h = params->measure_cell_height (params->aux, &cell, w);
729 if (h > r->unspanned)
730 r->unspanned = r->width = h;
733 set_join_crossings (page, V, &cell, rules[V]);
735 if (table_cell_colspan (&cell) > 1)
736 set_join_crossings (page, H, &cell, rules[H]);
739 table_cell_free (&cell);
742 for (i = 0; i < 2; i++)
745 /* Distribute heights of spanned rows. */
746 for (y = 0; y < nr; y++)
747 for (x = 0; x < nc; )
749 struct table_cell cell;
751 table_get_cell (table, x, y, &cell);
752 if (y == cell.d[V][0] && table_cell_rowspan (&cell) > 1)
754 int w = joined_width (page, H, cell.d[H][0], cell.d[H][1]);
755 int h = params->measure_cell_height (params->aux, &cell, w);
756 distribute_spanned_width (h, &rows[cell.d[V][0]], rules[V],
757 table_cell_rowspan (&cell));
760 table_cell_free (&cell);
763 /* Decide final row heights. */
764 accumulate_row_widths (page, V, rows, rules[V]);
767 /* Measure headers. If they are "too big", get rid of them. */
768 for (axis = 0; axis < TABLE_N_AXES; axis++)
770 int hw = headers_width (page, axis);
771 if (hw * 2 >= page->params->size[axis]
772 || hw + max_cell_width (page, axis) > page->params->size[axis])
774 page->table = table_unshare (page->table);
775 page->table->h[axis][0] = page->table->h[axis][1] = 0;
776 page->h[axis][0] = page->h[axis][1] = 0;
786 /* Increases PAGE's reference count. */
788 render_page_ref (const struct render_page *page_)
790 struct render_page *page = CONST_CAST (struct render_page *, page_);
795 /* Decreases PAGE's reference count and destroys PAGE if this causes the
796 reference count to fall to zero. */
798 render_page_unref (struct render_page *page)
800 if (page != NULL && --page->ref_cnt == 0)
803 struct render_overflow *overflow, *next;
805 HMAP_FOR_EACH_SAFE (overflow, next, struct render_overflow, node,
808 hmap_destroy (&page->overflows);
810 table_unref (page->table);
812 for (i = 0; i < TABLE_N_AXES; ++i)
814 free (page->join_crossing[i]);
822 /* Returns the size of PAGE along AXIS. (This might be larger than the page
823 size specified in the parameters passed to render_page_create(). Use a
824 render_break to break up a render_page into page-sized chunks.) */
826 render_page_get_size (const struct render_page *page, enum table_axis axis)
828 return page->cp[axis][page->n[axis] * 2 + 1];
832 render_page_get_best_breakpoint (const struct render_page *page, int height)
836 /* If there's no room for at least the top row and the rules above and below
837 it, don't include any of the table. */
838 if (page->cp[V][3] > height)
841 /* Otherwise include as many rows and rules as we can. */
842 for (y = 5; y <= 2 * page->n[V] + 1; y += 2)
843 if (page->cp[V][y] > height)
844 return page->cp[V][y - 2];
848 /* Drawing render_pages. */
850 static inline enum render_line_style
851 get_rule (const struct render_page *page, enum table_axis axis,
852 const int d[TABLE_N_AXES])
854 return rule_to_render_type (table_get_rule (page->table,
855 axis, d[H] / 2, d[V] / 2));
865 render_rule (const struct render_page *page, const int d[TABLE_N_AXES])
867 enum render_line_style styles[TABLE_N_AXES][2];
870 for (a = 0; a < TABLE_N_AXES; a++)
872 enum table_axis b = !a;
874 styles[a][0] = styles[a][1] = RENDER_LINE_NONE;
877 || (page->is_edge_cutoff[a][0] && d[a] == 0)
878 || (page->is_edge_cutoff[a][1] && d[a] == page->n[a] * 2))
889 styles[a][0] = get_rule (page, a, e);
892 if (d[b] / 2 < page->table->n[b])
893 styles[a][1] = get_rule (page, a, d);
896 styles[a][0] = styles[a][1] = get_rule (page, a, d);
899 if (styles[H][0] != RENDER_LINE_NONE || styles[H][1] != RENDER_LINE_NONE
900 || styles[V][0] != RENDER_LINE_NONE || styles[V][1] != RENDER_LINE_NONE)
902 int bb[TABLE_N_AXES][2];
904 bb[H][0] = page->cp[H][d[H]];
905 bb[H][1] = page->cp[H][d[H] + 1];
906 bb[V][0] = page->cp[V][d[V]];
907 bb[V][1] = page->cp[V][d[V] + 1];
908 page->params->draw_line (page->params->aux, bb, styles);
913 render_cell (const struct render_page *page, const struct table_cell *cell)
915 const struct render_overflow *of;
916 int bb[TABLE_N_AXES][2];
917 int clip[TABLE_N_AXES][2];
919 bb[H][0] = clip[H][0] = page->cp[H][cell->d[H][0] * 2 + 1];
920 bb[H][1] = clip[H][1] = page->cp[H][cell->d[H][1] * 2];
921 bb[V][0] = clip[V][0] = page->cp[V][cell->d[V][0] * 2 + 1];
922 bb[V][1] = clip[V][1] = page->cp[V][cell->d[V][1] * 2];
924 of = find_overflow (page, cell->d[H][0], cell->d[V][0]);
927 enum table_axis axis;
929 for (axis = 0; axis < TABLE_N_AXES; axis++)
931 if (of->overflow[axis][0])
933 bb[axis][0] -= of->overflow[axis][0];
934 if (cell->d[axis][0] == 0 && !page->is_edge_cutoff[axis][0])
935 clip[axis][0] = page->cp[axis][cell->d[axis][0] * 2];
937 if (of->overflow[axis][1])
939 bb[axis][1] += of->overflow[axis][1];
940 if (cell->d[axis][1] == page->n[axis] && !page->is_edge_cutoff[axis][1])
941 clip[axis][1] = page->cp[axis][cell->d[axis][1] * 2 + 1];
946 page->params->draw_cell (page->params->aux, cell, bb, clip);
949 /* Draws the cells of PAGE indicated in BB. */
951 render_page_draw_cells (const struct render_page *page,
952 int bb[TABLE_N_AXES][2])
956 for (y = bb[V][0]; y < bb[V][1]; y++)
957 for (x = bb[H][0]; x < bb[H][1]; )
958 if (is_rule (x) || is_rule (y))
963 render_rule (page, d);
968 struct table_cell cell;
970 table_get_cell (page->table, x / 2, y / 2, &cell);
971 if (y / 2 == bb[V][0] / 2 || y / 2 == cell.d[V][0])
972 render_cell (page, &cell);
973 x = rule_ofs (cell.d[H][1]);
974 table_cell_free (&cell);
978 /* Renders PAGE, by calling the 'draw_line' and 'draw_cell' functions from the
979 render_params provided to render_page_create(). */
981 render_page_draw (const struct render_page *page)
983 int bb[TABLE_N_AXES][2];
986 bb[H][1] = page->n[H] * 2 + 1;
988 bb[V][1] = page->n[V] * 2 + 1;
990 render_page_draw_cells (page, bb);
/* Returns the greatest value i, 0 <= i < n, such that cp[i] <= x0.
   Binary search; cp[] must be nondecreasing. */
static int
get_clip_min_extent (int x0, const int cp[], int n)
{
  int low, high, best;

  low = 0;
  high = n;
  best = 0;
  while (low < high)
    {
      int middle = low + (high - low) / 2;

      if (cp[middle] <= x0)
        {
          best = middle;
          low = middle + 1;
        }
      else
        high = middle;
    }

  return best;
}
/* Returns the least value i, 0 <= i < n, such that cp[i] >= x1, then backs up
   over any run of equal cp[] values (zero-width rules or cells) so the
   earliest equivalent offset is returned.  Binary search; cp[] must be
   nondecreasing and contain some element >= x1. */
static int
get_clip_max_extent (int x1, const int cp[], int n)
{
  int low, high, best;

  low = 0;
  high = n;
  best = n;
  while (low < high)
    {
      int middle = low + (high - low) / 2;

      if (cp[middle] >= x1)
        best = high = middle;
      else
        low = middle + 1;
    }

  while (best > 0 && cp[best - 1] == cp[best])
    best--;

  return best;
}
1043 /* Renders the cells of PAGE that intersect (X,Y)-(X+W,Y+H), by calling the
1044 'draw_line' and 'draw_cell' functions from the render_params provided to
1045 render_page_create(). */
1047 render_page_draw_region (const struct render_page *page,
1048 int x, int y, int w, int h)
1050 int bb[TABLE_N_AXES][2];
1052 bb[H][0] = get_clip_min_extent (x, page->cp[H], page->n[H] * 2 + 1);
1053 bb[H][1] = get_clip_max_extent (x + w, page->cp[H], page->n[H] * 2 + 1);
1054 bb[V][0] = get_clip_min_extent (y, page->cp[V], page->n[V] * 2 + 1);
1055 bb[V][1] = get_clip_max_extent (y + h, page->cp[V], page->n[V] * 2 + 1);
1057 render_page_draw_cells (page, bb);
/* Breaking up tables to fit on a page. */

/* Forward declarations for the page-breaking helpers defined below.
   NOTE(review): render_page_select()'s parameter list is inferred from its
   call site in render_break_next(); confirm against upstream PSPP. */
static int needed_size (const struct render_break *, int cell);
static bool cell_is_breakable (const struct render_break *, int cell);
static struct render_page *render_page_select (const struct render_page *,
                                               enum table_axis,
                                               int z0, int p0, int z1, int p1);
1069 /* Initializes render_break B for breaking PAGE along AXIS.
1071 Ownership of PAGE is transferred to B. The caller must use
1072 render_page_ref() if it needs to keep a copy of PAGE. */
1074 render_break_init (struct render_break *b, struct render_page *page,
1075 enum table_axis axis)
1079 b->z = page->h[axis][0];
1081 b->hw = headers_width (page, axis);
1084 /* Initializes B as a render_break structure for which
1085 render_break_has_next() always returns false. */
1087 render_break_init_empty (struct render_break *b)
1090 b->axis = TABLE_HORZ;
1096 /* Frees B and unrefs the render_page that it owns. */
1098 render_break_destroy (struct render_break *b)
1102 render_page_unref (b->page);
1107 /* Returns true if B still has cells that are yet to be returned,
1108 false if all of B's page has been processed. */
1110 render_break_has_next (const struct render_break *b)
1112 const struct render_page *page = b->page;
1113 enum table_axis axis = b->axis;
1115 return page != NULL && b->z < page->n[axis] - page->h[axis][1];
1118 /* Returns the minimum SIZE argument that, if passed to render_break_next(),
1119 will avoid a null return value (if cells are still left). */
1121 render_break_next_size (const struct render_break *b)
1123 const struct render_page *page = b->page;
1124 enum table_axis axis = b->axis;
1126 return (!render_break_has_next (b) ? 0
1127 : !cell_is_breakable (b, b->z) ? needed_size (b, b->z + 1)
1128 : b->hw + page->params->font_size[axis]);
1131 /* Returns a new render_page that is up to SIZE pixels wide along B's axis.
1132 Returns a null pointer if B has already been completely broken up, or if
1133 SIZE is too small to reasonably render any cells. The latter will never
1134 happen if SIZE is at least as large as the page size passed to
1135 render_page_create() along B's axis. */
1136 struct render_page *
1137 render_break_next (struct render_break *b, int size)
1139 const struct render_page *page = b->page;
1140 enum table_axis axis = b->axis;
1141 struct render_page *subpage;
1144 if (!render_break_has_next (b))
1148 for (z = b->z; z < page->n[axis] - page->h[axis][1]; z++)
1150 int needed = needed_size (b, z + 1);
1153 if (cell_is_breakable (b, z))
1155 /* If there is no right header and we render a partial cell on
1156 the right side of the body, then we omit the rightmost rule of
1157 the body. Otherwise the rendering is deceptive because it
1158 looks like the whole cell is present instead of a partial
1161 This is similar to code for the left side in needed_size(). */
1162 int rule_allowance = (page->h[axis][1]
1164 : rule_width (page, axis, z));
1166 /* The amount that, if we added cell 'z', the rendering would
1167 overfill the allocated 'size'. */
1168 int overhang = needed - size - rule_allowance;
1170 /* The width of cell 'z'. */
1171 int cell_size = cell_width (page, axis, z);
1173 /* The amount trimmed off the left side of 'z',
1174 and the amount left to render. */
1175 int cell_ofs = z == b->z ? b->pixel : 0;
1176 int cell_left = cell_size - cell_ofs;
1178 /* A small but visible width. */
1179 int em = page->params->font_size[axis];
1181 /* If some of the cell remains to render,
1182 and there would still be some of the cell left afterward,
1183 then partially render that much of the cell. */
1184 pixel = (cell_left && cell_left > overhang
1185 ? cell_left - overhang + cell_ofs
1188 /* If there would be only a tiny amount of the cell left after
1189 rendering it partially, reduce the amount rendered slightly
1190 to make the output look a little better. */
1191 if (pixel + em > cell_size)
1192 pixel = MAX (pixel - em, 0);
1194 /* If we're breaking vertically, then consider whether the cells
1195 being broken have a better internal breakpoint than the exact
1196 number of pixels available, which might look bad e.g. because
1197 it breaks in the middle of a line of text. */
1198 if (axis == TABLE_VERT && page->params->adjust_break)
1202 for (x = 0; x < page->n[H]; )
1204 struct table_cell cell;
1208 table_get_cell (page->table, x, z, &cell);
1209 w = joined_width (page, H, cell.d[H][0], cell.d[H][1]);
1210 better_pixel = page->params->adjust_break (
1211 page->params->aux, &cell, w, pixel);
1213 table_cell_free (&cell);
1215 if (better_pixel < pixel)
1217 if (better_pixel > (z == b->z ? b->pixel : 0))
1219 pixel = better_pixel;
1222 else if (better_pixel == 0 && z != b->z)
1235 if (z == b->z && !pixel)
1238 subpage = render_page_select (page, axis, b->z, b->pixel,
1240 pixel ? cell_width (page, axis, z) - pixel
1247 /* Returns the width that would be required along B's axis to render a page
1248 from B's current position up to but not including CELL. */
/* The total accumulated below accounts for, in order: the left header
   (plus the rule merged between header and body), the body cells from
   b->z up to CELL minus any pixels already rendered, the rule merged
   between body and right header, the right header itself, and any join
   crossing between the two headers. */
1250 needed_size (const struct render_break *b, int cell)
1252 const struct render_page *page = b->page;
1253 enum table_axis axis = b->axis;
1256 /* Width of left header not including its rightmost rule. */
1257 size = axis_width (page, axis, 0, rule_ofs (page->h[axis][0]));
1259 /* If we have a pixel offset and there is no left header, then we omit the
1260 leftmost rule of the body. Otherwise the rendering is deceptive because
1261 it looks like the whole cell is present instead of a partial cell.
1263 Otherwise (if there are headers) we will be merging two rules: the
1264 rightmost rule in the header and the leftmost rule in the body. We assume
1265 that the width of a merged rule is the larger of the widths of either rule
1267 if (b->pixel == 0 || page->h[axis][0])
1268 size += MAX (rule_width (page, axis, page->h[axis][0]),
1269 rule_width (page, axis, b->z));
1271 /* Width of body, minus any pixel offset in the leftmost cell. */
1272 size += joined_width (page, axis, b->z, cell) - b->pixel;
1274 /* Width of rightmost rule in body merged with leftmost rule in headers. */
1275 size += MAX (rule_width_r (page, axis, page->h[axis][1]),
1276 rule_width (page, axis, cell));
1278 /* Width of right header not including its leftmost rule. */
1279 size += axis_width (page, axis, rule_ofs_r (page, axis, page->h[axis][1]),
1280 rule_ofs_r (page, axis, 0));
1282 /* Join crossing. */
/* NOTE(review): indexed by the first body cell b->z -- presumably the
   extra space where header and body rules cross; confirm against the
   join_crossing computation in render_page_create(). */
1283 if (page->h[axis][0] && page->h[axis][1])
1284 size += page->join_crossing[axis][b->z];
1289 /* Returns true if CELL along B's axis may be broken across a page boundary.
1291 This is just a heuristic. Breaking cells across page boundaries can save
1292 space, but it looks ugly. */
1294 cell_is_breakable (const struct render_break *b, int cell)
1296 const struct render_page *page = b->page;
1297 enum table_axis axis = b->axis;
/* A cell narrower than the device's minimum break size (params->min_break)
   is never split; only cells at least that wide may be broken. */
1299 return cell_width (page, axis, cell) >= page->params->min_break[axis];
1302 /* render_page_select() and helpers. */
/* Bundles the parameters of one render_page_select() invocation so the
   helper functions (cell_to_subpage, find_overflow_for_cell,
   insert_overflow) need only a single pointer. */
1304 struct render_page_selection
1306 const struct render_page *page; /* Page whose slice we are selecting. */
1307 struct render_page *subpage; /* New page under construction. */
1308 enum table_axis a; /* Axis of 'page' along which 'subpage' is a slice. */
1309 enum table_axis b; /* The opposite of 'a'. */
1310 int z0; /* First cell along 'a' being selected. */
1311 int z1; /* Last cell being selected, plus 1. */
1312 int p0; /* Number of pixels to trim off left side of z0. */
1313 int p1; /* Number of pixels to trim off right side of z1-1. */
1316 static void cell_to_subpage (struct render_page_selection *,
1317 const struct table_cell *,
1318 int subcell[TABLE_N_AXES]);
1319 static const struct render_overflow *find_overflow_for_cell (
1320 struct render_page_selection *, const struct table_cell *);
1321 static struct render_overflow *insert_overflow (struct render_page_selection *,
1322 const struct table_cell *);
1324 /* Creates and returns a new render_page whose contents are a subregion of
1325 PAGE's contents. The new render_page includes cells Z0 through Z1 along
1326 AXIS, plus any headers on AXIS.
1328 If P0 is nonzero, then it is a number of pixels to exclude from the left or
1329 top (according to AXIS) of cell Z0. Similarly, P1 is a number of pixels to
1330 exclude from the right or bottom of cell Z1 - 1. (P0 and P1 are used to
1331 render cells that are too large to fit on a single page.)
1333 The whole of axis !AXIS is included. (The caller may follow up with another
1334 call to render_page_select() to select on !AXIS to select on that axis as
1337 The caller retains ownership of PAGE, which is not modified. */
1338 static struct render_page *
1339 render_page_select (const struct render_page *page, enum table_axis axis,
1340 int z0, int p0, int z1, int p1)
1342 struct render_page_selection s;
1343 enum table_axis a = axis;
1344 enum table_axis b = !a;
1345 struct render_page *subpage;
1346 struct render_overflow *ro;
1352 /* Optimize case where all of PAGE is selected by just incrementing the
1354 if (z0 == page->h[a][0] && p0 == 0
1355 && z1 == page->n[a] - page->h[a][1] && p1 == 0)
/* CONST_CAST here is only to bump the reference count; PAGE's visible
   contents are not modified, preserving the documented contract. */
1357 struct render_page *page_rw = CONST_CAST (struct render_page *, page);
1362 /* Allocate subpage. */
1363 subpage = render_page_allocate (page->params,
1364 table_select_slice (
1365 table_ref (page->table),
1368 /* An edge is cut off if it was cut off in PAGE or if we're trimming pixels
1369 off that side of the page and there are no headers. */
1370 subpage->is_edge_cutoff[a][0] =
1371 subpage->h[a][0] == 0 && (p0 || (z0 == 0 && page->is_edge_cutoff[a][0]));
1372 subpage->is_edge_cutoff[a][1] =
1373 subpage->h[a][1] == 0 && (p1 || (z1 == page->n[a]
1374 && page->is_edge_cutoff[a][1]));
1375 subpage->is_edge_cutoff[b][0] = page->is_edge_cutoff[b][0];
1376 subpage->is_edge_cutoff[b][1] = page->is_edge_cutoff[b][1];
1378 /* Select join crossings from PAGE into subpage. */
/* 'jc' walks the destination array: leading-header entries, then the
   selected body range [z0,z1], then trailing-header entries.  The assert
   below confirms exactly n[a] + 1 entries were written. */
1379 jc = subpage->join_crossing[a];
1380 for (z = 0; z < page->h[a][0]; z++)
1381 *jc++ = page->join_crossing[a][z];
1382 for (z = z0; z <= z1; z++)
1383 *jc++ = page->join_crossing[a][z];
1384 for (z = page->n[a] - page->h[a][1]; z < page->n[a]; z++)
1385 *jc++ = page->join_crossing[a][z];
1386 assert (jc == &subpage->join_crossing[a][subpage->n[a] + 1]);
1388 memcpy (subpage->join_crossing[b], page->join_crossing[b],
1389 (subpage->n[b] + 1) * sizeof **subpage->join_crossing);
1391 /* Select widths from PAGE into subpage. */
/* 'dcp' walks the destination cp array; 'scp' is the source --
   NOTE(review): scp's initialization is on a line not visible here,
   presumably page->cp[a]; confirm before relying on this. */
1393 dcp = subpage->cp[a];
1395 for (z = 0; z <= rule_ofs (subpage->h[a][0]); z++, dcp++)
1397 if (z == 0 && subpage->is_edge_cutoff[a][0])
1400 dcp[1] = dcp[0] + (scp[z + 1] - scp[z]);
1402 for (z = cell_ofs (z0); z <= cell_ofs (z1 - 1); z++, dcp++)
1404 dcp[1] = dcp[0] + (scp[z + 1] - scp[z]);
1405 if (z == cell_ofs (z0))
1408 if (page->h[a][0] && page->h[a][1])
1409 dcp[1] += page->join_crossing[a][z / 2];
1411 if (z == cell_ofs (z1 - 1))
1414 for (z = rule_ofs_r (page, a, subpage->h[a][1]);
1415 z <= rule_ofs_r (page, a, 0); z++, dcp++)
1417 if (z == rule_ofs_r (page, a, 0) && subpage->is_edge_cutoff[a][1])
1420 dcp[1] = dcp[0] + (scp[z + 1] - scp[z]);
1422 assert (dcp == &subpage->cp[a][2 * subpage->n[a] + 1]);
/* The cross axis is copied wholesale: 2 entries per cell plus rules. */
1424 for (z = 0; z < page->n[b] * 2 + 2; z++)
1425 subpage->cp[b][z] = page->cp[b][z];
1427 /* Add new overflows. */
1435 s.subpage = subpage;
/* Cells chopped at the leading edge of the selection: record how many
   pixels of each were trimmed so their contents can be drawn shifted. */
1437 if (!page->h[a][0] || z0 > page->h[a][0] || p0)
1438 for (z = 0; z < page->n[b]; )
1440 struct table_cell cell;
1441 int d[TABLE_N_AXES];
1448 table_get_cell (page->table, d[H], d[V], &cell);
1449 overflow0 = p0 || cell.d[a][0] < z0;
1450 overflow1 = cell.d[a][1] > z1 || (cell.d[a][1] == z1 && p1);
1451 if (overflow0 || overflow1)
1453 ro = insert_overflow (&s, &cell);
1457 ro->overflow[a][0] += p0 + axis_width (
1458 page, a, cell_ofs (cell.d[a][0]), cell_ofs (z0));
1459 if (page->h[a][0] && page->h[a][1])
1460 ro->overflow[a][0] -= page->join_crossing[a][cell.d[a][0]
1466 ro->overflow[a][1] += p1 + axis_width (
1467 page, a, cell_ofs (z1), cell_ofs (cell.d[a][1]));
1468 if (page->h[a][0] && page->h[a][1])
1469 ro->overflow[a][1] -= page->join_crossing[a][cell.d[a][1]];
1473 table_cell_free (&cell);
/* Likewise for cells chopped at the trailing edge, unless an overflow
   record was already created for them above. */
1476 if (!page->h[a][1] || z1 < page->n[a] - page->h[a][1] || p1)
1477 for (z = 0; z < page->n[b]; )
1479 struct table_cell cell;
1480 int d[TABLE_N_AXES];
1484 table_get_cell (page->table, d[H], d[V], &cell);
1485 if ((cell.d[a][1] > z1 || (cell.d[a][1] == z1 && p1))
1486 && find_overflow_for_cell (&s, &cell) == NULL)
1488 ro = insert_overflow (&s, &cell);
1489 ro->overflow[a][1] += p1 + axis_width (page, a, cell_ofs (z1),
1490 cell_ofs (cell.d[a][1]));
1493 table_cell_free (&cell);
1496 /* Copy overflows from PAGE into subpage. */
/* Pre-existing overflow records for cells that intersect the selected
   range carry over to the subpage (insert_overflow copies the old
   overflow amounts). */
1497 HMAP_FOR_EACH (ro, struct render_overflow, node, &page->overflows)
1499 struct table_cell cell;
1501 table_get_cell (page->table, ro->d[H], ro->d[V], &cell);
1502 if (cell.d[a][1] > z0 && cell.d[a][0] < z1
1503 && find_overflow_for_cell (&s, &cell) == NULL)
1504 insert_overflow (&s, &cell);
1505 table_cell_free (&cell);
1511 /* Given CELL, a table_cell within S->page, stores in SUBCELL the (x,y)
1512 coordinates of the top-left cell as it will appear in S->subpage.
1514 CELL must actually intersect the region of S->page that is being selected
1515 by render_page_select() or the results will not make any sense. */
1517 cell_to_subpage (struct render_page_selection *s,
1518 const struct table_cell *cell, int subcell[TABLE_N_AXES])
1520 enum table_axis a = s->a;
1521 enum table_axis b = s->b;
1522 int ha0 = s->subpage->h[a][0];
/* Along the selected axis, shift positions left by z0 and right by the
   header size; MAX clamps cells that begin before the selection to the
   first body position.  The cross axis is unchanged. */
1524 subcell[a] = MAX (cell->d[a][0] - s->z0 + ha0, ha0);
1525 subcell[b] = cell->d[b][0];
1528 /* Given CELL, a table_cell within S->page, returns the render_overflow for
1529 that cell in S->subpage, if there is one, and a null pointer otherwise.
1531 CELL must actually intersect the region of S->page that is being selected
1532 by render_page_select() or the results will not make any sense. */
1533 static const struct render_overflow *
1534 find_overflow_for_cell (struct render_page_selection *s,
1535 const struct table_cell *cell)
/* Translate CELL's origin into subpage coordinates, then look it up in
   the subpage's overflow hash table. */
1539 cell_to_subpage (s, cell, subcell);
1540 return find_overflow (s->subpage, subcell[H], subcell[V]);
1543 /* Given CELL, a table_cell within S->page, inserts a render_overflow for that
1544 cell in S->subpage (which must not already exist). Initializes the new
1545 render_overflow's 'overflow' member from the overflow for CELL in S->page,
1548 CELL must actually intersect the region of S->page that is being selected
1549 by render_page_select() or the results will not make any sense. */
1550 static struct render_overflow *
1551 insert_overflow (struct render_page_selection *s,
1552 const struct table_cell *cell)
1554 const struct render_overflow *old;
1555 struct render_overflow *of;
1557 of = xzalloc (sizeof *of);
1558 cell_to_subpage (s, cell, of->d);
1559 hmap_insert (&s->subpage->overflows, &of->node,
1560 hash_overflow (of->d[H], of->d[V]));
1562 old = find_overflow (s->page, cell->d[H][0], cell->d[V][0]);
1564 memcpy (of->overflow, old->overflow, sizeof of->overflow);