1 /* PSPP - a program for statistical analysis.
2 Copyright (C) 2009, 2010, 2011, 2013, 2014 Free Software Foundation, Inc.
4 This program is free software: you can redistribute it and/or modify
5 it under the terms of the GNU General Public License as published by
6 the Free Software Foundation, either version 3 of the License, or
7 (at your option) any later version.
9 This program is distributed in the hope that it will be useful,
10 but WITHOUT ANY WARRANTY; without even the implied warranty of
11 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 GNU General Public License for more details.
14 You should have received a copy of the GNU General Public License
15 along with this program. If not, see <http://www.gnu.org/licenses/>. */
24 #include "libpspp/assertion.h"
25 #include "libpspp/hash-functions.h"
26 #include "libpspp/hmap.h"
27 #include "output/render.h"
28 #include "output/table-item.h"
29 #include "output/table.h"
31 #include "gl/minmax.h"
32 #include "gl/xalloc.h"
34 /* This file uses TABLE_HORZ and TABLE_VERT enough to warrant abbreviating. */
38 /* A layout for rendering a specific table on a specific device.
40 May represent the layout of an entire table presented to
41 render_page_create(), or a rectangular subregion of a table broken out using
42 render_break_next() to allow a table to be broken across multiple pages.
44 A page's size is not limited to the size passed in as part of render_params.
45 render_pager breaks a render_page into smaller render_pages that will fit in
46 the available space. */
49 const struct render_params *params; /* Parameters of the target device. */
50 struct table *table; /* Table rendered. */
53 /* Local copies of table->n and table->h, for convenience. */
55 int h[TABLE_N_AXES][2];
57 /* cp[H] represents x positions within the table.
59 cp[H][1] = the width of the leftmost vertical rule.
60 cp[H][2] = cp[H][1] + the width of the leftmost column.
61 cp[H][3] = cp[H][2] + the width of the second-from-left vertical rule.
63 cp[H][2 * nc] = x position of the rightmost vertical rule.
64 cp[H][2 * nc + 1] = total table width including all rules.
66 Similarly, cp[V] represents y positions within the table.
68 cp[V][1] = the height of the topmost horizontal rule.
69 cp[V][2] = cp[V][1] + the height of the topmost row.
70 cp[V][3] = cp[V][2] + the height of the second-from-top horizontal rule.
72 cp[V][2 * nr] = y position of the bottommost horizontal rule.
73 cp[V][2 * nr + 1] = total table height including all rules.
75 Rules and columns can have width or height 0, in which case consecutive
76 values in this array are equal. */
77 int *cp[TABLE_N_AXES];
79 /* render_break_next() can break a table such that some cells are not fully
80 contained within a render_page. This will happen if a cell is too wide
81 or too tall to fit on a single page, or if a cell spans multiple rows or
82 columns and the page only includes some of those rows or columns.
84 This hash table contains "struct render_overflow"s that represent each
85 such cell that doesn't completely fit on this page.
87 Each overflow cell borders at least one header edge of the table and may
88 border more. (A single table cell that is so large that it fills the
89 entire page can overflow on all four sides!) */
90 struct hmap overflows;
92 /* If a single column (or row) is too wide (or tall) to fit on a page
93 reasonably, then render_break_next() will split a single row or column
94 across multiple render_pages. This member indicates when this has
97 is_edge_cutoff[H][0] is true if pixels have been cut off the left side
98 of the leftmost column in this page, and false otherwise.
100 is_edge_cutoff[H][1] is true if pixels have been cut off the right side
101 of the rightmost column in this page, and false otherwise.
103 is_edge_cutoff[V][0] and is_edge_cutoff[V][1] are similar for the top
104 and bottom of the table.
106 The effect of is_edge_cutoff is to prevent rules along the edge in
107 question from being rendered.
109 When is_edge_cutoff is true for a given edge, the 'overflows' hmap will
110 contain a node for each cell along that edge. */
111 bool is_edge_cutoff[TABLE_N_AXES][2];
113 /* If part of a joined cell would be cut off by breaking a table along
114 'axis' at the rule with offset 'z' (where 0 <= z <= n[axis]), then
115 join_crossing[axis][z] is the thickness of the rule that would be cut
118 This is used to know to allocate extra space for breaking at such a
119 position, so that part of the cell's content is not lost.
121 This affects breaking a table only when headers are present. When
122 headers are not present, the rule's thickness is used for cell content,
123 so no part of the cell's content is lost (and in fact it is duplicated
124 across both pages). */
125 int *join_crossing[TABLE_N_AXES];
128 static struct render_page *render_page_create (const struct render_params *,
129 const struct table *);
131 static void render_page_unref (struct render_page *);
133 /* Returns the offset in struct render_page's cp[axis] array of the rule with
134 index RULE_IDX. That is, if RULE_IDX is 0, then the offset is that of the
135 leftmost or topmost rule; if RULE_IDX is 1, then the offset is that of the
136 next rule to the right (or below); and so on. */
138 rule_ofs (int rule_idx)
143 /* Returns the offset in struct render_page's cp[axis] array of the rule with
144 index RULE_IDX_R, which counts from the right side (or bottom) of the page
145 left (or up), according to whether AXIS is H or V, respectively. That is,
146 if RULE_IDX_R is 0, then the offset is that of the rightmost or bottommost
147 rule; if RULE_IDX_R is 1, then the offset is that of the next rule to the left
148 (or above); and so on. */
150 rule_ofs_r (const struct render_page *page, int axis, int rule_idx_r)
152 return (page->n[axis] - rule_idx_r) * 2;
155 /* Returns the offset in struct render_page's cp[axis] array of the cell with
156 index CELL_IDX. That is, if CELL_IDX is 0, then the offset is that of the
157 leftmost or topmost cell; if CELL_IDX is 1, then the offset is that of the
158 next cell to the right (or below); and so on. */
160 cell_ofs (int cell_idx)
162 return cell_idx * 2 + 1;
165 /* Returns the width of PAGE along AXIS from OFS0 to OFS1, exclusive. */
167 axis_width (const struct render_page *page, int axis, int ofs0, int ofs1)
169 return page->cp[axis][ofs1] - page->cp[axis][ofs0];
172 /* Returns the width of the headers in PAGE along AXIS. */
174 headers_width (const struct render_page *page, int axis)
176 int h0 = page->h[axis][0];
177 int w0 = axis_width (page, axis, rule_ofs (0), cell_ofs (h0));
178 int n = page->n[axis];
179 int h1 = page->h[axis][1];
180 int w1 = axis_width (page, axis, rule_ofs_r (page, axis, h1), cell_ofs (n));
184 /* Returns the width of cell X along AXIS in PAGE. */
186 cell_width (const struct render_page *page, int axis, int x)
188 return axis_width (page, axis, cell_ofs (x), cell_ofs (x) + 1);
191 /* Returns the width of rule X along AXIS in PAGE. */
193 rule_width (const struct render_page *page, int axis, int x)
195 return axis_width (page, axis, rule_ofs (x), rule_ofs (x) + 1);
198 /* Returns the width of rule X along AXIS in PAGE, where X counts from the
right side (or bottom) of the page. */
200 rule_width_r (const struct render_page *page, int axis, int x)
202 int ofs = rule_ofs_r (page, axis, x);
203 return axis_width (page, axis, ofs, ofs + 1);
206 /* Returns the width of cells X0 through X1, exclusive, along AXIS in PAGE. */
208 joined_width (const struct render_page *page, int axis, int x0, int x1)
210 return axis_width (page, axis, cell_ofs (x0), cell_ofs (x1) - 1);
213 /* Returns the width of the widest cell, excluding headers, along AXIS in
216 max_cell_width (const struct render_page *page, int axis)
218 int n = page->n[axis];
219 int x0 = page->h[axis][0];
220 int x1 = n - page->h[axis][1];
224 for (x = x0; x < x1; x++)
226 int w = cell_width (page, axis, x);
233 /* A cell that doesn't completely fit on the render_page. */
234 struct render_overflow
236 struct hmap_node node; /* In render_page's 'overflows' hmap. */
238 /* Occupied region of page.
240 d[H][0] is the leftmost column.
241 d[H][1] is the rightmost column, plus 1.
242 d[V][0] is the top row.
243 d[V][1] is the bottom row, plus 1.
245 The cell in its original table might occupy a larger region. This
246 member reflects the size of the cell in the current render_page, after
247 trimming off any rows or columns due to page-breaking. */
250 /* The space that has been trimmed off the cell:
252 overflow[H][0]: space trimmed off its left side.
253 overflow[H][1]: space trimmed off its right side.
254 overflow[V][0]: space trimmed off its top.
255 overflow[V][1]: space trimmed off its bottom.
257 During rendering, this information is used to position the rendered
258 portion of the cell within the available space.
260 When a cell is rendered, sometimes it is permitted to spill over into
261 space that is ordinarily reserved for rules. Either way, this space is
262 still included in overflow values.
264 Suppose, for example, that a cell that joins 2 columns has a width of 60
265 pixels and content "abcdef", that the 2 columns that it joins have
266 widths of 20 and 30 pixels, respectively, and that therefore the rule
267 between the two joined columns has a width of 10 (20 + 10 + 30 = 60).
268 It might render like this, if each character is 10x10, and showing a few
269 extra table cells for context:
277 If this render_page is broken at the rule that separates "gh" from
278 "ijk", then the page that contains the left side of the "abcdef" cell
279 will have overflow[H][1] of 10 + 30 = 40 for its portion of the cell,
280 and the page that contains the right side of the cell will have
281 overflow[H][0] of 20 + 10 = 30. The two resulting pages would look like
299 int overflow[TABLE_N_AXES][2];
302 /* Returns a hash value for (X,Y). */
304 hash_overflow (int x, int y)
306 return hash_int (x + (y << 16), 0);
309 /* Searches PAGE's set of render_overflow for one whose top-left cell is
310 (X,Y). Returns it, if there is one, otherwise a null pointer. */
311 static const struct render_overflow *
312 find_overflow (const struct render_page *page, int x, int y)
314 if (!hmap_is_empty (&page->overflows))
316 const struct render_overflow *of;
318 HMAP_FOR_EACH_WITH_HASH (of, struct render_overflow, node,
319 hash_overflow (x, y), &page->overflows)
320 if (x == of->d[H] && y == of->d[V])
327 /* Row or column dimensions. Used to figure the size of a table in
328 render_page_create() and discarded after that. */
331 /* Width without considering rows (or columns) that span more than one (or
335 /* Width taking spanned rows (or columns) into consideration. */
339 /* Modifies the 'width' members of the N elements of ROWS so that their sum,
340 when added to rule widths RULES[1] through RULES[N - 1] inclusive, is at
343 distribute_spanned_width (int width,
344 struct render_row *rows, const int *rules, int n)
350 /* Sum up the unspanned widths of the N rows for use as weights. */
352 for (x = 0; x < n; x++)
353 total_unspanned += rows[x].unspanned;
354 for (x = 0; x < n - 1; x++)
355 total_unspanned += rules[x + 1];
356 if (total_unspanned >= width)
359 /* The algorithm used here is based on the following description from HTML 4:
361 For cells that span multiple columns, a simple approach consists of
362 apportioning the min/max widths evenly to each of the constituent
363 columns. A slightly more complex approach is to use the min/max
364 widths of unspanned cells to weight how spanned widths are
365 apportioned. Experiments suggest that a blend of the two approaches
366 gives good results for a wide range of tables.
368 We blend the two approaches half-and-half, except that we cannot use the
369 unspanned weights when 'total_unspanned' is 0 (because that would cause a
372 This implementation uses floating-point types and operators, but all the
373 values involved are integers. For integers smaller than 53 bits, this
374 should not lose any precision, and it should degrade gracefully for larger
377 The calculation we want to do is this:
380 w1 = width * (column's unspanned width) / (total unspanned width)
381 (column's width) = (w0 + w1) / 2
383 We implement it as a precise calculation in integers by multiplying w0 and
384 w1 by the common denominator of all three calculations (d), dividing that
385 out in the column width calculation, and then keeping the remainder for
388 (We actually compute the unspanned width of a column as twice the
389 unspanned width, plus the width of the rule on the left, plus the width of
390 the rule on the right. That way each rule contributes to both the cell on
391 its left and on its right.)
394 d1 = 2.0 * (total_unspanned > 0 ? total_unspanned : 1.0);
396 if (total_unspanned > 0)
399 for (x = 0; x < n; x++)
402 if (total_unspanned > 0)
404 double unspanned = rows[x].unspanned * 2.0;
406 unspanned += rules[x + 1];
408 unspanned += rules[x];
409 w += width * unspanned * d0;
412 rows[x].width = MAX (rows[x].width, w / d);
413 w -= rows[x].width * d;
417 /* Initializes PAGE->cp[AXIS] from the row widths in ROWS and the rule widths
420 accumulate_row_widths (const struct render_page *page, enum table_axis axis,
421 const struct render_row *rows, const int *rules)
423 int n = page->n[axis];
429 for (z = 0; z < n; z++)
431 cp[1] = cp[0] + rules[z];
432 cp[2] = cp[1] + rows[z].width;
435 cp[1] = cp[0] + rules[n];
438 /* Returns the sum of widths of the N ROWS and N+1 RULES. */
440 calculate_table_width (int n, const struct render_row *rows, int *rules)
446 for (x = 0; x < n; x++)
447 width += rows[x].width;
448 for (x = 0; x <= n; x++)
454 /* Rendering utility functions. */
456 /* Returns the line style to use for drawing a rule of the given TYPE. */
457 static enum render_line_style
458 rule_to_render_type (unsigned char type)
464 return RENDER_LINE_NONE;
466 return RENDER_LINE_SINGLE;
468 return RENDER_LINE_DOUBLE;
474 /* Returns the width of the rule in TABLE that is at offset Z along axis A, if
475 rendered with PARAMS. */
477 measure_rule (const struct render_params *params, const struct table *table,
478 enum table_axis a, int z)
480 enum table_axis b = !a;
485 /* Determine all types of rules that are present, as a bitmap in 'rules'
486 where rule type 't' is present if bit 2**t is set. */
489 for (d[b] = 0; d[b] < table->n[b]; d[b]++)
490 rules |= 1u << table_get_rule (table, a, d[H], d[V]);
492 /* Calculate maximum width of the rules that are present. */
494 if (rules & (1u << TAL_1)
495 || (z > 0 && z < table->n[a] && rules & (1u << TAL_GAP)))
496 width = params->line_widths[a][RENDER_LINE_SINGLE];
497 if (rules & (1u << TAL_2))
498 width = MAX (width, params->line_widths[a][RENDER_LINE_DOUBLE]);
502 /* Allocates and returns a new render_page using PARAMS and TABLE. Allocates
503 space for all of the members of the new page, but the caller must initialize
504 the 'cp' member itself. */
505 static struct render_page *
506 render_page_allocate (const struct render_params *params,
509 struct render_page *page;
512 page = xmalloc (sizeof *page);
513 page->params = params;
516 page->n[H] = table->n[H];
517 page->n[V] = table->n[V];
518 page->h[H][0] = table->h[H][0];
519 page->h[H][1] = table->h[H][1];
520 page->h[V][0] = table->h[V][0];
521 page->h[V][1] = table->h[V][1];
523 for (i = 0; i < TABLE_N_AXES; i++)
525 page->cp[i] = xmalloc ((2 * page->n[i] + 2) * sizeof *page->cp[i]);
526 page->join_crossing[i] = xzalloc ((page->n[i] + 1) * sizeof *page->join_crossing[i]);
529 hmap_init (&page->overflows);
530 memset (page->is_edge_cutoff, 0, sizeof page->is_edge_cutoff);
535 /* Allocates and returns a new render_page for PARAMS and TABLE, initializing
536 cp[H] in the new page from ROWS and RULES. The caller must still initialize
538 static struct render_page *
539 create_page_with_exact_widths (const struct render_params *params,
541 const struct render_row *rows, int *rules)
543 struct render_page *page = render_page_allocate (params, table);
544 accumulate_row_widths (page, H, rows, rules);
548 /* Allocates and returns a new render_page for PARAMS and TABLE.
550 Initializes cp[H] in the new page by setting the width of each row 'i' to
551 somewhere between the minimum cell width ROW_MIN[i].width and the maximum
552 ROW_MAX[i].width. Sets the width of rules to those in RULES.
554 W_MIN is the sum of ROWS_MIN[].width.
556 W_MAX is the sum of ROWS_MAX[].width.
558 The caller must still initialize cp[V]. */
559 static struct render_page *
560 create_page_with_interpolated_widths (const struct render_params *params,
562 const struct render_row *rows_min,
563 const struct render_row *rows_max,
564 int w_min, int w_max, const int *rules)
566 /* This implementation uses floating-point types and operators, but all the
567 values involved are integers. For integers smaller than 53 bits, this
568 should not lose any precision, and it should degrade gracefully for larger
570 const int n = table->n[H];
571 const double avail = params->size[H] - w_min;
572 const double wanted = w_max - w_min;
573 struct render_page *page;
580 page = render_page_allocate (params, table);
584 w = (int) wanted / 2;
585 for (x = 0; x < n; x++)
589 w += avail * (rows_max[x].width - rows_min[x].width);
593 cph[1] = cph[0] + rules[x];
594 cph[2] = cph[1] + rows_min[x].width + extra;
597 cph[1] = cph[0] + rules[n];
599 assert (page->cp[H][n * 2 + 1] == params->size[H]);
605 set_join_crossings (struct render_page *page, enum table_axis axis,
606 const struct table_cell *cell, int *rules)
610 for (z = cell->d[axis][0] + 1; z <= cell->d[axis][1] - 1; z++)
611 page->join_crossing[axis][z] = rules[z];
614 /* Creates and returns a new render_page for rendering TABLE on a device
617 The new render_page will be suitable for rendering on a device whose page
618 size is PARAMS->size, but the caller is responsible for actually breaking it
619 up to fit on such a device, using the render_break abstraction. */
620 static struct render_page *
621 render_page_create (const struct render_params *params,
622 const struct table *table_)
624 struct render_page *page;
627 struct render_row *columns[2];
628 struct render_row *rows;
630 int *rules[TABLE_N_AXES];
634 enum table_axis axis;
636 table = table_ref (table_);
637 nc = table_nc (table);
638 nr = table_nr (table);
640 /* Figure out rule widths. */
641 for (axis = 0; axis < TABLE_N_AXES; axis++)
643 int n = table->n[axis] + 1;
646 rules[axis] = xnmalloc (n, sizeof *rules);
647 for (z = 0; z < n; z++)
648 rules[axis][z] = measure_rule (params, table, axis, z);
651 /* Calculate minimum and maximum widths of cells that do not
652 span multiple columns. */
653 for (i = 0; i < 2; i++)
654 columns[i] = xzalloc (nc * sizeof *columns[i]);
655 for (y = 0; y < nr; y++)
656 for (x = 0; x < nc; )
658 struct table_cell cell;
660 table_get_cell (table, x, y, &cell);
661 if (y == cell.d[V][0] && table_cell_colspan (&cell) == 1)
666 params->measure_cell_width (params->aux, &cell, &w[MIN], &w[MAX]);
667 for (i = 0; i < 2; i++)
668 if (columns[i][x].unspanned < w[i])
669 columns[i][x].unspanned = w[i];
672 table_cell_free (&cell);
675 /* Distribute widths of spanned columns. */
676 for (i = 0; i < 2; i++)
677 for (x = 0; x < nc; x++)
678 columns[i][x].width = columns[i][x].unspanned;
679 for (y = 0; y < nr; y++)
680 for (x = 0; x < nc; )
682 struct table_cell cell;
684 table_get_cell (table, x, y, &cell);
685 if (y == cell.d[V][0] && table_cell_colspan (&cell) > 1)
689 params->measure_cell_width (params->aux, &cell, &w[MIN], &w[MAX]);
690 for (i = 0; i < 2; i++)
691 distribute_spanned_width (w[i], &columns[i][cell.d[H][0]],
692 rules[H], table_cell_colspan (&cell));
695 table_cell_free (&cell);
698 /* Decide final column widths. */
699 for (i = 0; i < 2; i++)
700 table_widths[i] = calculate_table_width (table_nc (table),
701 columns[i], rules[H]);
702 if (table_widths[MAX] <= params->size[H])
704 /* Fits even with maximum widths. Use them. */
705 page = create_page_with_exact_widths (params, table, columns[MAX],
708 else if (table_widths[MIN] <= params->size[H])
710 /* Fits with minimum widths, so distribute the leftover space. */
711 page = create_page_with_interpolated_widths (
712 params, table, columns[MIN], columns[MAX],
713 table_widths[MIN], table_widths[MAX], rules[H]);
717 /* Doesn't fit even with minimum widths. Assign minimums for now, and
718 later we can break it horizontally into multiple pages. */
719 page = create_page_with_exact_widths (params, table, columns[MIN],
723 /* Calculate heights of cells that do not span multiple rows. */
724 rows = xzalloc (nr * sizeof *rows);
725 for (y = 0; y < nr; y++)
727 for (x = 0; x < nc; )
729 struct render_row *r = &rows[y];
730 struct table_cell cell;
732 table_get_cell (table, x, y, &cell);
733 if (y == cell.d[V][0])
735 if (table_cell_rowspan (&cell) == 1)
737 int w = joined_width (page, H, cell.d[H][0], cell.d[H][1]);
738 int h = params->measure_cell_height (params->aux, &cell, w);
739 if (h > r->unspanned)
740 r->unspanned = r->width = h;
743 set_join_crossings (page, V, &cell, rules[V]);
745 if (table_cell_colspan (&cell) > 1)
746 set_join_crossings (page, H, &cell, rules[H]);
749 table_cell_free (&cell);
752 for (i = 0; i < 2; i++)
755 /* Distribute heights of spanned rows. */
756 for (y = 0; y < nr; y++)
757 for (x = 0; x < nc; )
759 struct table_cell cell;
761 table_get_cell (table, x, y, &cell);
762 if (y == cell.d[V][0] && table_cell_rowspan (&cell) > 1)
764 int w = joined_width (page, H, cell.d[H][0], cell.d[H][1]);
765 int h = params->measure_cell_height (params->aux, &cell, w);
766 distribute_spanned_width (h, &rows[cell.d[V][0]], rules[V],
767 table_cell_rowspan (&cell));
770 table_cell_free (&cell);
773 /* Decide final row heights. */
774 accumulate_row_widths (page, V, rows, rules[V]);
777 /* Measure headers. If they are "too big", get rid of them. */
778 for (axis = 0; axis < TABLE_N_AXES; axis++)
780 int hw = headers_width (page, axis);
781 if (hw * 2 >= page->params->size[axis]
782 || hw + max_cell_width (page, axis) > page->params->size[axis])
784 page->table = table_unshare (page->table);
785 page->table->h[axis][0] = page->table->h[axis][1] = 0;
786 page->h[axis][0] = page->h[axis][1] = 0;
796 /* Decreases PAGE's reference count and destroys PAGE if this causes the
797 reference count to fall to zero. */
799 render_page_unref (struct render_page *page)
801 if (page != NULL && --page->ref_cnt == 0)
804 struct render_overflow *overflow, *next;
806 HMAP_FOR_EACH_SAFE (overflow, next, struct render_overflow, node,
809 hmap_destroy (&page->overflows);
811 table_unref (page->table);
813 for (i = 0; i < TABLE_N_AXES; ++i)
815 free (page->join_crossing[i]);
823 /* Returns the size of PAGE along AXIS. (This might be larger than the page
824 size specified in the parameters passed to render_page_create(). Use a
825 render_break to break up a render_page into page-sized chunks.) */
827 render_page_get_size (const struct render_page *page, enum table_axis axis)
829 return page->cp[axis][page->n[axis] * 2 + 1];
833 render_page_get_best_breakpoint (const struct render_page *page, int height)
837 /* If there's no room for at least the top row and the rules above and below
838 it, don't include any of the table. */
839 if (page->cp[V][3] > height)
842 /* Otherwise include as many rows and rules as we can. */
843 for (y = 5; y <= 2 * page->n[V] + 1; y += 2)
844 if (page->cp[V][y] > height)
845 return page->cp[V][y - 2];
849 /* Drawing render_pages. */
851 static inline enum render_line_style
852 get_rule (const struct render_page *page, enum table_axis axis,
853 const int d[TABLE_N_AXES])
855 return rule_to_render_type (table_get_rule (page->table,
856 axis, d[H] / 2, d[V] / 2));
866 render_rule (const struct render_page *page, const int d[TABLE_N_AXES])
868 enum render_line_style styles[TABLE_N_AXES][2];
871 for (a = 0; a < TABLE_N_AXES; a++)
873 enum table_axis b = !a;
875 styles[a][0] = styles[a][1] = RENDER_LINE_NONE;
878 || (page->is_edge_cutoff[a][0] && d[a] == 0)
879 || (page->is_edge_cutoff[a][1] && d[a] == page->n[a] * 2))
890 styles[a][0] = get_rule (page, a, e);
893 if (d[b] / 2 < page->table->n[b])
894 styles[a][1] = get_rule (page, a, d);
897 styles[a][0] = styles[a][1] = get_rule (page, a, d);
900 if (styles[H][0] != RENDER_LINE_NONE || styles[H][1] != RENDER_LINE_NONE
901 || styles[V][0] != RENDER_LINE_NONE || styles[V][1] != RENDER_LINE_NONE)
903 int bb[TABLE_N_AXES][2];
905 bb[H][0] = page->cp[H][d[H]];
906 bb[H][1] = page->cp[H][d[H] + 1];
907 bb[V][0] = page->cp[V][d[V]];
908 bb[V][1] = page->cp[V][d[V] + 1];
909 page->params->draw_line (page->params->aux, bb, styles);
914 render_cell (const struct render_page *page, const struct table_cell *cell)
916 const struct render_overflow *of;
917 int bb[TABLE_N_AXES][2];
918 int clip[TABLE_N_AXES][2];
920 bb[H][0] = clip[H][0] = page->cp[H][cell->d[H][0] * 2 + 1];
921 bb[H][1] = clip[H][1] = page->cp[H][cell->d[H][1] * 2];
922 bb[V][0] = clip[V][0] = page->cp[V][cell->d[V][0] * 2 + 1];
923 bb[V][1] = clip[V][1] = page->cp[V][cell->d[V][1] * 2];
925 of = find_overflow (page, cell->d[H][0], cell->d[V][0]);
928 enum table_axis axis;
930 for (axis = 0; axis < TABLE_N_AXES; axis++)
932 if (of->overflow[axis][0])
934 bb[axis][0] -= of->overflow[axis][0];
935 if (cell->d[axis][0] == 0 && !page->is_edge_cutoff[axis][0])
936 clip[axis][0] = page->cp[axis][cell->d[axis][0] * 2];
938 if (of->overflow[axis][1])
940 bb[axis][1] += of->overflow[axis][1];
941 if (cell->d[axis][1] == page->n[axis] && !page->is_edge_cutoff[axis][1])
942 clip[axis][1] = page->cp[axis][cell->d[axis][1] * 2 + 1];
947 page->params->draw_cell (page->params->aux, cell, bb, clip);
950 /* Draws the cells of PAGE indicated in BB. */
952 render_page_draw_cells (const struct render_page *page,
953 int bb[TABLE_N_AXES][2])
957 for (y = bb[V][0]; y < bb[V][1]; y++)
958 for (x = bb[H][0]; x < bb[H][1]; )
959 if (is_rule (x) || is_rule (y))
964 render_rule (page, d);
969 struct table_cell cell;
971 table_get_cell (page->table, x / 2, y / 2, &cell);
972 if (y / 2 == bb[V][0] / 2 || y / 2 == cell.d[V][0])
973 render_cell (page, &cell);
974 x = rule_ofs (cell.d[H][1]);
975 table_cell_free (&cell);
979 /* Renders PAGE, by calling the 'draw_line' and 'draw_cell' functions from the
980 render_params provided to render_page_create(). */
982 render_page_draw (const struct render_page *page)
984 int bb[TABLE_N_AXES][2];
987 bb[H][1] = page->n[H] * 2 + 1;
989 bb[V][1] = page->n[V] * 2 + 1;
991 render_page_draw_cells (page, bb);
994 /* Returns the greatest value i, 0 <= i < n, such that cp[i] <= x0. */
996 get_clip_min_extent (int x0, const int cp[], int n)
1005 int middle = low + (high - low) / 2;
1007 if (cp[middle] <= x0)
1019 /* Returns the least value i, 0 <= i < n, such that cp[i] >= x1. */
1021 get_clip_max_extent (int x1, const int cp[], int n)
1023 int low, high, best;
1030 int middle = low + (high - low) / 2;
1032 if (cp[middle] >= x1)
1033 best = high = middle;
1038 while (best > 0 && cp[best - 1] == cp[best])
1044 /* Renders the cells of PAGE that intersect (X,Y)-(X+W,Y+H), by calling the
1045 'draw_line' and 'draw_cell' functions from the render_params provided to
1046 render_page_create(). */
1048 render_page_draw_region (const struct render_page *page,
1049 int x, int y, int w, int h)
1051 int bb[TABLE_N_AXES][2];
1053 bb[H][0] = get_clip_min_extent (x, page->cp[H], page->n[H] * 2 + 1);
1054 bb[H][1] = get_clip_max_extent (x + w, page->cp[H], page->n[H] * 2 + 1);
1055 bb[V][0] = get_clip_min_extent (y, page->cp[V], page->n[V] * 2 + 1);
1056 bb[V][1] = get_clip_max_extent (y + h, page->cp[V], page->n[V] * 2 + 1);
1058 render_page_draw_cells (page, bb);
1061 /* Breaking up tables to fit on a page. */
1063 /* An iterator for breaking render_pages into smaller chunks. */
1066 struct render_page *page; /* Page being broken up. */
1067 enum table_axis axis; /* Axis along which 'page' is being broken. */
1068 int z; /* Next cell along 'axis'. */
1069 int pixel; /* Pixel offset within cell 'z' (usually 0). */
1070 int hw; /* Width of headers of 'page' along 'axis'. */
1073 static int needed_size (const struct render_break *, int cell);
1074 static bool cell_is_breakable (const struct render_break *, int cell);
1075 static struct render_page *render_page_select (const struct render_page *,
1080 /* Initializes render_break B for breaking PAGE along AXIS.
1082 Ownership of PAGE is transferred to B. The caller must use
1083 render_page_ref() if it needs to keep a copy of PAGE. */
1085 render_break_init (struct render_break *b, struct render_page *page,
1086 enum table_axis axis)
1090 b->z = page->h[axis][0];
1092 b->hw = headers_width (page, axis);
1095 /* Initializes B as a render_break structure for which
1096 render_break_has_next() always returns false. */
1098 render_break_init_empty (struct render_break *b)
1101 b->axis = TABLE_HORZ;
1107 /* Frees B and unrefs the render_page that it owns. */
1109 render_break_destroy (struct render_break *b)
1113 render_page_unref (b->page);
1118 /* Returns true if B still has cells that are yet to be returned,
1119 false if all of B's page has been processed. */
1121 render_break_has_next (const struct render_break *b)
1123 const struct render_page *page = b->page;
1124 enum table_axis axis = b->axis;
1126 return page != NULL && b->z < page->n[axis] - page->h[axis][1];
1129 /* Returns a new render_page that is up to SIZE pixels wide along B's axis.
1130 Returns a null pointer if B has already been completely broken up, or if
1131 SIZE is too small to reasonably render any cells. The latter will never
1132 happen if SIZE is at least as large as the page size passed to
1133 render_page_create() along B's axis. */
1134 static struct render_page *
1135 render_break_next (struct render_break *b, int size)
1137 const struct render_page *page = b->page;
1138 enum table_axis axis = b->axis;
1139 struct render_page *subpage;
1142 if (!render_break_has_next (b))
1146 for (z = b->z; z < page->n[axis] - page->h[axis][1]; z++)
1148 int needed = needed_size (b, z + 1);
1151 if (cell_is_breakable (b, z))
1153 /* If there is no right header and we render a partial cell on
1154 the right side of the body, then we omit the rightmost rule of
1155 the body. Otherwise the rendering is deceptive because it
1156 looks like the whole cell is present instead of a partial
1159 This is similar to code for the left side in needed_size(). */
1160 int rule_allowance = (page->h[axis][1]
1162 : rule_width (page, axis, z));
1164 /* The amount that, if we added cell 'z', the rendering would
1165 overfill the allocated 'size'. */
1166 int overhang = needed - size - rule_allowance;
1168 /* The width of cell 'z'. */
1169 int cell_size = cell_width (page, axis, z);
1171 /* The amount trimmed off the left side of 'z',
1172 and the amount left to render. */
1173 int cell_ofs = z == b->z ? b->pixel : 0;
1174 int cell_left = cell_size - cell_ofs;
1176 /* A small but visible width. */
1177 int em = page->params->font_size[axis];
1179 /* If some of the cell remains to render,
1180 and there would still be some of the cell left afterward,
1181 then partially render that much of the cell. */
1182 pixel = (cell_left && cell_left > overhang
1183 ? cell_left - overhang + cell_ofs
1186 /* If there would be only a tiny amount of the cell left after
1187 rendering it partially, reduce the amount rendered slightly
1188 to make the output look a little better. */
1189 if (pixel + em > cell_size)
1190 pixel = MAX (pixel - em, 0);
1192 /* If we're breaking vertically, then consider whether the cells
1193 being broken have a better internal breakpoint than the exact
1194 number of pixels available, which might look bad e.g. because
1195 it breaks in the middle of a line of text. */
1196 if (axis == TABLE_VERT && page->params->adjust_break)
1200 for (x = 0; x < page->n[H]; )
1202 struct table_cell cell;
1206 table_get_cell (page->table, x, z, &cell);
1207 w = joined_width (page, H, cell.d[H][0], cell.d[H][1]);
1208 better_pixel = page->params->adjust_break (
1209 page->params->aux, &cell, w, pixel);
1211 table_cell_free (&cell);
1213 if (better_pixel < pixel)
1215 if (better_pixel > (z == b->z ? b->pixel : 0))
1217 pixel = better_pixel;
1220 else if (better_pixel == 0 && z != b->z)
1233 if (z == b->z && !pixel)
1236 subpage = render_page_select (page, axis, b->z, b->pixel,
1238 pixel ? cell_width (page, axis, z) - pixel
1245 /* Returns the width that would be required along B's axis to render a page
1246 from B's current position up to but not including CELL. */
/* NOTE(review): this excerpt is missing several original lines here,
   apparently including the 'static int' return type, the opening brace, the
   'int size;' declaration, and the trailing 'return size;' -- confirm against
   the complete source.  The function sums, left to right along the axis:
   left header, merged header/body rule, body, merged body/right-header rule,
   right header, and any join crossing. */
1248 needed_size (const struct render_break *b, int cell)
1250 const struct render_page *page = b->page;
1251 enum table_axis axis = b->axis;
1254 /* Width of left header not including its rightmost rule. */
1255 size = axis_width (page, axis, 0, rule_ofs (page->h[axis][0]));
1257 /* If we have a pixel offset and there is no left header, then we omit the
1258 leftmost rule of the body. Otherwise the rendering is deceptive because
1259 it looks like the whole cell is present instead of a partial cell.
1261 Otherwise (if there are headers) we will be merging two rules: the
1262 rightmost rule in the header and the leftmost rule in the body. We assume
1263 that the width of a merged rule is the larger of the widths of either rule
/* ...(the end of the original comment is on a missing line). */
1265 if (b->pixel == 0 || page->h[axis][0])
1266 size += MAX (rule_width (page, axis, page->h[axis][0]),
1267 rule_width (page, axis, b->z));
1269 /* Width of body, minus any pixel offset in the leftmost cell. */
1270 size += joined_width (page, axis, b->z, cell) - b->pixel;
1272 /* Width of rightmost rule in body merged with leftmost rule in headers. */
1273 size += MAX (rule_width_r (page, axis, page->h[axis][1]),
1274 rule_width (page, axis, cell));
1276 /* Width of right header not including its leftmost rule. */
1277 size += axis_width (page, axis, rule_ofs_r (page, axis, page->h[axis][1]),
1278 rule_ofs_r (page, axis, 0));
1280 /* Join crossing. */
/* Only when both a leading and a trailing header exist does the extra
   join-crossing width at B's start position get counted. */
1281 if (page->h[axis][0] && page->h[axis][1])
1282 size += page->join_crossing[axis][b->z];
1287 /* Returns true if CELL along B's axis may be broken across a page boundary.
1289 This is just a heuristic. Breaking cells across page boundaries can save
1290 space, but it looks ugly. */
/* NOTE(review): the return-type line (presumably 'static bool') and the
   function's braces are on lines missing from this excerpt. */
1292 cell_is_breakable (const struct render_break *b, int cell)
1294 const struct render_page *page = b->page;
1295 enum table_axis axis = b->axis;
/* A cell may be broken only if it is at least as large along AXIS as the
   device's configured minimum breakable size for that axis. */
1297 return cell_width (page, axis, cell) >= page->params->min_break[axis];
/* NOTE(review): these are members of 'struct render_pager'; the struct header
   and opening brace -- and, judging by the use of 'p->width' in
   render_pager_create(), an 'int width;' member -- are on lines missing from
   this excerpt. */
/* The full page being paginated. */
1305 struct render_page *page;
/* Current horizontal break position within 'page'. */
1306 struct render_break x_break;
/* Current vertical break within the most recent horizontal slice. */
1307 struct render_break y_break;
1310 /* Creates and returns a new render_pager for breaking PAGE into smaller
1311 chunks. Takes ownership of PAGE. */
/* NOTE(review): the comment above mentions PAGE, but the parameter is
   TABLE_ITEM; only the table inside TABLE_ITEM is referenced here, so the
   caller presumably retains ownership of TABLE_ITEM -- confirm against
   render_page_create()'s contract.  The opening brace, 'return p;', and
   closing brace are on lines missing from this excerpt. */
1312 struct render_pager *
1313 render_pager_create (const struct render_params *params,
1314 const struct table_item *table_item)
1316 struct render_pager *p = xmalloc (sizeof *p);
/* Horizontal space available to each slice comes from the device size. */
1317 p->width = params->size[H];
1318 p->page = render_page_create (params, table_item_get_table (table_item));
/* Break horizontally first; the vertical break starts empty and is filled in
   lazily by render_pager_has_next(). */
1319 render_break_init (&p->x_break, p->page, H);
1320 render_break_init_empty (&p->y_break);
/* Destroys P, releasing the breaks it holds and dropping its reference to the
   underlying page.  NOTE(review): the return-type line, braces, and the
   'free (p);' that should follow the unref are on lines missing from this
   excerpt. */
1326 render_pager_destroy (struct render_pager *p)
1330 render_break_destroy (&p->x_break);
1331 render_break_destroy (&p->y_break);
1332 render_page_unref (p->page);
1337 /* Returns true if P has content remaining to render, false if rendering is
   ...complete.  (NOTE(review): the tail of this comment, the return type,
   braces, and the functions' 'return' statements are on lines missing from
   this excerpt.) */
1340 render_pager_has_next (const struct render_pager *p_)
/* Casts away const: this predicate advances internal break state, but that
   is not observable to the caller beyond the answer it returns. */
1342 struct render_pager *p = CONST_CAST (struct render_pager *, p_);
/* While the current vertical break is exhausted, try to start a new one from
   the next horizontal slice. */
1344 while (!render_break_has_next (&p->y_break))
1346 render_break_destroy (&p->y_break);
1347 if (render_break_has_next (&p->x_break))
1349 struct render_page *x_slice;
1351 x_slice = render_break_next (&p->x_break, p->width);
/* Each horizontal slice is then broken vertically. */
1352 render_break_init (&p->y_break, x_slice, V);
/* No more horizontal slices either: leave the vertical break empty. */
1356 render_break_init_empty (&p->y_break);
1363 /* Draws a chunk of content from P to fit in a space that has vertical size
1364 SPACE and the horizontal size specified in the render_params passed to
1365 render_page_create(). Returns the amount of space actually used by the
1366 rendered chunk, which will be 0 if SPACE is too small to render anything or
1367 if no content remains (use render_pager_has_next() to distinguish these
   ...cases).  (NOTE(review): the tail of this comment, the 'int' return type,
   the ': NULL' arm of the conditional below, the null check around the draw,
   and the 'return' statements are on lines missing from this excerpt.) */
1370 render_pager_draw_next (struct render_pager *p, int space)
/* Pull the next vertical chunk only if content remains; render_break_next()
   may still yield nothing useful if SPACE is too small. */
1372 struct render_page *page = (render_pager_has_next (p)
1373 ? render_break_next (&p->y_break, space)
/* The amount of vertical space this chunk consumes. */
1377 int used = render_page_get_size (page, V);
1379 render_page_draw (page);
/* The chunk is a one-shot slice; drop our reference after drawing. */
1380 render_page_unref (page);
1387 /* Draws all of P's content. */
/* NOTE(review): the return-type line (presumably 'void') and the function's
   braces are on lines missing from this excerpt.  Draws the whole page in one
   pass, without any pagination. */
1389 render_pager_draw (const struct render_pager *p)
1391 render_page_draw (p->page);
1394 /* Draws the region of P's content that lies in the region (X,Y)-(X+W,Y+H).
1395 Some extra content might be drawn; the device should perform clipping as
   ...necessary.  (NOTE(review): the tail of this comment, the return type,
   and the function's braces are on lines missing from this excerpt.) */
1398 render_pager_draw_region (const struct render_pager *p,
1399 int x, int y, int w, int h)
/* Straight delegation to the page-level region drawing. */
1401 render_page_draw_region (p->page, x, y, w, h);
1404 /* Returns the size of P's content along AXIS; i.e. the content's width if AXIS
1405 is TABLE_HORZ and its length if AXIS is TABLE_VERT. */
/* NOTE(review): the return-type line (presumably 'int') and the function's
   braces are on lines missing from this excerpt. */
1407 render_pager_get_size (const struct render_pager *p, enum table_axis axis)
1409 return render_page_get_size (p->page, axis);
/* Delegates to render_page_get_best_breakpoint() on the full page --
   presumably returning the best vertical breakpoint for P's content within
   HEIGHT; verify against that function's contract.  NOTE(review): the
   preceding comment, return type, and braces are on lines missing from this
   excerpt. */
1413 render_pager_get_best_breakpoint (const struct render_pager *p, int height)
1415 return render_page_get_best_breakpoint (p->page, height);
1418 /* render_page_select() and helpers. */
/* Bundles the parameters of one render_page_select() invocation so the
   helper functions below can share them.  NOTE(review): the struct's braces
   and terminating ';' are on lines missing from this excerpt. */
1420 struct render_page_selection
1422 const struct render_page *page; /* Page whose slice we are selecting. */
1423 struct render_page *subpage; /* New page under construction. */
1424 enum table_axis a; /* Axis of 'page' along which 'subpage' is a slice. */
1425 enum table_axis b; /* The opposite of 'a'. */
1426 int z0; /* First cell along 'a' being selected. */
1427 int z1; /* Last cell being selected, plus 1. */
1428 int p0; /* Number of pixels to trim off left side of z0. */
1429 int p1; /* Number of pixels to trim off right side of z1-1. */
/* Forward declarations for the helpers used by render_page_select(). */
1432 static void cell_to_subpage (struct render_page_selection *,
1433 const struct table_cell *,
1434 int subcell[TABLE_N_AXES]);
1435 static const struct render_overflow *find_overflow_for_cell (
1436 struct render_page_selection *, const struct table_cell *);
1437 static struct render_overflow *insert_overflow (struct render_page_selection *,
1438 const struct table_cell *);
1440 /* Creates and returns a new render_page whose contents are a subregion of
1441 PAGE's contents. The new render_page includes cells Z0 through Z1 along
1442 AXIS, plus any headers on AXIS.
1444 If P0 is nonzero, then it is a number of pixels to exclude from the left or
1445 top (according to AXIS) of cell Z0. Similarly, P1 is a number of pixels to
1446 exclude from the right or bottom of cell Z1 - 1. (P0 and P1 are used to
1447 render cells that are too large to fit on a single page.)
1449 The whole of axis !AXIS is included. (The caller may follow up with another
1450 call to render_page_select() to select on !AXIS to select on that axis as
1453 The caller retains ownership of PAGE, which is not modified. */
/* NOTE(review): numerous original lines are missing from this excerpt --
   apparently including declarations ('int z', 'int *jc', and the source and
   destination cursors for the 'cp' copies), several braces, some 'if'/'else'
   arms, the initialization of most members of 's', and the trailing
   'return subpage;'.  The added comments below are best-effort; confirm
   against the complete source. */
1454 static struct render_page *
1455 render_page_select (const struct render_page *page, enum table_axis axis,
1456 int z0, int p0, int z1, int p1)
1458 struct render_page_selection s;
1459 enum table_axis a = axis;
1460 enum table_axis b = !a;
1461 struct render_page *subpage;
1462 struct render_overflow *ro;
1468 /* Optimize case where all of PAGE is selected by just incrementing the
   ...reference count.  (NOTE(review): the continuation of this comment and
   the body of the fast-path branch are on missing lines; presumably it
   returns PAGE with a bumped reference count -- confirm.) */
1470 if (z0 == page->h[a][0] && p0 == 0
1471 && z1 == page->n[a] - page->h[a][1] && p1 == 0)
1473 struct render_page *page_rw = CONST_CAST (struct render_page *, page);
1478 /* Allocate subpage. */
/* NOTE(review): the remaining arguments to table_select_slice() are on
   missing lines. */
1479 subpage = render_page_allocate (page->params,
1480 table_select_slice (
1481 table_ref (page->table),
1484 /* An edge is cut off if it was cut off in PAGE or if we're trimming pixels
1485 off that side of the page and there are no headers. */
1486 subpage->is_edge_cutoff[a][0] =
1487 subpage->h[a][0] == 0 && (p0 || (z0 == 0 && page->is_edge_cutoff[a][0]));
1488 subpage->is_edge_cutoff[a][1] =
1489 subpage->h[a][1] == 0 && (p1 || (z1 == page->n[a]
1490 && page->is_edge_cutoff[a][1]));
/* The cross axis is copied whole, so its cutoff flags carry over as-is. */
1491 subpage->is_edge_cutoff[b][0] = page->is_edge_cutoff[b][0];
1492 subpage->is_edge_cutoff[b][1] = page->is_edge_cutoff[b][1];
1494 /* Select join crossings from PAGE into subpage. */
/* Three runs along 'a': leading header, the selected body range, trailing
   header.  The assert below checks that exactly n[a]+1 entries were written. */
1495 jc = subpage->join_crossing[a];
1496 for (z = 0; z < page->h[a][0]; z++)
1497 *jc++ = page->join_crossing[a][z];
1498 for (z = z0; z <= z1; z++)
1499 *jc++ = page->join_crossing[a][z];
1500 for (z = page->n[a] - page->h[a][1]; z < page->n[a]; z++)
1501 *jc++ = page->join_crossing[a][z];
1502 assert (jc == &subpage->join_crossing[a][subpage->n[a] + 1]);
1504 memcpy (subpage->join_crossing[b], page->join_crossing[b],
1505 (subpage->n[b] + 1) * sizeof **subpage->join_crossing);
1507 /* Select widths from PAGE into subpage. */
/* The 'cp' arrays alternate rule and cell positions; each destination entry
   is the previous position plus the corresponding source span.  NOTE(review):
   the declarations/initialization of the source cursor ('scp') and 'dcp'
   appear on missing lines. */
1509 dcp = subpage->cp[a];
/* Leading header rules and cells. */
1511 for (z = 0; z <= rule_ofs (subpage->h[a][0]); z++, dcp++)
/* The leftmost rule is dropped when that edge is cut off.  NOTE(review): the
   branch giving it zero width is on a missing line. */
1513 if (z == 0 && subpage->is_edge_cutoff[a][0])
1516 dcp[1] = dcp[0] + (scp[z + 1] - scp[z]);
/* Selected body range. */
1518 for (z = cell_ofs (z0); z <= cell_ofs (z1 - 1); z++, dcp++)
1520 dcp[1] = dcp[0] + (scp[z + 1] - scp[z]);
/* NOTE(review): the trimming of P0 pixels from the first selected cell is on
   a missing line. */
1521 if (z == cell_ofs (z0))
1524 if (page->h[a][0] && page->h[a][1])
1525 dcp[1] += page->join_crossing[a][z / 2];
/* NOTE(review): the trimming of P1 pixels from the last selected cell is on
   a missing line. */
1527 if (z == cell_ofs (z1 - 1))
/* Trailing header cells and rules. */
1530 for (z = rule_ofs_r (page, a, subpage->h[a][1]);
1531 z <= rule_ofs_r (page, a, 0); z++, dcp++)
1533 if (z == rule_ofs_r (page, a, 0) && subpage->is_edge_cutoff[a][1])
1536 dcp[1] = dcp[0] + (scp[z + 1] - scp[z]);
1538 assert (dcp == &subpage->cp[a][2 * subpage->n[a] + 1]);
/* Cross-axis positions are copied through unchanged. */
1540 for (z = 0; z < page->n[b] * 2 + 2; z++)
1541 subpage->cp[b][z] = page->cp[b][z];
1543 /* Add new overflows. */
/* NOTE(review): the initialization of the other members of 's' (page, a, b,
   z0, z1, p0, p1) is on missing lines. */
1551 s.subpage = subpage;
/* Scan cells along the leading edge of the selection for cells that cross
   the boundary and therefore overflow off the subpage. */
1553 if (!page->h[a][0] || z0 > page->h[a][0] || p0)
1554 for (z = 0; z < page->n[b]; )
1556 struct table_cell cell;
1557 int d[TABLE_N_AXES];
/* NOTE(review): the assignments filling in 'd' and the declarations of
   'overflow0'/'overflow1' are on missing lines. */
1564 table_get_cell (page->table, d[H], d[V], &cell);
1565 overflow0 = p0 || cell.d[a][0] < z0;
1566 overflow1 = cell.d[a][1] > z1 || (cell.d[a][1] == z1 && p1);
1567 if (overflow0 || overflow1)
1569 ro = insert_overflow (&s, &cell);
/* Leading overflow: the trimmed pixels plus the width of the cell's part
   that lies before Z0. */
1573 ro->overflow[a][0] += p0 + axis_width (
1574 page, a, cell_ofs (cell.d[a][0]), cell_ofs (z0));
/* NOTE(review): the continuation of this join-crossing index expression is
   on a missing line. */
1575 if (page->h[a][0] && page->h[a][1])
1576 ro->overflow[a][0] -= page->join_crossing[a][cell.d[a][0]
/* Trailing overflow: the trimmed pixels plus the width of the cell's part
   that lies at or after Z1. */
1582 ro->overflow[a][1] += p1 + axis_width (
1583 page, a, cell_ofs (z1), cell_ofs (cell.d[a][1]));
1584 if (page->h[a][0] && page->h[a][1])
1585 ro->overflow[a][1] -= page->join_crossing[a][cell.d[a][1]];
1589 table_cell_free (&cell);
/* Same scan for the trailing edge of the selection, skipping cells already
   recorded by the leading-edge pass. */
1592 if (!page->h[a][1] || z1 < page->n[a] - page->h[a][1] || p1)
1593 for (z = 0; z < page->n[b]; )
1595 struct table_cell cell;
1596 int d[TABLE_N_AXES];
1600 table_get_cell (page->table, d[H], d[V], &cell);
1601 if ((cell.d[a][1] > z1 || (cell.d[a][1] == z1 && p1))
1602 && find_overflow_for_cell (&s, &cell) == NULL)
1604 ro = insert_overflow (&s, &cell);
1605 ro->overflow[a][1] += p1 + axis_width (page, a, cell_ofs (z1),
1606 cell_ofs (cell.d[a][1]));
1609 table_cell_free (&cell);
1612 /* Copy overflows from PAGE into subpage. */
/* Pre-existing overflows on PAGE that intersect the selected range carry
   over, unless a fresh overflow for the same cell was already inserted. */
1613 HMAP_FOR_EACH (ro, struct render_overflow, node, &page->overflows)
1615 struct table_cell cell;
1617 table_get_cell (page->table, ro->d[H], ro->d[V], &cell);
1618 if (cell.d[a][1] > z0 && cell.d[a][0] < z1
1619 && find_overflow_for_cell (&s, &cell) == NULL)
1620 insert_overflow (&s, &cell);
1621 table_cell_free (&cell);
/* NOTE(review): the trailing 'return subpage;' and closing brace are on
   missing lines. */
1627 /* Given CELL, a table_cell within S->page, stores in SUBCELL the (x,y)
1628 coordinates of the top-left cell as it will appear in S->subpage.
1630 CELL must actually intersect the region of S->page that is being selected
1631 by render_page_select() or the results will not make any sense. */
/* NOTE(review): the 'static void' return-type line and the function's braces
   are on lines missing from this excerpt. */
1633 cell_to_subpage (struct render_page_selection *s,
1634 const struct table_cell *cell, int subcell[TABLE_N_AXES])
1636 enum table_axis a = s->a;
1637 enum table_axis b = s->b;
1638 int ha0 = s->subpage->h[a][0];
/* Along the selection axis: shift by the selection origin Z0 into the body
   of the subpage, clamping so that cells starting inside the leading header
   keep their header-relative position. */
1640 subcell[a] = MAX (cell->d[a][0] - s->z0 + ha0, ha0);
/* The cross axis is selected whole, so its coordinate passes through. */
1641 subcell[b] = cell->d[b][0];
1644 /* Given CELL, a table_cell within S->page, returns the render_overflow for
1645 that cell in S->subpage, if there is one, and a null pointer otherwise.
1647 CELL must actually intersect the region of S->page that is being selected
1648 by render_page_select() or the results will not make any sense. */
1649 static const struct render_overflow *
1650 find_overflow_for_cell (struct render_page_selection *s,
1651 const struct table_cell *cell)
/* NOTE(review): the opening brace and the declaration of 'subcell'
   (presumably 'int subcell[TABLE_N_AXES];') are on lines missing from this
   excerpt. */
/* Translate the cell's origin into subpage coordinates, then look it up in
   the subpage's overflow map. */
1655 cell_to_subpage (s, cell, subcell);
1656 return find_overflow (s->subpage, subcell[H], subcell[V]);
1659 /* Given CELL, a table_cell within S->page, inserts a render_overflow for that
1660 cell in S->subpage (which must not already exist). Initializes the new
1661 render_overflow's 'overflow' member from the overflow for CELL in S->page,
1664 CELL must actually intersect the region of S->page that is being selected
1665 by render_page_select() or the results will not make any sense. */
1666 static struct render_overflow *
1667 insert_overflow (struct render_page_selection *s,
1668 const struct table_cell *cell)
1670 const struct render_overflow *old;
1671 struct render_overflow *of;
1673 of = xzalloc (sizeof *of);
1674 cell_to_subpage (s, cell, of->d);
1675 hmap_insert (&s->subpage->overflows, &of->node,
1676 hash_overflow (of->d[H], of->d[V]));
1678 old = find_overflow (s->page, cell->d[H][0], cell->d[V][0]);
1680 memcpy (of->overflow, old->overflow, sizeof of->overflow);