/*
 * Copyright © 2020  Google, Inc.
 *
 *  This is part of HarfBuzz, a text shaping library.
 *
 * Permission is hereby granted, without written agreement and without
 * license or royalty fees, to use, copy, modify, and distribute this
 * software and its documentation for any purpose, provided that the
 * above copyright notice and the following two paragraphs appear in
 * all copies of this software.
 *
 * IN NO EVENT SHALL THE COPYRIGHT HOLDER BE LIABLE TO ANY PARTY FOR
 * DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES
 * ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN
 * IF THE COPYRIGHT HOLDER HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 *
 * THE COPYRIGHT HOLDER SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING,
 * BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
 * FITNESS FOR A PARTICULAR PURPOSE.  THE SOFTWARE PROVIDED HEREUNDER IS
 * ON AN "AS IS" BASIS, AND THE COPYRIGHT HOLDER HAS NO OBLIGATION TO
 * PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
 *
 * Google Author(s): Garret Rieger
 */

#ifndef HB_REPACKER_HH
#define HB_REPACKER_HH

#include "hb-open-type.hh"
#include "hb-map.hh"
#include "hb-vector.hh"
#include "graph/graph.hh"
#include "graph/gsubgpos-graph.hh"
#include "graph/serialize.hh"

using graph::graph_t;

/*
 * For a detailed writeup on the overflow resolution algorithm see:
 * docs/repacker.md
 */
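
// Background, for readers new to this file: OpenType tables mostly link their
// subtables with 16-bit offsets (Offset16), which can only encode distances up
// to 65,535 bytes from the start of the parent subtable. An "overflow" occurs
// when the serialized layout would require a larger distance. The code below
// reorders the object graph (and, where safe, duplicates or rewrites nodes)
// until every link fits.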

struct lookup_size_t
{
  unsigned lookup_index;
  size_t size;
  unsigned num_subtables;

  static int cmp (const void* a, const void* b)
  {
    return cmp ((const lookup_size_t*) a,
                (const lookup_size_t*) b);
  }

  static int cmp (const lookup_size_t* a, const lookup_size_t* b)
  {
    double subtables_per_byte_a = (double) a->num_subtables / (double) a->size;
    double subtables_per_byte_b = (double) b->num_subtables / (double) b->size;
    if (subtables_per_byte_a == subtables_per_byte_b) {
      return b->lookup_index - a->lookup_index;
    }

    double cmp = subtables_per_byte_b - subtables_per_byte_a;
    if (cmp < 0) return -1;
    if (cmp > 0) return 1;
    return 0;
  }
};
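
// Ordering example (illustrative numbers, not from the original source):
// given {lookup 5: 1000 bytes, 4 subtables} = 0.004 subtables/byte and
// {lookup 9: 1000 bytes, 2 subtables} = 0.002 subtables/byte, cmp () sorts
// lookup 5 first; higher subtables-per-byte ratios sort earlier, and equal
// ratios are broken by placing the higher lookup index first.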

static inline
bool _presplit_subtables_if_needed (graph::gsubgpos_graph_context_t& ext_context)
{
  // For each lookup this will check the size of subtables and split them as needed
  // so that no subtable is at risk of overflowing (where we support splitting for
  // that subtable type).
  //
  // TODO(grieger): de-dup newly added nodes as necessary. Probably just want a full de-dup
  //                pass after this processing is done. Not super necessary as splits are
  //                only done where overflow is likely, so de-dup probably will get undone
  //                later anyways.

  // The loop below can modify the contents of ext_context.lookups if new subtables are added
  // to a lookup during a split. So save the initial set of lookup indices so the iteration doesn't
  // risk accessing freed memory if ext_context.lookups gets resized.
  hb_set_t lookup_indices (ext_context.lookups.keys ());
  for (unsigned lookup_index : lookup_indices)
  {
    graph::Lookup* lookup = ext_context.lookups.get (lookup_index);
    if (!lookup->split_subtables_if_needed (ext_context, lookup_index))
      return false;
  }

  return true;
}

/*
 * Analyze the lookups in a GSUB/GPOS table and decide if any should be promoted
 * to extension lookups.
 */
static inline
bool _promote_extensions_if_needed (graph::gsubgpos_graph_context_t& ext_context)
{
  // Simple Algorithm (v1, current):
  // 1. Calculate how many bytes each non-extension lookup consumes.
  // 2. Select up to 64k of those to remain as non-extension (greedy, highest subtables per byte first).
  // 3. Promote the rest.
  //
  // Advanced Algorithm (v2, not implemented):
  // 1. Perform connected component analysis using lookups as roots.
  // 2. Compute the size of each connected component.
  // 3. Select up to 64k worth of connected components to remain as non-extensions.
  //    (greedy, highest subtables per byte first)
  // 4. Promote the rest.

  // TODO(garretrieger): support extension demotion, then consider all lookups. Requires advanced algo.
  // TODO(garretrieger): also support extension promotion during the iterative resolution phase, then
  //                     we can use a less conservative threshold here.
  // TODO(grieger): skip this for the 24 bit case.
  if (!ext_context.lookups) return true;

  unsigned total_lookup_table_sizes = 0;
  hb_vector_t<lookup_size_t> lookup_sizes;
  lookup_sizes.alloc (ext_context.lookups.get_population (), true);

  for (unsigned lookup_index : ext_context.lookups.keys ())
  {
    const auto& lookup_v = ext_context.graph.vertices_[lookup_index];
    total_lookup_table_sizes += lookup_v.table_size ();

    const graph::Lookup* lookup = ext_context.lookups.get (lookup_index);
    hb_set_t visited;
    lookup_sizes.push (lookup_size_t {
        lookup_index,
        ext_context.graph.find_subgraph_size (lookup_index, visited),
        lookup->number_of_subtables (),
      });
  }

  lookup_sizes.qsort ();

  size_t lookup_list_size = ext_context.graph.vertices_[ext_context.lookup_list_index].table_size ();
  size_t l2_l3_size = lookup_list_size + total_lookup_table_sizes; // Lookup List + Lookups
  size_t l3_l4_size = total_lookup_table_sizes; // Lookups + SubTables
  size_t l4_plus_size = 0; // SubTables + their descendants

  // Start by assuming all lookups are using extension subtables; this size will be removed later
  // if it's decided not to make a lookup an extension.
  for (auto p : lookup_sizes)
  {
    // TODO(garretrieger): this overestimates the extension subtables size because some extension subtables may be
    //                     reused. However, we can't correct this until we have connected component analysis in place.
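    // Each promoted subtable costs an extra 8 bytes for its Extension subtable
    // wrapper: uint16 format + uint16 extensionLookupType + Offset32 extensionOffset.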
    unsigned subtables_size = p.num_subtables * 8;
    l3_l4_size += subtables_size;
    l4_plus_size += subtables_size;
  }

  bool layers_full = false;
  for (auto p : lookup_sizes)
  {
    const graph::Lookup* lookup = ext_context.lookups.get (p.lookup_index);
    if (lookup->is_extension (ext_context.table_tag))
      // Already an extension, so its size is counted by the loop above.
      continue;

    if (!layers_full)
    {
      size_t lookup_size = ext_context.graph.vertices_[p.lookup_index].table_size ();
      hb_set_t visited;
      size_t subtables_size = ext_context.graph.find_subgraph_size (p.lookup_index, visited, 1) - lookup_size;
      size_t remaining_size = p.size - subtables_size - lookup_size;
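
      // This lookup stays non-extension: charge its real subtable bytes to
      // l3_l4 and refund the 8-byte extension wrappers that the loop above
      // provisionally added for it.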
      l3_l4_size   += subtables_size;
      l3_l4_size   -= p.num_subtables * 8;
      l4_plus_size += subtables_size + remaining_size;

      if (l2_l3_size < (1 << 16)
          && l3_l4_size < (1 << 16)
          && l4_plus_size < (1 << 16)) continue; // this lookup fits within all layer groups

      layers_full = true;
    }

    if (!ext_context.lookups.get (p.lookup_index)->make_extension (ext_context, p.lookup_index))
      return false;
  }

  return true;
}

static inline
bool _try_isolating_subgraphs (const hb_vector_t<graph::overflow_record_t>& overflows,
                               graph_t& sorted_graph)
{
  unsigned space = 0;
  hb_set_t roots_to_isolate;

  for (int i = overflows.length - 1; i >= 0; i--)
  {
    const graph::overflow_record_t& r = overflows[i];

    unsigned root;
    unsigned overflow_space = sorted_graph.space_for (r.parent, &root);
    if (!overflow_space) continue;
    if (sorted_graph.num_roots_for_space (overflow_space) <= 1) continue;

    if (!space) {
      space = overflow_space;
    }

    if (space == overflow_space)
      roots_to_isolate.add (root);
  }

  if (!roots_to_isolate) return false;

  unsigned maximum_to_move = hb_max ((sorted_graph.num_roots_for_space (space) / 2u), 1u);
  if (roots_to_isolate.get_population () > maximum_to_move) {
    // Only move at most half of the roots in a space at a time.
    unsigned extra = roots_to_isolate.get_population () - maximum_to_move;
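    // previous () seeded with HB_SET_VALUE_INVALID steps down from the largest
    // element, so the extra roots dropped here are the highest-indexed ones.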
    while (extra--) {
      uint32_t root = HB_SET_VALUE_INVALID;
      roots_to_isolate.previous (&root);
      roots_to_isolate.del (root);
    }
  }

  DEBUG_MSG (SUBSET_REPACK, nullptr,
             "Overflow in space %u (%u roots). Moving %u roots to space %u.",
             space,
             sorted_graph.num_roots_for_space (space),
             roots_to_isolate.get_population (),
             sorted_graph.next_space ());

  sorted_graph.isolate_subgraph (roots_to_isolate);
  sorted_graph.move_to_new_space (roots_to_isolate);

  return true;
}

static inline
bool _resolve_shared_overflow (const hb_vector_t<graph::overflow_record_t>& overflows,
                               int overflow_index,
                               graph_t& sorted_graph)
{
  const graph::overflow_record_t& r = overflows[overflow_index];

  // Find all of the parents in overflowing links that link to this
  // same child node. We will then try duplicating the child node and
  // re-assigning all of these parents to the duplicate.
  hb_set_t parents;
  parents.add (r.parent);
  for (int i = overflow_index - 1; i >= 0; i--) {
    const graph::overflow_record_t& r2 = overflows[i];
    if (r2.child == r.child) {
      parents.add (r2.parent);
    }
  }

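  // duplicate () clones r.child and re-points the links from the given parents
  // at the clone. It returns the new vertex's index, or (unsigned) -1 if the
  // duplication was not performed.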
  unsigned result = sorted_graph.duplicate (&parents, r.child);
  if (result == (unsigned) -1 && parents.get_population () > 2) {
    // All links to the child are overflowing, so we can't include all
    // in the duplication. Remove one parent from the duplication.
    // Remove the lowest index parent, which will be the closest to the child.
    parents.del (parents.get_min ());
    result = sorted_graph.duplicate (&parents, r.child);
  }

  if (result == (unsigned) -1) return false;

  if (parents.get_population () > 1) {
    // If the duplicated node has more than one parent, pre-emptively raise its priority to the maximum.
    // This will place it close to the parents. Nodes with only one parent don't need this, as normal overflow
    // resolution will raise priority if needed.
    //
    // Reasoning: most of the parents of this child are likely at the same layer in the graph. Duplicating
    // the child will theoretically allow it to be placed closer to its parents. However, due to the default
    // shortest-distance sort its placement will remain in the same layer, thus it will remain in roughly the
    // same position (and distance from parents) as the original child node. The overflow resolution will attempt
    // to move nodes closer, but only for non-shared nodes. Since this node is shared, it will simply be given
    // further duplication, which defeats the attempt to duplicate with multiple parents. To fix this we
    // pre-emptively raise priority now, which allows the duplicated node to pack into the same layer as its parents.
    sorted_graph.vertices_[result].give_max_priority ();
  }

  return true;
}

static inline
bool _process_overflows (const hb_vector_t<graph::overflow_record_t>& overflows,
                         hb_set_t& priority_bumped_parents,
                         graph_t& sorted_graph)
{
  bool resolution_attempted = false;

  // Try resolving the furthest overflows first.
  for (int i = overflows.length - 1; i >= 0; i--)
  {
    const graph::overflow_record_t& r = overflows[i];
    const auto& child = sorted_graph.vertices_[r.child];
    if (child.is_shared ())
    {
      // The child object is shared; we may be able to eliminate the overflow
      // by duplicating it.
      if (!_resolve_shared_overflow (overflows, i, sorted_graph)) continue;
      return true;
    }

    if (child.is_leaf () && !priority_bumped_parents.has (r.parent))
    {
      // This object is too far from its parent; attempt to move it closer.
      //
      // TODO(garretrieger): initially limiting this to leaves since they can be
      //                     moved closer with fewer consequences. However, this can
      //                     likely be used for non-leaves as well.
      // TODO(garretrieger): also try lowering the priority of the parent. That makes it
      //                     get placed further up in the ordering, closer to its children.
      //                     This is probably preferable if the total size of the parent object
      //                     is less than the total size of the children (and the parent can be moved),
      //                     since in that case moving the parent will cause a smaller increase in
      //                     the length of other offsets.
      if (sorted_graph.raise_childrens_priority (r.parent)) {
        priority_bumped_parents.add (r.parent);
        resolution_attempted = true;
      }
      continue;
    }

    // TODO(garretrieger): add additional offset resolution strategies
    // - Promotion to extension lookups.
    // - Table splitting.
  }

  return resolution_attempted;
}

inline bool
hb_resolve_graph_overflows (hb_tag_t table_tag,
                            unsigned max_rounds,
                            bool always_recalculate_extensions,
                            graph_t& sorted_graph /* IN/OUT */)
{
  DEBUG_MSG (SUBSET_REPACK, nullptr, "Repacking %c%c%c%c.", HB_UNTAG(table_tag));
  sorted_graph.sort_shortest_distance ();
  if (sorted_graph.in_error ())
  {
    DEBUG_MSG (SUBSET_REPACK, nullptr, "Sorted graph in error state after initial sort.");
    return false;
  }

  bool will_overflow = graph::will_overflow (sorted_graph);
  if (!will_overflow)
    return true;

  bool is_gsub_or_gpos = (table_tag == HB_OT_TAG_GPOS || table_tag == HB_OT_TAG_GSUB);
  graph::gsubgpos_graph_context_t ext_context (table_tag, sorted_graph);
  if (is_gsub_or_gpos && will_overflow)
  {
    DEBUG_MSG (SUBSET_REPACK, nullptr, "Applying GSUB/GPOS repacking specializations.");
    if (always_recalculate_extensions)
    {
      DEBUG_MSG (SUBSET_REPACK, nullptr, "Splitting subtables if needed.");
      if (!_presplit_subtables_if_needed (ext_context)) {
        DEBUG_MSG (SUBSET_REPACK, nullptr, "Subtable splitting failed.");
        return false;
      }

      DEBUG_MSG (SUBSET_REPACK, nullptr, "Promoting lookups to extensions if needed.");
      if (!_promote_extensions_if_needed (ext_context)) {
        DEBUG_MSG (SUBSET_REPACK, nullptr, "Extension promotion failed.");
        return false;
      }
    }

    DEBUG_MSG (SUBSET_REPACK, nullptr, "Assigning spaces to 32 bit subgraphs.");
    if (sorted_graph.assign_spaces ())
      sorted_graph.sort_shortest_distance ();
    else
      sorted_graph.sort_shortest_distance_if_needed ();
  }

  unsigned round = 0;
  hb_vector_t<graph::overflow_record_t> overflows;
  // TODO(garretrieger): select a good limit for max rounds.
  while (!sorted_graph.in_error ()
         && graph::will_overflow (sorted_graph, &overflows)
         && round < max_rounds) {
    DEBUG_MSG (SUBSET_REPACK, nullptr, "=== Overflow resolution round %u ===", round);
    print_overflows (sorted_graph, overflows);

    hb_set_t priority_bumped_parents;

    if (!_try_isolating_subgraphs (overflows, sorted_graph))
    {
      // Don't count space isolation towards the round limit; only increment the
      // round counter if space isolation made no changes.
      round++;
      if (!_process_overflows (overflows, priority_bumped_parents, sorted_graph))
      {
        DEBUG_MSG (SUBSET_REPACK, nullptr, "No resolution available :(");
        break;
      }
    }

    sorted_graph.sort_shortest_distance ();
  }

  if (sorted_graph.in_error ())
  {
    DEBUG_MSG (SUBSET_REPACK, nullptr, "Sorted graph in error state.");
    return false;
  }

  if (graph::will_overflow (sorted_graph))
  {
    if (is_gsub_or_gpos && !always_recalculate_extensions) {
      // If this is a GSUB/GPOS table and we didn't try extension promotion and table splitting,
      // then as a last-ditch effort, re-run the repacker with them enabled.
      DEBUG_MSG (SUBSET_REPACK, nullptr, "Failed to find a resolution. Re-running with extension promotion and table splitting enabled.");
      return hb_resolve_graph_overflows (table_tag, max_rounds, true, sorted_graph);
    }

    DEBUG_MSG (SUBSET_REPACK, nullptr, "Offset overflow resolution failed.");
    return false;
  }

  return true;
}

/*
 * Attempts to modify the topological sorting of the provided object graph to
 * eliminate offset overflows in the links between objects of the graph. If a
 * non-overflowing ordering is found, the updated graph is serialized into a
 * new blob and returned.
 *
 * If necessary, the structure of the graph may be modified in ways that do not
 * affect the functionality of the graph. For example, shared objects may be
 * duplicated.
 *
 * For a detailed writeup describing how the algorithm operates see:
 * docs/repacker.md
 */
template<typename T>
inline hb_blob_t*
hb_resolve_overflows (const T& packed,
                      hb_tag_t table_tag,
                      unsigned max_rounds = 32,
                      bool recalculate_extensions = false) {
  graph_t sorted_graph (packed);
  if (sorted_graph.in_error ())
  {
    // Invalid graph definition.
    return nullptr;
  }

  if (!sorted_graph.is_fully_connected ())
  {
    sorted_graph.print_orphaned_nodes ();
    return nullptr;
  }

  if (sorted_graph.in_error ())
  {
    // Allocations failed somewhere.
    DEBUG_MSG (SUBSET_REPACK, nullptr,
               "Graph is in error, likely due to a memory allocation error.");
    return nullptr;
  }

  if (!hb_resolve_graph_overflows (table_tag, max_rounds, recalculate_extensions, sorted_graph))
    return nullptr;

  return graph::serialize (sorted_graph);
}
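
/*
 * Usage sketch (illustrative, not part of this header): a caller that has
 * finished packing a table with hb_serialize_context_t might repack it
 * roughly like so. The shape mirrors how hb-subset drives the repacker, but
 * treat the exact call site as an assumption.
 *
 *   hb_serialize_context_t c (buf, buf_size);
 *   // ... serialize the table's object graph into c ...
 *   if (c.offset_overflow ())
 *   {
 *     hb_blob_t* result = hb_resolve_overflows (c.object_graph (),
 *                                               HB_OT_TAG_GSUB);
 *     // result is nullptr if no non-overflowing ordering was found.
 *   }
 */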

#endif /* HB_REPACKER_HH */