/*
 * Copyright © 2022  Google, Inc.
 *
 * This is part of HarfBuzz, a text shaping library.
 *
 * Permission is hereby granted, without written agreement and without
 * license or royalty fees, to use, copy, modify, and distribute this
 * software and its documentation for any purpose, provided that the
 * above copyright notice and the following two paragraphs appear in
 * all copies of this software.
 *
 * IN NO EVENT SHALL THE COPYRIGHT HOLDER BE LIABLE TO ANY PARTY FOR
 * DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES
 * ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN
 * IF THE COPYRIGHT HOLDER HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 *
 * THE COPYRIGHT HOLDER SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING,
 * BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
 * FITNESS FOR A PARTICULAR PURPOSE.  THE SOFTWARE PROVIDED HEREUNDER IS
 * ON AN "AS IS" BASIS, AND THE COPYRIGHT HOLDER HAS NO OBLIGATION TO
 * PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
 *
 * Google Author(s): Garret Rieger
 */

#ifndef GRAPH_PAIRPOS_GRAPH_HH
#define GRAPH_PAIRPOS_GRAPH_HH

#include "split-helpers.hh"
#include "coverage-graph.hh"
#include "classdef-graph.hh"
#include "../OT/Layout/GPOS/PairPos.hh"
#include "../OT/Layout/GPOS/PosLookupSubTable.hh"

namespace graph {

struct PairPosFormat1 : public OT::Layout::GPOS_impl::PairPosFormat1_3<SmallTypes>
{
  bool sanitize (graph_t::vertex_t& vertex) const
  {
    int64_t vertex_len = vertex.obj.tail - vertex.obj.head;
    unsigned min_size = OT::Layout::GPOS_impl::PairPosFormat1_3<SmallTypes>::min_size;
    if (vertex_len < min_size) return false;
    hb_barrier ();

    return vertex_len >=
           min_size + pairSet.get_size () - pairSet.len.get_size();
  }

  hb_vector_t<unsigned> split_subtables (gsubgpos_graph_context_t& c,
                                         unsigned parent_index,
                                         unsigned this_index)
  {
    hb_set_t visited;

    const unsigned coverage_id = c.graph.index_for_offset (this_index, &coverage);
    const unsigned coverage_size = c.graph.vertices_[coverage_id].table_size ();
    const unsigned base_size = OT::Layout::GPOS_impl::PairPosFormat1_3<SmallTypes>::min_size;

    unsigned partial_coverage_size = 4;
    unsigned accumulated = base_size;
    hb_vector_t<unsigned> split_points;
    for (unsigned i = 0; i < pairSet.len; i++)
    {
      unsigned pair_set_index = pair_set_graph_index (c, this_index, i);
      unsigned accumulated_delta =
          c.graph.find_subgraph_size (pair_set_index, visited) +
          SmallTypes::size; // for PairSet offset.
      partial_coverage_size += OT::HBUINT16::static_size;

      accumulated += accumulated_delta;
      unsigned total = accumulated + hb_min (partial_coverage_size, coverage_size);

      if (total >= (1 << 16))
      {
        split_points.push (i);
        accumulated = base_size + accumulated_delta;
        partial_coverage_size = 6;
        visited.clear (); // node sharing isn't allowed between splits.
      }
    }

    split_context_t split_context {
      c,
      this,
      c.graph.duplicate_if_shared (parent_index, this_index),
    };

    return actuate_subtable_split<split_context_t> (split_context, split_points);
  }

  private:

  struct split_context_t {
    gsubgpos_graph_context_t& c;
    PairPosFormat1* thiz;
    unsigned this_index;

    unsigned original_count ()
    {
      return thiz->pairSet.len;
    }

    unsigned clone_range (unsigned start, unsigned end)
    {
      return thiz->clone_range (this->c, this->this_index, start, end);
    }

    bool shrink (unsigned count)
    {
      return thiz->shrink (this->c, this->this_index, count);
    }
  };

  bool shrink (gsubgpos_graph_context_t& c,
               unsigned this_index,
               unsigned count)
  {
    DEBUG_MSG (SUBSET_REPACK, nullptr,
               "  Shrinking PairPosFormat1 (%u) to [0, %u).",
               this_index,
               count);
    unsigned old_count = pairSet.len;
    if (count >= old_count)
      return true;

    pairSet.len = count;
    c.graph.vertices_[this_index].obj.tail -= (old_count - count) * SmallTypes::size;

    auto coverage = c.graph.as_mutable_table<Coverage> (this_index, &this->coverage);
    if (!coverage) return false;

    unsigned coverage_size = coverage.vertex->table_size ();
    auto new_coverage =
        + hb_zip (coverage.table->iter (), hb_range ())
        | hb_filter ([&] (hb_pair_t<unsigned, unsigned> p) {
          return p.second < count;
        })
        | hb_map_retains_sorting (hb_first)
        ;

    return Coverage::make_coverage (c, new_coverage, coverage.index, coverage_size);
  }

  // Create a new PairPos including PairSets from start (inclusive) to end (exclusive).
  // Returns the object id of the new object.
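  // The PairSet children in [start, end) are moved (not copied) into the new subtable,
  // and the coverage table is cloned so it only covers the glyphs whose PairSets moved.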
  unsigned clone_range (gsubgpos_graph_context_t& c,
                        unsigned this_index,
                        unsigned start, unsigned end) const
  {
    DEBUG_MSG (SUBSET_REPACK, nullptr,
               "  Cloning PairPosFormat1 (%u) range [%u, %u).", this_index, start, end);

    unsigned num_pair_sets = end - start;
    unsigned prime_size = OT::Layout::GPOS_impl::PairPosFormat1_3<SmallTypes>::min_size
                          + num_pair_sets * SmallTypes::size;

    unsigned pair_pos_prime_id = c.create_node (prime_size);
    if (pair_pos_prime_id == (unsigned) -1) return -1;

    PairPosFormat1* pair_pos_prime = (PairPosFormat1*) c.graph.object (pair_pos_prime_id).head;
    pair_pos_prime->format = this->format;
    pair_pos_prime->valueFormat[0] = this->valueFormat[0];
    pair_pos_prime->valueFormat[1] = this->valueFormat[1];
    pair_pos_prime->pairSet.len = num_pair_sets;

    for (unsigned i = start; i < end; i++)
    {
      c.graph.move_child<> (this_index,
                            &pairSet[i],
                            pair_pos_prime_id,
                            &pair_pos_prime->pairSet[i - start]);
    }

    unsigned coverage_id = c.graph.index_for_offset (this_index, &coverage);
    if (!Coverage::clone_coverage (c,
                                   coverage_id,
                                   pair_pos_prime_id,
                                   2,
                                   start, end))
      return -1;

    return pair_pos_prime_id;
  }

  unsigned pair_set_graph_index (gsubgpos_graph_context_t& c, unsigned this_index, unsigned i) const
  {
    return c.graph.index_for_offset (this_index, &pairSet[i]);
  }
};

struct PairPosFormat2 : public OT::Layout::GPOS_impl::PairPosFormat2_4<SmallTypes>
{
  bool sanitize (graph_t::vertex_t& vertex) const
  {
    size_t vertex_len = vertex.table_size ();
    unsigned min_size = OT::Layout::GPOS_impl::PairPosFormat2_4<SmallTypes>::min_size;
    if (vertex_len < min_size) return false;
    hb_barrier ();

    const unsigned class1_count = class1Count;
    return vertex_len >=
           min_size + class1_count * get_class1_record_size ();
  }

  hb_vector_t<unsigned> split_subtables (gsubgpos_graph_context_t& c,
                                         unsigned parent_index,
                                         unsigned this_index)
  {
    const unsigned base_size = OT::Layout::GPOS_impl::PairPosFormat2_4<SmallTypes>::min_size;
    const unsigned class_def_2_size = size_of (c, this_index, &classDef2);
    const Coverage* coverage = get_coverage (c, this_index);
    const ClassDef* class_def_1 = get_class_def_1 (c, this_index);
    auto gid_and_class =
        + coverage->iter ()
        | hb_map_retains_sorting ([&] (hb_codepoint_t gid) {
          return hb_codepoint_pair_t (gid, class_def_1->get_class (gid));
        })
        ;
    class_def_size_estimator_t estimator (gid_and_class);

    const unsigned class1_count = class1Count;
    const unsigned class2_count = class2Count;
    const unsigned class1_record_size = get_class1_record_size ();

    const unsigned value_1_len = valueFormat1.get_len ();
    const unsigned value_2_len = valueFormat2.get_len ();
    const unsigned total_value_len = value_1_len + value_2_len;

    unsigned accumulated = base_size;
    unsigned coverage_size = 4;
    unsigned class_def_1_size = 4;
    unsigned max_coverage_size = coverage_size;
    unsigned max_class_def_1_size = class_def_1_size;

    hb_vector_t<unsigned> split_points;

    hb_hashmap_t<unsigned, unsigned> device_tables = get_all_device_tables (c, this_index);
    hb_vector_t<unsigned> format1_device_table_indices = valueFormat1.get_device_table_indices ();
    hb_vector_t<unsigned> format2_device_table_indices = valueFormat2.get_device_table_indices ();
    bool has_device_tables = bool(format1_device_table_indices) || bool(format2_device_table_indices);

    hb_set_t visited;
    for (unsigned i = 0; i < class1_count; i++)
    {
      unsigned accumulated_delta = class1_record_size;
      class_def_1_size = estimator.add_class_def_size (i);
      coverage_size = estimator.coverage_size ();
      max_coverage_size = hb_max (max_coverage_size, coverage_size);
      max_class_def_1_size = hb_max (max_class_def_1_size, class_def_1_size);

      if (has_device_tables) {
        for (unsigned j = 0; j < class2_count; j++)
        {
          unsigned value1_index = total_value_len * (class2_count * i + j);
          unsigned value2_index = value1_index + value_1_len;
          accumulated_delta += size_of_value_record_children (c,
                                                              device_tables,
                                                              format1_device_table_indices,
                                                              value1_index,
                                                              visited);
          accumulated_delta += size_of_value_record_children (c,
                                                              device_tables,
                                                              format2_device_table_indices,
                                                              value2_index,
                                                              visited);
        }
      }

      accumulated += accumulated_delta;
      unsigned total = accumulated
                       + coverage_size + class_def_1_size + class_def_2_size
                       // The largest object will pack last and can exceed the size limit.
                       - hb_max (hb_max (coverage_size, class_def_1_size), class_def_2_size);
      if (total >= (1 << 16))
      {
        split_points.push (i);
        // The split does not include i, so add the size for i when we reset the size counters.
        accumulated = base_size + accumulated_delta;

        estimator.reset();
        class_def_1_size = estimator.add_class_def_size(i);
        coverage_size = estimator.coverage_size();
        visited.clear (); // node sharing isn't allowed between splits.
      }
    }

    split_context_t split_context {
      c,
      this,
      c.graph.duplicate_if_shared (parent_index, this_index),
      class1_record_size,
      total_value_len,
      value_1_len,
      value_2_len,
      max_coverage_size,
      max_class_def_1_size,
      device_tables,
      format1_device_table_indices,
      format2_device_table_indices
    };

    return actuate_subtable_split<split_context_t> (split_context, split_points);
  }

  private:

  struct split_context_t
  {
    gsubgpos_graph_context_t& c;
    PairPosFormat2* thiz;
    unsigned this_index;
    unsigned class1_record_size;
    unsigned value_record_len;
    unsigned value1_record_len;
    unsigned value2_record_len;
    unsigned max_coverage_size;
    unsigned max_class_def_size;

    const hb_hashmap_t<unsigned, unsigned>& device_tables;
    const hb_vector_t<unsigned>& format1_device_table_indices;
    const hb_vector_t<unsigned>& format2_device_table_indices;

    unsigned original_count ()
    {
      return thiz->class1Count;
    }

    unsigned clone_range (unsigned start, unsigned end)
    {
      return thiz->clone_range (*this, start, end);
    }

    bool shrink (unsigned count)
    {
      return thiz->shrink (*this, count);
    }
  };

  size_t get_class1_record_size () const
  {
    const size_t class2_count = class2Count;
    return
        class2_count * (valueFormat1.get_size () + valueFormat2.get_size ());
  }

  unsigned clone_range (split_context_t& split_context,
                        unsigned start, unsigned end) const
  {
    DEBUG_MSG (SUBSET_REPACK, nullptr,
               "  Cloning PairPosFormat2 (%u) range [%u, %u).", split_context.this_index, start, end);

    graph_t& graph = split_context.c.graph;

    unsigned num_records = end - start;
    unsigned prime_size = OT::Layout::GPOS_impl::PairPosFormat2_4<SmallTypes>::min_size
                          + num_records * split_context.class1_record_size;

    unsigned pair_pos_prime_id = split_context.c.create_node (prime_size);
    if (pair_pos_prime_id == (unsigned) -1) return -1;

    PairPosFormat2* pair_pos_prime =
        (PairPosFormat2*) graph.object (pair_pos_prime_id).head;
    pair_pos_prime->format = this->format;
    pair_pos_prime->valueFormat1 = this->valueFormat1;
    pair_pos_prime->valueFormat2 = this->valueFormat2;
    pair_pos_prime->class1Count = num_records;
    pair_pos_prime->class2Count = this->class2Count;
    clone_class1_records (split_context,
                          pair_pos_prime_id,
                          start,
                          end);

    unsigned coverage_id =
        graph.index_for_offset (split_context.this_index, &coverage);
    unsigned class_def_1_id =
        graph.index_for_offset (split_context.this_index, &classDef1);
    auto& coverage_v = graph.vertices_[coverage_id];
    auto& class_def_1_v = graph.vertices_[class_def_1_id];
    Coverage* coverage_table = (Coverage*) coverage_v.obj.head;
    ClassDef* class_def_1_table = (ClassDef*) class_def_1_v.obj.head;
    if (!coverage_table
        || !coverage_table->sanitize (coverage_v)
        || !class_def_1_table
        || !class_def_1_table->sanitize (class_def_1_v))
      return -1;

    auto klass_map =
        + coverage_table->iter ()
        | hb_map_retains_sorting ([&] (hb_codepoint_t gid) {
          return hb_codepoint_pair_t (gid, class_def_1_table->get_class (gid));
        })
        | hb_filter ([&] (hb_codepoint_t klass) {
          return klass >= start && klass < end;
        }, hb_second)
        | hb_map_retains_sorting ([&] (hb_codepoint_pair_t gid_and_class) {
          // Classes must be from 0...N so subtract start.
          return hb_codepoint_pair_t (gid_and_class.first, gid_and_class.second - start);
        })
        ;

    if (!Coverage::add_coverage (split_context.c,
                                 pair_pos_prime_id,
                                 2,
                                 + klass_map | hb_map_retains_sorting (hb_first),
                                 split_context.max_coverage_size))
      return -1;

    // classDef1
    if (!ClassDef::add_class_def (split_context.c,
                                  pair_pos_prime_id,
                                  8,
                                  + klass_map,
                                  split_context.max_class_def_size))
      return -1;

    // classDef2
    unsigned class_def_2_id =
        graph.index_for_offset (split_context.this_index, &classDef2);
    auto* class_def_link = graph.vertices_[pair_pos_prime_id].obj.real_links.push ();
    class_def_link->width = SmallTypes::size;
    class_def_link->objidx = class_def_2_id;
    class_def_link->position = 10;
    graph.vertices_[class_def_2_id].add_parent (pair_pos_prime_id);
    graph.duplicate (pair_pos_prime_id, class_def_2_id);

    return pair_pos_prime_id;
  }

  void clone_class1_records (split_context_t& split_context,
                             unsigned pair_pos_prime_id,
                             unsigned start, unsigned end) const
  {
    PairPosFormat2* pair_pos_prime =
        (PairPosFormat2*) split_context.c.graph.object (pair_pos_prime_id).head;

    char* start_addr = ((char*)&values[0]) + start * split_context.class1_record_size;
    unsigned num_records = end - start;
    hb_memcpy (&pair_pos_prime->values[0],
               start_addr,
               num_records * split_context.class1_record_size);

    if (!split_context.format1_device_table_indices
        && !split_context.format2_device_table_indices)
      // No device tables to move over.
      return;

    unsigned class2_count = class2Count;
    for (unsigned i = start; i < end; i++)
    {
      for (unsigned j = 0; j < class2_count; j++)
      {
        unsigned value1_index = split_context.value_record_len * (class2_count * i + j);
        unsigned value2_index = value1_index + split_context.value1_record_len;

        unsigned new_value1_index = split_context.value_record_len * (class2_count * (i - start) + j);
        unsigned new_value2_index = new_value1_index + split_context.value1_record_len;

        transfer_device_tables (split_context,
                                pair_pos_prime_id,
                                split_context.format1_device_table_indices,
                                value1_index,
                                new_value1_index);

        transfer_device_tables (split_context,
                                pair_pos_prime_id,
                                split_context.format2_device_table_indices,
                                value2_index,
                                new_value2_index);
      }
    }
  }

  void transfer_device_tables (split_context_t& split_context,
                               unsigned pair_pos_prime_id,
                               const hb_vector_t<unsigned>& device_table_indices,
                               unsigned old_value_record_index,
                               unsigned new_value_record_index) const
  {
    PairPosFormat2* pair_pos_prime =
        (PairPosFormat2*) split_context.c.graph.object (pair_pos_prime_id).head;

    for (unsigned i : device_table_indices)
    {
      OT::Offset16* record = (OT::Offset16*) &values[old_value_record_index + i];
      unsigned record_position = ((char*) record) - ((char*) this);
      if (!split_context.device_tables.has (record_position)) continue;

      split_context.c.graph.move_child (
          split_context.this_index,
          record,
          pair_pos_prime_id,
          (OT::Offset16*) &pair_pos_prime->values[new_value_record_index + i]);
    }
  }

  bool shrink (split_context_t& split_context,
               unsigned count)
  {
    DEBUG_MSG (SUBSET_REPACK, nullptr,
               "  Shrinking PairPosFormat2 (%u) to [0, %u).",
               split_context.this_index,
               count);
    unsigned old_count = class1Count;
    if (count >= old_count)
      return true;

    graph_t& graph = split_context.c.graph;
    class1Count = count;
    graph.vertices_[split_context.this_index].obj.tail -=
        (old_count - count) * split_context.class1_record_size;

    auto coverage =
        graph.as_mutable_table<Coverage> (split_context.this_index, &this->coverage);
    if (!coverage) return false;

    auto class_def_1 =
        graph.as_mutable_table<ClassDef> (split_context.this_index, &classDef1);
    if (!class_def_1) return false;

    auto klass_map =
        + coverage.table->iter ()
        | hb_map_retains_sorting ([&] (hb_codepoint_t gid) {
          return hb_codepoint_pair_t (gid, class_def_1.table->get_class (gid));
        })
        | hb_filter ([&] (hb_codepoint_t klass) {
          return klass < count;
        }, hb_second)
        ;

    auto new_coverage = + klass_map | hb_map_retains_sorting (hb_first);
    if (!Coverage::make_coverage (split_context.c,
                                  + new_coverage,
                                  coverage.index,
                                  // Existing ranges may not be kept; worst case size is a format 1
                                  // coverage table.
                                  4 + new_coverage.len() * 2))
      return false;

    return ClassDef::make_class_def (split_context.c,
                                     + klass_map,
                                     class_def_1.index,
                                     class_def_1.vertex->table_size ());
  }

  hb_hashmap_t<unsigned, unsigned>
  get_all_device_tables (gsubgpos_graph_context_t& c,
                         unsigned this_index) const
  {
    const auto& v = c.graph.vertices_[this_index];
    return v.position_to_index_map ();
  }

  const Coverage* get_coverage (gsubgpos_graph_context_t& c,
                                unsigned this_index) const
  {
    unsigned coverage_id = c.graph.index_for_offset (this_index, &coverage);
    auto& coverage_v = c.graph.vertices_[coverage_id];

    Coverage* coverage_table = (Coverage*) coverage_v.obj.head;
    if (!coverage_table || !coverage_table->sanitize (coverage_v))
      return &Null(Coverage);
    return coverage_table;
  }

  const ClassDef* get_class_def_1 (gsubgpos_graph_context_t& c,
                                   unsigned this_index) const
  {
    unsigned class_def_1_id = c.graph.index_for_offset (this_index, &classDef1);
    auto& class_def_1_v = c.graph.vertices_[class_def_1_id];

    ClassDef* class_def_1_table = (ClassDef*) class_def_1_v.obj.head;
    if (!class_def_1_table || !class_def_1_table->sanitize (class_def_1_v))
      return &Null(ClassDef);
    return class_def_1_table;
  }

  unsigned size_of_value_record_children (gsubgpos_graph_context_t& c,
                                          const hb_hashmap_t<unsigned, unsigned>& device_tables,
                                          const hb_vector_t<unsigned> device_table_indices,
                                          unsigned value_record_index,
                                          hb_set_t& visited)
  {
    unsigned size = 0;
    for (unsigned i : device_table_indices)
    {
      OT::Layout::GPOS_impl::Value* record = &values[value_record_index + i];
      unsigned record_position = ((char*) record) - ((char*) this);
      unsigned* obj_idx;
      if (!device_tables.has (record_position, &obj_idx)) continue;
      size += c.graph.find_subgraph_size (*obj_idx, visited);
    }
    return size;
  }

  unsigned size_of (gsubgpos_graph_context_t& c,
                    unsigned this_index,
                    const void* offset) const
  {
    const unsigned id = c.graph.index_for_offset (this_index, offset);
    return c.graph.vertices_[id].table_size ();
  }
};

struct PairPos : public OT::Layout::GPOS_impl::PairPos
{
  hb_vector_t<unsigned> split_subtables (gsubgpos_graph_context_t& c,
                                         unsigned parent_index,
                                         unsigned this_index)
  {
    switch (u.format) {
    case 1:
      return ((PairPosFormat1*)(&u.format1))->split_subtables (c, parent_index, this_index);
    case 2:
      return ((PairPosFormat2*)(&u.format2))->split_subtables (c, parent_index, this_index);
#ifndef HB_NO_BEYOND_64K
    case 3: HB_FALLTHROUGH;
    case 4: HB_FALLTHROUGH;
      // Don't split 24-bit PairPos subtables.
#endif
    default:
      return hb_vector_t<unsigned> ();
    }
  }

  bool sanitize (graph_t::vertex_t& vertex) const
  {
    int64_t vertex_len = vertex.obj.tail - vertex.obj.head;
    if (vertex_len < u.format.get_size ()) return false;
    hb_barrier ();

    switch (u.format) {
    case 1:
      return ((PairPosFormat1*)(&u.format1))->sanitize (vertex);
    case 2:
      return ((PairPosFormat2*)(&u.format2))->sanitize (vertex);
#ifndef HB_NO_BEYOND_64K
    case 3: HB_FALLTHROUGH;
    case 4: HB_FALLTHROUGH;
#endif
    default:
      // We don't handle formats 3 and 4 here.
      return false;
    }
  }
};

}

#endif  // GRAPH_PAIRPOS_GRAPH_HH