/*
 * Copyright © 2019 Adobe Inc.
 * Copyright © 2019 Ebrahim Byagowi
 *
 * This is part of HarfBuzz, a text shaping library.
 *
 * Permission is hereby granted, without written agreement and without
 * license or royalty fees, to use, copy, modify, and distribute this
 * software and its documentation for any purpose, provided that the
 * above copyright notice and the following two paragraphs appear in
 * all copies of this software.
 *
 * IN NO EVENT SHALL THE COPYRIGHT HOLDER BE LIABLE TO ANY PARTY FOR
 * DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES
 * ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN
 * IF THE COPYRIGHT HOLDER HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 *
 * THE COPYRIGHT HOLDER SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING,
 * BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
 * FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS
 * ON AN "AS IS" BASIS, AND THE COPYRIGHT HOLDER HAS NO OBLIGATION TO
 * PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
 *
 * Adobe Author(s): Michiharu Ariza
 */

#ifndef HB_OT_VAR_GVAR_TABLE_HH
#define HB_OT_VAR_GVAR_TABLE_HH

#include "hb-open-type.hh"
#include "hb-ot-var-common.hh"

/*
 * gvar -- Glyph Variation Table
 * https://docs.microsoft.com/en-us/typography/opentype/spec/gvar
 */
#define HB_OT_TAG_gvar HB_TAG('g','v','a','r')

namespace OT {

struct GlyphVariationData : TupleVariationData
{};

struct glyph_variations_t
{
  using tuple_variations_t = TupleVariationData::tuple_variations_t;
  hb_vector_t<tuple_variations_t> glyph_variations;

  hb_vector_t<char> compiled_shared_tuples;
  private:
  unsigned shared_tuples_count = 0;

  /* shared coords -> index map after instantiation */
  hb_hashmap_t<const hb_vector_t<char>*, unsigned> shared_tuples_idx_map;

  public:
  unsigned compiled_shared_tuples_count () const
  { return shared_tuples_count; }

  unsigned compiled_byte_size () const
  {
    unsigned byte_size = 0;
    for (const auto& _ : glyph_variations)
      byte_size += _.get_compiled_byte_size ();

    return byte_size;
  }
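
  /* Descriptive note (added for clarity, not from the spec): the function below
   * parses the raw GlyphVariationData of every glyph retained by the subset
   * plan into tuple_variations_t objects.  One entry is pushed per glyph, an
   * empty one when the glyph has no usable variation data, so glyph_variations
   * stays index-aligned with plan->new_to_old_gid_list. */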
  bool create_from_glyphs_var_data (unsigned axis_count,
                                    const hb_array_t<const F2DOT14> shared_tuples,
                                    const hb_subset_plan_t *plan,
                                    const hb_hashmap_t<hb_codepoint_t, hb_bytes_t>& new_gid_var_data_map)
  {
    if (unlikely (!glyph_variations.alloc (plan->new_to_old_gid_list.length, true)))
      return false;

    auto it = hb_iter (plan->new_to_old_gid_list);
    for (auto &_ : it)
    {
      hb_codepoint_t new_gid = _.first;
      contour_point_vector_t *all_contour_points;
      if (!new_gid_var_data_map.has (new_gid) ||
          !plan->new_gid_contour_points_map.has (new_gid, &all_contour_points))
        return false;
      hb_bytes_t var_data = new_gid_var_data_map.get (new_gid);

      const GlyphVariationData* p = reinterpret_cast<const GlyphVariationData*> (var_data.arrayZ);
      hb_vector_t<unsigned> shared_indices;
      GlyphVariationData::tuple_iterator_t iterator;
      tuple_variations_t tuple_vars;

      /* in case variation data is empty, push an empty struct into the vector,
       * keep the vector in sync with the new_to_old_gid_list */
      if (!var_data || !p->has_data () || !all_contour_points->length ||
          !GlyphVariationData::get_tuple_iterator (var_data, axis_count,
                                                   var_data.arrayZ,
                                                   shared_indices, &iterator))
      {
        glyph_variations.push (std::move (tuple_vars));
        continue;
      }

      bool is_composite_glyph = false;
      is_composite_glyph = plan->composite_new_gids.has (new_gid);

      if (!p->decompile_tuple_variations (all_contour_points->length, true /* is_gvar */,
                                          iterator, &(plan->axes_old_index_tag_map),
                                          shared_indices, shared_tuples,
                                          tuple_vars, /* OUT */
                                          is_composite_glyph))
        return false;
      glyph_variations.push (std::move (tuple_vars));
    }
    return !glyph_variations.in_error () && glyph_variations.length == plan->new_to_old_gid_list.length;
  }

  bool instantiate (const hb_subset_plan_t *plan)
  {
    unsigned count = plan->new_to_old_gid_list.length;
    bool iup_optimize = false;
    iup_optimize = plan->flags & HB_SUBSET_FLAGS_OPTIMIZE_IUP_DELTAS;
    for (unsigned i = 0; i < count; i++)
    {
      hb_codepoint_t new_gid = plan->new_to_old_gid_list[i].first;
      contour_point_vector_t *all_points;
      if (!plan->new_gid_contour_points_map.has (new_gid, &all_points))
        return false;
      if (!glyph_variations[i].instantiate (plan->axes_location, plan->axes_triple_distances, all_points, iup_optimize))
        return false;
    }
    return true;
  }

  bool compile_bytes (const hb_map_t& axes_index_map,
                      const hb_map_t& axes_old_index_tag_map)
  {
    if (!compile_shared_tuples (axes_index_map, axes_old_index_tag_map))
      return false;
    for (tuple_variations_t& vars: glyph_variations)
      if (!vars.compile_bytes (axes_index_map, axes_old_index_tag_map,
                               true, /* use shared points */
                               &shared_tuples_idx_map))
        return false;

    return true;
  }
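
  /* Illustrative note (added, not normative): peak-coordinate tuples used by
   * more than one tuple variation are pulled into the shared tuples array and
   * referenced by index, most frequent first.  For example, if the compiled
   * peak coords for {wght=1.0} appear in 40 tuple variations and those for
   * {wdth=-1.0} in 3, both become shared tuples with {wght=1.0} at index 0;
   * a peak used only once stays private to its glyph. */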
  bool compile_shared_tuples (const hb_map_t& axes_index_map,
                              const hb_map_t& axes_old_index_tag_map)
  {
    /* key is pointer to compiled_peak_coords inside each tuple, hashing
     * function will always deref pointers first */
    hb_hashmap_t<const hb_vector_t<char>*, unsigned> coords_count_map;

    /* count the num of shared coords */
    for (tuple_variations_t& vars: glyph_variations)
    {
      for (tuple_delta_t& var : vars.tuple_vars)
      {
        if (!var.compile_peak_coords (axes_index_map, axes_old_index_tag_map))
          return false;
        unsigned* count;
        if (coords_count_map.has (&(var.compiled_peak_coords), &count))
          coords_count_map.set (&(var.compiled_peak_coords), *count + 1);
        else
          coords_count_map.set (&(var.compiled_peak_coords), 1);
      }
    }

    if (!coords_count_map || coords_count_map.in_error ())
      return false;

    /* add only those coords that are used more than once into the vector and sort */
    hb_vector_t<const hb_vector_t<char>*> shared_coords;
    if (unlikely (!shared_coords.alloc (coords_count_map.get_population ())))
      return false;

    for (const auto _ : coords_count_map.iter ())
    {
      if (_.second == 1) continue;
      shared_coords.push (_.first);
    }

    /* no shared tuples: no coords are used more than once */
    if (!shared_coords) return true;
    /* sorting based on the coords frequency first (high to low), then compare
     * the coords bytes */
    hb_qsort (shared_coords.arrayZ, shared_coords.length, sizeof (hb_vector_t<char>*), _cmp_coords, (void *) (&coords_count_map));

    /* build shared_coords->idx map and shared tuples byte array */

    shared_tuples_count = hb_min (0xFFFu + 1, shared_coords.length);
    unsigned len = shared_tuples_count * (shared_coords[0]->length);
    if (unlikely (!compiled_shared_tuples.alloc (len)))
      return false;

    for (unsigned i = 0; i < shared_tuples_count; i++)
    {
      shared_tuples_idx_map.set (shared_coords[i], i);
      /* add a concat() in hb_vector_t? */
      for (char c : shared_coords[i]->iter ())
        compiled_shared_tuples.push (c);
    }

    return true;
  }

  static int _cmp_coords (const void *pa, const void *pb, void *arg)
  {
    const hb_hashmap_t<const hb_vector_t<char>*, unsigned>* coords_count_map =
        reinterpret_cast<const hb_hashmap_t<const hb_vector_t<char>*, unsigned>*> (arg);

    /* shared_coords is hb_vector_t<const hb_vector_t<char>*> so casting pa/pb
     * to be a pointer to a pointer */
    const hb_vector_t<char>** a = reinterpret_cast<const hb_vector_t<char>**> (const_cast<void*>(pa));
    const hb_vector_t<char>** b = reinterpret_cast<const hb_vector_t<char>**> (const_cast<void*>(pb));

    bool has_a = coords_count_map->has (*a);
    bool has_b = coords_count_map->has (*b);

    if (has_a && has_b)
    {
      unsigned a_num = coords_count_map->get (*a);
      unsigned b_num = coords_count_map->get (*b);

      if (a_num != b_num)
        return b_num - a_num;

      return (*b)->as_array ().cmp ((*a)->as_array ());
    }
    else if (has_a) return -1;
    else if (has_b) return 1;
    else return 0;
  }

  template<typename Iterator,
           hb_requires (hb_is_iterator (Iterator))>
  bool serialize_glyph_var_data (hb_serialize_context_t *c,
                                 Iterator it,
                                 bool long_offset,
                                 unsigned num_glyphs,
                                 char* glyph_var_data_offsets /* OUT: glyph var data offsets array */) const
  {
    TRACE_SERIALIZE (this);

    if (long_offset)
    {
      ((HBUINT32 *) glyph_var_data_offsets)[0] = 0;
      glyph_var_data_offsets += 4;
    }
    else
    {
      ((HBUINT16 *) glyph_var_data_offsets)[0] = 0;
      glyph_var_data_offsets += 2;
    }
    unsigned glyph_offset = 0;
    hb_codepoint_t last_gid = 0;
    unsigned idx = 0;

    TupleVariationData* cur_glyph = c->start_embed<TupleVariationData> ();
    if (!cur_glyph) return_trace (false);
    for (auto &_ : it)
    {
      hb_codepoint_t gid = _.first;
      if (long_offset)
        for (; last_gid < gid; last_gid++)
          ((HBUINT32 *) glyph_var_data_offsets)[last_gid] = glyph_offset;
      else
        for (; last_gid < gid; last_gid++)
          ((HBUINT16 *) glyph_var_data_offsets)[last_gid] = glyph_offset / 2;

      if (idx >= glyph_variations.length) return_trace (false);
      if (!cur_glyph->serialize (c, true, glyph_variations[idx])) return_trace (false);
      TupleVariationData* next_glyph = c->start_embed<TupleVariationData> ();
      glyph_offset += (char *) next_glyph - (char *) cur_glyph;

      if (long_offset)
        ((HBUINT32 *) glyph_var_data_offsets)[gid] = glyph_offset;
      else
        ((HBUINT16 *) glyph_var_data_offsets)[gid] = glyph_offset / 2;

      last_gid++;
      idx++;
      cur_glyph = next_glyph;
    }

    if (long_offset)
      for (; last_gid < num_glyphs; last_gid++)
        ((HBUINT32 *) glyph_var_data_offsets)[last_gid] = glyph_offset;
    else
      for (; last_gid < num_glyphs; last_gid++)
        ((HBUINT16 *) glyph_var_data_offsets)[last_gid] = glyph_offset / 2;
    return_trace (true);
  }
};
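
/* Overview (an added summary of the code below, not part of the spec): when
 * subsetting with pinned or limited axes, gvar::instantiate () drives the
 * glyph_variations_t pipeline above, roughly:
 *
 *   glyph_variations_t vars;
 *   decompile_glyph_variations (c, vars);   // parse per-glyph tuple variations
 *   vars.instantiate (c->plan);             // apply axis limits, optionally IUP-optimize
 *   vars.compile_bytes (axes_index_map, axes_old_index_tag_map); // rebuild bytes + shared tuples
 *   serialize (c->serializer, vars, ...);   // write the new gvar table
 */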

struct gvar
{
  static constexpr hb_tag_t tableTag = HB_OT_TAG_gvar;

  bool sanitize_shallow (hb_sanitize_context_t *c) const
  {
    TRACE_SANITIZE (this);
    return_trace (c->check_struct (this) &&
                  hb_barrier () &&
                  (version.major == 1) &&
                  sharedTuples.sanitize (c, this, axisCount * sharedTupleCount) &&
                  (is_long_offset () ?
                     c->check_array (get_long_offset_array (), c->get_num_glyphs () + 1) :
                     c->check_array (get_short_offset_array (), c->get_num_glyphs () + 1)));
  }

  /* GlyphVariationData not sanitized here; must be checked while accessing each glyph variation data */
  bool sanitize (hb_sanitize_context_t *c) const
  { return sanitize_shallow (c); }

  bool decompile_glyph_variations (hb_subset_context_t *c,
                                   glyph_variations_t& glyph_vars /* OUT */) const
  {
    hb_hashmap_t<hb_codepoint_t, hb_bytes_t> new_gid_var_data_map;
    auto it = hb_iter (c->plan->new_to_old_gid_list);
    if (it->first == 0 && !(c->plan->flags & HB_SUBSET_FLAGS_NOTDEF_OUTLINE))
    {
      new_gid_var_data_map.set (0, hb_bytes_t ());
      it++;
    }

    for (auto &_ : it)
    {
      hb_codepoint_t new_gid = _.first;
      hb_codepoint_t old_gid = _.second;
      hb_bytes_t var_data_bytes = get_glyph_var_data_bytes (c->source_blob, glyphCountX, old_gid);
      new_gid_var_data_map.set (new_gid, var_data_bytes);
    }

    if (new_gid_var_data_map.in_error ()) return false;

    hb_array_t<const F2DOT14> shared_tuples = (this+sharedTuples).as_array ((unsigned) sharedTupleCount * (unsigned) axisCount);
    return glyph_vars.create_from_glyphs_var_data (axisCount, shared_tuples, c->plan, new_gid_var_data_map);
  }
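
  /* Added note on this serializer's layout (an observation of the code below,
   * not a spec requirement): the new table is written as the gvar header, then
   * the GlyphVariationData offsets array, then the compiled shared tuples, and
   * finally the per-glyph GlyphVariationData, with sharedTuples and dataZ
   * patched to point at the last two blocks. */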
  template<typename Iterator,
           hb_requires (hb_is_iterator (Iterator))>
  bool serialize (hb_serialize_context_t *c,
                  const glyph_variations_t& glyph_vars,
                  Iterator it,
                  unsigned axis_count,
                  unsigned num_glyphs,
                  bool force_long_offsets) const
  {
    TRACE_SERIALIZE (this);
    gvar *out = c->allocate_min<gvar> ();
    if (unlikely (!out)) return_trace (false);

    out->version.major = 1;
    out->version.minor = 0;
    out->axisCount = axis_count;
    out->glyphCountX = hb_min (0xFFFFu, num_glyphs);

    unsigned glyph_var_data_size = glyph_vars.compiled_byte_size ();
    /* According to the spec: If the short format (Offset16) is used for offsets,
     * the value stored is the offset divided by 2, so the maximum data size should
     * be 2 * 0xFFFFu, which is 0x1FFFEu */
    bool long_offset = glyph_var_data_size > 0x1FFFEu || force_long_offsets;
    out->flags = long_offset ? 1 : 0;

    HBUINT8 *glyph_var_data_offsets = c->allocate_size<HBUINT8> ((long_offset ? 4 : 2) * (num_glyphs + 1), false);
    if (!glyph_var_data_offsets) return_trace (false);

    /* shared tuples */
    unsigned shared_tuple_count = glyph_vars.compiled_shared_tuples_count ();
    out->sharedTupleCount = shared_tuple_count;

    if (!shared_tuple_count)
      out->sharedTuples = 0;
    else
    {
      hb_array_t<const char> shared_tuples = glyph_vars.compiled_shared_tuples.as_array ().copy (c);
      if (!shared_tuples.arrayZ) return_trace (false);
      out->sharedTuples = shared_tuples.arrayZ - (char *) out;
    }

    char *glyph_var_data = c->start_embed<char> ();
    if (!glyph_var_data) return_trace (false);
    out->dataZ = glyph_var_data - (char *) out;

    return_trace (glyph_vars.serialize_glyph_var_data (c, it, long_offset, num_glyphs,
                                                       (char *) glyph_var_data_offsets));
  }

  bool instantiate (hb_subset_context_t *c) const
  {
    TRACE_SUBSET (this);
    glyph_variations_t glyph_vars;
    if (!decompile_glyph_variations (c, glyph_vars))
      return_trace (false);

    if (!glyph_vars.instantiate (c->plan)) return_trace (false);
    if (!glyph_vars.compile_bytes (c->plan->axes_index_map, c->plan->axes_old_index_tag_map))
      return_trace (false);

    unsigned axis_count = c->plan->axes_index_map.get_population ();
    unsigned num_glyphs = c->plan->num_output_glyphs ();
    auto it = hb_iter (c->plan->new_to_old_gid_list);

    bool force_long_offsets = false;
#ifdef HB_EXPERIMENTAL_API
    force_long_offsets = c->plan->flags & HB_SUBSET_FLAGS_IFTB_REQUIREMENTS;
#endif
    return_trace (serialize (c->serializer, glyph_vars, it, axis_count, num_glyphs, force_long_offsets));
  }
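
  /* Added note: subset () below has two paths.  With normalized_coords (axes
   * pinned or limited) it re-instantiates the variation data via instantiate ()
   * above; otherwise it copies each retained glyph's GlyphVariationData
   * verbatim and only rebuilds the offsets array for the new glyph ids. */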
  bool subset (hb_subset_context_t *c) const
  {
    TRACE_SUBSET (this);
    if (c->plan->all_axes_pinned)
      return_trace (false);

    if (c->plan->normalized_coords)
      return_trace (instantiate (c));

    unsigned glyph_count = version.to_int () ? c->plan->source->get_num_glyphs () : 0;

    gvar *out = c->serializer->allocate_min<gvar> ();
    if (unlikely (!out)) return_trace (false);

    out->version.major = 1;
    out->version.minor = 0;
    out->axisCount = axisCount;
    out->sharedTupleCount = sharedTupleCount;

    unsigned int num_glyphs = c->plan->num_output_glyphs ();
    out->glyphCountX = hb_min (0xFFFFu, num_glyphs);

    auto it = hb_iter (c->plan->new_to_old_gid_list);
    if (it->first == 0 && !(c->plan->flags & HB_SUBSET_FLAGS_NOTDEF_OUTLINE))
      it++;
    unsigned int subset_data_size = 0;
    for (auto &_ : it)
    {
      hb_codepoint_t old_gid = _.second;
      subset_data_size += get_glyph_var_data_bytes (c->source_blob, glyph_count, old_gid).length;
    }

    /* According to the spec: If the short format (Offset16) is used for offsets,
     * the value stored is the offset divided by 2, so the maximum data size should
     * be 2 * 0xFFFFu, which is 0x1FFFEu */
    bool long_offset = subset_data_size > 0x1FFFEu;
#ifdef HB_EXPERIMENTAL_API
    long_offset = long_offset || (c->plan->flags & HB_SUBSET_FLAGS_IFTB_REQUIREMENTS);
#endif
    out->flags = long_offset ? 1 : 0;

    HBUINT8 *subset_offsets = c->serializer->allocate_size<HBUINT8> ((long_offset ? 4 : 2) * (num_glyphs + 1), false);
    if (!subset_offsets) return_trace (false);

    /* shared tuples */
    if (!sharedTupleCount || !sharedTuples)
      out->sharedTuples = 0;
    else
    {
      unsigned int shared_tuple_size = F2DOT14::static_size * axisCount * sharedTupleCount;
      F2DOT14 *tuples = c->serializer->allocate_size<F2DOT14> (shared_tuple_size);
      if (!tuples) return_trace (false);
      out->sharedTuples = (char *) tuples - (char *) out;
      hb_memcpy (tuples, this+sharedTuples, shared_tuple_size);
    }

    /* This ordering relative to the shared tuples array, which puts the glyphVariationData
     * last in the table, is required when HB_SUBSET_FLAGS_IFTB_REQUIREMENTS is set */
    char *subset_data = c->serializer->allocate_size<char> (subset_data_size, false);
    if (!subset_data) return_trace (false);
    out->dataZ = subset_data - (char *) out;

    if (long_offset)
    {
      ((HBUINT32 *) subset_offsets)[0] = 0;
      subset_offsets += 4;
    }
    else
    {
      ((HBUINT16 *) subset_offsets)[0] = 0;
      subset_offsets += 2;
    }
    unsigned int glyph_offset = 0;

    hb_codepoint_t last = 0;
    it = hb_iter (c->plan->new_to_old_gid_list);
    if (it->first == 0 && !(c->plan->flags & HB_SUBSET_FLAGS_NOTDEF_OUTLINE))
      it++;
    for (auto &_ : it)
    {
      hb_codepoint_t gid = _.first;
      hb_codepoint_t old_gid = _.second;

      if (long_offset)
        for (; last < gid; last++)
          ((HBUINT32 *) subset_offsets)[last] = glyph_offset;
      else
        for (; last < gid; last++)
          ((HBUINT16 *) subset_offsets)[last] = glyph_offset / 2;

      hb_bytes_t var_data_bytes = get_glyph_var_data_bytes (c->source_blob,
                                                            glyph_count,
                                                            old_gid);

      hb_memcpy (subset_data, var_data_bytes.arrayZ, var_data_bytes.length);
      subset_data += var_data_bytes.length;
      glyph_offset += var_data_bytes.length;

      if (long_offset)
        ((HBUINT32 *) subset_offsets)[gid] = glyph_offset;
      else
        ((HBUINT16 *) subset_offsets)[gid] = glyph_offset / 2;

      last++; // Skip over gid
    }

    if (long_offset)
      for (; last < num_glyphs; last++)
        ((HBUINT32 *) subset_offsets)[last] = glyph_offset;
    else
      for (; last < num_glyphs; last++)
        ((HBUINT16 *) subset_offsets)[last] = glyph_offset / 2;

    return_trace (true);
  }

  protected:
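  /* Added worked example (illustrative, not from the source text): the table
   * stores num_glyphs + 1 offsets; glyph gid's data occupies
   * [offset[gid], offset[gid+1]) relative to dataZ, so its length is
   * offset[gid+1] - offset[gid].  In the short format each stored value is the
   * byte offset divided by 2, e.g. data starting at byte 6 and ending at byte
   * 10 is stored as 3 and 5; get_offset () scales these back up. */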
  const hb_bytes_t get_glyph_var_data_bytes (hb_blob_t *blob,
                                             unsigned glyph_count,
                                             hb_codepoint_t glyph) const
  {
    unsigned start_offset = get_offset (glyph_count, glyph);
    unsigned end_offset = get_offset (glyph_count, glyph+1);
    if (unlikely (end_offset < start_offset)) return hb_bytes_t ();
    unsigned length = end_offset - start_offset;
    hb_bytes_t var_data = blob->as_bytes ().sub_array (((unsigned) dataZ) + start_offset, length);
    return likely (var_data.length >= GlyphVariationData::min_size) ? var_data : hb_bytes_t ();
  }

  bool is_long_offset () const { return flags & 1; }

  unsigned get_offset (unsigned glyph_count, unsigned i) const
  {
    if (unlikely (i > glyph_count)) return 0;
    hb_barrier ();
    return is_long_offset () ? get_long_offset_array ()[i] : get_short_offset_array ()[i] * 2;
  }

  const HBUINT32 *get_long_offset_array () const { return (const HBUINT32 *) &offsetZ; }
  const HBUINT16 *get_short_offset_array () const { return (const HBUINT16 *) &offsetZ; }

  public:
  struct accelerator_t
  {
    accelerator_t (hb_face_t *face)
    {
      table = hb_sanitize_context_t ().reference_table<gvar> (face);
      /* If sanitize failed, set glyphCount to 0. */
      glyphCount = table->version.to_int () ? face->get_num_glyphs () : 0;

      /* For shared tuples that have only one or two axes active, cache the
       * indices of those axes. This will speed up calculate_scalar() a lot
       * for fonts with lots of axes and many "monovar" tuples. */
      hb_array_t<const F2DOT14> shared_tuples = (table+table->sharedTuples).as_array (table->sharedTupleCount * table->axisCount);
      unsigned count = table->sharedTupleCount;
      if (unlikely (!shared_tuple_active_idx.resize (count, false))) return;
      unsigned axis_count = table->axisCount;
      for (unsigned i = 0; i < count; i++)
      {
        hb_array_t<const F2DOT14> tuple = shared_tuples.sub_array (axis_count * i, axis_count);
        int idx1 = -1, idx2 = -1;
        for (unsigned j = 0; j < axis_count; j++)
        {
          const F2DOT14 &peak = tuple.arrayZ[j];
          if (peak.to_int () != 0)
          {
            if (idx1 == -1)
              idx1 = j;
            else if (idx2 == -1)
              idx2 = j;
            else
            {
              idx1 = idx2 = -1;
              break;
            }
          }
        }
        shared_tuple_active_idx.arrayZ[i] = {idx1, idx2};
      }
    }
    ~accelerator_t () { table.destroy (); }

    private:
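    /* Added worked example (illustrative only): infer_delta () below computes
     * the delta of an unreferenced point from the referenced neighbours prev
     * and next bounding its gap, separately for x and y.  If the target
     * coordinate lies between the neighbours it interpolates linearly: with
     * prev_val = 10, next_val = 20, target_val = 15, prev_delta = 2 and
     * next_delta = 6, r = (15 - 10) / (20 - 10) = 0.5 and the inferred delta
     * is 2 + 0.5 * (6 - 2) = 4.  A target outside that range takes the delta
     * of the neighbour on the nearer side rather than extrapolating. */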
    static float infer_delta (const hb_array_t<contour_point_t> points,
                              const hb_array_t<contour_point_t> deltas,
                              unsigned int target, unsigned int prev, unsigned int next,
                              float contour_point_t::*m)
    {
      float target_val = points.arrayZ[target].*m;
      float prev_val = points.arrayZ[prev].*m;
      float next_val = points.arrayZ[next].*m;
      float prev_delta = deltas.arrayZ[prev].*m;
      float next_delta = deltas.arrayZ[next].*m;

      if (prev_val == next_val)
        return (prev_delta == next_delta) ? prev_delta : 0.f;
      else if (target_val <= hb_min (prev_val, next_val))
        return (prev_val < next_val) ? prev_delta : next_delta;
      else if (target_val >= hb_max (prev_val, next_val))
        return (prev_val > next_val) ? prev_delta : next_delta;

      /* linear interpolation */
      float r = (target_val - prev_val) / (next_val - prev_val);
      return prev_delta + r * (next_delta - prev_delta);
    }

    static unsigned int next_index (unsigned int i, unsigned int start, unsigned int end)
    { return (i >= end) ? start : (i + 1); }

    public:
    bool apply_deltas_to_points (hb_codepoint_t glyph,
                                 hb_array_t<const int> coords,
                                 const hb_array_t<contour_point_t> points,
                                 bool phantom_only = false) const
    {
      if (unlikely (glyph >= glyphCount)) return true;

      hb_bytes_t var_data_bytes = table->get_glyph_var_data_bytes (table.get_blob (), glyphCount, glyph);
      if (!var_data_bytes.as<GlyphVariationData> ()->has_data ()) return true;
      hb_vector_t<unsigned int> shared_indices;
      GlyphVariationData::tuple_iterator_t iterator;
      if (!GlyphVariationData::get_tuple_iterator (var_data_bytes, table->axisCount,
                                                   var_data_bytes.arrayZ,
                                                   shared_indices, &iterator))
        return true; /* so it isn't applied at all */

      /* Save original points for inferred delta calculation */
      contour_point_vector_t orig_points_vec; // Populated lazily
      auto orig_points = orig_points_vec.as_array ();

      /* flag is used to indicate referenced point */
      contour_point_vector_t deltas_vec; // Populated lazily
      auto deltas = deltas_vec.as_array ();

      hb_vector_t<unsigned> end_points; // Populated lazily

      unsigned num_coords = table->axisCount;
      hb_array_t<const F2DOT14> shared_tuples = (table+table->sharedTuples).as_array (table->sharedTupleCount * num_coords);

      hb_vector_t<unsigned int> private_indices;
      hb_vector_t<int> x_deltas;
      hb_vector_t<int> y_deltas;
      unsigned count = points.length;
      bool flush = false;
      do
      {
        float scalar = iterator.current_tuple->calculate_scalar (coords, num_coords, shared_tuples,
                                                                 &shared_tuple_active_idx);
        if (scalar == 0.f) continue;
        const HBUINT8 *p = iterator.get_serialized_data ();
        unsigned int length = iterator.current_tuple->get_data_size ();
        if (unlikely (!iterator.var_data_bytes.check_range (p, length)))
          return false;

        if (!deltas)
        {
          if (unlikely (!deltas_vec.resize (count, false))) return false;
          deltas = deltas_vec.as_array ();
          hb_memset (deltas.arrayZ + (phantom_only ? count - 4 : 0), 0,
                     (phantom_only ? 4 : count) * sizeof (deltas[0]));
        }

        const HBUINT8 *end = p + length;

        bool has_private_points = iterator.current_tuple->has_private_points ();
        if (has_private_points &&
            !GlyphVariationData::decompile_points (p, private_indices, end))
          return false;
        const hb_array_t<unsigned int> &indices = has_private_points ? private_indices : shared_indices;

        bool apply_to_all = (indices.length == 0);
        unsigned int num_deltas = apply_to_all ? points.length : indices.length;
        if (unlikely (!x_deltas.resize (num_deltas, false))) return false;
        if (unlikely (!GlyphVariationData::decompile_deltas (p, x_deltas, end))) return false;
        if (unlikely (!y_deltas.resize (num_deltas, false))) return false;
        if (unlikely (!GlyphVariationData::decompile_deltas (p, y_deltas, end))) return false;
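
        /* Added note: tuples that cover every point just accumulate into
         * `deltas`; the buffer is flushed to `points` lazily, either when a
         * tuple that references only some points shows up or after the last
         * tuple.  A point-referencing tuple works on a freshly zeroed buffer,
         * because deltas for its unreferenced points must be inferred per
         * tuple against the untouched outline snapshotted into orig_points. */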
        if (!apply_to_all)
        {
          if (!orig_points && !phantom_only)
          {
            orig_points_vec.extend (points);
            if (unlikely (orig_points_vec.in_error ())) return false;
            orig_points = orig_points_vec.as_array ();
          }

          if (flush)
          {
            for (unsigned int i = phantom_only ? count - 4 : 0; i < count; i++)
              points.arrayZ[i].translate (deltas.arrayZ[i]);
            flush = false;
          }
          hb_memset (deltas.arrayZ + (phantom_only ? count - 4 : 0), 0,
                     (phantom_only ? 4 : count) * sizeof (deltas[0]));
        }

        if (HB_OPTIMIZE_SIZE_VAL)
        {
          for (unsigned int i = 0; i < num_deltas; i++)
          {
            unsigned int pt_index;
            if (apply_to_all)
              pt_index = i;
            else
            {
              pt_index = indices[i];
              if (unlikely (pt_index >= deltas.length)) continue;
            }
            if (phantom_only && pt_index < count - 4) continue;
            auto &delta = deltas.arrayZ[pt_index];
            delta.flag = 1; /* this point is referenced, i.e., explicit deltas specified */
            delta.x += x_deltas.arrayZ[i] * scalar;
            delta.y += y_deltas.arrayZ[i] * scalar;
          }
        }
        else
        {
          /* Ouch. Four cases... for optimization. */
          if (scalar != 1.0f)
          {
            if (apply_to_all)
              for (unsigned int i = phantom_only ? count - 4 : 0; i < count; i++)
              {
                unsigned int pt_index = i;
                auto &delta = deltas.arrayZ[pt_index];
                delta.x += x_deltas.arrayZ[i] * scalar;
                delta.y += y_deltas.arrayZ[i] * scalar;
              }
            else
              for (unsigned int i = 0; i < num_deltas; i++)
              {
                unsigned int pt_index = indices[i];
                if (unlikely (pt_index >= deltas.length)) continue;
                if (phantom_only && pt_index < count - 4) continue;
                auto &delta = deltas.arrayZ[pt_index];
                delta.flag = 1; /* this point is referenced, i.e., explicit deltas specified */
                delta.x += x_deltas.arrayZ[i] * scalar;
                delta.y += y_deltas.arrayZ[i] * scalar;
              }
          }
          else
          {
            if (apply_to_all)
              for (unsigned int i = phantom_only ? count - 4 : 0; i < count; i++)
              {
                unsigned int pt_index = i;
                auto &delta = deltas.arrayZ[pt_index];
                delta.x += x_deltas.arrayZ[i];
                delta.y += y_deltas.arrayZ[i];
              }
            else
              for (unsigned int i = 0; i < num_deltas; i++)
              {
                unsigned int pt_index = indices[i];
                if (unlikely (pt_index >= deltas.length)) continue;
                if (phantom_only && pt_index < count - 4) continue;
                auto &delta = deltas.arrayZ[pt_index];
                delta.flag = 1; /* this point is referenced, i.e., explicit deltas specified */
                delta.x += x_deltas.arrayZ[i];
                delta.y += y_deltas.arrayZ[i];
              }
          }
        }

        /* infer deltas for unreferenced points */
        if (!apply_to_all && !phantom_only)
        {
          if (!end_points)
          {
            for (unsigned i = 0; i < count; ++i)
              if (points.arrayZ[i].is_end_point)
                end_points.push (i);
            if (unlikely (end_points.in_error ())) return false;
          }

          unsigned start_point = 0;
          for (unsigned end_point : end_points)
          {
            /* Check the number of unreferenced points in a contour. If no unref points or no ref points, nothing to do. */
            unsigned unref_count = 0;
            for (unsigned i = start_point; i < end_point + 1; i++)
              unref_count += deltas.arrayZ[i].flag;
            unref_count = (end_point - start_point + 1) - unref_count;

            unsigned j = start_point;
            if (unref_count == 0 || unref_count > end_point - start_point)
              goto no_more_gaps;

            for (;;)
            {
              /* Locate the next gap of unreferenced points between two referenced points prev and next.
               * Note that a gap may wrap around at left (start_point) and/or at right (end_point). */
809 */ 810 unsigned int prev, next, i; 811 for (;;) 812 { 813 i = j; 814 j = next_index (i, start_point, end_point); 815 if (deltas.arrayZ[i].flag && !deltas.arrayZ[j].flag) break; 816 } 817 prev = j = i; 818 for (;;) 819 { 820 i = j; 821 j = next_index (i, start_point, end_point); 822 if (!deltas.arrayZ[i].flag && deltas.arrayZ[j].flag) break; 823 } 824 next = j; 825 /* Infer deltas for all unref points in the gap between prev and next */ 826 i = prev; 827 for (;;) 828 { 829 i = next_index (i, start_point, end_point); 830 if (i == next) break; 831 deltas.arrayZ[i].x = infer_delta (orig_points, deltas, i, prev, next, &contour_point_t::x); 832 deltas.arrayZ[i].y = infer_delta (orig_points, deltas, i, prev, next, &contour_point_t::y); 833 if (--unref_count == 0) goto no_more_gaps; 834 } 835 } 836 no_more_gaps: 837 start_point = end_point + 1; 838 } 839 } 840 841 flush = true; 842 843 } while (iterator.move_to_next ()); 844 845 if (flush) 846 { 847 for (unsigned int i = phantom_only ? count - 4 : 0; i < count; i++) 848 points.arrayZ[i].translate (deltas.arrayZ[i]); 849 } 850 851 return true; 852 } 853 get_axis_countOT::gvar::accelerator_t854 unsigned int get_axis_count () const { return table->axisCount; } 855 856 private: 857 hb_blob_ptr_t<gvar> table; 858 unsigned glyphCount; 859 hb_vector_t<hb_pair_t<int, int>> shared_tuple_active_idx; 860 }; 861 862 protected: 863 FixedVersion<>version; /* Version number of the glyph variations table 864 * Set to 0x00010000u. */ 865 HBUINT16 axisCount; /* The number of variation axes for this font. This must be 866 * the same number as axisCount in the 'fvar' table. */ 867 HBUINT16 sharedTupleCount; 868 /* The number of shared tuple records. Shared tuple records 869 * can be referenced within glyph variation data tables for 870 * multiple glyphs, as opposed to other tuple records stored 871 * directly within a glyph variation data table. */ 872 NNOffset32To<UnsizedArrayOf<F2DOT14>> 873 sharedTuples; /* Offset from the start of this table to the shared tuple records. 874 * Array of tuple records shared across all glyph variation data tables. */ 875 HBUINT16 glyphCountX; /* The number of glyphs in this font. This must match the number of 876 * glyphs stored elsewhere in the font. */ 877 HBUINT16 flags; /* Bit-field that gives the format of the offset array that follows. 878 * If bit 0 is clear, the offsets are uint16; if bit 0 is set, the 879 * offsets are uint32. */ 880 Offset32To<GlyphVariationData> 881 dataZ; /* Offset from the start of this table to the array of 882 * GlyphVariationData tables. */ 883 UnsizedArrayOf<HBUINT8> 884 offsetZ; /* Offsets from the start of the GlyphVariationData array 885 * to each GlyphVariationData table. */ 886 public: 887 DEFINE_SIZE_ARRAY (20, offsetZ); 888 }; 889 890 struct gvar_accelerator_t : gvar::accelerator_t { gvar_accelerator_tOT::gvar_accelerator_t891 gvar_accelerator_t (hb_face_t *face) : gvar::accelerator_t (face) {} 892 }; 893 894 } /* namespace OT */ 895 896 #endif /* HB_OT_VAR_GVAR_TABLE_HH */ 897