path: root/gfx/harfbuzz/src/OT/Layout/Common
author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-07 17:32:43 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-07 17:32:43 +0000
commit     6bf0a5cb5034a7e684dcc3500e841785237ce2dd (patch)
tree       a68f146d7fa01f0134297619fbe7e33db084e0aa /gfx/harfbuzz/src/OT/Layout/Common
parent     Initial commit. (diff)
download   thunderbird-upstream.tar.xz
           thunderbird-upstream.zip

Adding upstream version 1:115.7.0. (upstream/1%115.7.0, upstream)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to '')
-rw-r--r--  gfx/harfbuzz/src/OT/Layout/Common/Coverage.hh         | 348
-rw-r--r--  gfx/harfbuzz/src/OT/Layout/Common/CoverageFormat1.hh   | 133
-rw-r--r--  gfx/harfbuzz/src/OT/Layout/Common/CoverageFormat2.hh   | 239
-rw-r--r--  gfx/harfbuzz/src/OT/Layout/Common/RangeRecord.hh       |  97
4 files changed, 817 insertions(+), 0 deletions(-)
diff --git a/gfx/harfbuzz/src/OT/Layout/Common/Coverage.hh b/gfx/harfbuzz/src/OT/Layout/Common/Coverage.hh
new file mode 100644
index 0000000000..9ca88f788a
--- /dev/null
+++ b/gfx/harfbuzz/src/OT/Layout/Common/Coverage.hh
@@ -0,0 +1,348 @@
+/*
+ * Copyright © 2007,2008,2009 Red Hat, Inc.
+ * Copyright © 2010,2012 Google, Inc.
+ *
+ * This is part of HarfBuzz, a text shaping library.
+ *
+ * Permission is hereby granted, without written agreement and without
+ * license or royalty fees, to use, copy, modify, and distribute this
+ * software and its documentation for any purpose, provided that the
+ * above copyright notice and the following two paragraphs appear in
+ * all copies of this software.
+ *
+ * IN NO EVENT SHALL THE COPYRIGHT HOLDER BE LIABLE TO ANY PARTY FOR
+ * DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES
+ * ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN
+ * IF THE COPYRIGHT HOLDER HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ *
+ * THE COPYRIGHT HOLDER SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING,
+ * BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
+ * FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS
+ * ON AN "AS IS" BASIS, AND THE COPYRIGHT HOLDER HAS NO OBLIGATION TO
+ * PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
+ *
+ * Red Hat Author(s): Behdad Esfahbod
+ * Google Author(s): Behdad Esfahbod, Garret Rieger
+ */
+
+#ifndef OT_LAYOUT_COMMON_COVERAGE_HH
+#define OT_LAYOUT_COMMON_COVERAGE_HH
+
+#include "../types.hh"
+#include "CoverageFormat1.hh"
+#include "CoverageFormat2.hh"
+
+namespace OT {
+namespace Layout {
+namespace Common {
+
+template<typename Iterator>
+static inline void Coverage_serialize (hb_serialize_context_t *c,
+ Iterator it);
+
+struct Coverage
+{
+
+ protected:
+ union {
+ HBUINT16 format; /* Format identifier */
+ CoverageFormat1_3<SmallTypes> format1;
+ CoverageFormat2_4<SmallTypes> format2;
+#ifndef HB_NO_BEYOND_64K
+ CoverageFormat1_3<MediumTypes> format3;
+ CoverageFormat2_4<MediumTypes> format4;
+#endif
+ } u;
+ public:
+ DEFINE_SIZE_UNION (2, format);
+
+ bool sanitize (hb_sanitize_context_t *c) const
+ {
+ TRACE_SANITIZE (this);
+ if (!u.format.sanitize (c)) return_trace (false);
+ switch (u.format)
+ {
+ case 1: return_trace (u.format1.sanitize (c));
+ case 2: return_trace (u.format2.sanitize (c));
+#ifndef HB_NO_BEYOND_64K
+ case 3: return_trace (u.format3.sanitize (c));
+ case 4: return_trace (u.format4.sanitize (c));
+#endif
+ default:return_trace (true);
+ }
+ }
+
+ /* Has interface. */
+ unsigned operator [] (hb_codepoint_t k) const { return get (k); }
+ bool has (hb_codepoint_t k) const { return (*this)[k] != NOT_COVERED; }
+ /* Predicate. */
+ bool operator () (hb_codepoint_t k) const { return has (k); }
+
+ unsigned int get (hb_codepoint_t k) const { return get_coverage (k); }
+ unsigned int get_coverage (hb_codepoint_t glyph_id) const
+ {
+ switch (u.format) {
+ case 1: return u.format1.get_coverage (glyph_id);
+ case 2: return u.format2.get_coverage (glyph_id);
+#ifndef HB_NO_BEYOND_64K
+ case 3: return u.format3.get_coverage (glyph_id);
+ case 4: return u.format4.get_coverage (glyph_id);
+#endif
+ default:return NOT_COVERED;
+ }
+ }
+
+ unsigned get_population () const
+ {
+ switch (u.format) {
+ case 1: return u.format1.get_population ();
+ case 2: return u.format2.get_population ();
+#ifndef HB_NO_BEYOND_64K
+ case 3: return u.format3.get_population ();
+ case 4: return u.format4.get_population ();
+#endif
+ default:return NOT_COVERED;
+ }
+ }
+
+ template <typename Iterator,
+ hb_requires (hb_is_sorted_source_of (Iterator, hb_codepoint_t))>
+ bool serialize (hb_serialize_context_t *c, Iterator glyphs)
+ {
+ TRACE_SERIALIZE (this);
+ if (unlikely (!c->extend_min (this))) return_trace (false);
+
+ unsigned count = hb_len (glyphs);
+ unsigned num_ranges = 0;
+ hb_codepoint_t last = (hb_codepoint_t) -2;
+ hb_codepoint_t max = 0;
+ bool unsorted = false;
+ for (auto g: glyphs)
+ {
+ if (last != (hb_codepoint_t) -2 && g < last)
+ unsorted = true;
+ if (last + 1 != g)
+ num_ranges++;
+ last = g;
+ if (g > max) max = g;
+ }
+ u.format = !unsorted && count <= num_ranges * 3 ? 1 : 2;
+
+#ifndef HB_NO_BEYOND_64K
+ if (max > 0xFFFFu)
+ u.format += 2;
+ if (unlikely (max > 0xFFFFFFu))
+#else
+ if (unlikely (max > 0xFFFFu))
+#endif
+ {
+ c->check_success (false, HB_SERIALIZE_ERROR_INT_OVERFLOW);
+ return_trace (false);
+ }
+
+ switch (u.format)
+ {
+ case 1: return_trace (u.format1.serialize (c, glyphs));
+ case 2: return_trace (u.format2.serialize (c, glyphs));
+#ifndef HB_NO_BEYOND_64K
+ case 3: return_trace (u.format3.serialize (c, glyphs));
+ case 4: return_trace (u.format4.serialize (c, glyphs));
+#endif
+ default:return_trace (false);
+ }
+ }
+
+ bool subset (hb_subset_context_t *c) const
+ {
+ TRACE_SUBSET (this);
+ auto it =
+ + iter ()
+ | hb_take (c->plan->source->get_num_glyphs ())
+ | hb_map_retains_sorting (c->plan->glyph_map_gsub)
+ | hb_filter ([] (hb_codepoint_t glyph) { return glyph != HB_MAP_VALUE_INVALID; })
+ ;
+
+ // Cache the iterator result as it will be iterated multiple times
+ // by the serialize code below.
+ hb_sorted_vector_t<hb_codepoint_t> glyphs (it);
+ Coverage_serialize (c->serializer, glyphs.iter ());
+ return_trace (bool (glyphs));
+ }
+
+ bool intersects (const hb_set_t *glyphs) const
+ {
+ switch (u.format)
+ {
+ case 1: return u.format1.intersects (glyphs);
+ case 2: return u.format2.intersects (glyphs);
+#ifndef HB_NO_BEYOND_64K
+ case 3: return u.format3.intersects (glyphs);
+ case 4: return u.format4.intersects (glyphs);
+#endif
+ default:return false;
+ }
+ }
+ bool intersects_coverage (const hb_set_t *glyphs, unsigned int index) const
+ {
+ switch (u.format)
+ {
+ case 1: return u.format1.intersects_coverage (glyphs, index);
+ case 2: return u.format2.intersects_coverage (glyphs, index);
+#ifndef HB_NO_BEYOND_64K
+ case 3: return u.format3.intersects_coverage (glyphs, index);
+ case 4: return u.format4.intersects_coverage (glyphs, index);
+#endif
+ default:return false;
+ }
+ }
+
+ /* Might return false if array looks unsorted.
+ * Used for faster rejection of corrupt data. */
+ template <typename set_t>
+ bool collect_coverage (set_t *glyphs) const
+ {
+ switch (u.format)
+ {
+ case 1: return u.format1.collect_coverage (glyphs);
+ case 2: return u.format2.collect_coverage (glyphs);
+#ifndef HB_NO_BEYOND_64K
+ case 3: return u.format3.collect_coverage (glyphs);
+ case 4: return u.format4.collect_coverage (glyphs);
+#endif
+ default:return false;
+ }
+ }
+
+ template <typename IterableOut,
+ hb_requires (hb_is_sink_of (IterableOut, hb_codepoint_t))>
+ void intersect_set (const hb_set_t &glyphs, IterableOut&& intersect_glyphs) const
+ {
+ switch (u.format)
+ {
+ case 1: return u.format1.intersect_set (glyphs, intersect_glyphs);
+ case 2: return u.format2.intersect_set (glyphs, intersect_glyphs);
+#ifndef HB_NO_BEYOND_64K
+ case 3: return u.format3.intersect_set (glyphs, intersect_glyphs);
+ case 4: return u.format4.intersect_set (glyphs, intersect_glyphs);
+#endif
+ default:return ;
+ }
+ }
+
+ struct iter_t : hb_iter_with_fallback_t<iter_t, hb_codepoint_t>
+ {
+ static constexpr bool is_sorted_iterator = true;
+ iter_t (const Coverage &c_ = Null (Coverage))
+ {
+ hb_memset (this, 0, sizeof (*this));
+ format = c_.u.format;
+ switch (format)
+ {
+ case 1: u.format1.init (c_.u.format1); return;
+ case 2: u.format2.init (c_.u.format2); return;
+#ifndef HB_NO_BEYOND_64K
+ case 3: u.format3.init (c_.u.format3); return;
+ case 4: u.format4.init (c_.u.format4); return;
+#endif
+ default: return;
+ }
+ }
+ bool __more__ () const
+ {
+ switch (format)
+ {
+ case 1: return u.format1.__more__ ();
+ case 2: return u.format2.__more__ ();
+#ifndef HB_NO_BEYOND_64K
+ case 3: return u.format3.__more__ ();
+ case 4: return u.format4.__more__ ();
+#endif
+ default:return false;
+ }
+ }
+ void __next__ ()
+ {
+ switch (format)
+ {
+ case 1: u.format1.__next__ (); break;
+ case 2: u.format2.__next__ (); break;
+#ifndef HB_NO_BEYOND_64K
+ case 3: u.format3.__next__ (); break;
+ case 4: u.format4.__next__ (); break;
+#endif
+ default: break;
+ }
+ }
+ typedef hb_codepoint_t __item_t__;
+ __item_t__ __item__ () const { return get_glyph (); }
+
+ hb_codepoint_t get_glyph () const
+ {
+ switch (format)
+ {
+ case 1: return u.format1.get_glyph ();
+ case 2: return u.format2.get_glyph ();
+#ifndef HB_NO_BEYOND_64K
+ case 3: return u.format3.get_glyph ();
+ case 4: return u.format4.get_glyph ();
+#endif
+ default:return 0;
+ }
+ }
+ bool operator != (const iter_t& o) const
+ {
+ if (unlikely (format != o.format)) return true;
+ switch (format)
+ {
+ case 1: return u.format1 != o.u.format1;
+ case 2: return u.format2 != o.u.format2;
+#ifndef HB_NO_BEYOND_64K
+ case 3: return u.format3 != o.u.format3;
+ case 4: return u.format4 != o.u.format4;
+#endif
+ default:return false;
+ }
+ }
+ iter_t __end__ () const
+ {
+ iter_t it = {};
+ it.format = format;
+ switch (format)
+ {
+ case 1: it.u.format1 = u.format1.__end__ (); break;
+ case 2: it.u.format2 = u.format2.__end__ (); break;
+#ifndef HB_NO_BEYOND_64K
+ case 3: it.u.format3 = u.format3.__end__ (); break;
+ case 4: it.u.format4 = u.format4.__end__ (); break;
+#endif
+ default: break;
+ }
+ return it;
+ }
+
+ private:
+ unsigned int format;
+ union {
+#ifndef HB_NO_BEYOND_64K
+ CoverageFormat2_4<MediumTypes>::iter_t format4; /* Put this one first since it's larger; helps shut up compiler. */
+ CoverageFormat1_3<MediumTypes>::iter_t format3;
+#endif
+ CoverageFormat2_4<SmallTypes>::iter_t format2; /* Put this one first since it's larger; helps shut up compiler. */
+ CoverageFormat1_3<SmallTypes>::iter_t format1;
+ } u;
+ };
+ iter_t iter () const { return iter_t (*this); }
+};
+
+template<typename Iterator>
+static inline void
+Coverage_serialize (hb_serialize_context_t *c,
+ Iterator it)
+{ c->start_embed<Coverage> ()->serialize (c, it); }
+
+}
+}
+}
+
+#endif // #ifndef OT_LAYOUT_COMMON_COVERAGE_HH
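
Editor's note: a minimal standalone sketch (plain C++, no HarfBuzz types; the function name is illustrative only) of the format-selection heuristic in Coverage::serialize above. Format 1 is a flat sorted glyph list, format 2 is a list of ranges, and the +2 bump picks the 24-bit variants when any glyph ID exceeds 0xFFFF and HB_NO_BEYOND_64K is not defined.

#include <cstdint>
#include <vector>

// Format 1 stores one 16-bit glyph ID per glyph; format 2 stores three
// 16-bit fields per contiguous range, so the flat list only wins while
// count <= 3 * num_ranges and the input is already sorted.
static unsigned pick_coverage_format (const std::vector<uint32_t> &glyphs)
{
  unsigned num_ranges = 0;
  uint32_t last = UINT32_MAX - 1;            // sentinel, like (hb_codepoint_t) -2
  uint32_t max = 0;
  bool unsorted = false;
  for (uint32_t g : glyphs)
  {
    if (last != UINT32_MAX - 1 && g < last) unsorted = true;
    if (last + 1 != g) num_ranges++;         // a gap starts a new range
    last = g;
    if (g > max) max = g;
  }
  unsigned format = (!unsorted && glyphs.size () <= num_ranges * 3u) ? 1u : 2u;
  if (max > 0xFFFFu) format += 2;            // formats 3/4: 24-bit glyph IDs
  return format;
}

int main ()
{
  std::vector<uint32_t> scattered  = {2, 5, 9};              // 3 single-glyph ranges -> format 1
  std::vector<uint32_t> contiguous = {1, 2, 3, 4, 5, 6, 7};  // 1 range of 7 glyphs  -> format 2
  return (pick_coverage_format (scattered) == 1 &&
          pick_coverage_format (contiguous) == 2) ? 0 : 1;
}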
diff --git a/gfx/harfbuzz/src/OT/Layout/Common/CoverageFormat1.hh b/gfx/harfbuzz/src/OT/Layout/Common/CoverageFormat1.hh
new file mode 100644
index 0000000000..5d68e3d15e
--- /dev/null
+++ b/gfx/harfbuzz/src/OT/Layout/Common/CoverageFormat1.hh
@@ -0,0 +1,133 @@
+/*
+ * Copyright © 2007,2008,2009 Red Hat, Inc.
+ * Copyright © 2010,2012 Google, Inc.
+ *
+ * This is part of HarfBuzz, a text shaping library.
+ *
+ * Permission is hereby granted, without written agreement and without
+ * license or royalty fees, to use, copy, modify, and distribute this
+ * software and its documentation for any purpose, provided that the
+ * above copyright notice and the following two paragraphs appear in
+ * all copies of this software.
+ *
+ * IN NO EVENT SHALL THE COPYRIGHT HOLDER BE LIABLE TO ANY PARTY FOR
+ * DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES
+ * ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN
+ * IF THE COPYRIGHT HOLDER HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ *
+ * THE COPYRIGHT HOLDER SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING,
+ * BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
+ * FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS
+ * ON AN "AS IS" BASIS, AND THE COPYRIGHT HOLDER HAS NO OBLIGATION TO
+ * PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
+ *
+ * Red Hat Author(s): Behdad Esfahbod
+ * Google Author(s): Behdad Esfahbod, Garret Rieger
+ */
+
+
+#ifndef OT_LAYOUT_COMMON_COVERAGEFORMAT1_HH
+#define OT_LAYOUT_COMMON_COVERAGEFORMAT1_HH
+
+namespace OT {
+namespace Layout {
+namespace Common {
+
+#define NOT_COVERED ((unsigned int) -1)
+
+template <typename Types>
+struct CoverageFormat1_3
+{
+ friend struct Coverage;
+
+ protected:
+ HBUINT16 coverageFormat; /* Format identifier--format = 1 */
+ SortedArray16Of<typename Types::HBGlyphID>
+ glyphArray; /* Array of GlyphIDs--in numerical order */
+ public:
+ DEFINE_SIZE_ARRAY (4, glyphArray);
+
+ private:
+ bool sanitize (hb_sanitize_context_t *c) const
+ {
+ TRACE_SANITIZE (this);
+ return_trace (glyphArray.sanitize (c));
+ }
+
+ unsigned int get_coverage (hb_codepoint_t glyph_id) const
+ {
+ unsigned int i;
+ glyphArray.bfind (glyph_id, &i, HB_NOT_FOUND_STORE, NOT_COVERED);
+ return i;
+ }
+
+ unsigned get_population () const
+ {
+ return glyphArray.len;
+ }
+
+ template <typename Iterator,
+ hb_requires (hb_is_sorted_source_of (Iterator, hb_codepoint_t))>
+ bool serialize (hb_serialize_context_t *c, Iterator glyphs)
+ {
+ TRACE_SERIALIZE (this);
+ return_trace (glyphArray.serialize (c, glyphs));
+ }
+
+ bool intersects (const hb_set_t *glyphs) const
+ {
+ if (glyphArray.len > glyphs->get_population () * hb_bit_storage ((unsigned) glyphArray.len) / 2)
+ {
+ for (hb_codepoint_t g = HB_SET_VALUE_INVALID; glyphs->next (&g);)
+ if (get_coverage (g) != NOT_COVERED)
+ return true;
+ return false;
+ }
+
+ for (const auto& g : glyphArray.as_array ())
+ if (glyphs->has (g))
+ return true;
+ return false;
+ }
+ bool intersects_coverage (const hb_set_t *glyphs, unsigned int index) const
+ { return glyphs->has (glyphArray[index]); }
+
+ template <typename IterableOut,
+ hb_requires (hb_is_sink_of (IterableOut, hb_codepoint_t))>
+ void intersect_set (const hb_set_t &glyphs, IterableOut&& intersect_glyphs) const
+ {
+ unsigned count = glyphArray.len;
+ for (unsigned i = 0; i < count; i++)
+ if (glyphs.has (glyphArray[i]))
+ intersect_glyphs << glyphArray[i];
+ }
+
+ template <typename set_t>
+ bool collect_coverage (set_t *glyphs) const
+ { return glyphs->add_sorted_array (glyphArray.as_array ()); }
+
+ public:
+ /* Older compilers need this to be public. */
+ struct iter_t
+ {
+ void init (const struct CoverageFormat1_3 &c_) { c = &c_; i = 0; }
+ bool __more__ () const { return i < c->glyphArray.len; }
+ void __next__ () { i++; }
+ hb_codepoint_t get_glyph () const { return c->glyphArray[i]; }
+ bool operator != (const iter_t& o) const
+ { return i != o.i; }
+ iter_t __end__ () const { iter_t it; it.init (*c); it.i = c->glyphArray.len; return it; }
+
+ private:
+ const struct CoverageFormat1_3 *c;
+ unsigned int i;
+ };
+ private:
+};
+
+}
+}
+}
+
+#endif // #ifndef OT_LAYOUT_COMMON_COVERAGEFORMAT1_HH
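
Editor's note: CoverageFormat1_3::get_coverage above is a binary search over the sorted glyph array, and the coverage index is simply the array position. A rough standalone equivalent (plain C++; the names and the NOT_COVERED stand-in are assumptions, not HarfBuzz API):

#include <algorithm>
#include <cstdint>
#include <vector>

static const unsigned NOT_COVERED_SKETCH = ~0u;  // stand-in for HarfBuzz's NOT_COVERED

// Coverage index of glyph_id in a sorted format-1 glyph array, or
// NOT_COVERED_SKETCH when the glyph is absent.
static unsigned coverage1_lookup (const std::vector<uint32_t> &glyph_array,
                                  uint32_t glyph_id)
{
  auto it = std::lower_bound (glyph_array.begin (), glyph_array.end (), glyph_id);
  if (it == glyph_array.end () || *it != glyph_id) return NOT_COVERED_SKETCH;
  return (unsigned) (it - glyph_array.begin ());
}

int main ()
{
  std::vector<uint32_t> glyphs = {10, 42, 97};
  return (coverage1_lookup (glyphs, 42) == 1 &&
          coverage1_lookup (glyphs, 43) == NOT_COVERED_SKETCH) ? 0 : 1;
}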
diff --git a/gfx/harfbuzz/src/OT/Layout/Common/CoverageFormat2.hh b/gfx/harfbuzz/src/OT/Layout/Common/CoverageFormat2.hh
new file mode 100644
index 0000000000..fa501d659d
--- /dev/null
+++ b/gfx/harfbuzz/src/OT/Layout/Common/CoverageFormat2.hh
@@ -0,0 +1,239 @@
+/*
+ * Copyright © 2007,2008,2009 Red Hat, Inc.
+ * Copyright © 2010,2012 Google, Inc.
+ *
+ * This is part of HarfBuzz, a text shaping library.
+ *
+ * Permission is hereby granted, without written agreement and without
+ * license or royalty fees, to use, copy, modify, and distribute this
+ * software and its documentation for any purpose, provided that the
+ * above copyright notice and the following two paragraphs appear in
+ * all copies of this software.
+ *
+ * IN NO EVENT SHALL THE COPYRIGHT HOLDER BE LIABLE TO ANY PARTY FOR
+ * DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES
+ * ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN
+ * IF THE COPYRIGHT HOLDER HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ *
+ * THE COPYRIGHT HOLDER SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING,
+ * BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
+ * FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS
+ * ON AN "AS IS" BASIS, AND THE COPYRIGHT HOLDER HAS NO OBLIGATION TO
+ * PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
+ *
+ * Red Hat Author(s): Behdad Esfahbod
+ * Google Author(s): Behdad Esfahbod, Garret Rieger
+ */
+
+#ifndef OT_LAYOUT_COMMON_COVERAGEFORMAT2_HH
+#define OT_LAYOUT_COMMON_COVERAGEFORMAT2_HH
+
+#include "RangeRecord.hh"
+
+namespace OT {
+namespace Layout {
+namespace Common {
+
+template <typename Types>
+struct CoverageFormat2_4
+{
+ friend struct Coverage;
+
+ protected:
+ HBUINT16 coverageFormat; /* Format identifier--format = 2 */
+ SortedArray16Of<RangeRecord<Types>>
+ rangeRecord; /* Array of glyph ranges--ordered by
+ * Start GlyphID. rangeCount entries
+ * long */
+ public:
+ DEFINE_SIZE_ARRAY (4, rangeRecord);
+
+ private:
+
+ bool sanitize (hb_sanitize_context_t *c) const
+ {
+ TRACE_SANITIZE (this);
+ return_trace (rangeRecord.sanitize (c));
+ }
+
+ unsigned int get_coverage (hb_codepoint_t glyph_id) const
+ {
+ const RangeRecord<Types> &range = rangeRecord.bsearch (glyph_id);
+ return likely (range.first <= range.last)
+ ? (unsigned int) range.value + (glyph_id - range.first)
+ : NOT_COVERED;
+ }
+
+ unsigned get_population () const
+ {
+ typename Types::large_int ret = 0;
+ for (const auto &r : rangeRecord)
+ ret += r.get_population ();
+ return ret > UINT_MAX ? UINT_MAX : (unsigned) ret;
+ }
+
+ template <typename Iterator,
+ hb_requires (hb_is_sorted_source_of (Iterator, hb_codepoint_t))>
+ bool serialize (hb_serialize_context_t *c, Iterator glyphs)
+ {
+ TRACE_SERIALIZE (this);
+ if (unlikely (!c->extend_min (this))) return_trace (false);
+
+ unsigned num_ranges = 0;
+ hb_codepoint_t last = (hb_codepoint_t) -2;
+ for (auto g: glyphs)
+ {
+ if (last + 1 != g)
+ num_ranges++;
+ last = g;
+ }
+
+ if (unlikely (!rangeRecord.serialize (c, num_ranges))) return_trace (false);
+ if (!num_ranges) return_trace (true);
+
+ unsigned count = 0;
+ unsigned range = (unsigned) -1;
+ last = (hb_codepoint_t) -2;
+ bool unsorted = false;
+ for (auto g: glyphs)
+ {
+ if (last + 1 != g)
+ {
+ if (unlikely (last != (hb_codepoint_t) -2 && last + 1 > g))
+ unsorted = true;
+
+ range++;
+ rangeRecord.arrayZ[range].first = g;
+ rangeRecord.arrayZ[range].value = count;
+ }
+ rangeRecord.arrayZ[range].last = g;
+ last = g;
+ count++;
+ }
+
+ if (unlikely (unsorted))
+ rangeRecord.as_array ().qsort (RangeRecord<Types>::cmp_range);
+
+ return_trace (true);
+ }
+
+ bool intersects (const hb_set_t *glyphs) const
+ {
+ if (rangeRecord.len > glyphs->get_population () * hb_bit_storage ((unsigned) rangeRecord.len) / 2)
+ {
+ for (hb_codepoint_t g = HB_SET_VALUE_INVALID; glyphs->next (&g);)
+ if (get_coverage (g) != NOT_COVERED)
+ return true;
+ return false;
+ }
+
+ return hb_any (+ hb_iter (rangeRecord)
+ | hb_map ([glyphs] (const RangeRecord<Types> &range) { return range.intersects (*glyphs); }));
+ }
+ bool intersects_coverage (const hb_set_t *glyphs, unsigned int index) const
+ {
+ auto *range = rangeRecord.as_array ().bsearch (index);
+ if (range)
+ return range->intersects (*glyphs);
+ return false;
+ }
+
+ template <typename IterableOut,
+ hb_requires (hb_is_sink_of (IterableOut, hb_codepoint_t))>
+ void intersect_set (const hb_set_t &glyphs, IterableOut&& intersect_glyphs) const
+ {
+ /* Break out of loop for overlapping, broken tables,
+ * to avoid fuzzer timeouts. */
+ hb_codepoint_t last = 0;
+ for (const auto& range : rangeRecord)
+ {
+ if (unlikely (range.first < last))
+ break;
+ last = range.last;
+ for (hb_codepoint_t g = range.first - 1;
+ glyphs.next (&g) && g <= last;)
+ intersect_glyphs << g;
+ }
+ }
+
+ template <typename set_t>
+ bool collect_coverage (set_t *glyphs) const
+ {
+ for (const auto& range: rangeRecord)
+ if (unlikely (!range.collect_coverage (glyphs)))
+ return false;
+ return true;
+ }
+
+ public:
+ /* Older compilers need this to be public. */
+ struct iter_t
+ {
+ void init (const CoverageFormat2_4 &c_)
+ {
+ c = &c_;
+ coverage = 0;
+ i = 0;
+ j = c->rangeRecord.len ? c->rangeRecord[0].first : 0;
+ if (unlikely (c->rangeRecord[0].first > c->rangeRecord[0].last))
+ {
+ /* Broken table. Skip. */
+ i = c->rangeRecord.len;
+ j = 0;
+ }
+ }
+ bool __more__ () const { return i < c->rangeRecord.len; }
+ void __next__ ()
+ {
+ if (j >= c->rangeRecord[i].last)
+ {
+ i++;
+ if (__more__ ())
+ {
+ unsigned int old = coverage;
+ j = c->rangeRecord.arrayZ[i].first;
+ coverage = c->rangeRecord.arrayZ[i].value;
+ if (unlikely (coverage != old + 1))
+ {
+ /* Broken table. Skip. Important to avoid DoS.
+ * Also, our callers depend on coverage being
+ * consecutive and monotonically increasing,
+ * i.e. iota(). */
+ i = c->rangeRecord.len;
+ j = 0;
+ return;
+ }
+ }
+ else
+ j = 0;
+ return;
+ }
+ coverage++;
+ j++;
+ }
+ hb_codepoint_t get_glyph () const { return j; }
+ bool operator != (const iter_t& o) const
+ { return i != o.i || j != o.j; }
+ iter_t __end__ () const
+ {
+ iter_t it;
+ it.init (*c);
+ it.i = c->rangeRecord.len;
+ it.j = 0;
+ return it;
+ }
+
+ private:
+ const struct CoverageFormat2_4 *c;
+ unsigned int i, coverage;
+ hb_codepoint_t j;
+ };
+ private:
+};
+
+}
+}
+}
+
+#endif // #ifndef OT_LAYOUT_COMMON_COVERAGEFORMAT2_HH
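
Editor's note: CoverageFormat2_4::get_coverage above resolves a glyph to range.value + (glyph_id - range.first) after a binary search over the sorted ranges. A minimal standalone sketch of the same arithmetic (plain C++; the struct and function names are illustrative, not HarfBuzz types):

#include <cstdint>
#include <vector>

// Stand-in for a format-2 range record: value is the coverage index assigned
// to `first`; glyphs inside the range get consecutive indices.
struct range_sketch_t { uint32_t first, last, value; };

// Binary-search the range containing glyph_id, then offset into it.
static unsigned coverage2_lookup (const std::vector<range_sketch_t> &ranges,
                                  uint32_t glyph_id)
{
  unsigned lo = 0, hi = (unsigned) ranges.size ();
  while (lo < hi)
  {
    unsigned mid = lo + (hi - lo) / 2;
    if (glyph_id < ranges[mid].first)      hi = mid;
    else if (glyph_id > ranges[mid].last)  lo = mid + 1;
    else return ranges[mid].value + (glyph_id - ranges[mid].first);
  }
  return ~0u;  // NOT_COVERED
}

int main ()
{
  // Two ranges: glyphs 10..12 get coverage indices 0..2, glyphs 20..21 get 3..4.
  std::vector<range_sketch_t> ranges = {{10, 12, 0}, {20, 21, 3}};
  return (coverage2_lookup (ranges, 11) == 1 &&
          coverage2_lookup (ranges, 21) == 4 &&
          coverage2_lookup (ranges, 15) == ~0u) ? 0 : 1;
}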
diff --git a/gfx/harfbuzz/src/OT/Layout/Common/RangeRecord.hh b/gfx/harfbuzz/src/OT/Layout/Common/RangeRecord.hh
new file mode 100644
index 0000000000..85aacace9a
--- /dev/null
+++ b/gfx/harfbuzz/src/OT/Layout/Common/RangeRecord.hh
@@ -0,0 +1,97 @@
+/*
+ * Copyright © 2007,2008,2009 Red Hat, Inc.
+ * Copyright © 2010,2012 Google, Inc.
+ *
+ * This is part of HarfBuzz, a text shaping library.
+ *
+ * Permission is hereby granted, without written agreement and without
+ * license or royalty fees, to use, copy, modify, and distribute this
+ * software and its documentation for any purpose, provided that the
+ * above copyright notice and the following two paragraphs appear in
+ * all copies of this software.
+ *
+ * IN NO EVENT SHALL THE COPYRIGHT HOLDER BE LIABLE TO ANY PARTY FOR
+ * DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES
+ * ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN
+ * IF THE COPYRIGHT HOLDER HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ *
+ * THE COPYRIGHT HOLDER SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING,
+ * BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
+ * FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS
+ * ON AN "AS IS" BASIS, AND THE COPYRIGHT HOLDER HAS NO OBLIGATION TO
+ * PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
+ *
+ * Red Hat Author(s): Behdad Esfahbod
+ * Google Author(s): Behdad Esfahbod, Garret Rieger
+ */
+
+#ifndef OT_LAYOUT_COMMON_RANGERECORD_HH
+#define OT_LAYOUT_COMMON_RANGERECORD_HH
+
+namespace OT {
+namespace Layout {
+namespace Common {
+
+template <typename Types>
+struct RangeRecord
+{
+ typename Types::HBGlyphID first; /* First GlyphID in the range */
+ typename Types::HBGlyphID last; /* Last GlyphID in the range */
+ HBUINT16 value; /* Value */
+
+ DEFINE_SIZE_STATIC (2 + 2 * Types::size);
+
+ bool sanitize (hb_sanitize_context_t *c) const
+ {
+ TRACE_SANITIZE (this);
+ return_trace (c->check_struct (this));
+ }
+
+ int cmp (hb_codepoint_t g) const
+ { return g < first ? -1 : g <= last ? 0 : +1; }
+
+ HB_INTERNAL static int cmp_range (const void *pa, const void *pb) {
+ const RangeRecord *a = (const RangeRecord *) pa;
+ const RangeRecord *b = (const RangeRecord *) pb;
+ if (a->first < b->first) return -1;
+ if (a->first > b->first) return +1;
+ if (a->last < b->last) return -1;
+ if (a->last > b->last) return +1;
+ if (a->value < b->value) return -1;
+ if (a->value > b->value) return +1;
+ return 0;
+ }
+
+ unsigned get_population () const
+ {
+ if (unlikely (last < first)) return 0;
+ return (last - first + 1);
+ }
+
+ bool intersects (const hb_set_t &glyphs) const
+ { return glyphs.intersects (first, last); }
+
+ template <typename set_t>
+ bool collect_coverage (set_t *glyphs) const
+ { return glyphs->add_range (first, last); }
+};
+
+}
+}
+}
+
+// TODO(garretrieger): This was previously implemented using
+// DECLARE_NULL_NAMESPACE_BYTES_TEMPLATE1 (OT, RangeRecord, 9);
+// but that only works when there is only a single namespace level.
+// The macro should probably be fixed so it can work in this situation.
+extern HB_INTERNAL const unsigned char _hb_Null_OT_RangeRecord[9];
+template <typename Spec>
+struct Null<OT::Layout::Common::RangeRecord<Spec>> {
+ static OT::Layout::Common::RangeRecord<Spec> const & get_null () {
+ return *reinterpret_cast<const OT::Layout::Common::RangeRecord<Spec> *> (_hb_Null_OT_RangeRecord);
+ }
+};
+
+
+#endif // #ifndef OT_LAYOUT_COMMON_RANGERECORD_HH
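
Editor's note: RangeRecord::cmp above is what makes the bsearch in CoverageFormat2_4::get_coverage work: it treats a glyph as equal to any range that contains it. A tiny standalone check of that ordering contract (plain C++; names are illustrative):

#include <cassert>
#include <cstdint>

// Stand-in for RangeRecord's glyph fields.
struct range_sketch_t { uint32_t first, last; };

// Same contract as RangeRecord::cmp: negative below the range, zero inside,
// positive above, so a binary search can land on the containing range.
static int cmp_glyph_range (uint32_t g, const range_sketch_t &r)
{ return g < r.first ? -1 : g <= r.last ? 0 : +1; }

int main ()
{
  range_sketch_t r = {10, 20};
  assert (cmp_glyph_range (5, r)  < 0);   // below the range
  assert (cmp_glyph_range (15, r) == 0);  // inside the range
  assert (cmp_glyph_range (25, r) > 0);   // above the range
  return 0;
}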