2 * Copyright © 2012,2017 Google, Inc.
3 * Copyright © 2021 Behdad Esfahbod
5 * This is part of HarfBuzz, a text shaping library.
7 * Permission is hereby granted, without written agreement and without
8 * license or royalty fees, to use, copy, modify, and distribute this
9 * software and its documentation for any purpose, provided that the
10 * above copyright notice and the following two paragraphs appear in
11 * all copies of this software.
13 * IN NO EVENT SHALL THE COPYRIGHT HOLDER BE LIABLE TO ANY PARTY FOR
14 * DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES
15 * ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN
16 * IF THE COPYRIGHT HOLDER HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH
19 * THE COPYRIGHT HOLDER SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING,
20 * BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
21 * FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS
22 * ON AN "AS IS" BASIS, AND THE COPYRIGHT HOLDER HAS NO OBLIGATION TO
23 * PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
25 * Google Author(s): Behdad Esfahbod
32 #include "hb-bit-page.hh"
33 #include "hb-machinery.hh"
/* Members all have in-class initializers, so defaulted special members suffice. */
hb_bit_set_t () = default;
~hb_bit_set_t () = default;

/* Copy goes through set (); move/assignment go through hb_swap for O(1) transfer. */
hb_bit_set_t (const hb_bit_set_t& other) : hb_bit_set_t () { set (other); }
hb_bit_set_t ( hb_bit_set_t&& other) : hb_bit_set_t () { hb_swap (*this, other); }
hb_bit_set_t& operator= (const hb_bit_set_t& other) { set (other); return *this; }
hb_bit_set_t& operator= (hb_bit_set_t&& other) { hb_swap (*this, other); return *this; }
/* ADL swap: exchanges all state members between the two sets. */
friend void swap (hb_bit_set_t &a, hb_bit_set_t &b)
  /* Error-state handling first; NOTE(review): continuation of this branch is not
   * visible in this excerpt — confirm against upstream. */
  if (likely (!a.successful || !b.successful))
  hb_swap (a.population, b.population);
  hb_swap (a.last_page_lookup, b.last_page_lookup);
  hb_swap (a.page_map, b.page_map);
  hb_swap (a.pages, b.pages);
/* One fixed-size page of bits; the set is a sparse collection of these. */
using page_t = hb_bit_page_t;
/* page_map_t entry: associates a page "major" number with an index into pages. */
int cmp (const page_map_t &o) const { return cmp (o.major); }
/* Comparison used by bsearch/bfind; note operand order (key minus ours). */
int cmp (uint32_t o_major) const { return (int) o_major - (int) major; }
bool successful = true; /* Allocations successful */
/* Cached element count; UINT_MAX (see dirty()) means "unknown, recompute". */
mutable unsigned int population = 0;
/* Cache of the page-map index hit by the last next() call, to speed up scans. */
mutable unsigned int last_page_lookup = 0;
/* Sorted by major; each entry's index points into pages (pages is unordered). */
hb_sorted_vector_t<page_map_t> page_map;
hb_vector_t<page_t> pages;

/* Latch the error flag; once false it stays false. */
void err () { if (successful) successful = false; } /* TODO Remove */
bool in_error () const { return !successful; }
/* Resize both the page storage and the page map to count entries.
 * Returns false and records the error on allocation failure. */
bool resize (unsigned int count)
  if (unlikely (!successful)) return false;
  if (unlikely (!pages.resize (count) || !page_map.resize (count)))
    /* One of the two resizes failed: shrink pages back so the two stay in sync. */
    pages.resize (page_map.length);
/* NOTE(review): the line below belongs to a following function not fully shown
 * in this excerpt (population is reset when allocations were successful). */
if (likely (successful))
/* True iff no bit is set; has to scan every page since pages may exist but be empty. */
bool is_empty () const
  unsigned int count = pages.length;
  for (unsigned int i = 0; i < count; i++)
    if (!pages[i].is_empty ())
/* Truthiness of the set == non-emptiness. */
explicit operator bool () const { return !is_empty (); }

/* Invalidate the cached population; get_population() will recount. */
void dirty () { population = UINT_MAX; }
/* Add a single codepoint to the set.  No-op on error state or INVALID input. */
void add (hb_codepoint_t g)
  if (unlikely (!successful)) return;
  if (unlikely (g == INVALID)) return;
  /* insert=true: create the page if it doesn't exist; bail if allocation failed. */
  page_t *page = page_for (g, true); if (unlikely (!page)) return;
/* Add the inclusive range [a, b].  Returns false only on allocation failure
 * or invalid input; returns true when already in error state (see issue link). */
bool add_range (hb_codepoint_t a, hb_codepoint_t b)
  if (unlikely (!successful)) return true; /* https://github.com/harfbuzz/harfbuzz/issues/657 */
  if (unlikely (a > b || a == INVALID || b == INVALID)) return false;
  unsigned int ma = get_major (a);
  unsigned int mb = get_major (b);
  /* Fast path: both endpoints on the same page. */
  page_t *page = page_for (a, true); if (unlikely (!page)) return false;
  page->add_range (a, b);
  /* Multi-page path: fill the head page to its end... */
  page_t *page = page_for (a, true); if (unlikely (!page)) return false;
  page->add_range (a, major_start (ma + 1) - 1);
  /* ...fully populate every whole page strictly between... */
  for (unsigned int m = ma + 1; m < mb; m++)
    page = page_for (major_start (m), true); if (unlikely (!page)) return false;
  /* ...and fill the tail page from its start up to b. */
  page = page_for (b, true); if (unlikely (!page)) return false;
  page->add_range (major_start (mb), b);
/* Add (v=true) or delete (v=false) count codepoints read from array,
 * advancing stride bytes between elements.  Processes runs that share a
 * page together so page lookup happens once per page, not once per item. */
template <typename T>
void set_array (bool v, const T *array, unsigned int count, unsigned int stride=sizeof(T))
  if (unlikely (!successful)) return;
  hb_codepoint_t g = *array;
  unsigned int m = get_major (g);
  /* Only insist on a page when adding; deleting from a missing page is a no-op. */
  page_t *page = page_for (g, v); if (unlikely (v && !page)) return;
  unsigned int start = major_start (m);
  unsigned int end = major_start (m + 1);
  if (v || page) /* The v check is to optimize out the page check if v is true. */
  /* Unaligned step: stride may not be a multiple of alignof(T). */
  array = &StructAtOffsetUnaligned<T> (array, stride);
  /* Keep consuming items as long as they fall within the current page. */
  while (count && (g = *array, start <= g && g < end));
188 template <typename T>
189 void add_array (const T *array, unsigned int count, unsigned int stride=sizeof(T))
190 { set_array (true, array, count, stride); }
191 template <typename T>
192 void add_array (const hb_array_t<const T>& arr) { add_array (&arr, arr.len ()); }
194 template <typename T>
195 void del_array (const T *array, unsigned int count, unsigned int stride=sizeof(T))
196 { set_array (false, array, count, stride); }
197 template <typename T>
198 void del_array (const hb_array_t<const T>& arr) { del_array (&arr, arr.len ()); }
/* Might return false if array looks unsorted.
 * Used for faster rejection of corrupt data. */
template <typename T>
bool set_sorted_array (bool v, const T *array, unsigned int count, unsigned int stride=sizeof(T))
  if (unlikely (!successful)) return true; /* https://github.com/harfbuzz/harfbuzz/issues/657 */
  if (!count) return true;
  hb_codepoint_t g = *array;
  /* Track the previous value to detect out-of-order (corrupt) input. */
  hb_codepoint_t last_g = g;
  unsigned int m = get_major (g);
  /* Only insist on a page when adding; deleting from a missing page is a no-op. */
  page_t *page = page_for (g, v); if (unlikely (v && !page)) return false;
  unsigned int end = major_start (m + 1);
  /* If we try harder we can change the following comparison to <=;
   * Not sure if it's worth it. */
  if (g < last_g) return false;
  if (v || page) /* The v check is to optimize out the page check if v is true. */
  array = (const T *) ((const char *) array + stride);
  /* Sorted input: only need the upper page bound, no lower-bound check. */
  while (count && (g = *array, g < end));
233 template <typename T>
234 bool add_sorted_array (const T *array, unsigned int count, unsigned int stride=sizeof(T))
235 { return set_sorted_array (true, array, count, stride); }
236 template <typename T>
237 bool add_sorted_array (const hb_sorted_array_t<const T>& arr) { return add_sorted_array (&arr, arr.len ()); }
239 template <typename T>
240 bool del_sorted_array (const T *array, unsigned int count, unsigned int stride=sizeof(T))
241 { return set_sorted_array (false, array, count, stride); }
242 template <typename T>
243 bool del_sorted_array (const hb_sorted_array_t<const T>& arr) { return del_sorted_array (&arr, arr.len ()); }
/* Remove a single codepoint.  Missing page means the bit is already clear. */
void del (hb_codepoint_t g)
  if (unlikely (!successful)) return;
  page_t *page = page_for (g);
/* Drop every page whose major number lies in [ds, de], then compact storage. */
void del_pages (int ds, int de)
  // Pre-allocate the workspace that compact() will need so we can bail on allocation failure
  // before attempting to rewrite the page map.
  hb_vector_t<unsigned> compact_workspace;
  if (unlikely (!allocate_compact_workspace (compact_workspace))) return;

  /* Keep only the page_map entries outside the [ds, de] window. */
  unsigned int write_index = 0;
  for (unsigned int i = 0; i < page_map.length; i++)
    int m = (int) page_map[i].major;
    if (m < ds || de < m)
      page_map[write_index++] = page_map[i];
  /* Repack surviving pages to the front and shrink both vectors. */
  compact (compact_workspace, write_index);
  resize (write_index);
/* Remove the inclusive range [a, b]: whole pages are dropped via del_pages,
 * partial head/tail pages are cleared bit-by-bit. */
void del_range (hb_codepoint_t a, hb_codepoint_t b)
  if (unlikely (!successful)) return;
  if (unlikely (a > b || a == INVALID)) return;
  unsigned int ma = get_major (a);
  unsigned int mb = get_major (b);
  /* Delete pages from ds through de if ds <= de. */
  int ds = (a == major_start (ma))? (int) ma: (int) (ma + 1); /* whole head page? */
  int de = (b + 1 == major_start (mb + 1))? (int) mb: ((int) mb - 1); /* whole tail page? */
  if (ds > de || (int) ma < ds)
    page_t *page = page_for (a);
    /* Single partial page: clear [a, b] directly. */
    page->del_range (a, b);
    /* Partial head page: clear from a to the end of its page. */
    page->del_range (a, major_start (ma + 1) - 1);
  if (de < (int) mb && ma != mb)
    page_t *page = page_for (b);
    /* Partial tail page: clear from its start up to b. */
    page->del_range (major_start (mb), b);
/* Test whether codepoint g is in the set; absent page means absent bit. */
bool get (hb_codepoint_t g) const
  const page_t *page = page_for (g);
  return page->get (g);

/* Collection protocol used by hb iterator/sink machinery. */
static constexpr bool SENTINEL = false;
typedef bool value_t;
value_t operator [] (hb_codepoint_t k) const { return get (k); }
bool has (hb_codepoint_t k) const { return (*this)[k] != SENTINEL; }
/* Predicate form, so the set can be used directly as a filter. */
bool operator () (hb_codepoint_t k) const { return has (k); }
325 /* Sink interface. */
326 hb_bit_set_t& operator << (hb_codepoint_t v)
327 { add (v); return *this; }
328 hb_bit_set_t& operator << (const hb_pair_t<hb_codepoint_t, hb_codepoint_t>& range)
329 { add_range (range.first, range.second); return *this; }
/* True iff any member falls in [first, last]: find the successor of first-1
 * and check it doesn't overshoot last. */
bool intersects (hb_codepoint_t first, hb_codepoint_t last) const
  hb_codepoint_t c = first - 1;
  return next (&c) && c <= last;
/* Make this set an exact copy of other (used by copy ctor/assignment). */
void set (const hb_bit_set_t &other)
  if (unlikely (!successful)) return;
  unsigned int count = other.pages.length;
  if (unlikely (!resize (count)))
  population = other.population;
  /* TODO switch to vector operator =. */
  /* Raw byte copies are valid: both element types are trivially copyable here. */
  hb_memcpy ((void *) pages, (const void *) other.pages, count * pages.item_size);
  hb_memcpy ((void *) page_map, (const void *) other.page_map, count * page_map.item_size);
/* Structural equality, tolerant of empty pages on either side. */
bool is_equal (const hb_bit_set_t &other) const
  /* Cheap rejection when both population caches are valid and differ. */
  if (has_population () && other.has_population () &&
      get_population () != other.get_population ())
  unsigned int na = pages.length;
  unsigned int nb = other.pages.length;
  /* Walk both page lists in parallel, skipping empty pages. */
  unsigned int a = 0, b = 0;
  for (; a < na && b < nb; )
    if (page_at (a).is_empty ()) { a++; continue; }
    if (other.page_at (b).is_empty ()) { b++; continue; }
    if (page_map[a].major != other.page_map[b].major ||
        !page_at (a).is_equal (other.page_at (b)))
  /* Any non-empty leftover page on either side breaks equality. */
  if (!page_at (a).is_empty ()) { return false; }
  if (!other.page_at (b).is_empty ()) { return false; }
/* True iff every member of this set is also in larger_set. */
bool is_subset (const hb_bit_set_t &larger_set) const
  /* A subset can never have a larger population (when both caches are valid,
   * equality of populations is required for the fast path; see continuation). */
  if (has_population () && larger_set.has_population () &&
      get_population () != larger_set.get_population ())
  /* spi tracks our pages; lpi tracks larger_set's pages. */
  for (uint32_t lpi = 0; spi < page_map.length && lpi < larger_set.page_map.length; lpi++)
    uint32_t spm = page_map[spi].major;
    uint32_t lpm = larger_set.page_map[lpi].major;
    auto sp = page_at (spi);
    auto lp = larger_set.page_at (lpi);
    /* A non-empty page of ours with no counterpart page => not a subset. */
    if (spm < lpm && !sp.is_empty ())
    if (!sp.is_subset (lp))
  /* Remaining pages of ours past larger_set's end must all be empty. */
  while (spi < page_map.length)
    if (!page_at (spi++).is_empty ())
/* Reserve the scratch vector compact() needs (one slot per page); false on OOM. */
bool allocate_compact_workspace (hb_vector_t<unsigned>& workspace)
  if (unlikely (!workspace.resize (pages.length)))
 * workspace should be a pre-sized vector allocated to hold at exactly pages.length
/* Repack pages so the first `length` page_map entries reference the first
 * `length` slots of pages, dropping unreferenced pages. */
void compact (hb_vector_t<unsigned>& workspace,
  assert(workspace.length == pages.length);
  hb_vector_t<unsigned>& old_index_to_page_map_index = workspace;
  /* 0xFFFFFFFF marks a page slot that no surviving page_map entry references. */
  hb_fill (old_index_to_page_map_index.writer(), 0xFFFFFFFF);
  for (unsigned i = 0; i < length; i++)
    old_index_to_page_map_index[page_map[i].index] = i;
  compact_pages (old_index_to_page_map_index);
/* Slide surviving pages to the front of pages and rewrite page_map indices. */
void compact_pages (const hb_vector_t<unsigned>& old_index_to_page_map_index)
  unsigned int write_index = 0;
  for (unsigned int i = 0; i < pages.length; i++)
    /* Skip pages no page_map entry points at (marked in compact()). */
    if (old_index_to_page_map_index[i] == 0xFFFFFFFF) continue;
    pages[write_index] = pages[i];
    page_map[old_index_to_page_map_index[i]].index = write_index;
/* Core binary set-operation driver: combine this set with other page-by-page
 * using the bitwise functor op, in place.  union_/intersect/subtract/
 * symmetric_difference all funnel through here. */
template <typename Op>
void process (const Op& op, const hb_bit_set_t &other)
  /* Probe op's truth table: does a page present only on the left survive? */
  const bool passthru_left = op (1, 0);
  /* ...and a page present only on the right? */
  const bool passthru_right = op (0, 1);
  if (unlikely (!successful)) return;
  unsigned int na = pages.length;
  unsigned int nb = other.pages.length;
  unsigned int next_page = na;
  unsigned int count = 0, newCount = 0;
  unsigned int a = 0, b = 0;
  unsigned int write_index = 0;
  // Pre-allocate the workspace that compact() will need so we can bail on allocation failure
  // before attempting to rewrite the page map.
  hb_vector_t<unsigned> compact_workspace;
  if (!passthru_left && unlikely (!allocate_compact_workspace (compact_workspace))) return;

  /* First pass: count result pages and (when left pages may be dropped)
   * compact surviving left entries toward the front. */
  for (; a < na && b < nb; )
    if (page_map[a].major == other.page_map[b].major)
      // Move page_map entries that we're keeping from the left side set
      // to the front of the page_map vector. This isn't necessary if
      // passthru_left is set since no left side pages will be removed
      page_map[write_index] = page_map[a];
    else if (page_map[a].major < other.page_map[b].major)
  next_page = write_index;
  compact (compact_workspace, write_index);
  /* Grow to the final page count before the backward merge. */
  if (unlikely (!resize (count)))

  /* Process in-place backward. */
  /* Second pass (back to front so sources aren't overwritten before read): */
  if (page_map[a - 1].major == other.page_map[b - 1].major)
    /* Page present in both: apply op to the two page bodies. */
    page_map[count] = page_map[a];
    page_at (count).v = op (page_at (a).v, other.page_at (b).v);
  else if (page_map[a - 1].major > other.page_map[b - 1].major)
    /* Left-only page: carried over as-is (only when passthru_left). */
    page_map[count] = page_map[a];
    /* Right-only page: allocate a fresh slot and copy other's page body. */
    page_map[count].major = other.page_map[b].major;
    page_map[count].index = next_page++;
    page_at (count).v = other.page_at (b).v;
  /* Tail: remaining left-only pages... */
  page_map[count] = page_map [a];
  /* ...then remaining right-only pages. */
  page_map[count].major = other.page_map[b].major;
  page_map[count].index = next_page++;
  page_at (count).v = other.page_at (b).v;
580 void union_ (const hb_bit_set_t &other) { process (hb_bitwise_or, other); }
581 void intersect (const hb_bit_set_t &other) { process (hb_bitwise_and, other); }
582 void subtract (const hb_bit_set_t &other) { process (hb_bitwise_gt, other); }
583 void symmetric_difference (const hb_bit_set_t &other) { process (hb_bitwise_xor, other); }
585 bool next (hb_codepoint_t *codepoint) const
587 // TODO: this should be merged with prev() as both implementations
589 if (unlikely (*codepoint == INVALID)) {
590 *codepoint = get_min ();
591 return *codepoint != INVALID;
594 const auto* page_map_array = page_map.arrayZ;
595 unsigned int major = get_major (*codepoint);
596 unsigned int i = last_page_lookup;
598 if (unlikely (i >= page_map.length || page_map_array[i].major != major))
600 page_map.bfind (major, &i, HB_NOT_FOUND_STORE_CLOSEST);
601 if (i >= page_map.length) {
602 *codepoint = INVALID;
607 const auto* pages_array = pages.arrayZ;
608 const page_map_t ¤t = page_map_array[i];
609 if (likely (current.major == major))
611 if (pages_array[current.index].next (codepoint))
613 *codepoint += current.major * page_t::PAGE_BITS;
614 last_page_lookup = i;
620 for (; i < page_map.length; i++)
622 const page_map_t ¤t = page_map.arrayZ[i];
623 hb_codepoint_t m = pages_array[current.index].get_min ();
626 *codepoint = current.major * page_t::PAGE_BITS + m;
627 last_page_lookup = i;
631 last_page_lookup = 0;
632 *codepoint = INVALID;
/* Move *codepoint to the previous member strictly less than its current value;
 * pass INVALID to start from the maximum.  False when exhausted. */
bool previous (hb_codepoint_t *codepoint) const
  if (unlikely (*codepoint == INVALID)) {
    *codepoint = get_max ();
    return *codepoint != INVALID;
  page_map_t map = {get_major (*codepoint), 0};
  /* STORE_CLOSEST leaves i usable as a scan start point even on a miss. */
  page_map.bfind (map, &i, HB_NOT_FOUND_STORE_CLOSEST);
  if (i < page_map.length && page_map[i].major == map.major)
    /* Look for a predecessor within the same page. */
    if (pages[page_map[i].index].previous (codepoint))
      *codepoint += page_map[i].major * page_t::PAGE_BITS;
  /* Otherwise scan earlier pages for the last one with any bit set. */
  for (; (int) i >= 0; i--)
    hb_codepoint_t m = pages[page_map[i].index].get_max ();
      *codepoint = page_map[i].major * page_t::PAGE_BITS + m;
  *codepoint = INVALID;
/* Find the next contiguous run of members after *last; extends the run by
 * walking next() while values stay consecutive. */
bool next_range (hb_codepoint_t *first, hb_codepoint_t *last) const
    *last = *first = INVALID;
  while (next (&i) && i == *last + 1)
/* Find the previous contiguous run of members before *first; extends the run
 * backward while values stay consecutive. */
bool previous_range (hb_codepoint_t *first, hb_codepoint_t *last) const
    *last = *first = INVALID;
  while (previous (&i) && i == *first - 1)
/* Population cache is valid unless dirty() set it to UINT_MAX. */
bool has_population () const { return population != UINT_MAX; }
/* Number of members; recomputed by summing per-page counts when cache is dirty. */
unsigned int get_population () const
  if (has_population ())
  unsigned int pop = 0;
  unsigned int count = pages.length;
  for (unsigned int i = 0; i < count; i++)
    pop += pages[i].get_population ();
/* Smallest member, scanning pages in major order (page_map is sorted). */
hb_codepoint_t get_min () const
  unsigned count = pages.length;
  for (unsigned i = 0; i < count; i++)
    const auto& map = page_map[i];
    const auto& page = pages[map.index];
    if (!page.is_empty ())
      return map.major * page_t::PAGE_BITS + page.get_min ();
/* Largest member, scanning pages from the highest major downward. */
hb_codepoint_t get_max () const
  unsigned count = pages.length;
  /* signed loop so the i >= 0 termination test works when count == 0. */
  for (signed i = count - 1; i >= 0; i--)
    const auto& map = page_map[(unsigned) i];
    const auto& page = pages[map.index];
    if (!page.is_empty ())
      return map.major * page_t::PAGE_BITS + page.get_max ();

/* Sentinel codepoint shared with the page type. */
static constexpr hb_codepoint_t INVALID = page_t::INVALID;
 * Iterator implementation.
/* Sorted forward/backward iterator over set members, driven by next()/previous(). */
struct iter_t : hb_iter_with_fallback_t<iter_t, hb_codepoint_t>
  static constexpr bool is_sorted_iterator = true;
  /* init=false builds the end() sentinel without priming v/l. */
  iter_t (const hb_bit_set_t &s_ = Null (hb_bit_set_t),
	  bool init = true) : s (&s_), v (INVALID), l(0)
    /* +1 so l reaches 0 only after the final element has been consumed. */
    l = s->get_population () + 1;
  typedef hb_codepoint_t __item_t__;
  hb_codepoint_t __item__ () const { return v; }
  bool __more__ () const { return v != INVALID; }
  void __next__ () { s->next (&v); if (l) l--; }
  void __prev__ () { s->previous (&v); }
  unsigned __len__ () const { return l; }
  iter_t end () const { return iter_t (*s, false); }
  bool operator != (const iter_t& o) const
  { return s != o.s || v != o.v; }
  const hb_bit_set_t *s; /* Borrowed; the set must outlive the iterator. */
iter_t iter () const { return iter_t (*this); }
operator iter_t () const { return iter (); }
/* Return the page holding codepoint g; with insert=true, create it (keeping
 * page_map sorted) if absent.  nullptr when absent (or allocation failed). */
page_t *page_for (hb_codepoint_t g, bool insert = false)
  /* Tentatively point the new entry at the slot resize() will append. */
  page_map_t map = {get_major (g), pages.length};
  if (!page_map.bfind (map, &i, HB_NOT_FOUND_STORE_CLOSEST))
    if (unlikely (!resize (pages.length + 1)))
    pages[map.index].init0 ();
    /* Shift later entries up to keep page_map sorted by major. */
    memmove (page_map + i + 1,
	     (page_map.length - 1 - i) * page_map.item_size);
  return &pages[page_map[i].index];
/* Read-only lookup of the page holding g; nullptr when no such page exists. */
const page_t *page_for (hb_codepoint_t g) const
  page_map_t key = {get_major (g)};
  const page_map_t *found = page_map.bsearch (key);
    return &pages[found->index];
810 page_t &page_at (unsigned int i) { return pages[page_map[i].index]; }
811 const page_t &page_at (unsigned int i) const { return pages[page_map[i].index]; }
812 unsigned int get_major (hb_codepoint_t g) const { return g / page_t::PAGE_BITS; }
813 hb_codepoint_t major_start (unsigned int major) const { return major * page_t::PAGE_BITS; }
817 #endif /* HB_BIT_SET_HH */