1 //! `FixedBitSet` is a simple fixed size set of bits.
2 //!
3 //! ### Crate features
4 //!
5 //! - `std` (default feature)
6 //! Disabling this feature removes the dependency on `std` and uses the `alloc` crate instead.
7 //!
8 //! ### SIMD Acceleration
9 //! `fixedbitset` is written with SIMD in mind. The backing store and set operations will use aligned SIMD data types and instructions when compiling
10 //! for compatible target platforms. The use of SIMD generally enables better performance in many set and batch operations (e.g. intersection/union/inserting a range).
11 //!
12 //! When SIMD is not available on the target, the crate gracefully falls back to a default implementation. Support for additional SIMD architectures
13 //! is planned once they appear in stable Rust.
14 //!
15 //! Currently only SSE2/AVX/AVX2 on x86/x86_64 and wasm32 SIMD are supported, as this is what stable Rust supports.
16 #![no_std]
17 #![deny(clippy::undocumented_unsafe_blocks)]
18
19 /// Local Android change: Use std to allow building as a dylib.
20 #[cfg(android_dylib)]
21 extern crate std;
22
23 extern crate alloc;
24 use alloc::{vec, vec::Vec};
25
26 mod block;
27 mod range;
28
29 #[cfg(feature = "serde")]
30 extern crate serde;
31 #[cfg(feature = "serde")]
32 mod serde_impl;
33
34 use core::fmt::Write;
35 use core::fmt::{Binary, Display, Error, Formatter};
36
37 use core::cmp::Ordering;
38 use core::hash::Hash;
39 use core::iter::{Chain, FusedIterator};
40 use core::mem::ManuallyDrop;
41 use core::mem::MaybeUninit;
42 use core::ops::{BitAnd, BitAndAssign, BitOr, BitOrAssign, BitXor, BitXorAssign, Index};
43 use core::ptr::NonNull;
44 pub use range::IndexRange;
45
46 pub(crate) const BITS: usize = core::mem::size_of::<Block>() * 8;
47 #[cfg(feature = "serde")]
48 pub(crate) const BYTES: usize = core::mem::size_of::<Block>();
49
50 use block::Block as SimdBlock;
51 pub type Block = usize;
52
53 #[inline]
54 fn div_rem(x: usize, denominator: usize) -> (usize, usize) {
55 (x / denominator, x % denominator)
56 }
57
58 fn vec_into_parts<T>(vec: Vec<T>) -> (NonNull<T>, usize, usize) {
59 let mut vec = ManuallyDrop::new(vec);
60 (
61 // SAFETY: A Vec's internal pointer is always non-null.
62 unsafe { NonNull::new_unchecked(vec.as_mut_ptr()) },
63 vec.capacity(),
64 vec.len(),
65 )
66 }
67
68 /// `FixedBitSet` is a simple fixed size set of bits that each can
69 /// be enabled (1 / **true**) or disabled (0 / **false**).
70 ///
71 /// The bit set has a fixed capacity in terms of enabling bits (and the
72 /// capacity can grow using the `grow` method).
73 ///
74 /// Derived traits depend on both the zeros and ones, so [0,1] is not equal to
75 /// [0,1,0].
76 #[derive(Debug, Eq)]
77 pub struct FixedBitSet {
78 pub(crate) data: NonNull<MaybeUninit<SimdBlock>>,
79 capacity: usize,
80 /// length in bits
81 pub(crate) length: usize,
82 }
83
84 // SAFETY: FixedBitSet contains no thread-local state and can be safely sent between threads.
85 unsafe impl Send for FixedBitSet {}
86 // SAFETY: FixedBitSet does not provide simultaneous unsynchronized mutable access to the
87 // underlying buffer.
88 unsafe impl Sync for FixedBitSet {}
89
90 impl FixedBitSet {
91 /// Create a new empty **FixedBitSet**.
92 pub const fn new() -> Self {
93 FixedBitSet {
94 data: NonNull::dangling(),
95 capacity: 0,
96 length: 0,
97 }
98 }
99
100 /// Create a new **FixedBitSet** with a specific number of bits,
101 /// all initially clear.
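///
/// For example, a freshly created set has the requested length and no set bits:
/// ```
/// # use fixedbitset::FixedBitSet;
/// let bitset = FixedBitSet::with_capacity(8);
/// assert_eq!(bitset.len(), 8);
/// assert!(bitset.is_clear());
/// ```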
102 pub fn with_capacity(bits: usize) -> Self {
103 let (mut blocks, rem) = div_rem(bits, SimdBlock::BITS);
104 blocks += (rem > 0) as usize;
105 Self::from_blocks_and_len(vec![SimdBlock::NONE; blocks], bits)
106 }
107
108 #[inline]
109 fn from_blocks_and_len(data: Vec<SimdBlock>, length: usize) -> Self {
110 let (data, capacity, _) = vec_into_parts(data);
111 FixedBitSet {
112 data: data.cast(),
113 capacity,
114 length,
115 }
116 }
117
118 /// Create a new **FixedBitSet** with a specific number of bits,
119 /// initialized from provided blocks.
120 ///
121 /// If the blocks are not the exact size needed for the capacity
122 /// they will be padded with zeros (if shorter) or truncated to
123 /// the capacity (if longer).
124 ///
125 /// For example:
126 /// ```
127 /// let data = vec![4];
128 /// let bs = fixedbitset::FixedBitSet::with_capacity_and_blocks(4, data);
129 /// assert_eq!(format!("{:b}", bs), "0010");
130 /// ```
131 pub fn with_capacity_and_blocks<I: IntoIterator<Item = Block>>(bits: usize, blocks: I) -> Self {
132 let mut bitset = Self::with_capacity(bits);
133 for (subblock, value) in bitset.as_mut_slice().iter_mut().zip(blocks.into_iter()) {
134 *subblock = value;
135 }
136 bitset
137 }
138
139 /// Grow capacity to **bits**, all new bits initialized to zero
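///
/// For example, growing preserves the bits that are already set:
/// ```
/// # use fixedbitset::FixedBitSet;
/// let mut bitset = FixedBitSet::with_capacity(4);
/// bitset.insert(2);
/// bitset.grow(8);
/// assert_eq!(bitset.len(), 8);
/// assert!(bitset.contains(2));
/// ```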
140 #[inline]
141 pub fn grow(&mut self, bits: usize) {
142 #[cold]
143 #[track_caller]
144 #[inline(never)]
145 fn do_grow(slf: &mut FixedBitSet, bits: usize) {
146 // SAFETY: The provided fill is initialized to NONE.
147 unsafe { slf.grow_inner(bits, MaybeUninit::new(SimdBlock::NONE)) };
148 }
149
150 if bits > self.length {
151 do_grow(self, bits);
152 }
153 }
154
155 /// # Safety
156 /// If `fill` is uninitialized, the memory must not be accessed and must be immediately
157 /// written over
158 #[inline(always)]
159 unsafe fn grow_inner(&mut self, bits: usize, fill: MaybeUninit<SimdBlock>) {
160 // SAFETY: The data pointer and capacity were created from a Vec initially. The block
161 // len is identical to that of the original.
162 let mut data = unsafe {
163 Vec::from_raw_parts(self.data.as_ptr(), self.simd_block_len(), self.capacity)
164 };
165 let (mut blocks, rem) = div_rem(bits, SimdBlock::BITS);
166 blocks += (rem > 0) as usize;
167 data.resize(blocks, fill);
168 let (data, capacity, _) = vec_into_parts(data);
169 self.data = data;
170 self.capacity = capacity;
171 self.length = bits;
172 }
173
174 #[inline]
175 unsafe fn get_unchecked(&self, subblock: usize) -> &Block {
176 &*self.data.as_ptr().cast::<Block>().add(subblock)
177 }
178
179 #[inline]
180 unsafe fn get_unchecked_mut(&mut self, subblock: usize) -> &mut Block {
181 &mut *self.data.as_ptr().cast::<Block>().add(subblock)
182 }
183
184 #[inline]
185 fn usize_len(&self) -> usize {
186 let (mut blocks, rem) = div_rem(self.length, BITS);
187 blocks += (rem > 0) as usize;
188 blocks
189 }
190
191 #[inline]
192 fn simd_block_len(&self) -> usize {
193 let (mut blocks, rem) = div_rem(self.length, SimdBlock::BITS);
194 blocks += (rem > 0) as usize;
195 blocks
196 }
197
198 #[inline]
199 fn batch_count_ones(blocks: impl IntoIterator<Item = Block>) -> usize {
200 blocks.into_iter().map(|x| x.count_ones() as usize).sum()
201 }
202
203 #[inline]
204 fn as_simd_slice(&self) -> &[SimdBlock] {
205 // SAFETY: The slice constructed is within bounds of the underlying allocation. This function
206 // is called with a read-only borrow so no other write can happen as long as the returned borrow lives.
207 unsafe { core::slice::from_raw_parts(self.data.as_ptr().cast(), self.simd_block_len()) }
208 }
209
210 #[inline]
211 fn as_mut_simd_slice(&mut self) -> &mut [SimdBlock] {
212 // SAFETY: The slice constructed is within bounds of the underlying allocation. This function
213 // is called with a mutable borrow so no other read or write can happen as long as the returned borrow lives.
214 unsafe { core::slice::from_raw_parts_mut(self.data.as_ptr().cast(), self.simd_block_len()) }
215 }
216
217 #[inline]
218 fn as_simd_slice_uninit(&self) -> &[MaybeUninit<SimdBlock>] {
219 // SAFETY: The slice constructed is within bounds of the underlying allocation. This function
220 // is called with a read-only borrow so no other write can happen as long as the returned borrow lives.
221 unsafe { core::slice::from_raw_parts(self.data.as_ptr(), self.simd_block_len()) }
222 }
223
224 #[inline]
225 fn as_mut_simd_slice_uninit(&mut self) -> &mut [MaybeUninit<SimdBlock>] {
226 // SAFETY: The slice constructed is within bounds of the underlying allocation. This function
227 // is called with a mutable borrow so no other read or write can happen as long as the returned borrow lives.
228 unsafe { core::slice::from_raw_parts_mut(self.data.as_ptr(), self.simd_block_len()) }
229 }
230
231 /// Grows the internal size of the bitset before inserting a bit
232 ///
233 /// Unlike `insert`, this cannot panic, but may allocate if the bit is outside of the existing buffer's range.
234 ///
235 /// This is faster than calling `grow` then `insert` in succession.
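///
/// For example, inserting past the current length grows the set first:
/// ```
/// # use fixedbitset::FixedBitSet;
/// let mut bitset = FixedBitSet::with_capacity(4);
/// bitset.grow_and_insert(10);
/// assert_eq!(bitset.len(), 11);
/// assert!(bitset.contains(10));
/// ```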
236 #[inline]
237 pub fn grow_and_insert(&mut self, bits: usize) {
238 self.grow(bits + 1);
239
240 let (blocks, rem) = div_rem(bits, BITS);
241 // SAFETY: The above grow ensures that the block is inside the Vec's allocation.
242 unsafe {
243 *self.get_unchecked_mut(blocks) |= 1 << rem;
244 }
245 }
246
247 /// The length of the [`FixedBitSet`] in bits.
248 ///
249 /// Note: `len` includes both set and unset bits.
250 /// ```
251 /// # use fixedbitset::FixedBitSet;
252 /// let bitset = FixedBitSet::with_capacity(10);
253 /// // there are 0 set bits, but 10 unset bits
254 /// assert_eq!(bitset.len(), 10);
255 /// ```
256 /// `len` does not return the count of set bits. For that, use
257 /// [`bitset.count_ones(..)`](FixedBitSet::count_ones) instead.
258 #[inline]
259 pub fn len(&self) -> usize {
260 self.length
261 }
262
263 /// `true` if the [`FixedBitSet`] is empty.
264 ///
265 /// Note that an "empty" `FixedBitSet` is a `FixedBitSet` with
266 /// no bits (meaning: its length is zero). If you want to check
267 /// if all bits are unset, use [`FixedBitSet::is_clear`].
268 ///
269 /// ```
270 /// # use fixedbitset::FixedBitSet;
271 /// let bitset = FixedBitSet::with_capacity(10);
272 /// assert!(!bitset.is_empty());
273 ///
274 /// let bitset = FixedBitSet::with_capacity(0);
275 /// assert!(bitset.is_empty());
276 /// ```
277 #[inline]
278 pub fn is_empty(&self) -> bool {
279 self.len() == 0
280 }
281
282 /// `true` if all bits in the [`FixedBitSet`] are unset.
283 ///
284 /// As opposed to [`FixedBitSet::is_empty`], which is `true` only for
285 /// sets without any bits, set or unset.
286 ///
287 /// ```
288 /// # use fixedbitset::FixedBitSet;
289 /// let mut bitset = FixedBitSet::with_capacity(10);
290 /// assert!(bitset.is_clear());
291 ///
292 /// bitset.insert(2);
293 /// assert!(!bitset.is_clear());
294 /// ```
295 ///
296 /// This is equivalent to [`bitset.count_ones(..) == 0`](FixedBitSet::count_ones).
297 #[inline]
298 pub fn is_clear(&self) -> bool {
299 self.as_simd_slice().iter().all(|block| block.is_empty())
300 }
301
302 /// Finds the lowest set bit in the bitset.
303 ///
304 /// Returns `None` if there aren't any set bits.
305 ///
306 /// ```
307 /// # use fixedbitset::FixedBitSet;
308 /// let mut bitset = FixedBitSet::with_capacity(10);
309 /// assert_eq!(bitset.minimum(), None);
310 ///
311 /// bitset.insert(2);
312 /// assert_eq!(bitset.minimum(), Some(2));
313 /// bitset.insert(8);
314 /// assert_eq!(bitset.minimum(), Some(2));
315 /// ```
316 #[inline]
317 pub fn minimum(&self) -> Option<usize> {
318 let (block_idx, block) = self
319 .as_simd_slice()
320 .iter()
321 .enumerate()
322 .find(|&(_, block)| !block.is_empty())?;
323 let mut inner = 0;
324 let mut trailing = 0;
325 for subblock in block.into_usize_array() {
326 if subblock != 0 {
327 trailing = subblock.trailing_zeros() as usize;
328 break;
329 } else {
330 inner += BITS;
331 }
332 }
333 Some(block_idx * SimdBlock::BITS + inner + trailing)
334 }
335
336 /// Finds the highest set bit in the bitset.
337 ///
338 /// Returns `None` if there aren't any set bits.
339 ///
340 /// ```
341 /// # use fixedbitset::FixedBitSet;
342 /// let mut bitset = FixedBitSet::with_capacity(10);
343 /// assert_eq!(bitset.maximum(), None);
344 ///
345 /// bitset.insert(8);
346 /// assert_eq!(bitset.maximum(), Some(8));
347 /// bitset.insert(2);
348 /// assert_eq!(bitset.maximum(), Some(8));
349 /// ```
350 #[inline]
351 pub fn maximum(&self) -> Option<usize> {
352 let (block_idx, block) = self
353 .as_simd_slice()
354 .iter()
355 .rev()
356 .enumerate()
357 .find(|&(_, block)| !block.is_empty())?;
358 let mut inner = 0;
359 let mut leading = 0;
360 for subblock in block.into_usize_array().iter().rev() {
361 if *subblock != 0 {
362 leading = subblock.leading_zeros() as usize;
363 break;
364 } else {
365 inner += BITS;
366 }
367 }
368 let max = self.simd_block_len() * SimdBlock::BITS;
369 Some(max - block_idx * SimdBlock::BITS - inner - leading - 1)
370 }
371
372 /// `true` if all bits in the [`FixedBitSet`] are set.
373 ///
374 /// ```
375 /// # use fixedbitset::FixedBitSet;
376 /// let mut bitset = FixedBitSet::with_capacity(10);
377 /// assert!(!bitset.is_full());
378 ///
379 /// bitset.insert_range(..);
380 /// assert!(bitset.is_full());
381 /// ```
382 ///
383 /// This is equivalent to [`bitset.count_ones(..) == bitset.len()`](FixedBitSet::count_ones).
384 #[inline]
385 pub fn is_full(&self) -> bool {
386 self.contains_all_in_range(..)
387 }
388
389 /// Return **true** if the bit is enabled in the **FixedBitSet**,
390 /// **false** otherwise.
391 ///
392 /// Note: bits outside the capacity are always disabled.
393 ///
394 /// Note: Also available with index syntax: `bitset[bit]`.
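///
/// For example:
/// ```
/// # use fixedbitset::FixedBitSet;
/// let mut bitset = FixedBitSet::with_capacity(10);
/// bitset.insert(3);
/// assert!(bitset.contains(3));
/// assert!(!bitset.contains(4));
/// // Bits outside the capacity are reported as disabled rather than panicking.
/// assert!(!bitset.contains(55));
/// ```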
395 #[inline]
396 pub fn contains(&self, bit: usize) -> bool {
397 (bit < self.length)
398 // SAFETY: The above check ensures that the block and bit are within bounds.
399 .then(|| unsafe { self.contains_unchecked(bit) })
400 .unwrap_or(false)
401 }
402
403 /// Return **true** if the bit is enabled in the **FixedBitSet**,
404 /// **false** otherwise.
405 ///
406 /// Note: unlike `contains`, calling this with an invalid `bit`
407 /// is undefined behavior.
408 ///
409 /// # Safety
410 /// `bit` must be less than `self.len()`
411 #[inline]
412 pub unsafe fn contains_unchecked(&self, bit: usize) -> bool {
413 let (block, i) = div_rem(bit, BITS);
414 (self.get_unchecked(block) & (1 << i)) != 0
415 }
416
417 /// Clear all bits.
418 #[inline]
419 pub fn clear(&mut self) {
420 for elt in self.as_mut_simd_slice().iter_mut() {
421 *elt = SimdBlock::NONE
422 }
423 }
424
425 /// Enable `bit`.
426 ///
427 /// **Panics** if **bit** is out of bounds.
428 #[inline]
429 pub fn insert(&mut self, bit: usize) {
430 assert!(
431 bit < self.length,
432 "insert at index {} exceeds fixedbitset size {}",
433 bit,
434 self.length
435 );
436 // SAFETY: The above assertion ensures that the block is inside the Vec's allocation.
437 unsafe {
438 self.insert_unchecked(bit);
439 }
440 }
441
442 /// Enable `bit` without any length checks.
443 ///
444 /// # Safety
445 /// `bit` must be less than `self.len()`
446 #[inline]
447 pub unsafe fn insert_unchecked(&mut self, bit: usize) {
448 let (block, i) = div_rem(bit, BITS);
449 // SAFETY: The caller's contract guarantees that the block is inside the Vec's allocation.
450 unsafe {
451 *self.get_unchecked_mut(block) |= 1 << i;
452 }
453 }
454
455 /// Disable `bit`.
456 ///
457 /// **Panics** if **bit** is out of bounds.
458 #[inline]
459 pub fn remove(&mut self, bit: usize) {
460 assert!(
461 bit < self.length,
462 "remove at index {} exceeds fixedbitset size {}",
463 bit,
464 self.length
465 );
466 // SAFETY: The above assertion ensures that the block is inside the Vec's allocation.
467 unsafe {
468 self.remove_unchecked(bit);
469 }
470 }
471
472 /// Disable `bit` without any bounds checking.
473 ///
474 /// # Safety
475 /// `bit` must be less than `self.len()`
476 #[inline]
477 pub unsafe fn remove_unchecked(&mut self, bit: usize) {
478 let (block, i) = div_rem(bit, BITS);
479 // SAFETY: The caller's contract guarantees that the block is inside the Vec's allocation.
480 unsafe {
481 *self.get_unchecked_mut(block) &= !(1 << i);
482 }
483 }
484
485 /// Enable `bit`, and return its previous value.
486 ///
487 /// **Panics** if **bit** is out of bounds.
488 #[inline]
489 pub fn put(&mut self, bit: usize) -> bool {
490 assert!(
491 bit < self.length,
492 "put at index {} exceeds fixedbitset size {}",
493 bit,
494 self.length
495 );
496 // SAFETY: The above assertion ensures that the block is inside the Vec's allocation.
497 unsafe { self.put_unchecked(bit) }
498 }
499
500 /// Enable `bit`, and return its previous value without doing any bounds checking.
501 ///
502 /// # Safety
503 /// `bit` must be less than `self.len()`
504 #[inline]
505 pub unsafe fn put_unchecked(&mut self, bit: usize) -> bool {
506 let (block, i) = div_rem(bit, BITS);
507 // SAFETY: The caller's contract guarantees that the block is inside the Vec's allocation.
508 unsafe {
509 let word = self.get_unchecked_mut(block);
510 let prev = *word & (1 << i) != 0;
511 *word |= 1 << i;
512 prev
513 }
514 }
515
516 /// Toggle `bit` (inverting its state).
517 ///
518 /// **Panics** if **bit** is out of bounds.
519 #[inline]
520 pub fn toggle(&mut self, bit: usize) {
521 assert!(
522 bit < self.length,
523 "toggle at index {} exceeds fixedbitset size {}",
524 bit,
525 self.length
526 );
527 // SAFETY: The above assertion ensures that the block is inside the Vec's allocation.
528 unsafe {
529 self.toggle_unchecked(bit);
530 }
531 }
532
533 /// Toggle `bit` (inverting its state) without any bounds checking.
534 ///
535 /// # Safety
536 /// `bit` must be less than `self.len()`
537 #[inline]
538 pub unsafe fn toggle_unchecked(&mut self, bit: usize) {
539 let (block, i) = div_rem(bit, BITS);
540 // SAFETY: The caller's contract guarantees that the block is inside the Vec's allocation.
541 unsafe {
542 *self.get_unchecked_mut(block) ^= 1 << i;
543 }
544 }
545
546 /// Sets a bit to the provided `enabled` value.
547 ///
548 /// **Panics** if **bit** is out of bounds.
549 #[inline]
550 pub fn set(&mut self, bit: usize, enabled: bool) {
551 assert!(
552 bit < self.length,
553 "set at index {} exceeds fixedbitset size {}",
554 bit,
555 self.length
556 );
557 // SAFETY: The above assertion ensures that the block is inside the Vec's allocation.
558 unsafe {
559 self.set_unchecked(bit, enabled);
560 }
561 }
562
563 /// Sets a bit to the provided `enabled` value without doing any bounds checking.
564 ///
565 /// # Safety
566 /// `bit` must be less than `self.len()`
567 #[inline]
568 pub unsafe fn set_unchecked(&mut self, bit: usize, enabled: bool) {
569 let (block, i) = div_rem(bit, BITS);
570 // SAFETY: The caller's contract guarantees that the block is inside the Vec's allocation.
571 let elt = unsafe { self.get_unchecked_mut(block) };
572 if enabled {
573 *elt |= 1 << i;
574 } else {
575 *elt &= !(1 << i);
576 }
577 }
578
579 /// Copies the boolean value of the `from` bit into the `to` bit.
580 ///
581 /// If `from` is out-of-bounds, `to` will be unset.
582 ///
583 /// **Panics** if **to** is out of bounds.
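///
/// For example, copying from an out-of-bounds bit clears the destination:
/// ```
/// # use fixedbitset::FixedBitSet;
/// let mut bitset = FixedBitSet::with_capacity(4);
/// bitset.insert(1);
/// bitset.copy_bit(1, 3);
/// assert!(bitset.contains(3));
/// bitset.copy_bit(9, 3);
/// assert!(!bitset.contains(3));
/// ```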
584 #[inline]
585 pub fn copy_bit(&mut self, from: usize, to: usize) {
586 assert!(
587 to < self.length,
588 "copy to index {} exceeds fixedbitset size {}",
589 to,
590 self.length
591 );
592 let enabled = self.contains(from);
593 // SAFETY: The above assertion ensures that the block is inside the Vec's allocation.
594 unsafe { self.set_unchecked(to, enabled) };
595 }
596
597 /// Copies the boolean value of the `from` bit into the `to` bit.
598 ///
599 /// Note: unlike `copy_bit`, calling this with an invalid `from`
600 /// is undefined behavior.
601 ///
602 /// # Safety
603 /// `from` and `to` must both be less than `self.len()`
604 #[inline]
605 pub unsafe fn copy_bit_unchecked(&mut self, from: usize, to: usize) {
606 // SAFETY: Caller must ensure that `from` is within bounds.
607 let enabled = self.contains_unchecked(from);
608 // SAFETY: Caller must ensure that `to` is within bounds.
609 self.set_unchecked(to, enabled);
610 }
611
612 /// Count the number of set bits in the given bit range.
613 ///
614 /// This function is potentially much faster than iterating over the set bits with `ones()` and counting them.
615 /// Use `..` to count the whole content of the bitset.
616 ///
617 /// **Panics** if the range extends past the end of the bitset.
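///
/// For example, counting over the whole set and over a sub-range:
/// ```
/// # use fixedbitset::FixedBitSet;
/// let mut bitset = FixedBitSet::with_capacity(10);
/// bitset.insert_range(2..6);
/// assert_eq!(bitset.count_ones(..), 4);
/// assert_eq!(bitset.count_ones(4..10), 2);
/// ```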
618 #[inline]
619 pub fn count_ones<T: IndexRange>(&self, range: T) -> usize {
620 Self::batch_count_ones(Masks::new(range, self.length).map(|(block, mask)| {
621 // SAFETY: Masks cannot return a block index that is out of range.
622 unsafe { *self.get_unchecked(block) & mask }
623 }))
624 }
625
626 /// Count the number of unset bits in the given bit range.
627 ///
628 /// This function is potentially much faster than iterating over the unset bits with `zeroes()` and counting them.
629 /// Use `..` to count the whole content of the bitset.
630 ///
631 /// **Panics** if the range extends past the end of the bitset.
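///
/// For example:
/// ```
/// # use fixedbitset::FixedBitSet;
/// let mut bitset = FixedBitSet::with_capacity(10);
/// bitset.insert_range(2..6);
/// assert_eq!(bitset.count_zeroes(..), 6);
/// ```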
632 #[inline]
633 pub fn count_zeroes<T: IndexRange>(&self, range: T) -> usize {
634 Self::batch_count_ones(Masks::new(range, self.length).map(|(block, mask)| {
635 // SAFETY: Masks cannot return a block index that is out of range.
636 unsafe { !*self.get_unchecked(block) & mask }
637 }))
638 }
639
640 /// Sets every bit in the given range to the given state (`enabled`)
641 ///
642 /// Use `..` to set the whole bitset.
643 ///
644 /// **Panics** if the range extends past the end of the bitset.
645 #[inline]
646 pub fn set_range<T: IndexRange>(&mut self, range: T, enabled: bool) {
647 if enabled {
648 self.insert_range(range);
649 } else {
650 self.remove_range(range);
651 }
652 }
653
654 /// Enables every bit in the given range.
655 ///
656 /// Use `..` to make the whole bitset ones.
657 ///
658 /// **Panics** if the range extends past the end of the bitset.
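///
/// For example:
/// ```
/// # use fixedbitset::FixedBitSet;
/// let mut bitset = FixedBitSet::with_capacity(8);
/// bitset.insert_range(3..6);
/// assert_eq!(bitset.ones().collect::<Vec<_>>(), vec![3, 4, 5]);
/// ```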
659 #[inline]
660 pub fn insert_range<T: IndexRange>(&mut self, range: T) {
661 for (block, mask) in Masks::new(range, self.length) {
662 // SAFETY: Masks cannot return a block index that is out of range.
663 let block = unsafe { self.get_unchecked_mut(block) };
664 *block |= mask;
665 }
666 }
667
668 /// Disables every bit in the given range.
669 ///
670 /// Use `..` to clear the whole bitset.
671 ///
672 /// **Panics** if the range extends past the end of the bitset.
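///
/// For example:
/// ```
/// # use fixedbitset::FixedBitSet;
/// let mut bitset = FixedBitSet::with_capacity(8);
/// bitset.insert_range(..);
/// bitset.remove_range(3..6);
/// assert_eq!(bitset.ones().collect::<Vec<_>>(), vec![0, 1, 2, 6, 7]);
/// ```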
673 #[inline]
674 pub fn remove_range<T: IndexRange>(&mut self, range: T) {
675 for (block, mask) in Masks::new(range, self.length) {
676 // SAFETY: Masks cannot return a block index that is out of range.
677 let block = unsafe { self.get_unchecked_mut(block) };
678 *block &= !mask;
679 }
680 }
681
682 /// Toggles (inverts) every bit in the given range.
683 ///
684 /// Use `..` to toggle the whole bitset.
685 ///
686 /// **Panics** if the range extends past the end of the bitset.
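///
/// For example:
/// ```
/// # use fixedbitset::FixedBitSet;
/// let mut bitset = FixedBitSet::with_capacity(6);
/// bitset.insert_range(0..3);
/// bitset.toggle_range(2..5);
/// assert_eq!(bitset.ones().collect::<Vec<_>>(), vec![0, 1, 3, 4]);
/// ```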
687 #[inline]
688 pub fn toggle_range<T: IndexRange>(&mut self, range: T) {
689 for (block, mask) in Masks::new(range, self.length) {
690 // SAFETY: Masks cannot return a block index that is out of range.
691 let block = unsafe { self.get_unchecked_mut(block) };
692 *block ^= mask;
693 }
694 }
695
696 /// Checks if the bitset contains every bit in the given range.
697 ///
698 /// **Panics** if the range extends past the end of the bitset.
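///
/// For example:
/// ```
/// # use fixedbitset::FixedBitSet;
/// let mut bitset = FixedBitSet::with_capacity(8);
/// bitset.insert_range(2..6);
/// assert!(bitset.contains_all_in_range(3..5));
/// assert!(!bitset.contains_all_in_range(..));
/// ```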
699 #[inline]
700 pub fn contains_all_in_range<T: IndexRange>(&self, range: T) -> bool {
701 for (block, mask) in Masks::new(range, self.length) {
702 // SAFETY: Masks cannot return a block index that is out of range.
703 let block = unsafe { self.get_unchecked(block) };
704 if block & mask != mask {
705 return false;
706 }
707 }
708 true
709 }
710
711 /// Checks if the bitset contains at least one set bit in the given range.
712 ///
713 /// **Panics** if the range extends past the end of the bitset.
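///
/// For example:
/// ```
/// # use fixedbitset::FixedBitSet;
/// let mut bitset = FixedBitSet::with_capacity(8);
/// bitset.insert(5);
/// assert!(bitset.contains_any_in_range(4..8));
/// assert!(!bitset.contains_any_in_range(0..5));
/// ```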
714 #[inline]
715 pub fn contains_any_in_range<T: IndexRange>(&self, range: T) -> bool {
716 for (block, mask) in Masks::new(range, self.length) {
717 // SAFETY: Masks cannot return a block index that is out of range.
718 let block = unsafe { self.get_unchecked(block) };
719 if block & mask != 0 {
720 return true;
721 }
722 }
723 false
724 }
725
726 /// View the bitset as a slice of `Block` blocks
727 #[inline]
728 pub fn as_slice(&self) -> &[Block] {
729 // SAFETY: The bits from both usize and Block are required to be reinterpretable, and
730 // neither have any padding or alignment issues. The slice constructed is within bounds
731 // of the underlying allocation. This function is called with a read-only borrow so
732 // no other write can happen as long as the returned borrow lives.
733 unsafe {
734 let ptr = self.data.as_ptr().cast::<Block>();
735 core::slice::from_raw_parts(ptr, self.usize_len())
736 }
737 }
738
739 /// View the bitset as a mutable slice of `Block` blocks. Writing past the bit length in the last block
740 /// will cause `contains` to return potentially incorrect results for bits past the bit length.
741 #[inline]
742 pub fn as_mut_slice(&mut self) -> &mut [Block] {
743 // SAFETY: The bits from both usize and Block are required to be reinterpretable, and
744 // neither have any padding or alignment issues. The slice constructed is within bounds
745 // of the underlying allocation. This function is called with a mutable borrow so
746 // no other read or write can happen as long as the returned borrow lives.
747 unsafe {
748 let ptr = self.data.as_ptr().cast::<Block>();
749 core::slice::from_raw_parts_mut(ptr, self.usize_len())
750 }
751 }
752
753 /// Iterates over all enabled bits.
754 ///
755 /// Iterator element is the index of the `1` bit, type `usize`.
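///
/// For example, the indices are produced in ascending order:
/// ```
/// # use fixedbitset::FixedBitSet;
/// let mut bitset = FixedBitSet::with_capacity(8);
/// bitset.insert(1);
/// bitset.insert(4);
/// assert_eq!(bitset.ones().collect::<Vec<_>>(), vec![1, 4]);
/// ```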
756 #[inline]
757 pub fn ones(&self) -> Ones {
758 match self.as_slice().split_first() {
759 Some((&first_block, rem)) => {
760 let (&last_block, rem) = rem.split_last().unwrap_or((&0, rem));
761 Ones {
762 bitset_front: first_block,
763 bitset_back: last_block,
764 block_idx_front: 0,
765 block_idx_back: (1 + rem.len()) * BITS,
766 remaining_blocks: rem.iter(),
767 }
768 }
769 None => Ones {
770 bitset_front: 0,
771 bitset_back: 0,
772 block_idx_front: 0,
773 block_idx_back: 0,
774 remaining_blocks: [].iter(),
775 },
776 }
777 }
778
779 /// Iterates over all enabled bits.
780 ///
781 /// Iterator element is the index of the `1` bit, type `usize`.
782 /// Unlike `ones`, this function consumes the `FixedBitSet`.
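///
/// For example:
/// ```
/// # use fixedbitset::FixedBitSet;
/// let mut bitset = FixedBitSet::with_capacity(8);
/// bitset.insert(2);
/// bitset.insert(5);
/// assert_eq!(bitset.into_ones().collect::<Vec<_>>(), vec![2, 5]);
/// ```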
783 pub fn into_ones(self) -> IntoOnes {
784 let ptr = self.data.as_ptr().cast();
785 let len = self.simd_block_len() * SimdBlock::USIZE_COUNT;
786 // SAFETY:
787 // - ptr comes from self.data, so it is valid;
788 // - self.data is valid for self.simd_block_len() SimdBlocks,
789 // which is exactly self.simd_block_len() * SimdBlock::USIZE_COUNT usizes;
790 // - we will keep this slice around only as long as self.data is,
791 // so it won't become dangling.
792 let slice = unsafe { core::slice::from_raw_parts(ptr, len) };
793 // SAFETY: The data pointer and capacity were created from a Vec initially. The block
794 // len is identical to that of the original.
795 let data: Vec<SimdBlock> = unsafe {
796 Vec::from_raw_parts(
797 self.data.as_ptr().cast(),
798 self.simd_block_len(),
799 self.capacity,
800 )
801 };
802 let mut iter = slice.iter().copied();
803
804 core::mem::forget(self);
805
806 IntoOnes {
807 bitset_front: iter.next().unwrap_or(0),
808 bitset_back: iter.next_back().unwrap_or(0),
809 block_idx_front: 0,
810 block_idx_back: len.saturating_sub(1) * BITS,
811 remaining_blocks: iter,
812 _buf: data,
813 }
814 }
815
816 /// Iterates over all disabled bits.
817 ///
818 /// Iterator element is the index of the `0` bit, type `usize`.
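///
/// For example:
/// ```
/// # use fixedbitset::FixedBitSet;
/// let mut bitset = FixedBitSet::with_capacity(4);
/// bitset.insert(1);
/// assert_eq!(bitset.zeroes().collect::<Vec<_>>(), vec![0, 2, 3]);
/// ```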
819 #[inline]
820 pub fn zeroes(&self) -> Zeroes {
821 match self.as_slice().split_first() {
822 Some((&block, rem)) => Zeroes {
823 bitset: !block,
824 block_idx: 0,
825 len: self.len(),
826 remaining_blocks: rem.iter(),
827 },
828 None => Zeroes {
829 bitset: !0,
830 block_idx: 0,
831 len: self.len(),
832 remaining_blocks: [].iter(),
833 },
834 }
835 }
836
837 /// Returns a lazy iterator over the intersection of two `FixedBitSet`s
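///
/// For example:
/// ```
/// # use fixedbitset::FixedBitSet;
/// let a = FixedBitSet::with_capacity_and_blocks(8, vec![0b0110]);
/// let b = FixedBitSet::with_capacity_and_blocks(8, vec![0b0011]);
/// assert_eq!(a.intersection(&b).collect::<Vec<_>>(), vec![1]);
/// ```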
838 pub fn intersection<'a>(&'a self, other: &'a FixedBitSet) -> Intersection<'a> {
839 Intersection {
840 iter: self.ones(),
841 other,
842 }
843 }
844
845 /// Returns a lazy iterator over the union of two `FixedBitSet`s.
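///
/// For example, collecting and sorting the union for comparison:
/// ```
/// # use fixedbitset::FixedBitSet;
/// let a = FixedBitSet::with_capacity_and_blocks(8, vec![0b0110]);
/// let b = FixedBitSet::with_capacity_and_blocks(8, vec![0b0011]);
/// let mut v: Vec<usize> = a.union(&b).collect();
/// v.sort();
/// assert_eq!(v, vec![0, 1, 2]);
/// ```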
846 pub fn union<'a>(&'a self, other: &'a FixedBitSet) -> Union<'a> {
847 Union {
848 iter: self.ones().chain(other.difference(self)),
849 }
850 }
851
852 /// Returns a lazy iterator over the difference of two `FixedBitSet`s. The difference of `a`
853 /// and `b` is the elements of `a` which are not in `b`.
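///
/// For example:
/// ```
/// # use fixedbitset::FixedBitSet;
/// let a = FixedBitSet::with_capacity_and_blocks(8, vec![0b0110]);
/// let b = FixedBitSet::with_capacity_and_blocks(8, vec![0b0011]);
/// assert_eq!(a.difference(&b).collect::<Vec<_>>(), vec![2]);
/// ```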
854 pub fn difference<'a>(&'a self, other: &'a FixedBitSet) -> Difference<'a> {
855 Difference {
856 iter: self.ones(),
857 other,
858 }
859 }
860
861 /// Returns a lazy iterator over the symmetric difference of two `FixedBitSet`s.
862 /// The symmetric difference of `a` and `b` is the set of elements that are in exactly one of the two sets.
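///
/// For example, collecting and sorting the result for comparison:
/// ```
/// # use fixedbitset::FixedBitSet;
/// let a = FixedBitSet::with_capacity_and_blocks(8, vec![0b0110]);
/// let b = FixedBitSet::with_capacity_and_blocks(8, vec![0b0011]);
/// let mut v: Vec<usize> = a.symmetric_difference(&b).collect();
/// v.sort();
/// assert_eq!(v, vec![0, 2]);
/// ```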
863 pub fn symmetric_difference<'a>(&'a self, other: &'a FixedBitSet) -> SymmetricDifference<'a> {
864 SymmetricDifference {
865 iter: self.difference(other).chain(other.difference(self)),
866 }
867 }
868
869 /// In-place union of two `FixedBitSet`s.
870 ///
871 /// On calling this method, `self`'s capacity may be increased to match `other`'s.
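///
/// For example, `self` grows to cover `other`:
/// ```
/// # use fixedbitset::FixedBitSet;
/// let mut a = FixedBitSet::with_capacity(4);
/// a.insert(1);
/// let mut b = FixedBitSet::with_capacity(8);
/// b.insert(6);
/// a.union_with(&b);
/// assert_eq!(a.len(), 8);
/// assert!(a.contains(1) && a.contains(6));
/// ```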
872 pub fn union_with(&mut self, other: &FixedBitSet) {
873 if other.len() >= self.len() {
874 self.grow(other.len());
875 }
876 self.as_mut_simd_slice()
877 .iter_mut()
878 .zip(other.as_simd_slice().iter())
879 .for_each(|(x, y)| *x |= *y);
880 }
881
882 /// In-place intersection of two `FixedBitSet`s.
883 ///
884 /// On calling this method, `self`'s capacity will remain the same as before.
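///
/// For example:
/// ```
/// # use fixedbitset::FixedBitSet;
/// let mut a = FixedBitSet::with_capacity(8);
/// a.insert(1);
/// a.insert(6);
/// let mut b = FixedBitSet::with_capacity(8);
/// b.insert(6);
/// a.intersect_with(&b);
/// assert_eq!(a.ones().collect::<Vec<_>>(), vec![6]);
/// ```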
885 pub fn intersect_with(&mut self, other: &FixedBitSet) {
886 let me = self.as_mut_simd_slice();
887 let other = other.as_simd_slice();
888 me.iter_mut().zip(other.iter()).for_each(|(x, y)| {
889 *x &= *y;
890 });
891 let mn = core::cmp::min(me.len(), other.len());
892 for wd in &mut me[mn..] {
893 *wd = SimdBlock::NONE;
894 }
895 }
896
897 /// In-place difference of two `FixedBitSet`s.
898 ///
899 /// On calling this method, `self`'s capacity will remain the same as before.
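///
/// For example:
/// ```
/// # use fixedbitset::FixedBitSet;
/// let mut a = FixedBitSet::with_capacity(8);
/// a.insert_range(0..4);
/// let mut b = FixedBitSet::with_capacity(8);
/// b.insert_range(2..6);
/// a.difference_with(&b);
/// assert_eq!(a.ones().collect::<Vec<_>>(), vec![0, 1]);
/// ```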
900 pub fn difference_with(&mut self, other: &FixedBitSet) {
901 self.as_mut_simd_slice()
902 .iter_mut()
903 .zip(other.as_simd_slice().iter())
904 .for_each(|(x, y)| {
905 *x &= !*y;
906 });
907
908 // There's no need to grow self or do any other adjustments.
909 //
910 // * If self is longer than other, the bits at the end of self won't be affected since other
911 // has them implicitly set to 0.
912 // * If other is longer than self, the bits at the end of other are irrelevant since self
913 // has them set to 0 anyway.
914 }
915
916 /// In-place symmetric difference of two `FixedBitSet`s.
917 ///
918 /// On calling this method, `self`'s capacity may be increased to match `other`'s.
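///
/// For example:
/// ```
/// # use fixedbitset::FixedBitSet;
/// let mut a = FixedBitSet::with_capacity(8);
/// a.insert_range(0..4);
/// let mut b = FixedBitSet::with_capacity(8);
/// b.insert_range(2..6);
/// a.symmetric_difference_with(&b);
/// assert_eq!(a.ones().collect::<Vec<_>>(), vec![0, 1, 4, 5]);
/// ```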
919 pub fn symmetric_difference_with(&mut self, other: &FixedBitSet) {
920 if other.len() >= self.len() {
921 self.grow(other.len());
922 }
923 self.as_mut_simd_slice()
924 .iter_mut()
925 .zip(other.as_simd_slice().iter())
926 .for_each(|(x, y)| {
927 *x ^= *y;
928 });
929 }
930
931 /// Computes how many bits would be set in the union between two bitsets.
932 ///
933 /// This is potentially much faster than using `union(other).count()`. Unlike
934 /// using [`union_with`] followed by [`count_ones`], this
935 /// does not mutate in place or require separate allocations.
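///
/// For example, the result matches counting the lazy iterator:
/// ```
/// # use fixedbitset::FixedBitSet;
/// let a = FixedBitSet::with_capacity_and_blocks(8, vec![0b0110]);
/// let b = FixedBitSet::with_capacity_and_blocks(8, vec![0b0011]);
/// assert_eq!(a.union_count(&b), 3);
/// assert_eq!(a.union_count(&b), a.union(&b).count());
/// ```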
936 #[inline]
937 pub fn union_count(&self, other: &FixedBitSet) -> usize {
938 let me = self.as_slice();
939 let other = other.as_slice();
940 let count = Self::batch_count_ones(me.iter().zip(other.iter()).map(|(x, y)| (*x | *y)));
941 match other.len().cmp(&me.len()) {
942 Ordering::Greater => count + Self::batch_count_ones(other[me.len()..].iter().copied()),
943 Ordering::Less => count + Self::batch_count_ones(me[other.len()..].iter().copied()),
944 Ordering::Equal => count,
945 }
946 }
947
948 /// Computes how many bits would be set in the intersection between two bitsets.
949 ///
950 /// This is potentially much faster than using `intersection(other).count()`. Unlike
951 /// using [`intersect_with`] followed by [`count_ones`], this
952 /// does not mutate in place or require separate allocations.
953 #[inline]
954 pub fn intersection_count(&self, other: &FixedBitSet) -> usize {
955 Self::batch_count_ones(
956 self.as_slice()
957 .iter()
958 .zip(other.as_slice())
959 .map(|(x, y)| (*x & *y)),
960 )
961 }
962
963 /// Computes how many bits would be set in the difference between two bitsets.
964 ///
965 /// This is potentially much faster than using `difference(other).count()`. Unlike
966 /// using [`difference_with`] followed by [`count_ones`], this
967 /// does not mutate in place or require separate allocations.
968 #[inline]
969 pub fn difference_count(&self, other: &FixedBitSet) -> usize {
970 Self::batch_count_ones(
971 self.as_slice()
972 .iter()
973 .zip(other.as_slice().iter())
974 .map(|(x, y)| (*x & !*y)),
975 )
976 }
977
978 /// Computes how many bits would be set in the symmetric difference between two bitsets.
979 ///
980 /// This is potentially much faster than using `symmetric_difference(other).count()`. Unlike
981 /// using [`symmetric_difference_with`] followed by [`count_ones`], this
982 /// does not mutate in place or require separate allocations.
983 #[inline]
984 pub fn symmetric_difference_count(&self, other: &FixedBitSet) -> usize {
985 let me = self.as_slice();
986 let other = other.as_slice();
987 let count = Self::batch_count_ones(me.iter().zip(other.iter()).map(|(x, y)| (*x ^ *y)));
988 match other.len().cmp(&me.len()) {
989 Ordering::Greater => count + Self::batch_count_ones(other[me.len()..].iter().copied()),
990 Ordering::Less => count + Self::batch_count_ones(me[other.len()..].iter().copied()),
991 Ordering::Equal => count,
992 }
993 }
994
995 /// Returns `true` if `self` has no elements in common with `other`. This
996 /// is equivalent to checking for an empty intersection.
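///
/// For example:
/// ```
/// # use fixedbitset::FixedBitSet;
/// let mut a = FixedBitSet::with_capacity(8);
/// a.insert(1);
/// let mut b = FixedBitSet::with_capacity(8);
/// b.insert(2);
/// assert!(a.is_disjoint(&b));
/// b.insert(1);
/// assert!(!a.is_disjoint(&b));
/// ```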
997 pub fn is_disjoint(&self, other: &FixedBitSet) -> bool {
998 self.as_simd_slice()
999 .iter()
1000 .zip(other.as_simd_slice())
1001 .all(|(x, y)| (*x & *y).is_empty())
1002 }
1003
1004 /// Returns `true` if the set is a subset of another, i.e. `other` contains
1005 /// at least all the values in `self`.
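///
/// For example:
/// ```
/// # use fixedbitset::FixedBitSet;
/// let mut a = FixedBitSet::with_capacity(8);
/// a.insert(1);
/// let mut b = FixedBitSet::with_capacity(8);
/// b.insert(1);
/// b.insert(4);
/// assert!(a.is_subset(&b));
/// assert!(!b.is_subset(&a));
/// ```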
1006 pub fn is_subset(&self, other: &FixedBitSet) -> bool {
1007 let me = self.as_simd_slice();
1008 let other = other.as_simd_slice();
1009 me.iter()
1010 .zip(other.iter())
1011 .all(|(x, y)| x.andnot(*y).is_empty())
1012 && me.iter().skip(other.len()).all(|x| x.is_empty())
1013 }
1014
1015 /// Returns `true` if the set is a superset of another, i.e. `self` contains
1016 /// at least all the values in `other`.
1017 pub fn is_superset(&self, other: &FixedBitSet) -> bool {
1018 other.is_subset(self)
1019 }
1020 }
1021
1022 impl Hash for FixedBitSet {
1023 fn hash<H: core::hash::Hasher>(&self, state: &mut H) {
1024 self.length.hash(state);
1025 self.as_simd_slice().hash(state);
1026 }
1027 }
1028
1029 impl PartialEq for FixedBitSet {
1030 fn eq(&self, other: &Self) -> bool {
1031 self.length == other.length && self.as_simd_slice().eq(other.as_simd_slice())
1032 }
1033 }
1034
1035 impl PartialOrd for FixedBitSet {
1036 fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
1037 Some(self.cmp(other))
1038 }
1039 }
1040
1041 impl Ord for FixedBitSet {
1042 fn cmp(&self, other: &Self) -> Ordering {
1043 self.length
1044 .cmp(&other.length)
1045 .then_with(|| self.as_simd_slice().cmp(other.as_simd_slice()))
1046 }
1047 }
1048
1049 impl Default for FixedBitSet {
1050 fn default() -> Self {
1051 Self::new()
1052 }
1053 }
1054
1055 impl Drop for FixedBitSet {
1056 fn drop(&mut self) {
1057 // SAFETY: The data pointer and capacity were created from a Vec initially. The block
1058 // len is identical to that of the original.
1059 drop(unsafe {
1060 Vec::from_raw_parts(self.data.as_ptr(), self.simd_block_len(), self.capacity)
1061 });
1062 }
1063 }
1064
1065 impl Binary for FixedBitSet {
1066 fn fmt(&self, f: &mut Formatter<'_>) -> Result<(), Error> {
1067 if f.alternate() {
1068 f.write_str("0b")?;
1069 }
1070
1071 for i in 0..self.length {
1072 if self[i] {
1073 f.write_char('1')?;
1074 } else {
1075 f.write_char('0')?;
1076 }
1077 }
1078
1079 Ok(())
1080 }
1081 }
1082
1083 impl Display for FixedBitSet {
1084 fn fmt(&self, f: &mut Formatter<'_>) -> Result<(), Error> {
1085 Binary::fmt(&self, f)
1086 }
1087 }
1088
1089 /// An iterator producing elements in the difference of two sets.
1090 ///
1091 /// This struct is created by the [`FixedBitSet::difference`] method.
1092 pub struct Difference<'a> {
1093 iter: Ones<'a>,
1094 other: &'a FixedBitSet,
1095 }
1096
1097 impl<'a> Iterator for Difference<'a> {
1098 type Item = usize;
1099
1100 #[inline]
1101 fn next(&mut self) -> Option<Self::Item> {
1102 self.iter.by_ref().find(|&nxt| !self.other.contains(nxt))
1103 }
1104
1105 #[inline]
1106 fn size_hint(&self) -> (usize, Option<usize>) {
1107 self.iter.size_hint()
1108 }
1109 }
1110
1111 impl<'a> DoubleEndedIterator for Difference<'a> {
1112 fn next_back(&mut self) -> Option<Self::Item> {
1113 self.iter
1114 .by_ref()
1115 .rev()
1116 .find(|&nxt| !self.other.contains(nxt))
1117 }
1118 }
1119
1120 // Difference will continue to return None once it first returns None.
1121 impl<'a> FusedIterator for Difference<'a> {}
1122
1123 /// An iterator producing elements in the symmetric difference of two sets.
1124 ///
1125 /// This struct is created by the [`FixedBitSet::symmetric_difference`] method.
1126 pub struct SymmetricDifference<'a> {
1127 iter: Chain<Difference<'a>, Difference<'a>>,
1128 }
1129
1130 impl<'a> Iterator for SymmetricDifference<'a> {
1131 type Item = usize;
1132
1133 #[inline]
1134 fn next(&mut self) -> Option<Self::Item> {
1135 self.iter.next()
1136 }
1137
1138 #[inline]
1139 fn size_hint(&self) -> (usize, Option<usize>) {
1140 self.iter.size_hint()
1141 }
1142 }
1143
1144 impl<'a> DoubleEndedIterator for SymmetricDifference<'a> {
1145 fn next_back(&mut self) -> Option<Self::Item> {
1146 self.iter.next_back()
1147 }
1148 }
1149
1150 // SymmetricDifference will continue to return None once it first returns None.
1151 impl<'a> FusedIterator for SymmetricDifference<'a> {}
1152
1153 /// An iterator producing elements in the intersection of two sets.
1154 ///
1155 /// This struct is created by the [`FixedBitSet::intersection`] method.
1156 pub struct Intersection<'a> {
1157 iter: Ones<'a>,
1158 other: &'a FixedBitSet,
1159 }
1160
1161 impl<'a> Iterator for Intersection<'a> {
1162 type Item = usize; // the bit position of the '1'
1163
1164 #[inline]
1165 fn next(&mut self) -> Option<Self::Item> {
1166 self.iter.by_ref().find(|&nxt| self.other.contains(nxt))
1167 }
1168
1169 #[inline]
1170 fn size_hint(&self) -> (usize, Option<usize>) {
1171 self.iter.size_hint()
1172 }
1173 }
1174
1175 impl<'a> DoubleEndedIterator for Intersection<'a> {
1176 fn next_back(&mut self) -> Option<Self::Item> {
1177 self.iter
1178 .by_ref()
1179 .rev()
1180 .find(|&nxt| self.other.contains(nxt))
1181 }
1182 }
1183
1184 // Intersection will continue to return None once it first returns None.
1185 impl<'a> FusedIterator for Intersection<'a> {}
1186
1187 /// An iterator producing elements in the union of two sets.
1188 ///
1189 /// This struct is created by the [`FixedBitSet::union`] method.
1190 pub struct Union<'a> {
1191 iter: Chain<Ones<'a>, Difference<'a>>,
1192 }
1193
1194 impl<'a> Iterator for Union<'a> {
1195 type Item = usize;
1196
1197 #[inline]
1198 fn next(&mut self) -> Option<Self::Item> {
1199 self.iter.next()
1200 }
1201
1202 #[inline]
1203 fn size_hint(&self) -> (usize, Option<usize>) {
1204 self.iter.size_hint()
1205 }
1206 }
1207
1208 impl<'a> DoubleEndedIterator for Union<'a> {
1209 fn next_back(&mut self) -> Option<Self::Item> {
1210 self.iter.next_back()
1211 }
1212 }
1213
1214 // Union will continue to return None once it first returns None.
1215 impl<'a> FusedIterator for Union<'a> {}
1216
1217 struct Masks {
1218 first_block: usize,
1219 first_mask: usize,
1220 last_block: usize,
1221 last_mask: usize,
1222 }
1223
1224 impl Masks {
1225 #[inline]
1226 fn new<T: IndexRange>(range: T, length: usize) -> Masks {
1227 let start = range.start().unwrap_or(0);
1228 let end = range.end().unwrap_or(length);
1229 assert!(
1230 start <= end && end <= length,
1231 "invalid range {}..{} for a fixedbitset of size {}",
1232 start,
1233 end,
1234 length
1235 );
1236
1237 let (first_block, first_rem) = div_rem(start, BITS);
1238 let (last_block, last_rem) = div_rem(end, BITS);
1239
1240 Masks {
1241 first_block,
1242 first_mask: usize::MAX << first_rem,
1243 last_block,
1244 last_mask: (usize::MAX >> 1) >> (BITS - last_rem - 1),
1245 // this is equivalent to `MAX >> (BITS - x)` with correct semantics when x == 0.
1246 }
1247 }
1248 }
1249
1250 impl Iterator for Masks {
1251 type Item = (usize, usize);
1252
1253 #[inline]
1254 fn next(&mut self) -> Option<Self::Item> {
1255 match self.first_block.cmp(&self.last_block) {
1256 Ordering::Less => {
1257 let res = (self.first_block, self.first_mask);
1258 self.first_block += 1;
1259 self.first_mask = !0;
1260 Some(res)
1261 }
1262 Ordering::Equal => {
1263 let mask = self.first_mask & self.last_mask;
1264 let res = if mask == 0 {
1265 None
1266 } else {
1267 Some((self.first_block, mask))
1268 };
1269 self.first_block += 1;
1270 res
1271 }
1272 Ordering::Greater => None,
1273 }
1274 }
1275
1276 #[inline]
1277 fn size_hint(&self) -> (usize, Option<usize>) {
1278 (self.first_block..=self.last_block).size_hint()
1279 }
1280 }
1281
1282 // Masks will continue to return None once it first returns None.
1283 impl FusedIterator for Masks {}
1284
1285 // Masks's size_hint implementation is exact. It never returns an
1286 // unbounded value and always returns an exact number of values.
1287 impl ExactSizeIterator for Masks {}
1288
1289 /// An iterator producing the indices of the set bits in a set.
1290 ///
1291 /// This struct is created by the [`FixedBitSet::ones`] method.
1292 pub struct Ones<'a> {
1293 bitset_front: usize,
1294 bitset_back: usize,
1295 block_idx_front: usize,
1296 block_idx_back: usize,
1297 remaining_blocks: core::slice::Iter<'a, usize>,
1298 }
1299
1300 impl<'a> Ones<'a> {
1301 #[inline]
1302 pub fn last_positive_bit_and_unset(n: &mut usize) -> usize {
1303 // Find the last set bit using x & -x
1304 let last_bit = *n & n.wrapping_neg();
1305
1306 // Find the position of the last set bit
1307 let position = last_bit.trailing_zeros();
1308
1309 // Unset the last set bit
1310 *n &= *n - 1;
1311
1312 position as usize
1313 }
1314
1315 #[inline]
1316 fn first_positive_bit_and_unset(n: &mut usize) -> usize {
1317 /* Identify the first non zero bit */
1318 let bit_idx = n.leading_zeros();
1319
1320 /* set that bit to zero */
1321 let mask = !((1_usize) << (BITS as u32 - bit_idx - 1));
1322 n.bitand_assign(mask);
1323
1324 bit_idx as usize
1325 }
1326 }
1327
1328 impl<'a> DoubleEndedIterator for Ones<'a> {
1329 fn next_back(&mut self) -> Option<Self::Item> {
1330 while self.bitset_back == 0 {
1331 match self.remaining_blocks.next_back() {
1332 None => {
1333 if self.bitset_front != 0 {
1334 self.bitset_back = 0;
1335 self.block_idx_back = self.block_idx_front;
1336 return Some(
1337 self.block_idx_front + BITS
1338 - Self::first_positive_bit_and_unset(&mut self.bitset_front)
1339 - 1,
1340 );
1341 } else {
1342 return None;
1343 }
1344 }
1345 Some(next_block) => {
1346 self.bitset_back = *next_block;
1347 self.block_idx_back -= BITS;
1348 }
1349 };
1350 }
1351
1352 Some(
1353 self.block_idx_back - Self::first_positive_bit_and_unset(&mut self.bitset_back) + BITS
1354 - 1,
1355 )
1356 }
1357 }
1358
1359 impl<'a> Iterator for Ones<'a> {
1360 type Item = usize; // the bit position of the '1'
1361
1362 #[inline]
1363 fn next(&mut self) -> Option<Self::Item> {
1364 while self.bitset_front == 0 {
1365 match self.remaining_blocks.next() {
1366 Some(next_block) => {
1367 self.bitset_front = *next_block;
1368 self.block_idx_front += BITS;
1369 }
1370 None => {
1371 if self.bitset_back != 0 {
1372 // not needed for iteration, but for size_hint
1373 self.block_idx_front = self.block_idx_back;
1374 self.bitset_front = 0;
1375
1376 return Some(
1377 self.block_idx_back
1378 + Self::last_positive_bit_and_unset(&mut self.bitset_back),
1379 );
1380 } else {
1381 return None;
1382 }
1383 }
1384 };
1385 }
1386
1387 Some(self.block_idx_front + Self::last_positive_bit_and_unset(&mut self.bitset_front))
1388 }
1389
1390 #[inline]
1391 fn size_hint(&self) -> (usize, Option<usize>) {
1392 (
1393 0,
1394 (Some(self.block_idx_back - self.block_idx_front + 2 * BITS)),
1395 )
1396 }
1397 }
1398
1399 // Ones will continue to return None once it first returns None.
1400 impl<'a> FusedIterator for Ones<'a> {}
1401
1402 /// An iterator producing the indices of the unset bits in a set.
1403 ///
1404 /// This struct is created by the [`FixedBitSet::zeroes`] method.
1405 pub struct Zeroes<'a> {
1406 bitset: usize,
1407 block_idx: usize,
1408 len: usize,
1409 remaining_blocks: core::slice::Iter<'a, usize>,
1410 }
1411
1412 impl<'a> Iterator for Zeroes<'a> {
1413 type Item = usize; // the bit position of the '1'
1414
1415 #[inline]
1416 fn next(&mut self) -> Option<Self::Item> {
1417 while self.bitset == 0 {
1418 self.bitset = !*self.remaining_blocks.next()?;
1419 self.block_idx += BITS;
1420 }
1421 let t = self.bitset & (0_usize).wrapping_sub(self.bitset);
1422 let r = self.bitset.trailing_zeros() as usize;
1423 self.bitset ^= t;
1424 let bit = self.block_idx + r;
1425 // The remaining zeroes beyond the length of the bitset must be excluded.
1426 if bit < self.len {
1427 Some(bit)
1428 } else {
1429 None
1430 }
1431 }
1432
1433 #[inline]
1434 fn size_hint(&self) -> (usize, Option<usize>) {
1435 (0, Some(self.len))
1436 }
1437 }
1438
1439 // Zeroes will stop returning Some when exhausted.
1440 impl<'a> FusedIterator for Zeroes<'a> {}
1441
1442 impl Clone for FixedBitSet {
1443 #[inline]
1444 fn clone(&self) -> Self {
1445 Self::from_blocks_and_len(Vec::from(self.as_simd_slice()), self.length)
1446 }
1447
1448 #[inline]
1449 fn clone_from(&mut self, source: &Self) {
1450 if self.length < source.length {
1451 // SAFETY: `fill` is uninitialized, but is immediately initialized from `source`.
1452 unsafe { self.grow_inner(source.length, MaybeUninit::uninit()) };
1453 }
1454 let me = self.as_mut_simd_slice_uninit();
1455 let them = source.as_simd_slice_uninit();
1456 match me.len().cmp(&them.len()) {
1457 Ordering::Greater => {
1458 let (head, tail) = me.split_at_mut(them.len());
1459 head.copy_from_slice(them);
1460 tail.fill(MaybeUninit::new(SimdBlock::NONE));
1461 }
1462 Ordering::Equal => me.copy_from_slice(them),
1463 // The grow_inner above ensures that self is at least as large as source,
1464 // so this branch is unreachable.
1465 Ordering::Less => {}
1466 }
1467 self.length = source.length;
1468 }
1469 }
1470
1471 /// Return **true** if the bit is enabled in the bitset,
1472 /// or **false** otherwise.
1473 ///
1474 /// Note: bits outside the capacity are always disabled, and thus
1475 /// indexing a FixedBitSet will not panic.
1476 impl Index<usize> for FixedBitSet {
1477 type Output = bool;
1478
1479 #[inline]
1480 fn index(&self, bit: usize) -> &bool {
1481 if self.contains(bit) {
1482 &true
1483 } else {
1484 &false
1485 }
1486 }
1487 }
1488
1489 /// Sets the bit at index **i** to **true** for each item **i** in the input **src**.
1490 impl Extend<usize> for FixedBitSet {
1491 fn extend<I: IntoIterator<Item = usize>>(&mut self, src: I) {
1492 let iter = src.into_iter();
1493 for i in iter {
1494 if i >= self.len() {
1495 self.grow(i + 1);
1496 }
1497 self.put(i);
1498 }
1499 }
1500 }
1501
1502 /// Return a FixedBitSet containing bits set to **true** for every bit index in
1503 /// the iterator; all other bits are set to **false**.
1504 impl FromIterator<usize> for FixedBitSet {
1505 fn from_iter<I: IntoIterator<Item = usize>>(src: I) -> Self {
1506 let mut fbs = FixedBitSet::with_capacity(0);
1507 fbs.extend(src);
1508 fbs
1509 }
1510 }
1511
1512 pub struct IntoOnes {
1513 bitset_front: Block,
1514 bitset_back: Block,
1515 block_idx_front: usize,
1516 block_idx_back: usize,
1517 remaining_blocks: core::iter::Copied<core::slice::Iter<'static, usize>>,
1518 // Keep _buf alive so that `remaining_blocks` remains valid.
1519 _buf: Vec<SimdBlock>,
1520 }
1521
1522 impl IntoOnes {
1523 #[inline]
1524 pub fn last_positive_bit_and_unset(n: &mut Block) -> usize {
1525 // Find the last set bit using x & -x
1526 let last_bit = *n & n.wrapping_neg();
1527
1528 // Find the position of the last set bit
1529 let position = last_bit.trailing_zeros();
1530
1531 // Unset the last set bit
1532 *n &= *n - 1;
1533
1534 position as usize
1535 }
1536
1537 #[inline]
1538 fn first_positive_bit_and_unset(n: &mut Block) -> usize {
1539 /* Identify the first non zero bit */
1540 let bit_idx = n.leading_zeros();
1541
1542 /* set that bit to zero */
1543 let mask = !((1_usize) << (BITS as u32 - bit_idx - 1));
1544 n.bitand_assign(mask);
1545
1546 bit_idx as usize
1547 }
1548 }
1549
1550 impl DoubleEndedIterator for IntoOnes {
1551 fn next_back(&mut self) -> Option<Self::Item> {
1552 while self.bitset_back == 0 {
1553 match self.remaining_blocks.next_back() {
1554 None => {
1555 if self.bitset_front != 0 {
1556 self.bitset_back = 0;
1557 self.block_idx_back = self.block_idx_front;
1558 return Some(
1559 self.block_idx_front + BITS
1560 - Self::first_positive_bit_and_unset(&mut self.bitset_front)
1561 - 1,
1562 );
1563 } else {
1564 return None;
1565 }
1566 }
1567 Some(next_block) => {
1568 self.bitset_back = next_block;
1569 self.block_idx_back -= BITS;
1570 }
1571 };
1572 }
1573
1574 Some(
1575 self.block_idx_back - Self::first_positive_bit_and_unset(&mut self.bitset_back) + BITS
1576 - 1,
1577 )
1578 }
1579 }
1580
1581 impl Iterator for IntoOnes {
1582 type Item = usize; // the bit position of the '1'
1583
1584 #[inline]
1585 fn next(&mut self) -> Option<Self::Item> {
1586 while self.bitset_front == 0 {
1587 match self.remaining_blocks.next() {
1588 Some(next_block) => {
1589 self.bitset_front = next_block;
1590 self.block_idx_front += BITS;
1591 }
1592 None => {
1593 if self.bitset_back != 0 {
1594 // not needed for iteration, but for size_hint
1595 self.block_idx_front = self.block_idx_back;
1596 self.bitset_front = 0;
1597
1598 return Some(
1599 self.block_idx_back
1600 + Self::last_positive_bit_and_unset(&mut self.bitset_back),
1601 );
1602 } else {
1603 return None;
1604 }
1605 }
1606 };
1607 }
1608
1609 Some(self.block_idx_front + Self::last_positive_bit_and_unset(&mut self.bitset_front))
1610 }
1611
1612 #[inline]
1613 fn size_hint(&self) -> (usize, Option<usize>) {
1614 (
1615 0,
1616 (Some(self.block_idx_back - self.block_idx_front + 2 * BITS)),
1617 )
1618 }
1619 }
1620
1621 // Ones will continue to return None once it first returns None.
1622 impl FusedIterator for IntoOnes {}
1623
1624 impl<'a> BitAnd for &'a FixedBitSet {
1625 type Output = FixedBitSet;
1626 fn bitand(self, other: &FixedBitSet) -> FixedBitSet {
1627 let (short, long) = {
1628 if self.len() <= other.len() {
1629 (self.as_simd_slice(), other.as_simd_slice())
1630 } else {
1631 (other.as_simd_slice(), self.as_simd_slice())
1632 }
1633 };
1634 let mut data = Vec::from(short);
1635 for (data, block) in data.iter_mut().zip(long.iter()) {
1636 *data &= *block;
1637 }
1638 let len = core::cmp::min(self.len(), other.len());
1639 FixedBitSet::from_blocks_and_len(data, len)
1640 }
1641 }
1642
1643 impl BitAndAssign for FixedBitSet {
1644 fn bitand_assign(&mut self, other: Self) {
1645 self.intersect_with(&other);
1646 }
1647 }
1648
1649 impl BitAndAssign<&Self> for FixedBitSet {
1650 fn bitand_assign(&mut self, other: &Self) {
1651 self.intersect_with(other);
1652 }
1653 }
1654
1655 impl<'a> BitOr for &'a FixedBitSet {
1656 type Output = FixedBitSet;
1657 fn bitor(self, other: &FixedBitSet) -> FixedBitSet {
1658 let (short, long) = {
1659 if self.len() <= other.len() {
1660 (self.as_simd_slice(), other.as_simd_slice())
1661 } else {
1662 (other.as_simd_slice(), self.as_simd_slice())
1663 }
1664 };
1665 let mut data = Vec::from(long);
1666 for (data, block) in data.iter_mut().zip(short.iter()) {
1667 *data |= *block;
1668 }
1669 let len = core::cmp::max(self.len(), other.len());
1670 FixedBitSet::from_blocks_and_len(data, len)
1671 }
1672 }
1673
1674 impl BitOrAssign for FixedBitSet {
1675 fn bitor_assign(&mut self, other: Self) {
1676 self.union_with(&other);
1677 }
1678 }
1679
1680 impl BitOrAssign<&Self> for FixedBitSet {
1681 fn bitor_assign(&mut self, other: &Self) {
1682 self.union_with(other);
1683 }
1684 }
1685
1686 impl<'a> BitXor for &'a FixedBitSet {
1687 type Output = FixedBitSet;
1688 fn bitxor(self, other: &FixedBitSet) -> FixedBitSet {
1689 let (short, long) = {
1690 if self.len() <= other.len() {
1691 (self.as_simd_slice(), other.as_simd_slice())
1692 } else {
1693 (other.as_simd_slice(), self.as_simd_slice())
1694 }
1695 };
1696 let mut data = Vec::from(long);
1697 for (data, block) in data.iter_mut().zip(short.iter()) {
1698 *data ^= *block;
1699 }
1700 let len = core::cmp::max(self.len(), other.len());
1701 FixedBitSet::from_blocks_and_len(data, len)
1702 }
1703 }
1704
1705 impl BitXorAssign for FixedBitSet {
1706 fn bitxor_assign(&mut self, other: Self) {
1707 self.symmetric_difference_with(&other);
1708 }
1709 }
1710
1711 impl BitXorAssign<&Self> for FixedBitSet {
1712 fn bitxor_assign(&mut self, other: &Self) {
1713 self.symmetric_difference_with(other);
1714 }
1715 }
1716