1 use core::iter::FromIterator;
2 use core::mem::{self, ManuallyDrop, MaybeUninit};
3 use core::ops::{Deref, DerefMut};
4 use core::ptr::{self, NonNull};
5 use core::{cmp, fmt, hash, isize, slice, usize};
6 
7 use alloc::{
8     borrow::{Borrow, BorrowMut},
9     boxed::Box,
10     string::String,
11     vec,
12     vec::Vec,
13 };
14 
15 use crate::buf::{IntoIter, UninitSlice};
16 use crate::bytes::Vtable;
17 #[allow(unused)]
18 use crate::loom::sync::atomic::AtomicMut;
19 use crate::loom::sync::atomic::{AtomicPtr, AtomicUsize, Ordering};
20 use crate::{Buf, BufMut, Bytes};
21 
22 /// A unique reference to a contiguous slice of memory.
23 ///
24 /// `BytesMut` represents a unique view into a potentially shared memory region.
25 /// Given the uniqueness guarantee, owners of `BytesMut` handles are able to
26 /// mutate the memory.
27 ///
28 /// `BytesMut` can be thought of as containing a `buf: Arc<Vec<u8>>`, an offset
29 /// into `buf`, a slice length, and a guarantee that no other `BytesMut` for the
30 /// same `buf` overlaps with its slice. That guarantee means that a write lock
31 /// is not required.
32 ///
33 /// # Growth
34 ///
35 /// `BytesMut`'s `BufMut` implementation will implicitly grow its buffer as
36 /// necessary. However, explicitly reserving the required space up-front before
37 /// a series of inserts will be more efficient.
38 ///
39 /// # Examples
40 ///
41 /// ```
42 /// use bytes::{BytesMut, BufMut};
43 ///
44 /// let mut buf = BytesMut::with_capacity(64);
45 ///
46 /// buf.put_u8(b'h');
47 /// buf.put_u8(b'e');
48 /// buf.put(&b"llo"[..]);
49 ///
50 /// assert_eq!(&buf[..], b"hello");
51 ///
52 /// // Freeze the buffer so that it can be shared
53 /// let a = buf.freeze();
54 ///
55 /// // This does not allocate, instead `b` points to the same memory.
56 /// let b = a.clone();
57 ///
58 /// assert_eq!(&a[..], b"hello");
59 /// assert_eq!(&b[..], b"hello");
60 /// ```
61 pub struct BytesMut {
62     ptr: NonNull<u8>,
63     len: usize,
64     cap: usize,
65     data: *mut Shared,
66 }
67 
// Thread-safe reference-counted container for the shared storage. This is mostly
// the same as `core::sync::Arc` but without the weak counter. The ref counting
// fns are based on the ones found in `std`.
71 //
72 // The main reason to use `Shared` instead of `core::sync::Arc` is that it ends
73 // up making the overall code simpler and easier to reason about. This is due to
74 // some of the logic around setting `Inner::arc` and other ways the `arc` field
75 // is used. Using `Arc` ended up requiring a number of funky transmutes and
76 // other shenanigans to make it work.
77 struct Shared {
78     vec: Vec<u8>,
79     original_capacity_repr: usize,
80     ref_count: AtomicUsize,
81 }
82 
83 // Assert that the alignment of `Shared` is divisible by 2.
// This is a necessary invariant, since we depend on the allocation of `Shared`
// being suitably aligned for its pointer to implicitly carry the `KIND_ARC` flag.
86 // This flag is set when the LSB is 0.
87 const _: [(); 0 - mem::align_of::<Shared>() % 2] = []; // Assert that the alignment of `Shared` is divisible by 2.
88 
89 // Buffer storage strategy flags.
90 const KIND_ARC: usize = 0b0;
91 const KIND_VEC: usize = 0b1;
92 const KIND_MASK: usize = 0b1;
93 
94 // The max original capacity value. Any `Bytes` allocated with a greater initial
95 // capacity will default to this.
96 const MAX_ORIGINAL_CAPACITY_WIDTH: usize = 17;
97 // The original capacity algorithm will not take effect unless the originally
98 // allocated capacity was at least 1kb in size.
99 const MIN_ORIGINAL_CAPACITY_WIDTH: usize = 10;
100 // The original capacity is stored in powers of 2 starting at 1kb to a max of
101 // 64kb. Representing it as such requires only 3 bits of storage.
102 const ORIGINAL_CAPACITY_MASK: usize = 0b11100;
103 const ORIGINAL_CAPACITY_OFFSET: usize = 2;
104 
105 const VEC_POS_OFFSET: usize = 5;
// When the storage is in the `Vec` representation, the pointer can be advanced
// by at most this value. This is because the storage available to track the
// offset is the pointer width minus the number of KIND bits and the number of
// ORIGINAL_CAPACITY bits.
110 const MAX_VEC_POS: usize = usize::MAX >> VEC_POS_OFFSET;
111 const NOT_VEC_POS_MASK: usize = 0b11111;
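// As an illustrative sketch (not normative, derived from the constants above):
// when `kind == KIND_VEC`, the `data` field of `BytesMut` is not a real pointer
// but a bit-packed word:
//
//   bit 0      : KIND flag (1 = KIND_VEC, 0 = KIND_ARC)
//   bit 1      : unused
//   bits 2..=4 : original capacity repr (ORIGINAL_CAPACITY_MASK / _OFFSET)
//   bits 5..   : vec position, i.e. how far `ptr` has been advanced into the
//                original allocation (VEC_POS_OFFSET / MAX_VEC_POS)
//
// When `kind == KIND_ARC`, `data` is a real, 2-aligned `*mut Shared`, so its
// LSB is 0, which is what distinguishes the two representations.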
112 
113 #[cfg(target_pointer_width = "64")]
114 const PTR_WIDTH: usize = 64;
115 #[cfg(target_pointer_width = "32")]
116 const PTR_WIDTH: usize = 32;
117 
118 /*
119  *
120  * ===== BytesMut =====
121  *
122  */
123 
124 impl BytesMut {
125     /// Creates a new `BytesMut` with the specified capacity.
126     ///
127     /// The returned `BytesMut` will be able to hold at least `capacity` bytes
128     /// without reallocating.
129     ///
130     /// It is important to note that this function does not specify the length
131     /// of the returned `BytesMut`, but only the capacity.
132     ///
133     /// # Examples
134     ///
135     /// ```
136     /// use bytes::{BytesMut, BufMut};
137     ///
138     /// let mut bytes = BytesMut::with_capacity(64);
139     ///
140     /// // `bytes` contains no data, even though there is capacity
141     /// assert_eq!(bytes.len(), 0);
142     ///
143     /// bytes.put(&b"hello world"[..]);
144     ///
145     /// assert_eq!(&bytes[..], b"hello world");
146     /// ```
147     #[inline]
    pub fn with_capacity(capacity: usize) -> BytesMut {
149         BytesMut::from_vec(Vec::with_capacity(capacity))
150     }
151 
152     /// Creates a new `BytesMut` with default capacity.
153     ///
154     /// Resulting object has length 0 and unspecified capacity.
155     /// This function does not allocate.
156     ///
157     /// # Examples
158     ///
159     /// ```
160     /// use bytes::{BytesMut, BufMut};
161     ///
162     /// let mut bytes = BytesMut::new();
163     ///
164     /// assert_eq!(0, bytes.len());
165     ///
166     /// bytes.reserve(2);
167     /// bytes.put_slice(b"xy");
168     ///
169     /// assert_eq!(&b"xy"[..], &bytes[..]);
170     /// ```
171     #[inline]
    pub fn new() -> BytesMut {
173         BytesMut::with_capacity(0)
174     }
175 
176     /// Returns the number of bytes contained in this `BytesMut`.
177     ///
178     /// # Examples
179     ///
180     /// ```
181     /// use bytes::BytesMut;
182     ///
183     /// let b = BytesMut::from(&b"hello"[..]);
184     /// assert_eq!(b.len(), 5);
185     /// ```
186     #[inline]
    pub fn len(&self) -> usize {
188         self.len
189     }
190 
191     /// Returns true if the `BytesMut` has a length of 0.
192     ///
193     /// # Examples
194     ///
195     /// ```
196     /// use bytes::BytesMut;
197     ///
198     /// let b = BytesMut::with_capacity(64);
199     /// assert!(b.is_empty());
200     /// ```
201     #[inline]
    pub fn is_empty(&self) -> bool {
203         self.len == 0
204     }
205 
206     /// Returns the number of bytes the `BytesMut` can hold without reallocating.
207     ///
208     /// # Examples
209     ///
210     /// ```
211     /// use bytes::BytesMut;
212     ///
213     /// let b = BytesMut::with_capacity(64);
214     /// assert_eq!(b.capacity(), 64);
215     /// ```
216     #[inline]
    pub fn capacity(&self) -> usize {
218         self.cap
219     }
220 
221     /// Converts `self` into an immutable `Bytes`.
222     ///
223     /// The conversion is zero cost and is used to indicate that the slice
224     /// referenced by the handle will no longer be mutated. Once the conversion
225     /// is done, the handle can be cloned and shared across threads.
226     ///
227     /// # Examples
228     ///
229     /// ```
230     /// use bytes::{BytesMut, BufMut};
231     /// use std::thread;
232     ///
233     /// let mut b = BytesMut::with_capacity(64);
234     /// b.put(&b"hello world"[..]);
235     /// let b1 = b.freeze();
236     /// let b2 = b1.clone();
237     ///
238     /// let th = thread::spawn(move || {
239     ///     assert_eq!(&b1[..], b"hello world");
240     /// });
241     ///
242     /// assert_eq!(&b2[..], b"hello world");
243     /// th.join().unwrap();
244     /// ```
245     #[inline]
    pub fn freeze(self) -> Bytes {
247         let bytes = ManuallyDrop::new(self);
248         if bytes.kind() == KIND_VEC {
249             // Just re-use `Bytes` internal Vec vtable
250             unsafe {
251                 let off = bytes.get_vec_pos();
252                 let vec = rebuild_vec(bytes.ptr.as_ptr(), bytes.len, bytes.cap, off);
253                 let mut b: Bytes = vec.into();
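                // The rebuilt `Vec` starts at the original allocation, which
                // still contains the `off` bytes this handle had already
                // advanced past, so skip them in the resulting `Bytes`.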
254                 b.advance(off);
255                 b
256             }
257         } else {
258             debug_assert_eq!(bytes.kind(), KIND_ARC);
259 
260             let ptr = bytes.ptr.as_ptr();
261             let len = bytes.len;
262             let data = AtomicPtr::new(bytes.data.cast());
263             unsafe { Bytes::with_vtable(ptr, len, data, &SHARED_VTABLE) }
264         }
265     }
266 
    /// Creates a new `BytesMut` of length `len`, with every byte initialized to zero.
268     ///
269     /// # Examples
270     ///
271     /// ```
272     /// use bytes::BytesMut;
273     ///
274     /// let zeros = BytesMut::zeroed(42);
275     ///
276     /// assert_eq!(zeros.len(), 42);
277     /// zeros.into_iter().for_each(|x| assert_eq!(x, 0));
278     /// ```
    pub fn zeroed(len: usize) -> BytesMut {
280         BytesMut::from_vec(vec![0; len])
281     }
282 
283     /// Splits the bytes into two at the given index.
284     ///
285     /// Afterwards `self` contains elements `[0, at)`, and the returned
286     /// `BytesMut` contains elements `[at, capacity)`.
287     ///
288     /// This is an `O(1)` operation that just increases the reference count
289     /// and sets a few indices.
290     ///
291     /// # Examples
292     ///
293     /// ```
294     /// use bytes::BytesMut;
295     ///
296     /// let mut a = BytesMut::from(&b"hello world"[..]);
297     /// let mut b = a.split_off(5);
298     ///
299     /// a[0] = b'j';
300     /// b[0] = b'!';
301     ///
302     /// assert_eq!(&a[..], b"jello");
303     /// assert_eq!(&b[..], b"!world");
304     /// ```
305     ///
306     /// # Panics
307     ///
308     /// Panics if `at > capacity`.
309     #[must_use = "consider BytesMut::truncate if you don't need the other half"]
    pub fn split_off(&mut self, at: usize) -> BytesMut {
311         assert!(
312             at <= self.capacity(),
313             "split_off out of bounds: {:?} <= {:?}",
314             at,
315             self.capacity(),
316         );
317         unsafe {
318             let mut other = self.shallow_clone();
319             // SAFETY: We've checked that `at` <= `self.capacity()` above.
320             other.advance_unchecked(at);
321             self.cap = at;
322             self.len = cmp::min(self.len, at);
323             other
324         }
325     }
326 
327     /// Removes the bytes from the current view, returning them in a new
328     /// `BytesMut` handle.
329     ///
330     /// Afterwards, `self` will be empty, but will retain any additional
331     /// capacity that it had before the operation. This is identical to
332     /// `self.split_to(self.len())`.
333     ///
334     /// This is an `O(1)` operation that just increases the reference count and
335     /// sets a few indices.
336     ///
337     /// # Examples
338     ///
339     /// ```
340     /// use bytes::{BytesMut, BufMut};
341     ///
342     /// let mut buf = BytesMut::with_capacity(1024);
343     /// buf.put(&b"hello world"[..]);
344     ///
345     /// let other = buf.split();
346     ///
347     /// assert!(buf.is_empty());
348     /// assert_eq!(1013, buf.capacity());
349     ///
350     /// assert_eq!(other, b"hello world"[..]);
351     /// ```
352     #[must_use = "consider BytesMut::advance(len()) if you don't need the other half"]
    pub fn split(&mut self) -> BytesMut {
354         let len = self.len();
355         self.split_to(len)
356     }
357 
358     /// Splits the buffer into two at the given index.
359     ///
360     /// Afterwards `self` contains elements `[at, len)`, and the returned `BytesMut`
361     /// contains elements `[0, at)`.
362     ///
363     /// This is an `O(1)` operation that just increases the reference count and
364     /// sets a few indices.
365     ///
366     /// # Examples
367     ///
368     /// ```
369     /// use bytes::BytesMut;
370     ///
371     /// let mut a = BytesMut::from(&b"hello world"[..]);
372     /// let mut b = a.split_to(5);
373     ///
374     /// a[0] = b'!';
375     /// b[0] = b'j';
376     ///
377     /// assert_eq!(&a[..], b"!world");
378     /// assert_eq!(&b[..], b"jello");
379     /// ```
380     ///
381     /// # Panics
382     ///
383     /// Panics if `at > len`.
384     #[must_use = "consider BytesMut::advance if you don't need the other half"]
    pub fn split_to(&mut self, at: usize) -> BytesMut {
386         assert!(
387             at <= self.len(),
388             "split_to out of bounds: {:?} <= {:?}",
389             at,
390             self.len(),
391         );
392 
393         unsafe {
394             let mut other = self.shallow_clone();
395             // SAFETY: We've checked that `at` <= `self.len()` and we know that `self.len()` <=
396             // `self.capacity()`.
397             self.advance_unchecked(at);
398             other.cap = at;
399             other.len = at;
400             other
401         }
402     }
403 
404     /// Shortens the buffer, keeping the first `len` bytes and dropping the
405     /// rest.
406     ///
407     /// If `len` is greater than the buffer's current length, this has no
408     /// effect.
409     ///
410     /// Existing underlying capacity is preserved.
411     ///
412     /// The [split_off](`Self::split_off()`) method can emulate `truncate`, but this causes the
413     /// excess bytes to be returned instead of dropped.
414     ///
415     /// # Examples
416     ///
417     /// ```
418     /// use bytes::BytesMut;
419     ///
420     /// let mut buf = BytesMut::from(&b"hello world"[..]);
421     /// buf.truncate(5);
422     /// assert_eq!(buf, b"hello"[..]);
423     /// ```
    pub fn truncate(&mut self, len: usize) {
425         if len <= self.len() {
426             unsafe {
427                 self.set_len(len);
428             }
429         }
430     }
431 
432     /// Clears the buffer, removing all data. Existing capacity is preserved.
433     ///
434     /// # Examples
435     ///
436     /// ```
437     /// use bytes::BytesMut;
438     ///
439     /// let mut buf = BytesMut::from(&b"hello world"[..]);
440     /// buf.clear();
441     /// assert!(buf.is_empty());
442     /// ```
    pub fn clear(&mut self) {
444         self.truncate(0);
445     }
446 
447     /// Resizes the buffer so that `len` is equal to `new_len`.
448     ///
449     /// If `new_len` is greater than `len`, the buffer is extended by the
450     /// difference with each additional byte set to `value`. If `new_len` is
451     /// less than `len`, the buffer is simply truncated.
452     ///
453     /// # Examples
454     ///
455     /// ```
456     /// use bytes::BytesMut;
457     ///
458     /// let mut buf = BytesMut::new();
459     ///
460     /// buf.resize(3, 0x1);
461     /// assert_eq!(&buf[..], &[0x1, 0x1, 0x1]);
462     ///
463     /// buf.resize(2, 0x2);
464     /// assert_eq!(&buf[..], &[0x1, 0x1]);
465     ///
466     /// buf.resize(4, 0x3);
467     /// assert_eq!(&buf[..], &[0x1, 0x1, 0x3, 0x3]);
468     /// ```
    pub fn resize(&mut self, new_len: usize, value: u8) {
470         let len = self.len();
471         if new_len > len {
472             let additional = new_len - len;
473             self.reserve(additional);
474             unsafe {
475                 let dst = self.chunk_mut().as_mut_ptr();
476                 ptr::write_bytes(dst, value, additional);
477                 self.set_len(new_len);
478             }
479         } else {
480             self.truncate(new_len);
481         }
482     }
483 
484     /// Sets the length of the buffer.
485     ///
486     /// This will explicitly set the size of the buffer without actually
487     /// modifying the data, so it is up to the caller to ensure that the data
488     /// has been initialized.
489     ///
490     /// # Examples
491     ///
492     /// ```
493     /// use bytes::BytesMut;
494     ///
495     /// let mut b = BytesMut::from(&b"hello world"[..]);
496     ///
497     /// unsafe {
498     ///     b.set_len(5);
499     /// }
500     ///
501     /// assert_eq!(&b[..], b"hello");
502     ///
503     /// unsafe {
504     ///     b.set_len(11);
505     /// }
506     ///
507     /// assert_eq!(&b[..], b"hello world");
508     /// ```
509     #[inline]
    pub unsafe fn set_len(&mut self, len: usize) {
511         debug_assert!(len <= self.cap, "set_len out of bounds");
512         self.len = len;
513     }
514 
515     /// Reserves capacity for at least `additional` more bytes to be inserted
516     /// into the given `BytesMut`.
517     ///
518     /// More than `additional` bytes may be reserved in order to avoid frequent
519     /// reallocations. A call to `reserve` may result in an allocation.
520     ///
521     /// Before allocating new buffer space, the function will attempt to reclaim
522     /// space in the existing buffer. If the current handle references a view
523     /// into a larger original buffer, and all other handles referencing part
524     /// of the same original buffer have been dropped, then the current view
525     /// can be copied/shifted to the front of the buffer and the handle can take
526     /// ownership of the full buffer, provided that the full buffer is large
527     /// enough to fit the requested additional capacity.
528     ///
529     /// This optimization will only happen if shifting the data from the current
530     /// view to the front of the buffer is not too expensive in terms of the
531     /// (amortized) time required. The precise condition is subject to change;
532     /// as of now, the length of the data being shifted needs to be at least as
533     /// large as the distance that it's shifted by. If the current view is empty
534     /// and the original buffer is large enough to fit the requested additional
535     /// capacity, then reallocations will never happen.
536     ///
537     /// # Examples
538     ///
539     /// In the following example, a new buffer is allocated.
540     ///
541     /// ```
542     /// use bytes::BytesMut;
543     ///
544     /// let mut buf = BytesMut::from(&b"hello"[..]);
545     /// buf.reserve(64);
546     /// assert!(buf.capacity() >= 69);
547     /// ```
548     ///
549     /// In the following example, the existing buffer is reclaimed.
550     ///
551     /// ```
552     /// use bytes::{BytesMut, BufMut};
553     ///
554     /// let mut buf = BytesMut::with_capacity(128);
555     /// buf.put(&[0; 64][..]);
556     ///
557     /// let ptr = buf.as_ptr();
558     /// let other = buf.split();
559     ///
560     /// assert!(buf.is_empty());
561     /// assert_eq!(buf.capacity(), 64);
562     ///
563     /// drop(other);
564     /// buf.reserve(128);
565     ///
566     /// assert_eq!(buf.capacity(), 128);
567     /// assert_eq!(buf.as_ptr(), ptr);
568     /// ```
569     ///
570     /// # Panics
571     ///
572     /// Panics if the new capacity overflows `usize`.
573     #[inline]
    pub fn reserve(&mut self, additional: usize) {
575         let len = self.len();
576         let rem = self.capacity() - len;
577 
578         if additional <= rem {
579             // The handle can already store at least `additional` more bytes, so
580             // there is no further work needed to be done.
581             return;
582         }
583 
584         self.reserve_inner(additional);
585     }
586 
    // In a separate function to allow the short-circuits in `reserve` to
    // be inline-able. Significantly helps performance.
    fn reserve_inner(&mut self, additional: usize) {
590         let len = self.len();
591         let kind = self.kind();
592 
593         if kind == KIND_VEC {
594             // If there's enough free space before the start of the buffer, then
595             // just copy the data backwards and reuse the already-allocated
596             // space.
597             //
598             // Otherwise, since backed by a vector, use `Vec::reserve`
599             //
600             // We need to make sure that this optimization does not kill the
601             // amortized runtimes of BytesMut's operations.
602             unsafe {
603                 let off = self.get_vec_pos();
604 
605                 // Only reuse space if we can satisfy the requested additional space.
606                 //
607                 // Also check if the value of `off` suggests that enough bytes
608                 // have been read to account for the overhead of shifting all
609                 // the data (in an amortized analysis).
610                 // Hence the condition `off >= self.len()`.
611                 //
612                 // This condition also already implies that the buffer is going
613                 // to be (at least) half-empty in the end; so we do not break
614                 // the (amortized) runtime with future resizes of the underlying
615                 // `Vec`.
616                 //
617                 // [For more details check issue #524, and PR #525.]
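                // As a hypothetical worked example: with cap = 64, len = 10,
                // off = 20 and additional = 30, we have 64 - 10 + 20 = 74 >= 30
                // and 20 >= 10, so the 10 live bytes are copied back to the
                // start of the allocation and cap grows to 64 + 20 = 84.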
618                 if self.capacity() - self.len() + off >= additional && off >= self.len() {
619                     // There's enough space, and it's not too much overhead:
620                     // reuse the space!
621                     //
622                     // Just move the pointer back to the start after copying
623                     // data back.
624                     let base_ptr = self.ptr.as_ptr().sub(off);
625                     // Since `off >= self.len()`, the two regions don't overlap.
626                     ptr::copy_nonoverlapping(self.ptr.as_ptr(), base_ptr, self.len);
627                     self.ptr = vptr(base_ptr);
628                     self.set_vec_pos(0);
629 
630                     // Length stays constant, but since we moved backwards we
631                     // can gain capacity back.
632                     self.cap += off;
633                 } else {
634                     // Not enough space, or reusing might be too much overhead:
635                     // allocate more space!
636                     let mut v =
637                         ManuallyDrop::new(rebuild_vec(self.ptr.as_ptr(), self.len, self.cap, off));
638                     v.reserve(additional);
639 
640                     // Update the info
641                     self.ptr = vptr(v.as_mut_ptr().add(off));
642                     self.len = v.len() - off;
643                     self.cap = v.capacity() - off;
644                 }
645 
646                 return;
647             }
648         }
649 
650         debug_assert_eq!(kind, KIND_ARC);
651         let shared: *mut Shared = self.data;
652 
653         // Reserving involves abandoning the currently shared buffer and
654         // allocating a new vector with the requested capacity.
655         //
656         // Compute the new capacity
657         let mut new_cap = len.checked_add(additional).expect("overflow");
658 
659         unsafe {
660             // First, try to reclaim the buffer. This is possible if the current
661             // handle is the only outstanding handle pointing to the buffer.
662             if (*shared).is_unique() {
663                 // This is the only handle to the buffer. It can be reclaimed.
664                 // However, before doing the work of copying data, check to make
665                 // sure that the vector has enough capacity.
666                 let v = &mut (*shared).vec;
667 
668                 let v_capacity = v.capacity();
669                 let ptr = v.as_mut_ptr();
670 
671                 let offset = offset_from(self.ptr.as_ptr(), ptr);
672 
673                 // Compare the condition in the `kind == KIND_VEC` case above
674                 // for more details.
675                 if v_capacity >= new_cap + offset {
676                     self.cap = new_cap;
677                     // no copy is necessary
678                 } else if v_capacity >= new_cap && offset >= len {
679                     // The capacity is sufficient, and copying is not too much
680                     // overhead: reclaim the buffer!
681 
682                     // `offset >= len` means: no overlap
683                     ptr::copy_nonoverlapping(self.ptr.as_ptr(), ptr, len);
684 
685                     self.ptr = vptr(ptr);
686                     self.cap = v.capacity();
687                 } else {
688                     // calculate offset
689                     let off = (self.ptr.as_ptr() as usize) - (v.as_ptr() as usize);
690 
691                     // new_cap is calculated in terms of `BytesMut`, not the underlying
692                     // `Vec`, so it does not take the offset into account.
693                     //
694                     // Thus we have to manually add it here.
695                     new_cap = new_cap.checked_add(off).expect("overflow");
696 
697                     // The vector capacity is not sufficient. The reserve request is
698                     // asking for more than the initial buffer capacity. Allocate more
699                     // than requested if `new_cap` is not much bigger than the current
700                     // capacity.
701                     //
                    // There are some situations, when using `reserve_exact`,
                    // in which the buffer capacity could be below
                    // `original_capacity`, so do a check.
705                     let double = v.capacity().checked_shl(1).unwrap_or(new_cap);
706 
707                     new_cap = cmp::max(double, new_cap);
708 
709                     // No space - allocate more
710                     //
711                     // The length field of `Shared::vec` is not used by the `BytesMut`;
712                     // instead we use the `len` field in the `BytesMut` itself. However,
713                     // when calling `reserve`, it doesn't guarantee that data stored in
714                     // the unused capacity of the vector is copied over to the new
715                     // allocation, so we need to ensure that we don't have any data we
716                     // care about in the unused capacity before calling `reserve`.
717                     debug_assert!(off + len <= v.capacity());
718                     v.set_len(off + len);
719                     v.reserve(new_cap - v.len());
720 
721                     // Update the info
722                     self.ptr = vptr(v.as_mut_ptr().add(off));
723                     self.cap = v.capacity() - off;
724                 }
725 
726                 return;
727             }
728         }
729 
730         let original_capacity_repr = unsafe { (*shared).original_capacity_repr };
731         let original_capacity = original_capacity_from_repr(original_capacity_repr);
732 
733         new_cap = cmp::max(new_cap, original_capacity);
734 
735         // Create a new vector to store the data
736         let mut v = ManuallyDrop::new(Vec::with_capacity(new_cap));
737 
738         // Copy the bytes
739         v.extend_from_slice(self.as_ref());
740 
741         // Release the shared handle. This must be done *after* the bytes are
742         // copied.
743         unsafe { release_shared(shared) };
744 
745         // Update self
746         let data = (original_capacity_repr << ORIGINAL_CAPACITY_OFFSET) | KIND_VEC;
747         self.data = invalid_ptr(data);
748         self.ptr = vptr(v.as_mut_ptr());
749         self.len = v.len();
750         self.cap = v.capacity();
751     }
752 
753     /// Appends given bytes to this `BytesMut`.
754     ///
755     /// If this `BytesMut` object does not have enough capacity, it is resized
756     /// first.
757     ///
758     /// # Examples
759     ///
760     /// ```
761     /// use bytes::BytesMut;
762     ///
763     /// let mut buf = BytesMut::with_capacity(0);
764     /// buf.extend_from_slice(b"aaabbb");
765     /// buf.extend_from_slice(b"cccddd");
766     ///
767     /// assert_eq!(b"aaabbbcccddd", &buf[..]);
768     /// ```
769     #[inline]
    pub fn extend_from_slice(&mut self, extend: &[u8]) {
771         let cnt = extend.len();
772         self.reserve(cnt);
773 
774         unsafe {
775             let dst = self.spare_capacity_mut();
776             // Reserved above
777             debug_assert!(dst.len() >= cnt);
778 
779             ptr::copy_nonoverlapping(extend.as_ptr(), dst.as_mut_ptr().cast(), cnt);
780         }
781 
782         unsafe {
783             self.advance_mut(cnt);
784         }
785     }
786 
787     /// Absorbs a `BytesMut` that was previously split off.
788     ///
789     /// If the two `BytesMut` objects were previously contiguous and not mutated
    /// in a way that causes re-allocation, i.e., if `other` was created by
791     /// calling `split_off` on this `BytesMut`, then this is an `O(1)` operation
792     /// that just decreases a reference count and sets a few indices.
793     /// Otherwise this method degenerates to
794     /// `self.extend_from_slice(other.as_ref())`.
795     ///
796     /// # Examples
797     ///
798     /// ```
799     /// use bytes::BytesMut;
800     ///
801     /// let mut buf = BytesMut::with_capacity(64);
802     /// buf.extend_from_slice(b"aaabbbcccddd");
803     ///
804     /// let split = buf.split_off(6);
805     /// assert_eq!(b"aaabbb", &buf[..]);
806     /// assert_eq!(b"cccddd", &split[..]);
807     ///
808     /// buf.unsplit(split);
809     /// assert_eq!(b"aaabbbcccddd", &buf[..]);
810     /// ```
    pub fn unsplit(&mut self, other: BytesMut) {
812         if self.is_empty() {
813             *self = other;
814             return;
815         }
816 
817         if let Err(other) = self.try_unsplit(other) {
818             self.extend_from_slice(other.as_ref());
819         }
820     }
821 
822     // private
823 
824     // For now, use a `Vec` to manage the memory for us, but we may want to
825     // change that in the future to some alternate allocator strategy.
826     //
827     // Thus, we don't expose an easy way to construct from a `Vec` since an
828     // internal change could make a simple pattern (`BytesMut::from(vec)`)
829     // suddenly a lot more expensive.
830     #[inline]
    pub(crate) fn from_vec(vec: Vec<u8>) -> BytesMut {
832         let mut vec = ManuallyDrop::new(vec);
833         let ptr = vptr(vec.as_mut_ptr());
834         let len = vec.len();
835         let cap = vec.capacity();
836 
837         let original_capacity_repr = original_capacity_to_repr(cap);
838         let data = (original_capacity_repr << ORIGINAL_CAPACITY_OFFSET) | KIND_VEC;
839 
840         BytesMut {
841             ptr,
842             len,
843             cap,
844             data: invalid_ptr(data),
845         }
846     }
847 
848     #[inline]
    fn as_slice(&self) -> &[u8] {
850         unsafe { slice::from_raw_parts(self.ptr.as_ptr(), self.len) }
851     }
852 
853     #[inline]
    fn as_slice_mut(&mut self) -> &mut [u8] {
855         unsafe { slice::from_raw_parts_mut(self.ptr.as_ptr(), self.len) }
856     }
857 
858     /// Advance the buffer without bounds checking.
859     ///
860     /// # SAFETY
861     ///
862     /// The caller must ensure that `count` <= `self.cap`.
    unsafe fn advance_unchecked(&mut self, count: usize) {
864         // Setting the start to 0 is a no-op, so return early if this is the
865         // case.
866         if count == 0 {
867             return;
868         }
869 
870         debug_assert!(count <= self.cap, "internal: set_start out of bounds");
871 
872         let kind = self.kind();
873 
874         if kind == KIND_VEC {
            // Setting the start when in vec representation is a little more
            // complicated. First, we have to track how far ahead the "start"
            // of the byte buffer is from the beginning of the vec. We also
            // have to ensure that we don't exceed the maximum shift.
879             let pos = self.get_vec_pos() + count;
880 
881             if pos <= MAX_VEC_POS {
882                 self.set_vec_pos(pos);
883             } else {
884                 // The repr must be upgraded to ARC. This will never happen
885                 // on 64 bit systems and will only happen on 32 bit systems
886                 // when shifting past 134,217,727 bytes. As such, we don't
887                 // worry too much about performance here.
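                // (For reference: on 32-bit targets, MAX_VEC_POS is
                // usize::MAX >> VEC_POS_OFFSET = u32::MAX >> 5 = 134_217_727.)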
888                 self.promote_to_shared(/*ref_count = */ 1);
889             }
890         }
891 
892         // Updating the start of the view is setting `ptr` to point to the
893         // new start and updating the `len` field to reflect the new length
894         // of the view.
895         self.ptr = vptr(self.ptr.as_ptr().add(count));
896         self.len = self.len.checked_sub(count).unwrap_or(0);
897         self.cap -= count;
898     }
899 
    fn try_unsplit(&mut self, other: BytesMut) -> Result<(), BytesMut> {
901         if other.capacity() == 0 {
902             return Ok(());
903         }
904 
905         let ptr = unsafe { self.ptr.as_ptr().add(self.len) };
906         if ptr == other.ptr.as_ptr()
907             && self.kind() == KIND_ARC
908             && other.kind() == KIND_ARC
909             && self.data == other.data
910         {
911             // Contiguous blocks, just combine directly
912             self.len += other.len;
913             self.cap += other.cap;
914             Ok(())
915         } else {
916             Err(other)
917         }
918     }
919 
920     #[inline]
    fn kind(&self) -> usize {
922         self.data as usize & KIND_MASK
923     }
924 
    unsafe fn promote_to_shared(&mut self, ref_cnt: usize) {
926         debug_assert_eq!(self.kind(), KIND_VEC);
927         debug_assert!(ref_cnt == 1 || ref_cnt == 2);
928 
929         let original_capacity_repr =
930             (self.data as usize & ORIGINAL_CAPACITY_MASK) >> ORIGINAL_CAPACITY_OFFSET;
931 
932         // The vec offset cannot be concurrently mutated, so there
933         // should be no danger reading it.
934         let off = (self.data as usize) >> VEC_POS_OFFSET;
935 
936         // First, allocate a new `Shared` instance containing the
937         // `Vec` fields. It's important to note that `ptr`, `len`,
938         // and `cap` cannot be mutated without having `&mut self`.
939         // This means that these fields will not be concurrently
940         // updated and since the buffer hasn't been promoted to an
941         // `Arc`, those three fields still are the components of the
942         // vector.
943         let shared = Box::new(Shared {
944             vec: rebuild_vec(self.ptr.as_ptr(), self.len, self.cap, off),
945             original_capacity_repr,
946             ref_count: AtomicUsize::new(ref_cnt),
947         });
948 
949         let shared = Box::into_raw(shared);
950 
951         // The pointer should be aligned, so this assert should
952         // always succeed.
953         debug_assert_eq!(shared as usize & KIND_MASK, KIND_ARC);
954 
955         self.data = shared;
956     }
957 
958     /// Makes an exact shallow clone of `self`.
959     ///
960     /// The kind of `self` doesn't matter, but this is unsafe
    /// because the clone will have the same offsets. You must
    /// ensure that the value returned to the user does not allow
    /// two views into the same range.
964     #[inline]
    unsafe fn shallow_clone(&mut self) -> BytesMut {
966         if self.kind() == KIND_ARC {
967             increment_shared(self.data);
968             ptr::read(self)
969         } else {
970             self.promote_to_shared(/*ref_count = */ 2);
971             ptr::read(self)
972         }
973     }
974 
975     #[inline]
    unsafe fn get_vec_pos(&self) -> usize {
977         debug_assert_eq!(self.kind(), KIND_VEC);
978 
979         self.data as usize >> VEC_POS_OFFSET
980     }
981 
982     #[inline]
    unsafe fn set_vec_pos(&mut self, pos: usize) {
984         debug_assert_eq!(self.kind(), KIND_VEC);
985         debug_assert!(pos <= MAX_VEC_POS);
986 
987         self.data = invalid_ptr((pos << VEC_POS_OFFSET) | (self.data as usize & NOT_VEC_POS_MASK));
988     }
989 
990     /// Returns the remaining spare capacity of the buffer as a slice of `MaybeUninit<u8>`.
991     ///
992     /// The returned slice can be used to fill the buffer with data (e.g. by
993     /// reading from a file) before marking the data as initialized using the
994     /// [`set_len`] method.
995     ///
996     /// [`set_len`]: BytesMut::set_len
997     ///
998     /// # Examples
999     ///
1000     /// ```
1001     /// use bytes::BytesMut;
1002     ///
1003     /// // Allocate buffer big enough for 10 bytes.
1004     /// let mut buf = BytesMut::with_capacity(10);
1005     ///
1006     /// // Fill in the first 3 elements.
1007     /// let uninit = buf.spare_capacity_mut();
1008     /// uninit[0].write(0);
1009     /// uninit[1].write(1);
1010     /// uninit[2].write(2);
1011     ///
1012     /// // Mark the first 3 bytes of the buffer as being initialized.
1013     /// unsafe {
1014     ///     buf.set_len(3);
1015     /// }
1016     ///
1017     /// assert_eq!(&buf[..], &[0, 1, 2]);
1018     /// ```
1019     #[inline]
    pub fn spare_capacity_mut(&mut self) -> &mut [MaybeUninit<u8>] {
1021         unsafe {
1022             let ptr = self.ptr.as_ptr().add(self.len);
1023             let len = self.cap - self.len;
1024 
1025             slice::from_raw_parts_mut(ptr.cast(), len)
1026         }
1027     }
1028 }
1029 
1030 impl Drop for BytesMut {
    fn drop(&mut self) {
1032         let kind = self.kind();
1033 
1034         if kind == KIND_VEC {
1035             unsafe {
1036                 let off = self.get_vec_pos();
1037 
1038                 // Vector storage, free the vector
1039                 let _ = rebuild_vec(self.ptr.as_ptr(), self.len, self.cap, off);
1040             }
1041         } else if kind == KIND_ARC {
1042             unsafe { release_shared(self.data) };
1043         }
1044     }
1045 }
1046 
1047 impl Buf for BytesMut {
1048     #[inline]
    fn remaining(&self) -> usize {
1050         self.len()
1051     }
1052 
1053     #[inline]
    fn chunk(&self) -> &[u8] {
1055         self.as_slice()
1056     }
1057 
1058     #[inline]
    fn advance(&mut self, cnt: usize) {
1060         assert!(
1061             cnt <= self.remaining(),
1062             "cannot advance past `remaining`: {:?} <= {:?}",
1063             cnt,
1064             self.remaining(),
1065         );
1066         unsafe {
1067             // SAFETY: We've checked that `cnt` <= `self.remaining()` and we know that
1068             // `self.remaining()` <= `self.cap`.
1069             self.advance_unchecked(cnt);
1070         }
1071     }
1072 
    fn copy_to_bytes(&mut self, len: usize) -> Bytes {
1074         self.split_to(len).freeze()
1075     }
1076 }
1077 
1078 unsafe impl BufMut for BytesMut {
1079     #[inline]
    fn remaining_mut(&self) -> usize {
1081         usize::MAX - self.len()
1082     }
1083 
1084     #[inline]
    unsafe fn advance_mut(&mut self, cnt: usize) {
1086         let remaining = self.cap - self.len();
1087         if cnt > remaining {
1088             super::panic_advance(cnt, remaining);
1089         }
1090         // Addition won't overflow since it is at most `self.cap`.
1091         self.len = self.len() + cnt;
1092     }
1093 
1094     #[inline]
    fn chunk_mut(&mut self) -> &mut UninitSlice {
1096         if self.capacity() == self.len() {
1097             self.reserve(64);
1098         }
1099         self.spare_capacity_mut().into()
1100     }
1101 
1102     // Specialize these methods so they can skip checking `remaining_mut`
1103     // and `advance_mut`.
1104 
    fn put<T: Buf>(&mut self, mut src: T)
1106     where
1107         Self: Sized,
1108     {
1109         while src.has_remaining() {
1110             let s = src.chunk();
1111             let l = s.len();
1112             self.extend_from_slice(s);
1113             src.advance(l);
1114         }
1115     }
1116 
    fn put_slice(&mut self, src: &[u8]) {
1118         self.extend_from_slice(src);
1119     }
1120 
    fn put_bytes(&mut self, val: u8, cnt: usize) {
1122         self.reserve(cnt);
1123         unsafe {
1124             let dst = self.spare_capacity_mut();
1125             // Reserved above
1126             debug_assert!(dst.len() >= cnt);
1127 
1128             ptr::write_bytes(dst.as_mut_ptr(), val, cnt);
1129 
1130             self.advance_mut(cnt);
1131         }
1132     }
1133 }
1134 
1135 impl AsRef<[u8]> for BytesMut {
1136     #[inline]
    fn as_ref(&self) -> &[u8] {
1138         self.as_slice()
1139     }
1140 }
1141 
1142 impl Deref for BytesMut {
1143     type Target = [u8];
1144 
1145     #[inline]
    fn deref(&self) -> &[u8] {
1147         self.as_ref()
1148     }
1149 }
1150 
1151 impl AsMut<[u8]> for BytesMut {
1152     #[inline]
    fn as_mut(&mut self) -> &mut [u8] {
1154         self.as_slice_mut()
1155     }
1156 }
1157 
1158 impl DerefMut for BytesMut {
1159     #[inline]
    fn deref_mut(&mut self) -> &mut [u8] {
1161         self.as_mut()
1162     }
1163 }
1164 
1165 impl<'a> From<&'a [u8]> for BytesMut {
    fn from(src: &'a [u8]) -> BytesMut {
1167         BytesMut::from_vec(src.to_vec())
1168     }
1169 }
1170 
1171 impl<'a> From<&'a str> for BytesMut {
    fn from(src: &'a str) -> BytesMut {
1173         BytesMut::from(src.as_bytes())
1174     }
1175 }
1176 
1177 impl From<BytesMut> for Bytes {
    fn from(src: BytesMut) -> Bytes {
1179         src.freeze()
1180     }
1181 }
1182 
1183 impl PartialEq for BytesMut {
    fn eq(&self, other: &BytesMut) -> bool {
1185         self.as_slice() == other.as_slice()
1186     }
1187 }
1188 
1189 impl PartialOrd for BytesMut {
    fn partial_cmp(&self, other: &BytesMut) -> Option<cmp::Ordering> {
1191         self.as_slice().partial_cmp(other.as_slice())
1192     }
1193 }
1194 
1195 impl Ord for BytesMut {
    fn cmp(&self, other: &BytesMut) -> cmp::Ordering {
1197         self.as_slice().cmp(other.as_slice())
1198     }
1199 }
1200 
1201 impl Eq for BytesMut {}
1202 
1203 impl Default for BytesMut {
1204     #[inline]
    fn default() -> BytesMut {
1206         BytesMut::new()
1207     }
1208 }
1209 
1210 impl hash::Hash for BytesMut {
    fn hash<H>(&self, state: &mut H)
1212     where
1213         H: hash::Hasher,
1214     {
1215         let s: &[u8] = self.as_ref();
1216         s.hash(state);
1217     }
1218 }
1219 
1220 impl Borrow<[u8]> for BytesMut {
    fn borrow(&self) -> &[u8] {
1222         self.as_ref()
1223     }
1224 }
1225 
1226 impl BorrowMut<[u8]> for BytesMut {
    fn borrow_mut(&mut self) -> &mut [u8] {
1228         self.as_mut()
1229     }
1230 }
1231 
1232 impl fmt::Write for BytesMut {
1233     #[inline]
    fn write_str(&mut self, s: &str) -> fmt::Result {
1235         if self.remaining_mut() >= s.len() {
1236             self.put_slice(s.as_bytes());
1237             Ok(())
1238         } else {
1239             Err(fmt::Error)
1240         }
1241     }
1242 
1243     #[inline]
    fn write_fmt(&mut self, args: fmt::Arguments<'_>) -> fmt::Result {
1245         fmt::write(self, args)
1246     }
1247 }
1248 
1249 impl Clone for BytesMut {
    fn clone(&self) -> BytesMut {
1251         BytesMut::from(&self[..])
1252     }
1253 }
1254 
1255 impl IntoIterator for BytesMut {
1256     type Item = u8;
1257     type IntoIter = IntoIter<BytesMut>;
1258 
    fn into_iter(self) -> Self::IntoIter {
1260         IntoIter::new(self)
1261     }
1262 }
1263 
1264 impl<'a> IntoIterator for &'a BytesMut {
1265     type Item = &'a u8;
1266     type IntoIter = core::slice::Iter<'a, u8>;
1267 
    fn into_iter(self) -> Self::IntoIter {
1269         self.as_ref().iter()
1270     }
1271 }
1272 
1273 impl Extend<u8> for BytesMut {
    fn extend<T>(&mut self, iter: T)
1275     where
1276         T: IntoIterator<Item = u8>,
1277     {
1278         let iter = iter.into_iter();
1279 
1280         let (lower, _) = iter.size_hint();
1281         self.reserve(lower);
1282 
1283         // TODO: optimize
1284         // 1. If self.kind() == KIND_VEC, use Vec::extend
1285         for b in iter {
1286             self.put_u8(b);
1287         }
1288     }
1289 }
1290 
1291 impl<'a> Extend<&'a u8> for BytesMut {
    fn extend<T>(&mut self, iter: T)
1293     where
1294         T: IntoIterator<Item = &'a u8>,
1295     {
1296         self.extend(iter.into_iter().copied())
1297     }
1298 }
1299 
1300 impl Extend<Bytes> for BytesMut {
    fn extend<T>(&mut self, iter: T)
1302     where
1303         T: IntoIterator<Item = Bytes>,
1304     {
1305         for bytes in iter {
1306             self.extend_from_slice(&bytes)
1307         }
1308     }
1309 }
1310 
1311 impl FromIterator<u8> for BytesMut {
    fn from_iter<T: IntoIterator<Item = u8>>(into_iter: T) -> Self {
1313         BytesMut::from_vec(Vec::from_iter(into_iter))
1314     }
1315 }
1316 
1317 impl<'a> FromIterator<&'a u8> for BytesMut {
    fn from_iter<T: IntoIterator<Item = &'a u8>>(into_iter: T) -> Self {
1319         BytesMut::from_iter(into_iter.into_iter().copied())
1320     }
1321 }
1322 
1323 /*
1324  *
1325  * ===== Inner =====
1326  *
1327  */
1328 
unsafe fn increment_shared(ptr: *mut Shared) {
1330     let old_size = (*ptr).ref_count.fetch_add(1, Ordering::Relaxed);
1331 
1332     if old_size > isize::MAX as usize {
1333         crate::abort();
1334     }
1335 }
1336 
unsafe fn release_shared(ptr: *mut Shared) {
1338     // `Shared` storage... follow the drop steps from Arc.
1339     if (*ptr).ref_count.fetch_sub(1, Ordering::Release) != 1 {
1340         return;
1341     }
1342 
1343     // This fence is needed to prevent reordering of use of the data and
1344     // deletion of the data.  Because it is marked `Release`, the decreasing
1345     // of the reference count synchronizes with this `Acquire` fence. This
1346     // means that use of the data happens before decreasing the reference
1347     // count, which happens before this fence, which happens before the
1348     // deletion of the data.
1349     //
1350     // As explained in the [Boost documentation][1],
1351     //
1352     // > It is important to enforce any possible access to the object in one
1353     // > thread (through an existing reference) to *happen before* deleting
1354     // > the object in a different thread. This is achieved by a "release"
1355     // > operation after dropping a reference (any access to the object
1356     // > through this reference must obviously happened before), and an
1357     // > "acquire" operation before deleting the object.
1358     //
1359     // [1]: (www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html)
1360     //
1361     // Thread sanitizer does not support atomic fences. Use an atomic load
1362     // instead.
1363     (*ptr).ref_count.load(Ordering::Acquire);
1364 
1365     // Drop the data
1366     drop(Box::from_raw(ptr));
1367 }
1368 
1369 impl Shared {
    fn is_unique(&self) -> bool {
1371         // The goal is to check if the current handle is the only handle
1372         // that currently has access to the buffer. This is done by
1373         // checking if the `ref_count` is currently 1.
1374         //
1375         // The `Acquire` ordering synchronizes with the `Release` as
1376         // part of the `fetch_sub` in `release_shared`. The `fetch_sub`
1377         // operation guarantees that any mutations done in other threads
1378         // are ordered before the `ref_count` is decremented. As such,
1379         // this `Acquire` will guarantee that those mutations are
1380         // visible to the current thread.
1381         self.ref_count.load(Ordering::Acquire) == 1
1382     }
1383 }
1384 
1385 #[inline]
fn original_capacity_to_repr(cap: usize) -> usize {
1387     let width = PTR_WIDTH - ((cap >> MIN_ORIGINAL_CAPACITY_WIDTH).leading_zeros() as usize);
1388     cmp::min(
1389         width,
1390         MAX_ORIGINAL_CAPACITY_WIDTH - MIN_ORIGINAL_CAPACITY_WIDTH,
1391     )
1392 }
1393 
fn original_capacity_from_repr(repr: usize) -> usize {
1395     if repr == 0 {
1396         return 0;
1397     }
1398 
1399     1 << (repr + (MIN_ORIGINAL_CAPACITY_WIDTH - 1))
1400 }
1401 
1402 #[cfg(test)]
1403 mod tests {
1404     use super::*;
1405 
1406     #[test]
    fn test_original_capacity_to_repr() {
1408         assert_eq!(original_capacity_to_repr(0), 0);
1409 
1410         let max_width = 32;
1411 
1412         for width in 1..(max_width + 1) {
1413             let cap = 1 << width - 1;
1414 
1415             let expected = if width < MIN_ORIGINAL_CAPACITY_WIDTH {
1416                 0
1417             } else if width < MAX_ORIGINAL_CAPACITY_WIDTH {
1418                 width - MIN_ORIGINAL_CAPACITY_WIDTH
1419             } else {
1420                 MAX_ORIGINAL_CAPACITY_WIDTH - MIN_ORIGINAL_CAPACITY_WIDTH
1421             };
1422 
1423             assert_eq!(original_capacity_to_repr(cap), expected);
1424 
1425             if width > 1 {
1426                 assert_eq!(original_capacity_to_repr(cap + 1), expected);
1427             }
1428 
1429             //  MIN_ORIGINAL_CAPACITY_WIDTH must be bigger than 7 to pass tests below
1430             if width == MIN_ORIGINAL_CAPACITY_WIDTH + 1 {
1431                 assert_eq!(original_capacity_to_repr(cap - 24), expected - 1);
1432                 assert_eq!(original_capacity_to_repr(cap + 76), expected);
1433             } else if width == MIN_ORIGINAL_CAPACITY_WIDTH + 2 {
1434                 assert_eq!(original_capacity_to_repr(cap - 1), expected - 1);
1435                 assert_eq!(original_capacity_to_repr(cap - 48), expected - 1);
1436             }
1437         }
1438     }
1439 
1440     #[test]
    fn test_original_capacity_from_repr() {
1442         assert_eq!(0, original_capacity_from_repr(0));
1443 
1444         let min_cap = 1 << MIN_ORIGINAL_CAPACITY_WIDTH;
1445 
1446         assert_eq!(min_cap, original_capacity_from_repr(1));
1447         assert_eq!(min_cap * 2, original_capacity_from_repr(2));
1448         assert_eq!(min_cap * 4, original_capacity_from_repr(3));
1449         assert_eq!(min_cap * 8, original_capacity_from_repr(4));
1450         assert_eq!(min_cap * 16, original_capacity_from_repr(5));
1451         assert_eq!(min_cap * 32, original_capacity_from_repr(6));
1452         assert_eq!(min_cap * 64, original_capacity_from_repr(7));
1453     }
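
    // A small additional sketch (not part of the upstream test suite): the
    // repr round trip clamps capacities below 1KiB to 0 and above 64KiB to
    // 64KiB, and rounds other values down to the tracked power of two.
    #[test]
    fn test_original_capacity_round_trip_clamps() {
        assert_eq!(original_capacity_from_repr(original_capacity_to_repr(500)), 0);
        assert_eq!(original_capacity_from_repr(original_capacity_to_repr(1024)), 1024);
        assert_eq!(original_capacity_from_repr(original_capacity_to_repr(2000)), 1024);
        assert_eq!(original_capacity_from_repr(original_capacity_to_repr(65536)), 65536);
        assert_eq!(
            original_capacity_from_repr(original_capacity_to_repr(1_000_000)),
            65536
        );
    }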
1454 }
1455 
1456 unsafe impl Send for BytesMut {}
1457 unsafe impl Sync for BytesMut {}
1458 
1459 /*
1460  *
1461  * ===== PartialEq / PartialOrd =====
1462  *
1463  */
1464 
1465 impl PartialEq<[u8]> for BytesMut {
    fn eq(&self, other: &[u8]) -> bool {
1467         &**self == other
1468     }
1469 }
1470 
1471 impl PartialOrd<[u8]> for BytesMut {
    fn partial_cmp(&self, other: &[u8]) -> Option<cmp::Ordering> {
1473         (**self).partial_cmp(other)
1474     }
1475 }
1476 
1477 impl PartialEq<BytesMut> for [u8] {
    fn eq(&self, other: &BytesMut) -> bool {
1479         *other == *self
1480     }
1481 }
1482 
1483 impl PartialOrd<BytesMut> for [u8] {
    fn partial_cmp(&self, other: &BytesMut) -> Option<cmp::Ordering> {
1485         <[u8] as PartialOrd<[u8]>>::partial_cmp(self, other)
1486     }
1487 }
1488 
1489 impl PartialEq<str> for BytesMut {
    fn eq(&self, other: &str) -> bool {
1491         &**self == other.as_bytes()
1492     }
1493 }
1494 
1495 impl PartialOrd<str> for BytesMut {
    fn partial_cmp(&self, other: &str) -> Option<cmp::Ordering> {
1497         (**self).partial_cmp(other.as_bytes())
1498     }
1499 }
1500 
1501 impl PartialEq<BytesMut> for str {
    fn eq(&self, other: &BytesMut) -> bool {
1503         *other == *self
1504     }
1505 }
1506 
1507 impl PartialOrd<BytesMut> for str {
    fn partial_cmp(&self, other: &BytesMut) -> Option<cmp::Ordering> {
1509         <[u8] as PartialOrd<[u8]>>::partial_cmp(self.as_bytes(), other)
1510     }
1511 }
1512 
1513 impl PartialEq<Vec<u8>> for BytesMut {
    fn eq(&self, other: &Vec<u8>) -> bool {
1515         *self == other[..]
1516     }
1517 }
1518 
1519 impl PartialOrd<Vec<u8>> for BytesMut {
    fn partial_cmp(&self, other: &Vec<u8>) -> Option<cmp::Ordering> {
1521         (**self).partial_cmp(&other[..])
1522     }
1523 }
1524 
1525 impl PartialEq<BytesMut> for Vec<u8> {
    fn eq(&self, other: &BytesMut) -> bool {
1527         *other == *self
1528     }
1529 }
1530 
1531 impl PartialOrd<BytesMut> for Vec<u8> {
    fn partial_cmp(&self, other: &BytesMut) -> Option<cmp::Ordering> {
1533         other.partial_cmp(self)
1534     }
1535 }
1536 
1537 impl PartialEq<String> for BytesMut {
    fn eq(&self, other: &String) -> bool {
1539         *self == other[..]
1540     }
1541 }
1542 
1543 impl PartialOrd<String> for BytesMut {
1544     fn partial_cmp(&self, other: &String) -> Option<cmp::Ordering> {
1545         (**self).partial_cmp(other.as_bytes())
1546     }
1547 }
1548 
1549 impl PartialEq<BytesMut> for String {
1550     fn eq(&self, other: &BytesMut) -> bool {
1551         *other == *self
1552     }
1553 }
1554 
1555 impl PartialOrd<BytesMut> for String {
1556     fn partial_cmp(&self, other: &BytesMut) -> Option<cmp::Ordering> {
1557         <[u8] as PartialOrd<[u8]>>::partial_cmp(self.as_bytes(), other)
1558     }
1559 }
1560 
1561 impl<'a, T: ?Sized> PartialEq<&'a T> for BytesMut
1562 where
1563     BytesMut: PartialEq<T>,
1564 {
1565     fn eq(&self, other: &&'a T) -> bool {
1566         *self == **other
1567     }
1568 }
1569 
1570 impl<'a, T: ?Sized> PartialOrd<&'a T> for BytesMut
1571 where
1572     BytesMut: PartialOrd<T>,
1573 {
1574     fn partial_cmp(&self, other: &&'a T) -> Option<cmp::Ordering> {
1575         self.partial_cmp(*other)
1576     }
1577 }
1578 
1579 impl PartialEq<BytesMut> for &[u8] {
1580     fn eq(&self, other: &BytesMut) -> bool {
1581         *other == *self
1582     }
1583 }
1584 
1585 impl PartialOrd<BytesMut> for &[u8] {
1586     fn partial_cmp(&self, other: &BytesMut) -> Option<cmp::Ordering> {
1587         <[u8] as PartialOrd<[u8]>>::partial_cmp(self, other)
1588     }
1589 }
1590 
1591 impl PartialEq<BytesMut> for &str {
1592     fn eq(&self, other: &BytesMut) -> bool {
1593         *other == *self
1594     }
1595 }
1596 
1597 impl PartialOrd<BytesMut> for &str {
1598     fn partial_cmp(&self, other: &BytesMut) -> Option<cmp::Ordering> {
1599         other.partial_cmp(self)
1600     }
1601 }
1602 
1603 impl PartialEq<BytesMut> for Bytes {
1604     fn eq(&self, other: &BytesMut) -> bool {
1605         other[..] == self[..]
1606     }
1607 }
1608 
1609 impl PartialEq<Bytes> for BytesMut {
1610     fn eq(&self, other: &Bytes) -> bool {
1611         other[..] == self[..]
1612     }
1613 }
1614 
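// A minimal usage sketch of the comparison impls above (illustrative only):
// `BytesMut` can be compared directly against byte slices, string slices,
// `Vec<u8>`, `String`, and `Bytes` without converting first.
//
// ```rust,ignore
// use bytes::{Bytes, BytesMut};
//
// let buf = BytesMut::from(&b"abc"[..]);
// assert!(buf == b"abc"[..]);                  // PartialEq<[u8]>
// assert!(buf == "abc");                       // PartialEq<str> via the &'a T impl
// assert!(buf == Vec::from(&b"abc"[..]));      // PartialEq<Vec<u8>>
// assert!(buf == Bytes::from_static(b"abc"));  // PartialEq<Bytes>
// ```
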
1615 impl From<BytesMut> for Vec<u8> {
1616     fn from(bytes: BytesMut) -> Self {
1617         let kind = bytes.kind();
1618         let bytes = ManuallyDrop::new(bytes);
1619 
1620         let mut vec = if kind == KIND_VEC {
1621             unsafe {
1622                 let off = bytes.get_vec_pos();
1623                 rebuild_vec(bytes.ptr.as_ptr(), bytes.len, bytes.cap, off)
1624             }
1625         } else {
1626             let shared = bytes.data as *mut Shared;
1627 
1628             if unsafe { (*shared).is_unique() } {
1629                 let vec = mem::replace(unsafe { &mut (*shared).vec }, Vec::new());
1630 
1631                 unsafe { release_shared(shared) };
1632 
1633                 vec
1634             } else {
1635                 return ManuallyDrop::into_inner(bytes).deref().to_vec();
1636             }
1637         };
1638 
1639         let len = bytes.len;
1640 
1641         unsafe {
1642             ptr::copy(bytes.ptr.as_ptr(), vec.as_mut_ptr(), len);
1643             vec.set_len(len);
1644         }
1645 
1646         vec
1647     }
1648 }
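
// Illustrative sketch of the conversion above: when the `BytesMut` is the
// sole owner of its storage, the backing allocation is reused; otherwise the
// visible bytes are copied into a fresh `Vec<u8>`.
//
// ```rust,ignore
// use bytes::BytesMut;
//
// let mut buf = BytesMut::with_capacity(64);
// buf.extend_from_slice(b"hello");
// let v: Vec<u8> = buf.into(); // reuses the allocation in the unique-owner case
// assert_eq!(&v[..], b"hello");
// ```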
1649 
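/// Wraps the raw pointer of a `Vec`-backed buffer in a `NonNull<u8>`,
/// checking for null in debug builds and skipping the check in release builds.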
1650 #[inline]
1651 fn vptr(ptr: *mut u8) -> NonNull<u8> {
1652     if cfg!(debug_assertions) {
1653         NonNull::new(ptr).expect("Vec pointer should be non-null")
1654     } else {
1655         unsafe { NonNull::new_unchecked(ptr) }
1656     }
1657 }
1658 
1659 /// Returns a dangling pointer with the given address. This is used to store
1660 /// integer data in pointer fields.
1661 ///
1662 /// It is equivalent to `addr as *mut T`, but that cast fails under Miri when
1663 /// strict provenance checking is enabled.
1664 #[inline]
1665 fn invalid_ptr<T>(addr: usize) -> *mut T {
1666     let ptr = core::ptr::null_mut::<u8>().wrapping_add(addr);
1667     debug_assert_eq!(ptr as usize, addr);
1668     ptr.cast::<T>()
1669 }
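
// Minimal illustration (a sketch, not an upstream doc test): the pointer
// produced by `invalid_ptr` carries exactly the requested address, which is
// all the kind/offset encoding stored in pointer fields needs.
//
// ```rust,ignore
// let p: *mut u8 = invalid_ptr(0b1);
// assert_eq!(p as usize, 0b1);
// ```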
1670 
1671 /// Precondition: dst >= original
1672 ///
1673 /// This function is equivalent to:
1674 ///
1675 /// ```rust,ignore
1676 /// dst.offset_from(original) as usize;
1677 /// ```
1678 ///
1679 /// But because the minimum supported Rust version is 1.39 and `offset_from`
1680 /// was only stabilized in 1.47, we cannot use it here.
1681 #[inline]
1682 fn offset_from(dst: *mut u8, original: *mut u8) -> usize {
1683     debug_assert!(dst >= original);
1684 
1685     dst as usize - original as usize
1686 }
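
// Worked example (sketch): with `base` pointing at the start of a buffer and
// `dst = base.add(5)`, the subtraction above yields 5, matching what
// `dst.offset_from(base)` would return on compilers where it is stable.
//
// ```rust,ignore
// let mut buf = [0u8; 8];
// let base = buf.as_mut_ptr();
// let dst = unsafe { base.add(5) };
// assert_eq!(offset_from(dst, base), 5);
// ```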
1687 
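/// Reconstructs the original backing `Vec<u8>` of a `KIND_VEC` buffer: `ptr`
/// is the current view pointer and `off` is how far that view has advanced
/// past the start of the allocation, so the vec is rebuilt from `ptr - off`
/// with length `len + off` and capacity `cap + off`.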
1688 unsafe fn rebuild_vec(ptr: *mut u8, mut len: usize, mut cap: usize, off: usize) -> Vec<u8> {
1689     let ptr = ptr.sub(off);
1690     len += off;
1691     cap += off;
1692 
1693     Vec::from_raw_parts(ptr, len, cap)
1694 }
1695 
1696 // ===== impl SharedVtable =====
1697 
1698 static SHARED_VTABLE: Vtable = Vtable {
1699     clone: shared_v_clone,
1700     to_vec: shared_v_to_vec,
1701     is_unique: crate::bytes::shared_is_unique,
1702     drop: shared_v_drop,
1703 };
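
// The functions below implement the `SHARED_VTABLE` entries defined in this
// module: cloning bumps the shared ref count, `to_vec` reuses the buffer when
// the handle is unique (copying otherwise), and dropping releases the shared
// state.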
1704 
1705 unsafe fn shared_v_clone(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Bytes {
1706     let shared = data.load(Ordering::Relaxed) as *mut Shared;
1707     increment_shared(shared);
1708 
1709     let data = AtomicPtr::new(shared as *mut ());
1710     Bytes::with_vtable(ptr, len, data, &SHARED_VTABLE)
1711 }
1712 
1713 unsafe fn shared_v_to_vec(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Vec<u8> {
1714     let shared: *mut Shared = data.load(Ordering::Relaxed).cast();
1715 
1716     if (*shared).is_unique() {
1717         let shared = &mut *shared;
1718 
1719         // Drop shared
1720         let mut vec = mem::replace(&mut shared.vec, Vec::new());
1721         release_shared(shared);
1722 
1723         // Copy back buffer
1724         ptr::copy(ptr, vec.as_mut_ptr(), len);
1725         vec.set_len(len);
1726 
1727         vec
1728     } else {
1729         let v = slice::from_raw_parts(ptr, len).to_vec();
1730         release_shared(shared);
1731         v
1732     }
1733 }
1734 
1735 unsafe fn shared_v_drop(data: &mut AtomicPtr<()>, _ptr: *const u8, _len: usize) {
1736     data.with_mut(|shared| {
1737         release_shared(*shared as *mut Shared);
1738     });
1739 }
1740 
1741 // compile-fails
1742 
1743 /// ```compile_fail
1744 /// use bytes::BytesMut;
1745 /// #[deny(unused_must_use)]
1746 /// {
1747 ///     let mut b1 = BytesMut::from("hello world");
1748 ///     b1.split_to(6);
1749 /// }
1750 /// ```
1751 fn _split_to_must_use() {}
1752 
1753 /// ```compile_fail
1754 /// use bytes::BytesMut;
1755 /// #[deny(unused_must_use)]
1756 /// {
1757 ///     let mut b1 = BytesMut::from("hello world");
1758 ///     b1.split_off(6);
1759 /// }
1760 /// ```
1761 fn _split_off_must_use() {}
1762 
1763 /// ```compile_fail
1764 /// use bytes::BytesMut;
1765 /// #[deny(unused_must_use)]
1766 /// {
1767 ///     let mut b1 = BytesMut::from("hello world");
1768 ///     b1.split();
1769 /// }
1770 /// ```
1771 fn _split_must_use() {}
1772 
1773 // fuzz tests
1774 #[cfg(all(test, loom))]
1775 mod fuzz {
1776     use loom::sync::Arc;
1777     use loom::thread;
1778 
1779     use super::BytesMut;
1780     use crate::Bytes;
1781 
1782     #[test]
1783     fn bytes_mut_cloning_frozen() {
1784         loom::model(|| {
1785             let a = BytesMut::from(&b"abcdefgh"[..]).split().freeze();
1786             let addr = a.as_ptr() as usize;
1787 
1788             // test that `Bytes::clone` is `Sync` by putting it in an `Arc`
1789             let a1 = Arc::new(a);
1790             let a2 = a1.clone();
1791 
1792             let t1 = thread::spawn(move || {
1793                 let b: Bytes = (*a1).clone();
1794                 assert_eq!(b.as_ptr() as usize, addr);
1795             });
1796 
1797             let t2 = thread::spawn(move || {
1798                 let b: Bytes = (*a2).clone();
1799                 assert_eq!(b.as_ptr() as usize, addr);
1800             });
1801 
1802             t1.join().unwrap();
1803             t2.join().unwrap();
1804         });
1805     }
1806 }
1807