portable_atomic_util/arc.rs

1// SPDX-License-Identifier: Apache-2.0 OR MIT
2
3// This module is based on alloc::sync::Arc.
4//
5// The code has been adjusted to work with stable Rust (and optionally support some unstable features).
6//
7// Source: https://github.com/rust-lang/rust/blob/a0c2aba29aa9ea50a7c45c3391dd446f856bef7b/library/alloc/src/sync.rs.
8//
9// Copyright & License of the original code:
10// - https://github.com/rust-lang/rust/blob/a0c2aba29aa9ea50a7c45c3391dd446f856bef7b/COPYRIGHT
11// - https://github.com/rust-lang/rust/blob/a0c2aba29aa9ea50a7c45c3391dd446f856bef7b/LICENSE-APACHE
12// - https://github.com/rust-lang/rust/blob/a0c2aba29aa9ea50a7c45c3391dd446f856bef7b/LICENSE-MIT
13
14#![allow(clippy::must_use_candidate)] // align to alloc::sync::Arc
15#![allow(clippy::undocumented_unsafe_blocks)] // TODO: most of the unsafe code was inherited from alloc::sync::Arc
16
17use portable_atomic::{
18    self as atomic, hint,
19    Ordering::{Acquire, Relaxed, Release},
20};
21
22use alloc::{alloc::handle_alloc_error, boxed::Box};
23#[cfg(not(portable_atomic_no_alloc_layout_extras))]
24use alloc::{
25    borrow::{Cow, ToOwned},
26    string::String,
27    vec::Vec,
28};
29use core::{
30    alloc::Layout,
31    any::Any,
32    borrow, cmp, fmt,
33    hash::{Hash, Hasher},
34    isize,
35    marker::PhantomData,
36    mem::{self, align_of_val, size_of_val, ManuallyDrop},
37    ops::Deref,
38    pin::Pin,
39    ptr::{self, NonNull},
40    usize,
41};
42#[cfg(portable_atomic_unstable_coerce_unsized)]
43use core::{marker::Unsize, ops::CoerceUnsized};
44
45/// A soft limit on the number of references that may be made to an `Arc`.
46///
47/// Going above this limit will abort your program (although not
48/// necessarily at _exactly_ `MAX_REFCOUNT + 1` references).
49/// Trying to go above it might `panic` (even without actually going above it).
50///
51/// This is a global invariant, and also applies when using a compare-exchange loop.
52///
53/// See comment in `Arc::clone`.
54const MAX_REFCOUNT: usize = isize::MAX as usize;
55
56/// The error message used when either counter goes above `MAX_REFCOUNT` and it is safe to `panic`.
57const INTERNAL_OVERFLOW_ERROR: &str = "Arc counter overflow";
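
// How the two constants above are used below (illustrative summary, not extra checks):
// - compare-exchange loops such as `Arc::downgrade` can check *before* incrementing
//   and may panic safely:
//       assert!(cur <= MAX_REFCOUNT, "{}", INTERNAL_OVERFLOW_ERROR);
// - plain `fetch_add` paths such as `Clone for Arc` only see the old value *after*
//   incrementing, so they must abort instead of unwinding:
//       if old_size > MAX_REFCOUNT { abort(); }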
58
59#[cfg(not(portable_atomic_sanitize_thread))]
60macro_rules! acquire {
61    ($x:expr) => {
62        atomic::fence(Acquire)
63    };
64}
65
66// ThreadSanitizer does not support memory fences. To avoid false positive
67// reports in the Arc / Weak implementation, use atomic loads for
68// synchronization instead.
69#[cfg(portable_atomic_sanitize_thread)]
70macro_rules! acquire {
71    ($x:expr) => {
72        $x.load(Acquire)
73    };
74}
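
// Example expansion (illustrative): `acquire!(this.inner().strong)` becomes
// `atomic::fence(Acquire)` in a normal build, and an otherwise-unused
// `this.inner().strong.load(Acquire)` when built for ThreadSanitizer.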
75
76/// A thread-safe reference-counting pointer. 'Arc' stands for 'Atomically
77/// Reference Counted'.
78///
79/// This is an equivalent to [`std::sync::Arc`], but using [portable-atomic] for synchronization.
80/// See the documentation for [`std::sync::Arc`] for more details.
81///
82/// **Note:** Unlike `std::sync::Arc`, coercing `Arc<T>` to `Arc<U>` is only possible if
83/// the optional cfg `portable_atomic_unstable_coerce_unsized` is enabled, as documented in the crate-level documentation,
84/// and this optional cfg is only supported on nightly Rust.
85/// This is because coercing the pointee requires the
86/// [unstable `CoerceUnsized` trait](https://doc.rust-lang.org/nightly/core/ops/trait.CoerceUnsized.html).
87/// See [this issue comment](https://github.com/taiki-e/portable-atomic/issues/143#issuecomment-1866488569)
88/// for a workaround that works without depending on unstable features.
89///
90/// [portable-atomic]: https://crates.io/crates/portable-atomic
91///
92/// # Examples
93///
94/// ```
95/// use portable_atomic_util::Arc;
96/// use std::thread;
97///
98/// let five = Arc::new(5);
99///
100/// for _ in 0..10 {
101///     let five = Arc::clone(&five);
102///
103///     thread::spawn(move || {
104///         assert_eq!(*five, 5);
105///     });
106/// }
107/// # if cfg!(miri) { std::thread::sleep(std::time::Duration::from_millis(500)); } // wait for background threads closed: https://github.com/rust-lang/miri/issues/1371
108/// ```
109pub struct Arc<T: ?Sized> {
110    ptr: NonNull<ArcInner<T>>,
111    phantom: PhantomData<ArcInner<T>>,
112}
113
114unsafe impl<T: ?Sized + Sync + Send> Send for Arc<T> {}
115unsafe impl<T: ?Sized + Sync + Send> Sync for Arc<T> {}
116
117#[cfg(not(portable_atomic_no_core_unwind_safe))]
118impl<T: ?Sized + core::panic::RefUnwindSafe> core::panic::UnwindSafe for Arc<T> {}
119#[cfg(all(portable_atomic_no_core_unwind_safe, feature = "std"))]
120impl<T: ?Sized + std::panic::RefUnwindSafe> std::panic::UnwindSafe for Arc<T> {}
121
122#[cfg(portable_atomic_unstable_coerce_unsized)]
123impl<T: ?Sized + Unsize<U>, U: ?Sized> CoerceUnsized<Arc<U>> for Arc<T> {}
124
125impl<T: ?Sized> Arc<T> {
126    #[inline]
127    fn into_inner_non_null(this: Self) -> NonNull<ArcInner<T>> {
128        let this = mem::ManuallyDrop::new(this);
129        this.ptr
130    }
131
132    #[inline]
133    unsafe fn from_inner(ptr: NonNull<ArcInner<T>>) -> Self {
134        Self { ptr, phantom: PhantomData }
135    }
136
137    #[inline]
138    unsafe fn from_ptr(ptr: *mut ArcInner<T>) -> Self {
139        // SAFETY: the caller must uphold the safety contract.
140        unsafe { Self::from_inner(NonNull::new_unchecked(ptr)) }
141    }
142}
143
144#[allow(clippy::too_long_first_doc_paragraph)]
145/// `Weak` is a version of [`Arc`] that holds a non-owning reference to the
146/// managed allocation. The allocation is accessed by calling [`upgrade`] on the `Weak`
147/// pointer, which returns an <code>[Option]<[Arc]\<T>></code>.
148///
149/// This is an equivalent to [`std::sync::Weak`], but using [portable-atomic] for synchronization.
150/// See the documentation for [`std::sync::Weak`] for more details.
151///
152/// <!-- TODO: support coercing `Weak<T>` to `Weak<U>` with testing, if optional cfg `portable_atomic_unstable_coerce_unsized` is enabled -->
153/// **Note:** Unlike `std::sync::Weak`, coercing `Weak<T>` to `Weak<U>` is not possible, not even if
154/// the optional cfg `portable_atomic_unstable_coerce_unsized` is enabled.
155///
156/// [`upgrade`]: Weak::upgrade
157/// [portable-atomic]: https://crates.io/crates/portable-atomic
158///
159/// # Examples
160///
161/// ```
162/// use portable_atomic_util::Arc;
163/// use std::thread;
164///
165/// let five = Arc::new(5);
166/// let weak_five = Arc::downgrade(&five);
167///
168/// # let t =
169/// thread::spawn(move || {
170///     let five = weak_five.upgrade().unwrap();
171///     assert_eq!(*five, 5);
172/// });
173/// # t.join().unwrap(); // join thread to avoid https://github.com/rust-lang/miri/issues/1371
174/// ```
175pub struct Weak<T: ?Sized> {
176    // This is a `NonNull` to allow optimizing the size of this type in enums,
177    // but it is not necessarily a valid pointer.
178    // `Weak::new` sets this to `usize::MAX` so that it doesn’t need
179    // to allocate space on the heap. That's not a value a real pointer
180    // will ever have because `ArcInner` has alignment at least 2.
181    // This is only possible when `T: Sized`; unsized `T` never dangles.
182    ptr: NonNull<ArcInner<T>>,
183}
184
185unsafe impl<T: ?Sized + Sync + Send> Send for Weak<T> {}
186unsafe impl<T: ?Sized + Sync + Send> Sync for Weak<T> {}
187
188impl<T: ?Sized> fmt::Debug for Weak<T> {
189    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
190        f.write_str("(Weak)")
191    }
192}
193
194// This is repr(C) to future-proof against possible field-reordering, which
195// would interfere with otherwise safe [into|from]_raw() of transmutable
196// inner types.
197#[repr(C)]
198struct ArcInner<T: ?Sized> {
199    strong: atomic::AtomicUsize,
200
201    // the value usize::MAX acts as a sentinel for temporarily "locking" the
202    // ability to upgrade weak pointers or downgrade strong ones; this is used
203    // to avoid races in `make_mut` and `get_mut`.
204    weak: atomic::AtomicUsize,
205
206    data: T,
207}
208
209/// Calculates the layout for `ArcInner<T>` using the inner value's layout.
210fn arc_inner_layout_for_value_layout(layout: Layout) -> Layout {
211    // Calculate layout using the given value layout.
212    // Previously, layout was calculated on the expression
213    // `&*(ptr as *const ArcInner<T>)`, but this created a misaligned
214    // reference (see #54908).
215    pad_to_align(extend_layout(Layout::new::<ArcInner<()>>(), layout).unwrap().0)
216}
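
// Worked example (assuming a typical 64-bit target where `AtomicUsize` is 8 bytes
// with alignment 8): for `T = u64`, `Layout::new::<ArcInner<()>>()` is
// { size: 16, align: 8 }; extending it with { size: 8, align: 8 } places the value
// at offset 16 and yields { size: 24, align: 8 }, which `pad_to_align` leaves
// unchanged. That offset (16) is what `data_offset` later reverses in `Arc::from_raw`.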
217
218unsafe impl<T: ?Sized + Sync + Send> Send for ArcInner<T> {}
219unsafe impl<T: ?Sized + Sync + Send> Sync for ArcInner<T> {}
220
221impl<T> Arc<T> {
222    /// Constructs a new `Arc<T>`.
223    ///
224    /// # Examples
225    ///
226    /// ```
227    /// use portable_atomic_util::Arc;
228    ///
229    /// let five = Arc::new(5);
230    /// ```
231    #[inline]
232    pub fn new(data: T) -> Self {
233        // Start the weak pointer count as 1, which is the weak pointer that's
234        // held by all the strong pointers (kinda); see std/rc.rs for more info.
235        let x: Box<_> = Box::new(ArcInner {
236            strong: atomic::AtomicUsize::new(1),
237            weak: atomic::AtomicUsize::new(1),
238            data,
239        });
240        unsafe { Self::from_inner(Box::leak(x).into()) }
241    }
242
243    /// Constructs a new `Arc<T>` while giving you a `Weak<T>` to the allocation,
244    /// to allow you to construct a `T` which holds a weak pointer to itself.
245    ///
246    /// Generally, a structure circularly referencing itself, either directly or
247    /// indirectly, should not hold a strong reference to itself to prevent a memory leak.
248    /// Using this function, you get access to the weak pointer during the
249    /// initialization of `T`, before the `Arc<T>` is created, such that you can
250    /// clone and store it inside the `T`.
251    ///
252    /// `new_cyclic` first allocates the managed allocation for the `Arc<T>`,
253    /// then calls your closure, giving it a `Weak<T>` to this allocation,
254    /// and only afterwards completes the construction of the `Arc<T>` by placing
255    /// the `T` returned from your closure into the allocation.
256    ///
257    /// Since the new `Arc<T>` is not fully-constructed until `Arc<T>::new_cyclic`
258    /// returns, calling [`upgrade`] on the weak reference inside your closure will
259    /// fail and result in a `None` value.
260    ///
261    /// # Panics
262    ///
263    /// If `data_fn` panics, the panic is propagated to the caller, and the
264    /// temporary [`Weak<T>`] is dropped normally.
265    ///
266    /// # Example
267    ///
268    /// ```
269    /// use portable_atomic_util::{Arc, Weak};
270    ///
271    /// struct Gadget {
272    ///     me: Weak<Gadget>,
273    /// }
274    ///
275    /// impl Gadget {
276    ///     /// Constructs a reference counted Gadget.
277    ///     fn new() -> Arc<Self> {
278    ///         // `me` is a `Weak<Gadget>` pointing at the new allocation of the
279    ///         // `Arc` we're constructing.
280    ///         Arc::new_cyclic(|me| {
281    ///             // Create the actual struct here.
282    ///             Gadget { me: me.clone() }
283    ///         })
284    ///     }
285    ///
286    ///     /// Returns a reference counted pointer to Self.
287    ///     fn me(&self) -> Arc<Self> {
288    ///         self.me.upgrade().unwrap()
289    ///     }
290    /// }
291    /// ```
292    /// [`upgrade`]: Weak::upgrade
293    #[inline]
294    pub fn new_cyclic<F>(data_fn: F) -> Self
295    where
296        F: FnOnce(&Weak<T>) -> T,
297    {
298        // Construct the inner in the "uninitialized" state with a single
299        // weak reference.
300        let init_ptr = Weak::new_uninit_ptr();
301
302        let weak = Weak { ptr: init_ptr };
303
304        // It's important we don't give up ownership of the weak pointer, or
305        // else the memory might be freed by the time `data_fn` returns. If
306        // we really wanted to pass ownership, we could create an additional
307        // weak pointer for ourselves, but this would result in additional
308        // updates to the weak reference count which might not be necessary
309        // otherwise.
310        let data = data_fn(&weak);
311
312        // Now we can properly initialize the inner value and turn our weak
313        // reference into a strong reference.
314        unsafe {
315            let inner = init_ptr.as_ptr();
316            ptr::write(data_ptr::<T>(inner, &data), data);
317
318            // The above write to the data field must be visible to any threads which
319            // observe a non-zero strong count. Therefore we need at least "Release" ordering
320            // in order to synchronize with the `compare_exchange_weak` in `Weak::upgrade`.
321            //
322            // "Acquire" ordering is not required. When considering the possible behaviors
323            // of `data_fn` we only need to look at what it could do with a reference to a
324            // non-upgradeable `Weak`:
325            // - It can *clone* the `Weak`, increasing the weak reference count.
326            // - It can drop those clones, decreasing the weak reference count (but never to zero).
327            //
328            // These side effects do not impact us in any way, and no other side effects are
329            // possible with safe code alone.
330            let prev_value = (*inner).strong.fetch_add(1, Release);
331            debug_assert_eq!(prev_value, 0, "No prior strong references should exist");
332
333            // Strong references should collectively own a shared weak reference,
334            // so don't run the destructor for our old weak reference.
335            mem::forget(weak);
336
337            Self::from_inner(init_ptr)
338        }
339    }
340
341    /// Constructs a new `Arc` with uninitialized contents.
342    ///
343    /// # Examples
344    ///
345    /// ```
346    /// use portable_atomic_util::Arc;
347    ///
348    /// let mut five = Arc::<u32>::new_uninit();
349    ///
350    /// // Deferred initialization:
351    /// Arc::get_mut(&mut five).unwrap().write(5);
352    ///
353    /// let five = unsafe { five.assume_init() };
354    ///
355    /// assert_eq!(*five, 5)
356    /// ```
357    #[cfg(not(portable_atomic_no_maybe_uninit))]
358    #[inline]
359    #[must_use]
360    pub fn new_uninit() -> Arc<mem::MaybeUninit<T>> {
361        unsafe {
362            Arc::from_ptr(Arc::allocate_for_layout(
363                Layout::new::<T>(),
364                |layout| Global.allocate(layout),
365                |ptr| ptr as *mut _,
366            ))
367        }
368    }
369
370    /// Constructs a new `Pin<Arc<T>>`. If `T` does not implement `Unpin`, then
371    /// `data` will be pinned in memory and unable to be moved.
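    ///
    /// # Examples
    ///
    /// A minimal sketch; it relies only on `Arc::pin` and the `Deref` impl
    /// defined in this module.
    ///
    /// ```
    /// use portable_atomic_util::Arc;
    ///
    /// let pinned = Arc::pin(5);
    /// assert_eq!(*pinned, 5);
    /// ```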
372    #[must_use]
373    pub fn pin(data: T) -> Pin<Self> {
374        unsafe { Pin::new_unchecked(Self::new(data)) }
375    }
376
377    /// Returns the inner value, if the `Arc` has exactly one strong reference.
378    ///
379    /// Otherwise, an [`Err`] is returned with the same `Arc` that was
380    /// passed in.
381    ///
382    /// This will succeed even if there are outstanding weak references.
383    ///
384    /// It is strongly recommended to use [`Arc::into_inner`] instead if you don't
385    /// keep the `Arc` in the [`Err`] case.
386    /// Immediately dropping the [`Err`]-value, as the expression
387    /// `Arc::try_unwrap(this).ok()` does, can cause the strong count to
388    /// drop to zero and the inner value of the `Arc` to be dropped.
389    /// For instance, if two threads execute such an expression in parallel,
390    /// there is a race condition without the possibility of unsafety:
391    /// The threads could first both check whether they own the last instance
392    /// in `Arc::try_unwrap`, determine that they both do not, and then both
393    /// discard and drop their instance in the call to [`ok`][`Result::ok`].
394    /// In this scenario, the value inside the `Arc` is safely destroyed
395    /// by exactly one of the threads, but neither thread will ever be able
396    /// to use the value.
397    ///
398    /// # Examples
399    ///
400    /// ```
401    /// use portable_atomic_util::Arc;
402    ///
403    /// let x = Arc::new(3);
404    /// assert_eq!(Arc::try_unwrap(x), Ok(3));
405    ///
406    /// let x = Arc::new(4);
407    /// let _y = Arc::clone(&x);
408    /// assert_eq!(*Arc::try_unwrap(x).unwrap_err(), 4);
409    /// ```
410    #[inline]
411    pub fn try_unwrap(this: Self) -> Result<T, Self> {
412        if this.inner().strong.compare_exchange(1, 0, Relaxed, Relaxed).is_err() {
413            return Err(this);
414        }
415
416        acquire!(this.inner().strong);
417
418        let this = ManuallyDrop::new(this);
419        let elem: T = unsafe { ptr::read(&this.ptr.as_ref().data) };
420
421        // Make a weak pointer to clean up the implicit strong-weak reference
422        let _weak = Weak { ptr: this.ptr };
423
424        Ok(elem)
425    }
426
427    /// Returns the inner value, if the `Arc` has exactly one strong reference.
428    ///
429    /// Otherwise, [`None`] is returned and the `Arc` is dropped.
430    ///
431    /// This will succeed even if there are outstanding weak references.
432    ///
433    /// If `Arc::into_inner` is called on every clone of this `Arc`,
434    /// it is guaranteed that exactly one of the calls returns the inner value.
435    /// This means in particular that the inner value is not dropped.
436    ///
437    /// [`Arc::try_unwrap`] is conceptually similar to `Arc::into_inner`, but it
438    /// is meant for different use-cases. If used as a direct replacement
439    /// for `Arc::into_inner` anyway, such as with the expression
440    /// <code>[Arc::try_unwrap]\(this).[ok][Result::ok]()</code>, then it does
441    /// **not** give the same guarantee as described in the previous paragraph.
442    /// For more information, see the examples below and read the documentation
443    /// of [`Arc::try_unwrap`].
444    ///
445    /// # Examples
446    ///
447    /// Minimal example demonstrating the guarantee that `Arc::into_inner` gives.
448    ///
449    /// ```
450    /// use portable_atomic_util::Arc;
451    ///
452    /// let x = Arc::new(3);
453    /// let y = Arc::clone(&x);
454    ///
455    /// // Two threads calling `Arc::into_inner` on both clones of an `Arc`:
456    /// let x_thread = std::thread::spawn(|| Arc::into_inner(x));
457    /// let y_thread = std::thread::spawn(|| Arc::into_inner(y));
458    ///
459    /// let x_inner_value = x_thread.join().unwrap();
460    /// let y_inner_value = y_thread.join().unwrap();
461    ///
462    /// // One of the threads is guaranteed to receive the inner value:
463    /// assert!(matches!((x_inner_value, y_inner_value), (None, Some(3)) | (Some(3), None)));
464    /// // The result could also be `(None, None)` if the threads called
465    /// // `Arc::try_unwrap(x).ok()` and `Arc::try_unwrap(y).ok()` instead.
466    /// ```
467    ///
468    /// A more practical example demonstrating the need for `Arc::into_inner`:
469    /// ```
470    /// use portable_atomic_util::Arc;
471    ///
472    /// // Definition of a simple singly linked list using `Arc`:
473    /// #[derive(Clone)]
474    /// struct LinkedList<T>(Option<Arc<Node<T>>>);
475    /// struct Node<T>(T, Option<Arc<Node<T>>>);
476    ///
477    /// // Dropping a long `LinkedList<T>` relying on the destructor of `Arc`
478    /// // can cause a stack overflow. To prevent this, we can provide a
479    /// // manual `Drop` implementation that does the destruction in a loop:
480    /// impl<T> Drop for LinkedList<T> {
481    ///     fn drop(&mut self) {
482    ///         let mut link = self.0.take();
483    ///         while let Some(arc_node) = link.take() {
484    ///             if let Some(Node(_value, next)) = Arc::into_inner(arc_node) {
485    ///                 link = next;
486    ///             }
487    ///         }
488    ///     }
489    /// }
490    ///
491    /// // Implementation of `new` and `push` omitted
492    /// impl<T> LinkedList<T> {
493    ///     /* ... */
494    /// #   fn new() -> Self {
495    /// #       LinkedList(None)
496    /// #   }
497    /// #   fn push(&mut self, x: T) {
498    /// #       self.0 = Some(Arc::new(Node(x, self.0.take())));
499    /// #   }
500    /// }
501    ///
502    /// // The following code could have still caused a stack overflow
503    /// // despite the manual `Drop` impl if that `Drop` impl had used
504    /// // `Arc::try_unwrap(arc).ok()` instead of `Arc::into_inner(arc)`.
505    ///
506    /// // Create a long list and clone it
507    /// let mut x = LinkedList::new();
508    /// let size = 100000;
509    /// # let size = if cfg!(miri) { 100 } else { size };
510    /// for i in 0..size {
511    ///     x.push(i); // Adds i to the front of x
512    /// }
513    /// let y = x.clone();
514    ///
515    /// // Drop the clones in parallel
516    /// let x_thread = std::thread::spawn(|| drop(x));
517    /// let y_thread = std::thread::spawn(|| drop(y));
518    /// x_thread.join().unwrap();
519    /// y_thread.join().unwrap();
520    /// ```
521    #[inline]
522    pub fn into_inner(this: Self) -> Option<T> {
523        // Make sure that the ordinary `Drop` implementation isn’t called as well
524        let mut this = mem::ManuallyDrop::new(this);
525
526        // Following the implementation of `drop` and `drop_slow`
527        if this.inner().strong.fetch_sub(1, Release) != 1 {
528            return None;
529        }
530
531        acquire!(this.inner().strong);
532
533        // SAFETY: This mirrors the line
534        //
535        //     unsafe { ptr::drop_in_place(Self::get_mut_unchecked(self)) };
536        //
537        // in `drop_slow`. Instead of dropping the value behind the pointer,
538        // it is read and eventually returned; `ptr::read` has the same
539        // safety conditions as `ptr::drop_in_place`.
540        let inner = unsafe { ptr::read(Self::get_mut_unchecked(&mut this)) };
541
542        drop(Weak { ptr: this.ptr });
543
544        Some(inner)
545    }
546}
547
548#[cfg(not(portable_atomic_no_alloc_layout_extras))]
549impl<T> Arc<[T]> {
550    /// Constructs a new atomically reference-counted slice with uninitialized contents.
551    ///
552    /// # Examples
553    ///
554    /// ```
555    /// use portable_atomic_util::Arc;
556    ///
557    /// let mut values = Arc::<[u32]>::new_uninit_slice(3);
558    ///
559    /// // Deferred initialization:
560    /// let data = Arc::get_mut(&mut values).unwrap();
561    /// data[0].write(1);
562    /// data[1].write(2);
563    /// data[2].write(3);
564    ///
565    /// let values = unsafe { values.assume_init() };
566    ///
567    /// assert_eq!(*values, [1, 2, 3])
568    /// ```
569    #[inline]
570    #[must_use]
571    pub fn new_uninit_slice(len: usize) -> Arc<[mem::MaybeUninit<T>]> {
572        unsafe { Arc::from_ptr(Arc::allocate_for_slice(len)) }
573    }
574}
575
576#[cfg(not(portable_atomic_no_maybe_uninit))]
577impl<T> Arc<mem::MaybeUninit<T>> {
578    /// Converts to `Arc<T>`.
579    ///
580    /// # Safety
581    ///
582    /// As with [`MaybeUninit::assume_init`],
583    /// it is up to the caller to guarantee that the inner value
584    /// really is in an initialized state.
585    /// Calling this when the content is not yet fully initialized
586    /// causes immediate undefined behavior.
587    ///
588    /// [`MaybeUninit::assume_init`]: mem::MaybeUninit::assume_init
589    ///
590    /// # Examples
591    ///
592    /// ```
593    /// use portable_atomic_util::Arc;
594    ///
595    /// let mut five = Arc::<u32>::new_uninit();
596    ///
597    /// // Deferred initialization:
598    /// Arc::get_mut(&mut five).unwrap().write(5);
599    ///
600    /// let five = unsafe { five.assume_init() };
601    ///
602    /// assert_eq!(*five, 5)
603    /// ```
604    #[must_use = "`self` will be dropped if the result is not used"]
605    #[inline]
606    pub unsafe fn assume_init(self) -> Arc<T> {
607        let ptr = Arc::into_inner_non_null(self);
608        // SAFETY: MaybeUninit<T> has the same layout as T, and
609        // the caller must ensure data is initialized.
610        unsafe { Arc::from_inner(ptr.cast::<ArcInner<T>>()) }
611    }
612}
613
614#[cfg(not(portable_atomic_no_alloc_layout_extras))]
615impl<T> Arc<[mem::MaybeUninit<T>]> {
616    /// Converts to `Arc<[T]>`.
617    ///
618    /// # Safety
619    ///
620    /// As with [`MaybeUninit::assume_init`],
621    /// it is up to the caller to guarantee that the inner value
622    /// really is in an initialized state.
623    /// Calling this when the content is not yet fully initialized
624    /// causes immediate undefined behavior.
625    ///
626    /// [`MaybeUninit::assume_init`]: mem::MaybeUninit::assume_init
627    ///
628    /// # Examples
629    ///
630    /// ```
631    /// use portable_atomic_util::Arc;
632    ///
633    /// let mut values = Arc::<[u32]>::new_uninit_slice(3);
634    ///
635    /// // Deferred initialization:
636    /// let data = Arc::get_mut(&mut values).unwrap();
637    /// data[0].write(1);
638    /// data[1].write(2);
639    /// data[2].write(3);
640    ///
641    /// let values = unsafe { values.assume_init() };
642    ///
643    /// assert_eq!(*values, [1, 2, 3])
644    /// ```
645    #[must_use = "`self` will be dropped if the result is not used"]
646    #[inline]
647    pub unsafe fn assume_init(self) -> Arc<[T]> {
648        let ptr = Arc::into_inner_non_null(self);
649        // SAFETY: [MaybeUninit<T>] has the same layout as [T], and
650        // the caller must ensure data is initialized.
651        unsafe { Arc::from_ptr(ptr.as_ptr() as *mut ArcInner<[T]>) }
652    }
653}
654
655impl<T: ?Sized> Arc<T> {
656    /// Constructs an `Arc<T>` from a raw pointer.
657    ///
658    /// # Safety
659    ///
660    /// The raw pointer must have been previously returned by a call to
661    /// [`Arc<U>::into_raw`][into_raw] with the following requirements:
662    ///
663    /// * If `U` is sized, it must have the same size and alignment as `T`. This
664    ///   is trivially true if `U` is `T`.
665    /// * If `U` is unsized, its data pointer must have the same size and
666    ///   alignment as `T`. This is trivially true if `Arc<U>` was constructed
667    ///   through `Arc<T>` and then converted to `Arc<U>` through an [unsized
668    ///   coercion].
669    ///
670    /// Note that if `U` or `U`'s data pointer is not `T` but has the same size
671    /// and alignment, this is basically like transmuting references of
672    /// different types. See [`mem::transmute`] for more information
673    /// on what restrictions apply in this case.
674    ///
675    /// The user of `from_raw` has to make sure a specific value of `T` is only
676    /// dropped once.
677    ///
678    /// This function is unsafe because improper use may lead to memory unsafety,
679    /// even if the returned `Arc<T>` is never accessed.
680    ///
681    /// [into_raw]: Arc::into_raw
682    /// [unsized coercion]: https://doc.rust-lang.org/reference/type-coercions.html#unsized-coercions
683    ///
684    /// # Examples
685    ///
686    /// ```
687    /// use portable_atomic_util::Arc;
688    ///
689    /// let x = Arc::new("hello".to_owned());
690    /// let x_ptr = Arc::into_raw(x);
691    ///
692    /// unsafe {
693    ///     // Convert back to an `Arc` to prevent leak.
694    ///     let x = Arc::from_raw(x_ptr);
695    ///     assert_eq!(&*x, "hello");
696    ///
697    ///     // Further calls to `Arc::from_raw(x_ptr)` would be memory-unsafe.
698    /// }
699    ///
700    /// // The memory was freed when `x` went out of scope above, so `x_ptr` is now dangling!
701    /// ```
702    ///
703    /// Convert a slice back into its original array:
704    ///
705    /// ```
706    /// use portable_atomic_util::Arc;
707    ///
708    /// let x: Arc<[u32]> = Arc::from([1, 2, 3]);
709    /// let x_ptr: *const [u32] = Arc::into_raw(x);
710    ///
711    /// unsafe {
712    ///     let x: Arc<[u32; 3]> = Arc::from_raw(x_ptr.cast::<[u32; 3]>());
713    ///     assert_eq!(&*x, &[1, 2, 3]);
714    /// }
715    /// ```
716    #[inline]
717    pub unsafe fn from_raw(ptr: *const T) -> Self {
718        unsafe {
719            let offset = data_offset::<T>(&*ptr);
720
721            // Reverse the offset to find the original ArcInner.
722            let arc_ptr = strict::byte_sub(ptr as *mut T, offset) as *mut ArcInner<T>;
723
724            Self::from_ptr(arc_ptr)
725        }
726    }
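
    // Worked round-trip example for `into_raw`/`from_raw` (assuming the typical
    // 64-bit layout sketched above `arc_inner_layout_for_value_layout`): for an
    // `Arc<u64>`, `into_raw` returns the address of the `ArcInner<u64>` allocation
    // plus 16 (the data offset), and `from_raw` recomputes that offset via
    // `data_offset::<u64>` and walks back with `strict::byte_sub` to recover the
    // original `*mut ArcInner<u64>`.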
727
728    /// Increments the strong reference count on the `Arc<T>` associated with the
729    /// provided pointer by one.
730    ///
731    /// # Safety
732    ///
733    /// The pointer must have been obtained through `Arc::into_raw`, and the
734    /// associated `Arc` instance must be valid (i.e. the strong count must be at
735    /// least 1) for the duration of this method.
736    ///
737    /// # Examples
738    ///
739    /// ```
740    /// use portable_atomic_util::Arc;
741    ///
742    /// let five = Arc::new(5);
743    ///
744    /// unsafe {
745    ///     let ptr = Arc::into_raw(five);
746    ///     Arc::increment_strong_count(ptr);
747    ///
748    ///     // This assertion is deterministic because we haven't shared
749    ///     // the `Arc` between threads.
750    ///     let five = Arc::from_raw(ptr);
751    ///     assert_eq!(2, Arc::strong_count(&five));
752    /// #   // Prevent leaks for Miri.
753    /// #   Arc::decrement_strong_count(ptr);
754    /// }
755    /// ```
756    #[inline]
757    pub unsafe fn increment_strong_count(ptr: *const T) {
758        // Retain Arc, but don't touch refcount by wrapping in ManuallyDrop
759        let arc = unsafe { mem::ManuallyDrop::new(Self::from_raw(ptr)) };
760        // Now increase refcount, but don't drop new refcount either
761        let _arc_clone: mem::ManuallyDrop<_> = arc.clone();
762    }
763
764    /// Decrements the strong reference count on the `Arc<T>` associated with the
765    /// provided pointer by one.
766    ///
767    /// # Safety
768    ///
769    /// The pointer must have been obtained through `Arc::into_raw`, and the
770    /// associated `Arc` instance must be valid (i.e. the strong count must be at
771    /// least 1) when invoking this method. This method can be used to release the final
772    /// `Arc` and backing storage, but **should not** be called after the final `Arc` has been
773    /// released.
774    ///
775    /// # Examples
776    ///
777    /// ```
778    /// use portable_atomic_util::Arc;
779    ///
780    /// let five = Arc::new(5);
781    ///
782    /// unsafe {
783    ///     let ptr = Arc::into_raw(five);
784    ///     Arc::increment_strong_count(ptr);
785    ///
786    ///     // Those assertions are deterministic because we haven't shared
787    ///     // the `Arc` between threads.
788    ///     let five = Arc::from_raw(ptr);
789    ///     assert_eq!(2, Arc::strong_count(&five));
790    ///     Arc::decrement_strong_count(ptr);
791    ///     assert_eq!(1, Arc::strong_count(&five));
792    /// }
793    /// ```
794    #[inline]
795    pub unsafe fn decrement_strong_count(ptr: *const T) {
796        // SAFETY: the caller must uphold the safety contract.
797        unsafe { drop(Self::from_raw(ptr)) }
798    }
799}
800
801impl<T: ?Sized> Arc<T> {
802    /// Consumes the `Arc`, returning the wrapped pointer.
803    ///
804    /// To avoid a memory leak the pointer must be converted back to an `Arc` using
805    /// [`Arc::from_raw`].
806    ///
807    /// # Examples
808    ///
809    /// ```
810    /// use portable_atomic_util::Arc;
811    ///
812    /// let x = Arc::new("hello".to_owned());
813    /// let x_ptr = Arc::into_raw(x);
814    /// assert_eq!(unsafe { &*x_ptr }, "hello");
815    /// # // Prevent leaks for Miri.
816    /// # drop(unsafe { Arc::from_raw(x_ptr) });
817    /// ```
818    #[must_use = "losing the pointer will leak memory"]
819    pub fn into_raw(this: Self) -> *const T {
820        let this = ManuallyDrop::new(this);
821        Self::as_ptr(&*this)
822    }
823
824    /// Provides a raw pointer to the data.
825    ///
826    /// The counts are not affected in any way and the `Arc` is not consumed. The pointer is valid for
827    /// as long as there are strong counts in the `Arc`.
828    ///
829    /// # Examples
830    ///
831    /// ```
832    /// use portable_atomic_util::Arc;
833    ///
834    /// let x = Arc::new("hello".to_owned());
835    /// let y = Arc::clone(&x);
836    /// let x_ptr = Arc::as_ptr(&x);
837    /// assert_eq!(x_ptr, Arc::as_ptr(&y));
838    /// assert_eq!(unsafe { &*x_ptr }, "hello");
839    /// ```
840    #[must_use]
841    pub fn as_ptr(this: &Self) -> *const T {
842        let ptr: *mut ArcInner<T> = this.ptr.as_ptr();
843
844        unsafe { data_ptr::<T>(ptr, &**this) }
845    }
846
847    /// Creates a new [`Weak`] pointer to this allocation.
848    ///
849    /// # Examples
850    ///
851    /// ```
852    /// use portable_atomic_util::Arc;
853    ///
854    /// let five = Arc::new(5);
855    ///
856    /// let weak_five = Arc::downgrade(&five);
857    /// ```
858    #[must_use = "this returns a new `Weak` pointer, \
859                  without modifying the original `Arc`"]
860    #[allow(clippy::missing_panics_doc)]
861    pub fn downgrade(this: &Self) -> Weak<T> {
862        // This Relaxed is OK because we're checking the value in the CAS
863        // below.
864        let mut cur = this.inner().weak.load(Relaxed);
865
866        loop {
867            // check if the weak counter is currently "locked"; if so, spin.
868            if cur == usize::MAX {
869                hint::spin_loop();
870                cur = this.inner().weak.load(Relaxed);
871                continue;
872            }
873
874            // We can't allow the refcount to increase much past `MAX_REFCOUNT`.
875            assert!(cur <= MAX_REFCOUNT, "{}", INTERNAL_OVERFLOW_ERROR);
876
877            // NOTE: this code currently ignores the possibility of overflow
878            // into usize::MAX; in general both Rc and Arc need to be adjusted
879            // to deal with overflow.
880
881            // Unlike with Clone(), we need this to be an Acquire read to
882            // synchronize with the write coming from `is_unique`, so that the
883            // events prior to that write happen before this read.
884            match this.inner().weak.compare_exchange_weak(cur, cur + 1, Acquire, Relaxed) {
885                Ok(_) => {
886                    // Make sure we do not create a dangling Weak
887                    debug_assert!(!is_dangling(this.ptr.as_ptr()));
888                    return Weak { ptr: this.ptr };
889                }
890                Err(old) => cur = old,
891            }
892        }
893    }
894
895    /// Gets the number of [`Weak`] pointers to this allocation.
896    ///
897    /// # Safety
898    ///
899    /// This method by itself is safe, but using it correctly requires extra care.
900    /// Another thread can change the weak count at any time,
901    /// including potentially between calling this method and acting on the result.
902    ///
903    /// # Examples
904    ///
905    /// ```
906    /// use portable_atomic_util::Arc;
907    ///
908    /// let five = Arc::new(5);
909    /// let _weak_five = Arc::downgrade(&five);
910    ///
911    /// // This assertion is deterministic because we haven't shared
912    /// // the `Arc` or `Weak` between threads.
913    /// assert_eq!(1, Arc::weak_count(&five));
914    /// ```
915    #[inline]
916    #[must_use]
917    pub fn weak_count(this: &Self) -> usize {
918        let cnt = this.inner().weak.load(Relaxed);
919        // If the weak count is currently locked, the value of the
920        // count was 0 just before taking the lock.
921        if cnt == usize::MAX {
922            0
923        } else {
924            cnt - 1
925        }
926    }
927
928    /// Gets the number of strong (`Arc`) pointers to this allocation.
929    ///
930    /// # Safety
931    ///
932    /// This method by itself is safe, but using it correctly requires extra care.
933    /// Another thread can change the strong count at any time,
934    /// including potentially between calling this method and acting on the result.
935    ///
936    /// # Examples
937    ///
938    /// ```
939    /// use portable_atomic_util::Arc;
940    ///
941    /// let five = Arc::new(5);
942    /// let _also_five = Arc::clone(&five);
943    ///
944    /// // This assertion is deterministic because we haven't shared
945    /// // the `Arc` between threads.
946    /// assert_eq!(2, Arc::strong_count(&five));
947    /// ```
948    #[inline]
949    #[must_use]
950    pub fn strong_count(this: &Self) -> usize {
951        this.inner().strong.load(Relaxed)
952    }
953
954    #[inline]
955    fn inner(&self) -> &ArcInner<T> {
956        // This unsafety is ok because while this arc is alive we're guaranteed
957        // that the inner pointer is valid. Furthermore, we know that the
958        // `ArcInner` structure itself is `Sync` because the inner data is
959        // `Sync` as well, so we're ok loaning out an immutable pointer to these
960        // contents.
961        unsafe { self.ptr.as_ref() }
962    }
963
964    // Non-inlined part of `drop`.
965    #[inline(never)]
966    unsafe fn drop_slow(&mut self) {
967        // Destroy the data at this time, even though we must not free the box
968        // allocation itself (there might still be weak pointers lying around).
969        unsafe { ptr::drop_in_place(Self::get_mut_unchecked(self)) };
970
971        // Drop the weak ref collectively held by all strong references.
972        // (The upstream comment about taking a reference to `self.alloc` does
973        // not apply here: this port has no allocator field, so constructing the
974        // implicit `Weak` and letting its destructor run is all that is needed.)
975        drop(Weak { ptr: self.ptr });
976    }
977
978    /// Returns `true` if the two `Arc`s point to the same allocation in a vein similar to
979    /// [`ptr::eq`]. This function ignores the metadata of `dyn Trait` pointers.
980    ///
981    /// # Examples
982    ///
983    /// ```
984    /// use portable_atomic_util::Arc;
985    ///
986    /// let five = Arc::new(5);
987    /// let same_five = Arc::clone(&five);
988    /// let other_five = Arc::new(5);
989    ///
990    /// assert!(Arc::ptr_eq(&five, &same_five));
991    /// assert!(!Arc::ptr_eq(&five, &other_five));
992    /// ```
993    ///
994    /// [`ptr::eq`]: core::ptr::eq "ptr::eq"
995    #[inline]
996    #[must_use]
997    pub fn ptr_eq(this: &Self, other: &Self) -> bool {
998        ptr::eq(this.ptr.as_ptr() as *const (), other.ptr.as_ptr() as *const ())
999    }
1000}
1001
1002impl<T: ?Sized> Arc<T> {
1003    /// Allocates an `ArcInner<T>` with sufficient space for
1004    /// a possibly-unsized inner value where the value has the layout provided.
1005    ///
1006    /// The function `mem_to_arc_inner` is called with the data pointer
1007    /// and must return back a (potentially fat)-pointer for the `ArcInner<T>`.
1008    unsafe fn allocate_for_layout(
1009        value_layout: Layout,
1010        allocate: impl FnOnce(Layout) -> Option<NonNull<u8>>,
1011        mem_to_arc_inner: impl FnOnce(*mut u8) -> *mut ArcInner<T>,
1012    ) -> *mut ArcInner<T> {
1013        let layout = arc_inner_layout_for_value_layout(value_layout);
1014
1015        let ptr = allocate(layout).unwrap_or_else(|| handle_alloc_error(layout));
1016
1017        unsafe { Self::initialize_arc_inner(ptr, layout, mem_to_arc_inner) }
1018    }
1019
1020    unsafe fn initialize_arc_inner(
1021        ptr: NonNull<u8>,
1022        _layout: Layout,
1023        mem_to_arc_inner: impl FnOnce(*mut u8) -> *mut ArcInner<T>,
1024    ) -> *mut ArcInner<T> {
1025        let inner: *mut ArcInner<T> = mem_to_arc_inner(ptr.as_ptr());
1026        // debug_assert_eq!(unsafe { Layout::for_value_raw(inner) }, layout); // for_value_raw is unstable
1027
1028        // SAFETY: mem_to_arc_inner returns a valid pointer to an uninitialized ArcInner<T>.
1029        // ArcInner<T> is repr(C), and strong and weak are the first and second fields and
1030        // are the same type, so `inner as *mut atomic::AtomicUsize` is strong and
1031        // `(inner as *mut atomic::AtomicUsize).add(1)` is weak.
1032        unsafe {
1033            let strong = inner as *mut atomic::AtomicUsize;
1034            strong.write(atomic::AtomicUsize::new(1));
1035            let weak = strong.add(1);
1036            weak.write(atomic::AtomicUsize::new(1));
1037        }
1038
1039        inner
1040    }
1041}
1042
1043impl<T: ?Sized> Arc<T> {
1044    /// Allocates an `ArcInner<T>` with sufficient space for an unsized inner value.
1045    #[inline]
1046    unsafe fn allocate_for_value(value: &T) -> *mut ArcInner<T> {
1047        let ptr: *const T = value;
1048        // Allocate for the `ArcInner<T>` using the given value.
1049        unsafe {
1050            Self::allocate_for_layout(
1051                Layout::for_value(value),
1052                |layout| Global.allocate(layout),
1053                |mem| strict::with_metadata_of(mem, ptr as *mut ArcInner<T>),
1054            )
1055        }
1056    }
1057
1058    fn from_box(src: Box<T>) -> Arc<T> {
1059        unsafe {
1060            let value_size = size_of_val(&*src);
1061            let ptr = Self::allocate_for_value(&*src);
1062
1063            // Copy value as bytes
1064            ptr::copy_nonoverlapping(
1065                &*src as *const T as *const u8,
1066                data_ptr::<T>(ptr, &*src) as *mut u8,
1067                value_size,
1068            );
1069
1070            // Free the allocation without dropping its contents
1071            let box_ptr = Box::into_raw(src);
1072            let src = Box::from_raw(box_ptr as *mut mem::ManuallyDrop<T>);
1073            drop(src);
1074
1075            Self::from_ptr(ptr)
1076        }
1077    }
1078}
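
// Illustrative sketch of what `from_box` does for a caller (e.g. a `From<Box<T>>`
// impl, assumed to live elsewhere in this file):
//     let boxed: Box<str> = String::from("hello").into_boxed_str();
//     let arc: Arc<str> = Arc::from_box(boxed); // bytes copied, Box freed without dropping its contents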
1079
1080#[cfg(not(portable_atomic_no_alloc_layout_extras))]
1081impl<T> Arc<[T]> {
1082    /// Allocates an `ArcInner<[T]>` with the given length.
1083    unsafe fn allocate_for_slice(len: usize) -> *mut ArcInner<[T]> {
1084        unsafe {
1085            Self::allocate_for_layout(
1086                Layout::array::<T>(len).unwrap(),
1087                |layout| Global.allocate(layout),
1088                |mem| ptr::slice_from_raw_parts_mut(mem.cast::<T>(), len) as *mut ArcInner<[T]>,
1089            )
1090        }
1091    }
1092
1093    /// Constructs an `Arc<[T]>` from an iterator known to be of a certain size.
1094    ///
1095    /// Behavior is undefined should the size be wrong.
1096    unsafe fn from_iter_exact(iter: impl Iterator<Item = T>, len: usize) -> Self {
1097        // Panic guard while cloning T elements.
1098        // In the event of a panic, elements that have been written
1099        // into the new ArcInner will be dropped, then the memory freed.
1100        struct Guard<T> {
1101            ptr: *mut ArcInner<[mem::MaybeUninit<T>]>,
1102            elems: *mut T,
1103            n_elems: usize,
1104        }
1105
1106        impl<T> Drop for Guard<T> {
1107            fn drop(&mut self) {
1108                unsafe {
1109                    let slice = ptr::slice_from_raw_parts_mut(self.elems, self.n_elems);
1110                    ptr::drop_in_place(slice);
1111
1112                    drop(Box::from_raw(self.ptr));
1113                }
1114            }
1115        }
1116
1117        unsafe {
1118            let ptr: *mut ArcInner<[mem::MaybeUninit<T>]> = Arc::allocate_for_slice(len);
1119
1120            // Pointer to first element
1121            let elems = (*ptr).data.as_mut_ptr() as *mut T;
1122
1123            let mut guard = Guard { ptr, elems, n_elems: 0 };
1124
1125            for (i, item) in iter.enumerate() {
1126                ptr::write(elems.add(i), item);
1127                guard.n_elems += 1;
1128            }
1129
1130            // All clear. Forget the guard so it doesn't free the new ArcInner.
1131            mem::forget(guard);
1132
1133            Arc::from_ptr(ptr).assume_init()
1134        }
1135    }
1136}
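
// Illustrative caller of `from_iter_exact` (a sketch; the real callers, such as
// `FromIterator`/`From<&[T]>`-style impls, are assumed to live elsewhere in this file):
//     let v = vec![1, 2, 3];
//     let len = v.len();
//     // SAFETY: `Vec::into_iter` yields exactly `len` items.
//     let arc: Arc<[i32]> = unsafe { Arc::from_iter_exact(v.into_iter(), len) };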
1137
1138impl<T: ?Sized> Clone for Arc<T> {
1139    /// Makes a clone of the `Arc` pointer.
1140    ///
1141    /// This creates another pointer to the same allocation, increasing the
1142    /// strong reference count.
1143    ///
1144    /// # Examples
1145    ///
1146    /// ```
1147    /// use portable_atomic_util::Arc;
1148    ///
1149    /// let five = Arc::new(5);
1150    ///
1151    /// let _ = Arc::clone(&five);
1152    /// ```
1153    #[inline]
1154    fn clone(&self) -> Self {
1155        // Using a relaxed ordering is alright here, as knowledge of the
1156        // original reference prevents other threads from erroneously deleting
1157        // the object.
1158        //
1159        // As explained in the [Boost documentation][1], Increasing the
1160        // reference counter can always be done with memory_order_relaxed: New
1161        // references to an object can only be formed from an existing
1162        // reference, and passing an existing reference from one thread to
1163        // another must already provide any required synchronization.
1164        //
1165        // [1]: (www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html)
1166        let old_size = self.inner().strong.fetch_add(1, Relaxed);
1167
1168        // However we need to guard against massive refcounts in case someone is `mem::forget`ing
1169        // Arcs. If we don't do this the count can overflow and users will use-after free. This
1170        // branch will never be taken in any realistic program. We abort because such a program is
1171        // incredibly degenerate, and we don't care to support it.
1172        //
1173        // This check is not 100% water-proof: we error when the refcount grows beyond `isize::MAX`.
1174        // But we do that check *after* having done the increment, so there is a chance here that
1175        // the worst already happened and we actually do overflow the `usize` counter. However, that
1176        // requires the counter to grow from `isize::MAX` to `usize::MAX` between the increment
1177        // above and the `abort` below, which seems exceedingly unlikely.
1178        //
1179        // This is a global invariant, and also applies when using a compare-exchange loop to increment
1180        // counters in other methods.
1181        // Otherwise, the counter could be brought to an almost-overflow using a compare-exchange loop,
1182        // and then overflow using a few `fetch_add`s.
1183        if old_size > MAX_REFCOUNT {
1184            abort();
1185        }
1186
1187        unsafe { Self::from_inner(self.ptr) }
1188    }
1189}
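
// Observable effect of `clone` (doc-style sketch using only APIs defined in this file):
//     let a = Arc::new(5);
//     let b = Arc::clone(&a);
//     assert!(Arc::ptr_eq(&a, &b));          // same allocation
//     assert_eq!(Arc::strong_count(&a), 2);  // only the count changed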
1190
1191impl<T: ?Sized> Deref for Arc<T> {
1192    type Target = T;
1193
1194    #[inline]
1195    fn deref(&self) -> &Self::Target {
1196        &self.inner().data
1197    }
1198}
1199
1200impl<T: ?Sized + CloneToUninit> Arc<T> {
1201    /// Makes a mutable reference into the given `Arc`.
1202    ///
1203    /// If there are other `Arc` pointers to the same allocation, then `make_mut` will
1204    /// [`clone`] the inner value to a new allocation to ensure unique ownership.  This is also
1205    /// referred to as clone-on-write.
1206    ///
1207    /// However, if there are no other `Arc` pointers to this allocation, but some [`Weak`]
1208    /// pointers, then the [`Weak`] pointers will be dissociated and the inner value will not
1209    /// be cloned.
1210    ///
1211    /// See also [`get_mut`], which will fail rather than cloning the inner value
1212    /// or dissociating [`Weak`] pointers.
1213    ///
1214    /// [`clone`]: Clone::clone
1215    /// [`get_mut`]: Arc::get_mut
1216    ///
1217    /// # Examples
1218    ///
1219    /// ```
1220    /// use portable_atomic_util::Arc;
1221    ///
1222    /// let mut data = Arc::new(5);
1223    ///
1224    /// *Arc::make_mut(&mut data) += 1; // Won't clone anything
1225    /// let mut other_data = Arc::clone(&data); // Won't clone inner data
1226    /// *Arc::make_mut(&mut data) += 1; // Clones inner data
1227    /// *Arc::make_mut(&mut data) += 1; // Won't clone anything
1228    /// *Arc::make_mut(&mut other_data) *= 2; // Won't clone anything
1229    ///
1230    /// // Now `data` and `other_data` point to different allocations.
1231    /// assert_eq!(*data, 8);
1232    /// assert_eq!(*other_data, 12);
1233    /// ```
1234    ///
1235    /// [`Weak`] pointers will be dissociated:
1236    ///
1237    /// ```
1238    /// use portable_atomic_util::Arc;
1239    ///
1240    /// let mut data = Arc::new(75);
1241    /// let weak = Arc::downgrade(&data);
1242    ///
1243    /// assert!(75 == *data);
1244    /// assert!(75 == *weak.upgrade().unwrap());
1245    ///
1246    /// *Arc::make_mut(&mut data) += 1;
1247    ///
1248    /// assert!(76 == *data);
1249    /// assert!(weak.upgrade().is_none());
1250    /// ```
1251    #[inline]
1252    pub fn make_mut(this: &mut Self) -> &mut T {
1253        let size_of_val = mem::size_of_val::<T>(&**this);
1254
1255        // Note that we hold both a strong reference and a weak reference.
1256        // Thus, releasing our strong reference only will not, by itself, cause
1257        // the memory to be deallocated.
1258        //
1259        // Use Acquire to ensure that we see any writes to `weak` that happen
1260        // before release writes (i.e., decrements) to `strong`. Since we hold a
1261        // weak count, there's no chance the ArcInner itself could be
1262        // deallocated.
1263        if this.inner().strong.compare_exchange(1, 0, Acquire, Relaxed).is_err() {
1264            // Another strong pointer exists, so we must clone.
1265
1266            let this_data_ref: &T = this;
1267            // `in_progress` drops the allocation if we panic before finishing initializing it.
1268            let mut in_progress: UniqueArcUninit<T> = UniqueArcUninit::new(this_data_ref);
1269
1270            let initialized_clone = unsafe {
1271                // Clone. If the clone panics, `in_progress` will be dropped and clean up.
1272                this_data_ref.clone_to_uninit(in_progress.data_ptr());
1273                // Cast type of pointer, now that it is initialized.
1274                in_progress.into_arc()
1275            };
1276            *this = initialized_clone;
1277        } else if this.inner().weak.load(Relaxed) != 1 {
1278            // Relaxed suffices in the above because this is fundamentally an
1279            // optimization: we are always racing with weak pointers being
1280            // dropped. Worst case, we end up allocating a new Arc unnecessarily.
1281
1282            // We removed the last strong ref, but there are additional weak
1283            // refs remaining. We'll move the contents to a new Arc, and
1284            // invalidate the other weak refs.
1285
1286            // Note that it is not possible for the read of `weak` to yield
1287            // usize::MAX (i.e., locked), since the weak count can only be
1288            // locked by a thread with a strong reference.
1289
1290            // Materialize our own implicit weak pointer, so that it can clean
1291            // up the ArcInner as needed.
1292            let _weak = Weak { ptr: this.ptr };
1293
1294            // Can just steal the data, all that's left is `Weak`s
1295            //
1296            // We don't need panic-protection like the above branch does, but we might as well
1297            // use the same mechanism.
1298            let mut in_progress: UniqueArcUninit<T> = UniqueArcUninit::new(&**this);
1299            unsafe {
1300                // Initialize `in_progress` with move of **this.
1301                // We have to express this in terms of bytes because `T: ?Sized`; there is no
1302                // operation that just copies a value based on its `size_of_val()`.
1303                ptr::copy_nonoverlapping(
1304                    &**this as *const T as *const u8,
1305                    in_progress.data_ptr() as *mut u8,
1306                    size_of_val,
1307                );
1308
1309                ptr::write(this, in_progress.into_arc());
1310            }
1311        } else {
1312            // We were the sole reference of either kind; bump back up the
1313            // strong ref count.
1314            this.inner().strong.store(1, Release);
1315        }
1316
1317        // As with `get_mut()`, the unsafety is ok because our reference was
1318        // either unique to begin with, or became one upon cloning the contents.
1319        unsafe { Self::get_mut_unchecked(this) }
1320    }
1321}
1322
1323impl<T: Clone> Arc<T> {
1324    /// If we have the only reference to `T` then unwrap it. Otherwise, clone `T` and return the
1325    /// clone.
1326    ///
1327    /// Assuming `arc_t` is of type `Arc<T>`, this function is functionally equivalent to
1328    /// `(*arc_t).clone()`, but will avoid cloning the inner value where possible.
1329    ///
1330    /// # Examples
1331    ///
1332    /// ```
1333    /// use portable_atomic_util::Arc;
1334    /// use std::ptr;
1335    ///
1336    /// let inner = String::from("test");
1337    /// let ptr = inner.as_ptr();
1338    ///
1339    /// let arc = Arc::new(inner);
1340    /// let inner = Arc::unwrap_or_clone(arc);
1341    /// // The inner value was not cloned
1342    /// assert!(ptr::eq(ptr, inner.as_ptr()));
1343    ///
1344    /// let arc = Arc::new(inner);
1345    /// let arc2 = arc.clone();
1346    /// let inner = Arc::unwrap_or_clone(arc);
1347    /// // Because there were 2 references, we had to clone the inner value.
1348    /// assert!(!ptr::eq(ptr, inner.as_ptr()));
1349    /// // `arc2` is the last reference, so when we unwrap it we get back
1350    /// // the original `String`.
1351    /// let inner = Arc::unwrap_or_clone(arc2);
1352    /// assert!(ptr::eq(ptr, inner.as_ptr()));
1353    /// ```
1354    #[inline]
1355    pub fn unwrap_or_clone(this: Self) -> T {
1356        Self::try_unwrap(this).unwrap_or_else(|arc| (*arc).clone())
1357    }
1358}
1359
1360impl<T: ?Sized> Arc<T> {
1361    /// Returns a mutable reference into the given `Arc`, if there are
1362    /// no other `Arc` or [`Weak`] pointers to the same allocation.
1363    ///
1364    /// Returns [`None`] otherwise, because it is not safe to
1365    /// mutate a shared value.
1366    ///
1367    /// See also [`make_mut`][make_mut], which will [`clone`][clone]
1368    /// the inner value when there are other `Arc` pointers.
1369    ///
1370    /// [make_mut]: Arc::make_mut
1371    /// [clone]: Clone::clone
1372    ///
1373    /// # Examples
1374    ///
1375    /// ```
1376    /// use portable_atomic_util::Arc;
1377    ///
1378    /// let mut x = Arc::new(3);
1379    /// *Arc::get_mut(&mut x).unwrap() = 4;
1380    /// assert_eq!(*x, 4);
1381    ///
1382    /// let _y = Arc::clone(&x);
1383    /// assert!(Arc::get_mut(&mut x).is_none());
1384    /// ```
1385    #[inline]
1386    pub fn get_mut(this: &mut Self) -> Option<&mut T> {
1387        if this.is_unique() {
1388            // This unsafety is ok because we're guaranteed that the pointer
1389            // returned is the *only* pointer that will ever be returned to T. Our
1390            // reference count is guaranteed to be 1 at this point, and we required
1391            // the Arc itself to be `mut`, so we're returning the only possible
1392            // reference to the inner data.
1393            unsafe { Some(Self::get_mut_unchecked(this)) }
1394        } else {
1395            None
1396        }
1397    }
1398
1399    #[inline]
1400    unsafe fn get_mut_unchecked(this: &mut Self) -> &mut T {
1401        // We are careful to *not* create a reference covering the "count" fields, as
1402        // this would alias with concurrent access to the reference counts (e.g. by `Weak`).
1403        unsafe { &mut (*this.ptr.as_ptr()).data }
1404    }
1405
1406    /// Determine whether this is the unique reference (including weak refs) to
1407    /// the underlying data.
1408    ///
1409    /// Note that this requires locking the weak ref count.
1410    fn is_unique(&mut self) -> bool {
1411        // lock the weak pointer count if we appear to be the sole weak pointer
1412        // holder.
1413        //
1414        // The acquire label here ensures a happens-before relationship with any
1415        // writes to `strong` (in particular in `Weak::upgrade`) prior to decrements
1416        // of the `weak` count (via `Weak::drop`, which uses release). If the upgraded
1417        // weak ref was never dropped, the CAS here will fail so we do not care to synchronize.
1418        if self.inner().weak.compare_exchange(1, usize::MAX, Acquire, Relaxed).is_ok() {
1419            // This needs to be an `Acquire` to synchronize with the decrement of the `strong`
1420            // counter in `drop` -- the only access that happens when any but the last reference
1421            // is being dropped.
1422            let unique = self.inner().strong.load(Acquire) == 1;
1423
1424            // The release write here synchronizes with a read in `downgrade`,
1425            // effectively preventing the above read of `strong` from happening
1426            // after the write.
1427            self.inner().weak.store(1, Release); // release the lock
1428            unique
1429        } else {
1430            false
1431        }
1432    }
1433}
1434
1435impl<T: ?Sized> Drop for Arc<T> {
1436    /// Drops the `Arc`.
1437    ///
1438    /// This will decrement the strong reference count. If the strong reference
1439    /// count reaches zero then the only other references (if any) are
1440    /// [`Weak`], so we `drop` the inner value.
1441    ///
1442    /// # Examples
1443    ///
1444    /// ```
1445    /// use portable_atomic_util::Arc;
1446    ///
1447    /// struct Foo;
1448    ///
1449    /// impl Drop for Foo {
1450    ///     fn drop(&mut self) {
1451    ///         println!("dropped!");
1452    ///     }
1453    /// }
1454    ///
1455    /// let foo = Arc::new(Foo);
1456    /// let foo2 = Arc::clone(&foo);
1457    ///
1458    /// drop(foo); // Doesn't print anything
1459    /// drop(foo2); // Prints "dropped!"
1460    /// ```
1461    #[inline]
1462    fn drop(&mut self) {
1463        // Because `fetch_sub` is already atomic, we do not need to synchronize
1464        // with other threads unless we are going to delete the object. This
1465        // same logic applies to the below `fetch_sub` to the `weak` count.
1466        if self.inner().strong.fetch_sub(1, Release) != 1 {
1467            return;
1468        }
1469
1470        // This fence is needed to prevent reordering of use of the data and
1471        // deletion of the data. Because it is marked `Release`, the decreasing
1472        // of the reference count synchronizes with this `Acquire` fence. This
1473        // means that use of the data happens before decreasing the reference
1474        // count, which happens before this fence, which happens before the
1475        // deletion of the data.
1476        //
1477        // As explained in the [Boost documentation][1],
1478        //
1479        // > It is important to enforce any possible access to the object in one
1480        // > thread (through an existing reference) to *happen before* deleting
1481        // > the object in a different thread. This is achieved by a "release"
1482        // > operation after dropping a reference (any access to the object
1483        // > through this reference must obviously happened before), and an
1484        // > "acquire" operation before deleting the object.
1485        //
1486        // In particular, while the contents of an Arc are usually immutable, it's
1487        // possible to have interior writes to something like a Mutex<T>. Since a
1488        // Mutex is not acquired when it is deleted, we can't rely on its
1489        // synchronization logic to make writes in thread A visible to a destructor
1490        // running in thread B.
1491        //
1492        // Also note that the Acquire fence here could probably be replaced with an
1493        // Acquire load, which could improve performance in highly-contended
1494        // situations. See [2].
1495        //
1496        // [1]: (www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html)
1497        // [2]: (https://github.com/rust-lang/rust/pull/41714)
1498        acquire!(self.inner().strong);
1499
1500        unsafe {
1501            self.drop_slow();
1502        }
1503    }
1504}
1505
1506impl Arc<dyn Any + Send + Sync> {
1507    /// Attempts to downcast the `Arc<dyn Any + Send + Sync>` to a concrete type.
1508    ///
1509    /// # Examples
1510    ///
1511    /// ```
1512    /// use portable_atomic_util::Arc;
1513    /// use std::any::Any;
1514    ///
1515    /// fn print_if_string(value: Arc<dyn Any + Send + Sync>) {
1516    ///     if let Ok(string) = value.downcast::<String>() {
1517    ///         println!("String ({}): {}", string.len(), string);
1518    ///     }
1519    /// }
1520    ///
1521    /// let my_string = "Hello World".to_string();
1522    /// print_if_string(Arc::from(Box::new(my_string) as Box<dyn Any + Send + Sync>));
1523    /// print_if_string(Arc::from(Box::new(0i8) as Box<dyn Any + Send + Sync>));
1524    /// // or with "--cfg portable_atomic_unstable_coerce_unsized" in RUSTFLAGS (requires Rust nightly):
1525    /// // print_if_string(Arc::new(my_string));
1526    /// // print_if_string(Arc::new(0i8));
1527    /// ```
1528    #[inline]
1529    pub fn downcast<T>(self) -> Result<Arc<T>, Self>
1530    where
1531        T: Any + Send + Sync,
1532    {
1533        if (*self).is::<T>() {
1534            unsafe {
1535                let ptr = Arc::into_inner_non_null(self);
1536                Ok(Arc::from_inner(ptr.cast::<ArcInner<T>>()))
1537            }
1538        } else {
1539            Err(self)
1540        }
1541    }
1542}
1543
1544impl<T> Weak<T> {
1545    /// Constructs a new `Weak<T>`, without allocating any memory.
1546    /// Calling [`upgrade`] on the return value always gives [`None`].
1547    ///
1548    /// [`upgrade`]: Weak::upgrade
1549    ///
1550    /// # Examples
1551    ///
1552    /// ```
1553    /// use portable_atomic_util::Weak;
1554    ///
1555    /// let empty: Weak<i64> = Weak::new();
1556    /// assert!(empty.upgrade().is_none());
1557    /// ```
1558    #[inline]
1559    #[must_use]
1560    pub const fn new() -> Self {
1561        Self {
1562            ptr: unsafe {
1563                NonNull::new_unchecked(strict::without_provenance_mut::<ArcInner<T>>(usize::MAX))
1564            },
1565        }
1566    }
1567
1568    #[inline]
1569    #[must_use]
1570    fn new_uninit_ptr() -> NonNull<ArcInner<T>> {
1571        unsafe {
1572            NonNull::new_unchecked(Self::allocate_for_layout(
1573                Layout::new::<T>(),
1574                |layout| Global.allocate(layout),
1575                |ptr| ptr as *mut _,
1576            ))
1577        }
1578    }
1579}
1580
1581/// Helper type to allow accessing the reference counts without
1582/// making any assertions about the data field.
1583struct WeakInner<'a> {
1584    weak: &'a atomic::AtomicUsize,
1585    strong: &'a atomic::AtomicUsize,
1586}
1587
1588// TODO: See Weak::from_raw
1589impl<T /*: ?Sized */> Weak<T> {
1590    /// Converts a raw pointer previously created by [`into_raw`] back into `Weak<T>`.
1591    ///
1592    /// This can be used to safely get a strong reference (by calling [`upgrade`]
1593    /// later) or to deallocate the weak count by dropping the `Weak<T>`.
1594    ///
1595    /// It takes ownership of one weak reference (with the exception of pointers created by [`new`],
1596    /// as these don't own anything; the method still works on them).
1597    ///
1598    /// # Safety
1599    ///
1600    /// The pointer must have originated from [`into_raw`] and must still own its potential
1601    /// weak reference.
1602    ///
1603    /// It is allowed for the strong count to be 0 at the time of calling this. Nevertheless, this
1604    /// takes ownership of one weak reference currently represented as a raw pointer (the weak
1605    /// count is not modified by this operation) and therefore it must be paired with a previous
1606    /// call to [`into_raw`].
    ///
1607    /// # Examples
1608    ///
1609    /// ```
1610    /// use portable_atomic_util::{Arc, Weak};
1611    ///
1612    /// let strong = Arc::new("hello".to_owned());
1613    ///
1614    /// let raw_1 = Arc::downgrade(&strong).into_raw();
1615    /// let raw_2 = Arc::downgrade(&strong).into_raw();
1616    ///
1617    /// assert_eq!(2, Arc::weak_count(&strong));
1618    ///
1619    /// assert_eq!("hello", &*unsafe { Weak::from_raw(raw_1) }.upgrade().unwrap());
1620    /// assert_eq!(1, Arc::weak_count(&strong));
1621    ///
1622    /// drop(strong);
1623    ///
1624    /// // Decrement the last weak count.
1625    /// assert!(unsafe { Weak::from_raw(raw_2) }.upgrade().is_none());
1626    /// ```
1627    ///
1628    /// [`new`]: Weak::new
1629    /// [`into_raw`]: Weak::into_raw
1630    /// [`upgrade`]: Weak::upgrade
1631    #[inline]
1632    pub unsafe fn from_raw(ptr: *const T) -> Self {
1633        // See Weak::as_ptr for context on how the input pointer is derived.
1634
1635        let ptr = if is_dangling(ptr) {
1636            // This is a dangling Weak.
1637            ptr as *mut ArcInner<T>
1638        } else {
1639            // Otherwise, we're guaranteed the pointer came from a non-dangling Weak.
1640            // TODO: data_offset calls align_of_val which needs to create a reference
1641            // to data but we cannot create a reference to data here since data in Weak
1642            // can be dropped concurrently from another thread. Therefore, we can
1643            // only support sized types that can avoid references to data
1644            // unless align_of_val_raw is stabilized.
1645            // // SAFETY: data_offset is safe to call, as ptr references a real (potentially dropped) T.
1646            // let offset = unsafe { data_offset::<T>(ptr) };
1647            let offset = data_offset_align(mem::align_of::<T>());
1648
1649            // Thus, we reverse the offset to get the whole ArcInner.
1650            // SAFETY: the pointer originated from a Weak, so this offset is safe.
1651            unsafe { strict::byte_sub(ptr as *mut T, offset) as *mut ArcInner<T> }
1652        };
1653
1654        // SAFETY: we now have recovered the original Weak pointer, so can create the Weak.
1655        Weak { ptr: unsafe { NonNull::new_unchecked(ptr) } }
1656    }
1657}
1658
1659// TODO: See Weak::from_raw
1660impl<T /*: ?Sized */> Weak<T> {
1661    /// Returns a raw pointer to the object `T` pointed to by this `Weak<T>`.
1662    ///
1663    /// The pointer is valid only if there are some strong references. The pointer may be dangling,
1664    /// unaligned or even [`null`] otherwise.
1665    ///
1666    /// # Examples
1667    ///
1668    /// ```
1669    /// use portable_atomic_util::Arc;
1670    /// use std::ptr;
1671    ///
1672    /// let strong = Arc::new("hello".to_owned());
1673    /// let weak = Arc::downgrade(&strong);
1674    /// // Both point to the same object
1675    /// assert!(ptr::eq(&*strong, weak.as_ptr()));
1676    /// // The strong here keeps it alive, so we can still access the object.
1677    /// assert_eq!("hello", unsafe { &*weak.as_ptr() });
1678    ///
1679    /// drop(strong);
1680    /// // But not any more. We can do weak.as_ptr(), but accessing the pointer would lead to
1681    /// // undefined behavior.
1682    /// // assert_eq!("hello", unsafe { &*weak.as_ptr() });
1683    /// ```
1684    ///
1685    /// [`null`]: core::ptr::null "ptr::null"
1686    #[must_use]
1687    pub fn as_ptr(&self) -> *const T {
1688        let ptr: *mut ArcInner<T> = self.ptr.as_ptr();
1689
1690        if is_dangling(ptr) {
1691            // If the pointer is dangling, we return the sentinel directly. This cannot be
1692            // a valid payload address, as the payload is at least as aligned as ArcInner (usize).
1693            ptr as *const T
1694        } else {
1695            // TODO: See Weak::from_raw
1696            // // SAFETY: if is_dangling returns false, then the pointer is dereferenceable.
1697            // // The payload may be dropped at this point, and we have to maintain provenance,
1698            // // so use raw pointer manipulation.
1699            // unsafe { data_ptr::<T>(ptr, &(*ptr).data) }
1700            unsafe {
1701                let offset = data_offset_align(mem::align_of::<T>());
1702                strict::byte_add(ptr, offset) as *const T
1703            }
1704        }
1705    }
1706
1707    /// Consumes the `Weak<T>` and turns it into a raw pointer.
1708    ///
1709    /// This converts the weak pointer into a raw pointer, while still preserving the ownership of
1710    /// one weak reference (the weak count is not modified by this operation). It can be turned
1711    /// back into the `Weak<T>` with [`from_raw`].
1712    ///
1713    /// The same restrictions of accessing the target of the pointer as with
1714    /// [`as_ptr`] apply.
1715    ///
1716    /// # Examples
1717    ///
1718    /// ```
1719    /// use portable_atomic_util::{Arc, Weak};
1720    ///
1721    /// let strong = Arc::new("hello".to_owned());
1722    /// let weak = Arc::downgrade(&strong);
1723    /// let raw = weak.into_raw();
1724    ///
1725    /// assert_eq!(1, Arc::weak_count(&strong));
1726    /// assert_eq!("hello", unsafe { &*raw });
1727    ///
1728    /// drop(unsafe { Weak::from_raw(raw) });
1729    /// assert_eq!(0, Arc::weak_count(&strong));
1730    /// ```
1731    ///
1732    /// [`from_raw`]: Weak::from_raw
1733    /// [`as_ptr`]: Weak::as_ptr
1734    #[must_use = "losing the pointer will leak memory"]
1735    pub fn into_raw(self) -> *const T {
1736        ManuallyDrop::new(self).as_ptr()
1737    }
1738}
1739
1740impl<T: ?Sized> Weak<T> {
1741    /// Attempts to upgrade the `Weak` pointer to an [`Arc`], delaying
1742    /// dropping of the inner value if successful.
1743    ///
1744    /// Returns [`None`] if the inner value has since been dropped.
1745    ///
1746    /// # Examples
1747    ///
1748    /// ```
1749    /// use portable_atomic_util::Arc;
1750    ///
1751    /// let five = Arc::new(5);
1752    ///
1753    /// let weak_five = Arc::downgrade(&five);
1754    ///
1755    /// let strong_five: Option<Arc<_>> = weak_five.upgrade();
1756    /// assert!(strong_five.is_some());
1757    ///
1758    /// // Destroy all strong pointers.
1759    /// drop(strong_five);
1760    /// drop(five);
1761    ///
1762    /// assert!(weak_five.upgrade().is_none());
1763    /// ```
1764    #[must_use = "this returns a new `Arc`, \
1765                  without modifying the original weak pointer"]
1766    pub fn upgrade(&self) -> Option<Arc<T>> {
1767        #[inline]
1768        fn checked_increment(n: usize) -> Option<usize> {
1769            // Any write of 0 we can observe leaves the field in a permanently zero state.
1770            if n == 0 {
1771                return None;
1772            }
1773            // See comments in `Arc::clone` for why we do this (for `mem::forget`).
1774            assert!(n <= MAX_REFCOUNT, "{}", INTERNAL_OVERFLOW_ERROR);
1775            Some(n + 1)
1776        }
1777
1778        // We use a CAS loop to increment the strong count instead of a
1779        // fetch_add as this function should never take the reference count
1780        // from zero to one.
1781        //
1782        // Relaxed is fine for the failure case because we don't have any expectations about the new state.
1783        // Acquire is necessary for the success case to synchronize with `Arc::new_cyclic`, when the inner
1784        // value can be initialized after `Weak` references have already been created. In that case, we
1785        // expect to observe the fully initialized value.
1786        if self.inner()?.strong.fetch_update(Acquire, Relaxed, checked_increment).is_ok() {
1787            // SAFETY: pointer is not null, verified in checked_increment
1788            unsafe { Some(Arc::from_inner(self.ptr)) }
1789        } else {
1790            None
1791        }
1792    }
1793
1794    /// Gets the number of strong (`Arc`) pointers pointing to this allocation.
1795    ///
1796    /// If `self` was created using [`Weak::new`], this will return 0.
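    ///
    /// # Examples
    ///
    /// A small illustration of how the count tracks the remaining `Arc`s:
    ///
    /// ```
    /// use portable_atomic_util::Arc;
    ///
    /// let five = Arc::new(5);
    /// let weak_five = Arc::downgrade(&five);
    /// assert_eq!(1, weak_five.strong_count());
    ///
    /// drop(five);
    /// // The last strong pointer is gone, so the value has been dropped.
    /// assert_eq!(0, weak_five.strong_count());
    /// ```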
1797    #[must_use]
1798    pub fn strong_count(&self) -> usize {
1799        if let Some(inner) = self.inner() {
1800            inner.strong.load(Relaxed)
1801        } else {
1802            0
1803        }
1804    }
1805
1806    /// Gets an approximation of the number of `Weak` pointers pointing to this
1807    /// allocation.
1808    ///
1809    /// If `self` was created using [`Weak::new`], or if there are no remaining
1810    /// strong pointers, this will return 0.
1811    ///
1812    /// # Accuracy
1813    ///
1814    /// Due to implementation details, the returned value can be off by 1 in
1815    /// either direction when other threads are manipulating any `Arc`s or
1816    /// `Weak`s pointing to the same allocation.
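    ///
    /// # Examples
    ///
    /// A small single-threaded illustration, where the count is exact:
    ///
    /// ```
    /// use portable_atomic_util::{Arc, Weak};
    ///
    /// let five = Arc::new(5);
    /// let weak_five = Arc::downgrade(&five);
    /// assert_eq!(1, weak_five.weak_count());
    ///
    /// let _also_weak = Weak::clone(&weak_five);
    /// assert_eq!(2, weak_five.weak_count());
    ///
    /// drop(five);
    /// // No strong pointers remain, so this reports 0.
    /// assert_eq!(0, weak_five.weak_count());
    /// ```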
1817    #[must_use]
1818    pub fn weak_count(&self) -> usize {
1819        if let Some(inner) = self.inner() {
1820            let weak = inner.weak.load(Acquire);
1821            let strong = inner.strong.load(Relaxed);
1822            if strong == 0 {
1823                0
1824            } else {
1825                // Since we observed that there was at least one strong pointer
1826                // after reading the weak count, we know that the implicit weak
1827                // reference (present whenever any strong references are alive)
1828                // was still around when we observed the weak count, and can
1829                // therefore safely subtract it.
1830                weak - 1
1831            }
1832        } else {
1833            0
1834        }
1835    }
1836
1837    /// Returns `None` when the pointer is dangling and there is no allocated `ArcInner`,
1838    /// (i.e., when this `Weak` was created by `Weak::new`).
1839    #[inline]
1840    fn inner(&self) -> Option<WeakInner<'_>> {
1841        let ptr = self.ptr.as_ptr();
1842        if is_dangling(ptr) {
1843            None
1844        } else {
1845            // SAFETY: non-dangling Weak is a valid pointer.
1846            // We are careful to *not* create a reference covering the "data" field, as
1847            // the field may be mutated concurrently (for example, if the last `Arc`
1848            // is dropped, the data field will be dropped in-place).
1849            Some(unsafe { WeakInner { strong: &(*ptr).strong, weak: &(*ptr).weak } })
1850        }
1851    }
1852
1853    /// Returns `true` if the two `Weak`s point to the same allocation similar to [`ptr::eq`], or if
1854    /// both don't point to any allocation (because they were created with `Weak::new()`). However,
1855    /// this function ignores the metadata of `dyn Trait` pointers.
1856    ///
1857    /// # Notes
1858    ///
1859    /// Since this compares pointers, it means that two `Weak`s created by `Weak::new()` will
1860    /// compare equal to each other, even though they don't point to any allocation.
1861    ///
1862    /// # Examples
1863    ///
1864    /// ```
1865    /// use portable_atomic_util::Arc;
1866    ///
1867    /// let first_rc = Arc::new(5);
1868    /// let first = Arc::downgrade(&first_rc);
1869    /// let second = Arc::downgrade(&first_rc);
1870    ///
1871    /// assert!(first.ptr_eq(&second));
1872    ///
1873    /// let third_rc = Arc::new(5);
1874    /// let third = Arc::downgrade(&third_rc);
1875    ///
1876    /// assert!(!first.ptr_eq(&third));
1877    /// ```
1878    ///
1879    /// Comparing `Weak::new`.
1880    ///
1881    /// ```
1882    /// use portable_atomic_util::{Arc, Weak};
1883    ///
1884    /// let first = Weak::new();
1885    /// let second = Weak::new();
1886    /// assert!(first.ptr_eq(&second));
1887    ///
1888    /// let third_rc = Arc::new(());
1889    /// let third = Arc::downgrade(&third_rc);
1890    /// assert!(!first.ptr_eq(&third));
1891    /// ```
1892    ///
1893    /// [`ptr::eq`]: core::ptr::eq "ptr::eq"
1894    #[inline]
1895    #[must_use]
1896    pub fn ptr_eq(&self, other: &Self) -> bool {
1897        ptr::eq(self.ptr.as_ptr() as *const (), other.ptr.as_ptr() as *const ())
1898    }
1899}
1900
1901impl<T: ?Sized> Weak<T> {
1902    /// Allocates an `ArcInner<T>` with sufficient space for
1903    /// a possibly-unsized inner value where the value has the layout provided.
1904    ///
1905    /// The function `mem_to_arc_inner` is called with the data pointer
1906    /// and must return a (potentially fat) pointer for the `ArcInner<T>`.
1907    unsafe fn allocate_for_layout(
1908        value_layout: Layout,
1909        allocate: impl FnOnce(Layout) -> Option<NonNull<u8>>,
1910        mem_to_arc_inner: impl FnOnce(*mut u8) -> *mut ArcInner<T>,
1911    ) -> *mut ArcInner<T> {
1912        let layout = arc_inner_layout_for_value_layout(value_layout);
1913
1914        let ptr = allocate(layout).unwrap_or_else(|| handle_alloc_error(layout));
1915
1916        unsafe { Self::initialize_arc_inner(ptr, layout, mem_to_arc_inner) }
1917    }
1918
1919    unsafe fn initialize_arc_inner(
1920        ptr: NonNull<u8>,
1921        _layout: Layout,
1922        mem_to_arc_inner: impl FnOnce(*mut u8) -> *mut ArcInner<T>,
1923    ) -> *mut ArcInner<T> {
1924        let inner: *mut ArcInner<T> = mem_to_arc_inner(ptr.as_ptr());
1925        // debug_assert_eq!(unsafe { Layout::for_value_raw(inner) }, layout); // for_value_raw is unstable
1926
1927        // SAFETY: mem_to_arc_inner returns a valid pointer to an uninitialized ArcInner<T>.
1928        // ArcInner<T> is repr(C), and strong and weak are the first and second fields and
1929        // are the same type, so `inner as *mut atomic::AtomicUsize` is strong and
1930        // `(inner as *mut atomic::AtomicUsize).add(1)` is weak.
1931        unsafe {
1932            let strong = inner as *mut atomic::AtomicUsize;
1933            strong.write(atomic::AtomicUsize::new(0));
1934            let weak = strong.add(1);
1935            weak.write(atomic::AtomicUsize::new(1));
1936        }
1937
1938        inner
1939    }
1940}
1941
1942impl<T: ?Sized> Clone for Weak<T> {
1943    /// Makes a clone of the `Weak` pointer that points to the same allocation.
1944    ///
1945    /// # Examples
1946    ///
1947    /// ```
1948    /// use portable_atomic_util::{Arc, Weak};
1949    ///
1950    /// let weak_five = Arc::downgrade(&Arc::new(5));
1951    ///
1952    /// let _ = Weak::clone(&weak_five);
1953    /// ```
1954    #[inline]
1955    fn clone(&self) -> Self {
1956        if let Some(inner) = self.inner() {
1957            // See comments in Arc::clone() for why this is relaxed. This can use a
1958            // fetch_add (ignoring the lock) because the weak count is only locked
1959            // when there are *no other* weak pointers in existence. (So we can't be
1960            // running this code in that case).
1961            let old_size = inner.weak.fetch_add(1, Relaxed);
1962
1963            // See comments in Arc::clone() for why we do this (for mem::forget).
1964            if old_size > MAX_REFCOUNT {
1965                abort();
1966            }
1967        }
1968
1969        Self { ptr: self.ptr }
1970    }
1971}
1972
1973impl<T> Default for Weak<T> {
1974    /// Constructs a new `Weak<T>`, without allocating memory.
1975    /// Calling [`upgrade`] on the return value always
1976    /// gives [`None`].
1977    ///
1978    /// [`upgrade`]: Weak::upgrade
1979    ///
1980    /// # Examples
1981    ///
1982    /// ```
1983    /// use portable_atomic_util::Weak;
1984    ///
1985    /// let empty: Weak<i64> = Default::default();
1986    /// assert!(empty.upgrade().is_none());
1987    /// ```
1988    fn default() -> Self {
1989        Self::new()
1990    }
1991}
1992
1993impl<T: ?Sized> Drop for Weak<T> {
1994    /// Drops the `Weak` pointer.
1995    ///
1996    /// # Examples
1997    ///
1998    /// ```
1999    /// use portable_atomic_util::{Arc, Weak};
2000    ///
2001    /// struct Foo;
2002    ///
2003    /// impl Drop for Foo {
2004    ///     fn drop(&mut self) {
2005    ///         println!("dropped!");
2006    ///     }
2007    /// }
2008    ///
2009    /// let foo = Arc::new(Foo);
2010    /// let weak_foo = Arc::downgrade(&foo);
2011    /// let other_weak_foo = Weak::clone(&weak_foo);
2012    ///
2013    /// drop(weak_foo); // Doesn't print anything
2014    /// drop(foo); // Prints "dropped!"
2015    ///
2016    /// assert!(other_weak_foo.upgrade().is_none());
2017    /// ```
2018    fn drop(&mut self) {
2019        // If we find out that we were the last weak pointer, then it's time to
2020        // deallocate the data entirely. See the discussion in Arc::drop() about
2021        // the memory orderings.
2022        //
2023        // It's not necessary to check for the locked state here, because the
2024        // weak count can only be locked if there was precisely one weak ref,
2025        // meaning that drop could only subsequently run ON that remaining weak
2026        // ref, which can only happen after the lock is released.
2027        let inner = if let Some(inner) = self.inner() { inner } else { return };
2028
2029        if inner.weak.fetch_sub(1, Release) == 1 {
2030            acquire!(inner.weak);
2031            // Free the allocation without dropping T
2032            let ptr = self.ptr.as_ptr() as *mut ArcInner<mem::ManuallyDrop<T>>;
2033            drop(unsafe { Box::from_raw(ptr) });
2034        }
2035    }
2036}
2037
2038impl<T: ?Sized + PartialEq> PartialEq for Arc<T> {
2039    /// Equality for two `Arc`s.
2040    ///
2041    /// Two `Arc`s are equal if their inner values are equal, even if they are
2042    /// stored in different allocations.
2043    ///
2044    /// If `T` also implements `Eq` (implying reflexivity of equality),
2045    /// two `Arc`s that point to the same allocation are always equal.
2046    ///
2047    /// # Examples
2048    ///
2049    /// ```
2050    /// use portable_atomic_util::Arc;
2051    ///
2052    /// let five = Arc::new(5);
2053    ///
2054    /// assert!(five == Arc::new(5));
2055    /// ```
2056    #[inline]
2057    fn eq(&self, other: &Self) -> bool {
2058        **self == **other
2059    }
2060
2061    /// Inequality for two `Arc`s.
2062    ///
2063    /// Two `Arc`s are not equal if their inner values are not equal.
2064    ///
2065    /// If `T` also implements `Eq` (implying reflexivity of equality),
2066    /// two `Arc`s that point to the same allocation are never unequal.
2067    ///
2068    /// # Examples
2069    ///
2070    /// ```
2071    /// use portable_atomic_util::Arc;
2072    ///
2073    /// let five = Arc::new(5);
2074    ///
2075    /// assert!(five != Arc::new(6));
2076    /// ```
2077    #[allow(clippy::partialeq_ne_impl)]
2078    #[inline]
2079    fn ne(&self, other: &Self) -> bool {
2080        **self != **other
2081    }
2082}
2083
2084impl<T: ?Sized + PartialOrd> PartialOrd for Arc<T> {
2085    /// Partial comparison for two `Arc`s.
2086    ///
2087    /// The two are compared by calling `partial_cmp()` on their inner values.
2088    ///
2089    /// # Examples
2090    ///
2091    /// ```
2092    /// use portable_atomic_util::Arc;
2093    /// use std::cmp::Ordering;
2094    ///
2095    /// let five = Arc::new(5);
2096    ///
2097    /// assert_eq!(Some(Ordering::Less), five.partial_cmp(&Arc::new(6)));
2098    /// ```
2099    fn partial_cmp(&self, other: &Self) -> Option<cmp::Ordering> {
2100        (**self).partial_cmp(&**other)
2101    }
2102
2103    /// Less-than comparison for two `Arc`s.
2104    ///
2105    /// The two are compared by calling `<` on their inner values.
2106    ///
2107    /// # Examples
2108    ///
2109    /// ```
2110    /// use portable_atomic_util::Arc;
2111    ///
2112    /// let five = Arc::new(5);
2113    ///
2114    /// assert!(five < Arc::new(6));
2115    /// ```
2116    fn lt(&self, other: &Self) -> bool {
2117        *(*self) < *(*other)
2118    }
2119
2120    /// 'Less than or equal to' comparison for two `Arc`s.
2121    ///
2122    /// The two are compared by calling `<=` on their inner values.
2123    ///
2124    /// # Examples
2125    ///
2126    /// ```
2127    /// use portable_atomic_util::Arc;
2128    ///
2129    /// let five = Arc::new(5);
2130    ///
2131    /// assert!(five <= Arc::new(5));
2132    /// ```
2133    fn le(&self, other: &Self) -> bool {
2134        *(*self) <= *(*other)
2135    }
2136
2137    /// Greater-than comparison for two `Arc`s.
2138    ///
2139    /// The two are compared by calling `>` on their inner values.
2140    ///
2141    /// # Examples
2142    ///
2143    /// ```
2144    /// use portable_atomic_util::Arc;
2145    ///
2146    /// let five = Arc::new(5);
2147    ///
2148    /// assert!(five > Arc::new(4));
2149    /// ```
2150    fn gt(&self, other: &Self) -> bool {
2151        *(*self) > *(*other)
2152    }
2153
2154    /// 'Greater than or equal to' comparison for two `Arc`s.
2155    ///
2156    /// The two are compared by calling `>=` on their inner values.
2157    ///
2158    /// # Examples
2159    ///
2160    /// ```
2161    /// use portable_atomic_util::Arc;
2162    ///
2163    /// let five = Arc::new(5);
2164    ///
2165    /// assert!(five >= Arc::new(5));
2166    /// ```
2167    fn ge(&self, other: &Self) -> bool {
2168        *(*self) >= *(*other)
2169    }
2170}
2171impl<T: ?Sized + Ord> Ord for Arc<T> {
2172    /// Comparison for two `Arc`s.
2173    ///
2174    /// The two are compared by calling `cmp()` on their inner values.
2175    ///
2176    /// # Examples
2177    ///
2178    /// ```
2179    /// use portable_atomic_util::Arc;
2180    /// use std::cmp::Ordering;
2181    ///
2182    /// let five = Arc::new(5);
2183    ///
2184    /// assert_eq!(Ordering::Less, five.cmp(&Arc::new(6)));
2185    /// ```
2186    fn cmp(&self, other: &Self) -> cmp::Ordering {
2187        (**self).cmp(&**other)
2188    }
2189}
2190impl<T: ?Sized + Eq> Eq for Arc<T> {}
2191
2192impl<T: ?Sized + fmt::Display> fmt::Display for Arc<T> {
2193    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
2194        fmt::Display::fmt(&**self, f)
2195    }
2196}
2197
2198impl<T: ?Sized + fmt::Debug> fmt::Debug for Arc<T> {
2199    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
2200        fmt::Debug::fmt(&**self, f)
2201    }
2202}
2203
2204impl<T: ?Sized> fmt::Pointer for Arc<T> {
2205    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
2206        fmt::Pointer::fmt(&(&**self as *const T), f)
2207    }
2208}
2209
2210impl<T: Default> Default for Arc<T> {
2211    /// Creates a new `Arc<T>`, with the `Default` value for `T`.
2212    ///
2213    /// # Examples
2214    ///
2215    /// ```
2216    /// use portable_atomic_util::Arc;
2217    ///
2218    /// let x: Arc<i32> = Default::default();
2219    /// assert_eq!(*x, 0);
2220    /// ```
2221    fn default() -> Self {
2222        Self::new(T::default())
2223    }
2224}
2225
2226#[cfg(not(portable_atomic_no_min_const_generics))]
2227impl Default for Arc<str> {
2228    /// Creates an empty str inside an Arc.
2229    ///
2230    /// This may or may not share an allocation with other Arcs.
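    ///
    /// # Examples
    ///
    /// A minimal illustration:
    ///
    /// ```
    /// use portable_atomic_util::Arc;
    ///
    /// let s: Arc<str> = Default::default();
    /// assert_eq!("", &*s);
    /// ```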
2231    #[inline]
2232    fn default() -> Self {
2233        let arc: Arc<[u8]> = Arc::default();
2234        debug_assert!(core::str::from_utf8(&arc).is_ok());
2235        let ptr = Arc::into_inner_non_null(arc);
2236        unsafe { Arc::from_ptr(ptr.as_ptr() as *mut ArcInner<str>) }
2237    }
2238}
2239
2240#[cfg(not(portable_atomic_no_min_const_generics))]
2241impl<T> Default for Arc<[T]> {
2242    /// Creates an empty `[T]` inside an Arc.
2243    ///
2244    /// This may or may not share an allocation with other Arcs.
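    ///
    /// # Examples
    ///
    /// A minimal illustration:
    ///
    /// ```
    /// use portable_atomic_util::Arc;
    ///
    /// let v: Arc<[i32]> = Default::default();
    /// assert!(v.is_empty());
    /// ```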
2245    #[inline]
2246    fn default() -> Self {
2247        // TODO: we cannot use non-allocation optimization (https://github.com/rust-lang/rust/blob/1.80.0/library/alloc/src/sync.rs#L3449)
2248        // for now since casting Arc<[T; N]> -> Arc<[T]> requires unstable CoerceUnsized.
2249        let arr: [T; 0] = [];
2250        Arc::from(arr)
2251    }
2252}
2253
2254impl<T: ?Sized + Hash> Hash for Arc<T> {
2255    fn hash<H: Hasher>(&self, state: &mut H) {
2256        (**self).hash(state);
2257    }
2258}
2259
2260impl<T> From<T> for Arc<T> {
2261    /// Converts a `T` into an `Arc<T>`
2262    ///
2263    /// The conversion moves the value into a
2264    /// newly allocated `Arc`. It is equivalent to
2265    /// calling `Arc::new(t)`.
2266    ///
2267    /// # Example
2268    ///
2269    /// ```
2270    /// use portable_atomic_util::Arc;
2271    /// let x = 5;
2272    /// let arc = Arc::new(5);
2273    ///
2274    /// assert_eq!(Arc::from(x), arc);
2275    /// ```
2276    fn from(t: T) -> Self {
2277        Self::new(t)
2278    }
2279}
2280
2281// This just outputs the input as is, but helps avoid syntax checks by old rustc that rejects const generics.
2282#[cfg(not(portable_atomic_no_min_const_generics))]
2283macro_rules! items {
2284    ($($tt:tt)*) => {
2285        $($tt)*
2286    };
2287}
2288
2289#[cfg(not(portable_atomic_no_min_const_generics))]
2290items! {
2291impl<T, const N: usize> From<[T; N]> for Arc<[T]> {
2292    /// Converts a [`[T; N]`](prim@array) into an `Arc<[T]>`.
2293    ///
2294    /// The conversion moves the array into a newly allocated `Arc`.
2295    ///
2296    /// # Example
2297    ///
2298    /// ```
2299    /// use portable_atomic_util::Arc;
2300    /// let original: [i32; 3] = [1, 2, 3];
2301    /// let shared: Arc<[i32]> = Arc::from(original);
2302    /// assert_eq!(&[1, 2, 3], &shared[..]);
2303    /// ```
2304    #[inline]
2305    fn from(v: [T; N]) -> Self {
2306        // Casting Arc<[T; N]> -> Arc<[T]> requires unstable CoerceUnsized, so we convert via Box.
2307        // Since the compiler knows the actual size and metadata, the intermediate allocation is
2308        // optimized and generates the same code as when using CoerceUnsized to convert Arc<[T; N]> to Arc<[T]>.
2309        // https://github.com/taiki-e/portable-atomic/issues/143#issuecomment-1866488569
2310        let v: Box<[T]> = Box::<[T; N]>::from(v);
2311        v.into()
2312    }
2313}
2314}
2315
2316#[cfg(not(portable_atomic_no_alloc_layout_extras))]
2317impl<T: Clone> From<&[T]> for Arc<[T]> {
2318    /// Allocates a reference-counted slice and fills it by cloning `v`'s items.
2319    ///
2320    /// # Example
2321    ///
2322    /// ```
2323    /// use portable_atomic_util::Arc;
2324    /// let original: &[i32] = &[1, 2, 3];
2325    /// let shared: Arc<[i32]> = Arc::from(original);
2326    /// assert_eq!(&[1, 2, 3], &shared[..]);
2327    /// ```
2328    #[inline]
2329    fn from(v: &[T]) -> Self {
2330        unsafe { Self::from_iter_exact(v.iter().cloned(), v.len()) }
2331    }
2332}
2333
2334#[cfg(not(portable_atomic_no_alloc_layout_extras))]
2335impl From<&str> for Arc<str> {
2336    /// Allocates a reference-counted `str` and copies `v` into it.
2337    ///
2338    /// # Example
2339    ///
2340    /// ```
2341    /// use portable_atomic_util::Arc;
2342    /// let shared: Arc<str> = Arc::from("eggplant");
2343    /// assert_eq!("eggplant", &shared[..]);
2344    /// ```
2345    #[inline]
2346    fn from(v: &str) -> Self {
2347        let arc = Arc::<[u8]>::from(v.as_bytes());
2348        // SAFETY: `str` has the same layout as `[u8]`.
2349        // https://doc.rust-lang.org/nightly/reference/type-layout.html#str-layout
2350        unsafe { Self::from_raw(Arc::into_raw(arc) as *const str) }
2351    }
2352}
2353
2354#[cfg(not(portable_atomic_no_alloc_layout_extras))]
2355impl From<String> for Arc<str> {
2356    /// Allocates a reference-counted `str` and copies `v` into it.
2357    ///
2358    /// # Example
2359    ///
2360    /// ```
2361    /// use portable_atomic_util::Arc;
2362    /// let unique: String = "eggplant".to_owned();
2363    /// let shared: Arc<str> = Arc::from(unique);
2364    /// assert_eq!("eggplant", &shared[..]);
2365    /// ```
2366    #[inline]
2367    fn from(v: String) -> Self {
2368        Self::from(&v[..])
2369    }
2370}
2371
2372impl<T: ?Sized> From<Box<T>> for Arc<T> {
2373    /// Move a boxed object to a new, reference-counted allocation.
2374    ///
2375    /// # Example
2376    ///
2377    /// ```
2378    /// use portable_atomic_util::Arc;
2379    /// let unique: Box<str> = Box::from("eggplant");
2380    /// let shared: Arc<str> = Arc::from(unique);
2381    /// assert_eq!("eggplant", &shared[..]);
2382    /// ```
2383    #[inline]
2384    fn from(v: Box<T>) -> Self {
2385        Self::from_box(v)
2386    }
2387}
2388
2389#[cfg(not(portable_atomic_no_alloc_layout_extras))]
2390impl<T> From<Vec<T>> for Arc<[T]> {
2391    /// Allocates a reference-counted slice and moves `v`'s items into it.
2392    ///
2393    /// # Example
2394    ///
2395    /// ```
2396    /// use portable_atomic_util::Arc;
2397    /// let unique: Vec<i32> = vec![1, 2, 3];
2398    /// let shared: Arc<[i32]> = Arc::from(unique);
2399    /// assert_eq!(&[1, 2, 3], &shared[..]);
2400    /// ```
2401    #[inline]
2402    fn from(v: Vec<T>) -> Self {
2403        unsafe {
2404            let len = v.len();
2405            let cap = v.capacity();
2406            let vec_ptr = mem::ManuallyDrop::new(v).as_mut_ptr();
2407
2408            let mut arc = Self::new_uninit_slice(len);
2409            let data = Arc::get_mut_unchecked(&mut arc);
2410            ptr::copy_nonoverlapping(vec_ptr, data.as_mut_ptr() as *mut T, len);
2411
2412            // Create a `Vec<T>` with length 0, to deallocate the buffer
2413            // without dropping its contents or the allocator
2414            let _ = Vec::from_raw_parts(vec_ptr, 0, cap);
2415
2416            arc.assume_init()
2417        }
2418    }
2419}
2420
2421#[cfg(not(portable_atomic_no_alloc_layout_extras))]
2422impl<'a, B> From<Cow<'a, B>> for Arc<B>
2423where
2424    B: ?Sized + ToOwned,
2425    Arc<B>: From<&'a B> + From<B::Owned>,
2426{
2427    /// Creates an atomically reference-counted pointer from a clone-on-write
2428    /// pointer by copying its content.
2429    ///
2430    /// # Example
2431    ///
2432    /// ```
2433    /// use portable_atomic_util::Arc;
2434    /// use std::borrow::Cow;
2435    /// let cow: Cow<'_, str> = Cow::Borrowed("eggplant");
2436    /// let shared: Arc<str> = Arc::from(cow);
2437    /// assert_eq!("eggplant", &shared[..]);
2438    /// ```
2439    #[inline]
2440    fn from(cow: Cow<'a, B>) -> Self {
2441        match cow {
2442            Cow::Borrowed(s) => Self::from(s),
2443            Cow::Owned(s) => Self::from(s),
2444        }
2445    }
2446}
2447
2448#[cfg(not(portable_atomic_no_alloc_layout_extras))]
2449impl From<Arc<str>> for Arc<[u8]> {
2450    /// Converts an atomically reference-counted string slice into a byte slice.
2451    ///
2452    /// # Example
2453    ///
2454    /// ```
2455    /// use portable_atomic_util::Arc;
2456    /// let string: Arc<str> = Arc::from("eggplant");
2457    /// let bytes: Arc<[u8]> = Arc::from(string);
2458    /// assert_eq!("eggplant".as_bytes(), bytes.as_ref());
2459    /// ```
2460    #[inline]
2461    fn from(rc: Arc<str>) -> Self {
2462        // SAFETY: `str` has the same layout as `[u8]`.
2463        // https://doc.rust-lang.org/nightly/reference/type-layout.html#str-layout
2464        unsafe { Self::from_raw(Arc::into_raw(rc) as *const [u8]) }
2465    }
2466}
2467
2468#[cfg(not(portable_atomic_no_min_const_generics))]
2469items! {
2470impl<T, const N: usize> core::convert::TryFrom<Arc<[T]>> for Arc<[T; N]> {
2471    type Error = Arc<[T]>;
2472
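    /// Attempts to convert the slice into an array of length `N`, returning the original
    /// `Arc<[T]>` unchanged if the length does not match.
    ///
    /// A small illustration (the explicit `TryFrom` import is only needed on editions
    /// where it is not already in the prelude):
    ///
    /// ```
    /// use portable_atomic_util::Arc;
    /// use std::convert::TryFrom;
    ///
    /// let slice: Arc<[i32]> = Arc::from([1, 2, 3]);
    /// let array: Arc<[i32; 3]> = Arc::try_from(slice).unwrap();
    /// assert_eq!([1, 2, 3], *array);
    /// ```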
2473    fn try_from(boxed_slice: Arc<[T]>) -> Result<Self, Self::Error> {
2474        if boxed_slice.len() == N {
2475            let ptr = Arc::into_inner_non_null(boxed_slice);
2476            Ok(unsafe { Self::from_inner(ptr.cast::<ArcInner<[T; N]>>()) })
2477        } else {
2478            Err(boxed_slice)
2479        }
2480    }
2481}
2482}
2483
2484#[cfg(not(portable_atomic_no_alloc_layout_extras))]
2485impl<T> core::iter::FromIterator<T> for Arc<[T]> {
2486    /// Takes each element in the `Iterator` and collects it into an `Arc<[T]>`.
2487    ///
2488    /// # Performance characteristics
2489    ///
2490    /// ## The general case
2491    ///
2492    /// In the general case, collecting into `Arc<[T]>` is done by first
2493    /// collecting into a `Vec<T>`. That is, when writing the following:
2494    ///
2495    /// ```
2496    /// use portable_atomic_util::Arc;
2497    /// let evens: Arc<[u8]> = (0..10).filter(|&x| x % 2 == 0).collect();
2498    /// # assert_eq!(&*evens, &[0, 2, 4, 6, 8]);
2499    /// ```
2500    ///
2501    /// this behaves as if we wrote:
2502    ///
2503    /// ```
2504    /// use portable_atomic_util::Arc;
2505    /// let evens: Arc<[u8]> = (0..10).filter(|&x| x % 2 == 0)
2506    ///     .collect::<Vec<_>>() // The first set of allocations happens here.
2507    ///     .into(); // A second allocation for `Arc<[T]>` happens here.
2508    /// # assert_eq!(&*evens, &[0, 2, 4, 6, 8]);
2509    /// ```
2510    ///
2511    /// This will allocate as many times as needed for constructing the `Vec<T>`
2512    /// and then it will allocate once for turning the `Vec<T>` into the `Arc<[T]>`.
2513    ///
2514    /// ## Iterators of known length
2515    ///
2516    /// `std` can make a single allocation for the `Arc<[T]>` when the `Iterator` implements `TrustedLen`
2517    /// and has an exact size; this implementation always collects into a `Vec<T>` first, as described above:
2518    ///
2519    /// ```
2520    /// use portable_atomic_util::Arc;
2521    /// let evens: Arc<[u8]> = (0..10).collect();
2522    /// # assert_eq!(&*evens, &*(0..10).collect::<Vec<_>>());
2523    /// ```
2524    fn from_iter<I: IntoIterator<Item = T>>(iter: I) -> Self {
2525        iter.into_iter().collect::<Vec<T>>().into()
2526    }
2527}
2528
2529impl<T: ?Sized> borrow::Borrow<T> for Arc<T> {
2530    fn borrow(&self) -> &T {
2531        self
2532    }
2533}
2534
2535impl<T: ?Sized> AsRef<T> for Arc<T> {
2536    fn as_ref(&self) -> &T {
2537        self
2538    }
2539}
2540
2541impl<T: ?Sized> Unpin for Arc<T> {}
2542
2543/// Gets the pointer to the data within the given `ArcInner`.
2544///
2545/// # Safety
2546///
2547/// `arc` must uphold the safety requirements for `.byte_add(data_offset)`.
2548/// This is automatically satisfied if it is a pointer to a valid `ArcInner`.
2549unsafe fn data_ptr<T: ?Sized>(arc: *mut ArcInner<T>, data: &T) -> *mut T {
2550    // SAFETY: the caller must uphold the safety contract.
2551    unsafe {
2552        let offset = data_offset::<T>(data);
2553        strict::byte_add(arc, offset) as *mut T
2554    }
2555}
2556
2557/// Gets the offset within an `ArcInner` for the payload behind a pointer.
2558fn data_offset<T: ?Sized>(ptr: &T) -> usize {
2559    // Align the unsized value to the end of the ArcInner.
2560    // Because ArcInner is repr(C), it will always be the last field in memory.
2561    data_offset_align(align_of_val::<T>(ptr))
2562}
2563
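// As a rough worked example (assuming a 64-bit target where `AtomicUsize` is 8 bytes with
// 8-byte alignment): `ArcInner<()>` occupies 16 bytes for the two counters, so any payload
// alignment of 8 or less needs no padding and the data offset is 16, while a payload
// alignment of 32 rounds the header up to an offset of 32.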
2564#[inline]
2565fn data_offset_align(align: usize) -> usize {
2566    let layout = Layout::new::<ArcInner<()>>();
2567    layout.size() + padding_needed_for(layout, align)
2568}
2569
2570/// A unique owning pointer to an [`ArcInner`] **that does not imply the contents are initialized,**
2571/// but will deallocate it (without dropping the value) when dropped.
2572///
2573/// This is a helper for [`Arc::make_mut()`] to ensure correct cleanup on panic.
2574struct UniqueArcUninit<T: ?Sized> {
2575    ptr: NonNull<ArcInner<T>>,
2576    layout_for_value: Layout,
2577}
2578
2579impl<T: ?Sized> UniqueArcUninit<T> {
2580    /// Allocates an ArcInner with layout suitable to contain `for_value` or a clone of it.
2581    fn new(for_value: &T) -> Self {
2582        let layout = Layout::for_value(for_value);
2583        let ptr = unsafe { Arc::allocate_for_value(for_value) };
2584        Self { ptr: NonNull::new(ptr).unwrap(), layout_for_value: layout }
2585    }
2586
2587    /// Returns the pointer to be written into to initialize the [`Arc`].
2588    fn data_ptr(&mut self) -> *mut T {
2589        let offset = data_offset_align(self.layout_for_value.align());
2590        unsafe { strict::byte_add(self.ptr.as_ptr(), offset) as *mut T }
2591    }
2592
2593    /// Upgrade this into a normal [`Arc`].
2594    ///
2595    /// # Safety
2596    ///
2597    /// The data must have been initialized (by writing to [`Self::data_ptr()`]).
2598    unsafe fn into_arc(self) -> Arc<T> {
2599        let this = ManuallyDrop::new(self);
2600        let ptr = this.ptr.as_ptr();
2601
2602        // SAFETY: The pointer is valid as per `UniqueArcUninit::new`, and the caller is responsible
2603        // for having initialized the data.
2604        unsafe { Arc::from_ptr(ptr) }
2605    }
2606}
2607
2608impl<T: ?Sized> Drop for UniqueArcUninit<T> {
2609    fn drop(&mut self) {
2610        // SAFETY:
2611        // * new() produced a pointer safe to deallocate.
2612        // * We own the pointer unless into_arc() was called, which forgets us.
2613        unsafe {
2614            Global.deallocate(
2615                self.ptr.cast::<u8>(),
2616                arc_inner_layout_for_value_layout(self.layout_for_value),
2617            );
2618        }
2619    }
2620}
2621
2622#[cfg(not(portable_atomic_no_error_in_core))]
2623use core::error;
2624#[cfg(all(portable_atomic_no_error_in_core, feature = "std"))]
2625use std::error;
2626#[cfg(any(not(portable_atomic_no_error_in_core), feature = "std"))]
2627impl<T: ?Sized + error::Error> error::Error for Arc<T> {
2628    #[allow(deprecated)]
2629    fn description(&self) -> &str {
2630        error::Error::description(&**self)
2631    }
2632    #[allow(deprecated)]
2633    fn cause(&self) -> Option<&dyn error::Error> {
2634        error::Error::cause(&**self)
2635    }
2636    fn source(&self) -> Option<&(dyn error::Error + 'static)> {
2637        error::Error::source(&**self)
2638    }
2639}
2640
2641#[cfg(feature = "std")]
2642mod std_impls {
2643    use super::Arc;
2644
2645    // TODO: Other trait implementations that are stable but we currently don't provide:
2646    // - alloc::ffi
2647    //   - https://doc.rust-lang.org/nightly/alloc/sync/struct.Arc.html#impl-From%3C%26CStr%3E-for-Arc%3CCStr%3E
2648    //   - https://doc.rust-lang.org/nightly/alloc/sync/struct.Arc.html#impl-From%3CCString%3E-for-Arc%3CCStr%3E
2649    //   - https://doc.rust-lang.org/nightly/alloc/sync/struct.Arc.html#impl-Default-for-Arc%3CCStr%3E
2650    //   - Currently, we cannot implement these since CStr layout is not stable.
2651    // - std::ffi
2652    //   - https://doc.rust-lang.org/nightly/std/sync/struct.Arc.html#impl-From%3C%26OsStr%3E-for-Arc%3COsStr%3E
2653    //   - https://doc.rust-lang.org/nightly/std/sync/struct.Arc.html#impl-From%3COsString%3E-for-Arc%3COsStr%3E
2654    //   - Currently, we cannot implement these since OsStr layout is not stable.
2655    // - std::path
2656    //   - https://doc.rust-lang.org/nightly/std/sync/struct.Arc.html#impl-From%3C%26Path%3E-for-Arc%3CPath%3E
2657    //   - https://doc.rust-lang.org/nightly/std/sync/struct.Arc.html#impl-From%3CPathBuf%3E-for-Arc%3CPath%3E
2658    //   - Currently, we cannot implement these since Path layout is not stable.
2659
2660    // https://doc.rust-lang.org/nightly/std/sync/struct.Arc.html#impl-AsFd-for-Arc%3CT%3E
2661    // https://doc.rust-lang.org/nightly/std/sync/struct.Arc.html#impl-AsHandle-for-Arc%3CT%3E
2662    // https://doc.rust-lang.org/nightly/std/sync/struct.Arc.html#impl-AsRawFd-for-Arc%3CT%3E
2663    // https://doc.rust-lang.org/nightly/std/sync/struct.Arc.html#impl-AsSocket-for-Arc%3CT%3E
2664    // Note:
2665    // - T: ?Sized is currently only allowed on AsFd/AsHandle: https://github.com/rust-lang/rust/pull/114655#issuecomment-1977994288
2666    // - std doesn't implement AsRawHandle/AsRawSocket for Arc as of Rust 1.77.
2667    #[cfg(not(portable_atomic_no_io_safety))]
2668    #[cfg(unix)]
2669    use std::os::unix::io as fd;
2670    // - std::os::unix::io::AsRawFd and std::os::windows::io::{AsRawHandle, AsRawSocket} are available in all versions
2671    // - std::os::wasi::prelude::AsRawFd requires 1.56 (https://github.com/rust-lang/rust/commit/e555003e6d6b6d71ce5509a6b6c7a15861208d6c)
2672    // - std::os::unix::io::AsFd, std::os::wasi::prelude::AsFd, and std::os::windows::io::{AsHandle, AsSocket} require Rust 1.63
2673    // - std::os::wasi::io::AsFd requires Rust 1.65 (https://github.com/rust-lang/rust/pull/103308)
2674    // - std::os::fd requires Rust 1.66 (https://github.com/rust-lang/rust/pull/98368)
2675    // - std::os::hermit::io::AsFd requires Rust 1.69 (https://github.com/rust-lang/rust/commit/b5fb4f3d9b1b308d59cab24ef2f9bf23dad948aa)
2676    // - std::os::fd for HermitOS requires Rust 1.81 (https://github.com/rust-lang/rust/pull/126346)
2677    // - std::os::solid::io::AsFd is unstable (solid_ext, https://github.com/rust-lang/rust/pull/115159)
2678    // Note: we don't implement unstable ones.
2679    #[cfg(not(portable_atomic_no_io_safety))]
2680    #[cfg(target_os = "hermit")]
2681    use std::os::hermit::io as fd;
2682    #[cfg(not(portable_atomic_no_io_safety))]
2683    #[cfg(target_os = "wasi")]
2684    use std::os::wasi::prelude as fd;
2685    /// This impl allows implementing traits that require `AsRawFd` on Arc.
2686    /// ```
2687    /// # #[cfg(target_os = "hermit")]
2688    /// # use std::os::hermit::io::AsRawFd;
2689    /// # #[cfg(target_os = "wasi")]
2690    /// # use std::os::wasi::prelude::AsRawFd;
2691    /// # #[cfg(unix)]
2692    /// # use std::os::unix::io::AsRawFd;
2693    /// use portable_atomic_util::Arc;
2694    /// use std::net::UdpSocket;
2695    ///
2696    /// trait MyTrait: AsRawFd {}
2697    /// impl MyTrait for Arc<UdpSocket> {}
2698    /// ```
2699    // AsRawFd has been stable before io_safety, but this impl was added after io_safety: https://github.com/rust-lang/rust/pull/97437
2700    #[cfg(not(portable_atomic_no_io_safety))]
2701    #[cfg(any(unix, target_os = "hermit", target_os = "wasi"))]
2702    impl<T: fd::AsRawFd> fd::AsRawFd for Arc<T> {
2703        #[inline]
2704        fn as_raw_fd(&self) -> fd::RawFd {
2705            (**self).as_raw_fd()
2706        }
2707    }
2708    /// This impl allows implementing traits that require `AsFd` on Arc.
2709    /// ```
2710    /// # #[cfg(target_os = "hermit")]
2711    /// # use std::os::hermit::io::AsFd;
2712    /// # #[cfg(target_os = "wasi")]
2713    /// # use std::os::wasi::prelude::AsFd;
2714    /// # #[cfg(unix)]
2715    /// # use std::os::unix::io::AsFd;
2716    /// use portable_atomic_util::Arc;
2717    /// use std::net::UdpSocket;
2718    ///
2719    /// trait MyTrait: AsFd {}
2720    /// impl MyTrait for Arc<UdpSocket> {}
2721    /// ```
2722    #[cfg(not(portable_atomic_no_io_safety))]
2723    #[cfg(any(unix, target_os = "hermit", target_os = "wasi"))]
2724    impl<T: ?Sized + fd::AsFd> fd::AsFd for Arc<T> {
2725        #[inline]
2726        fn as_fd(&self) -> fd::BorrowedFd<'_> {
2727            (**self).as_fd()
2728        }
2729    }
2730    /// This impl allows implementing traits that require `AsHandle` on Arc.
2731    /// ```
2732    /// # use std::os::windows::io::AsHandle;
2733    /// use portable_atomic_util::Arc;
2734    /// use std::fs::File;
2735    ///
2736    /// trait MyTrait: AsHandle {}
2737    /// impl MyTrait for Arc<File> {}
2738    /// ```
2739    #[cfg(not(portable_atomic_no_io_safety))]
2740    #[cfg(windows)]
2741    impl<T: ?Sized + std::os::windows::io::AsHandle> std::os::windows::io::AsHandle for Arc<T> {
2742        #[inline]
2743        fn as_handle(&self) -> std::os::windows::io::BorrowedHandle<'_> {
2744            (**self).as_handle()
2745        }
2746    }
2747    /// This impl allows implementing traits that require `AsSocket` on Arc.
2748    /// ```
2749    /// # use std::os::windows::io::AsSocket;
2750    /// use portable_atomic_util::Arc;
2751    /// use std::net::UdpSocket;
2752    ///
2753    /// trait MyTrait: AsSocket {}
2754    /// impl MyTrait for Arc<UdpSocket> {}
2755    /// ```
2756    #[cfg(not(portable_atomic_no_io_safety))]
2757    #[cfg(windows)]
2758    impl<T: std::os::windows::io::AsSocket> std::os::windows::io::AsSocket for Arc<T> {
2759        #[inline]
2760        fn as_socket(&self) -> std::os::windows::io::BorrowedSocket<'_> {
2761            (**self).as_socket()
2762        }
2763    }
2764
2765    // https://doc.rust-lang.org/nightly/std/sync/struct.Arc.html#impl-Read-for-Arc%3CFile%3E
2766    // https://doc.rust-lang.org/nightly/std/sync/struct.Arc.html#impl-Seek-for-Arc%3CFile%3E
2767    // https://doc.rust-lang.org/nightly/std/sync/struct.Arc.html#impl-Write-for-Arc%3CFile%3E
2768    // Note: From discussions in https://github.com/rust-lang/rust/pull/94748 and relevant,
2769    // TcpStream and UnixStream will likely have similar implementations in the future.
2770    impl std::io::Read for Arc<std::fs::File> {
2771        fn read(&mut self, buf: &mut [u8]) -> std::io::Result<usize> {
2772            (&**self).read(buf)
2773        }
2774        #[cfg(not(portable_atomic_no_io_vec))]
2775        fn read_vectored(
2776            &mut self,
2777            bufs: &mut [std::io::IoSliceMut<'_>],
2778        ) -> std::io::Result<usize> {
2779            (&**self).read_vectored(bufs)
2780        }
2781        // fn read_buf(&mut self, cursor: BorrowedCursor<'_>) -> io::Result<()> {
2782        //     (&**self).read_buf(cursor)
2783        // }
2784        // #[inline]
2785        // fn is_read_vectored(&self) -> bool {
2786        //     (&**self).is_read_vectored()
2787        // }
2788        fn read_to_end(&mut self, buf: &mut alloc::vec::Vec<u8>) -> std::io::Result<usize> {
2789            (&**self).read_to_end(buf)
2790        }
2791        fn read_to_string(&mut self, buf: &mut alloc::string::String) -> std::io::Result<usize> {
2792            (&**self).read_to_string(buf)
2793        }
2794    }
2795    impl std::io::Write for Arc<std::fs::File> {
2796        fn write(&mut self, buf: &[u8]) -> std::io::Result<usize> {
2797            (&**self).write(buf)
2798        }
2799        #[cfg(not(portable_atomic_no_io_vec))]
2800        fn write_vectored(&mut self, bufs: &[std::io::IoSlice<'_>]) -> std::io::Result<usize> {
2801            (&**self).write_vectored(bufs)
2802        }
2803        // #[inline]
2804        // fn is_write_vectored(&self) -> bool {
2805        //     (&**self).is_write_vectored()
2806        // }
2807        #[inline]
2808        fn flush(&mut self) -> std::io::Result<()> {
2809            (&**self).flush()
2810        }
2811    }
2812    impl std::io::Seek for Arc<std::fs::File> {
2813        fn seek(&mut self, pos: std::io::SeekFrom) -> std::io::Result<u64> {
2814            (&**self).seek(pos)
2815        }
2816    }
2817}
2818
2819use self::clone::CloneToUninit;
2820mod clone {
2821    use core::ptr;
2822    #[cfg(not(portable_atomic_no_maybe_uninit))]
2823    use core::{
2824        mem::{self, MaybeUninit},
2825        slice,
2826    };
2827
2828    // Based on unstable core::clone::CloneToUninit.
2829    // This trait is private and cannot be implemented for types outside of `portable-atomic-util`.
2830    #[doc(hidden)] // private API
2831    pub unsafe trait CloneToUninit {
2832        unsafe fn clone_to_uninit(&self, dst: *mut Self);
2833    }
2834    unsafe impl<T: Clone> CloneToUninit for T {
2835        #[inline]
2836        unsafe fn clone_to_uninit(&self, dst: *mut Self) {
2837            // SAFETY: we're calling a specialization with the same contract
2838            unsafe { clone_one(self, dst) }
2839        }
2840    }
2841    #[cfg(not(portable_atomic_no_maybe_uninit))]
2842    unsafe impl<T: Clone> CloneToUninit for [T] {
2843        #[inline]
2844        #[cfg_attr(all(debug_assertions, not(portable_atomic_no_track_caller)), track_caller)]
2845        unsafe fn clone_to_uninit(&self, dst: *mut Self) {
2846            // SAFETY: we're calling a specialization with the same contract
2847            unsafe { clone_slice(self, dst) }
2848        }
2849    }
2850    #[cfg(not(portable_atomic_no_maybe_uninit))]
2851    unsafe impl CloneToUninit for str {
2852        #[inline]
2853        #[cfg_attr(all(debug_assertions, not(portable_atomic_no_track_caller)), track_caller)]
2854        unsafe fn clone_to_uninit(&self, dst: *mut Self) {
2855            // SAFETY: str is just a [u8] with UTF-8 invariant
2856            unsafe { self.as_bytes().clone_to_uninit(dst as *mut [u8]) }
2857        }
2858    }
2859    // Note: Currently, we cannot implement this for CStr/OsStr/Path since their layout is not stable.
2860
2861    #[inline]
2862    unsafe fn clone_one<T: Clone>(src: &T, dst: *mut T) {
2863        // SAFETY: The safety conditions of clone_to_uninit() are a superset of those of
2864        // ptr::write().
2865        unsafe {
2866            // We hope the optimizer will figure out to create the cloned value in-place,
2867            // skipping ever storing it on the stack and the copy to the destination.
            ptr::write(dst, src.clone());
        }
    }
    #[cfg(not(portable_atomic_no_maybe_uninit))]
    #[inline]
    #[cfg_attr(all(debug_assertions, not(portable_atomic_no_track_caller)), track_caller)]
    unsafe fn clone_slice<T: Clone>(src: &[T], dst: *mut [T]) {
        let len = src.len();

        // SAFETY: The produced `&mut` is valid because:
        // * The caller is obligated to provide a pointer which is valid for writes.
        // * All bytes pointed to are in MaybeUninit, so we don't care about the memory's
        //   initialization status.
        let uninit_ref = unsafe { &mut *(dst as *mut [MaybeUninit<T>]) };

        // This is the most likely mistake to make, so check it as a debug assertion.
        debug_assert_eq!(
            len,
            uninit_ref.len(),
            "clone_to_uninit() source and destination must have equal lengths",
        );

        // Copy the elements
        let mut initializing = InitializingSlice::from_fully_uninit(uninit_ref);
        for element_ref in src {
            // If the clone() panics, `initializing` will take care of the cleanup.
            initializing.push(element_ref.clone());
        }
        // If we reach here, then the entire slice is initialized, and we've satisfied our
        // responsibilities to the caller. Disarm the cleanup guard by forgetting it.
        mem::forget(initializing);
    }

    /// Ownership of a collection of values stored in a non-owned `[MaybeUninit<T>]`, some of which
    /// are not yet initialized. This is sort of like a `Vec` that doesn't own its allocation.
    /// Its responsibility is to provide cleanup on unwind by dropping the values that *are*
    /// initialized, unless disarmed by forgetting.
    ///
    /// This is a helper for `impl<T: Clone> CloneToUninit for [T]`.
    #[cfg(not(portable_atomic_no_maybe_uninit))]
    struct InitializingSlice<'a, T> {
        data: &'a mut [MaybeUninit<T>],
        /// Number of elements of `*self.data` that are initialized.
        initialized_len: usize,
    }
    #[cfg(not(portable_atomic_no_maybe_uninit))]
    impl<'a, T> InitializingSlice<'a, T> {
        #[inline]
        fn from_fully_uninit(data: &'a mut [MaybeUninit<T>]) -> Self {
            Self { data, initialized_len: 0 }
        }
        /// Push a value onto the end of the initialized part of the slice.
        ///
        /// # Panics
        ///
        /// Panics if the slice is already fully initialized.
        #[inline]
        fn push(&mut self, value: T) {
            self.data[self.initialized_len] = MaybeUninit::new(value);
            self.initialized_len += 1;
        }
    }
    #[cfg(not(portable_atomic_no_maybe_uninit))]
    impl<T> Drop for InitializingSlice<'_, T> {
        #[cold] // will only be invoked on unwind
        fn drop(&mut self) {
            let initialized_slice = unsafe {
                slice::from_raw_parts_mut(self.data.as_mut_ptr() as *mut T, self.initialized_len)
            };
            // SAFETY:
            // * the pointer is valid because it was made from a mutable reference
            // * `initialized_len` counts the initialized elements as an invariant of this type,
            //   so each of the pointed-to elements is initialized and may be dropped.
            unsafe {
                ptr::drop_in_place::<[T]>(initialized_slice);
            }
        }
    }
}
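
// Illustrative sketch (not part of the upstream source): how the private `CloneToUninit`
// helper above is meant to be used. The destination must be valid for writes and have the
// same length as the source; on success every element is initialized, and if a clone panics
// partway through, `InitializingSlice` drops the elements that were already cloned.
#[cfg(all(test, not(portable_atomic_no_maybe_uninit)))]
mod clone_to_uninit_example {
    use core::{
        mem::{self, MaybeUninit},
        ptr,
    };

    use super::clone::CloneToUninit;

    #[test]
    fn clone_slice_into_uninit_buffer() {
        let src: &[i32] = &[1, 2, 3];
        // Destination storage with the same length as `src`, not yet initialized.
        let mut buf = [MaybeUninit::<i32>::uninit(); 3];
        let dst: *mut [i32] =
            ptr::slice_from_raw_parts_mut(buf.as_mut_ptr().cast::<i32>(), src.len());
        // SAFETY: `dst` is valid for writes and has the same length as `src`.
        unsafe { src.clone_to_uninit(dst) };
        // SAFETY: `clone_to_uninit` initialized every element of `buf`.
        let cloned = unsafe { mem::transmute::<[MaybeUninit<i32>; 3], [i32; 3]>(buf) };
        assert_eq!(cloned, [1, 2, 3]);
    }
}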

// Based on unstable Layout::padding_needed_for.
#[must_use]
#[inline]
fn padding_needed_for(layout: Layout, align: usize) -> usize {
    let len = layout.size();

    // Rounded up value is:
    //   len_rounded_up = (len + align - 1) & !(align - 1);
    // and then we return the padding difference: `len_rounded_up - len`.
    //
    // We use modular arithmetic throughout:
    //
    // 1. align is guaranteed to be > 0, so align - 1 is always
    //    valid.
    //
    // 2. `len + align - 1` can overflow by at most `align - 1`,
    //    so the &-mask with `!(align - 1)` will ensure that in the
    //    case of overflow, `len_rounded_up` will itself be 0.
    //    Thus the returned padding, when added to `len`, yields 0,
    //    which trivially satisfies the alignment `align`.
    //
    // (Of course, attempts to allocate blocks of memory whose
    // size and padding overflow in the above manner should cause
    // the allocator to yield an error anyway.)

    let len_rounded_up = len.wrapping_add(align).wrapping_sub(1) & !align.wrapping_sub(1);
    len_rounded_up.wrapping_sub(len)
}
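
// Illustrative sketch (not part of the upstream source): a worked example of the padding
// computation above. With `len = 10` and `align = 8`, `len_rounded_up` is
// `(10 + 8 - 1) & !(8 - 1) = 17 & !7 = 16`, so 6 bytes of padding are needed.
#[cfg(test)]
mod padding_needed_for_example {
    use core::alloc::Layout;

    #[test]
    fn padding_matches_manual_computation() {
        // Padding required to round a 10-byte layout up to 8-byte alignment.
        let layout = Layout::from_size_align(10, 2).unwrap();
        assert_eq!(super::padding_needed_for(layout, 8), 6);
        // Already a multiple of the alignment: no padding needed.
        let layout = Layout::from_size_align(16, 8).unwrap();
        assert_eq!(super::padding_needed_for(layout, 8), 0);
    }
}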

// Based on Layout::pad_to_align stabilized in Rust 1.44.
#[must_use]
#[inline]
fn pad_to_align(layout: Layout) -> Layout {
    let pad = padding_needed_for(layout, layout.align());
    // This cannot overflow. Quoting from the invariant of Layout:
    // > `size`, when rounded up to the nearest multiple of `align`,
    // > must not overflow isize (i.e., the rounded value must be
    // > less than or equal to `isize::MAX`)
    let new_size = layout.size() + pad;

    // SAFETY: padded size is guaranteed to not exceed `isize::MAX`.
    unsafe { Layout::from_size_align_unchecked(new_size, layout.align()) }
}

// Based on Layout::extend stabilized in Rust 1.44.
#[inline]
fn extend_layout(layout: Layout, next: Layout) -> Option<(Layout, usize)> {
    let new_align = cmp::max(layout.align(), next.align());
    let pad = padding_needed_for(layout, next.align());

    let offset = layout.size().checked_add(pad)?;
    let new_size = offset.checked_add(next.size())?;

    // The safe constructor is called here to enforce the isize size limit.
    let layout = Layout::from_size_align(new_size, new_align).ok()?;
    Some((layout, offset))
}
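
// Illustrative sketch (not part of the upstream source): `extend_layout` returns the combined
// layout of a "header followed by a value" plus the offset at which the value must be placed.
// This is the kind of computation `Arc` performs to place the user's value after its two
// reference counts. The header type below is a hypothetical stand-in for such a count header.
#[cfg(test)]
mod extend_layout_example {
    use core::alloc::Layout;

    #[test]
    fn header_then_value() {
        // A stand-in for a header of two word-sized counters.
        let header = Layout::new::<[usize; 2]>();
        let value = Layout::new::<u8>();
        let (combined, offset) = super::extend_layout(header, value).unwrap();
        // The value starts right after the header (no extra padding is needed for `u8`).
        assert_eq!(offset, header.size());
        assert_eq!(combined.align(), header.align());
        assert_eq!(combined.size(), header.size() + value.size());
        // Rounding the combined layout up to its alignment restores the invariant that
        // the size is a multiple of the alignment.
        let padded = super::pad_to_align(combined);
        assert_eq!(padded.size() % padded.align(), 0);
    }
}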

#[cfg(feature = "std")]
use std::process::abort;
// Fallback for no-std targets: a panic raised while another panic is already unwinding
// aborts the process, which gives us an `abort` without `std`.
#[cfg(not(feature = "std"))]
#[cold]
fn abort() -> ! {
    struct Abort;
    impl Drop for Abort {
        fn drop(&mut self) {
            panic!();
        }
    }

    // `panic!("abort")` starts unwinding; that drops `_abort`, whose destructor panics
    // again, turning the unwind into an abort.
    let _abort = Abort;
    panic!("abort")
}

// `Weak` uses a pointer with the maximum address (`usize::MAX`) as a sentinel for
// "no allocation"; such an address can never belong to a real allocation.
fn is_dangling<T: ?Sized>(ptr: *const T) -> bool {
    ptr as *const () as usize == usize::MAX
}

// Based on unstable alloc::alloc::Global.
//
// Note: unlike alloc::alloc::Global that returns NonNull<[u8]>,
// this returns NonNull<u8>.
struct Global;
#[allow(clippy::unused_self)]
impl Global {
    #[inline]
    #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
    fn allocate(self, layout: Layout) -> Option<NonNull<u8>> {
        // Layout::dangling is unstable
        #[must_use]
        #[inline]
        fn dangling(layout: Layout) -> NonNull<u8> {
            // SAFETY: align is guaranteed to be non-zero
            unsafe { NonNull::new_unchecked(strict::without_provenance_mut::<u8>(layout.align())) }
        }

        match layout.size() {
            0 => Some(dangling(layout)),
            // SAFETY: `layout` is non-zero in size,
            _size => unsafe {
                let raw_ptr = alloc::alloc::alloc(layout);
                NonNull::new(raw_ptr)
            },
        }
    }
    #[inline]
    #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
    unsafe fn deallocate(&self, ptr: NonNull<u8>, layout: Layout) {
        if layout.size() != 0 {
            // SAFETY: `layout` is non-zero in size,
            // other conditions must be upheld by the caller
            unsafe { alloc::alloc::dealloc(ptr.as_ptr(), layout) }
        }
    }
}
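
// Illustrative sketch (not part of the upstream source): the expected round-trip through the
// minimal `Global` shim above. Zero-sized layouts get a dangling, well-aligned pointer instead
// of a real allocation, and `deallocate` skips the system allocator for them by checking the size.
#[cfg(test)]
mod global_alloc_example {
    use core::alloc::Layout;

    use super::Global;

    #[test]
    fn allocate_then_deallocate() {
        let layout = Layout::new::<u64>();
        let ptr = Global.allocate(layout).expect("allocation failed");
        // SAFETY: `ptr` was just allocated with `layout` and has not been freed yet.
        unsafe { Global.deallocate(ptr, layout) };

        // Zero-sized case: no real allocation happens and `deallocate` is a no-op.
        let zst = Layout::new::<()>();
        let dangling = Global.allocate(zst).unwrap();
        assert_eq!(dangling.as_ptr() as usize, zst.align());
        // SAFETY: `dangling` came from `allocate` with the same zero-sized layout.
        unsafe { Global.deallocate(dangling, zst) };
    }
}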

// TODO: use stabilized core::ptr strict_provenance helpers https://github.com/rust-lang/rust/pull/130350
mod strict {
    #[inline(always)]
    #[must_use]
    pub(super) const fn without_provenance_mut<T>(addr: usize) -> *mut T {
        // An int-to-pointer transmute currently has exactly the intended semantics: it creates a
        // pointer without provenance. Note that this is *not* a stable guarantee about transmute
        // semantics, it relies on sysroot crates having special status.
        // SAFETY: every valid integer is also a valid pointer (as long as you don't dereference that
        // pointer).
        #[cfg(miri)]
        unsafe {
            core::mem::transmute(addr)
        }
        // const transmute requires Rust 1.56.
        #[cfg(not(miri))]
        {
            addr as *mut T
        }
    }

    /// Creates a new pointer with the metadata of `other`.
    #[inline]
    #[must_use]
    pub(super) fn with_metadata_of<T, U: ?Sized>(this: *mut T, mut other: *mut U) -> *mut U {
        let target = &mut other as *mut *mut U as *mut *mut u8;

        // SAFETY: In the case of a thin pointer, this operation is identical
        // to a simple assignment. In the case of a fat pointer, with the current
        // fat pointer layout implementation, the first field of such a
        // pointer is always the data pointer, which is likewise assigned.
        unsafe { *target = this as *mut u8 };
        other
    }

    // Based on <pointer>::byte_add stabilized in Rust 1.75.
    #[inline]
    #[must_use]
    pub(super) unsafe fn byte_add<T: ?Sized>(ptr: *mut T, count: usize) -> *mut T {
        // SAFETY: the caller must uphold the safety contract for `add`.
        unsafe { with_metadata_of((ptr as *mut u8).add(count), ptr) }
    }

    // Based on <pointer>::byte_sub stabilized in Rust 1.75.
    #[inline]
    #[must_use]
    pub(super) unsafe fn byte_sub<T: ?Sized>(ptr: *mut T, count: usize) -> *mut T {
        // SAFETY: the caller must uphold the safety contract for `sub`.
        unsafe { with_metadata_of((ptr as *mut u8).sub(count), ptr) }
    }
}