core/sync/atomic.rs
//! Atomic types
//!
//! Atomic types provide primitive shared-memory communication between
//! threads, and are the building blocks of other concurrent
//! types.
//!
//! This module defines atomic versions of a select number of primitive
//! types, including [`AtomicBool`], [`AtomicIsize`], [`AtomicUsize`],
//! [`AtomicI8`], [`AtomicU16`], etc.
//! Atomic types present operations that, when used correctly, synchronize
//! updates between threads.
//!
//! Atomic variables are safe to share between threads (they implement [`Sync`])
//! but they do not themselves provide the mechanism for sharing and follow the
//! [threading model](../../../std/thread/index.html#the-threading-model) of Rust.
//! The most common way to share an atomic variable is to put it into an [`Arc`][arc] (an
//! atomically-reference-counted shared pointer).
//!
//! [arc]: ../../../std/sync/struct.Arc.html
//!
//! Atomic types may be stored in static variables, initialized using
//! the constant initializers like [`AtomicBool::new`]. Atomic statics
//! are often used for lazy global initialization.
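//!
//! For example, a `static` atomic can hand out process-wide unique IDs without any
//! locking. A minimal sketch (`NEXT_ID` and `next_id` are just illustrative names):
//!
//! ```
//! use std::sync::atomic::{AtomicUsize, Ordering};
//!
//! static NEXT_ID: AtomicUsize = AtomicUsize::new(0);
//!
//! // Each call atomically increments the counter, so no two calls
//! // (even from different threads) return the same ID.
//! fn next_id() -> usize {
//!     NEXT_ID.fetch_add(1, Ordering::Relaxed)
//! }
//!
//! assert_ne!(next_id(), next_id());
//! ```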
//!
//! ## Memory model for atomic accesses
//!
//! Rust atomics currently follow the same rules as [C++20 atomics][cpp], specifically the rules
//! from the [`intro.races`][cpp-intro.races] section, without the "consume" memory ordering. Since
//! C++ uses an object-based memory model whereas Rust is access-based, a bit of translation work
//! has to be done to apply the C++ rules to Rust: whenever C++ talks about "the value of an
//! object", we understand that to mean the resulting bytes obtained when doing a read. When the C++
//! standard talks about "the value of an atomic object", this refers to the result of doing an
//! atomic load (via the operations provided in this module). A "modification of an atomic object"
//! refers to an atomic store.
//!
//! The end result is *almost* equivalent to saying that creating a *shared reference* to one of the
//! Rust atomic types corresponds to creating an `atomic_ref` in C++, with the `atomic_ref` being
//! destroyed when the lifetime of the shared reference ends. The main difference is that Rust
//! permits concurrent atomic and non-atomic reads to the same memory; those cause no issue in the
//! C++ memory model and are only forbidden in C++ because memory is partitioned into "atomic
//! objects" and "non-atomic objects" (with `atomic_ref` temporarily converting a non-atomic object
//! into an atomic object).
//!
//! The most important aspect of this model is that *data races* are undefined behavior. A data race
//! is defined as conflicting non-synchronized accesses where at least one of the accesses is
//! non-atomic. Here, accesses are *conflicting* if they affect overlapping regions of memory and at
//! least one of them is a write. (A `compare_exchange` or `compare_exchange_weak` that does not
//! succeed is not considered a write.) They are *non-synchronized* if neither of them
//! *happens-before* the other, according to the happens-before order of the memory model.
//!
//! The other possible cause of undefined behavior in the memory model is mixed-size accesses: Rust
//! inherits the C++ limitation that non-synchronized conflicting atomic accesses may not partially
//! overlap. In other words, every pair of non-synchronized atomic accesses must be either disjoint,
//! access the exact same memory (including using the same access size), or both be reads.
//!
//! Each atomic access takes an [`Ordering`] which defines how the operation interacts with the
//! happens-before order. These orderings behave the same as the corresponding [C++20 atomic
//! orderings][cpp_memory_order]. For more information, see the [nomicon].
//!
//! [cpp]: https://en.cppreference.com/w/cpp/atomic
//! [cpp-intro.races]: https://timsong-cpp.github.io/cppwp/n4868/intro.multithread#intro.races
//! [cpp_memory_order]: https://en.cppreference.com/w/cpp/atomic/memory_order
//! [nomicon]: ../../../nomicon/atomics.html
//!
//! ```rust,no_run undefined_behavior
//! use std::sync::atomic::{AtomicU16, AtomicU8, Ordering};
//! use std::mem::transmute;
//! use std::thread;
//!
//! let atomic = AtomicU16::new(0);
//!
//! thread::scope(|s| {
//!     // This is UB: conflicting non-synchronized accesses, at least one of which is non-atomic.
//!     s.spawn(|| atomic.store(1, Ordering::Relaxed)); // atomic store
//!     s.spawn(|| unsafe { atomic.as_ptr().write(2) }); // non-atomic write
//! });
//!
//! thread::scope(|s| {
//!     // This is fine: the accesses do not conflict (as none of them performs any modification).
//!     // In C++ this would be disallowed since creating an `atomic_ref` precludes
//!     // further non-atomic accesses, but Rust does not have that limitation.
//!     s.spawn(|| atomic.load(Ordering::Relaxed)); // atomic load
//!     s.spawn(|| unsafe { atomic.as_ptr().read() }); // non-atomic read
//! });
//!
//! thread::scope(|s| {
//!     // This is fine: `join` synchronizes the code in a way such that the atomic
//!     // store happens-before the non-atomic write.
//!     let handle = s.spawn(|| atomic.store(1, Ordering::Relaxed)); // atomic store
//!     handle.join().expect("thread won't panic"); // synchronize
//!     s.spawn(|| unsafe { atomic.as_ptr().write(2) }); // non-atomic write
//! });
//!
//! thread::scope(|s| {
//!     // This is UB: non-synchronized conflicting differently-sized atomic accesses.
//!     s.spawn(|| atomic.store(1, Ordering::Relaxed));
//!     s.spawn(|| unsafe {
//!         let differently_sized = transmute::<&AtomicU16, &AtomicU8>(&atomic);
//!         differently_sized.store(2, Ordering::Relaxed);
//!     });
//! });
//!
//! thread::scope(|s| {
//!     // This is fine: `join` synchronizes the code in a way such that
//!     // the 1-byte store happens-before the 2-byte store.
//!     let handle = s.spawn(|| atomic.store(1, Ordering::Relaxed));
//!     handle.join().expect("thread won't panic");
//!     s.spawn(|| unsafe {
//!         let differently_sized = transmute::<&AtomicU16, &AtomicU8>(&atomic);
//!         differently_sized.store(2, Ordering::Relaxed);
//!     });
//! });
//! ```
//!
//! # Portability
//!
//! All atomic types in this module are guaranteed to be [lock-free] if they're
//! available. This means they don't internally acquire a global mutex. Atomic
//! types and operations are not guaranteed to be wait-free. This means that
//! operations like `fetch_or` may be implemented with a compare-and-swap loop.
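//!
//! As a sketch of what that means, on such a platform `fetch_or` may behave as if it
//! were written with a `compare_exchange_weak` retry loop (an illustration of the
//! technique, not the actual implementation):
//!
//! ```
//! use std::sync::atomic::{AtomicUsize, Ordering};
//!
//! fn fetch_or_via_cas(a: &AtomicUsize, val: usize) -> usize {
//!     let mut old = a.load(Ordering::Relaxed);
//!     loop {
//!         // Retry until the value we computed `old | val` from is still the current one.
//!         match a.compare_exchange_weak(old, old | val, Ordering::SeqCst, Ordering::Relaxed) {
//!             Ok(prev) => return prev,
//!             Err(prev) => old = prev,
//!         }
//!     }
//! }
//!
//! let a = AtomicUsize::new(0b01);
//! assert_eq!(fetch_or_via_cas(&a, 0b10), 0b01);
//! assert_eq!(a.load(Ordering::Relaxed), 0b11);
//! ```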
//!
//! Atomic operations may be implemented at the instruction layer with
//! larger-size atomics. For example, some platforms use 4-byte atomic
//! instructions to implement `AtomicI8`. Note that this emulation should not
//! have an impact on the correctness of code; it's just something to be aware of.
//!
//! The atomic types in this module might not be available on all platforms. The
//! atomic types here are all widely available, however, and can generally be
//! relied upon to exist. Some notable exceptions are:
//!
//! * PowerPC and MIPS platforms with 32-bit pointers do not have `AtomicU64` or
//!   `AtomicI64` types.
//! * ARM platforms like `armv5te` that aren't for Linux only provide `load`
//!   and `store` operations, and do not support Compare and Swap (CAS)
//!   operations, such as `swap`, `fetch_add`, etc. Additionally on Linux,
//!   these CAS operations are implemented via [operating system support], which
//!   may come with a performance penalty.
//! * ARM targets with `thumbv6m` only provide `load` and `store` operations,
//!   and do not support Compare and Swap (CAS) operations, such as `swap`,
//!   `fetch_add`, etc.
//!
//! [operating system support]: https://www.kernel.org/doc/Documentation/arm/kernel_user_helpers.txt
//!
//! Note that future platforms may be added that also do not have support for
//! some atomic operations. Maximally portable code will want to be careful
//! about which atomic types are used. `AtomicUsize` and `AtomicIsize` are
//! generally the most portable, but even then they're not available everywhere.
//! For reference, the `std` library requires `AtomicBool`s and pointer-sized atomics, although
//! `core` does not.
//!
//! The `#[cfg(target_has_atomic)]` attribute can be used to conditionally
//! compile based on the target's supported bit widths. It is a key-value
//! option set for each supported size, with values "8", "16", "32", "64",
//! "128", and "ptr" for pointer-sized atomics.
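//!
//! For example, a crate can declare a 64-bit counter only where 64-bit atomics exist,
//! and fall back to a mutex otherwise. A minimal sketch (`COUNTER` is just an
//! illustrative name):
//!
//! ```
//! // Used on targets whose `target_has_atomic` set includes "64".
//! #[cfg(target_has_atomic = "64")]
//! static COUNTER: std::sync::atomic::AtomicU64 = std::sync::atomic::AtomicU64::new(0);
//!
//! // Fallback for targets without 64-bit atomics.
//! #[cfg(not(target_has_atomic = "64"))]
//! static COUNTER: std::sync::Mutex<u64> = std::sync::Mutex::new(0);
//! ```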
//!
//! [lock-free]: https://en.wikipedia.org/wiki/Non-blocking_algorithm
//!
//! # Atomic accesses to read-only memory
//!
//! In general, *all* atomic accesses on read-only memory are undefined behavior. For instance, attempting
//! to do a `compare_exchange` that will definitely fail (making it conceptually a read-only
//! operation) can still cause a segmentation fault if the underlying memory page is mapped read-only. Since
//! atomic `load`s might be implemented using compare-exchange operations, even a `load` can fault
//! on read-only memory.
//!
//! For the purpose of this section, "read-only memory" is defined as memory that is read-only in
//! the underlying target, i.e., the pages are mapped with a read-only flag and any attempt to write
//! will cause a page fault. In particular, an `&u128` reference that points to memory that is
//! read-write mapped is *not* considered to point to "read-only memory". In Rust, almost all memory
//! is read-write; the only exceptions are memory created by `const` items or `static` items without
//! interior mutability, and memory that was specifically marked as read-only by the operating
//! system via platform-specific APIs.
//!
//! As an exception from the general rule stated above, "sufficiently small" atomic loads with
//! `Ordering::Relaxed` are implemented in a way that works on read-only memory, and are hence not
//! undefined behavior. The exact size limit for what makes a load "sufficiently small" varies
//! depending on the target:
//!
//! | `target_arch` | Size limit |
//! |---------------|------------|
//! | `x86`, `arm`, `loongarch32`, `mips`, `mips32r6`, `powerpc`, `riscv32`, `sparc`, `hexagon` | 4 bytes |
//! | `x86_64`, `aarch64`, `loongarch64`, `mips64`, `mips64r6`, `powerpc64`, `riscv64`, `sparc64`, `s390x` | 8 bytes |
//!
//! Atomic loads that are larger than this limit, atomic loads with an ordering other than
//! `Relaxed`, and *all* atomic loads on targets not listed in the table might still work on
//! read-only memory under certain conditions, but that is not a stable guarantee and should not
//! be relied upon.
//!
//! If you need to do an acquire load on read-only memory, you can do a relaxed load followed by an
//! acquire fence instead.
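//!
//! A minimal sketch of that pattern:
//!
//! ```
//! use std::sync::atomic::{fence, AtomicU32, Ordering};
//!
//! fn acquire_load(a: &AtomicU32) -> u32 {
//!     // A sufficiently small `Relaxed` load is guaranteed to work on
//!     // read-only memory (see the table above)...
//!     let v = a.load(Ordering::Relaxed);
//!     // ...and if that load read from a `Release` store, the fence makes the
//!     // writes before that store visible, just like an `Acquire` load would.
//!     fence(Ordering::Acquire);
//!     v
//! }
//! #
//! # let a = AtomicU32::new(1);
//! # assert_eq!(acquire_load(&a), 1);
//! ```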
//!
//! # Examples
//!
//! A simple spinlock:
//!
//! ```ignore-wasm
//! use std::sync::Arc;
//! use std::sync::atomic::{AtomicUsize, Ordering};
//! use std::{hint, thread};
//!
//! fn main() {
//!     let spinlock = Arc::new(AtomicUsize::new(1));
//!
//!     let spinlock_clone = Arc::clone(&spinlock);
//!
//!     let thread = thread::spawn(move || {
//!         spinlock_clone.store(0, Ordering::Release);
//!     });
//!
//!     // Wait for the other thread to release the lock
//!     while spinlock.load(Ordering::Acquire) != 0 {
//!         hint::spin_loop();
//!     }
//!
//!     if let Err(panic) = thread.join() {
//!         println!("Thread had an error: {panic:?}");
//!     }
//! }
//! ```
//!
//! Keep a global count of live threads:
//!
//! ```
//! use std::sync::atomic::{AtomicUsize, Ordering};
//!
//! static GLOBAL_THREAD_COUNT: AtomicUsize = AtomicUsize::new(0);
//!
//! // Note that Relaxed ordering doesn't synchronize anything
//! // except the global thread counter itself.
//! let old_thread_count = GLOBAL_THREAD_COUNT.fetch_add(1, Ordering::Relaxed);
//! // Note that this number may not be true at the moment of printing
//! // because some other thread may have changed the static value already.
//! println!("live threads: {}", old_thread_count + 1);
//! ```

#![stable(feature = "rust1", since = "1.0.0")]
#![cfg_attr(not(target_has_atomic_load_store = "8"), allow(dead_code))]
#![cfg_attr(not(target_has_atomic_load_store = "8"), allow(unused_imports))]
#![rustc_diagnostic_item = "atomic_mod"]
// Clippy complains about the pattern of "safe function calling unsafe function taking pointers".
// This happens with AtomicPtr intrinsics but is fine, as the pointers clippy is concerned about
// are just normal values that get loaded/stored, but not dereferenced.
#![allow(clippy::not_unsafe_ptr_arg_deref)]

use self::Ordering::*;
use crate::cell::UnsafeCell;
use crate::hint::spin_loop;
use crate::intrinsics::AtomicOrdering as AO;
use crate::{fmt, intrinsics};

trait Sealed {}

/// A marker trait for primitive types which can be modified atomically.
///
/// This is an implementation detail for <code>[Atomic]\<T></code> which may disappear or be replaced at any time.
///
/// # Safety
///
/// Types implementing this trait must be primitives that can be modified atomically.
///
/// The associated `Self::AtomicInner` type must have the same size and bit validity as `Self`,
/// but may have a higher alignment requirement, so the following `transmute`s are sound:
///
/// - `&mut Self::AtomicInner` as `&mut Self`
/// - `Self` as `Self::AtomicInner` or the reverse
#[unstable(
    feature = "atomic_internals",
    reason = "implementation detail which may disappear or be replaced at any time",
    issue = "none"
)]
#[expect(private_bounds)]
pub unsafe trait AtomicPrimitive: Sized + Copy + Sealed {
    /// Temporary implementation detail.
    type AtomicInner: Sized;
}

macro impl_atomic_primitive(
    $Atom:ident $(<$T:ident>)? ($Primitive:ty),
    size($size:literal),
    align($align:literal) $(,)?
) {
    impl $(<$T>)? Sealed for $Primitive {}

    #[unstable(
        feature = "atomic_internals",
        reason = "implementation detail which may disappear or be replaced at any time",
        issue = "none"
    )]
    #[cfg(target_has_atomic_load_store = $size)]
    unsafe impl $(<$T>)? AtomicPrimitive for $Primitive {
        type AtomicInner = $Atom $(<$T>)?;
    }
}

impl_atomic_primitive!(AtomicBool(bool), size("8"), align(1));
impl_atomic_primitive!(AtomicI8(i8), size("8"), align(1));
impl_atomic_primitive!(AtomicU8(u8), size("8"), align(1));
impl_atomic_primitive!(AtomicI16(i16), size("16"), align(2));
impl_atomic_primitive!(AtomicU16(u16), size("16"), align(2));
impl_atomic_primitive!(AtomicI32(i32), size("32"), align(4));
impl_atomic_primitive!(AtomicU32(u32), size("32"), align(4));
impl_atomic_primitive!(AtomicI64(i64), size("64"), align(8));
impl_atomic_primitive!(AtomicU64(u64), size("64"), align(8));
impl_atomic_primitive!(AtomicI128(i128), size("128"), align(16));
impl_atomic_primitive!(AtomicU128(u128), size("128"), align(16));

#[cfg(target_pointer_width = "16")]
impl_atomic_primitive!(AtomicIsize(isize), size("ptr"), align(2));
#[cfg(target_pointer_width = "32")]
impl_atomic_primitive!(AtomicIsize(isize), size("ptr"), align(4));
#[cfg(target_pointer_width = "64")]
impl_atomic_primitive!(AtomicIsize(isize), size("ptr"), align(8));

#[cfg(target_pointer_width = "16")]
impl_atomic_primitive!(AtomicUsize(usize), size("ptr"), align(2));
#[cfg(target_pointer_width = "32")]
impl_atomic_primitive!(AtomicUsize(usize), size("ptr"), align(4));
#[cfg(target_pointer_width = "64")]
impl_atomic_primitive!(AtomicUsize(usize), size("ptr"), align(8));

#[cfg(target_pointer_width = "16")]
impl_atomic_primitive!(AtomicPtr<T>(*mut T), size("ptr"), align(2));
#[cfg(target_pointer_width = "32")]
impl_atomic_primitive!(AtomicPtr<T>(*mut T), size("ptr"), align(4));
#[cfg(target_pointer_width = "64")]
impl_atomic_primitive!(AtomicPtr<T>(*mut T), size("ptr"), align(8));

/// A memory location which can be safely modified from multiple threads.
///
/// This has the same size and bit validity as the underlying type `T`. However,
/// the alignment of this type is always equal to its size, even on targets where
/// `T` has alignment less than its size.
///
/// For more about the differences between atomic types and non-atomic types as
/// well as information about the portability of this type, please see the
/// [module-level documentation].
///
/// **Note:** This type is only available on platforms that support atomic loads
/// and stores of `T`.
///
/// [module-level documentation]: crate::sync::atomic
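///
/// A minimal sketch of using the alias (nightly-only, since `generic_atomic` is
/// unstable):
///
/// ```
/// #![feature(generic_atomic)]
/// use std::sync::atomic::{Atomic, Ordering};
///
/// // `Atomic<u32>` resolves to `AtomicU32`.
/// let a: Atomic<u32> = Atomic::<u32>::new(5);
/// a.store(6, Ordering::Relaxed);
/// assert_eq!(a.load(Ordering::Relaxed), 6);
/// ```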
#[unstable(feature = "generic_atomic", issue = "130539")]
pub type Atomic<T> = <T as AtomicPrimitive>::AtomicInner;

// Some architectures don't have byte-sized atomics, which results in LLVM
// emulating them using a LL/SC loop. However for AtomicBool we can take
// advantage of the fact that it only ever contains 0 or 1 and use atomic OR/AND
// instead, which LLVM can emulate using a larger atomic OR/AND operation.
//
// This list should only contain architectures which have word-sized atomic-or/
// atomic-and instructions but don't natively support byte-sized atomics.
#[cfg(target_has_atomic = "8")]
const EMULATE_ATOMIC_BOOL: bool = cfg!(any(
    target_arch = "riscv32",
    target_arch = "riscv64",
    target_arch = "loongarch32",
    target_arch = "loongarch64"
));

/// A boolean type which can be safely shared between threads.
///
/// This type has the same size, alignment, and bit validity as a [`bool`].
///
/// **Note**: This type is only available on platforms that support atomic
/// loads and stores of `u8`.
#[cfg(target_has_atomic_load_store = "8")]
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_diagnostic_item = "AtomicBool"]
#[repr(C, align(1))]
pub struct AtomicBool {
    v: UnsafeCell<u8>,
}

#[cfg(target_has_atomic_load_store = "8")]
#[stable(feature = "rust1", since = "1.0.0")]
impl Default for AtomicBool {
    /// Creates an `AtomicBool` initialized to `false`.
    #[inline]
    fn default() -> Self {
        Self::new(false)
    }
}

// Send is implicitly implemented for AtomicBool.
#[cfg(target_has_atomic_load_store = "8")]
#[stable(feature = "rust1", since = "1.0.0")]
unsafe impl Sync for AtomicBool {}

/// A raw pointer type which can be safely shared between threads.
///
/// This type has the same size and bit validity as a `*mut T`.
///
/// **Note**: This type is only available on platforms that support atomic
/// loads and stores of pointers. Its size depends on the target pointer's size.
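///
/// A minimal sketch of atomically swapping one pointer for another:
///
/// ```
/// use std::sync::atomic::{AtomicPtr, Ordering};
///
/// let ptr = &mut 5;
/// let some_ptr = AtomicPtr::new(ptr);
///
/// let other_ptr = &mut 10;
/// // Publish `other_ptr` and get back whatever pointer was stored before.
/// let value = some_ptr.swap(other_ptr, Ordering::SeqCst);
/// assert_eq!(unsafe { *value }, 5);
/// ```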
#[cfg(target_has_atomic_load_store = "ptr")]
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_diagnostic_item = "AtomicPtr"]
#[cfg_attr(target_pointer_width = "16", repr(C, align(2)))]
#[cfg_attr(target_pointer_width = "32", repr(C, align(4)))]
#[cfg_attr(target_pointer_width = "64", repr(C, align(8)))]
pub struct AtomicPtr<T> {
    p: UnsafeCell<*mut T>,
}

#[cfg(target_has_atomic_load_store = "ptr")]
#[stable(feature = "rust1", since = "1.0.0")]
impl<T> Default for AtomicPtr<T> {
    /// Creates a null `AtomicPtr<T>`.
    fn default() -> AtomicPtr<T> {
        AtomicPtr::new(crate::ptr::null_mut())
    }
}

#[cfg(target_has_atomic_load_store = "ptr")]
#[stable(feature = "rust1", since = "1.0.0")]
unsafe impl<T> Send for AtomicPtr<T> {}
#[cfg(target_has_atomic_load_store = "ptr")]
#[stable(feature = "rust1", since = "1.0.0")]
unsafe impl<T> Sync for AtomicPtr<T> {}

/// Atomic memory orderings
///
/// Memory orderings specify the way atomic operations synchronize memory.
/// In its weakest form, [`Ordering::Relaxed`], only the memory directly touched by the
/// operation is synchronized. On the other hand, a store-load pair of [`Ordering::SeqCst`]
/// operations synchronize other memory while additionally preserving a total order of such
/// operations across all threads.
///
/// Rust's memory orderings are [the same as those of
/// C++20](https://en.cppreference.com/w/cpp/atomic/memory_order).
///
/// For more information see the [nomicon].
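///
/// A minimal sketch of release/acquire "message passing" between two threads:
///
/// ```
/// use std::sync::atomic::{AtomicBool, AtomicU32, Ordering};
/// use std::thread;
///
/// let data = AtomicU32::new(0);
/// let ready = AtomicBool::new(false);
///
/// thread::scope(|s| {
///     s.spawn(|| {
///         data.store(42, Ordering::Relaxed);
///         // Publish everything written before this store...
///         ready.store(true, Ordering::Release);
///     });
///     s.spawn(|| {
///         // ...so if this acquire load sees `true`, the write to `data` is
///         // guaranteed to be visible as well.
///         if ready.load(Ordering::Acquire) {
///             assert_eq!(data.load(Ordering::Relaxed), 42);
///         }
///     });
/// });
/// ```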
///
/// [nomicon]: ../../../nomicon/atomics.html
#[stable(feature = "rust1", since = "1.0.0")]
#[derive(Copy, Clone, Debug, Eq, PartialEq, Hash)]
#[non_exhaustive]
#[rustc_diagnostic_item = "Ordering"]
pub enum Ordering {
    /// No ordering constraints, only atomic operations.
    ///
    /// Corresponds to [`memory_order_relaxed`] in C++20.
    ///
    /// [`memory_order_relaxed`]: https://en.cppreference.com/w/cpp/atomic/memory_order#Relaxed_ordering
    #[stable(feature = "rust1", since = "1.0.0")]
    Relaxed,
    /// When coupled with a store, all previous operations become ordered
    /// before any load of this value with [`Acquire`] (or stronger) ordering.
    /// In particular, all previous writes become visible to all threads
    /// that perform an [`Acquire`] (or stronger) load of this value.
    ///
    /// Notice that using this ordering for an operation that combines loads
    /// and stores leads to a [`Relaxed`] load operation!
    ///
    /// This ordering is only applicable for operations that can perform a store.
    ///
    /// Corresponds to [`memory_order_release`] in C++20.
    ///
    /// [`memory_order_release`]: https://en.cppreference.com/w/cpp/atomic/memory_order#Release-Acquire_ordering
    #[stable(feature = "rust1", since = "1.0.0")]
    Release,
    /// When coupled with a load, if the loaded value was written by a store operation with
    /// [`Release`] (or stronger) ordering, then all subsequent operations
    /// become ordered after that store. In particular, all subsequent loads will see data
    /// written before the store.
    ///
    /// Notice that using this ordering for an operation that combines loads
    /// and stores leads to a [`Relaxed`] store operation!
    ///
    /// This ordering is only applicable for operations that can perform a load.
    ///
    /// Corresponds to [`memory_order_acquire`] in C++20.
    ///
    /// [`memory_order_acquire`]: https://en.cppreference.com/w/cpp/atomic/memory_order#Release-Acquire_ordering
    #[stable(feature = "rust1", since = "1.0.0")]
    Acquire,
    /// Has the effects of both [`Acquire`] and [`Release`] together:
    /// For loads it uses [`Acquire`] ordering. For stores it uses the [`Release`] ordering.
    ///
    /// Notice that in the case of `compare_and_swap`, it is possible that the operation ends up
    /// not performing any store and hence it has just [`Acquire`] ordering. However,
    /// `AcqRel` will never perform [`Relaxed`] accesses.
    ///
    /// This ordering is only applicable for operations that combine both loads and stores.
    ///
    /// Corresponds to [`memory_order_acq_rel`] in C++20.
    ///
    /// [`memory_order_acq_rel`]: https://en.cppreference.com/w/cpp/atomic/memory_order#Release-Acquire_ordering
    #[stable(feature = "rust1", since = "1.0.0")]
    AcqRel,
    /// Like [`Acquire`]/[`Release`]/[`AcqRel`] (for load, store, and load-with-store
    /// operations, respectively) with the additional guarantee that all threads see all
    /// sequentially consistent operations in the same order.
    ///
    /// Corresponds to [`memory_order_seq_cst`] in C++20.
    ///
    /// [`memory_order_seq_cst`]: https://en.cppreference.com/w/cpp/atomic/memory_order#Sequentially-consistent_ordering
    #[stable(feature = "rust1", since = "1.0.0")]
    SeqCst,
}

/// An [`AtomicBool`] initialized to `false`.
#[cfg(target_has_atomic_load_store = "8")]
#[stable(feature = "rust1", since = "1.0.0")]
#[deprecated(
    since = "1.34.0",
    note = "the `new` function is now preferred",
    suggestion = "AtomicBool::new(false)"
)]
pub const ATOMIC_BOOL_INIT: AtomicBool = AtomicBool::new(false);

#[cfg(target_has_atomic_load_store = "8")]
impl AtomicBool {
    /// Creates a new `AtomicBool`.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::AtomicBool;
    ///
    /// let atomic_true = AtomicBool::new(true);
    /// let atomic_false = AtomicBool::new(false);
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    #[rustc_const_stable(feature = "const_atomic_new", since = "1.24.0")]
    #[must_use]
    pub const fn new(v: bool) -> AtomicBool {
        AtomicBool { v: UnsafeCell::new(v as u8) }
    }

    /// Creates a new `AtomicBool` from a pointer.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{self, AtomicBool};
    ///
    /// // Get a pointer to an allocated value
    /// let ptr: *mut bool = Box::into_raw(Box::new(false));
    ///
    /// assert!(ptr.cast::<AtomicBool>().is_aligned());
    ///
    /// {
    ///     // Create an atomic view of the allocated value
    ///     let atomic = unsafe { AtomicBool::from_ptr(ptr) };
    ///
    ///     // Use `atomic` for atomic operations, possibly share it with other threads
    ///     atomic.store(true, atomic::Ordering::Relaxed);
    /// }
    ///
    /// // It's ok to non-atomically access the value behind `ptr`,
    /// // since the reference to the atomic ended its lifetime in the block above
    /// assert_eq!(unsafe { *ptr }, true);
    ///
    /// // Deallocate the value
    /// unsafe { drop(Box::from_raw(ptr)) }
    /// ```
    ///
    /// # Safety
    ///
    /// * `ptr` must be aligned to `align_of::<AtomicBool>()` (note that this is always true, since
    ///   `align_of::<AtomicBool>() == 1`).
    /// * `ptr` must be [valid] for both reads and writes for the whole lifetime `'a`.
    /// * You must adhere to the [Memory model for atomic accesses]. In particular, it is not
    ///   allowed to mix conflicting atomic and non-atomic accesses, or atomic accesses of different
    ///   sizes, without synchronization.
    ///
    /// [valid]: crate::ptr#safety
    /// [Memory model for atomic accesses]: self#memory-model-for-atomic-accesses
    #[inline]
    #[stable(feature = "atomic_from_ptr", since = "1.75.0")]
    #[rustc_const_stable(feature = "const_atomic_from_ptr", since = "1.84.0")]
    pub const unsafe fn from_ptr<'a>(ptr: *mut bool) -> &'a AtomicBool {
        // SAFETY: guaranteed by the caller
        unsafe { &*ptr.cast() }
    }

    /// Returns a mutable reference to the underlying [`bool`].
    ///
    /// This is safe because the mutable reference guarantees that no other threads are
    /// concurrently accessing the atomic data.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicBool, Ordering};
    ///
    /// let mut some_bool = AtomicBool::new(true);
    /// assert_eq!(*some_bool.get_mut(), true);
    /// *some_bool.get_mut() = false;
    /// assert_eq!(some_bool.load(Ordering::SeqCst), false);
    /// ```
    #[inline]
    #[stable(feature = "atomic_access", since = "1.15.0")]
    pub fn get_mut(&mut self) -> &mut bool {
        // SAFETY: the mutable reference guarantees unique ownership.
        unsafe { &mut *(self.v.get() as *mut bool) }
    }

    /// Gets atomic access to a `&mut bool`.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(atomic_from_mut)]
    /// use std::sync::atomic::{AtomicBool, Ordering};
    ///
    /// let mut some_bool = true;
    /// let a = AtomicBool::from_mut(&mut some_bool);
    /// a.store(false, Ordering::Relaxed);
    /// assert_eq!(some_bool, false);
    /// ```
    #[inline]
    #[cfg(target_has_atomic_equal_alignment = "8")]
    #[unstable(feature = "atomic_from_mut", issue = "76314")]
    pub fn from_mut(v: &mut bool) -> &mut Self {
        // SAFETY: the mutable reference guarantees unique ownership, and
        // alignment of both `bool` and `Self` is 1.
        unsafe { &mut *(v as *mut bool as *mut Self) }
    }

    /// Gets non-atomic access to a `&mut [AtomicBool]` slice.
    ///
    /// This is safe because the mutable reference guarantees that no other threads are
    /// concurrently accessing the atomic data.
    ///
    /// # Examples
    ///
    /// ```ignore-wasm
    /// #![feature(atomic_from_mut)]
    /// use std::sync::atomic::{AtomicBool, Ordering};
    ///
    /// let mut some_bools = [const { AtomicBool::new(false) }; 10];
    ///
    /// let view: &mut [bool] = AtomicBool::get_mut_slice(&mut some_bools);
    /// assert_eq!(view, [false; 10]);
    /// view[..5].copy_from_slice(&[true; 5]);
    ///
    /// std::thread::scope(|s| {
    ///     for t in &some_bools[..5] {
    ///         s.spawn(move || assert_eq!(t.load(Ordering::Relaxed), true));
    ///     }
    ///
    ///     for f in &some_bools[5..] {
    ///         s.spawn(move || assert_eq!(f.load(Ordering::Relaxed), false));
    ///     }
    /// });
    /// ```
    #[inline]
    #[unstable(feature = "atomic_from_mut", issue = "76314")]
    pub fn get_mut_slice(this: &mut [Self]) -> &mut [bool] {
        // SAFETY: the mutable reference guarantees unique ownership.
        unsafe { &mut *(this as *mut [Self] as *mut [bool]) }
    }

    /// Gets atomic access to a `&mut [bool]` slice.
    ///
    /// # Examples
    ///
    /// ```rust,ignore-wasm
    /// #![feature(atomic_from_mut)]
    /// use std::sync::atomic::{AtomicBool, Ordering};
    ///
    /// let mut some_bools = [false; 10];
    /// let a = &*AtomicBool::from_mut_slice(&mut some_bools);
    /// std::thread::scope(|s| {
    ///     for i in 0..a.len() {
    ///         s.spawn(move || a[i].store(true, Ordering::Relaxed));
    ///     }
    /// });
    /// assert_eq!(some_bools, [true; 10]);
    /// ```
    #[inline]
    #[cfg(target_has_atomic_equal_alignment = "8")]
    #[unstable(feature = "atomic_from_mut", issue = "76314")]
    pub fn from_mut_slice(v: &mut [bool]) -> &mut [Self] {
        // SAFETY: the mutable reference guarantees unique ownership, and
        // alignment of both `bool` and `Self` is 1.
        unsafe { &mut *(v as *mut [bool] as *mut [Self]) }
    }

    /// Consumes the atomic and returns the contained value.
    ///
    /// This is safe because passing `self` by value guarantees that no other threads are
    /// concurrently accessing the atomic data.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::AtomicBool;
    ///
    /// let some_bool = AtomicBool::new(true);
    /// assert_eq!(some_bool.into_inner(), true);
    /// ```
    #[inline]
    #[stable(feature = "atomic_access", since = "1.15.0")]
    #[rustc_const_stable(feature = "const_atomic_into_inner", since = "1.79.0")]
    pub const fn into_inner(self) -> bool {
        self.v.into_inner() != 0
    }

    /// Loads a value from the bool.
    ///
    /// `load` takes an [`Ordering`] argument which describes the memory ordering
    /// of this operation. Possible values are [`SeqCst`], [`Acquire`] and [`Relaxed`].
    ///
    /// # Panics
    ///
    /// Panics if `order` is [`Release`] or [`AcqRel`].
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicBool, Ordering};
    ///
    /// let some_bool = AtomicBool::new(true);
    ///
    /// assert_eq!(some_bool.load(Ordering::Relaxed), true);
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
    pub fn load(&self, order: Ordering) -> bool {
        // SAFETY: any data races are prevented by atomic intrinsics and the raw
        // pointer passed in is valid because we got it from a reference.
        unsafe { atomic_load(self.v.get(), order) != 0 }
    }

    /// Stores a value into the bool.
    ///
    /// `store` takes an [`Ordering`] argument which describes the memory ordering
    /// of this operation. Possible values are [`SeqCst`], [`Release`] and [`Relaxed`].
    ///
    /// # Panics
    ///
    /// Panics if `order` is [`Acquire`] or [`AcqRel`].
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicBool, Ordering};
    ///
    /// let some_bool = AtomicBool::new(true);
    ///
    /// some_bool.store(false, Ordering::Relaxed);
    /// assert_eq!(some_bool.load(Ordering::Relaxed), false);
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
    #[rustc_should_not_be_called_on_const_items]
    pub fn store(&self, val: bool, order: Ordering) {
        // SAFETY: any data races are prevented by atomic intrinsics and the raw
        // pointer passed in is valid because we got it from a reference.
        unsafe {
            atomic_store(self.v.get(), val as u8, order);
        }
    }

    /// Stores a value into the bool, returning the previous value.
    ///
    /// `swap` takes an [`Ordering`] argument which describes the memory ordering
    /// of this operation. All ordering modes are possible. Note that using
    /// [`Acquire`] makes the store part of this operation [`Relaxed`], and
    /// using [`Release`] makes the load part [`Relaxed`].
    ///
    /// **Note:** This method is only available on platforms that support atomic
    /// operations on `u8`.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicBool, Ordering};
    ///
    /// let some_bool = AtomicBool::new(true);
    ///
    /// assert_eq!(some_bool.swap(false, Ordering::Relaxed), true);
    /// assert_eq!(some_bool.load(Ordering::Relaxed), false);
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    #[cfg(target_has_atomic = "8")]
    #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
    #[rustc_should_not_be_called_on_const_items]
    pub fn swap(&self, val: bool, order: Ordering) -> bool {
        if EMULATE_ATOMIC_BOOL {
            if val { self.fetch_or(true, order) } else { self.fetch_and(false, order) }
        } else {
            // SAFETY: data races are prevented by atomic intrinsics.
            unsafe { atomic_swap(self.v.get(), val as u8, order) != 0 }
        }
    }

    /// Stores a value into the [`bool`] if the current value is the same as the `current` value.
    ///
    /// The return value is always the previous value. If it is equal to `current`, then the value
    /// was updated.
    ///
    /// `compare_and_swap` also takes an [`Ordering`] argument which describes the memory
    /// ordering of this operation. Notice that even when using [`AcqRel`], the operation
    /// might fail and hence just perform an `Acquire` load, but not have `Release` semantics.
    /// Using [`Acquire`] makes the store part of this operation [`Relaxed`] if it
    /// happens, and using [`Release`] makes the load part [`Relaxed`].
    ///
    /// **Note:** This method is only available on platforms that support atomic
    /// operations on `u8`.
    ///
    /// # Migrating to `compare_exchange` and `compare_exchange_weak`
    ///
    /// `compare_and_swap` is equivalent to `compare_exchange` with the following mapping for
    /// memory orderings:
    ///
    /// Original | Success | Failure
    /// -------- | ------- | -------
    /// Relaxed  | Relaxed | Relaxed
    /// Acquire  | Acquire | Acquire
    /// Release  | Release | Relaxed
    /// AcqRel   | AcqRel  | Acquire
    /// SeqCst   | SeqCst  | SeqCst
    ///
    /// `compare_and_swap` and `compare_exchange` also differ in their return type. You can use
    /// `compare_exchange(...).unwrap_or_else(|x| x)` to recover the behavior of `compare_and_swap`,
    /// but in most cases it is more idiomatic to check whether the return value is `Ok` or `Err`
    /// rather than to infer success vs failure based on the value that was read.
    ///
    /// During migration, consider whether it makes sense to use `compare_exchange_weak` instead.
    /// `compare_exchange_weak` is allowed to fail spuriously even when the comparison succeeds,
    /// which allows the compiler to generate better assembly code when the compare and swap
    /// is used in a loop.
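    ///
    /// A minimal sketch of the migration, using the ordering mapping above:
    ///
    /// ```
    /// use std::sync::atomic::{AtomicBool, Ordering};
    ///
    /// let a = AtomicBool::new(false);
    /// // Before: let prev = a.compare_and_swap(false, true, Ordering::AcqRel);
    /// // After:
    /// let prev = a
    ///     .compare_exchange(false, true, Ordering::AcqRel, Ordering::Acquire)
    ///     .unwrap_or_else(|x| x);
    /// assert_eq!(prev, false);
    /// ```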
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicBool, Ordering};
    ///
    /// let some_bool = AtomicBool::new(true);
    ///
    /// assert_eq!(some_bool.compare_and_swap(true, false, Ordering::Relaxed), true);
    /// assert_eq!(some_bool.load(Ordering::Relaxed), false);
    ///
    /// assert_eq!(some_bool.compare_and_swap(true, true, Ordering::Relaxed), false);
    /// assert_eq!(some_bool.load(Ordering::Relaxed), false);
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    #[deprecated(
        since = "1.50.0",
        note = "Use `compare_exchange` or `compare_exchange_weak` instead"
    )]
    #[cfg(target_has_atomic = "8")]
    #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
    #[rustc_should_not_be_called_on_const_items]
    pub fn compare_and_swap(&self, current: bool, new: bool, order: Ordering) -> bool {
        match self.compare_exchange(current, new, order, strongest_failure_ordering(order)) {
            Ok(x) => x,
            Err(x) => x,
        }
    }

    /// Stores a value into the [`bool`] if the current value is the same as the `current` value.
    ///
    /// The return value is a result indicating whether the new value was written and containing
    /// the previous value. On success this value is guaranteed to be equal to `current`.
    ///
    /// `compare_exchange` takes two [`Ordering`] arguments to describe the memory
    /// ordering of this operation. `success` describes the required ordering for the
    /// read-modify-write operation that takes place if the comparison with `current` succeeds.
    /// `failure` describes the required ordering for the load operation that takes place when
    /// the comparison fails. Using [`Acquire`] as success ordering makes the store part
    /// of this operation [`Relaxed`], and using [`Release`] makes the successful load
    /// [`Relaxed`]. The failure ordering can only be [`SeqCst`], [`Acquire`] or [`Relaxed`].
    ///
    /// **Note:** This method is only available on platforms that support atomic
    /// operations on `u8`.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicBool, Ordering};
    ///
    /// let some_bool = AtomicBool::new(true);
    ///
    /// assert_eq!(some_bool.compare_exchange(true,
    ///                                       false,
    ///                                       Ordering::Acquire,
    ///                                       Ordering::Relaxed),
    ///            Ok(true));
    /// assert_eq!(some_bool.load(Ordering::Relaxed), false);
    ///
    /// assert_eq!(some_bool.compare_exchange(true, true,
    ///                                       Ordering::SeqCst,
    ///                                       Ordering::Acquire),
    ///            Err(false));
    /// assert_eq!(some_bool.load(Ordering::Relaxed), false);
    /// ```
    ///
    /// # Considerations
    ///
    /// `compare_exchange` is a [compare-and-swap operation] and thus exhibits the usual downsides
    /// of CAS operations. In particular, a load of the value followed by a successful
    /// `compare_exchange` with the previous load *does not ensure* that other threads have not
    /// changed the value in the interim. This is usually important when the *equality* check in
    /// the `compare_exchange` is being used to check the *identity* of a value, but equality
    /// does not necessarily imply identity. In this case, `compare_exchange` can lead to the
    /// [ABA problem].
    ///
    /// [ABA Problem]: https://en.wikipedia.org/wiki/ABA_problem
    /// [compare-and-swap operation]: https://en.wikipedia.org/wiki/Compare-and-swap
    #[inline]
    #[stable(feature = "extended_compare_and_swap", since = "1.10.0")]
    #[doc(alias = "compare_and_swap")]
    #[cfg(target_has_atomic = "8")]
    #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
    #[rustc_should_not_be_called_on_const_items]
    pub fn compare_exchange(
        &self,
        current: bool,
        new: bool,
        success: Ordering,
        failure: Ordering,
    ) -> Result<bool, bool> {
        if EMULATE_ATOMIC_BOOL {
            // Pick the strongest ordering from success and failure.
            let order = match (success, failure) {
                (SeqCst, _) => SeqCst,
                (_, SeqCst) => SeqCst,
                (AcqRel, _) => AcqRel,
                (_, AcqRel) => {
                    panic!("there is no such thing as an acquire-release failure ordering")
                }
                (Release, Acquire) => AcqRel,
                (Acquire, _) => Acquire,
                (_, Acquire) => Acquire,
                (Release, Relaxed) => Release,
                (_, Release) => panic!("there is no such thing as a release failure ordering"),
                (Relaxed, Relaxed) => Relaxed,
            };
            let old = if current == new {
                // This is a no-op, but we still need to perform the operation
                // for memory ordering reasons.
                self.fetch_or(false, order)
            } else {
                // This sets the value to the new one and returns the old one.
                self.swap(new, order)
            };
            if old == current { Ok(old) } else { Err(old) }
        } else {
            // SAFETY: data races are prevented by atomic intrinsics.
            match unsafe {
                atomic_compare_exchange(self.v.get(), current as u8, new as u8, success, failure)
            } {
                Ok(x) => Ok(x != 0),
                Err(x) => Err(x != 0),
            }
        }
    }

    /// Stores a value into the [`bool`] if the current value is the same as the `current` value.
    ///
    /// Unlike [`AtomicBool::compare_exchange`], this function is allowed to spuriously fail even when the
    /// comparison succeeds, which can result in more efficient code on some platforms. The
    /// return value is a result indicating whether the new value was written and containing the
    /// previous value.
    ///
    /// `compare_exchange_weak` takes two [`Ordering`] arguments to describe the memory
    /// ordering of this operation. `success` describes the required ordering for the
    /// read-modify-write operation that takes place if the comparison with `current` succeeds.
    /// `failure` describes the required ordering for the load operation that takes place when
    /// the comparison fails. Using [`Acquire`] as success ordering makes the store part
    /// of this operation [`Relaxed`], and using [`Release`] makes the successful load
    /// [`Relaxed`]. The failure ordering can only be [`SeqCst`], [`Acquire`] or [`Relaxed`].
    ///
    /// **Note:** This method is only available on platforms that support atomic
    /// operations on `u8`.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicBool, Ordering};
    ///
    /// let val = AtomicBool::new(false);
    ///
    /// let new = true;
    /// let mut old = val.load(Ordering::Relaxed);
    /// loop {
    ///     match val.compare_exchange_weak(old, new, Ordering::SeqCst, Ordering::Relaxed) {
    ///         Ok(_) => break,
    ///         Err(x) => old = x,
    ///     }
    /// }
    /// ```
    ///
    /// # Considerations
    ///
    /// `compare_exchange_weak` is a [compare-and-swap operation] and thus exhibits the usual
    /// downsides of CAS operations. In particular, a load of the value followed by a successful
    /// `compare_exchange_weak` with the previous load *does not ensure* that other threads have
    /// not changed the value in the interim. This is usually important when the *equality* check
    /// in the `compare_exchange_weak` is being used to check the *identity* of a value, but
    /// equality does not necessarily imply identity. In this case, `compare_exchange_weak` can
    /// lead to the [ABA problem].
1003 ///
1004 /// [ABA Problem]: https://en.wikipedia.org/wiki/ABA_problem
1005 /// [compare-and-swap operation]: https://en.wikipedia.org/wiki/Compare-and-swap
1006 #[inline]
1007 #[stable(feature = "extended_compare_and_swap", since = "1.10.0")]
1008 #[doc(alias = "compare_and_swap")]
1009 #[cfg(target_has_atomic = "8")]
1010 #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
1011 #[rustc_should_not_be_called_on_const_items]
1012 pub fn compare_exchange_weak(
1013 &self,
1014 current: bool,
1015 new: bool,
1016 success: Ordering,
1017 failure: Ordering,
1018 ) -> Result<bool, bool> {
1019 if EMULATE_ATOMIC_BOOL {
1020 return self.compare_exchange(current, new, success, failure);
1021 }
1022
1023 // SAFETY: data races are prevented by atomic intrinsics.
1024 match unsafe {
1025 atomic_compare_exchange_weak(self.v.get(), current as u8, new as u8, success, failure)
1026 } {
1027 Ok(x) => Ok(x != 0),
1028 Err(x) => Err(x != 0),
1029 }
1030 }
1031
1032 /// Logical "and" with a boolean value.
1033 ///
1034 /// Performs a logical "and" operation on the current value and the argument `val`, and sets
1035 /// the new value to the result.
1036 ///
1037 /// Returns the previous value.
1038 ///
1039 /// `fetch_and` takes an [`Ordering`] argument which describes the memory ordering
1040 /// of this operation. All ordering modes are possible. Note that using
1041 /// [`Acquire`] makes the store part of this operation [`Relaxed`], and
1042 /// using [`Release`] makes the load part [`Relaxed`].
1043 ///
1044 /// **Note:** This method is only available on platforms that support atomic
1045 /// operations on `u8`.
1046 ///
1047 /// # Examples
1048 ///
1049 /// ```
1050 /// use std::sync::atomic::{AtomicBool, Ordering};
1051 ///
1052 /// let foo = AtomicBool::new(true);
1053 /// assert_eq!(foo.fetch_and(false, Ordering::SeqCst), true);
1054 /// assert_eq!(foo.load(Ordering::SeqCst), false);
1055 ///
1056 /// let foo = AtomicBool::new(true);
1057 /// assert_eq!(foo.fetch_and(true, Ordering::SeqCst), true);
1058 /// assert_eq!(foo.load(Ordering::SeqCst), true);
1059 ///
1060 /// let foo = AtomicBool::new(false);
1061 /// assert_eq!(foo.fetch_and(false, Ordering::SeqCst), false);
1062 /// assert_eq!(foo.load(Ordering::SeqCst), false);
1063 /// ```
1064 #[inline]
1065 #[stable(feature = "rust1", since = "1.0.0")]
1066 #[cfg(target_has_atomic = "8")]
1067 #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
1068 #[rustc_should_not_be_called_on_const_items]
1069 pub fn fetch_and(&self, val: bool, order: Ordering) -> bool {
1070 // SAFETY: data races are prevented by atomic intrinsics.
1071 unsafe { atomic_and(self.v.get(), val as u8, order) != 0 }
1072 }
1073
1074 /// Logical "nand" with a boolean value.
1075 ///
1076 /// Performs a logical "nand" operation on the current value and the argument `val`, and sets
1077 /// the new value to the result.
1078 ///
1079 /// Returns the previous value.
1080 ///
1081 /// `fetch_nand` takes an [`Ordering`] argument which describes the memory ordering
1082 /// of this operation. All ordering modes are possible. Note that using
1083 /// [`Acquire`] makes the store part of this operation [`Relaxed`], and
1084 /// using [`Release`] makes the load part [`Relaxed`].
1085 ///
1086 /// **Note:** This method is only available on platforms that support atomic
1087 /// operations on `u8`.
1088 ///
1089 /// # Examples
1090 ///
1091 /// ```
1092 /// use std::sync::atomic::{AtomicBool, Ordering};
1093 ///
1094 /// let foo = AtomicBool::new(true);
1095 /// assert_eq!(foo.fetch_nand(false, Ordering::SeqCst), true);
1096 /// assert_eq!(foo.load(Ordering::SeqCst), true);
1097 ///
1098 /// let foo = AtomicBool::new(true);
1099 /// assert_eq!(foo.fetch_nand(true, Ordering::SeqCst), true);
1100 /// assert_eq!(foo.load(Ordering::SeqCst) as usize, 0);
1101 /// assert_eq!(foo.load(Ordering::SeqCst), false);
1102 ///
1103 /// let foo = AtomicBool::new(false);
1104 /// assert_eq!(foo.fetch_nand(false, Ordering::SeqCst), false);
1105 /// assert_eq!(foo.load(Ordering::SeqCst), true);
1106 /// ```
1107 #[inline]
1108 #[stable(feature = "rust1", since = "1.0.0")]
1109 #[cfg(target_has_atomic = "8")]
1110 #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
1111 #[rustc_should_not_be_called_on_const_items]
1112 pub fn fetch_nand(&self, val: bool, order: Ordering) -> bool {
1113 // We can't use atomic_nand here because it can result in a bool with
1114 // an invalid value. This happens because the atomic operation is done
1115 // with an 8-bit integer internally, which would set the upper 7 bits.
1116 // So we just use fetch_xor or swap instead.
1117 if val {
1118 // !(x & true) == !x
1119 // We must invert the bool.
1120 self.fetch_xor(true, order)
1121 } else {
1122 // !(x & false) == true
1123 // We must set the bool to true.
1124 self.swap(true, order)
1125 }
1126 }
1127
1128 /// Logical "or" with a boolean value.
1129 ///
1130 /// Performs a logical "or" operation on the current value and the argument `val`, and sets the
1131 /// new value to the result.
1132 ///
1133 /// Returns the previous value.
1134 ///
1135 /// `fetch_or` takes an [`Ordering`] argument which describes the memory ordering
1136 /// of this operation. All ordering modes are possible. Note that using
1137 /// [`Acquire`] makes the store part of this operation [`Relaxed`], and
1138 /// using [`Release`] makes the load part [`Relaxed`].
1139 ///
1140 /// **Note:** This method is only available on platforms that support atomic
1141 /// operations on `u8`.
1142 ///
1143 /// # Examples
1144 ///
1145 /// ```
1146 /// use std::sync::atomic::{AtomicBool, Ordering};
1147 ///
1148 /// let foo = AtomicBool::new(true);
1149 /// assert_eq!(foo.fetch_or(false, Ordering::SeqCst), true);
1150 /// assert_eq!(foo.load(Ordering::SeqCst), true);
1151 ///
1152 /// let foo = AtomicBool::new(true);
1153 /// assert_eq!(foo.fetch_or(true, Ordering::SeqCst), true);
1154 /// assert_eq!(foo.load(Ordering::SeqCst), true);
1155 ///
1156 /// let foo = AtomicBool::new(false);
1157 /// assert_eq!(foo.fetch_or(false, Ordering::SeqCst), false);
1158 /// assert_eq!(foo.load(Ordering::SeqCst), false);
1159 /// ```
1160 #[inline]
1161 #[stable(feature = "rust1", since = "1.0.0")]
1162 #[cfg(target_has_atomic = "8")]
1163 #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
1164 #[rustc_should_not_be_called_on_const_items]
1165 pub fn fetch_or(&self, val: bool, order: Ordering) -> bool {
1166 // SAFETY: data races are prevented by atomic intrinsics.
1167 unsafe { atomic_or(self.v.get(), val as u8, order) != 0 }
1168 }
1169
1170 /// Logical "xor" with a boolean value.
1171 ///
1172 /// Performs a logical "xor" operation on the current value and the argument `val`, and sets
1173 /// the new value to the result.
1174 ///
1175 /// Returns the previous value.
1176 ///
1177 /// `fetch_xor` takes an [`Ordering`] argument which describes the memory ordering
1178 /// of this operation. All ordering modes are possible. Note that using
1179 /// [`Acquire`] makes the store part of this operation [`Relaxed`], and
1180 /// using [`Release`] makes the load part [`Relaxed`].
1181 ///
1182 /// **Note:** This method is only available on platforms that support atomic
1183 /// operations on `u8`.
1184 ///
1185 /// # Examples
1186 ///
1187 /// ```
1188 /// use std::sync::atomic::{AtomicBool, Ordering};
1189 ///
1190 /// let foo = AtomicBool::new(true);
1191 /// assert_eq!(foo.fetch_xor(false, Ordering::SeqCst), true);
1192 /// assert_eq!(foo.load(Ordering::SeqCst), true);
1193 ///
1194 /// let foo = AtomicBool::new(true);
1195 /// assert_eq!(foo.fetch_xor(true, Ordering::SeqCst), true);
1196 /// assert_eq!(foo.load(Ordering::SeqCst), false);
1197 ///
1198 /// let foo = AtomicBool::new(false);
1199 /// assert_eq!(foo.fetch_xor(false, Ordering::SeqCst), false);
1200 /// assert_eq!(foo.load(Ordering::SeqCst), false);
1201 /// ```
1202 #[inline]
1203 #[stable(feature = "rust1", since = "1.0.0")]
1204 #[cfg(target_has_atomic = "8")]
1205 #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
1206 #[rustc_should_not_be_called_on_const_items]
1207 pub fn fetch_xor(&self, val: bool, order: Ordering) -> bool {
1208 // SAFETY: data races are prevented by atomic intrinsics.
1209 unsafe { atomic_xor(self.v.get(), val as u8, order) != 0 }
1210 }
1211
1212 /// Logical "not" with a boolean value.
1213 ///
1214 /// Performs a logical "not" operation on the current value, and sets
1215 /// the new value to the result.
1216 ///
1217 /// Returns the previous value.
1218 ///
1219 /// `fetch_not` takes an [`Ordering`] argument which describes the memory ordering
1220 /// of this operation. All ordering modes are possible. Note that using
1221 /// [`Acquire`] makes the store part of this operation [`Relaxed`], and
1222 /// using [`Release`] makes the load part [`Relaxed`].
1223 ///
1224 /// **Note:** This method is only available on platforms that support atomic
1225 /// operations on `u8`.
1226 ///
1227 /// # Examples
1228 ///
1229 /// ```
1230 /// use std::sync::atomic::{AtomicBool, Ordering};
1231 ///
1232 /// let foo = AtomicBool::new(true);
1233 /// assert_eq!(foo.fetch_not(Ordering::SeqCst), true);
1234 /// assert_eq!(foo.load(Ordering::SeqCst), false);
1235 ///
1236 /// let foo = AtomicBool::new(false);
1237 /// assert_eq!(foo.fetch_not(Ordering::SeqCst), false);
1238 /// assert_eq!(foo.load(Ordering::SeqCst), true);
1239 /// ```
1240 #[inline]
1241 #[stable(feature = "atomic_bool_fetch_not", since = "1.81.0")]
1242 #[cfg(target_has_atomic = "8")]
1243 #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
1244 #[rustc_should_not_be_called_on_const_items]
1245 pub fn fetch_not(&self, order: Ordering) -> bool {
1246 self.fetch_xor(true, order)
1247 }
1248
1249 /// Returns a mutable pointer to the underlying [`bool`].
1250 ///
1251 /// Doing non-atomic reads and writes on the resulting boolean can be a data race.
1252 /// This method is mostly useful for FFI, where the function signature may use
1253 /// `*mut bool` instead of `&AtomicBool`.
1254 ///
1255 /// Returning an `*mut` pointer from a shared reference to this atomic is safe because the
1256 /// atomic types work with interior mutability. All modifications of an atomic change the value
1257 /// through a shared reference, and can do so safely as long as they use atomic operations. Any
1258 /// use of the returned raw pointer requires an `unsafe` block and still has to uphold the
1259 /// requirements of the [memory model].
1260 ///
1261 /// # Examples
1262 ///
1263 /// ```ignore (extern-declaration)
1264 /// # fn main() {
1265 /// use std::sync::atomic::AtomicBool;
1266 ///
1267 /// extern "C" {
1268 /// fn my_atomic_op(arg: *mut bool);
1269 /// }
1270 ///
1271 /// let mut atomic = AtomicBool::new(true);
1272 /// unsafe {
1273 /// my_atomic_op(atomic.as_ptr());
1274 /// }
1275 /// # }
1276 /// ```
1277 ///
1278 /// [memory model]: self#memory-model-for-atomic-accesses
1279 #[inline]
1280 #[stable(feature = "atomic_as_ptr", since = "1.70.0")]
1281 #[rustc_const_stable(feature = "atomic_as_ptr", since = "1.70.0")]
1282 #[rustc_never_returns_null_ptr]
1283 #[rustc_should_not_be_called_on_const_items]
1284 pub const fn as_ptr(&self) -> *mut bool {
1285 self.v.get().cast()
1286 }
1287
1288 /// Fetches the value, and applies a function to it that returns an optional
1289 /// new value. Returns a `Result` of `Ok(previous_value)` if the function
1290 /// returned `Some(_)`, else `Err(previous_value)`.
1291 ///
1292 /// Note: This may call the function multiple times if the value has been
1293 /// changed from other threads in the meantime, as long as the function
1294 /// returns `Some(_)`, but the function will have been applied only once to
1295 /// the stored value.
1296 ///
1297 /// `fetch_update` takes two [`Ordering`] arguments to describe the memory
1298 /// ordering of this operation. The first describes the required ordering for
1299 /// when the operation finally succeeds while the second describes the
1300 /// required ordering for loads. These correspond to the success and failure
1301 /// orderings of [`AtomicBool::compare_exchange`] respectively.
1302 ///
1303 /// Using [`Acquire`] as success ordering makes the store part of this
1304 /// operation [`Relaxed`], and using [`Release`] makes the final successful
1305 /// load [`Relaxed`]. The (failed) load ordering can only be [`SeqCst`],
1306 /// [`Acquire`] or [`Relaxed`].
1307 ///
1308 /// **Note:** This method is only available on platforms that support atomic
1309 /// operations on `u8`.
1310 ///
1311 /// # Considerations
1312 ///
1313 /// This method is not magic; it is not provided by the hardware, and does not act like a
1314 /// critical section or mutex.
1315 ///
1316 /// It is implemented on top of an atomic [compare-and-swap operation], and thus is subject to
1317 /// the usual drawbacks of CAS operations. In particular, be careful of the [ABA problem].
1318 ///
1319 /// [ABA Problem]: https://en.wikipedia.org/wiki/ABA_problem
1320 /// [compare-and-swap operation]: https://en.wikipedia.org/wiki/Compare-and-swap
1321 ///
1322 /// # Examples
1323 ///
1324 /// ```rust
1325 /// use std::sync::atomic::{AtomicBool, Ordering};
1326 ///
1327 /// let x = AtomicBool::new(false);
1328 /// assert_eq!(x.fetch_update(Ordering::SeqCst, Ordering::SeqCst, |_| None), Err(false));
1329 /// assert_eq!(x.fetch_update(Ordering::SeqCst, Ordering::SeqCst, |x| Some(!x)), Ok(false));
1330 /// assert_eq!(x.fetch_update(Ordering::SeqCst, Ordering::SeqCst, |x| Some(!x)), Ok(true));
1331 /// assert_eq!(x.load(Ordering::SeqCst), false);
1332 /// ```
1333 #[inline]
1334 #[stable(feature = "atomic_fetch_update", since = "1.53.0")]
1335 #[cfg(target_has_atomic = "8")]
1336 #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
1337 #[rustc_should_not_be_called_on_const_items]
1338 pub fn fetch_update<F>(
1339 &self,
1340 set_order: Ordering,
1341 fetch_order: Ordering,
1342 mut f: F,
1343 ) -> Result<bool, bool>
1344 where
1345 F: FnMut(bool) -> Option<bool>,
1346 {
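// CAS loop: re-apply `f` to each newly observed value until either `f`
// returns `None` or a `compare_exchange_weak` succeeds.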
1347 let mut prev = self.load(fetch_order);
1348 while let Some(next) = f(prev) {
1349 match self.compare_exchange_weak(prev, next, set_order, fetch_order) {
1350 x @ Ok(_) => return x,
1351 Err(next_prev) => prev = next_prev,
1352 }
1353 }
1354 Err(prev)
1355 }
1356
1357 /// Fetches the value, and applies a function to it that returns an optional
1358 /// new value. Returns a `Result` of `Ok(previous_value)` if the function
1359 /// returned `Some(_)`, else `Err(previous_value)`.
1360 ///
1361 /// See also: [`update`](`AtomicBool::update`).
1362 ///
1363 /// Note: This may call the function multiple times if the value has been
1364 /// changed from other threads in the meantime, as long as the function
1365 /// returns `Some(_)`, but the function will have been applied only once to
1366 /// the stored value.
1367 ///
1368 /// `try_update` takes two [`Ordering`] arguments to describe the memory
1369 /// ordering of this operation. The first describes the required ordering for
1370 /// when the operation finally succeeds while the second describes the
1371 /// required ordering for loads. These correspond to the success and failure
1372 /// orderings of [`AtomicBool::compare_exchange`] respectively.
1373 ///
1374 /// Using [`Acquire`] as success ordering makes the store part of this
1375 /// operation [`Relaxed`], and using [`Release`] makes the final successful
1376 /// load [`Relaxed`]. The (failed) load ordering can only be [`SeqCst`],
1377 /// [`Acquire`] or [`Relaxed`].
1378 ///
1379 /// **Note:** This method is only available on platforms that support atomic
1380 /// operations on `u8`.
1381 ///
1382 /// # Considerations
1383 ///
1384 /// This method is not magic; it is not provided by the hardware, and does not act like a
1385 /// critical section or mutex.
1386 ///
1387 /// It is implemented on top of an atomic [compare-and-swap operation], and thus is subject to
1388 /// the usual drawbacks of CAS operations. In particular, be careful of the [ABA problem].
1389 ///
1390 /// [ABA Problem]: https://en.wikipedia.org/wiki/ABA_problem
1391 /// [compare-and-swap operation]: https://en.wikipedia.org/wiki/Compare-and-swap
1392 ///
1393 /// # Examples
1394 ///
1395 /// ```rust
1396 /// #![feature(atomic_try_update)]
1397 /// use std::sync::atomic::{AtomicBool, Ordering};
1398 ///
1399 /// let x = AtomicBool::new(false);
1400 /// assert_eq!(x.try_update(Ordering::SeqCst, Ordering::SeqCst, |_| None), Err(false));
1401 /// assert_eq!(x.try_update(Ordering::SeqCst, Ordering::SeqCst, |x| Some(!x)), Ok(false));
1402 /// assert_eq!(x.try_update(Ordering::SeqCst, Ordering::SeqCst, |x| Some(!x)), Ok(true));
1403 /// assert_eq!(x.load(Ordering::SeqCst), false);
1404 /// ```
1405 #[inline]
1406 #[unstable(feature = "atomic_try_update", issue = "135894")]
1407 #[cfg(target_has_atomic = "8")]
1408 #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
1409 #[rustc_should_not_be_called_on_const_items]
1410 pub fn try_update(
1411 &self,
1412 set_order: Ordering,
1413 fetch_order: Ordering,
1414 f: impl FnMut(bool) -> Option<bool>,
1415 ) -> Result<bool, bool> {
1416 // FIXME(atomic_try_update): this is currently an unstable alias to `fetch_update`;
1417 // when stabilizing, turn `fetch_update` into a deprecated alias to `try_update`.
1418 self.fetch_update(set_order, fetch_order, f)
1419 }
1420
1421 /// Fetches the value, and applies a function to it that returns a new value.
1422 /// The new value is stored and the old value is returned.
1423 ///
1424 /// See also: [`try_update`](`AtomicBool::try_update`).
1425 ///
1426 /// Note: This may call the function multiple times if the value has been changed from other threads in
1427 /// the meantime, but the function will have been applied only once to the stored value.
1428 ///
1429 /// `update` takes two [`Ordering`] arguments to describe the memory
1430 /// ordering of this operation. The first describes the required ordering for
1431 /// when the operation finally succeeds while the second describes the
1432 /// required ordering for loads. These correspond to the success and failure
1433 /// orderings of [`AtomicBool::compare_exchange`] respectively.
1434 ///
1435 /// Using [`Acquire`] as success ordering makes the store part
1436 /// of this operation [`Relaxed`], and using [`Release`] makes the final successful load
1437 /// [`Relaxed`]. The (failed) load ordering can only be [`SeqCst`], [`Acquire`] or [`Relaxed`].
1438 ///
1439 /// **Note:** This method is only available on platforms that support atomic operations on `u8`.
1440 ///
1441 /// # Considerations
1442 ///
1443 /// This method is not magic; it is not provided by the hardware, and does not act like a
1444 /// critical section or mutex.
1445 ///
1446 /// It is implemented on top of an atomic [compare-and-swap operation], and thus is subject to
1447 /// the usual drawbacks of CAS operations. In particular, be careful of the [ABA problem].
1448 ///
1449 /// [ABA Problem]: https://en.wikipedia.org/wiki/ABA_problem
1450 /// [compare-and-swap operation]: https://en.wikipedia.org/wiki/Compare-and-swap
1451 ///
1452 /// # Examples
1453 ///
1454 /// ```rust
1455 /// #![feature(atomic_try_update)]
1456 ///
1457 /// use std::sync::atomic::{AtomicBool, Ordering};
1458 ///
1459 /// let x = AtomicBool::new(false);
1460 /// assert_eq!(x.update(Ordering::SeqCst, Ordering::SeqCst, |x| !x), false);
1461 /// assert_eq!(x.update(Ordering::SeqCst, Ordering::SeqCst, |x| !x), true);
1462 /// assert_eq!(x.load(Ordering::SeqCst), false);
1463 /// ```
1464 #[inline]
1465 #[unstable(feature = "atomic_try_update", issue = "135894")]
1466 #[cfg(target_has_atomic = "8")]
1467 #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
1468 #[rustc_should_not_be_called_on_const_items]
1469 pub fn update(
1470 &self,
1471 set_order: Ordering,
1472 fetch_order: Ordering,
1473 mut f: impl FnMut(bool) -> bool,
1474 ) -> bool {
1475 let mut prev = self.load(fetch_order);
1476 loop {
1477 match self.compare_exchange_weak(prev, f(prev), set_order, fetch_order) {
1478 Ok(x) => break x,
1479 Err(next_prev) => prev = next_prev,
1480 }
1481 }
1482 }
1483}
1484
1485#[cfg(target_has_atomic_load_store = "ptr")]
1486impl<T> AtomicPtr<T> {
1487 /// Creates a new `AtomicPtr`.
1488 ///
1489 /// # Examples
1490 ///
1491 /// ```
1492 /// use std::sync::atomic::AtomicPtr;
1493 ///
1494 /// let ptr = &mut 5;
1495 /// let atomic_ptr = AtomicPtr::new(ptr);
1496 /// ```
1497 #[inline]
1498 #[stable(feature = "rust1", since = "1.0.0")]
1499 #[rustc_const_stable(feature = "const_atomic_new", since = "1.24.0")]
1500 pub const fn new(p: *mut T) -> AtomicPtr<T> {
1501 AtomicPtr { p: UnsafeCell::new(p) }
1502 }
1503
1504 /// Creates a new `AtomicPtr` from a pointer.
1505 ///
1506 /// # Examples
1507 ///
1508 /// ```
1509 /// use std::sync::atomic::{self, AtomicPtr};
1510 ///
1511 /// // Get a pointer to an allocated value
1512 /// let ptr: *mut *mut u8 = Box::into_raw(Box::new(std::ptr::null_mut()));
1513 ///
1514 /// assert!(ptr.cast::<AtomicPtr<u8>>().is_aligned());
1515 ///
1516 /// {
1517 /// // Create an atomic view of the allocated value
1518 /// let atomic = unsafe { AtomicPtr::from_ptr(ptr) };
1519 ///
1520 /// // Use `atomic` for atomic operations, possibly share it with other threads
1521 /// atomic.store(std::ptr::NonNull::dangling().as_ptr(), atomic::Ordering::Relaxed);
1522 /// }
1523 ///
1524 /// // It's ok to non-atomically access the value behind `ptr`,
1525 /// // since the reference to the atomic ended its lifetime in the block above
1526 /// assert!(!unsafe { *ptr }.is_null());
1527 ///
1528 /// // Deallocate the value
1529 /// unsafe { drop(Box::from_raw(ptr)) }
1530 /// ```
1531 ///
1532 /// # Safety
1533 ///
1534 /// * `ptr` must be aligned to `align_of::<AtomicPtr<T>>()` (note that on some platforms this
1535 /// can be bigger than `align_of::<*mut T>()`).
1536 /// * `ptr` must be [valid] for both reads and writes for the whole lifetime `'a`.
1537 /// * You must adhere to the [Memory model for atomic accesses]. In particular, it is not
1538 /// allowed to mix conflicting atomic and non-atomic accesses, or atomic accesses of different
1539 /// sizes, without synchronization.
1540 ///
1541 /// [valid]: crate::ptr#safety
1542 /// [Memory model for atomic accesses]: self#memory-model-for-atomic-accesses
1543 #[inline]
1544 #[stable(feature = "atomic_from_ptr", since = "1.75.0")]
1545 #[rustc_const_stable(feature = "const_atomic_from_ptr", since = "1.84.0")]
1546 pub const unsafe fn from_ptr<'a>(ptr: *mut *mut T) -> &'a AtomicPtr<T> {
1547 // SAFETY: guaranteed by the caller
1548 unsafe { &*ptr.cast() }
1549 }
1550
1551 /// Returns a mutable reference to the underlying pointer.
1552 ///
1553 /// This is safe because the mutable reference guarantees that no other threads are
1554 /// concurrently accessing the atomic data.
1555 ///
1556 /// # Examples
1557 ///
1558 /// ```
1559 /// use std::sync::atomic::{AtomicPtr, Ordering};
1560 ///
1561 /// let mut data = 10;
1562 /// let mut atomic_ptr = AtomicPtr::new(&mut data);
1563 /// let mut other_data = 5;
1564 /// *atomic_ptr.get_mut() = &mut other_data;
1565 /// assert_eq!(unsafe { *atomic_ptr.load(Ordering::SeqCst) }, 5);
1566 /// ```
1567 #[inline]
1568 #[stable(feature = "atomic_access", since = "1.15.0")]
1569 pub fn get_mut(&mut self) -> &mut *mut T {
1570 self.p.get_mut()
1571 }
1572
1573 /// Gets atomic access to a pointer.
1574 ///
1575 /// **Note:** This function is only available on targets where `AtomicPtr<T>` has the same alignment as `*mut T`.
1576 ///
1577 /// # Examples
1578 ///
1579 /// ```
1580 /// #![feature(atomic_from_mut)]
1581 /// use std::sync::atomic::{AtomicPtr, Ordering};
1582 ///
1583 /// let mut data = 123;
1584 /// let mut some_ptr = &mut data as *mut i32;
1585 /// let a = AtomicPtr::from_mut(&mut some_ptr);
1586 /// let mut other_data = 456;
1587 /// a.store(&mut other_data, Ordering::Relaxed);
1588 /// assert_eq!(unsafe { *some_ptr }, 456);
1589 /// ```
1590 #[inline]
1591 #[cfg(target_has_atomic_equal_alignment = "ptr")]
1592 #[unstable(feature = "atomic_from_mut", issue = "76314")]
1593 pub fn from_mut(v: &mut *mut T) -> &mut Self {
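// Compile-time check that the alignments match: the array length is
// non-zero (a pattern mismatch) if `AtomicPtr` is more aligned than
// `*mut ()`, and the subtraction underflows at const-eval time if it is
// less aligned.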
1594 let [] = [(); align_of::<AtomicPtr<()>>() - align_of::<*mut ()>()];
1595 // SAFETY:
1596 // - the mutable reference guarantees unique ownership.
1597 // - the alignment of `*mut T` and `Self` is the same on all platforms
1598 // supported by rust, as verified above.
1599 unsafe { &mut *(v as *mut *mut T as *mut Self) }
1600 }
1601
1602 /// Gets non-atomic access to a `&mut [AtomicPtr]` slice.
1603 ///
1604 /// This is safe because the mutable reference guarantees that no other threads are
1605 /// concurrently accessing the atomic data.
1606 ///
1607 /// # Examples
1608 ///
1609 /// ```ignore-wasm
1610 /// #![feature(atomic_from_mut)]
1611 /// use std::ptr::null_mut;
1612 /// use std::sync::atomic::{AtomicPtr, Ordering};
1613 ///
1614 /// let mut some_ptrs = [const { AtomicPtr::new(null_mut::<String>()) }; 10];
1615 ///
1616 /// let view: &mut [*mut String] = AtomicPtr::get_mut_slice(&mut some_ptrs);
1617 /// assert_eq!(view, [null_mut::<String>(); 10]);
1618 /// view
1619 /// .iter_mut()
1620 /// .enumerate()
1621 /// .for_each(|(i, ptr)| *ptr = Box::into_raw(Box::new(format!("iteration#{i}"))));
1622 ///
1623 /// std::thread::scope(|s| {
1624 /// for ptr in &some_ptrs {
1625 /// s.spawn(move || {
1626 /// let ptr = ptr.load(Ordering::Relaxed);
1627 /// assert!(!ptr.is_null());
1628 ///
1629 /// let name = unsafe { Box::from_raw(ptr) };
1630 /// println!("Hello, {name}!");
1631 /// });
1632 /// }
1633 /// });
1634 /// ```
1635 #[inline]
1636 #[unstable(feature = "atomic_from_mut", issue = "76314")]
1637 pub fn get_mut_slice(this: &mut [Self]) -> &mut [*mut T] {
1638 // SAFETY: the mutable reference guarantees unique ownership.
1639 unsafe { &mut *(this as *mut [Self] as *mut [*mut T]) }
1640 }
1641
1642 /// Gets atomic access to a slice of pointers.
1643 ///
1644 /// **Note:** This function is only available on targets where `AtomicPtr<T>` has the same alignment as `*mut T`.
1645 ///
1646 /// # Examples
1647 ///
1648 /// ```ignore-wasm
1649 /// #![feature(atomic_from_mut)]
1650 /// use std::ptr::null_mut;
1651 /// use std::sync::atomic::{AtomicPtr, Ordering};
1652 ///
1653 /// let mut some_ptrs = [null_mut::<String>(); 10];
1654 /// let a = &*AtomicPtr::from_mut_slice(&mut some_ptrs);
1655 /// std::thread::scope(|s| {
1656 /// for i in 0..a.len() {
1657 /// s.spawn(move || {
1658 /// let name = Box::new(format!("thread{i}"));
1659 /// a[i].store(Box::into_raw(name), Ordering::Relaxed);
1660 /// });
1661 /// }
1662 /// });
1663 /// for p in some_ptrs {
1664 /// assert!(!p.is_null());
1665 /// let name = unsafe { Box::from_raw(p) };
1666 /// println!("Hello, {name}!");
1667 /// }
1668 /// ```
1669 #[inline]
1670 #[cfg(target_has_atomic_equal_alignment = "ptr")]
1671 #[unstable(feature = "atomic_from_mut", issue = "76314")]
1672 pub fn from_mut_slice(v: &mut [*mut T]) -> &mut [Self] {
1673 // SAFETY:
1674 // - the mutable reference guarantees unique ownership.
1675 // - the alignment of `*mut T` and `Self` is the same on all platforms
1676 // supported by rust, as ensured by the `cfg(target_has_atomic_equal_alignment)` gate on this function.
1677 unsafe { &mut *(v as *mut [*mut T] as *mut [Self]) }
1678 }
1679
1680 /// Consumes the atomic and returns the contained value.
1681 ///
1682 /// This is safe because passing `self` by value guarantees that no other threads are
1683 /// concurrently accessing the atomic data.
1684 ///
1685 /// # Examples
1686 ///
1687 /// ```
1688 /// use std::sync::atomic::AtomicPtr;
1689 ///
1690 /// let mut data = 5;
1691 /// let atomic_ptr = AtomicPtr::new(&mut data);
1692 /// assert_eq!(unsafe { *atomic_ptr.into_inner() }, 5);
1693 /// ```
1694 #[inline]
1695 #[stable(feature = "atomic_access", since = "1.15.0")]
1696 #[rustc_const_stable(feature = "const_atomic_into_inner", since = "1.79.0")]
1697 pub const fn into_inner(self) -> *mut T {
1698 self.p.into_inner()
1699 }
1700
1701 /// Loads a value from the pointer.
1702 ///
1703 /// `load` takes an [`Ordering`] argument which describes the memory ordering
1704 /// of this operation. Possible values are [`SeqCst`], [`Acquire`] and [`Relaxed`].
1705 ///
1706 /// # Panics
1707 ///
1708 /// Panics if `order` is [`Release`] or [`AcqRel`].
1709 ///
1710 /// # Examples
1711 ///
1712 /// ```
1713 /// use std::sync::atomic::{AtomicPtr, Ordering};
1714 ///
1715 /// let ptr = &mut 5;
1716 /// let some_ptr = AtomicPtr::new(ptr);
1717 ///
1718 /// let value = some_ptr.load(Ordering::Relaxed);
1719 /// ```
1720 #[inline]
1721 #[stable(feature = "rust1", since = "1.0.0")]
1722 #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
1723 pub fn load(&self, order: Ordering) -> *mut T {
1724 // SAFETY: data races are prevented by atomic intrinsics.
1725 unsafe { atomic_load(self.p.get(), order) }
1726 }
1727
1728 /// Stores a value into the pointer.
1729 ///
1730 /// `store` takes an [`Ordering`] argument which describes the memory ordering
1731 /// of this operation. Possible values are [`SeqCst`], [`Release`] and [`Relaxed`].
1732 ///
1733 /// # Panics
1734 ///
1735 /// Panics if `order` is [`Acquire`] or [`AcqRel`].
1736 ///
1737 /// # Examples
1738 ///
1739 /// ```
1740 /// use std::sync::atomic::{AtomicPtr, Ordering};
1741 ///
1742 /// let ptr = &mut 5;
1743 /// let some_ptr = AtomicPtr::new(ptr);
1744 ///
1745 /// let other_ptr = &mut 10;
1746 ///
1747 /// some_ptr.store(other_ptr, Ordering::Relaxed);
1748 /// ```
1749 #[inline]
1750 #[stable(feature = "rust1", since = "1.0.0")]
1751 #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
1752 #[rustc_should_not_be_called_on_const_items]
1753 pub fn store(&self, ptr: *mut T, order: Ordering) {
1754 // SAFETY: data races are prevented by atomic intrinsics.
1755 unsafe {
1756 atomic_store(self.p.get(), ptr, order);
1757 }
1758 }
1759
1760 /// Stores a value into the pointer, returning the previous value.
1761 ///
1762 /// `swap` takes an [`Ordering`] argument which describes the memory ordering
1763 /// of this operation. All ordering modes are possible. Note that using
1764 /// [`Acquire`] makes the store part of this operation [`Relaxed`], and
1765 /// using [`Release`] makes the load part [`Relaxed`].
1766 ///
1767 /// **Note:** This method is only available on platforms that support atomic
1768 /// operations on pointers.
1769 ///
1770 /// # Examples
1771 ///
1772 /// ```
1773 /// use std::sync::atomic::{AtomicPtr, Ordering};
1774 ///
1775 /// let ptr = &mut 5;
1776 /// let some_ptr = AtomicPtr::new(ptr);
1777 ///
1778 /// let other_ptr = &mut 10;
1779 ///
1780 /// let value = some_ptr.swap(other_ptr, Ordering::Relaxed);
1781 /// ```
1782 #[inline]
1783 #[stable(feature = "rust1", since = "1.0.0")]
1784 #[cfg(target_has_atomic = "ptr")]
1785 #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
1786 #[rustc_should_not_be_called_on_const_items]
1787 pub fn swap(&self, ptr: *mut T, order: Ordering) -> *mut T {
1788 // SAFETY: data races are prevented by atomic intrinsics.
1789 unsafe { atomic_swap(self.p.get(), ptr, order) }
1790 }
1791
1792 /// Stores a value into the pointer if the current value is the same as the `current` value.
1793 ///
1794 /// The return value is always the previous value. If it is equal to `current`, then the value
1795 /// was updated.
1796 ///
1797 /// `compare_and_swap` also takes an [`Ordering`] argument which describes the memory
1798 /// ordering of this operation. Notice that even when using [`AcqRel`], the operation
1799 /// might fail and hence just perform an `Acquire` load, but not have `Release` semantics.
1800 /// Using [`Acquire`] makes the store part of this operation [`Relaxed`] if it
1801 /// happens, and using [`Release`] makes the load part [`Relaxed`].
1802 ///
1803 /// **Note:** This method is only available on platforms that support atomic
1804 /// operations on pointers.
1805 ///
1806 /// # Migrating to `compare_exchange` and `compare_exchange_weak`
1807 ///
1808 /// `compare_and_swap` is equivalent to `compare_exchange` with the following mapping for
1809 /// memory orderings:
1810 ///
1811 /// Original | Success | Failure
1812 /// -------- | ------- | -------
1813 /// Relaxed | Relaxed | Relaxed
1814 /// Acquire | Acquire | Acquire
1815 /// Release | Release | Relaxed
1816 /// AcqRel | AcqRel | Acquire
1817 /// SeqCst | SeqCst | SeqCst
1818 ///
1819 /// `compare_and_swap` and `compare_exchange` also differ in their return type. You can use
1820 /// `compare_exchange(...).unwrap_or_else(|x| x)` to recover the behavior of `compare_and_swap`,
1821 /// but in most cases it is more idiomatic to check whether the return value is `Ok` or `Err`
1822 /// rather than to infer success vs failure based on the value that was read.
1823 ///
1824 /// During migration, consider whether it makes sense to use `compare_exchange_weak` instead.
1825 /// `compare_exchange_weak` is allowed to fail spuriously even when the comparison succeeds,
1826 /// which allows the compiler to generate better assembly code when the compare and swap
1827 /// is used in a loop.
1828 ///
1829 /// # Examples
1830 ///
1831 /// ```
1832 /// use std::sync::atomic::{AtomicPtr, Ordering};
1833 ///
1834 /// let ptr = &mut 5;
1835 /// let some_ptr = AtomicPtr::new(ptr);
1836 ///
1837 /// let other_ptr = &mut 10;
1838 ///
1839 /// let value = some_ptr.compare_and_swap(ptr, other_ptr, Ordering::Relaxed);
1840 /// ```
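///
/// Migrated to `compare_exchange`, the call above might look like this
/// (`Relaxed` maps to success = `Relaxed`, failure = `Relaxed` per the table,
/// and `unwrap_or_else(|x| x)` recovers the old return value):
///
/// ```
/// use std::sync::atomic::{AtomicPtr, Ordering};
///
/// let ptr = &mut 5;
/// let some_ptr = AtomicPtr::new(ptr);
///
/// let other_ptr = &mut 10;
///
/// let value = some_ptr
///     .compare_exchange(ptr, other_ptr, Ordering::Relaxed, Ordering::Relaxed)
///     .unwrap_or_else(|x| x);
/// ```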
1841 #[inline]
1842 #[stable(feature = "rust1", since = "1.0.0")]
1843 #[deprecated(
1844 since = "1.50.0",
1845 note = "Use `compare_exchange` or `compare_exchange_weak` instead"
1846 )]
1847 #[cfg(target_has_atomic = "ptr")]
1848 #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
1849 #[rustc_should_not_be_called_on_const_items]
1850 pub fn compare_and_swap(&self, current: *mut T, new: *mut T, order: Ordering) -> *mut T {
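// Delegate to `compare_exchange`, picking the strongest failure ordering
// allowed for the given success ordering (see the migration table above).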
1851 match self.compare_exchange(current, new, order, strongest_failure_ordering(order)) {
1852 Ok(x) => x,
1853 Err(x) => x,
1854 }
1855 }
1856
1857 /// Stores a value into the pointer if the current value is the same as the `current` value.
1858 ///
1859 /// The return value is a result indicating whether the new value was written and containing
1860 /// the previous value. On success this value is guaranteed to be equal to `current`.
1861 ///
1862 /// `compare_exchange` takes two [`Ordering`] arguments to describe the memory
1863 /// ordering of this operation. `success` describes the required ordering for the
1864 /// read-modify-write operation that takes place if the comparison with `current` succeeds.
1865 /// `failure` describes the required ordering for the load operation that takes place when
1866 /// the comparison fails. Using [`Acquire`] as success ordering makes the store part
1867 /// of this operation [`Relaxed`], and using [`Release`] makes the successful load
1868 /// [`Relaxed`]. The failure ordering can only be [`SeqCst`], [`Acquire`] or [`Relaxed`].
1869 ///
1870 /// **Note:** This method is only available on platforms that support atomic
1871 /// operations on pointers.
1872 ///
1873 /// # Examples
1874 ///
1875 /// ```
1876 /// use std::sync::atomic::{AtomicPtr, Ordering};
1877 ///
1878 /// let ptr = &mut 5;
1879 /// let some_ptr = AtomicPtr::new(ptr);
1880 ///
1881 /// let other_ptr = &mut 10;
1882 ///
1883 /// let value = some_ptr.compare_exchange(ptr, other_ptr,
1884 /// Ordering::SeqCst, Ordering::Relaxed);
1885 /// ```
1886 ///
1887 /// # Considerations
1888 ///
1889 /// `compare_exchange` is a [compare-and-swap operation] and thus exhibits the usual downsides
1890 /// of CAS operations. In particular, a load of the value followed by a successful
1891 /// `compare_exchange` with the previous load *does not ensure* that other threads have not
1892 /// changed the value in the interim. This is usually important when the *equality* check in
1893 /// the `compare_exchange` is being used to check the *identity* of a value, but equality
1894 /// does not necessarily imply identity. This is a particularly common case for pointers, as
1895 /// a pointer holding the same address does not imply that the same object exists at that
1896 /// address! In this case, `compare_exchange` can lead to the [ABA problem].
1897 ///
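/// A minimal sketch of how this can bite for pointers, assuming the
/// allocator happens to reuse the address of a freed allocation:
///
/// ```no_run
/// use std::sync::atomic::{AtomicPtr, Ordering};
///
/// let first = Box::into_raw(Box::new(0u8));
/// let atom = AtomicPtr::new(first);
/// drop(unsafe { Box::from_raw(first) });
/// // A later allocation may land at the very same address...
/// let second = Box::into_raw(Box::new(1u8));
/// // ...in which case this CAS succeeds even though `first` now refers to a
/// // logically different object.
/// let _ = atom.compare_exchange(first, second, Ordering::AcqRel, Ordering::Acquire);
/// # unsafe { drop(Box::from_raw(second)) };
/// ```
///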
1898 /// [ABA Problem]: https://en.wikipedia.org/wiki/ABA_problem
1899 /// [compare-and-swap operation]: https://en.wikipedia.org/wiki/Compare-and-swap
1900 #[inline]
1901 #[stable(feature = "extended_compare_and_swap", since = "1.10.0")]
1902 #[cfg(target_has_atomic = "ptr")]
1903 #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
1904 #[rustc_should_not_be_called_on_const_items]
1905 pub fn compare_exchange(
1906 &self,
1907 current: *mut T,
1908 new: *mut T,
1909 success: Ordering,
1910 failure: Ordering,
1911 ) -> Result<*mut T, *mut T> {
1912 // SAFETY: data races are prevented by atomic intrinsics.
1913 unsafe { atomic_compare_exchange(self.p.get(), current, new, success, failure) }
1914 }
1915
1916 /// Stores a value into the pointer if the current value is the same as the `current` value.
1917 ///
1918 /// Unlike [`AtomicPtr::compare_exchange`], this function is allowed to spuriously fail even when the
1919 /// comparison succeeds, which can result in more efficient code on some platforms. The
1920 /// return value is a result indicating whether the new value was written and containing the
1921 /// previous value.
1922 ///
1923 /// `compare_exchange_weak` takes two [`Ordering`] arguments to describe the memory
1924 /// ordering of this operation. `success` describes the required ordering for the
1925 /// read-modify-write operation that takes place if the comparison with `current` succeeds.
1926 /// `failure` describes the required ordering for the load operation that takes place when
1927 /// the comparison fails. Using [`Acquire`] as success ordering makes the store part
1928 /// of this operation [`Relaxed`], and using [`Release`] makes the successful load
1929 /// [`Relaxed`]. The failure ordering can only be [`SeqCst`], [`Acquire`] or [`Relaxed`].
1930 ///
1931 /// **Note:** This method is only available on platforms that support atomic
1932 /// operations on pointers.
1933 ///
1934 /// # Examples
1935 ///
1936 /// ```
1937 /// use std::sync::atomic::{AtomicPtr, Ordering};
1938 ///
1939 /// let some_ptr = AtomicPtr::new(&mut 5);
1940 ///
1941 /// let new = &mut 10;
1942 /// let mut old = some_ptr.load(Ordering::Relaxed);
1943 /// loop {
1944 /// match some_ptr.compare_exchange_weak(old, new, Ordering::SeqCst, Ordering::Relaxed) {
1945 /// Ok(_) => break,
1946 /// Err(x) => old = x,
1947 /// }
1948 /// }
1949 /// ```
1950 ///
1951 /// # Considerations
1952 ///
1953 /// `compare_exchange_weak` is a [compare-and-swap operation] and thus exhibits the usual downsides
1954 /// of CAS operations. In particular, a load of the value followed by a successful
1955 /// `compare_exchange_weak` with the previous load *does not ensure* that other threads have not
1956 /// changed the value in the interim. This is usually important when the *equality* check in
1957 /// the `compare_exchange_weak` is being used to check the *identity* of a value, but equality
1958 /// does not necessarily imply identity. This is a particularly common case for pointers, as
1959 /// a pointer holding the same address does not imply that the same object exists at that
1960 /// address! In this case, `compare_exchange_weak` can lead to the [ABA problem].
1961 ///
1962 /// [ABA Problem]: https://en.wikipedia.org/wiki/ABA_problem
1963 /// [compare-and-swap operation]: https://en.wikipedia.org/wiki/Compare-and-swap
1964 #[inline]
1965 #[stable(feature = "extended_compare_and_swap", since = "1.10.0")]
1966 #[cfg(target_has_atomic = "ptr")]
1967 #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
1968 #[rustc_should_not_be_called_on_const_items]
1969 pub fn compare_exchange_weak(
1970 &self,
1971 current: *mut T,
1972 new: *mut T,
1973 success: Ordering,
1974 failure: Ordering,
1975 ) -> Result<*mut T, *mut T> {
1976 // SAFETY: This intrinsic is unsafe because it operates on a raw pointer
1977 // but we know for sure that the pointer is valid (we just got it from
1978 // an `UnsafeCell` that we have by reference) and the atomic operation
1979 // itself allows us to safely mutate the `UnsafeCell` contents.
1980 unsafe { atomic_compare_exchange_weak(self.p.get(), current, new, success, failure) }
1981 }
1982
1983 /// Fetches the value, and applies a function to it that returns an optional
1984 /// new value. Returns a `Result` of `Ok(previous_value)` if the function
1985 /// returned `Some(_)`, else `Err(previous_value)`.
1986 ///
1987 /// Note: This may call the function multiple times if the value has been
1988 /// changed from other threads in the meantime, as long as the function
1989 /// returns `Some(_)`, but the function will have been applied only once to
1990 /// the stored value.
1991 ///
1992 /// `fetch_update` takes two [`Ordering`] arguments to describe the memory
1993 /// ordering of this operation. The first describes the required ordering for
1994 /// when the operation finally succeeds while the second describes the
1995 /// required ordering for loads. These correspond to the success and failure
1996 /// orderings of [`AtomicPtr::compare_exchange`] respectively.
1997 ///
1998 /// Using [`Acquire`] as success ordering makes the store part of this
1999 /// operation [`Relaxed`], and using [`Release`] makes the final successful
2000 /// load [`Relaxed`]. The (failed) load ordering can only be [`SeqCst`],
2001 /// [`Acquire`] or [`Relaxed`].
2002 ///
2003 /// **Note:** This method is only available on platforms that support atomic
2004 /// operations on pointers.
2005 ///
2006 /// # Considerations
2007 ///
2008 /// This method is not magic; it is not provided by the hardware, and does not act like a
2009 /// critical section or mutex.
2010 ///
2011 /// It is implemented on top of an atomic [compare-and-swap operation], and thus is subject to
2012 /// the usual drawbacks of CAS operations. In particular, be careful of the [ABA problem],
2013 /// which is a particularly common pitfall for pointers!
2014 ///
2015 /// [ABA Problem]: https://en.wikipedia.org/wiki/ABA_problem
2016 /// [compare-and-swap operation]: https://en.wikipedia.org/wiki/Compare-and-swap
2017 ///
2018 /// # Examples
2019 ///
2020 /// ```rust
2021 /// use std::sync::atomic::{AtomicPtr, Ordering};
2022 ///
2023 /// let ptr: *mut _ = &mut 5;
2024 /// let some_ptr = AtomicPtr::new(ptr);
2025 ///
2026 /// let new: *mut _ = &mut 10;
2027 /// assert_eq!(some_ptr.fetch_update(Ordering::SeqCst, Ordering::SeqCst, |_| None), Err(ptr));
2028 /// let result = some_ptr.fetch_update(Ordering::SeqCst, Ordering::SeqCst, |x| {
2029 /// if x == ptr {
2030 /// Some(new)
2031 /// } else {
2032 /// None
2033 /// }
2034 /// });
2035 /// assert_eq!(result, Ok(ptr));
2036 /// assert_eq!(some_ptr.load(Ordering::SeqCst), new);
2037 /// ```
2038 #[inline]
2039 #[stable(feature = "atomic_fetch_update", since = "1.53.0")]
2040 #[cfg(target_has_atomic = "ptr")]
2041 #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
2042 #[rustc_should_not_be_called_on_const_items]
2043 pub fn fetch_update<F>(
2044 &self,
2045 set_order: Ordering,
2046 fetch_order: Ordering,
2047 mut f: F,
2048 ) -> Result<*mut T, *mut T>
2049 where
2050 F: FnMut(*mut T) -> Option<*mut T>,
2051 {
2052 let mut prev = self.load(fetch_order);
2053 while let Some(next) = f(prev) {
2054 match self.compare_exchange_weak(prev, next, set_order, fetch_order) {
2055 x @ Ok(_) => return x,
2056 Err(next_prev) => prev = next_prev,
2057 }
2058 }
2059 Err(prev)
2060 }

2061 /// Fetches the value, and applies a function to it that returns an optional
2062 /// new value. Returns a `Result` of `Ok(previous_value)` if the function
2063 /// returned `Some(_)`, else `Err(previous_value)`.
2064 ///
2065 /// See also: [`update`](`AtomicPtr::update`).
2066 ///
2067 /// Note: This may call the function multiple times if the value has been
2068 /// changed from other threads in the meantime, as long as the function
2069 /// returns `Some(_)`, but the function will have been applied only once to
2070 /// the stored value.
2071 ///
2072 /// `try_update` takes two [`Ordering`] arguments to describe the memory
2073 /// ordering of this operation. The first describes the required ordering for
2074 /// when the operation finally succeeds while the second describes the
2075 /// required ordering for loads. These correspond to the success and failure
2076 /// orderings of [`AtomicPtr::compare_exchange`] respectively.
2077 ///
2078 /// Using [`Acquire`] as success ordering makes the store part of this
2079 /// operation [`Relaxed`], and using [`Release`] makes the final successful
2080 /// load [`Relaxed`]. The (failed) load ordering can only be [`SeqCst`],
2081 /// [`Acquire`] or [`Relaxed`].
2082 ///
2083 /// **Note:** This method is only available on platforms that support atomic
2084 /// operations on pointers.
2085 ///
2086 /// # Considerations
2087 ///
2088 /// This method is not magic; it is not provided by the hardware, and does not act like a
2089 /// critical section or mutex.
2090 ///
2091 /// It is implemented on top of an atomic [compare-and-swap operation], and thus is subject to
2092 /// the usual drawbacks of CAS operations. In particular, be careful of the [ABA problem],
2093 /// which is a particularly common pitfall for pointers!
2094 ///
2095 /// [ABA Problem]: https://en.wikipedia.org/wiki/ABA_problem
2096 /// [compare-and-swap operation]: https://en.wikipedia.org/wiki/Compare-and-swap
2097 ///
2098 /// # Examples
2099 ///
2100 /// ```rust
2101 /// #![feature(atomic_try_update)]
2102 /// use std::sync::atomic::{AtomicPtr, Ordering};
2103 ///
2104 /// let ptr: *mut _ = &mut 5;
2105 /// let some_ptr = AtomicPtr::new(ptr);
2106 ///
2107 /// let new: *mut _ = &mut 10;
2108 /// assert_eq!(some_ptr.try_update(Ordering::SeqCst, Ordering::SeqCst, |_| None), Err(ptr));
2109 /// let result = some_ptr.try_update(Ordering::SeqCst, Ordering::SeqCst, |x| {
2110 /// if x == ptr {
2111 /// Some(new)
2112 /// } else {
2113 /// None
2114 /// }
2115 /// });
2116 /// assert_eq!(result, Ok(ptr));
2117 /// assert_eq!(some_ptr.load(Ordering::SeqCst), new);
2118 /// ```
2119 #[inline]
2120 #[unstable(feature = "atomic_try_update", issue = "135894")]
2121 #[cfg(target_has_atomic = "ptr")]
2122 #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
2123 #[rustc_should_not_be_called_on_const_items]
2124 pub fn try_update(
2125 &self,
2126 set_order: Ordering,
2127 fetch_order: Ordering,
2128 f: impl FnMut(*mut T) -> Option<*mut T>,
2129 ) -> Result<*mut T, *mut T> {
2130 // FIXME(atomic_try_update): this is currently an unstable alias to `fetch_update`;
2131 // when stabilizing, turn `fetch_update` into a deprecated alias to `try_update`.
2132 self.fetch_update(set_order, fetch_order, f)
2133 }
2134
2135 /// Fetches the value, and applies a function to it that returns a new value.
2136 /// The new value is stored and the old value is returned.
2137 ///
2138 /// See also: [`try_update`](`AtomicPtr::try_update`).
2139 ///
2140 /// Note: This may call the function multiple times if the value has been changed from other threads in
2141 /// the meantime, but the function will have been applied only once to the stored value.
2142 ///
2143 /// `update` takes two [`Ordering`] arguments to describe the memory
2144 /// ordering of this operation. The first describes the required ordering for
2145 /// when the operation finally succeeds while the second describes the
2146 /// required ordering for loads. These correspond to the success and failure
2147 /// orderings of [`AtomicPtr::compare_exchange`] respectively.
2148 ///
2149 /// Using [`Acquire`] as success ordering makes the store part
2150 /// of this operation [`Relaxed`], and using [`Release`] makes the final successful load
2151 /// [`Relaxed`]. The (failed) load ordering can only be [`SeqCst`], [`Acquire`] or [`Relaxed`].
2152 ///
2153 /// **Note:** This method is only available on platforms that support atomic
2154 /// operations on pointers.
2155 ///
2156 /// # Considerations
2157 ///
2158 /// This method is not magic; it is not provided by the hardware, and does not act like a
2159 /// critical section or mutex.
2160 ///
2161 /// It is implemented on top of an atomic [compare-and-swap operation], and thus is subject to
2162 /// the usual drawbacks of CAS operations. In particular, be careful of the [ABA problem],
2163 /// which is a particularly common pitfall for pointers!
2164 ///
2165 /// [ABA Problem]: https://en.wikipedia.org/wiki/ABA_problem
2166 /// [compare-and-swap operation]: https://en.wikipedia.org/wiki/Compare-and-swap
2167 ///
2168 /// # Examples
2169 ///
2170 /// ```rust
2171 /// #![feature(atomic_try_update)]
2172 ///
2173 /// use std::sync::atomic::{AtomicPtr, Ordering};
2174 ///
2175 /// let ptr: *mut _ = &mut 5;
2176 /// let some_ptr = AtomicPtr::new(ptr);
2177 ///
2178 /// let new: *mut _ = &mut 10;
2179 /// let result = some_ptr.update(Ordering::SeqCst, Ordering::SeqCst, |_| new);
2180 /// assert_eq!(result, ptr);
2181 /// assert_eq!(some_ptr.load(Ordering::SeqCst), new);
2182 /// ```
2183 #[inline]
2184 #[unstable(feature = "atomic_try_update", issue = "135894")]
2185 #[cfg(target_has_atomic = "ptr")]
2186 #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
2187 #[rustc_should_not_be_called_on_const_items]
2188 pub fn update(
2189 &self,
2190 set_order: Ordering,
2191 fetch_order: Ordering,
2192 mut f: impl FnMut(*mut T) -> *mut T,
2193 ) -> *mut T {
2194 let mut prev = self.load(fetch_order);
2195 loop {
2196 match self.compare_exchange_weak(prev, f(prev), set_order, fetch_order) {
2197 Ok(x) => break x,
2198 Err(next_prev) => prev = next_prev,
2199 }
2200 }
2201 }
2202
2203 /// Offsets the pointer's address by adding `val` (in units of `T`),
2204 /// returning the previous pointer.
2205 ///
2206 /// This is equivalent to using [`wrapping_add`] to atomically perform the
2207 /// equivalent of `ptr = ptr.wrapping_add(val);`.
2208 ///
2209 /// This method operates in units of `T`, which means that it cannot be used
2210 /// to offset the pointer by an amount which is not a multiple of
2211 /// `size_of::<T>()`. This can sometimes be inconvenient, as you may want to
2212 /// work with a deliberately misaligned pointer. In such cases, you may use
2213 /// the [`fetch_byte_add`](Self::fetch_byte_add) method instead.
2214 ///
2215 /// `fetch_ptr_add` takes an [`Ordering`] argument which describes the
2216 /// memory ordering of this operation. All ordering modes are possible. Note
2217 /// that using [`Acquire`] makes the store part of this operation
2218 /// [`Relaxed`], and using [`Release`] makes the load part [`Relaxed`].
2219 ///
2220 /// **Note**: This method is only available on platforms that support atomic
2221 /// operations on [`AtomicPtr`].
2222 ///
2223 /// [`wrapping_add`]: pointer::wrapping_add
2224 ///
2225 /// # Examples
2226 ///
2227 /// ```
2228 /// use core::sync::atomic::{AtomicPtr, Ordering};
2229 ///
2230 /// let atom = AtomicPtr::<i64>::new(core::ptr::null_mut());
2231 /// assert_eq!(atom.fetch_ptr_add(1, Ordering::Relaxed).addr(), 0);
2232 /// // Note: units of `size_of::<i64>()`.
2233 /// assert_eq!(atom.load(Ordering::Relaxed).addr(), 8);
2234 /// ```
2235 #[inline]
2236 #[cfg(target_has_atomic = "ptr")]
2237 #[stable(feature = "strict_provenance_atomic_ptr", since = "1.91.0")]
2238 #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
2239 #[rustc_should_not_be_called_on_const_items]
2240 pub fn fetch_ptr_add(&self, val: usize, order: Ordering) -> *mut T {
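// Scale the element count to a byte offset; the wrapping multiply matches
// the `wrapping_add` semantics documented above.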
2241 self.fetch_byte_add(val.wrapping_mul(size_of::<T>()), order)
2242 }
2243
2244 /// Offsets the pointer's address by subtracting `val` (in units of `T`),
2245 /// returning the previous pointer.
2246 ///
2247 /// This is equivalent to using [`wrapping_sub`] to atomically perform the
2248 /// equivalent of `ptr = ptr.wrapping_sub(val);`.
2249 ///
2250 /// This method operates in units of `T`, which means that it cannot be used
2251 /// to offset the pointer by an amount which is not a multiple of
2252 /// `size_of::<T>()`. This can sometimes be inconvenient, as you may want to
2253 /// work with a deliberately misaligned pointer. In such cases, you may use
2254 /// the [`fetch_byte_sub`](Self::fetch_byte_sub) method instead.
2255 ///
2256 /// `fetch_ptr_sub` takes an [`Ordering`] argument which describes the memory
2257 /// ordering of this operation. All ordering modes are possible. Note that
2258 /// using [`Acquire`] makes the store part of this operation [`Relaxed`],
2259 /// and using [`Release`] makes the load part [`Relaxed`].
2260 ///
2261 /// **Note**: This method is only available on platforms that support atomic
2262 /// operations on [`AtomicPtr`].
2263 ///
2264 /// [`wrapping_sub`]: pointer::wrapping_sub
2265 ///
2266 /// # Examples
2267 ///
2268 /// ```
2269 /// use core::sync::atomic::{AtomicPtr, Ordering};
2270 ///
2271 /// let array = [1i32, 2i32];
2272 /// let atom = AtomicPtr::new(array.as_ptr().wrapping_add(1) as *mut _);
2273 ///
2274 /// assert!(core::ptr::eq(
2275 /// atom.fetch_ptr_sub(1, Ordering::Relaxed),
2276 /// &array[1],
2277 /// ));
2278 /// assert!(core::ptr::eq(atom.load(Ordering::Relaxed), &array[0]));
2279 /// ```
2280 #[inline]
2281 #[cfg(target_has_atomic = "ptr")]
2282 #[stable(feature = "strict_provenance_atomic_ptr", since = "1.91.0")]
2283 #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
2284 #[rustc_should_not_be_called_on_const_items]
2285 pub fn fetch_ptr_sub(&self, val: usize, order: Ordering) -> *mut T {
2286 self.fetch_byte_sub(val.wrapping_mul(size_of::<T>()), order)
2287 }
2288
2289 /// Offsets the pointer's address by adding `val` *bytes*, returning the
2290 /// previous pointer.
2291 ///
2292 /// This is equivalent to using [`wrapping_byte_add`] to atomically
2293 /// perform `ptr = ptr.wrapping_byte_add(val)`.
2294 ///
2295 /// `fetch_byte_add` takes an [`Ordering`] argument which describes the
2296 /// memory ordering of this operation. All ordering modes are possible. Note
2297 /// that using [`Acquire`] makes the store part of this operation
2298 /// [`Relaxed`], and using [`Release`] makes the load part [`Relaxed`].
2299 ///
2300 /// **Note**: This method is only available on platforms that support atomic
2301 /// operations on [`AtomicPtr`].
2302 ///
2303 /// [`wrapping_byte_add`]: pointer::wrapping_byte_add
2304 ///
2305 /// # Examples
2306 ///
2307 /// ```
2308 /// use core::sync::atomic::{AtomicPtr, Ordering};
2309 ///
2310 /// let atom = AtomicPtr::<i64>::new(core::ptr::null_mut());
2311 /// assert_eq!(atom.fetch_byte_add(1, Ordering::Relaxed).addr(), 0);
2312 /// // Note: in units of bytes, not `size_of::<i64>()`.
2313 /// assert_eq!(atom.load(Ordering::Relaxed).addr(), 1);
2314 /// ```
2315 #[inline]
2316 #[cfg(target_has_atomic = "ptr")]
2317 #[stable(feature = "strict_provenance_atomic_ptr", since = "1.91.0")]
2318 #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
2319 #[rustc_should_not_be_called_on_const_items]
2320 pub fn fetch_byte_add(&self, val: usize, order: Ordering) -> *mut T {
2321 // SAFETY: data races are prevented by atomic intrinsics.
2322 unsafe { atomic_add(self.p.get(), val, order).cast() }
2323 }
2324
2325 /// Offsets the pointer's address by subtracting `val` *bytes*, returning the
2326 /// previous pointer.
2327 ///
2328 /// This is equivalent to using [`wrapping_byte_sub`] to atomically
2329 /// perform `ptr = ptr.wrapping_byte_sub(val)`.
2330 ///
2331 /// `fetch_byte_sub` takes an [`Ordering`] argument which describes the
2332 /// memory ordering of this operation. All ordering modes are possible. Note
2333 /// that using [`Acquire`] makes the store part of this operation
2334 /// [`Relaxed`], and using [`Release`] makes the load part [`Relaxed`].
2335 ///
2336 /// **Note**: This method is only available on platforms that support atomic
2337 /// operations on [`AtomicPtr`].
2338 ///
2339 /// [`wrapping_byte_sub`]: pointer::wrapping_byte_sub
2340 ///
2341 /// # Examples
2342 ///
2343 /// ```
2344 /// use core::sync::atomic::{AtomicPtr, Ordering};
2345 ///
2346 /// let mut arr = [0i64, 1];
2347 /// let atom = AtomicPtr::<i64>::new(&raw mut arr[1]);
2348 /// assert_eq!(atom.fetch_byte_sub(8, Ordering::Relaxed).addr(), (&raw const arr[1]).addr());
2349 /// assert_eq!(atom.load(Ordering::Relaxed).addr(), (&raw const arr[0]).addr());
2350 /// ```
2351 #[inline]
2352 #[cfg(target_has_atomic = "ptr")]
2353 #[stable(feature = "strict_provenance_atomic_ptr", since = "1.91.0")]
2354 #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
2355 #[rustc_should_not_be_called_on_const_items]
2356 pub fn fetch_byte_sub(&self, val: usize, order: Ordering) -> *mut T {
2357 // SAFETY: data races are prevented by atomic intrinsics.
2358 unsafe { atomic_sub(self.p.get(), val, order).cast() }
2359 }
2360
2361 /// Performs a bitwise "or" operation on the address of the current pointer,
2362 /// and the argument `val`, and stores a pointer with provenance of the
2363 /// current pointer and the resulting address.
2364 ///
2365 /// This is equivalent to using [`map_addr`] to atomically perform
2366 /// `ptr = ptr.map_addr(|a| a | val)`. This can be used in tagged
2367 /// pointer schemes to atomically set tag bits.
2368 ///
2369 /// **Caveat**: This operation returns the previous value. To compute the
2370 /// stored value without losing provenance, you may use [`map_addr`]. For
2371 /// example: `a.fetch_or(val).map_addr(|a| a | val)`.
2372 ///
2373 /// `fetch_or` takes an [`Ordering`] argument which describes the memory
2374 /// ordering of this operation. All ordering modes are possible. Note that
2375 /// using [`Acquire`] makes the store part of this operation [`Relaxed`],
2376 /// and using [`Release`] makes the load part [`Relaxed`].
2377 ///
2378 /// **Note**: This method is only available on platforms that support atomic
2379 /// operations on [`AtomicPtr`].
2380 ///
2381 /// This API and its claimed semantics are part of the Strict Provenance
2382 /// experiment, see the [module documentation for `ptr`][crate::ptr] for
2383 /// details.
2384 ///
2385 /// [`map_addr`]: pointer::map_addr
2386 ///
2387 /// # Examples
2388 ///
2389 /// ```
2390 /// use core::sync::atomic::{AtomicPtr, Ordering};
2391 ///
2392 /// let pointer = &mut 3i64 as *mut i64;
2393 ///
2394 /// let atom = AtomicPtr::<i64>::new(pointer);
2395 /// // Tag the bottom bit of the pointer.
2396 /// assert_eq!(atom.fetch_or(1, Ordering::Relaxed).addr() & 1, 0);
2397 /// // Extract and untag.
2398 /// let tagged = atom.load(Ordering::Relaxed);
2399 /// assert_eq!(tagged.addr() & 1, 1);
2400 /// assert_eq!(tagged.map_addr(|p| p & !1), pointer);
2401 /// ```
2402 #[inline]
2403 #[cfg(target_has_atomic = "ptr")]
2404 #[stable(feature = "strict_provenance_atomic_ptr", since = "1.91.0")]
2405 #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
2406 #[rustc_should_not_be_called_on_const_items]
2407 pub fn fetch_or(&self, val: usize, order: Ordering) -> *mut T {
2408 // SAFETY: data races are prevented by atomic intrinsics.
2409 unsafe { atomic_or(self.p.get(), val, order).cast() }
2410 }
2411
2412 /// Performs a bitwise "and" operation on the address of the current
2413 /// pointer, and the argument `val`, and stores a pointer with provenance of
2414 /// the current pointer and the resulting address.
2415 ///
2416 /// This is equivalent to using [`map_addr`] to atomically perform
2417 /// `ptr = ptr.map_addr(|a| a & val)`. This can be used in tagged
2418 /// pointer schemes to atomically unset tag bits.
2419 ///
2420 /// **Caveat**: This operation returns the previous value. To compute the
2421 /// stored value without losing provenance, you may use [`map_addr`]. For
2422 /// example: `a.fetch_and(val).map_addr(|a| a & val)`.
2423 ///
2424 /// `fetch_and` takes an [`Ordering`] argument which describes the memory
2425 /// ordering of this operation. All ordering modes are possible. Note that
2426 /// using [`Acquire`] makes the store part of this operation [`Relaxed`],
2427 /// and using [`Release`] makes the load part [`Relaxed`].
2428 ///
2429 /// **Note**: This method is only available on platforms that support atomic
2430 /// operations on [`AtomicPtr`].
2431 ///
2432 /// This API and its claimed semantics are part of the Strict Provenance
2433 /// experiment, see the [module documentation for `ptr`][crate::ptr] for
2434 /// details.
2435 ///
2436 /// [`map_addr`]: pointer::map_addr
2437 ///
2438 /// # Examples
2439 ///
2440 /// ```
2441 /// use core::sync::atomic::{AtomicPtr, Ordering};
2442 ///
2443 /// let pointer = &mut 3i64 as *mut i64;
2444 /// // A tagged pointer
2445 /// let atom = AtomicPtr::<i64>::new(pointer.map_addr(|a| a | 1));
2446 /// assert_eq!(atom.fetch_or(1, Ordering::Relaxed).addr() & 1, 1);
2447 /// // Untag, and extract the previously tagged pointer.
2448 /// let untagged = atom.fetch_and(!1, Ordering::Relaxed)
2449 /// .map_addr(|a| a & !1);
2450 /// assert_eq!(untagged, pointer);
2451 /// ```
2452 #[inline]
2453 #[cfg(target_has_atomic = "ptr")]
2454 #[stable(feature = "strict_provenance_atomic_ptr", since = "1.91.0")]
2455 #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
2456 #[rustc_should_not_be_called_on_const_items]
2457 pub fn fetch_and(&self, val: usize, order: Ordering) -> *mut T {
2458 // SAFETY: data races are prevented by atomic intrinsics.
2459 unsafe { atomic_and(self.p.get(), val, order).cast() }
2460 }
2461
2462 /// Performs a bitwise "xor" operation on the address of the current
2463 /// pointer, and the argument `val`, and stores a pointer with provenance of
2464 /// the current pointer and the resulting address.
2465 ///
2466 /// This is equivalent to using [`map_addr`] to atomically perform
2467 /// `ptr = ptr.map_addr(|a| a ^ val)`. This can be used in tagged
2468 /// pointer schemes to atomically toggle tag bits.
2469 ///
2470 /// **Caveat**: This operation returns the previous value. To compute the
2471 /// stored value without losing provenance, you may use [`map_addr`]. For
2472 /// example: `a.fetch_xor(val).map_addr(|a| a ^ val)`.
2473 ///
2474 /// `fetch_xor` takes an [`Ordering`] argument which describes the memory
2475 /// ordering of this operation. All ordering modes are possible. Note that
2476 /// using [`Acquire`] makes the store part of this operation [`Relaxed`],
2477 /// and using [`Release`] makes the load part [`Relaxed`].
2478 ///
2479 /// **Note**: This method is only available on platforms that support atomic
2480 /// operations on [`AtomicPtr`].
2481 ///
2482 /// This API and its claimed semantics are part of the Strict Provenance
2483 /// experiment, see the [module documentation for `ptr`][crate::ptr] for
2484 /// details.
2485 ///
2486 /// [`map_addr`]: pointer::map_addr
2487 ///
2488 /// # Examples
2489 ///
2490 /// ```
2491 /// use core::sync::atomic::{AtomicPtr, Ordering};
2492 ///
2493 /// let pointer = &mut 3i64 as *mut i64;
2494 /// let atom = AtomicPtr::<i64>::new(pointer);
2495 ///
2496 /// // Toggle a tag bit on the pointer.
2497 /// atom.fetch_xor(1, Ordering::Relaxed);
2498 /// assert_eq!(atom.load(Ordering::Relaxed).addr() & 1, 1);
2499 /// ```
2500 #[inline]
2501 #[cfg(target_has_atomic = "ptr")]
2502 #[stable(feature = "strict_provenance_atomic_ptr", since = "1.91.0")]
2503 #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
2504 #[rustc_should_not_be_called_on_const_items]
2505 pub fn fetch_xor(&self, val: usize, order: Ordering) -> *mut T {
2506 // SAFETY: data races are prevented by atomic intrinsics.
2507 unsafe { atomic_xor(self.p.get(), val, order).cast() }
2508 }
2509
2510 /// Returns a mutable pointer to the underlying pointer.
2511 ///
2512 /// Doing non-atomic reads and writes on the resulting pointer can be a data race.
2513 /// This method is mostly useful for FFI, where the function signature may use
2514 /// `*mut *mut T` instead of `&AtomicPtr<T>`.
2515 ///
2516 /// Returning an `*mut` pointer from a shared reference to this atomic is safe because the
2517 /// atomic types work with interior mutability. All modifications of an atomic change the value
2518 /// through a shared reference, and can do so safely as long as they use atomic operations. Any
2519 /// use of the returned raw pointer requires an `unsafe` block and still has to uphold the
2520 /// requirements of the [memory model].
2521 ///
2522 /// # Examples
2523 ///
2524 /// ```ignore (extern-declaration)
2525 /// use std::sync::atomic::AtomicPtr;
2526 ///
2527 /// extern "C" {
2528 /// fn my_atomic_op(arg: *mut *mut u32);
2529 /// }
2530 ///
2531 /// let mut value = 17;
2532 /// let atomic = AtomicPtr::new(&mut value);
2533 ///
2534 /// // SAFETY: Safe as long as `my_atomic_op` is atomic.
2535 /// unsafe {
2536 /// my_atomic_op(atomic.as_ptr());
2537 /// }
2538 /// ```
2539 ///
2540 /// [memory model]: self#memory-model-for-atomic-accesses
2541 #[inline]
2542 #[stable(feature = "atomic_as_ptr", since = "1.70.0")]
2543 #[rustc_const_stable(feature = "atomic_as_ptr", since = "1.70.0")]
2544 #[rustc_never_returns_null_ptr]
#[rustc_should_not_be_called_on_const_items]
2545 pub const fn as_ptr(&self) -> *mut *mut T {
2546 self.p.get()
2547 }
2548}
2549
2550#[cfg(target_has_atomic_load_store = "8")]
2551#[stable(feature = "atomic_bool_from", since = "1.24.0")]
2552#[rustc_const_unstable(feature = "const_convert", issue = "143773")]
2553impl const From<bool> for AtomicBool {
2554 /// Converts a `bool` into an `AtomicBool`.
2555 ///
2556 /// # Examples
2557 ///
2558 /// ```
2559 /// use std::sync::atomic::AtomicBool;
2560 /// let atomic_bool = AtomicBool::from(true);
2561 /// assert_eq!(format!("{atomic_bool:?}"), "true")
2562 /// ```
2563 #[inline]
2564 fn from(b: bool) -> Self {
2565 Self::new(b)
2566 }
2567}
2568
2569#[cfg(target_has_atomic_load_store = "ptr")]
2570#[stable(feature = "atomic_from", since = "1.23.0")]
2571#[rustc_const_unstable(feature = "const_convert", issue = "143773")]
2572impl<T> const From<*mut T> for AtomicPtr<T> {
2573 /// Converts a `*mut T` into an `AtomicPtr<T>`.
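///
/// # Examples
///
/// ```
/// use std::sync::atomic::AtomicPtr;
///
/// let mut data = 5;
/// let atomic_ptr = AtomicPtr::from(&mut data as *mut i32);
/// assert_eq!(unsafe { *atomic_ptr.into_inner() }, 5);
/// ```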
2574 #[inline]
2575 fn from(p: *mut T) -> Self {
2576 Self::new(p)
2577 }
2578}
2579
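// Expands to the `yes` tokens when the integer type is 8 bits wide (`u8`/`i8`)
// and to the `no` tokens otherwise; both arms are optional and default to an
// empty string. Used below to vary the generated docs by type width.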
2580#[allow(unused_macros)] // This macro ends up being unused on some architectures.
2581macro_rules! if_8_bit {
2582 (u8, $( yes = [$($yes:tt)*], )? $( no = [$($no:tt)*], )? ) => { concat!("", $($($yes)*)?) };
2583 (i8, $( yes = [$($yes:tt)*], )? $( no = [$($no:tt)*], )? ) => { concat!("", $($($yes)*)?) };
2584 ($_:ident, $( yes = [$($yes:tt)*], )? $( no = [$($no:tt)*], )? ) => { concat!("", $($($no)*)?) };
2585}
2586
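// Generates one atomic integer type (e.g. `AtomicU32`) along with its
// inherent methods and trait impls. The `meta` parameters thread through the
// stability attributes for each API group; `$align` is the forced alignment
// of the generated type, and `$s_int_type` is the name of the underlying
// integer as it should appear in the generated docs.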
2587#[cfg(target_has_atomic_load_store)]
2588macro_rules! atomic_int {
2589 ($cfg_cas:meta,
2590 $cfg_align:meta,
2591 $stable:meta,
2592 $stable_cxchg:meta,
2593 $stable_debug:meta,
2594 $stable_access:meta,
2595 $stable_from:meta,
2596 $stable_nand:meta,
2597 $const_stable_new:meta,
2598 $const_stable_into_inner:meta,
2599 $diagnostic_item:meta,
2600 $s_int_type:literal,
2601 $extra_feature:expr,
2602 $min_fn:ident, $max_fn:ident,
2603 $align:expr,
2604 $int_type:ident $atomic_type:ident) => {
2605 /// An integer type which can be safely shared between threads.
2606 ///
2607 /// This type has the same
2608 #[doc = if_8_bit!(
2609 $int_type,
2610 yes = ["size, alignment, and bit validity"],
2611 no = ["size and bit validity"],
2612 )]
2613 /// as the underlying integer type, [`
2614 #[doc = $s_int_type]
2615 /// `].
2616 #[doc = if_8_bit! {
2617 $int_type,
2618 no = [
2619 "However, the alignment of this type is always equal to its ",
2620 "size, even on targets where [`", $s_int_type, "`] has a ",
2621 "lesser alignment."
2622 ],
2623 }]
2624 ///
2625 /// For more about the differences between atomic types and
2626 /// non-atomic types as well as information about the portability of
2627 /// this type, please see the [module-level documentation].
2628 ///
2629 /// **Note:** This type is only available on platforms that support
2630 /// atomic loads and stores of [`
2631 #[doc = $s_int_type]
2632 /// `].
2633 ///
2634 /// [module-level documentation]: crate::sync::atomic
2635 #[$stable]
2636 #[$diagnostic_item]
2637 #[repr(C, align($align))]
2638 pub struct $atomic_type {
2639 v: UnsafeCell<$int_type>,
2640 }
2641
2642 #[$stable]
2643 impl Default for $atomic_type {
2644 #[inline]
2645 fn default() -> Self {
2646 Self::new(Default::default())
2647 }
2648 }
2649
2650 #[$stable_from]
2651 #[rustc_const_unstable(feature = "const_convert", issue = "143773")]
2652 impl const From<$int_type> for $atomic_type {
2653 #[doc = concat!("Converts an `", stringify!($int_type), "` into an `", stringify!($atomic_type), "`.")]
2654 #[inline]
2655 fn from(v: $int_type) -> Self { Self::new(v) }
2656 }
2657
2658 #[$stable_debug]
2659 impl fmt::Debug for $atomic_type {
2660 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
2661 fmt::Debug::fmt(&self.load(Ordering::Relaxed), f)
2662 }
2663 }
2664
2665 // Send is implicitly implemented.
2666 #[$stable]
2667 unsafe impl Sync for $atomic_type {}
2668
2669 impl $atomic_type {
2670 /// Creates a new atomic integer.
2671 ///
2672 /// # Examples
2673 ///
2674 /// ```
2675 #[doc = concat!($extra_feature, "use std::sync::atomic::", stringify!($atomic_type), ";")]
2676 ///
2677 #[doc = concat!("let atomic_forty_two = ", stringify!($atomic_type), "::new(42);")]
2678 /// ```
2679 #[inline]
2680 #[$stable]
2681 #[$const_stable_new]
2682 #[must_use]
2683 pub const fn new(v: $int_type) -> Self {
2684 Self {v: UnsafeCell::new(v)}
2685 }
2686
2687 /// Creates a new reference to an atomic integer from a pointer.
2688 ///
2689 /// # Examples
2690 ///
2691 /// ```
2692 #[doc = concat!($extra_feature, "use std::sync::atomic::{self, ", stringify!($atomic_type), "};")]
2693 ///
2694 /// // Get a pointer to an allocated value
2695 #[doc = concat!("let ptr: *mut ", stringify!($int_type), " = Box::into_raw(Box::new(0));")]
2696 ///
2697 #[doc = concat!("assert!(ptr.cast::<", stringify!($atomic_type), ">().is_aligned());")]
2698 ///
2699 /// {
2700 /// // Create an atomic view of the allocated value
2701 // SAFETY: this is a doc comment, tidy, it can't hurt you (also guaranteed by the construction of `ptr` and the assert above)
2702 #[doc = concat!(" let atomic = unsafe {", stringify!($atomic_type), "::from_ptr(ptr) };")]
2703 ///
2704 /// // Use `atomic` for atomic operations, possibly share it with other threads
2705 /// atomic.store(1, atomic::Ordering::Relaxed);
2706 /// }
2707 ///
2708 /// // It's ok to non-atomically access the value behind `ptr`,
2709 /// // since the reference to the atomic ended its lifetime in the block above
2710 /// assert_eq!(unsafe { *ptr }, 1);
2711 ///
2712 /// // Deallocate the value
2713 /// unsafe { drop(Box::from_raw(ptr)) }
2714 /// ```
2715 ///
2716 /// # Safety
2717 ///
2718 /// * `ptr` must be aligned to
2719 #[doc = concat!(" `align_of::<", stringify!($atomic_type), ">()`")]
2720 #[doc = if_8_bit!{
2721 $int_type,
2722 yes = [
2723 " (note that this is always true, since `align_of::<",
2724 stringify!($atomic_type), ">() == 1`)."
2725 ],
2726 no = [
2727 " (note that on some platforms this can be bigger than `align_of::<",
2728 stringify!($int_type), ">()`)."
2729 ],
2730 }]
2731 /// * `ptr` must be [valid] for both reads and writes for the whole lifetime `'a`.
2732 /// * You must adhere to the [Memory model for atomic accesses]. In particular, it is not
2733 /// allowed to mix conflicting atomic and non-atomic accesses, or atomic accesses of different
2734 /// sizes, without synchronization.
2735 ///
2736 /// [valid]: crate::ptr#safety
2737 /// [Memory model for atomic accesses]: self#memory-model-for-atomic-accesses
2738 #[inline]
2739 #[stable(feature = "atomic_from_ptr", since = "1.75.0")]
2740 #[rustc_const_stable(feature = "const_atomic_from_ptr", since = "1.84.0")]
2741 pub const unsafe fn from_ptr<'a>(ptr: *mut $int_type) -> &'a $atomic_type {
2742 // SAFETY: guaranteed by the caller
2743 unsafe { &*ptr.cast() }
2744 }
2745
/// Returns a mutable reference to the underlying integer.
2748 ///
2749 /// This is safe because the mutable reference guarantees that no other threads are
2750 /// concurrently accessing the atomic data.
2751 ///
2752 /// # Examples
2753 ///
2754 /// ```
2755 #[doc = concat!($extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};")]
2756 ///
2757 #[doc = concat!("let mut some_var = ", stringify!($atomic_type), "::new(10);")]
2758 /// assert_eq!(*some_var.get_mut(), 10);
2759 /// *some_var.get_mut() = 5;
2760 /// assert_eq!(some_var.load(Ordering::SeqCst), 5);
2761 /// ```
2762 #[inline]
2763 #[$stable_access]
2764 pub fn get_mut(&mut self) -> &mut $int_type {
2765 self.v.get_mut()
2766 }
2767
2768 #[doc = concat!("Get atomic access to a `&mut ", stringify!($int_type), "`.")]
2769 ///
2770 #[doc = if_8_bit! {
2771 $int_type,
2772 no = [
2773 "**Note:** This function is only available on targets where `",
2774 stringify!($atomic_type), "` has the same alignment as `", stringify!($int_type), "`."
2775 ],
2776 }]
2777 ///
2778 /// # Examples
2779 ///
2780 /// ```
2781 /// #![feature(atomic_from_mut)]
2782 #[doc = concat!($extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};")]
2783 ///
2784 /// let mut some_int = 123;
2785 #[doc = concat!("let a = ", stringify!($atomic_type), "::from_mut(&mut some_int);")]
2786 /// a.store(100, Ordering::Relaxed);
2787 /// assert_eq!(some_int, 100);
2788 /// ```
2789 ///
2790 #[inline]
2791 #[$cfg_align]
2792 #[unstable(feature = "atomic_from_mut", issue = "76314")]
2793 pub fn from_mut(v: &mut $int_type) -> &mut Self {
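// Compile-time alignment check: the subtraction would underflow (a const-eval
// error) if `Self` were less aligned than the integer, and the `[]` pattern only
// matches an array of length zero, so this compiles only when the two alignments
// are equal.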
2794 let [] = [(); align_of::<Self>() - align_of::<$int_type>()];
2795 // SAFETY:
2796 // - the mutable reference guarantees unique ownership.
2797 // - the alignment of `$int_type` and `Self` is the
2798 // same, as promised by $cfg_align and verified above.
2799 unsafe { &mut *(v as *mut $int_type as *mut Self) }
2800 }
2801
#[doc = concat!("Get non-atomic access to a `&mut [", stringify!($atomic_type), "]` slice.")]
2803 ///
2804 /// This is safe because the mutable reference guarantees that no other threads are
2805 /// concurrently accessing the atomic data.
2806 ///
2807 /// # Examples
2808 ///
2809 /// ```ignore-wasm
2810 /// #![feature(atomic_from_mut)]
2811 #[doc = concat!($extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};")]
2812 ///
2813 #[doc = concat!("let mut some_ints = [const { ", stringify!($atomic_type), "::new(0) }; 10];")]
2814 ///
2815 #[doc = concat!("let view: &mut [", stringify!($int_type), "] = ", stringify!($atomic_type), "::get_mut_slice(&mut some_ints);")]
2816 /// assert_eq!(view, [0; 10]);
2817 /// view
2818 /// .iter_mut()
2819 /// .enumerate()
2820 /// .for_each(|(idx, int)| *int = idx as _);
2821 ///
2822 /// std::thread::scope(|s| {
2823 /// some_ints
2824 /// .iter()
2825 /// .enumerate()
2826 /// .for_each(|(idx, int)| {
2827 /// s.spawn(move || assert_eq!(int.load(Ordering::Relaxed), idx as _));
2828 /// })
2829 /// });
2830 /// ```
2831 #[inline]
2832 #[unstable(feature = "atomic_from_mut", issue = "76314")]
2833 pub fn get_mut_slice(this: &mut [Self]) -> &mut [$int_type] {
2834 // SAFETY: the mutable reference guarantees unique ownership.
2835 unsafe { &mut *(this as *mut [Self] as *mut [$int_type]) }
2836 }
2837
2838 #[doc = concat!("Get atomic access to a `&mut [", stringify!($int_type), "]` slice.")]
2839 ///
2840 #[doc = if_8_bit! {
2841 $int_type,
2842 no = [
2843 "**Note:** This function is only available on targets where `",
2844 stringify!($atomic_type), "` has the same alignment as `", stringify!($int_type), "`."
2845 ],
2846 }]
2847 ///
2848 /// # Examples
2849 ///
2850 /// ```ignore-wasm
2851 /// #![feature(atomic_from_mut)]
2852 #[doc = concat!($extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};")]
2853 ///
2854 /// let mut some_ints = [0; 10];
2855 #[doc = concat!("let a = &*", stringify!($atomic_type), "::from_mut_slice(&mut some_ints);")]
2856 /// std::thread::scope(|s| {
2857 /// for i in 0..a.len() {
2858 /// s.spawn(move || a[i].store(i as _, Ordering::Relaxed));
2859 /// }
2860 /// });
2861 /// for (i, n) in some_ints.into_iter().enumerate() {
2862 /// assert_eq!(i, n as usize);
2863 /// }
2864 /// ```
2865 #[inline]
2866 #[$cfg_align]
2867 #[unstable(feature = "atomic_from_mut", issue = "76314")]
2868 pub fn from_mut_slice(v: &mut [$int_type]) -> &mut [Self] {
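// Same compile-time alignment check as in `from_mut` above.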
2869 let [] = [(); align_of::<Self>() - align_of::<$int_type>()];
2870 // SAFETY:
2871 // - the mutable reference guarantees unique ownership.
2872 // - the alignment of `$int_type` and `Self` is the
2873 // same, as promised by $cfg_align and verified above.
2874 unsafe { &mut *(v as *mut [$int_type] as *mut [Self]) }
2875 }
2876
2877 /// Consumes the atomic and returns the contained value.
2878 ///
2879 /// This is safe because passing `self` by value guarantees that no other threads are
2880 /// concurrently accessing the atomic data.
2881 ///
2882 /// # Examples
2883 ///
2884 /// ```
2885 #[doc = concat!($extra_feature, "use std::sync::atomic::", stringify!($atomic_type), ";")]
2886 ///
2887 #[doc = concat!("let some_var = ", stringify!($atomic_type), "::new(5);")]
2888 /// assert_eq!(some_var.into_inner(), 5);
2889 /// ```
2890 #[inline]
2891 #[$stable_access]
2892 #[$const_stable_into_inner]
2893 pub const fn into_inner(self) -> $int_type {
2894 self.v.into_inner()
2895 }
2896
2897 /// Loads a value from the atomic integer.
2898 ///
2899 /// `load` takes an [`Ordering`] argument which describes the memory ordering of this operation.
2900 /// Possible values are [`SeqCst`], [`Acquire`] and [`Relaxed`].
2901 ///
2902 /// # Panics
2903 ///
2904 /// Panics if `order` is [`Release`] or [`AcqRel`].
2905 ///
2906 /// # Examples
2907 ///
2908 /// ```
2909 #[doc = concat!($extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};")]
2910 ///
2911 #[doc = concat!("let some_var = ", stringify!($atomic_type), "::new(5);")]
2912 ///
2913 /// assert_eq!(some_var.load(Ordering::Relaxed), 5);
2914 /// ```
2915 #[inline]
2916 #[$stable]
2917 #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
2918 pub fn load(&self, order: Ordering) -> $int_type {
2919 // SAFETY: data races are prevented by atomic intrinsics.
2920 unsafe { atomic_load(self.v.get(), order) }
2921 }
2922
2923 /// Stores a value into the atomic integer.
2924 ///
2925 /// `store` takes an [`Ordering`] argument which describes the memory ordering of this operation.
2926 /// Possible values are [`SeqCst`], [`Release`] and [`Relaxed`].
2927 ///
2928 /// # Panics
2929 ///
2930 /// Panics if `order` is [`Acquire`] or [`AcqRel`].
2931 ///
2932 /// # Examples
2933 ///
2934 /// ```
2935 #[doc = concat!($extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};")]
2936 ///
2937 #[doc = concat!("let some_var = ", stringify!($atomic_type), "::new(5);")]
2938 ///
2939 /// some_var.store(10, Ordering::Relaxed);
2940 /// assert_eq!(some_var.load(Ordering::Relaxed), 10);
2941 /// ```
2942 #[inline]
2943 #[$stable]
2944 #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
2945 #[rustc_should_not_be_called_on_const_items]
2946 pub fn store(&self, val: $int_type, order: Ordering) {
2947 // SAFETY: data races are prevented by atomic intrinsics.
2948 unsafe { atomic_store(self.v.get(), val, order); }
2949 }
2950
2951 /// Stores a value into the atomic integer, returning the previous value.
2952 ///
2953 /// `swap` takes an [`Ordering`] argument which describes the memory ordering
2954 /// of this operation. All ordering modes are possible. Note that using
2955 /// [`Acquire`] makes the store part of this operation [`Relaxed`], and
2956 /// using [`Release`] makes the load part [`Relaxed`].
2957 ///
2958 /// **Note**: This method is only available on platforms that support atomic operations on
2959 #[doc = concat!("[`", $s_int_type, "`].")]
2960 ///
2961 /// # Examples
2962 ///
2963 /// ```
2964 #[doc = concat!($extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};")]
2965 ///
2966 #[doc = concat!("let some_var = ", stringify!($atomic_type), "::new(5);")]
2967 ///
2968 /// assert_eq!(some_var.swap(10, Ordering::Relaxed), 5);
2969 /// ```
2970 #[inline]
2971 #[$stable]
2972 #[$cfg_cas]
2973 #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
2974 #[rustc_should_not_be_called_on_const_items]
2975 pub fn swap(&self, val: $int_type, order: Ordering) -> $int_type {
2976 // SAFETY: data races are prevented by atomic intrinsics.
2977 unsafe { atomic_swap(self.v.get(), val, order) }
2978 }
2979
2980 /// Stores a value into the atomic integer if the current value is the same as
2981 /// the `current` value.
2982 ///
2983 /// The return value is always the previous value. If it is equal to `current`, then the
2984 /// value was updated.
2985 ///
2986 /// `compare_and_swap` also takes an [`Ordering`] argument which describes the memory
2987 /// ordering of this operation. Notice that even when using [`AcqRel`], the operation
2988 /// might fail and hence just perform an `Acquire` load, but not have `Release` semantics.
2989 /// Using [`Acquire`] makes the store part of this operation [`Relaxed`] if it
2990 /// happens, and using [`Release`] makes the load part [`Relaxed`].
2991 ///
2992 /// **Note**: This method is only available on platforms that support atomic operations on
2993 #[doc = concat!("[`", $s_int_type, "`].")]
2994 ///
2995 /// # Migrating to `compare_exchange` and `compare_exchange_weak`
2996 ///
2997 /// `compare_and_swap` is equivalent to `compare_exchange` with the following mapping for
2998 /// memory orderings:
2999 ///
3000 /// Original | Success | Failure
3001 /// -------- | ------- | -------
3002 /// Relaxed | Relaxed | Relaxed
3003 /// Acquire | Acquire | Acquire
3004 /// Release | Release | Relaxed
3005 /// AcqRel | AcqRel | Acquire
3006 /// SeqCst | SeqCst | SeqCst
3007 ///
3008 /// `compare_and_swap` and `compare_exchange` also differ in their return type. You can use
3009 /// `compare_exchange(...).unwrap_or_else(|x| x)` to recover the behavior of `compare_and_swap`,
3010 /// but in most cases it is more idiomatic to check whether the return value is `Ok` or `Err`
3011 /// rather than to infer success vs failure based on the value that was read.
3012 ///
3013 /// During migration, consider whether it makes sense to use `compare_exchange_weak` instead.
3014 /// `compare_exchange_weak` is allowed to fail spuriously even when the comparison succeeds,
3015 /// which allows the compiler to generate better assembly code when the compare and swap
3016 /// is used in a loop.
3017 ///
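/// For example, a call that used [`AcqRel`] can be migrated by applying the table
/// above (success ordering `AcqRel`, failure ordering `Acquire`), with
/// `unwrap_or_else` recovering the old return-value behavior. This is an
/// illustrative sketch of the mapping, not the only valid migration:
///
/// ```
#[doc = concat!($extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};")]
///
#[doc = concat!("let a = ", stringify!($atomic_type), "::new(1);")]
/// // Previously: let old = a.compare_and_swap(1, 2, Ordering::AcqRel);
/// let old = a.compare_exchange(1, 2, Ordering::AcqRel, Ordering::Acquire)
///     .unwrap_or_else(|x| x);
/// assert_eq!(old, 1);
/// ```
///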
3018 /// # Examples
3019 ///
3020 /// ```
3021 #[doc = concat!($extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};")]
3022 ///
3023 #[doc = concat!("let some_var = ", stringify!($atomic_type), "::new(5);")]
3024 ///
3025 /// assert_eq!(some_var.compare_and_swap(5, 10, Ordering::Relaxed), 5);
3026 /// assert_eq!(some_var.load(Ordering::Relaxed), 10);
3027 ///
3028 /// assert_eq!(some_var.compare_and_swap(6, 12, Ordering::Relaxed), 10);
3029 /// assert_eq!(some_var.load(Ordering::Relaxed), 10);
3030 /// ```
3031 #[inline]
3032 #[$stable]
3033 #[deprecated(
3034 since = "1.50.0",
3035 note = "Use `compare_exchange` or `compare_exchange_weak` instead")
3036 ]
3037 #[$cfg_cas]
3038 #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
3039 #[rustc_should_not_be_called_on_const_items]
3040 pub fn compare_and_swap(&self,
3041 current: $int_type,
3042 new: $int_type,
3043 order: Ordering) -> $int_type {
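// Pick the failure ordering from `order` using the same mapping as the
// migration table in the docs above.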
3044 match self.compare_exchange(current,
3045 new,
3046 order,
3047 strongest_failure_ordering(order)) {
3048 Ok(x) => x,
3049 Err(x) => x,
3050 }
3051 }
3052
3053 /// Stores a value into the atomic integer if the current value is the same as
3054 /// the `current` value.
3055 ///
3056 /// The return value is a result indicating whether the new value was written and
3057 /// containing the previous value. On success this value is guaranteed to be equal to
3058 /// `current`.
3059 ///
3060 /// `compare_exchange` takes two [`Ordering`] arguments to describe the memory
3061 /// ordering of this operation. `success` describes the required ordering for the
3062 /// read-modify-write operation that takes place if the comparison with `current` succeeds.
3063 /// `failure` describes the required ordering for the load operation that takes place when
3064 /// the comparison fails. Using [`Acquire`] as success ordering makes the store part
3065 /// of this operation [`Relaxed`], and using [`Release`] makes the successful load
3066 /// [`Relaxed`]. The failure ordering can only be [`SeqCst`], [`Acquire`] or [`Relaxed`].
3067 ///
3068 /// **Note**: This method is only available on platforms that support atomic operations on
3069 #[doc = concat!("[`", $s_int_type, "`].")]
3070 ///
3071 /// # Examples
3072 ///
3073 /// ```
3074 #[doc = concat!($extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};")]
3075 ///
3076 #[doc = concat!("let some_var = ", stringify!($atomic_type), "::new(5);")]
3077 ///
3078 /// assert_eq!(some_var.compare_exchange(5, 10,
3079 /// Ordering::Acquire,
3080 /// Ordering::Relaxed),
3081 /// Ok(5));
3082 /// assert_eq!(some_var.load(Ordering::Relaxed), 10);
3083 ///
3084 /// assert_eq!(some_var.compare_exchange(6, 12,
3085 /// Ordering::SeqCst,
3086 /// Ordering::Acquire),
3087 /// Err(10));
3088 /// assert_eq!(some_var.load(Ordering::Relaxed), 10);
3089 /// ```
3090 ///
3091 /// # Considerations
3092 ///
3093 /// `compare_exchange` is a [compare-and-swap operation] and thus exhibits the usual downsides
3094 /// of CAS operations. In particular, a load of the value followed by a successful
3095 /// `compare_exchange` with the previous load *does not ensure* that other threads have not
3096 /// changed the value in the interim! This is usually important when the *equality* check in
3097 /// the `compare_exchange` is being used to check the *identity* of a value, but equality
3098 /// does not necessarily imply identity. This is a particularly common case for pointers, as
3099 /// a pointer holding the same address does not imply that the same object exists at that
3100 /// address! In this case, `compare_exchange` can lead to the [ABA problem].
3101 ///
3102 /// [ABA Problem]: https://en.wikipedia.org/wiki/ABA_problem
3103 /// [compare-and-swap operation]: https://en.wikipedia.org/wiki/Compare-and-swap
3104 #[inline]
3105 #[$stable_cxchg]
3106 #[$cfg_cas]
3107 #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
3108 #[rustc_should_not_be_called_on_const_items]
3109 pub fn compare_exchange(&self,
3110 current: $int_type,
3111 new: $int_type,
3112 success: Ordering,
3113 failure: Ordering) -> Result<$int_type, $int_type> {
3114 // SAFETY: data races are prevented by atomic intrinsics.
3115 unsafe { atomic_compare_exchange(self.v.get(), current, new, success, failure) }
3116 }
3117
3118 /// Stores a value into the atomic integer if the current value is the same as
3119 /// the `current` value.
3120 ///
3121 #[doc = concat!("Unlike [`", stringify!($atomic_type), "::compare_exchange`],")]
3122 /// this function is allowed to spuriously fail even
3123 /// when the comparison succeeds, which can result in more efficient code on some
3124 /// platforms. The return value is a result indicating whether the new value was
3125 /// written and containing the previous value.
3126 ///
3127 /// `compare_exchange_weak` takes two [`Ordering`] arguments to describe the memory
3128 /// ordering of this operation. `success` describes the required ordering for the
3129 /// read-modify-write operation that takes place if the comparison with `current` succeeds.
3130 /// `failure` describes the required ordering for the load operation that takes place when
3131 /// the comparison fails. Using [`Acquire`] as success ordering makes the store part
3132 /// of this operation [`Relaxed`], and using [`Release`] makes the successful load
3133 /// [`Relaxed`]. The failure ordering can only be [`SeqCst`], [`Acquire`] or [`Relaxed`].
3134 ///
3135 /// **Note**: This method is only available on platforms that support atomic operations on
3136 #[doc = concat!("[`", $s_int_type, "`].")]
3137 ///
3138 /// # Examples
3139 ///
3140 /// ```
3141 #[doc = concat!($extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};")]
3142 ///
3143 #[doc = concat!("let val = ", stringify!($atomic_type), "::new(4);")]
3144 ///
3145 /// let mut old = val.load(Ordering::Relaxed);
3146 /// loop {
3147 /// let new = old * 2;
3148 /// match val.compare_exchange_weak(old, new, Ordering::SeqCst, Ordering::Relaxed) {
3149 /// Ok(_) => break,
3150 /// Err(x) => old = x,
3151 /// }
3152 /// }
3153 /// ```
3154 ///
3155 /// # Considerations
3156 ///
/// `compare_exchange_weak` is a [compare-and-swap operation] and thus exhibits the usual downsides
/// of CAS operations. In particular, a load of the value followed by a successful
/// `compare_exchange_weak` with the previous load *does not ensure* that other threads have not
/// changed the value in the interim. This is usually important when the *equality* check in
/// the `compare_exchange_weak` is being used to check the *identity* of a value, but equality
/// does not necessarily imply identity. This is a particularly common case for pointers, as
/// a pointer holding the same address does not imply that the same object exists at that
/// address! In this case, `compare_exchange_weak` can lead to the [ABA problem].
3165 ///
3166 /// [ABA Problem]: https://en.wikipedia.org/wiki/ABA_problem
3167 /// [compare-and-swap operation]: https://en.wikipedia.org/wiki/Compare-and-swap
3168 #[inline]
3169 #[$stable_cxchg]
3170 #[$cfg_cas]
3171 #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
3172 #[rustc_should_not_be_called_on_const_items]
3173 pub fn compare_exchange_weak(&self,
3174 current: $int_type,
3175 new: $int_type,
3176 success: Ordering,
3177 failure: Ordering) -> Result<$int_type, $int_type> {
3178 // SAFETY: data races are prevented by atomic intrinsics.
3179 unsafe {
3180 atomic_compare_exchange_weak(self.v.get(), current, new, success, failure)
3181 }
3182 }
3183
3184 /// Adds to the current value, returning the previous value.
3185 ///
3186 /// This operation wraps around on overflow.
3187 ///
3188 /// `fetch_add` takes an [`Ordering`] argument which describes the memory ordering
3189 /// of this operation. All ordering modes are possible. Note that using
3190 /// [`Acquire`] makes the store part of this operation [`Relaxed`], and
3191 /// using [`Release`] makes the load part [`Relaxed`].
3192 ///
3193 /// **Note**: This method is only available on platforms that support atomic operations on
3194 #[doc = concat!("[`", $s_int_type, "`].")]
3195 ///
3196 /// # Examples
3197 ///
3198 /// ```
3199 #[doc = concat!($extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};")]
3200 ///
3201 #[doc = concat!("let foo = ", stringify!($atomic_type), "::new(0);")]
3202 /// assert_eq!(foo.fetch_add(10, Ordering::SeqCst), 0);
3203 /// assert_eq!(foo.load(Ordering::SeqCst), 10);
3204 /// ```
3205 #[inline]
3206 #[$stable]
3207 #[$cfg_cas]
3208 #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
3209 #[rustc_should_not_be_called_on_const_items]
3210 pub fn fetch_add(&self, val: $int_type, order: Ordering) -> $int_type {
3211 // SAFETY: data races are prevented by atomic intrinsics.
3212 unsafe { atomic_add(self.v.get(), val, order) }
3213 }
3214
3215 /// Subtracts from the current value, returning the previous value.
3216 ///
3217 /// This operation wraps around on overflow.
3218 ///
3219 /// `fetch_sub` takes an [`Ordering`] argument which describes the memory ordering
3220 /// of this operation. All ordering modes are possible. Note that using
3221 /// [`Acquire`] makes the store part of this operation [`Relaxed`], and
3222 /// using [`Release`] makes the load part [`Relaxed`].
3223 ///
3224 /// **Note**: This method is only available on platforms that support atomic operations on
3225 #[doc = concat!("[`", $s_int_type, "`].")]
3226 ///
3227 /// # Examples
3228 ///
3229 /// ```
3230 #[doc = concat!($extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};")]
3231 ///
3232 #[doc = concat!("let foo = ", stringify!($atomic_type), "::new(20);")]
3233 /// assert_eq!(foo.fetch_sub(10, Ordering::SeqCst), 20);
3234 /// assert_eq!(foo.load(Ordering::SeqCst), 10);
3235 /// ```
3236 #[inline]
3237 #[$stable]
3238 #[$cfg_cas]
3239 #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
3240 #[rustc_should_not_be_called_on_const_items]
3241 pub fn fetch_sub(&self, val: $int_type, order: Ordering) -> $int_type {
3242 // SAFETY: data races are prevented by atomic intrinsics.
3243 unsafe { atomic_sub(self.v.get(), val, order) }
3244 }
3245
3246 /// Bitwise "and" with the current value.
3247 ///
3248 /// Performs a bitwise "and" operation on the current value and the argument `val`, and
3249 /// sets the new value to the result.
3250 ///
3251 /// Returns the previous value.
3252 ///
3253 /// `fetch_and` takes an [`Ordering`] argument which describes the memory ordering
3254 /// of this operation. All ordering modes are possible. Note that using
3255 /// [`Acquire`] makes the store part of this operation [`Relaxed`], and
3256 /// using [`Release`] makes the load part [`Relaxed`].
3257 ///
3258 /// **Note**: This method is only available on platforms that support atomic operations on
3259 #[doc = concat!("[`", $s_int_type, "`].")]
3260 ///
3261 /// # Examples
3262 ///
3263 /// ```
3264 #[doc = concat!($extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};")]
3265 ///
3266 #[doc = concat!("let foo = ", stringify!($atomic_type), "::new(0b101101);")]
3267 /// assert_eq!(foo.fetch_and(0b110011, Ordering::SeqCst), 0b101101);
3268 /// assert_eq!(foo.load(Ordering::SeqCst), 0b100001);
3269 /// ```
3270 #[inline]
3271 #[$stable]
3272 #[$cfg_cas]
3273 #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
3274 #[rustc_should_not_be_called_on_const_items]
3275 pub fn fetch_and(&self, val: $int_type, order: Ordering) -> $int_type {
3276 // SAFETY: data races are prevented by atomic intrinsics.
3277 unsafe { atomic_and(self.v.get(), val, order) }
3278 }
3279
3280 /// Bitwise "nand" with the current value.
3281 ///
3282 /// Performs a bitwise "nand" operation on the current value and the argument `val`, and
3283 /// sets the new value to the result.
3284 ///
3285 /// Returns the previous value.
3286 ///
3287 /// `fetch_nand` takes an [`Ordering`] argument which describes the memory ordering
3288 /// of this operation. All ordering modes are possible. Note that using
3289 /// [`Acquire`] makes the store part of this operation [`Relaxed`], and
3290 /// using [`Release`] makes the load part [`Relaxed`].
3291 ///
3292 /// **Note**: This method is only available on platforms that support atomic operations on
3293 #[doc = concat!("[`", $s_int_type, "`].")]
3294 ///
3295 /// # Examples
3296 ///
3297 /// ```
3298 #[doc = concat!($extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};")]
3299 ///
3300 #[doc = concat!("let foo = ", stringify!($atomic_type), "::new(0x13);")]
3301 /// assert_eq!(foo.fetch_nand(0x31, Ordering::SeqCst), 0x13);
3302 /// assert_eq!(foo.load(Ordering::SeqCst), !(0x13 & 0x31));
3303 /// ```
3304 #[inline]
3305 #[$stable_nand]
3306 #[$cfg_cas]
3307 #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
3308 #[rustc_should_not_be_called_on_const_items]
3309 pub fn fetch_nand(&self, val: $int_type, order: Ordering) -> $int_type {
3310 // SAFETY: data races are prevented by atomic intrinsics.
3311 unsafe { atomic_nand(self.v.get(), val, order) }
3312 }
3313
3314 /// Bitwise "or" with the current value.
3315 ///
3316 /// Performs a bitwise "or" operation on the current value and the argument `val`, and
3317 /// sets the new value to the result.
3318 ///
3319 /// Returns the previous value.
3320 ///
3321 /// `fetch_or` takes an [`Ordering`] argument which describes the memory ordering
3322 /// of this operation. All ordering modes are possible. Note that using
3323 /// [`Acquire`] makes the store part of this operation [`Relaxed`], and
3324 /// using [`Release`] makes the load part [`Relaxed`].
3325 ///
3326 /// **Note**: This method is only available on platforms that support atomic operations on
3327 #[doc = concat!("[`", $s_int_type, "`].")]
3328 ///
3329 /// # Examples
3330 ///
3331 /// ```
3332 #[doc = concat!($extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};")]
3333 ///
3334 #[doc = concat!("let foo = ", stringify!($atomic_type), "::new(0b101101);")]
3335 /// assert_eq!(foo.fetch_or(0b110011, Ordering::SeqCst), 0b101101);
3336 /// assert_eq!(foo.load(Ordering::SeqCst), 0b111111);
3337 /// ```
3338 #[inline]
3339 #[$stable]
3340 #[$cfg_cas]
3341 #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
3342 #[rustc_should_not_be_called_on_const_items]
3343 pub fn fetch_or(&self, val: $int_type, order: Ordering) -> $int_type {
3344 // SAFETY: data races are prevented by atomic intrinsics.
3345 unsafe { atomic_or(self.v.get(), val, order) }
3346 }
3347
3348 /// Bitwise "xor" with the current value.
3349 ///
3350 /// Performs a bitwise "xor" operation on the current value and the argument `val`, and
3351 /// sets the new value to the result.
3352 ///
3353 /// Returns the previous value.
3354 ///
3355 /// `fetch_xor` takes an [`Ordering`] argument which describes the memory ordering
3356 /// of this operation. All ordering modes are possible. Note that using
3357 /// [`Acquire`] makes the store part of this operation [`Relaxed`], and
3358 /// using [`Release`] makes the load part [`Relaxed`].
3359 ///
3360 /// **Note**: This method is only available on platforms that support atomic operations on
3361 #[doc = concat!("[`", $s_int_type, "`].")]
3362 ///
3363 /// # Examples
3364 ///
3365 /// ```
3366 #[doc = concat!($extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};")]
3367 ///
3368 #[doc = concat!("let foo = ", stringify!($atomic_type), "::new(0b101101);")]
3369 /// assert_eq!(foo.fetch_xor(0b110011, Ordering::SeqCst), 0b101101);
3370 /// assert_eq!(foo.load(Ordering::SeqCst), 0b011110);
3371 /// ```
3372 #[inline]
3373 #[$stable]
3374 #[$cfg_cas]
3375 #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
3376 #[rustc_should_not_be_called_on_const_items]
3377 pub fn fetch_xor(&self, val: $int_type, order: Ordering) -> $int_type {
3378 // SAFETY: data races are prevented by atomic intrinsics.
3379 unsafe { atomic_xor(self.v.get(), val, order) }
3380 }
3381
3382 /// Fetches the value, and applies a function to it that returns an optional
3383 /// new value. Returns a `Result` of `Ok(previous_value)` if the function returned `Some(_)`, else
3384 /// `Err(previous_value)`.
3385 ///
3386 /// Note: This may call the function multiple times if the value has been changed from other threads in
3387 /// the meantime, as long as the function returns `Some(_)`, but the function will have been applied
3388 /// only once to the stored value.
3389 ///
3390 /// `fetch_update` takes two [`Ordering`] arguments to describe the memory ordering of this operation.
3391 /// The first describes the required ordering for when the operation finally succeeds while the second
3392 /// describes the required ordering for loads. These correspond to the success and failure orderings of
3393 #[doc = concat!("[`", stringify!($atomic_type), "::compare_exchange`]")]
3394 /// respectively.
3395 ///
3396 /// Using [`Acquire`] as success ordering makes the store part
3397 /// of this operation [`Relaxed`], and using [`Release`] makes the final successful load
3398 /// [`Relaxed`]. The (failed) load ordering can only be [`SeqCst`], [`Acquire`] or [`Relaxed`].
3399 ///
3400 /// **Note**: This method is only available on platforms that support atomic operations on
3401 #[doc = concat!("[`", $s_int_type, "`].")]
3402 ///
3403 /// # Considerations
3404 ///
3405 /// This method is not magic; it is not provided by the hardware, and does not act like a
3406 /// critical section or mutex.
3407 ///
3408 /// It is implemented on top of an atomic [compare-and-swap operation], and thus is subject to
3409 /// the usual drawbacks of CAS operations. In particular, be careful of the [ABA problem]
3410 /// if this atomic integer is an index or more generally if knowledge of only the *bitwise value*
3411 /// of the atomic is not in and of itself sufficient to ensure any required preconditions.
3412 ///
3413 /// [ABA Problem]: https://en.wikipedia.org/wiki/ABA_problem
3414 /// [compare-and-swap operation]: https://en.wikipedia.org/wiki/Compare-and-swap
3415 ///
3416 /// # Examples
3417 ///
3418 /// ```rust
3419 #[doc = concat!($extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};")]
3420 ///
3421 #[doc = concat!("let x = ", stringify!($atomic_type), "::new(7);")]
3422 /// assert_eq!(x.fetch_update(Ordering::SeqCst, Ordering::SeqCst, |_| None), Err(7));
3423 /// assert_eq!(x.fetch_update(Ordering::SeqCst, Ordering::SeqCst, |x| Some(x + 1)), Ok(7));
3424 /// assert_eq!(x.fetch_update(Ordering::SeqCst, Ordering::SeqCst, |x| Some(x + 1)), Ok(8));
3425 /// assert_eq!(x.load(Ordering::SeqCst), 9);
3426 /// ```
3427 #[inline]
3428 #[stable(feature = "no_more_cas", since = "1.45.0")]
3429 #[$cfg_cas]
3430 #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
3431 #[rustc_should_not_be_called_on_const_items]
3432 pub fn fetch_update<F>(&self,
3433 set_order: Ordering,
3434 fetch_order: Ordering,
3435 mut f: F) -> Result<$int_type, $int_type>
3436 where F: FnMut($int_type) -> Option<$int_type> {
3437 let mut prev = self.load(fetch_order);
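// Re-apply `f` to each freshly observed value; a spurious failure of
// `compare_exchange_weak` simply takes another trip around the loop.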
3438 while let Some(next) = f(prev) {
3439 match self.compare_exchange_weak(prev, next, set_order, fetch_order) {
3440 x @ Ok(_) => return x,
3441 Err(next_prev) => prev = next_prev
3442 }
3443 }
3444 Err(prev)
3445 }
3446
3447 /// Fetches the value, and applies a function to it that returns an optional
3448 /// new value. Returns a `Result` of `Ok(previous_value)` if the function returned `Some(_)`, else
3449 /// `Err(previous_value)`.
3450 ///
3451 #[doc = concat!("See also: [`update`](`", stringify!($atomic_type), "::update`).")]
3452 ///
3453 /// Note: This may call the function multiple times if the value has been changed from other threads in
3454 /// the meantime, as long as the function returns `Some(_)`, but the function will have been applied
3455 /// only once to the stored value.
3456 ///
3457 /// `try_update` takes two [`Ordering`] arguments to describe the memory ordering of this operation.
3458 /// The first describes the required ordering for when the operation finally succeeds while the second
3459 /// describes the required ordering for loads. These correspond to the success and failure orderings of
3460 #[doc = concat!("[`", stringify!($atomic_type), "::compare_exchange`]")]
3461 /// respectively.
3462 ///
3463 /// Using [`Acquire`] as success ordering makes the store part
3464 /// of this operation [`Relaxed`], and using [`Release`] makes the final successful load
3465 /// [`Relaxed`]. The (failed) load ordering can only be [`SeqCst`], [`Acquire`] or [`Relaxed`].
3466 ///
3467 /// **Note**: This method is only available on platforms that support atomic operations on
3468 #[doc = concat!("[`", $s_int_type, "`].")]
3469 ///
3470 /// # Considerations
3471 ///
3472 /// This method is not magic; it is not provided by the hardware, and does not act like a
3473 /// critical section or mutex.
3474 ///
3475 /// It is implemented on top of an atomic [compare-and-swap operation], and thus is subject to
3476 /// the usual drawbacks of CAS operations. In particular, be careful of the [ABA problem]
3477 /// if this atomic integer is an index or more generally if knowledge of only the *bitwise value*
3478 /// of the atomic is not in and of itself sufficient to ensure any required preconditions.
3479 ///
3480 /// [ABA Problem]: https://en.wikipedia.org/wiki/ABA_problem
3481 /// [compare-and-swap operation]: https://en.wikipedia.org/wiki/Compare-and-swap
3482 ///
3483 /// # Examples
3484 ///
3485 /// ```rust
3486 /// #![feature(atomic_try_update)]
3487 #[doc = concat!($extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};")]
3488 ///
3489 #[doc = concat!("let x = ", stringify!($atomic_type), "::new(7);")]
3490 /// assert_eq!(x.try_update(Ordering::SeqCst, Ordering::SeqCst, |_| None), Err(7));
3491 /// assert_eq!(x.try_update(Ordering::SeqCst, Ordering::SeqCst, |x| Some(x + 1)), Ok(7));
3492 /// assert_eq!(x.try_update(Ordering::SeqCst, Ordering::SeqCst, |x| Some(x + 1)), Ok(8));
3493 /// assert_eq!(x.load(Ordering::SeqCst), 9);
3494 /// ```
3495 #[inline]
3496 #[unstable(feature = "atomic_try_update", issue = "135894")]
3497 #[$cfg_cas]
3498 #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
3499 #[rustc_should_not_be_called_on_const_items]
3500 pub fn try_update(
3501 &self,
3502 set_order: Ordering,
3503 fetch_order: Ordering,
3504 f: impl FnMut($int_type) -> Option<$int_type>,
3505 ) -> Result<$int_type, $int_type> {
3506 // FIXME(atomic_try_update): this is currently an unstable alias to `fetch_update`;
3507 // when stabilizing, turn `fetch_update` into a deprecated alias to `try_update`.
3508 self.fetch_update(set_order, fetch_order, f)
3509 }
3510
/// Fetches the value, and applies a function to it that returns a new value.
/// The new value is stored and the old value is returned.
3513 ///
3514 #[doc = concat!("See also: [`try_update`](`", stringify!($atomic_type), "::try_update`).")]
3515 ///
3516 /// Note: This may call the function multiple times if the value has been changed from other threads in
3517 /// the meantime, but the function will have been applied only once to the stored value.
3518 ///
3519 /// `update` takes two [`Ordering`] arguments to describe the memory ordering of this operation.
3520 /// The first describes the required ordering for when the operation finally succeeds while the second
3521 /// describes the required ordering for loads. These correspond to the success and failure orderings of
3522 #[doc = concat!("[`", stringify!($atomic_type), "::compare_exchange`]")]
3523 /// respectively.
3524 ///
3525 /// Using [`Acquire`] as success ordering makes the store part
3526 /// of this operation [`Relaxed`], and using [`Release`] makes the final successful load
3527 /// [`Relaxed`]. The (failed) load ordering can only be [`SeqCst`], [`Acquire`] or [`Relaxed`].
3528 ///
3529 /// **Note**: This method is only available on platforms that support atomic operations on
3530 #[doc = concat!("[`", $s_int_type, "`].")]
3531 ///
3532 /// # Considerations
3533 ///
3535 /// This method is not magic; it is not provided by the hardware, and does not act like a
3536 /// critical section or mutex.
3537 ///
3538 /// It is implemented on top of an atomic [compare-and-swap operation], and thus is subject to
3539 /// the usual drawbacks of CAS operations. In particular, be careful of the [ABA problem]
3540 /// if this atomic integer is an index or more generally if knowledge of only the *bitwise value*
3541 /// of the atomic is not in and of itself sufficient to ensure any required preconditions.
3542 ///
3543 /// [ABA Problem]: https://en.wikipedia.org/wiki/ABA_problem
3544 /// [compare-and-swap operation]: https://en.wikipedia.org/wiki/Compare-and-swap
3545 ///
3546 /// # Examples
3547 ///
3548 /// ```rust
3549 /// #![feature(atomic_try_update)]
3550 #[doc = concat!($extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};")]
3551 ///
3552 #[doc = concat!("let x = ", stringify!($atomic_type), "::new(7);")]
3553 /// assert_eq!(x.update(Ordering::SeqCst, Ordering::SeqCst, |x| x + 1), 7);
3554 /// assert_eq!(x.update(Ordering::SeqCst, Ordering::SeqCst, |x| x + 1), 8);
3555 /// assert_eq!(x.load(Ordering::SeqCst), 9);
3556 /// ```
3557 #[inline]
3558 #[unstable(feature = "atomic_try_update", issue = "135894")]
3559 #[$cfg_cas]
3560 #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
3561 #[rustc_should_not_be_called_on_const_items]
3562 pub fn update(
3563 &self,
3564 set_order: Ordering,
3565 fetch_order: Ordering,
3566 mut f: impl FnMut($int_type) -> $int_type,
3567 ) -> $int_type {
3568 let mut prev = self.load(fetch_order);
3569 loop {
3570 match self.compare_exchange_weak(prev, f(prev), set_order, fetch_order) {
3571 Ok(x) => break x,
3572 Err(next_prev) => prev = next_prev,
3573 }
3574 }
3575 }
3576
3577 /// Maximum with the current value.
3578 ///
3579 /// Finds the maximum of the current value and the argument `val`, and
3580 /// sets the new value to the result.
3581 ///
3582 /// Returns the previous value.
3583 ///
3584 /// `fetch_max` takes an [`Ordering`] argument which describes the memory ordering
3585 /// of this operation. All ordering modes are possible. Note that using
3586 /// [`Acquire`] makes the store part of this operation [`Relaxed`], and
3587 /// using [`Release`] makes the load part [`Relaxed`].
3588 ///
3589 /// **Note**: This method is only available on platforms that support atomic operations on
3590 #[doc = concat!("[`", $s_int_type, "`].")]
3591 ///
3592 /// # Examples
3593 ///
3594 /// ```
3595 #[doc = concat!($extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};")]
3596 ///
3597 #[doc = concat!("let foo = ", stringify!($atomic_type), "::new(23);")]
3598 /// assert_eq!(foo.fetch_max(42, Ordering::SeqCst), 23);
3599 /// assert_eq!(foo.load(Ordering::SeqCst), 42);
3600 /// ```
3601 ///
3602 /// If you want to obtain the maximum value in one step, you can use the following:
3603 ///
3604 /// ```
3605 #[doc = concat!($extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};")]
3606 ///
3607 #[doc = concat!("let foo = ", stringify!($atomic_type), "::new(23);")]
3608 /// let bar = 42;
3609 /// let max_foo = foo.fetch_max(bar, Ordering::SeqCst).max(bar);
/// assert_eq!(max_foo, 42);
3611 /// ```
3612 #[inline]
3613 #[stable(feature = "atomic_min_max", since = "1.45.0")]
3614 #[$cfg_cas]
3615 #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
3616 #[rustc_should_not_be_called_on_const_items]
3617 pub fn fetch_max(&self, val: $int_type, order: Ordering) -> $int_type {
3618 // SAFETY: data races are prevented by atomic intrinsics.
3619 unsafe { $max_fn(self.v.get(), val, order) }
3620 }
3621
3622 /// Minimum with the current value.
3623 ///
3624 /// Finds the minimum of the current value and the argument `val`, and
3625 /// sets the new value to the result.
3626 ///
3627 /// Returns the previous value.
3628 ///
3629 /// `fetch_min` takes an [`Ordering`] argument which describes the memory ordering
3630 /// of this operation. All ordering modes are possible. Note that using
3631 /// [`Acquire`] makes the store part of this operation [`Relaxed`], and
3632 /// using [`Release`] makes the load part [`Relaxed`].
3633 ///
3634 /// **Note**: This method is only available on platforms that support atomic operations on
3635 #[doc = concat!("[`", $s_int_type, "`].")]
3636 ///
3637 /// # Examples
3638 ///
3639 /// ```
3640 #[doc = concat!($extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};")]
3641 ///
3642 #[doc = concat!("let foo = ", stringify!($atomic_type), "::new(23);")]
3643 /// assert_eq!(foo.fetch_min(42, Ordering::Relaxed), 23);
3644 /// assert_eq!(foo.load(Ordering::Relaxed), 23);
3645 /// assert_eq!(foo.fetch_min(22, Ordering::Relaxed), 23);
3646 /// assert_eq!(foo.load(Ordering::Relaxed), 22);
3647 /// ```
3648 ///
3649 /// If you want to obtain the minimum value in one step, you can use the following:
3650 ///
3651 /// ```
3652 #[doc = concat!($extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};")]
3653 ///
3654 #[doc = concat!("let foo = ", stringify!($atomic_type), "::new(23);")]
3655 /// let bar = 12;
3656 /// let min_foo = foo.fetch_min(bar, Ordering::SeqCst).min(bar);
3657 /// assert_eq!(min_foo, 12);
3658 /// ```
3659 #[inline]
3660 #[stable(feature = "atomic_min_max", since = "1.45.0")]
3661 #[$cfg_cas]
3662 #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
3663 #[rustc_should_not_be_called_on_const_items]
3664 pub fn fetch_min(&self, val: $int_type, order: Ordering) -> $int_type {
3665 // SAFETY: data races are prevented by atomic intrinsics.
3666 unsafe { $min_fn(self.v.get(), val, order) }
3667 }
3668
3669 /// Returns a mutable pointer to the underlying integer.
3670 ///
3671 /// Doing non-atomic reads and writes on the resulting integer can be a data race.
3672 /// This method is mostly useful for FFI, where the function signature may use
3673 #[doc = concat!("`*mut ", stringify!($int_type), "` instead of `&", stringify!($atomic_type), "`.")]
3674 ///
3675 /// Returning an `*mut` pointer from a shared reference to this atomic is safe because the
3676 /// atomic types work with interior mutability. All modifications of an atomic change the value
3677 /// through a shared reference, and can do so safely as long as they use atomic operations. Any
3678 /// use of the returned raw pointer requires an `unsafe` block and still has to uphold the
3679 /// requirements of the [memory model].
3680 ///
3681 /// # Examples
3682 ///
3683 /// ```ignore (extern-declaration)
3684 /// # fn main() {
3685 #[doc = concat!($extra_feature, "use std::sync::atomic::", stringify!($atomic_type), ";")]
3686 ///
3687 /// extern "C" {
3688 #[doc = concat!(" fn my_atomic_op(arg: *mut ", stringify!($int_type), ");")]
3689 /// }
3690 ///
3691 #[doc = concat!("let atomic = ", stringify!($atomic_type), "::new(1);")]
3692 ///
3693 /// // SAFETY: Safe as long as `my_atomic_op` is atomic.
3694 /// unsafe {
3695 /// my_atomic_op(atomic.as_ptr());
3696 /// }
3697 /// # }
3698 /// ```
3699 ///
3700 /// [memory model]: self#memory-model-for-atomic-accesses
3701 #[inline]
3702 #[stable(feature = "atomic_as_ptr", since = "1.70.0")]
3703 #[rustc_const_stable(feature = "atomic_as_ptr", since = "1.70.0")]
3704 #[rustc_never_returns_null_ptr]
3705 pub const fn as_ptr(&self) -> *mut $int_type {
3706 self.v.get()
3707 }
3708 }
3709 }
3710}
3711
3712#[cfg(target_has_atomic_load_store = "8")]
3713atomic_int! {
3714 cfg(target_has_atomic = "8"),
3715 cfg(target_has_atomic_equal_alignment = "8"),
3716 stable(feature = "integer_atomics_stable", since = "1.34.0"),
3717 stable(feature = "integer_atomics_stable", since = "1.34.0"),
3718 stable(feature = "integer_atomics_stable", since = "1.34.0"),
3719 stable(feature = "integer_atomics_stable", since = "1.34.0"),
3720 stable(feature = "integer_atomics_stable", since = "1.34.0"),
3721 stable(feature = "integer_atomics_stable", since = "1.34.0"),
3722 rustc_const_stable(feature = "const_integer_atomics", since = "1.34.0"),
3723 rustc_const_stable(feature = "const_atomic_into_inner", since = "1.79.0"),
3724 rustc_diagnostic_item = "AtomicI8",
3725 "i8",
3726 "",
3727 atomic_min, atomic_max,
3728 1,
3729 i8 AtomicI8
3730}
3731#[cfg(target_has_atomic_load_store = "8")]
3732atomic_int! {
3733 cfg(target_has_atomic = "8"),
3734 cfg(target_has_atomic_equal_alignment = "8"),
3735 stable(feature = "integer_atomics_stable", since = "1.34.0"),
3736 stable(feature = "integer_atomics_stable", since = "1.34.0"),
3737 stable(feature = "integer_atomics_stable", since = "1.34.0"),
3738 stable(feature = "integer_atomics_stable", since = "1.34.0"),
3739 stable(feature = "integer_atomics_stable", since = "1.34.0"),
3740 stable(feature = "integer_atomics_stable", since = "1.34.0"),
3741 rustc_const_stable(feature = "const_integer_atomics", since = "1.34.0"),
3742 rustc_const_stable(feature = "const_atomic_into_inner", since = "1.79.0"),
3743 rustc_diagnostic_item = "AtomicU8",
3744 "u8",
3745 "",
3746 atomic_umin, atomic_umax,
3747 1,
3748 u8 AtomicU8
3749}
3750#[cfg(target_has_atomic_load_store = "16")]
3751atomic_int! {
3752 cfg(target_has_atomic = "16"),
3753 cfg(target_has_atomic_equal_alignment = "16"),
3754 stable(feature = "integer_atomics_stable", since = "1.34.0"),
3755 stable(feature = "integer_atomics_stable", since = "1.34.0"),
3756 stable(feature = "integer_atomics_stable", since = "1.34.0"),
3757 stable(feature = "integer_atomics_stable", since = "1.34.0"),
3758 stable(feature = "integer_atomics_stable", since = "1.34.0"),
3759 stable(feature = "integer_atomics_stable", since = "1.34.0"),
3760 rustc_const_stable(feature = "const_integer_atomics", since = "1.34.0"),
3761 rustc_const_stable(feature = "const_atomic_into_inner", since = "1.79.0"),
3762 rustc_diagnostic_item = "AtomicI16",
3763 "i16",
3764 "",
3765 atomic_min, atomic_max,
3766 2,
3767 i16 AtomicI16
3768}
3769#[cfg(target_has_atomic_load_store = "16")]
3770atomic_int! {
3771 cfg(target_has_atomic = "16"),
3772 cfg(target_has_atomic_equal_alignment = "16"),
3773 stable(feature = "integer_atomics_stable", since = "1.34.0"),
3774 stable(feature = "integer_atomics_stable", since = "1.34.0"),
3775 stable(feature = "integer_atomics_stable", since = "1.34.0"),
3776 stable(feature = "integer_atomics_stable", since = "1.34.0"),
3777 stable(feature = "integer_atomics_stable", since = "1.34.0"),
3778 stable(feature = "integer_atomics_stable", since = "1.34.0"),
3779 rustc_const_stable(feature = "const_integer_atomics", since = "1.34.0"),
3780 rustc_const_stable(feature = "const_atomic_into_inner", since = "1.79.0"),
3781 rustc_diagnostic_item = "AtomicU16",
3782 "u16",
3783 "",
3784 atomic_umin, atomic_umax,
3785 2,
3786 u16 AtomicU16
3787}
3788#[cfg(target_has_atomic_load_store = "32")]
3789atomic_int! {
3790 cfg(target_has_atomic = "32"),
3791 cfg(target_has_atomic_equal_alignment = "32"),
3792 stable(feature = "integer_atomics_stable", since = "1.34.0"),
3793 stable(feature = "integer_atomics_stable", since = "1.34.0"),
3794 stable(feature = "integer_atomics_stable", since = "1.34.0"),
3795 stable(feature = "integer_atomics_stable", since = "1.34.0"),
3796 stable(feature = "integer_atomics_stable", since = "1.34.0"),
3797 stable(feature = "integer_atomics_stable", since = "1.34.0"),
3798 rustc_const_stable(feature = "const_integer_atomics", since = "1.34.0"),
3799 rustc_const_stable(feature = "const_atomic_into_inner", since = "1.79.0"),
3800 rustc_diagnostic_item = "AtomicI32",
3801 "i32",
3802 "",
3803 atomic_min, atomic_max,
3804 4,
3805 i32 AtomicI32
3806}
3807#[cfg(target_has_atomic_load_store = "32")]
3808atomic_int! {
3809 cfg(target_has_atomic = "32"),
3810 cfg(target_has_atomic_equal_alignment = "32"),
3811 stable(feature = "integer_atomics_stable", since = "1.34.0"),
3812 stable(feature = "integer_atomics_stable", since = "1.34.0"),
3813 stable(feature = "integer_atomics_stable", since = "1.34.0"),
3814 stable(feature = "integer_atomics_stable", since = "1.34.0"),
3815 stable(feature = "integer_atomics_stable", since = "1.34.0"),
3816 stable(feature = "integer_atomics_stable", since = "1.34.0"),
3817 rustc_const_stable(feature = "const_integer_atomics", since = "1.34.0"),
3818 rustc_const_stable(feature = "const_atomic_into_inner", since = "1.79.0"),
3819 rustc_diagnostic_item = "AtomicU32",
3820 "u32",
3821 "",
3822 atomic_umin, atomic_umax,
3823 4,
3824 u32 AtomicU32
3825}
3826#[cfg(target_has_atomic_load_store = "64")]
3827atomic_int! {
3828 cfg(target_has_atomic = "64"),
3829 cfg(target_has_atomic_equal_alignment = "64"),
3830 stable(feature = "integer_atomics_stable", since = "1.34.0"),
3831 stable(feature = "integer_atomics_stable", since = "1.34.0"),
3832 stable(feature = "integer_atomics_stable", since = "1.34.0"),
3833 stable(feature = "integer_atomics_stable", since = "1.34.0"),
3834 stable(feature = "integer_atomics_stable", since = "1.34.0"),
3835 stable(feature = "integer_atomics_stable", since = "1.34.0"),
3836 rustc_const_stable(feature = "const_integer_atomics", since = "1.34.0"),
3837 rustc_const_stable(feature = "const_atomic_into_inner", since = "1.79.0"),
3838 rustc_diagnostic_item = "AtomicI64",
3839 "i64",
3840 "",
3841 atomic_min, atomic_max,
3842 8,
3843 i64 AtomicI64
3844}
3845#[cfg(target_has_atomic_load_store = "64")]
3846atomic_int! {
3847 cfg(target_has_atomic = "64"),
3848 cfg(target_has_atomic_equal_alignment = "64"),
3849 stable(feature = "integer_atomics_stable", since = "1.34.0"),
3850 stable(feature = "integer_atomics_stable", since = "1.34.0"),
3851 stable(feature = "integer_atomics_stable", since = "1.34.0"),
3852 stable(feature = "integer_atomics_stable", since = "1.34.0"),
3853 stable(feature = "integer_atomics_stable", since = "1.34.0"),
3854 stable(feature = "integer_atomics_stable", since = "1.34.0"),
3855 rustc_const_stable(feature = "const_integer_atomics", since = "1.34.0"),
3856 rustc_const_stable(feature = "const_atomic_into_inner", since = "1.79.0"),
3857 rustc_diagnostic_item = "AtomicU64",
3858 "u64",
3859 "",
3860 atomic_umin, atomic_umax,
3861 8,
3862 u64 AtomicU64
3863}
3864#[cfg(target_has_atomic_load_store = "128")]
3865atomic_int! {
3866 cfg(target_has_atomic = "128"),
3867 cfg(target_has_atomic_equal_alignment = "128"),
3868 unstable(feature = "integer_atomics", issue = "99069"),
3869 unstable(feature = "integer_atomics", issue = "99069"),
3870 unstable(feature = "integer_atomics", issue = "99069"),
3871 unstable(feature = "integer_atomics", issue = "99069"),
3872 unstable(feature = "integer_atomics", issue = "99069"),
3873 unstable(feature = "integer_atomics", issue = "99069"),
3874 rustc_const_unstable(feature = "integer_atomics", issue = "99069"),
3875 rustc_const_unstable(feature = "integer_atomics", issue = "99069"),
3876 rustc_diagnostic_item = "AtomicI128",
3877 "i128",
3878 "#![feature(integer_atomics)]\n\n",
3879 atomic_min, atomic_max,
3880 16,
3881 i128 AtomicI128
3882}
3883#[cfg(target_has_atomic_load_store = "128")]
3884atomic_int! {
3885 cfg(target_has_atomic = "128"),
3886 cfg(target_has_atomic_equal_alignment = "128"),
3887 unstable(feature = "integer_atomics", issue = "99069"),
3888 unstable(feature = "integer_atomics", issue = "99069"),
3889 unstable(feature = "integer_atomics", issue = "99069"),
3890 unstable(feature = "integer_atomics", issue = "99069"),
3891 unstable(feature = "integer_atomics", issue = "99069"),
3892 unstable(feature = "integer_atomics", issue = "99069"),
3893 rustc_const_unstable(feature = "integer_atomics", issue = "99069"),
3894 rustc_const_unstable(feature = "integer_atomics", issue = "99069"),
3895 rustc_diagnostic_item = "AtomicU128",
3896 "u128",
3897 "#![feature(integer_atomics)]\n\n",
3898 atomic_umin, atomic_umax,
3899 16,
3900 u128 AtomicU128
3901}
3902
3903#[cfg(target_has_atomic_load_store = "ptr")]
3904macro_rules! atomic_int_ptr_sized {
3905 ( $($target_pointer_width:literal $align:literal)* ) => { $(
3906 #[cfg(target_pointer_width = $target_pointer_width)]
3907 atomic_int! {
3908 cfg(target_has_atomic = "ptr"),
3909 cfg(target_has_atomic_equal_alignment = "ptr"),
3910 stable(feature = "rust1", since = "1.0.0"),
3911 stable(feature = "extended_compare_and_swap", since = "1.10.0"),
3912 stable(feature = "atomic_debug", since = "1.3.0"),
3913 stable(feature = "atomic_access", since = "1.15.0"),
3914 stable(feature = "atomic_from", since = "1.23.0"),
3915 stable(feature = "atomic_nand", since = "1.27.0"),
3916 rustc_const_stable(feature = "const_ptr_sized_atomics", since = "1.24.0"),
3917 rustc_const_stable(feature = "const_atomic_into_inner", since = "1.79.0"),
3918 rustc_diagnostic_item = "AtomicIsize",
3919 "isize",
3920 "",
3921 atomic_min, atomic_max,
3922 $align,
3923 isize AtomicIsize
3924 }
3925 #[cfg(target_pointer_width = $target_pointer_width)]
3926 atomic_int! {
3927 cfg(target_has_atomic = "ptr"),
3928 cfg(target_has_atomic_equal_alignment = "ptr"),
3929 stable(feature = "rust1", since = "1.0.0"),
3930 stable(feature = "extended_compare_and_swap", since = "1.10.0"),
3931 stable(feature = "atomic_debug", since = "1.3.0"),
3932 stable(feature = "atomic_access", since = "1.15.0"),
3933 stable(feature = "atomic_from", since = "1.23.0"),
3934 stable(feature = "atomic_nand", since = "1.27.0"),
3935 rustc_const_stable(feature = "const_ptr_sized_atomics", since = "1.24.0"),
3936 rustc_const_stable(feature = "const_atomic_into_inner", since = "1.79.0"),
3937 rustc_diagnostic_item = "AtomicUsize",
3938 "usize",
3939 "",
3940 atomic_umin, atomic_umax,
3941 $align,
3942 usize AtomicUsize
3943 }
3944
3945 /// An [`AtomicIsize`] initialized to `0`.
3946 #[cfg(target_pointer_width = $target_pointer_width)]
3947 #[stable(feature = "rust1", since = "1.0.0")]
3948 #[deprecated(
3949 since = "1.34.0",
3950 note = "the `new` function is now preferred",
3951 suggestion = "AtomicIsize::new(0)",
3952 )]
3953 pub const ATOMIC_ISIZE_INIT: AtomicIsize = AtomicIsize::new(0);
3954
3955 /// An [`AtomicUsize`] initialized to `0`.
3956 #[cfg(target_pointer_width = $target_pointer_width)]
3957 #[stable(feature = "rust1", since = "1.0.0")]
3958 #[deprecated(
3959 since = "1.34.0",
3960 note = "the `new` function is now preferred",
3961 suggestion = "AtomicUsize::new(0)",
3962 )]
3963 pub const ATOMIC_USIZE_INIT: AtomicUsize = AtomicUsize::new(0);
3964 )* };
3965}
3966
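// Instantiate the pointer-sized atomics; each pair below is
// (target_pointer_width, alignment in bytes).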
3967#[cfg(target_has_atomic_load_store = "ptr")]
3968atomic_int_ptr_sized! {
3969 "16" 2
3970 "32" 4
3971 "64" 8
3972}
3973
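/// Maps the single ordering accepted by `compare_and_swap` to the strongest
/// failure ordering that `compare_exchange` permits for it.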
#[inline]
#[cfg(target_has_atomic)]
fn strongest_failure_ordering(order: Ordering) -> Ordering {
    match order {
        Release => Relaxed,
        Relaxed => Relaxed,
        SeqCst => SeqCst,
        Acquire => Acquire,
        AcqRel => Acquire,
    }
}

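/// Atomically stores `val` into `*dst` with the given ordering.
///
/// Panics if `order` is `Acquire` or `AcqRel`, which are not valid store orderings.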
#[inline]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
unsafe fn atomic_store<T: Copy>(dst: *mut T, val: T, order: Ordering) {
    // SAFETY: the caller must uphold the safety contract for `atomic_store`.
    unsafe {
        match order {
            Relaxed => intrinsics::atomic_store::<T, { AO::Relaxed }>(dst, val),
            Release => intrinsics::atomic_store::<T, { AO::Release }>(dst, val),
            SeqCst => intrinsics::atomic_store::<T, { AO::SeqCst }>(dst, val),
            Acquire => panic!("there is no such thing as an acquire store"),
            AcqRel => panic!("there is no such thing as an acquire-release store"),
        }
    }
}

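/// Atomically loads the value at `*dst` with the given ordering.
///
/// Panics if `order` is `Release` or `AcqRel`, which are not valid load orderings.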
#[inline]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
unsafe fn atomic_load<T: Copy>(dst: *const T, order: Ordering) -> T {
    // SAFETY: the caller must uphold the safety contract for `atomic_load`.
    unsafe {
        match order {
            Relaxed => intrinsics::atomic_load::<T, { AO::Relaxed }>(dst),
            Acquire => intrinsics::atomic_load::<T, { AO::Acquire }>(dst),
            SeqCst => intrinsics::atomic_load::<T, { AO::SeqCst }>(dst),
            Release => panic!("there is no such thing as a release load"),
            AcqRel => panic!("there is no such thing as an acquire-release load"),
        }
    }
}

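/// Atomically stores `val` into `*dst`, returning the previous value.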
#[inline]
#[cfg(target_has_atomic)]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
unsafe fn atomic_swap<T: Copy>(dst: *mut T, val: T, order: Ordering) -> T {
    // SAFETY: the caller must uphold the safety contract for `atomic_swap`.
    unsafe {
        match order {
            Relaxed => intrinsics::atomic_xchg::<T, { AO::Relaxed }>(dst, val),
            Acquire => intrinsics::atomic_xchg::<T, { AO::Acquire }>(dst, val),
            Release => intrinsics::atomic_xchg::<T, { AO::Release }>(dst, val),
            AcqRel => intrinsics::atomic_xchg::<T, { AO::AcqRel }>(dst, val),
            SeqCst => intrinsics::atomic_xchg::<T, { AO::SeqCst }>(dst, val),
        }
    }
}

/// Returns the previous value (like __sync_fetch_and_add).
#[inline]
#[cfg(target_has_atomic)]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
unsafe fn atomic_add<T: Copy, U: Copy>(dst: *mut T, val: U, order: Ordering) -> T {
    // SAFETY: the caller must uphold the safety contract for `atomic_add`.
    unsafe {
        match order {
            Relaxed => intrinsics::atomic_xadd::<T, U, { AO::Relaxed }>(dst, val),
            Acquire => intrinsics::atomic_xadd::<T, U, { AO::Acquire }>(dst, val),
            Release => intrinsics::atomic_xadd::<T, U, { AO::Release }>(dst, val),
            AcqRel => intrinsics::atomic_xadd::<T, U, { AO::AcqRel }>(dst, val),
            SeqCst => intrinsics::atomic_xadd::<T, U, { AO::SeqCst }>(dst, val),
        }
    }
}

/// Returns the previous value (like __sync_fetch_and_sub).
#[inline]
#[cfg(target_has_atomic)]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
unsafe fn atomic_sub<T: Copy, U: Copy>(dst: *mut T, val: U, order: Ordering) -> T {
    // SAFETY: the caller must uphold the safety contract for `atomic_sub`.
    unsafe {
        match order {
            Relaxed => intrinsics::atomic_xsub::<T, U, { AO::Relaxed }>(dst, val),
            Acquire => intrinsics::atomic_xsub::<T, U, { AO::Acquire }>(dst, val),
            Release => intrinsics::atomic_xsub::<T, U, { AO::Release }>(dst, val),
            AcqRel => intrinsics::atomic_xsub::<T, U, { AO::AcqRel }>(dst, val),
            SeqCst => intrinsics::atomic_xsub::<T, U, { AO::SeqCst }>(dst, val),
        }
    }
}

/// Publicly exposed for stdarch; nobody else should use this.
#[inline]
#[cfg(target_has_atomic)]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
#[unstable(feature = "core_intrinsics", issue = "none")]
#[doc(hidden)]
pub unsafe fn atomic_compare_exchange<T: Copy>(
    dst: *mut T,
    old: T,
    new: T,
    success: Ordering,
    failure: Ordering,
) -> Result<T, T> {
    // SAFETY: the caller must uphold the safety contract for `atomic_compare_exchange`.
    let (val, ok) = unsafe {
        match (success, failure) {
            (Relaxed, Relaxed) => {
                intrinsics::atomic_cxchg::<T, { AO::Relaxed }, { AO::Relaxed }>(dst, old, new)
            }
            (Relaxed, Acquire) => {
                intrinsics::atomic_cxchg::<T, { AO::Relaxed }, { AO::Acquire }>(dst, old, new)
            }
            (Relaxed, SeqCst) => {
                intrinsics::atomic_cxchg::<T, { AO::Relaxed }, { AO::SeqCst }>(dst, old, new)
            }
            (Acquire, Relaxed) => {
                intrinsics::atomic_cxchg::<T, { AO::Acquire }, { AO::Relaxed }>(dst, old, new)
            }
            (Acquire, Acquire) => {
                intrinsics::atomic_cxchg::<T, { AO::Acquire }, { AO::Acquire }>(dst, old, new)
            }
            (Acquire, SeqCst) => {
                intrinsics::atomic_cxchg::<T, { AO::Acquire }, { AO::SeqCst }>(dst, old, new)
            }
            (Release, Relaxed) => {
                intrinsics::atomic_cxchg::<T, { AO::Release }, { AO::Relaxed }>(dst, old, new)
            }
            (Release, Acquire) => {
                intrinsics::atomic_cxchg::<T, { AO::Release }, { AO::Acquire }>(dst, old, new)
            }
            (Release, SeqCst) => {
                intrinsics::atomic_cxchg::<T, { AO::Release }, { AO::SeqCst }>(dst, old, new)
            }
            (AcqRel, Relaxed) => {
                intrinsics::atomic_cxchg::<T, { AO::AcqRel }, { AO::Relaxed }>(dst, old, new)
            }
            (AcqRel, Acquire) => {
                intrinsics::atomic_cxchg::<T, { AO::AcqRel }, { AO::Acquire }>(dst, old, new)
            }
            (AcqRel, SeqCst) => {
                intrinsics::atomic_cxchg::<T, { AO::AcqRel }, { AO::SeqCst }>(dst, old, new)
            }
            (SeqCst, Relaxed) => {
                intrinsics::atomic_cxchg::<T, { AO::SeqCst }, { AO::Relaxed }>(dst, old, new)
            }
            (SeqCst, Acquire) => {
                intrinsics::atomic_cxchg::<T, { AO::SeqCst }, { AO::Acquire }>(dst, old, new)
            }
            (SeqCst, SeqCst) => {
                intrinsics::atomic_cxchg::<T, { AO::SeqCst }, { AO::SeqCst }>(dst, old, new)
            }
            (_, AcqRel) => panic!("there is no such thing as an acquire-release failure ordering"),
            (_, Release) => panic!("there is no such thing as a release failure ordering"),
        }
    };
    if ok { Ok(val) } else { Err(val) }
}

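/// Like `atomic_compare_exchange`, but is allowed to fail spuriously even when the
/// comparison succeeds, which can result in more efficient code on some platforms.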
#[inline]
#[cfg(target_has_atomic)]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
unsafe fn atomic_compare_exchange_weak<T: Copy>(
    dst: *mut T,
    old: T,
    new: T,
    success: Ordering,
    failure: Ordering,
) -> Result<T, T> {
    // SAFETY: the caller must uphold the safety contract for `atomic_compare_exchange_weak`.
    let (val, ok) = unsafe {
        match (success, failure) {
            (Relaxed, Relaxed) => {
                intrinsics::atomic_cxchgweak::<T, { AO::Relaxed }, { AO::Relaxed }>(dst, old, new)
            }
            (Relaxed, Acquire) => {
                intrinsics::atomic_cxchgweak::<T, { AO::Relaxed }, { AO::Acquire }>(dst, old, new)
            }
            (Relaxed, SeqCst) => {
                intrinsics::atomic_cxchgweak::<T, { AO::Relaxed }, { AO::SeqCst }>(dst, old, new)
            }
            (Acquire, Relaxed) => {
                intrinsics::atomic_cxchgweak::<T, { AO::Acquire }, { AO::Relaxed }>(dst, old, new)
            }
            (Acquire, Acquire) => {
                intrinsics::atomic_cxchgweak::<T, { AO::Acquire }, { AO::Acquire }>(dst, old, new)
            }
            (Acquire, SeqCst) => {
                intrinsics::atomic_cxchgweak::<T, { AO::Acquire }, { AO::SeqCst }>(dst, old, new)
            }
            (Release, Relaxed) => {
                intrinsics::atomic_cxchgweak::<T, { AO::Release }, { AO::Relaxed }>(dst, old, new)
            }
            (Release, Acquire) => {
                intrinsics::atomic_cxchgweak::<T, { AO::Release }, { AO::Acquire }>(dst, old, new)
            }
            (Release, SeqCst) => {
                intrinsics::atomic_cxchgweak::<T, { AO::Release }, { AO::SeqCst }>(dst, old, new)
            }
            (AcqRel, Relaxed) => {
                intrinsics::atomic_cxchgweak::<T, { AO::AcqRel }, { AO::Relaxed }>(dst, old, new)
            }
            (AcqRel, Acquire) => {
                intrinsics::atomic_cxchgweak::<T, { AO::AcqRel }, { AO::Acquire }>(dst, old, new)
            }
            (AcqRel, SeqCst) => {
                intrinsics::atomic_cxchgweak::<T, { AO::AcqRel }, { AO::SeqCst }>(dst, old, new)
            }
            (SeqCst, Relaxed) => {
                intrinsics::atomic_cxchgweak::<T, { AO::SeqCst }, { AO::Relaxed }>(dst, old, new)
            }
            (SeqCst, Acquire) => {
                intrinsics::atomic_cxchgweak::<T, { AO::SeqCst }, { AO::Acquire }>(dst, old, new)
            }
            (SeqCst, SeqCst) => {
                intrinsics::atomic_cxchgweak::<T, { AO::SeqCst }, { AO::SeqCst }>(dst, old, new)
            }
            (_, AcqRel) => panic!("there is no such thing as an acquire-release failure ordering"),
            (_, Release) => panic!("there is no such thing as a release failure ordering"),
        }
    };
    if ok { Ok(val) } else { Err(val) }
}

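/// Returns the previous value after a bitwise "and" with `val` (like __sync_fetch_and_and).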
#[inline]
#[cfg(target_has_atomic)]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
unsafe fn atomic_and<T: Copy, U: Copy>(dst: *mut T, val: U, order: Ordering) -> T {
    // SAFETY: the caller must uphold the safety contract for `atomic_and`
    unsafe {
        match order {
            Relaxed => intrinsics::atomic_and::<T, U, { AO::Relaxed }>(dst, val),
            Acquire => intrinsics::atomic_and::<T, U, { AO::Acquire }>(dst, val),
            Release => intrinsics::atomic_and::<T, U, { AO::Release }>(dst, val),
            AcqRel => intrinsics::atomic_and::<T, U, { AO::AcqRel }>(dst, val),
            SeqCst => intrinsics::atomic_and::<T, U, { AO::SeqCst }>(dst, val),
        }
    }
}

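/// Returns the previous value after a bitwise "nand" with `val` (like __sync_fetch_and_nand).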
#[inline]
#[cfg(target_has_atomic)]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
unsafe fn atomic_nand<T: Copy, U: Copy>(dst: *mut T, val: U, order: Ordering) -> T {
    // SAFETY: the caller must uphold the safety contract for `atomic_nand`
    unsafe {
        match order {
            Relaxed => intrinsics::atomic_nand::<T, U, { AO::Relaxed }>(dst, val),
            Acquire => intrinsics::atomic_nand::<T, U, { AO::Acquire }>(dst, val),
            Release => intrinsics::atomic_nand::<T, U, { AO::Release }>(dst, val),
            AcqRel => intrinsics::atomic_nand::<T, U, { AO::AcqRel }>(dst, val),
            SeqCst => intrinsics::atomic_nand::<T, U, { AO::SeqCst }>(dst, val),
        }
    }
}

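/// Returns the previous value after a bitwise "or" with `val` (like __sync_fetch_and_or).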
#[inline]
#[cfg(target_has_atomic)]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
unsafe fn atomic_or<T: Copy, U: Copy>(dst: *mut T, val: U, order: Ordering) -> T {
    // SAFETY: the caller must uphold the safety contract for `atomic_or`
    unsafe {
        match order {
            Relaxed => intrinsics::atomic_or::<T, U, { AO::Relaxed }>(dst, val),
            Acquire => intrinsics::atomic_or::<T, U, { AO::Acquire }>(dst, val),
            Release => intrinsics::atomic_or::<T, U, { AO::Release }>(dst, val),
            AcqRel => intrinsics::atomic_or::<T, U, { AO::AcqRel }>(dst, val),
            SeqCst => intrinsics::atomic_or::<T, U, { AO::SeqCst }>(dst, val),
        }
    }
}

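/// Returns the previous value after a bitwise "xor" with `val` (like __sync_fetch_and_xor).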
#[inline]
#[cfg(target_has_atomic)]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
unsafe fn atomic_xor<T: Copy, U: Copy>(dst: *mut T, val: U, order: Ordering) -> T {
    // SAFETY: the caller must uphold the safety contract for `atomic_xor`
    unsafe {
        match order {
            Relaxed => intrinsics::atomic_xor::<T, U, { AO::Relaxed }>(dst, val),
            Acquire => intrinsics::atomic_xor::<T, U, { AO::Acquire }>(dst, val),
            Release => intrinsics::atomic_xor::<T, U, { AO::Release }>(dst, val),
            AcqRel => intrinsics::atomic_xor::<T, U, { AO::AcqRel }>(dst, val),
            SeqCst => intrinsics::atomic_xor::<T, U, { AO::SeqCst }>(dst, val),
        }
    }
}

/// Updates `*dst` to the max value of `val` and the old value (signed comparison)
#[inline]
#[cfg(target_has_atomic)]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
unsafe fn atomic_max<T: Copy>(dst: *mut T, val: T, order: Ordering) -> T {
    // SAFETY: the caller must uphold the safety contract for `atomic_max`
    unsafe {
        match order {
            Relaxed => intrinsics::atomic_max::<T, { AO::Relaxed }>(dst, val),
            Acquire => intrinsics::atomic_max::<T, { AO::Acquire }>(dst, val),
            Release => intrinsics::atomic_max::<T, { AO::Release }>(dst, val),
            AcqRel => intrinsics::atomic_max::<T, { AO::AcqRel }>(dst, val),
            SeqCst => intrinsics::atomic_max::<T, { AO::SeqCst }>(dst, val),
        }
    }
}

/// Updates `*dst` to the min value of `val` and the old value (signed comparison)
#[inline]
#[cfg(target_has_atomic)]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
unsafe fn atomic_min<T: Copy>(dst: *mut T, val: T, order: Ordering) -> T {
    // SAFETY: the caller must uphold the safety contract for `atomic_min`
    unsafe {
        match order {
            Relaxed => intrinsics::atomic_min::<T, { AO::Relaxed }>(dst, val),
            Acquire => intrinsics::atomic_min::<T, { AO::Acquire }>(dst, val),
            Release => intrinsics::atomic_min::<T, { AO::Release }>(dst, val),
            AcqRel => intrinsics::atomic_min::<T, { AO::AcqRel }>(dst, val),
            SeqCst => intrinsics::atomic_min::<T, { AO::SeqCst }>(dst, val),
        }
    }
}

/// Updates `*dst` to the max value of `val` and the old value (unsigned comparison)
#[inline]
#[cfg(target_has_atomic)]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
unsafe fn atomic_umax<T: Copy>(dst: *mut T, val: T, order: Ordering) -> T {
    // SAFETY: the caller must uphold the safety contract for `atomic_umax`
    unsafe {
        match order {
            Relaxed => intrinsics::atomic_umax::<T, { AO::Relaxed }>(dst, val),
            Acquire => intrinsics::atomic_umax::<T, { AO::Acquire }>(dst, val),
            Release => intrinsics::atomic_umax::<T, { AO::Release }>(dst, val),
            AcqRel => intrinsics::atomic_umax::<T, { AO::AcqRel }>(dst, val),
            SeqCst => intrinsics::atomic_umax::<T, { AO::SeqCst }>(dst, val),
        }
    }
}

/// Updates `*dst` to the min value of `val` and the old value (unsigned comparison)
#[inline]
#[cfg(target_has_atomic)]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
unsafe fn atomic_umin<T: Copy>(dst: *mut T, val: T, order: Ordering) -> T {
    // SAFETY: the caller must uphold the safety contract for `atomic_umin`
    unsafe {
        match order {
            Relaxed => intrinsics::atomic_umin::<T, { AO::Relaxed }>(dst, val),
            Acquire => intrinsics::atomic_umin::<T, { AO::Acquire }>(dst, val),
            Release => intrinsics::atomic_umin::<T, { AO::Release }>(dst, val),
            AcqRel => intrinsics::atomic_umin::<T, { AO::AcqRel }>(dst, val),
            SeqCst => intrinsics::atomic_umin::<T, { AO::SeqCst }>(dst, val),
        }
    }
}

/// An atomic fence.
///
/// Fences create synchronization between themselves and atomic operations or fences in other
/// threads. To achieve this, a fence prevents the compiler and CPU from reordering certain types of
/// memory operations around it.
///
/// There are 3 different ways to use an atomic fence:
///
/// - atomic - fence synchronization: an atomic operation with (at least) [`Release`] ordering
///   semantics synchronizes with a fence with (at least) [`Acquire`] ordering semantics.
/// - fence - atomic synchronization: a fence with (at least) [`Release`] ordering semantics
///   synchronizes with an atomic operation with (at least) [`Acquire`] ordering semantics.
/// - fence - fence synchronization: a fence with (at least) [`Release`] ordering semantics
///   synchronizes with a fence with (at least) [`Acquire`] ordering semantics.
///
/// These 3 ways complement the regular, fence-less, atomic - atomic synchronization.
///
/// ## Atomic - Fence
///
/// An atomic operation on one thread will synchronize with a fence on another thread when:
///
/// - on thread 1:
///   - an atomic operation 'X' with (at least) [`Release`] ordering semantics on some atomic
///     object 'm',
///
/// - is paired on thread 2 with:
///   - an atomic read 'Y' with any ordering on 'm',
///   - followed by a fence 'B' with (at least) [`Acquire`] ordering semantics.
///
/// This provides a happens-before dependence between X and B.
///
/// ```text
///     Thread 1                                          Thread 2
///
/// m.store(3, Release); X ---------
///                                 |
///                                 |
///                                 -------------> Y  if m.load(Relaxed) == 3 {
///                                                B      fence(Acquire);
///                                                        ...
///                                                    }
/// ```
///
/// ## Fence - Atomic
///
/// A fence on one thread will synchronize with an atomic operation on another thread when:
///
/// - on thread 1:
///   - a fence 'A' with (at least) [`Release`] ordering semantics,
///   - followed by an atomic write 'X' with any ordering on some atomic object 'm',
///
/// - is paired on thread 2 with:
///   - an atomic operation 'Y' with (at least) [`Acquire`] ordering semantics.
///
/// This provides a happens-before dependence between A and Y.
///
/// ```text
///     Thread 1                                          Thread 2
///
/// fence(Release);      A
/// m.store(3, Relaxed); X ---------
///                                 |
///                                 |
///                                 -------------> Y  if m.load(Acquire) == 3 {
///                                                        ...
///                                                    }
/// ```
///
/// ## Fence - Fence
///
/// A fence on one thread will synchronize with a fence on another thread when:
///
/// - on thread 1:
///   - a fence 'A' which has (at least) [`Release`] ordering semantics,
///   - followed by an atomic write 'X' with any ordering on some atomic object 'm',
///
/// - is paired on thread 2 with:
///   - an atomic read 'Y' with any ordering on 'm',
///   - followed by a fence 'B' with (at least) [`Acquire`] ordering semantics.
///
/// This provides a happens-before dependence between A and B.
///
/// ```text
///     Thread 1                                          Thread 2
///
/// fence(Release);      A --------------
/// m.store(3, Relaxed); X ---------     |
///                                 |    |
///                                 |    |
///                                 -------------> Y  if m.load(Relaxed) == 3 {
///                                      |-------> B      fence(Acquire);
///                                                        ...
///                                                    }
/// ```
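///
/// As a minimal, runnable sketch of this fence - fence pattern (the names `READY` and
/// `DATA` are illustrative only):
///
/// ```
/// use std::sync::atomic::{fence, AtomicBool, Ordering};
/// use std::thread;
///
/// static READY: AtomicBool = AtomicBool::new(false);
/// static mut DATA: i32 = 0;
///
/// let t1 = thread::spawn(|| {
///     unsafe { DATA = 3 };                    // non-atomic write
///     fence(Ordering::Release);               // A
///     READY.store(true, Ordering::Relaxed);   // X
/// });
/// let t2 = thread::spawn(|| {
///     while !READY.load(Ordering::Relaxed) {} // Y (loops until it observes X)
///     fence(Ordering::Acquire);               // B
///     // A happens-before B, so this non-atomic read does not race with the write.
///     assert_eq!(unsafe { DATA }, 3);
/// });
/// t1.join().unwrap();
/// t2.join().unwrap();
/// ```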
///
/// ## Mandatory Atomic
///
/// Note that in the examples above, it is crucial that the accesses to `m` are atomic. Fences
/// cannot be used to establish synchronization between non-atomic accesses in different threads.
/// However, thanks to the happens-before relationship, any non-atomic accesses that happen-before
/// the atomic operation or fence with (at least) [`Release`] ordering semantics are now also
/// properly synchronized with any non-atomic accesses that happen-after the atomic operation or
/// fence with (at least) [`Acquire`] ordering semantics.
///
/// ## Memory Ordering
///
/// A fence which has [`SeqCst`] ordering, in addition to having both [`Acquire`] and [`Release`]
/// semantics, participates in the global program order of the other [`SeqCst`] operations and/or
/// fences.
///
/// Accepts [`Acquire`], [`Release`], [`AcqRel`] and [`SeqCst`] orderings.
///
/// # Panics
///
/// Panics if `order` is [`Relaxed`].
///
/// # Examples
///
/// ```
/// use std::sync::atomic::AtomicBool;
/// use std::sync::atomic::fence;
/// use std::sync::atomic::Ordering;
///
/// // A mutual exclusion primitive based on a spinlock.
/// pub struct Mutex {
///     flag: AtomicBool,
/// }
///
/// impl Mutex {
///     pub fn new() -> Mutex {
///         Mutex {
///             flag: AtomicBool::new(false),
///         }
///     }
///
///     pub fn lock(&self) {
///         // Wait until the old value is `false`.
///         while self
///             .flag
///             .compare_exchange_weak(false, true, Ordering::Relaxed, Ordering::Relaxed)
///             .is_err()
///         {}
///         // This fence synchronizes-with the store in `unlock`.
///         fence(Ordering::Acquire);
///     }
///
///     pub fn unlock(&self) {
///         self.flag.store(false, Ordering::Release);
///     }
/// }
/// ```
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_diagnostic_item = "fence"]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
pub fn fence(order: Ordering) {
    // SAFETY: using an atomic fence is safe.
    unsafe {
        match order {
            Acquire => intrinsics::atomic_fence::<{ AO::Acquire }>(),
            Release => intrinsics::atomic_fence::<{ AO::Release }>(),
            AcqRel => intrinsics::atomic_fence::<{ AO::AcqRel }>(),
            SeqCst => intrinsics::atomic_fence::<{ AO::SeqCst }>(),
            Relaxed => panic!("there is no such thing as a relaxed fence"),
        }
    }
}

/// A "compiler-only" atomic fence.
///
/// Like [`fence`], this function establishes synchronization with other atomic operations and
/// fences. However, unlike [`fence`], `compiler_fence` only establishes synchronization with
/// operations *in the same thread*. This may at first sound rather useless, since code within a
/// thread is typically already totally ordered and does not need any further synchronization.
/// However, there are cases where code can run on the same thread without being ordered:
/// - The most common case is that of a *signal handler*: a signal handler runs in the same thread
///   as the code it interrupted, but it is not ordered with respect to that code. `compiler_fence`
///   can be used to establish synchronization between a thread and its signal handler, the same way
///   that `fence` can be used to establish synchronization across threads.
/// - Similar situations can arise in embedded programming with interrupt handlers, or in custom
///   implementations of preemptive green threads. In general, `compiler_fence` can establish
///   synchronization with code that is guaranteed to run on the same hardware CPU.
///
/// See [`fence`] for how a fence can be used to achieve synchronization. Note that just like
/// [`fence`], synchronization still requires atomic operations to be used in both threads -- it is
/// not possible to perform synchronization entirely with fences and non-atomic operations.
///
/// `compiler_fence` does not emit any machine code, but restricts the kinds of memory re-ordering
/// the compiler is allowed to do. `compiler_fence` corresponds to [`atomic_signal_fence`] in C and
/// C++.
///
/// [`atomic_signal_fence`]: https://en.cppreference.com/w/cpp/atomic/atomic_signal_fence
///
/// # Panics
///
/// Panics if `order` is [`Relaxed`].
///
/// # Examples
///
/// Without the two `compiler_fence` calls, the read of `IMPORTANT_VARIABLE` in `signal_handler`
/// is *undefined behavior* due to a data race, despite everything happening in a single thread.
/// This is because the signal handler is considered to run concurrently with its associated
/// thread, and explicit synchronization is required to pass data between a thread and its
/// signal handler. The code below uses two `compiler_fence` calls to establish the usual
/// release-acquire synchronization pattern (see [`fence`] for an image).
///
/// ```
/// use std::sync::atomic::AtomicBool;
/// use std::sync::atomic::Ordering;
/// use std::sync::atomic::compiler_fence;
///
/// static mut IMPORTANT_VARIABLE: usize = 0;
/// static IS_READY: AtomicBool = AtomicBool::new(false);
///
/// fn main() {
///     unsafe { IMPORTANT_VARIABLE = 42 };
///     // Marks earlier writes as being released with future relaxed stores.
///     compiler_fence(Ordering::Release);
///     IS_READY.store(true, Ordering::Relaxed);
/// }
///
/// fn signal_handler() {
///     if IS_READY.load(Ordering::Relaxed) {
///         // Acquires writes that were released with relaxed stores that we read from.
///         compiler_fence(Ordering::Acquire);
///         assert_eq!(unsafe { IMPORTANT_VARIABLE }, 42);
///     }
/// }
/// ```
#[inline]
#[stable(feature = "compiler_fences", since = "1.21.0")]
#[rustc_diagnostic_item = "compiler_fence"]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
pub fn compiler_fence(order: Ordering) {
    // SAFETY: using an atomic fence is safe.
    unsafe {
        match order {
            Acquire => intrinsics::atomic_singlethreadfence::<{ AO::Acquire }>(),
            Release => intrinsics::atomic_singlethreadfence::<{ AO::Release }>(),
            AcqRel => intrinsics::atomic_singlethreadfence::<{ AO::AcqRel }>(),
            SeqCst => intrinsics::atomic_singlethreadfence::<{ AO::SeqCst }>(),
            Relaxed => panic!("there is no such thing as a relaxed fence"),
        }
    }
}

#[cfg(target_has_atomic_load_store = "8")]
#[stable(feature = "atomic_debug", since = "1.3.0")]
impl fmt::Debug for AtomicBool {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        fmt::Debug::fmt(&self.load(Ordering::Relaxed), f)
    }
}

#[cfg(target_has_atomic_load_store = "ptr")]
#[stable(feature = "atomic_debug", since = "1.3.0")]
impl<T> fmt::Debug for AtomicPtr<T> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        fmt::Debug::fmt(&self.load(Ordering::Relaxed), f)
    }
}

#[cfg(target_has_atomic_load_store = "ptr")]
#[stable(feature = "atomic_pointer", since = "1.24.0")]
impl<T> fmt::Pointer for AtomicPtr<T> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        fmt::Pointer::fmt(&self.load(Ordering::Relaxed), f)
    }
}

/// Signals the processor that it is inside a busy-wait spin-loop ("spin lock").
///
/// This function is deprecated in favor of [`hint::spin_loop`].
///
/// [`hint::spin_loop`]: crate::hint::spin_loop
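///
/// # Examples
///
/// A minimal sketch of the migration: call [`hint::spin_loop`] in new code instead of this
/// function (the flag and the bound on the loop below are illustrative only).
///
/// ```
/// use std::hint;
/// use std::sync::atomic::{AtomicBool, Ordering};
///
/// let flag = AtomicBool::new(false);
/// // Busy-wait (bounded here so the example terminates) until `flag` is set.
/// for _ in 0..100 {
///     if flag.load(Ordering::Relaxed) {
///         break;
///     }
///     hint::spin_loop();
/// }
/// ```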
#[inline]
#[stable(feature = "spin_loop_hint", since = "1.24.0")]
#[deprecated(since = "1.51.0", note = "use hint::spin_loop instead")]
pub fn spin_loop_hint() {
    spin_loop()
}