core/num/f16.rs
//! Constants for the `f16` half-precision floating point type.
//!
//! *[See also the `f16` primitive type][f16].*
//!
//! Mathematically significant numbers are provided in the `consts` sub-module.
//!
//! For the constants defined directly in this module
//! (as distinct from those defined in the `consts` sub-module),
//! new code should instead use the associated constants
//! defined directly on the `f16` type.
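//!
//! # Examples
//!
//! A small usage sketch; like the other doctests in this file, it is gated to a
//! target where `f16` support is assumed to be reliable.
//!
//! ```
//! #![feature(f16)]
//! # #[cfg(all(target_arch = "x86_64", target_os = "linux"))] {
//!
//! // Prefer the associated constants defined on the primitive type.
//! assert_eq!(f16::MAX, 6.5504e4_f16);
//! assert!(f16::EPSILON > 0.0);
//! # }
//! ```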

#![unstable(feature = "f16", issue = "116909")]

use crate::convert::FloatToInt;
use crate::num::FpCategory;
#[cfg(not(test))]
use crate::num::libm;
use crate::panic::const_assert;
use crate::{intrinsics, mem};

/// Basic mathematical constants.
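///
/// # Examples
///
/// A minimal sketch of using these constants, gated like the other `f16`
/// doctests in this file:
///
/// ```
/// #![feature(f16)]
/// # #[cfg(all(target_arch = "x86_64", target_os = "linux"))] {
///
/// use std::f16::consts;
///
/// // At `f16` precision, τ is exactly twice π.
/// assert_eq!(consts::TAU, 2.0 * consts::PI);
/// # }
/// ```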
#[unstable(feature = "f16", issue = "116909")]
#[rustc_diagnostic_item = "f16_consts_mod"]
pub mod consts {
    // FIXME: replace with mathematical constants from cmath.

    /// Archimedes' constant (π)
    #[unstable(feature = "f16", issue = "116909")]
    pub const PI: f16 = 3.14159265358979323846264338327950288_f16;

    /// The full circle constant (τ)
    ///
    /// Equal to 2π.
    #[unstable(feature = "f16", issue = "116909")]
    pub const TAU: f16 = 6.28318530717958647692528676655900577_f16;

    /// The golden ratio (φ)
    #[unstable(feature = "f16", issue = "116909")]
    // Also, #[unstable(feature = "more_float_constants", issue = "146939")]
    pub const PHI: f16 = 1.618033988749894848204586834365638118_f16;

    /// The Euler-Mascheroni constant (γ)
    #[unstable(feature = "f16", issue = "116909")]
    // Also, #[unstable(feature = "more_float_constants", issue = "146939")]
    pub const EGAMMA: f16 = 0.577215664901532860606512090082402431_f16;

    /// π/2
    #[unstable(feature = "f16", issue = "116909")]
    pub const FRAC_PI_2: f16 = 1.57079632679489661923132169163975144_f16;

    /// π/3
    #[unstable(feature = "f16", issue = "116909")]
    pub const FRAC_PI_3: f16 = 1.04719755119659774615421446109316763_f16;

    /// π/4
    #[unstable(feature = "f16", issue = "116909")]
    pub const FRAC_PI_4: f16 = 0.785398163397448309615660845819875721_f16;

    /// π/6
    #[unstable(feature = "f16", issue = "116909")]
    pub const FRAC_PI_6: f16 = 0.52359877559829887307710723054658381_f16;

    /// π/8
    #[unstable(feature = "f16", issue = "116909")]
    pub const FRAC_PI_8: f16 = 0.39269908169872415480783042290993786_f16;

    /// 1/π
    #[unstable(feature = "f16", issue = "116909")]
    pub const FRAC_1_PI: f16 = 0.318309886183790671537767526745028724_f16;

    /// 1/sqrt(π)
    #[unstable(feature = "f16", issue = "116909")]
    // Also, #[unstable(feature = "more_float_constants", issue = "146939")]
    pub const FRAC_1_SQRT_PI: f16 = 0.564189583547756286948079451560772586_f16;

    /// 1/sqrt(2π)
    #[doc(alias = "FRAC_1_SQRT_TAU")]
    #[unstable(feature = "f16", issue = "116909")]
    // Also, #[unstable(feature = "more_float_constants", issue = "146939")]
    pub const FRAC_1_SQRT_2PI: f16 = 0.398942280401432677939946059934381868_f16;

    /// 2/π
    #[unstable(feature = "f16", issue = "116909")]
    pub const FRAC_2_PI: f16 = 0.636619772367581343075535053490057448_f16;

    /// 2/sqrt(π)
    #[unstable(feature = "f16", issue = "116909")]
    pub const FRAC_2_SQRT_PI: f16 = 1.12837916709551257389615890312154517_f16;

    /// sqrt(2)
    #[unstable(feature = "f16", issue = "116909")]
    pub const SQRT_2: f16 = 1.41421356237309504880168872420969808_f16;

    /// 1/sqrt(2)
    #[unstable(feature = "f16", issue = "116909")]
    pub const FRAC_1_SQRT_2: f16 = 0.707106781186547524400844362104849039_f16;

    /// sqrt(3)
    #[unstable(feature = "f16", issue = "116909")]
    // Also, #[unstable(feature = "more_float_constants", issue = "146939")]
    pub const SQRT_3: f16 = 1.732050807568877293527446341505872367_f16;

    /// 1/sqrt(3)
    #[unstable(feature = "f16", issue = "116909")]
    // Also, #[unstable(feature = "more_float_constants", issue = "146939")]
    pub const FRAC_1_SQRT_3: f16 = 0.577350269189625764509148780501957456_f16;

    /// Euler's number (e)
    #[unstable(feature = "f16", issue = "116909")]
    pub const E: f16 = 2.71828182845904523536028747135266250_f16;

    /// log<sub>2</sub>(10)
    #[unstable(feature = "f16", issue = "116909")]
    pub const LOG2_10: f16 = 3.32192809488736234787031942948939018_f16;

    /// log<sub>2</sub>(e)
    #[unstable(feature = "f16", issue = "116909")]
    pub const LOG2_E: f16 = 1.44269504088896340735992468100189214_f16;

    /// log<sub>10</sub>(2)
    #[unstable(feature = "f16", issue = "116909")]
    pub const LOG10_2: f16 = 0.301029995663981195213738894724493027_f16;

    /// log<sub>10</sub>(e)
    #[unstable(feature = "f16", issue = "116909")]
    pub const LOG10_E: f16 = 0.434294481903251827651128918916605082_f16;

    /// ln(2)
    #[unstable(feature = "f16", issue = "116909")]
    pub const LN_2: f16 = 0.693147180559945309417232121458176568_f16;

    /// ln(10)
    #[unstable(feature = "f16", issue = "116909")]
    pub const LN_10: f16 = 2.30258509299404568401799145468436421_f16;
}

impl f16 {
    // FIXME(f16_f128): almost all methods in this `impl` are missing examples and a const
    // implementation. Add these once we can run code on all platforms and have f16/f128 in CTFE.

    /// The radix or base of the internal representation of `f16`.
    #[unstable(feature = "f16", issue = "116909")]
    pub const RADIX: u32 = 2;

    /// Number of significant digits in base 2.
    ///
    /// Note that the size of the mantissa in the bitwise representation is one
    /// smaller than this since the leading 1 is not stored explicitly.
    #[unstable(feature = "f16", issue = "116909")]
    pub const MANTISSA_DIGITS: u32 = 11;

    /// Approximate number of significant digits in base 10.
    ///
    /// This is the maximum <i>x</i> such that any decimal number with <i>x</i>
    /// significant digits can be converted to `f16` and back without loss.
    ///
    /// Equal to floor(log<sub>10</sub> 2<sup>[`MANTISSA_DIGITS`] − 1</sup>).
    ///
    /// [`MANTISSA_DIGITS`]: f16::MANTISSA_DIGITS
    #[unstable(feature = "f16", issue = "116909")]
    pub const DIGITS: u32 = 3;

    /// [Machine epsilon] value for `f16`.
    ///
    /// This is the difference between `1.0` and the next larger representable number.
    ///
    /// Equal to 2<sup>1 − [`MANTISSA_DIGITS`]</sup>.
    ///
    /// [Machine epsilon]: https://en.wikipedia.org/wiki/Machine_epsilon
    /// [`MANTISSA_DIGITS`]: f16::MANTISSA_DIGITS
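    ///
    /// # Examples
    ///
    /// A small check of the definition above, reusing `next_up` from this file
    /// (gated like the other `f16` doctests here):
    ///
    /// ```
    /// #![feature(f16)]
    /// # #[cfg(all(target_arch = "x86_64", target_os = "linux"))] {
    ///
    /// // The gap between 1.0 and the next larger `f16` is exactly `EPSILON`.
    /// assert_eq!(1.0f16.next_up() - 1.0, f16::EPSILON);
    /// # }
    /// ```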
    #[unstable(feature = "f16", issue = "116909")]
    #[rustc_diagnostic_item = "f16_epsilon"]
    pub const EPSILON: f16 = 9.7656e-4_f16;

    /// Smallest finite `f16` value.
    ///
    /// Equal to −[`MAX`].
    ///
    /// [`MAX`]: f16::MAX
    #[unstable(feature = "f16", issue = "116909")]
    pub const MIN: f16 = -6.5504e+4_f16;
    /// Smallest positive normal `f16` value.
    ///
    /// Equal to 2<sup>[`MIN_EXP`] − 1</sup>.
    ///
    /// [`MIN_EXP`]: f16::MIN_EXP
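    ///
    /// # Examples
    ///
    /// A small sketch of the boundary between normal and subnormal values
    /// (gated like the other `f16` doctests in this file):
    ///
    /// ```
    /// #![feature(f16)]
    /// # #[cfg(all(target_arch = "x86_64", target_os = "linux"))] {
    ///
    /// assert!(f16::MIN_POSITIVE.is_normal());
    /// // Halving it leaves the normal range and yields a subnormal value.
    /// assert!((f16::MIN_POSITIVE / 2.0).is_subnormal());
    /// # }
    /// ```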
    #[unstable(feature = "f16", issue = "116909")]
    pub const MIN_POSITIVE: f16 = 6.1035e-5_f16;
    /// Largest finite `f16` value.
    ///
    /// Equal to
    /// (1 − 2<sup>−[`MANTISSA_DIGITS`]</sup>) 2<sup>[`MAX_EXP`]</sup>.
    ///
    /// [`MANTISSA_DIGITS`]: f16::MANTISSA_DIGITS
    /// [`MAX_EXP`]: f16::MAX_EXP
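    ///
    /// # Examples
    ///
    /// A quick check of the relationship to `MIN` stated above (gated like the
    /// other `f16` doctests in this file):
    ///
    /// ```
    /// #![feature(f16)]
    /// # #[cfg(all(target_arch = "x86_64", target_os = "linux"))] {
    ///
    /// // The largest finite value is the negation of the smallest one.
    /// assert_eq!(f16::MAX, -f16::MIN);
    /// assert!(f16::MAX.is_finite());
    /// # }
    /// ```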
    #[unstable(feature = "f16", issue = "116909")]
    pub const MAX: f16 = 6.5504e+4_f16;

    /// One greater than the minimum possible *normal* power of 2 exponent
    /// for a significand bounded by 1 ≤ x < 2 (i.e. the IEEE definition).
    ///
    /// This corresponds to the exact minimum possible *normal* power of 2 exponent
    /// for a significand bounded by 0.5 ≤ x < 1 (i.e. the C definition).
    /// In other words, all normal numbers representable by this type are
    /// greater than or equal to 0.5 × 2<sup><i>MIN_EXP</i></sup>.
    #[unstable(feature = "f16", issue = "116909")]
    pub const MIN_EXP: i32 = -13;
    /// One greater than the maximum possible power of 2 exponent
    /// for a significand bounded by 1 ≤ x < 2 (i.e. the IEEE definition).
    ///
    /// This corresponds to the exact maximum possible power of 2 exponent
    /// for a significand bounded by 0.5 ≤ x < 1 (i.e. the C definition).
    /// In other words, all numbers representable by this type are
    /// strictly less than 2<sup><i>MAX_EXP</i></sup>.
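    ///
    /// # Examples
    ///
    /// A quick check of the bound stated above, done in `f32` to avoid
    /// overflowing `f16` (gated like the other `f16` doctests in this file):
    ///
    /// ```
    /// #![feature(f16)]
    /// # #[cfg(all(target_arch = "x86_64", target_os = "linux"))] {
    ///
    /// // Every finite `f16` is strictly below 2^MAX_EXP = 65536.
    /// assert!((f16::MAX as f32) < 2f32.powi(f16::MAX_EXP));
    /// # }
    /// ```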
    #[unstable(feature = "f16", issue = "116909")]
    pub const MAX_EXP: i32 = 16;

    /// Minimum <i>x</i> for which 10<sup><i>x</i></sup> is normal.
    ///
    /// Equal to ceil(log<sub>10</sub> [`MIN_POSITIVE`]).
    ///
    /// [`MIN_POSITIVE`]: f16::MIN_POSITIVE
    #[unstable(feature = "f16", issue = "116909")]
    pub const MIN_10_EXP: i32 = -4;
    /// Maximum <i>x</i> for which 10<sup><i>x</i></sup> is normal.
    ///
    /// Equal to floor(log<sub>10</sub> [`MAX`]).
    ///
    /// [`MAX`]: f16::MAX
    #[unstable(feature = "f16", issue = "116909")]
    pub const MAX_10_EXP: i32 = 4;

    /// Not a Number (NaN).
    ///
    /// Note that IEEE 754 doesn't define just a single NaN value; a plethora of bit patterns are
    /// considered to be NaN. Furthermore, the standard distinguishes between a "signaling" and
    /// a "quiet" NaN, and allows inspecting its "payload" (the unspecified bits in the bit pattern)
    /// and its sign. See the [specification of NaN bit patterns](f32#nan-bit-patterns) for more
    /// info.
    ///
    /// This constant is guaranteed to be a quiet NaN (on targets that follow the Rust assumption
    /// that the quiet/signaling bit being set to 1 indicates a quiet NaN). Beyond that, nothing is
    /// guaranteed about the specific bit pattern chosen here: both payload and sign are arbitrary.
    /// The concrete bit pattern may change across Rust versions and target platforms.
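    ///
    /// # Examples
    ///
    /// A short sketch of NaN's comparison behavior (gated like the other `f16`
    /// doctests in this file):
    ///
    /// ```
    /// #![feature(f16)]
    /// # #[cfg(all(target_arch = "x86_64", target_os = "linux"))] {
    ///
    /// let nan = f16::NAN;
    /// assert!(nan.is_nan());
    /// // NaN compares unequal to everything, including itself.
    /// assert!(nan != nan);
    /// # }
    /// ```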
    #[allow(clippy::eq_op)]
    #[rustc_diagnostic_item = "f16_nan"]
    #[unstable(feature = "f16", issue = "116909")]
    pub const NAN: f16 = 0.0_f16 / 0.0_f16;

    /// Infinity (∞).
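    ///
    /// # Examples
    ///
    /// A minimal sketch (gated like the other `f16` doctests in this file):
    ///
    /// ```
    /// #![feature(f16)]
    /// # #[cfg(all(target_arch = "x86_64", target_os = "linux"))] {
    ///
    /// assert!(f16::INFINITY.is_infinite());
    /// // Positive infinity is larger than every finite `f16`, including `MAX`.
    /// assert!(f16::INFINITY > f16::MAX);
    /// # }
    /// ```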
    #[unstable(feature = "f16", issue = "116909")]
    pub const INFINITY: f16 = 1.0_f16 / 0.0_f16;

    /// Negative infinity (−∞).
    #[unstable(feature = "f16", issue = "116909")]
    pub const NEG_INFINITY: f16 = -1.0_f16 / 0.0_f16;

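    // An `f16` is IEEE 754 binary16: 1 sign bit, 5 exponent bits, and 10 explicit
    // mantissa bits, which is the layout the three masks below pick apart.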
    /// Sign bit
    pub(crate) const SIGN_MASK: u16 = 0x8000;

    /// Exponent mask
    pub(crate) const EXP_MASK: u16 = 0x7c00;

    /// Mantissa mask
    pub(crate) const MAN_MASK: u16 = 0x03ff;

    /// Minimum representable positive value (min subnormal)
    const TINY_BITS: u16 = 0x1;

    /// Minimum representable negative value (min negative subnormal)
    const NEG_TINY_BITS: u16 = Self::TINY_BITS | Self::SIGN_MASK;

    /// Returns `true` if this value is NaN.
    ///
    /// ```
    /// #![feature(f16)]
    /// # #[cfg(all(target_arch = "x86_64", target_os = "linux"))] {
    ///
    /// let nan = f16::NAN;
    /// let f = 7.0_f16;
    ///
    /// assert!(nan.is_nan());
    /// assert!(!f.is_nan());
    /// # }
    /// ```
    #[inline]
    #[must_use]
    #[unstable(feature = "f16", issue = "116909")]
    #[allow(clippy::eq_op)] // > if you intended to check if the operand is NaN, use `.is_nan()` instead :)
    pub const fn is_nan(self) -> bool {
        self != self
    }

    /// Returns `true` if this value is positive infinity or negative infinity, and
    /// `false` otherwise.
    ///
    /// ```
    /// #![feature(f16)]
    /// # #[cfg(all(target_arch = "x86_64", target_os = "linux"))] {
    ///
    /// let f = 7.0f16;
    /// let inf = f16::INFINITY;
    /// let neg_inf = f16::NEG_INFINITY;
    /// let nan = f16::NAN;
    ///
    /// assert!(!f.is_infinite());
    /// assert!(!nan.is_infinite());
    ///
    /// assert!(inf.is_infinite());
    /// assert!(neg_inf.is_infinite());
    /// # }
    /// ```
    #[inline]
    #[must_use]
    #[unstable(feature = "f16", issue = "116909")]
    pub const fn is_infinite(self) -> bool {
        (self == f16::INFINITY) | (self == f16::NEG_INFINITY)
    }

    /// Returns `true` if this number is neither infinite nor NaN.
    ///
    /// ```
    /// #![feature(f16)]
    /// # #[cfg(all(target_arch = "x86_64", target_os = "linux"))] {
    ///
    /// let f = 7.0f16;
    /// let inf: f16 = f16::INFINITY;
    /// let neg_inf: f16 = f16::NEG_INFINITY;
    /// let nan: f16 = f16::NAN;
    ///
    /// assert!(f.is_finite());
    ///
    /// assert!(!nan.is_finite());
    /// assert!(!inf.is_finite());
    /// assert!(!neg_inf.is_finite());
    /// # }
    /// ```
    #[inline]
    #[must_use]
    #[unstable(feature = "f16", issue = "116909")]
    #[rustc_const_unstable(feature = "f16", issue = "116909")]
    pub const fn is_finite(self) -> bool {
        // There's no need to handle NaN separately: if self is NaN,
        // the comparison is not true, exactly as desired.
        self.abs() < Self::INFINITY
    }

    /// Returns `true` if the number is [subnormal].
    ///
    /// ```
    /// #![feature(f16)]
    /// # #[cfg(all(target_arch = "x86_64", target_os = "linux"))] {
    ///
    /// let min = f16::MIN_POSITIVE; // 6.1035e-5
    /// let max = f16::MAX;
    /// let lower_than_min = 1.0e-7_f16;
    /// let zero = 0.0_f16;
    ///
    /// assert!(!min.is_subnormal());
    /// assert!(!max.is_subnormal());
    ///
    /// assert!(!zero.is_subnormal());
    /// assert!(!f16::NAN.is_subnormal());
    /// assert!(!f16::INFINITY.is_subnormal());
    /// // Values between `0` and `min` are Subnormal.
    /// assert!(lower_than_min.is_subnormal());
    /// # }
    /// ```
    /// [subnormal]: https://en.wikipedia.org/wiki/Denormal_number
    #[inline]
    #[must_use]
    #[unstable(feature = "f16", issue = "116909")]
    pub const fn is_subnormal(self) -> bool {
        matches!(self.classify(), FpCategory::Subnormal)
    }

    /// Returns `true` if the number is neither zero, infinite, [subnormal], nor NaN.
    ///
    /// ```
    /// #![feature(f16)]
    /// # #[cfg(all(target_arch = "x86_64", target_os = "linux"))] {
    ///
    /// let min = f16::MIN_POSITIVE; // 6.1035e-5
    /// let max = f16::MAX;
    /// let lower_than_min = 1.0e-7_f16;
    /// let zero = 0.0_f16;
    ///
    /// assert!(min.is_normal());
    /// assert!(max.is_normal());
    ///
    /// assert!(!zero.is_normal());
    /// assert!(!f16::NAN.is_normal());
    /// assert!(!f16::INFINITY.is_normal());
    /// // Values between `0` and `min` are Subnormal.
    /// assert!(!lower_than_min.is_normal());
    /// # }
    /// ```
    /// [subnormal]: https://en.wikipedia.org/wiki/Denormal_number
    #[inline]
    #[must_use]
    #[unstable(feature = "f16", issue = "116909")]
    pub const fn is_normal(self) -> bool {
        matches!(self.classify(), FpCategory::Normal)
    }

    /// Returns the floating point category of the number. If only one property
    /// is going to be tested, it is generally faster to use the specific
    /// predicate instead.
    ///
    /// ```
    /// #![feature(f16)]
    /// # #[cfg(all(target_arch = "x86_64", target_os = "linux"))] {
    ///
    /// use std::num::FpCategory;
    ///
    /// let num = 12.4_f16;
    /// let inf = f16::INFINITY;
    ///
    /// assert_eq!(num.classify(), FpCategory::Normal);
    /// assert_eq!(inf.classify(), FpCategory::Infinite);
    /// # }
    /// ```
    #[inline]
    #[unstable(feature = "f16", issue = "116909")]
    pub const fn classify(self) -> FpCategory {
        let b = self.to_bits();
        match (b & Self::MAN_MASK, b & Self::EXP_MASK) {
            (0, Self::EXP_MASK) => FpCategory::Infinite,
            (_, Self::EXP_MASK) => FpCategory::Nan,
            (0, 0) => FpCategory::Zero,
            (_, 0) => FpCategory::Subnormal,
            _ => FpCategory::Normal,
        }
    }

    /// Returns `true` if `self` has a positive sign, including `+0.0`, NaNs with
    /// positive sign bit and positive infinity.
    ///
    /// Note that IEEE 754 doesn't assign any meaning to the sign bit in case of
    /// a NaN, and as Rust doesn't guarantee that the bit patterns of NaNs are
    /// conserved over arithmetic operations, the result of `is_sign_positive` on
    /// a NaN might produce an unexpected or non-portable result. See the [specification
    /// of NaN bit patterns](f32#nan-bit-patterns) for more info. Use `self.signum() == 1.0`
    /// if you need fully portable behavior (will return `false` for all NaNs).
    ///
    /// ```
    /// #![feature(f16)]
    /// # // FIXME(f16_f128): LLVM crashes on s390x, llvm/llvm-project#50374
    /// # #[cfg(all(target_arch = "x86_64", target_os = "linux"))] {
    ///
    /// let f = 7.0_f16;
    /// let g = -7.0_f16;
    ///
    /// assert!(f.is_sign_positive());
    /// assert!(!g.is_sign_positive());
    /// # }
    /// ```
    #[inline]
    #[must_use]
    #[unstable(feature = "f16", issue = "116909")]
    pub const fn is_sign_positive(self) -> bool {
        !self.is_sign_negative()
    }

    /// Returns `true` if `self` has a negative sign, including `-0.0`, NaNs with
    /// negative sign bit and negative infinity.
    ///
    /// Note that IEEE 754 doesn't assign any meaning to the sign bit in case of
    /// a NaN, and as Rust doesn't guarantee that the bit patterns of NaNs are
    /// conserved over arithmetic operations, the result of `is_sign_negative` on
    /// a NaN might produce an unexpected or non-portable result. See the [specification
    /// of NaN bit patterns](f32#nan-bit-patterns) for more info. Use `self.signum() == -1.0`
    /// if you need fully portable behavior (will return `false` for all NaNs).
    ///
    /// ```
    /// #![feature(f16)]
    /// # // FIXME(f16_f128): LLVM crashes on s390x, llvm/llvm-project#50374
    /// # #[cfg(all(target_arch = "x86_64", target_os = "linux"))] {
    ///
    /// let f = 7.0_f16;
    /// let g = -7.0_f16;
    ///
    /// assert!(!f.is_sign_negative());
    /// assert!(g.is_sign_negative());
    /// # }
    /// ```
    #[inline]
    #[must_use]
    #[unstable(feature = "f16", issue = "116909")]
    pub const fn is_sign_negative(self) -> bool {
        // IEEE754 says: isSignMinus(x) is true if and only if x has negative sign. isSignMinus
        // applies to zeros and NaNs as well.
        // SAFETY: This is just transmuting to get the sign bit, it's fine.
        (self.to_bits() & (1 << 15)) != 0
    }

    /// Returns the least number greater than `self`.
    ///
    /// Let `TINY` be the smallest representable positive `f16`. Then,
    /// - if `self.is_nan()`, this returns `self`;
    /// - if `self` is [`NEG_INFINITY`], this returns [`MIN`];
    /// - if `self` is `-TINY`, this returns -0.0;
    /// - if `self` is -0.0 or +0.0, this returns `TINY`;
    /// - if `self` is [`MAX`] or [`INFINITY`], this returns [`INFINITY`];
    /// - otherwise the unique least value greater than `self` is returned.
    ///
    /// The identity `x.next_up() == -(-x).next_down()` holds for all non-NaN `x`. When `x`
    /// is finite `x == x.next_up().next_down()` also holds.
    ///
    /// ```rust
    /// #![feature(f16)]
    /// # // FIXME(f16_f128): ABI issues on MSVC
    /// # #[cfg(all(target_arch = "x86_64", target_os = "linux"))] {
    ///
    /// // f16::EPSILON is the difference between 1.0 and the next number up.
    /// assert_eq!(1.0f16.next_up(), 1.0 + f16::EPSILON);
    /// // But not for most numbers.
    /// assert!(0.1f16.next_up() < 0.1 + f16::EPSILON);
    /// assert_eq!(4356f16.next_up(), 4360.0);
    /// # }
    /// ```
    ///
    /// This operation corresponds to IEEE-754 `nextUp`.
    ///
    /// [`NEG_INFINITY`]: Self::NEG_INFINITY
    /// [`INFINITY`]: Self::INFINITY
    /// [`MIN`]: Self::MIN
    /// [`MAX`]: Self::MAX
    #[inline]
    #[doc(alias = "nextUp")]
    #[unstable(feature = "f16", issue = "116909")]
    pub const fn next_up(self) -> Self {
        // Some targets violate Rust's assumption of IEEE semantics, e.g. by flushing
        // denormals to zero. This is in general unsound and unsupported, but here
        // we do our best to still produce the correct result on such targets.
        let bits = self.to_bits();
        if self.is_nan() || bits == Self::INFINITY.to_bits() {
            return self;
        }

        let abs = bits & !Self::SIGN_MASK;
        let next_bits = if abs == 0 {
            Self::TINY_BITS
        } else if bits == abs {
            bits + 1
        } else {
            bits - 1
        };
        Self::from_bits(next_bits)
    }

    /// Returns the greatest number less than `self`.
    ///
    /// Let `TINY` be the smallest representable positive `f16`. Then,
    /// - if `self.is_nan()`, this returns `self`;
    /// - if `self` is [`INFINITY`], this returns [`MAX`];
    /// - if `self` is `TINY`, this returns 0.0;
    /// - if `self` is -0.0 or +0.0, this returns `-TINY`;
    /// - if `self` is [`MIN`] or [`NEG_INFINITY`], this returns [`NEG_INFINITY`];
    /// - otherwise the unique greatest value less than `self` is returned.
    ///
    /// The identity `x.next_down() == -(-x).next_up()` holds for all non-NaN `x`. When `x`
    /// is finite `x == x.next_down().next_up()` also holds.
    ///
    /// ```rust
    /// #![feature(f16)]
    /// # // FIXME(f16_f128): ABI issues on MSVC
    /// # #[cfg(all(target_arch = "x86_64", target_os = "linux"))] {
    ///
    /// let x = 1.0f16;
    /// // Clamp value into range [0, 1).
    /// let clamped = x.clamp(0.0, 1.0f16.next_down());
    /// assert!(clamped < 1.0);
    /// assert_eq!(clamped.next_up(), 1.0);
    /// # }
    /// ```
    ///
    /// This operation corresponds to IEEE-754 `nextDown`.
    ///
    /// [`NEG_INFINITY`]: Self::NEG_INFINITY
    /// [`INFINITY`]: Self::INFINITY
    /// [`MIN`]: Self::MIN
    /// [`MAX`]: Self::MAX
    #[inline]
    #[doc(alias = "nextDown")]
    #[unstable(feature = "f16", issue = "116909")]
    pub const fn next_down(self) -> Self {
        // Some targets violate Rust's assumption of IEEE semantics, e.g. by flushing
        // denormals to zero. This is in general unsound and unsupported, but here
        // we do our best to still produce the correct result on such targets.
        let bits = self.to_bits();
        if self.is_nan() || bits == Self::NEG_INFINITY.to_bits() {
            return self;
        }

        let abs = bits & !Self::SIGN_MASK;
        let next_bits = if abs == 0 {
            Self::NEG_TINY_BITS
        } else if bits == abs {
            bits - 1
        } else {
            bits + 1
        };
        Self::from_bits(next_bits)
    }

    /// Takes the reciprocal (inverse) of a number, `1/x`.
    ///
    /// ```
    /// #![feature(f16)]
    /// # // FIXME(f16_f128): extendhfsf2, truncsfhf2, __gnu_h2f_ieee, __gnu_f2h_ieee missing for many platforms
    /// # #[cfg(all(target_arch = "x86_64", target_os = "linux"))] {
    ///
    /// let x = 2.0_f16;
    /// let abs_difference = (x.recip() - (1.0 / x)).abs();
    ///
    /// assert!(abs_difference <= f16::EPSILON);
    /// # }
    /// ```
    #[inline]
    #[unstable(feature = "f16", issue = "116909")]
    #[must_use = "this returns the result of the operation, without modifying the original"]
    pub const fn recip(self) -> Self {
        1.0 / self
    }

    /// Converts radians to degrees.
    ///
    /// # Unspecified precision
    ///
    /// The precision of this function is non-deterministic. This means it varies by platform,
    /// Rust version, and can even differ within the same execution from one invocation to the next.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(f16)]
    /// # // FIXME(f16_f128): extendhfsf2, truncsfhf2, __gnu_h2f_ieee, __gnu_f2h_ieee missing for many platforms
    /// # #[cfg(all(target_arch = "x86_64", target_os = "linux"))] {
    ///
    /// let angle = std::f16::consts::PI;
    ///
    /// let abs_difference = (angle.to_degrees() - 180.0).abs();
    /// assert!(abs_difference <= 0.5);
    /// # }
    /// ```
    #[inline]
    #[unstable(feature = "f16", issue = "116909")]
    #[must_use = "this returns the result of the operation, without modifying the original"]
    pub const fn to_degrees(self) -> Self {
        // Use a literal to avoid double rounding, consts::PI is already rounded,
        // and dividing would round again.
        const PIS_IN_180: f16 = 57.2957795130823208767981548141051703_f16;
        self * PIS_IN_180
    }

    /// Converts degrees to radians.
    ///
    /// # Unspecified precision
    ///
    /// The precision of this function is non-deterministic. This means it varies by platform,
    /// Rust version, and can even differ within the same execution from one invocation to the next.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(f16)]
    /// # // FIXME(f16_f128): extendhfsf2, truncsfhf2, __gnu_h2f_ieee, __gnu_f2h_ieee missing for many platforms
    /// # #[cfg(all(target_arch = "x86_64", target_os = "linux"))] {
    ///
    /// let angle = 180.0f16;
    ///
    /// let abs_difference = (angle.to_radians() - std::f16::consts::PI).abs();
    ///
    /// assert!(abs_difference <= 0.01);
    /// # }
    /// ```
    #[inline]
    #[unstable(feature = "f16", issue = "116909")]
    #[must_use = "this returns the result of the operation, without modifying the original"]
    pub const fn to_radians(self) -> f16 {
        // Use a literal to avoid double rounding, consts::PI is already rounded,
        // and dividing would round again.
        const RADS_PER_DEG: f16 = 0.017453292519943295769236907684886_f16;
        self * RADS_PER_DEG
    }

    /// Returns the maximum of the two numbers, ignoring NaN.
    ///
    /// If exactly one of the arguments is NaN (quiet or signaling), then the other argument is
    /// returned. If both arguments are NaN, the return value is NaN, with the bit pattern picked
    /// using the usual [rules for arithmetic operations](f32#nan-bit-patterns). If the inputs
    /// compare equal (such as for the case of `+0.0` and `-0.0`), either input may be returned
    /// non-deterministically.
    ///
    /// The handling of NaNs follows the IEEE 754-2019 semantics for `maximumNumber`, treating all
    /// NaNs the same way to ensure the operation is associative. The handling of signed zeros
    /// follows the IEEE 754-2008 semantics for `maxNum`.
    ///
    /// ```
    /// #![feature(f16)]
    /// # #[cfg(target_arch = "aarch64")] { // FIXME(f16_f128): rust-lang/rust#123885
    ///
    /// let x = 1.0f16;
    /// let y = 2.0f16;
    ///
    /// assert_eq!(x.max(y), y);
    /// assert_eq!(x.max(f16::NAN), x);
    /// # }
    /// ```
    #[inline]
    #[unstable(feature = "f16", issue = "116909")]
    #[rustc_const_unstable(feature = "f16", issue = "116909")]
    #[must_use = "this returns the result of the comparison, without modifying either input"]
    pub const fn max(self, other: f16) -> f16 {
        intrinsics::maxnumf16(self, other)
    }

    /// Returns the minimum of the two numbers, ignoring NaN.
    ///
    /// If exactly one of the arguments is NaN (quiet or signaling), then the other argument is
    /// returned. If both arguments are NaN, the return value is NaN, with the bit pattern picked
    /// using the usual [rules for arithmetic operations](f32#nan-bit-patterns). If the inputs
    /// compare equal (such as for the case of `+0.0` and `-0.0`), either input may be returned
    /// non-deterministically.
    ///
    /// The handling of NaNs follows the IEEE 754-2019 semantics for `minimumNumber`, treating all
    /// NaNs the same way to ensure the operation is associative. The handling of signed zeros
    /// follows the IEEE 754-2008 semantics for `minNum`.
    ///
    /// ```
    /// #![feature(f16)]
    /// # #[cfg(target_arch = "aarch64")] { // FIXME(f16_f128): rust-lang/rust#123885
    ///
    /// let x = 1.0f16;
    /// let y = 2.0f16;
    ///
    /// assert_eq!(x.min(y), x);
    /// assert_eq!(x.min(f16::NAN), x);
    /// # }
    /// ```
    #[inline]
    #[unstable(feature = "f16", issue = "116909")]
    #[rustc_const_unstable(feature = "f16", issue = "116909")]
    #[must_use = "this returns the result of the comparison, without modifying either input"]
    pub const fn min(self, other: f16) -> f16 {
        intrinsics::minnumf16(self, other)
    }

    /// Returns the maximum of the two numbers, propagating NaN.
    ///
    /// If at least one of the arguments is NaN, the return value is NaN, with the bit pattern
    /// picked using the usual [rules for arithmetic operations](f32#nan-bit-patterns). Furthermore,
    /// `-0.0` is considered to be less than `+0.0`, making this function fully deterministic for
    /// non-NaN inputs.
    ///
    /// This is in contrast to [`f16::max`] which only returns NaN when *both* arguments are NaN,
    /// and which does not reliably order `-0.0` and `+0.0`.
    ///
    /// This follows the IEEE 754-2019 semantics for `maximum`.
    ///
    /// ```
    /// #![feature(f16)]
    /// #![feature(float_minimum_maximum)]
    /// # #[cfg(target_arch = "aarch64")] { // FIXME(f16_f128): rust-lang/rust#123885
    ///
    /// let x = 1.0f16;
    /// let y = 2.0f16;
    ///
    /// assert_eq!(x.maximum(y), y);
    /// assert!(x.maximum(f16::NAN).is_nan());
    /// # }
    /// ```
    #[inline]
    #[unstable(feature = "f16", issue = "116909")]
    // #[unstable(feature = "float_minimum_maximum", issue = "91079")]
    #[must_use = "this returns the result of the comparison, without modifying either input"]
    pub const fn maximum(self, other: f16) -> f16 {
        intrinsics::maximumf16(self, other)
    }

    /// Returns the minimum of the two numbers, propagating NaN.
    ///
    /// If at least one of the arguments is NaN, the return value is NaN, with the bit pattern
    /// picked using the usual [rules for arithmetic operations](f32#nan-bit-patterns). Furthermore,
    /// `-0.0` is considered to be less than `+0.0`, making this function fully deterministic for
    /// non-NaN inputs.
    ///
    /// This is in contrast to [`f16::min`] which only returns NaN when *both* arguments are NaN,
    /// and which does not reliably order `-0.0` and `+0.0`.
    ///
    /// This follows the IEEE 754-2019 semantics for `minimum`.
    ///
    /// ```
    /// #![feature(f16)]
    /// #![feature(float_minimum_maximum)]
    /// # #[cfg(target_arch = "aarch64")] { // FIXME(f16_f128): rust-lang/rust#123885
    ///
    /// let x = 1.0f16;
    /// let y = 2.0f16;
    ///
    /// assert_eq!(x.minimum(y), x);
    /// assert!(x.minimum(f16::NAN).is_nan());
    /// # }
    /// ```
    #[inline]
    #[unstable(feature = "f16", issue = "116909")]
    // #[unstable(feature = "float_minimum_maximum", issue = "91079")]
    #[must_use = "this returns the result of the comparison, without modifying either input"]
    pub const fn minimum(self, other: f16) -> f16 {
        intrinsics::minimumf16(self, other)
    }

    /// Calculates the midpoint (average) between `self` and `rhs`.
    ///
    /// This returns NaN when *either* argument is NaN or if a combination of
    /// +inf and -inf is provided as arguments.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(f16)]
    /// # #[cfg(target_arch = "aarch64")] { // FIXME(f16_f128): rust-lang/rust#123885
    ///
    /// assert_eq!(1f16.midpoint(4.0), 2.5);
    /// assert_eq!((-5.5f16).midpoint(8.0), 1.25);
    /// # }
    /// ```
    #[inline]
    #[doc(alias = "average")]
    #[unstable(feature = "f16", issue = "116909")]
    #[rustc_const_unstable(feature = "f16", issue = "116909")]
    pub const fn midpoint(self, other: f16) -> f16 {
        const HI: f16 = f16::MAX / 2.;

        let (a, b) = (self, other);
        let abs_a = a.abs();
        let abs_b = b.abs();

        if abs_a <= HI && abs_b <= HI {
            // Overflow is impossible
            (a + b) / 2.
        } else {
            (a / 2.) + (b / 2.)
        }
    }

    /// Rounds toward zero and converts to any primitive integer type,
    /// assuming that the value is finite and fits in that type.
    ///
    /// ```
    /// #![feature(f16)]
    /// # #[cfg(all(target_arch = "x86_64", target_os = "linux"))] {
    ///
    /// let value = 4.6_f16;
    /// let rounded = unsafe { value.to_int_unchecked::<u16>() };
    /// assert_eq!(rounded, 4);
    ///
    /// let value = -128.9_f16;
    /// let rounded = unsafe { value.to_int_unchecked::<i8>() };
    /// assert_eq!(rounded, i8::MIN);
    /// # }
    /// ```
    ///
    /// # Safety
    ///
    /// The value must:
    ///
    /// * Not be `NaN`
    /// * Not be infinite
    /// * Be representable in the return type `Int`, after truncating off its fractional part
    #[inline]
    #[unstable(feature = "f16", issue = "116909")]
    #[must_use = "this returns the result of the operation, without modifying the original"]
    pub unsafe fn to_int_unchecked<Int>(self) -> Int
    where
        Self: FloatToInt<Int>,
    {
        // SAFETY: the caller must uphold the safety contract for
        // `FloatToInt::to_int_unchecked`.
        unsafe { FloatToInt::<Int>::to_int_unchecked(self) }
    }

    /// Raw transmutation to `u16`.
    ///
    /// This is currently identical to `transmute::<f16, u16>(self)` on all platforms.
    ///
    /// See [`from_bits`](#method.from_bits) for some discussion of the
    /// portability of this operation (there are almost no issues).
    ///
    /// Note that this function is distinct from `as` casting, which attempts to
    /// preserve the *numeric* value, and not the bitwise value.
    ///
    /// ```
    /// #![feature(f16)]
    /// # #[cfg(all(target_arch = "x86_64", target_os = "linux"))] {
    ///
    /// # // FIXME(f16_f128): enable this once const casting works
    /// # // assert_ne!((1f16).to_bits(), 1f16 as u128); // to_bits() is not casting!
    /// assert_eq!((12.5f16).to_bits(), 0x4a40);
    /// # }
    /// ```
    #[inline]
    #[unstable(feature = "f16", issue = "116909")]
    #[must_use = "this returns the result of the operation, without modifying the original"]
    #[allow(unnecessary_transmutes)]
    pub const fn to_bits(self) -> u16 {
        // SAFETY: `u16` is a plain old datatype so we can always transmute to it.
        unsafe { mem::transmute(self) }
    }

    /// Raw transmutation from `u16`.
    ///
    /// This is currently identical to `transmute::<u16, f16>(v)` on all platforms.
    /// It turns out this is incredibly portable, for two reasons:
    ///
    /// * Floats and Ints have the same endianness on all supported platforms.
    /// * IEEE 754 very precisely specifies the bit layout of floats.
    ///
    /// However there is one caveat: prior to the 2008 version of IEEE 754, how
    /// to interpret the NaN signaling bit wasn't actually specified. Most platforms
    /// (notably x86 and ARM) picked the interpretation that was ultimately
    /// standardized in 2008, but some didn't (notably MIPS). As a result, all
    /// signaling NaNs on MIPS are quiet NaNs on x86, and vice-versa.
    ///
    /// Rather than trying to preserve signaling-ness cross-platform, this
    /// implementation favors preserving the exact bits. This means that
    /// any payloads encoded in NaNs will be preserved even if the result of
    /// this method is sent over the network from an x86 machine to a MIPS one.
    ///
    /// If the results of this method are only manipulated by the same
    /// architecture that produced them, then there is no portability concern.
    ///
    /// If the input isn't NaN, then there is no portability concern.
    ///
    /// If you don't care about signalingness (very likely), then there is no
    /// portability concern.
    ///
    /// Note that this function is distinct from `as` casting, which attempts to
    /// preserve the *numeric* value, and not the bitwise value.
    ///
    /// ```
    /// #![feature(f16)]
    /// # #[cfg(all(target_arch = "x86_64", target_os = "linux"))] {
    ///
    /// let v = f16::from_bits(0x4a40);
    /// assert_eq!(v, 12.5);
    /// # }
    /// ```
    #[inline]
    #[must_use]
    #[unstable(feature = "f16", issue = "116909")]
    #[allow(unnecessary_transmutes)]
    pub const fn from_bits(v: u16) -> Self {
        // It turns out the safety issues with sNaN were overblown! Hooray!
        // SAFETY: `u16` is a plain old datatype so we can always transmute from it.
        unsafe { mem::transmute(v) }
    }

    /// Returns the memory representation of this floating point number as a byte array in
    /// big-endian (network) byte order.
    ///
    /// See [`from_bits`](Self::from_bits) for some discussion of the
    /// portability of this operation (there are almost no issues).
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(f16)]
    /// # // FIXME(f16_f128): LLVM crashes on s390x, llvm/llvm-project#50374
    /// # #[cfg(all(target_arch = "x86_64", target_os = "linux"))] {
    ///
    /// let bytes = 12.5f16.to_be_bytes();
    /// assert_eq!(bytes, [0x4a, 0x40]);
    /// # }
    /// ```
    #[inline]
    #[unstable(feature = "f16", issue = "116909")]
    #[must_use = "this returns the result of the operation, without modifying the original"]
    pub const fn to_be_bytes(self) -> [u8; 2] {
        self.to_bits().to_be_bytes()
    }

    /// Returns the memory representation of this floating point number as a byte array in
    /// little-endian byte order.
    ///
    /// See [`from_bits`](Self::from_bits) for some discussion of the
    /// portability of this operation (there are almost no issues).
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(f16)]
    /// # // FIXME(f16_f128): LLVM crashes on s390x, llvm/llvm-project#50374
    /// # #[cfg(all(target_arch = "x86_64", target_os = "linux"))] {
    ///
    /// let bytes = 12.5f16.to_le_bytes();
    /// assert_eq!(bytes, [0x40, 0x4a]);
    /// # }
    /// ```
    #[inline]
    #[unstable(feature = "f16", issue = "116909")]
    #[must_use = "this returns the result of the operation, without modifying the original"]
    pub const fn to_le_bytes(self) -> [u8; 2] {
        self.to_bits().to_le_bytes()
    }

    /// Returns the memory representation of this floating point number as a byte array in
    /// native byte order.
    ///
    /// As the target platform's native endianness is used, portable code
    /// should use [`to_be_bytes`] or [`to_le_bytes`], as appropriate, instead.
    ///
    /// [`to_be_bytes`]: f16::to_be_bytes
    /// [`to_le_bytes`]: f16::to_le_bytes
    ///
    /// See [`from_bits`](Self::from_bits) for some discussion of the
    /// portability of this operation (there are almost no issues).
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(f16)]
    /// # // FIXME(f16_f128): LLVM crashes on s390x, llvm/llvm-project#50374
    /// # #[cfg(all(target_arch = "x86_64", target_os = "linux"))] {
    ///
    /// let bytes = 12.5f16.to_ne_bytes();
    /// assert_eq!(
    ///     bytes,
    ///     if cfg!(target_endian = "big") {
    ///         [0x4a, 0x40]
    ///     } else {
    ///         [0x40, 0x4a]
    ///     }
    /// );
    /// # }
    /// ```
    #[inline]
    #[unstable(feature = "f16", issue = "116909")]
    #[must_use = "this returns the result of the operation, without modifying the original"]
    pub const fn to_ne_bytes(self) -> [u8; 2] {
        self.to_bits().to_ne_bytes()
    }

    /// Creates a floating point value from its representation as a byte array in big endian.
    ///
    /// See [`from_bits`](Self::from_bits) for some discussion of the
    /// portability of this operation (there are almost no issues).
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(f16)]
    /// # #[cfg(all(target_arch = "x86_64", target_os = "linux"))] {
    ///
    /// let value = f16::from_be_bytes([0x4a, 0x40]);
    /// assert_eq!(value, 12.5);
    /// # }
    /// ```
    #[inline]
    #[must_use]
    #[unstable(feature = "f16", issue = "116909")]
    pub const fn from_be_bytes(bytes: [u8; 2]) -> Self {
        Self::from_bits(u16::from_be_bytes(bytes))
    }

    /// Creates a floating point value from its representation as a byte array in little endian.
    ///
    /// See [`from_bits`](Self::from_bits) for some discussion of the
    /// portability of this operation (there are almost no issues).
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(f16)]
    /// # #[cfg(all(target_arch = "x86_64", target_os = "linux"))] {
    ///
    /// let value = f16::from_le_bytes([0x40, 0x4a]);
    /// assert_eq!(value, 12.5);
    /// # }
    /// ```
    #[inline]
    #[must_use]
    #[unstable(feature = "f16", issue = "116909")]
    pub const fn from_le_bytes(bytes: [u8; 2]) -> Self {
        Self::from_bits(u16::from_le_bytes(bytes))
    }

    /// Creates a floating point value from its representation as a byte array in native endian.
    ///
    /// As the target platform's native endianness is used, portable code
    /// likely wants to use [`from_be_bytes`] or [`from_le_bytes`], as
    /// appropriate instead.
    ///
    /// [`from_be_bytes`]: f16::from_be_bytes
    /// [`from_le_bytes`]: f16::from_le_bytes
    ///
    /// See [`from_bits`](Self::from_bits) for some discussion of the
    /// portability of this operation (there are almost no issues).
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(f16)]
    /// # #[cfg(all(target_arch = "x86_64", target_os = "linux"))] {
    ///
    /// let value = f16::from_ne_bytes(if cfg!(target_endian = "big") {
    ///     [0x4a, 0x40]
    /// } else {
    ///     [0x40, 0x4a]
    /// });
    /// assert_eq!(value, 12.5);
    /// # }
    /// ```
    #[inline]
    #[must_use]
    #[unstable(feature = "f16", issue = "116909")]
    pub const fn from_ne_bytes(bytes: [u8; 2]) -> Self {
        Self::from_bits(u16::from_ne_bytes(bytes))
    }

    /// Returns the ordering between `self` and `other`.
    ///
    /// Unlike the standard partial comparison between floating point numbers,
    /// this comparison always produces an ordering in accordance to
    /// the `totalOrder` predicate as defined in the IEEE 754 (2008 revision)
    /// floating point standard. The values are ordered in the following sequence:
    ///
    /// - negative quiet NaN
    /// - negative signaling NaN
    /// - negative infinity
    /// - negative numbers
    /// - negative subnormal numbers
    /// - negative zero
    /// - positive zero
    /// - positive subnormal numbers
    /// - positive numbers
    /// - positive infinity
    /// - positive signaling NaN
    /// - positive quiet NaN.
    ///
    /// The ordering established by this function does not always agree with the
    /// [`PartialOrd`] and [`PartialEq`] implementations of `f16`. For example,
    /// they consider negative and positive zero equal, while `total_cmp`
    /// doesn't.
    ///
    /// The interpretation of the signaling NaN bit follows the definition in
    /// the IEEE 754 standard, which may not match the interpretation by some of
    /// the older, non-conformant (e.g. MIPS) hardware implementations.
    ///
    /// # Example
    ///
    /// ```
    /// #![feature(f16)]
    /// # // FIXME(f16_f128): extendhfsf2, truncsfhf2, __gnu_h2f_ieee, __gnu_f2h_ieee missing for many platforms
    /// # #[cfg(all(target_arch = "x86_64", target_os = "linux"))] {
    ///
    /// struct GoodBoy {
    ///     name: &'static str,
    ///     weight: f16,
    /// }
    ///
    /// let mut bois = vec![
    ///     GoodBoy { name: "Pucci", weight: 0.1 },
    ///     GoodBoy { name: "Woofer", weight: 99.0 },
    ///     GoodBoy { name: "Yapper", weight: 10.0 },
    ///     GoodBoy { name: "Chonk", weight: f16::INFINITY },
    ///     GoodBoy { name: "Abs. Unit", weight: f16::NAN },
    ///     GoodBoy { name: "Floaty", weight: -5.0 },
    /// ];
    ///
    /// bois.sort_by(|a, b| a.weight.total_cmp(&b.weight));
    ///
    /// // `f16::NAN` could be positive or negative, which will affect the sort order.
    /// if f16::NAN.is_sign_negative() {
    ///     bois.into_iter().map(|b| b.weight)
    ///         .zip([f16::NAN, -5.0, 0.1, 10.0, 99.0, f16::INFINITY].iter())
    ///         .for_each(|(a, b)| assert_eq!(a.to_bits(), b.to_bits()))
    /// } else {
    ///     bois.into_iter().map(|b| b.weight)
    ///         .zip([-5.0, 0.1, 10.0, 99.0, f16::INFINITY, f16::NAN].iter())
    ///         .for_each(|(a, b)| assert_eq!(a.to_bits(), b.to_bits()))
    /// }
    /// # }
    /// ```
    #[inline]
    #[must_use]
    #[unstable(feature = "f16", issue = "116909")]
    #[rustc_const_unstable(feature = "const_cmp", issue = "143800")]
    pub const fn total_cmp(&self, other: &Self) -> crate::cmp::Ordering {
        let mut left = self.to_bits() as i16;
        let mut right = other.to_bits() as i16;

        // In case of negatives, flip all the bits except the sign
        // to achieve a similar layout as two's complement integers
        //
        // Why does this work? IEEE 754 floats consist of three fields:
        // Sign bit, exponent and mantissa. The set of exponent and mantissa
        // fields as a whole have the property that their bitwise order is
        // equal to the numeric magnitude where the magnitude is defined.
        // The magnitude is not normally defined on NaN values, but
        // IEEE 754 totalOrder defines the NaN values also to follow the
        // bitwise order. This leads to order explained in the doc comment.
        // However, the representation of magnitude is the same for negative
        // and positive numbers – only the sign bit is different.
        // To easily compare the floats as signed integers, we need to
        // flip the exponent and mantissa bits in case of negative numbers.
        // We effectively convert the numbers to "two's complement" form.
        //
        // To do the flipping, we construct a mask and XOR against it.
        // We branchlessly calculate an "all-ones except for the sign bit"
        // mask from negative-signed values: right shifting sign-extends
        // the integer, so we "fill" the mask with sign bits, and then
        // convert to unsigned to push one more zero bit.
        // On positive values, the mask is all zeros, so it's a no-op.
        left ^= (((left >> 15) as u16) >> 1) as i16;
        right ^= (((right >> 15) as u16) >> 1) as i16;

        left.cmp(&right)
    }

    /// Restrict a value to a certain interval unless it is NaN.
    ///
    /// Returns `max` if `self` is greater than `max`, and `min` if `self` is
    /// less than `min`. Otherwise this returns `self`.
    ///
    /// Note that this function returns NaN if the initial value was NaN as
    /// well. If the result is zero and among the three inputs `self`, `min`, and `max` there are
    /// zeros with different signs, either `0.0` or `-0.0` is returned non-deterministically.
    ///
    /// # Panics
    ///
    /// Panics if `min > max`, `min` is NaN, or `max` is NaN.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(f16)]
    /// # #[cfg(all(target_arch = "x86_64", target_os = "linux"))] {
    ///
    /// assert!((-3.0f16).clamp(-2.0, 1.0) == -2.0);
    /// assert!((0.0f16).clamp(-2.0, 1.0) == 0.0);
    /// assert!((2.0f16).clamp(-2.0, 1.0) == 1.0);
    /// assert!((f16::NAN).clamp(-2.0, 1.0).is_nan());
    ///
    /// // These always return zero, but the sign (which is ignored by `==`) is non-deterministic.
    /// assert!((0.0f16).clamp(-0.0, -0.0) == 0.0);
    /// assert!((1.0f16).clamp(-0.0, 0.0) == 0.0);
    /// // This is definitely a negative zero.
    /// assert!((-1.0f16).clamp(-0.0, 1.0).is_sign_negative());
    /// # }
    /// ```
    #[inline]
    #[unstable(feature = "f16", issue = "116909")]
    #[must_use = "method returns a new number and does not mutate the original value"]
    pub const fn clamp(mut self, min: f16, max: f16) -> f16 {
        const_assert!(
            min <= max,
            "min > max, or either was NaN",
            "min > max, or either was NaN. min = {min:?}, max = {max:?}",
            min: f16,
            max: f16,
        );

        if self < min {
            self = min;
        }
        if self > max {
            self = max;
        }
        self
    }

    /// Clamps this number to a symmetric range centered around zero.
    ///
    /// The method clamps the number's magnitude (absolute value) to be at most `limit`.
    ///
    /// This is functionally equivalent to `self.clamp(-limit, limit)`, but is more
    /// explicit about the intent.
    ///
    /// # Panics
    ///
    /// Panics if `limit` is negative or NaN, as this indicates a logic error.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(f16)]
    /// #![feature(clamp_magnitude)]
    /// # #[cfg(all(target_arch = "x86_64", target_os = "linux"))] {
    /// assert_eq!(5.0f16.clamp_magnitude(3.0), 3.0);
    /// assert_eq!((-5.0f16).clamp_magnitude(3.0), -3.0);
    /// assert_eq!(2.0f16.clamp_magnitude(3.0), 2.0);
    /// assert_eq!((-2.0f16).clamp_magnitude(3.0), -2.0);
    /// # }
    /// ```
    #[inline]
    #[unstable(feature = "clamp_magnitude", issue = "148519")]
    #[must_use = "this returns the clamped value and does not modify the original"]
    pub fn clamp_magnitude(self, limit: f16) -> f16 {
        assert!(limit >= 0.0, "limit must be non-negative");
        let limit = limit.abs(); // Canonicalises -0.0 to 0.0
        self.clamp(-limit, limit)
    }

    /// Computes the absolute value of `self`.
    ///
    /// This function always returns the precise result.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(f16)]
    /// # #[cfg(all(target_arch = "x86_64", target_os = "linux"))] {
    ///
    /// let x = 3.5_f16;
    /// let y = -3.5_f16;
    ///
    /// assert_eq!(x.abs(), x);
    /// assert_eq!(y.abs(), -y);
    ///
    /// assert!(f16::NAN.abs().is_nan());
    /// # }
    /// ```
    #[inline]
    #[unstable(feature = "f16", issue = "116909")]
    #[rustc_const_unstable(feature = "f16", issue = "116909")]
    #[must_use = "method returns a new number and does not mutate the original value"]
    pub const fn abs(self) -> Self {
        // FIXME(f16_f128): replace with `intrinsics::fabsf16` when available
        Self::from_bits(self.to_bits() & !(1 << 15))
    }

    /// Returns a number that represents the sign of `self`.
    ///
    /// - `1.0` if the number is positive, `+0.0` or `INFINITY`
    /// - `-1.0` if the number is negative, `-0.0` or `NEG_INFINITY`
    /// - NaN if the number is NaN
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(f16)]
    /// # #[cfg(all(target_arch = "x86_64", target_os = "linux"))] {
    ///
    /// let f = 3.5_f16;
    ///
    /// assert_eq!(f.signum(), 1.0);
    /// assert_eq!(f16::NEG_INFINITY.signum(), -1.0);
    ///
    /// assert!(f16::NAN.signum().is_nan());
    /// # }
    /// ```
    #[inline]
    #[unstable(feature = "f16", issue = "116909")]
    #[rustc_const_unstable(feature = "f16", issue = "116909")]
    #[must_use = "method returns a new number and does not mutate the original value"]
    pub const fn signum(self) -> f16 {
        if self.is_nan() { Self::NAN } else { 1.0_f16.copysign(self) }
    }

    /// Returns a number composed of the magnitude of `self` and the sign of
    /// `sign`.
    ///
    /// Equal to `self` if the sign of `self` and `sign` are the same, otherwise equal to `-self`.
    /// If `self` is a NaN, then a NaN with the same payload as `self` and the sign bit of `sign` is
    /// returned.
    ///
    /// If `sign` is a NaN, then this operation will still carry over its sign into the result. Note
    /// that IEEE 754 doesn't assign any meaning to the sign bit in case of a NaN, and as Rust
    /// doesn't guarantee that the bit patterns of NaNs are conserved over arithmetic operations, the
    /// result of `copysign` with `sign` being a NaN might produce an unexpected or non-portable
    /// result. See the [specification of NaN bit patterns](primitive@f32#nan-bit-patterns) for more
    /// info.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(f16)]
    /// # #[cfg(all(target_arch = "x86_64", target_os = "linux"))] {
    ///
    /// let f = 3.5_f16;
    ///
    /// assert_eq!(f.copysign(0.42), 3.5_f16);
    /// assert_eq!(f.copysign(-0.42), -3.5_f16);
    /// assert_eq!((-f).copysign(0.42), 3.5_f16);
    /// assert_eq!((-f).copysign(-0.42), -3.5_f16);
    ///
    /// assert!(f16::NAN.copysign(1.0).is_nan());
    /// # }
    /// ```
    #[inline]
    #[unstable(feature = "f16", issue = "116909")]
    #[rustc_const_unstable(feature = "f16", issue = "116909")]
    #[must_use = "method returns a new number and does not mutate the original value"]
    pub const fn copysign(self, sign: f16) -> f16 {
        intrinsics::copysignf16(self, sign)
    }

    /// Float addition that allows optimizations based on algebraic rules.
    ///
    /// See [algebraic operators](primitive@f32#algebraic-operators) for more info.
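    ///
    /// # Examples
    ///
    /// A small sketch; because the compiler may rearrange algebraic operations,
    /// the result is only checked loosely (gated like the other `f16` doctests
    /// in this file, with the `float_algebraic` feature enabled as well):
    ///
    /// ```
    /// #![feature(f16)]
    /// #![feature(float_algebraic)]
    /// # #[cfg(all(target_arch = "x86_64", target_os = "linux"))] {
    ///
    /// let sum = 1.0f16.algebraic_add(2.0);
    /// assert!((sum - 3.0).abs() <= 4.0 * f16::EPSILON);
    /// # }
    /// ```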
1403 #[must_use = "method returns a new number and does not mutate the original value"]
1404 #[unstable(feature = "float_algebraic", issue = "136469")]
1405 #[rustc_const_unstable(feature = "float_algebraic", issue = "136469")]
1406 #[inline]
1407 pub const fn algebraic_add(self, rhs: f16) -> f16 {
1408 intrinsics::fadd_algebraic(self, rhs)
1409 }
1410
1411 /// Float subtraction that allows optimizations based on algebraic rules.
1412 ///
1413 /// See [algebraic operators](primitive@f32#algebraic-operators) for more info.
1414 #[must_use = "method returns a new number and does not mutate the original value"]
1415 #[unstable(feature = "float_algebraic", issue = "136469")]
1416 #[rustc_const_unstable(feature = "float_algebraic", issue = "136469")]
1417 #[inline]
1418 pub const fn algebraic_sub(self, rhs: f16) -> f16 {
1419 intrinsics::fsub_algebraic(self, rhs)
1420 }
1421
1422 /// Float multiplication that allows optimizations based on algebraic rules.
1423 ///
1424 /// See [algebraic operators](primitive@f32#algebraic-operators) for more info.
1425 #[must_use = "method returns a new number and does not mutate the original value"]
1426 #[unstable(feature = "float_algebraic", issue = "136469")]
1427 #[rustc_const_unstable(feature = "float_algebraic", issue = "136469")]
1428 #[inline]
1429 pub const fn algebraic_mul(self, rhs: f16) -> f16 {
1430 intrinsics::fmul_algebraic(self, rhs)
1431 }
1432
1433 /// Float division that allows optimizations based on algebraic rules.
1434 ///
1435 /// See [algebraic operators](primitive@f32#algebraic-operators) for more info.
1436 #[must_use = "method returns a new number and does not mutate the original value"]
1437 #[unstable(feature = "float_algebraic", issue = "136469")]
1438 #[rustc_const_unstable(feature = "float_algebraic", issue = "136469")]
1439 #[inline]
1440 pub const fn algebraic_div(self, rhs: f16) -> f16 {
1441 intrinsics::fdiv_algebraic(self, rhs)
1442 }
1443
1444 /// Float remainder that allows optimizations based on algebraic rules.
1445 ///
1446 /// See [algebraic operators](primitive@f32#algebraic-operators) for more info.
1447 #[must_use = "method returns a new number and does not mutate the original value"]
1448 #[unstable(feature = "float_algebraic", issue = "136469")]
1449 #[rustc_const_unstable(feature = "float_algebraic", issue = "136469")]
1450 #[inline]
1451 pub const fn algebraic_rem(self, rhs: f16) -> f16 {
1452 intrinsics::frem_algebraic(self, rhs)
1453 }
1454}
1455
1456// Functions in this module fall into `core_float_math`
1457// #[unstable(feature = "core_float_math", issue = "137578")]
1458#[cfg(not(test))]
1459#[doc(test(attr(feature(cfg_target_has_reliable_f16_f128), expect(internal_features))))]
1460impl f16 {
1461 /// Returns the largest integer less than or equal to `self`.
1462 ///
1463 /// This function always returns the precise result.
1464 ///
1465 /// # Examples
1466 ///
1467 /// ```
1468 /// #![feature(f16)]
1469 /// # #[cfg(not(miri))]
1470 /// # #[cfg(target_has_reliable_f16_math)] {
1471 ///
1472 /// let f = 3.7_f16;
1473 /// let g = 3.0_f16;
1474 /// let h = -3.7_f16;
1475 ///
1476 /// assert_eq!(f.floor(), 3.0);
1477 /// assert_eq!(g.floor(), 3.0);
1478 /// assert_eq!(h.floor(), -4.0);
1479 /// # }
1480 /// ```
1481 #[inline]
1482 #[rustc_allow_incoherent_impl]
1483 #[unstable(feature = "f16", issue = "116909")]
1484 #[rustc_const_unstable(feature = "f16", issue = "116909")]
1485 #[must_use = "method returns a new number and does not mutate the original value"]
1486 pub const fn floor(self) -> f16 {
1487 intrinsics::floorf16(self)
1488 }
1489
1490 /// Returns the smallest integer greater than or equal to `self`.
1491 ///
1492 /// This function always returns the precise result.
1493 ///
1494 /// # Examples
1495 ///
1496 /// ```
1497 /// #![feature(f16)]
1498 /// # #[cfg(not(miri))]
1499 /// # #[cfg(target_has_reliable_f16_math)] {
1500 ///
1501 /// let f = 3.01_f16;
1502 /// let g = 4.0_f16;
1503 ///
1504 /// assert_eq!(f.ceil(), 4.0);
1505 /// assert_eq!(g.ceil(), 4.0);
1506 /// # }
1507 /// ```
1508 #[inline]
1509 #[doc(alias = "ceiling")]
1510 #[rustc_allow_incoherent_impl]
1511 #[unstable(feature = "f16", issue = "116909")]
1512 #[rustc_const_unstable(feature = "f16", issue = "116909")]
1513 #[must_use = "method returns a new number and does not mutate the original value"]
1514 pub const fn ceil(self) -> f16 {
1515 intrinsics::ceilf16(self)
1516 }
1517
1518 /// Returns the nearest integer to `self`. If a value is half-way between two
1519 /// integers, round away from `0.0`.
1520 ///
1521 /// This function always returns the precise result.
1522 ///
1523 /// # Examples
1524 ///
1525 /// ```
1526 /// #![feature(f16)]
1527 /// # #[cfg(not(miri))]
1528 /// # #[cfg(target_has_reliable_f16_math)] {
1529 ///
1530 /// let f = 3.3_f16;
1531 /// let g = -3.3_f16;
1532 /// let h = -3.7_f16;
1533 /// let i = 3.5_f16;
1534 /// let j = 4.5_f16;
1535 ///
1536 /// assert_eq!(f.round(), 3.0);
1537 /// assert_eq!(g.round(), -3.0);
1538 /// assert_eq!(h.round(), -4.0);
1539 /// assert_eq!(i.round(), 4.0);
1540 /// assert_eq!(j.round(), 5.0);
1541 /// # }
1542 /// ```
1543 #[inline]
1544 #[rustc_allow_incoherent_impl]
1545 #[unstable(feature = "f16", issue = "116909")]
1546 #[rustc_const_unstable(feature = "f16", issue = "116909")]
1547 #[must_use = "method returns a new number and does not mutate the original value"]
1548 pub const fn round(self) -> f16 {
1549 intrinsics::roundf16(self)
1550 }
1551
1552 /// Returns the nearest integer to `self`. Rounds half-way cases to the number
1553 /// with an even least significant digit.
1554 ///
1555 /// This function always returns the precise result.
1556 ///
1557 /// # Examples
1558 ///
1559 /// ```
1560 /// #![feature(f16)]
1561 /// # #[cfg(not(miri))]
1562 /// # #[cfg(target_has_reliable_f16_math)] {
1563 ///
1564 /// let f = 3.3_f16;
1565 /// let g = -3.3_f16;
1566 /// let h = 3.5_f16;
1567 /// let i = 4.5_f16;
1568 ///
1569 /// assert_eq!(f.round_ties_even(), 3.0);
1570 /// assert_eq!(g.round_ties_even(), -3.0);
1571 /// assert_eq!(h.round_ties_even(), 4.0);
1572 /// assert_eq!(i.round_ties_even(), 4.0);
1573 /// # }
1574 /// ```
1575 #[inline]
1576 #[rustc_allow_incoherent_impl]
1577 #[unstable(feature = "f16", issue = "116909")]
1578 #[rustc_const_unstable(feature = "f16", issue = "116909")]
1579 #[must_use = "method returns a new number and does not mutate the original value"]
1580 pub const fn round_ties_even(self) -> f16 {
1581 intrinsics::round_ties_even_f16(self)
1582 }
1583
1584 /// Returns the integer part of `self`.
1585 /// This means that non-integer numbers are always truncated towards zero.
1586 ///
1587 /// This function always returns the precise result.
1588 ///
1589 /// # Examples
1590 ///
1591 /// ```
1592 /// #![feature(f16)]
1593 /// # #[cfg(not(miri))]
1594 /// # #[cfg(target_has_reliable_f16_math)] {
1595 ///
1596 /// let f = 3.7_f16;
1597 /// let g = 3.0_f16;
1598 /// let h = -3.7_f16;
1599 ///
1600 /// assert_eq!(f.trunc(), 3.0);
1601 /// assert_eq!(g.trunc(), 3.0);
1602 /// assert_eq!(h.trunc(), -3.0);
1603 /// # }
1604 /// ```
1605 #[inline]
1606 #[doc(alias = "truncate")]
1607 #[rustc_allow_incoherent_impl]
1608 #[unstable(feature = "f16", issue = "116909")]
1609 #[rustc_const_unstable(feature = "f16", issue = "116909")]
1610 #[must_use = "method returns a new number and does not mutate the original value"]
1611 pub const fn trunc(self) -> f16 {
1612 intrinsics::truncf16(self)
1613 }
1614
1615 /// Returns the fractional part of `self`.
1616 ///
1617 /// This function always returns the precise result.
1618 ///
1619 /// # Examples
1620 ///
1621 /// ```
1622 /// #![feature(f16)]
1623 /// # #[cfg(not(miri))]
1624 /// # #[cfg(target_has_reliable_f16_math)] {
1625 ///
1626 /// let x = 3.6_f16;
1627 /// let y = -3.6_f16;
1628 /// let abs_difference_x = (x.fract() - 0.6).abs();
1629 /// let abs_difference_y = (y.fract() - (-0.6)).abs();
1630 ///
1631 /// assert!(abs_difference_x <= f16::EPSILON);
1632 /// assert!(abs_difference_y <= f16::EPSILON);
1633 /// # }
1634 /// ```
1635 #[inline]
1636 #[rustc_allow_incoherent_impl]
1637 #[unstable(feature = "f16", issue = "116909")]
1638 #[rustc_const_unstable(feature = "f16", issue = "116909")]
1639 #[must_use = "method returns a new number and does not mutate the original value"]
1640 pub const fn fract(self) -> f16 {
1641 self - self.trunc()
1642 }
1643
1644 /// Fused multiply-add. Computes `(self * a) + b` with only one rounding
1645 /// error, yielding a more accurate result than an unfused multiply-add.
1646 ///
1647 /// Using `mul_add` *may* be more performant than an unfused multiply-add if
1648 /// the target architecture has a dedicated `fma` CPU instruction. However,
1649 /// this is not always true and depends heavily on designing
1650 /// algorithms with the specific target hardware in mind.
1651 ///
1652 /// # Precision
1653 ///
1654 /// The result of this operation is guaranteed to be the rounded
1655 /// infinite-precision result. It is specified by IEEE 754 as
1656 /// `fusedMultiplyAdd` and guaranteed not to change.
1657 ///
1658 /// # Examples
1659 ///
1660 /// ```
1661 /// #![feature(f16)]
1662 /// # #[cfg(not(miri))]
1663 /// # #[cfg(target_has_reliable_f16_math)] {
1664 ///
1665 /// let m = 10.0_f16;
1666 /// let x = 4.0_f16;
1667 /// let b = 60.0_f16;
1668 ///
1669 /// assert_eq!(m.mul_add(x, b), 100.0);
1670 /// assert_eq!(m * x + b, 100.0);
1671 ///
1672 /// let one_plus_eps = 1.0_f16 + f16::EPSILON;
1673 /// let one_minus_eps = 1.0_f16 - f16::EPSILON;
1674 /// let minus_one = -1.0_f16;
1675 ///
1676 /// // The exact result (1 + eps) * (1 - eps) = 1 - eps * eps.
1677 /// assert_eq!(one_plus_eps.mul_add(one_minus_eps, minus_one), -f16::EPSILON * f16::EPSILON);
1678 /// // Different rounding with the non-fused multiply and add.
1679 /// assert_eq!(one_plus_eps * one_minus_eps + minus_one, 0.0);
1680 /// # }
1681 /// ```
1682 #[inline]
1683 #[rustc_allow_incoherent_impl]
1684 #[unstable(feature = "f16", issue = "116909")]
1685 #[doc(alias = "fmaf16", alias = "fusedMultiplyAdd")]
1686 #[must_use = "method returns a new number and does not mutate the original value"]
1687 #[rustc_const_unstable(feature = "const_mul_add", issue = "146724")]
1688 pub const fn mul_add(self, a: f16, b: f16) -> f16 {
1689 intrinsics::fmaf16(self, a, b)
1690 }
1691
1692 /// Calculates Euclidean division, the matching method for `rem_euclid`.
1693 ///
1694 /// This computes the integer `n` such that
1695 /// `self = n * rhs + self.rem_euclid(rhs)`.
1696 /// In other words, the result is `self / rhs` rounded to the integer `n`
1697 /// such that `self >= n * rhs`.
1698 ///
1699 /// # Precision
1700 ///
1701 /// The result of this operation is guaranteed to be the rounded
1702 /// infinite-precision result.
1703 ///
1704 /// # Examples
1705 ///
1706 /// ```
1707 /// #![feature(f16)]
1708 /// # #[cfg(not(miri))]
1709 /// # #[cfg(target_has_reliable_f16_math)] {
1710 ///
1711 /// let a: f16 = 7.0;
1712 /// let b = 4.0;
1713 /// assert_eq!(a.div_euclid(b), 1.0); // 7.0 > 4.0 * 1.0
1714 /// assert_eq!((-a).div_euclid(b), -2.0); // -7.0 >= 4.0 * -2.0
1715 /// assert_eq!(a.div_euclid(-b), -1.0); // 7.0 >= -4.0 * -1.0
1716 /// assert_eq!((-a).div_euclid(-b), 2.0); // -7.0 >= -4.0 * 2.0
1717 /// # }
1718 /// ```
1719 #[inline]
1720 #[rustc_allow_incoherent_impl]
1721 #[unstable(feature = "f16", issue = "116909")]
1722 #[must_use = "method returns a new number and does not mutate the original value"]
1723 pub fn div_euclid(self, rhs: f16) -> f16 {
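        // Truncate the quotient toward zero, then correct it when the truncating
        // remainder is negative: stepping `q` by one (down for positive `rhs`, up for
        // negative `rhs`) makes `self - q * rhs` nonnegative.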
1724 let q = (self / rhs).trunc();
1725 if self % rhs < 0.0 {
1726 return if rhs > 0.0 { q - 1.0 } else { q + 1.0 };
1727 }
1728 q
1729 }
1730
1731 /// Calculates the least nonnegative remainder of `self` when
1732 /// divided by `rhs`.
1733 ///
1734 /// In particular, the return value `r` satisfies `0.0 <= r < rhs.abs()` in
1735 /// most cases. However, due to a floating point round-off error it can
1736 /// result in `r == rhs.abs()`, violating the mathematical definition, if
1737 /// `self` is much smaller than `rhs.abs()` in magnitude and `self < 0.0`.
1738 /// This result is not an element of the function's codomain, but it is the
1739 /// closest floating point number in the real numbers and thus fulfills the
1740 /// property `self == self.div_euclid(rhs) * rhs + self.rem_euclid(rhs)`
1741 /// approximately.
1742 ///
1743 /// # Precision
1744 ///
1745 /// The result of this operation is guaranteed to be the rounded
1746 /// infinite-precision result.
1747 ///
1748 /// # Examples
1749 ///
1750 /// ```
1751 /// #![feature(f16)]
1752 /// # #[cfg(not(miri))]
1753 /// # #[cfg(target_has_reliable_f16_math)] {
1754 ///
1755 /// let a: f16 = 7.0;
1756 /// let b = 4.0;
1757 /// assert_eq!(a.rem_euclid(b), 3.0);
1758 /// assert_eq!((-a).rem_euclid(b), 1.0);
1759 /// assert_eq!(a.rem_euclid(-b), 3.0);
1760 /// assert_eq!((-a).rem_euclid(-b), 1.0);
1761 /// // limitation due to round-off error
1762 /// assert!((-f16::EPSILON).rem_euclid(3.0) != 0.0);
1763 /// # }
1764 /// ```
1765 #[inline]
1766 #[rustc_allow_incoherent_impl]
1767 #[doc(alias = "modulo", alias = "mod")]
1768 #[unstable(feature = "f16", issue = "116909")]
1769 #[must_use = "method returns a new number and does not mutate the original value"]
1770 pub fn rem_euclid(self, rhs: f16) -> f16 {
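        // `%` yields a remainder with the sign of `self`; shifting a negative
        // remainder by `rhs.abs()` moves it into `[0, rhs.abs())`, up to the
        // round-off caveat documented above.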
1771 let r = self % rhs;
1772 if r < 0.0 { r + rhs.abs() } else { r }
1773 }
1774
1775 /// Raises a number to an integer power.
1776 ///
1777 /// Using this function is generally faster than using `powf`.
1778 /// It might have a different sequence of rounding operations than `powf`,
1779 /// so the results are not guaranteed to agree.
1780 ///
1781 /// Note that this function is special in that it can return non-NaN results for NaN inputs. For
1782 /// example, `f16::powi(f16::NAN, 0)` returns `1.0`. However, if an input is a *signaling*
1783 /// NaN, then the result is non-deterministically either a NaN or the result that the
1784 /// corresponding quiet NaN would produce.
1785 ///
1786 /// # Unspecified precision
1787 ///
1788 /// The precision of this function is non-deterministic. This means it varies by platform
1789 /// and Rust version, and can even differ within the same execution from one invocation to the next.
1790 ///
1791 /// # Examples
1792 ///
1793 /// ```
1794 /// #![feature(f16)]
1795 /// # #[cfg(not(miri))]
1796 /// # #[cfg(target_has_reliable_f16_math)] {
1797 ///
1798 /// let x = 2.0_f16;
1799 /// let abs_difference = (x.powi(2) - (x * x)).abs();
1800 /// assert!(abs_difference <= f16::EPSILON);
1801 ///
1802 /// assert_eq!(f16::powi(f16::NAN, 0), 1.0);
1803 /// assert_eq!(f16::powi(0.0, 0), 1.0);
1804 /// # }
1805 /// ```
1806 #[inline]
1807 #[rustc_allow_incoherent_impl]
1808 #[unstable(feature = "f16", issue = "116909")]
1809 #[must_use = "method returns a new number and does not mutate the original value"]
1810 pub fn powi(self, n: i32) -> f16 {
1811 intrinsics::powif16(self, n)
1812 }
1813
1814 /// Returns the square root of a number.
1815 ///
1816 /// Returns NaN if `self` is a negative number other than `-0.0`.
1817 ///
1818 /// # Precision
1819 ///
1820 /// The result of this operation is guaranteed to be the rounded
1821 /// infinite-precision result. It is specified by IEEE 754 as `squareRoot`
1822 /// and guaranteed not to change.
1823 ///
1824 /// # Examples
1825 ///
1826 /// ```
1827 /// #![feature(f16)]
1828 /// # #[cfg(not(miri))]
1829 /// # #[cfg(target_has_reliable_f16_math)] {
1830 ///
1831 /// let positive = 4.0_f16;
1832 /// let negative = -4.0_f16;
1833 /// let negative_zero = -0.0_f16;
1834 ///
1835 /// assert_eq!(positive.sqrt(), 2.0);
1836 /// assert!(negative.sqrt().is_nan());
1837 /// assert!(negative_zero.sqrt() == negative_zero);
1838 /// # }
1839 /// ```
1840 #[inline]
1841 #[doc(alias = "squareRoot")]
1842 #[rustc_allow_incoherent_impl]
1843 #[unstable(feature = "f16", issue = "116909")]
1844 #[must_use = "method returns a new number and does not mutate the original value"]
1845 pub fn sqrt(self) -> f16 {
1846 intrinsics::sqrtf16(self)
1847 }
1848
1849 /// Returns the cube root of a number.
1850 ///
1851 /// # Unspecified precision
1852 ///
1853 /// The precision of this function is non-deterministic. This means it varies by platform
1854 /// and Rust version, and can even differ within the same execution from one invocation to the next.
1855 ///
1856 /// This function currently corresponds to the `cbrtf` function from libc on Unix
1857 /// and Windows (here computed via an `f32` intermediate). Note that this might change in the future.
1858 ///
1859 /// # Examples
1860 ///
1861 /// ```
1862 /// #![feature(f16)]
1863 /// # #[cfg(not(miri))]
1864 /// # #[cfg(target_has_reliable_f16_math)] {
1865 ///
1866 /// let x = 8.0f16;
1867 ///
1868 /// // x^(1/3) - 2 == 0
1869 /// let abs_difference = (x.cbrt() - 2.0).abs();
1870 ///
1871 /// assert!(abs_difference <= f16::EPSILON);
1872 /// # }
1873 /// ```
1874 #[inline]
1875 #[rustc_allow_incoherent_impl]
1876 #[unstable(feature = "f16", issue = "116909")]
1877 #[must_use = "method returns a new number and does not mutate the original value"]
1878 pub fn cbrt(self) -> f16 {
1879 libm::cbrtf(self as f32) as f16
1880 }
1881}