1 // Generated from vec.rs.tera template. Edit the template, not the generated file.
2
3 use crate::{f32::math, sse2::*, BVec4A, Vec2, Vec3, Vec3A};
4
5 #[cfg(not(target_arch = "spirv"))]
6 use core::fmt;
7 use core::iter::{Product, Sum};
8 use core::{f32, ops::*};
9
10 #[cfg(target_arch = "x86")]
11 use core::arch::x86::*;
12 #[cfg(target_arch = "x86_64")]
13 use core::arch::x86_64::*;
14
/// Private helper union used to bit-reinterpret a `[f32; 4]` as a [`Vec4`]
/// inside `const fn`s (`Vec4::new`/`Vec4::splat`), where SIMD set intrinsics
/// are not callable. `#[repr(C)]` makes both fields share the same 16 bytes.
#[repr(C)]
union UnionCast {
    a: [f32; 4],
    v: Vec4,
}
20
/// Creates a 4-dimensional vector.
///
/// Free-function shorthand for [`Vec4::new`].
#[inline(always)]
#[must_use]
pub const fn vec4(x: f32, y: f32, z: f32, w: f32) -> Vec4 {
    Vec4::new(x, y, z, w)
}
27
/// A 4-dimensional vector.
///
/// SIMD vector types are used for storage on supported platforms.
///
/// This type is 16 byte aligned.
// `#[repr(transparent)]` guarantees the same layout as the wrapped `__m128`,
// which the pointer casts in `to_array`/`AsRef`/`AsMut` rely on.
#[derive(Clone, Copy)]
#[repr(transparent)]
pub struct Vec4(pub(crate) __m128);
36
impl Vec4 {
    /// All zeroes.
    pub const ZERO: Self = Self::splat(0.0);

    /// All ones.
    pub const ONE: Self = Self::splat(1.0);

    /// All negative ones.
    pub const NEG_ONE: Self = Self::splat(-1.0);

    /// All `f32::MIN`.
    pub const MIN: Self = Self::splat(f32::MIN);

    /// All `f32::MAX`.
    pub const MAX: Self = Self::splat(f32::MAX);

    /// All `f32::NAN`.
    pub const NAN: Self = Self::splat(f32::NAN);

    /// All `f32::INFINITY`.
    pub const INFINITY: Self = Self::splat(f32::INFINITY);

    /// All `f32::NEG_INFINITY`.
    pub const NEG_INFINITY: Self = Self::splat(f32::NEG_INFINITY);

    /// A unit vector pointing along the positive X axis.
    pub const X: Self = Self::new(1.0, 0.0, 0.0, 0.0);

    /// A unit vector pointing along the positive Y axis.
    pub const Y: Self = Self::new(0.0, 1.0, 0.0, 0.0);

    /// A unit vector pointing along the positive Z axis.
    pub const Z: Self = Self::new(0.0, 0.0, 1.0, 0.0);

    /// A unit vector pointing along the positive W axis.
    pub const W: Self = Self::new(0.0, 0.0, 0.0, 1.0);

    /// A unit vector pointing along the negative X axis.
    pub const NEG_X: Self = Self::new(-1.0, 0.0, 0.0, 0.0);

    /// A unit vector pointing along the negative Y axis.
    pub const NEG_Y: Self = Self::new(0.0, -1.0, 0.0, 0.0);

    /// A unit vector pointing along the negative Z axis.
    pub const NEG_Z: Self = Self::new(0.0, 0.0, -1.0, 0.0);

    /// A unit vector pointing along the negative W axis.
    pub const NEG_W: Self = Self::new(0.0, 0.0, 0.0, -1.0);

    /// The unit axes.
    pub const AXES: [Self; 4] = [Self::X, Self::Y, Self::Z, Self::W];

    /// Creates a new vector.
    #[inline(always)]
    #[must_use]
    pub const fn new(x: f32, y: f32, z: f32, w: f32) -> Self {
        // Union cast instead of `_mm_set_ps` so this stays a `const fn`.
        unsafe { UnionCast { a: [x, y, z, w] }.v }
    }

    /// Creates a vector with all elements set to `v`.
    #[inline]
    #[must_use]
    pub const fn splat(v: f32) -> Self {
        unsafe { UnionCast { a: [v; 4] }.v }
    }

    /// Creates a vector from the elements in `if_true` and `if_false`, selecting which to use
    /// for each element of the result.
    ///
    /// A true element in the mask uses the corresponding element from `if_true`, and false
    /// uses the element from `if_false`.
    #[inline]
    #[must_use]
    pub fn select(mask: BVec4A, if_true: Self, if_false: Self) -> Self {
        // Branchless blend: (mask & if_true) | (!mask & if_false).
        Self(unsafe {
            _mm_or_ps(
                _mm_andnot_ps(mask.0, if_false.0),
                _mm_and_ps(if_true.0, mask.0),
            )
        })
    }

    /// Creates a new vector from an array.
    #[inline]
    #[must_use]
    pub const fn from_array(a: [f32; 4]) -> Self {
        Self::new(a[0], a[1], a[2], a[3])
    }

    /// `[x, y, z, w]`
    #[inline]
    #[must_use]
    pub const fn to_array(&self) -> [f32; 4] {
        // Valid because `Vec4` is `#[repr(transparent)]` over a 16-byte SIMD
        // value with the same layout as `[f32; 4]`.
        unsafe { *(self as *const Vec4 as *const [f32; 4]) }
    }

    /// Creates a vector from the first 4 values in `slice`.
    ///
    /// # Panics
    ///
    /// Panics if `slice` is less than 4 elements long.
    #[inline]
    #[must_use]
    pub const fn from_slice(slice: &[f32]) -> Self {
        Self::new(slice[0], slice[1], slice[2], slice[3])
    }

    /// Writes the elements of `self` to the first 4 elements in `slice`.
    ///
    /// # Panics
    ///
    /// Panics if `slice` is less than 4 elements long.
    #[inline]
    pub fn write_to_slice(self, slice: &mut [f32]) {
        unsafe {
            // Length checked before the unaligned store below.
            assert!(slice.len() >= 4);
            _mm_storeu_ps(slice.as_mut_ptr(), self.0);
        }
    }

    /// Creates a 3D vector from the `x`, `y` and `z` elements of `self`, discarding `w`.
    ///
    /// Truncation to [`Vec3`] may also be performed by using [`self.xyz()`][crate::swizzles::Vec4Swizzles::xyz()].
    ///
    /// To truncate to [`Vec3A`] use [`Vec3A::from()`].
    #[inline]
    #[must_use]
    pub fn truncate(self) -> Vec3 {
        use crate::swizzles::Vec4Swizzles;
        self.xyz()
    }

    /// Computes the dot product of `self` and `rhs`.
    #[inline]
    #[must_use]
    pub fn dot(self, rhs: Self) -> f32 {
        unsafe { dot4(self.0, rhs.0) }
    }

    /// Returns a vector where every component is the dot product of `self` and `rhs`.
    #[inline]
    #[must_use]
    pub fn dot_into_vec(self, rhs: Self) -> Self {
        Self(unsafe { dot4_into_m128(self.0, rhs.0) })
    }

    /// Returns a vector containing the minimum values for each element of `self` and `rhs`.
    ///
    /// In other words this computes `[self.x.min(rhs.x), self.y.min(rhs.y), ..]`.
    #[inline]
    #[must_use]
    pub fn min(self, rhs: Self) -> Self {
        Self(unsafe { _mm_min_ps(self.0, rhs.0) })
    }

    /// Returns a vector containing the maximum values for each element of `self` and `rhs`.
    ///
    /// In other words this computes `[self.x.max(rhs.x), self.y.max(rhs.y), ..]`.
    #[inline]
    #[must_use]
    pub fn max(self, rhs: Self) -> Self {
        Self(unsafe { _mm_max_ps(self.0, rhs.0) })
    }

    /// Component-wise clamping of values, similar to [`f32::clamp`].
    ///
    /// Each element in `min` must be less-or-equal to the corresponding element in `max`.
    ///
    /// # Panics
    ///
    /// Will panic if `min` is greater than `max` when `glam_assert` is enabled.
    #[inline]
    #[must_use]
    pub fn clamp(self, min: Self, max: Self) -> Self {
        glam_assert!(min.cmple(max).all(), "clamp: expected min <= max");
        self.max(min).min(max)
    }

    /// Returns the horizontal minimum of `self`.
    ///
    /// In other words this computes `min(x, y, ..)`.
    #[inline]
    #[must_use]
    pub fn min_element(self) -> f32 {
        unsafe {
            let v = self.0;
            // Fold lanes pairwise via shuffles, then extract the low lane.
            let v = _mm_min_ps(v, _mm_shuffle_ps(v, v, 0b00_00_11_10));
            let v = _mm_min_ps(v, _mm_shuffle_ps(v, v, 0b00_00_00_01));
            _mm_cvtss_f32(v)
        }
    }

    /// Returns the horizontal maximum of `self`.
    ///
    /// In other words this computes `max(x, y, ..)`.
    #[inline]
    #[must_use]
    pub fn max_element(self) -> f32 {
        unsafe {
            let v = self.0;
            let v = _mm_max_ps(v, _mm_shuffle_ps(v, v, 0b00_00_11_10));
            let v = _mm_max_ps(v, _mm_shuffle_ps(v, v, 0b00_00_00_01));
            _mm_cvtss_f32(v)
        }
    }

    /// Returns a vector mask containing the result of a `==` comparison for each element of
    /// `self` and `rhs`.
    ///
    /// In other words, this computes `[self.x == rhs.x, self.y == rhs.y, ..]` for all
    /// elements.
    #[inline]
    #[must_use]
    pub fn cmpeq(self, rhs: Self) -> BVec4A {
        BVec4A(unsafe { _mm_cmpeq_ps(self.0, rhs.0) })
    }

    /// Returns a vector mask containing the result of a `!=` comparison for each element of
    /// `self` and `rhs`.
    ///
    /// In other words this computes `[self.x != rhs.x, self.y != rhs.y, ..]` for all
    /// elements.
    #[inline]
    #[must_use]
    pub fn cmpne(self, rhs: Self) -> BVec4A {
        BVec4A(unsafe { _mm_cmpneq_ps(self.0, rhs.0) })
    }

    /// Returns a vector mask containing the result of a `>=` comparison for each element of
    /// `self` and `rhs`.
    ///
    /// In other words this computes `[self.x >= rhs.x, self.y >= rhs.y, ..]` for all
    /// elements.
    #[inline]
    #[must_use]
    pub fn cmpge(self, rhs: Self) -> BVec4A {
        BVec4A(unsafe { _mm_cmpge_ps(self.0, rhs.0) })
    }

    /// Returns a vector mask containing the result of a `>` comparison for each element of
    /// `self` and `rhs`.
    ///
    /// In other words this computes `[self.x > rhs.x, self.y > rhs.y, ..]` for all
    /// elements.
    #[inline]
    #[must_use]
    pub fn cmpgt(self, rhs: Self) -> BVec4A {
        BVec4A(unsafe { _mm_cmpgt_ps(self.0, rhs.0) })
    }

    /// Returns a vector mask containing the result of a `<=` comparison for each element of
    /// `self` and `rhs`.
    ///
    /// In other words this computes `[self.x <= rhs.x, self.y <= rhs.y, ..]` for all
    /// elements.
    #[inline]
    #[must_use]
    pub fn cmple(self, rhs: Self) -> BVec4A {
        BVec4A(unsafe { _mm_cmple_ps(self.0, rhs.0) })
    }

    /// Returns a vector mask containing the result of a `<` comparison for each element of
    /// `self` and `rhs`.
    ///
    /// In other words this computes `[self.x < rhs.x, self.y < rhs.y, ..]` for all
    /// elements.
    #[inline]
    #[must_use]
    pub fn cmplt(self, rhs: Self) -> BVec4A {
        BVec4A(unsafe { _mm_cmplt_ps(self.0, rhs.0) })
    }

    /// Returns a vector containing the absolute value of each element of `self`.
    #[inline]
    #[must_use]
    pub fn abs(self) -> Self {
        Self(unsafe { crate::sse2::m128_abs(self.0) })
    }

    /// Returns a vector with elements representing the sign of `self`.
    ///
    /// - `1.0` if the number is positive, `+0.0` or `INFINITY`
    /// - `-1.0` if the number is negative, `-0.0` or `NEG_INFINITY`
    /// - `NAN` if the number is `NAN`
    #[inline]
    #[must_use]
    pub fn signum(self) -> Self {
        unsafe {
            // Copy each lane's sign bit onto 1.0, then restore NaN lanes.
            let result = Self(_mm_or_ps(_mm_and_ps(self.0, Self::NEG_ONE.0), Self::ONE.0));
            let mask = self.is_nan_mask();
            Self::select(mask, self, result)
        }
    }

    /// Returns a vector with signs of `rhs` and the magnitudes of `self`.
    #[inline]
    #[must_use]
    pub fn copysign(self, rhs: Self) -> Self {
        unsafe {
            // `-0.0` is all-zero except the sign bit; use it to splice
            // `rhs`'s sign bits onto `self`'s magnitude bits.
            let mask = Self::splat(-0.0);
            Self(_mm_or_ps(
                _mm_and_ps(rhs.0, mask.0),
                _mm_andnot_ps(mask.0, self.0),
            ))
        }
    }

    /// Returns a bitmask with the lowest 4 bits set to the sign bits from the elements of `self`.
    ///
    /// A negative element results in a `1` bit and a positive element in a `0` bit. Element `x` goes
    /// into the first lowest bit, element `y` into the second, etc.
    #[inline]
    #[must_use]
    pub fn is_negative_bitmask(self) -> u32 {
        unsafe { _mm_movemask_ps(self.0) as u32 }
    }

    /// Returns `true` if, and only if, all elements are finite. If any element is either
    /// `NaN`, positive or negative infinity, this will return `false`.
    #[inline]
    #[must_use]
    pub fn is_finite(self) -> bool {
        self.x.is_finite() && self.y.is_finite() && self.z.is_finite() && self.w.is_finite()
    }

    /// Returns `true` if any elements are `NaN`.
    #[inline]
    #[must_use]
    pub fn is_nan(self) -> bool {
        self.is_nan_mask().any()
    }

    /// Performs `is_nan` on each element of self, returning a vector mask of the results.
    ///
    /// In other words, this computes `[x.is_nan(), y.is_nan(), z.is_nan(), w.is_nan()]`.
    #[inline]
    #[must_use]
    pub fn is_nan_mask(self) -> BVec4A {
        // A lane is unordered with itself iff it is NaN.
        BVec4A(unsafe { _mm_cmpunord_ps(self.0, self.0) })
    }

    /// Computes the length of `self`.
    #[doc(alias = "magnitude")]
    #[inline]
    #[must_use]
    pub fn length(self) -> f32 {
        unsafe {
            let dot = dot4_in_x(self.0, self.0);
            _mm_cvtss_f32(_mm_sqrt_ps(dot))
        }
    }

    /// Computes the squared length of `self`.
    ///
    /// This is faster than `length()` as it avoids a square root operation.
    #[doc(alias = "magnitude2")]
    #[inline]
    #[must_use]
    pub fn length_squared(self) -> f32 {
        self.dot(self)
    }

    /// Computes `1.0 / length()`.
    ///
    /// For valid results, `self` must _not_ be of length zero.
    #[inline]
    #[must_use]
    pub fn length_recip(self) -> f32 {
        unsafe {
            let dot = dot4_in_x(self.0, self.0);
            _mm_cvtss_f32(_mm_div_ps(Self::ONE.0, _mm_sqrt_ps(dot)))
        }
    }

    /// Computes the Euclidean distance between two points in space.
    #[inline]
    #[must_use]
    pub fn distance(self, rhs: Self) -> f32 {
        (self - rhs).length()
    }

    /// Computes the squared Euclidean distance between two points in space.
    #[inline]
    #[must_use]
    pub fn distance_squared(self, rhs: Self) -> f32 {
        (self - rhs).length_squared()
    }

    /// Returns the element-wise quotient of [Euclidean division] of `self` by `rhs`.
    ///
    /// [Euclidean division]: f32::div_euclid
    #[inline]
    #[must_use]
    pub fn div_euclid(self, rhs: Self) -> Self {
        Self::new(
            math::div_euclid(self.x, rhs.x),
            math::div_euclid(self.y, rhs.y),
            math::div_euclid(self.z, rhs.z),
            math::div_euclid(self.w, rhs.w),
        )
    }

    /// Returns the element-wise remainder of [Euclidean division] of `self` by `rhs`.
    ///
    /// [Euclidean division]: f32::rem_euclid
    #[inline]
    #[must_use]
    pub fn rem_euclid(self, rhs: Self) -> Self {
        Self::new(
            math::rem_euclid(self.x, rhs.x),
            math::rem_euclid(self.y, rhs.y),
            math::rem_euclid(self.z, rhs.z),
            math::rem_euclid(self.w, rhs.w),
        )
    }

    /// Returns `self` normalized to length 1.0.
    ///
    /// For valid results, `self` must _not_ be of length zero, nor very close to zero.
    ///
    /// See also [`Self::try_normalize()`] and [`Self::normalize_or_zero()`].
    ///
    /// # Panics
    ///
    /// Will panic if `self` is zero length when `glam_assert` is enabled.
    #[inline]
    #[must_use]
    pub fn normalize(self) -> Self {
        unsafe {
            let length = _mm_sqrt_ps(dot4_into_m128(self.0, self.0));
            #[allow(clippy::let_and_return)]
            let normalized = Self(_mm_div_ps(self.0, length));
            glam_assert!(normalized.is_finite());
            normalized
        }
    }

    /// Returns `self` normalized to length 1.0 if possible, else returns `None`.
    ///
    /// In particular, if the input is zero (or very close to zero), or non-finite,
    /// the result of this operation will be `None`.
    ///
    /// See also [`Self::normalize_or_zero()`].
    #[inline]
    #[must_use]
    pub fn try_normalize(self) -> Option<Self> {
        let rcp = self.length_recip();
        if rcp.is_finite() && rcp > 0.0 {
            Some(self * rcp)
        } else {
            None
        }
    }

    /// Returns `self` normalized to length 1.0 if possible, else returns zero.
    ///
    /// In particular, if the input is zero (or very close to zero), or non-finite,
    /// the result of this operation will be zero.
    ///
    /// See also [`Self::try_normalize()`].
    #[inline]
    #[must_use]
    pub fn normalize_or_zero(self) -> Self {
        let rcp = self.length_recip();
        if rcp.is_finite() && rcp > 0.0 {
            self * rcp
        } else {
            Self::ZERO
        }
    }

    /// Returns whether `self` is length `1.0` or not.
    ///
    /// Uses a precision threshold of `1e-4` applied to the squared length.
    #[inline]
    #[must_use]
    pub fn is_normalized(self) -> bool {
        // TODO: do something with epsilon
        math::abs(self.length_squared() - 1.0) <= 1e-4
    }

    /// Returns the vector projection of `self` onto `rhs`.
    ///
    /// `rhs` must be of non-zero length.
    ///
    /// # Panics
    ///
    /// Will panic if `rhs` is zero length when `glam_assert` is enabled.
    #[inline]
    #[must_use]
    pub fn project_onto(self, rhs: Self) -> Self {
        let other_len_sq_rcp = rhs.dot(rhs).recip();
        glam_assert!(other_len_sq_rcp.is_finite());
        rhs * self.dot(rhs) * other_len_sq_rcp
    }

    /// Returns the vector rejection of `self` from `rhs`.
    ///
    /// The vector rejection is the vector perpendicular to the projection of `self` onto
    /// `rhs`, in other words the result of `self - self.project_onto(rhs)`.
    ///
    /// `rhs` must be of non-zero length.
    ///
    /// # Panics
    ///
    /// Will panic if `rhs` has a length of zero when `glam_assert` is enabled.
    #[inline]
    #[must_use]
    pub fn reject_from(self, rhs: Self) -> Self {
        self - self.project_onto(rhs)
    }

    /// Returns the vector projection of `self` onto `rhs`.
    ///
    /// `rhs` must be normalized.
    ///
    /// # Panics
    ///
    /// Will panic if `rhs` is not normalized when `glam_assert` is enabled.
    #[inline]
    #[must_use]
    pub fn project_onto_normalized(self, rhs: Self) -> Self {
        glam_assert!(rhs.is_normalized());
        rhs * self.dot(rhs)
    }

    /// Returns the vector rejection of `self` from `rhs`.
    ///
    /// The vector rejection is the vector perpendicular to the projection of `self` onto
    /// `rhs`, in other words the result of `self - self.project_onto(rhs)`.
    ///
    /// `rhs` must be normalized.
    ///
    /// # Panics
    ///
    /// Will panic if `rhs` is not normalized when `glam_assert` is enabled.
    #[inline]
    #[must_use]
    pub fn reject_from_normalized(self, rhs: Self) -> Self {
        self - self.project_onto_normalized(rhs)
    }

    /// Returns a vector containing the nearest integer to a number for each element of `self`.
    /// Round half-way cases away from 0.0.
    #[inline]
    #[must_use]
    pub fn round(self) -> Self {
        Self(unsafe { m128_round(self.0) })
    }

    /// Returns a vector containing the largest integer less than or equal to a number for each
    /// element of `self`.
    #[inline]
    #[must_use]
    pub fn floor(self) -> Self {
        Self(unsafe { m128_floor(self.0) })
    }

    /// Returns a vector containing the smallest integer greater than or equal to a number for
    /// each element of `self`.
    #[inline]
    #[must_use]
    pub fn ceil(self) -> Self {
        Self(unsafe { m128_ceil(self.0) })
    }

    /// Returns a vector containing the integer part of each element of `self`. This means numbers are
    /// always truncated towards zero.
    #[inline]
    #[must_use]
    pub fn trunc(self) -> Self {
        Self(unsafe { m128_trunc(self.0) })
    }

    /// Returns a vector containing the fractional part of the vector, e.g. `self -
    /// self.floor()`.
    ///
    /// Note that this is fast but not precise for large numbers.
    #[inline]
    #[must_use]
    pub fn fract(self) -> Self {
        self - self.floor()
    }

    /// Returns a vector containing `e^self` (the exponential function) for each element of
    /// `self`.
    #[inline]
    #[must_use]
    pub fn exp(self) -> Self {
        Self::new(
            math::exp(self.x),
            math::exp(self.y),
            math::exp(self.z),
            math::exp(self.w),
        )
    }

    /// Returns a vector containing each element of `self` raised to the power of `n`.
    #[inline]
    #[must_use]
    pub fn powf(self, n: f32) -> Self {
        Self::new(
            math::powf(self.x, n),
            math::powf(self.y, n),
            math::powf(self.z, n),
            math::powf(self.w, n),
        )
    }

    /// Returns a vector containing the reciprocal `1.0/n` of each element of `self`.
    #[inline]
    #[must_use]
    pub fn recip(self) -> Self {
        Self(unsafe { _mm_div_ps(Self::ONE.0, self.0) })
    }

    /// Performs a linear interpolation between `self` and `rhs` based on the value `s`.
    ///
    /// When `s` is `0.0`, the result will be equal to `self`. When `s` is `1.0`, the result
    /// will be equal to `rhs`. When `s` is outside of range `[0, 1]`, the result is linearly
    /// extrapolated.
    #[doc(alias = "mix")]
    #[inline]
    #[must_use]
    pub fn lerp(self, rhs: Self, s: f32) -> Self {
        self + ((rhs - self) * s)
    }

    /// Returns true if the absolute difference of all elements between `self` and `rhs` is
    /// less than or equal to `max_abs_diff`.
    ///
    /// This can be used to compare if two vectors contain similar elements. It works best when
    /// comparing with a known value. The `max_abs_diff` that should be used depends on
    /// the values being compared against.
    ///
    /// For more see
    /// [comparing floating point numbers](https://randomascii.wordpress.com/2012/02/25/comparing-floating-point-numbers-2012-edition/).
    #[inline]
    #[must_use]
    pub fn abs_diff_eq(self, rhs: Self, max_abs_diff: f32) -> bool {
        self.sub(rhs).abs().cmple(Self::splat(max_abs_diff)).all()
    }

    /// Returns a vector with a length no less than `min` and no more than `max`
    ///
    /// # Panics
    ///
    /// Will panic if `min` is greater than `max` when `glam_assert` is enabled.
    #[inline]
    #[must_use]
    pub fn clamp_length(self, min: f32, max: f32) -> Self {
        glam_assert!(min <= max);
        let length_sq = self.length_squared();
        if length_sq < min * min {
            min * (self / math::sqrt(length_sq))
        } else if length_sq > max * max {
            max * (self / math::sqrt(length_sq))
        } else {
            self
        }
    }

    /// Returns a vector with a length no more than `max`
    #[inline]
    #[must_use]
    pub fn clamp_length_max(self, max: f32) -> Self {
        let length_sq = self.length_squared();
        if length_sq > max * max {
            max * (self / math::sqrt(length_sq))
        } else {
            self
        }
    }

    /// Returns a vector with a length no less than `min`
    #[inline]
    #[must_use]
    pub fn clamp_length_min(self, min: f32) -> Self {
        let length_sq = self.length_squared();
        if length_sq < min * min {
            min * (self / math::sqrt(length_sq))
        } else {
            self
        }
    }

    /// Fused multiply-add. Computes `(self * a) + b` element-wise with only one rounding
    /// error, yielding a more accurate result than an unfused multiply-add.
    ///
    /// Using `mul_add` *may* be more performant than an unfused multiply-add if the target
    /// architecture has a dedicated fma CPU instruction. However, this is not always true,
    /// and will be heavily dependant on designing algorithms with specific target hardware in
    /// mind.
    #[inline]
    #[must_use]
    pub fn mul_add(self, a: Self, b: Self) -> Self {
        // Hardware FMA when compiled with the `fma` target feature, otherwise
        // a per-lane scalar fallback.
        #[cfg(target_feature = "fma")]
        unsafe {
            Self(_mm_fmadd_ps(self.0, a.0, b.0))
        }
        #[cfg(not(target_feature = "fma"))]
        Self::new(
            math::mul_add(self.x, a.x, b.x),
            math::mul_add(self.y, a.y, b.y),
            math::mul_add(self.z, a.z, b.z),
            math::mul_add(self.w, a.w, b.w),
        )
    }

    /// Casts all elements of `self` to `f64`.
    #[inline]
    #[must_use]
    pub fn as_dvec4(&self) -> crate::DVec4 {
        crate::DVec4::new(self.x as f64, self.y as f64, self.z as f64, self.w as f64)
    }

    /// Casts all elements of `self` to `i16`.
    #[inline]
    #[must_use]
    pub fn as_i16vec4(&self) -> crate::I16Vec4 {
        crate::I16Vec4::new(self.x as i16, self.y as i16, self.z as i16, self.w as i16)
    }

    /// Casts all elements of `self` to `u16`.
    #[inline]
    #[must_use]
    pub fn as_u16vec4(&self) -> crate::U16Vec4 {
        crate::U16Vec4::new(self.x as u16, self.y as u16, self.z as u16, self.w as u16)
    }

    /// Casts all elements of `self` to `i32`.
    #[inline]
    #[must_use]
    pub fn as_ivec4(&self) -> crate::IVec4 {
        crate::IVec4::new(self.x as i32, self.y as i32, self.z as i32, self.w as i32)
    }

    /// Casts all elements of `self` to `u32`.
    #[inline]
    #[must_use]
    pub fn as_uvec4(&self) -> crate::UVec4 {
        crate::UVec4::new(self.x as u32, self.y as u32, self.z as u32, self.w as u32)
    }

    /// Casts all elements of `self` to `i64`.
    #[inline]
    #[must_use]
    pub fn as_i64vec4(&self) -> crate::I64Vec4 {
        crate::I64Vec4::new(self.x as i64, self.y as i64, self.z as i64, self.w as i64)
    }

    /// Casts all elements of `self` to `u64`.
    #[inline]
    #[must_use]
    pub fn as_u64vec4(&self) -> crate::U64Vec4 {
        crate::U64Vec4::new(self.x as u64, self.y as u64, self.z as u64, self.w as u64)
    }
}
793
impl Default for Vec4 {
    /// Returns [`Vec4::ZERO`].
    #[inline(always)]
    fn default() -> Self {
        Self::ZERO
    }
}
800
impl PartialEq for Vec4 {
    /// Lane-wise `==`; true only when all four lanes compare equal.
    #[inline]
    fn eq(&self, rhs: &Self) -> bool {
        self.cmpeq(*rhs).all()
    }
}
807
impl Div<Vec4> for Vec4 {
    type Output = Self;
    /// Element-wise division; the primitive the scalar `Div` impls build on.
    #[inline]
    fn div(self, rhs: Self) -> Self {
        Self(unsafe { _mm_div_ps(self.0, rhs.0) })
    }
}
815
816 impl DivAssign<Vec4> for Vec4 {
817 #[inline]
div_assign(&mut self, rhs: Self)818 fn div_assign(&mut self, rhs: Self) {
819 self.0 = unsafe { _mm_div_ps(self.0, rhs.0) };
820 }
821 }
822
823 impl Div<f32> for Vec4 {
824 type Output = Self;
825 #[inline]
div(self, rhs: f32) -> Self826 fn div(self, rhs: f32) -> Self {
827 Self(unsafe { _mm_div_ps(self.0, _mm_set1_ps(rhs)) })
828 }
829 }
830
831 impl DivAssign<f32> for Vec4 {
832 #[inline]
div_assign(&mut self, rhs: f32)833 fn div_assign(&mut self, rhs: f32) {
834 self.0 = unsafe { _mm_div_ps(self.0, _mm_set1_ps(rhs)) };
835 }
836 }
837
838 impl Div<Vec4> for f32 {
839 type Output = Vec4;
840 #[inline]
div(self, rhs: Vec4) -> Vec4841 fn div(self, rhs: Vec4) -> Vec4 {
842 Vec4(unsafe { _mm_div_ps(_mm_set1_ps(self), rhs.0) })
843 }
844 }
845
impl Mul<Vec4> for Vec4 {
    type Output = Self;
    /// Element-wise multiplication; the primitive the scalar `Mul` impls build on.
    #[inline]
    fn mul(self, rhs: Self) -> Self {
        Self(unsafe { _mm_mul_ps(self.0, rhs.0) })
    }
}
853
854 impl MulAssign<Vec4> for Vec4 {
855 #[inline]
mul_assign(&mut self, rhs: Self)856 fn mul_assign(&mut self, rhs: Self) {
857 self.0 = unsafe { _mm_mul_ps(self.0, rhs.0) };
858 }
859 }
860
861 impl Mul<f32> for Vec4 {
862 type Output = Self;
863 #[inline]
mul(self, rhs: f32) -> Self864 fn mul(self, rhs: f32) -> Self {
865 Self(unsafe { _mm_mul_ps(self.0, _mm_set1_ps(rhs)) })
866 }
867 }
868
869 impl MulAssign<f32> for Vec4 {
870 #[inline]
mul_assign(&mut self, rhs: f32)871 fn mul_assign(&mut self, rhs: f32) {
872 self.0 = unsafe { _mm_mul_ps(self.0, _mm_set1_ps(rhs)) };
873 }
874 }
875
876 impl Mul<Vec4> for f32 {
877 type Output = Vec4;
878 #[inline]
mul(self, rhs: Vec4) -> Vec4879 fn mul(self, rhs: Vec4) -> Vec4 {
880 Vec4(unsafe { _mm_mul_ps(_mm_set1_ps(self), rhs.0) })
881 }
882 }
883
impl Add<Vec4> for Vec4 {
    type Output = Self;
    /// Element-wise addition; the primitive the scalar `Add` impls build on.
    #[inline]
    fn add(self, rhs: Self) -> Self {
        Self(unsafe { _mm_add_ps(self.0, rhs.0) })
    }
}
891
892 impl AddAssign<Vec4> for Vec4 {
893 #[inline]
add_assign(&mut self, rhs: Self)894 fn add_assign(&mut self, rhs: Self) {
895 self.0 = unsafe { _mm_add_ps(self.0, rhs.0) };
896 }
897 }
898
899 impl Add<f32> for Vec4 {
900 type Output = Self;
901 #[inline]
add(self, rhs: f32) -> Self902 fn add(self, rhs: f32) -> Self {
903 Self(unsafe { _mm_add_ps(self.0, _mm_set1_ps(rhs)) })
904 }
905 }
906
907 impl AddAssign<f32> for Vec4 {
908 #[inline]
add_assign(&mut self, rhs: f32)909 fn add_assign(&mut self, rhs: f32) {
910 self.0 = unsafe { _mm_add_ps(self.0, _mm_set1_ps(rhs)) };
911 }
912 }
913
914 impl Add<Vec4> for f32 {
915 type Output = Vec4;
916 #[inline]
add(self, rhs: Vec4) -> Vec4917 fn add(self, rhs: Vec4) -> Vec4 {
918 Vec4(unsafe { _mm_add_ps(_mm_set1_ps(self), rhs.0) })
919 }
920 }
921
impl Sub<Vec4> for Vec4 {
    type Output = Self;
    /// Element-wise subtraction; the primitive the scalar `Sub` impls build on.
    #[inline]
    fn sub(self, rhs: Self) -> Self {
        Self(unsafe { _mm_sub_ps(self.0, rhs.0) })
    }
}
929
930 impl SubAssign<Vec4> for Vec4 {
931 #[inline]
sub_assign(&mut self, rhs: Vec4)932 fn sub_assign(&mut self, rhs: Vec4) {
933 self.0 = unsafe { _mm_sub_ps(self.0, rhs.0) };
934 }
935 }
936
937 impl Sub<f32> for Vec4 {
938 type Output = Self;
939 #[inline]
sub(self, rhs: f32) -> Self940 fn sub(self, rhs: f32) -> Self {
941 Self(unsafe { _mm_sub_ps(self.0, _mm_set1_ps(rhs)) })
942 }
943 }
944
945 impl SubAssign<f32> for Vec4 {
946 #[inline]
sub_assign(&mut self, rhs: f32)947 fn sub_assign(&mut self, rhs: f32) {
948 self.0 = unsafe { _mm_sub_ps(self.0, _mm_set1_ps(rhs)) };
949 }
950 }
951
952 impl Sub<Vec4> for f32 {
953 type Output = Vec4;
954 #[inline]
sub(self, rhs: Vec4) -> Vec4955 fn sub(self, rhs: Vec4) -> Vec4 {
956 Vec4(unsafe { _mm_sub_ps(_mm_set1_ps(self), rhs.0) })
957 }
958 }
959
impl Rem<Vec4> for Vec4 {
    type Output = Self;
    /// Element-wise remainder computed as `self - floor(self / rhs) * rhs`.
    ///
    /// NOTE(review): this uses `floor`, so the result's sign follows `rhs`,
    /// unlike scalar `f32 % f32` which truncates — confirm this matches the
    /// template's intent for other backends.
    #[inline]
    fn rem(self, rhs: Self) -> Self {
        unsafe {
            let n = m128_floor(_mm_div_ps(self.0, rhs.0));
            Self(_mm_sub_ps(self.0, _mm_mul_ps(n, rhs.0)))
        }
    }
}
970
971 impl RemAssign<Vec4> for Vec4 {
972 #[inline]
rem_assign(&mut self, rhs: Self)973 fn rem_assign(&mut self, rhs: Self) {
974 *self = self.rem(rhs);
975 }
976 }
977
978 impl Rem<f32> for Vec4 {
979 type Output = Self;
980 #[inline]
rem(self, rhs: f32) -> Self981 fn rem(self, rhs: f32) -> Self {
982 self.rem(Self::splat(rhs))
983 }
984 }
985
986 impl RemAssign<f32> for Vec4 {
987 #[inline]
rem_assign(&mut self, rhs: f32)988 fn rem_assign(&mut self, rhs: f32) {
989 *self = self.rem(Self::splat(rhs));
990 }
991 }
992
993 impl Rem<Vec4> for f32 {
994 type Output = Vec4;
995 #[inline]
rem(self, rhs: Vec4) -> Vec4996 fn rem(self, rhs: Vec4) -> Vec4 {
997 Vec4::splat(self).rem(rhs)
998 }
999 }
1000
#[cfg(not(target_arch = "spirv"))]
impl AsRef<[f32; 4]> for Vec4 {
    #[inline]
    fn as_ref(&self) -> &[f32; 4] {
        // SAFETY: `Vec4` is `#[repr(transparent)]` over a 128-bit SIMD value,
        // matching the size of `[f32; 4]`, and the array type's alignment is
        // no stricter than `Vec4`'s.
        unsafe { &*(self as *const Vec4 as *const [f32; 4]) }
    }
}
1008
#[cfg(not(target_arch = "spirv"))]
impl AsMut<[f32; 4]> for Vec4 {
    #[inline]
    fn as_mut(&mut self) -> &mut [f32; 4] {
        // SAFETY: same layout argument as `AsRef`; exclusive access is
        // guaranteed by `&mut self`.
        unsafe { &mut *(self as *mut Vec4 as *mut [f32; 4]) }
    }
}
1016
1017 impl Sum for Vec4 {
1018 #[inline]
sum<I>(iter: I) -> Self where I: Iterator<Item = Self>,1019 fn sum<I>(iter: I) -> Self
1020 where
1021 I: Iterator<Item = Self>,
1022 {
1023 iter.fold(Self::ZERO, Self::add)
1024 }
1025 }
1026
1027 impl<'a> Sum<&'a Self> for Vec4 {
1028 #[inline]
sum<I>(iter: I) -> Self where I: Iterator<Item = &'a Self>,1029 fn sum<I>(iter: I) -> Self
1030 where
1031 I: Iterator<Item = &'a Self>,
1032 {
1033 iter.fold(Self::ZERO, |a, &b| Self::add(a, b))
1034 }
1035 }
1036
1037 impl Product for Vec4 {
1038 #[inline]
product<I>(iter: I) -> Self where I: Iterator<Item = Self>,1039 fn product<I>(iter: I) -> Self
1040 where
1041 I: Iterator<Item = Self>,
1042 {
1043 iter.fold(Self::ONE, Self::mul)
1044 }
1045 }
1046
1047 impl<'a> Product<&'a Self> for Vec4 {
1048 #[inline]
product<I>(iter: I) -> Self where I: Iterator<Item = &'a Self>,1049 fn product<I>(iter: I) -> Self
1050 where
1051 I: Iterator<Item = &'a Self>,
1052 {
1053 iter.fold(Self::ONE, |a, &b| Self::mul(a, b))
1054 }
1055 }
1056
1057 impl Neg for Vec4 {
1058 type Output = Self;
1059 #[inline]
neg(self) -> Self1060 fn neg(self) -> Self {
1061 Self(unsafe { _mm_xor_ps(_mm_set1_ps(-0.0), self.0) })
1062 }
1063 }
1064
1065 impl Index<usize> for Vec4 {
1066 type Output = f32;
1067 #[inline]
index(&self, index: usize) -> &Self::Output1068 fn index(&self, index: usize) -> &Self::Output {
1069 match index {
1070 0 => &self.x,
1071 1 => &self.y,
1072 2 => &self.z,
1073 3 => &self.w,
1074 _ => panic!("index out of bounds"),
1075 }
1076 }
1077 }
1078
1079 impl IndexMut<usize> for Vec4 {
1080 #[inline]
index_mut(&mut self, index: usize) -> &mut Self::Output1081 fn index_mut(&mut self, index: usize) -> &mut Self::Output {
1082 match index {
1083 0 => &mut self.x,
1084 1 => &mut self.y,
1085 2 => &mut self.z,
1086 3 => &mut self.w,
1087 _ => panic!("index out of bounds"),
1088 }
1089 }
1090 }
1091
1092 #[cfg(not(target_arch = "spirv"))]
1093 impl fmt::Display for Vec4 {
fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result1094 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1095 write!(f, "[{}, {}, {}, {}]", self.x, self.y, self.z, self.w)
1096 }
1097 }
1098
1099 #[cfg(not(target_arch = "spirv"))]
1100 impl fmt::Debug for Vec4 {
fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result1101 fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
1102 fmt.debug_tuple(stringify!(Vec4))
1103 .field(&self.x)
1104 .field(&self.y)
1105 .field(&self.z)
1106 .field(&self.w)
1107 .finish()
1108 }
1109 }
1110
1111 impl From<Vec4> for __m128 {
1112 #[inline]
from(t: Vec4) -> Self1113 fn from(t: Vec4) -> Self {
1114 t.0
1115 }
1116 }
1117
1118 impl From<__m128> for Vec4 {
1119 #[inline]
from(t: __m128) -> Self1120 fn from(t: __m128) -> Self {
1121 Self(t)
1122 }
1123 }
1124
1125 impl From<[f32; 4]> for Vec4 {
1126 #[inline]
from(a: [f32; 4]) -> Self1127 fn from(a: [f32; 4]) -> Self {
1128 Self(unsafe { _mm_loadu_ps(a.as_ptr()) })
1129 }
1130 }
1131
impl From<Vec4> for [f32; 4] {
    /// Stores the x, y, z, w components into an array.
    #[inline]
    fn from(v: Vec4) -> Self {
        use crate::Align16;
        use core::mem::MaybeUninit;
        // 16-byte-aligned scratch so the aligned `_mm_store_ps` below is valid.
        let mut out: MaybeUninit<Align16<Self>> = MaybeUninit::uninit();
        unsafe {
            // SAFETY: `out` is 16-byte aligned (via `Align16`) and exactly the
            // size of an `__m128`; the store fully initializes it, making the
            // subsequent `assume_init` sound.
            _mm_store_ps(out.as_mut_ptr().cast(), v.0);
            out.assume_init().0
        }
    }
}
1144
1145 impl From<(f32, f32, f32, f32)> for Vec4 {
1146 #[inline]
from(t: (f32, f32, f32, f32)) -> Self1147 fn from(t: (f32, f32, f32, f32)) -> Self {
1148 Self::new(t.0, t.1, t.2, t.3)
1149 }
1150 }
1151
impl From<Vec4> for (f32, f32, f32, f32) {
    /// Stores the x, y, z, w components into a tuple.
    #[inline]
    fn from(v: Vec4) -> Self {
        use crate::Align16;
        use core::mem::MaybeUninit;
        // 16-byte-aligned scratch so the aligned `_mm_store_ps` below is valid.
        // NOTE(review): this assumes `(f32, f32, f32, f32)` is laid out as four
        // consecutive f32s, which the aligned store relies on.
        let mut out: MaybeUninit<Align16<Self>> = MaybeUninit::uninit();
        unsafe {
            // SAFETY: `out` is 16-byte aligned (via `Align16`) and large enough
            // for an `__m128`; the store fully initializes it before
            // `assume_init`.
            _mm_store_ps(out.as_mut_ptr().cast(), v.0);
            out.assume_init().0
        }
    }
}
1164
1165 impl From<(Vec3A, f32)> for Vec4 {
1166 #[inline]
from((v, w): (Vec3A, f32)) -> Self1167 fn from((v, w): (Vec3A, f32)) -> Self {
1168 v.extend(w)
1169 }
1170 }
1171
1172 impl From<(f32, Vec3A)> for Vec4 {
1173 #[inline]
from((x, v): (f32, Vec3A)) -> Self1174 fn from((x, v): (f32, Vec3A)) -> Self {
1175 Self::new(x, v.x, v.y, v.z)
1176 }
1177 }
1178
1179 impl From<(Vec3, f32)> for Vec4 {
1180 #[inline]
from((v, w): (Vec3, f32)) -> Self1181 fn from((v, w): (Vec3, f32)) -> Self {
1182 Self::new(v.x, v.y, v.z, w)
1183 }
1184 }
1185
1186 impl From<(f32, Vec3)> for Vec4 {
1187 #[inline]
from((x, v): (f32, Vec3)) -> Self1188 fn from((x, v): (f32, Vec3)) -> Self {
1189 Self::new(x, v.x, v.y, v.z)
1190 }
1191 }
1192
1193 impl From<(Vec2, f32, f32)> for Vec4 {
1194 #[inline]
from((v, z, w): (Vec2, f32, f32)) -> Self1195 fn from((v, z, w): (Vec2, f32, f32)) -> Self {
1196 Self::new(v.x, v.y, z, w)
1197 }
1198 }
1199
1200 impl From<(Vec2, Vec2)> for Vec4 {
1201 #[inline]
from((v, u): (Vec2, Vec2)) -> Self1202 fn from((v, u): (Vec2, Vec2)) -> Self {
1203 Self::new(v.x, v.y, u.x, u.y)
1204 }
1205 }
1206
impl Deref for Vec4 {
    type Target = crate::deref::Vec4<f32>;
    /// Exposes named `x`/`y`/`z`/`w` field access on the SIMD-backed vector.
    #[inline]
    fn deref(&self) -> &Self::Target {
        // SAFETY: `Vec4` is `#[repr(transparent)]` over `__m128` (four f32
        // lanes). This assumes `crate::deref::Vec4<f32>` has a matching
        // size/alignment layout of four consecutive f32 fields — the cast is
        // only sound under that layout guarantee.
        unsafe { &*(self as *const Self).cast() }
    }
}
1214
impl DerefMut for Vec4 {
    /// Exposes mutable named `x`/`y`/`z`/`w` field access on the SIMD-backed
    /// vector.
    #[inline]
    fn deref_mut(&mut self) -> &mut Self::Target {
        // SAFETY: same layout reasoning as `Deref`: `Vec4` is transparent over
        // `__m128` and `crate::deref::Vec4<f32>` is assumed layout-compatible;
        // the exclusive borrow of `self` guarantees unique access.
        unsafe { &mut *(self as *mut Self).cast() }
    }
}
1221