From 03c2233ebbcd3b54ad84668050fa9f5b15a162f2 Mon Sep 17 00:00:00 2001 From: Kai Giebeler Date: Fri, 24 Nov 2023 23:44:07 +0100 Subject: [PATCH] add I16Vec and U16Vec (#439) --- codegen/src/outputs.rs | 78 ++ codegen/templates/vec.rs.tera | 70 +- src/f32/coresimd/vec3a.rs | 12 + src/f32/coresimd/vec4.rs | 12 + src/f32/scalar/vec3a.rs | 12 + src/f32/scalar/vec4.rs | 12 + src/f32/sse2/vec3a.rs | 12 + src/f32/sse2/vec4.rs | 12 + src/f32/vec2.rs | 12 + src/f32/vec3.rs | 12 + src/f32/wasm32/vec3a.rs | 12 + src/f32/wasm32/vec4.rs | 12 + src/f64/dvec2.rs | 12 + src/f64/dvec3.rs | 12 + src/f64/dvec4.rs | 12 + src/features/impl_bytemuck.rs | 32 +- src/features/impl_mint.rs | 8 +- src/features/impl_rand.rs | 20 + src/features/impl_rkyv.rs | 28 + src/features/impl_serde.rs | 46 + src/i16.rs | 50 + src/i16/i16vec2.rs | 1206 ++++++++++++++++++++ src/i16/i16vec3.rs | 1324 ++++++++++++++++++++++ src/i16/i16vec4.rs | 1436 ++++++++++++++++++++++++ src/i32/ivec2.rs | 36 +- src/i32/ivec3.rs | 36 +- src/i32/ivec4.rs | 46 +- src/i64/i64vec2.rs | 35 +- src/i64/i64vec3.rs | 35 +- src/i64/i64vec4.rs | 50 +- src/lib.rs | 12 + src/swizzles.rs | 8 + src/swizzles/i16vec2_impl.rs | 193 ++++ src/swizzles/i16vec3_impl.rs | 729 ++++++++++++ src/swizzles/i16vec4_impl.rs | 1993 +++++++++++++++++++++++++++++++++ src/swizzles/u16vec2_impl.rs | 193 ++++ src/swizzles/u16vec3_impl.rs | 729 ++++++++++++ src/swizzles/u16vec4_impl.rs | 1993 +++++++++++++++++++++++++++++++++ src/u16.rs | 50 + src/u16/u16vec2.rs | 1098 ++++++++++++++++++ src/u16/u16vec3.rs | 1231 ++++++++++++++++++++ src/u16/u16vec4.rs | 1334 ++++++++++++++++++++++ src/u32/uvec2.rs | 30 +- src/u32/uvec3.rs | 34 +- src/u32/uvec4.rs | 40 +- src/u64/u64vec2.rs | 39 +- src/u64/u64vec3.rs | 47 +- src/u64/u64vec4.rs | 54 +- tests/swizzles_i16.rs | 497 ++++++++ tests/swizzles_u16.rs | 497 ++++++++ tests/vec2.rs | 313 +++++- tests/vec3.rs | 10 +- tests/vec4.rs | 4 +- 53 files changed, 15760 insertions(+), 60 deletions(-) create mode 100644 src/i16.rs create mode 100644 src/i16/i16vec2.rs create mode 100644 src/i16/i16vec3.rs create mode 100644 src/i16/i16vec4.rs create mode 100644 src/swizzles/i16vec2_impl.rs create mode 100644 src/swizzles/i16vec3_impl.rs create mode 100644 src/swizzles/i16vec4_impl.rs create mode 100644 src/swizzles/u16vec2_impl.rs create mode 100644 src/swizzles/u16vec3_impl.rs create mode 100644 src/swizzles/u16vec4_impl.rs create mode 100644 src/u16.rs create mode 100644 src/u16/u16vec2.rs create mode 100644 src/u16/u16vec3.rs create mode 100644 src/u16/u16vec4.rs create mode 100644 tests/swizzles_i16.rs create mode 100644 tests/swizzles_u16.rs diff --git a/codegen/src/outputs.rs b/codegen/src/outputs.rs index 1c8ae8e6..62024fb2 100644 --- a/codegen/src/outputs.rs +++ b/codegen/src/outputs.rs @@ -57,6 +57,30 @@ impl ContextBuilder { Self::new_tvecn_swizzle_impl(4, "D") } + pub fn new_i16vec2_swizzle_impl() -> Self { + Self::new_tvecn_swizzle_impl(2, "I16") + } + + pub fn new_i16vec3_swizzle_impl() -> Self { + Self::new_tvecn_swizzle_impl(3, "I16") + } + + pub fn new_i16vec4_swizzle_impl() -> Self { + Self::new_tvecn_swizzle_impl(4, "I16") + } + + pub fn new_u16vec2_swizzle_impl() -> Self { + Self::new_tvecn_swizzle_impl(2, "U16") + } + + pub fn new_u16vec3_swizzle_impl() -> Self { + Self::new_tvecn_swizzle_impl(3, "U16") + } + + pub fn new_u16vec4_swizzle_impl() -> Self { + Self::new_tvecn_swizzle_impl(4, "U16") + } + pub fn new_ivec2_swizzle_impl() -> Self { Self::new_tvecn_swizzle_impl(2, "I") } @@ -200,6 +224,30 @@ impl ContextBuilder { 
Self::new_vecn(4).with_scalar_t("f64") } + pub fn new_i16vec2() -> Self { + Self::new_vecn(2).with_scalar_t("i16") + } + + pub fn new_i16vec3() -> Self { + Self::new_vecn(3).with_scalar_t("i16") + } + + pub fn new_i16vec4() -> Self { + Self::new_vecn(4).with_scalar_t("i16") + } + + pub fn new_u16vec2() -> Self { + Self::new_vecn(2).with_scalar_t("u16") + } + + pub fn new_u16vec3() -> Self { + Self::new_vecn(3).with_scalar_t("u16") + } + + pub fn new_u16vec4() -> Self { + Self::new_vecn(4).with_scalar_t("u16") + } + pub fn new_ivec2() -> Self { Self::new_vecn(2).with_scalar_t("i32") } @@ -426,6 +474,30 @@ pub fn build_output_pairs() -> HashMap<&'static str, tera::Context> { "src/swizzles/dvec4_impl.rs", ContextBuilder::new_dvec4_swizzle_impl().build(), ), + ( + "src/swizzles/i16vec2_impl.rs", + ContextBuilder::new_i16vec2_swizzle_impl().build(), + ), + ( + "src/swizzles/i16vec3_impl.rs", + ContextBuilder::new_i16vec3_swizzle_impl().build(), + ), + ( + "src/swizzles/i16vec4_impl.rs", + ContextBuilder::new_i16vec4_swizzle_impl().build(), + ), + ( + "src/swizzles/u16vec2_impl.rs", + ContextBuilder::new_u16vec2_swizzle_impl().build(), + ), + ( + "src/swizzles/u16vec3_impl.rs", + ContextBuilder::new_u16vec3_swizzle_impl().build(), + ), + ( + "src/swizzles/u16vec4_impl.rs", + ContextBuilder::new_u16vec4_swizzle_impl().build(), + ), ( "src/swizzles/ivec2_impl.rs", ContextBuilder::new_ivec2_swizzle_impl().build(), @@ -556,6 +628,12 @@ pub fn build_output_pairs() -> HashMap<&'static str, tera::Context> { ("src/f64/dvec2.rs", ContextBuilder::new_dvec2().build()), ("src/f64/dvec3.rs", ContextBuilder::new_dvec3().build()), ("src/f64/dvec4.rs", ContextBuilder::new_dvec4().build()), + ("src/i16/i16vec2.rs", ContextBuilder::new_i16vec2().build()), + ("src/i16/i16vec3.rs", ContextBuilder::new_i16vec3().build()), + ("src/i16/i16vec4.rs", ContextBuilder::new_i16vec4().build()), + ("src/u16/u16vec2.rs", ContextBuilder::new_u16vec2().build()), + ("src/u16/u16vec3.rs", ContextBuilder::new_u16vec3().build()), + ("src/u16/u16vec4.rs", ContextBuilder::new_u16vec4().build()), ("src/i32/ivec2.rs", ContextBuilder::new_ivec2().build()), ("src/i32/ivec3.rs", ContextBuilder::new_ivec3().build()), ("src/i32/ivec4.rs", ContextBuilder::new_ivec4().build()), diff --git a/codegen/templates/vec.rs.tera b/codegen/templates/vec.rs.tera index 65d93846..bba10c78 100644 --- a/codegen/templates/vec.rs.tera +++ b/codegen/templates/vec.rs.tera @@ -37,6 +37,22 @@ {% set vec4_t = "DVec4" %} {% set from_types = ["Vec" ~ dim, "IVec" ~ dim, "UVec" ~ dim] %} {% endif %} +{% elif scalar_t == "i16" %} + {% set is_signed = true %} + {% set is_float = false %} + {% set self_t = "I16Vec" ~ dim %} + {% set vec2_t = "I16Vec2" %} + {% set vec3_t = "I16Vec3" %} + {% set vec4_t = "I16Vec4" %} + {% set try_from_types = ["U16Vec" ~ dim, "IVec" ~ dim, "UVec" ~ dim, "I64Vec" ~ dim, "U64Vec" ~ dim] %} +{% elif scalar_t == "u16" %} + {% set is_signed = false %} + {% set is_float = false %} + {% set self_t = "U16Vec" ~ dim %} + {% set vec2_t = "U16Vec2" %} + {% set vec3_t = "U16Vec3" %} + {% set vec4_t = "U16Vec4" %} + {% set try_from_types = ["I16Vec" ~ dim, "IVec" ~ dim, "UVec" ~ dim, "I64Vec" ~ dim, "U64Vec" ~ dim] %} {% elif scalar_t == "i32" %} {% set is_signed = true %} {% set is_float = false %} @@ -44,7 +60,8 @@ {% set vec2_t = "IVec2" %} {% set vec3_t = "IVec3" %} {% set vec4_t = "IVec4" %} - {% set try_from_types = ["UVec" ~ dim, "U64Vec" ~ dim, "I64Vec" ~ dim] %} + {% set from_types = ["I16Vec" ~ dim, "U16Vec" ~ dim] %} + {% set try_from_types = 
["UVec" ~ dim, "I64Vec" ~ dim, "U64Vec" ~ dim] %} {% elif scalar_t == "u32" %} {% set is_signed = false %} {% set is_float = false %} @@ -52,7 +69,8 @@ {% set vec2_t = "UVec2" %} {% set vec3_t = "UVec3" %} {% set vec4_t = "UVec4" %} - {% set try_from_types = ["IVec" ~ dim, "I64Vec" ~ dim, "U64Vec" ~ dim] %} + {% set from_types = ["U16Vec" ~ dim] %} + {% set try_from_types = ["I16Vec" ~ dim, "IVec" ~ dim, "I64Vec" ~ dim, "U64Vec" ~ dim] %} {% elif scalar_t == "i64" %} {% set is_signed = true %} {% set is_float = false %} @@ -60,7 +78,7 @@ {% set vec2_t = "I64Vec2" %} {% set vec3_t = "I64Vec3" %} {% set vec4_t = "I64Vec4" %} - {% set from_types = ["IVec" ~ dim] %} + {% set from_types = ["I16Vec" ~ dim, "U16Vec" ~ dim, "IVec" ~ dim, "UVec" ~ dim] %} {% set try_from_types = ["U64Vec" ~ dim] %} {% elif scalar_t == "u64" %} {% set is_signed = false %} @@ -69,8 +87,8 @@ {% set vec2_t = "U64Vec2" %} {% set vec3_t = "U64Vec3" %} {% set vec4_t = "U64Vec4" %} - {% set from_types = ["UVec" ~ dim] %} - {% set try_from_types = ["I64Vec" ~ dim] %} + {% set from_types = ["U16Vec" ~ dim, "UVec" ~ dim] %} + {% set try_from_types = ["I16Vec" ~ dim, "IVec" ~ dim, "I64Vec" ~ dim] %} {% endif %} {% if scalar_t == "f64" or scalar_t == "i64" or scalar_t == "u64" or dim == 4 %} @@ -1651,6 +1669,48 @@ impl {{ self_t }} { } {% endif %} {% endif %} +{% if scalar_t != "i16" %} + {% if dim == 2 %} + /// Casts all elements of `self` to `i16`. + #[inline] + pub fn as_i16vec2(&self) -> crate::I16Vec2 { + crate::I16Vec2::new(self.x as i16, self.y as i16) + } + {% elif dim == 3 %} + /// Casts all elements of `self` to `i16`. + #[inline] + pub fn as_i16vec3(&self) -> crate::I16Vec3 { + crate::I16Vec3::new(self.x as i16, self.y as i16, self.z as i16) + } + {% elif dim == 4 %} + /// Casts all elements of `self` to `i16`. + #[inline] + pub fn as_i16vec4(&self) -> crate::I16Vec4 { + crate::I16Vec4::new(self.x as i16, self.y as i16, self.z as i16, self.w as i16) + } + {% endif %} +{% endif %} +{% if scalar_t != "u16" %} + {% if dim == 2 %} + /// Casts all elements of `self` to `u16`. + #[inline] + pub fn as_u16vec2(&self) -> crate::U16Vec2 { + crate::U16Vec2::new(self.x as u16, self.y as u16) + } + {% elif dim == 3 %} + /// Casts all elements of `self` to `u16`. + #[inline] + pub fn as_u16vec3(&self) -> crate::U16Vec3 { + crate::U16Vec3::new(self.x as u16, self.y as u16, self.z as u16) + } + {% elif dim == 4 %} + /// Casts all elements of `self` to `u16`. + #[inline] + pub fn as_u16vec4(&self) -> crate::U16Vec4 { + crate::U16Vec4::new(self.x as u16, self.y as u16, self.z as u16, self.w as u16) + } + {% endif %} +{% endif %} {% if scalar_t != "i32" %} {% if dim == 2 %} /// Casts all elements of `self` to `i32`. diff --git a/src/f32/coresimd/vec3a.rs b/src/f32/coresimd/vec3a.rs index 8cbfc4e5..46141d8a 100644 --- a/src/f32/coresimd/vec3a.rs +++ b/src/f32/coresimd/vec3a.rs @@ -728,6 +728,18 @@ impl Vec3A { crate::DVec3::new(self.x as f64, self.y as f64, self.z as f64) } + /// Casts all elements of `self` to `i16`. + #[inline] + pub fn as_i16vec3(&self) -> crate::I16Vec3 { + crate::I16Vec3::new(self.x as i16, self.y as i16, self.z as i16) + } + + /// Casts all elements of `self` to `u16`. + #[inline] + pub fn as_u16vec3(&self) -> crate::U16Vec3 { + crate::U16Vec3::new(self.x as u16, self.y as u16, self.z as u16) + } + /// Casts all elements of `self` to `i32`. 
#[inline] pub fn as_ivec3(&self) -> crate::IVec3 { diff --git a/src/f32/coresimd/vec4.rs b/src/f32/coresimd/vec4.rs index 33354b86..4057ec98 100644 --- a/src/f32/coresimd/vec4.rs +++ b/src/f32/coresimd/vec4.rs @@ -646,6 +646,18 @@ impl Vec4 { crate::DVec4::new(self.x as f64, self.y as f64, self.z as f64, self.w as f64) } + /// Casts all elements of `self` to `i16`. + #[inline] + pub fn as_i16vec4(&self) -> crate::I16Vec4 { + crate::I16Vec4::new(self.x as i16, self.y as i16, self.z as i16, self.w as i16) + } + + /// Casts all elements of `self` to `u16`. + #[inline] + pub fn as_u16vec4(&self) -> crate::U16Vec4 { + crate::U16Vec4::new(self.x as u16, self.y as u16, self.z as u16, self.w as u16) + } + /// Casts all elements of `self` to `i32`. #[inline] pub fn as_ivec4(&self) -> crate::IVec4 { diff --git a/src/f32/scalar/vec3a.rs b/src/f32/scalar/vec3a.rs index f8c96652..4ac15044 100644 --- a/src/f32/scalar/vec3a.rs +++ b/src/f32/scalar/vec3a.rs @@ -773,6 +773,18 @@ impl Vec3A { crate::DVec3::new(self.x as f64, self.y as f64, self.z as f64) } + /// Casts all elements of `self` to `i16`. + #[inline] + pub fn as_i16vec3(&self) -> crate::I16Vec3 { + crate::I16Vec3::new(self.x as i16, self.y as i16, self.z as i16) + } + + /// Casts all elements of `self` to `u16`. + #[inline] + pub fn as_u16vec3(&self) -> crate::U16Vec3 { + crate::U16Vec3::new(self.x as u16, self.y as u16, self.z as u16) + } + /// Casts all elements of `self` to `i32`. #[inline] pub fn as_ivec3(&self) -> crate::IVec3 { diff --git a/src/f32/scalar/vec4.rs b/src/f32/scalar/vec4.rs index 9b0280b2..450d8b78 100644 --- a/src/f32/scalar/vec4.rs +++ b/src/f32/scalar/vec4.rs @@ -755,6 +755,18 @@ impl Vec4 { crate::DVec4::new(self.x as f64, self.y as f64, self.z as f64, self.w as f64) } + /// Casts all elements of `self` to `i16`. + #[inline] + pub fn as_i16vec4(&self) -> crate::I16Vec4 { + crate::I16Vec4::new(self.x as i16, self.y as i16, self.z as i16, self.w as i16) + } + + /// Casts all elements of `self` to `u16`. + #[inline] + pub fn as_u16vec4(&self) -> crate::U16Vec4 { + crate::U16Vec4::new(self.x as u16, self.y as u16, self.z as u16, self.w as u16) + } + /// Casts all elements of `self` to `i32`. #[inline] pub fn as_ivec4(&self) -> crate::IVec4 { diff --git a/src/f32/sse2/vec3a.rs b/src/f32/sse2/vec3a.rs index 87915fcd..957ce70d 100644 --- a/src/f32/sse2/vec3a.rs +++ b/src/f32/sse2/vec3a.rs @@ -775,6 +775,18 @@ impl Vec3A { crate::DVec3::new(self.x as f64, self.y as f64, self.z as f64) } + /// Casts all elements of `self` to `i16`. + #[inline] + pub fn as_i16vec3(&self) -> crate::I16Vec3 { + crate::I16Vec3::new(self.x as i16, self.y as i16, self.z as i16) + } + + /// Casts all elements of `self` to `u16`. + #[inline] + pub fn as_u16vec3(&self) -> crate::U16Vec3 { + crate::U16Vec3::new(self.x as u16, self.y as u16, self.z as u16) + } + /// Casts all elements of `self` to `i32`. #[inline] pub fn as_ivec3(&self) -> crate::IVec3 { diff --git a/src/f32/sse2/vec4.rs b/src/f32/sse2/vec4.rs index 8660a323..6e406f1f 100644 --- a/src/f32/sse2/vec4.rs +++ b/src/f32/sse2/vec4.rs @@ -695,6 +695,18 @@ impl Vec4 { crate::DVec4::new(self.x as f64, self.y as f64, self.z as f64, self.w as f64) } + /// Casts all elements of `self` to `i16`. + #[inline] + pub fn as_i16vec4(&self) -> crate::I16Vec4 { + crate::I16Vec4::new(self.x as i16, self.y as i16, self.z as i16, self.w as i16) + } + + /// Casts all elements of `self` to `u16`. 
+ #[inline] + pub fn as_u16vec4(&self) -> crate::U16Vec4 { + crate::U16Vec4::new(self.x as u16, self.y as u16, self.z as u16, self.w as u16) + } + /// Casts all elements of `self` to `i32`. #[inline] pub fn as_ivec4(&self) -> crate::IVec4 { diff --git a/src/f32/vec2.rs b/src/f32/vec2.rs index 80a1d353..3df8bbaf 100644 --- a/src/f32/vec2.rs +++ b/src/f32/vec2.rs @@ -705,6 +705,18 @@ impl Vec2 { crate::DVec2::new(self.x as f64, self.y as f64) } + /// Casts all elements of `self` to `i16`. + #[inline] + pub fn as_i16vec2(&self) -> crate::I16Vec2 { + crate::I16Vec2::new(self.x as i16, self.y as i16) + } + + /// Casts all elements of `self` to `u16`. + #[inline] + pub fn as_u16vec2(&self) -> crate::U16Vec2 { + crate::U16Vec2::new(self.x as u16, self.y as u16) + } + /// Casts all elements of `self` to `i32`. #[inline] pub fn as_ivec2(&self) -> crate::IVec2 { diff --git a/src/f32/vec3.rs b/src/f32/vec3.rs index 5b31d064..2532ee71 100644 --- a/src/f32/vec3.rs +++ b/src/f32/vec3.rs @@ -764,6 +764,18 @@ impl Vec3 { crate::DVec3::new(self.x as f64, self.y as f64, self.z as f64) } + /// Casts all elements of `self` to `i16`. + #[inline] + pub fn as_i16vec3(&self) -> crate::I16Vec3 { + crate::I16Vec3::new(self.x as i16, self.y as i16, self.z as i16) + } + + /// Casts all elements of `self` to `u16`. + #[inline] + pub fn as_u16vec3(&self) -> crate::U16Vec3 { + crate::U16Vec3::new(self.x as u16, self.y as u16, self.z as u16) + } + /// Casts all elements of `self` to `i32`. #[inline] pub fn as_ivec3(&self) -> crate::IVec3 { diff --git a/src/f32/wasm32/vec3a.rs b/src/f32/wasm32/vec3a.rs index a2d9cbc5..08bf493b 100644 --- a/src/f32/wasm32/vec3a.rs +++ b/src/f32/wasm32/vec3a.rs @@ -745,6 +745,18 @@ impl Vec3A { crate::DVec3::new(self.x as f64, self.y as f64, self.z as f64) } + /// Casts all elements of `self` to `i16`. + #[inline] + pub fn as_i16vec3(&self) -> crate::I16Vec3 { + crate::I16Vec3::new(self.x as i16, self.y as i16, self.z as i16) + } + + /// Casts all elements of `self` to `u16`. + #[inline] + pub fn as_u16vec3(&self) -> crate::U16Vec3 { + crate::U16Vec3::new(self.x as u16, self.y as u16, self.z as u16) + } + /// Casts all elements of `self` to `i32`. #[inline] pub fn as_ivec3(&self) -> crate::IVec3 { diff --git a/src/f32/wasm32/vec4.rs b/src/f32/wasm32/vec4.rs index 1fa9c48e..7d98c7db 100644 --- a/src/f32/wasm32/vec4.rs +++ b/src/f32/wasm32/vec4.rs @@ -672,6 +672,18 @@ impl Vec4 { crate::DVec4::new(self.x as f64, self.y as f64, self.z as f64, self.w as f64) } + /// Casts all elements of `self` to `i16`. + #[inline] + pub fn as_i16vec4(&self) -> crate::I16Vec4 { + crate::I16Vec4::new(self.x as i16, self.y as i16, self.z as i16, self.w as i16) + } + + /// Casts all elements of `self` to `u16`. + #[inline] + pub fn as_u16vec4(&self) -> crate::U16Vec4 { + crate::U16Vec4::new(self.x as u16, self.y as u16, self.z as u16, self.w as u16) + } + /// Casts all elements of `self` to `i32`. #[inline] pub fn as_ivec4(&self) -> crate::IVec4 { diff --git a/src/f64/dvec2.rs b/src/f64/dvec2.rs index fa692ba7..7a67fe7c 100644 --- a/src/f64/dvec2.rs +++ b/src/f64/dvec2.rs @@ -705,6 +705,18 @@ impl DVec2 { crate::Vec2::new(self.x as f32, self.y as f32) } + /// Casts all elements of `self` to `i16`. + #[inline] + pub fn as_i16vec2(&self) -> crate::I16Vec2 { + crate::I16Vec2::new(self.x as i16, self.y as i16) + } + + /// Casts all elements of `self` to `u16`. + #[inline] + pub fn as_u16vec2(&self) -> crate::U16Vec2 { + crate::U16Vec2::new(self.x as u16, self.y as u16) + } + /// Casts all elements of `self` to `i32`. 
#[inline] pub fn as_ivec2(&self) -> crate::IVec2 { diff --git a/src/f64/dvec3.rs b/src/f64/dvec3.rs index 7d1349a2..a02ca2fe 100644 --- a/src/f64/dvec3.rs +++ b/src/f64/dvec3.rs @@ -770,6 +770,18 @@ impl DVec3 { crate::Vec3A::new(self.x as f32, self.y as f32, self.z as f32) } + /// Casts all elements of `self` to `i16`. + #[inline] + pub fn as_i16vec3(&self) -> crate::I16Vec3 { + crate::I16Vec3::new(self.x as i16, self.y as i16, self.z as i16) + } + + /// Casts all elements of `self` to `u16`. + #[inline] + pub fn as_u16vec3(&self) -> crate::U16Vec3 { + crate::U16Vec3::new(self.x as u16, self.y as u16, self.z as u16) + } + /// Casts all elements of `self` to `i32`. #[inline] pub fn as_ivec3(&self) -> crate::IVec3 { diff --git a/src/f64/dvec4.rs b/src/f64/dvec4.rs index 3ff87b5f..052c8906 100644 --- a/src/f64/dvec4.rs +++ b/src/f64/dvec4.rs @@ -747,6 +747,18 @@ impl DVec4 { crate::Vec4::new(self.x as f32, self.y as f32, self.z as f32, self.w as f32) } + /// Casts all elements of `self` to `i16`. + #[inline] + pub fn as_i16vec4(&self) -> crate::I16Vec4 { + crate::I16Vec4::new(self.x as i16, self.y as i16, self.z as i16, self.w as i16) + } + + /// Casts all elements of `self` to `u16`. + #[inline] + pub fn as_u16vec4(&self) -> crate::U16Vec4 { + crate::U16Vec4::new(self.x as u16, self.y as u16, self.z as u16, self.w as u16) + } + /// Casts all elements of `self` to `i32`. #[inline] pub fn as_ivec4(&self) -> crate::IVec4 { diff --git a/src/features/impl_bytemuck.rs b/src/features/impl_bytemuck.rs index 72fe2e0f..9024509d 100644 --- a/src/features/impl_bytemuck.rs +++ b/src/features/impl_bytemuck.rs @@ -1,7 +1,8 @@ use crate::{ Affine2, Affine3A, DAffine2, DAffine3, DMat2, DMat3, DMat4, DQuat, DVec2, DVec3, DVec4, - I64Vec2, I64Vec3, I64Vec4, IVec2, IVec3, IVec4, Mat2, Mat3, Mat3A, Mat4, Quat, U64Vec2, - U64Vec3, U64Vec4, UVec2, UVec3, UVec4, Vec2, Vec3, Vec3A, Vec4, + I16Vec2, I16Vec3, I16Vec4, I64Vec2, I64Vec3, I64Vec4, IVec2, IVec3, IVec4, Mat2, Mat3, Mat3A, + Mat4, Quat, U16Vec2, U16Vec3, U16Vec4, U64Vec2, U64Vec3, U64Vec4, UVec2, UVec3, UVec4, Vec2, + Vec3, Vec3A, Vec4, }; use bytemuck::{AnyBitPattern, Pod, Zeroable}; @@ -53,6 +54,20 @@ unsafe impl Zeroable for DVec3 {} unsafe impl Pod for DVec4 {} unsafe impl Zeroable for DVec4 {} +unsafe impl Pod for I16Vec2 {} +unsafe impl Zeroable for I16Vec2 {} +unsafe impl Pod for I16Vec3 {} +unsafe impl Zeroable for I16Vec3 {} +unsafe impl Pod for I16Vec4 {} +unsafe impl Zeroable for I16Vec4 {} + +unsafe impl Pod for U16Vec2 {} +unsafe impl Zeroable for U16Vec2 {} +unsafe impl Pod for U16Vec3 {} +unsafe impl Zeroable for U16Vec3 {} +unsafe impl Pod for U16Vec4 {} +unsafe impl Zeroable for U16Vec4 {} + unsafe impl Pod for IVec2 {} unsafe impl Zeroable for IVec2 {} unsafe impl Pod for IVec3 {} @@ -85,8 +100,9 @@ unsafe impl Zeroable for U64Vec4 {} mod test { use crate::{ Affine2, Affine3A, DAffine2, DAffine3, DMat2, DMat3, DMat4, DQuat, DVec2, DVec3, DVec4, - I64Vec2, I64Vec3, I64Vec4, IVec2, IVec3, IVec4, Mat2, Mat3, Mat3A, Mat4, Quat, U64Vec2, - U64Vec3, U64Vec4, UVec2, UVec3, UVec4, Vec2, Vec3, Vec3A, Vec4, + I16Vec2, I16Vec3, I16Vec4, I64Vec2, I64Vec3, I64Vec4, IVec2, IVec3, IVec4, Mat2, Mat3, + Mat3A, Mat4, Quat, U16Vec2, U16Vec3, U16Vec4, U64Vec2, U64Vec3, U64Vec4, UVec2, UVec3, + UVec4, Vec2, Vec3, Vec3A, Vec4, }; use core::mem; @@ -140,6 +156,14 @@ mod test { test_pod_t!(dvec3, DVec3); test_pod_t!(dvec4, DVec4); + test_pod_t!(i16vec2, I16Vec2); + test_pod_t!(i16vec3, I16Vec3); + test_pod_t!(i16vec4, I16Vec4); + + test_pod_t!(u16vec2, U16Vec2); 
+ test_pod_t!(u16vec3, U16Vec3); + test_pod_t!(u16vec4, U16Vec4); + test_pod_t!(ivec2, IVec2); test_pod_t!(ivec3, IVec3); test_pod_t!(ivec4, IVec4); diff --git a/src/features/impl_mint.rs b/src/features/impl_mint.rs index e59ac87d..84cdcbdc 100644 --- a/src/features/impl_mint.rs +++ b/src/features/impl_mint.rs @@ -1,9 +1,9 @@ use mint::IntoMint; use crate::{ - DMat2, DMat3, DMat4, DQuat, DVec2, DVec3, DVec4, I64Vec2, I64Vec3, I64Vec4, IVec2, IVec3, - IVec4, Mat2, Mat3, Mat3A, Mat4, Quat, U64Vec2, U64Vec3, U64Vec4, UVec2, UVec3, UVec4, Vec2, - Vec3, Vec3A, Vec4, + DMat2, DMat3, DMat4, DQuat, DVec2, DVec3, DVec4, I16Vec2, I16Vec3, I16Vec4, I64Vec2, I64Vec3, + I64Vec4, IVec2, IVec3, IVec4, Mat2, Mat3, Mat3A, Mat4, Quat, U16Vec2, U16Vec3, U16Vec4, + U64Vec2, U64Vec3, U64Vec4, UVec2, UVec3, UVec4, Vec2, Vec3, Vec3A, Vec4, }; macro_rules! impl_vec_types { @@ -310,6 +310,8 @@ impl IntoMint for Mat3A { impl_float_types!(f32, Mat2, Mat3, Mat4, Quat, Vec2, Vec3, Vec4); impl_float_types!(f64, DMat2, DMat3, DMat4, DQuat, DVec2, DVec3, DVec4); +impl_vec_types!(i16, I16Vec2, I16Vec3, I16Vec4); +impl_vec_types!(u16, U16Vec2, U16Vec3, U16Vec4); impl_vec_types!(i32, IVec2, IVec3, IVec4); impl_vec_types!(u32, UVec2, UVec3, UVec4); impl_vec_types!(i64, I64Vec2, I64Vec3, I64Vec4); diff --git a/src/features/impl_rand.rs b/src/features/impl_rand.rs index 36c10e98..3c6c19aa 100644 --- a/src/features/impl_rand.rs +++ b/src/features/impl_rand.rs @@ -178,6 +178,16 @@ mod f64 { impl_float_types!(f64, DMat2, DMat3, DMat4, DQuat, DVec2, DVec3, DVec4); } +mod i16 { + use crate::{I16Vec2, I16Vec3, I16Vec4}; + use rand::{ + distributions::{Distribution, Standard}, + Rng, + }; + + impl_vec_types!(i16, I16Vec2, I16Vec3, I16Vec4); +} + mod i32 { use crate::{IVec2, IVec3, IVec4}; use rand::{ @@ -198,6 +208,16 @@ mod i64 { impl_vec_types!(i64, I64Vec2, I64Vec3, I64Vec4); } +mod u16 { + use crate::{U16Vec2, U16Vec3, U16Vec4}; + use rand::{ + distributions::{Distribution, Standard}, + Rng, + }; + + impl_vec_types!(u16, U16Vec2, U16Vec3, U16Vec4); +} + mod u32 { use crate::{UVec2, UVec3, UVec4}; use rand::{ diff --git a/src/features/impl_rkyv.rs b/src/features/impl_rkyv.rs index 36e85fbe..fb72e376 100644 --- a/src/features/impl_rkyv.rs +++ b/src/features/impl_rkyv.rs @@ -90,6 +90,15 @@ mod f64 { impl_rkyv!(DVec4); } +mod i16 { + use crate::{I16Vec2, I16Vec3, I16Vec4}; + use rkyv::{from_archived, to_archived, Archive, Deserialize, Fallible, Serialize}; + + impl_rkyv!(I16Vec2); + impl_rkyv!(I16Vec3); + impl_rkyv!(I16Vec4); +} + mod i32 { use crate::{IVec2, IVec3, IVec4}; use rkyv::{from_archived, to_archived, Archive, Deserialize, Fallible, Serialize}; @@ -108,6 +117,15 @@ mod i64 { impl_rkyv!(I64Vec4); } +mod u16 { + use crate::{U16Vec2, U16Vec3, U16Vec4}; + use rkyv::{from_archived, to_archived, Archive, Deserialize, Fallible, Serialize}; + + impl_rkyv!(U16Vec2); + impl_rkyv!(U16Vec3); + impl_rkyv!(U16Vec4); +} + mod u32 { use crate::{UVec2, UVec3, UVec4}; use rkyv::{from_archived, to_archived, Archive, Deserialize, Fallible, Serialize}; @@ -193,6 +211,11 @@ mod test { test_archive(&DVec3::new(1.0, 2.0, 3.0)); test_archive(&DVec4::new(1.0, 2.0, 3.0, 4.0)); + use crate::{I16Vec2, I16Vec3, I16Vec4}; + test_archive(&I16Vec2::new(-1, 2)); + test_archive(&I16Vec3::new(-1, 2, 3)); + test_archive(&I16Vec4::new(-1, 2, 3, 4)); + use crate::{IVec2, IVec3, IVec4}; test_archive(&IVec2::new(-1, 2)); test_archive(&IVec3::new(-1, 2, 3)); @@ -203,6 +226,11 @@ mod test { test_archive(&I64Vec3::new(-1, 2, 3)); test_archive(&I64Vec4::new(-1, 2, 3, 
4)); + use crate::{U16Vec2, U16Vec3, U16Vec4}; + test_archive(&U16Vec2::new(1, 2)); + test_archive(&U16Vec3::new(1, 2, 3)); + test_archive(&U16Vec4::new(1, 2, 3, 4)); + use crate::{UVec2, UVec3, UVec4}; test_archive(&UVec2::new(1, 2)); test_archive(&UVec3::new(1, 2, 3)); diff --git a/src/features/impl_serde.rs b/src/features/impl_serde.rs index 8bc74cba..ab0db6cf 100644 --- a/src/features/impl_serde.rs +++ b/src/features/impl_serde.rs @@ -726,6 +726,14 @@ mod test_f64 { pub const V4: f64 = 4.0; } +#[cfg(test)] +mod test_i16 { + pub const V1: i16 = 1; + pub const V2: i16 = 2; + pub const V3: i16 = 3; + pub const V4: i16 = 4; +} + #[cfg(test)] mod test_i32 { pub const V1: i32 = 1; @@ -742,6 +750,14 @@ mod test_i64 { pub const V4: i64 = 4; } +#[cfg(test)] +mod test_u16 { + pub const V1: u16 = 1; + pub const V2: u16 = 2; + pub const V3: u16 = 3; + pub const V4: u16 = 4; +} + #[cfg(test)] mod test_u32 { pub const V1: u32 = 1; @@ -987,6 +1003,21 @@ mod f64 { ); } +mod i16 { + #[cfg(test)] + use super::test_i16::*; + #[cfg(test)] + use super::test_int::*; + use crate::{I16Vec2, I16Vec3, I16Vec4}; + use core::fmt; + use serde::{ + de::{self, Deserialize, Deserializer, SeqAccess, Visitor}, + ser::{Serialize, SerializeTupleStruct, Serializer}, + }; + + impl_serde_vec_types!(i16, I16Vec2, I16Vec3, I16Vec4); +} + mod i32 { #[cfg(test)] use super::test_i32::*; @@ -1017,6 +1048,21 @@ mod i64 { impl_serde_vec_types!(i64, I64Vec2, I64Vec3, I64Vec4); } +mod u16 { + #[cfg(test)] + use super::test_int::*; + #[cfg(test)] + use super::test_u16::*; + use crate::{U16Vec2, U16Vec3, U16Vec4}; + use core::fmt; + use serde::{ + de::{self, Deserialize, Deserializer, SeqAccess, Visitor}, + ser::{Serialize, SerializeTupleStruct, Serializer}, + }; + + impl_serde_vec_types!(u16, U16Vec2, U16Vec3, U16Vec4); +} + mod u32 { #[cfg(test)] use super::test_int::*; diff --git a/src/i16.rs b/src/i16.rs new file mode 100644 index 00000000..cec4cebe --- /dev/null +++ b/src/i16.rs @@ -0,0 +1,50 @@ +mod i16vec2; +mod i16vec3; +mod i16vec4; + +pub use i16vec2::{i16vec2, I16Vec2}; +pub use i16vec3::{i16vec3, I16Vec3}; +pub use i16vec4::{i16vec4, I16Vec4}; + +#[cfg(not(target_arch = "spirv"))] +mod test { + use super::*; + + mod const_test_i16vec2 { + #[cfg(not(feature = "cuda"))] + const_assert_eq!( + core::mem::align_of::(), + core::mem::align_of::() + ); + #[cfg(not(feature = "cuda"))] + const_assert_eq!(4, core::mem::size_of::()); + + #[cfg(feature = "cuda")] + const_assert_eq!(8, core::mem::align_of::()); + #[cfg(feature = "cuda")] + const_assert_eq!(8, core::mem::size_of::()); + } + + mod const_test_i16vec3 { + const_assert_eq!( + core::mem::align_of::(), + core::mem::align_of::() + ); + const_assert_eq!(6, core::mem::size_of::()); + } + + mod const_test_i16vec4 { + #[cfg(not(feature = "cuda"))] + const_assert_eq!( + core::mem::align_of::(), + core::mem::align_of::() + ); + #[cfg(not(feature = "cuda"))] + const_assert_eq!(8, core::mem::size_of::()); + + #[cfg(feature = "cuda")] + const_assert_eq!(16, core::mem::align_of::()); + #[cfg(feature = "cuda")] + const_assert_eq!(16, core::mem::size_of::()); + } +} diff --git a/src/i16/i16vec2.rs b/src/i16/i16vec2.rs new file mode 100644 index 00000000..7b5bf9c4 --- /dev/null +++ b/src/i16/i16vec2.rs @@ -0,0 +1,1206 @@ +// Generated from vec.rs.tera template. Edit the template, not the generated file. 
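Because the new types are plain #[repr(C)] structs of i16/u16 lanes (the const asserts above pin the non-cuda sizes to 4, 6 and 8 bytes with scalar alignment) and now implement bytemuck's Pod and Zeroable, packed scalar buffers can be reinterpreted in place. A minimal sketch, not part of the patch, assuming glam is built with its "bytemuck" feature and re-exports the new types at the crate root like the existing vectors:

use glam::I16Vec2;

// Reinterpret a packed buffer of i16 pairs as vectors without copying.
// bytemuck::cast_slice checks length and alignment at runtime and panics on a
// mismatch (e.g. an odd-length slice); without the "cuda" feature the alignment
// of I16Vec2 equals that of i16, so aligned input always casts cleanly.
fn as_i16vec2_slice(raw: &[i16]) -> &[I16Vec2] {
    bytemuck::cast_slice(raw)
}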
+ +use crate::{BVec2, I16Vec3, I64Vec2, IVec2, U16Vec2, U64Vec2, UVec2}; + +#[cfg(not(target_arch = "spirv"))] +use core::fmt; +use core::iter::{Product, Sum}; +use core::{f32, ops::*}; + +/// Creates a 2-dimensional vector. +#[inline(always)] +pub const fn i16vec2(x: i16, y: i16) -> I16Vec2 { + I16Vec2::new(x, y) +} + +/// A 2-dimensional vector. +#[cfg_attr(not(target_arch = "spirv"), derive(Hash))] +#[derive(Clone, Copy, PartialEq, Eq)] +#[cfg_attr(feature = "cuda", repr(align(8)))] +#[cfg_attr(not(target_arch = "spirv"), repr(C))] +#[cfg_attr(target_arch = "spirv", repr(simd))] +pub struct I16Vec2 { + pub x: i16, + pub y: i16, +} + +impl I16Vec2 { + /// All zeroes. + pub const ZERO: Self = Self::splat(0); + + /// All ones. + pub const ONE: Self = Self::splat(1); + + /// All negative ones. + pub const NEG_ONE: Self = Self::splat(-1); + + /// All `i16::MIN`. + pub const MIN: Self = Self::splat(i16::MIN); + + /// All `i16::MAX`. + pub const MAX: Self = Self::splat(i16::MAX); + + /// A unit vector pointing along the positive X axis. + pub const X: Self = Self::new(1, 0); + + /// A unit vector pointing along the positive Y axis. + pub const Y: Self = Self::new(0, 1); + + /// A unit vector pointing along the negative X axis. + pub const NEG_X: Self = Self::new(-1, 0); + + /// A unit vector pointing along the negative Y axis. + pub const NEG_Y: Self = Self::new(0, -1); + + /// The unit axes. + pub const AXES: [Self; 2] = [Self::X, Self::Y]; + + /// Creates a new vector. + #[inline(always)] + pub const fn new(x: i16, y: i16) -> Self { + Self { x, y } + } + + /// Creates a vector with all elements set to `v`. + #[inline] + pub const fn splat(v: i16) -> Self { + Self { x: v, y: v } + } + + /// Creates a vector from the elements in `if_true` and `if_false`, selecting which to use + /// for each element of `self`. + /// + /// A true element in the mask uses the corresponding element from `if_true`, and false + /// uses the element from `if_false`. + #[inline] + pub fn select(mask: BVec2, if_true: Self, if_false: Self) -> Self { + Self { + x: if mask.x { if_true.x } else { if_false.x }, + y: if mask.y { if_true.y } else { if_false.y }, + } + } + + /// Creates a new vector from an array. + #[inline] + pub const fn from_array(a: [i16; 2]) -> Self { + Self::new(a[0], a[1]) + } + + /// `[x, y]` + #[inline] + pub const fn to_array(&self) -> [i16; 2] { + [self.x, self.y] + } + + /// Creates a vector from the first 2 values in `slice`. + /// + /// # Panics + /// + /// Panics if `slice` is less than 2 elements long. + #[inline] + pub const fn from_slice(slice: &[i16]) -> Self { + Self::new(slice[0], slice[1]) + } + + /// Writes the elements of `self` to the first 2 elements in `slice`. + /// + /// # Panics + /// + /// Panics if `slice` is less than 2 elements long. + #[inline] + pub fn write_to_slice(self, slice: &mut [i16]) { + slice[0] = self.x; + slice[1] = self.y; + } + + /// Creates a 3D vector from `self` and the given `z` value. + #[inline] + pub const fn extend(self, z: i16) -> I16Vec3 { + I16Vec3::new(self.x, self.y, z) + } + + /// Computes the dot product of `self` and `rhs`. + #[inline] + pub fn dot(self, rhs: Self) -> i16 { + (self.x * rhs.x) + (self.y * rhs.y) + } + + /// Returns a vector where every component is the dot product of `self` and `rhs`. + #[inline] + pub fn dot_into_vec(self, rhs: Self) -> Self { + Self::splat(self.dot(rhs)) + } + + /// Returns a vector containing the minimum values for each element of `self` and `rhs`. 
+ /// + /// In other words this computes `[self.x.min(rhs.x), self.y.min(rhs.y), ..]`. + #[inline] + pub fn min(self, rhs: Self) -> Self { + Self { + x: self.x.min(rhs.x), + y: self.y.min(rhs.y), + } + } + + /// Returns a vector containing the maximum values for each element of `self` and `rhs`. + /// + /// In other words this computes `[self.x.max(rhs.x), self.y.max(rhs.y), ..]`. + #[inline] + pub fn max(self, rhs: Self) -> Self { + Self { + x: self.x.max(rhs.x), + y: self.y.max(rhs.y), + } + } + + /// Component-wise clamping of values, similar to [`i16::clamp`]. + /// + /// Each element in `min` must be less-or-equal to the corresponding element in `max`. + /// + /// # Panics + /// + /// Will panic if `min` is greater than `max` when `glam_assert` is enabled. + #[inline] + pub fn clamp(self, min: Self, max: Self) -> Self { + glam_assert!(min.cmple(max).all(), "clamp: expected min <= max"); + self.max(min).min(max) + } + + /// Returns the horizontal minimum of `self`. + /// + /// In other words this computes `min(x, y, ..)`. + #[inline] + pub fn min_element(self) -> i16 { + self.x.min(self.y) + } + + /// Returns the horizontal maximum of `self`. + /// + /// In other words this computes `max(x, y, ..)`. + #[inline] + pub fn max_element(self) -> i16 { + self.x.max(self.y) + } + + /// Returns a vector mask containing the result of a `==` comparison for each element of + /// `self` and `rhs`. + /// + /// In other words, this computes `[self.x == rhs.x, self.y == rhs.y, ..]` for all + /// elements. + #[inline] + pub fn cmpeq(self, rhs: Self) -> BVec2 { + BVec2::new(self.x.eq(&rhs.x), self.y.eq(&rhs.y)) + } + + /// Returns a vector mask containing the result of a `!=` comparison for each element of + /// `self` and `rhs`. + /// + /// In other words this computes `[self.x != rhs.x, self.y != rhs.y, ..]` for all + /// elements. + #[inline] + pub fn cmpne(self, rhs: Self) -> BVec2 { + BVec2::new(self.x.ne(&rhs.x), self.y.ne(&rhs.y)) + } + + /// Returns a vector mask containing the result of a `>=` comparison for each element of + /// `self` and `rhs`. + /// + /// In other words this computes `[self.x >= rhs.x, self.y >= rhs.y, ..]` for all + /// elements. + #[inline] + pub fn cmpge(self, rhs: Self) -> BVec2 { + BVec2::new(self.x.ge(&rhs.x), self.y.ge(&rhs.y)) + } + + /// Returns a vector mask containing the result of a `>` comparison for each element of + /// `self` and `rhs`. + /// + /// In other words this computes `[self.x > rhs.x, self.y > rhs.y, ..]` for all + /// elements. + #[inline] + pub fn cmpgt(self, rhs: Self) -> BVec2 { + BVec2::new(self.x.gt(&rhs.x), self.y.gt(&rhs.y)) + } + + /// Returns a vector mask containing the result of a `<=` comparison for each element of + /// `self` and `rhs`. + /// + /// In other words this computes `[self.x <= rhs.x, self.y <= rhs.y, ..]` for all + /// elements. + #[inline] + pub fn cmple(self, rhs: Self) -> BVec2 { + BVec2::new(self.x.le(&rhs.x), self.y.le(&rhs.y)) + } + + /// Returns a vector mask containing the result of a `<` comparison for each element of + /// `self` and `rhs`. + /// + /// In other words this computes `[self.x < rhs.x, self.y < rhs.y, ..]` for all + /// elements. + #[inline] + pub fn cmplt(self, rhs: Self) -> BVec2 { + BVec2::new(self.x.lt(&rhs.x), self.y.lt(&rhs.y)) + } + + /// Returns a vector containing the absolute value of each element of `self`. + #[inline] + pub fn abs(self) -> Self { + Self { + x: self.x.abs(), + y: self.y.abs(), + } + } + + /// Returns a vector with elements representing the sign of `self`. 
+ /// + /// - `0` if the number is zero + /// - `1` if the number is positive + /// - `-1` if the number is negative + #[inline] + pub fn signum(self) -> Self { + Self { + x: self.x.signum(), + y: self.y.signum(), + } + } + + /// Returns a bitmask with the lowest 2 bits set to the sign bits from the elements of `self`. + /// + /// A negative element results in a `1` bit and a positive element in a `0` bit. Element `x` goes + /// into the first lowest bit, element `y` into the second, etc. + #[inline] + pub fn is_negative_bitmask(self) -> u32 { + (self.x.is_negative() as u32) | (self.y.is_negative() as u32) << 1 + } + + /// Computes the squared length of `self`. + #[doc(alias = "magnitude2")] + #[inline] + pub fn length_squared(self) -> i16 { + self.dot(self) + } + + /// Compute the squared euclidean distance between two points in space. + #[inline] + pub fn distance_squared(self, rhs: Self) -> i16 { + (self - rhs).length_squared() + } + + /// Returns the element-wise quotient of [Euclidean division] of `self` by `rhs`. + /// + /// # Panics + /// This function will panic if any `rhs` element is 0 or the division results in overflow. + #[inline] + pub fn div_euclid(self, rhs: Self) -> Self { + Self::new(self.x.div_euclid(rhs.x), self.y.div_euclid(rhs.y)) + } + + /// Returns the element-wise remainder of [Euclidean division] of `self` by `rhs`. + /// + /// # Panics + /// This function will panic if any `rhs` element is 0 or the division results in overflow. + /// + /// [Euclidean division]: i16::rem_euclid + #[inline] + pub fn rem_euclid(self, rhs: Self) -> Self { + Self::new(self.x.rem_euclid(rhs.x), self.y.rem_euclid(rhs.y)) + } + + /// Returns a vector that is equal to `self` rotated by 90 degrees. + #[inline] + pub fn perp(self) -> Self { + Self { + x: -self.y, + y: self.x, + } + } + + /// The perpendicular dot product of `self` and `rhs`. + /// Also known as the wedge product, 2D cross product, and determinant. + #[doc(alias = "wedge")] + #[doc(alias = "cross")] + #[doc(alias = "determinant")] + #[inline] + pub fn perp_dot(self, rhs: Self) -> i16 { + (self.x * rhs.y) - (self.y * rhs.x) + } + + /// Returns `rhs` rotated by the angle of `self`. If `self` is normalized, + /// then this is just a rotation. This is what you usually want. Otherwise, + /// it will be like a rotation with a multiplication by `self`'s length. + #[must_use] + #[inline] + pub fn rotate(self, rhs: Self) -> Self { + Self { + x: self.x * rhs.x - self.y * rhs.y, + y: self.y * rhs.x + self.x * rhs.y, + } + } + + /// Casts all elements of `self` to `f32`. + #[inline] + pub fn as_vec2(&self) -> crate::Vec2 { + crate::Vec2::new(self.x as f32, self.y as f32) + } + + /// Casts all elements of `self` to `f64`. + #[inline] + pub fn as_dvec2(&self) -> crate::DVec2 { + crate::DVec2::new(self.x as f64, self.y as f64) + } + + /// Casts all elements of `self` to `u16`. + #[inline] + pub fn as_u16vec2(&self) -> crate::U16Vec2 { + crate::U16Vec2::new(self.x as u16, self.y as u16) + } + + /// Casts all elements of `self` to `i32`. + #[inline] + pub fn as_ivec2(&self) -> crate::IVec2 { + crate::IVec2::new(self.x as i32, self.y as i32) + } + + /// Casts all elements of `self` to `u32`. + #[inline] + pub fn as_uvec2(&self) -> crate::UVec2 { + crate::UVec2::new(self.x as u32, self.y as u32) + } + + /// Casts all elements of `self` to `i64`. + #[inline] + pub fn as_i64vec2(&self) -> crate::I64Vec2 { + crate::I64Vec2::new(self.x as i64, self.y as i64) + } + + /// Casts all elements of `self` to `u64`.
+ #[inline] + pub fn as_u64vec2(&self) -> crate::U64Vec2 { + crate::U64Vec2::new(self.x as u64, self.y as u64) + } + + /// Returns a vector containing the wrapping addition of `self` and `rhs`. + /// + /// In other words this computes `[self.x.wrapping_add(rhs.x), self.y.wrapping_add(rhs.y), ..]`. + #[inline] + #[must_use] + pub const fn wrapping_add(self, rhs: Self) -> Self { + Self { + x: self.x.wrapping_add(rhs.x), + y: self.y.wrapping_add(rhs.y), + } + } + + /// Returns a vector containing the wrapping subtraction of `self` and `rhs`. + /// + /// In other words this computes `[self.x.wrapping_sub(rhs.x), self.y.wrapping_sub(rhs.y), ..]`. + #[inline] + #[must_use] + pub const fn wrapping_sub(self, rhs: Self) -> Self { + Self { + x: self.x.wrapping_sub(rhs.x), + y: self.y.wrapping_sub(rhs.y), + } + } + + /// Returns a vector containing the wrapping multiplication of `self` and `rhs`. + /// + /// In other words this computes `[self.x.wrapping_mul(rhs.x), self.y.wrapping_mul(rhs.y), ..]`. + #[inline] + #[must_use] + pub const fn wrapping_mul(self, rhs: Self) -> Self { + Self { + x: self.x.wrapping_mul(rhs.x), + y: self.y.wrapping_mul(rhs.y), + } + } + + /// Returns a vector containing the wrapping division of `self` and `rhs`. + /// + /// In other words this computes `[self.x.wrapping_div(rhs.x), self.y.wrapping_div(rhs.y), ..]`. + #[inline] + #[must_use] + pub const fn wrapping_div(self, rhs: Self) -> Self { + Self { + x: self.x.wrapping_div(rhs.x), + y: self.y.wrapping_div(rhs.y), + } + } + + /// Returns a vector containing the saturating addition of `self` and `rhs`. + /// + /// In other words this computes `[self.x.saturating_add(rhs.x), self.y.saturating_add(rhs.y), ..]`. + #[inline] + #[must_use] + pub const fn saturating_add(self, rhs: Self) -> Self { + Self { + x: self.x.saturating_add(rhs.x), + y: self.y.saturating_add(rhs.y), + } + } + + /// Returns a vector containing the saturating subtraction of `self` and `rhs`. + /// + /// In other words this computes `[self.x.saturating_sub(rhs.x), self.y.saturating_sub(rhs.y), ..]`. + #[inline] + #[must_use] + pub const fn saturating_sub(self, rhs: Self) -> Self { + Self { + x: self.x.saturating_sub(rhs.x), + y: self.y.saturating_sub(rhs.y), + } + } + + /// Returns a vector containing the saturating multiplication of `self` and `rhs`. + /// + /// In other words this computes `[self.x.saturating_mul(rhs.x), self.y.saturating_mul(rhs.y), ..]`. + #[inline] + #[must_use] + pub const fn saturating_mul(self, rhs: Self) -> Self { + Self { + x: self.x.saturating_mul(rhs.x), + y: self.y.saturating_mul(rhs.y), + } + } + + /// Returns a vector containing the saturating division of `self` and `rhs`. + /// + /// In other words this computes `[self.x.saturating_div(rhs.x), self.y.saturating_div(rhs.y), ..]`. 
+ #[inline] + #[must_use] + pub const fn saturating_div(self, rhs: Self) -> Self { + Self { + x: self.x.saturating_div(rhs.x), + y: self.y.saturating_div(rhs.y), + } + } +} + +impl Default for I16Vec2 { + #[inline(always)] + fn default() -> Self { + Self::ZERO + } +} + +impl Div for I16Vec2 { + type Output = Self; + #[inline] + fn div(self, rhs: Self) -> Self { + Self { + x: self.x.div(rhs.x), + y: self.y.div(rhs.y), + } + } +} + +impl DivAssign for I16Vec2 { + #[inline] + fn div_assign(&mut self, rhs: Self) { + self.x.div_assign(rhs.x); + self.y.div_assign(rhs.y); + } +} + +impl Div for I16Vec2 { + type Output = Self; + #[inline] + fn div(self, rhs: i16) -> Self { + Self { + x: self.x.div(rhs), + y: self.y.div(rhs), + } + } +} + +impl DivAssign for I16Vec2 { + #[inline] + fn div_assign(&mut self, rhs: i16) { + self.x.div_assign(rhs); + self.y.div_assign(rhs); + } +} + +impl Div for i16 { + type Output = I16Vec2; + #[inline] + fn div(self, rhs: I16Vec2) -> I16Vec2 { + I16Vec2 { + x: self.div(rhs.x), + y: self.div(rhs.y), + } + } +} + +impl Mul for I16Vec2 { + type Output = Self; + #[inline] + fn mul(self, rhs: Self) -> Self { + Self { + x: self.x.mul(rhs.x), + y: self.y.mul(rhs.y), + } + } +} + +impl MulAssign for I16Vec2 { + #[inline] + fn mul_assign(&mut self, rhs: Self) { + self.x.mul_assign(rhs.x); + self.y.mul_assign(rhs.y); + } +} + +impl Mul for I16Vec2 { + type Output = Self; + #[inline] + fn mul(self, rhs: i16) -> Self { + Self { + x: self.x.mul(rhs), + y: self.y.mul(rhs), + } + } +} + +impl MulAssign for I16Vec2 { + #[inline] + fn mul_assign(&mut self, rhs: i16) { + self.x.mul_assign(rhs); + self.y.mul_assign(rhs); + } +} + +impl Mul for i16 { + type Output = I16Vec2; + #[inline] + fn mul(self, rhs: I16Vec2) -> I16Vec2 { + I16Vec2 { + x: self.mul(rhs.x), + y: self.mul(rhs.y), + } + } +} + +impl Add for I16Vec2 { + type Output = Self; + #[inline] + fn add(self, rhs: Self) -> Self { + Self { + x: self.x.add(rhs.x), + y: self.y.add(rhs.y), + } + } +} + +impl AddAssign for I16Vec2 { + #[inline] + fn add_assign(&mut self, rhs: Self) { + self.x.add_assign(rhs.x); + self.y.add_assign(rhs.y); + } +} + +impl Add for I16Vec2 { + type Output = Self; + #[inline] + fn add(self, rhs: i16) -> Self { + Self { + x: self.x.add(rhs), + y: self.y.add(rhs), + } + } +} + +impl AddAssign for I16Vec2 { + #[inline] + fn add_assign(&mut self, rhs: i16) { + self.x.add_assign(rhs); + self.y.add_assign(rhs); + } +} + +impl Add for i16 { + type Output = I16Vec2; + #[inline] + fn add(self, rhs: I16Vec2) -> I16Vec2 { + I16Vec2 { + x: self.add(rhs.x), + y: self.add(rhs.y), + } + } +} + +impl Sub for I16Vec2 { + type Output = Self; + #[inline] + fn sub(self, rhs: Self) -> Self { + Self { + x: self.x.sub(rhs.x), + y: self.y.sub(rhs.y), + } + } +} + +impl SubAssign for I16Vec2 { + #[inline] + fn sub_assign(&mut self, rhs: I16Vec2) { + self.x.sub_assign(rhs.x); + self.y.sub_assign(rhs.y); + } +} + +impl Sub for I16Vec2 { + type Output = Self; + #[inline] + fn sub(self, rhs: i16) -> Self { + Self { + x: self.x.sub(rhs), + y: self.y.sub(rhs), + } + } +} + +impl SubAssign for I16Vec2 { + #[inline] + fn sub_assign(&mut self, rhs: i16) { + self.x.sub_assign(rhs); + self.y.sub_assign(rhs); + } +} + +impl Sub for i16 { + type Output = I16Vec2; + #[inline] + fn sub(self, rhs: I16Vec2) -> I16Vec2 { + I16Vec2 { + x: self.sub(rhs.x), + y: self.sub(rhs.y), + } + } +} + +impl Rem for I16Vec2 { + type Output = Self; + #[inline] + fn rem(self, rhs: Self) -> Self { + Self { + x: self.x.rem(rhs.x), + y: self.y.rem(rhs.y), + } + } +} 
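The operator impls above follow ordinary Rust integer semantics, so with only 16 bits of range the explicit wrapping_*/saturating_* methods are the intended way to deal with overflow. A minimal usage sketch (not part of the patch; assumes the new types are re-exported from the crate root as `glam::I16Vec2` / `glam::i16vec2`, matching the existing vector types):

use glam::{i16vec2, I16Vec2};

fn overflow_aware_sum(a: I16Vec2, b: I16Vec2) -> I16Vec2 {
    // `a + b` would use the Add impl above and follow plain i16 overflow rules
    // (a panic in debug builds); saturating_add clamps to the i16 range instead.
    a.saturating_add(b)
}

fn overflow_examples() {
    let near_max = i16vec2(i16::MAX - 1, 10);
    let step = I16Vec2::splat(5);
    // Saturating: each lane is clamped to i16::MIN..=i16::MAX.
    assert_eq!(overflow_aware_sum(near_max, step), i16vec2(i16::MAX, 15));
    // Wrapping: each lane wraps around modulo 2^16 instead of clamping.
    assert_eq!(near_max.wrapping_add(step).x, i16::MIN + 3);
}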
+ +impl RemAssign for I16Vec2 { + #[inline] + fn rem_assign(&mut self, rhs: Self) { + self.x.rem_assign(rhs.x); + self.y.rem_assign(rhs.y); + } +} + +impl Rem for I16Vec2 { + type Output = Self; + #[inline] + fn rem(self, rhs: i16) -> Self { + Self { + x: self.x.rem(rhs), + y: self.y.rem(rhs), + } + } +} + +impl RemAssign for I16Vec2 { + #[inline] + fn rem_assign(&mut self, rhs: i16) { + self.x.rem_assign(rhs); + self.y.rem_assign(rhs); + } +} + +impl Rem for i16 { + type Output = I16Vec2; + #[inline] + fn rem(self, rhs: I16Vec2) -> I16Vec2 { + I16Vec2 { + x: self.rem(rhs.x), + y: self.rem(rhs.y), + } + } +} + +#[cfg(not(target_arch = "spirv"))] +impl AsRef<[i16; 2]> for I16Vec2 { + #[inline] + fn as_ref(&self) -> &[i16; 2] { + unsafe { &*(self as *const I16Vec2 as *const [i16; 2]) } + } +} + +#[cfg(not(target_arch = "spirv"))] +impl AsMut<[i16; 2]> for I16Vec2 { + #[inline] + fn as_mut(&mut self) -> &mut [i16; 2] { + unsafe { &mut *(self as *mut I16Vec2 as *mut [i16; 2]) } + } +} + +impl Sum for I16Vec2 { + #[inline] + fn sum(iter: I) -> Self + where + I: Iterator, + { + iter.fold(Self::ZERO, Self::add) + } +} + +impl<'a> Sum<&'a Self> for I16Vec2 { + #[inline] + fn sum(iter: I) -> Self + where + I: Iterator, + { + iter.fold(Self::ZERO, |a, &b| Self::add(a, b)) + } +} + +impl Product for I16Vec2 { + #[inline] + fn product(iter: I) -> Self + where + I: Iterator, + { + iter.fold(Self::ONE, Self::mul) + } +} + +impl<'a> Product<&'a Self> for I16Vec2 { + #[inline] + fn product(iter: I) -> Self + where + I: Iterator, + { + iter.fold(Self::ONE, |a, &b| Self::mul(a, b)) + } +} + +impl Neg for I16Vec2 { + type Output = Self; + #[inline] + fn neg(self) -> Self { + Self { + x: self.x.neg(), + y: self.y.neg(), + } + } +} + +impl Not for I16Vec2 { + type Output = Self; + #[inline] + fn not(self) -> Self::Output { + Self { + x: self.x.not(), + y: self.y.not(), + } + } +} + +impl BitAnd for I16Vec2 { + type Output = Self; + #[inline] + fn bitand(self, rhs: Self) -> Self::Output { + Self { + x: self.x.bitand(rhs.x), + y: self.y.bitand(rhs.y), + } + } +} + +impl BitOr for I16Vec2 { + type Output = Self; + #[inline] + fn bitor(self, rhs: Self) -> Self::Output { + Self { + x: self.x.bitor(rhs.x), + y: self.y.bitor(rhs.y), + } + } +} + +impl BitXor for I16Vec2 { + type Output = Self; + #[inline] + fn bitxor(self, rhs: Self) -> Self::Output { + Self { + x: self.x.bitxor(rhs.x), + y: self.y.bitxor(rhs.y), + } + } +} + +impl BitAnd for I16Vec2 { + type Output = Self; + #[inline] + fn bitand(self, rhs: i16) -> Self::Output { + Self { + x: self.x.bitand(rhs), + y: self.y.bitand(rhs), + } + } +} + +impl BitOr for I16Vec2 { + type Output = Self; + #[inline] + fn bitor(self, rhs: i16) -> Self::Output { + Self { + x: self.x.bitor(rhs), + y: self.y.bitor(rhs), + } + } +} + +impl BitXor for I16Vec2 { + type Output = Self; + #[inline] + fn bitxor(self, rhs: i16) -> Self::Output { + Self { + x: self.x.bitxor(rhs), + y: self.y.bitxor(rhs), + } + } +} + +impl Shl for I16Vec2 { + type Output = Self; + #[inline] + fn shl(self, rhs: i8) -> Self::Output { + Self { + x: self.x.shl(rhs), + y: self.y.shl(rhs), + } + } +} + +impl Shr for I16Vec2 { + type Output = Self; + #[inline] + fn shr(self, rhs: i8) -> Self::Output { + Self { + x: self.x.shr(rhs), + y: self.y.shr(rhs), + } + } +} + +impl Shl for I16Vec2 { + type Output = Self; + #[inline] + fn shl(self, rhs: i16) -> Self::Output { + Self { + x: self.x.shl(rhs), + y: self.y.shl(rhs), + } + } +} + +impl Shr for I16Vec2 { + type Output = Self; + #[inline] + fn shr(self, rhs: 
i16) -> Self::Output { + Self { + x: self.x.shr(rhs), + y: self.y.shr(rhs), + } + } +} + +impl Shl for I16Vec2 { + type Output = Self; + #[inline] + fn shl(self, rhs: i32) -> Self::Output { + Self { + x: self.x.shl(rhs), + y: self.y.shl(rhs), + } + } +} + +impl Shr for I16Vec2 { + type Output = Self; + #[inline] + fn shr(self, rhs: i32) -> Self::Output { + Self { + x: self.x.shr(rhs), + y: self.y.shr(rhs), + } + } +} + +impl Shl for I16Vec2 { + type Output = Self; + #[inline] + fn shl(self, rhs: i64) -> Self::Output { + Self { + x: self.x.shl(rhs), + y: self.y.shl(rhs), + } + } +} + +impl Shr for I16Vec2 { + type Output = Self; + #[inline] + fn shr(self, rhs: i64) -> Self::Output { + Self { + x: self.x.shr(rhs), + y: self.y.shr(rhs), + } + } +} + +impl Shl for I16Vec2 { + type Output = Self; + #[inline] + fn shl(self, rhs: u8) -> Self::Output { + Self { + x: self.x.shl(rhs), + y: self.y.shl(rhs), + } + } +} + +impl Shr for I16Vec2 { + type Output = Self; + #[inline] + fn shr(self, rhs: u8) -> Self::Output { + Self { + x: self.x.shr(rhs), + y: self.y.shr(rhs), + } + } +} + +impl Shl for I16Vec2 { + type Output = Self; + #[inline] + fn shl(self, rhs: u16) -> Self::Output { + Self { + x: self.x.shl(rhs), + y: self.y.shl(rhs), + } + } +} + +impl Shr for I16Vec2 { + type Output = Self; + #[inline] + fn shr(self, rhs: u16) -> Self::Output { + Self { + x: self.x.shr(rhs), + y: self.y.shr(rhs), + } + } +} + +impl Shl for I16Vec2 { + type Output = Self; + #[inline] + fn shl(self, rhs: u32) -> Self::Output { + Self { + x: self.x.shl(rhs), + y: self.y.shl(rhs), + } + } +} + +impl Shr for I16Vec2 { + type Output = Self; + #[inline] + fn shr(self, rhs: u32) -> Self::Output { + Self { + x: self.x.shr(rhs), + y: self.y.shr(rhs), + } + } +} + +impl Shl for I16Vec2 { + type Output = Self; + #[inline] + fn shl(self, rhs: u64) -> Self::Output { + Self { + x: self.x.shl(rhs), + y: self.y.shl(rhs), + } + } +} + +impl Shr for I16Vec2 { + type Output = Self; + #[inline] + fn shr(self, rhs: u64) -> Self::Output { + Self { + x: self.x.shr(rhs), + y: self.y.shr(rhs), + } + } +} + +impl Shl for I16Vec2 { + type Output = Self; + #[inline] + fn shl(self, rhs: crate::IVec2) -> Self::Output { + Self { + x: self.x.shl(rhs.x), + y: self.y.shl(rhs.y), + } + } +} + +impl Shr for I16Vec2 { + type Output = Self; + #[inline] + fn shr(self, rhs: crate::IVec2) -> Self::Output { + Self { + x: self.x.shr(rhs.x), + y: self.y.shr(rhs.y), + } + } +} + +impl Shl for I16Vec2 { + type Output = Self; + #[inline] + fn shl(self, rhs: crate::UVec2) -> Self::Output { + Self { + x: self.x.shl(rhs.x), + y: self.y.shl(rhs.y), + } + } +} + +impl Shr for I16Vec2 { + type Output = Self; + #[inline] + fn shr(self, rhs: crate::UVec2) -> Self::Output { + Self { + x: self.x.shr(rhs.x), + y: self.y.shr(rhs.y), + } + } +} + +impl Index for I16Vec2 { + type Output = i16; + #[inline] + fn index(&self, index: usize) -> &Self::Output { + match index { + 0 => &self.x, + 1 => &self.y, + _ => panic!("index out of bounds"), + } + } +} + +impl IndexMut for I16Vec2 { + #[inline] + fn index_mut(&mut self, index: usize) -> &mut Self::Output { + match index { + 0 => &mut self.x, + 1 => &mut self.y, + _ => panic!("index out of bounds"), + } + } +} + +#[cfg(not(target_arch = "spirv"))] +impl fmt::Display for I16Vec2 { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "[{}, {}]", self.x, self.y) + } +} + +#[cfg(not(target_arch = "spirv"))] +impl fmt::Debug for I16Vec2 { + fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { + 
fmt.debug_tuple(stringify!(I16Vec2)) + .field(&self.x) + .field(&self.y) + .finish() + } +} + +impl From<[i16; 2]> for I16Vec2 { + #[inline] + fn from(a: [i16; 2]) -> Self { + Self::new(a[0], a[1]) + } +} + +impl From for [i16; 2] { + #[inline] + fn from(v: I16Vec2) -> Self { + [v.x, v.y] + } +} + +impl From<(i16, i16)> for I16Vec2 { + #[inline] + fn from(t: (i16, i16)) -> Self { + Self::new(t.0, t.1) + } +} + +impl From for (i16, i16) { + #[inline] + fn from(v: I16Vec2) -> Self { + (v.x, v.y) + } +} + +impl TryFrom for I16Vec2 { + type Error = core::num::TryFromIntError; + + #[inline] + fn try_from(v: U16Vec2) -> Result { + Ok(Self::new(i16::try_from(v.x)?, i16::try_from(v.y)?)) + } +} + +impl TryFrom for I16Vec2 { + type Error = core::num::TryFromIntError; + + #[inline] + fn try_from(v: IVec2) -> Result { + Ok(Self::new(i16::try_from(v.x)?, i16::try_from(v.y)?)) + } +} + +impl TryFrom for I16Vec2 { + type Error = core::num::TryFromIntError; + + #[inline] + fn try_from(v: UVec2) -> Result { + Ok(Self::new(i16::try_from(v.x)?, i16::try_from(v.y)?)) + } +} + +impl TryFrom for I16Vec2 { + type Error = core::num::TryFromIntError; + + #[inline] + fn try_from(v: I64Vec2) -> Result { + Ok(Self::new(i16::try_from(v.x)?, i16::try_from(v.y)?)) + } +} + +impl TryFrom for I16Vec2 { + type Error = core::num::TryFromIntError; + + #[inline] + fn try_from(v: U64Vec2) -> Result { + Ok(Self::new(i16::try_from(v.x)?, i16::try_from(v.y)?)) + } +} diff --git a/src/i16/i16vec3.rs b/src/i16/i16vec3.rs new file mode 100644 index 00000000..978bfca2 --- /dev/null +++ b/src/i16/i16vec3.rs @@ -0,0 +1,1324 @@ +// Generated from vec.rs.tera template. Edit the template, not the generated file. + +use crate::{BVec3, I16Vec2, I16Vec4, I64Vec3, IVec3, U16Vec3, U64Vec3, UVec3}; + +#[cfg(not(target_arch = "spirv"))] +use core::fmt; +use core::iter::{Product, Sum}; +use core::{f32, ops::*}; + +/// Creates a 3-dimensional vector. +#[inline(always)] +pub const fn i16vec3(x: i16, y: i16, z: i16) -> I16Vec3 { + I16Vec3::new(x, y, z) +} + +/// A 3-dimensional vector. +#[cfg_attr(not(target_arch = "spirv"), derive(Hash))] +#[derive(Clone, Copy, PartialEq, Eq)] +#[cfg_attr(not(target_arch = "spirv"), repr(C))] +#[cfg_attr(target_arch = "spirv", repr(simd))] +pub struct I16Vec3 { + pub x: i16, + pub y: i16, + pub z: i16, +} + +impl I16Vec3 { + /// All zeroes. + pub const ZERO: Self = Self::splat(0); + + /// All ones. + pub const ONE: Self = Self::splat(1); + + /// All negative ones. + pub const NEG_ONE: Self = Self::splat(-1); + + /// All `i16::MIN`. + pub const MIN: Self = Self::splat(i16::MIN); + + /// All `i16::MAX`. + pub const MAX: Self = Self::splat(i16::MAX); + + /// A unit vector pointing along the positive X axis. + pub const X: Self = Self::new(1, 0, 0); + + /// A unit vector pointing along the positive Y axis. + pub const Y: Self = Self::new(0, 1, 0); + + /// A unit vector pointing along the positive Z axis. + pub const Z: Self = Self::new(0, 0, 1); + + /// A unit vector pointing along the negative X axis. + pub const NEG_X: Self = Self::new(-1, 0, 0); + + /// A unit vector pointing along the negative Y axis. + pub const NEG_Y: Self = Self::new(0, -1, 0); + + /// A unit vector pointing along the negative Z axis. + pub const NEG_Z: Self = Self::new(0, 0, -1); + + /// The unit axes. + pub const AXES: [Self; 3] = [Self::X, Self::Y, Self::Z]; + + /// Creates a new vector. + #[inline(always)] + pub const fn new(x: i16, y: i16, z: i16) -> Self { + Self { x, y, z } + } + + /// Creates a vector with all elements set to `v`. 
+ #[inline] + pub const fn splat(v: i16) -> Self { + Self { x: v, y: v, z: v } + } + + /// Creates a vector from the elements in `if_true` and `if_false`, selecting which to use + /// for each element of `self`. + /// + /// A true element in the mask uses the corresponding element from `if_true`, and false + /// uses the element from `if_false`. + #[inline] + pub fn select(mask: BVec3, if_true: Self, if_false: Self) -> Self { + Self { + x: if mask.x { if_true.x } else { if_false.x }, + y: if mask.y { if_true.y } else { if_false.y }, + z: if mask.z { if_true.z } else { if_false.z }, + } + } + + /// Creates a new vector from an array. + #[inline] + pub const fn from_array(a: [i16; 3]) -> Self { + Self::new(a[0], a[1], a[2]) + } + + /// `[x, y, z]` + #[inline] + pub const fn to_array(&self) -> [i16; 3] { + [self.x, self.y, self.z] + } + + /// Creates a vector from the first 3 values in `slice`. + /// + /// # Panics + /// + /// Panics if `slice` is less than 3 elements long. + #[inline] + pub const fn from_slice(slice: &[i16]) -> Self { + Self::new(slice[0], slice[1], slice[2]) + } + + /// Writes the elements of `self` to the first 3 elements in `slice`. + /// + /// # Panics + /// + /// Panics if `slice` is less than 3 elements long. + #[inline] + pub fn write_to_slice(self, slice: &mut [i16]) { + slice[0] = self.x; + slice[1] = self.y; + slice[2] = self.z; + } + + /// Internal method for creating a 3D vector from a 4D vector, discarding `w`. + #[allow(dead_code)] + #[inline] + pub(crate) fn from_vec4(v: I16Vec4) -> Self { + Self { + x: v.x, + y: v.y, + z: v.z, + } + } + + /// Creates a 4D vector from `self` and the given `w` value. + #[inline] + pub fn extend(self, w: i16) -> I16Vec4 { + I16Vec4::new(self.x, self.y, self.z, w) + } + + /// Creates a 2D vector from the `x` and `y` elements of `self`, discarding `z`. + /// + /// Truncation may also be performed by using [`self.xy()`][crate::swizzles::Vec3Swizzles::xy()]. + #[inline] + pub fn truncate(self) -> I16Vec2 { + use crate::swizzles::Vec3Swizzles; + self.xy() + } + + /// Computes the dot product of `self` and `rhs`. + #[inline] + pub fn dot(self, rhs: Self) -> i16 { + (self.x * rhs.x) + (self.y * rhs.y) + (self.z * rhs.z) + } + + /// Returns a vector where every component is the dot product of `self` and `rhs`. + #[inline] + pub fn dot_into_vec(self, rhs: Self) -> Self { + Self::splat(self.dot(rhs)) + } + + /// Computes the cross product of `self` and `rhs`. + #[inline] + pub fn cross(self, rhs: Self) -> Self { + Self { + x: self.y * rhs.z - rhs.y * self.z, + y: self.z * rhs.x - rhs.z * self.x, + z: self.x * rhs.y - rhs.x * self.y, + } + } + + /// Returns a vector containing the minimum values for each element of `self` and `rhs`. + /// + /// In other words this computes `[self.x.min(rhs.x), self.y.min(rhs.y), ..]`. + #[inline] + pub fn min(self, rhs: Self) -> Self { + Self { + x: self.x.min(rhs.x), + y: self.y.min(rhs.y), + z: self.z.min(rhs.z), + } + } + + /// Returns a vector containing the maximum values for each element of `self` and `rhs`. + /// + /// In other words this computes `[self.x.max(rhs.x), self.y.max(rhs.y), ..]`. + #[inline] + pub fn max(self, rhs: Self) -> Self { + Self { + x: self.x.max(rhs.x), + y: self.y.max(rhs.y), + z: self.z.max(rhs.z), + } + } + + /// Component-wise clamping of values, similar to [`i16::clamp`]. + /// + /// Each element in `min` must be less-or-equal to the corresponding element in `max`. + /// + /// # Panics + /// + /// Will panic if `min` is greater than `max` when `glam_assert` is enabled. 
+ #[inline] + pub fn clamp(self, min: Self, max: Self) -> Self { + glam_assert!(min.cmple(max).all(), "clamp: expected min <= max"); + self.max(min).min(max) + } + + /// Returns the horizontal minimum of `self`. + /// + /// In other words this computes `min(x, y, ..)`. + #[inline] + pub fn min_element(self) -> i16 { + self.x.min(self.y.min(self.z)) + } + + /// Returns the horizontal maximum of `self`. + /// + /// In other words this computes `max(x, y, ..)`. + #[inline] + pub fn max_element(self) -> i16 { + self.x.max(self.y.max(self.z)) + } + + /// Returns a vector mask containing the result of a `==` comparison for each element of + /// `self` and `rhs`. + /// + /// In other words, this computes `[self.x == rhs.x, self.y == rhs.y, ..]` for all + /// elements. + #[inline] + pub fn cmpeq(self, rhs: Self) -> BVec3 { + BVec3::new(self.x.eq(&rhs.x), self.y.eq(&rhs.y), self.z.eq(&rhs.z)) + } + + /// Returns a vector mask containing the result of a `!=` comparison for each element of + /// `self` and `rhs`. + /// + /// In other words this computes `[self.x != rhs.x, self.y != rhs.y, ..]` for all + /// elements. + #[inline] + pub fn cmpne(self, rhs: Self) -> BVec3 { + BVec3::new(self.x.ne(&rhs.x), self.y.ne(&rhs.y), self.z.ne(&rhs.z)) + } + + /// Returns a vector mask containing the result of a `>=` comparison for each element of + /// `self` and `rhs`. + /// + /// In other words this computes `[self.x >= rhs.x, self.y >= rhs.y, ..]` for all + /// elements. + #[inline] + pub fn cmpge(self, rhs: Self) -> BVec3 { + BVec3::new(self.x.ge(&rhs.x), self.y.ge(&rhs.y), self.z.ge(&rhs.z)) + } + + /// Returns a vector mask containing the result of a `>` comparison for each element of + /// `self` and `rhs`. + /// + /// In other words this computes `[self.x > rhs.x, self.y > rhs.y, ..]` for all + /// elements. + #[inline] + pub fn cmpgt(self, rhs: Self) -> BVec3 { + BVec3::new(self.x.gt(&rhs.x), self.y.gt(&rhs.y), self.z.gt(&rhs.z)) + } + + /// Returns a vector mask containing the result of a `<=` comparison for each element of + /// `self` and `rhs`. + /// + /// In other words this computes `[self.x <= rhs.x, self.y <= rhs.y, ..]` for all + /// elements. + #[inline] + pub fn cmple(self, rhs: Self) -> BVec3 { + BVec3::new(self.x.le(&rhs.x), self.y.le(&rhs.y), self.z.le(&rhs.z)) + } + + /// Returns a vector mask containing the result of a `<` comparison for each element of + /// `self` and `rhs`. + /// + /// In other words this computes `[self.x < rhs.x, self.y < rhs.y, ..]` for all + /// elements. + #[inline] + pub fn cmplt(self, rhs: Self) -> BVec3 { + BVec3::new(self.x.lt(&rhs.x), self.y.lt(&rhs.y), self.z.lt(&rhs.z)) + } + + /// Returns a vector containing the absolute value of each element of `self`. + #[inline] + pub fn abs(self) -> Self { + Self { + x: self.x.abs(), + y: self.y.abs(), + z: self.z.abs(), + } + } + + /// Returns a vector with elements representing the sign of `self`. + /// + /// - `0` if the number is zero + /// - `1` if the number is positive + /// - `-1` if the number is negative + #[inline] + pub fn signum(self) -> Self { + Self { + x: self.x.signum(), + y: self.y.signum(), + z: self.z.signum(), + } + } + + /// Returns a bitmask with the lowest 3 bits set to the sign bits from the elements of `self`. + /// + /// A negative element results in a `1` bit and a positive element in a `0` bit. Element `x` goes + /// into the first lowest bit, element `y` into the second, etc. 
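For illustration (assuming the API above): the comparison methods return a `BVec3` mask that pairs with `select`, just like the float vector types.

    use glam::{i16vec3, BVec3, I16Vec3};

    let v = i16vec3(-5, 0, 9);
    assert_eq!(v.clamp(I16Vec3::splat(-2), I16Vec3::splat(2)), i16vec3(-2, 0, 2));
    assert_eq!(v.min_element(), -5);
    assert_eq!(v.cmplt(I16Vec3::ZERO), BVec3::new(true, false, false));
    // Zero out the negative lanes.
    assert_eq!(
        I16Vec3::select(v.cmplt(I16Vec3::ZERO), I16Vec3::ZERO, v),
        i16vec3(0, 0, 9),
    );
    assert_eq!(v.signum(), i16vec3(-1, 0, 1));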
+ #[inline] + pub fn is_negative_bitmask(self) -> u32 { + (self.x.is_negative() as u32) + | (self.y.is_negative() as u32) << 1 + | (self.z.is_negative() as u32) << 2 + } + + /// Computes the squared length of `self`. + #[doc(alias = "magnitude2")] + #[inline] + pub fn length_squared(self) -> i16 { + self.dot(self) + } + + /// Compute the squared euclidean distance between two points in space. + #[inline] + pub fn distance_squared(self, rhs: Self) -> i16 { + (self - rhs).length_squared() + } + + /// Returns the element-wise quotient of [Euclidean division] of `self` by `rhs`. + /// + /// # Panics + /// This function will panic if any `rhs` element is 0 or the division results in overflow. + #[inline] + pub fn div_euclid(self, rhs: Self) -> Self { + Self::new( + self.x.div_euclid(rhs.x), + self.y.div_euclid(rhs.y), + self.z.div_euclid(rhs.z), + ) + } + + /// Returns the element-wise remainder of [Euclidean division] of `self` by `rhs`. + /// + /// # Panics + /// This function will panic if any `rhs` element is 0 or the division results in overflow. + /// + /// [Euclidean division]: i16::rem_euclid + #[inline] + pub fn rem_euclid(self, rhs: Self) -> Self { + Self::new( + self.x.rem_euclid(rhs.x), + self.y.rem_euclid(rhs.y), + self.z.rem_euclid(rhs.z), + ) + } + + /// Casts all elements of `self` to `f32`. + #[inline] + pub fn as_vec3(&self) -> crate::Vec3 { + crate::Vec3::new(self.x as f32, self.y as f32, self.z as f32) + } + + /// Casts all elements of `self` to `f32`. + #[inline] + pub fn as_vec3a(&self) -> crate::Vec3A { + crate::Vec3A::new(self.x as f32, self.y as f32, self.z as f32) + } + + /// Casts all elements of `self` to `f64`. + #[inline] + pub fn as_dvec3(&self) -> crate::DVec3 { + crate::DVec3::new(self.x as f64, self.y as f64, self.z as f64) + } + + /// Casts all elements of `self` to `u16`. + #[inline] + pub fn as_u16vec3(&self) -> crate::U16Vec3 { + crate::U16Vec3::new(self.x as u16, self.y as u16, self.z as u16) + } + + /// Casts all elements of `self` to `i32`. + #[inline] + pub fn as_ivec3(&self) -> crate::IVec3 { + crate::IVec3::new(self.x as i32, self.y as i32, self.z as i32) + } + + /// Casts all elements of `self` to `u32`. + #[inline] + pub fn as_uvec3(&self) -> crate::UVec3 { + crate::UVec3::new(self.x as u32, self.y as u32, self.z as u32) + } + + /// Casts all elements of `self` to `i64`. + #[inline] + pub fn as_i64vec3(&self) -> crate::I64Vec3 { + crate::I64Vec3::new(self.x as i64, self.y as i64, self.z as i64) + } + + /// Casts all elements of `self` to `u64`. + #[inline] + pub fn as_u64vec3(&self) -> crate::U64Vec3 { + crate::U64Vec3::new(self.x as u64, self.y as u64, self.z as u64) + } + + /// Returns a vector containing the wrapping addition of `self` and `rhs`. + /// + /// In other words this computes `[self.x.wrapping_add(rhs.x), self.y.wrapping_add(rhs.y), ..]`. + #[inline] + #[must_use] + pub const fn wrapping_add(self, rhs: Self) -> Self { + Self { + x: self.x.wrapping_add(rhs.x), + y: self.y.wrapping_add(rhs.y), + z: self.z.wrapping_add(rhs.z), + } + } + + /// Returns a vector containing the wrapping subtraction of `self` and `rhs`. + /// + /// In other words this computes `[self.x.wrapping_sub(rhs.x), self.y.wrapping_sub(rhs.y), ..]`. + #[inline] + #[must_use] + pub const fn wrapping_sub(self, rhs: Self) -> Self { + Self { + x: self.x.wrapping_sub(rhs.x), + y: self.y.wrapping_sub(rhs.y), + z: self.z.wrapping_sub(rhs.z), + } + } + + /// Returns a vector containing the wrapping multiplication of `self` and `rhs`. 
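A sketch of the integer-specific helpers above (illustrative only; assumes this patch):

    use glam::{i16vec3, I16Vec3};

    let v = i16vec3(-7, 7, 0);
    assert_eq!(v.is_negative_bitmask(), 0b001);
    assert_eq!(v.length_squared(), 98);                            // 49 + 49 + 0
    assert_eq!(v.rem_euclid(I16Vec3::splat(4)), i16vec3(1, 3, 0)); // always non-negative, unlike `%`
    assert_eq!(v.as_u16vec3(), glam::U16Vec3::new(65529, 7, 0));   // plain `as` cast, no range check
    assert_eq!(I16Vec3::MAX.wrapping_add(I16Vec3::ONE), I16Vec3::MIN); // wraps instead of panicking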
+ /// + /// In other words this computes `[self.x.wrapping_mul(rhs.x), self.y.wrapping_mul(rhs.y), ..]`. + #[inline] + #[must_use] + pub const fn wrapping_mul(self, rhs: Self) -> Self { + Self { + x: self.x.wrapping_mul(rhs.x), + y: self.y.wrapping_mul(rhs.y), + z: self.z.wrapping_mul(rhs.z), + } + } + + /// Returns a vector containing the wrapping division of `self` and `rhs`. + /// + /// In other words this computes `[self.x.wrapping_div(rhs.x), self.y.wrapping_div(rhs.y), ..]`. + #[inline] + #[must_use] + pub const fn wrapping_div(self, rhs: Self) -> Self { + Self { + x: self.x.wrapping_div(rhs.x), + y: self.y.wrapping_div(rhs.y), + z: self.z.wrapping_div(rhs.z), + } + } + + /// Returns a vector containing the saturating addition of `self` and `rhs`. + /// + /// In other words this computes `[self.x.saturating_add(rhs.x), self.y.saturating_add(rhs.y), ..]`. + #[inline] + #[must_use] + pub const fn saturating_add(self, rhs: Self) -> Self { + Self { + x: self.x.saturating_add(rhs.x), + y: self.y.saturating_add(rhs.y), + z: self.z.saturating_add(rhs.z), + } + } + + /// Returns a vector containing the saturating subtraction of `self` and `rhs`. + /// + /// In other words this computes `[self.x.saturating_sub(rhs.x), self.y.saturating_sub(rhs.y), ..]`. + #[inline] + #[must_use] + pub const fn saturating_sub(self, rhs: Self) -> Self { + Self { + x: self.x.saturating_sub(rhs.x), + y: self.y.saturating_sub(rhs.y), + z: self.z.saturating_sub(rhs.z), + } + } + + /// Returns a vector containing the saturating multiplication of `self` and `rhs`. + /// + /// In other words this computes `[self.x.saturating_mul(rhs.x), self.y.saturating_mul(rhs.y), ..]`. + #[inline] + #[must_use] + pub const fn saturating_mul(self, rhs: Self) -> Self { + Self { + x: self.x.saturating_mul(rhs.x), + y: self.y.saturating_mul(rhs.y), + z: self.z.saturating_mul(rhs.z), + } + } + + /// Returns a vector containing the saturating division of `self` and `rhs`. + /// + /// In other words this computes `[self.x.saturating_div(rhs.x), self.y.saturating_div(rhs.y), ..]`. 
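An illustrative comparison of the wrapping and saturating arithmetic above (assumes this patch; not part of the diff):

    use glam::I16Vec3;

    let max = I16Vec3::MAX; // all 32767
    assert_eq!(max.wrapping_mul(I16Vec3::splat(2)), I16Vec3::splat(-2));
    assert_eq!(max.saturating_mul(I16Vec3::splat(2)), I16Vec3::MAX);
    assert_eq!(I16Vec3::MIN.saturating_sub(I16Vec3::ONE), I16Vec3::MIN);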
+ #[inline] + #[must_use] + pub const fn saturating_div(self, rhs: Self) -> Self { + Self { + x: self.x.saturating_div(rhs.x), + y: self.y.saturating_div(rhs.y), + z: self.z.saturating_div(rhs.z), + } + } +} + +impl Default for I16Vec3 { + #[inline(always)] + fn default() -> Self { + Self::ZERO + } +} + +impl Div for I16Vec3 { + type Output = Self; + #[inline] + fn div(self, rhs: Self) -> Self { + Self { + x: self.x.div(rhs.x), + y: self.y.div(rhs.y), + z: self.z.div(rhs.z), + } + } +} + +impl DivAssign for I16Vec3 { + #[inline] + fn div_assign(&mut self, rhs: Self) { + self.x.div_assign(rhs.x); + self.y.div_assign(rhs.y); + self.z.div_assign(rhs.z); + } +} + +impl Div for I16Vec3 { + type Output = Self; + #[inline] + fn div(self, rhs: i16) -> Self { + Self { + x: self.x.div(rhs), + y: self.y.div(rhs), + z: self.z.div(rhs), + } + } +} + +impl DivAssign for I16Vec3 { + #[inline] + fn div_assign(&mut self, rhs: i16) { + self.x.div_assign(rhs); + self.y.div_assign(rhs); + self.z.div_assign(rhs); + } +} + +impl Div for i16 { + type Output = I16Vec3; + #[inline] + fn div(self, rhs: I16Vec3) -> I16Vec3 { + I16Vec3 { + x: self.div(rhs.x), + y: self.div(rhs.y), + z: self.div(rhs.z), + } + } +} + +impl Mul for I16Vec3 { + type Output = Self; + #[inline] + fn mul(self, rhs: Self) -> Self { + Self { + x: self.x.mul(rhs.x), + y: self.y.mul(rhs.y), + z: self.z.mul(rhs.z), + } + } +} + +impl MulAssign for I16Vec3 { + #[inline] + fn mul_assign(&mut self, rhs: Self) { + self.x.mul_assign(rhs.x); + self.y.mul_assign(rhs.y); + self.z.mul_assign(rhs.z); + } +} + +impl Mul for I16Vec3 { + type Output = Self; + #[inline] + fn mul(self, rhs: i16) -> Self { + Self { + x: self.x.mul(rhs), + y: self.y.mul(rhs), + z: self.z.mul(rhs), + } + } +} + +impl MulAssign for I16Vec3 { + #[inline] + fn mul_assign(&mut self, rhs: i16) { + self.x.mul_assign(rhs); + self.y.mul_assign(rhs); + self.z.mul_assign(rhs); + } +} + +impl Mul for i16 { + type Output = I16Vec3; + #[inline] + fn mul(self, rhs: I16Vec3) -> I16Vec3 { + I16Vec3 { + x: self.mul(rhs.x), + y: self.mul(rhs.y), + z: self.mul(rhs.z), + } + } +} + +impl Add for I16Vec3 { + type Output = Self; + #[inline] + fn add(self, rhs: Self) -> Self { + Self { + x: self.x.add(rhs.x), + y: self.y.add(rhs.y), + z: self.z.add(rhs.z), + } + } +} + +impl AddAssign for I16Vec3 { + #[inline] + fn add_assign(&mut self, rhs: Self) { + self.x.add_assign(rhs.x); + self.y.add_assign(rhs.y); + self.z.add_assign(rhs.z); + } +} + +impl Add for I16Vec3 { + type Output = Self; + #[inline] + fn add(self, rhs: i16) -> Self { + Self { + x: self.x.add(rhs), + y: self.y.add(rhs), + z: self.z.add(rhs), + } + } +} + +impl AddAssign for I16Vec3 { + #[inline] + fn add_assign(&mut self, rhs: i16) { + self.x.add_assign(rhs); + self.y.add_assign(rhs); + self.z.add_assign(rhs); + } +} + +impl Add for i16 { + type Output = I16Vec3; + #[inline] + fn add(self, rhs: I16Vec3) -> I16Vec3 { + I16Vec3 { + x: self.add(rhs.x), + y: self.add(rhs.y), + z: self.add(rhs.z), + } + } +} + +impl Sub for I16Vec3 { + type Output = Self; + #[inline] + fn sub(self, rhs: Self) -> Self { + Self { + x: self.x.sub(rhs.x), + y: self.y.sub(rhs.y), + z: self.z.sub(rhs.z), + } + } +} + +impl SubAssign for I16Vec3 { + #[inline] + fn sub_assign(&mut self, rhs: I16Vec3) { + self.x.sub_assign(rhs.x); + self.y.sub_assign(rhs.y); + self.z.sub_assign(rhs.z); + } +} + +impl Sub for I16Vec3 { + type Output = Self; + #[inline] + fn sub(self, rhs: i16) -> Self { + Self { + x: self.x.sub(rhs), + y: self.y.sub(rhs), + z: self.z.sub(rhs), + } + } 
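The operator impls above follow the same pattern as the other integer vectors: vector-vector, vector-scalar and scalar-vector forms are all provided. A quick illustrative sketch (assumes this patch):

    use glam::i16vec3;

    let a = i16vec3(2, 4, 6);
    assert_eq!(a + i16vec3(1, 1, 1), i16vec3(3, 5, 7));
    assert_eq!(a * 3, i16vec3(6, 12, 18));
    assert_eq!(10 - a, i16vec3(8, 6, 4));
    assert_eq!(a / 2, i16vec3(1, 2, 3));
    // These use plain i16 arithmetic, so with default build settings
    // overflow panics in debug builds and wraps in release builds.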
+} + +impl SubAssign for I16Vec3 { + #[inline] + fn sub_assign(&mut self, rhs: i16) { + self.x.sub_assign(rhs); + self.y.sub_assign(rhs); + self.z.sub_assign(rhs); + } +} + +impl Sub for i16 { + type Output = I16Vec3; + #[inline] + fn sub(self, rhs: I16Vec3) -> I16Vec3 { + I16Vec3 { + x: self.sub(rhs.x), + y: self.sub(rhs.y), + z: self.sub(rhs.z), + } + } +} + +impl Rem for I16Vec3 { + type Output = Self; + #[inline] + fn rem(self, rhs: Self) -> Self { + Self { + x: self.x.rem(rhs.x), + y: self.y.rem(rhs.y), + z: self.z.rem(rhs.z), + } + } +} + +impl RemAssign for I16Vec3 { + #[inline] + fn rem_assign(&mut self, rhs: Self) { + self.x.rem_assign(rhs.x); + self.y.rem_assign(rhs.y); + self.z.rem_assign(rhs.z); + } +} + +impl Rem for I16Vec3 { + type Output = Self; + #[inline] + fn rem(self, rhs: i16) -> Self { + Self { + x: self.x.rem(rhs), + y: self.y.rem(rhs), + z: self.z.rem(rhs), + } + } +} + +impl RemAssign for I16Vec3 { + #[inline] + fn rem_assign(&mut self, rhs: i16) { + self.x.rem_assign(rhs); + self.y.rem_assign(rhs); + self.z.rem_assign(rhs); + } +} + +impl Rem for i16 { + type Output = I16Vec3; + #[inline] + fn rem(self, rhs: I16Vec3) -> I16Vec3 { + I16Vec3 { + x: self.rem(rhs.x), + y: self.rem(rhs.y), + z: self.rem(rhs.z), + } + } +} + +#[cfg(not(target_arch = "spirv"))] +impl AsRef<[i16; 3]> for I16Vec3 { + #[inline] + fn as_ref(&self) -> &[i16; 3] { + unsafe { &*(self as *const I16Vec3 as *const [i16; 3]) } + } +} + +#[cfg(not(target_arch = "spirv"))] +impl AsMut<[i16; 3]> for I16Vec3 { + #[inline] + fn as_mut(&mut self) -> &mut [i16; 3] { + unsafe { &mut *(self as *mut I16Vec3 as *mut [i16; 3]) } + } +} + +impl Sum for I16Vec3 { + #[inline] + fn sum(iter: I) -> Self + where + I: Iterator, + { + iter.fold(Self::ZERO, Self::add) + } +} + +impl<'a> Sum<&'a Self> for I16Vec3 { + #[inline] + fn sum(iter: I) -> Self + where + I: Iterator, + { + iter.fold(Self::ZERO, |a, &b| Self::add(a, b)) + } +} + +impl Product for I16Vec3 { + #[inline] + fn product(iter: I) -> Self + where + I: Iterator, + { + iter.fold(Self::ONE, Self::mul) + } +} + +impl<'a> Product<&'a Self> for I16Vec3 { + #[inline] + fn product(iter: I) -> Self + where + I: Iterator, + { + iter.fold(Self::ONE, |a, &b| Self::mul(a, b)) + } +} + +impl Neg for I16Vec3 { + type Output = Self; + #[inline] + fn neg(self) -> Self { + Self { + x: self.x.neg(), + y: self.y.neg(), + z: self.z.neg(), + } + } +} + +impl Not for I16Vec3 { + type Output = Self; + #[inline] + fn not(self) -> Self::Output { + Self { + x: self.x.not(), + y: self.y.not(), + z: self.z.not(), + } + } +} + +impl BitAnd for I16Vec3 { + type Output = Self; + #[inline] + fn bitand(self, rhs: Self) -> Self::Output { + Self { + x: self.x.bitand(rhs.x), + y: self.y.bitand(rhs.y), + z: self.z.bitand(rhs.z), + } + } +} + +impl BitOr for I16Vec3 { + type Output = Self; + #[inline] + fn bitor(self, rhs: Self) -> Self::Output { + Self { + x: self.x.bitor(rhs.x), + y: self.y.bitor(rhs.y), + z: self.z.bitor(rhs.z), + } + } +} + +impl BitXor for I16Vec3 { + type Output = Self; + #[inline] + fn bitxor(self, rhs: Self) -> Self::Output { + Self { + x: self.x.bitxor(rhs.x), + y: self.y.bitxor(rhs.y), + z: self.z.bitxor(rhs.z), + } + } +} + +impl BitAnd for I16Vec3 { + type Output = Self; + #[inline] + fn bitand(self, rhs: i16) -> Self::Output { + Self { + x: self.x.bitand(rhs), + y: self.y.bitand(rhs), + z: self.z.bitand(rhs), + } + } +} + +impl BitOr for I16Vec3 { + type Output = Self; + #[inline] + fn bitor(self, rhs: i16) -> Self::Output { + Self { + x: self.x.bitor(rhs), + y: 
self.y.bitor(rhs), + z: self.z.bitor(rhs), + } + } +} + +impl BitXor for I16Vec3 { + type Output = Self; + #[inline] + fn bitxor(self, rhs: i16) -> Self::Output { + Self { + x: self.x.bitxor(rhs), + y: self.y.bitxor(rhs), + z: self.z.bitxor(rhs), + } + } +} + +impl Shl for I16Vec3 { + type Output = Self; + #[inline] + fn shl(self, rhs: i8) -> Self::Output { + Self { + x: self.x.shl(rhs), + y: self.y.shl(rhs), + z: self.z.shl(rhs), + } + } +} + +impl Shr for I16Vec3 { + type Output = Self; + #[inline] + fn shr(self, rhs: i8) -> Self::Output { + Self { + x: self.x.shr(rhs), + y: self.y.shr(rhs), + z: self.z.shr(rhs), + } + } +} + +impl Shl for I16Vec3 { + type Output = Self; + #[inline] + fn shl(self, rhs: i16) -> Self::Output { + Self { + x: self.x.shl(rhs), + y: self.y.shl(rhs), + z: self.z.shl(rhs), + } + } +} + +impl Shr for I16Vec3 { + type Output = Self; + #[inline] + fn shr(self, rhs: i16) -> Self::Output { + Self { + x: self.x.shr(rhs), + y: self.y.shr(rhs), + z: self.z.shr(rhs), + } + } +} + +impl Shl for I16Vec3 { + type Output = Self; + #[inline] + fn shl(self, rhs: i32) -> Self::Output { + Self { + x: self.x.shl(rhs), + y: self.y.shl(rhs), + z: self.z.shl(rhs), + } + } +} + +impl Shr for I16Vec3 { + type Output = Self; + #[inline] + fn shr(self, rhs: i32) -> Self::Output { + Self { + x: self.x.shr(rhs), + y: self.y.shr(rhs), + z: self.z.shr(rhs), + } + } +} + +impl Shl for I16Vec3 { + type Output = Self; + #[inline] + fn shl(self, rhs: i64) -> Self::Output { + Self { + x: self.x.shl(rhs), + y: self.y.shl(rhs), + z: self.z.shl(rhs), + } + } +} + +impl Shr for I16Vec3 { + type Output = Self; + #[inline] + fn shr(self, rhs: i64) -> Self::Output { + Self { + x: self.x.shr(rhs), + y: self.y.shr(rhs), + z: self.z.shr(rhs), + } + } +} + +impl Shl for I16Vec3 { + type Output = Self; + #[inline] + fn shl(self, rhs: u8) -> Self::Output { + Self { + x: self.x.shl(rhs), + y: self.y.shl(rhs), + z: self.z.shl(rhs), + } + } +} + +impl Shr for I16Vec3 { + type Output = Self; + #[inline] + fn shr(self, rhs: u8) -> Self::Output { + Self { + x: self.x.shr(rhs), + y: self.y.shr(rhs), + z: self.z.shr(rhs), + } + } +} + +impl Shl for I16Vec3 { + type Output = Self; + #[inline] + fn shl(self, rhs: u16) -> Self::Output { + Self { + x: self.x.shl(rhs), + y: self.y.shl(rhs), + z: self.z.shl(rhs), + } + } +} + +impl Shr for I16Vec3 { + type Output = Self; + #[inline] + fn shr(self, rhs: u16) -> Self::Output { + Self { + x: self.x.shr(rhs), + y: self.y.shr(rhs), + z: self.z.shr(rhs), + } + } +} + +impl Shl for I16Vec3 { + type Output = Self; + #[inline] + fn shl(self, rhs: u32) -> Self::Output { + Self { + x: self.x.shl(rhs), + y: self.y.shl(rhs), + z: self.z.shl(rhs), + } + } +} + +impl Shr for I16Vec3 { + type Output = Self; + #[inline] + fn shr(self, rhs: u32) -> Self::Output { + Self { + x: self.x.shr(rhs), + y: self.y.shr(rhs), + z: self.z.shr(rhs), + } + } +} + +impl Shl for I16Vec3 { + type Output = Self; + #[inline] + fn shl(self, rhs: u64) -> Self::Output { + Self { + x: self.x.shl(rhs), + y: self.y.shl(rhs), + z: self.z.shl(rhs), + } + } +} + +impl Shr for I16Vec3 { + type Output = Self; + #[inline] + fn shr(self, rhs: u64) -> Self::Output { + Self { + x: self.x.shr(rhs), + y: self.y.shr(rhs), + z: self.z.shr(rhs), + } + } +} + +impl Shl for I16Vec3 { + type Output = Self; + #[inline] + fn shl(self, rhs: crate::IVec3) -> Self::Output { + Self { + x: self.x.shl(rhs.x), + y: self.y.shl(rhs.y), + z: self.z.shl(rhs.z), + } + } +} + +impl Shr for I16Vec3 { + type Output = Self; + #[inline] + fn 
shr(self, rhs: crate::IVec3) -> Self::Output { + Self { + x: self.x.shr(rhs.x), + y: self.y.shr(rhs.y), + z: self.z.shr(rhs.z), + } + } +} + +impl Shl for I16Vec3 { + type Output = Self; + #[inline] + fn shl(self, rhs: crate::UVec3) -> Self::Output { + Self { + x: self.x.shl(rhs.x), + y: self.y.shl(rhs.y), + z: self.z.shl(rhs.z), + } + } +} + +impl Shr for I16Vec3 { + type Output = Self; + #[inline] + fn shr(self, rhs: crate::UVec3) -> Self::Output { + Self { + x: self.x.shr(rhs.x), + y: self.y.shr(rhs.y), + z: self.z.shr(rhs.z), + } + } +} + +impl Index for I16Vec3 { + type Output = i16; + #[inline] + fn index(&self, index: usize) -> &Self::Output { + match index { + 0 => &self.x, + 1 => &self.y, + 2 => &self.z, + _ => panic!("index out of bounds"), + } + } +} + +impl IndexMut for I16Vec3 { + #[inline] + fn index_mut(&mut self, index: usize) -> &mut Self::Output { + match index { + 0 => &mut self.x, + 1 => &mut self.y, + 2 => &mut self.z, + _ => panic!("index out of bounds"), + } + } +} + +#[cfg(not(target_arch = "spirv"))] +impl fmt::Display for I16Vec3 { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "[{}, {}, {}]", self.x, self.y, self.z) + } +} + +#[cfg(not(target_arch = "spirv"))] +impl fmt::Debug for I16Vec3 { + fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { + fmt.debug_tuple(stringify!(I16Vec3)) + .field(&self.x) + .field(&self.y) + .field(&self.z) + .finish() + } +} + +impl From<[i16; 3]> for I16Vec3 { + #[inline] + fn from(a: [i16; 3]) -> Self { + Self::new(a[0], a[1], a[2]) + } +} + +impl From for [i16; 3] { + #[inline] + fn from(v: I16Vec3) -> Self { + [v.x, v.y, v.z] + } +} + +impl From<(i16, i16, i16)> for I16Vec3 { + #[inline] + fn from(t: (i16, i16, i16)) -> Self { + Self::new(t.0, t.1, t.2) + } +} + +impl From for (i16, i16, i16) { + #[inline] + fn from(v: I16Vec3) -> Self { + (v.x, v.y, v.z) + } +} + +impl From<(I16Vec2, i16)> for I16Vec3 { + #[inline] + fn from((v, z): (I16Vec2, i16)) -> Self { + Self::new(v.x, v.y, z) + } +} + +impl TryFrom for I16Vec3 { + type Error = core::num::TryFromIntError; + + #[inline] + fn try_from(v: U16Vec3) -> Result { + Ok(Self::new( + i16::try_from(v.x)?, + i16::try_from(v.y)?, + i16::try_from(v.z)?, + )) + } +} + +impl TryFrom for I16Vec3 { + type Error = core::num::TryFromIntError; + + #[inline] + fn try_from(v: IVec3) -> Result { + Ok(Self::new( + i16::try_from(v.x)?, + i16::try_from(v.y)?, + i16::try_from(v.z)?, + )) + } +} + +impl TryFrom for I16Vec3 { + type Error = core::num::TryFromIntError; + + #[inline] + fn try_from(v: UVec3) -> Result { + Ok(Self::new( + i16::try_from(v.x)?, + i16::try_from(v.y)?, + i16::try_from(v.z)?, + )) + } +} + +impl TryFrom for I16Vec3 { + type Error = core::num::TryFromIntError; + + #[inline] + fn try_from(v: I64Vec3) -> Result { + Ok(Self::new( + i16::try_from(v.x)?, + i16::try_from(v.y)?, + i16::try_from(v.z)?, + )) + } +} + +impl TryFrom for I16Vec3 { + type Error = core::num::TryFromIntError; + + #[inline] + fn try_from(v: U64Vec3) -> Result { + Ok(Self::new( + i16::try_from(v.x)?, + i16::try_from(v.y)?, + i16::try_from(v.z)?, + )) + } +} diff --git a/src/i16/i16vec4.rs b/src/i16/i16vec4.rs new file mode 100644 index 00000000..e8bd3d86 --- /dev/null +++ b/src/i16/i16vec4.rs @@ -0,0 +1,1436 @@ +// Generated from vec.rs.tera template. Edit the template, not the generated file. 
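A short illustrative sketch of the indexing and formatting impls that close out i16vec3.rs above (assumes this patch):

    use glam::i16vec3;

    let mut v = i16vec3(1, 2, 3);
    v[2] = 30;
    assert_eq!(v[2], 30);
    assert_eq!(format!("{}", v), "[1, 2, 30]");
    assert_eq!(format!("{:?}", v), "I16Vec3(1, 2, 30)");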
+ +use crate::{BVec4, I16Vec2, I16Vec3, I64Vec4, IVec4, U16Vec4, U64Vec4, UVec4}; + +#[cfg(not(target_arch = "spirv"))] +use core::fmt; +use core::iter::{Product, Sum}; +use core::{f32, ops::*}; + +/// Creates a 4-dimensional vector. +#[inline(always)] +pub const fn i16vec4(x: i16, y: i16, z: i16, w: i16) -> I16Vec4 { + I16Vec4::new(x, y, z, w) +} + +/// A 4-dimensional vector. +#[cfg_attr(not(target_arch = "spirv"), derive(Hash))] +#[derive(Clone, Copy, PartialEq, Eq)] +#[cfg_attr(feature = "cuda", repr(align(16)))] +#[cfg_attr(not(target_arch = "spirv"), repr(C))] +#[cfg_attr(target_arch = "spirv", repr(simd))] +pub struct I16Vec4 { + pub x: i16, + pub y: i16, + pub z: i16, + pub w: i16, +} + +impl I16Vec4 { + /// All zeroes. + pub const ZERO: Self = Self::splat(0); + + /// All ones. + pub const ONE: Self = Self::splat(1); + + /// All negative ones. + pub const NEG_ONE: Self = Self::splat(-1); + + /// All `i16::MIN`. + pub const MIN: Self = Self::splat(i16::MIN); + + /// All `i16::MAX`. + pub const MAX: Self = Self::splat(i16::MAX); + + /// A unit vector pointing along the positive X axis. + pub const X: Self = Self::new(1, 0, 0, 0); + + /// A unit vector pointing along the positive Y axis. + pub const Y: Self = Self::new(0, 1, 0, 0); + + /// A unit vector pointing along the positive Z axis. + pub const Z: Self = Self::new(0, 0, 1, 0); + + /// A unit vector pointing along the positive W axis. + pub const W: Self = Self::new(0, 0, 0, 1); + + /// A unit vector pointing along the negative X axis. + pub const NEG_X: Self = Self::new(-1, 0, 0, 0); + + /// A unit vector pointing along the negative Y axis. + pub const NEG_Y: Self = Self::new(0, -1, 0, 0); + + /// A unit vector pointing along the negative Z axis. + pub const NEG_Z: Self = Self::new(0, 0, -1, 0); + + /// A unit vector pointing along the negative W axis. + pub const NEG_W: Self = Self::new(0, 0, 0, -1); + + /// The unit axes. + pub const AXES: [Self; 4] = [Self::X, Self::Y, Self::Z, Self::W]; + + /// Creates a new vector. + #[inline(always)] + pub const fn new(x: i16, y: i16, z: i16, w: i16) -> Self { + Self { x, y, z, w } + } + + /// Creates a vector with all elements set to `v`. + #[inline] + pub const fn splat(v: i16) -> Self { + Self { + x: v, + + y: v, + + z: v, + + w: v, + } + } + + /// Creates a vector from the elements in `if_true` and `if_false`, selecting which to use + /// for each element of `self`. + /// + /// A true element in the mask uses the corresponding element from `if_true`, and false + /// uses the element from `if_false`. + #[inline] + pub fn select(mask: BVec4, if_true: Self, if_false: Self) -> Self { + Self { + x: if mask.x { if_true.x } else { if_false.x }, + y: if mask.y { if_true.y } else { if_false.y }, + z: if mask.z { if_true.z } else { if_false.z }, + w: if mask.w { if_true.w } else { if_false.w }, + } + } + + /// Creates a new vector from an array. + #[inline] + pub const fn from_array(a: [i16; 4]) -> Self { + Self::new(a[0], a[1], a[2], a[3]) + } + + /// `[x, y, z, w]` + #[inline] + pub const fn to_array(&self) -> [i16; 4] { + [self.x, self.y, self.z, self.w] + } + + /// Creates a vector from the first 4 values in `slice`. + /// + /// # Panics + /// + /// Panics if `slice` is less than 4 elements long. + #[inline] + pub const fn from_slice(slice: &[i16]) -> Self { + Self::new(slice[0], slice[1], slice[2], slice[3]) + } + + /// Writes the elements of `self` to the first 4 elements in `slice`. + /// + /// # Panics + /// + /// Panics if `slice` is less than 4 elements long. 
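An illustrative construction example for the `I16Vec4` API above (not part of the diff; assumes this patch):

    use glam::{i16vec4, I16Vec4};

    let v = I16Vec4::from_array([1, -2, 3, -4]);
    assert_eq!(v, i16vec4(1, -2, 3, -4));
    assert_eq!(v.to_array(), [1, -2, 3, -4]);
    assert_eq!(I16Vec4::AXES[3], I16Vec4::W);
    let s = [5i16, 6, 7, 8, 9];
    assert_eq!(I16Vec4::from_slice(&s), i16vec4(5, 6, 7, 8)); // reads the first four elements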
+ #[inline] + pub fn write_to_slice(self, slice: &mut [i16]) { + slice[0] = self.x; + slice[1] = self.y; + slice[2] = self.z; + slice[3] = self.w; + } + + /// Creates a 3D vector from the `x`, `y` and `z` elements of `self`, discarding `w`. + /// + /// Truncation to [`I16Vec3`] may also be performed by using [`self.xyz()`][crate::swizzles::Vec4Swizzles::xyz()]. + #[inline] + pub fn truncate(self) -> I16Vec3 { + use crate::swizzles::Vec4Swizzles; + self.xyz() + } + + /// Computes the dot product of `self` and `rhs`. + #[inline] + pub fn dot(self, rhs: Self) -> i16 { + (self.x * rhs.x) + (self.y * rhs.y) + (self.z * rhs.z) + (self.w * rhs.w) + } + + /// Returns a vector where every component is the dot product of `self` and `rhs`. + #[inline] + pub fn dot_into_vec(self, rhs: Self) -> Self { + Self::splat(self.dot(rhs)) + } + + /// Returns a vector containing the minimum values for each element of `self` and `rhs`. + /// + /// In other words this computes `[self.x.min(rhs.x), self.y.min(rhs.y), ..]`. + #[inline] + pub fn min(self, rhs: Self) -> Self { + Self { + x: self.x.min(rhs.x), + y: self.y.min(rhs.y), + z: self.z.min(rhs.z), + w: self.w.min(rhs.w), + } + } + + /// Returns a vector containing the maximum values for each element of `self` and `rhs`. + /// + /// In other words this computes `[self.x.max(rhs.x), self.y.max(rhs.y), ..]`. + #[inline] + pub fn max(self, rhs: Self) -> Self { + Self { + x: self.x.max(rhs.x), + y: self.y.max(rhs.y), + z: self.z.max(rhs.z), + w: self.w.max(rhs.w), + } + } + + /// Component-wise clamping of values, similar to [`i16::clamp`]. + /// + /// Each element in `min` must be less-or-equal to the corresponding element in `max`. + /// + /// # Panics + /// + /// Will panic if `min` is greater than `max` when `glam_assert` is enabled. + #[inline] + pub fn clamp(self, min: Self, max: Self) -> Self { + glam_assert!(min.cmple(max).all(), "clamp: expected min <= max"); + self.max(min).min(max) + } + + /// Returns the horizontal minimum of `self`. + /// + /// In other words this computes `min(x, y, ..)`. + #[inline] + pub fn min_element(self) -> i16 { + self.x.min(self.y.min(self.z.min(self.w))) + } + + /// Returns the horizontal maximum of `self`. + /// + /// In other words this computes `max(x, y, ..)`. + #[inline] + pub fn max_element(self) -> i16 { + self.x.max(self.y.max(self.z.max(self.w))) + } + + /// Returns a vector mask containing the result of a `==` comparison for each element of + /// `self` and `rhs`. + /// + /// In other words, this computes `[self.x == rhs.x, self.y == rhs.y, ..]` for all + /// elements. + #[inline] + pub fn cmpeq(self, rhs: Self) -> BVec4 { + BVec4::new( + self.x.eq(&rhs.x), + self.y.eq(&rhs.y), + self.z.eq(&rhs.z), + self.w.eq(&rhs.w), + ) + } + + /// Returns a vector mask containing the result of a `!=` comparison for each element of + /// `self` and `rhs`. + /// + /// In other words this computes `[self.x != rhs.x, self.y != rhs.y, ..]` for all + /// elements. + #[inline] + pub fn cmpne(self, rhs: Self) -> BVec4 { + BVec4::new( + self.x.ne(&rhs.x), + self.y.ne(&rhs.y), + self.z.ne(&rhs.z), + self.w.ne(&rhs.w), + ) + } + + /// Returns a vector mask containing the result of a `>=` comparison for each element of + /// `self` and `rhs`. + /// + /// In other words this computes `[self.x >= rhs.x, self.y >= rhs.y, ..]` for all + /// elements. 
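For illustration (assuming the API above), the horizontal reductions and clamping on `I16Vec4`:

    use glam::{i16vec4, I16Vec4};

    let v = i16vec4(7, -3, 12, 0);
    assert_eq!(v.min_element(), -3);
    assert_eq!(v.max_element(), 12);
    assert_eq!(
        v.clamp(I16Vec4::splat(-1), I16Vec4::splat(10)),
        i16vec4(7, -1, 10, 0),
    );
    // `dot` uses plain i16 arithmetic, so keep inputs small enough to avoid overflow.
    assert_eq!(v.dot(i16vec4(1, 1, 1, 1)), 16);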
+ #[inline] + pub fn cmpge(self, rhs: Self) -> BVec4 { + BVec4::new( + self.x.ge(&rhs.x), + self.y.ge(&rhs.y), + self.z.ge(&rhs.z), + self.w.ge(&rhs.w), + ) + } + + /// Returns a vector mask containing the result of a `>` comparison for each element of + /// `self` and `rhs`. + /// + /// In other words this computes `[self.x > rhs.x, self.y > rhs.y, ..]` for all + /// elements. + #[inline] + pub fn cmpgt(self, rhs: Self) -> BVec4 { + BVec4::new( + self.x.gt(&rhs.x), + self.y.gt(&rhs.y), + self.z.gt(&rhs.z), + self.w.gt(&rhs.w), + ) + } + + /// Returns a vector mask containing the result of a `<=` comparison for each element of + /// `self` and `rhs`. + /// + /// In other words this computes `[self.x <= rhs.x, self.y <= rhs.y, ..]` for all + /// elements. + #[inline] + pub fn cmple(self, rhs: Self) -> BVec4 { + BVec4::new( + self.x.le(&rhs.x), + self.y.le(&rhs.y), + self.z.le(&rhs.z), + self.w.le(&rhs.w), + ) + } + + /// Returns a vector mask containing the result of a `<` comparison for each element of + /// `self` and `rhs`. + /// + /// In other words this computes `[self.x < rhs.x, self.y < rhs.y, ..]` for all + /// elements. + #[inline] + pub fn cmplt(self, rhs: Self) -> BVec4 { + BVec4::new( + self.x.lt(&rhs.x), + self.y.lt(&rhs.y), + self.z.lt(&rhs.z), + self.w.lt(&rhs.w), + ) + } + + /// Returns a vector containing the absolute value of each element of `self`. + #[inline] + pub fn abs(self) -> Self { + Self { + x: self.x.abs(), + y: self.y.abs(), + z: self.z.abs(), + w: self.w.abs(), + } + } + + /// Returns a vector with elements representing the sign of `self`. + /// + /// - `0` if the number is zero + /// - `1` if the number is positive + /// - `-1` if the number is negative + #[inline] + pub fn signum(self) -> Self { + Self { + x: self.x.signum(), + y: self.y.signum(), + z: self.z.signum(), + w: self.w.signum(), + } + } + + /// Returns a bitmask with the lowest 4 bits set to the sign bits from the elements of `self`. + /// + /// A negative element results in a `1` bit and a positive element in a `0` bit. Element `x` goes + /// into the first lowest bit, element `y` into the second, etc. + #[inline] + pub fn is_negative_bitmask(self) -> u32 { + (self.x.is_negative() as u32) + | (self.y.is_negative() as u32) << 1 + | (self.z.is_negative() as u32) << 2 + | (self.w.is_negative() as u32) << 3 + } + + /// Computes the squared length of `self`. + #[doc(alias = "magnitude2")] + #[inline] + pub fn length_squared(self) -> i16 { + self.dot(self) + } + + /// Compute the squared euclidean distance between two points in space. + #[inline] + pub fn distance_squared(self, rhs: Self) -> i16 { + (self - rhs).length_squared() + } + + /// Returns the element-wise quotient of [Euclidean division] of `self` by `rhs`. + /// + /// # Panics + /// This function will panic if any `rhs` element is 0 or the division results in overflow. + #[inline] + pub fn div_euclid(self, rhs: Self) -> Self { + Self::new( + self.x.div_euclid(rhs.x), + self.y.div_euclid(rhs.y), + self.z.div_euclid(rhs.z), + self.w.div_euclid(rhs.w), + ) + } + + /// Returns the element-wise remainder of [Euclidean division] of `self` by `rhs`. + /// + /// # Panics + /// This function will panic if any `rhs` element is 0 or the division results in overflow. + /// + /// [Euclidean division]: i16::rem_euclid + #[inline] + pub fn rem_euclid(self, rhs: Self) -> Self { + Self::new( + self.x.rem_euclid(rhs.x), + self.y.rem_euclid(rhs.y), + self.z.rem_euclid(rhs.z), + self.w.rem_euclid(rhs.w), + ) + } + + /// Casts all elements of `self` to `f32`. 
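A sketch of the comparison masks and Euclidean division on `I16Vec4` (illustrative; assumes this patch):

    use glam::{i16vec4, BVec4, I16Vec4};

    let v = i16vec4(-9, 4, -1, 8);
    assert_eq!(v.abs(), i16vec4(9, 4, 1, 8));
    assert_eq!(v.is_negative_bitmask(), 0b0101);
    assert_eq!(v.cmpge(I16Vec4::ZERO), BVec4::new(false, true, false, true));
    assert_eq!(v.div_euclid(I16Vec4::splat(4)), i16vec4(-3, 1, -1, 2));
    assert_eq!(v.rem_euclid(I16Vec4::splat(4)), i16vec4(3, 0, 3, 0));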
+ #[inline] + pub fn as_vec4(&self) -> crate::Vec4 { + crate::Vec4::new(self.x as f32, self.y as f32, self.z as f32, self.w as f32) + } + + /// Casts all elements of `self` to `f64`. + #[inline] + pub fn as_dvec4(&self) -> crate::DVec4 { + crate::DVec4::new(self.x as f64, self.y as f64, self.z as f64, self.w as f64) + } + + /// Casts all elements of `self` to `u16`. + #[inline] + pub fn as_u16vec4(&self) -> crate::U16Vec4 { + crate::U16Vec4::new(self.x as u16, self.y as u16, self.z as u16, self.w as u16) + } + + /// Casts all elements of `self` to `i32`. + #[inline] + pub fn as_ivec4(&self) -> crate::IVec4 { + crate::IVec4::new(self.x as i32, self.y as i32, self.z as i32, self.w as i32) + } + + /// Casts all elements of `self` to `u32`. + #[inline] + pub fn as_uvec4(&self) -> crate::UVec4 { + crate::UVec4::new(self.x as u32, self.y as u32, self.z as u32, self.w as u32) + } + + /// Casts all elements of `self` to `i64`. + #[inline] + pub fn as_i64vec4(&self) -> crate::I64Vec4 { + crate::I64Vec4::new(self.x as i64, self.y as i64, self.z as i64, self.w as i64) + } + + /// Casts all elements of `self` to `u64`. + #[inline] + pub fn as_u64vec4(&self) -> crate::U64Vec4 { + crate::U64Vec4::new(self.x as u64, self.y as u64, self.z as u64, self.w as u64) + } + + /// Returns a vector containing the wrapping addition of `self` and `rhs`. + /// + /// In other words this computes `[self.x.wrapping_add(rhs.x), self.y.wrapping_add(rhs.y), ..]`. + #[inline] + #[must_use] + pub const fn wrapping_add(self, rhs: Self) -> Self { + Self { + x: self.x.wrapping_add(rhs.x), + y: self.y.wrapping_add(rhs.y), + z: self.z.wrapping_add(rhs.z), + w: self.w.wrapping_add(rhs.w), + } + } + + /// Returns a vector containing the wrapping subtraction of `self` and `rhs`. + /// + /// In other words this computes `[self.x.wrapping_sub(rhs.x), self.y.wrapping_sub(rhs.y), ..]`. + #[inline] + #[must_use] + pub const fn wrapping_sub(self, rhs: Self) -> Self { + Self { + x: self.x.wrapping_sub(rhs.x), + y: self.y.wrapping_sub(rhs.y), + z: self.z.wrapping_sub(rhs.z), + w: self.w.wrapping_sub(rhs.w), + } + } + + /// Returns a vector containing the wrapping multiplication of `self` and `rhs`. + /// + /// In other words this computes `[self.x.wrapping_mul(rhs.x), self.y.wrapping_mul(rhs.y), ..]`. + #[inline] + #[must_use] + pub const fn wrapping_mul(self, rhs: Self) -> Self { + Self { + x: self.x.wrapping_mul(rhs.x), + y: self.y.wrapping_mul(rhs.y), + z: self.z.wrapping_mul(rhs.z), + w: self.w.wrapping_mul(rhs.w), + } + } + + /// Returns a vector containing the wrapping division of `self` and `rhs`. + /// + /// In other words this computes `[self.x.wrapping_div(rhs.x), self.y.wrapping_div(rhs.y), ..]`. + #[inline] + #[must_use] + pub const fn wrapping_div(self, rhs: Self) -> Self { + Self { + x: self.x.wrapping_div(rhs.x), + y: self.y.wrapping_div(rhs.y), + z: self.z.wrapping_div(rhs.z), + w: self.w.wrapping_div(rhs.w), + } + } + + /// Returns a vector containing the saturating addition of `self` and `rhs`. + /// + /// In other words this computes `[self.x.saturating_add(rhs.x), self.y.saturating_add(rhs.y), ..]`. + #[inline] + #[must_use] + pub const fn saturating_add(self, rhs: Self) -> Self { + Self { + x: self.x.saturating_add(rhs.x), + y: self.y.saturating_add(rhs.y), + z: self.z.saturating_add(rhs.z), + w: self.w.saturating_add(rhs.w), + } + } + + /// Returns a vector containing the saturating subtraction of `self` and `rhs`. 
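Illustrative casts and wrapping arithmetic for `I16Vec4` (assumes this patch):

    use glam::I16Vec4;

    let v = I16Vec4::new(-1, 2, -3, 4);
    assert_eq!(v.as_vec4(), glam::Vec4::new(-1.0, 2.0, -3.0, 4.0));
    assert_eq!(v.as_u16vec4(), glam::U16Vec4::new(65535, 2, 65533, 4)); // `as` cast, no range check
    assert_eq!(I16Vec4::MAX.wrapping_add(I16Vec4::ONE), I16Vec4::MIN);
    assert_eq!(I16Vec4::MAX.saturating_add(I16Vec4::ONE), I16Vec4::MAX);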
+ /// + /// In other words this computes `[self.x.saturating_sub(rhs.x), self.y.saturating_sub(rhs.y), ..]`. + #[inline] + #[must_use] + pub const fn saturating_sub(self, rhs: Self) -> Self { + Self { + x: self.x.saturating_sub(rhs.x), + y: self.y.saturating_sub(rhs.y), + z: self.z.saturating_sub(rhs.z), + w: self.w.saturating_sub(rhs.w), + } + } + + /// Returns a vector containing the saturating multiplication of `self` and `rhs`. + /// + /// In other words this computes `[self.x.saturating_mul(rhs.x), self.y.saturating_mul(rhs.y), ..]`. + #[inline] + #[must_use] + pub const fn saturating_mul(self, rhs: Self) -> Self { + Self { + x: self.x.saturating_mul(rhs.x), + y: self.y.saturating_mul(rhs.y), + z: self.z.saturating_mul(rhs.z), + w: self.w.saturating_mul(rhs.w), + } + } + + /// Returns a vector containing the saturating division of `self` and `rhs`. + /// + /// In other words this computes `[self.x.saturating_div(rhs.x), self.y.saturating_div(rhs.y), ..]`. + #[inline] + #[must_use] + pub const fn saturating_div(self, rhs: Self) -> Self { + Self { + x: self.x.saturating_div(rhs.x), + y: self.y.saturating_div(rhs.y), + z: self.z.saturating_div(rhs.z), + w: self.w.saturating_div(rhs.w), + } + } +} + +impl Default for I16Vec4 { + #[inline(always)] + fn default() -> Self { + Self::ZERO + } +} + +impl Div for I16Vec4 { + type Output = Self; + #[inline] + fn div(self, rhs: Self) -> Self { + Self { + x: self.x.div(rhs.x), + y: self.y.div(rhs.y), + z: self.z.div(rhs.z), + w: self.w.div(rhs.w), + } + } +} + +impl DivAssign for I16Vec4 { + #[inline] + fn div_assign(&mut self, rhs: Self) { + self.x.div_assign(rhs.x); + self.y.div_assign(rhs.y); + self.z.div_assign(rhs.z); + self.w.div_assign(rhs.w); + } +} + +impl Div for I16Vec4 { + type Output = Self; + #[inline] + fn div(self, rhs: i16) -> Self { + Self { + x: self.x.div(rhs), + y: self.y.div(rhs), + z: self.z.div(rhs), + w: self.w.div(rhs), + } + } +} + +impl DivAssign for I16Vec4 { + #[inline] + fn div_assign(&mut self, rhs: i16) { + self.x.div_assign(rhs); + self.y.div_assign(rhs); + self.z.div_assign(rhs); + self.w.div_assign(rhs); + } +} + +impl Div for i16 { + type Output = I16Vec4; + #[inline] + fn div(self, rhs: I16Vec4) -> I16Vec4 { + I16Vec4 { + x: self.div(rhs.x), + y: self.div(rhs.y), + z: self.div(rhs.z), + w: self.div(rhs.w), + } + } +} + +impl Mul for I16Vec4 { + type Output = Self; + #[inline] + fn mul(self, rhs: Self) -> Self { + Self { + x: self.x.mul(rhs.x), + y: self.y.mul(rhs.y), + z: self.z.mul(rhs.z), + w: self.w.mul(rhs.w), + } + } +} + +impl MulAssign for I16Vec4 { + #[inline] + fn mul_assign(&mut self, rhs: Self) { + self.x.mul_assign(rhs.x); + self.y.mul_assign(rhs.y); + self.z.mul_assign(rhs.z); + self.w.mul_assign(rhs.w); + } +} + +impl Mul for I16Vec4 { + type Output = Self; + #[inline] + fn mul(self, rhs: i16) -> Self { + Self { + x: self.x.mul(rhs), + y: self.y.mul(rhs), + z: self.z.mul(rhs), + w: self.w.mul(rhs), + } + } +} + +impl MulAssign for I16Vec4 { + #[inline] + fn mul_assign(&mut self, rhs: i16) { + self.x.mul_assign(rhs); + self.y.mul_assign(rhs); + self.z.mul_assign(rhs); + self.w.mul_assign(rhs); + } +} + +impl Mul for i16 { + type Output = I16Vec4; + #[inline] + fn mul(self, rhs: I16Vec4) -> I16Vec4 { + I16Vec4 { + x: self.mul(rhs.x), + y: self.mul(rhs.y), + z: self.mul(rhs.z), + w: self.mul(rhs.w), + } + } +} + +impl Add for I16Vec4 { + type Output = Self; + #[inline] + fn add(self, rhs: Self) -> Self { + Self { + x: self.x.add(rhs.x), + y: self.y.add(rhs.y), + z: self.z.add(rhs.z), + w: 
self.w.add(rhs.w), + } + } +} + +impl AddAssign for I16Vec4 { + #[inline] + fn add_assign(&mut self, rhs: Self) { + self.x.add_assign(rhs.x); + self.y.add_assign(rhs.y); + self.z.add_assign(rhs.z); + self.w.add_assign(rhs.w); + } +} + +impl Add for I16Vec4 { + type Output = Self; + #[inline] + fn add(self, rhs: i16) -> Self { + Self { + x: self.x.add(rhs), + y: self.y.add(rhs), + z: self.z.add(rhs), + w: self.w.add(rhs), + } + } +} + +impl AddAssign for I16Vec4 { + #[inline] + fn add_assign(&mut self, rhs: i16) { + self.x.add_assign(rhs); + self.y.add_assign(rhs); + self.z.add_assign(rhs); + self.w.add_assign(rhs); + } +} + +impl Add for i16 { + type Output = I16Vec4; + #[inline] + fn add(self, rhs: I16Vec4) -> I16Vec4 { + I16Vec4 { + x: self.add(rhs.x), + y: self.add(rhs.y), + z: self.add(rhs.z), + w: self.add(rhs.w), + } + } +} + +impl Sub for I16Vec4 { + type Output = Self; + #[inline] + fn sub(self, rhs: Self) -> Self { + Self { + x: self.x.sub(rhs.x), + y: self.y.sub(rhs.y), + z: self.z.sub(rhs.z), + w: self.w.sub(rhs.w), + } + } +} + +impl SubAssign for I16Vec4 { + #[inline] + fn sub_assign(&mut self, rhs: I16Vec4) { + self.x.sub_assign(rhs.x); + self.y.sub_assign(rhs.y); + self.z.sub_assign(rhs.z); + self.w.sub_assign(rhs.w); + } +} + +impl Sub for I16Vec4 { + type Output = Self; + #[inline] + fn sub(self, rhs: i16) -> Self { + Self { + x: self.x.sub(rhs), + y: self.y.sub(rhs), + z: self.z.sub(rhs), + w: self.w.sub(rhs), + } + } +} + +impl SubAssign for I16Vec4 { + #[inline] + fn sub_assign(&mut self, rhs: i16) { + self.x.sub_assign(rhs); + self.y.sub_assign(rhs); + self.z.sub_assign(rhs); + self.w.sub_assign(rhs); + } +} + +impl Sub for i16 { + type Output = I16Vec4; + #[inline] + fn sub(self, rhs: I16Vec4) -> I16Vec4 { + I16Vec4 { + x: self.sub(rhs.x), + y: self.sub(rhs.y), + z: self.sub(rhs.z), + w: self.sub(rhs.w), + } + } +} + +impl Rem for I16Vec4 { + type Output = Self; + #[inline] + fn rem(self, rhs: Self) -> Self { + Self { + x: self.x.rem(rhs.x), + y: self.y.rem(rhs.y), + z: self.z.rem(rhs.z), + w: self.w.rem(rhs.w), + } + } +} + +impl RemAssign for I16Vec4 { + #[inline] + fn rem_assign(&mut self, rhs: Self) { + self.x.rem_assign(rhs.x); + self.y.rem_assign(rhs.y); + self.z.rem_assign(rhs.z); + self.w.rem_assign(rhs.w); + } +} + +impl Rem for I16Vec4 { + type Output = Self; + #[inline] + fn rem(self, rhs: i16) -> Self { + Self { + x: self.x.rem(rhs), + y: self.y.rem(rhs), + z: self.z.rem(rhs), + w: self.w.rem(rhs), + } + } +} + +impl RemAssign for I16Vec4 { + #[inline] + fn rem_assign(&mut self, rhs: i16) { + self.x.rem_assign(rhs); + self.y.rem_assign(rhs); + self.z.rem_assign(rhs); + self.w.rem_assign(rhs); + } +} + +impl Rem for i16 { + type Output = I16Vec4; + #[inline] + fn rem(self, rhs: I16Vec4) -> I16Vec4 { + I16Vec4 { + x: self.rem(rhs.x), + y: self.rem(rhs.y), + z: self.rem(rhs.z), + w: self.rem(rhs.w), + } + } +} + +#[cfg(not(target_arch = "spirv"))] +impl AsRef<[i16; 4]> for I16Vec4 { + #[inline] + fn as_ref(&self) -> &[i16; 4] { + unsafe { &*(self as *const I16Vec4 as *const [i16; 4]) } + } +} + +#[cfg(not(target_arch = "spirv"))] +impl AsMut<[i16; 4]> for I16Vec4 { + #[inline] + fn as_mut(&mut self) -> &mut [i16; 4] { + unsafe { &mut *(self as *mut I16Vec4 as *mut [i16; 4]) } + } +} + +impl Sum for I16Vec4 { + #[inline] + fn sum(iter: I) -> Self + where + I: Iterator, + { + iter.fold(Self::ZERO, Self::add) + } +} + +impl<'a> Sum<&'a Self> for I16Vec4 { + #[inline] + fn sum(iter: I) -> Self + where + I: Iterator, + { + iter.fold(Self::ZERO, |a, &b| 
Self::add(a, b)) + } +} + +impl Product for I16Vec4 { + #[inline] + fn product(iter: I) -> Self + where + I: Iterator, + { + iter.fold(Self::ONE, Self::mul) + } +} + +impl<'a> Product<&'a Self> for I16Vec4 { + #[inline] + fn product(iter: I) -> Self + where + I: Iterator, + { + iter.fold(Self::ONE, |a, &b| Self::mul(a, b)) + } +} + +impl Neg for I16Vec4 { + type Output = Self; + #[inline] + fn neg(self) -> Self { + Self { + x: self.x.neg(), + y: self.y.neg(), + z: self.z.neg(), + w: self.w.neg(), + } + } +} + +impl Not for I16Vec4 { + type Output = Self; + #[inline] + fn not(self) -> Self::Output { + Self { + x: self.x.not(), + y: self.y.not(), + z: self.z.not(), + w: self.w.not(), + } + } +} + +impl BitAnd for I16Vec4 { + type Output = Self; + #[inline] + fn bitand(self, rhs: Self) -> Self::Output { + Self { + x: self.x.bitand(rhs.x), + y: self.y.bitand(rhs.y), + z: self.z.bitand(rhs.z), + w: self.w.bitand(rhs.w), + } + } +} + +impl BitOr for I16Vec4 { + type Output = Self; + #[inline] + fn bitor(self, rhs: Self) -> Self::Output { + Self { + x: self.x.bitor(rhs.x), + y: self.y.bitor(rhs.y), + z: self.z.bitor(rhs.z), + w: self.w.bitor(rhs.w), + } + } +} + +impl BitXor for I16Vec4 { + type Output = Self; + #[inline] + fn bitxor(self, rhs: Self) -> Self::Output { + Self { + x: self.x.bitxor(rhs.x), + y: self.y.bitxor(rhs.y), + z: self.z.bitxor(rhs.z), + w: self.w.bitxor(rhs.w), + } + } +} + +impl BitAnd for I16Vec4 { + type Output = Self; + #[inline] + fn bitand(self, rhs: i16) -> Self::Output { + Self { + x: self.x.bitand(rhs), + y: self.y.bitand(rhs), + z: self.z.bitand(rhs), + w: self.w.bitand(rhs), + } + } +} + +impl BitOr for I16Vec4 { + type Output = Self; + #[inline] + fn bitor(self, rhs: i16) -> Self::Output { + Self { + x: self.x.bitor(rhs), + y: self.y.bitor(rhs), + z: self.z.bitor(rhs), + w: self.w.bitor(rhs), + } + } +} + +impl BitXor for I16Vec4 { + type Output = Self; + #[inline] + fn bitxor(self, rhs: i16) -> Self::Output { + Self { + x: self.x.bitxor(rhs), + y: self.y.bitxor(rhs), + z: self.z.bitxor(rhs), + w: self.w.bitxor(rhs), + } + } +} + +impl Shl for I16Vec4 { + type Output = Self; + #[inline] + fn shl(self, rhs: i8) -> Self::Output { + Self { + x: self.x.shl(rhs), + y: self.y.shl(rhs), + z: self.z.shl(rhs), + w: self.w.shl(rhs), + } + } +} + +impl Shr for I16Vec4 { + type Output = Self; + #[inline] + fn shr(self, rhs: i8) -> Self::Output { + Self { + x: self.x.shr(rhs), + y: self.y.shr(rhs), + z: self.z.shr(rhs), + w: self.w.shr(rhs), + } + } +} + +impl Shl for I16Vec4 { + type Output = Self; + #[inline] + fn shl(self, rhs: i16) -> Self::Output { + Self { + x: self.x.shl(rhs), + y: self.y.shl(rhs), + z: self.z.shl(rhs), + w: self.w.shl(rhs), + } + } +} + +impl Shr for I16Vec4 { + type Output = Self; + #[inline] + fn shr(self, rhs: i16) -> Self::Output { + Self { + x: self.x.shr(rhs), + y: self.y.shr(rhs), + z: self.z.shr(rhs), + w: self.w.shr(rhs), + } + } +} + +impl Shl for I16Vec4 { + type Output = Self; + #[inline] + fn shl(self, rhs: i32) -> Self::Output { + Self { + x: self.x.shl(rhs), + y: self.y.shl(rhs), + z: self.z.shl(rhs), + w: self.w.shl(rhs), + } + } +} + +impl Shr for I16Vec4 { + type Output = Self; + #[inline] + fn shr(self, rhs: i32) -> Self::Output { + Self { + x: self.x.shr(rhs), + y: self.y.shr(rhs), + z: self.z.shr(rhs), + w: self.w.shr(rhs), + } + } +} + +impl Shl for I16Vec4 { + type Output = Self; + #[inline] + fn shl(self, rhs: i64) -> Self::Output { + Self { + x: self.x.shl(rhs), + y: self.y.shl(rhs), + z: self.z.shl(rhs), + w: self.w.shl(rhs), 
+ } + } +} + +impl Shr for I16Vec4 { + type Output = Self; + #[inline] + fn shr(self, rhs: i64) -> Self::Output { + Self { + x: self.x.shr(rhs), + y: self.y.shr(rhs), + z: self.z.shr(rhs), + w: self.w.shr(rhs), + } + } +} + +impl Shl for I16Vec4 { + type Output = Self; + #[inline] + fn shl(self, rhs: u8) -> Self::Output { + Self { + x: self.x.shl(rhs), + y: self.y.shl(rhs), + z: self.z.shl(rhs), + w: self.w.shl(rhs), + } + } +} + +impl Shr for I16Vec4 { + type Output = Self; + #[inline] + fn shr(self, rhs: u8) -> Self::Output { + Self { + x: self.x.shr(rhs), + y: self.y.shr(rhs), + z: self.z.shr(rhs), + w: self.w.shr(rhs), + } + } +} + +impl Shl for I16Vec4 { + type Output = Self; + #[inline] + fn shl(self, rhs: u16) -> Self::Output { + Self { + x: self.x.shl(rhs), + y: self.y.shl(rhs), + z: self.z.shl(rhs), + w: self.w.shl(rhs), + } + } +} + +impl Shr for I16Vec4 { + type Output = Self; + #[inline] + fn shr(self, rhs: u16) -> Self::Output { + Self { + x: self.x.shr(rhs), + y: self.y.shr(rhs), + z: self.z.shr(rhs), + w: self.w.shr(rhs), + } + } +} + +impl Shl for I16Vec4 { + type Output = Self; + #[inline] + fn shl(self, rhs: u32) -> Self::Output { + Self { + x: self.x.shl(rhs), + y: self.y.shl(rhs), + z: self.z.shl(rhs), + w: self.w.shl(rhs), + } + } +} + +impl Shr for I16Vec4 { + type Output = Self; + #[inline] + fn shr(self, rhs: u32) -> Self::Output { + Self { + x: self.x.shr(rhs), + y: self.y.shr(rhs), + z: self.z.shr(rhs), + w: self.w.shr(rhs), + } + } +} + +impl Shl for I16Vec4 { + type Output = Self; + #[inline] + fn shl(self, rhs: u64) -> Self::Output { + Self { + x: self.x.shl(rhs), + y: self.y.shl(rhs), + z: self.z.shl(rhs), + w: self.w.shl(rhs), + } + } +} + +impl Shr for I16Vec4 { + type Output = Self; + #[inline] + fn shr(self, rhs: u64) -> Self::Output { + Self { + x: self.x.shr(rhs), + y: self.y.shr(rhs), + z: self.z.shr(rhs), + w: self.w.shr(rhs), + } + } +} + +impl Shl for I16Vec4 { + type Output = Self; + #[inline] + fn shl(self, rhs: crate::IVec4) -> Self::Output { + Self { + x: self.x.shl(rhs.x), + y: self.y.shl(rhs.y), + z: self.z.shl(rhs.z), + w: self.w.shl(rhs.w), + } + } +} + +impl Shr for I16Vec4 { + type Output = Self; + #[inline] + fn shr(self, rhs: crate::IVec4) -> Self::Output { + Self { + x: self.x.shr(rhs.x), + y: self.y.shr(rhs.y), + z: self.z.shr(rhs.z), + w: self.w.shr(rhs.w), + } + } +} + +impl Shl for I16Vec4 { + type Output = Self; + #[inline] + fn shl(self, rhs: crate::UVec4) -> Self::Output { + Self { + x: self.x.shl(rhs.x), + y: self.y.shl(rhs.y), + z: self.z.shl(rhs.z), + w: self.w.shl(rhs.w), + } + } +} + +impl Shr for I16Vec4 { + type Output = Self; + #[inline] + fn shr(self, rhs: crate::UVec4) -> Self::Output { + Self { + x: self.x.shr(rhs.x), + y: self.y.shr(rhs.y), + z: self.z.shr(rhs.z), + w: self.w.shr(rhs.w), + } + } +} + +impl Index for I16Vec4 { + type Output = i16; + #[inline] + fn index(&self, index: usize) -> &Self::Output { + match index { + 0 => &self.x, + 1 => &self.y, + 2 => &self.z, + 3 => &self.w, + _ => panic!("index out of bounds"), + } + } +} + +impl IndexMut for I16Vec4 { + #[inline] + fn index_mut(&mut self, index: usize) -> &mut Self::Output { + match index { + 0 => &mut self.x, + 1 => &mut self.y, + 2 => &mut self.z, + 3 => &mut self.w, + _ => panic!("index out of bounds"), + } + } +} + +#[cfg(not(target_arch = "spirv"))] +impl fmt::Display for I16Vec4 { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "[{}, {}, {}, {}]", self.x, self.y, self.z, self.w) + } +} + +#[cfg(not(target_arch = "spirv"))] 
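The shift operators above accept scalar shift amounts of every primitive integer width as well as per-component shifts by `IVec4`/`UVec4`, matching the other integer vector types. A small illustrative sketch (assumes this patch):

    use glam::{i16vec4, I16Vec4, UVec4};

    let v = I16Vec4::splat(1);
    assert_eq!(v << 3u32, I16Vec4::splat(8));
    assert_eq!(v << UVec4::new(0, 1, 2, 3), i16vec4(1, 2, 4, 8));
    assert_eq!(I16Vec4::splat(-8) >> 1i32, I16Vec4::splat(-4)); // arithmetic shift on a signed type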
+impl fmt::Debug for I16Vec4 { + fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { + fmt.debug_tuple(stringify!(I16Vec4)) + .field(&self.x) + .field(&self.y) + .field(&self.z) + .field(&self.w) + .finish() + } +} + +impl From<[i16; 4]> for I16Vec4 { + #[inline] + fn from(a: [i16; 4]) -> Self { + Self::new(a[0], a[1], a[2], a[3]) + } +} + +impl From for [i16; 4] { + #[inline] + fn from(v: I16Vec4) -> Self { + [v.x, v.y, v.z, v.w] + } +} + +impl From<(i16, i16, i16, i16)> for I16Vec4 { + #[inline] + fn from(t: (i16, i16, i16, i16)) -> Self { + Self::new(t.0, t.1, t.2, t.3) + } +} + +impl From for (i16, i16, i16, i16) { + #[inline] + fn from(v: I16Vec4) -> Self { + (v.x, v.y, v.z, v.w) + } +} + +impl From<(I16Vec3, i16)> for I16Vec4 { + #[inline] + fn from((v, w): (I16Vec3, i16)) -> Self { + Self::new(v.x, v.y, v.z, w) + } +} + +impl From<(i16, I16Vec3)> for I16Vec4 { + #[inline] + fn from((x, v): (i16, I16Vec3)) -> Self { + Self::new(x, v.x, v.y, v.z) + } +} + +impl From<(I16Vec2, i16, i16)> for I16Vec4 { + #[inline] + fn from((v, z, w): (I16Vec2, i16, i16)) -> Self { + Self::new(v.x, v.y, z, w) + } +} + +impl From<(I16Vec2, I16Vec2)> for I16Vec4 { + #[inline] + fn from((v, u): (I16Vec2, I16Vec2)) -> Self { + Self::new(v.x, v.y, u.x, u.y) + } +} + +impl TryFrom for I16Vec4 { + type Error = core::num::TryFromIntError; + + #[inline] + fn try_from(v: U16Vec4) -> Result { + Ok(Self::new( + i16::try_from(v.x)?, + i16::try_from(v.y)?, + i16::try_from(v.z)?, + i16::try_from(v.w)?, + )) + } +} + +impl TryFrom for I16Vec4 { + type Error = core::num::TryFromIntError; + + #[inline] + fn try_from(v: IVec4) -> Result { + Ok(Self::new( + i16::try_from(v.x)?, + i16::try_from(v.y)?, + i16::try_from(v.z)?, + i16::try_from(v.w)?, + )) + } +} + +impl TryFrom for I16Vec4 { + type Error = core::num::TryFromIntError; + + #[inline] + fn try_from(v: UVec4) -> Result { + Ok(Self::new( + i16::try_from(v.x)?, + i16::try_from(v.y)?, + i16::try_from(v.z)?, + i16::try_from(v.w)?, + )) + } +} + +impl TryFrom for I16Vec4 { + type Error = core::num::TryFromIntError; + + #[inline] + fn try_from(v: I64Vec4) -> Result { + Ok(Self::new( + i16::try_from(v.x)?, + i16::try_from(v.y)?, + i16::try_from(v.z)?, + i16::try_from(v.w)?, + )) + } +} + +impl TryFrom for I16Vec4 { + type Error = core::num::TryFromIntError; + + #[inline] + fn try_from(v: U64Vec4) -> Result { + Ok(Self::new( + i16::try_from(v.x)?, + i16::try_from(v.y)?, + i16::try_from(v.z)?, + i16::try_from(v.w)?, + )) + } +} diff --git a/src/i32/ivec2.rs b/src/i32/ivec2.rs index 3aead83e..2a4e7b7b 100644 --- a/src/i32/ivec2.rs +++ b/src/i32/ivec2.rs @@ -1,6 +1,6 @@ // Generated from vec.rs.tera template. Edit the template, not the generated file. -use crate::{BVec2, I64Vec2, IVec3, U64Vec2, UVec2}; +use crate::{BVec2, I16Vec2, I64Vec2, IVec3, U16Vec2, U64Vec2, UVec2}; #[cfg(not(target_arch = "spirv"))] use core::fmt; @@ -349,6 +349,18 @@ impl IVec2 { crate::DVec2::new(self.x as f64, self.y as f64) } + /// Casts all elements of `self` to `i16`. + #[inline] + pub fn as_i16vec2(&self) -> crate::I16Vec2 { + crate::I16Vec2::new(self.x as i16, self.y as i16) + } + + /// Casts all elements of `self` to `u16`. + #[inline] + pub fn as_u16vec2(&self) -> crate::U16Vec2 { + crate::U16Vec2::new(self.x as u16, self.y as u16) + } + /// Casts all elements of `self` to `u32`. 
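For illustration (assuming this patch): `I16Vec4` can be assembled from smaller vectors, and the new `as_i16vec2`/`as_u16vec2` casts on `IVec2` are plain `as` casts that wrap out-of-range values.

    use glam::{I16Vec2, I16Vec4, IVec2};

    let xy = I16Vec2::new(1, 2);
    let zw = I16Vec2::new(3, 4);
    assert_eq!(I16Vec4::from((xy, zw)), I16Vec4::new(1, 2, 3, 4));
    assert_eq!(I16Vec4::from((xy, 3i16, 4i16)), I16Vec4::new(1, 2, 3, 4));

    // Narrowing cast: wraps rather than failing.
    assert_eq!(IVec2::new(65537, -1).as_i16vec2(), I16Vec2::new(1, -1));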
#[inline] pub fn as_uvec2(&self) -> crate::UVec2 { @@ -1148,6 +1160,20 @@ impl From for (i32, i32) { } } +impl From for IVec2 { + #[inline] + fn from(v: I16Vec2) -> Self { + Self::new(i32::from(v.x), i32::from(v.y)) + } +} + +impl From for IVec2 { + #[inline] + fn from(v: U16Vec2) -> Self { + Self::new(i32::from(v.x), i32::from(v.y)) + } +} + impl TryFrom for IVec2 { type Error = core::num::TryFromIntError; @@ -1157,20 +1183,20 @@ impl TryFrom for IVec2 { } } -impl TryFrom for IVec2 { +impl TryFrom for IVec2 { type Error = core::num::TryFromIntError; #[inline] - fn try_from(v: U64Vec2) -> Result { + fn try_from(v: I64Vec2) -> Result { Ok(Self::new(i32::try_from(v.x)?, i32::try_from(v.y)?)) } } -impl TryFrom for IVec2 { +impl TryFrom for IVec2 { type Error = core::num::TryFromIntError; #[inline] - fn try_from(v: I64Vec2) -> Result { + fn try_from(v: U64Vec2) -> Result { Ok(Self::new(i32::try_from(v.x)?, i32::try_from(v.y)?)) } } diff --git a/src/i32/ivec3.rs b/src/i32/ivec3.rs index d50eedb3..44e41f28 100644 --- a/src/i32/ivec3.rs +++ b/src/i32/ivec3.rs @@ -1,6 +1,6 @@ // Generated from vec.rs.tera template. Edit the template, not the generated file. -use crate::{BVec3, I64Vec3, IVec2, IVec4, U64Vec3, UVec3}; +use crate::{BVec3, I16Vec3, I64Vec3, IVec2, IVec4, U16Vec3, U64Vec3, UVec3}; #[cfg(not(target_arch = "spirv"))] use core::fmt; @@ -376,6 +376,18 @@ impl IVec3 { crate::DVec3::new(self.x as f64, self.y as f64, self.z as f64) } + /// Casts all elements of `self` to `i16`. + #[inline] + pub fn as_i16vec3(&self) -> crate::I16Vec3 { + crate::I16Vec3::new(self.x as i16, self.y as i16, self.z as i16) + } + + /// Casts all elements of `self` to `u16`. + #[inline] + pub fn as_u16vec3(&self) -> crate::U16Vec3 { + crate::U16Vec3::new(self.x as u16, self.y as u16, self.z as u16) + } + /// Casts all elements of `self` to `u32`. #[inline] pub fn as_uvec3(&self) -> crate::UVec3 { @@ -1246,6 +1258,20 @@ impl From<(IVec2, i32)> for IVec3 { } } +impl From for IVec3 { + #[inline] + fn from(v: I16Vec3) -> Self { + Self::new(i32::from(v.x), i32::from(v.y), i32::from(v.z)) + } +} + +impl From for IVec3 { + #[inline] + fn from(v: U16Vec3) -> Self { + Self::new(i32::from(v.x), i32::from(v.y), i32::from(v.z)) + } +} + impl TryFrom for IVec3 { type Error = core::num::TryFromIntError; @@ -1259,11 +1285,11 @@ impl TryFrom for IVec3 { } } -impl TryFrom for IVec3 { +impl TryFrom for IVec3 { type Error = core::num::TryFromIntError; #[inline] - fn try_from(v: U64Vec3) -> Result { + fn try_from(v: I64Vec3) -> Result { Ok(Self::new( i32::try_from(v.x)?, i32::try_from(v.y)?, @@ -1272,11 +1298,11 @@ impl TryFrom for IVec3 { } } -impl TryFrom for IVec3 { +impl TryFrom for IVec3 { type Error = core::num::TryFromIntError; #[inline] - fn try_from(v: I64Vec3) -> Result { + fn try_from(v: U64Vec3) -> Result { Ok(Self::new( i32::try_from(v.x)?, i32::try_from(v.y)?, diff --git a/src/i32/ivec4.rs b/src/i32/ivec4.rs index 8e69491c..b283f3e1 100644 --- a/src/i32/ivec4.rs +++ b/src/i32/ivec4.rs @@ -1,6 +1,6 @@ // Generated from vec.rs.tera template. Edit the template, not the generated file. -use crate::{BVec4, I64Vec4, IVec2, IVec3, U64Vec4, UVec4}; +use crate::{BVec4, I16Vec4, I64Vec4, IVec2, IVec3, U16Vec4, U64Vec4, UVec4}; #[cfg(not(target_arch = "spirv"))] use core::fmt; @@ -398,6 +398,18 @@ impl IVec4 { crate::DVec4::new(self.x as f64, self.y as f64, self.z as f64, self.w as f64) } + /// Casts all elements of `self` to `i16`. 
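Since every i16 and u16 value fits in an i32, the conversions into `IVec2`/`IVec3` added above are infallible `From` impls rather than `TryFrom`. An illustrative sketch (assumes this patch):

    use glam::{I16Vec3, IVec2, IVec3, U16Vec2};

    assert_eq!(IVec2::from(U16Vec2::new(65535, 0)), IVec2::new(65535, 0));
    assert_eq!(IVec3::from(I16Vec3::new(-1, 2, -3)), IVec3::new(-1, 2, -3));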
+ #[inline]
+ pub fn as_i16vec4(&self) -> crate::I16Vec4 {
+ crate::I16Vec4::new(self.x as i16, self.y as i16, self.z as i16, self.w as i16)
+ }
+
+ /// Casts all elements of `self` to `u16`.
+ #[inline]
+ pub fn as_u16vec4(&self) -> crate::U16Vec4 {
+ crate::U16Vec4::new(self.x as u16, self.y as u16, self.z as u16, self.w as u16)
+ }
+
 /// Casts all elements of `self` to `u32`.
 #[inline]
 pub fn as_uvec4(&self) -> crate::UVec4 {
@@ -1353,6 +1365,30 @@ impl From<(IVec2, IVec2)> for IVec4 {
 }
 }
 
+impl From<I16Vec4> for IVec4 {
+ #[inline]
+ fn from(v: I16Vec4) -> Self {
+ Self::new(
+ i32::from(v.x),
+ i32::from(v.y),
+ i32::from(v.z),
+ i32::from(v.w),
+ )
+ }
+}
+
+impl From<U16Vec4> for IVec4 {
+ #[inline]
+ fn from(v: U16Vec4) -> Self {
+ Self::new(
+ i32::from(v.x),
+ i32::from(v.y),
+ i32::from(v.z),
+ i32::from(v.w),
+ )
+ }
+}
+
 impl TryFrom<UVec4> for IVec4 {
 type Error = core::num::TryFromIntError;
 
@@ -1367,11 +1403,11 @@ impl TryFrom<UVec4> for IVec4 {
 }
 }
 
-impl TryFrom<U64Vec4> for IVec4 {
+impl TryFrom<I64Vec4> for IVec4 {
 type Error = core::num::TryFromIntError;
 
 #[inline]
- fn try_from(v: U64Vec4) -> Result<Self, Self::Error> {
+ fn try_from(v: I64Vec4) -> Result<Self, Self::Error> {
 Ok(Self::new(
 i32::try_from(v.x)?,
 i32::try_from(v.y)?,
@@ -1381,11 +1417,11 @@ impl TryFrom<U64Vec4> for IVec4 {
 }
 }
 
-impl TryFrom<I64Vec4> for IVec4 {
+impl TryFrom<U64Vec4> for IVec4 {
 type Error = core::num::TryFromIntError;
 
 #[inline]
- fn try_from(v: I64Vec4) -> Result<Self, Self::Error> {
+ fn try_from(v: U64Vec4) -> Result<Self, Self::Error> {
 Ok(Self::new(
 i32::try_from(v.x)?,
 i32::try_from(v.y)?,
diff --git a/src/i64/i64vec2.rs b/src/i64/i64vec2.rs
index cb002e10..0e009f96 100644
--- a/src/i64/i64vec2.rs
+++ b/src/i64/i64vec2.rs
@@ -1,6 +1,6 @@
 // Generated from vec.rs.tera template. Edit the template, not the generated file.
 
-use crate::{BVec2, I64Vec3, IVec2, U64Vec2};
+use crate::{BVec2, I16Vec2, I64Vec3, IVec2, U16Vec2, U64Vec2, UVec2};
 
 #[cfg(not(target_arch = "spirv"))]
 use core::fmt;
@@ -349,6 +349,18 @@ impl I64Vec2 {
 crate::DVec2::new(self.x as f64, self.y as f64)
 }
 
+ /// Casts all elements of `self` to `i16`.
+ #[inline]
+ pub fn as_i16vec2(&self) -> crate::I16Vec2 {
+ crate::I16Vec2::new(self.x as i16, self.y as i16)
+ }
+
+ /// Casts all elements of `self` to `u16`.
+ #[inline]
+ pub fn as_u16vec2(&self) -> crate::U16Vec2 {
+ crate::U16Vec2::new(self.x as u16, self.y as u16)
+ }
+
 /// Casts all elements of `self` to `i32`.
 #[inline]
 pub fn as_ivec2(&self) -> crate::IVec2 {
@@ -1148,6 +1160,20 @@ impl From<I64Vec2> for (i64, i64) {
 }
 }
 
+impl From<I16Vec2> for I64Vec2 {
+ #[inline]
+ fn from(v: I16Vec2) -> Self {
+ Self::new(i64::from(v.x), i64::from(v.y))
+ }
+}
+
+impl From<U16Vec2> for I64Vec2 {
+ #[inline]
+ fn from(v: U16Vec2) -> Self {
+ Self::new(i64::from(v.x), i64::from(v.y))
+ }
+}
+
 impl From<IVec2> for I64Vec2 {
 #[inline]
 fn from(v: IVec2) -> Self {
@@ -1155,6 +1181,13 @@ impl From<IVec2> for I64Vec2 {
 }
 }
 
+impl From<UVec2> for I64Vec2 {
+ #[inline]
+ fn from(v: UVec2) -> Self {
+ Self::new(i64::from(v.x), i64::from(v.y))
+ }
+}
+
 impl TryFrom<U64Vec2> for I64Vec2 {
 type Error = core::num::TryFromIntError;
 
diff --git a/src/i64/i64vec3.rs b/src/i64/i64vec3.rs
index e740c333..7964f5f2 100644
--- a/src/i64/i64vec3.rs
+++ b/src/i64/i64vec3.rs
@@ -1,6 +1,6 @@
 // Generated from vec.rs.tera template. Edit the template, not the generated file.
 
-use crate::{BVec3, I64Vec2, I64Vec4, IVec3, U64Vec3};
+use crate::{BVec3, I16Vec3, I64Vec2, I64Vec4, IVec3, U16Vec3, U64Vec3, UVec3};
 
 #[cfg(not(target_arch = "spirv"))]
 use core::fmt;
@@ -376,6 +376,18 @@ impl I64Vec3 {
 crate::DVec3::new(self.x as f64, self.y as f64, self.z as f64)
 }
 
+ /// Casts all elements of `self` to `i16`.
+ #[inline]
+ pub fn as_i16vec3(&self) -> crate::I16Vec3 {
+ crate::I16Vec3::new(self.x as i16, self.y as i16, self.z as i16)
+ }
+
+ /// Casts all elements of `self` to `u16`.
+ #[inline]
+ pub fn as_u16vec3(&self) -> crate::U16Vec3 {
+ crate::U16Vec3::new(self.x as u16, self.y as u16, self.z as u16)
+ }
+
 /// Casts all elements of `self` to `i32`.
 #[inline]
 pub fn as_ivec3(&self) -> crate::IVec3 {
@@ -1246,6 +1258,20 @@ impl From<(I64Vec2, i64)> for I64Vec3 {
 }
 }
 
+impl From<I16Vec3> for I64Vec3 {
+ #[inline]
+ fn from(v: I16Vec3) -> Self {
+ Self::new(i64::from(v.x), i64::from(v.y), i64::from(v.z))
+ }
+}
+
+impl From<U16Vec3> for I64Vec3 {
+ #[inline]
+ fn from(v: U16Vec3) -> Self {
+ Self::new(i64::from(v.x), i64::from(v.y), i64::from(v.z))
+ }
+}
+
 impl From<IVec3> for I64Vec3 {
 #[inline]
 fn from(v: IVec3) -> Self {
@@ -1253,6 +1279,13 @@ impl From<IVec3> for I64Vec3 {
 }
 }
 
+impl From<UVec3> for I64Vec3 {
+ #[inline]
+ fn from(v: UVec3) -> Self {
+ Self::new(i64::from(v.x), i64::from(v.y), i64::from(v.z))
+ }
+}
+
 impl TryFrom<U64Vec3> for I64Vec3 {
 type Error = core::num::TryFromIntError;
 
diff --git a/src/i64/i64vec4.rs b/src/i64/i64vec4.rs
index 7c0a0fca..9cb1eb57 100644
--- a/src/i64/i64vec4.rs
+++ b/src/i64/i64vec4.rs
@@ -1,6 +1,6 @@
 // Generated from vec.rs.tera template. Edit the template, not the generated file.
 
-use crate::{BVec4, I64Vec2, I64Vec3, IVec4, U64Vec4};
+use crate::{BVec4, I16Vec4, I64Vec2, I64Vec3, IVec4, U16Vec4, U64Vec4, UVec4};
 
 #[cfg(not(target_arch = "spirv"))]
 use core::fmt;
@@ -398,6 +398,18 @@ impl I64Vec4 {
 crate::DVec4::new(self.x as f64, self.y as f64, self.z as f64, self.w as f64)
 }
 
+ /// Casts all elements of `self` to `i16`.
+ #[inline]
+ pub fn as_i16vec4(&self) -> crate::I16Vec4 {
+ crate::I16Vec4::new(self.x as i16, self.y as i16, self.z as i16, self.w as i16)
+ }
+
+ /// Casts all elements of `self` to `u16`.
+ #[inline]
+ pub fn as_u16vec4(&self) -> crate::U16Vec4 {
+ crate::U16Vec4::new(self.x as u16, self.y as u16, self.z as u16, self.w as u16)
+ }
+
 /// Casts all elements of `self` to `i32`.
 #[inline]
 pub fn as_ivec4(&self) -> crate::IVec4 {
@@ -1353,6 +1365,30 @@ impl From<(I64Vec2, I64Vec2)> for I64Vec4 {
 }
 }
 
+impl From<I16Vec4> for I64Vec4 {
+ #[inline]
+ fn from(v: I16Vec4) -> Self {
+ Self::new(
+ i64::from(v.x),
+ i64::from(v.y),
+ i64::from(v.z),
+ i64::from(v.w),
+ )
+ }
+}
+
+impl From<U16Vec4> for I64Vec4 {
+ #[inline]
+ fn from(v: U16Vec4) -> Self {
+ Self::new(
+ i64::from(v.x),
+ i64::from(v.y),
+ i64::from(v.z),
+ i64::from(v.w),
+ )
+ }
+}
+
 impl From<IVec4> for I64Vec4 {
 #[inline]
 fn from(v: IVec4) -> Self {
@@ -1365,6 +1401,18 @@ impl From<IVec4> for I64Vec4 {
 }
 }
 
+impl From<UVec4> for I64Vec4 {
+ #[inline]
+ fn from(v: UVec4) -> Self {
+ Self::new(
+ i64::from(v.x),
+ i64::from(v.y),
+ i64::from(v.z),
+ i64::from(v.w),
+ )
+ }
+}
+
 impl TryFrom<U64Vec4> for I64Vec4 {
 type Error = core::num::TryFromIntError;
 
diff --git a/src/lib.rs b/src/lib.rs
index c5566656..6fea6a8e 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -15,6 +15,10 @@
 * square matrices: [`DMat2`], [`DMat3`] and [`DMat4`]
 * a quaternion type: [`DQuat`]
 * affine transformation types: [`DAffine2`] and [`DAffine3`]
+* [`i16`](mod@i16) types
+ * vectors: [`I16Vec2`], [`I16Vec3`] and [`I16Vec4`]
+* [`u16`](mod@u16) types
+ * vectors: [`U16Vec2`], [`U16Vec3`] and [`U16Vec4`]
 * [`i32`](mod@i32) types
 * vectors: [`IVec2`], [`IVec3`] and [`IVec4`]
 * [`u32`](mod@u32) types
@@ -307,6 +311,14 @@ pub use self::f32::*;
 pub mod f64;
 pub use self::f64::*;
 
+/** `i16` vector types. */
+pub mod i16;
+pub use self::i16::*;
+
+/** `u16` vector types.
*/ +pub mod u16; +pub use self::u16::*; + /** `i32` vector types. */ pub mod i32; pub use self::i32::*; diff --git a/src/swizzles.rs b/src/swizzles.rs index 916f50d5..683a574e 100644 --- a/src/swizzles.rs +++ b/src/swizzles.rs @@ -6,6 +6,14 @@ mod ivec2_impl; mod ivec3_impl; mod ivec4_impl; +mod i16vec2_impl; +mod i16vec3_impl; +mod i16vec4_impl; + +mod u16vec2_impl; +mod u16vec3_impl; +mod u16vec4_impl; + mod i64vec2_impl; mod i64vec3_impl; mod i64vec4_impl; diff --git a/src/swizzles/i16vec2_impl.rs b/src/swizzles/i16vec2_impl.rs new file mode 100644 index 00000000..8dc9eda6 --- /dev/null +++ b/src/swizzles/i16vec2_impl.rs @@ -0,0 +1,193 @@ +// Generated from swizzle_impl.rs.tera template. Edit the template, not the generated file. + +use crate::{I16Vec2, I16Vec3, I16Vec4, Vec2Swizzles}; + +impl Vec2Swizzles for I16Vec2 { + type Vec3 = I16Vec3; + + type Vec4 = I16Vec4; + + #[inline] + fn xx(self) -> I16Vec2 { + I16Vec2 { + x: self.x, + y: self.x, + } + } + + #[inline] + fn xy(self) -> I16Vec2 { + I16Vec2 { + x: self.x, + y: self.y, + } + } + + #[inline] + fn yx(self) -> I16Vec2 { + I16Vec2 { + x: self.y, + y: self.x, + } + } + + #[inline] + fn yy(self) -> I16Vec2 { + I16Vec2 { + x: self.y, + y: self.y, + } + } + + #[inline] + fn xxx(self) -> I16Vec3 { + I16Vec3 { + x: self.x, + y: self.x, + z: self.x, + } + } + + #[inline] + fn xxy(self) -> I16Vec3 { + I16Vec3 { + x: self.x, + y: self.x, + z: self.y, + } + } + + #[inline] + fn xyx(self) -> I16Vec3 { + I16Vec3 { + x: self.x, + y: self.y, + z: self.x, + } + } + + #[inline] + fn xyy(self) -> I16Vec3 { + I16Vec3 { + x: self.x, + y: self.y, + z: self.y, + } + } + + #[inline] + fn yxx(self) -> I16Vec3 { + I16Vec3 { + x: self.y, + y: self.x, + z: self.x, + } + } + + #[inline] + fn yxy(self) -> I16Vec3 { + I16Vec3 { + x: self.y, + y: self.x, + z: self.y, + } + } + + #[inline] + fn yyx(self) -> I16Vec3 { + I16Vec3 { + x: self.y, + y: self.y, + z: self.x, + } + } + + #[inline] + fn yyy(self) -> I16Vec3 { + I16Vec3 { + x: self.y, + y: self.y, + z: self.y, + } + } + + #[inline] + fn xxxx(self) -> I16Vec4 { + I16Vec4::new(self.x, self.x, self.x, self.x) + } + + #[inline] + fn xxxy(self) -> I16Vec4 { + I16Vec4::new(self.x, self.x, self.x, self.y) + } + + #[inline] + fn xxyx(self) -> I16Vec4 { + I16Vec4::new(self.x, self.x, self.y, self.x) + } + + #[inline] + fn xxyy(self) -> I16Vec4 { + I16Vec4::new(self.x, self.x, self.y, self.y) + } + + #[inline] + fn xyxx(self) -> I16Vec4 { + I16Vec4::new(self.x, self.y, self.x, self.x) + } + + #[inline] + fn xyxy(self) -> I16Vec4 { + I16Vec4::new(self.x, self.y, self.x, self.y) + } + + #[inline] + fn xyyx(self) -> I16Vec4 { + I16Vec4::new(self.x, self.y, self.y, self.x) + } + + #[inline] + fn xyyy(self) -> I16Vec4 { + I16Vec4::new(self.x, self.y, self.y, self.y) + } + + #[inline] + fn yxxx(self) -> I16Vec4 { + I16Vec4::new(self.y, self.x, self.x, self.x) + } + + #[inline] + fn yxxy(self) -> I16Vec4 { + I16Vec4::new(self.y, self.x, self.x, self.y) + } + + #[inline] + fn yxyx(self) -> I16Vec4 { + I16Vec4::new(self.y, self.x, self.y, self.x) + } + + #[inline] + fn yxyy(self) -> I16Vec4 { + I16Vec4::new(self.y, self.x, self.y, self.y) + } + + #[inline] + fn yyxx(self) -> I16Vec4 { + I16Vec4::new(self.y, self.y, self.x, self.x) + } + + #[inline] + fn yyxy(self) -> I16Vec4 { + I16Vec4::new(self.y, self.y, self.x, self.y) + } + + #[inline] + fn yyyx(self) -> I16Vec4 { + I16Vec4::new(self.y, self.y, self.y, self.x) + } + + #[inline] + fn yyyy(self) -> I16Vec4 { + I16Vec4::new(self.y, self.y, self.y, self.y) + } +} diff 
--git a/src/swizzles/i16vec3_impl.rs b/src/swizzles/i16vec3_impl.rs new file mode 100644 index 00000000..54f4a9ab --- /dev/null +++ b/src/swizzles/i16vec3_impl.rs @@ -0,0 +1,729 @@ +// Generated from swizzle_impl.rs.tera template. Edit the template, not the generated file. + +use crate::{I16Vec2, I16Vec3, I16Vec4, Vec3Swizzles}; + +impl Vec3Swizzles for I16Vec3 { + type Vec2 = I16Vec2; + + type Vec4 = I16Vec4; + + #[inline] + fn xx(self) -> I16Vec2 { + I16Vec2 { + x: self.x, + y: self.x, + } + } + + #[inline] + fn xy(self) -> I16Vec2 { + I16Vec2 { + x: self.x, + y: self.y, + } + } + + #[inline] + fn xz(self) -> I16Vec2 { + I16Vec2 { + x: self.x, + y: self.z, + } + } + + #[inline] + fn yx(self) -> I16Vec2 { + I16Vec2 { + x: self.y, + y: self.x, + } + } + + #[inline] + fn yy(self) -> I16Vec2 { + I16Vec2 { + x: self.y, + y: self.y, + } + } + + #[inline] + fn yz(self) -> I16Vec2 { + I16Vec2 { + x: self.y, + y: self.z, + } + } + + #[inline] + fn zx(self) -> I16Vec2 { + I16Vec2 { + x: self.z, + y: self.x, + } + } + + #[inline] + fn zy(self) -> I16Vec2 { + I16Vec2 { + x: self.z, + y: self.y, + } + } + + #[inline] + fn zz(self) -> I16Vec2 { + I16Vec2 { + x: self.z, + y: self.z, + } + } + + #[inline] + fn xxx(self) -> I16Vec3 { + I16Vec3 { + x: self.x, + y: self.x, + z: self.x, + } + } + + #[inline] + fn xxy(self) -> I16Vec3 { + I16Vec3 { + x: self.x, + y: self.x, + z: self.y, + } + } + + #[inline] + fn xxz(self) -> I16Vec3 { + I16Vec3 { + x: self.x, + y: self.x, + z: self.z, + } + } + + #[inline] + fn xyx(self) -> I16Vec3 { + I16Vec3 { + x: self.x, + y: self.y, + z: self.x, + } + } + + #[inline] + fn xyy(self) -> I16Vec3 { + I16Vec3 { + x: self.x, + y: self.y, + z: self.y, + } + } + + #[inline] + fn xyz(self) -> I16Vec3 { + I16Vec3 { + x: self.x, + y: self.y, + z: self.z, + } + } + + #[inline] + fn xzx(self) -> I16Vec3 { + I16Vec3 { + x: self.x, + y: self.z, + z: self.x, + } + } + + #[inline] + fn xzy(self) -> I16Vec3 { + I16Vec3 { + x: self.x, + y: self.z, + z: self.y, + } + } + + #[inline] + fn xzz(self) -> I16Vec3 { + I16Vec3 { + x: self.x, + y: self.z, + z: self.z, + } + } + + #[inline] + fn yxx(self) -> I16Vec3 { + I16Vec3 { + x: self.y, + y: self.x, + z: self.x, + } + } + + #[inline] + fn yxy(self) -> I16Vec3 { + I16Vec3 { + x: self.y, + y: self.x, + z: self.y, + } + } + + #[inline] + fn yxz(self) -> I16Vec3 { + I16Vec3 { + x: self.y, + y: self.x, + z: self.z, + } + } + + #[inline] + fn yyx(self) -> I16Vec3 { + I16Vec3 { + x: self.y, + y: self.y, + z: self.x, + } + } + + #[inline] + fn yyy(self) -> I16Vec3 { + I16Vec3 { + x: self.y, + y: self.y, + z: self.y, + } + } + + #[inline] + fn yyz(self) -> I16Vec3 { + I16Vec3 { + x: self.y, + y: self.y, + z: self.z, + } + } + + #[inline] + fn yzx(self) -> I16Vec3 { + I16Vec3 { + x: self.y, + y: self.z, + z: self.x, + } + } + + #[inline] + fn yzy(self) -> I16Vec3 { + I16Vec3 { + x: self.y, + y: self.z, + z: self.y, + } + } + + #[inline] + fn yzz(self) -> I16Vec3 { + I16Vec3 { + x: self.y, + y: self.z, + z: self.z, + } + } + + #[inline] + fn zxx(self) -> I16Vec3 { + I16Vec3 { + x: self.z, + y: self.x, + z: self.x, + } + } + + #[inline] + fn zxy(self) -> I16Vec3 { + I16Vec3 { + x: self.z, + y: self.x, + z: self.y, + } + } + + #[inline] + fn zxz(self) -> I16Vec3 { + I16Vec3 { + x: self.z, + y: self.x, + z: self.z, + } + } + + #[inline] + fn zyx(self) -> I16Vec3 { + I16Vec3 { + x: self.z, + y: self.y, + z: self.x, + } + } + + #[inline] + fn zyy(self) -> I16Vec3 { + I16Vec3 { + x: self.z, + y: self.y, + z: self.y, + } + } + + #[inline] + fn zyz(self) -> 
I16Vec3 { + I16Vec3 { + x: self.z, + y: self.y, + z: self.z, + } + } + + #[inline] + fn zzx(self) -> I16Vec3 { + I16Vec3 { + x: self.z, + y: self.z, + z: self.x, + } + } + + #[inline] + fn zzy(self) -> I16Vec3 { + I16Vec3 { + x: self.z, + y: self.z, + z: self.y, + } + } + + #[inline] + fn zzz(self) -> I16Vec3 { + I16Vec3 { + x: self.z, + y: self.z, + z: self.z, + } + } + + #[inline] + fn xxxx(self) -> I16Vec4 { + I16Vec4::new(self.x, self.x, self.x, self.x) + } + + #[inline] + fn xxxy(self) -> I16Vec4 { + I16Vec4::new(self.x, self.x, self.x, self.y) + } + + #[inline] + fn xxxz(self) -> I16Vec4 { + I16Vec4::new(self.x, self.x, self.x, self.z) + } + + #[inline] + fn xxyx(self) -> I16Vec4 { + I16Vec4::new(self.x, self.x, self.y, self.x) + } + + #[inline] + fn xxyy(self) -> I16Vec4 { + I16Vec4::new(self.x, self.x, self.y, self.y) + } + + #[inline] + fn xxyz(self) -> I16Vec4 { + I16Vec4::new(self.x, self.x, self.y, self.z) + } + + #[inline] + fn xxzx(self) -> I16Vec4 { + I16Vec4::new(self.x, self.x, self.z, self.x) + } + + #[inline] + fn xxzy(self) -> I16Vec4 { + I16Vec4::new(self.x, self.x, self.z, self.y) + } + + #[inline] + fn xxzz(self) -> I16Vec4 { + I16Vec4::new(self.x, self.x, self.z, self.z) + } + + #[inline] + fn xyxx(self) -> I16Vec4 { + I16Vec4::new(self.x, self.y, self.x, self.x) + } + + #[inline] + fn xyxy(self) -> I16Vec4 { + I16Vec4::new(self.x, self.y, self.x, self.y) + } + + #[inline] + fn xyxz(self) -> I16Vec4 { + I16Vec4::new(self.x, self.y, self.x, self.z) + } + + #[inline] + fn xyyx(self) -> I16Vec4 { + I16Vec4::new(self.x, self.y, self.y, self.x) + } + + #[inline] + fn xyyy(self) -> I16Vec4 { + I16Vec4::new(self.x, self.y, self.y, self.y) + } + + #[inline] + fn xyyz(self) -> I16Vec4 { + I16Vec4::new(self.x, self.y, self.y, self.z) + } + + #[inline] + fn xyzx(self) -> I16Vec4 { + I16Vec4::new(self.x, self.y, self.z, self.x) + } + + #[inline] + fn xyzy(self) -> I16Vec4 { + I16Vec4::new(self.x, self.y, self.z, self.y) + } + + #[inline] + fn xyzz(self) -> I16Vec4 { + I16Vec4::new(self.x, self.y, self.z, self.z) + } + + #[inline] + fn xzxx(self) -> I16Vec4 { + I16Vec4::new(self.x, self.z, self.x, self.x) + } + + #[inline] + fn xzxy(self) -> I16Vec4 { + I16Vec4::new(self.x, self.z, self.x, self.y) + } + + #[inline] + fn xzxz(self) -> I16Vec4 { + I16Vec4::new(self.x, self.z, self.x, self.z) + } + + #[inline] + fn xzyx(self) -> I16Vec4 { + I16Vec4::new(self.x, self.z, self.y, self.x) + } + + #[inline] + fn xzyy(self) -> I16Vec4 { + I16Vec4::new(self.x, self.z, self.y, self.y) + } + + #[inline] + fn xzyz(self) -> I16Vec4 { + I16Vec4::new(self.x, self.z, self.y, self.z) + } + + #[inline] + fn xzzx(self) -> I16Vec4 { + I16Vec4::new(self.x, self.z, self.z, self.x) + } + + #[inline] + fn xzzy(self) -> I16Vec4 { + I16Vec4::new(self.x, self.z, self.z, self.y) + } + + #[inline] + fn xzzz(self) -> I16Vec4 { + I16Vec4::new(self.x, self.z, self.z, self.z) + } + + #[inline] + fn yxxx(self) -> I16Vec4 { + I16Vec4::new(self.y, self.x, self.x, self.x) + } + + #[inline] + fn yxxy(self) -> I16Vec4 { + I16Vec4::new(self.y, self.x, self.x, self.y) + } + + #[inline] + fn yxxz(self) -> I16Vec4 { + I16Vec4::new(self.y, self.x, self.x, self.z) + } + + #[inline] + fn yxyx(self) -> I16Vec4 { + I16Vec4::new(self.y, self.x, self.y, self.x) + } + + #[inline] + fn yxyy(self) -> I16Vec4 { + I16Vec4::new(self.y, self.x, self.y, self.y) + } + + #[inline] + fn yxyz(self) -> I16Vec4 { + I16Vec4::new(self.y, self.x, self.y, self.z) + } + + #[inline] + fn yxzx(self) -> I16Vec4 { + I16Vec4::new(self.y, self.x, 
self.z, self.x) + } + + #[inline] + fn yxzy(self) -> I16Vec4 { + I16Vec4::new(self.y, self.x, self.z, self.y) + } + + #[inline] + fn yxzz(self) -> I16Vec4 { + I16Vec4::new(self.y, self.x, self.z, self.z) + } + + #[inline] + fn yyxx(self) -> I16Vec4 { + I16Vec4::new(self.y, self.y, self.x, self.x) + } + + #[inline] + fn yyxy(self) -> I16Vec4 { + I16Vec4::new(self.y, self.y, self.x, self.y) + } + + #[inline] + fn yyxz(self) -> I16Vec4 { + I16Vec4::new(self.y, self.y, self.x, self.z) + } + + #[inline] + fn yyyx(self) -> I16Vec4 { + I16Vec4::new(self.y, self.y, self.y, self.x) + } + + #[inline] + fn yyyy(self) -> I16Vec4 { + I16Vec4::new(self.y, self.y, self.y, self.y) + } + + #[inline] + fn yyyz(self) -> I16Vec4 { + I16Vec4::new(self.y, self.y, self.y, self.z) + } + + #[inline] + fn yyzx(self) -> I16Vec4 { + I16Vec4::new(self.y, self.y, self.z, self.x) + } + + #[inline] + fn yyzy(self) -> I16Vec4 { + I16Vec4::new(self.y, self.y, self.z, self.y) + } + + #[inline] + fn yyzz(self) -> I16Vec4 { + I16Vec4::new(self.y, self.y, self.z, self.z) + } + + #[inline] + fn yzxx(self) -> I16Vec4 { + I16Vec4::new(self.y, self.z, self.x, self.x) + } + + #[inline] + fn yzxy(self) -> I16Vec4 { + I16Vec4::new(self.y, self.z, self.x, self.y) + } + + #[inline] + fn yzxz(self) -> I16Vec4 { + I16Vec4::new(self.y, self.z, self.x, self.z) + } + + #[inline] + fn yzyx(self) -> I16Vec4 { + I16Vec4::new(self.y, self.z, self.y, self.x) + } + + #[inline] + fn yzyy(self) -> I16Vec4 { + I16Vec4::new(self.y, self.z, self.y, self.y) + } + + #[inline] + fn yzyz(self) -> I16Vec4 { + I16Vec4::new(self.y, self.z, self.y, self.z) + } + + #[inline] + fn yzzx(self) -> I16Vec4 { + I16Vec4::new(self.y, self.z, self.z, self.x) + } + + #[inline] + fn yzzy(self) -> I16Vec4 { + I16Vec4::new(self.y, self.z, self.z, self.y) + } + + #[inline] + fn yzzz(self) -> I16Vec4 { + I16Vec4::new(self.y, self.z, self.z, self.z) + } + + #[inline] + fn zxxx(self) -> I16Vec4 { + I16Vec4::new(self.z, self.x, self.x, self.x) + } + + #[inline] + fn zxxy(self) -> I16Vec4 { + I16Vec4::new(self.z, self.x, self.x, self.y) + } + + #[inline] + fn zxxz(self) -> I16Vec4 { + I16Vec4::new(self.z, self.x, self.x, self.z) + } + + #[inline] + fn zxyx(self) -> I16Vec4 { + I16Vec4::new(self.z, self.x, self.y, self.x) + } + + #[inline] + fn zxyy(self) -> I16Vec4 { + I16Vec4::new(self.z, self.x, self.y, self.y) + } + + #[inline] + fn zxyz(self) -> I16Vec4 { + I16Vec4::new(self.z, self.x, self.y, self.z) + } + + #[inline] + fn zxzx(self) -> I16Vec4 { + I16Vec4::new(self.z, self.x, self.z, self.x) + } + + #[inline] + fn zxzy(self) -> I16Vec4 { + I16Vec4::new(self.z, self.x, self.z, self.y) + } + + #[inline] + fn zxzz(self) -> I16Vec4 { + I16Vec4::new(self.z, self.x, self.z, self.z) + } + + #[inline] + fn zyxx(self) -> I16Vec4 { + I16Vec4::new(self.z, self.y, self.x, self.x) + } + + #[inline] + fn zyxy(self) -> I16Vec4 { + I16Vec4::new(self.z, self.y, self.x, self.y) + } + + #[inline] + fn zyxz(self) -> I16Vec4 { + I16Vec4::new(self.z, self.y, self.x, self.z) + } + + #[inline] + fn zyyx(self) -> I16Vec4 { + I16Vec4::new(self.z, self.y, self.y, self.x) + } + + #[inline] + fn zyyy(self) -> I16Vec4 { + I16Vec4::new(self.z, self.y, self.y, self.y) + } + + #[inline] + fn zyyz(self) -> I16Vec4 { + I16Vec4::new(self.z, self.y, self.y, self.z) + } + + #[inline] + fn zyzx(self) -> I16Vec4 { + I16Vec4::new(self.z, self.y, self.z, self.x) + } + + #[inline] + fn zyzy(self) -> I16Vec4 { + I16Vec4::new(self.z, self.y, self.z, self.y) + } + + #[inline] + fn zyzz(self) -> I16Vec4 { + 
I16Vec4::new(self.z, self.y, self.z, self.z) + } + + #[inline] + fn zzxx(self) -> I16Vec4 { + I16Vec4::new(self.z, self.z, self.x, self.x) + } + + #[inline] + fn zzxy(self) -> I16Vec4 { + I16Vec4::new(self.z, self.z, self.x, self.y) + } + + #[inline] + fn zzxz(self) -> I16Vec4 { + I16Vec4::new(self.z, self.z, self.x, self.z) + } + + #[inline] + fn zzyx(self) -> I16Vec4 { + I16Vec4::new(self.z, self.z, self.y, self.x) + } + + #[inline] + fn zzyy(self) -> I16Vec4 { + I16Vec4::new(self.z, self.z, self.y, self.y) + } + + #[inline] + fn zzyz(self) -> I16Vec4 { + I16Vec4::new(self.z, self.z, self.y, self.z) + } + + #[inline] + fn zzzx(self) -> I16Vec4 { + I16Vec4::new(self.z, self.z, self.z, self.x) + } + + #[inline] + fn zzzy(self) -> I16Vec4 { + I16Vec4::new(self.z, self.z, self.z, self.y) + } + + #[inline] + fn zzzz(self) -> I16Vec4 { + I16Vec4::new(self.z, self.z, self.z, self.z) + } +} diff --git a/src/swizzles/i16vec4_impl.rs b/src/swizzles/i16vec4_impl.rs new file mode 100644 index 00000000..36569feb --- /dev/null +++ b/src/swizzles/i16vec4_impl.rs @@ -0,0 +1,1993 @@ +// Generated from swizzle_impl.rs.tera template. Edit the template, not the generated file. + +use crate::{I16Vec2, I16Vec3, I16Vec4, Vec4Swizzles}; + +impl Vec4Swizzles for I16Vec4 { + type Vec2 = I16Vec2; + + type Vec3 = I16Vec3; + + #[inline] + fn xx(self) -> I16Vec2 { + I16Vec2 { + x: self.x, + y: self.x, + } + } + + #[inline] + fn xy(self) -> I16Vec2 { + I16Vec2 { + x: self.x, + y: self.y, + } + } + + #[inline] + fn xz(self) -> I16Vec2 { + I16Vec2 { + x: self.x, + y: self.z, + } + } + + #[inline] + fn xw(self) -> I16Vec2 { + I16Vec2 { + x: self.x, + y: self.w, + } + } + + #[inline] + fn yx(self) -> I16Vec2 { + I16Vec2 { + x: self.y, + y: self.x, + } + } + + #[inline] + fn yy(self) -> I16Vec2 { + I16Vec2 { + x: self.y, + y: self.y, + } + } + + #[inline] + fn yz(self) -> I16Vec2 { + I16Vec2 { + x: self.y, + y: self.z, + } + } + + #[inline] + fn yw(self) -> I16Vec2 { + I16Vec2 { + x: self.y, + y: self.w, + } + } + + #[inline] + fn zx(self) -> I16Vec2 { + I16Vec2 { + x: self.z, + y: self.x, + } + } + + #[inline] + fn zy(self) -> I16Vec2 { + I16Vec2 { + x: self.z, + y: self.y, + } + } + + #[inline] + fn zz(self) -> I16Vec2 { + I16Vec2 { + x: self.z, + y: self.z, + } + } + + #[inline] + fn zw(self) -> I16Vec2 { + I16Vec2 { + x: self.z, + y: self.w, + } + } + + #[inline] + fn wx(self) -> I16Vec2 { + I16Vec2 { + x: self.w, + y: self.x, + } + } + + #[inline] + fn wy(self) -> I16Vec2 { + I16Vec2 { + x: self.w, + y: self.y, + } + } + + #[inline] + fn wz(self) -> I16Vec2 { + I16Vec2 { + x: self.w, + y: self.z, + } + } + + #[inline] + fn ww(self) -> I16Vec2 { + I16Vec2 { + x: self.w, + y: self.w, + } + } + + #[inline] + fn xxx(self) -> I16Vec3 { + I16Vec3 { + x: self.x, + y: self.x, + z: self.x, + } + } + + #[inline] + fn xxy(self) -> I16Vec3 { + I16Vec3 { + x: self.x, + y: self.x, + z: self.y, + } + } + + #[inline] + fn xxz(self) -> I16Vec3 { + I16Vec3 { + x: self.x, + y: self.x, + z: self.z, + } + } + + #[inline] + fn xxw(self) -> I16Vec3 { + I16Vec3 { + x: self.x, + y: self.x, + z: self.w, + } + } + + #[inline] + fn xyx(self) -> I16Vec3 { + I16Vec3 { + x: self.x, + y: self.y, + z: self.x, + } + } + + #[inline] + fn xyy(self) -> I16Vec3 { + I16Vec3 { + x: self.x, + y: self.y, + z: self.y, + } + } + + #[inline] + fn xyz(self) -> I16Vec3 { + I16Vec3 { + x: self.x, + y: self.y, + z: self.z, + } + } + + #[inline] + fn xyw(self) -> I16Vec3 { + I16Vec3 { + x: self.x, + y: self.y, + z: self.w, + } + } + + #[inline] + fn xzx(self) -> 
I16Vec3 { + I16Vec3 { + x: self.x, + y: self.z, + z: self.x, + } + } + + #[inline] + fn xzy(self) -> I16Vec3 { + I16Vec3 { + x: self.x, + y: self.z, + z: self.y, + } + } + + #[inline] + fn xzz(self) -> I16Vec3 { + I16Vec3 { + x: self.x, + y: self.z, + z: self.z, + } + } + + #[inline] + fn xzw(self) -> I16Vec3 { + I16Vec3 { + x: self.x, + y: self.z, + z: self.w, + } + } + + #[inline] + fn xwx(self) -> I16Vec3 { + I16Vec3 { + x: self.x, + y: self.w, + z: self.x, + } + } + + #[inline] + fn xwy(self) -> I16Vec3 { + I16Vec3 { + x: self.x, + y: self.w, + z: self.y, + } + } + + #[inline] + fn xwz(self) -> I16Vec3 { + I16Vec3 { + x: self.x, + y: self.w, + z: self.z, + } + } + + #[inline] + fn xww(self) -> I16Vec3 { + I16Vec3 { + x: self.x, + y: self.w, + z: self.w, + } + } + + #[inline] + fn yxx(self) -> I16Vec3 { + I16Vec3 { + x: self.y, + y: self.x, + z: self.x, + } + } + + #[inline] + fn yxy(self) -> I16Vec3 { + I16Vec3 { + x: self.y, + y: self.x, + z: self.y, + } + } + + #[inline] + fn yxz(self) -> I16Vec3 { + I16Vec3 { + x: self.y, + y: self.x, + z: self.z, + } + } + + #[inline] + fn yxw(self) -> I16Vec3 { + I16Vec3 { + x: self.y, + y: self.x, + z: self.w, + } + } + + #[inline] + fn yyx(self) -> I16Vec3 { + I16Vec3 { + x: self.y, + y: self.y, + z: self.x, + } + } + + #[inline] + fn yyy(self) -> I16Vec3 { + I16Vec3 { + x: self.y, + y: self.y, + z: self.y, + } + } + + #[inline] + fn yyz(self) -> I16Vec3 { + I16Vec3 { + x: self.y, + y: self.y, + z: self.z, + } + } + + #[inline] + fn yyw(self) -> I16Vec3 { + I16Vec3 { + x: self.y, + y: self.y, + z: self.w, + } + } + + #[inline] + fn yzx(self) -> I16Vec3 { + I16Vec3 { + x: self.y, + y: self.z, + z: self.x, + } + } + + #[inline] + fn yzy(self) -> I16Vec3 { + I16Vec3 { + x: self.y, + y: self.z, + z: self.y, + } + } + + #[inline] + fn yzz(self) -> I16Vec3 { + I16Vec3 { + x: self.y, + y: self.z, + z: self.z, + } + } + + #[inline] + fn yzw(self) -> I16Vec3 { + I16Vec3 { + x: self.y, + y: self.z, + z: self.w, + } + } + + #[inline] + fn ywx(self) -> I16Vec3 { + I16Vec3 { + x: self.y, + y: self.w, + z: self.x, + } + } + + #[inline] + fn ywy(self) -> I16Vec3 { + I16Vec3 { + x: self.y, + y: self.w, + z: self.y, + } + } + + #[inline] + fn ywz(self) -> I16Vec3 { + I16Vec3 { + x: self.y, + y: self.w, + z: self.z, + } + } + + #[inline] + fn yww(self) -> I16Vec3 { + I16Vec3 { + x: self.y, + y: self.w, + z: self.w, + } + } + + #[inline] + fn zxx(self) -> I16Vec3 { + I16Vec3 { + x: self.z, + y: self.x, + z: self.x, + } + } + + #[inline] + fn zxy(self) -> I16Vec3 { + I16Vec3 { + x: self.z, + y: self.x, + z: self.y, + } + } + + #[inline] + fn zxz(self) -> I16Vec3 { + I16Vec3 { + x: self.z, + y: self.x, + z: self.z, + } + } + + #[inline] + fn zxw(self) -> I16Vec3 { + I16Vec3 { + x: self.z, + y: self.x, + z: self.w, + } + } + + #[inline] + fn zyx(self) -> I16Vec3 { + I16Vec3 { + x: self.z, + y: self.y, + z: self.x, + } + } + + #[inline] + fn zyy(self) -> I16Vec3 { + I16Vec3 { + x: self.z, + y: self.y, + z: self.y, + } + } + + #[inline] + fn zyz(self) -> I16Vec3 { + I16Vec3 { + x: self.z, + y: self.y, + z: self.z, + } + } + + #[inline] + fn zyw(self) -> I16Vec3 { + I16Vec3 { + x: self.z, + y: self.y, + z: self.w, + } + } + + #[inline] + fn zzx(self) -> I16Vec3 { + I16Vec3 { + x: self.z, + y: self.z, + z: self.x, + } + } + + #[inline] + fn zzy(self) -> I16Vec3 { + I16Vec3 { + x: self.z, + y: self.z, + z: self.y, + } + } + + #[inline] + fn zzz(self) -> I16Vec3 { + I16Vec3 { + x: self.z, + y: self.z, + z: self.z, + } + } + + #[inline] + fn zzw(self) -> I16Vec3 { + I16Vec3 
{ + x: self.z, + y: self.z, + z: self.w, + } + } + + #[inline] + fn zwx(self) -> I16Vec3 { + I16Vec3 { + x: self.z, + y: self.w, + z: self.x, + } + } + + #[inline] + fn zwy(self) -> I16Vec3 { + I16Vec3 { + x: self.z, + y: self.w, + z: self.y, + } + } + + #[inline] + fn zwz(self) -> I16Vec3 { + I16Vec3 { + x: self.z, + y: self.w, + z: self.z, + } + } + + #[inline] + fn zww(self) -> I16Vec3 { + I16Vec3 { + x: self.z, + y: self.w, + z: self.w, + } + } + + #[inline] + fn wxx(self) -> I16Vec3 { + I16Vec3 { + x: self.w, + y: self.x, + z: self.x, + } + } + + #[inline] + fn wxy(self) -> I16Vec3 { + I16Vec3 { + x: self.w, + y: self.x, + z: self.y, + } + } + + #[inline] + fn wxz(self) -> I16Vec3 { + I16Vec3 { + x: self.w, + y: self.x, + z: self.z, + } + } + + #[inline] + fn wxw(self) -> I16Vec3 { + I16Vec3 { + x: self.w, + y: self.x, + z: self.w, + } + } + + #[inline] + fn wyx(self) -> I16Vec3 { + I16Vec3 { + x: self.w, + y: self.y, + z: self.x, + } + } + + #[inline] + fn wyy(self) -> I16Vec3 { + I16Vec3 { + x: self.w, + y: self.y, + z: self.y, + } + } + + #[inline] + fn wyz(self) -> I16Vec3 { + I16Vec3 { + x: self.w, + y: self.y, + z: self.z, + } + } + + #[inline] + fn wyw(self) -> I16Vec3 { + I16Vec3 { + x: self.w, + y: self.y, + z: self.w, + } + } + + #[inline] + fn wzx(self) -> I16Vec3 { + I16Vec3 { + x: self.w, + y: self.z, + z: self.x, + } + } + + #[inline] + fn wzy(self) -> I16Vec3 { + I16Vec3 { + x: self.w, + y: self.z, + z: self.y, + } + } + + #[inline] + fn wzz(self) -> I16Vec3 { + I16Vec3 { + x: self.w, + y: self.z, + z: self.z, + } + } + + #[inline] + fn wzw(self) -> I16Vec3 { + I16Vec3 { + x: self.w, + y: self.z, + z: self.w, + } + } + + #[inline] + fn wwx(self) -> I16Vec3 { + I16Vec3 { + x: self.w, + y: self.w, + z: self.x, + } + } + + #[inline] + fn wwy(self) -> I16Vec3 { + I16Vec3 { + x: self.w, + y: self.w, + z: self.y, + } + } + + #[inline] + fn wwz(self) -> I16Vec3 { + I16Vec3 { + x: self.w, + y: self.w, + z: self.z, + } + } + + #[inline] + fn www(self) -> I16Vec3 { + I16Vec3 { + x: self.w, + y: self.w, + z: self.w, + } + } + + #[inline] + fn xxxx(self) -> I16Vec4 { + I16Vec4::new(self.x, self.x, self.x, self.x) + } + + #[inline] + fn xxxy(self) -> I16Vec4 { + I16Vec4::new(self.x, self.x, self.x, self.y) + } + + #[inline] + fn xxxz(self) -> I16Vec4 { + I16Vec4::new(self.x, self.x, self.x, self.z) + } + + #[inline] + fn xxxw(self) -> I16Vec4 { + I16Vec4::new(self.x, self.x, self.x, self.w) + } + + #[inline] + fn xxyx(self) -> I16Vec4 { + I16Vec4::new(self.x, self.x, self.y, self.x) + } + + #[inline] + fn xxyy(self) -> I16Vec4 { + I16Vec4::new(self.x, self.x, self.y, self.y) + } + + #[inline] + fn xxyz(self) -> I16Vec4 { + I16Vec4::new(self.x, self.x, self.y, self.z) + } + + #[inline] + fn xxyw(self) -> I16Vec4 { + I16Vec4::new(self.x, self.x, self.y, self.w) + } + + #[inline] + fn xxzx(self) -> I16Vec4 { + I16Vec4::new(self.x, self.x, self.z, self.x) + } + + #[inline] + fn xxzy(self) -> I16Vec4 { + I16Vec4::new(self.x, self.x, self.z, self.y) + } + + #[inline] + fn xxzz(self) -> I16Vec4 { + I16Vec4::new(self.x, self.x, self.z, self.z) + } + + #[inline] + fn xxzw(self) -> I16Vec4 { + I16Vec4::new(self.x, self.x, self.z, self.w) + } + + #[inline] + fn xxwx(self) -> I16Vec4 { + I16Vec4::new(self.x, self.x, self.w, self.x) + } + + #[inline] + fn xxwy(self) -> I16Vec4 { + I16Vec4::new(self.x, self.x, self.w, self.y) + } + + #[inline] + fn xxwz(self) -> I16Vec4 { + I16Vec4::new(self.x, self.x, self.w, self.z) + } + + #[inline] + fn xxww(self) -> I16Vec4 { + I16Vec4::new(self.x, self.x, 
self.w, self.w) + } + + #[inline] + fn xyxx(self) -> I16Vec4 { + I16Vec4::new(self.x, self.y, self.x, self.x) + } + + #[inline] + fn xyxy(self) -> I16Vec4 { + I16Vec4::new(self.x, self.y, self.x, self.y) + } + + #[inline] + fn xyxz(self) -> I16Vec4 { + I16Vec4::new(self.x, self.y, self.x, self.z) + } + + #[inline] + fn xyxw(self) -> I16Vec4 { + I16Vec4::new(self.x, self.y, self.x, self.w) + } + + #[inline] + fn xyyx(self) -> I16Vec4 { + I16Vec4::new(self.x, self.y, self.y, self.x) + } + + #[inline] + fn xyyy(self) -> I16Vec4 { + I16Vec4::new(self.x, self.y, self.y, self.y) + } + + #[inline] + fn xyyz(self) -> I16Vec4 { + I16Vec4::new(self.x, self.y, self.y, self.z) + } + + #[inline] + fn xyyw(self) -> I16Vec4 { + I16Vec4::new(self.x, self.y, self.y, self.w) + } + + #[inline] + fn xyzx(self) -> I16Vec4 { + I16Vec4::new(self.x, self.y, self.z, self.x) + } + + #[inline] + fn xyzy(self) -> I16Vec4 { + I16Vec4::new(self.x, self.y, self.z, self.y) + } + + #[inline] + fn xyzz(self) -> I16Vec4 { + I16Vec4::new(self.x, self.y, self.z, self.z) + } + + #[inline] + fn xyzw(self) -> I16Vec4 { + I16Vec4::new(self.x, self.y, self.z, self.w) + } + + #[inline] + fn xywx(self) -> I16Vec4 { + I16Vec4::new(self.x, self.y, self.w, self.x) + } + + #[inline] + fn xywy(self) -> I16Vec4 { + I16Vec4::new(self.x, self.y, self.w, self.y) + } + + #[inline] + fn xywz(self) -> I16Vec4 { + I16Vec4::new(self.x, self.y, self.w, self.z) + } + + #[inline] + fn xyww(self) -> I16Vec4 { + I16Vec4::new(self.x, self.y, self.w, self.w) + } + + #[inline] + fn xzxx(self) -> I16Vec4 { + I16Vec4::new(self.x, self.z, self.x, self.x) + } + + #[inline] + fn xzxy(self) -> I16Vec4 { + I16Vec4::new(self.x, self.z, self.x, self.y) + } + + #[inline] + fn xzxz(self) -> I16Vec4 { + I16Vec4::new(self.x, self.z, self.x, self.z) + } + + #[inline] + fn xzxw(self) -> I16Vec4 { + I16Vec4::new(self.x, self.z, self.x, self.w) + } + + #[inline] + fn xzyx(self) -> I16Vec4 { + I16Vec4::new(self.x, self.z, self.y, self.x) + } + + #[inline] + fn xzyy(self) -> I16Vec4 { + I16Vec4::new(self.x, self.z, self.y, self.y) + } + + #[inline] + fn xzyz(self) -> I16Vec4 { + I16Vec4::new(self.x, self.z, self.y, self.z) + } + + #[inline] + fn xzyw(self) -> I16Vec4 { + I16Vec4::new(self.x, self.z, self.y, self.w) + } + + #[inline] + fn xzzx(self) -> I16Vec4 { + I16Vec4::new(self.x, self.z, self.z, self.x) + } + + #[inline] + fn xzzy(self) -> I16Vec4 { + I16Vec4::new(self.x, self.z, self.z, self.y) + } + + #[inline] + fn xzzz(self) -> I16Vec4 { + I16Vec4::new(self.x, self.z, self.z, self.z) + } + + #[inline] + fn xzzw(self) -> I16Vec4 { + I16Vec4::new(self.x, self.z, self.z, self.w) + } + + #[inline] + fn xzwx(self) -> I16Vec4 { + I16Vec4::new(self.x, self.z, self.w, self.x) + } + + #[inline] + fn xzwy(self) -> I16Vec4 { + I16Vec4::new(self.x, self.z, self.w, self.y) + } + + #[inline] + fn xzwz(self) -> I16Vec4 { + I16Vec4::new(self.x, self.z, self.w, self.z) + } + + #[inline] + fn xzww(self) -> I16Vec4 { + I16Vec4::new(self.x, self.z, self.w, self.w) + } + + #[inline] + fn xwxx(self) -> I16Vec4 { + I16Vec4::new(self.x, self.w, self.x, self.x) + } + + #[inline] + fn xwxy(self) -> I16Vec4 { + I16Vec4::new(self.x, self.w, self.x, self.y) + } + + #[inline] + fn xwxz(self) -> I16Vec4 { + I16Vec4::new(self.x, self.w, self.x, self.z) + } + + #[inline] + fn xwxw(self) -> I16Vec4 { + I16Vec4::new(self.x, self.w, self.x, self.w) + } + + #[inline] + fn xwyx(self) -> I16Vec4 { + I16Vec4::new(self.x, self.w, self.y, self.x) + } + + #[inline] + fn xwyy(self) -> I16Vec4 { + 
I16Vec4::new(self.x, self.w, self.y, self.y) + } + + #[inline] + fn xwyz(self) -> I16Vec4 { + I16Vec4::new(self.x, self.w, self.y, self.z) + } + + #[inline] + fn xwyw(self) -> I16Vec4 { + I16Vec4::new(self.x, self.w, self.y, self.w) + } + + #[inline] + fn xwzx(self) -> I16Vec4 { + I16Vec4::new(self.x, self.w, self.z, self.x) + } + + #[inline] + fn xwzy(self) -> I16Vec4 { + I16Vec4::new(self.x, self.w, self.z, self.y) + } + + #[inline] + fn xwzz(self) -> I16Vec4 { + I16Vec4::new(self.x, self.w, self.z, self.z) + } + + #[inline] + fn xwzw(self) -> I16Vec4 { + I16Vec4::new(self.x, self.w, self.z, self.w) + } + + #[inline] + fn xwwx(self) -> I16Vec4 { + I16Vec4::new(self.x, self.w, self.w, self.x) + } + + #[inline] + fn xwwy(self) -> I16Vec4 { + I16Vec4::new(self.x, self.w, self.w, self.y) + } + + #[inline] + fn xwwz(self) -> I16Vec4 { + I16Vec4::new(self.x, self.w, self.w, self.z) + } + + #[inline] + fn xwww(self) -> I16Vec4 { + I16Vec4::new(self.x, self.w, self.w, self.w) + } + + #[inline] + fn yxxx(self) -> I16Vec4 { + I16Vec4::new(self.y, self.x, self.x, self.x) + } + + #[inline] + fn yxxy(self) -> I16Vec4 { + I16Vec4::new(self.y, self.x, self.x, self.y) + } + + #[inline] + fn yxxz(self) -> I16Vec4 { + I16Vec4::new(self.y, self.x, self.x, self.z) + } + + #[inline] + fn yxxw(self) -> I16Vec4 { + I16Vec4::new(self.y, self.x, self.x, self.w) + } + + #[inline] + fn yxyx(self) -> I16Vec4 { + I16Vec4::new(self.y, self.x, self.y, self.x) + } + + #[inline] + fn yxyy(self) -> I16Vec4 { + I16Vec4::new(self.y, self.x, self.y, self.y) + } + + #[inline] + fn yxyz(self) -> I16Vec4 { + I16Vec4::new(self.y, self.x, self.y, self.z) + } + + #[inline] + fn yxyw(self) -> I16Vec4 { + I16Vec4::new(self.y, self.x, self.y, self.w) + } + + #[inline] + fn yxzx(self) -> I16Vec4 { + I16Vec4::new(self.y, self.x, self.z, self.x) + } + + #[inline] + fn yxzy(self) -> I16Vec4 { + I16Vec4::new(self.y, self.x, self.z, self.y) + } + + #[inline] + fn yxzz(self) -> I16Vec4 { + I16Vec4::new(self.y, self.x, self.z, self.z) + } + + #[inline] + fn yxzw(self) -> I16Vec4 { + I16Vec4::new(self.y, self.x, self.z, self.w) + } + + #[inline] + fn yxwx(self) -> I16Vec4 { + I16Vec4::new(self.y, self.x, self.w, self.x) + } + + #[inline] + fn yxwy(self) -> I16Vec4 { + I16Vec4::new(self.y, self.x, self.w, self.y) + } + + #[inline] + fn yxwz(self) -> I16Vec4 { + I16Vec4::new(self.y, self.x, self.w, self.z) + } + + #[inline] + fn yxww(self) -> I16Vec4 { + I16Vec4::new(self.y, self.x, self.w, self.w) + } + + #[inline] + fn yyxx(self) -> I16Vec4 { + I16Vec4::new(self.y, self.y, self.x, self.x) + } + + #[inline] + fn yyxy(self) -> I16Vec4 { + I16Vec4::new(self.y, self.y, self.x, self.y) + } + + #[inline] + fn yyxz(self) -> I16Vec4 { + I16Vec4::new(self.y, self.y, self.x, self.z) + } + + #[inline] + fn yyxw(self) -> I16Vec4 { + I16Vec4::new(self.y, self.y, self.x, self.w) + } + + #[inline] + fn yyyx(self) -> I16Vec4 { + I16Vec4::new(self.y, self.y, self.y, self.x) + } + + #[inline] + fn yyyy(self) -> I16Vec4 { + I16Vec4::new(self.y, self.y, self.y, self.y) + } + + #[inline] + fn yyyz(self) -> I16Vec4 { + I16Vec4::new(self.y, self.y, self.y, self.z) + } + + #[inline] + fn yyyw(self) -> I16Vec4 { + I16Vec4::new(self.y, self.y, self.y, self.w) + } + + #[inline] + fn yyzx(self) -> I16Vec4 { + I16Vec4::new(self.y, self.y, self.z, self.x) + } + + #[inline] + fn yyzy(self) -> I16Vec4 { + I16Vec4::new(self.y, self.y, self.z, self.y) + } + + #[inline] + fn yyzz(self) -> I16Vec4 { + I16Vec4::new(self.y, self.y, self.z, self.z) + } + + #[inline] + fn 
yyzw(self) -> I16Vec4 { + I16Vec4::new(self.y, self.y, self.z, self.w) + } + + #[inline] + fn yywx(self) -> I16Vec4 { + I16Vec4::new(self.y, self.y, self.w, self.x) + } + + #[inline] + fn yywy(self) -> I16Vec4 { + I16Vec4::new(self.y, self.y, self.w, self.y) + } + + #[inline] + fn yywz(self) -> I16Vec4 { + I16Vec4::new(self.y, self.y, self.w, self.z) + } + + #[inline] + fn yyww(self) -> I16Vec4 { + I16Vec4::new(self.y, self.y, self.w, self.w) + } + + #[inline] + fn yzxx(self) -> I16Vec4 { + I16Vec4::new(self.y, self.z, self.x, self.x) + } + + #[inline] + fn yzxy(self) -> I16Vec4 { + I16Vec4::new(self.y, self.z, self.x, self.y) + } + + #[inline] + fn yzxz(self) -> I16Vec4 { + I16Vec4::new(self.y, self.z, self.x, self.z) + } + + #[inline] + fn yzxw(self) -> I16Vec4 { + I16Vec4::new(self.y, self.z, self.x, self.w) + } + + #[inline] + fn yzyx(self) -> I16Vec4 { + I16Vec4::new(self.y, self.z, self.y, self.x) + } + + #[inline] + fn yzyy(self) -> I16Vec4 { + I16Vec4::new(self.y, self.z, self.y, self.y) + } + + #[inline] + fn yzyz(self) -> I16Vec4 { + I16Vec4::new(self.y, self.z, self.y, self.z) + } + + #[inline] + fn yzyw(self) -> I16Vec4 { + I16Vec4::new(self.y, self.z, self.y, self.w) + } + + #[inline] + fn yzzx(self) -> I16Vec4 { + I16Vec4::new(self.y, self.z, self.z, self.x) + } + + #[inline] + fn yzzy(self) -> I16Vec4 { + I16Vec4::new(self.y, self.z, self.z, self.y) + } + + #[inline] + fn yzzz(self) -> I16Vec4 { + I16Vec4::new(self.y, self.z, self.z, self.z) + } + + #[inline] + fn yzzw(self) -> I16Vec4 { + I16Vec4::new(self.y, self.z, self.z, self.w) + } + + #[inline] + fn yzwx(self) -> I16Vec4 { + I16Vec4::new(self.y, self.z, self.w, self.x) + } + + #[inline] + fn yzwy(self) -> I16Vec4 { + I16Vec4::new(self.y, self.z, self.w, self.y) + } + + #[inline] + fn yzwz(self) -> I16Vec4 { + I16Vec4::new(self.y, self.z, self.w, self.z) + } + + #[inline] + fn yzww(self) -> I16Vec4 { + I16Vec4::new(self.y, self.z, self.w, self.w) + } + + #[inline] + fn ywxx(self) -> I16Vec4 { + I16Vec4::new(self.y, self.w, self.x, self.x) + } + + #[inline] + fn ywxy(self) -> I16Vec4 { + I16Vec4::new(self.y, self.w, self.x, self.y) + } + + #[inline] + fn ywxz(self) -> I16Vec4 { + I16Vec4::new(self.y, self.w, self.x, self.z) + } + + #[inline] + fn ywxw(self) -> I16Vec4 { + I16Vec4::new(self.y, self.w, self.x, self.w) + } + + #[inline] + fn ywyx(self) -> I16Vec4 { + I16Vec4::new(self.y, self.w, self.y, self.x) + } + + #[inline] + fn ywyy(self) -> I16Vec4 { + I16Vec4::new(self.y, self.w, self.y, self.y) + } + + #[inline] + fn ywyz(self) -> I16Vec4 { + I16Vec4::new(self.y, self.w, self.y, self.z) + } + + #[inline] + fn ywyw(self) -> I16Vec4 { + I16Vec4::new(self.y, self.w, self.y, self.w) + } + + #[inline] + fn ywzx(self) -> I16Vec4 { + I16Vec4::new(self.y, self.w, self.z, self.x) + } + + #[inline] + fn ywzy(self) -> I16Vec4 { + I16Vec4::new(self.y, self.w, self.z, self.y) + } + + #[inline] + fn ywzz(self) -> I16Vec4 { + I16Vec4::new(self.y, self.w, self.z, self.z) + } + + #[inline] + fn ywzw(self) -> I16Vec4 { + I16Vec4::new(self.y, self.w, self.z, self.w) + } + + #[inline] + fn ywwx(self) -> I16Vec4 { + I16Vec4::new(self.y, self.w, self.w, self.x) + } + + #[inline] + fn ywwy(self) -> I16Vec4 { + I16Vec4::new(self.y, self.w, self.w, self.y) + } + + #[inline] + fn ywwz(self) -> I16Vec4 { + I16Vec4::new(self.y, self.w, self.w, self.z) + } + + #[inline] + fn ywww(self) -> I16Vec4 { + I16Vec4::new(self.y, self.w, self.w, self.w) + } + + #[inline] + fn zxxx(self) -> I16Vec4 { + I16Vec4::new(self.z, self.x, self.x, self.x) + } + 
+ #[inline] + fn zxxy(self) -> I16Vec4 { + I16Vec4::new(self.z, self.x, self.x, self.y) + } + + #[inline] + fn zxxz(self) -> I16Vec4 { + I16Vec4::new(self.z, self.x, self.x, self.z) + } + + #[inline] + fn zxxw(self) -> I16Vec4 { + I16Vec4::new(self.z, self.x, self.x, self.w) + } + + #[inline] + fn zxyx(self) -> I16Vec4 { + I16Vec4::new(self.z, self.x, self.y, self.x) + } + + #[inline] + fn zxyy(self) -> I16Vec4 { + I16Vec4::new(self.z, self.x, self.y, self.y) + } + + #[inline] + fn zxyz(self) -> I16Vec4 { + I16Vec4::new(self.z, self.x, self.y, self.z) + } + + #[inline] + fn zxyw(self) -> I16Vec4 { + I16Vec4::new(self.z, self.x, self.y, self.w) + } + + #[inline] + fn zxzx(self) -> I16Vec4 { + I16Vec4::new(self.z, self.x, self.z, self.x) + } + + #[inline] + fn zxzy(self) -> I16Vec4 { + I16Vec4::new(self.z, self.x, self.z, self.y) + } + + #[inline] + fn zxzz(self) -> I16Vec4 { + I16Vec4::new(self.z, self.x, self.z, self.z) + } + + #[inline] + fn zxzw(self) -> I16Vec4 { + I16Vec4::new(self.z, self.x, self.z, self.w) + } + + #[inline] + fn zxwx(self) -> I16Vec4 { + I16Vec4::new(self.z, self.x, self.w, self.x) + } + + #[inline] + fn zxwy(self) -> I16Vec4 { + I16Vec4::new(self.z, self.x, self.w, self.y) + } + + #[inline] + fn zxwz(self) -> I16Vec4 { + I16Vec4::new(self.z, self.x, self.w, self.z) + } + + #[inline] + fn zxww(self) -> I16Vec4 { + I16Vec4::new(self.z, self.x, self.w, self.w) + } + + #[inline] + fn zyxx(self) -> I16Vec4 { + I16Vec4::new(self.z, self.y, self.x, self.x) + } + + #[inline] + fn zyxy(self) -> I16Vec4 { + I16Vec4::new(self.z, self.y, self.x, self.y) + } + + #[inline] + fn zyxz(self) -> I16Vec4 { + I16Vec4::new(self.z, self.y, self.x, self.z) + } + + #[inline] + fn zyxw(self) -> I16Vec4 { + I16Vec4::new(self.z, self.y, self.x, self.w) + } + + #[inline] + fn zyyx(self) -> I16Vec4 { + I16Vec4::new(self.z, self.y, self.y, self.x) + } + + #[inline] + fn zyyy(self) -> I16Vec4 { + I16Vec4::new(self.z, self.y, self.y, self.y) + } + + #[inline] + fn zyyz(self) -> I16Vec4 { + I16Vec4::new(self.z, self.y, self.y, self.z) + } + + #[inline] + fn zyyw(self) -> I16Vec4 { + I16Vec4::new(self.z, self.y, self.y, self.w) + } + + #[inline] + fn zyzx(self) -> I16Vec4 { + I16Vec4::new(self.z, self.y, self.z, self.x) + } + + #[inline] + fn zyzy(self) -> I16Vec4 { + I16Vec4::new(self.z, self.y, self.z, self.y) + } + + #[inline] + fn zyzz(self) -> I16Vec4 { + I16Vec4::new(self.z, self.y, self.z, self.z) + } + + #[inline] + fn zyzw(self) -> I16Vec4 { + I16Vec4::new(self.z, self.y, self.z, self.w) + } + + #[inline] + fn zywx(self) -> I16Vec4 { + I16Vec4::new(self.z, self.y, self.w, self.x) + } + + #[inline] + fn zywy(self) -> I16Vec4 { + I16Vec4::new(self.z, self.y, self.w, self.y) + } + + #[inline] + fn zywz(self) -> I16Vec4 { + I16Vec4::new(self.z, self.y, self.w, self.z) + } + + #[inline] + fn zyww(self) -> I16Vec4 { + I16Vec4::new(self.z, self.y, self.w, self.w) + } + + #[inline] + fn zzxx(self) -> I16Vec4 { + I16Vec4::new(self.z, self.z, self.x, self.x) + } + + #[inline] + fn zzxy(self) -> I16Vec4 { + I16Vec4::new(self.z, self.z, self.x, self.y) + } + + #[inline] + fn zzxz(self) -> I16Vec4 { + I16Vec4::new(self.z, self.z, self.x, self.z) + } + + #[inline] + fn zzxw(self) -> I16Vec4 { + I16Vec4::new(self.z, self.z, self.x, self.w) + } + + #[inline] + fn zzyx(self) -> I16Vec4 { + I16Vec4::new(self.z, self.z, self.y, self.x) + } + + #[inline] + fn zzyy(self) -> I16Vec4 { + I16Vec4::new(self.z, self.z, self.y, self.y) + } + + #[inline] + fn zzyz(self) -> I16Vec4 { + I16Vec4::new(self.z, self.z, 
self.y, self.z) + } + + #[inline] + fn zzyw(self) -> I16Vec4 { + I16Vec4::new(self.z, self.z, self.y, self.w) + } + + #[inline] + fn zzzx(self) -> I16Vec4 { + I16Vec4::new(self.z, self.z, self.z, self.x) + } + + #[inline] + fn zzzy(self) -> I16Vec4 { + I16Vec4::new(self.z, self.z, self.z, self.y) + } + + #[inline] + fn zzzz(self) -> I16Vec4 { + I16Vec4::new(self.z, self.z, self.z, self.z) + } + + #[inline] + fn zzzw(self) -> I16Vec4 { + I16Vec4::new(self.z, self.z, self.z, self.w) + } + + #[inline] + fn zzwx(self) -> I16Vec4 { + I16Vec4::new(self.z, self.z, self.w, self.x) + } + + #[inline] + fn zzwy(self) -> I16Vec4 { + I16Vec4::new(self.z, self.z, self.w, self.y) + } + + #[inline] + fn zzwz(self) -> I16Vec4 { + I16Vec4::new(self.z, self.z, self.w, self.z) + } + + #[inline] + fn zzww(self) -> I16Vec4 { + I16Vec4::new(self.z, self.z, self.w, self.w) + } + + #[inline] + fn zwxx(self) -> I16Vec4 { + I16Vec4::new(self.z, self.w, self.x, self.x) + } + + #[inline] + fn zwxy(self) -> I16Vec4 { + I16Vec4::new(self.z, self.w, self.x, self.y) + } + + #[inline] + fn zwxz(self) -> I16Vec4 { + I16Vec4::new(self.z, self.w, self.x, self.z) + } + + #[inline] + fn zwxw(self) -> I16Vec4 { + I16Vec4::new(self.z, self.w, self.x, self.w) + } + + #[inline] + fn zwyx(self) -> I16Vec4 { + I16Vec4::new(self.z, self.w, self.y, self.x) + } + + #[inline] + fn zwyy(self) -> I16Vec4 { + I16Vec4::new(self.z, self.w, self.y, self.y) + } + + #[inline] + fn zwyz(self) -> I16Vec4 { + I16Vec4::new(self.z, self.w, self.y, self.z) + } + + #[inline] + fn zwyw(self) -> I16Vec4 { + I16Vec4::new(self.z, self.w, self.y, self.w) + } + + #[inline] + fn zwzx(self) -> I16Vec4 { + I16Vec4::new(self.z, self.w, self.z, self.x) + } + + #[inline] + fn zwzy(self) -> I16Vec4 { + I16Vec4::new(self.z, self.w, self.z, self.y) + } + + #[inline] + fn zwzz(self) -> I16Vec4 { + I16Vec4::new(self.z, self.w, self.z, self.z) + } + + #[inline] + fn zwzw(self) -> I16Vec4 { + I16Vec4::new(self.z, self.w, self.z, self.w) + } + + #[inline] + fn zwwx(self) -> I16Vec4 { + I16Vec4::new(self.z, self.w, self.w, self.x) + } + + #[inline] + fn zwwy(self) -> I16Vec4 { + I16Vec4::new(self.z, self.w, self.w, self.y) + } + + #[inline] + fn zwwz(self) -> I16Vec4 { + I16Vec4::new(self.z, self.w, self.w, self.z) + } + + #[inline] + fn zwww(self) -> I16Vec4 { + I16Vec4::new(self.z, self.w, self.w, self.w) + } + + #[inline] + fn wxxx(self) -> I16Vec4 { + I16Vec4::new(self.w, self.x, self.x, self.x) + } + + #[inline] + fn wxxy(self) -> I16Vec4 { + I16Vec4::new(self.w, self.x, self.x, self.y) + } + + #[inline] + fn wxxz(self) -> I16Vec4 { + I16Vec4::new(self.w, self.x, self.x, self.z) + } + + #[inline] + fn wxxw(self) -> I16Vec4 { + I16Vec4::new(self.w, self.x, self.x, self.w) + } + + #[inline] + fn wxyx(self) -> I16Vec4 { + I16Vec4::new(self.w, self.x, self.y, self.x) + } + + #[inline] + fn wxyy(self) -> I16Vec4 { + I16Vec4::new(self.w, self.x, self.y, self.y) + } + + #[inline] + fn wxyz(self) -> I16Vec4 { + I16Vec4::new(self.w, self.x, self.y, self.z) + } + + #[inline] + fn wxyw(self) -> I16Vec4 { + I16Vec4::new(self.w, self.x, self.y, self.w) + } + + #[inline] + fn wxzx(self) -> I16Vec4 { + I16Vec4::new(self.w, self.x, self.z, self.x) + } + + #[inline] + fn wxzy(self) -> I16Vec4 { + I16Vec4::new(self.w, self.x, self.z, self.y) + } + + #[inline] + fn wxzz(self) -> I16Vec4 { + I16Vec4::new(self.w, self.x, self.z, self.z) + } + + #[inline] + fn wxzw(self) -> I16Vec4 { + I16Vec4::new(self.w, self.x, self.z, self.w) + } + + #[inline] + fn wxwx(self) -> I16Vec4 { + 
I16Vec4::new(self.w, self.x, self.w, self.x) + } + + #[inline] + fn wxwy(self) -> I16Vec4 { + I16Vec4::new(self.w, self.x, self.w, self.y) + } + + #[inline] + fn wxwz(self) -> I16Vec4 { + I16Vec4::new(self.w, self.x, self.w, self.z) + } + + #[inline] + fn wxww(self) -> I16Vec4 { + I16Vec4::new(self.w, self.x, self.w, self.w) + } + + #[inline] + fn wyxx(self) -> I16Vec4 { + I16Vec4::new(self.w, self.y, self.x, self.x) + } + + #[inline] + fn wyxy(self) -> I16Vec4 { + I16Vec4::new(self.w, self.y, self.x, self.y) + } + + #[inline] + fn wyxz(self) -> I16Vec4 { + I16Vec4::new(self.w, self.y, self.x, self.z) + } + + #[inline] + fn wyxw(self) -> I16Vec4 { + I16Vec4::new(self.w, self.y, self.x, self.w) + } + + #[inline] + fn wyyx(self) -> I16Vec4 { + I16Vec4::new(self.w, self.y, self.y, self.x) + } + + #[inline] + fn wyyy(self) -> I16Vec4 { + I16Vec4::new(self.w, self.y, self.y, self.y) + } + + #[inline] + fn wyyz(self) -> I16Vec4 { + I16Vec4::new(self.w, self.y, self.y, self.z) + } + + #[inline] + fn wyyw(self) -> I16Vec4 { + I16Vec4::new(self.w, self.y, self.y, self.w) + } + + #[inline] + fn wyzx(self) -> I16Vec4 { + I16Vec4::new(self.w, self.y, self.z, self.x) + } + + #[inline] + fn wyzy(self) -> I16Vec4 { + I16Vec4::new(self.w, self.y, self.z, self.y) + } + + #[inline] + fn wyzz(self) -> I16Vec4 { + I16Vec4::new(self.w, self.y, self.z, self.z) + } + + #[inline] + fn wyzw(self) -> I16Vec4 { + I16Vec4::new(self.w, self.y, self.z, self.w) + } + + #[inline] + fn wywx(self) -> I16Vec4 { + I16Vec4::new(self.w, self.y, self.w, self.x) + } + + #[inline] + fn wywy(self) -> I16Vec4 { + I16Vec4::new(self.w, self.y, self.w, self.y) + } + + #[inline] + fn wywz(self) -> I16Vec4 { + I16Vec4::new(self.w, self.y, self.w, self.z) + } + + #[inline] + fn wyww(self) -> I16Vec4 { + I16Vec4::new(self.w, self.y, self.w, self.w) + } + + #[inline] + fn wzxx(self) -> I16Vec4 { + I16Vec4::new(self.w, self.z, self.x, self.x) + } + + #[inline] + fn wzxy(self) -> I16Vec4 { + I16Vec4::new(self.w, self.z, self.x, self.y) + } + + #[inline] + fn wzxz(self) -> I16Vec4 { + I16Vec4::new(self.w, self.z, self.x, self.z) + } + + #[inline] + fn wzxw(self) -> I16Vec4 { + I16Vec4::new(self.w, self.z, self.x, self.w) + } + + #[inline] + fn wzyx(self) -> I16Vec4 { + I16Vec4::new(self.w, self.z, self.y, self.x) + } + + #[inline] + fn wzyy(self) -> I16Vec4 { + I16Vec4::new(self.w, self.z, self.y, self.y) + } + + #[inline] + fn wzyz(self) -> I16Vec4 { + I16Vec4::new(self.w, self.z, self.y, self.z) + } + + #[inline] + fn wzyw(self) -> I16Vec4 { + I16Vec4::new(self.w, self.z, self.y, self.w) + } + + #[inline] + fn wzzx(self) -> I16Vec4 { + I16Vec4::new(self.w, self.z, self.z, self.x) + } + + #[inline] + fn wzzy(self) -> I16Vec4 { + I16Vec4::new(self.w, self.z, self.z, self.y) + } + + #[inline] + fn wzzz(self) -> I16Vec4 { + I16Vec4::new(self.w, self.z, self.z, self.z) + } + + #[inline] + fn wzzw(self) -> I16Vec4 { + I16Vec4::new(self.w, self.z, self.z, self.w) + } + + #[inline] + fn wzwx(self) -> I16Vec4 { + I16Vec4::new(self.w, self.z, self.w, self.x) + } + + #[inline] + fn wzwy(self) -> I16Vec4 { + I16Vec4::new(self.w, self.z, self.w, self.y) + } + + #[inline] + fn wzwz(self) -> I16Vec4 { + I16Vec4::new(self.w, self.z, self.w, self.z) + } + + #[inline] + fn wzww(self) -> I16Vec4 { + I16Vec4::new(self.w, self.z, self.w, self.w) + } + + #[inline] + fn wwxx(self) -> I16Vec4 { + I16Vec4::new(self.w, self.w, self.x, self.x) + } + + #[inline] + fn wwxy(self) -> I16Vec4 { + I16Vec4::new(self.w, self.w, self.x, self.y) + } + + #[inline] + fn 
wwxz(self) -> I16Vec4 { + I16Vec4::new(self.w, self.w, self.x, self.z) + } + + #[inline] + fn wwxw(self) -> I16Vec4 { + I16Vec4::new(self.w, self.w, self.x, self.w) + } + + #[inline] + fn wwyx(self) -> I16Vec4 { + I16Vec4::new(self.w, self.w, self.y, self.x) + } + + #[inline] + fn wwyy(self) -> I16Vec4 { + I16Vec4::new(self.w, self.w, self.y, self.y) + } + + #[inline] + fn wwyz(self) -> I16Vec4 { + I16Vec4::new(self.w, self.w, self.y, self.z) + } + + #[inline] + fn wwyw(self) -> I16Vec4 { + I16Vec4::new(self.w, self.w, self.y, self.w) + } + + #[inline] + fn wwzx(self) -> I16Vec4 { + I16Vec4::new(self.w, self.w, self.z, self.x) + } + + #[inline] + fn wwzy(self) -> I16Vec4 { + I16Vec4::new(self.w, self.w, self.z, self.y) + } + + #[inline] + fn wwzz(self) -> I16Vec4 { + I16Vec4::new(self.w, self.w, self.z, self.z) + } + + #[inline] + fn wwzw(self) -> I16Vec4 { + I16Vec4::new(self.w, self.w, self.z, self.w) + } + + #[inline] + fn wwwx(self) -> I16Vec4 { + I16Vec4::new(self.w, self.w, self.w, self.x) + } + + #[inline] + fn wwwy(self) -> I16Vec4 { + I16Vec4::new(self.w, self.w, self.w, self.y) + } + + #[inline] + fn wwwz(self) -> I16Vec4 { + I16Vec4::new(self.w, self.w, self.w, self.z) + } + + #[inline] + fn wwww(self) -> I16Vec4 { + I16Vec4::new(self.w, self.w, self.w, self.w) + } +} diff --git a/src/swizzles/u16vec2_impl.rs b/src/swizzles/u16vec2_impl.rs new file mode 100644 index 00000000..a556f0fe --- /dev/null +++ b/src/swizzles/u16vec2_impl.rs @@ -0,0 +1,193 @@ +// Generated from swizzle_impl.rs.tera template. Edit the template, not the generated file. + +use crate::{U16Vec2, U16Vec3, U16Vec4, Vec2Swizzles}; + +impl Vec2Swizzles for U16Vec2 { + type Vec3 = U16Vec3; + + type Vec4 = U16Vec4; + + #[inline] + fn xx(self) -> U16Vec2 { + U16Vec2 { + x: self.x, + y: self.x, + } + } + + #[inline] + fn xy(self) -> U16Vec2 { + U16Vec2 { + x: self.x, + y: self.y, + } + } + + #[inline] + fn yx(self) -> U16Vec2 { + U16Vec2 { + x: self.y, + y: self.x, + } + } + + #[inline] + fn yy(self) -> U16Vec2 { + U16Vec2 { + x: self.y, + y: self.y, + } + } + + #[inline] + fn xxx(self) -> U16Vec3 { + U16Vec3 { + x: self.x, + y: self.x, + z: self.x, + } + } + + #[inline] + fn xxy(self) -> U16Vec3 { + U16Vec3 { + x: self.x, + y: self.x, + z: self.y, + } + } + + #[inline] + fn xyx(self) -> U16Vec3 { + U16Vec3 { + x: self.x, + y: self.y, + z: self.x, + } + } + + #[inline] + fn xyy(self) -> U16Vec3 { + U16Vec3 { + x: self.x, + y: self.y, + z: self.y, + } + } + + #[inline] + fn yxx(self) -> U16Vec3 { + U16Vec3 { + x: self.y, + y: self.x, + z: self.x, + } + } + + #[inline] + fn yxy(self) -> U16Vec3 { + U16Vec3 { + x: self.y, + y: self.x, + z: self.y, + } + } + + #[inline] + fn yyx(self) -> U16Vec3 { + U16Vec3 { + x: self.y, + y: self.y, + z: self.x, + } + } + + #[inline] + fn yyy(self) -> U16Vec3 { + U16Vec3 { + x: self.y, + y: self.y, + z: self.y, + } + } + + #[inline] + fn xxxx(self) -> U16Vec4 { + U16Vec4::new(self.x, self.x, self.x, self.x) + } + + #[inline] + fn xxxy(self) -> U16Vec4 { + U16Vec4::new(self.x, self.x, self.x, self.y) + } + + #[inline] + fn xxyx(self) -> U16Vec4 { + U16Vec4::new(self.x, self.x, self.y, self.x) + } + + #[inline] + fn xxyy(self) -> U16Vec4 { + U16Vec4::new(self.x, self.x, self.y, self.y) + } + + #[inline] + fn xyxx(self) -> U16Vec4 { + U16Vec4::new(self.x, self.y, self.x, self.x) + } + + #[inline] + fn xyxy(self) -> U16Vec4 { + U16Vec4::new(self.x, self.y, self.x, self.y) + } + + #[inline] + fn xyyx(self) -> U16Vec4 { + U16Vec4::new(self.x, self.y, self.y, self.x) + } + + #[inline] + fn 
xyyy(self) -> U16Vec4 { + U16Vec4::new(self.x, self.y, self.y, self.y) + } + + #[inline] + fn yxxx(self) -> U16Vec4 { + U16Vec4::new(self.y, self.x, self.x, self.x) + } + + #[inline] + fn yxxy(self) -> U16Vec4 { + U16Vec4::new(self.y, self.x, self.x, self.y) + } + + #[inline] + fn yxyx(self) -> U16Vec4 { + U16Vec4::new(self.y, self.x, self.y, self.x) + } + + #[inline] + fn yxyy(self) -> U16Vec4 { + U16Vec4::new(self.y, self.x, self.y, self.y) + } + + #[inline] + fn yyxx(self) -> U16Vec4 { + U16Vec4::new(self.y, self.y, self.x, self.x) + } + + #[inline] + fn yyxy(self) -> U16Vec4 { + U16Vec4::new(self.y, self.y, self.x, self.y) + } + + #[inline] + fn yyyx(self) -> U16Vec4 { + U16Vec4::new(self.y, self.y, self.y, self.x) + } + + #[inline] + fn yyyy(self) -> U16Vec4 { + U16Vec4::new(self.y, self.y, self.y, self.y) + } +} diff --git a/src/swizzles/u16vec3_impl.rs b/src/swizzles/u16vec3_impl.rs new file mode 100644 index 00000000..4fd95986 --- /dev/null +++ b/src/swizzles/u16vec3_impl.rs @@ -0,0 +1,729 @@ +// Generated from swizzle_impl.rs.tera template. Edit the template, not the generated file. + +use crate::{U16Vec2, U16Vec3, U16Vec4, Vec3Swizzles}; + +impl Vec3Swizzles for U16Vec3 { + type Vec2 = U16Vec2; + + type Vec4 = U16Vec4; + + #[inline] + fn xx(self) -> U16Vec2 { + U16Vec2 { + x: self.x, + y: self.x, + } + } + + #[inline] + fn xy(self) -> U16Vec2 { + U16Vec2 { + x: self.x, + y: self.y, + } + } + + #[inline] + fn xz(self) -> U16Vec2 { + U16Vec2 { + x: self.x, + y: self.z, + } + } + + #[inline] + fn yx(self) -> U16Vec2 { + U16Vec2 { + x: self.y, + y: self.x, + } + } + + #[inline] + fn yy(self) -> U16Vec2 { + U16Vec2 { + x: self.y, + y: self.y, + } + } + + #[inline] + fn yz(self) -> U16Vec2 { + U16Vec2 { + x: self.y, + y: self.z, + } + } + + #[inline] + fn zx(self) -> U16Vec2 { + U16Vec2 { + x: self.z, + y: self.x, + } + } + + #[inline] + fn zy(self) -> U16Vec2 { + U16Vec2 { + x: self.z, + y: self.y, + } + } + + #[inline] + fn zz(self) -> U16Vec2 { + U16Vec2 { + x: self.z, + y: self.z, + } + } + + #[inline] + fn xxx(self) -> U16Vec3 { + U16Vec3 { + x: self.x, + y: self.x, + z: self.x, + } + } + + #[inline] + fn xxy(self) -> U16Vec3 { + U16Vec3 { + x: self.x, + y: self.x, + z: self.y, + } + } + + #[inline] + fn xxz(self) -> U16Vec3 { + U16Vec3 { + x: self.x, + y: self.x, + z: self.z, + } + } + + #[inline] + fn xyx(self) -> U16Vec3 { + U16Vec3 { + x: self.x, + y: self.y, + z: self.x, + } + } + + #[inline] + fn xyy(self) -> U16Vec3 { + U16Vec3 { + x: self.x, + y: self.y, + z: self.y, + } + } + + #[inline] + fn xyz(self) -> U16Vec3 { + U16Vec3 { + x: self.x, + y: self.y, + z: self.z, + } + } + + #[inline] + fn xzx(self) -> U16Vec3 { + U16Vec3 { + x: self.x, + y: self.z, + z: self.x, + } + } + + #[inline] + fn xzy(self) -> U16Vec3 { + U16Vec3 { + x: self.x, + y: self.z, + z: self.y, + } + } + + #[inline] + fn xzz(self) -> U16Vec3 { + U16Vec3 { + x: self.x, + y: self.z, + z: self.z, + } + } + + #[inline] + fn yxx(self) -> U16Vec3 { + U16Vec3 { + x: self.y, + y: self.x, + z: self.x, + } + } + + #[inline] + fn yxy(self) -> U16Vec3 { + U16Vec3 { + x: self.y, + y: self.x, + z: self.y, + } + } + + #[inline] + fn yxz(self) -> U16Vec3 { + U16Vec3 { + x: self.y, + y: self.x, + z: self.z, + } + } + + #[inline] + fn yyx(self) -> U16Vec3 { + U16Vec3 { + x: self.y, + y: self.y, + z: self.x, + } + } + + #[inline] + fn yyy(self) -> U16Vec3 { + U16Vec3 { + x: self.y, + y: self.y, + z: self.y, + } + } + + #[inline] + fn yyz(self) -> U16Vec3 { + U16Vec3 { + x: self.y, + y: self.y, + z: self.z, + } + } + + 
#[inline] + fn yzx(self) -> U16Vec3 { + U16Vec3 { + x: self.y, + y: self.z, + z: self.x, + } + } + + #[inline] + fn yzy(self) -> U16Vec3 { + U16Vec3 { + x: self.y, + y: self.z, + z: self.y, + } + } + + #[inline] + fn yzz(self) -> U16Vec3 { + U16Vec3 { + x: self.y, + y: self.z, + z: self.z, + } + } + + #[inline] + fn zxx(self) -> U16Vec3 { + U16Vec3 { + x: self.z, + y: self.x, + z: self.x, + } + } + + #[inline] + fn zxy(self) -> U16Vec3 { + U16Vec3 { + x: self.z, + y: self.x, + z: self.y, + } + } + + #[inline] + fn zxz(self) -> U16Vec3 { + U16Vec3 { + x: self.z, + y: self.x, + z: self.z, + } + } + + #[inline] + fn zyx(self) -> U16Vec3 { + U16Vec3 { + x: self.z, + y: self.y, + z: self.x, + } + } + + #[inline] + fn zyy(self) -> U16Vec3 { + U16Vec3 { + x: self.z, + y: self.y, + z: self.y, + } + } + + #[inline] + fn zyz(self) -> U16Vec3 { + U16Vec3 { + x: self.z, + y: self.y, + z: self.z, + } + } + + #[inline] + fn zzx(self) -> U16Vec3 { + U16Vec3 { + x: self.z, + y: self.z, + z: self.x, + } + } + + #[inline] + fn zzy(self) -> U16Vec3 { + U16Vec3 { + x: self.z, + y: self.z, + z: self.y, + } + } + + #[inline] + fn zzz(self) -> U16Vec3 { + U16Vec3 { + x: self.z, + y: self.z, + z: self.z, + } + } + + #[inline] + fn xxxx(self) -> U16Vec4 { + U16Vec4::new(self.x, self.x, self.x, self.x) + } + + #[inline] + fn xxxy(self) -> U16Vec4 { + U16Vec4::new(self.x, self.x, self.x, self.y) + } + + #[inline] + fn xxxz(self) -> U16Vec4 { + U16Vec4::new(self.x, self.x, self.x, self.z) + } + + #[inline] + fn xxyx(self) -> U16Vec4 { + U16Vec4::new(self.x, self.x, self.y, self.x) + } + + #[inline] + fn xxyy(self) -> U16Vec4 { + U16Vec4::new(self.x, self.x, self.y, self.y) + } + + #[inline] + fn xxyz(self) -> U16Vec4 { + U16Vec4::new(self.x, self.x, self.y, self.z) + } + + #[inline] + fn xxzx(self) -> U16Vec4 { + U16Vec4::new(self.x, self.x, self.z, self.x) + } + + #[inline] + fn xxzy(self) -> U16Vec4 { + U16Vec4::new(self.x, self.x, self.z, self.y) + } + + #[inline] + fn xxzz(self) -> U16Vec4 { + U16Vec4::new(self.x, self.x, self.z, self.z) + } + + #[inline] + fn xyxx(self) -> U16Vec4 { + U16Vec4::new(self.x, self.y, self.x, self.x) + } + + #[inline] + fn xyxy(self) -> U16Vec4 { + U16Vec4::new(self.x, self.y, self.x, self.y) + } + + #[inline] + fn xyxz(self) -> U16Vec4 { + U16Vec4::new(self.x, self.y, self.x, self.z) + } + + #[inline] + fn xyyx(self) -> U16Vec4 { + U16Vec4::new(self.x, self.y, self.y, self.x) + } + + #[inline] + fn xyyy(self) -> U16Vec4 { + U16Vec4::new(self.x, self.y, self.y, self.y) + } + + #[inline] + fn xyyz(self) -> U16Vec4 { + U16Vec4::new(self.x, self.y, self.y, self.z) + } + + #[inline] + fn xyzx(self) -> U16Vec4 { + U16Vec4::new(self.x, self.y, self.z, self.x) + } + + #[inline] + fn xyzy(self) -> U16Vec4 { + U16Vec4::new(self.x, self.y, self.z, self.y) + } + + #[inline] + fn xyzz(self) -> U16Vec4 { + U16Vec4::new(self.x, self.y, self.z, self.z) + } + + #[inline] + fn xzxx(self) -> U16Vec4 { + U16Vec4::new(self.x, self.z, self.x, self.x) + } + + #[inline] + fn xzxy(self) -> U16Vec4 { + U16Vec4::new(self.x, self.z, self.x, self.y) + } + + #[inline] + fn xzxz(self) -> U16Vec4 { + U16Vec4::new(self.x, self.z, self.x, self.z) + } + + #[inline] + fn xzyx(self) -> U16Vec4 { + U16Vec4::new(self.x, self.z, self.y, self.x) + } + + #[inline] + fn xzyy(self) -> U16Vec4 { + U16Vec4::new(self.x, self.z, self.y, self.y) + } + + #[inline] + fn xzyz(self) -> U16Vec4 { + U16Vec4::new(self.x, self.z, self.y, self.z) + } + + #[inline] + fn xzzx(self) -> U16Vec4 { + U16Vec4::new(self.x, self.z, self.z, self.x) 
+ } + + #[inline] + fn xzzy(self) -> U16Vec4 { + U16Vec4::new(self.x, self.z, self.z, self.y) + } + + #[inline] + fn xzzz(self) -> U16Vec4 { + U16Vec4::new(self.x, self.z, self.z, self.z) + } + + #[inline] + fn yxxx(self) -> U16Vec4 { + U16Vec4::new(self.y, self.x, self.x, self.x) + } + + #[inline] + fn yxxy(self) -> U16Vec4 { + U16Vec4::new(self.y, self.x, self.x, self.y) + } + + #[inline] + fn yxxz(self) -> U16Vec4 { + U16Vec4::new(self.y, self.x, self.x, self.z) + } + + #[inline] + fn yxyx(self) -> U16Vec4 { + U16Vec4::new(self.y, self.x, self.y, self.x) + } + + #[inline] + fn yxyy(self) -> U16Vec4 { + U16Vec4::new(self.y, self.x, self.y, self.y) + } + + #[inline] + fn yxyz(self) -> U16Vec4 { + U16Vec4::new(self.y, self.x, self.y, self.z) + } + + #[inline] + fn yxzx(self) -> U16Vec4 { + U16Vec4::new(self.y, self.x, self.z, self.x) + } + + #[inline] + fn yxzy(self) -> U16Vec4 { + U16Vec4::new(self.y, self.x, self.z, self.y) + } + + #[inline] + fn yxzz(self) -> U16Vec4 { + U16Vec4::new(self.y, self.x, self.z, self.z) + } + + #[inline] + fn yyxx(self) -> U16Vec4 { + U16Vec4::new(self.y, self.y, self.x, self.x) + } + + #[inline] + fn yyxy(self) -> U16Vec4 { + U16Vec4::new(self.y, self.y, self.x, self.y) + } + + #[inline] + fn yyxz(self) -> U16Vec4 { + U16Vec4::new(self.y, self.y, self.x, self.z) + } + + #[inline] + fn yyyx(self) -> U16Vec4 { + U16Vec4::new(self.y, self.y, self.y, self.x) + } + + #[inline] + fn yyyy(self) -> U16Vec4 { + U16Vec4::new(self.y, self.y, self.y, self.y) + } + + #[inline] + fn yyyz(self) -> U16Vec4 { + U16Vec4::new(self.y, self.y, self.y, self.z) + } + + #[inline] + fn yyzx(self) -> U16Vec4 { + U16Vec4::new(self.y, self.y, self.z, self.x) + } + + #[inline] + fn yyzy(self) -> U16Vec4 { + U16Vec4::new(self.y, self.y, self.z, self.y) + } + + #[inline] + fn yyzz(self) -> U16Vec4 { + U16Vec4::new(self.y, self.y, self.z, self.z) + } + + #[inline] + fn yzxx(self) -> U16Vec4 { + U16Vec4::new(self.y, self.z, self.x, self.x) + } + + #[inline] + fn yzxy(self) -> U16Vec4 { + U16Vec4::new(self.y, self.z, self.x, self.y) + } + + #[inline] + fn yzxz(self) -> U16Vec4 { + U16Vec4::new(self.y, self.z, self.x, self.z) + } + + #[inline] + fn yzyx(self) -> U16Vec4 { + U16Vec4::new(self.y, self.z, self.y, self.x) + } + + #[inline] + fn yzyy(self) -> U16Vec4 { + U16Vec4::new(self.y, self.z, self.y, self.y) + } + + #[inline] + fn yzyz(self) -> U16Vec4 { + U16Vec4::new(self.y, self.z, self.y, self.z) + } + + #[inline] + fn yzzx(self) -> U16Vec4 { + U16Vec4::new(self.y, self.z, self.z, self.x) + } + + #[inline] + fn yzzy(self) -> U16Vec4 { + U16Vec4::new(self.y, self.z, self.z, self.y) + } + + #[inline] + fn yzzz(self) -> U16Vec4 { + U16Vec4::new(self.y, self.z, self.z, self.z) + } + + #[inline] + fn zxxx(self) -> U16Vec4 { + U16Vec4::new(self.z, self.x, self.x, self.x) + } + + #[inline] + fn zxxy(self) -> U16Vec4 { + U16Vec4::new(self.z, self.x, self.x, self.y) + } + + #[inline] + fn zxxz(self) -> U16Vec4 { + U16Vec4::new(self.z, self.x, self.x, self.z) + } + + #[inline] + fn zxyx(self) -> U16Vec4 { + U16Vec4::new(self.z, self.x, self.y, self.x) + } + + #[inline] + fn zxyy(self) -> U16Vec4 { + U16Vec4::new(self.z, self.x, self.y, self.y) + } + + #[inline] + fn zxyz(self) -> U16Vec4 { + U16Vec4::new(self.z, self.x, self.y, self.z) + } + + #[inline] + fn zxzx(self) -> U16Vec4 { + U16Vec4::new(self.z, self.x, self.z, self.x) + } + + #[inline] + fn zxzy(self) -> U16Vec4 { + U16Vec4::new(self.z, self.x, self.z, self.y) + } + + #[inline] + fn zxzz(self) -> U16Vec4 { + U16Vec4::new(self.z, 
self.x, self.z, self.z) + } + + #[inline] + fn zyxx(self) -> U16Vec4 { + U16Vec4::new(self.z, self.y, self.x, self.x) + } + + #[inline] + fn zyxy(self) -> U16Vec4 { + U16Vec4::new(self.z, self.y, self.x, self.y) + } + + #[inline] + fn zyxz(self) -> U16Vec4 { + U16Vec4::new(self.z, self.y, self.x, self.z) + } + + #[inline] + fn zyyx(self) -> U16Vec4 { + U16Vec4::new(self.z, self.y, self.y, self.x) + } + + #[inline] + fn zyyy(self) -> U16Vec4 { + U16Vec4::new(self.z, self.y, self.y, self.y) + } + + #[inline] + fn zyyz(self) -> U16Vec4 { + U16Vec4::new(self.z, self.y, self.y, self.z) + } + + #[inline] + fn zyzx(self) -> U16Vec4 { + U16Vec4::new(self.z, self.y, self.z, self.x) + } + + #[inline] + fn zyzy(self) -> U16Vec4 { + U16Vec4::new(self.z, self.y, self.z, self.y) + } + + #[inline] + fn zyzz(self) -> U16Vec4 { + U16Vec4::new(self.z, self.y, self.z, self.z) + } + + #[inline] + fn zzxx(self) -> U16Vec4 { + U16Vec4::new(self.z, self.z, self.x, self.x) + } + + #[inline] + fn zzxy(self) -> U16Vec4 { + U16Vec4::new(self.z, self.z, self.x, self.y) + } + + #[inline] + fn zzxz(self) -> U16Vec4 { + U16Vec4::new(self.z, self.z, self.x, self.z) + } + + #[inline] + fn zzyx(self) -> U16Vec4 { + U16Vec4::new(self.z, self.z, self.y, self.x) + } + + #[inline] + fn zzyy(self) -> U16Vec4 { + U16Vec4::new(self.z, self.z, self.y, self.y) + } + + #[inline] + fn zzyz(self) -> U16Vec4 { + U16Vec4::new(self.z, self.z, self.y, self.z) + } + + #[inline] + fn zzzx(self) -> U16Vec4 { + U16Vec4::new(self.z, self.z, self.z, self.x) + } + + #[inline] + fn zzzy(self) -> U16Vec4 { + U16Vec4::new(self.z, self.z, self.z, self.y) + } + + #[inline] + fn zzzz(self) -> U16Vec4 { + U16Vec4::new(self.z, self.z, self.z, self.z) + } +} diff --git a/src/swizzles/u16vec4_impl.rs b/src/swizzles/u16vec4_impl.rs new file mode 100644 index 00000000..26e655cd --- /dev/null +++ b/src/swizzles/u16vec4_impl.rs @@ -0,0 +1,1993 @@ +// Generated from swizzle_impl.rs.tera template. Edit the template, not the generated file. 
+ +use crate::{U16Vec2, U16Vec3, U16Vec4, Vec4Swizzles}; + +impl Vec4Swizzles for U16Vec4 { + type Vec2 = U16Vec2; + + type Vec3 = U16Vec3; + + #[inline] + fn xx(self) -> U16Vec2 { + U16Vec2 { + x: self.x, + y: self.x, + } + } + + #[inline] + fn xy(self) -> U16Vec2 { + U16Vec2 { + x: self.x, + y: self.y, + } + } + + #[inline] + fn xz(self) -> U16Vec2 { + U16Vec2 { + x: self.x, + y: self.z, + } + } + + #[inline] + fn xw(self) -> U16Vec2 { + U16Vec2 { + x: self.x, + y: self.w, + } + } + + #[inline] + fn yx(self) -> U16Vec2 { + U16Vec2 { + x: self.y, + y: self.x, + } + } + + #[inline] + fn yy(self) -> U16Vec2 { + U16Vec2 { + x: self.y, + y: self.y, + } + } + + #[inline] + fn yz(self) -> U16Vec2 { + U16Vec2 { + x: self.y, + y: self.z, + } + } + + #[inline] + fn yw(self) -> U16Vec2 { + U16Vec2 { + x: self.y, + y: self.w, + } + } + + #[inline] + fn zx(self) -> U16Vec2 { + U16Vec2 { + x: self.z, + y: self.x, + } + } + + #[inline] + fn zy(self) -> U16Vec2 { + U16Vec2 { + x: self.z, + y: self.y, + } + } + + #[inline] + fn zz(self) -> U16Vec2 { + U16Vec2 { + x: self.z, + y: self.z, + } + } + + #[inline] + fn zw(self) -> U16Vec2 { + U16Vec2 { + x: self.z, + y: self.w, + } + } + + #[inline] + fn wx(self) -> U16Vec2 { + U16Vec2 { + x: self.w, + y: self.x, + } + } + + #[inline] + fn wy(self) -> U16Vec2 { + U16Vec2 { + x: self.w, + y: self.y, + } + } + + #[inline] + fn wz(self) -> U16Vec2 { + U16Vec2 { + x: self.w, + y: self.z, + } + } + + #[inline] + fn ww(self) -> U16Vec2 { + U16Vec2 { + x: self.w, + y: self.w, + } + } + + #[inline] + fn xxx(self) -> U16Vec3 { + U16Vec3 { + x: self.x, + y: self.x, + z: self.x, + } + } + + #[inline] + fn xxy(self) -> U16Vec3 { + U16Vec3 { + x: self.x, + y: self.x, + z: self.y, + } + } + + #[inline] + fn xxz(self) -> U16Vec3 { + U16Vec3 { + x: self.x, + y: self.x, + z: self.z, + } + } + + #[inline] + fn xxw(self) -> U16Vec3 { + U16Vec3 { + x: self.x, + y: self.x, + z: self.w, + } + } + + #[inline] + fn xyx(self) -> U16Vec3 { + U16Vec3 { + x: self.x, + y: self.y, + z: self.x, + } + } + + #[inline] + fn xyy(self) -> U16Vec3 { + U16Vec3 { + x: self.x, + y: self.y, + z: self.y, + } + } + + #[inline] + fn xyz(self) -> U16Vec3 { + U16Vec3 { + x: self.x, + y: self.y, + z: self.z, + } + } + + #[inline] + fn xyw(self) -> U16Vec3 { + U16Vec3 { + x: self.x, + y: self.y, + z: self.w, + } + } + + #[inline] + fn xzx(self) -> U16Vec3 { + U16Vec3 { + x: self.x, + y: self.z, + z: self.x, + } + } + + #[inline] + fn xzy(self) -> U16Vec3 { + U16Vec3 { + x: self.x, + y: self.z, + z: self.y, + } + } + + #[inline] + fn xzz(self) -> U16Vec3 { + U16Vec3 { + x: self.x, + y: self.z, + z: self.z, + } + } + + #[inline] + fn xzw(self) -> U16Vec3 { + U16Vec3 { + x: self.x, + y: self.z, + z: self.w, + } + } + + #[inline] + fn xwx(self) -> U16Vec3 { + U16Vec3 { + x: self.x, + y: self.w, + z: self.x, + } + } + + #[inline] + fn xwy(self) -> U16Vec3 { + U16Vec3 { + x: self.x, + y: self.w, + z: self.y, + } + } + + #[inline] + fn xwz(self) -> U16Vec3 { + U16Vec3 { + x: self.x, + y: self.w, + z: self.z, + } + } + + #[inline] + fn xww(self) -> U16Vec3 { + U16Vec3 { + x: self.x, + y: self.w, + z: self.w, + } + } + + #[inline] + fn yxx(self) -> U16Vec3 { + U16Vec3 { + x: self.y, + y: self.x, + z: self.x, + } + } + + #[inline] + fn yxy(self) -> U16Vec3 { + U16Vec3 { + x: self.y, + y: self.x, + z: self.y, + } + } + + #[inline] + fn yxz(self) -> U16Vec3 { + U16Vec3 { + x: self.y, + y: self.x, + z: self.z, + } + } + + #[inline] + fn yxw(self) -> U16Vec3 { + U16Vec3 { + x: self.y, + y: self.x, + z: self.w, + } + } + 
+ #[inline] + fn yyx(self) -> U16Vec3 { + U16Vec3 { + x: self.y, + y: self.y, + z: self.x, + } + } + + #[inline] + fn yyy(self) -> U16Vec3 { + U16Vec3 { + x: self.y, + y: self.y, + z: self.y, + } + } + + #[inline] + fn yyz(self) -> U16Vec3 { + U16Vec3 { + x: self.y, + y: self.y, + z: self.z, + } + } + + #[inline] + fn yyw(self) -> U16Vec3 { + U16Vec3 { + x: self.y, + y: self.y, + z: self.w, + } + } + + #[inline] + fn yzx(self) -> U16Vec3 { + U16Vec3 { + x: self.y, + y: self.z, + z: self.x, + } + } + + #[inline] + fn yzy(self) -> U16Vec3 { + U16Vec3 { + x: self.y, + y: self.z, + z: self.y, + } + } + + #[inline] + fn yzz(self) -> U16Vec3 { + U16Vec3 { + x: self.y, + y: self.z, + z: self.z, + } + } + + #[inline] + fn yzw(self) -> U16Vec3 { + U16Vec3 { + x: self.y, + y: self.z, + z: self.w, + } + } + + #[inline] + fn ywx(self) -> U16Vec3 { + U16Vec3 { + x: self.y, + y: self.w, + z: self.x, + } + } + + #[inline] + fn ywy(self) -> U16Vec3 { + U16Vec3 { + x: self.y, + y: self.w, + z: self.y, + } + } + + #[inline] + fn ywz(self) -> U16Vec3 { + U16Vec3 { + x: self.y, + y: self.w, + z: self.z, + } + } + + #[inline] + fn yww(self) -> U16Vec3 { + U16Vec3 { + x: self.y, + y: self.w, + z: self.w, + } + } + + #[inline] + fn zxx(self) -> U16Vec3 { + U16Vec3 { + x: self.z, + y: self.x, + z: self.x, + } + } + + #[inline] + fn zxy(self) -> U16Vec3 { + U16Vec3 { + x: self.z, + y: self.x, + z: self.y, + } + } + + #[inline] + fn zxz(self) -> U16Vec3 { + U16Vec3 { + x: self.z, + y: self.x, + z: self.z, + } + } + + #[inline] + fn zxw(self) -> U16Vec3 { + U16Vec3 { + x: self.z, + y: self.x, + z: self.w, + } + } + + #[inline] + fn zyx(self) -> U16Vec3 { + U16Vec3 { + x: self.z, + y: self.y, + z: self.x, + } + } + + #[inline] + fn zyy(self) -> U16Vec3 { + U16Vec3 { + x: self.z, + y: self.y, + z: self.y, + } + } + + #[inline] + fn zyz(self) -> U16Vec3 { + U16Vec3 { + x: self.z, + y: self.y, + z: self.z, + } + } + + #[inline] + fn zyw(self) -> U16Vec3 { + U16Vec3 { + x: self.z, + y: self.y, + z: self.w, + } + } + + #[inline] + fn zzx(self) -> U16Vec3 { + U16Vec3 { + x: self.z, + y: self.z, + z: self.x, + } + } + + #[inline] + fn zzy(self) -> U16Vec3 { + U16Vec3 { + x: self.z, + y: self.z, + z: self.y, + } + } + + #[inline] + fn zzz(self) -> U16Vec3 { + U16Vec3 { + x: self.z, + y: self.z, + z: self.z, + } + } + + #[inline] + fn zzw(self) -> U16Vec3 { + U16Vec3 { + x: self.z, + y: self.z, + z: self.w, + } + } + + #[inline] + fn zwx(self) -> U16Vec3 { + U16Vec3 { + x: self.z, + y: self.w, + z: self.x, + } + } + + #[inline] + fn zwy(self) -> U16Vec3 { + U16Vec3 { + x: self.z, + y: self.w, + z: self.y, + } + } + + #[inline] + fn zwz(self) -> U16Vec3 { + U16Vec3 { + x: self.z, + y: self.w, + z: self.z, + } + } + + #[inline] + fn zww(self) -> U16Vec3 { + U16Vec3 { + x: self.z, + y: self.w, + z: self.w, + } + } + + #[inline] + fn wxx(self) -> U16Vec3 { + U16Vec3 { + x: self.w, + y: self.x, + z: self.x, + } + } + + #[inline] + fn wxy(self) -> U16Vec3 { + U16Vec3 { + x: self.w, + y: self.x, + z: self.y, + } + } + + #[inline] + fn wxz(self) -> U16Vec3 { + U16Vec3 { + x: self.w, + y: self.x, + z: self.z, + } + } + + #[inline] + fn wxw(self) -> U16Vec3 { + U16Vec3 { + x: self.w, + y: self.x, + z: self.w, + } + } + + #[inline] + fn wyx(self) -> U16Vec3 { + U16Vec3 { + x: self.w, + y: self.y, + z: self.x, + } + } + + #[inline] + fn wyy(self) -> U16Vec3 { + U16Vec3 { + x: self.w, + y: self.y, + z: self.y, + } + } + + #[inline] + fn wyz(self) -> U16Vec3 { + U16Vec3 { + x: self.w, + y: self.y, + z: self.z, + } + } + + #[inline] + fn 
wyw(self) -> U16Vec3 { + U16Vec3 { + x: self.w, + y: self.y, + z: self.w, + } + } + + #[inline] + fn wzx(self) -> U16Vec3 { + U16Vec3 { + x: self.w, + y: self.z, + z: self.x, + } + } + + #[inline] + fn wzy(self) -> U16Vec3 { + U16Vec3 { + x: self.w, + y: self.z, + z: self.y, + } + } + + #[inline] + fn wzz(self) -> U16Vec3 { + U16Vec3 { + x: self.w, + y: self.z, + z: self.z, + } + } + + #[inline] + fn wzw(self) -> U16Vec3 { + U16Vec3 { + x: self.w, + y: self.z, + z: self.w, + } + } + + #[inline] + fn wwx(self) -> U16Vec3 { + U16Vec3 { + x: self.w, + y: self.w, + z: self.x, + } + } + + #[inline] + fn wwy(self) -> U16Vec3 { + U16Vec3 { + x: self.w, + y: self.w, + z: self.y, + } + } + + #[inline] + fn wwz(self) -> U16Vec3 { + U16Vec3 { + x: self.w, + y: self.w, + z: self.z, + } + } + + #[inline] + fn www(self) -> U16Vec3 { + U16Vec3 { + x: self.w, + y: self.w, + z: self.w, + } + } + + #[inline] + fn xxxx(self) -> U16Vec4 { + U16Vec4::new(self.x, self.x, self.x, self.x) + } + + #[inline] + fn xxxy(self) -> U16Vec4 { + U16Vec4::new(self.x, self.x, self.x, self.y) + } + + #[inline] + fn xxxz(self) -> U16Vec4 { + U16Vec4::new(self.x, self.x, self.x, self.z) + } + + #[inline] + fn xxxw(self) -> U16Vec4 { + U16Vec4::new(self.x, self.x, self.x, self.w) + } + + #[inline] + fn xxyx(self) -> U16Vec4 { + U16Vec4::new(self.x, self.x, self.y, self.x) + } + + #[inline] + fn xxyy(self) -> U16Vec4 { + U16Vec4::new(self.x, self.x, self.y, self.y) + } + + #[inline] + fn xxyz(self) -> U16Vec4 { + U16Vec4::new(self.x, self.x, self.y, self.z) + } + + #[inline] + fn xxyw(self) -> U16Vec4 { + U16Vec4::new(self.x, self.x, self.y, self.w) + } + + #[inline] + fn xxzx(self) -> U16Vec4 { + U16Vec4::new(self.x, self.x, self.z, self.x) + } + + #[inline] + fn xxzy(self) -> U16Vec4 { + U16Vec4::new(self.x, self.x, self.z, self.y) + } + + #[inline] + fn xxzz(self) -> U16Vec4 { + U16Vec4::new(self.x, self.x, self.z, self.z) + } + + #[inline] + fn xxzw(self) -> U16Vec4 { + U16Vec4::new(self.x, self.x, self.z, self.w) + } + + #[inline] + fn xxwx(self) -> U16Vec4 { + U16Vec4::new(self.x, self.x, self.w, self.x) + } + + #[inline] + fn xxwy(self) -> U16Vec4 { + U16Vec4::new(self.x, self.x, self.w, self.y) + } + + #[inline] + fn xxwz(self) -> U16Vec4 { + U16Vec4::new(self.x, self.x, self.w, self.z) + } + + #[inline] + fn xxww(self) -> U16Vec4 { + U16Vec4::new(self.x, self.x, self.w, self.w) + } + + #[inline] + fn xyxx(self) -> U16Vec4 { + U16Vec4::new(self.x, self.y, self.x, self.x) + } + + #[inline] + fn xyxy(self) -> U16Vec4 { + U16Vec4::new(self.x, self.y, self.x, self.y) + } + + #[inline] + fn xyxz(self) -> U16Vec4 { + U16Vec4::new(self.x, self.y, self.x, self.z) + } + + #[inline] + fn xyxw(self) -> U16Vec4 { + U16Vec4::new(self.x, self.y, self.x, self.w) + } + + #[inline] + fn xyyx(self) -> U16Vec4 { + U16Vec4::new(self.x, self.y, self.y, self.x) + } + + #[inline] + fn xyyy(self) -> U16Vec4 { + U16Vec4::new(self.x, self.y, self.y, self.y) + } + + #[inline] + fn xyyz(self) -> U16Vec4 { + U16Vec4::new(self.x, self.y, self.y, self.z) + } + + #[inline] + fn xyyw(self) -> U16Vec4 { + U16Vec4::new(self.x, self.y, self.y, self.w) + } + + #[inline] + fn xyzx(self) -> U16Vec4 { + U16Vec4::new(self.x, self.y, self.z, self.x) + } + + #[inline] + fn xyzy(self) -> U16Vec4 { + U16Vec4::new(self.x, self.y, self.z, self.y) + } + + #[inline] + fn xyzz(self) -> U16Vec4 { + U16Vec4::new(self.x, self.y, self.z, self.z) + } + + #[inline] + fn xyzw(self) -> U16Vec4 { + U16Vec4::new(self.x, self.y, self.z, self.w) + } + + #[inline] + fn xywx(self) -> 
U16Vec4 { + U16Vec4::new(self.x, self.y, self.w, self.x) + } + + #[inline] + fn xywy(self) -> U16Vec4 { + U16Vec4::new(self.x, self.y, self.w, self.y) + } + + #[inline] + fn xywz(self) -> U16Vec4 { + U16Vec4::new(self.x, self.y, self.w, self.z) + } + + #[inline] + fn xyww(self) -> U16Vec4 { + U16Vec4::new(self.x, self.y, self.w, self.w) + } + + #[inline] + fn xzxx(self) -> U16Vec4 { + U16Vec4::new(self.x, self.z, self.x, self.x) + } + + #[inline] + fn xzxy(self) -> U16Vec4 { + U16Vec4::new(self.x, self.z, self.x, self.y) + } + + #[inline] + fn xzxz(self) -> U16Vec4 { + U16Vec4::new(self.x, self.z, self.x, self.z) + } + + #[inline] + fn xzxw(self) -> U16Vec4 { + U16Vec4::new(self.x, self.z, self.x, self.w) + } + + #[inline] + fn xzyx(self) -> U16Vec4 { + U16Vec4::new(self.x, self.z, self.y, self.x) + } + + #[inline] + fn xzyy(self) -> U16Vec4 { + U16Vec4::new(self.x, self.z, self.y, self.y) + } + + #[inline] + fn xzyz(self) -> U16Vec4 { + U16Vec4::new(self.x, self.z, self.y, self.z) + } + + #[inline] + fn xzyw(self) -> U16Vec4 { + U16Vec4::new(self.x, self.z, self.y, self.w) + } + + #[inline] + fn xzzx(self) -> U16Vec4 { + U16Vec4::new(self.x, self.z, self.z, self.x) + } + + #[inline] + fn xzzy(self) -> U16Vec4 { + U16Vec4::new(self.x, self.z, self.z, self.y) + } + + #[inline] + fn xzzz(self) -> U16Vec4 { + U16Vec4::new(self.x, self.z, self.z, self.z) + } + + #[inline] + fn xzzw(self) -> U16Vec4 { + U16Vec4::new(self.x, self.z, self.z, self.w) + } + + #[inline] + fn xzwx(self) -> U16Vec4 { + U16Vec4::new(self.x, self.z, self.w, self.x) + } + + #[inline] + fn xzwy(self) -> U16Vec4 { + U16Vec4::new(self.x, self.z, self.w, self.y) + } + + #[inline] + fn xzwz(self) -> U16Vec4 { + U16Vec4::new(self.x, self.z, self.w, self.z) + } + + #[inline] + fn xzww(self) -> U16Vec4 { + U16Vec4::new(self.x, self.z, self.w, self.w) + } + + #[inline] + fn xwxx(self) -> U16Vec4 { + U16Vec4::new(self.x, self.w, self.x, self.x) + } + + #[inline] + fn xwxy(self) -> U16Vec4 { + U16Vec4::new(self.x, self.w, self.x, self.y) + } + + #[inline] + fn xwxz(self) -> U16Vec4 { + U16Vec4::new(self.x, self.w, self.x, self.z) + } + + #[inline] + fn xwxw(self) -> U16Vec4 { + U16Vec4::new(self.x, self.w, self.x, self.w) + } + + #[inline] + fn xwyx(self) -> U16Vec4 { + U16Vec4::new(self.x, self.w, self.y, self.x) + } + + #[inline] + fn xwyy(self) -> U16Vec4 { + U16Vec4::new(self.x, self.w, self.y, self.y) + } + + #[inline] + fn xwyz(self) -> U16Vec4 { + U16Vec4::new(self.x, self.w, self.y, self.z) + } + + #[inline] + fn xwyw(self) -> U16Vec4 { + U16Vec4::new(self.x, self.w, self.y, self.w) + } + + #[inline] + fn xwzx(self) -> U16Vec4 { + U16Vec4::new(self.x, self.w, self.z, self.x) + } + + #[inline] + fn xwzy(self) -> U16Vec4 { + U16Vec4::new(self.x, self.w, self.z, self.y) + } + + #[inline] + fn xwzz(self) -> U16Vec4 { + U16Vec4::new(self.x, self.w, self.z, self.z) + } + + #[inline] + fn xwzw(self) -> U16Vec4 { + U16Vec4::new(self.x, self.w, self.z, self.w) + } + + #[inline] + fn xwwx(self) -> U16Vec4 { + U16Vec4::new(self.x, self.w, self.w, self.x) + } + + #[inline] + fn xwwy(self) -> U16Vec4 { + U16Vec4::new(self.x, self.w, self.w, self.y) + } + + #[inline] + fn xwwz(self) -> U16Vec4 { + U16Vec4::new(self.x, self.w, self.w, self.z) + } + + #[inline] + fn xwww(self) -> U16Vec4 { + U16Vec4::new(self.x, self.w, self.w, self.w) + } + + #[inline] + fn yxxx(self) -> U16Vec4 { + U16Vec4::new(self.y, self.x, self.x, self.x) + } + + #[inline] + fn yxxy(self) -> U16Vec4 { + U16Vec4::new(self.y, self.x, self.x, self.y) + } + + #[inline] + 
fn yxxz(self) -> U16Vec4 { + U16Vec4::new(self.y, self.x, self.x, self.z) + } + + #[inline] + fn yxxw(self) -> U16Vec4 { + U16Vec4::new(self.y, self.x, self.x, self.w) + } + + #[inline] + fn yxyx(self) -> U16Vec4 { + U16Vec4::new(self.y, self.x, self.y, self.x) + } + + #[inline] + fn yxyy(self) -> U16Vec4 { + U16Vec4::new(self.y, self.x, self.y, self.y) + } + + #[inline] + fn yxyz(self) -> U16Vec4 { + U16Vec4::new(self.y, self.x, self.y, self.z) + } + + #[inline] + fn yxyw(self) -> U16Vec4 { + U16Vec4::new(self.y, self.x, self.y, self.w) + } + + #[inline] + fn yxzx(self) -> U16Vec4 { + U16Vec4::new(self.y, self.x, self.z, self.x) + } + + #[inline] + fn yxzy(self) -> U16Vec4 { + U16Vec4::new(self.y, self.x, self.z, self.y) + } + + #[inline] + fn yxzz(self) -> U16Vec4 { + U16Vec4::new(self.y, self.x, self.z, self.z) + } + + #[inline] + fn yxzw(self) -> U16Vec4 { + U16Vec4::new(self.y, self.x, self.z, self.w) + } + + #[inline] + fn yxwx(self) -> U16Vec4 { + U16Vec4::new(self.y, self.x, self.w, self.x) + } + + #[inline] + fn yxwy(self) -> U16Vec4 { + U16Vec4::new(self.y, self.x, self.w, self.y) + } + + #[inline] + fn yxwz(self) -> U16Vec4 { + U16Vec4::new(self.y, self.x, self.w, self.z) + } + + #[inline] + fn yxww(self) -> U16Vec4 { + U16Vec4::new(self.y, self.x, self.w, self.w) + } + + #[inline] + fn yyxx(self) -> U16Vec4 { + U16Vec4::new(self.y, self.y, self.x, self.x) + } + + #[inline] + fn yyxy(self) -> U16Vec4 { + U16Vec4::new(self.y, self.y, self.x, self.y) + } + + #[inline] + fn yyxz(self) -> U16Vec4 { + U16Vec4::new(self.y, self.y, self.x, self.z) + } + + #[inline] + fn yyxw(self) -> U16Vec4 { + U16Vec4::new(self.y, self.y, self.x, self.w) + } + + #[inline] + fn yyyx(self) -> U16Vec4 { + U16Vec4::new(self.y, self.y, self.y, self.x) + } + + #[inline] + fn yyyy(self) -> U16Vec4 { + U16Vec4::new(self.y, self.y, self.y, self.y) + } + + #[inline] + fn yyyz(self) -> U16Vec4 { + U16Vec4::new(self.y, self.y, self.y, self.z) + } + + #[inline] + fn yyyw(self) -> U16Vec4 { + U16Vec4::new(self.y, self.y, self.y, self.w) + } + + #[inline] + fn yyzx(self) -> U16Vec4 { + U16Vec4::new(self.y, self.y, self.z, self.x) + } + + #[inline] + fn yyzy(self) -> U16Vec4 { + U16Vec4::new(self.y, self.y, self.z, self.y) + } + + #[inline] + fn yyzz(self) -> U16Vec4 { + U16Vec4::new(self.y, self.y, self.z, self.z) + } + + #[inline] + fn yyzw(self) -> U16Vec4 { + U16Vec4::new(self.y, self.y, self.z, self.w) + } + + #[inline] + fn yywx(self) -> U16Vec4 { + U16Vec4::new(self.y, self.y, self.w, self.x) + } + + #[inline] + fn yywy(self) -> U16Vec4 { + U16Vec4::new(self.y, self.y, self.w, self.y) + } + + #[inline] + fn yywz(self) -> U16Vec4 { + U16Vec4::new(self.y, self.y, self.w, self.z) + } + + #[inline] + fn yyww(self) -> U16Vec4 { + U16Vec4::new(self.y, self.y, self.w, self.w) + } + + #[inline] + fn yzxx(self) -> U16Vec4 { + U16Vec4::new(self.y, self.z, self.x, self.x) + } + + #[inline] + fn yzxy(self) -> U16Vec4 { + U16Vec4::new(self.y, self.z, self.x, self.y) + } + + #[inline] + fn yzxz(self) -> U16Vec4 { + U16Vec4::new(self.y, self.z, self.x, self.z) + } + + #[inline] + fn yzxw(self) -> U16Vec4 { + U16Vec4::new(self.y, self.z, self.x, self.w) + } + + #[inline] + fn yzyx(self) -> U16Vec4 { + U16Vec4::new(self.y, self.z, self.y, self.x) + } + + #[inline] + fn yzyy(self) -> U16Vec4 { + U16Vec4::new(self.y, self.z, self.y, self.y) + } + + #[inline] + fn yzyz(self) -> U16Vec4 { + U16Vec4::new(self.y, self.z, self.y, self.z) + } + + #[inline] + fn yzyw(self) -> U16Vec4 { + U16Vec4::new(self.y, self.z, self.y, self.w) + 
} + + #[inline] + fn yzzx(self) -> U16Vec4 { + U16Vec4::new(self.y, self.z, self.z, self.x) + } + + #[inline] + fn yzzy(self) -> U16Vec4 { + U16Vec4::new(self.y, self.z, self.z, self.y) + } + + #[inline] + fn yzzz(self) -> U16Vec4 { + U16Vec4::new(self.y, self.z, self.z, self.z) + } + + #[inline] + fn yzzw(self) -> U16Vec4 { + U16Vec4::new(self.y, self.z, self.z, self.w) + } + + #[inline] + fn yzwx(self) -> U16Vec4 { + U16Vec4::new(self.y, self.z, self.w, self.x) + } + + #[inline] + fn yzwy(self) -> U16Vec4 { + U16Vec4::new(self.y, self.z, self.w, self.y) + } + + #[inline] + fn yzwz(self) -> U16Vec4 { + U16Vec4::new(self.y, self.z, self.w, self.z) + } + + #[inline] + fn yzww(self) -> U16Vec4 { + U16Vec4::new(self.y, self.z, self.w, self.w) + } + + #[inline] + fn ywxx(self) -> U16Vec4 { + U16Vec4::new(self.y, self.w, self.x, self.x) + } + + #[inline] + fn ywxy(self) -> U16Vec4 { + U16Vec4::new(self.y, self.w, self.x, self.y) + } + + #[inline] + fn ywxz(self) -> U16Vec4 { + U16Vec4::new(self.y, self.w, self.x, self.z) + } + + #[inline] + fn ywxw(self) -> U16Vec4 { + U16Vec4::new(self.y, self.w, self.x, self.w) + } + + #[inline] + fn ywyx(self) -> U16Vec4 { + U16Vec4::new(self.y, self.w, self.y, self.x) + } + + #[inline] + fn ywyy(self) -> U16Vec4 { + U16Vec4::new(self.y, self.w, self.y, self.y) + } + + #[inline] + fn ywyz(self) -> U16Vec4 { + U16Vec4::new(self.y, self.w, self.y, self.z) + } + + #[inline] + fn ywyw(self) -> U16Vec4 { + U16Vec4::new(self.y, self.w, self.y, self.w) + } + + #[inline] + fn ywzx(self) -> U16Vec4 { + U16Vec4::new(self.y, self.w, self.z, self.x) + } + + #[inline] + fn ywzy(self) -> U16Vec4 { + U16Vec4::new(self.y, self.w, self.z, self.y) + } + + #[inline] + fn ywzz(self) -> U16Vec4 { + U16Vec4::new(self.y, self.w, self.z, self.z) + } + + #[inline] + fn ywzw(self) -> U16Vec4 { + U16Vec4::new(self.y, self.w, self.z, self.w) + } + + #[inline] + fn ywwx(self) -> U16Vec4 { + U16Vec4::new(self.y, self.w, self.w, self.x) + } + + #[inline] + fn ywwy(self) -> U16Vec4 { + U16Vec4::new(self.y, self.w, self.w, self.y) + } + + #[inline] + fn ywwz(self) -> U16Vec4 { + U16Vec4::new(self.y, self.w, self.w, self.z) + } + + #[inline] + fn ywww(self) -> U16Vec4 { + U16Vec4::new(self.y, self.w, self.w, self.w) + } + + #[inline] + fn zxxx(self) -> U16Vec4 { + U16Vec4::new(self.z, self.x, self.x, self.x) + } + + #[inline] + fn zxxy(self) -> U16Vec4 { + U16Vec4::new(self.z, self.x, self.x, self.y) + } + + #[inline] + fn zxxz(self) -> U16Vec4 { + U16Vec4::new(self.z, self.x, self.x, self.z) + } + + #[inline] + fn zxxw(self) -> U16Vec4 { + U16Vec4::new(self.z, self.x, self.x, self.w) + } + + #[inline] + fn zxyx(self) -> U16Vec4 { + U16Vec4::new(self.z, self.x, self.y, self.x) + } + + #[inline] + fn zxyy(self) -> U16Vec4 { + U16Vec4::new(self.z, self.x, self.y, self.y) + } + + #[inline] + fn zxyz(self) -> U16Vec4 { + U16Vec4::new(self.z, self.x, self.y, self.z) + } + + #[inline] + fn zxyw(self) -> U16Vec4 { + U16Vec4::new(self.z, self.x, self.y, self.w) + } + + #[inline] + fn zxzx(self) -> U16Vec4 { + U16Vec4::new(self.z, self.x, self.z, self.x) + } + + #[inline] + fn zxzy(self) -> U16Vec4 { + U16Vec4::new(self.z, self.x, self.z, self.y) + } + + #[inline] + fn zxzz(self) -> U16Vec4 { + U16Vec4::new(self.z, self.x, self.z, self.z) + } + + #[inline] + fn zxzw(self) -> U16Vec4 { + U16Vec4::new(self.z, self.x, self.z, self.w) + } + + #[inline] + fn zxwx(self) -> U16Vec4 { + U16Vec4::new(self.z, self.x, self.w, self.x) + } + + #[inline] + fn zxwy(self) -> U16Vec4 { + U16Vec4::new(self.z, self.x, 
self.w, self.y) + } + + #[inline] + fn zxwz(self) -> U16Vec4 { + U16Vec4::new(self.z, self.x, self.w, self.z) + } + + #[inline] + fn zxww(self) -> U16Vec4 { + U16Vec4::new(self.z, self.x, self.w, self.w) + } + + #[inline] + fn zyxx(self) -> U16Vec4 { + U16Vec4::new(self.z, self.y, self.x, self.x) + } + + #[inline] + fn zyxy(self) -> U16Vec4 { + U16Vec4::new(self.z, self.y, self.x, self.y) + } + + #[inline] + fn zyxz(self) -> U16Vec4 { + U16Vec4::new(self.z, self.y, self.x, self.z) + } + + #[inline] + fn zyxw(self) -> U16Vec4 { + U16Vec4::new(self.z, self.y, self.x, self.w) + } + + #[inline] + fn zyyx(self) -> U16Vec4 { + U16Vec4::new(self.z, self.y, self.y, self.x) + } + + #[inline] + fn zyyy(self) -> U16Vec4 { + U16Vec4::new(self.z, self.y, self.y, self.y) + } + + #[inline] + fn zyyz(self) -> U16Vec4 { + U16Vec4::new(self.z, self.y, self.y, self.z) + } + + #[inline] + fn zyyw(self) -> U16Vec4 { + U16Vec4::new(self.z, self.y, self.y, self.w) + } + + #[inline] + fn zyzx(self) -> U16Vec4 { + U16Vec4::new(self.z, self.y, self.z, self.x) + } + + #[inline] + fn zyzy(self) -> U16Vec4 { + U16Vec4::new(self.z, self.y, self.z, self.y) + } + + #[inline] + fn zyzz(self) -> U16Vec4 { + U16Vec4::new(self.z, self.y, self.z, self.z) + } + + #[inline] + fn zyzw(self) -> U16Vec4 { + U16Vec4::new(self.z, self.y, self.z, self.w) + } + + #[inline] + fn zywx(self) -> U16Vec4 { + U16Vec4::new(self.z, self.y, self.w, self.x) + } + + #[inline] + fn zywy(self) -> U16Vec4 { + U16Vec4::new(self.z, self.y, self.w, self.y) + } + + #[inline] + fn zywz(self) -> U16Vec4 { + U16Vec4::new(self.z, self.y, self.w, self.z) + } + + #[inline] + fn zyww(self) -> U16Vec4 { + U16Vec4::new(self.z, self.y, self.w, self.w) + } + + #[inline] + fn zzxx(self) -> U16Vec4 { + U16Vec4::new(self.z, self.z, self.x, self.x) + } + + #[inline] + fn zzxy(self) -> U16Vec4 { + U16Vec4::new(self.z, self.z, self.x, self.y) + } + + #[inline] + fn zzxz(self) -> U16Vec4 { + U16Vec4::new(self.z, self.z, self.x, self.z) + } + + #[inline] + fn zzxw(self) -> U16Vec4 { + U16Vec4::new(self.z, self.z, self.x, self.w) + } + + #[inline] + fn zzyx(self) -> U16Vec4 { + U16Vec4::new(self.z, self.z, self.y, self.x) + } + + #[inline] + fn zzyy(self) -> U16Vec4 { + U16Vec4::new(self.z, self.z, self.y, self.y) + } + + #[inline] + fn zzyz(self) -> U16Vec4 { + U16Vec4::new(self.z, self.z, self.y, self.z) + } + + #[inline] + fn zzyw(self) -> U16Vec4 { + U16Vec4::new(self.z, self.z, self.y, self.w) + } + + #[inline] + fn zzzx(self) -> U16Vec4 { + U16Vec4::new(self.z, self.z, self.z, self.x) + } + + #[inline] + fn zzzy(self) -> U16Vec4 { + U16Vec4::new(self.z, self.z, self.z, self.y) + } + + #[inline] + fn zzzz(self) -> U16Vec4 { + U16Vec4::new(self.z, self.z, self.z, self.z) + } + + #[inline] + fn zzzw(self) -> U16Vec4 { + U16Vec4::new(self.z, self.z, self.z, self.w) + } + + #[inline] + fn zzwx(self) -> U16Vec4 { + U16Vec4::new(self.z, self.z, self.w, self.x) + } + + #[inline] + fn zzwy(self) -> U16Vec4 { + U16Vec4::new(self.z, self.z, self.w, self.y) + } + + #[inline] + fn zzwz(self) -> U16Vec4 { + U16Vec4::new(self.z, self.z, self.w, self.z) + } + + #[inline] + fn zzww(self) -> U16Vec4 { + U16Vec4::new(self.z, self.z, self.w, self.w) + } + + #[inline] + fn zwxx(self) -> U16Vec4 { + U16Vec4::new(self.z, self.w, self.x, self.x) + } + + #[inline] + fn zwxy(self) -> U16Vec4 { + U16Vec4::new(self.z, self.w, self.x, self.y) + } + + #[inline] + fn zwxz(self) -> U16Vec4 { + U16Vec4::new(self.z, self.w, self.x, self.z) + } + + #[inline] + fn zwxw(self) -> U16Vec4 { + 
U16Vec4::new(self.z, self.w, self.x, self.w) + } + + #[inline] + fn zwyx(self) -> U16Vec4 { + U16Vec4::new(self.z, self.w, self.y, self.x) + } + + #[inline] + fn zwyy(self) -> U16Vec4 { + U16Vec4::new(self.z, self.w, self.y, self.y) + } + + #[inline] + fn zwyz(self) -> U16Vec4 { + U16Vec4::new(self.z, self.w, self.y, self.z) + } + + #[inline] + fn zwyw(self) -> U16Vec4 { + U16Vec4::new(self.z, self.w, self.y, self.w) + } + + #[inline] + fn zwzx(self) -> U16Vec4 { + U16Vec4::new(self.z, self.w, self.z, self.x) + } + + #[inline] + fn zwzy(self) -> U16Vec4 { + U16Vec4::new(self.z, self.w, self.z, self.y) + } + + #[inline] + fn zwzz(self) -> U16Vec4 { + U16Vec4::new(self.z, self.w, self.z, self.z) + } + + #[inline] + fn zwzw(self) -> U16Vec4 { + U16Vec4::new(self.z, self.w, self.z, self.w) + } + + #[inline] + fn zwwx(self) -> U16Vec4 { + U16Vec4::new(self.z, self.w, self.w, self.x) + } + + #[inline] + fn zwwy(self) -> U16Vec4 { + U16Vec4::new(self.z, self.w, self.w, self.y) + } + + #[inline] + fn zwwz(self) -> U16Vec4 { + U16Vec4::new(self.z, self.w, self.w, self.z) + } + + #[inline] + fn zwww(self) -> U16Vec4 { + U16Vec4::new(self.z, self.w, self.w, self.w) + } + + #[inline] + fn wxxx(self) -> U16Vec4 { + U16Vec4::new(self.w, self.x, self.x, self.x) + } + + #[inline] + fn wxxy(self) -> U16Vec4 { + U16Vec4::new(self.w, self.x, self.x, self.y) + } + + #[inline] + fn wxxz(self) -> U16Vec4 { + U16Vec4::new(self.w, self.x, self.x, self.z) + } + + #[inline] + fn wxxw(self) -> U16Vec4 { + U16Vec4::new(self.w, self.x, self.x, self.w) + } + + #[inline] + fn wxyx(self) -> U16Vec4 { + U16Vec4::new(self.w, self.x, self.y, self.x) + } + + #[inline] + fn wxyy(self) -> U16Vec4 { + U16Vec4::new(self.w, self.x, self.y, self.y) + } + + #[inline] + fn wxyz(self) -> U16Vec4 { + U16Vec4::new(self.w, self.x, self.y, self.z) + } + + #[inline] + fn wxyw(self) -> U16Vec4 { + U16Vec4::new(self.w, self.x, self.y, self.w) + } + + #[inline] + fn wxzx(self) -> U16Vec4 { + U16Vec4::new(self.w, self.x, self.z, self.x) + } + + #[inline] + fn wxzy(self) -> U16Vec4 { + U16Vec4::new(self.w, self.x, self.z, self.y) + } + + #[inline] + fn wxzz(self) -> U16Vec4 { + U16Vec4::new(self.w, self.x, self.z, self.z) + } + + #[inline] + fn wxzw(self) -> U16Vec4 { + U16Vec4::new(self.w, self.x, self.z, self.w) + } + + #[inline] + fn wxwx(self) -> U16Vec4 { + U16Vec4::new(self.w, self.x, self.w, self.x) + } + + #[inline] + fn wxwy(self) -> U16Vec4 { + U16Vec4::new(self.w, self.x, self.w, self.y) + } + + #[inline] + fn wxwz(self) -> U16Vec4 { + U16Vec4::new(self.w, self.x, self.w, self.z) + } + + #[inline] + fn wxww(self) -> U16Vec4 { + U16Vec4::new(self.w, self.x, self.w, self.w) + } + + #[inline] + fn wyxx(self) -> U16Vec4 { + U16Vec4::new(self.w, self.y, self.x, self.x) + } + + #[inline] + fn wyxy(self) -> U16Vec4 { + U16Vec4::new(self.w, self.y, self.x, self.y) + } + + #[inline] + fn wyxz(self) -> U16Vec4 { + U16Vec4::new(self.w, self.y, self.x, self.z) + } + + #[inline] + fn wyxw(self) -> U16Vec4 { + U16Vec4::new(self.w, self.y, self.x, self.w) + } + + #[inline] + fn wyyx(self) -> U16Vec4 { + U16Vec4::new(self.w, self.y, self.y, self.x) + } + + #[inline] + fn wyyy(self) -> U16Vec4 { + U16Vec4::new(self.w, self.y, self.y, self.y) + } + + #[inline] + fn wyyz(self) -> U16Vec4 { + U16Vec4::new(self.w, self.y, self.y, self.z) + } + + #[inline] + fn wyyw(self) -> U16Vec4 { + U16Vec4::new(self.w, self.y, self.y, self.w) + } + + #[inline] + fn wyzx(self) -> U16Vec4 { + U16Vec4::new(self.w, self.y, self.z, self.x) + } + + #[inline] + fn 
wyzy(self) -> U16Vec4 { + U16Vec4::new(self.w, self.y, self.z, self.y) + } + + #[inline] + fn wyzz(self) -> U16Vec4 { + U16Vec4::new(self.w, self.y, self.z, self.z) + } + + #[inline] + fn wyzw(self) -> U16Vec4 { + U16Vec4::new(self.w, self.y, self.z, self.w) + } + + #[inline] + fn wywx(self) -> U16Vec4 { + U16Vec4::new(self.w, self.y, self.w, self.x) + } + + #[inline] + fn wywy(self) -> U16Vec4 { + U16Vec4::new(self.w, self.y, self.w, self.y) + } + + #[inline] + fn wywz(self) -> U16Vec4 { + U16Vec4::new(self.w, self.y, self.w, self.z) + } + + #[inline] + fn wyww(self) -> U16Vec4 { + U16Vec4::new(self.w, self.y, self.w, self.w) + } + + #[inline] + fn wzxx(self) -> U16Vec4 { + U16Vec4::new(self.w, self.z, self.x, self.x) + } + + #[inline] + fn wzxy(self) -> U16Vec4 { + U16Vec4::new(self.w, self.z, self.x, self.y) + } + + #[inline] + fn wzxz(self) -> U16Vec4 { + U16Vec4::new(self.w, self.z, self.x, self.z) + } + + #[inline] + fn wzxw(self) -> U16Vec4 { + U16Vec4::new(self.w, self.z, self.x, self.w) + } + + #[inline] + fn wzyx(self) -> U16Vec4 { + U16Vec4::new(self.w, self.z, self.y, self.x) + } + + #[inline] + fn wzyy(self) -> U16Vec4 { + U16Vec4::new(self.w, self.z, self.y, self.y) + } + + #[inline] + fn wzyz(self) -> U16Vec4 { + U16Vec4::new(self.w, self.z, self.y, self.z) + } + + #[inline] + fn wzyw(self) -> U16Vec4 { + U16Vec4::new(self.w, self.z, self.y, self.w) + } + + #[inline] + fn wzzx(self) -> U16Vec4 { + U16Vec4::new(self.w, self.z, self.z, self.x) + } + + #[inline] + fn wzzy(self) -> U16Vec4 { + U16Vec4::new(self.w, self.z, self.z, self.y) + } + + #[inline] + fn wzzz(self) -> U16Vec4 { + U16Vec4::new(self.w, self.z, self.z, self.z) + } + + #[inline] + fn wzzw(self) -> U16Vec4 { + U16Vec4::new(self.w, self.z, self.z, self.w) + } + + #[inline] + fn wzwx(self) -> U16Vec4 { + U16Vec4::new(self.w, self.z, self.w, self.x) + } + + #[inline] + fn wzwy(self) -> U16Vec4 { + U16Vec4::new(self.w, self.z, self.w, self.y) + } + + #[inline] + fn wzwz(self) -> U16Vec4 { + U16Vec4::new(self.w, self.z, self.w, self.z) + } + + #[inline] + fn wzww(self) -> U16Vec4 { + U16Vec4::new(self.w, self.z, self.w, self.w) + } + + #[inline] + fn wwxx(self) -> U16Vec4 { + U16Vec4::new(self.w, self.w, self.x, self.x) + } + + #[inline] + fn wwxy(self) -> U16Vec4 { + U16Vec4::new(self.w, self.w, self.x, self.y) + } + + #[inline] + fn wwxz(self) -> U16Vec4 { + U16Vec4::new(self.w, self.w, self.x, self.z) + } + + #[inline] + fn wwxw(self) -> U16Vec4 { + U16Vec4::new(self.w, self.w, self.x, self.w) + } + + #[inline] + fn wwyx(self) -> U16Vec4 { + U16Vec4::new(self.w, self.w, self.y, self.x) + } + + #[inline] + fn wwyy(self) -> U16Vec4 { + U16Vec4::new(self.w, self.w, self.y, self.y) + } + + #[inline] + fn wwyz(self) -> U16Vec4 { + U16Vec4::new(self.w, self.w, self.y, self.z) + } + + #[inline] + fn wwyw(self) -> U16Vec4 { + U16Vec4::new(self.w, self.w, self.y, self.w) + } + + #[inline] + fn wwzx(self) -> U16Vec4 { + U16Vec4::new(self.w, self.w, self.z, self.x) + } + + #[inline] + fn wwzy(self) -> U16Vec4 { + U16Vec4::new(self.w, self.w, self.z, self.y) + } + + #[inline] + fn wwzz(self) -> U16Vec4 { + U16Vec4::new(self.w, self.w, self.z, self.z) + } + + #[inline] + fn wwzw(self) -> U16Vec4 { + U16Vec4::new(self.w, self.w, self.z, self.w) + } + + #[inline] + fn wwwx(self) -> U16Vec4 { + U16Vec4::new(self.w, self.w, self.w, self.x) + } + + #[inline] + fn wwwy(self) -> U16Vec4 { + U16Vec4::new(self.w, self.w, self.w, self.y) + } + + #[inline] + fn wwwz(self) -> U16Vec4 { + U16Vec4::new(self.w, self.w, self.w, self.z) + } + 
+    #[inline]
+    fn wwww(self) -> U16Vec4 {
+        U16Vec4::new(self.w, self.w, self.w, self.w)
+    }
+}
diff --git a/src/u16.rs b/src/u16.rs
new file mode 100644
index 00000000..36745f8a
--- /dev/null
+++ b/src/u16.rs
@@ -0,0 +1,50 @@
+mod u16vec2;
+mod u16vec3;
+mod u16vec4;
+
+pub use u16vec2::{u16vec2, U16Vec2};
+pub use u16vec3::{u16vec3, U16Vec3};
+pub use u16vec4::{u16vec4, U16Vec4};
+
+#[cfg(not(target_arch = "spirv"))]
+mod test {
+    use super::*;
+
+    mod const_test_u16vec2 {
+        #[cfg(not(feature = "cuda"))]
+        const_assert_eq!(
+            core::mem::align_of::<u16>(),
+            core::mem::align_of::<super::U16Vec2>()
+        );
+        #[cfg(not(feature = "cuda"))]
+        const_assert_eq!(4, core::mem::size_of::<super::U16Vec2>());
+        #[cfg(feature = "cuda")]
+
+        const_assert_eq!(8, core::mem::align_of::<super::U16Vec2>());
+        #[cfg(feature = "cuda")]
+        const_assert_eq!(8, core::mem::size_of::<super::U16Vec2>());
+    }
+
+    mod const_test_u16vec3 {
+        const_assert_eq!(
+            core::mem::align_of::<u16>(),
+            core::mem::align_of::<super::U16Vec3>()
+        );
+        const_assert_eq!(6, core::mem::size_of::<super::U16Vec3>());
+    }
+
+    mod const_test_u16vec4 {
+        #[cfg(not(feature = "cuda"))]
+        const_assert_eq!(
+            core::mem::align_of::<u16>(),
+            core::mem::align_of::<super::U16Vec4>()
+        );
+        #[cfg(not(feature = "cuda"))]
+        const_assert_eq!(8, core::mem::size_of::<super::U16Vec4>());
+
+        #[cfg(feature = "cuda")]
+        const_assert_eq!(16, core::mem::align_of::<super::U16Vec4>());
+        #[cfg(feature = "cuda")]
+        const_assert_eq!(16, core::mem::size_of::<super::U16Vec4>());
+    }
+}
diff --git a/src/u16/u16vec2.rs b/src/u16/u16vec2.rs
new file mode 100644
index 00000000..37289adc
--- /dev/null
+++ b/src/u16/u16vec2.rs
@@ -0,0 +1,1098 @@
+// Generated from vec.rs.tera template. Edit the template, not the generated file.
+
+use crate::{BVec2, I16Vec2, I64Vec2, IVec2, U16Vec3, U64Vec2, UVec2};
+
+#[cfg(not(target_arch = "spirv"))]
+use core::fmt;
+use core::iter::{Product, Sum};
+use core::{f32, ops::*};
+
+/// Creates a 2-dimensional vector.
+#[inline(always)]
+pub const fn u16vec2(x: u16, y: u16) -> U16Vec2 {
+    U16Vec2::new(x, y)
+}
+
+/// A 2-dimensional vector.
+#[cfg_attr(not(target_arch = "spirv"), derive(Hash))]
+#[derive(Clone, Copy, PartialEq, Eq)]
+#[cfg_attr(feature = "cuda", repr(align(8)))]
+#[cfg_attr(not(target_arch = "spirv"), repr(C))]
+#[cfg_attr(target_arch = "spirv", repr(simd))]
+pub struct U16Vec2 {
+    pub x: u16,
+    pub y: u16,
+}
+
+impl U16Vec2 {
+    /// All zeroes.
+    pub const ZERO: Self = Self::splat(0);
+
+    /// All ones.
+    pub const ONE: Self = Self::splat(1);
+
+    /// All `u16::MIN`.
+    pub const MIN: Self = Self::splat(u16::MIN);
+
+    /// All `u16::MAX`.
+    pub const MAX: Self = Self::splat(u16::MAX);
+
+    /// A unit vector pointing along the positive X axis.
+    pub const X: Self = Self::new(1, 0);
+
+    /// A unit vector pointing along the positive Y axis.
+    pub const Y: Self = Self::new(0, 1);
+
+    /// The unit axes.
+    pub const AXES: [Self; 2] = [Self::X, Self::Y];
+
+    /// Creates a new vector.
+    #[inline(always)]
+    pub const fn new(x: u16, y: u16) -> Self {
+        Self { x, y }
+    }
+
+    /// Creates a vector with all elements set to `v`.
+    #[inline]
+    pub const fn splat(v: u16) -> Self {
+        Self { x: v, y: v }
+    }
+
+    /// Creates a vector from the elements in `if_true` and `if_false`, selecting which to use
+    /// for each element of `self`.
+    ///
+    /// A true element in the mask uses the corresponding element from `if_true`, and false
+    /// uses the element from `if_false`.
+    #[inline]
+    pub fn select(mask: BVec2, if_true: Self, if_false: Self) -> Self {
+        Self {
+            x: if mask.x { if_true.x } else { if_false.x },
+            y: if mask.y { if_true.y } else { if_false.y },
+        }
+    }
+
+    /// Creates a new vector from an array.
+ #[inline] + pub const fn from_array(a: [u16; 2]) -> Self { + Self::new(a[0], a[1]) + } + + /// `[x, y]` + #[inline] + pub const fn to_array(&self) -> [u16; 2] { + [self.x, self.y] + } + + /// Creates a vector from the first 2 values in `slice`. + /// + /// # Panics + /// + /// Panics if `slice` is less than 2 elements long. + #[inline] + pub const fn from_slice(slice: &[u16]) -> Self { + Self::new(slice[0], slice[1]) + } + + /// Writes the elements of `self` to the first 2 elements in `slice`. + /// + /// # Panics + /// + /// Panics if `slice` is less than 2 elements long. + #[inline] + pub fn write_to_slice(self, slice: &mut [u16]) { + slice[0] = self.x; + slice[1] = self.y; + } + + /// Creates a 3D vector from `self` and the given `z` value. + #[inline] + pub const fn extend(self, z: u16) -> U16Vec3 { + U16Vec3::new(self.x, self.y, z) + } + + /// Computes the dot product of `self` and `rhs`. + #[inline] + pub fn dot(self, rhs: Self) -> u16 { + (self.x * rhs.x) + (self.y * rhs.y) + } + + /// Returns a vector where every component is the dot product of `self` and `rhs`. + #[inline] + pub fn dot_into_vec(self, rhs: Self) -> Self { + Self::splat(self.dot(rhs)) + } + + /// Returns a vector containing the minimum values for each element of `self` and `rhs`. + /// + /// In other words this computes `[self.x.min(rhs.x), self.y.min(rhs.y), ..]`. + #[inline] + pub fn min(self, rhs: Self) -> Self { + Self { + x: self.x.min(rhs.x), + y: self.y.min(rhs.y), + } + } + + /// Returns a vector containing the maximum values for each element of `self` and `rhs`. + /// + /// In other words this computes `[self.x.max(rhs.x), self.y.max(rhs.y), ..]`. + #[inline] + pub fn max(self, rhs: Self) -> Self { + Self { + x: self.x.max(rhs.x), + y: self.y.max(rhs.y), + } + } + + /// Component-wise clamping of values, similar to [`u16::clamp`]. + /// + /// Each element in `min` must be less-or-equal to the corresponding element in `max`. + /// + /// # Panics + /// + /// Will panic if `min` is greater than `max` when `glam_assert` is enabled. + #[inline] + pub fn clamp(self, min: Self, max: Self) -> Self { + glam_assert!(min.cmple(max).all(), "clamp: expected min <= max"); + self.max(min).min(max) + } + + /// Returns the horizontal minimum of `self`. + /// + /// In other words this computes `min(x, y, ..)`. + #[inline] + pub fn min_element(self) -> u16 { + self.x.min(self.y) + } + + /// Returns the horizontal maximum of `self`. + /// + /// In other words this computes `max(x, y, ..)`. + #[inline] + pub fn max_element(self) -> u16 { + self.x.max(self.y) + } + + /// Returns a vector mask containing the result of a `==` comparison for each element of + /// `self` and `rhs`. + /// + /// In other words, this computes `[self.x == rhs.x, self.y == rhs.y, ..]` for all + /// elements. + #[inline] + pub fn cmpeq(self, rhs: Self) -> BVec2 { + BVec2::new(self.x.eq(&rhs.x), self.y.eq(&rhs.y)) + } + + /// Returns a vector mask containing the result of a `!=` comparison for each element of + /// `self` and `rhs`. + /// + /// In other words this computes `[self.x != rhs.x, self.y != rhs.y, ..]` for all + /// elements. + #[inline] + pub fn cmpne(self, rhs: Self) -> BVec2 { + BVec2::new(self.x.ne(&rhs.x), self.y.ne(&rhs.y)) + } + + /// Returns a vector mask containing the result of a `>=` comparison for each element of + /// `self` and `rhs`. + /// + /// In other words this computes `[self.x >= rhs.x, self.y >= rhs.y, ..]` for all + /// elements. 
+ #[inline] + pub fn cmpge(self, rhs: Self) -> BVec2 { + BVec2::new(self.x.ge(&rhs.x), self.y.ge(&rhs.y)) + } + + /// Returns a vector mask containing the result of a `>` comparison for each element of + /// `self` and `rhs`. + /// + /// In other words this computes `[self.x > rhs.x, self.y > rhs.y, ..]` for all + /// elements. + #[inline] + pub fn cmpgt(self, rhs: Self) -> BVec2 { + BVec2::new(self.x.gt(&rhs.x), self.y.gt(&rhs.y)) + } + + /// Returns a vector mask containing the result of a `<=` comparison for each element of + /// `self` and `rhs`. + /// + /// In other words this computes `[self.x <= rhs.x, self.y <= rhs.y, ..]` for all + /// elements. + #[inline] + pub fn cmple(self, rhs: Self) -> BVec2 { + BVec2::new(self.x.le(&rhs.x), self.y.le(&rhs.y)) + } + + /// Returns a vector mask containing the result of a `<` comparison for each element of + /// `self` and `rhs`. + /// + /// In other words this computes `[self.x < rhs.x, self.y < rhs.y, ..]` for all + /// elements. + #[inline] + pub fn cmplt(self, rhs: Self) -> BVec2 { + BVec2::new(self.x.lt(&rhs.x), self.y.lt(&rhs.y)) + } + + /// Computes the squared length of `self`. + #[doc(alias = "magnitude2")] + #[inline] + pub fn length_squared(self) -> u16 { + self.dot(self) + } + + /// Casts all elements of `self` to `f32`. + #[inline] + pub fn as_vec2(&self) -> crate::Vec2 { + crate::Vec2::new(self.x as f32, self.y as f32) + } + + /// Casts all elements of `self` to `f64`. + #[inline] + pub fn as_dvec2(&self) -> crate::DVec2 { + crate::DVec2::new(self.x as f64, self.y as f64) + } + + /// Casts all elements of `self` to `i16`. + #[inline] + pub fn as_i16vec2(&self) -> crate::I16Vec2 { + crate::I16Vec2::new(self.x as i16, self.y as i16) + } + + /// Casts all elements of `self` to `i32`. + #[inline] + pub fn as_ivec2(&self) -> crate::IVec2 { + crate::IVec2::new(self.x as i32, self.y as i32) + } + + /// Casts all elements of `self` to `u32`. + #[inline] + pub fn as_uvec2(&self) -> crate::UVec2 { + crate::UVec2::new(self.x as u32, self.y as u32) + } + + /// Casts all elements of `self` to `i64`. + #[inline] + pub fn as_i64vec2(&self) -> crate::I64Vec2 { + crate::I64Vec2::new(self.x as i64, self.y as i64) + } + + /// Casts all elements of `self` to `u64`. + #[inline] + pub fn as_u64vec2(&self) -> crate::U64Vec2 { + crate::U64Vec2::new(self.x as u64, self.y as u64) + } + + /// Returns a vector containing the wrapping addition of `self` and `rhs`. + /// + /// In other words this computes `[self.x.wrapping_add(rhs.x), self.y.wrapping_add(rhs.y), ..]`. + #[inline] + #[must_use] + pub const fn wrapping_add(self, rhs: Self) -> Self { + Self { + x: self.x.wrapping_add(rhs.x), + y: self.y.wrapping_add(rhs.y), + } + } + + /// Returns a vector containing the wrapping subtraction of `self` and `rhs`. + /// + /// In other words this computes `[self.x.wrapping_sub(rhs.x), self.y.wrapping_sub(rhs.y), ..]`. + #[inline] + #[must_use] + pub const fn wrapping_sub(self, rhs: Self) -> Self { + Self { + x: self.x.wrapping_sub(rhs.x), + y: self.y.wrapping_sub(rhs.y), + } + } + + /// Returns a vector containing the wrapping multiplication of `self` and `rhs`. + /// + /// In other words this computes `[self.x.wrapping_mul(rhs.x), self.y.wrapping_mul(rhs.y), ..]`. + #[inline] + #[must_use] + pub const fn wrapping_mul(self, rhs: Self) -> Self { + Self { + x: self.x.wrapping_mul(rhs.x), + y: self.y.wrapping_mul(rhs.y), + } + } + + /// Returns a vector containing the wrapping division of `self` and `rhs`. 
+    ///
+    /// In other words this computes `[self.x.wrapping_div(rhs.x), self.y.wrapping_div(rhs.y), ..]`.
+    #[inline]
+    #[must_use]
+    pub const fn wrapping_div(self, rhs: Self) -> Self {
+        Self {
+            x: self.x.wrapping_div(rhs.x),
+            y: self.y.wrapping_div(rhs.y),
+        }
+    }
+
+    /// Returns a vector containing the saturating addition of `self` and `rhs`.
+    ///
+    /// In other words this computes `[self.x.saturating_add(rhs.x), self.y.saturating_add(rhs.y), ..]`.
+    #[inline]
+    #[must_use]
+    pub const fn saturating_add(self, rhs: Self) -> Self {
+        Self {
+            x: self.x.saturating_add(rhs.x),
+            y: self.y.saturating_add(rhs.y),
+        }
+    }
+
+    /// Returns a vector containing the saturating subtraction of `self` and `rhs`.
+    ///
+    /// In other words this computes `[self.x.saturating_sub(rhs.x), self.y.saturating_sub(rhs.y), ..]`.
+    #[inline]
+    #[must_use]
+    pub const fn saturating_sub(self, rhs: Self) -> Self {
+        Self {
+            x: self.x.saturating_sub(rhs.x),
+            y: self.y.saturating_sub(rhs.y),
+        }
+    }
+
+    /// Returns a vector containing the saturating multiplication of `self` and `rhs`.
+    ///
+    /// In other words this computes `[self.x.saturating_mul(rhs.x), self.y.saturating_mul(rhs.y), ..]`.
+    #[inline]
+    #[must_use]
+    pub const fn saturating_mul(self, rhs: Self) -> Self {
+        Self {
+            x: self.x.saturating_mul(rhs.x),
+            y: self.y.saturating_mul(rhs.y),
+        }
+    }
+
+    /// Returns a vector containing the saturating division of `self` and `rhs`.
+    ///
+    /// In other words this computes `[self.x.saturating_div(rhs.x), self.y.saturating_div(rhs.y), ..]`.
+    #[inline]
+    #[must_use]
+    pub const fn saturating_div(self, rhs: Self) -> Self {
+        Self {
+            x: self.x.saturating_div(rhs.x),
+            y: self.y.saturating_div(rhs.y),
+        }
+    }
+}
+
+impl Default for U16Vec2 {
+    #[inline(always)]
+    fn default() -> Self {
+        Self::ZERO
+    }
+}
+
+impl Div for U16Vec2 {
+    type Output = Self;
+    #[inline]
+    fn div(self, rhs: Self) -> Self {
+        Self {
+            x: self.x.div(rhs.x),
+            y: self.y.div(rhs.y),
+        }
+    }
+}
+
+impl DivAssign for U16Vec2 {
+    #[inline]
+    fn div_assign(&mut self, rhs: Self) {
+        self.x.div_assign(rhs.x);
+        self.y.div_assign(rhs.y);
+    }
+}
+
+impl Div<u16> for U16Vec2 {
+    type Output = Self;
+    #[inline]
+    fn div(self, rhs: u16) -> Self {
+        Self {
+            x: self.x.div(rhs),
+            y: self.y.div(rhs),
+        }
+    }
+}
+
+impl DivAssign<u16> for U16Vec2 {
+    #[inline]
+    fn div_assign(&mut self, rhs: u16) {
+        self.x.div_assign(rhs);
+        self.y.div_assign(rhs);
+    }
+}
+
+impl Div<U16Vec2> for u16 {
+    type Output = U16Vec2;
+    #[inline]
+    fn div(self, rhs: U16Vec2) -> U16Vec2 {
+        U16Vec2 {
+            x: self.div(rhs.x),
+            y: self.div(rhs.y),
+        }
+    }
+}
+
+impl Mul for U16Vec2 {
+    type Output = Self;
+    #[inline]
+    fn mul(self, rhs: Self) -> Self {
+        Self {
+            x: self.x.mul(rhs.x),
+            y: self.y.mul(rhs.y),
+        }
+    }
+}
+
+impl MulAssign for U16Vec2 {
+    #[inline]
+    fn mul_assign(&mut self, rhs: Self) {
+        self.x.mul_assign(rhs.x);
+        self.y.mul_assign(rhs.y);
+    }
+}
+
+impl Mul<u16> for U16Vec2 {
+    type Output = Self;
+    #[inline]
+    fn mul(self, rhs: u16) -> Self {
+        Self {
+            x: self.x.mul(rhs),
+            y: self.y.mul(rhs),
+        }
+    }
+}
+
+impl MulAssign<u16> for U16Vec2 {
+    #[inline]
+    fn mul_assign(&mut self, rhs: u16) {
+        self.x.mul_assign(rhs);
+        self.y.mul_assign(rhs);
+    }
+}
+
+impl Mul<U16Vec2> for u16 {
+    type Output = U16Vec2;
+    #[inline]
+    fn mul(self, rhs: U16Vec2) -> U16Vec2 {
+        U16Vec2 {
+            x: self.mul(rhs.x),
+            y: self.mul(rhs.y),
+        }
+    }
+}
+
+impl Add for U16Vec2 {
+    type Output = Self;
+    #[inline]
+    fn add(self, rhs: Self) -> Self {
+        Self {
+            x: self.x.add(rhs.x),
+            y: self.y.add(rhs.y),
+        }
+    }
+}
+
+impl AddAssign for U16Vec2 {
+    #[inline]
+    fn add_assign(&mut self, rhs: Self) {
+        self.x.add_assign(rhs.x);
+        self.y.add_assign(rhs.y);
+    }
+}
+
+impl Add<u16> for U16Vec2 {
+    type Output = Self;
+    #[inline]
+    fn add(self, rhs: u16) -> Self {
+        Self {
+            x: self.x.add(rhs),
+            y: self.y.add(rhs),
+        }
+    }
+}
+
+impl AddAssign<u16> for U16Vec2 {
+    #[inline]
+    fn add_assign(&mut self, rhs: u16) {
+        self.x.add_assign(rhs);
+        self.y.add_assign(rhs);
+    }
+}
+
+impl Add<U16Vec2> for u16 {
+    type Output = U16Vec2;
+    #[inline]
+    fn add(self, rhs: U16Vec2) -> U16Vec2 {
+        U16Vec2 {
+            x: self.add(rhs.x),
+            y: self.add(rhs.y),
+        }
+    }
+}
+
+impl Sub for U16Vec2 {
+    type Output = Self;
+    #[inline]
+    fn sub(self, rhs: Self) -> Self {
+        Self {
+            x: self.x.sub(rhs.x),
+            y: self.y.sub(rhs.y),
+        }
+    }
+}
+
+impl SubAssign for U16Vec2 {
+    #[inline]
+    fn sub_assign(&mut self, rhs: U16Vec2) {
+        self.x.sub_assign(rhs.x);
+        self.y.sub_assign(rhs.y);
+    }
+}
+
+impl Sub<u16> for U16Vec2 {
+    type Output = Self;
+    #[inline]
+    fn sub(self, rhs: u16) -> Self {
+        Self {
+            x: self.x.sub(rhs),
+            y: self.y.sub(rhs),
+        }
+    }
+}
+
+impl SubAssign<u16> for U16Vec2 {
+    #[inline]
+    fn sub_assign(&mut self, rhs: u16) {
+        self.x.sub_assign(rhs);
+        self.y.sub_assign(rhs);
+    }
+}
+
+impl Sub<U16Vec2> for u16 {
+    type Output = U16Vec2;
+    #[inline]
+    fn sub(self, rhs: U16Vec2) -> U16Vec2 {
+        U16Vec2 {
+            x: self.sub(rhs.x),
+            y: self.sub(rhs.y),
+        }
+    }
+}
+
+impl Rem for U16Vec2 {
+    type Output = Self;
+    #[inline]
+    fn rem(self, rhs: Self) -> Self {
+        Self {
+            x: self.x.rem(rhs.x),
+            y: self.y.rem(rhs.y),
+        }
+    }
+}
+
+impl RemAssign for U16Vec2 {
+    #[inline]
+    fn rem_assign(&mut self, rhs: Self) {
+        self.x.rem_assign(rhs.x);
+        self.y.rem_assign(rhs.y);
+    }
+}
+
+impl Rem<u16> for U16Vec2 {
+    type Output = Self;
+    #[inline]
+    fn rem(self, rhs: u16) -> Self {
+        Self {
+            x: self.x.rem(rhs),
+            y: self.y.rem(rhs),
+        }
+    }
+}
+
+impl RemAssign<u16> for U16Vec2 {
+    #[inline]
+    fn rem_assign(&mut self, rhs: u16) {
+        self.x.rem_assign(rhs);
+        self.y.rem_assign(rhs);
+    }
+}
+
+impl Rem<U16Vec2> for u16 {
+    type Output = U16Vec2;
+    #[inline]
+    fn rem(self, rhs: U16Vec2) -> U16Vec2 {
+        U16Vec2 {
+            x: self.rem(rhs.x),
+            y: self.rem(rhs.y),
+        }
+    }
+}
+
+#[cfg(not(target_arch = "spirv"))]
+impl AsRef<[u16; 2]> for U16Vec2 {
+    #[inline]
+    fn as_ref(&self) -> &[u16; 2] {
+        unsafe { &*(self as *const U16Vec2 as *const [u16; 2]) }
+    }
+}
+
+#[cfg(not(target_arch = "spirv"))]
+impl AsMut<[u16; 2]> for U16Vec2 {
+    #[inline]
+    fn as_mut(&mut self) -> &mut [u16; 2] {
+        unsafe { &mut *(self as *mut U16Vec2 as *mut [u16; 2]) }
+    }
+}
+
+impl Sum for U16Vec2 {
+    #[inline]
+    fn sum<I>(iter: I) -> Self
+    where
+        I: Iterator<Item = Self>,
+    {
+        iter.fold(Self::ZERO, Self::add)
+    }
+}
+
+impl<'a> Sum<&'a Self> for U16Vec2 {
+    #[inline]
+    fn sum<I>(iter: I) -> Self
+    where
+        I: Iterator<Item = &'a Self>,
+    {
+        iter.fold(Self::ZERO, |a, &b| Self::add(a, b))
+    }
+}
+
+impl Product for U16Vec2 {
+    #[inline]
+    fn product<I>(iter: I) -> Self
+    where
+        I: Iterator<Item = Self>,
+    {
+        iter.fold(Self::ONE, Self::mul)
+    }
+}
+
+impl<'a> Product<&'a Self> for U16Vec2 {
+    #[inline]
+    fn product<I>(iter: I) -> Self
+    where
+        I: Iterator<Item = &'a Self>,
+    {
+        iter.fold(Self::ONE, |a, &b| Self::mul(a, b))
+    }
+}
+
+impl Not for U16Vec2 {
+    type Output = Self;
+    #[inline]
+    fn not(self) -> Self::Output {
+        Self {
+            x: self.x.not(),
+            y: self.y.not(),
+        }
+    }
+}
+
+impl BitAnd for U16Vec2 {
+    type Output = Self;
+    #[inline]
+    fn bitand(self, rhs: Self) -> Self::Output {
+        Self {
+            x: self.x.bitand(rhs.x),
+            y: self.y.bitand(rhs.y),
+        }
+    }
+}
+
bitor(self, rhs: Self) -> Self::Output { + Self { + x: self.x.bitor(rhs.x), + y: self.y.bitor(rhs.y), + } + } +} + +impl BitXor for U16Vec2 { + type Output = Self; + #[inline] + fn bitxor(self, rhs: Self) -> Self::Output { + Self { + x: self.x.bitxor(rhs.x), + y: self.y.bitxor(rhs.y), + } + } +} + +impl BitAnd for U16Vec2 { + type Output = Self; + #[inline] + fn bitand(self, rhs: u16) -> Self::Output { + Self { + x: self.x.bitand(rhs), + y: self.y.bitand(rhs), + } + } +} + +impl BitOr for U16Vec2 { + type Output = Self; + #[inline] + fn bitor(self, rhs: u16) -> Self::Output { + Self { + x: self.x.bitor(rhs), + y: self.y.bitor(rhs), + } + } +} + +impl BitXor for U16Vec2 { + type Output = Self; + #[inline] + fn bitxor(self, rhs: u16) -> Self::Output { + Self { + x: self.x.bitxor(rhs), + y: self.y.bitxor(rhs), + } + } +} + +impl Shl for U16Vec2 { + type Output = Self; + #[inline] + fn shl(self, rhs: i8) -> Self::Output { + Self { + x: self.x.shl(rhs), + y: self.y.shl(rhs), + } + } +} + +impl Shr for U16Vec2 { + type Output = Self; + #[inline] + fn shr(self, rhs: i8) -> Self::Output { + Self { + x: self.x.shr(rhs), + y: self.y.shr(rhs), + } + } +} + +impl Shl for U16Vec2 { + type Output = Self; + #[inline] + fn shl(self, rhs: i16) -> Self::Output { + Self { + x: self.x.shl(rhs), + y: self.y.shl(rhs), + } + } +} + +impl Shr for U16Vec2 { + type Output = Self; + #[inline] + fn shr(self, rhs: i16) -> Self::Output { + Self { + x: self.x.shr(rhs), + y: self.y.shr(rhs), + } + } +} + +impl Shl for U16Vec2 { + type Output = Self; + #[inline] + fn shl(self, rhs: i32) -> Self::Output { + Self { + x: self.x.shl(rhs), + y: self.y.shl(rhs), + } + } +} + +impl Shr for U16Vec2 { + type Output = Self; + #[inline] + fn shr(self, rhs: i32) -> Self::Output { + Self { + x: self.x.shr(rhs), + y: self.y.shr(rhs), + } + } +} + +impl Shl for U16Vec2 { + type Output = Self; + #[inline] + fn shl(self, rhs: i64) -> Self::Output { + Self { + x: self.x.shl(rhs), + y: self.y.shl(rhs), + } + } +} + +impl Shr for U16Vec2 { + type Output = Self; + #[inline] + fn shr(self, rhs: i64) -> Self::Output { + Self { + x: self.x.shr(rhs), + y: self.y.shr(rhs), + } + } +} + +impl Shl for U16Vec2 { + type Output = Self; + #[inline] + fn shl(self, rhs: u8) -> Self::Output { + Self { + x: self.x.shl(rhs), + y: self.y.shl(rhs), + } + } +} + +impl Shr for U16Vec2 { + type Output = Self; + #[inline] + fn shr(self, rhs: u8) -> Self::Output { + Self { + x: self.x.shr(rhs), + y: self.y.shr(rhs), + } + } +} + +impl Shl for U16Vec2 { + type Output = Self; + #[inline] + fn shl(self, rhs: u16) -> Self::Output { + Self { + x: self.x.shl(rhs), + y: self.y.shl(rhs), + } + } +} + +impl Shr for U16Vec2 { + type Output = Self; + #[inline] + fn shr(self, rhs: u16) -> Self::Output { + Self { + x: self.x.shr(rhs), + y: self.y.shr(rhs), + } + } +} + +impl Shl for U16Vec2 { + type Output = Self; + #[inline] + fn shl(self, rhs: u32) -> Self::Output { + Self { + x: self.x.shl(rhs), + y: self.y.shl(rhs), + } + } +} + +impl Shr for U16Vec2 { + type Output = Self; + #[inline] + fn shr(self, rhs: u32) -> Self::Output { + Self { + x: self.x.shr(rhs), + y: self.y.shr(rhs), + } + } +} + +impl Shl for U16Vec2 { + type Output = Self; + #[inline] + fn shl(self, rhs: u64) -> Self::Output { + Self { + x: self.x.shl(rhs), + y: self.y.shl(rhs), + } + } +} + +impl Shr for U16Vec2 { + type Output = Self; + #[inline] + fn shr(self, rhs: u64) -> Self::Output { + Self { + x: self.x.shr(rhs), + y: self.y.shr(rhs), + } + } +} + +impl Shl for U16Vec2 { + type Output = Self; + 
#[inline] + fn shl(self, rhs: crate::IVec2) -> Self::Output { + Self { + x: self.x.shl(rhs.x), + y: self.y.shl(rhs.y), + } + } +} + +impl Shr for U16Vec2 { + type Output = Self; + #[inline] + fn shr(self, rhs: crate::IVec2) -> Self::Output { + Self { + x: self.x.shr(rhs.x), + y: self.y.shr(rhs.y), + } + } +} + +impl Shl for U16Vec2 { + type Output = Self; + #[inline] + fn shl(self, rhs: crate::UVec2) -> Self::Output { + Self { + x: self.x.shl(rhs.x), + y: self.y.shl(rhs.y), + } + } +} + +impl Shr for U16Vec2 { + type Output = Self; + #[inline] + fn shr(self, rhs: crate::UVec2) -> Self::Output { + Self { + x: self.x.shr(rhs.x), + y: self.y.shr(rhs.y), + } + } +} + +impl Index for U16Vec2 { + type Output = u16; + #[inline] + fn index(&self, index: usize) -> &Self::Output { + match index { + 0 => &self.x, + 1 => &self.y, + _ => panic!("index out of bounds"), + } + } +} + +impl IndexMut for U16Vec2 { + #[inline] + fn index_mut(&mut self, index: usize) -> &mut Self::Output { + match index { + 0 => &mut self.x, + 1 => &mut self.y, + _ => panic!("index out of bounds"), + } + } +} + +#[cfg(not(target_arch = "spirv"))] +impl fmt::Display for U16Vec2 { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "[{}, {}]", self.x, self.y) + } +} + +#[cfg(not(target_arch = "spirv"))] +impl fmt::Debug for U16Vec2 { + fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { + fmt.debug_tuple(stringify!(U16Vec2)) + .field(&self.x) + .field(&self.y) + .finish() + } +} + +impl From<[u16; 2]> for U16Vec2 { + #[inline] + fn from(a: [u16; 2]) -> Self { + Self::new(a[0], a[1]) + } +} + +impl From for [u16; 2] { + #[inline] + fn from(v: U16Vec2) -> Self { + [v.x, v.y] + } +} + +impl From<(u16, u16)> for U16Vec2 { + #[inline] + fn from(t: (u16, u16)) -> Self { + Self::new(t.0, t.1) + } +} + +impl From for (u16, u16) { + #[inline] + fn from(v: U16Vec2) -> Self { + (v.x, v.y) + } +} + +impl TryFrom for U16Vec2 { + type Error = core::num::TryFromIntError; + + #[inline] + fn try_from(v: I16Vec2) -> Result { + Ok(Self::new(u16::try_from(v.x)?, u16::try_from(v.y)?)) + } +} + +impl TryFrom for U16Vec2 { + type Error = core::num::TryFromIntError; + + #[inline] + fn try_from(v: IVec2) -> Result { + Ok(Self::new(u16::try_from(v.x)?, u16::try_from(v.y)?)) + } +} + +impl TryFrom for U16Vec2 { + type Error = core::num::TryFromIntError; + + #[inline] + fn try_from(v: UVec2) -> Result { + Ok(Self::new(u16::try_from(v.x)?, u16::try_from(v.y)?)) + } +} + +impl TryFrom for U16Vec2 { + type Error = core::num::TryFromIntError; + + #[inline] + fn try_from(v: I64Vec2) -> Result { + Ok(Self::new(u16::try_from(v.x)?, u16::try_from(v.y)?)) + } +} + +impl TryFrom for U16Vec2 { + type Error = core::num::TryFromIntError; + + #[inline] + fn try_from(v: U64Vec2) -> Result { + Ok(Self::new(u16::try_from(v.x)?, u16::try_from(v.y)?)) + } +} diff --git a/src/u16/u16vec3.rs b/src/u16/u16vec3.rs new file mode 100644 index 00000000..7623b835 --- /dev/null +++ b/src/u16/u16vec3.rs @@ -0,0 +1,1231 @@ +// Generated from vec.rs.tera template. Edit the template, not the generated file. + +use crate::{BVec3, I16Vec3, I64Vec3, IVec3, U16Vec2, U16Vec4, U64Vec3, UVec3}; + +#[cfg(not(target_arch = "spirv"))] +use core::fmt; +use core::iter::{Product, Sum}; +use core::{f32, ops::*}; + +/// Creates a 3-dimensional vector. +#[inline(always)] +pub const fn u16vec3(x: u16, y: u16, z: u16) -> U16Vec3 { + U16Vec3::new(x, y, z) +} + +/// A 3-dimensional vector. 
+#[cfg_attr(not(target_arch = "spirv"), derive(Hash))] +#[derive(Clone, Copy, PartialEq, Eq)] +#[cfg_attr(not(target_arch = "spirv"), repr(C))] +#[cfg_attr(target_arch = "spirv", repr(simd))] +pub struct U16Vec3 { + pub x: u16, + pub y: u16, + pub z: u16, +} + +impl U16Vec3 { + /// All zeroes. + pub const ZERO: Self = Self::splat(0); + + /// All ones. + pub const ONE: Self = Self::splat(1); + + /// All `u16::MIN`. + pub const MIN: Self = Self::splat(u16::MIN); + + /// All `u16::MAX`. + pub const MAX: Self = Self::splat(u16::MAX); + + /// A unit vector pointing along the positive X axis. + pub const X: Self = Self::new(1, 0, 0); + + /// A unit vector pointing along the positive Y axis. + pub const Y: Self = Self::new(0, 1, 0); + + /// A unit vector pointing along the positive Z axis. + pub const Z: Self = Self::new(0, 0, 1); + + /// The unit axes. + pub const AXES: [Self; 3] = [Self::X, Self::Y, Self::Z]; + + /// Creates a new vector. + #[inline(always)] + pub const fn new(x: u16, y: u16, z: u16) -> Self { + Self { x, y, z } + } + + /// Creates a vector with all elements set to `v`. + #[inline] + pub const fn splat(v: u16) -> Self { + Self { x: v, y: v, z: v } + } + + /// Creates a vector from the elements in `if_true` and `if_false`, selecting which to use + /// for each element of `self`. + /// + /// A true element in the mask uses the corresponding element from `if_true`, and false + /// uses the element from `if_false`. + #[inline] + pub fn select(mask: BVec3, if_true: Self, if_false: Self) -> Self { + Self { + x: if mask.x { if_true.x } else { if_false.x }, + y: if mask.y { if_true.y } else { if_false.y }, + z: if mask.z { if_true.z } else { if_false.z }, + } + } + + /// Creates a new vector from an array. + #[inline] + pub const fn from_array(a: [u16; 3]) -> Self { + Self::new(a[0], a[1], a[2]) + } + + /// `[x, y, z]` + #[inline] + pub const fn to_array(&self) -> [u16; 3] { + [self.x, self.y, self.z] + } + + /// Creates a vector from the first 3 values in `slice`. + /// + /// # Panics + /// + /// Panics if `slice` is less than 3 elements long. + #[inline] + pub const fn from_slice(slice: &[u16]) -> Self { + Self::new(slice[0], slice[1], slice[2]) + } + + /// Writes the elements of `self` to the first 3 elements in `slice`. + /// + /// # Panics + /// + /// Panics if `slice` is less than 3 elements long. + #[inline] + pub fn write_to_slice(self, slice: &mut [u16]) { + slice[0] = self.x; + slice[1] = self.y; + slice[2] = self.z; + } + + /// Internal method for creating a 3D vector from a 4D vector, discarding `w`. + #[allow(dead_code)] + #[inline] + pub(crate) fn from_vec4(v: U16Vec4) -> Self { + Self { + x: v.x, + y: v.y, + z: v.z, + } + } + + /// Creates a 4D vector from `self` and the given `w` value. + #[inline] + pub fn extend(self, w: u16) -> U16Vec4 { + U16Vec4::new(self.x, self.y, self.z, w) + } + + /// Creates a 2D vector from the `x` and `y` elements of `self`, discarding `z`. + /// + /// Truncation may also be performed by using [`self.xy()`][crate::swizzles::Vec3Swizzles::xy()]. + #[inline] + pub fn truncate(self) -> U16Vec2 { + use crate::swizzles::Vec3Swizzles; + self.xy() + } + + /// Computes the dot product of `self` and `rhs`. + #[inline] + pub fn dot(self, rhs: Self) -> u16 { + (self.x * rhs.x) + (self.y * rhs.y) + (self.z * rhs.z) + } + + /// Returns a vector where every component is the dot product of `self` and `rhs`. + #[inline] + pub fn dot_into_vec(self, rhs: Self) -> Self { + Self::splat(self.dot(rhs)) + } + + /// Computes the cross product of `self` and `rhs`. 
+ #[inline] + pub fn cross(self, rhs: Self) -> Self { + Self { + x: self.y * rhs.z - rhs.y * self.z, + y: self.z * rhs.x - rhs.z * self.x, + z: self.x * rhs.y - rhs.x * self.y, + } + } + + /// Returns a vector containing the minimum values for each element of `self` and `rhs`. + /// + /// In other words this computes `[self.x.min(rhs.x), self.y.min(rhs.y), ..]`. + #[inline] + pub fn min(self, rhs: Self) -> Self { + Self { + x: self.x.min(rhs.x), + y: self.y.min(rhs.y), + z: self.z.min(rhs.z), + } + } + + /// Returns a vector containing the maximum values for each element of `self` and `rhs`. + /// + /// In other words this computes `[self.x.max(rhs.x), self.y.max(rhs.y), ..]`. + #[inline] + pub fn max(self, rhs: Self) -> Self { + Self { + x: self.x.max(rhs.x), + y: self.y.max(rhs.y), + z: self.z.max(rhs.z), + } + } + + /// Component-wise clamping of values, similar to [`u16::clamp`]. + /// + /// Each element in `min` must be less-or-equal to the corresponding element in `max`. + /// + /// # Panics + /// + /// Will panic if `min` is greater than `max` when `glam_assert` is enabled. + #[inline] + pub fn clamp(self, min: Self, max: Self) -> Self { + glam_assert!(min.cmple(max).all(), "clamp: expected min <= max"); + self.max(min).min(max) + } + + /// Returns the horizontal minimum of `self`. + /// + /// In other words this computes `min(x, y, ..)`. + #[inline] + pub fn min_element(self) -> u16 { + self.x.min(self.y.min(self.z)) + } + + /// Returns the horizontal maximum of `self`. + /// + /// In other words this computes `max(x, y, ..)`. + #[inline] + pub fn max_element(self) -> u16 { + self.x.max(self.y.max(self.z)) + } + + /// Returns a vector mask containing the result of a `==` comparison for each element of + /// `self` and `rhs`. + /// + /// In other words, this computes `[self.x == rhs.x, self.y == rhs.y, ..]` for all + /// elements. + #[inline] + pub fn cmpeq(self, rhs: Self) -> BVec3 { + BVec3::new(self.x.eq(&rhs.x), self.y.eq(&rhs.y), self.z.eq(&rhs.z)) + } + + /// Returns a vector mask containing the result of a `!=` comparison for each element of + /// `self` and `rhs`. + /// + /// In other words this computes `[self.x != rhs.x, self.y != rhs.y, ..]` for all + /// elements. + #[inline] + pub fn cmpne(self, rhs: Self) -> BVec3 { + BVec3::new(self.x.ne(&rhs.x), self.y.ne(&rhs.y), self.z.ne(&rhs.z)) + } + + /// Returns a vector mask containing the result of a `>=` comparison for each element of + /// `self` and `rhs`. + /// + /// In other words this computes `[self.x >= rhs.x, self.y >= rhs.y, ..]` for all + /// elements. + #[inline] + pub fn cmpge(self, rhs: Self) -> BVec3 { + BVec3::new(self.x.ge(&rhs.x), self.y.ge(&rhs.y), self.z.ge(&rhs.z)) + } + + /// Returns a vector mask containing the result of a `>` comparison for each element of + /// `self` and `rhs`. + /// + /// In other words this computes `[self.x > rhs.x, self.y > rhs.y, ..]` for all + /// elements. + #[inline] + pub fn cmpgt(self, rhs: Self) -> BVec3 { + BVec3::new(self.x.gt(&rhs.x), self.y.gt(&rhs.y), self.z.gt(&rhs.z)) + } + + /// Returns a vector mask containing the result of a `<=` comparison for each element of + /// `self` and `rhs`. + /// + /// In other words this computes `[self.x <= rhs.x, self.y <= rhs.y, ..]` for all + /// elements. + #[inline] + pub fn cmple(self, rhs: Self) -> BVec3 { + BVec3::new(self.x.le(&rhs.x), self.y.le(&rhs.y), self.z.le(&rhs.z)) + } + + /// Returns a vector mask containing the result of a `<` comparison for each element of + /// `self` and `rhs`. 
+ /// + /// In other words this computes `[self.x < rhs.x, self.y < rhs.y, ..]` for all + /// elements. + #[inline] + pub fn cmplt(self, rhs: Self) -> BVec3 { + BVec3::new(self.x.lt(&rhs.x), self.y.lt(&rhs.y), self.z.lt(&rhs.z)) + } + + /// Computes the squared length of `self`. + #[doc(alias = "magnitude2")] + #[inline] + pub fn length_squared(self) -> u16 { + self.dot(self) + } + + /// Casts all elements of `self` to `f32`. + #[inline] + pub fn as_vec3(&self) -> crate::Vec3 { + crate::Vec3::new(self.x as f32, self.y as f32, self.z as f32) + } + + /// Casts all elements of `self` to `f32`. + #[inline] + pub fn as_vec3a(&self) -> crate::Vec3A { + crate::Vec3A::new(self.x as f32, self.y as f32, self.z as f32) + } + + /// Casts all elements of `self` to `f64`. + #[inline] + pub fn as_dvec3(&self) -> crate::DVec3 { + crate::DVec3::new(self.x as f64, self.y as f64, self.z as f64) + } + + /// Casts all elements of `self` to `i16`. + #[inline] + pub fn as_i16vec3(&self) -> crate::I16Vec3 { + crate::I16Vec3::new(self.x as i16, self.y as i16, self.z as i16) + } + + /// Casts all elements of `self` to `i32`. + #[inline] + pub fn as_ivec3(&self) -> crate::IVec3 { + crate::IVec3::new(self.x as i32, self.y as i32, self.z as i32) + } + + /// Casts all elements of `self` to `u32`. + #[inline] + pub fn as_uvec3(&self) -> crate::UVec3 { + crate::UVec3::new(self.x as u32, self.y as u32, self.z as u32) + } + + /// Casts all elements of `self` to `i64`. + #[inline] + pub fn as_i64vec3(&self) -> crate::I64Vec3 { + crate::I64Vec3::new(self.x as i64, self.y as i64, self.z as i64) + } + + /// Casts all elements of `self` to `u64`. + #[inline] + pub fn as_u64vec3(&self) -> crate::U64Vec3 { + crate::U64Vec3::new(self.x as u64, self.y as u64, self.z as u64) + } + + /// Returns a vector containing the wrapping addition of `self` and `rhs`. + /// + /// In other words this computes `[self.x.wrapping_add(rhs.x), self.y.wrapping_add(rhs.y), ..]`. + #[inline] + #[must_use] + pub const fn wrapping_add(self, rhs: Self) -> Self { + Self { + x: self.x.wrapping_add(rhs.x), + y: self.y.wrapping_add(rhs.y), + z: self.z.wrapping_add(rhs.z), + } + } + + /// Returns a vector containing the wrapping subtraction of `self` and `rhs`. + /// + /// In other words this computes `[self.x.wrapping_sub(rhs.x), self.y.wrapping_sub(rhs.y), ..]`. + #[inline] + #[must_use] + pub const fn wrapping_sub(self, rhs: Self) -> Self { + Self { + x: self.x.wrapping_sub(rhs.x), + y: self.y.wrapping_sub(rhs.y), + z: self.z.wrapping_sub(rhs.z), + } + } + + /// Returns a vector containing the wrapping multiplication of `self` and `rhs`. + /// + /// In other words this computes `[self.x.wrapping_mul(rhs.x), self.y.wrapping_mul(rhs.y), ..]`. + #[inline] + #[must_use] + pub const fn wrapping_mul(self, rhs: Self) -> Self { + Self { + x: self.x.wrapping_mul(rhs.x), + y: self.y.wrapping_mul(rhs.y), + z: self.z.wrapping_mul(rhs.z), + } + } + + /// Returns a vector containing the wrapping division of `self` and `rhs`. + /// + /// In other words this computes `[self.x.wrapping_div(rhs.x), self.y.wrapping_div(rhs.y), ..]`. + #[inline] + #[must_use] + pub const fn wrapping_div(self, rhs: Self) -> Self { + Self { + x: self.x.wrapping_div(rhs.x), + y: self.y.wrapping_div(rhs.y), + z: self.z.wrapping_div(rhs.z), + } + } + + /// Returns a vector containing the saturating addition of `self` and `rhs`. + /// + /// In other words this computes `[self.x.saturating_add(rhs.x), self.y.saturating_add(rhs.y), ..]`. 
+ #[inline] + #[must_use] + pub const fn saturating_add(self, rhs: Self) -> Self { + Self { + x: self.x.saturating_add(rhs.x), + y: self.y.saturating_add(rhs.y), + z: self.z.saturating_add(rhs.z), + } + } + + /// Returns a vector containing the saturating subtraction of `self` and `rhs`. + /// + /// In other words this computes `[self.x.saturating_sub(rhs.x), self.y.saturating_sub(rhs.y), ..]`. + #[inline] + #[must_use] + pub const fn saturating_sub(self, rhs: Self) -> Self { + Self { + x: self.x.saturating_sub(rhs.x), + y: self.y.saturating_sub(rhs.y), + z: self.z.saturating_sub(rhs.z), + } + } + + /// Returns a vector containing the saturating multiplication of `self` and `rhs`. + /// + /// In other words this computes `[self.x.saturating_mul(rhs.x), self.y.saturating_mul(rhs.y), ..]`. + #[inline] + #[must_use] + pub const fn saturating_mul(self, rhs: Self) -> Self { + Self { + x: self.x.saturating_mul(rhs.x), + y: self.y.saturating_mul(rhs.y), + z: self.z.saturating_mul(rhs.z), + } + } + + /// Returns a vector containing the saturating division of `self` and `rhs`. + /// + /// In other words this computes `[self.x.saturating_div(rhs.x), self.y.saturating_div(rhs.y), ..]`. + #[inline] + #[must_use] + pub const fn saturating_div(self, rhs: Self) -> Self { + Self { + x: self.x.saturating_div(rhs.x), + y: self.y.saturating_div(rhs.y), + z: self.z.saturating_div(rhs.z), + } + } +} + +impl Default for U16Vec3 { + #[inline(always)] + fn default() -> Self { + Self::ZERO + } +} + +impl Div for U16Vec3 { + type Output = Self; + #[inline] + fn div(self, rhs: Self) -> Self { + Self { + x: self.x.div(rhs.x), + y: self.y.div(rhs.y), + z: self.z.div(rhs.z), + } + } +} + +impl DivAssign for U16Vec3 { + #[inline] + fn div_assign(&mut self, rhs: Self) { + self.x.div_assign(rhs.x); + self.y.div_assign(rhs.y); + self.z.div_assign(rhs.z); + } +} + +impl Div for U16Vec3 { + type Output = Self; + #[inline] + fn div(self, rhs: u16) -> Self { + Self { + x: self.x.div(rhs), + y: self.y.div(rhs), + z: self.z.div(rhs), + } + } +} + +impl DivAssign for U16Vec3 { + #[inline] + fn div_assign(&mut self, rhs: u16) { + self.x.div_assign(rhs); + self.y.div_assign(rhs); + self.z.div_assign(rhs); + } +} + +impl Div for u16 { + type Output = U16Vec3; + #[inline] + fn div(self, rhs: U16Vec3) -> U16Vec3 { + U16Vec3 { + x: self.div(rhs.x), + y: self.div(rhs.y), + z: self.div(rhs.z), + } + } +} + +impl Mul for U16Vec3 { + type Output = Self; + #[inline] + fn mul(self, rhs: Self) -> Self { + Self { + x: self.x.mul(rhs.x), + y: self.y.mul(rhs.y), + z: self.z.mul(rhs.z), + } + } +} + +impl MulAssign for U16Vec3 { + #[inline] + fn mul_assign(&mut self, rhs: Self) { + self.x.mul_assign(rhs.x); + self.y.mul_assign(rhs.y); + self.z.mul_assign(rhs.z); + } +} + +impl Mul for U16Vec3 { + type Output = Self; + #[inline] + fn mul(self, rhs: u16) -> Self { + Self { + x: self.x.mul(rhs), + y: self.y.mul(rhs), + z: self.z.mul(rhs), + } + } +} + +impl MulAssign for U16Vec3 { + #[inline] + fn mul_assign(&mut self, rhs: u16) { + self.x.mul_assign(rhs); + self.y.mul_assign(rhs); + self.z.mul_assign(rhs); + } +} + +impl Mul for u16 { + type Output = U16Vec3; + #[inline] + fn mul(self, rhs: U16Vec3) -> U16Vec3 { + U16Vec3 { + x: self.mul(rhs.x), + y: self.mul(rhs.y), + z: self.mul(rhs.z), + } + } +} + +impl Add for U16Vec3 { + type Output = Self; + #[inline] + fn add(self, rhs: Self) -> Self { + Self { + x: self.x.add(rhs.x), + y: self.y.add(rhs.y), + z: self.z.add(rhs.z), + } + } +} + +impl AddAssign for U16Vec3 { + #[inline] + fn add_assign(&mut 
self, rhs: Self) { + self.x.add_assign(rhs.x); + self.y.add_assign(rhs.y); + self.z.add_assign(rhs.z); + } +} + +impl Add for U16Vec3 { + type Output = Self; + #[inline] + fn add(self, rhs: u16) -> Self { + Self { + x: self.x.add(rhs), + y: self.y.add(rhs), + z: self.z.add(rhs), + } + } +} + +impl AddAssign for U16Vec3 { + #[inline] + fn add_assign(&mut self, rhs: u16) { + self.x.add_assign(rhs); + self.y.add_assign(rhs); + self.z.add_assign(rhs); + } +} + +impl Add for u16 { + type Output = U16Vec3; + #[inline] + fn add(self, rhs: U16Vec3) -> U16Vec3 { + U16Vec3 { + x: self.add(rhs.x), + y: self.add(rhs.y), + z: self.add(rhs.z), + } + } +} + +impl Sub for U16Vec3 { + type Output = Self; + #[inline] + fn sub(self, rhs: Self) -> Self { + Self { + x: self.x.sub(rhs.x), + y: self.y.sub(rhs.y), + z: self.z.sub(rhs.z), + } + } +} + +impl SubAssign for U16Vec3 { + #[inline] + fn sub_assign(&mut self, rhs: U16Vec3) { + self.x.sub_assign(rhs.x); + self.y.sub_assign(rhs.y); + self.z.sub_assign(rhs.z); + } +} + +impl Sub for U16Vec3 { + type Output = Self; + #[inline] + fn sub(self, rhs: u16) -> Self { + Self { + x: self.x.sub(rhs), + y: self.y.sub(rhs), + z: self.z.sub(rhs), + } + } +} + +impl SubAssign for U16Vec3 { + #[inline] + fn sub_assign(&mut self, rhs: u16) { + self.x.sub_assign(rhs); + self.y.sub_assign(rhs); + self.z.sub_assign(rhs); + } +} + +impl Sub for u16 { + type Output = U16Vec3; + #[inline] + fn sub(self, rhs: U16Vec3) -> U16Vec3 { + U16Vec3 { + x: self.sub(rhs.x), + y: self.sub(rhs.y), + z: self.sub(rhs.z), + } + } +} + +impl Rem for U16Vec3 { + type Output = Self; + #[inline] + fn rem(self, rhs: Self) -> Self { + Self { + x: self.x.rem(rhs.x), + y: self.y.rem(rhs.y), + z: self.z.rem(rhs.z), + } + } +} + +impl RemAssign for U16Vec3 { + #[inline] + fn rem_assign(&mut self, rhs: Self) { + self.x.rem_assign(rhs.x); + self.y.rem_assign(rhs.y); + self.z.rem_assign(rhs.z); + } +} + +impl Rem for U16Vec3 { + type Output = Self; + #[inline] + fn rem(self, rhs: u16) -> Self { + Self { + x: self.x.rem(rhs), + y: self.y.rem(rhs), + z: self.z.rem(rhs), + } + } +} + +impl RemAssign for U16Vec3 { + #[inline] + fn rem_assign(&mut self, rhs: u16) { + self.x.rem_assign(rhs); + self.y.rem_assign(rhs); + self.z.rem_assign(rhs); + } +} + +impl Rem for u16 { + type Output = U16Vec3; + #[inline] + fn rem(self, rhs: U16Vec3) -> U16Vec3 { + U16Vec3 { + x: self.rem(rhs.x), + y: self.rem(rhs.y), + z: self.rem(rhs.z), + } + } +} + +#[cfg(not(target_arch = "spirv"))] +impl AsRef<[u16; 3]> for U16Vec3 { + #[inline] + fn as_ref(&self) -> &[u16; 3] { + unsafe { &*(self as *const U16Vec3 as *const [u16; 3]) } + } +} + +#[cfg(not(target_arch = "spirv"))] +impl AsMut<[u16; 3]> for U16Vec3 { + #[inline] + fn as_mut(&mut self) -> &mut [u16; 3] { + unsafe { &mut *(self as *mut U16Vec3 as *mut [u16; 3]) } + } +} + +impl Sum for U16Vec3 { + #[inline] + fn sum(iter: I) -> Self + where + I: Iterator, + { + iter.fold(Self::ZERO, Self::add) + } +} + +impl<'a> Sum<&'a Self> for U16Vec3 { + #[inline] + fn sum(iter: I) -> Self + where + I: Iterator, + { + iter.fold(Self::ZERO, |a, &b| Self::add(a, b)) + } +} + +impl Product for U16Vec3 { + #[inline] + fn product(iter: I) -> Self + where + I: Iterator, + { + iter.fold(Self::ONE, Self::mul) + } +} + +impl<'a> Product<&'a Self> for U16Vec3 { + #[inline] + fn product(iter: I) -> Self + where + I: Iterator, + { + iter.fold(Self::ONE, |a, &b| Self::mul(a, b)) + } +} + +impl Not for U16Vec3 { + type Output = Self; + #[inline] + fn not(self) -> Self::Output { + Self { + x: 
self.x.not(), + y: self.y.not(), + z: self.z.not(), + } + } +} + +impl BitAnd for U16Vec3 { + type Output = Self; + #[inline] + fn bitand(self, rhs: Self) -> Self::Output { + Self { + x: self.x.bitand(rhs.x), + y: self.y.bitand(rhs.y), + z: self.z.bitand(rhs.z), + } + } +} + +impl BitOr for U16Vec3 { + type Output = Self; + #[inline] + fn bitor(self, rhs: Self) -> Self::Output { + Self { + x: self.x.bitor(rhs.x), + y: self.y.bitor(rhs.y), + z: self.z.bitor(rhs.z), + } + } +} + +impl BitXor for U16Vec3 { + type Output = Self; + #[inline] + fn bitxor(self, rhs: Self) -> Self::Output { + Self { + x: self.x.bitxor(rhs.x), + y: self.y.bitxor(rhs.y), + z: self.z.bitxor(rhs.z), + } + } +} + +impl BitAnd for U16Vec3 { + type Output = Self; + #[inline] + fn bitand(self, rhs: u16) -> Self::Output { + Self { + x: self.x.bitand(rhs), + y: self.y.bitand(rhs), + z: self.z.bitand(rhs), + } + } +} + +impl BitOr for U16Vec3 { + type Output = Self; + #[inline] + fn bitor(self, rhs: u16) -> Self::Output { + Self { + x: self.x.bitor(rhs), + y: self.y.bitor(rhs), + z: self.z.bitor(rhs), + } + } +} + +impl BitXor for U16Vec3 { + type Output = Self; + #[inline] + fn bitxor(self, rhs: u16) -> Self::Output { + Self { + x: self.x.bitxor(rhs), + y: self.y.bitxor(rhs), + z: self.z.bitxor(rhs), + } + } +} + +impl Shl for U16Vec3 { + type Output = Self; + #[inline] + fn shl(self, rhs: i8) -> Self::Output { + Self { + x: self.x.shl(rhs), + y: self.y.shl(rhs), + z: self.z.shl(rhs), + } + } +} + +impl Shr for U16Vec3 { + type Output = Self; + #[inline] + fn shr(self, rhs: i8) -> Self::Output { + Self { + x: self.x.shr(rhs), + y: self.y.shr(rhs), + z: self.z.shr(rhs), + } + } +} + +impl Shl for U16Vec3 { + type Output = Self; + #[inline] + fn shl(self, rhs: i16) -> Self::Output { + Self { + x: self.x.shl(rhs), + y: self.y.shl(rhs), + z: self.z.shl(rhs), + } + } +} + +impl Shr for U16Vec3 { + type Output = Self; + #[inline] + fn shr(self, rhs: i16) -> Self::Output { + Self { + x: self.x.shr(rhs), + y: self.y.shr(rhs), + z: self.z.shr(rhs), + } + } +} + +impl Shl for U16Vec3 { + type Output = Self; + #[inline] + fn shl(self, rhs: i32) -> Self::Output { + Self { + x: self.x.shl(rhs), + y: self.y.shl(rhs), + z: self.z.shl(rhs), + } + } +} + +impl Shr for U16Vec3 { + type Output = Self; + #[inline] + fn shr(self, rhs: i32) -> Self::Output { + Self { + x: self.x.shr(rhs), + y: self.y.shr(rhs), + z: self.z.shr(rhs), + } + } +} + +impl Shl for U16Vec3 { + type Output = Self; + #[inline] + fn shl(self, rhs: i64) -> Self::Output { + Self { + x: self.x.shl(rhs), + y: self.y.shl(rhs), + z: self.z.shl(rhs), + } + } +} + +impl Shr for U16Vec3 { + type Output = Self; + #[inline] + fn shr(self, rhs: i64) -> Self::Output { + Self { + x: self.x.shr(rhs), + y: self.y.shr(rhs), + z: self.z.shr(rhs), + } + } +} + +impl Shl for U16Vec3 { + type Output = Self; + #[inline] + fn shl(self, rhs: u8) -> Self::Output { + Self { + x: self.x.shl(rhs), + y: self.y.shl(rhs), + z: self.z.shl(rhs), + } + } +} + +impl Shr for U16Vec3 { + type Output = Self; + #[inline] + fn shr(self, rhs: u8) -> Self::Output { + Self { + x: self.x.shr(rhs), + y: self.y.shr(rhs), + z: self.z.shr(rhs), + } + } +} + +impl Shl for U16Vec3 { + type Output = Self; + #[inline] + fn shl(self, rhs: u16) -> Self::Output { + Self { + x: self.x.shl(rhs), + y: self.y.shl(rhs), + z: self.z.shl(rhs), + } + } +} + +impl Shr for U16Vec3 { + type Output = Self; + #[inline] + fn shr(self, rhs: u16) -> Self::Output { + Self { + x: self.x.shr(rhs), + y: self.y.shr(rhs), + z: self.z.shr(rhs), + } 
+ } +} + +impl Shl for U16Vec3 { + type Output = Self; + #[inline] + fn shl(self, rhs: u32) -> Self::Output { + Self { + x: self.x.shl(rhs), + y: self.y.shl(rhs), + z: self.z.shl(rhs), + } + } +} + +impl Shr for U16Vec3 { + type Output = Self; + #[inline] + fn shr(self, rhs: u32) -> Self::Output { + Self { + x: self.x.shr(rhs), + y: self.y.shr(rhs), + z: self.z.shr(rhs), + } + } +} + +impl Shl for U16Vec3 { + type Output = Self; + #[inline] + fn shl(self, rhs: u64) -> Self::Output { + Self { + x: self.x.shl(rhs), + y: self.y.shl(rhs), + z: self.z.shl(rhs), + } + } +} + +impl Shr for U16Vec3 { + type Output = Self; + #[inline] + fn shr(self, rhs: u64) -> Self::Output { + Self { + x: self.x.shr(rhs), + y: self.y.shr(rhs), + z: self.z.shr(rhs), + } + } +} + +impl Shl for U16Vec3 { + type Output = Self; + #[inline] + fn shl(self, rhs: crate::IVec3) -> Self::Output { + Self { + x: self.x.shl(rhs.x), + y: self.y.shl(rhs.y), + z: self.z.shl(rhs.z), + } + } +} + +impl Shr for U16Vec3 { + type Output = Self; + #[inline] + fn shr(self, rhs: crate::IVec3) -> Self::Output { + Self { + x: self.x.shr(rhs.x), + y: self.y.shr(rhs.y), + z: self.z.shr(rhs.z), + } + } +} + +impl Shl for U16Vec3 { + type Output = Self; + #[inline] + fn shl(self, rhs: crate::UVec3) -> Self::Output { + Self { + x: self.x.shl(rhs.x), + y: self.y.shl(rhs.y), + z: self.z.shl(rhs.z), + } + } +} + +impl Shr for U16Vec3 { + type Output = Self; + #[inline] + fn shr(self, rhs: crate::UVec3) -> Self::Output { + Self { + x: self.x.shr(rhs.x), + y: self.y.shr(rhs.y), + z: self.z.shr(rhs.z), + } + } +} + +impl Index for U16Vec3 { + type Output = u16; + #[inline] + fn index(&self, index: usize) -> &Self::Output { + match index { + 0 => &self.x, + 1 => &self.y, + 2 => &self.z, + _ => panic!("index out of bounds"), + } + } +} + +impl IndexMut for U16Vec3 { + #[inline] + fn index_mut(&mut self, index: usize) -> &mut Self::Output { + match index { + 0 => &mut self.x, + 1 => &mut self.y, + 2 => &mut self.z, + _ => panic!("index out of bounds"), + } + } +} + +#[cfg(not(target_arch = "spirv"))] +impl fmt::Display for U16Vec3 { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "[{}, {}, {}]", self.x, self.y, self.z) + } +} + +#[cfg(not(target_arch = "spirv"))] +impl fmt::Debug for U16Vec3 { + fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { + fmt.debug_tuple(stringify!(U16Vec3)) + .field(&self.x) + .field(&self.y) + .field(&self.z) + .finish() + } +} + +impl From<[u16; 3]> for U16Vec3 { + #[inline] + fn from(a: [u16; 3]) -> Self { + Self::new(a[0], a[1], a[2]) + } +} + +impl From for [u16; 3] { + #[inline] + fn from(v: U16Vec3) -> Self { + [v.x, v.y, v.z] + } +} + +impl From<(u16, u16, u16)> for U16Vec3 { + #[inline] + fn from(t: (u16, u16, u16)) -> Self { + Self::new(t.0, t.1, t.2) + } +} + +impl From for (u16, u16, u16) { + #[inline] + fn from(v: U16Vec3) -> Self { + (v.x, v.y, v.z) + } +} + +impl From<(U16Vec2, u16)> for U16Vec3 { + #[inline] + fn from((v, z): (U16Vec2, u16)) -> Self { + Self::new(v.x, v.y, z) + } +} + +impl TryFrom for U16Vec3 { + type Error = core::num::TryFromIntError; + + #[inline] + fn try_from(v: I16Vec3) -> Result { + Ok(Self::new( + u16::try_from(v.x)?, + u16::try_from(v.y)?, + u16::try_from(v.z)?, + )) + } +} + +impl TryFrom for U16Vec3 { + type Error = core::num::TryFromIntError; + + #[inline] + fn try_from(v: IVec3) -> Result { + Ok(Self::new( + u16::try_from(v.x)?, + u16::try_from(v.y)?, + u16::try_from(v.z)?, + )) + } +} + +impl TryFrom for U16Vec3 { + type Error = 
core::num::TryFromIntError; + + #[inline] + fn try_from(v: UVec3) -> Result { + Ok(Self::new( + u16::try_from(v.x)?, + u16::try_from(v.y)?, + u16::try_from(v.z)?, + )) + } +} + +impl TryFrom for U16Vec3 { + type Error = core::num::TryFromIntError; + + #[inline] + fn try_from(v: I64Vec3) -> Result { + Ok(Self::new( + u16::try_from(v.x)?, + u16::try_from(v.y)?, + u16::try_from(v.z)?, + )) + } +} + +impl TryFrom for U16Vec3 { + type Error = core::num::TryFromIntError; + + #[inline] + fn try_from(v: U64Vec3) -> Result { + Ok(Self::new( + u16::try_from(v.x)?, + u16::try_from(v.y)?, + u16::try_from(v.z)?, + )) + } +} diff --git a/src/u16/u16vec4.rs b/src/u16/u16vec4.rs new file mode 100644 index 00000000..f9c200c9 --- /dev/null +++ b/src/u16/u16vec4.rs @@ -0,0 +1,1334 @@ +// Generated from vec.rs.tera template. Edit the template, not the generated file. + +use crate::{BVec4, I16Vec4, I64Vec4, IVec4, U16Vec2, U16Vec3, U64Vec4, UVec4}; + +#[cfg(not(target_arch = "spirv"))] +use core::fmt; +use core::iter::{Product, Sum}; +use core::{f32, ops::*}; + +/// Creates a 4-dimensional vector. +#[inline(always)] +pub const fn u16vec4(x: u16, y: u16, z: u16, w: u16) -> U16Vec4 { + U16Vec4::new(x, y, z, w) +} + +/// A 4-dimensional vector. +#[cfg_attr(not(target_arch = "spirv"), derive(Hash))] +#[derive(Clone, Copy, PartialEq, Eq)] +#[cfg_attr(feature = "cuda", repr(align(16)))] +#[cfg_attr(not(target_arch = "spirv"), repr(C))] +#[cfg_attr(target_arch = "spirv", repr(simd))] +pub struct U16Vec4 { + pub x: u16, + pub y: u16, + pub z: u16, + pub w: u16, +} + +impl U16Vec4 { + /// All zeroes. + pub const ZERO: Self = Self::splat(0); + + /// All ones. + pub const ONE: Self = Self::splat(1); + + /// All `u16::MIN`. + pub const MIN: Self = Self::splat(u16::MIN); + + /// All `u16::MAX`. + pub const MAX: Self = Self::splat(u16::MAX); + + /// A unit vector pointing along the positive X axis. + pub const X: Self = Self::new(1, 0, 0, 0); + + /// A unit vector pointing along the positive Y axis. + pub const Y: Self = Self::new(0, 1, 0, 0); + + /// A unit vector pointing along the positive Z axis. + pub const Z: Self = Self::new(0, 0, 1, 0); + + /// A unit vector pointing along the positive W axis. + pub const W: Self = Self::new(0, 0, 0, 1); + + /// The unit axes. + pub const AXES: [Self; 4] = [Self::X, Self::Y, Self::Z, Self::W]; + + /// Creates a new vector. + #[inline(always)] + pub const fn new(x: u16, y: u16, z: u16, w: u16) -> Self { + Self { x, y, z, w } + } + + /// Creates a vector with all elements set to `v`. + #[inline] + pub const fn splat(v: u16) -> Self { + Self { + x: v, + + y: v, + + z: v, + + w: v, + } + } + + /// Creates a vector from the elements in `if_true` and `if_false`, selecting which to use + /// for each element of `self`. + /// + /// A true element in the mask uses the corresponding element from `if_true`, and false + /// uses the element from `if_false`. + #[inline] + pub fn select(mask: BVec4, if_true: Self, if_false: Self) -> Self { + Self { + x: if mask.x { if_true.x } else { if_false.x }, + y: if mask.y { if_true.y } else { if_false.y }, + z: if mask.z { if_true.z } else { if_false.z }, + w: if mask.w { if_true.w } else { if_false.w }, + } + } + + /// Creates a new vector from an array. + #[inline] + pub const fn from_array(a: [u16; 4]) -> Self { + Self::new(a[0], a[1], a[2], a[3]) + } + + /// `[x, y, z, w]` + #[inline] + pub const fn to_array(&self) -> [u16; 4] { + [self.x, self.y, self.z, self.w] + } + + /// Creates a vector from the first 4 values in `slice`. 
+ /// + /// # Panics + /// + /// Panics if `slice` is less than 4 elements long. + #[inline] + pub const fn from_slice(slice: &[u16]) -> Self { + Self::new(slice[0], slice[1], slice[2], slice[3]) + } + + /// Writes the elements of `self` to the first 4 elements in `slice`. + /// + /// # Panics + /// + /// Panics if `slice` is less than 4 elements long. + #[inline] + pub fn write_to_slice(self, slice: &mut [u16]) { + slice[0] = self.x; + slice[1] = self.y; + slice[2] = self.z; + slice[3] = self.w; + } + + /// Creates a 3D vector from the `x`, `y` and `z` elements of `self`, discarding `w`. + /// + /// Truncation to [`U16Vec3`] may also be performed by using [`self.xyz()`][crate::swizzles::Vec4Swizzles::xyz()]. + #[inline] + pub fn truncate(self) -> U16Vec3 { + use crate::swizzles::Vec4Swizzles; + self.xyz() + } + + /// Computes the dot product of `self` and `rhs`. + #[inline] + pub fn dot(self, rhs: Self) -> u16 { + (self.x * rhs.x) + (self.y * rhs.y) + (self.z * rhs.z) + (self.w * rhs.w) + } + + /// Returns a vector where every component is the dot product of `self` and `rhs`. + #[inline] + pub fn dot_into_vec(self, rhs: Self) -> Self { + Self::splat(self.dot(rhs)) + } + + /// Returns a vector containing the minimum values for each element of `self` and `rhs`. + /// + /// In other words this computes `[self.x.min(rhs.x), self.y.min(rhs.y), ..]`. + #[inline] + pub fn min(self, rhs: Self) -> Self { + Self { + x: self.x.min(rhs.x), + y: self.y.min(rhs.y), + z: self.z.min(rhs.z), + w: self.w.min(rhs.w), + } + } + + /// Returns a vector containing the maximum values for each element of `self` and `rhs`. + /// + /// In other words this computes `[self.x.max(rhs.x), self.y.max(rhs.y), ..]`. + #[inline] + pub fn max(self, rhs: Self) -> Self { + Self { + x: self.x.max(rhs.x), + y: self.y.max(rhs.y), + z: self.z.max(rhs.z), + w: self.w.max(rhs.w), + } + } + + /// Component-wise clamping of values, similar to [`u16::clamp`]. + /// + /// Each element in `min` must be less-or-equal to the corresponding element in `max`. + /// + /// # Panics + /// + /// Will panic if `min` is greater than `max` when `glam_assert` is enabled. + #[inline] + pub fn clamp(self, min: Self, max: Self) -> Self { + glam_assert!(min.cmple(max).all(), "clamp: expected min <= max"); + self.max(min).min(max) + } + + /// Returns the horizontal minimum of `self`. + /// + /// In other words this computes `min(x, y, ..)`. + #[inline] + pub fn min_element(self) -> u16 { + self.x.min(self.y.min(self.z.min(self.w))) + } + + /// Returns the horizontal maximum of `self`. + /// + /// In other words this computes `max(x, y, ..)`. + #[inline] + pub fn max_element(self) -> u16 { + self.x.max(self.y.max(self.z.max(self.w))) + } + + /// Returns a vector mask containing the result of a `==` comparison for each element of + /// `self` and `rhs`. + /// + /// In other words, this computes `[self.x == rhs.x, self.y == rhs.y, ..]` for all + /// elements. + #[inline] + pub fn cmpeq(self, rhs: Self) -> BVec4 { + BVec4::new( + self.x.eq(&rhs.x), + self.y.eq(&rhs.y), + self.z.eq(&rhs.z), + self.w.eq(&rhs.w), + ) + } + + /// Returns a vector mask containing the result of a `!=` comparison for each element of + /// `self` and `rhs`. + /// + /// In other words this computes `[self.x != rhs.x, self.y != rhs.y, ..]` for all + /// elements. 
+ #[inline] + pub fn cmpne(self, rhs: Self) -> BVec4 { + BVec4::new( + self.x.ne(&rhs.x), + self.y.ne(&rhs.y), + self.z.ne(&rhs.z), + self.w.ne(&rhs.w), + ) + } + + /// Returns a vector mask containing the result of a `>=` comparison for each element of + /// `self` and `rhs`. + /// + /// In other words this computes `[self.x >= rhs.x, self.y >= rhs.y, ..]` for all + /// elements. + #[inline] + pub fn cmpge(self, rhs: Self) -> BVec4 { + BVec4::new( + self.x.ge(&rhs.x), + self.y.ge(&rhs.y), + self.z.ge(&rhs.z), + self.w.ge(&rhs.w), + ) + } + + /// Returns a vector mask containing the result of a `>` comparison for each element of + /// `self` and `rhs`. + /// + /// In other words this computes `[self.x > rhs.x, self.y > rhs.y, ..]` for all + /// elements. + #[inline] + pub fn cmpgt(self, rhs: Self) -> BVec4 { + BVec4::new( + self.x.gt(&rhs.x), + self.y.gt(&rhs.y), + self.z.gt(&rhs.z), + self.w.gt(&rhs.w), + ) + } + + /// Returns a vector mask containing the result of a `<=` comparison for each element of + /// `self` and `rhs`. + /// + /// In other words this computes `[self.x <= rhs.x, self.y <= rhs.y, ..]` for all + /// elements. + #[inline] + pub fn cmple(self, rhs: Self) -> BVec4 { + BVec4::new( + self.x.le(&rhs.x), + self.y.le(&rhs.y), + self.z.le(&rhs.z), + self.w.le(&rhs.w), + ) + } + + /// Returns a vector mask containing the result of a `<` comparison for each element of + /// `self` and `rhs`. + /// + /// In other words this computes `[self.x < rhs.x, self.y < rhs.y, ..]` for all + /// elements. + #[inline] + pub fn cmplt(self, rhs: Self) -> BVec4 { + BVec4::new( + self.x.lt(&rhs.x), + self.y.lt(&rhs.y), + self.z.lt(&rhs.z), + self.w.lt(&rhs.w), + ) + } + + /// Computes the squared length of `self`. + #[doc(alias = "magnitude2")] + #[inline] + pub fn length_squared(self) -> u16 { + self.dot(self) + } + + /// Casts all elements of `self` to `f32`. + #[inline] + pub fn as_vec4(&self) -> crate::Vec4 { + crate::Vec4::new(self.x as f32, self.y as f32, self.z as f32, self.w as f32) + } + + /// Casts all elements of `self` to `f64`. + #[inline] + pub fn as_dvec4(&self) -> crate::DVec4 { + crate::DVec4::new(self.x as f64, self.y as f64, self.z as f64, self.w as f64) + } + + /// Casts all elements of `self` to `i16`. + #[inline] + pub fn as_i16vec4(&self) -> crate::I16Vec4 { + crate::I16Vec4::new(self.x as i16, self.y as i16, self.z as i16, self.w as i16) + } + + /// Casts all elements of `self` to `i32`. + #[inline] + pub fn as_ivec4(&self) -> crate::IVec4 { + crate::IVec4::new(self.x as i32, self.y as i32, self.z as i32, self.w as i32) + } + + /// Casts all elements of `self` to `u32`. + #[inline] + pub fn as_uvec4(&self) -> crate::UVec4 { + crate::UVec4::new(self.x as u32, self.y as u32, self.z as u32, self.w as u32) + } + + /// Casts all elements of `self` to `i64`. + #[inline] + pub fn as_i64vec4(&self) -> crate::I64Vec4 { + crate::I64Vec4::new(self.x as i64, self.y as i64, self.z as i64, self.w as i64) + } + + /// Casts all elements of `self` to `u64`. + #[inline] + pub fn as_u64vec4(&self) -> crate::U64Vec4 { + crate::U64Vec4::new(self.x as u64, self.y as u64, self.z as u64, self.w as u64) + } + + /// Returns a vector containing the wrapping addition of `self` and `rhs`. + /// + /// In other words this computes `[self.x.wrapping_add(rhs.x), self.y.wrapping_add(rhs.y), ..]`. 
+ #[inline] + #[must_use] + pub const fn wrapping_add(self, rhs: Self) -> Self { + Self { + x: self.x.wrapping_add(rhs.x), + y: self.y.wrapping_add(rhs.y), + z: self.z.wrapping_add(rhs.z), + w: self.w.wrapping_add(rhs.w), + } + } + + /// Returns a vector containing the wrapping subtraction of `self` and `rhs`. + /// + /// In other words this computes `[self.x.wrapping_sub(rhs.x), self.y.wrapping_sub(rhs.y), ..]`. + #[inline] + #[must_use] + pub const fn wrapping_sub(self, rhs: Self) -> Self { + Self { + x: self.x.wrapping_sub(rhs.x), + y: self.y.wrapping_sub(rhs.y), + z: self.z.wrapping_sub(rhs.z), + w: self.w.wrapping_sub(rhs.w), + } + } + + /// Returns a vector containing the wrapping multiplication of `self` and `rhs`. + /// + /// In other words this computes `[self.x.wrapping_mul(rhs.x), self.y.wrapping_mul(rhs.y), ..]`. + #[inline] + #[must_use] + pub const fn wrapping_mul(self, rhs: Self) -> Self { + Self { + x: self.x.wrapping_mul(rhs.x), + y: self.y.wrapping_mul(rhs.y), + z: self.z.wrapping_mul(rhs.z), + w: self.w.wrapping_mul(rhs.w), + } + } + + /// Returns a vector containing the wrapping division of `self` and `rhs`. + /// + /// In other words this computes `[self.x.wrapping_div(rhs.x), self.y.wrapping_div(rhs.y), ..]`. + #[inline] + #[must_use] + pub const fn wrapping_div(self, rhs: Self) -> Self { + Self { + x: self.x.wrapping_div(rhs.x), + y: self.y.wrapping_div(rhs.y), + z: self.z.wrapping_div(rhs.z), + w: self.w.wrapping_div(rhs.w), + } + } + + /// Returns a vector containing the saturating addition of `self` and `rhs`. + /// + /// In other words this computes `[self.x.saturating_add(rhs.x), self.y.saturating_add(rhs.y), ..]`. + #[inline] + #[must_use] + pub const fn saturating_add(self, rhs: Self) -> Self { + Self { + x: self.x.saturating_add(rhs.x), + y: self.y.saturating_add(rhs.y), + z: self.z.saturating_add(rhs.z), + w: self.w.saturating_add(rhs.w), + } + } + + /// Returns a vector containing the saturating subtraction of `self` and `rhs`. + /// + /// In other words this computes `[self.x.saturating_sub(rhs.x), self.y.saturating_sub(rhs.y), ..]`. + #[inline] + #[must_use] + pub const fn saturating_sub(self, rhs: Self) -> Self { + Self { + x: self.x.saturating_sub(rhs.x), + y: self.y.saturating_sub(rhs.y), + z: self.z.saturating_sub(rhs.z), + w: self.w.saturating_sub(rhs.w), + } + } + + /// Returns a vector containing the saturating multiplication of `self` and `rhs`. + /// + /// In other words this computes `[self.x.saturating_mul(rhs.x), self.y.saturating_mul(rhs.y), ..]`. + #[inline] + #[must_use] + pub const fn saturating_mul(self, rhs: Self) -> Self { + Self { + x: self.x.saturating_mul(rhs.x), + y: self.y.saturating_mul(rhs.y), + z: self.z.saturating_mul(rhs.z), + w: self.w.saturating_mul(rhs.w), + } + } + + /// Returns a vector containing the saturating division of `self` and `rhs`. + /// + /// In other words this computes `[self.x.saturating_div(rhs.x), self.y.saturating_div(rhs.y), ..]`. 
+ #[inline] + #[must_use] + pub const fn saturating_div(self, rhs: Self) -> Self { + Self { + x: self.x.saturating_div(rhs.x), + y: self.y.saturating_div(rhs.y), + z: self.z.saturating_div(rhs.z), + w: self.w.saturating_div(rhs.w), + } + } +} + +impl Default for U16Vec4 { + #[inline(always)] + fn default() -> Self { + Self::ZERO + } +} + +impl Div for U16Vec4 { + type Output = Self; + #[inline] + fn div(self, rhs: Self) -> Self { + Self { + x: self.x.div(rhs.x), + y: self.y.div(rhs.y), + z: self.z.div(rhs.z), + w: self.w.div(rhs.w), + } + } +} + +impl DivAssign for U16Vec4 { + #[inline] + fn div_assign(&mut self, rhs: Self) { + self.x.div_assign(rhs.x); + self.y.div_assign(rhs.y); + self.z.div_assign(rhs.z); + self.w.div_assign(rhs.w); + } +} + +impl Div for U16Vec4 { + type Output = Self; + #[inline] + fn div(self, rhs: u16) -> Self { + Self { + x: self.x.div(rhs), + y: self.y.div(rhs), + z: self.z.div(rhs), + w: self.w.div(rhs), + } + } +} + +impl DivAssign for U16Vec4 { + #[inline] + fn div_assign(&mut self, rhs: u16) { + self.x.div_assign(rhs); + self.y.div_assign(rhs); + self.z.div_assign(rhs); + self.w.div_assign(rhs); + } +} + +impl Div for u16 { + type Output = U16Vec4; + #[inline] + fn div(self, rhs: U16Vec4) -> U16Vec4 { + U16Vec4 { + x: self.div(rhs.x), + y: self.div(rhs.y), + z: self.div(rhs.z), + w: self.div(rhs.w), + } + } +} + +impl Mul for U16Vec4 { + type Output = Self; + #[inline] + fn mul(self, rhs: Self) -> Self { + Self { + x: self.x.mul(rhs.x), + y: self.y.mul(rhs.y), + z: self.z.mul(rhs.z), + w: self.w.mul(rhs.w), + } + } +} + +impl MulAssign for U16Vec4 { + #[inline] + fn mul_assign(&mut self, rhs: Self) { + self.x.mul_assign(rhs.x); + self.y.mul_assign(rhs.y); + self.z.mul_assign(rhs.z); + self.w.mul_assign(rhs.w); + } +} + +impl Mul for U16Vec4 { + type Output = Self; + #[inline] + fn mul(self, rhs: u16) -> Self { + Self { + x: self.x.mul(rhs), + y: self.y.mul(rhs), + z: self.z.mul(rhs), + w: self.w.mul(rhs), + } + } +} + +impl MulAssign for U16Vec4 { + #[inline] + fn mul_assign(&mut self, rhs: u16) { + self.x.mul_assign(rhs); + self.y.mul_assign(rhs); + self.z.mul_assign(rhs); + self.w.mul_assign(rhs); + } +} + +impl Mul for u16 { + type Output = U16Vec4; + #[inline] + fn mul(self, rhs: U16Vec4) -> U16Vec4 { + U16Vec4 { + x: self.mul(rhs.x), + y: self.mul(rhs.y), + z: self.mul(rhs.z), + w: self.mul(rhs.w), + } + } +} + +impl Add for U16Vec4 { + type Output = Self; + #[inline] + fn add(self, rhs: Self) -> Self { + Self { + x: self.x.add(rhs.x), + y: self.y.add(rhs.y), + z: self.z.add(rhs.z), + w: self.w.add(rhs.w), + } + } +} + +impl AddAssign for U16Vec4 { + #[inline] + fn add_assign(&mut self, rhs: Self) { + self.x.add_assign(rhs.x); + self.y.add_assign(rhs.y); + self.z.add_assign(rhs.z); + self.w.add_assign(rhs.w); + } +} + +impl Add for U16Vec4 { + type Output = Self; + #[inline] + fn add(self, rhs: u16) -> Self { + Self { + x: self.x.add(rhs), + y: self.y.add(rhs), + z: self.z.add(rhs), + w: self.w.add(rhs), + } + } +} + +impl AddAssign for U16Vec4 { + #[inline] + fn add_assign(&mut self, rhs: u16) { + self.x.add_assign(rhs); + self.y.add_assign(rhs); + self.z.add_assign(rhs); + self.w.add_assign(rhs); + } +} + +impl Add for u16 { + type Output = U16Vec4; + #[inline] + fn add(self, rhs: U16Vec4) -> U16Vec4 { + U16Vec4 { + x: self.add(rhs.x), + y: self.add(rhs.y), + z: self.add(rhs.z), + w: self.add(rhs.w), + } + } +} + +impl Sub for U16Vec4 { + type Output = Self; + #[inline] + fn sub(self, rhs: Self) -> Self { + Self { + x: self.x.sub(rhs.x), + y: 
self.y.sub(rhs.y), + z: self.z.sub(rhs.z), + w: self.w.sub(rhs.w), + } + } +} + +impl SubAssign for U16Vec4 { + #[inline] + fn sub_assign(&mut self, rhs: U16Vec4) { + self.x.sub_assign(rhs.x); + self.y.sub_assign(rhs.y); + self.z.sub_assign(rhs.z); + self.w.sub_assign(rhs.w); + } +} + +impl Sub for U16Vec4 { + type Output = Self; + #[inline] + fn sub(self, rhs: u16) -> Self { + Self { + x: self.x.sub(rhs), + y: self.y.sub(rhs), + z: self.z.sub(rhs), + w: self.w.sub(rhs), + } + } +} + +impl SubAssign for U16Vec4 { + #[inline] + fn sub_assign(&mut self, rhs: u16) { + self.x.sub_assign(rhs); + self.y.sub_assign(rhs); + self.z.sub_assign(rhs); + self.w.sub_assign(rhs); + } +} + +impl Sub for u16 { + type Output = U16Vec4; + #[inline] + fn sub(self, rhs: U16Vec4) -> U16Vec4 { + U16Vec4 { + x: self.sub(rhs.x), + y: self.sub(rhs.y), + z: self.sub(rhs.z), + w: self.sub(rhs.w), + } + } +} + +impl Rem for U16Vec4 { + type Output = Self; + #[inline] + fn rem(self, rhs: Self) -> Self { + Self { + x: self.x.rem(rhs.x), + y: self.y.rem(rhs.y), + z: self.z.rem(rhs.z), + w: self.w.rem(rhs.w), + } + } +} + +impl RemAssign for U16Vec4 { + #[inline] + fn rem_assign(&mut self, rhs: Self) { + self.x.rem_assign(rhs.x); + self.y.rem_assign(rhs.y); + self.z.rem_assign(rhs.z); + self.w.rem_assign(rhs.w); + } +} + +impl Rem for U16Vec4 { + type Output = Self; + #[inline] + fn rem(self, rhs: u16) -> Self { + Self { + x: self.x.rem(rhs), + y: self.y.rem(rhs), + z: self.z.rem(rhs), + w: self.w.rem(rhs), + } + } +} + +impl RemAssign for U16Vec4 { + #[inline] + fn rem_assign(&mut self, rhs: u16) { + self.x.rem_assign(rhs); + self.y.rem_assign(rhs); + self.z.rem_assign(rhs); + self.w.rem_assign(rhs); + } +} + +impl Rem for u16 { + type Output = U16Vec4; + #[inline] + fn rem(self, rhs: U16Vec4) -> U16Vec4 { + U16Vec4 { + x: self.rem(rhs.x), + y: self.rem(rhs.y), + z: self.rem(rhs.z), + w: self.rem(rhs.w), + } + } +} + +#[cfg(not(target_arch = "spirv"))] +impl AsRef<[u16; 4]> for U16Vec4 { + #[inline] + fn as_ref(&self) -> &[u16; 4] { + unsafe { &*(self as *const U16Vec4 as *const [u16; 4]) } + } +} + +#[cfg(not(target_arch = "spirv"))] +impl AsMut<[u16; 4]> for U16Vec4 { + #[inline] + fn as_mut(&mut self) -> &mut [u16; 4] { + unsafe { &mut *(self as *mut U16Vec4 as *mut [u16; 4]) } + } +} + +impl Sum for U16Vec4 { + #[inline] + fn sum(iter: I) -> Self + where + I: Iterator, + { + iter.fold(Self::ZERO, Self::add) + } +} + +impl<'a> Sum<&'a Self> for U16Vec4 { + #[inline] + fn sum(iter: I) -> Self + where + I: Iterator, + { + iter.fold(Self::ZERO, |a, &b| Self::add(a, b)) + } +} + +impl Product for U16Vec4 { + #[inline] + fn product(iter: I) -> Self + where + I: Iterator, + { + iter.fold(Self::ONE, Self::mul) + } +} + +impl<'a> Product<&'a Self> for U16Vec4 { + #[inline] + fn product(iter: I) -> Self + where + I: Iterator, + { + iter.fold(Self::ONE, |a, &b| Self::mul(a, b)) + } +} + +impl Not for U16Vec4 { + type Output = Self; + #[inline] + fn not(self) -> Self::Output { + Self { + x: self.x.not(), + y: self.y.not(), + z: self.z.not(), + w: self.w.not(), + } + } +} + +impl BitAnd for U16Vec4 { + type Output = Self; + #[inline] + fn bitand(self, rhs: Self) -> Self::Output { + Self { + x: self.x.bitand(rhs.x), + y: self.y.bitand(rhs.y), + z: self.z.bitand(rhs.z), + w: self.w.bitand(rhs.w), + } + } +} + +impl BitOr for U16Vec4 { + type Output = Self; + #[inline] + fn bitor(self, rhs: Self) -> Self::Output { + Self { + x: self.x.bitor(rhs.x), + y: self.y.bitor(rhs.y), + z: self.z.bitor(rhs.z), + w: self.w.bitor(rhs.w), + } + } 
+} + +impl BitXor for U16Vec4 { + type Output = Self; + #[inline] + fn bitxor(self, rhs: Self) -> Self::Output { + Self { + x: self.x.bitxor(rhs.x), + y: self.y.bitxor(rhs.y), + z: self.z.bitxor(rhs.z), + w: self.w.bitxor(rhs.w), + } + } +} + +impl BitAnd for U16Vec4 { + type Output = Self; + #[inline] + fn bitand(self, rhs: u16) -> Self::Output { + Self { + x: self.x.bitand(rhs), + y: self.y.bitand(rhs), + z: self.z.bitand(rhs), + w: self.w.bitand(rhs), + } + } +} + +impl BitOr for U16Vec4 { + type Output = Self; + #[inline] + fn bitor(self, rhs: u16) -> Self::Output { + Self { + x: self.x.bitor(rhs), + y: self.y.bitor(rhs), + z: self.z.bitor(rhs), + w: self.w.bitor(rhs), + } + } +} + +impl BitXor for U16Vec4 { + type Output = Self; + #[inline] + fn bitxor(self, rhs: u16) -> Self::Output { + Self { + x: self.x.bitxor(rhs), + y: self.y.bitxor(rhs), + z: self.z.bitxor(rhs), + w: self.w.bitxor(rhs), + } + } +} + +impl Shl for U16Vec4 { + type Output = Self; + #[inline] + fn shl(self, rhs: i8) -> Self::Output { + Self { + x: self.x.shl(rhs), + y: self.y.shl(rhs), + z: self.z.shl(rhs), + w: self.w.shl(rhs), + } + } +} + +impl Shr for U16Vec4 { + type Output = Self; + #[inline] + fn shr(self, rhs: i8) -> Self::Output { + Self { + x: self.x.shr(rhs), + y: self.y.shr(rhs), + z: self.z.shr(rhs), + w: self.w.shr(rhs), + } + } +} + +impl Shl for U16Vec4 { + type Output = Self; + #[inline] + fn shl(self, rhs: i16) -> Self::Output { + Self { + x: self.x.shl(rhs), + y: self.y.shl(rhs), + z: self.z.shl(rhs), + w: self.w.shl(rhs), + } + } +} + +impl Shr for U16Vec4 { + type Output = Self; + #[inline] + fn shr(self, rhs: i16) -> Self::Output { + Self { + x: self.x.shr(rhs), + y: self.y.shr(rhs), + z: self.z.shr(rhs), + w: self.w.shr(rhs), + } + } +} + +impl Shl for U16Vec4 { + type Output = Self; + #[inline] + fn shl(self, rhs: i32) -> Self::Output { + Self { + x: self.x.shl(rhs), + y: self.y.shl(rhs), + z: self.z.shl(rhs), + w: self.w.shl(rhs), + } + } +} + +impl Shr for U16Vec4 { + type Output = Self; + #[inline] + fn shr(self, rhs: i32) -> Self::Output { + Self { + x: self.x.shr(rhs), + y: self.y.shr(rhs), + z: self.z.shr(rhs), + w: self.w.shr(rhs), + } + } +} + +impl Shl for U16Vec4 { + type Output = Self; + #[inline] + fn shl(self, rhs: i64) -> Self::Output { + Self { + x: self.x.shl(rhs), + y: self.y.shl(rhs), + z: self.z.shl(rhs), + w: self.w.shl(rhs), + } + } +} + +impl Shr for U16Vec4 { + type Output = Self; + #[inline] + fn shr(self, rhs: i64) -> Self::Output { + Self { + x: self.x.shr(rhs), + y: self.y.shr(rhs), + z: self.z.shr(rhs), + w: self.w.shr(rhs), + } + } +} + +impl Shl for U16Vec4 { + type Output = Self; + #[inline] + fn shl(self, rhs: u8) -> Self::Output { + Self { + x: self.x.shl(rhs), + y: self.y.shl(rhs), + z: self.z.shl(rhs), + w: self.w.shl(rhs), + } + } +} + +impl Shr for U16Vec4 { + type Output = Self; + #[inline] + fn shr(self, rhs: u8) -> Self::Output { + Self { + x: self.x.shr(rhs), + y: self.y.shr(rhs), + z: self.z.shr(rhs), + w: self.w.shr(rhs), + } + } +} + +impl Shl for U16Vec4 { + type Output = Self; + #[inline] + fn shl(self, rhs: u16) -> Self::Output { + Self { + x: self.x.shl(rhs), + y: self.y.shl(rhs), + z: self.z.shl(rhs), + w: self.w.shl(rhs), + } + } +} + +impl Shr for U16Vec4 { + type Output = Self; + #[inline] + fn shr(self, rhs: u16) -> Self::Output { + Self { + x: self.x.shr(rhs), + y: self.y.shr(rhs), + z: self.z.shr(rhs), + w: self.w.shr(rhs), + } + } +} + +impl Shl for U16Vec4 { + type Output = Self; + #[inline] + fn shl(self, rhs: u32) -> Self::Output { + 
Self { + x: self.x.shl(rhs), + y: self.y.shl(rhs), + z: self.z.shl(rhs), + w: self.w.shl(rhs), + } + } +} + +impl Shr for U16Vec4 { + type Output = Self; + #[inline] + fn shr(self, rhs: u32) -> Self::Output { + Self { + x: self.x.shr(rhs), + y: self.y.shr(rhs), + z: self.z.shr(rhs), + w: self.w.shr(rhs), + } + } +} + +impl Shl for U16Vec4 { + type Output = Self; + #[inline] + fn shl(self, rhs: u64) -> Self::Output { + Self { + x: self.x.shl(rhs), + y: self.y.shl(rhs), + z: self.z.shl(rhs), + w: self.w.shl(rhs), + } + } +} + +impl Shr for U16Vec4 { + type Output = Self; + #[inline] + fn shr(self, rhs: u64) -> Self::Output { + Self { + x: self.x.shr(rhs), + y: self.y.shr(rhs), + z: self.z.shr(rhs), + w: self.w.shr(rhs), + } + } +} + +impl Shl for U16Vec4 { + type Output = Self; + #[inline] + fn shl(self, rhs: crate::IVec4) -> Self::Output { + Self { + x: self.x.shl(rhs.x), + y: self.y.shl(rhs.y), + z: self.z.shl(rhs.z), + w: self.w.shl(rhs.w), + } + } +} + +impl Shr for U16Vec4 { + type Output = Self; + #[inline] + fn shr(self, rhs: crate::IVec4) -> Self::Output { + Self { + x: self.x.shr(rhs.x), + y: self.y.shr(rhs.y), + z: self.z.shr(rhs.z), + w: self.w.shr(rhs.w), + } + } +} + +impl Shl for U16Vec4 { + type Output = Self; + #[inline] + fn shl(self, rhs: crate::UVec4) -> Self::Output { + Self { + x: self.x.shl(rhs.x), + y: self.y.shl(rhs.y), + z: self.z.shl(rhs.z), + w: self.w.shl(rhs.w), + } + } +} + +impl Shr for U16Vec4 { + type Output = Self; + #[inline] + fn shr(self, rhs: crate::UVec4) -> Self::Output { + Self { + x: self.x.shr(rhs.x), + y: self.y.shr(rhs.y), + z: self.z.shr(rhs.z), + w: self.w.shr(rhs.w), + } + } +} + +impl Index for U16Vec4 { + type Output = u16; + #[inline] + fn index(&self, index: usize) -> &Self::Output { + match index { + 0 => &self.x, + 1 => &self.y, + 2 => &self.z, + 3 => &self.w, + _ => panic!("index out of bounds"), + } + } +} + +impl IndexMut for U16Vec4 { + #[inline] + fn index_mut(&mut self, index: usize) -> &mut Self::Output { + match index { + 0 => &mut self.x, + 1 => &mut self.y, + 2 => &mut self.z, + 3 => &mut self.w, + _ => panic!("index out of bounds"), + } + } +} + +#[cfg(not(target_arch = "spirv"))] +impl fmt::Display for U16Vec4 { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "[{}, {}, {}, {}]", self.x, self.y, self.z, self.w) + } +} + +#[cfg(not(target_arch = "spirv"))] +impl fmt::Debug for U16Vec4 { + fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { + fmt.debug_tuple(stringify!(U16Vec4)) + .field(&self.x) + .field(&self.y) + .field(&self.z) + .field(&self.w) + .finish() + } +} + +impl From<[u16; 4]> for U16Vec4 { + #[inline] + fn from(a: [u16; 4]) -> Self { + Self::new(a[0], a[1], a[2], a[3]) + } +} + +impl From for [u16; 4] { + #[inline] + fn from(v: U16Vec4) -> Self { + [v.x, v.y, v.z, v.w] + } +} + +impl From<(u16, u16, u16, u16)> for U16Vec4 { + #[inline] + fn from(t: (u16, u16, u16, u16)) -> Self { + Self::new(t.0, t.1, t.2, t.3) + } +} + +impl From for (u16, u16, u16, u16) { + #[inline] + fn from(v: U16Vec4) -> Self { + (v.x, v.y, v.z, v.w) + } +} + +impl From<(U16Vec3, u16)> for U16Vec4 { + #[inline] + fn from((v, w): (U16Vec3, u16)) -> Self { + Self::new(v.x, v.y, v.z, w) + } +} + +impl From<(u16, U16Vec3)> for U16Vec4 { + #[inline] + fn from((x, v): (u16, U16Vec3)) -> Self { + Self::new(x, v.x, v.y, v.z) + } +} + +impl From<(U16Vec2, u16, u16)> for U16Vec4 { + #[inline] + fn from((v, z, w): (U16Vec2, u16, u16)) -> Self { + Self::new(v.x, v.y, z, w) + } +} + +impl From<(U16Vec2, U16Vec2)> for 
+    #[inline]
+    fn from((v, u): (U16Vec2, U16Vec2)) -> Self {
+        Self::new(v.x, v.y, u.x, u.y)
+    }
+}
+
+impl TryFrom<I16Vec4> for U16Vec4 {
+    type Error = core::num::TryFromIntError;
+
+    #[inline]
+    fn try_from(v: I16Vec4) -> Result<Self, Self::Error> {
+        Ok(Self::new(
+            u16::try_from(v.x)?,
+            u16::try_from(v.y)?,
+            u16::try_from(v.z)?,
+            u16::try_from(v.w)?,
+        ))
+    }
+}
+
+impl TryFrom<IVec4> for U16Vec4 {
+    type Error = core::num::TryFromIntError;
+
+    #[inline]
+    fn try_from(v: IVec4) -> Result<Self, Self::Error> {
+        Ok(Self::new(
+            u16::try_from(v.x)?,
+            u16::try_from(v.y)?,
+            u16::try_from(v.z)?,
+            u16::try_from(v.w)?,
+        ))
+    }
+}
+
+impl TryFrom<UVec4> for U16Vec4 {
+    type Error = core::num::TryFromIntError;
+
+    #[inline]
+    fn try_from(v: UVec4) -> Result<Self, Self::Error> {
+        Ok(Self::new(
+            u16::try_from(v.x)?,
+            u16::try_from(v.y)?,
+            u16::try_from(v.z)?,
+            u16::try_from(v.w)?,
+        ))
+    }
+}
+
+impl TryFrom<I64Vec4> for U16Vec4 {
+    type Error = core::num::TryFromIntError;
+
+    #[inline]
+    fn try_from(v: I64Vec4) -> Result<Self, Self::Error> {
+        Ok(Self::new(
+            u16::try_from(v.x)?,
+            u16::try_from(v.y)?,
+            u16::try_from(v.z)?,
+            u16::try_from(v.w)?,
+        ))
+    }
+}
+
+impl TryFrom<U64Vec4> for U16Vec4 {
+    type Error = core::num::TryFromIntError;
+
+    #[inline]
+    fn try_from(v: U64Vec4) -> Result<Self, Self::Error> {
+        Ok(Self::new(
+            u16::try_from(v.x)?,
+            u16::try_from(v.y)?,
+            u16::try_from(v.z)?,
+            u16::try_from(v.w)?,
+        ))
+    }
+}
diff --git a/src/u32/uvec2.rs b/src/u32/uvec2.rs
index 9ad20e60..436cff77 100644
--- a/src/u32/uvec2.rs
+++ b/src/u32/uvec2.rs
@@ -1,6 +1,6 @@
 // Generated from vec.rs.tera template. Edit the template, not the generated file.
 
-use crate::{BVec2, I64Vec2, IVec2, U64Vec2, UVec3};
+use crate::{BVec2, I16Vec2, I64Vec2, IVec2, U16Vec2, U64Vec2, UVec3};
 
 #[cfg(not(target_arch = "spirv"))]
 use core::fmt;
@@ -252,6 +252,18 @@ impl UVec2 {
         crate::DVec2::new(self.x as f64, self.y as f64)
     }
 
+    /// Casts all elements of `self` to `i16`.
+    #[inline]
+    pub fn as_i16vec2(&self) -> crate::I16Vec2 {
+        crate::I16Vec2::new(self.x as i16, self.y as i16)
+    }
+
+    /// Casts all elements of `self` to `u16`.
+    #[inline]
+    pub fn as_u16vec2(&self) -> crate::U16Vec2 {
+        crate::U16Vec2::new(self.x as u16, self.y as u16)
+    }
+
     /// Casts all elements of `self` to `i32`.
     #[inline]
     pub fn as_ivec2(&self) -> crate::IVec2 {
@@ -1040,6 +1052,22 @@ impl From<UVec2> for (u32, u32) {
     }
 }
 
+impl From<U16Vec2> for UVec2 {
+    #[inline]
+    fn from(v: U16Vec2) -> Self {
+        Self::new(u32::from(v.x), u32::from(v.y))
+    }
+}
+
+impl TryFrom<I16Vec2> for UVec2 {
+    type Error = core::num::TryFromIntError;
+
+    #[inline]
+    fn try_from(v: I16Vec2) -> Result<Self, Self::Error> {
+        Ok(Self::new(u32::try_from(v.x)?, u32::try_from(v.y)?))
+    }
+}
+
 impl TryFrom<IVec2> for UVec2 {
     type Error = core::num::TryFromIntError;
 
diff --git a/src/u32/uvec3.rs b/src/u32/uvec3.rs
index 5fd1a50d..553eb155 100644
--- a/src/u32/uvec3.rs
+++ b/src/u32/uvec3.rs
@@ -1,6 +1,6 @@
 // Generated from vec.rs.tera template. Edit the template, not the generated file.
 
-use crate::{BVec3, I64Vec3, IVec3, U64Vec3, UVec2, UVec4};
+use crate::{BVec3, I16Vec3, I64Vec3, IVec3, U16Vec3, U64Vec3, UVec2, UVec4};
 
 #[cfg(not(target_arch = "spirv"))]
 use core::fmt;
@@ -295,6 +295,18 @@ impl UVec3 {
         crate::DVec3::new(self.x as f64, self.y as f64, self.z as f64)
     }
 
+    /// Casts all elements of `self` to `i16`.
+    #[inline]
+    pub fn as_i16vec3(&self) -> crate::I16Vec3 {
+        crate::I16Vec3::new(self.x as i16, self.y as i16, self.z as i16)
+    }
+
+    /// Casts all elements of `self` to `u16`.
+ #[inline] + pub fn as_u16vec3(&self) -> crate::U16Vec3 { + crate::U16Vec3::new(self.x as u16, self.y as u16, self.z as u16) + } + /// Casts all elements of `self` to `i32`. #[inline] pub fn as_ivec3(&self) -> crate::IVec3 { @@ -1153,6 +1165,26 @@ impl From<(UVec2, u32)> for UVec3 { } } +impl From for UVec3 { + #[inline] + fn from(v: U16Vec3) -> Self { + Self::new(u32::from(v.x), u32::from(v.y), u32::from(v.z)) + } +} + +impl TryFrom for UVec3 { + type Error = core::num::TryFromIntError; + + #[inline] + fn try_from(v: I16Vec3) -> Result { + Ok(Self::new( + u32::try_from(v.x)?, + u32::try_from(v.y)?, + u32::try_from(v.z)?, + )) + } +} + impl TryFrom for UVec3 { type Error = core::num::TryFromIntError; diff --git a/src/u32/uvec4.rs b/src/u32/uvec4.rs index 60105a11..62da8f5d 100644 --- a/src/u32/uvec4.rs +++ b/src/u32/uvec4.rs @@ -1,6 +1,6 @@ // Generated from vec.rs.tera template. Edit the template, not the generated file. -use crate::{BVec4, I64Vec4, IVec4, U64Vec4, UVec2, UVec3}; +use crate::{BVec4, I16Vec4, I64Vec4, IVec4, U16Vec4, U64Vec4, UVec2, UVec3}; #[cfg(not(target_arch = "spirv"))] use core::fmt; @@ -309,6 +309,18 @@ impl UVec4 { crate::DVec4::new(self.x as f64, self.y as f64, self.z as f64, self.w as f64) } + /// Casts all elements of `self` to `i16`. + #[inline] + pub fn as_i16vec4(&self) -> crate::I16Vec4 { + crate::I16Vec4::new(self.x as i16, self.y as i16, self.z as i16, self.w as i16) + } + + /// Casts all elements of `self` to `u16`. + #[inline] + pub fn as_u16vec4(&self) -> crate::U16Vec4 { + crate::U16Vec4::new(self.x as u16, self.y as u16, self.z as u16, self.w as u16) + } + /// Casts all elements of `self` to `i32`. #[inline] pub fn as_ivec4(&self) -> crate::IVec4 { @@ -1251,6 +1263,32 @@ impl From<(UVec2, UVec2)> for UVec4 { } } +impl From for UVec4 { + #[inline] + fn from(v: U16Vec4) -> Self { + Self::new( + u32::from(v.x), + u32::from(v.y), + u32::from(v.z), + u32::from(v.w), + ) + } +} + +impl TryFrom for UVec4 { + type Error = core::num::TryFromIntError; + + #[inline] + fn try_from(v: I16Vec4) -> Result { + Ok(Self::new( + u32::try_from(v.x)?, + u32::try_from(v.y)?, + u32::try_from(v.z)?, + u32::try_from(v.w)?, + )) + } +} + impl TryFrom for UVec4 { type Error = core::num::TryFromIntError; diff --git a/src/u64/u64vec2.rs b/src/u64/u64vec2.rs index a96d4ac6..c46cc962 100644 --- a/src/u64/u64vec2.rs +++ b/src/u64/u64vec2.rs @@ -1,6 +1,6 @@ // Generated from vec.rs.tera template. Edit the template, not the generated file. -use crate::{BVec2, I64Vec2, U64Vec3, UVec2}; +use crate::{BVec2, I16Vec2, I64Vec2, IVec2, U16Vec2, U64Vec3, UVec2}; #[cfg(not(target_arch = "spirv"))] use core::fmt; @@ -252,6 +252,18 @@ impl U64Vec2 { crate::DVec2::new(self.x as f64, self.y as f64) } + /// Casts all elements of `self` to `i16`. + #[inline] + pub fn as_i16vec2(&self) -> crate::I16Vec2 { + crate::I16Vec2::new(self.x as i16, self.y as i16) + } + + /// Casts all elements of `self` to `u16`. + #[inline] + pub fn as_u16vec2(&self) -> crate::U16Vec2 { + crate::U16Vec2::new(self.x as u16, self.y as u16) + } + /// Casts all elements of `self` to `i32`. 
#[inline] pub fn as_ivec2(&self) -> crate::IVec2 { @@ -1040,6 +1052,13 @@ impl From for (u64, u64) { } } +impl From for U64Vec2 { + #[inline] + fn from(v: U16Vec2) -> Self { + Self::new(u64::from(v.x), u64::from(v.y)) + } +} + impl From for U64Vec2 { #[inline] fn from(v: UVec2) -> Self { @@ -1047,6 +1066,24 @@ impl From for U64Vec2 { } } +impl TryFrom for U64Vec2 { + type Error = core::num::TryFromIntError; + + #[inline] + fn try_from(v: I16Vec2) -> Result { + Ok(Self::new(u64::try_from(v.x)?, u64::try_from(v.y)?)) + } +} + +impl TryFrom for U64Vec2 { + type Error = core::num::TryFromIntError; + + #[inline] + fn try_from(v: IVec2) -> Result { + Ok(Self::new(u64::try_from(v.x)?, u64::try_from(v.y)?)) + } +} + impl TryFrom for U64Vec2 { type Error = core::num::TryFromIntError; diff --git a/src/u64/u64vec3.rs b/src/u64/u64vec3.rs index 5bde114f..38e1ccfc 100644 --- a/src/u64/u64vec3.rs +++ b/src/u64/u64vec3.rs @@ -1,6 +1,6 @@ // Generated from vec.rs.tera template. Edit the template, not the generated file. -use crate::{BVec3, I64Vec3, U64Vec2, U64Vec4, UVec3}; +use crate::{BVec3, I16Vec3, I64Vec3, IVec3, U16Vec3, U64Vec2, U64Vec4, UVec3}; #[cfg(not(target_arch = "spirv"))] use core::fmt; @@ -295,6 +295,18 @@ impl U64Vec3 { crate::DVec3::new(self.x as f64, self.y as f64, self.z as f64) } + /// Casts all elements of `self` to `i16`. + #[inline] + pub fn as_i16vec3(&self) -> crate::I16Vec3 { + crate::I16Vec3::new(self.x as i16, self.y as i16, self.z as i16) + } + + /// Casts all elements of `self` to `u16`. + #[inline] + pub fn as_u16vec3(&self) -> crate::U16Vec3 { + crate::U16Vec3::new(self.x as u16, self.y as u16, self.z as u16) + } + /// Casts all elements of `self` to `i32`. #[inline] pub fn as_ivec3(&self) -> crate::IVec3 { @@ -1153,6 +1165,13 @@ impl From<(U64Vec2, u64)> for U64Vec3 { } } +impl From for U64Vec3 { + #[inline] + fn from(v: U16Vec3) -> Self { + Self::new(u64::from(v.x), u64::from(v.y), u64::from(v.z)) + } +} + impl From for U64Vec3 { #[inline] fn from(v: UVec3) -> Self { @@ -1160,6 +1179,32 @@ impl From for U64Vec3 { } } +impl TryFrom for U64Vec3 { + type Error = core::num::TryFromIntError; + + #[inline] + fn try_from(v: I16Vec3) -> Result { + Ok(Self::new( + u64::try_from(v.x)?, + u64::try_from(v.y)?, + u64::try_from(v.z)?, + )) + } +} + +impl TryFrom for U64Vec3 { + type Error = core::num::TryFromIntError; + + #[inline] + fn try_from(v: IVec3) -> Result { + Ok(Self::new( + u64::try_from(v.x)?, + u64::try_from(v.y)?, + u64::try_from(v.z)?, + )) + } +} + impl TryFrom for U64Vec3 { type Error = core::num::TryFromIntError; diff --git a/src/u64/u64vec4.rs b/src/u64/u64vec4.rs index 12fd5217..fd1ecc36 100644 --- a/src/u64/u64vec4.rs +++ b/src/u64/u64vec4.rs @@ -1,6 +1,6 @@ // Generated from vec.rs.tera template. Edit the template, not the generated file. -use crate::{BVec4, I64Vec4, U64Vec2, U64Vec3, UVec4}; +use crate::{BVec4, I16Vec4, I64Vec4, IVec4, U16Vec4, U64Vec2, U64Vec3, UVec4}; #[cfg(not(target_arch = "spirv"))] use core::fmt; @@ -309,6 +309,18 @@ impl U64Vec4 { crate::DVec4::new(self.x as f64, self.y as f64, self.z as f64, self.w as f64) } + /// Casts all elements of `self` to `i16`. + #[inline] + pub fn as_i16vec4(&self) -> crate::I16Vec4 { + crate::I16Vec4::new(self.x as i16, self.y as i16, self.z as i16, self.w as i16) + } + + /// Casts all elements of `self` to `u16`. + #[inline] + pub fn as_u16vec4(&self) -> crate::U16Vec4 { + crate::U16Vec4::new(self.x as u16, self.y as u16, self.z as u16, self.w as u16) + } + /// Casts all elements of `self` to `i32`. 
#[inline] pub fn as_ivec4(&self) -> crate::IVec4 { @@ -1251,6 +1263,18 @@ impl From<(U64Vec2, U64Vec2)> for U64Vec4 { } } +impl From for U64Vec4 { + #[inline] + fn from(v: U16Vec4) -> Self { + Self::new( + u64::from(v.x), + u64::from(v.y), + u64::from(v.z), + u64::from(v.w), + ) + } +} + impl From for U64Vec4 { #[inline] fn from(v: UVec4) -> Self { @@ -1263,6 +1287,34 @@ impl From for U64Vec4 { } } +impl TryFrom for U64Vec4 { + type Error = core::num::TryFromIntError; + + #[inline] + fn try_from(v: I16Vec4) -> Result { + Ok(Self::new( + u64::try_from(v.x)?, + u64::try_from(v.y)?, + u64::try_from(v.z)?, + u64::try_from(v.w)?, + )) + } +} + +impl TryFrom for U64Vec4 { + type Error = core::num::TryFromIntError; + + #[inline] + fn try_from(v: IVec4) -> Result { + Ok(Self::new( + u64::try_from(v.x)?, + u64::try_from(v.y)?, + u64::try_from(v.z)?, + u64::try_from(v.w)?, + )) + } +} + impl TryFrom for U64Vec4 { type Error = core::num::TryFromIntError; diff --git a/tests/swizzles_i16.rs b/tests/swizzles_i16.rs new file mode 100644 index 00000000..7aaeb692 --- /dev/null +++ b/tests/swizzles_i16.rs @@ -0,0 +1,497 @@ +// Generated by swizzlegen. Do not edit. +#[macro_use] +mod support; +use glam::*; + +glam_test!(test_i16vec4_swizzles, { + let v = i16vec4(1_i16, 2_i16, 3_i16, 4_i16); + assert_eq!(v, v.xyzw()); + assert_eq!(v.xxxx(), i16vec4(1_i16, 1_i16, 1_i16, 1_i16)); + assert_eq!(v.xxxy(), i16vec4(1_i16, 1_i16, 1_i16, 2_i16)); + assert_eq!(v.xxxz(), i16vec4(1_i16, 1_i16, 1_i16, 3_i16)); + assert_eq!(v.xxxw(), i16vec4(1_i16, 1_i16, 1_i16, 4_i16)); + assert_eq!(v.xxyx(), i16vec4(1_i16, 1_i16, 2_i16, 1_i16)); + assert_eq!(v.xxyy(), i16vec4(1_i16, 1_i16, 2_i16, 2_i16)); + assert_eq!(v.xxyz(), i16vec4(1_i16, 1_i16, 2_i16, 3_i16)); + assert_eq!(v.xxyw(), i16vec4(1_i16, 1_i16, 2_i16, 4_i16)); + assert_eq!(v.xxzx(), i16vec4(1_i16, 1_i16, 3_i16, 1_i16)); + assert_eq!(v.xxzy(), i16vec4(1_i16, 1_i16, 3_i16, 2_i16)); + assert_eq!(v.xxzz(), i16vec4(1_i16, 1_i16, 3_i16, 3_i16)); + assert_eq!(v.xxzw(), i16vec4(1_i16, 1_i16, 3_i16, 4_i16)); + assert_eq!(v.xxwx(), i16vec4(1_i16, 1_i16, 4_i16, 1_i16)); + assert_eq!(v.xxwy(), i16vec4(1_i16, 1_i16, 4_i16, 2_i16)); + assert_eq!(v.xxwz(), i16vec4(1_i16, 1_i16, 4_i16, 3_i16)); + assert_eq!(v.xxww(), i16vec4(1_i16, 1_i16, 4_i16, 4_i16)); + assert_eq!(v.xyxx(), i16vec4(1_i16, 2_i16, 1_i16, 1_i16)); + assert_eq!(v.xyxy(), i16vec4(1_i16, 2_i16, 1_i16, 2_i16)); + assert_eq!(v.xyxz(), i16vec4(1_i16, 2_i16, 1_i16, 3_i16)); + assert_eq!(v.xyxw(), i16vec4(1_i16, 2_i16, 1_i16, 4_i16)); + assert_eq!(v.xyyx(), i16vec4(1_i16, 2_i16, 2_i16, 1_i16)); + assert_eq!(v.xyyy(), i16vec4(1_i16, 2_i16, 2_i16, 2_i16)); + assert_eq!(v.xyyz(), i16vec4(1_i16, 2_i16, 2_i16, 3_i16)); + assert_eq!(v.xyyw(), i16vec4(1_i16, 2_i16, 2_i16, 4_i16)); + assert_eq!(v.xyzx(), i16vec4(1_i16, 2_i16, 3_i16, 1_i16)); + assert_eq!(v.xyzy(), i16vec4(1_i16, 2_i16, 3_i16, 2_i16)); + assert_eq!(v.xyzz(), i16vec4(1_i16, 2_i16, 3_i16, 3_i16)); + assert_eq!(v.xywx(), i16vec4(1_i16, 2_i16, 4_i16, 1_i16)); + assert_eq!(v.xywy(), i16vec4(1_i16, 2_i16, 4_i16, 2_i16)); + assert_eq!(v.xywz(), i16vec4(1_i16, 2_i16, 4_i16, 3_i16)); + assert_eq!(v.xyww(), i16vec4(1_i16, 2_i16, 4_i16, 4_i16)); + assert_eq!(v.xzxx(), i16vec4(1_i16, 3_i16, 1_i16, 1_i16)); + assert_eq!(v.xzxy(), i16vec4(1_i16, 3_i16, 1_i16, 2_i16)); + assert_eq!(v.xzxz(), i16vec4(1_i16, 3_i16, 1_i16, 3_i16)); + assert_eq!(v.xzxw(), i16vec4(1_i16, 3_i16, 1_i16, 4_i16)); + assert_eq!(v.xzyx(), i16vec4(1_i16, 3_i16, 2_i16, 1_i16)); + assert_eq!(v.xzyy(), i16vec4(1_i16, 
3_i16, 2_i16, 2_i16)); + assert_eq!(v.xzyz(), i16vec4(1_i16, 3_i16, 2_i16, 3_i16)); + assert_eq!(v.xzyw(), i16vec4(1_i16, 3_i16, 2_i16, 4_i16)); + assert_eq!(v.xzzx(), i16vec4(1_i16, 3_i16, 3_i16, 1_i16)); + assert_eq!(v.xzzy(), i16vec4(1_i16, 3_i16, 3_i16, 2_i16)); + assert_eq!(v.xzzz(), i16vec4(1_i16, 3_i16, 3_i16, 3_i16)); + assert_eq!(v.xzzw(), i16vec4(1_i16, 3_i16, 3_i16, 4_i16)); + assert_eq!(v.xzwx(), i16vec4(1_i16, 3_i16, 4_i16, 1_i16)); + assert_eq!(v.xzwy(), i16vec4(1_i16, 3_i16, 4_i16, 2_i16)); + assert_eq!(v.xzwz(), i16vec4(1_i16, 3_i16, 4_i16, 3_i16)); + assert_eq!(v.xzww(), i16vec4(1_i16, 3_i16, 4_i16, 4_i16)); + assert_eq!(v.xwxx(), i16vec4(1_i16, 4_i16, 1_i16, 1_i16)); + assert_eq!(v.xwxy(), i16vec4(1_i16, 4_i16, 1_i16, 2_i16)); + assert_eq!(v.xwxz(), i16vec4(1_i16, 4_i16, 1_i16, 3_i16)); + assert_eq!(v.xwxw(), i16vec4(1_i16, 4_i16, 1_i16, 4_i16)); + assert_eq!(v.xwyx(), i16vec4(1_i16, 4_i16, 2_i16, 1_i16)); + assert_eq!(v.xwyy(), i16vec4(1_i16, 4_i16, 2_i16, 2_i16)); + assert_eq!(v.xwyz(), i16vec4(1_i16, 4_i16, 2_i16, 3_i16)); + assert_eq!(v.xwyw(), i16vec4(1_i16, 4_i16, 2_i16, 4_i16)); + assert_eq!(v.xwzx(), i16vec4(1_i16, 4_i16, 3_i16, 1_i16)); + assert_eq!(v.xwzy(), i16vec4(1_i16, 4_i16, 3_i16, 2_i16)); + assert_eq!(v.xwzz(), i16vec4(1_i16, 4_i16, 3_i16, 3_i16)); + assert_eq!(v.xwzw(), i16vec4(1_i16, 4_i16, 3_i16, 4_i16)); + assert_eq!(v.xwwx(), i16vec4(1_i16, 4_i16, 4_i16, 1_i16)); + assert_eq!(v.xwwy(), i16vec4(1_i16, 4_i16, 4_i16, 2_i16)); + assert_eq!(v.xwwz(), i16vec4(1_i16, 4_i16, 4_i16, 3_i16)); + assert_eq!(v.xwww(), i16vec4(1_i16, 4_i16, 4_i16, 4_i16)); + assert_eq!(v.yxxx(), i16vec4(2_i16, 1_i16, 1_i16, 1_i16)); + assert_eq!(v.yxxy(), i16vec4(2_i16, 1_i16, 1_i16, 2_i16)); + assert_eq!(v.yxxz(), i16vec4(2_i16, 1_i16, 1_i16, 3_i16)); + assert_eq!(v.yxxw(), i16vec4(2_i16, 1_i16, 1_i16, 4_i16)); + assert_eq!(v.yxyx(), i16vec4(2_i16, 1_i16, 2_i16, 1_i16)); + assert_eq!(v.yxyy(), i16vec4(2_i16, 1_i16, 2_i16, 2_i16)); + assert_eq!(v.yxyz(), i16vec4(2_i16, 1_i16, 2_i16, 3_i16)); + assert_eq!(v.yxyw(), i16vec4(2_i16, 1_i16, 2_i16, 4_i16)); + assert_eq!(v.yxzx(), i16vec4(2_i16, 1_i16, 3_i16, 1_i16)); + assert_eq!(v.yxzy(), i16vec4(2_i16, 1_i16, 3_i16, 2_i16)); + assert_eq!(v.yxzz(), i16vec4(2_i16, 1_i16, 3_i16, 3_i16)); + assert_eq!(v.yxzw(), i16vec4(2_i16, 1_i16, 3_i16, 4_i16)); + assert_eq!(v.yxwx(), i16vec4(2_i16, 1_i16, 4_i16, 1_i16)); + assert_eq!(v.yxwy(), i16vec4(2_i16, 1_i16, 4_i16, 2_i16)); + assert_eq!(v.yxwz(), i16vec4(2_i16, 1_i16, 4_i16, 3_i16)); + assert_eq!(v.yxww(), i16vec4(2_i16, 1_i16, 4_i16, 4_i16)); + assert_eq!(v.yyxx(), i16vec4(2_i16, 2_i16, 1_i16, 1_i16)); + assert_eq!(v.yyxy(), i16vec4(2_i16, 2_i16, 1_i16, 2_i16)); + assert_eq!(v.yyxz(), i16vec4(2_i16, 2_i16, 1_i16, 3_i16)); + assert_eq!(v.yyxw(), i16vec4(2_i16, 2_i16, 1_i16, 4_i16)); + assert_eq!(v.yyyx(), i16vec4(2_i16, 2_i16, 2_i16, 1_i16)); + assert_eq!(v.yyyy(), i16vec4(2_i16, 2_i16, 2_i16, 2_i16)); + assert_eq!(v.yyyz(), i16vec4(2_i16, 2_i16, 2_i16, 3_i16)); + assert_eq!(v.yyyw(), i16vec4(2_i16, 2_i16, 2_i16, 4_i16)); + assert_eq!(v.yyzx(), i16vec4(2_i16, 2_i16, 3_i16, 1_i16)); + assert_eq!(v.yyzy(), i16vec4(2_i16, 2_i16, 3_i16, 2_i16)); + assert_eq!(v.yyzz(), i16vec4(2_i16, 2_i16, 3_i16, 3_i16)); + assert_eq!(v.yyzw(), i16vec4(2_i16, 2_i16, 3_i16, 4_i16)); + assert_eq!(v.yywx(), i16vec4(2_i16, 2_i16, 4_i16, 1_i16)); + assert_eq!(v.yywy(), i16vec4(2_i16, 2_i16, 4_i16, 2_i16)); + assert_eq!(v.yywz(), i16vec4(2_i16, 2_i16, 4_i16, 3_i16)); + assert_eq!(v.yyww(), i16vec4(2_i16, 2_i16, 4_i16, 
4_i16)); + assert_eq!(v.yzxx(), i16vec4(2_i16, 3_i16, 1_i16, 1_i16)); + assert_eq!(v.yzxy(), i16vec4(2_i16, 3_i16, 1_i16, 2_i16)); + assert_eq!(v.yzxz(), i16vec4(2_i16, 3_i16, 1_i16, 3_i16)); + assert_eq!(v.yzxw(), i16vec4(2_i16, 3_i16, 1_i16, 4_i16)); + assert_eq!(v.yzyx(), i16vec4(2_i16, 3_i16, 2_i16, 1_i16)); + assert_eq!(v.yzyy(), i16vec4(2_i16, 3_i16, 2_i16, 2_i16)); + assert_eq!(v.yzyz(), i16vec4(2_i16, 3_i16, 2_i16, 3_i16)); + assert_eq!(v.yzyw(), i16vec4(2_i16, 3_i16, 2_i16, 4_i16)); + assert_eq!(v.yzzx(), i16vec4(2_i16, 3_i16, 3_i16, 1_i16)); + assert_eq!(v.yzzy(), i16vec4(2_i16, 3_i16, 3_i16, 2_i16)); + assert_eq!(v.yzzz(), i16vec4(2_i16, 3_i16, 3_i16, 3_i16)); + assert_eq!(v.yzzw(), i16vec4(2_i16, 3_i16, 3_i16, 4_i16)); + assert_eq!(v.yzwx(), i16vec4(2_i16, 3_i16, 4_i16, 1_i16)); + assert_eq!(v.yzwy(), i16vec4(2_i16, 3_i16, 4_i16, 2_i16)); + assert_eq!(v.yzwz(), i16vec4(2_i16, 3_i16, 4_i16, 3_i16)); + assert_eq!(v.yzww(), i16vec4(2_i16, 3_i16, 4_i16, 4_i16)); + assert_eq!(v.ywxx(), i16vec4(2_i16, 4_i16, 1_i16, 1_i16)); + assert_eq!(v.ywxy(), i16vec4(2_i16, 4_i16, 1_i16, 2_i16)); + assert_eq!(v.ywxz(), i16vec4(2_i16, 4_i16, 1_i16, 3_i16)); + assert_eq!(v.ywxw(), i16vec4(2_i16, 4_i16, 1_i16, 4_i16)); + assert_eq!(v.ywyx(), i16vec4(2_i16, 4_i16, 2_i16, 1_i16)); + assert_eq!(v.ywyy(), i16vec4(2_i16, 4_i16, 2_i16, 2_i16)); + assert_eq!(v.ywyz(), i16vec4(2_i16, 4_i16, 2_i16, 3_i16)); + assert_eq!(v.ywyw(), i16vec4(2_i16, 4_i16, 2_i16, 4_i16)); + assert_eq!(v.ywzx(), i16vec4(2_i16, 4_i16, 3_i16, 1_i16)); + assert_eq!(v.ywzy(), i16vec4(2_i16, 4_i16, 3_i16, 2_i16)); + assert_eq!(v.ywzz(), i16vec4(2_i16, 4_i16, 3_i16, 3_i16)); + assert_eq!(v.ywzw(), i16vec4(2_i16, 4_i16, 3_i16, 4_i16)); + assert_eq!(v.ywwx(), i16vec4(2_i16, 4_i16, 4_i16, 1_i16)); + assert_eq!(v.ywwy(), i16vec4(2_i16, 4_i16, 4_i16, 2_i16)); + assert_eq!(v.ywwz(), i16vec4(2_i16, 4_i16, 4_i16, 3_i16)); + assert_eq!(v.ywww(), i16vec4(2_i16, 4_i16, 4_i16, 4_i16)); + assert_eq!(v.zxxx(), i16vec4(3_i16, 1_i16, 1_i16, 1_i16)); + assert_eq!(v.zxxy(), i16vec4(3_i16, 1_i16, 1_i16, 2_i16)); + assert_eq!(v.zxxz(), i16vec4(3_i16, 1_i16, 1_i16, 3_i16)); + assert_eq!(v.zxxw(), i16vec4(3_i16, 1_i16, 1_i16, 4_i16)); + assert_eq!(v.zxyx(), i16vec4(3_i16, 1_i16, 2_i16, 1_i16)); + assert_eq!(v.zxyy(), i16vec4(3_i16, 1_i16, 2_i16, 2_i16)); + assert_eq!(v.zxyz(), i16vec4(3_i16, 1_i16, 2_i16, 3_i16)); + assert_eq!(v.zxyw(), i16vec4(3_i16, 1_i16, 2_i16, 4_i16)); + assert_eq!(v.zxzx(), i16vec4(3_i16, 1_i16, 3_i16, 1_i16)); + assert_eq!(v.zxzy(), i16vec4(3_i16, 1_i16, 3_i16, 2_i16)); + assert_eq!(v.zxzz(), i16vec4(3_i16, 1_i16, 3_i16, 3_i16)); + assert_eq!(v.zxzw(), i16vec4(3_i16, 1_i16, 3_i16, 4_i16)); + assert_eq!(v.zxwx(), i16vec4(3_i16, 1_i16, 4_i16, 1_i16)); + assert_eq!(v.zxwy(), i16vec4(3_i16, 1_i16, 4_i16, 2_i16)); + assert_eq!(v.zxwz(), i16vec4(3_i16, 1_i16, 4_i16, 3_i16)); + assert_eq!(v.zxww(), i16vec4(3_i16, 1_i16, 4_i16, 4_i16)); + assert_eq!(v.zyxx(), i16vec4(3_i16, 2_i16, 1_i16, 1_i16)); + assert_eq!(v.zyxy(), i16vec4(3_i16, 2_i16, 1_i16, 2_i16)); + assert_eq!(v.zyxz(), i16vec4(3_i16, 2_i16, 1_i16, 3_i16)); + assert_eq!(v.zyxw(), i16vec4(3_i16, 2_i16, 1_i16, 4_i16)); + assert_eq!(v.zyyx(), i16vec4(3_i16, 2_i16, 2_i16, 1_i16)); + assert_eq!(v.zyyy(), i16vec4(3_i16, 2_i16, 2_i16, 2_i16)); + assert_eq!(v.zyyz(), i16vec4(3_i16, 2_i16, 2_i16, 3_i16)); + assert_eq!(v.zyyw(), i16vec4(3_i16, 2_i16, 2_i16, 4_i16)); + assert_eq!(v.zyzx(), i16vec4(3_i16, 2_i16, 3_i16, 1_i16)); + assert_eq!(v.zyzy(), i16vec4(3_i16, 2_i16, 3_i16, 2_i16)); + 
assert_eq!(v.zyzz(), i16vec4(3_i16, 2_i16, 3_i16, 3_i16)); + assert_eq!(v.zyzw(), i16vec4(3_i16, 2_i16, 3_i16, 4_i16)); + assert_eq!(v.zywx(), i16vec4(3_i16, 2_i16, 4_i16, 1_i16)); + assert_eq!(v.zywy(), i16vec4(3_i16, 2_i16, 4_i16, 2_i16)); + assert_eq!(v.zywz(), i16vec4(3_i16, 2_i16, 4_i16, 3_i16)); + assert_eq!(v.zyww(), i16vec4(3_i16, 2_i16, 4_i16, 4_i16)); + assert_eq!(v.zzxx(), i16vec4(3_i16, 3_i16, 1_i16, 1_i16)); + assert_eq!(v.zzxy(), i16vec4(3_i16, 3_i16, 1_i16, 2_i16)); + assert_eq!(v.zzxz(), i16vec4(3_i16, 3_i16, 1_i16, 3_i16)); + assert_eq!(v.zzxw(), i16vec4(3_i16, 3_i16, 1_i16, 4_i16)); + assert_eq!(v.zzyx(), i16vec4(3_i16, 3_i16, 2_i16, 1_i16)); + assert_eq!(v.zzyy(), i16vec4(3_i16, 3_i16, 2_i16, 2_i16)); + assert_eq!(v.zzyz(), i16vec4(3_i16, 3_i16, 2_i16, 3_i16)); + assert_eq!(v.zzyw(), i16vec4(3_i16, 3_i16, 2_i16, 4_i16)); + assert_eq!(v.zzzx(), i16vec4(3_i16, 3_i16, 3_i16, 1_i16)); + assert_eq!(v.zzzy(), i16vec4(3_i16, 3_i16, 3_i16, 2_i16)); + assert_eq!(v.zzzz(), i16vec4(3_i16, 3_i16, 3_i16, 3_i16)); + assert_eq!(v.zzzw(), i16vec4(3_i16, 3_i16, 3_i16, 4_i16)); + assert_eq!(v.zzwx(), i16vec4(3_i16, 3_i16, 4_i16, 1_i16)); + assert_eq!(v.zzwy(), i16vec4(3_i16, 3_i16, 4_i16, 2_i16)); + assert_eq!(v.zzwz(), i16vec4(3_i16, 3_i16, 4_i16, 3_i16)); + assert_eq!(v.zzww(), i16vec4(3_i16, 3_i16, 4_i16, 4_i16)); + assert_eq!(v.zwxx(), i16vec4(3_i16, 4_i16, 1_i16, 1_i16)); + assert_eq!(v.zwxy(), i16vec4(3_i16, 4_i16, 1_i16, 2_i16)); + assert_eq!(v.zwxz(), i16vec4(3_i16, 4_i16, 1_i16, 3_i16)); + assert_eq!(v.zwxw(), i16vec4(3_i16, 4_i16, 1_i16, 4_i16)); + assert_eq!(v.zwyx(), i16vec4(3_i16, 4_i16, 2_i16, 1_i16)); + assert_eq!(v.zwyy(), i16vec4(3_i16, 4_i16, 2_i16, 2_i16)); + assert_eq!(v.zwyz(), i16vec4(3_i16, 4_i16, 2_i16, 3_i16)); + assert_eq!(v.zwyw(), i16vec4(3_i16, 4_i16, 2_i16, 4_i16)); + assert_eq!(v.zwzx(), i16vec4(3_i16, 4_i16, 3_i16, 1_i16)); + assert_eq!(v.zwzy(), i16vec4(3_i16, 4_i16, 3_i16, 2_i16)); + assert_eq!(v.zwzz(), i16vec4(3_i16, 4_i16, 3_i16, 3_i16)); + assert_eq!(v.zwzw(), i16vec4(3_i16, 4_i16, 3_i16, 4_i16)); + assert_eq!(v.zwwx(), i16vec4(3_i16, 4_i16, 4_i16, 1_i16)); + assert_eq!(v.zwwy(), i16vec4(3_i16, 4_i16, 4_i16, 2_i16)); + assert_eq!(v.zwwz(), i16vec4(3_i16, 4_i16, 4_i16, 3_i16)); + assert_eq!(v.zwww(), i16vec4(3_i16, 4_i16, 4_i16, 4_i16)); + assert_eq!(v.wxxx(), i16vec4(4_i16, 1_i16, 1_i16, 1_i16)); + assert_eq!(v.wxxy(), i16vec4(4_i16, 1_i16, 1_i16, 2_i16)); + assert_eq!(v.wxxz(), i16vec4(4_i16, 1_i16, 1_i16, 3_i16)); + assert_eq!(v.wxxw(), i16vec4(4_i16, 1_i16, 1_i16, 4_i16)); + assert_eq!(v.wxyx(), i16vec4(4_i16, 1_i16, 2_i16, 1_i16)); + assert_eq!(v.wxyy(), i16vec4(4_i16, 1_i16, 2_i16, 2_i16)); + assert_eq!(v.wxyz(), i16vec4(4_i16, 1_i16, 2_i16, 3_i16)); + assert_eq!(v.wxyw(), i16vec4(4_i16, 1_i16, 2_i16, 4_i16)); + assert_eq!(v.wxzx(), i16vec4(4_i16, 1_i16, 3_i16, 1_i16)); + assert_eq!(v.wxzy(), i16vec4(4_i16, 1_i16, 3_i16, 2_i16)); + assert_eq!(v.wxzz(), i16vec4(4_i16, 1_i16, 3_i16, 3_i16)); + assert_eq!(v.wxzw(), i16vec4(4_i16, 1_i16, 3_i16, 4_i16)); + assert_eq!(v.wxwx(), i16vec4(4_i16, 1_i16, 4_i16, 1_i16)); + assert_eq!(v.wxwy(), i16vec4(4_i16, 1_i16, 4_i16, 2_i16)); + assert_eq!(v.wxwz(), i16vec4(4_i16, 1_i16, 4_i16, 3_i16)); + assert_eq!(v.wxww(), i16vec4(4_i16, 1_i16, 4_i16, 4_i16)); + assert_eq!(v.wyxx(), i16vec4(4_i16, 2_i16, 1_i16, 1_i16)); + assert_eq!(v.wyxy(), i16vec4(4_i16, 2_i16, 1_i16, 2_i16)); + assert_eq!(v.wyxz(), i16vec4(4_i16, 2_i16, 1_i16, 3_i16)); + assert_eq!(v.wyxw(), i16vec4(4_i16, 2_i16, 1_i16, 4_i16)); + 
assert_eq!(v.wyyx(), i16vec4(4_i16, 2_i16, 2_i16, 1_i16)); + assert_eq!(v.wyyy(), i16vec4(4_i16, 2_i16, 2_i16, 2_i16)); + assert_eq!(v.wyyz(), i16vec4(4_i16, 2_i16, 2_i16, 3_i16)); + assert_eq!(v.wyyw(), i16vec4(4_i16, 2_i16, 2_i16, 4_i16)); + assert_eq!(v.wyzx(), i16vec4(4_i16, 2_i16, 3_i16, 1_i16)); + assert_eq!(v.wyzy(), i16vec4(4_i16, 2_i16, 3_i16, 2_i16)); + assert_eq!(v.wyzz(), i16vec4(4_i16, 2_i16, 3_i16, 3_i16)); + assert_eq!(v.wyzw(), i16vec4(4_i16, 2_i16, 3_i16, 4_i16)); + assert_eq!(v.wywx(), i16vec4(4_i16, 2_i16, 4_i16, 1_i16)); + assert_eq!(v.wywy(), i16vec4(4_i16, 2_i16, 4_i16, 2_i16)); + assert_eq!(v.wywz(), i16vec4(4_i16, 2_i16, 4_i16, 3_i16)); + assert_eq!(v.wyww(), i16vec4(4_i16, 2_i16, 4_i16, 4_i16)); + assert_eq!(v.wzxx(), i16vec4(4_i16, 3_i16, 1_i16, 1_i16)); + assert_eq!(v.wzxy(), i16vec4(4_i16, 3_i16, 1_i16, 2_i16)); + assert_eq!(v.wzxz(), i16vec4(4_i16, 3_i16, 1_i16, 3_i16)); + assert_eq!(v.wzxw(), i16vec4(4_i16, 3_i16, 1_i16, 4_i16)); + assert_eq!(v.wzyx(), i16vec4(4_i16, 3_i16, 2_i16, 1_i16)); + assert_eq!(v.wzyy(), i16vec4(4_i16, 3_i16, 2_i16, 2_i16)); + assert_eq!(v.wzyz(), i16vec4(4_i16, 3_i16, 2_i16, 3_i16)); + assert_eq!(v.wzyw(), i16vec4(4_i16, 3_i16, 2_i16, 4_i16)); + assert_eq!(v.wzzx(), i16vec4(4_i16, 3_i16, 3_i16, 1_i16)); + assert_eq!(v.wzzy(), i16vec4(4_i16, 3_i16, 3_i16, 2_i16)); + assert_eq!(v.wzzz(), i16vec4(4_i16, 3_i16, 3_i16, 3_i16)); + assert_eq!(v.wzzw(), i16vec4(4_i16, 3_i16, 3_i16, 4_i16)); + assert_eq!(v.wzwx(), i16vec4(4_i16, 3_i16, 4_i16, 1_i16)); + assert_eq!(v.wzwy(), i16vec4(4_i16, 3_i16, 4_i16, 2_i16)); + assert_eq!(v.wzwz(), i16vec4(4_i16, 3_i16, 4_i16, 3_i16)); + assert_eq!(v.wzww(), i16vec4(4_i16, 3_i16, 4_i16, 4_i16)); + assert_eq!(v.wwxx(), i16vec4(4_i16, 4_i16, 1_i16, 1_i16)); + assert_eq!(v.wwxy(), i16vec4(4_i16, 4_i16, 1_i16, 2_i16)); + assert_eq!(v.wwxz(), i16vec4(4_i16, 4_i16, 1_i16, 3_i16)); + assert_eq!(v.wwxw(), i16vec4(4_i16, 4_i16, 1_i16, 4_i16)); + assert_eq!(v.wwyx(), i16vec4(4_i16, 4_i16, 2_i16, 1_i16)); + assert_eq!(v.wwyy(), i16vec4(4_i16, 4_i16, 2_i16, 2_i16)); + assert_eq!(v.wwyz(), i16vec4(4_i16, 4_i16, 2_i16, 3_i16)); + assert_eq!(v.wwyw(), i16vec4(4_i16, 4_i16, 2_i16, 4_i16)); + assert_eq!(v.wwzx(), i16vec4(4_i16, 4_i16, 3_i16, 1_i16)); + assert_eq!(v.wwzy(), i16vec4(4_i16, 4_i16, 3_i16, 2_i16)); + assert_eq!(v.wwzz(), i16vec4(4_i16, 4_i16, 3_i16, 3_i16)); + assert_eq!(v.wwzw(), i16vec4(4_i16, 4_i16, 3_i16, 4_i16)); + assert_eq!(v.wwwx(), i16vec4(4_i16, 4_i16, 4_i16, 1_i16)); + assert_eq!(v.wwwy(), i16vec4(4_i16, 4_i16, 4_i16, 2_i16)); + assert_eq!(v.wwwz(), i16vec4(4_i16, 4_i16, 4_i16, 3_i16)); + assert_eq!(v.wwww(), i16vec4(4_i16, 4_i16, 4_i16, 4_i16)); + assert_eq!(v.xxx(), i16vec3(1_i16, 1_i16, 1_i16)); + assert_eq!(v.xxy(), i16vec3(1_i16, 1_i16, 2_i16)); + assert_eq!(v.xxz(), i16vec3(1_i16, 1_i16, 3_i16)); + assert_eq!(v.xxw(), i16vec3(1_i16, 1_i16, 4_i16)); + assert_eq!(v.xyx(), i16vec3(1_i16, 2_i16, 1_i16)); + assert_eq!(v.xyy(), i16vec3(1_i16, 2_i16, 2_i16)); + assert_eq!(v.xyz(), i16vec3(1_i16, 2_i16, 3_i16)); + assert_eq!(v.xyw(), i16vec3(1_i16, 2_i16, 4_i16)); + assert_eq!(v.xzx(), i16vec3(1_i16, 3_i16, 1_i16)); + assert_eq!(v.xzy(), i16vec3(1_i16, 3_i16, 2_i16)); + assert_eq!(v.xzz(), i16vec3(1_i16, 3_i16, 3_i16)); + assert_eq!(v.xzw(), i16vec3(1_i16, 3_i16, 4_i16)); + assert_eq!(v.xwx(), i16vec3(1_i16, 4_i16, 1_i16)); + assert_eq!(v.xwy(), i16vec3(1_i16, 4_i16, 2_i16)); + assert_eq!(v.xwz(), i16vec3(1_i16, 4_i16, 3_i16)); + assert_eq!(v.xww(), i16vec3(1_i16, 4_i16, 4_i16)); + assert_eq!(v.yxx(), 
i16vec3(2_i16, 1_i16, 1_i16)); + assert_eq!(v.yxy(), i16vec3(2_i16, 1_i16, 2_i16)); + assert_eq!(v.yxz(), i16vec3(2_i16, 1_i16, 3_i16)); + assert_eq!(v.yxw(), i16vec3(2_i16, 1_i16, 4_i16)); + assert_eq!(v.yyx(), i16vec3(2_i16, 2_i16, 1_i16)); + assert_eq!(v.yyy(), i16vec3(2_i16, 2_i16, 2_i16)); + assert_eq!(v.yyz(), i16vec3(2_i16, 2_i16, 3_i16)); + assert_eq!(v.yyw(), i16vec3(2_i16, 2_i16, 4_i16)); + assert_eq!(v.yzx(), i16vec3(2_i16, 3_i16, 1_i16)); + assert_eq!(v.yzy(), i16vec3(2_i16, 3_i16, 2_i16)); + assert_eq!(v.yzz(), i16vec3(2_i16, 3_i16, 3_i16)); + assert_eq!(v.yzw(), i16vec3(2_i16, 3_i16, 4_i16)); + assert_eq!(v.ywx(), i16vec3(2_i16, 4_i16, 1_i16)); + assert_eq!(v.ywy(), i16vec3(2_i16, 4_i16, 2_i16)); + assert_eq!(v.ywz(), i16vec3(2_i16, 4_i16, 3_i16)); + assert_eq!(v.yww(), i16vec3(2_i16, 4_i16, 4_i16)); + assert_eq!(v.zxx(), i16vec3(3_i16, 1_i16, 1_i16)); + assert_eq!(v.zxy(), i16vec3(3_i16, 1_i16, 2_i16)); + assert_eq!(v.zxz(), i16vec3(3_i16, 1_i16, 3_i16)); + assert_eq!(v.zxw(), i16vec3(3_i16, 1_i16, 4_i16)); + assert_eq!(v.zyx(), i16vec3(3_i16, 2_i16, 1_i16)); + assert_eq!(v.zyy(), i16vec3(3_i16, 2_i16, 2_i16)); + assert_eq!(v.zyz(), i16vec3(3_i16, 2_i16, 3_i16)); + assert_eq!(v.zyw(), i16vec3(3_i16, 2_i16, 4_i16)); + assert_eq!(v.zzx(), i16vec3(3_i16, 3_i16, 1_i16)); + assert_eq!(v.zzy(), i16vec3(3_i16, 3_i16, 2_i16)); + assert_eq!(v.zzz(), i16vec3(3_i16, 3_i16, 3_i16)); + assert_eq!(v.zzw(), i16vec3(3_i16, 3_i16, 4_i16)); + assert_eq!(v.zwx(), i16vec3(3_i16, 4_i16, 1_i16)); + assert_eq!(v.zwy(), i16vec3(3_i16, 4_i16, 2_i16)); + assert_eq!(v.zwz(), i16vec3(3_i16, 4_i16, 3_i16)); + assert_eq!(v.zww(), i16vec3(3_i16, 4_i16, 4_i16)); + assert_eq!(v.wxx(), i16vec3(4_i16, 1_i16, 1_i16)); + assert_eq!(v.wxy(), i16vec3(4_i16, 1_i16, 2_i16)); + assert_eq!(v.wxz(), i16vec3(4_i16, 1_i16, 3_i16)); + assert_eq!(v.wxw(), i16vec3(4_i16, 1_i16, 4_i16)); + assert_eq!(v.wyx(), i16vec3(4_i16, 2_i16, 1_i16)); + assert_eq!(v.wyy(), i16vec3(4_i16, 2_i16, 2_i16)); + assert_eq!(v.wyz(), i16vec3(4_i16, 2_i16, 3_i16)); + assert_eq!(v.wyw(), i16vec3(4_i16, 2_i16, 4_i16)); + assert_eq!(v.wzx(), i16vec3(4_i16, 3_i16, 1_i16)); + assert_eq!(v.wzy(), i16vec3(4_i16, 3_i16, 2_i16)); + assert_eq!(v.wzz(), i16vec3(4_i16, 3_i16, 3_i16)); + assert_eq!(v.wzw(), i16vec3(4_i16, 3_i16, 4_i16)); + assert_eq!(v.wwx(), i16vec3(4_i16, 4_i16, 1_i16)); + assert_eq!(v.wwy(), i16vec3(4_i16, 4_i16, 2_i16)); + assert_eq!(v.wwz(), i16vec3(4_i16, 4_i16, 3_i16)); + assert_eq!(v.www(), i16vec3(4_i16, 4_i16, 4_i16)); + assert_eq!(v.xx(), i16vec2(1_i16, 1_i16)); + assert_eq!(v.xy(), i16vec2(1_i16, 2_i16)); + assert_eq!(v.xz(), i16vec2(1_i16, 3_i16)); + assert_eq!(v.xw(), i16vec2(1_i16, 4_i16)); + assert_eq!(v.yx(), i16vec2(2_i16, 1_i16)); + assert_eq!(v.yy(), i16vec2(2_i16, 2_i16)); + assert_eq!(v.yz(), i16vec2(2_i16, 3_i16)); + assert_eq!(v.yw(), i16vec2(2_i16, 4_i16)); + assert_eq!(v.zx(), i16vec2(3_i16, 1_i16)); + assert_eq!(v.zy(), i16vec2(3_i16, 2_i16)); + assert_eq!(v.zz(), i16vec2(3_i16, 3_i16)); + assert_eq!(v.zw(), i16vec2(3_i16, 4_i16)); + assert_eq!(v.wx(), i16vec2(4_i16, 1_i16)); + assert_eq!(v.wy(), i16vec2(4_i16, 2_i16)); + assert_eq!(v.wz(), i16vec2(4_i16, 3_i16)); + assert_eq!(v.ww(), i16vec2(4_i16, 4_i16)); +}); + +glam_test!(test_i16vec3_swizzles, { + let v = i16vec3(1_i16, 2_i16, 3_i16); + assert_eq!(v, v.xyz()); + assert_eq!(v.xxxx(), i16vec4(1_i16, 1_i16, 1_i16, 1_i16)); + assert_eq!(v.xxxy(), i16vec4(1_i16, 1_i16, 1_i16, 2_i16)); + assert_eq!(v.xxxz(), i16vec4(1_i16, 1_i16, 1_i16, 3_i16)); + 
assert_eq!(v.xxyx(), i16vec4(1_i16, 1_i16, 2_i16, 1_i16)); + assert_eq!(v.xxyy(), i16vec4(1_i16, 1_i16, 2_i16, 2_i16)); + assert_eq!(v.xxyz(), i16vec4(1_i16, 1_i16, 2_i16, 3_i16)); + assert_eq!(v.xxzx(), i16vec4(1_i16, 1_i16, 3_i16, 1_i16)); + assert_eq!(v.xxzy(), i16vec4(1_i16, 1_i16, 3_i16, 2_i16)); + assert_eq!(v.xxzz(), i16vec4(1_i16, 1_i16, 3_i16, 3_i16)); + assert_eq!(v.xyxx(), i16vec4(1_i16, 2_i16, 1_i16, 1_i16)); + assert_eq!(v.xyxy(), i16vec4(1_i16, 2_i16, 1_i16, 2_i16)); + assert_eq!(v.xyxz(), i16vec4(1_i16, 2_i16, 1_i16, 3_i16)); + assert_eq!(v.xyyx(), i16vec4(1_i16, 2_i16, 2_i16, 1_i16)); + assert_eq!(v.xyyy(), i16vec4(1_i16, 2_i16, 2_i16, 2_i16)); + assert_eq!(v.xyyz(), i16vec4(1_i16, 2_i16, 2_i16, 3_i16)); + assert_eq!(v.xyzx(), i16vec4(1_i16, 2_i16, 3_i16, 1_i16)); + assert_eq!(v.xyzy(), i16vec4(1_i16, 2_i16, 3_i16, 2_i16)); + assert_eq!(v.xyzz(), i16vec4(1_i16, 2_i16, 3_i16, 3_i16)); + assert_eq!(v.xzxx(), i16vec4(1_i16, 3_i16, 1_i16, 1_i16)); + assert_eq!(v.xzxy(), i16vec4(1_i16, 3_i16, 1_i16, 2_i16)); + assert_eq!(v.xzxz(), i16vec4(1_i16, 3_i16, 1_i16, 3_i16)); + assert_eq!(v.xzyx(), i16vec4(1_i16, 3_i16, 2_i16, 1_i16)); + assert_eq!(v.xzyy(), i16vec4(1_i16, 3_i16, 2_i16, 2_i16)); + assert_eq!(v.xzyz(), i16vec4(1_i16, 3_i16, 2_i16, 3_i16)); + assert_eq!(v.xzzx(), i16vec4(1_i16, 3_i16, 3_i16, 1_i16)); + assert_eq!(v.xzzy(), i16vec4(1_i16, 3_i16, 3_i16, 2_i16)); + assert_eq!(v.xzzz(), i16vec4(1_i16, 3_i16, 3_i16, 3_i16)); + assert_eq!(v.yxxx(), i16vec4(2_i16, 1_i16, 1_i16, 1_i16)); + assert_eq!(v.yxxy(), i16vec4(2_i16, 1_i16, 1_i16, 2_i16)); + assert_eq!(v.yxxz(), i16vec4(2_i16, 1_i16, 1_i16, 3_i16)); + assert_eq!(v.yxyx(), i16vec4(2_i16, 1_i16, 2_i16, 1_i16)); + assert_eq!(v.yxyy(), i16vec4(2_i16, 1_i16, 2_i16, 2_i16)); + assert_eq!(v.yxyz(), i16vec4(2_i16, 1_i16, 2_i16, 3_i16)); + assert_eq!(v.yxzx(), i16vec4(2_i16, 1_i16, 3_i16, 1_i16)); + assert_eq!(v.yxzy(), i16vec4(2_i16, 1_i16, 3_i16, 2_i16)); + assert_eq!(v.yxzz(), i16vec4(2_i16, 1_i16, 3_i16, 3_i16)); + assert_eq!(v.yyxx(), i16vec4(2_i16, 2_i16, 1_i16, 1_i16)); + assert_eq!(v.yyxy(), i16vec4(2_i16, 2_i16, 1_i16, 2_i16)); + assert_eq!(v.yyxz(), i16vec4(2_i16, 2_i16, 1_i16, 3_i16)); + assert_eq!(v.yyyx(), i16vec4(2_i16, 2_i16, 2_i16, 1_i16)); + assert_eq!(v.yyyy(), i16vec4(2_i16, 2_i16, 2_i16, 2_i16)); + assert_eq!(v.yyyz(), i16vec4(2_i16, 2_i16, 2_i16, 3_i16)); + assert_eq!(v.yyzx(), i16vec4(2_i16, 2_i16, 3_i16, 1_i16)); + assert_eq!(v.yyzy(), i16vec4(2_i16, 2_i16, 3_i16, 2_i16)); + assert_eq!(v.yyzz(), i16vec4(2_i16, 2_i16, 3_i16, 3_i16)); + assert_eq!(v.yzxx(), i16vec4(2_i16, 3_i16, 1_i16, 1_i16)); + assert_eq!(v.yzxy(), i16vec4(2_i16, 3_i16, 1_i16, 2_i16)); + assert_eq!(v.yzxz(), i16vec4(2_i16, 3_i16, 1_i16, 3_i16)); + assert_eq!(v.yzyx(), i16vec4(2_i16, 3_i16, 2_i16, 1_i16)); + assert_eq!(v.yzyy(), i16vec4(2_i16, 3_i16, 2_i16, 2_i16)); + assert_eq!(v.yzyz(), i16vec4(2_i16, 3_i16, 2_i16, 3_i16)); + assert_eq!(v.yzzx(), i16vec4(2_i16, 3_i16, 3_i16, 1_i16)); + assert_eq!(v.yzzy(), i16vec4(2_i16, 3_i16, 3_i16, 2_i16)); + assert_eq!(v.yzzz(), i16vec4(2_i16, 3_i16, 3_i16, 3_i16)); + assert_eq!(v.zxxx(), i16vec4(3_i16, 1_i16, 1_i16, 1_i16)); + assert_eq!(v.zxxy(), i16vec4(3_i16, 1_i16, 1_i16, 2_i16)); + assert_eq!(v.zxxz(), i16vec4(3_i16, 1_i16, 1_i16, 3_i16)); + assert_eq!(v.zxyx(), i16vec4(3_i16, 1_i16, 2_i16, 1_i16)); + assert_eq!(v.zxyy(), i16vec4(3_i16, 1_i16, 2_i16, 2_i16)); + assert_eq!(v.zxyz(), i16vec4(3_i16, 1_i16, 2_i16, 3_i16)); + assert_eq!(v.zxzx(), i16vec4(3_i16, 1_i16, 3_i16, 1_i16)); + 
assert_eq!(v.zxzy(), i16vec4(3_i16, 1_i16, 3_i16, 2_i16)); + assert_eq!(v.zxzz(), i16vec4(3_i16, 1_i16, 3_i16, 3_i16)); + assert_eq!(v.zyxx(), i16vec4(3_i16, 2_i16, 1_i16, 1_i16)); + assert_eq!(v.zyxy(), i16vec4(3_i16, 2_i16, 1_i16, 2_i16)); + assert_eq!(v.zyxz(), i16vec4(3_i16, 2_i16, 1_i16, 3_i16)); + assert_eq!(v.zyyx(), i16vec4(3_i16, 2_i16, 2_i16, 1_i16)); + assert_eq!(v.zyyy(), i16vec4(3_i16, 2_i16, 2_i16, 2_i16)); + assert_eq!(v.zyyz(), i16vec4(3_i16, 2_i16, 2_i16, 3_i16)); + assert_eq!(v.zyzx(), i16vec4(3_i16, 2_i16, 3_i16, 1_i16)); + assert_eq!(v.zyzy(), i16vec4(3_i16, 2_i16, 3_i16, 2_i16)); + assert_eq!(v.zyzz(), i16vec4(3_i16, 2_i16, 3_i16, 3_i16)); + assert_eq!(v.zzxx(), i16vec4(3_i16, 3_i16, 1_i16, 1_i16)); + assert_eq!(v.zzxy(), i16vec4(3_i16, 3_i16, 1_i16, 2_i16)); + assert_eq!(v.zzxz(), i16vec4(3_i16, 3_i16, 1_i16, 3_i16)); + assert_eq!(v.zzyx(), i16vec4(3_i16, 3_i16, 2_i16, 1_i16)); + assert_eq!(v.zzyy(), i16vec4(3_i16, 3_i16, 2_i16, 2_i16)); + assert_eq!(v.zzyz(), i16vec4(3_i16, 3_i16, 2_i16, 3_i16)); + assert_eq!(v.zzzx(), i16vec4(3_i16, 3_i16, 3_i16, 1_i16)); + assert_eq!(v.zzzy(), i16vec4(3_i16, 3_i16, 3_i16, 2_i16)); + assert_eq!(v.zzzz(), i16vec4(3_i16, 3_i16, 3_i16, 3_i16)); + assert_eq!(v.xxx(), i16vec3(1_i16, 1_i16, 1_i16)); + assert_eq!(v.xxy(), i16vec3(1_i16, 1_i16, 2_i16)); + assert_eq!(v.xxz(), i16vec3(1_i16, 1_i16, 3_i16)); + assert_eq!(v.xyx(), i16vec3(1_i16, 2_i16, 1_i16)); + assert_eq!(v.xyy(), i16vec3(1_i16, 2_i16, 2_i16)); + assert_eq!(v.xzx(), i16vec3(1_i16, 3_i16, 1_i16)); + assert_eq!(v.xzy(), i16vec3(1_i16, 3_i16, 2_i16)); + assert_eq!(v.xzz(), i16vec3(1_i16, 3_i16, 3_i16)); + assert_eq!(v.yxx(), i16vec3(2_i16, 1_i16, 1_i16)); + assert_eq!(v.yxy(), i16vec3(2_i16, 1_i16, 2_i16)); + assert_eq!(v.yxz(), i16vec3(2_i16, 1_i16, 3_i16)); + assert_eq!(v.yyx(), i16vec3(2_i16, 2_i16, 1_i16)); + assert_eq!(v.yyy(), i16vec3(2_i16, 2_i16, 2_i16)); + assert_eq!(v.yyz(), i16vec3(2_i16, 2_i16, 3_i16)); + assert_eq!(v.yzx(), i16vec3(2_i16, 3_i16, 1_i16)); + assert_eq!(v.yzy(), i16vec3(2_i16, 3_i16, 2_i16)); + assert_eq!(v.yzz(), i16vec3(2_i16, 3_i16, 3_i16)); + assert_eq!(v.zxx(), i16vec3(3_i16, 1_i16, 1_i16)); + assert_eq!(v.zxy(), i16vec3(3_i16, 1_i16, 2_i16)); + assert_eq!(v.zxz(), i16vec3(3_i16, 1_i16, 3_i16)); + assert_eq!(v.zyx(), i16vec3(3_i16, 2_i16, 1_i16)); + assert_eq!(v.zyy(), i16vec3(3_i16, 2_i16, 2_i16)); + assert_eq!(v.zyz(), i16vec3(3_i16, 2_i16, 3_i16)); + assert_eq!(v.zzx(), i16vec3(3_i16, 3_i16, 1_i16)); + assert_eq!(v.zzy(), i16vec3(3_i16, 3_i16, 2_i16)); + assert_eq!(v.zzz(), i16vec3(3_i16, 3_i16, 3_i16)); + assert_eq!(v.xx(), i16vec2(1_i16, 1_i16)); + assert_eq!(v.xy(), i16vec2(1_i16, 2_i16)); + assert_eq!(v.xz(), i16vec2(1_i16, 3_i16)); + assert_eq!(v.yx(), i16vec2(2_i16, 1_i16)); + assert_eq!(v.yy(), i16vec2(2_i16, 2_i16)); + assert_eq!(v.yz(), i16vec2(2_i16, 3_i16)); + assert_eq!(v.zx(), i16vec2(3_i16, 1_i16)); + assert_eq!(v.zy(), i16vec2(3_i16, 2_i16)); + assert_eq!(v.zz(), i16vec2(3_i16, 3_i16)); +}); + +glam_test!(test_i16vec2_swizzles, { + let v = i16vec2(1_i16, 2_i16); + assert_eq!(v, v.xy()); + assert_eq!(v.xxxx(), i16vec4(1_i16, 1_i16, 1_i16, 1_i16)); + assert_eq!(v.xxxy(), i16vec4(1_i16, 1_i16, 1_i16, 2_i16)); + assert_eq!(v.xxyx(), i16vec4(1_i16, 1_i16, 2_i16, 1_i16)); + assert_eq!(v.xxyy(), i16vec4(1_i16, 1_i16, 2_i16, 2_i16)); + assert_eq!(v.xyxx(), i16vec4(1_i16, 2_i16, 1_i16, 1_i16)); + assert_eq!(v.xyxy(), i16vec4(1_i16, 2_i16, 1_i16, 2_i16)); + assert_eq!(v.xyyx(), i16vec4(1_i16, 2_i16, 2_i16, 1_i16)); + assert_eq!(v.xyyy(), 
i16vec4(1_i16, 2_i16, 2_i16, 2_i16)); + assert_eq!(v.yxxx(), i16vec4(2_i16, 1_i16, 1_i16, 1_i16)); + assert_eq!(v.yxxy(), i16vec4(2_i16, 1_i16, 1_i16, 2_i16)); + assert_eq!(v.yxyx(), i16vec4(2_i16, 1_i16, 2_i16, 1_i16)); + assert_eq!(v.yxyy(), i16vec4(2_i16, 1_i16, 2_i16, 2_i16)); + assert_eq!(v.yyxx(), i16vec4(2_i16, 2_i16, 1_i16, 1_i16)); + assert_eq!(v.yyxy(), i16vec4(2_i16, 2_i16, 1_i16, 2_i16)); + assert_eq!(v.yyyx(), i16vec4(2_i16, 2_i16, 2_i16, 1_i16)); + assert_eq!(v.yyyy(), i16vec4(2_i16, 2_i16, 2_i16, 2_i16)); + assert_eq!(v.xxx(), i16vec3(1_i16, 1_i16, 1_i16)); + assert_eq!(v.xxy(), i16vec3(1_i16, 1_i16, 2_i16)); + assert_eq!(v.xyx(), i16vec3(1_i16, 2_i16, 1_i16)); + assert_eq!(v.xyy(), i16vec3(1_i16, 2_i16, 2_i16)); + assert_eq!(v.yxx(), i16vec3(2_i16, 1_i16, 1_i16)); + assert_eq!(v.yxy(), i16vec3(2_i16, 1_i16, 2_i16)); + assert_eq!(v.yyx(), i16vec3(2_i16, 2_i16, 1_i16)); + assert_eq!(v.yyy(), i16vec3(2_i16, 2_i16, 2_i16)); + assert_eq!(v.xx(), i16vec2(1_i16, 1_i16)); + assert_eq!(v.yx(), i16vec2(2_i16, 1_i16)); + assert_eq!(v.yy(), i16vec2(2_i16, 2_i16)); +}); diff --git a/tests/swizzles_u16.rs b/tests/swizzles_u16.rs new file mode 100644 index 00000000..e72b3017 --- /dev/null +++ b/tests/swizzles_u16.rs @@ -0,0 +1,497 @@ +// Generated by swizzlegen. Do not edit. +#[macro_use] +mod support; +use glam::*; + +glam_test!(test_u16vec4_swizzles, { + let v = u16vec4(1_u16, 2_u16, 3_u16, 4_u16); + assert_eq!(v, v.xyzw()); + assert_eq!(v.xxxx(), u16vec4(1_u16, 1_u16, 1_u16, 1_u16)); + assert_eq!(v.xxxy(), u16vec4(1_u16, 1_u16, 1_u16, 2_u16)); + assert_eq!(v.xxxz(), u16vec4(1_u16, 1_u16, 1_u16, 3_u16)); + assert_eq!(v.xxxw(), u16vec4(1_u16, 1_u16, 1_u16, 4_u16)); + assert_eq!(v.xxyx(), u16vec4(1_u16, 1_u16, 2_u16, 1_u16)); + assert_eq!(v.xxyy(), u16vec4(1_u16, 1_u16, 2_u16, 2_u16)); + assert_eq!(v.xxyz(), u16vec4(1_u16, 1_u16, 2_u16, 3_u16)); + assert_eq!(v.xxyw(), u16vec4(1_u16, 1_u16, 2_u16, 4_u16)); + assert_eq!(v.xxzx(), u16vec4(1_u16, 1_u16, 3_u16, 1_u16)); + assert_eq!(v.xxzy(), u16vec4(1_u16, 1_u16, 3_u16, 2_u16)); + assert_eq!(v.xxzz(), u16vec4(1_u16, 1_u16, 3_u16, 3_u16)); + assert_eq!(v.xxzw(), u16vec4(1_u16, 1_u16, 3_u16, 4_u16)); + assert_eq!(v.xxwx(), u16vec4(1_u16, 1_u16, 4_u16, 1_u16)); + assert_eq!(v.xxwy(), u16vec4(1_u16, 1_u16, 4_u16, 2_u16)); + assert_eq!(v.xxwz(), u16vec4(1_u16, 1_u16, 4_u16, 3_u16)); + assert_eq!(v.xxww(), u16vec4(1_u16, 1_u16, 4_u16, 4_u16)); + assert_eq!(v.xyxx(), u16vec4(1_u16, 2_u16, 1_u16, 1_u16)); + assert_eq!(v.xyxy(), u16vec4(1_u16, 2_u16, 1_u16, 2_u16)); + assert_eq!(v.xyxz(), u16vec4(1_u16, 2_u16, 1_u16, 3_u16)); + assert_eq!(v.xyxw(), u16vec4(1_u16, 2_u16, 1_u16, 4_u16)); + assert_eq!(v.xyyx(), u16vec4(1_u16, 2_u16, 2_u16, 1_u16)); + assert_eq!(v.xyyy(), u16vec4(1_u16, 2_u16, 2_u16, 2_u16)); + assert_eq!(v.xyyz(), u16vec4(1_u16, 2_u16, 2_u16, 3_u16)); + assert_eq!(v.xyyw(), u16vec4(1_u16, 2_u16, 2_u16, 4_u16)); + assert_eq!(v.xyzx(), u16vec4(1_u16, 2_u16, 3_u16, 1_u16)); + assert_eq!(v.xyzy(), u16vec4(1_u16, 2_u16, 3_u16, 2_u16)); + assert_eq!(v.xyzz(), u16vec4(1_u16, 2_u16, 3_u16, 3_u16)); + assert_eq!(v.xywx(), u16vec4(1_u16, 2_u16, 4_u16, 1_u16)); + assert_eq!(v.xywy(), u16vec4(1_u16, 2_u16, 4_u16, 2_u16)); + assert_eq!(v.xywz(), u16vec4(1_u16, 2_u16, 4_u16, 3_u16)); + assert_eq!(v.xyww(), u16vec4(1_u16, 2_u16, 4_u16, 4_u16)); + assert_eq!(v.xzxx(), u16vec4(1_u16, 3_u16, 1_u16, 1_u16)); + assert_eq!(v.xzxy(), u16vec4(1_u16, 3_u16, 1_u16, 2_u16)); + assert_eq!(v.xzxz(), u16vec4(1_u16, 3_u16, 1_u16, 3_u16)); + assert_eq!(v.xzxw(), 
u16vec4(1_u16, 3_u16, 1_u16, 4_u16)); + assert_eq!(v.xzyx(), u16vec4(1_u16, 3_u16, 2_u16, 1_u16)); + assert_eq!(v.xzyy(), u16vec4(1_u16, 3_u16, 2_u16, 2_u16)); + assert_eq!(v.xzyz(), u16vec4(1_u16, 3_u16, 2_u16, 3_u16)); + assert_eq!(v.xzyw(), u16vec4(1_u16, 3_u16, 2_u16, 4_u16)); + assert_eq!(v.xzzx(), u16vec4(1_u16, 3_u16, 3_u16, 1_u16)); + assert_eq!(v.xzzy(), u16vec4(1_u16, 3_u16, 3_u16, 2_u16)); + assert_eq!(v.xzzz(), u16vec4(1_u16, 3_u16, 3_u16, 3_u16)); + assert_eq!(v.xzzw(), u16vec4(1_u16, 3_u16, 3_u16, 4_u16)); + assert_eq!(v.xzwx(), u16vec4(1_u16, 3_u16, 4_u16, 1_u16)); + assert_eq!(v.xzwy(), u16vec4(1_u16, 3_u16, 4_u16, 2_u16)); + assert_eq!(v.xzwz(), u16vec4(1_u16, 3_u16, 4_u16, 3_u16)); + assert_eq!(v.xzww(), u16vec4(1_u16, 3_u16, 4_u16, 4_u16)); + assert_eq!(v.xwxx(), u16vec4(1_u16, 4_u16, 1_u16, 1_u16)); + assert_eq!(v.xwxy(), u16vec4(1_u16, 4_u16, 1_u16, 2_u16)); + assert_eq!(v.xwxz(), u16vec4(1_u16, 4_u16, 1_u16, 3_u16)); + assert_eq!(v.xwxw(), u16vec4(1_u16, 4_u16, 1_u16, 4_u16)); + assert_eq!(v.xwyx(), u16vec4(1_u16, 4_u16, 2_u16, 1_u16)); + assert_eq!(v.xwyy(), u16vec4(1_u16, 4_u16, 2_u16, 2_u16)); + assert_eq!(v.xwyz(), u16vec4(1_u16, 4_u16, 2_u16, 3_u16)); + assert_eq!(v.xwyw(), u16vec4(1_u16, 4_u16, 2_u16, 4_u16)); + assert_eq!(v.xwzx(), u16vec4(1_u16, 4_u16, 3_u16, 1_u16)); + assert_eq!(v.xwzy(), u16vec4(1_u16, 4_u16, 3_u16, 2_u16)); + assert_eq!(v.xwzz(), u16vec4(1_u16, 4_u16, 3_u16, 3_u16)); + assert_eq!(v.xwzw(), u16vec4(1_u16, 4_u16, 3_u16, 4_u16)); + assert_eq!(v.xwwx(), u16vec4(1_u16, 4_u16, 4_u16, 1_u16)); + assert_eq!(v.xwwy(), u16vec4(1_u16, 4_u16, 4_u16, 2_u16)); + assert_eq!(v.xwwz(), u16vec4(1_u16, 4_u16, 4_u16, 3_u16)); + assert_eq!(v.xwww(), u16vec4(1_u16, 4_u16, 4_u16, 4_u16)); + assert_eq!(v.yxxx(), u16vec4(2_u16, 1_u16, 1_u16, 1_u16)); + assert_eq!(v.yxxy(), u16vec4(2_u16, 1_u16, 1_u16, 2_u16)); + assert_eq!(v.yxxz(), u16vec4(2_u16, 1_u16, 1_u16, 3_u16)); + assert_eq!(v.yxxw(), u16vec4(2_u16, 1_u16, 1_u16, 4_u16)); + assert_eq!(v.yxyx(), u16vec4(2_u16, 1_u16, 2_u16, 1_u16)); + assert_eq!(v.yxyy(), u16vec4(2_u16, 1_u16, 2_u16, 2_u16)); + assert_eq!(v.yxyz(), u16vec4(2_u16, 1_u16, 2_u16, 3_u16)); + assert_eq!(v.yxyw(), u16vec4(2_u16, 1_u16, 2_u16, 4_u16)); + assert_eq!(v.yxzx(), u16vec4(2_u16, 1_u16, 3_u16, 1_u16)); + assert_eq!(v.yxzy(), u16vec4(2_u16, 1_u16, 3_u16, 2_u16)); + assert_eq!(v.yxzz(), u16vec4(2_u16, 1_u16, 3_u16, 3_u16)); + assert_eq!(v.yxzw(), u16vec4(2_u16, 1_u16, 3_u16, 4_u16)); + assert_eq!(v.yxwx(), u16vec4(2_u16, 1_u16, 4_u16, 1_u16)); + assert_eq!(v.yxwy(), u16vec4(2_u16, 1_u16, 4_u16, 2_u16)); + assert_eq!(v.yxwz(), u16vec4(2_u16, 1_u16, 4_u16, 3_u16)); + assert_eq!(v.yxww(), u16vec4(2_u16, 1_u16, 4_u16, 4_u16)); + assert_eq!(v.yyxx(), u16vec4(2_u16, 2_u16, 1_u16, 1_u16)); + assert_eq!(v.yyxy(), u16vec4(2_u16, 2_u16, 1_u16, 2_u16)); + assert_eq!(v.yyxz(), u16vec4(2_u16, 2_u16, 1_u16, 3_u16)); + assert_eq!(v.yyxw(), u16vec4(2_u16, 2_u16, 1_u16, 4_u16)); + assert_eq!(v.yyyx(), u16vec4(2_u16, 2_u16, 2_u16, 1_u16)); + assert_eq!(v.yyyy(), u16vec4(2_u16, 2_u16, 2_u16, 2_u16)); + assert_eq!(v.yyyz(), u16vec4(2_u16, 2_u16, 2_u16, 3_u16)); + assert_eq!(v.yyyw(), u16vec4(2_u16, 2_u16, 2_u16, 4_u16)); + assert_eq!(v.yyzx(), u16vec4(2_u16, 2_u16, 3_u16, 1_u16)); + assert_eq!(v.yyzy(), u16vec4(2_u16, 2_u16, 3_u16, 2_u16)); + assert_eq!(v.yyzz(), u16vec4(2_u16, 2_u16, 3_u16, 3_u16)); + assert_eq!(v.yyzw(), u16vec4(2_u16, 2_u16, 3_u16, 4_u16)); + assert_eq!(v.yywx(), u16vec4(2_u16, 2_u16, 4_u16, 1_u16)); + assert_eq!(v.yywy(), u16vec4(2_u16, 
2_u16, 4_u16, 2_u16)); + assert_eq!(v.yywz(), u16vec4(2_u16, 2_u16, 4_u16, 3_u16)); + assert_eq!(v.yyww(), u16vec4(2_u16, 2_u16, 4_u16, 4_u16)); + assert_eq!(v.yzxx(), u16vec4(2_u16, 3_u16, 1_u16, 1_u16)); + assert_eq!(v.yzxy(), u16vec4(2_u16, 3_u16, 1_u16, 2_u16)); + assert_eq!(v.yzxz(), u16vec4(2_u16, 3_u16, 1_u16, 3_u16)); + assert_eq!(v.yzxw(), u16vec4(2_u16, 3_u16, 1_u16, 4_u16)); + assert_eq!(v.yzyx(), u16vec4(2_u16, 3_u16, 2_u16, 1_u16)); + assert_eq!(v.yzyy(), u16vec4(2_u16, 3_u16, 2_u16, 2_u16)); + assert_eq!(v.yzyz(), u16vec4(2_u16, 3_u16, 2_u16, 3_u16)); + assert_eq!(v.yzyw(), u16vec4(2_u16, 3_u16, 2_u16, 4_u16)); + assert_eq!(v.yzzx(), u16vec4(2_u16, 3_u16, 3_u16, 1_u16)); + assert_eq!(v.yzzy(), u16vec4(2_u16, 3_u16, 3_u16, 2_u16)); + assert_eq!(v.yzzz(), u16vec4(2_u16, 3_u16, 3_u16, 3_u16)); + assert_eq!(v.yzzw(), u16vec4(2_u16, 3_u16, 3_u16, 4_u16)); + assert_eq!(v.yzwx(), u16vec4(2_u16, 3_u16, 4_u16, 1_u16)); + assert_eq!(v.yzwy(), u16vec4(2_u16, 3_u16, 4_u16, 2_u16)); + assert_eq!(v.yzwz(), u16vec4(2_u16, 3_u16, 4_u16, 3_u16)); + assert_eq!(v.yzww(), u16vec4(2_u16, 3_u16, 4_u16, 4_u16)); + assert_eq!(v.ywxx(), u16vec4(2_u16, 4_u16, 1_u16, 1_u16)); + assert_eq!(v.ywxy(), u16vec4(2_u16, 4_u16, 1_u16, 2_u16)); + assert_eq!(v.ywxz(), u16vec4(2_u16, 4_u16, 1_u16, 3_u16)); + assert_eq!(v.ywxw(), u16vec4(2_u16, 4_u16, 1_u16, 4_u16)); + assert_eq!(v.ywyx(), u16vec4(2_u16, 4_u16, 2_u16, 1_u16)); + assert_eq!(v.ywyy(), u16vec4(2_u16, 4_u16, 2_u16, 2_u16)); + assert_eq!(v.ywyz(), u16vec4(2_u16, 4_u16, 2_u16, 3_u16)); + assert_eq!(v.ywyw(), u16vec4(2_u16, 4_u16, 2_u16, 4_u16)); + assert_eq!(v.ywzx(), u16vec4(2_u16, 4_u16, 3_u16, 1_u16)); + assert_eq!(v.ywzy(), u16vec4(2_u16, 4_u16, 3_u16, 2_u16)); + assert_eq!(v.ywzz(), u16vec4(2_u16, 4_u16, 3_u16, 3_u16)); + assert_eq!(v.ywzw(), u16vec4(2_u16, 4_u16, 3_u16, 4_u16)); + assert_eq!(v.ywwx(), u16vec4(2_u16, 4_u16, 4_u16, 1_u16)); + assert_eq!(v.ywwy(), u16vec4(2_u16, 4_u16, 4_u16, 2_u16)); + assert_eq!(v.ywwz(), u16vec4(2_u16, 4_u16, 4_u16, 3_u16)); + assert_eq!(v.ywww(), u16vec4(2_u16, 4_u16, 4_u16, 4_u16)); + assert_eq!(v.zxxx(), u16vec4(3_u16, 1_u16, 1_u16, 1_u16)); + assert_eq!(v.zxxy(), u16vec4(3_u16, 1_u16, 1_u16, 2_u16)); + assert_eq!(v.zxxz(), u16vec4(3_u16, 1_u16, 1_u16, 3_u16)); + assert_eq!(v.zxxw(), u16vec4(3_u16, 1_u16, 1_u16, 4_u16)); + assert_eq!(v.zxyx(), u16vec4(3_u16, 1_u16, 2_u16, 1_u16)); + assert_eq!(v.zxyy(), u16vec4(3_u16, 1_u16, 2_u16, 2_u16)); + assert_eq!(v.zxyz(), u16vec4(3_u16, 1_u16, 2_u16, 3_u16)); + assert_eq!(v.zxyw(), u16vec4(3_u16, 1_u16, 2_u16, 4_u16)); + assert_eq!(v.zxzx(), u16vec4(3_u16, 1_u16, 3_u16, 1_u16)); + assert_eq!(v.zxzy(), u16vec4(3_u16, 1_u16, 3_u16, 2_u16)); + assert_eq!(v.zxzz(), u16vec4(3_u16, 1_u16, 3_u16, 3_u16)); + assert_eq!(v.zxzw(), u16vec4(3_u16, 1_u16, 3_u16, 4_u16)); + assert_eq!(v.zxwx(), u16vec4(3_u16, 1_u16, 4_u16, 1_u16)); + assert_eq!(v.zxwy(), u16vec4(3_u16, 1_u16, 4_u16, 2_u16)); + assert_eq!(v.zxwz(), u16vec4(3_u16, 1_u16, 4_u16, 3_u16)); + assert_eq!(v.zxww(), u16vec4(3_u16, 1_u16, 4_u16, 4_u16)); + assert_eq!(v.zyxx(), u16vec4(3_u16, 2_u16, 1_u16, 1_u16)); + assert_eq!(v.zyxy(), u16vec4(3_u16, 2_u16, 1_u16, 2_u16)); + assert_eq!(v.zyxz(), u16vec4(3_u16, 2_u16, 1_u16, 3_u16)); + assert_eq!(v.zyxw(), u16vec4(3_u16, 2_u16, 1_u16, 4_u16)); + assert_eq!(v.zyyx(), u16vec4(3_u16, 2_u16, 2_u16, 1_u16)); + assert_eq!(v.zyyy(), u16vec4(3_u16, 2_u16, 2_u16, 2_u16)); + assert_eq!(v.zyyz(), u16vec4(3_u16, 2_u16, 2_u16, 3_u16)); + assert_eq!(v.zyyw(), u16vec4(3_u16, 2_u16, 2_u16, 
4_u16)); + assert_eq!(v.zyzx(), u16vec4(3_u16, 2_u16, 3_u16, 1_u16)); + assert_eq!(v.zyzy(), u16vec4(3_u16, 2_u16, 3_u16, 2_u16)); + assert_eq!(v.zyzz(), u16vec4(3_u16, 2_u16, 3_u16, 3_u16)); + assert_eq!(v.zyzw(), u16vec4(3_u16, 2_u16, 3_u16, 4_u16)); + assert_eq!(v.zywx(), u16vec4(3_u16, 2_u16, 4_u16, 1_u16)); + assert_eq!(v.zywy(), u16vec4(3_u16, 2_u16, 4_u16, 2_u16)); + assert_eq!(v.zywz(), u16vec4(3_u16, 2_u16, 4_u16, 3_u16)); + assert_eq!(v.zyww(), u16vec4(3_u16, 2_u16, 4_u16, 4_u16)); + assert_eq!(v.zzxx(), u16vec4(3_u16, 3_u16, 1_u16, 1_u16)); + assert_eq!(v.zzxy(), u16vec4(3_u16, 3_u16, 1_u16, 2_u16)); + assert_eq!(v.zzxz(), u16vec4(3_u16, 3_u16, 1_u16, 3_u16)); + assert_eq!(v.zzxw(), u16vec4(3_u16, 3_u16, 1_u16, 4_u16)); + assert_eq!(v.zzyx(), u16vec4(3_u16, 3_u16, 2_u16, 1_u16)); + assert_eq!(v.zzyy(), u16vec4(3_u16, 3_u16, 2_u16, 2_u16)); + assert_eq!(v.zzyz(), u16vec4(3_u16, 3_u16, 2_u16, 3_u16)); + assert_eq!(v.zzyw(), u16vec4(3_u16, 3_u16, 2_u16, 4_u16)); + assert_eq!(v.zzzx(), u16vec4(3_u16, 3_u16, 3_u16, 1_u16)); + assert_eq!(v.zzzy(), u16vec4(3_u16, 3_u16, 3_u16, 2_u16)); + assert_eq!(v.zzzz(), u16vec4(3_u16, 3_u16, 3_u16, 3_u16)); + assert_eq!(v.zzzw(), u16vec4(3_u16, 3_u16, 3_u16, 4_u16)); + assert_eq!(v.zzwx(), u16vec4(3_u16, 3_u16, 4_u16, 1_u16)); + assert_eq!(v.zzwy(), u16vec4(3_u16, 3_u16, 4_u16, 2_u16)); + assert_eq!(v.zzwz(), u16vec4(3_u16, 3_u16, 4_u16, 3_u16)); + assert_eq!(v.zzww(), u16vec4(3_u16, 3_u16, 4_u16, 4_u16)); + assert_eq!(v.zwxx(), u16vec4(3_u16, 4_u16, 1_u16, 1_u16)); + assert_eq!(v.zwxy(), u16vec4(3_u16, 4_u16, 1_u16, 2_u16)); + assert_eq!(v.zwxz(), u16vec4(3_u16, 4_u16, 1_u16, 3_u16)); + assert_eq!(v.zwxw(), u16vec4(3_u16, 4_u16, 1_u16, 4_u16)); + assert_eq!(v.zwyx(), u16vec4(3_u16, 4_u16, 2_u16, 1_u16)); + assert_eq!(v.zwyy(), u16vec4(3_u16, 4_u16, 2_u16, 2_u16)); + assert_eq!(v.zwyz(), u16vec4(3_u16, 4_u16, 2_u16, 3_u16)); + assert_eq!(v.zwyw(), u16vec4(3_u16, 4_u16, 2_u16, 4_u16)); + assert_eq!(v.zwzx(), u16vec4(3_u16, 4_u16, 3_u16, 1_u16)); + assert_eq!(v.zwzy(), u16vec4(3_u16, 4_u16, 3_u16, 2_u16)); + assert_eq!(v.zwzz(), u16vec4(3_u16, 4_u16, 3_u16, 3_u16)); + assert_eq!(v.zwzw(), u16vec4(3_u16, 4_u16, 3_u16, 4_u16)); + assert_eq!(v.zwwx(), u16vec4(3_u16, 4_u16, 4_u16, 1_u16)); + assert_eq!(v.zwwy(), u16vec4(3_u16, 4_u16, 4_u16, 2_u16)); + assert_eq!(v.zwwz(), u16vec4(3_u16, 4_u16, 4_u16, 3_u16)); + assert_eq!(v.zwww(), u16vec4(3_u16, 4_u16, 4_u16, 4_u16)); + assert_eq!(v.wxxx(), u16vec4(4_u16, 1_u16, 1_u16, 1_u16)); + assert_eq!(v.wxxy(), u16vec4(4_u16, 1_u16, 1_u16, 2_u16)); + assert_eq!(v.wxxz(), u16vec4(4_u16, 1_u16, 1_u16, 3_u16)); + assert_eq!(v.wxxw(), u16vec4(4_u16, 1_u16, 1_u16, 4_u16)); + assert_eq!(v.wxyx(), u16vec4(4_u16, 1_u16, 2_u16, 1_u16)); + assert_eq!(v.wxyy(), u16vec4(4_u16, 1_u16, 2_u16, 2_u16)); + assert_eq!(v.wxyz(), u16vec4(4_u16, 1_u16, 2_u16, 3_u16)); + assert_eq!(v.wxyw(), u16vec4(4_u16, 1_u16, 2_u16, 4_u16)); + assert_eq!(v.wxzx(), u16vec4(4_u16, 1_u16, 3_u16, 1_u16)); + assert_eq!(v.wxzy(), u16vec4(4_u16, 1_u16, 3_u16, 2_u16)); + assert_eq!(v.wxzz(), u16vec4(4_u16, 1_u16, 3_u16, 3_u16)); + assert_eq!(v.wxzw(), u16vec4(4_u16, 1_u16, 3_u16, 4_u16)); + assert_eq!(v.wxwx(), u16vec4(4_u16, 1_u16, 4_u16, 1_u16)); + assert_eq!(v.wxwy(), u16vec4(4_u16, 1_u16, 4_u16, 2_u16)); + assert_eq!(v.wxwz(), u16vec4(4_u16, 1_u16, 4_u16, 3_u16)); + assert_eq!(v.wxww(), u16vec4(4_u16, 1_u16, 4_u16, 4_u16)); + assert_eq!(v.wyxx(), u16vec4(4_u16, 2_u16, 1_u16, 1_u16)); + assert_eq!(v.wyxy(), u16vec4(4_u16, 2_u16, 1_u16, 2_u16)); + 
assert_eq!(v.wyxz(), u16vec4(4_u16, 2_u16, 1_u16, 3_u16)); + assert_eq!(v.wyxw(), u16vec4(4_u16, 2_u16, 1_u16, 4_u16)); + assert_eq!(v.wyyx(), u16vec4(4_u16, 2_u16, 2_u16, 1_u16)); + assert_eq!(v.wyyy(), u16vec4(4_u16, 2_u16, 2_u16, 2_u16)); + assert_eq!(v.wyyz(), u16vec4(4_u16, 2_u16, 2_u16, 3_u16)); + assert_eq!(v.wyyw(), u16vec4(4_u16, 2_u16, 2_u16, 4_u16)); + assert_eq!(v.wyzx(), u16vec4(4_u16, 2_u16, 3_u16, 1_u16)); + assert_eq!(v.wyzy(), u16vec4(4_u16, 2_u16, 3_u16, 2_u16)); + assert_eq!(v.wyzz(), u16vec4(4_u16, 2_u16, 3_u16, 3_u16)); + assert_eq!(v.wyzw(), u16vec4(4_u16, 2_u16, 3_u16, 4_u16)); + assert_eq!(v.wywx(), u16vec4(4_u16, 2_u16, 4_u16, 1_u16)); + assert_eq!(v.wywy(), u16vec4(4_u16, 2_u16, 4_u16, 2_u16)); + assert_eq!(v.wywz(), u16vec4(4_u16, 2_u16, 4_u16, 3_u16)); + assert_eq!(v.wyww(), u16vec4(4_u16, 2_u16, 4_u16, 4_u16)); + assert_eq!(v.wzxx(), u16vec4(4_u16, 3_u16, 1_u16, 1_u16)); + assert_eq!(v.wzxy(), u16vec4(4_u16, 3_u16, 1_u16, 2_u16)); + assert_eq!(v.wzxz(), u16vec4(4_u16, 3_u16, 1_u16, 3_u16)); + assert_eq!(v.wzxw(), u16vec4(4_u16, 3_u16, 1_u16, 4_u16)); + assert_eq!(v.wzyx(), u16vec4(4_u16, 3_u16, 2_u16, 1_u16)); + assert_eq!(v.wzyy(), u16vec4(4_u16, 3_u16, 2_u16, 2_u16)); + assert_eq!(v.wzyz(), u16vec4(4_u16, 3_u16, 2_u16, 3_u16)); + assert_eq!(v.wzyw(), u16vec4(4_u16, 3_u16, 2_u16, 4_u16)); + assert_eq!(v.wzzx(), u16vec4(4_u16, 3_u16, 3_u16, 1_u16)); + assert_eq!(v.wzzy(), u16vec4(4_u16, 3_u16, 3_u16, 2_u16)); + assert_eq!(v.wzzz(), u16vec4(4_u16, 3_u16, 3_u16, 3_u16)); + assert_eq!(v.wzzw(), u16vec4(4_u16, 3_u16, 3_u16, 4_u16)); + assert_eq!(v.wzwx(), u16vec4(4_u16, 3_u16, 4_u16, 1_u16)); + assert_eq!(v.wzwy(), u16vec4(4_u16, 3_u16, 4_u16, 2_u16)); + assert_eq!(v.wzwz(), u16vec4(4_u16, 3_u16, 4_u16, 3_u16)); + assert_eq!(v.wzww(), u16vec4(4_u16, 3_u16, 4_u16, 4_u16)); + assert_eq!(v.wwxx(), u16vec4(4_u16, 4_u16, 1_u16, 1_u16)); + assert_eq!(v.wwxy(), u16vec4(4_u16, 4_u16, 1_u16, 2_u16)); + assert_eq!(v.wwxz(), u16vec4(4_u16, 4_u16, 1_u16, 3_u16)); + assert_eq!(v.wwxw(), u16vec4(4_u16, 4_u16, 1_u16, 4_u16)); + assert_eq!(v.wwyx(), u16vec4(4_u16, 4_u16, 2_u16, 1_u16)); + assert_eq!(v.wwyy(), u16vec4(4_u16, 4_u16, 2_u16, 2_u16)); + assert_eq!(v.wwyz(), u16vec4(4_u16, 4_u16, 2_u16, 3_u16)); + assert_eq!(v.wwyw(), u16vec4(4_u16, 4_u16, 2_u16, 4_u16)); + assert_eq!(v.wwzx(), u16vec4(4_u16, 4_u16, 3_u16, 1_u16)); + assert_eq!(v.wwzy(), u16vec4(4_u16, 4_u16, 3_u16, 2_u16)); + assert_eq!(v.wwzz(), u16vec4(4_u16, 4_u16, 3_u16, 3_u16)); + assert_eq!(v.wwzw(), u16vec4(4_u16, 4_u16, 3_u16, 4_u16)); + assert_eq!(v.wwwx(), u16vec4(4_u16, 4_u16, 4_u16, 1_u16)); + assert_eq!(v.wwwy(), u16vec4(4_u16, 4_u16, 4_u16, 2_u16)); + assert_eq!(v.wwwz(), u16vec4(4_u16, 4_u16, 4_u16, 3_u16)); + assert_eq!(v.wwww(), u16vec4(4_u16, 4_u16, 4_u16, 4_u16)); + assert_eq!(v.xxx(), u16vec3(1_u16, 1_u16, 1_u16)); + assert_eq!(v.xxy(), u16vec3(1_u16, 1_u16, 2_u16)); + assert_eq!(v.xxz(), u16vec3(1_u16, 1_u16, 3_u16)); + assert_eq!(v.xxw(), u16vec3(1_u16, 1_u16, 4_u16)); + assert_eq!(v.xyx(), u16vec3(1_u16, 2_u16, 1_u16)); + assert_eq!(v.xyy(), u16vec3(1_u16, 2_u16, 2_u16)); + assert_eq!(v.xyz(), u16vec3(1_u16, 2_u16, 3_u16)); + assert_eq!(v.xyw(), u16vec3(1_u16, 2_u16, 4_u16)); + assert_eq!(v.xzx(), u16vec3(1_u16, 3_u16, 1_u16)); + assert_eq!(v.xzy(), u16vec3(1_u16, 3_u16, 2_u16)); + assert_eq!(v.xzz(), u16vec3(1_u16, 3_u16, 3_u16)); + assert_eq!(v.xzw(), u16vec3(1_u16, 3_u16, 4_u16)); + assert_eq!(v.xwx(), u16vec3(1_u16, 4_u16, 1_u16)); + assert_eq!(v.xwy(), u16vec3(1_u16, 4_u16, 2_u16)); + 
assert_eq!(v.xwz(), u16vec3(1_u16, 4_u16, 3_u16)); + assert_eq!(v.xww(), u16vec3(1_u16, 4_u16, 4_u16)); + assert_eq!(v.yxx(), u16vec3(2_u16, 1_u16, 1_u16)); + assert_eq!(v.yxy(), u16vec3(2_u16, 1_u16, 2_u16)); + assert_eq!(v.yxz(), u16vec3(2_u16, 1_u16, 3_u16)); + assert_eq!(v.yxw(), u16vec3(2_u16, 1_u16, 4_u16)); + assert_eq!(v.yyx(), u16vec3(2_u16, 2_u16, 1_u16)); + assert_eq!(v.yyy(), u16vec3(2_u16, 2_u16, 2_u16)); + assert_eq!(v.yyz(), u16vec3(2_u16, 2_u16, 3_u16)); + assert_eq!(v.yyw(), u16vec3(2_u16, 2_u16, 4_u16)); + assert_eq!(v.yzx(), u16vec3(2_u16, 3_u16, 1_u16)); + assert_eq!(v.yzy(), u16vec3(2_u16, 3_u16, 2_u16)); + assert_eq!(v.yzz(), u16vec3(2_u16, 3_u16, 3_u16)); + assert_eq!(v.yzw(), u16vec3(2_u16, 3_u16, 4_u16)); + assert_eq!(v.ywx(), u16vec3(2_u16, 4_u16, 1_u16)); + assert_eq!(v.ywy(), u16vec3(2_u16, 4_u16, 2_u16)); + assert_eq!(v.ywz(), u16vec3(2_u16, 4_u16, 3_u16)); + assert_eq!(v.yww(), u16vec3(2_u16, 4_u16, 4_u16)); + assert_eq!(v.zxx(), u16vec3(3_u16, 1_u16, 1_u16)); + assert_eq!(v.zxy(), u16vec3(3_u16, 1_u16, 2_u16)); + assert_eq!(v.zxz(), u16vec3(3_u16, 1_u16, 3_u16)); + assert_eq!(v.zxw(), u16vec3(3_u16, 1_u16, 4_u16)); + assert_eq!(v.zyx(), u16vec3(3_u16, 2_u16, 1_u16)); + assert_eq!(v.zyy(), u16vec3(3_u16, 2_u16, 2_u16)); + assert_eq!(v.zyz(), u16vec3(3_u16, 2_u16, 3_u16)); + assert_eq!(v.zyw(), u16vec3(3_u16, 2_u16, 4_u16)); + assert_eq!(v.zzx(), u16vec3(3_u16, 3_u16, 1_u16)); + assert_eq!(v.zzy(), u16vec3(3_u16, 3_u16, 2_u16)); + assert_eq!(v.zzz(), u16vec3(3_u16, 3_u16, 3_u16)); + assert_eq!(v.zzw(), u16vec3(3_u16, 3_u16, 4_u16)); + assert_eq!(v.zwx(), u16vec3(3_u16, 4_u16, 1_u16)); + assert_eq!(v.zwy(), u16vec3(3_u16, 4_u16, 2_u16)); + assert_eq!(v.zwz(), u16vec3(3_u16, 4_u16, 3_u16)); + assert_eq!(v.zww(), u16vec3(3_u16, 4_u16, 4_u16)); + assert_eq!(v.wxx(), u16vec3(4_u16, 1_u16, 1_u16)); + assert_eq!(v.wxy(), u16vec3(4_u16, 1_u16, 2_u16)); + assert_eq!(v.wxz(), u16vec3(4_u16, 1_u16, 3_u16)); + assert_eq!(v.wxw(), u16vec3(4_u16, 1_u16, 4_u16)); + assert_eq!(v.wyx(), u16vec3(4_u16, 2_u16, 1_u16)); + assert_eq!(v.wyy(), u16vec3(4_u16, 2_u16, 2_u16)); + assert_eq!(v.wyz(), u16vec3(4_u16, 2_u16, 3_u16)); + assert_eq!(v.wyw(), u16vec3(4_u16, 2_u16, 4_u16)); + assert_eq!(v.wzx(), u16vec3(4_u16, 3_u16, 1_u16)); + assert_eq!(v.wzy(), u16vec3(4_u16, 3_u16, 2_u16)); + assert_eq!(v.wzz(), u16vec3(4_u16, 3_u16, 3_u16)); + assert_eq!(v.wzw(), u16vec3(4_u16, 3_u16, 4_u16)); + assert_eq!(v.wwx(), u16vec3(4_u16, 4_u16, 1_u16)); + assert_eq!(v.wwy(), u16vec3(4_u16, 4_u16, 2_u16)); + assert_eq!(v.wwz(), u16vec3(4_u16, 4_u16, 3_u16)); + assert_eq!(v.www(), u16vec3(4_u16, 4_u16, 4_u16)); + assert_eq!(v.xx(), u16vec2(1_u16, 1_u16)); + assert_eq!(v.xy(), u16vec2(1_u16, 2_u16)); + assert_eq!(v.xz(), u16vec2(1_u16, 3_u16)); + assert_eq!(v.xw(), u16vec2(1_u16, 4_u16)); + assert_eq!(v.yx(), u16vec2(2_u16, 1_u16)); + assert_eq!(v.yy(), u16vec2(2_u16, 2_u16)); + assert_eq!(v.yz(), u16vec2(2_u16, 3_u16)); + assert_eq!(v.yw(), u16vec2(2_u16, 4_u16)); + assert_eq!(v.zx(), u16vec2(3_u16, 1_u16)); + assert_eq!(v.zy(), u16vec2(3_u16, 2_u16)); + assert_eq!(v.zz(), u16vec2(3_u16, 3_u16)); + assert_eq!(v.zw(), u16vec2(3_u16, 4_u16)); + assert_eq!(v.wx(), u16vec2(4_u16, 1_u16)); + assert_eq!(v.wy(), u16vec2(4_u16, 2_u16)); + assert_eq!(v.wz(), u16vec2(4_u16, 3_u16)); + assert_eq!(v.ww(), u16vec2(4_u16, 4_u16)); +}); + +glam_test!(test_u16vec3_swizzles, { + let v = u16vec3(1_u16, 2_u16, 3_u16); + assert_eq!(v, v.xyz()); + assert_eq!(v.xxxx(), u16vec4(1_u16, 1_u16, 1_u16, 1_u16)); + 
assert_eq!(v.xxxy(), u16vec4(1_u16, 1_u16, 1_u16, 2_u16)); + assert_eq!(v.xxxz(), u16vec4(1_u16, 1_u16, 1_u16, 3_u16)); + assert_eq!(v.xxyx(), u16vec4(1_u16, 1_u16, 2_u16, 1_u16)); + assert_eq!(v.xxyy(), u16vec4(1_u16, 1_u16, 2_u16, 2_u16)); + assert_eq!(v.xxyz(), u16vec4(1_u16, 1_u16, 2_u16, 3_u16)); + assert_eq!(v.xxzx(), u16vec4(1_u16, 1_u16, 3_u16, 1_u16)); + assert_eq!(v.xxzy(), u16vec4(1_u16, 1_u16, 3_u16, 2_u16)); + assert_eq!(v.xxzz(), u16vec4(1_u16, 1_u16, 3_u16, 3_u16)); + assert_eq!(v.xyxx(), u16vec4(1_u16, 2_u16, 1_u16, 1_u16)); + assert_eq!(v.xyxy(), u16vec4(1_u16, 2_u16, 1_u16, 2_u16)); + assert_eq!(v.xyxz(), u16vec4(1_u16, 2_u16, 1_u16, 3_u16)); + assert_eq!(v.xyyx(), u16vec4(1_u16, 2_u16, 2_u16, 1_u16)); + assert_eq!(v.xyyy(), u16vec4(1_u16, 2_u16, 2_u16, 2_u16)); + assert_eq!(v.xyyz(), u16vec4(1_u16, 2_u16, 2_u16, 3_u16)); + assert_eq!(v.xyzx(), u16vec4(1_u16, 2_u16, 3_u16, 1_u16)); + assert_eq!(v.xyzy(), u16vec4(1_u16, 2_u16, 3_u16, 2_u16)); + assert_eq!(v.xyzz(), u16vec4(1_u16, 2_u16, 3_u16, 3_u16)); + assert_eq!(v.xzxx(), u16vec4(1_u16, 3_u16, 1_u16, 1_u16)); + assert_eq!(v.xzxy(), u16vec4(1_u16, 3_u16, 1_u16, 2_u16)); + assert_eq!(v.xzxz(), u16vec4(1_u16, 3_u16, 1_u16, 3_u16)); + assert_eq!(v.xzyx(), u16vec4(1_u16, 3_u16, 2_u16, 1_u16)); + assert_eq!(v.xzyy(), u16vec4(1_u16, 3_u16, 2_u16, 2_u16)); + assert_eq!(v.xzyz(), u16vec4(1_u16, 3_u16, 2_u16, 3_u16)); + assert_eq!(v.xzzx(), u16vec4(1_u16, 3_u16, 3_u16, 1_u16)); + assert_eq!(v.xzzy(), u16vec4(1_u16, 3_u16, 3_u16, 2_u16)); + assert_eq!(v.xzzz(), u16vec4(1_u16, 3_u16, 3_u16, 3_u16)); + assert_eq!(v.yxxx(), u16vec4(2_u16, 1_u16, 1_u16, 1_u16)); + assert_eq!(v.yxxy(), u16vec4(2_u16, 1_u16, 1_u16, 2_u16)); + assert_eq!(v.yxxz(), u16vec4(2_u16, 1_u16, 1_u16, 3_u16)); + assert_eq!(v.yxyx(), u16vec4(2_u16, 1_u16, 2_u16, 1_u16)); + assert_eq!(v.yxyy(), u16vec4(2_u16, 1_u16, 2_u16, 2_u16)); + assert_eq!(v.yxyz(), u16vec4(2_u16, 1_u16, 2_u16, 3_u16)); + assert_eq!(v.yxzx(), u16vec4(2_u16, 1_u16, 3_u16, 1_u16)); + assert_eq!(v.yxzy(), u16vec4(2_u16, 1_u16, 3_u16, 2_u16)); + assert_eq!(v.yxzz(), u16vec4(2_u16, 1_u16, 3_u16, 3_u16)); + assert_eq!(v.yyxx(), u16vec4(2_u16, 2_u16, 1_u16, 1_u16)); + assert_eq!(v.yyxy(), u16vec4(2_u16, 2_u16, 1_u16, 2_u16)); + assert_eq!(v.yyxz(), u16vec4(2_u16, 2_u16, 1_u16, 3_u16)); + assert_eq!(v.yyyx(), u16vec4(2_u16, 2_u16, 2_u16, 1_u16)); + assert_eq!(v.yyyy(), u16vec4(2_u16, 2_u16, 2_u16, 2_u16)); + assert_eq!(v.yyyz(), u16vec4(2_u16, 2_u16, 2_u16, 3_u16)); + assert_eq!(v.yyzx(), u16vec4(2_u16, 2_u16, 3_u16, 1_u16)); + assert_eq!(v.yyzy(), u16vec4(2_u16, 2_u16, 3_u16, 2_u16)); + assert_eq!(v.yyzz(), u16vec4(2_u16, 2_u16, 3_u16, 3_u16)); + assert_eq!(v.yzxx(), u16vec4(2_u16, 3_u16, 1_u16, 1_u16)); + assert_eq!(v.yzxy(), u16vec4(2_u16, 3_u16, 1_u16, 2_u16)); + assert_eq!(v.yzxz(), u16vec4(2_u16, 3_u16, 1_u16, 3_u16)); + assert_eq!(v.yzyx(), u16vec4(2_u16, 3_u16, 2_u16, 1_u16)); + assert_eq!(v.yzyy(), u16vec4(2_u16, 3_u16, 2_u16, 2_u16)); + assert_eq!(v.yzyz(), u16vec4(2_u16, 3_u16, 2_u16, 3_u16)); + assert_eq!(v.yzzx(), u16vec4(2_u16, 3_u16, 3_u16, 1_u16)); + assert_eq!(v.yzzy(), u16vec4(2_u16, 3_u16, 3_u16, 2_u16)); + assert_eq!(v.yzzz(), u16vec4(2_u16, 3_u16, 3_u16, 3_u16)); + assert_eq!(v.zxxx(), u16vec4(3_u16, 1_u16, 1_u16, 1_u16)); + assert_eq!(v.zxxy(), u16vec4(3_u16, 1_u16, 1_u16, 2_u16)); + assert_eq!(v.zxxz(), u16vec4(3_u16, 1_u16, 1_u16, 3_u16)); + assert_eq!(v.zxyx(), u16vec4(3_u16, 1_u16, 2_u16, 1_u16)); + assert_eq!(v.zxyy(), u16vec4(3_u16, 1_u16, 2_u16, 2_u16)); + 
assert_eq!(v.zxyz(), u16vec4(3_u16, 1_u16, 2_u16, 3_u16)); + assert_eq!(v.zxzx(), u16vec4(3_u16, 1_u16, 3_u16, 1_u16)); + assert_eq!(v.zxzy(), u16vec4(3_u16, 1_u16, 3_u16, 2_u16)); + assert_eq!(v.zxzz(), u16vec4(3_u16, 1_u16, 3_u16, 3_u16)); + assert_eq!(v.zyxx(), u16vec4(3_u16, 2_u16, 1_u16, 1_u16)); + assert_eq!(v.zyxy(), u16vec4(3_u16, 2_u16, 1_u16, 2_u16)); + assert_eq!(v.zyxz(), u16vec4(3_u16, 2_u16, 1_u16, 3_u16)); + assert_eq!(v.zyyx(), u16vec4(3_u16, 2_u16, 2_u16, 1_u16)); + assert_eq!(v.zyyy(), u16vec4(3_u16, 2_u16, 2_u16, 2_u16)); + assert_eq!(v.zyyz(), u16vec4(3_u16, 2_u16, 2_u16, 3_u16)); + assert_eq!(v.zyzx(), u16vec4(3_u16, 2_u16, 3_u16, 1_u16)); + assert_eq!(v.zyzy(), u16vec4(3_u16, 2_u16, 3_u16, 2_u16)); + assert_eq!(v.zyzz(), u16vec4(3_u16, 2_u16, 3_u16, 3_u16)); + assert_eq!(v.zzxx(), u16vec4(3_u16, 3_u16, 1_u16, 1_u16)); + assert_eq!(v.zzxy(), u16vec4(3_u16, 3_u16, 1_u16, 2_u16)); + assert_eq!(v.zzxz(), u16vec4(3_u16, 3_u16, 1_u16, 3_u16)); + assert_eq!(v.zzyx(), u16vec4(3_u16, 3_u16, 2_u16, 1_u16)); + assert_eq!(v.zzyy(), u16vec4(3_u16, 3_u16, 2_u16, 2_u16)); + assert_eq!(v.zzyz(), u16vec4(3_u16, 3_u16, 2_u16, 3_u16)); + assert_eq!(v.zzzx(), u16vec4(3_u16, 3_u16, 3_u16, 1_u16)); + assert_eq!(v.zzzy(), u16vec4(3_u16, 3_u16, 3_u16, 2_u16)); + assert_eq!(v.zzzz(), u16vec4(3_u16, 3_u16, 3_u16, 3_u16)); + assert_eq!(v.xxx(), u16vec3(1_u16, 1_u16, 1_u16)); + assert_eq!(v.xxy(), u16vec3(1_u16, 1_u16, 2_u16)); + assert_eq!(v.xxz(), u16vec3(1_u16, 1_u16, 3_u16)); + assert_eq!(v.xyx(), u16vec3(1_u16, 2_u16, 1_u16)); + assert_eq!(v.xyy(), u16vec3(1_u16, 2_u16, 2_u16)); + assert_eq!(v.xzx(), u16vec3(1_u16, 3_u16, 1_u16)); + assert_eq!(v.xzy(), u16vec3(1_u16, 3_u16, 2_u16)); + assert_eq!(v.xzz(), u16vec3(1_u16, 3_u16, 3_u16)); + assert_eq!(v.yxx(), u16vec3(2_u16, 1_u16, 1_u16)); + assert_eq!(v.yxy(), u16vec3(2_u16, 1_u16, 2_u16)); + assert_eq!(v.yxz(), u16vec3(2_u16, 1_u16, 3_u16)); + assert_eq!(v.yyx(), u16vec3(2_u16, 2_u16, 1_u16)); + assert_eq!(v.yyy(), u16vec3(2_u16, 2_u16, 2_u16)); + assert_eq!(v.yyz(), u16vec3(2_u16, 2_u16, 3_u16)); + assert_eq!(v.yzx(), u16vec3(2_u16, 3_u16, 1_u16)); + assert_eq!(v.yzy(), u16vec3(2_u16, 3_u16, 2_u16)); + assert_eq!(v.yzz(), u16vec3(2_u16, 3_u16, 3_u16)); + assert_eq!(v.zxx(), u16vec3(3_u16, 1_u16, 1_u16)); + assert_eq!(v.zxy(), u16vec3(3_u16, 1_u16, 2_u16)); + assert_eq!(v.zxz(), u16vec3(3_u16, 1_u16, 3_u16)); + assert_eq!(v.zyx(), u16vec3(3_u16, 2_u16, 1_u16)); + assert_eq!(v.zyy(), u16vec3(3_u16, 2_u16, 2_u16)); + assert_eq!(v.zyz(), u16vec3(3_u16, 2_u16, 3_u16)); + assert_eq!(v.zzx(), u16vec3(3_u16, 3_u16, 1_u16)); + assert_eq!(v.zzy(), u16vec3(3_u16, 3_u16, 2_u16)); + assert_eq!(v.zzz(), u16vec3(3_u16, 3_u16, 3_u16)); + assert_eq!(v.xx(), u16vec2(1_u16, 1_u16)); + assert_eq!(v.xy(), u16vec2(1_u16, 2_u16)); + assert_eq!(v.xz(), u16vec2(1_u16, 3_u16)); + assert_eq!(v.yx(), u16vec2(2_u16, 1_u16)); + assert_eq!(v.yy(), u16vec2(2_u16, 2_u16)); + assert_eq!(v.yz(), u16vec2(2_u16, 3_u16)); + assert_eq!(v.zx(), u16vec2(3_u16, 1_u16)); + assert_eq!(v.zy(), u16vec2(3_u16, 2_u16)); + assert_eq!(v.zz(), u16vec2(3_u16, 3_u16)); +}); + +glam_test!(test_u16vec2_swizzles, { + let v = u16vec2(1_u16, 2_u16); + assert_eq!(v, v.xy()); + assert_eq!(v.xxxx(), u16vec4(1_u16, 1_u16, 1_u16, 1_u16)); + assert_eq!(v.xxxy(), u16vec4(1_u16, 1_u16, 1_u16, 2_u16)); + assert_eq!(v.xxyx(), u16vec4(1_u16, 1_u16, 2_u16, 1_u16)); + assert_eq!(v.xxyy(), u16vec4(1_u16, 1_u16, 2_u16, 2_u16)); + assert_eq!(v.xyxx(), u16vec4(1_u16, 2_u16, 1_u16, 1_u16)); + assert_eq!(v.xyxy(), 
u16vec4(1_u16, 2_u16, 1_u16, 2_u16)); + assert_eq!(v.xyyx(), u16vec4(1_u16, 2_u16, 2_u16, 1_u16)); + assert_eq!(v.xyyy(), u16vec4(1_u16, 2_u16, 2_u16, 2_u16)); + assert_eq!(v.yxxx(), u16vec4(2_u16, 1_u16, 1_u16, 1_u16)); + assert_eq!(v.yxxy(), u16vec4(2_u16, 1_u16, 1_u16, 2_u16)); + assert_eq!(v.yxyx(), u16vec4(2_u16, 1_u16, 2_u16, 1_u16)); + assert_eq!(v.yxyy(), u16vec4(2_u16, 1_u16, 2_u16, 2_u16)); + assert_eq!(v.yyxx(), u16vec4(2_u16, 2_u16, 1_u16, 1_u16)); + assert_eq!(v.yyxy(), u16vec4(2_u16, 2_u16, 1_u16, 2_u16)); + assert_eq!(v.yyyx(), u16vec4(2_u16, 2_u16, 2_u16, 1_u16)); + assert_eq!(v.yyyy(), u16vec4(2_u16, 2_u16, 2_u16, 2_u16)); + assert_eq!(v.xxx(), u16vec3(1_u16, 1_u16, 1_u16)); + assert_eq!(v.xxy(), u16vec3(1_u16, 1_u16, 2_u16)); + assert_eq!(v.xyx(), u16vec3(1_u16, 2_u16, 1_u16)); + assert_eq!(v.xyy(), u16vec3(1_u16, 2_u16, 2_u16)); + assert_eq!(v.yxx(), u16vec3(2_u16, 1_u16, 1_u16)); + assert_eq!(v.yxy(), u16vec3(2_u16, 1_u16, 2_u16)); + assert_eq!(v.yyx(), u16vec3(2_u16, 2_u16, 1_u16)); + assert_eq!(v.yyy(), u16vec3(2_u16, 2_u16, 2_u16)); + assert_eq!(v.xx(), u16vec2(1_u16, 1_u16)); + assert_eq!(v.yx(), u16vec2(2_u16, 1_u16)); + assert_eq!(v.yy(), u16vec2(2_u16, 2_u16)); +}); diff --git a/tests/vec2.rs b/tests/vec2.rs index 7557aaec..59563823 100644 --- a/tests/vec2.rs +++ b/tests/vec2.rs @@ -1069,42 +1069,70 @@ mod vec2 { }); glam_test!(test_as, { - use glam::{DVec2, I64Vec2, IVec2, U64Vec2, UVec2}; + use glam::{DVec2, I16Vec2, I64Vec2, IVec2, U16Vec2, U64Vec2, UVec2}; assert_eq!(DVec2::new(-1.0, -2.0), Vec2::new(-1.0, -2.0).as_dvec2()); + assert_eq!(I16Vec2::new(-1, -2), Vec2::new(-1.0, -2.0).as_i16vec2()); + assert_eq!(U16Vec2::new(1, 2), Vec2::new(1.0, 2.0).as_u16vec2()); assert_eq!(IVec2::new(-1, -2), Vec2::new(-1.0, -2.0).as_ivec2()); assert_eq!(UVec2::new(1, 2), Vec2::new(1.0, 2.0).as_uvec2()); assert_eq!(I64Vec2::new(-1, -2), Vec2::new(-1.0, -2.0).as_i64vec2()); assert_eq!(U64Vec2::new(1, 2), Vec2::new(1.0, 2.0).as_u64vec2()); + assert_eq!(Vec2::new(-1.0, -2.0), DVec2::new(-1.0, -2.0).as_vec2()); + assert_eq!(I16Vec2::new(-1, -2), DVec2::new(-1.0, -2.0).as_i16vec2()); + assert_eq!(U16Vec2::new(1, 2), DVec2::new(1.0, 2.0).as_u16vec2()); assert_eq!(IVec2::new(-1, -2), DVec2::new(-1.0, -2.0).as_ivec2()); assert_eq!(UVec2::new(1, 2), DVec2::new(1.0, 2.0).as_uvec2()); - assert_eq!(Vec2::new(-1.0, -2.0), DVec2::new(-1.0, -2.0).as_vec2()); assert_eq!(I64Vec2::new(-1, -2), DVec2::new(-1.0, -2.0).as_i64vec2()); assert_eq!(U64Vec2::new(1, 2), DVec2::new(1.0, 2.0).as_u64vec2()); + assert_eq!(Vec2::new(-1.0, -2.0), I16Vec2::new(-1, -2).as_vec2()); + assert_eq!(DVec2::new(-1.0, -2.0), I16Vec2::new(-1, -2).as_dvec2()); + assert_eq!(U16Vec2::new(1, 2), I16Vec2::new(1, 2).as_u16vec2()); + assert_eq!(IVec2::new(-1, -2), I16Vec2::new(-1, -2).as_ivec2()); + assert_eq!(UVec2::new(1, 2), I16Vec2::new(1, 2).as_uvec2()); + assert_eq!(I64Vec2::new(-1, -2), I16Vec2::new(-1, -2).as_i64vec2()); + assert_eq!(U64Vec2::new(1, 2), I16Vec2::new(1, 2).as_u64vec2()); + + assert_eq!(Vec2::new(1.0, 2.0), U16Vec2::new(1, 2).as_vec2()); + assert_eq!(DVec2::new(1.0, 2.0), U16Vec2::new(1, 2).as_dvec2()); + assert_eq!(I16Vec2::new(1, 2), U16Vec2::new(1, 2).as_i16vec2()); + assert_eq!(IVec2::new(1, 2), U16Vec2::new(1, 2).as_ivec2()); + assert_eq!(UVec2::new(1, 2), U16Vec2::new(1, 2).as_uvec2()); + assert_eq!(I64Vec2::new(1, 2), U16Vec2::new(1, 2).as_i64vec2()); + assert_eq!(U64Vec2::new(1, 2), U16Vec2::new(1, 2).as_u64vec2()); + + assert_eq!(Vec2::new(-1.0, -2.0), IVec2::new(-1, -2).as_vec2()); 
assert_eq!(DVec2::new(-1.0, -2.0), IVec2::new(-1, -2).as_dvec2()); assert_eq!(UVec2::new(1, 2), IVec2::new(1, 2).as_uvec2()); - assert_eq!(Vec2::new(-1.0, -2.0), IVec2::new(-1, -2).as_vec2()); + assert_eq!(I16Vec2::new(-1, -2), IVec2::new(-1, -2).as_i16vec2()); + assert_eq!(U16Vec2::new(1, 2), IVec2::new(1, 2).as_u16vec2()); assert_eq!(I64Vec2::new(-1, -2), IVec2::new(-1, -2).as_i64vec2()); assert_eq!(U64Vec2::new(1, 2), IVec2::new(1, 2).as_u64vec2()); + assert_eq!(Vec2::new(1.0, 2.0), UVec2::new(1, 2).as_vec2()); assert_eq!(DVec2::new(1.0, 2.0), UVec2::new(1, 2).as_dvec2()); + assert_eq!(I16Vec2::new(1, 2), UVec2::new(1, 2).as_i16vec2()); + assert_eq!(U16Vec2::new(1, 2), UVec2::new(1, 2).as_u16vec2()); assert_eq!(IVec2::new(1, 2), UVec2::new(1, 2).as_ivec2()); - assert_eq!(Vec2::new(1.0, 2.0), UVec2::new(1, 2).as_vec2()); assert_eq!(I64Vec2::new(1, 2), UVec2::new(1, 2).as_i64vec2()); assert_eq!(U64Vec2::new(1, 2), UVec2::new(1, 2).as_u64vec2()); + assert_eq!(Vec2::new(-1.0, -2.0), I64Vec2::new(-1, -2).as_vec2()); assert_eq!(DVec2::new(-1.0, -2.0), I64Vec2::new(-1, -2).as_dvec2()); + assert_eq!(U16Vec2::new(1, 2), I64Vec2::new(1, 2).as_u16vec2()); + assert_eq!(I16Vec2::new(-1, -2), I64Vec2::new(-1, -2).as_i16vec2()); assert_eq!(UVec2::new(1, 2), I64Vec2::new(1, 2).as_uvec2()); - assert_eq!(Vec2::new(-1.0, -2.0), I64Vec2::new(-1, -2).as_vec2()); assert_eq!(IVec2::new(-1, -2), I64Vec2::new(-1, -2).as_ivec2()); assert_eq!(U64Vec2::new(1, 2), I64Vec2::new(1, 2).as_u64vec2()); + assert_eq!(Vec2::new(1.0, 2.0), U64Vec2::new(1, 2).as_vec2()); assert_eq!(DVec2::new(1.0, 2.0), U64Vec2::new(1, 2).as_dvec2()); + assert_eq!(I16Vec2::new(1, 2), U64Vec2::new(1, 2).as_i16vec2()); + assert_eq!(U16Vec2::new(1, 2), U64Vec2::new(1, 2).as_u16vec2()); assert_eq!(IVec2::new(1, 2), U64Vec2::new(1, 2).as_ivec2()); - assert_eq!(Vec2::new(1.0, 2.0), U64Vec2::new(1, 2).as_vec2()); - assert_eq!(I64Vec2::new(1, 2), U64Vec2::new(1, 2).as_i64vec2()); assert_eq!(UVec2::new(1, 2), U64Vec2::new(1, 2).as_uvec2()); + assert_eq!(I64Vec2::new(1, 2), U64Vec2::new(1, 2).as_i64vec2()); }); impl_vec2_float_tests!(f32, vec2, Vec2, Vec3, BVec2); @@ -1133,6 +1161,248 @@ mod dvec2 { impl_vec2_float_tests!(f64, dvec2, DVec2, DVec3, BVec2); } +mod i16vec2 { + use glam::{i16vec2, BVec2, I16Vec2, I16Vec3, I64Vec2, IVec2, U16Vec2, U64Vec2, UVec2}; + + glam_test!(test_align, { + use core::mem; + #[cfg(not(feature = "cuda"))] + assert_eq!(2, mem::align_of::<I16Vec2>()); + #[cfg(not(feature = "cuda"))] + assert_eq!(4, mem::size_of::<I16Vec2>()); + #[cfg(feature = "cuda")] + assert_eq!(8, mem::align_of::<I16Vec2>()); + #[cfg(feature = "cuda")] + assert_eq!(8, mem::size_of::<I16Vec2>()); + }); + + glam_test!(test_try_from, { + assert_eq!( + I16Vec2::new(1, 2), + I16Vec2::try_from(U16Vec2::new(1, 2)).unwrap() + ); + assert!(I16Vec2::try_from(U16Vec2::new(u16::MAX, 2)).is_err()); + assert!(I16Vec2::try_from(U16Vec2::new(1, u16::MAX)).is_err()); + + assert_eq!( + I16Vec2::new(1, 2), + I16Vec2::try_from(IVec2::new(1, 2)).unwrap() + ); + assert!(I16Vec2::try_from(IVec2::new(i32::MAX, 2)).is_err()); + assert!(I16Vec2::try_from(IVec2::new(1, i32::MAX)).is_err()); + + assert_eq!( + I16Vec2::new(1, 2), + I16Vec2::try_from(UVec2::new(1, 2)).unwrap() + ); + assert!(I16Vec2::try_from(UVec2::new(u32::MAX, 2)).is_err()); + assert!(I16Vec2::try_from(UVec2::new(1, u32::MAX)).is_err()); + + assert_eq!( + I16Vec2::new(1, 2), + I16Vec2::try_from(I64Vec2::new(1, 2)).unwrap() + ); + assert!(I16Vec2::try_from(I64Vec2::new(i64::MAX, 2)).is_err()); + assert!(I16Vec2::try_from(I64Vec2::new(1, 
i64::MAX)).is_err()); + + assert_eq!( + I16Vec2::new(1, 2), + I16Vec2::try_from(U64Vec2::new(1, 2)).unwrap() + ); + assert!(I16Vec2::try_from(U64Vec2::new(u64::MAX, 2)).is_err()); + assert!(I16Vec2::try_from(U64Vec2::new(1, u64::MAX)).is_err()); + }); + + glam_test!(test_wrapping_add, { + assert_eq!( + I16Vec2::new(i16::MAX, 5).wrapping_add(I16Vec2::new(1, 3)), + I16Vec2::new(i16::MIN, 8), + ); + }); + + glam_test!(test_wrapping_sub, { + assert_eq!( + I16Vec2::new(i16::MAX, 5).wrapping_sub(I16Vec2::new(1, 3)), + I16Vec2::new(32766, 2) + ); + }); + + glam_test!(test_wrapping_mul, { + assert_eq!( + I16Vec2::new(i16::MAX, 5).wrapping_mul(I16Vec2::new(3, 3)), + I16Vec2::new(32765, 15) + ); + }); + + glam_test!(test_wrapping_div, { + assert_eq!( + I16Vec2::new(i16::MAX, 5).wrapping_div(I16Vec2::new(3, 3)), + I16Vec2::new(10922, 1) + ); + }); + + glam_test!(test_saturating_add, { + assert_eq!( + I16Vec2::new(i16::MAX, i16::MIN).saturating_add(I16Vec2::new(1, -1)), + I16Vec2::new(i16::MAX, i16::MIN) + ); + }); + + glam_test!(test_saturating_sub, { + assert_eq!( + I16Vec2::new(i16::MIN, i16::MAX).saturating_sub(I16Vec2::new(1, -1)), + I16Vec2::new(i16::MIN, i16::MAX) + ); + }); + + glam_test!(test_saturating_mul, { + assert_eq!( + I16Vec2::new(i16::MAX, i16::MIN).saturating_mul(I16Vec2::new(2, 2)), + I16Vec2::new(i16::MAX, i16::MIN) + ); + }); + + glam_test!(test_saturating_div, { + assert_eq!( + I16Vec2::new(i16::MAX, i16::MIN).saturating_div(I16Vec2::new(2, 2)), + I16Vec2::new(16383, -16384) + ); + }); + + impl_vec2_signed_integer_tests!(i16, i16vec2, I16Vec2, I16Vec3, BVec2); + impl_vec2_eq_hash_tests!(i16, i16vec2); + + impl_vec2_scalar_shift_op_tests!(I16Vec2, -2, 2); + impl_vec2_shift_op_tests!(I16Vec2); + + impl_vec2_scalar_bit_op_tests!(I16Vec2, -2, 2); + impl_vec2_bit_op_tests!(I16Vec2, -2, 2); +} + +mod u16vec2 { + use glam::{u16vec2, BVec2, I16Vec2, I64Vec2, IVec2, U16Vec2, U16Vec3, U64Vec2, UVec2}; + + glam_test!(test_align, { + use core::mem; + #[cfg(not(feature = "cuda"))] + assert_eq!(2, mem::align_of::<U16Vec2>()); + #[cfg(not(feature = "cuda"))] + assert_eq!(4, mem::size_of::<U16Vec2>()); + #[cfg(feature = "cuda")] + assert_eq!(8, mem::align_of::<U16Vec2>()); + #[cfg(feature = "cuda")] + assert_eq!(8, mem::size_of::<U16Vec2>()); + }); + + glam_test!(test_try_from, { + assert_eq!( + U16Vec2::new(1, 2), + U16Vec2::try_from(I16Vec2::new(1, 2)).unwrap() + ); + assert!(U16Vec2::try_from(I16Vec2::new(-1, 2)).is_err()); + assert!(U16Vec2::try_from(I16Vec2::new(1, -2)).is_err()); + + assert_eq!( + U16Vec2::new(1, 2), + U16Vec2::try_from(IVec2::new(1, 2)).unwrap() + ); + assert!(U16Vec2::try_from(IVec2::new(-1, 2)).is_err()); + assert!(U16Vec2::try_from(IVec2::new(1, -2)).is_err()); + + assert!(U16Vec2::try_from(IVec2::new(i32::MAX, 2)).is_err()); + assert!(U16Vec2::try_from(IVec2::new(1, i32::MAX)).is_err()); + + assert_eq!( + U16Vec2::new(1, 2), + U16Vec2::try_from(UVec2::new(1, 2)).unwrap() + ); + assert!(U16Vec2::try_from(UVec2::new(u32::MAX, 2)).is_err()); + assert!(U16Vec2::try_from(UVec2::new(1, u32::MAX)).is_err()); + + assert_eq!( + U16Vec2::new(1, 2), + U16Vec2::try_from(I64Vec2::new(1, 2)).unwrap() + ); + assert!(U16Vec2::try_from(I64Vec2::new(-1, 2)).is_err()); + assert!(U16Vec2::try_from(I64Vec2::new(1, -2)).is_err()); + + assert!(U16Vec2::try_from(I64Vec2::new(i64::MAX, 2)).is_err()); + assert!(U16Vec2::try_from(I64Vec2::new(1, i64::MAX)).is_err()); + + assert_eq!( + U16Vec2::new(1, 2), + U16Vec2::try_from(U64Vec2::new(1, 2)).unwrap() + ); + assert!(U16Vec2::try_from(U64Vec2::new(u64::MAX, 2)).is_err()); + 
assert!(U16Vec2::try_from(U64Vec2::new(1, u64::MAX)).is_err()); + }); + + glam_test!(test_wrapping_add, { + assert_eq!( + U16Vec2::new(u16::MAX, 5).wrapping_add(U16Vec2::new(1, 3)), + U16Vec2::new(0, 8), + ); + }); + + glam_test!(test_wrapping_sub, { + assert_eq!( + U16Vec2::new(u16::MAX, 5).wrapping_sub(U16Vec2::new(1, 3)), + U16Vec2::new(65534, 2) + ); + }); + + glam_test!(test_wrapping_mul, { + assert_eq!( + U16Vec2::new(u16::MAX, 5).wrapping_mul(U16Vec2::new(3, 3)), + U16Vec2::new(65533, 15) + ); + }); + + glam_test!(test_wrapping_div, { + assert_eq!( + U16Vec2::new(u16::MAX, 5).wrapping_div(U16Vec2::new(3, 3)), + U16Vec2::new(21845, 1) + ); + }); + + glam_test!(test_saturating_add, { + assert_eq!( + U16Vec2::new(u16::MAX, u16::MAX).saturating_add(U16Vec2::new(1, u16::MAX)), + U16Vec2::new(u16::MAX, u16::MAX) + ); + }); + + glam_test!(test_saturating_sub, { + assert_eq!( + U16Vec2::new(0, u16::MAX).saturating_sub(U16Vec2::new(1, 1)), + U16Vec2::new(0, 65534) + ); + }); + + glam_test!(test_saturating_mul, { + assert_eq!( + U16Vec2::new(u16::MAX, u16::MAX).saturating_mul(U16Vec2::new(2, u16::MAX)), + U16Vec2::new(u16::MAX, u16::MAX) + ); + }); + + glam_test!(test_saturating_div, { + assert_eq!( + U16Vec2::new(u16::MAX, u16::MAX).saturating_div(U16Vec2::new(2, u16::MAX)), + U16Vec2::new(32767, 1) + ); + }); + + impl_vec2_tests!(u16, u16vec2, U16Vec2, U16Vec3, BVec2); + impl_vec2_eq_hash_tests!(u16, u16vec2); + + impl_vec2_scalar_shift_op_tests!(U16Vec2, 0, 2); + impl_vec2_shift_op_tests!(U16Vec2); + + impl_vec2_scalar_bit_op_tests!(U16Vec2, 0, 2); + impl_vec2_bit_op_tests!(U16Vec2, 0, 2); +} + mod ivec2 { use glam::{ivec2, BVec2, I64Vec2, IVec2, IVec3, U64Vec2, UVec2}; @@ -1234,7 +1504,7 @@ mod ivec2 { } mod uvec2 { - use glam::{uvec2, BVec2, I64Vec2, IVec2, U64Vec2, UVec2, UVec3}; + use glam::{uvec2, BVec2, I16Vec2, I64Vec2, IVec2, U64Vec2, UVec2, UVec3}; glam_test!(test_align, { use core::mem; @@ -1248,6 +1518,13 @@ mod uvec2 { }); glam_test!(test_try_from, { + assert_eq!( + UVec2::new(1, 2), + UVec2::try_from(I16Vec2::new(1, 2)).unwrap() + ); + assert!(UVec2::try_from(I16Vec2::new(-1, 2)).is_err()); + assert!(UVec2::try_from(I16Vec2::new(1, -1)).is_err()); + assert_eq!(UVec2::new(1, 2), UVec2::try_from(IVec2::new(1, 2)).unwrap()); assert!(UVec2::try_from(IVec2::new(-1, 2)).is_err()); assert!(UVec2::try_from(IVec2::new(1, -2)).is_err()); @@ -1351,10 +1628,7 @@ mod i64vec2 { }); glam_test!(test_try_from, { - assert_eq!( - I64Vec2::new(1, 2), - I64Vec2::try_from(IVec2::new(1, 2)).unwrap() - ); + assert_eq!(I64Vec2::new(1, 2), I64Vec2::from(IVec2::new(1, 2))); assert_eq!( I64Vec2::new(1, 2), @@ -1375,7 +1649,7 @@ mod i64vec2 { } mod u64vec2 { - use glam::{u64vec2, BVec2, I64Vec2, IVec2, U64Vec2, U64Vec3, UVec2}; + use glam::{u64vec2, BVec2, I16Vec2, I64Vec2, IVec2, U64Vec2, U64Vec3, UVec2}; glam_test!(test_align, { use core::mem; @@ -1391,8 +1665,19 @@ mod u64vec2 { glam_test!(test_try_from, { assert_eq!( U64Vec2::new(1, 2), - U64Vec2::try_from(UVec2::new(1, 2)).unwrap() + U64Vec2::try_from(I16Vec2::new(1, 2)).unwrap() ); + assert!(U64Vec2::try_from(I16Vec2::new(-1, 2)).is_err()); + assert!(U64Vec2::try_from(I16Vec2::new(1, -2)).is_err()); + + assert_eq!( + U64Vec2::new(1, 2), + U64Vec2::try_from(IVec2::new(1, 2)).unwrap() + ); + assert!(U64Vec2::try_from(IVec2::new(-1, 2)).is_err()); + assert!(U64Vec2::try_from(IVec2::new(1, -2)).is_err()); + + assert_eq!(U64Vec2::new(1, 2), U64Vec2::from(UVec2::new(1, 2))); assert_eq!( U64Vec2::new(1, 2), diff --git a/tests/vec3.rs b/tests/vec3.rs index 
1af63486..129e51ce 100644 --- a/tests/vec3.rs +++ b/tests/vec3.rs @@ -1601,10 +1601,7 @@ mod i64vec3 { }); glam_test!(test_try_from, { - assert_eq!( - I64Vec3::new(1, 2, 3), - I64Vec3::try_from(IVec3::new(1, 2, 3)).unwrap() - ); + assert_eq!(I64Vec3::new(1, 2, 3), I64Vec3::from(IVec3::new(1, 2, 3))); assert_eq!( I64Vec3::new(1, 2, 3), @@ -1637,10 +1634,7 @@ mod u64vec3 { }); glam_test!(test_try_from, { - assert_eq!( - U64Vec3::new(1, 2, 3), - U64Vec3::try_from(UVec3::new(1, 2, 3)).unwrap() - ); + assert_eq!(U64Vec3::new(1, 2, 3), U64Vec3::from(UVec3::new(1, 2, 3))); assert_eq!( U64Vec3::new(1, 2, 3), diff --git a/tests/vec4.rs b/tests/vec4.rs index acea048e..5ecba394 100644 --- a/tests/vec4.rs +++ b/tests/vec4.rs @@ -1745,7 +1745,7 @@ mod i64vec4 { glam_test!(test_try_from, { assert_eq!( I64Vec4::new(1, 2, 3, 4), - I64Vec4::try_from(IVec4::new(1, 2, 3, 4)).unwrap() + I64Vec4::from(IVec4::new(1, 2, 3, 4)) ); assert_eq!( @@ -1785,7 +1785,7 @@ mod u64vec4 { glam_test!(test_try_from, { assert_eq!( U64Vec4::new(1, 2, 3, 4), - U64Vec4::try_from(UVec4::new(1, 2, 3, 4)).unwrap() + U64Vec4::from(UVec4::new(1, 2, 3, 4)) ); assert_eq!(