Commit de13b20b authored by Caleb Zulawski

Convert all masks to a single type

Parent ddc67e3b
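This commit collapses the separate per-width mask types (Mask8, Mask16, Mask32, Mask64, MaskSize, previously defined for both the bitmask and full-vector backends) into one generic Mask type parameterized by a mask element type and a lane count, plus a convert method for switching element widths. A minimal standalone sketch of that idea follows; it is not taken from the diff, and the simplified MaskElement trait and Mask struct below are illustrative stand-ins for the crate's internal types.

// Standalone sketch (illustrative, not the crate's actual API): one generic
// mask type replaces the per-element-width types.
trait MaskElement: Copy {
    const TRUE: Self;  // lane value meaning "set" (all bits one in the real crate)
    const FALSE: Self; // lane value meaning "unset"
}

impl MaskElement for i8 {
    const TRUE: Self = -1;
    const FALSE: Self = 0;
}

impl MaskElement for i32 {
    const TRUE: Self = -1;
    const FALSE: Self = 0;
}

// A single type covers what previously needed Mask8, Mask16, Mask32, ...
#[derive(Clone, Copy, Debug, PartialEq)]
struct Mask<E: MaskElement, const LANES: usize>([E; LANES]);

impl<E: MaskElement, const LANES: usize> Mask<E, LANES> {
    fn splat(value: bool) -> Self {
        Self([if value { E::TRUE } else { E::FALSE }; LANES])
    }
}

fn main() {
    let a: Mask<i8, 4> = Mask::splat(true);   // formerly something like Mask8<4>
    let b: Mask<i32, 4> = Mask::splat(false); // formerly something like Mask32<4>
    println!("{:?} {:?}", a, b);
}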
This diff is collapsed.
use crate::{LaneCount, SupportedLaneCount};
/// Helper trait for limiting int conversion types
pub trait ConvertToInt {}
impl<const LANES: usize> ConvertToInt for crate::SimdI8<LANES> where
LaneCount<LANES>: SupportedLaneCount
{
}
impl<const LANES: usize> ConvertToInt for crate::SimdI16<LANES> where
LaneCount<LANES>: SupportedLaneCount
{
}
impl<const LANES: usize> ConvertToInt for crate::SimdI32<LANES> where
LaneCount<LANES>: SupportedLaneCount
{
}
impl<const LANES: usize> ConvertToInt for crate::SimdI64<LANES> where
LaneCount<LANES>: SupportedLaneCount
{
}
impl<const LANES: usize> ConvertToInt for crate::SimdIsize<LANES> where
LaneCount<LANES>: SupportedLaneCount
{
}
use crate::{LaneCount, MaskElement, Simd, SupportedLaneCount};
use core::marker::PhantomData;
/// A mask where each lane is represented by a single bit.
#[repr(transparent)]
pub struct BitMask<const LANES: usize>(<LaneCount<LANES> as SupportedLaneCount>::BitMask)
pub struct Mask<Element, const LANES: usize>(
<LaneCount<LANES> as SupportedLaneCount>::BitMask,
PhantomData<Element>,
)
where
Element: MaskElement,
LaneCount<LANES>: SupportedLaneCount;
impl<const LANES: usize> Copy for BitMask<LANES> where LaneCount<LANES>: SupportedLaneCount {}
impl<Element, const LANES: usize> Copy for Mask<Element, LANES>
where
Element: MaskElement,
LaneCount<LANES>: SupportedLaneCount,
{
}
impl<const LANES: usize> Clone for BitMask<LANES>
impl<Element, const LANES: usize> Clone for Mask<Element, LANES>
where
Element: MaskElement,
LaneCount<LANES>: SupportedLaneCount,
{
fn clone(&self) -> Self {
@@ -40,8 +28,9 @@ fn clone(&self) -> Self {
}
}
impl<const LANES: usize> PartialEq for BitMask<LANES>
impl<Element, const LANES: usize> PartialEq for Mask<Element, LANES>
where
Element: MaskElement,
LaneCount<LANES>: SupportedLaneCount,
{
fn eq(&self, other: &Self) -> bool {
@@ -49,8 +38,9 @@ fn eq(&self, other: &Self) -> bool {
}
}
impl<const LANES: usize> PartialOrd for BitMask<LANES>
impl<Element, const LANES: usize> PartialOrd for Mask<Element, LANES>
where
Element: MaskElement,
LaneCount<LANES>: SupportedLaneCount,
{
fn partial_cmp(&self, other: &Self) -> Option<core::cmp::Ordering> {
@@ -58,10 +48,16 @@ fn partial_cmp(&self, other: &Self) -> Option<core::cmp::Ordering> {
}
}
impl<const LANES: usize> Eq for BitMask<LANES> where LaneCount<LANES>: SupportedLaneCount {}
impl<Element, const LANES: usize> Eq for Mask<Element, LANES>
where
Element: MaskElement,
LaneCount<LANES>: SupportedLaneCount,
{
}
impl<const LANES: usize> Ord for BitMask<LANES>
impl<Element, const LANES: usize> Ord for Mask<Element, LANES>
where
Element: MaskElement,
LaneCount<LANES>: SupportedLaneCount,
{
fn cmp(&self, other: &Self) -> core::cmp::Ordering {
@@ -69,8 +65,9 @@ fn cmp(&self, other: &Self) -> core::cmp::Ordering {
}
}
impl<const LANES: usize> BitMask<LANES>
impl<Element, const LANES: usize> Mask<Element, LANES>
where
Element: MaskElement,
LaneCount<LANES>: SupportedLaneCount,
{
#[inline]
@@ -84,7 +81,7 @@ pub fn splat(value: bool) -> Self {
if LANES % 8 > 0 {
*mask.as_mut().last_mut().unwrap() &= u8::MAX >> (8 - LANES % 8);
}
Self(mask)
Self(mask, PhantomData)
}
#[inline]
@@ -98,33 +95,28 @@ pub unsafe fn set_unchecked(&mut self, lane: usize, value: bool) {
}
#[inline]
pub fn to_int<V>(self) -> V
where
V: ConvertToInt + Default + core::ops::Not<Output = V>,
{
pub fn to_int(self) -> Simd<Element, LANES> {
unsafe {
let mask: <LaneCount<LANES> as SupportedLaneCount>::IntBitMask =
core::mem::transmute_copy(&self);
crate::intrinsics::simd_select_bitmask(mask, !V::default(), V::default())
crate::intrinsics::simd_select_bitmask(
mask,
Simd::splat(Element::TRUE),
Simd::splat(Element::FALSE),
)
}
}
#[inline]
pub unsafe fn from_int_unchecked<V>(value: V) -> Self
where
V: crate::Vector,
{
pub unsafe fn from_int_unchecked(value: Simd<Element, LANES>) -> Self {
// TODO remove the transmute when rustc is more flexible
assert_eq!(
core::mem::size_of::<<crate::LaneCount::<LANES> as crate::SupportedLaneCount>::BitMask>(
),
core::mem::size_of::<
<crate::LaneCount::<LANES> as crate::SupportedLaneCount>::IntBitMask,
>(),
core::mem::size_of::<<LaneCount::<LANES> as SupportedLaneCount>::BitMask>(),
core::mem::size_of::<<LaneCount::<LANES> as SupportedLaneCount>::IntBitMask>(),
);
let mask: <LaneCount<LANES> as SupportedLaneCount>::IntBitMask =
crate::intrinsics::simd_bitmask(value);
Self(core::mem::transmute_copy(&mask))
Self(core::mem::transmute_copy(&mask), PhantomData)
}
#[inline]
@@ -136,7 +128,15 @@ pub unsafe fn from_int_unchecked<V>(value: V) -> Self
#[inline]
pub fn from_bitmask(bitmask: [u8; LaneCount::<LANES>::BITMASK_LEN]) -> Self {
// Safety: these are the same type and we are laundering the generic
Self(unsafe { core::mem::transmute_copy(&bitmask) })
Self(unsafe { core::mem::transmute_copy(&bitmask) }, PhantomData)
}
#[inline]
pub fn convert<T>(self) -> Mask<T, LANES>
where
T: MaskElement,
{
unsafe { core::mem::transmute_copy(&self) }
}
#[inline]
@@ -150,10 +150,11 @@ pub fn all(self) -> bool {
}
}
impl<const LANES: usize> core::ops::BitAnd for BitMask<LANES>
impl<Element, const LANES: usize> core::ops::BitAnd for Mask<Element, LANES>
where
Element: MaskElement,
LaneCount<LANES>: SupportedLaneCount,
<LaneCount<LANES> as SupportedLaneCount>::BitMask: Default + AsRef<[u8]> + AsMut<[u8]>,
<LaneCount<LANES> as SupportedLaneCount>::BitMask: AsRef<[u8]> + AsMut<[u8]>,
{
type Output = Self;
#[inline]
@@ -165,10 +166,11 @@ fn bitand(mut self, rhs: Self) -> Self {
}
}
impl<const LANES: usize> core::ops::BitOr for BitMask<LANES>
impl<Element, const LANES: usize> core::ops::BitOr for Mask<Element, LANES>
where
Element: MaskElement,
LaneCount<LANES>: SupportedLaneCount,
<LaneCount<LANES> as SupportedLaneCount>::BitMask: Default + AsRef<[u8]> + AsMut<[u8]>,
<LaneCount<LANES> as SupportedLaneCount>::BitMask: AsRef<[u8]> + AsMut<[u8]>,
{
type Output = Self;
#[inline]
@@ -180,8 +182,9 @@ fn bitor(mut self, rhs: Self) -> Self {
}
}
impl<const LANES: usize> core::ops::BitXor for BitMask<LANES>
impl<Element, const LANES: usize> core::ops::BitXor for Mask<Element, LANES>
where
Element: MaskElement,
LaneCount<LANES>: SupportedLaneCount,
{
type Output = Self;
@@ -194,8 +197,9 @@ fn bitxor(mut self, rhs: Self) -> Self::Output {
}
}
impl<const LANES: usize> core::ops::Not for BitMask<LANES>
impl<Element, const LANES: usize> core::ops::Not for Mask<Element, LANES>
where
Element: MaskElement,
LaneCount<LANES>: SupportedLaneCount,
{
type Output = Self;
@@ -210,9 +214,3 @@ fn not(mut self) -> Self::Output {
self
}
}
pub type Mask8<const LANES: usize> = BitMask<LANES>;
pub type Mask16<const LANES: usize> = BitMask<LANES>;
pub type Mask32<const LANES: usize> = BitMask<LANES>;
pub type Mask64<const LANES: usize> = BitMask<LANES>;
pub type MaskSize<const LANES: usize> = BitMask<LANES>;
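In the bitmask-backed representation above, each lane is a single bit of a byte array, and splat masks off the unused high bits of the final byte (the u8::MAX >> (8 - LANES % 8) expression in the splat body). A small standalone illustration of that trailing-byte mask, with assumed lane counts; the helper name is hypothetical and not part of the crate:

// Illustrative helper (not from the diff): the mask applied to the last byte of
// the bitmask when LANES is not a multiple of 8.
fn trailing_byte_mask(lanes: usize) -> u8 {
    if lanes % 8 == 0 {
        u8::MAX
    } else {
        u8::MAX >> (8 - lanes % 8)
    }
}

fn main() {
    assert_eq!(trailing_byte_mask(3), 0b0000_0111);  // 3 lanes: keep the low 3 bits
    assert_eq!(trailing_byte_mask(12), 0b0000_1111); // 12 lanes: keep the low 4 bits of byte 2
    assert_eq!(trailing_byte_mask(16), 0b1111_1111); // multiple of 8: whole byte used
}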
//! Masks that take up full SIMD vector registers.
macro_rules! define_mask {
{
$(#[$attr:meta])*
struct $name:ident<const $lanes:ident: usize>(
crate::$type:ident<$lanes2:ident>
);
} => {
$(#[$attr])*
#[repr(transparent)]
pub struct $name<const $lanes: usize>(crate::$type<$lanes>)
where
crate::LaneCount<$lanes>: crate::SupportedLaneCount;
impl_full_mask_reductions! { $name, $type }
impl<const LANES: usize> Copy for $name<LANES>
where
crate::LaneCount<LANES>: crate::SupportedLaneCount,
{}
impl<const LANES: usize> Clone for $name<LANES>
where
crate::LaneCount<LANES>: crate::SupportedLaneCount,
{
#[inline]
fn clone(&self) -> Self {
*self
}
}
impl<const LANES: usize> PartialEq for $name<LANES>
where
crate::LaneCount<LANES>: crate::SupportedLaneCount,
{
fn eq(&self, other: &Self) -> bool {
self.0 == other.0
}
}
impl<const LANES: usize> PartialOrd for $name<LANES>
where
crate::LaneCount<LANES>: crate::SupportedLaneCount,
{
fn partial_cmp(&self, other: &Self) -> Option<core::cmp::Ordering> {
self.0.partial_cmp(&other.0)
}
}
impl<const LANES: usize> Eq for $name<LANES>
where
crate::LaneCount<LANES>: crate::SupportedLaneCount,
{}
use super::MaskElement;
use crate::{LaneCount, Simd, SupportedLaneCount};
#[repr(transparent)]
pub struct Mask<Element, const LANES: usize>(Simd<Element, LANES>)
where
Element: MaskElement,
LaneCount<LANES>: SupportedLaneCount;
impl<Element, const LANES: usize> Copy for Mask<Element, LANES>
where
Element: MaskElement,
LaneCount<LANES>: SupportedLaneCount,
{
}
impl<const LANES: usize> Ord for $name<LANES>
where
crate::LaneCount<LANES>: crate::SupportedLaneCount,
{
fn cmp(&self, other: &Self) -> core::cmp::Ordering {
self.0.cmp(&other.0)
}
}
impl<Element, const LANES: usize> Clone for Mask<Element, LANES>
where
Element: MaskElement,
LaneCount<LANES>: SupportedLaneCount,
{
#[inline]
fn clone(&self) -> Self {
*self
}
}
impl<const LANES: usize> $name<LANES>
where
crate::LaneCount<LANES>: crate::SupportedLaneCount,
{
pub fn splat(value: bool) -> Self {
Self(
<crate::$type<LANES>>::splat(
if value {
-1
} else {
0
}
),
)
}
impl<Element, const LANES: usize> PartialEq for Mask<Element, LANES>
where
Element: MaskElement + PartialEq,
LaneCount<LANES>: SupportedLaneCount,
{
fn eq(&self, other: &Self) -> bool {
self.0.eq(&other.0)
}
}
#[inline]
pub unsafe fn test_unchecked(&self, lane: usize) -> bool {
self.0[lane] == -1
}
impl<Element, const LANES: usize> PartialOrd for Mask<Element, LANES>
where
Element: MaskElement + PartialOrd,
LaneCount<LANES>: SupportedLaneCount,
{
fn partial_cmp(&self, other: &Self) -> Option<core::cmp::Ordering> {
self.0.partial_cmp(&other.0)
}
}
#[inline]
pub unsafe fn set_unchecked(&mut self, lane: usize, value: bool) {
self.0[lane] = if value {
-1
} else {
0
}
}
impl<Element, const LANES: usize> Eq for Mask<Element, LANES>
where
Element: MaskElement + Eq,
LaneCount<LANES>: SupportedLaneCount,
{
}
#[inline]
pub fn to_int(self) -> crate::$type<LANES> {
self.0
}
impl<Element, const LANES: usize> Ord for Mask<Element, LANES>
where
Element: MaskElement + Ord,
LaneCount<LANES>: SupportedLaneCount,
{
fn cmp(&self, other: &Self) -> core::cmp::Ordering {
self.0.cmp(&other.0)
}
}
#[inline]
pub unsafe fn from_int_unchecked(value: crate::$type<LANES>) -> Self {
Self(value)
}
impl<Element, const LANES: usize> Mask<Element, LANES>
where
Element: MaskElement,
LaneCount<LANES>: SupportedLaneCount,
{
pub fn splat(value: bool) -> Self {
Self(Simd::splat(if value {
Element::TRUE
} else {
Element::FALSE
}))
}
#[inline]
pub fn to_bitmask(self) -> [u8; crate::LaneCount::<LANES>::BITMASK_LEN] {
unsafe {
// TODO remove the transmute when rustc can use arrays of u8 as bitmasks
assert_eq!(
core::mem::size_of::<<crate::LaneCount::<LANES> as crate::SupportedLaneCount>::IntBitMask>(),
crate::LaneCount::<LANES>::BITMASK_LEN,
);
let bitmask: <crate::LaneCount::<LANES> as crate::SupportedLaneCount>::IntBitMask = crate::intrinsics::simd_bitmask(self.0);
let mut bitmask: [u8; crate::LaneCount::<LANES>::BITMASK_LEN] = core::mem::transmute_copy(&bitmask);
#[inline]
pub unsafe fn test_unchecked(&self, lane: usize) -> bool {
Element::eq(self.0[lane], Element::TRUE)
}
// There is a bug where LLVM appears to implement this operation with the wrong
// bit order.
// TODO fix this in a better way
if cfg!(any(target_arch = "mips", target_arch = "mips64")) {
for x in bitmask.as_mut() {
*x = x.reverse_bits();
}
}
#[inline]
pub unsafe fn set_unchecked(&mut self, lane: usize, value: bool) {
self.0[lane] = if value { Element::TRUE } else { Element::FALSE }
}
bitmask
}
}
#[inline]
pub fn to_int(self) -> Simd<Element, LANES> {
self.0
}
#[inline]
pub fn from_bitmask(mut bitmask: [u8; crate::LaneCount::<LANES>::BITMASK_LEN]) -> Self {
unsafe {
// There is a bug where LLVM appears to implement this operation with the wrong
// bit order.
// TODO fix this in a better way
if cfg!(any(target_arch = "mips", target_arch = "mips64")) {
for x in bitmask.as_mut() {
*x = x.reverse_bits();
}
}
#[inline]
pub unsafe fn from_int_unchecked(value: Simd<Element, LANES>) -> Self {
Self(value)
}
// TODO remove the transmute when rustc can use arrays of u8 as bitmasks
assert_eq!(
core::mem::size_of::<<crate::LaneCount::<LANES> as crate::SupportedLaneCount>::IntBitMask>(),
crate::LaneCount::<LANES>::BITMASK_LEN,
);
let bitmask: <crate::LaneCount::<LANES> as crate::SupportedLaneCount>::IntBitMask = core::mem::transmute_copy(&bitmask);
#[inline]
pub fn convert<T>(self) -> Mask<T, LANES>
where
T: MaskElement,
{
unsafe { Mask(crate::intrinsics::simd_cast(self.0)) }
}
Self::from_int_unchecked(crate::intrinsics::simd_select_bitmask(
bitmask,
Self::splat(true).to_int(),
Self::splat(false).to_int(),
))
#[inline]
pub fn to_bitmask(self) -> [u8; LaneCount::<LANES>::BITMASK_LEN] {
unsafe {
// TODO remove the transmute when rustc can use arrays of u8 as bitmasks
assert_eq!(
core::mem::size_of::<<LaneCount::<LANES> as SupportedLaneCount>::IntBitMask>(),
LaneCount::<LANES>::BITMASK_LEN,
);
let bitmask: <LaneCount<LANES> as SupportedLaneCount>::IntBitMask =
crate::intrinsics::simd_bitmask(self.0);
let mut bitmask: [u8; LaneCount::<LANES>::BITMASK_LEN] =
core::mem::transmute_copy(&bitmask);
// There is a bug where LLVM appears to implement this operation with the wrong
// bit order.
// TODO fix this in a better way
if cfg!(any(target_arch = "mips", target_arch = "mips64")) {
for x in bitmask.as_mut() {
*x = x.reverse_bits();
}
}
}
impl<const LANES: usize> core::convert::From<$name<LANES>> for crate::$type<LANES>
where
crate::LaneCount<LANES>: crate::SupportedLaneCount,
{
fn from(value: $name<LANES>) -> Self {
value.0
}
}
impl<const LANES: usize> core::ops::BitAnd for $name<LANES>
where
crate::LaneCount<LANES>: crate::SupportedLaneCount,
{
type Output = Self;
#[inline]
fn bitand(self, rhs: Self) -> Self {
Self(self.0 & rhs.0)
}
bitmask
}
}
impl<const LANES: usize> core::ops::BitOr for $name<LANES>
where
crate::LaneCount<LANES>: crate::SupportedLaneCount,
{
type Output = Self;
#[inline]
fn bitor(self, rhs: Self) -> Self {
Self(self.0 | rhs.0)
#[inline]
pub fn from_bitmask(mut bitmask: [u8; LaneCount::<LANES>::BITMASK_LEN]) -> Self {
unsafe {
// There is a bug where LLVM appears to implement this operation with the wrong
// bit order.
// TODO fix this in a better way
if cfg!(any(target_arch = "mips", target_arch = "mips64")) {
for x in bitmask.as_mut() {
*x = x.reverse_bits();
}
}
}
impl<const LANES: usize> core::ops::BitXor for $name<LANES>
where
crate::LaneCount<LANES>: crate::SupportedLaneCount,
{
type Output = Self;
#[inline]
fn bitxor(self, rhs: Self) -> Self::Output {
Self(self.0 ^ rhs.0)
}
}
// TODO remove the transmute when rustc can use arrays of u8 as bitmasks
assert_eq!(
core::mem::size_of::<<LaneCount::<LANES> as SupportedLaneCount>::IntBitMask>(),
LaneCount::<LANES>::BITMASK_LEN,
);
let bitmask: <LaneCount<LANES> as SupportedLaneCount>::IntBitMask =
core::mem::transmute_copy(&bitmask);
impl<const LANES: usize> core::ops::Not for $name<LANES>
where
crate::LaneCount<LANES>: crate::SupportedLaneCount,
{
type Output = Self;
#[inline]
fn not(self) -> Self::Output {
Self(!self.0)
}
Self::from_int_unchecked(crate::intrinsics::simd_select_bitmask(
bitmask,
Self::splat(true).to_int(),
Self::splat(false).to_int(),
))
}
}
}
define_mask! {
/// A mask equivalent to [SimdI8](crate::SimdI8), where all bits in the lane must be either set
/// or unset.
struct Mask8<const LANES: usize>(crate::SimdI8<LANES>);
}
define_mask! {
/// A mask equivalent to [SimdI16](crate::SimdI16), where all bits in the lane must be either set
/// or unset.
struct Mask16<const LANES: usize>(crate::SimdI16<LANES>);
impl<Element, const LANES: usize> core::convert::From<Mask<Element, LANES>> for Simd<Element, LANES>
where
Element: MaskElement,
LaneCount<LANES>: SupportedLaneCount,
{
fn from(value: Mask<Element, LANES>) -> Self {
value.0
}
}
define_mask! {
/// A mask equivalent to [SimdI32](crate::SimdI32), where all bits in the lane must be either set
/// or unset.
struct Mask32<const LANES: usize>(crate::SimdI32<LANES>);
impl<Element, const LANES: usize> core::ops::BitAnd for Mask<Element, LANES>
where
Element: MaskElement,
LaneCount<LANES>: SupportedLaneCount,
{
type Output = Self;
#[inline]
fn bitand(self, rhs: Self) -> Self {
unsafe { Self(crate::intrinsics::simd_and(self.0, rhs.0)) }
}
}
define_mask! {
/// A mask equivalent to [SimdI64](crate::SimdI64), where all bits in the lane must be either set
/// or unset.
struct Mask64<const LANES: usize>(crate::SimdI64<LANES>);
impl<Element, const LANES: usize> core::ops::BitOr for Mask<Element, LANES>
where
Element: MaskElement,
LaneCount<LANES>: SupportedLaneCount,
{
type Output = Self;
#[inline]
fn bitor(self, rhs: Self) -> Self {
unsafe { Self(crate::intrinsics::simd_or(self.0, rhs.0)) }
}
}
define_mask! {
/// A mask equivalent to [SimdIsize](crate::SimdIsize), where all bits in the lane must be either set
/// or unset.
struct MaskSize<const LANES: usize>(crate::SimdIsize<LANES>);
impl<Element, const LANES: usize> core::ops::BitXor for Mask<Element, LANES>
where
Element: MaskElement,
LaneCount<LANES>: SupportedLaneCount,
{
type Output = Self;
#[inline]
fn bitxor(self, rhs: Self) -> Self {
unsafe { Self(crate::intrinsics::simd_xor(self.0, rhs.0)) }
}
}
macro_rules! impl_from {
{ $from:ident ($from_inner:ident) => $($to:ident ($to_inner:ident)),* } => {
$(
impl<const LANES: usize> From<$from<LANES>> for $to<LANES>
where
crate::LaneCount<LANES>: crate::SupportedLaneCount,
{
fn from(value: $from<LANES>) -> Self {
let mut new = Self::splat(false);
for i in 0..LANES {
unsafe { new.set_unchecked(i, value.test_unchecked(i)) }
}
new
}
}
)*
impl<Element, const LANES: usize> core::ops::Not for Mask<Element, LANES>
where
Element: MaskElement,
LaneCount<LANES>: SupportedLaneCount,
{
type Output = Self;
#[inline]
fn not(self) -> Self::Output {
Self::splat(true) ^ self
}
}
impl_from! { Mask8 (SimdI8) => Mask16 (SimdI16), Mask32 (SimdI32), Mask64 (SimdI64), MaskSize (SimdIsize) }
impl_from! { Mask16 (SimdI16) => Mask32 (SimdI32), Mask64 (SimdI64), MaskSize (SimdIsize), Mask8 (SimdI8) }
impl_from! { Mask32 (SimdI32) => Mask64 (SimdI64), MaskSize (SimdIsize), Mask8 (SimdI8), Mask16 (SimdI16) }
impl_from! { Mask64 (SimdI64) => MaskSize (SimdIsize), Mask8 (SimdI8), Mask16 (SimdI16), Mask32 (SimdI32) }
impl_from! { MaskSize (SimdIsize) => Mask8 (SimdI8), Mask16 (SimdI16), Mask32 (SimdI32), Mask64 (SimdI64) }
impl_full_mask_reductions! {}
use crate::{LaneCount, SupportedLaneCount};
use crate::{LaneCount, Simd, SimdElement, SupportedLaneCount};
impl<I, Element, const LANES: usize> core::ops::Index<I> for Simd<Element, LANES>
where
Element: SimdElement,
LaneCount<LANES>: SupportedLaneCount,
I: core::slice::SliceIndex<[Element]>,
{
type Output = I::Output;
fn index(&self, index: I) -> &Self::Output {
&self.as_array()[index]
}
}
impl<I, Element, const LANES: usize> core::ops::IndexMut<I> for Simd<Element, LANES>
where
Element: SimdElement,
LaneCount<LANES>: SupportedLaneCount,
I: core::slice::SliceIndex<[Element]>,
{
fn index_mut(&mut self, index: I) -> &mut Self::Output {
&mut self.as_mut_array()[index]
}
}
/// Checks if the right-hand side argument of a left- or right-shift would cause overflow.
fn invalid_shift_rhs<T>(rhs: T) -> bool
@@ -191,31 +214,6 @@ fn neg(self) -> Self::Output {
}
};
{ impl Index for $type:ident, $scalar:ty } => {
impl<I, const LANES: usize> core::ops::Index<I> for crate::$type<LANES>
where
LaneCount<LANES>: SupportedLaneCount,
I: core::slice::SliceIndex<[$scalar]>,
{
type Output = I::Output;
fn index(&self, index: I) -> &Self::Output {
let slice: &[_] = self.as_ref();
&slice[index]
}
}
impl<I, const LANES: usize> core::ops::IndexMut<I> for crate::$type<LANES>
where
LaneCount<LANES>: SupportedLaneCount,
I: core::slice::SliceIndex<[$scalar]>,
{
fn index_mut(&mut self, index: I) -> &mut Self::Output {
let slice: &mut [_] = self.as_mut();
&mut slice[index]
}
}
};
// generic binary op with assignment when output is `Self`
{ @binary $type:ident, $scalar:ty, $trait:ident :: $trait_fn:ident, $assign_trait:ident :: $assign_trait_fn:ident, $intrinsic:ident } => {
impl_ref_ops! {
@@ -301,7 +299,6 @@ fn $assign_trait_fn(&mut self, rhs: $scalar) {
impl_op! { impl Div for $vector, $scalar }
impl_op! { impl Rem for $vector, $scalar }
impl_op! { impl Neg for $vector, $scalar }
impl_op! { impl Index for $vector, $scalar }
)*
)*
};
@@ -319,7 +316,6 @@ fn $assign_trait_fn(&mut self, rhs: $scalar) {
impl_op! { impl BitOr for $vector, $scalar }
impl_op! { impl BitXor for $vector, $scalar }
impl_op! { impl Not for $vector, $scalar }
impl_op! { impl Index for $vector, $scalar }
// Integers panic on divide by 0
impl_ref_ops! {
@@ -103,10 +103,11 @@ pub fn horizontal_min(self) -> $scalar {
}
macro_rules! impl_full_mask_reductions {
{ $name:ident, $bits_ty:ident } => {
impl<const LANES: usize> $name<LANES>
{} => {
impl<Element, const LANES: usize> Mask<Element, LANES>
where
crate::LaneCount<LANES>: crate::SupportedLaneCount,
Element: MaskElement,
LaneCount<LANES>: SupportedLaneCount,
{
#[inline]
pub fn any(self) -> bool {
@@ -120,24 +121,3 @@ pub fn all(self) -> bool {
}
}
}
macro_rules! impl_opaque_mask_reductions {
{ $name:ident, $bits_ty:ident } => {
impl<const LANES: usize> $name<LANES>
where
crate::LaneCount<LANES>: crate::SupportedLaneCount,
{
/// Returns true if any lane is set, or false otherwise.
#[inline]
pub fn any(self) -> bool {
self.0.any()
}
/// Returns true if all lanes are set, or false otherwise.
#[inline]
pub fn all(self) -> bool {
self.0.all()
}
}
}
}
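One detail worth noting in the to_bitmask/from_bitmask paths of both backends: on MIPS targets the diff reverses the bit order within each bitmask byte to work around what the comments describe as an apparent LLVM bug in the bitmask bit order. A standalone sketch of that per-byte reversal (function name and sample values are assumed, not from the crate):

// Mirrors the cfg(target_arch = "mips"/"mips64") workaround in the diff:
// reverse the bits within every byte of the bitmask.
fn reverse_bitmask_bytes(bitmask: &mut [u8]) {
    for x in bitmask.iter_mut() {
        *x = x.reverse_bits();
    }
}

fn main() {
    let mut m = [0b0000_0001u8, 0b1000_0000];
    reverse_bitmask_bytes(&mut m);
    assert_eq!(m, [0b1000_0000, 0b0000_0001]);
}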