提交 de13b20b 编写于 作者: C Caleb Zulawski

Convert all masks to a single type

上级 ddc67e3b
......@@ -12,420 +12,461 @@
)]
mod mask_impl;
use crate::{SimdI16, SimdI32, SimdI64, SimdI8, SimdIsize};
macro_rules! define_opaque_mask {
{
$(#[$attr:meta])*
struct $name:ident<const $lanes:ident: usize>($inner_ty:ty);
@bits $bits_ty:ident
} => {
$(#[$attr])*
#[allow(non_camel_case_types)]
pub struct $name<const LANES: usize>($inner_ty)
where
crate::LaneCount<LANES>: crate::SupportedLaneCount;
impl_opaque_mask_reductions! { $name, $bits_ty }
impl<const LANES: usize> $name<LANES>
where
crate::LaneCount<LANES>: crate::SupportedLaneCount,
{
/// Construct a mask by setting all lanes to the given value.
pub fn splat(value: bool) -> Self {
Self(<$inner_ty>::splat(value))
}
use crate::{LaneCount, Simd, SimdElement, SupportedLaneCount};
/// Converts an array to a SIMD vector.
pub fn from_array(array: [bool; LANES]) -> Self {
let mut vector = Self::splat(false);
let mut i = 0;
while i < $lanes {
vector.set(i, array[i]);
i += 1;
}
vector
}
/// Marker trait for types that may be used as SIMD mask elements.
pub unsafe trait MaskElement: SimdElement {
#[doc(hidden)]
fn valid<const LANES: usize>(values: Simd<Self, LANES>) -> bool
where
LaneCount<LANES>: SupportedLaneCount;
/// Converts a SIMD vector to an array.
pub fn to_array(self) -> [bool; LANES] {
let mut array = [false; LANES];
let mut i = 0;
while i < $lanes {
array[i] = self.test(i);
i += 1;
}
array
}
#[doc(hidden)]
fn eq(self, other: Self) -> bool;
/// Converts a vector of integers to a mask, where 0 represents `false` and -1
/// represents `true`.
///
/// # Safety
/// All lanes must be either 0 or -1.
#[inline]
pub unsafe fn from_int_unchecked(value: $bits_ty<LANES>) -> Self {
Self(<$inner_ty>::from_int_unchecked(value))
}
#[doc(hidden)]
const TRUE: Self;
/// Converts a vector of integers to a mask, where 0 represents `false` and -1
/// represents `true`.
///
/// # Panics
/// Panics if any lane is not 0 or -1.
#[inline]
pub fn from_int(value: $bits_ty<LANES>) -> Self {
assert!(
(value.lanes_eq($bits_ty::splat(0)) | value.lanes_eq($bits_ty::splat(-1))).all(),
"all values must be either 0 or -1",
);
unsafe { Self::from_int_unchecked(value) }
}
#[doc(hidden)]
const FALSE: Self;
}
/// Converts the mask to a vector of integers, where 0 represents `false` and -1
/// represents `true`.
#[inline]
pub fn to_int(self) -> $bits_ty<LANES> {
self.0.to_int()
macro_rules! impl_element {
{ $ty:ty } => {
unsafe impl MaskElement for $ty {
fn valid<const LANES: usize>(value: Simd<Self, LANES>) -> bool
where
LaneCount<LANES>: SupportedLaneCount,
{
(value.lanes_eq(Simd::splat(0)) | value.lanes_eq(Simd::splat(-1))).all()
}
/// Tests the value of the specified lane.
///
/// # Safety
/// `lane` must be less than `LANES`.
#[inline]
pub unsafe fn test_unchecked(&self, lane: usize) -> bool {
self.0.test_unchecked(lane)
}
fn eq(self, other: Self) -> bool { self == other }
/// Tests the value of the specified lane.
///
/// # Panics
/// Panics if `lane` is greater than or equal to the number of lanes in the vector.
#[inline]
pub fn test(&self, lane: usize) -> bool {
assert!(lane < LANES, "lane index out of range");
unsafe { self.test_unchecked(lane) }
}
const TRUE: Self = -1;
const FALSE: Self = 0;
}
}
}
/// Sets the value of the specified lane.
///
/// # Safety
/// `lane` must be less than `LANES`.
#[inline]
pub unsafe fn set_unchecked(&mut self, lane: usize, value: bool) {
self.0.set_unchecked(lane, value);
}
// Every signed integer type (plus `isize`) can act as a mask element:
// lanes are valid when each is exactly 0 or -1.
impl_element! { i8 }
impl_element! { i16 }
impl_element! { i32 }
impl_element! { i64 }
impl_element! { isize }
/// A SIMD vector mask for `LANES` elements of width specified by `Element`.
///
/// Each lane is logically a `bool`; `Element` (one of the signed integer
/// types) fixes the per-lane width so the mask can line up with a
/// same-width SIMD vector.
///
/// The layout of this type is unspecified: the inner `mask_impl::Mask`
/// may be bit-packed or a full vector depending on the target.
#[repr(transparent)]
pub struct Mask<Element, const LANES: usize>(mask_impl::Mask<Element, LANES>)
where
    Element: MaskElement,
    LaneCount<LANES>: SupportedLaneCount;
// Masks are plain data; duplicating one is a bitwise copy.
impl<Element: MaskElement, const LANES: usize> Copy for Mask<Element, LANES> where
    LaneCount<LANES>: SupportedLaneCount
{
}
/// Sets the value of the specified lane.
///
/// # Panics
/// Panics if `lane` is greater than or equal to the number of lanes in the vector.
#[inline]
pub fn set(&mut self, lane: usize, value: bool) {
assert!(lane < LANES, "lane index out of range");
unsafe { self.set_unchecked(lane, value); }
}
impl<Element, const LANES: usize> Clone for Mask<Element, LANES>
where
    Element: MaskElement,
    LaneCount<LANES>: SupportedLaneCount,
{
    /// Returns a copy of the mask (trivial, since `Mask` is `Copy`).
    // `#[inline]` added for consistency with the other `Clone` impls in
    // this file, which all carry it.
    #[inline]
    fn clone(&self) -> Self {
        *self
    }
}
/// Convert this mask to a bitmask, with one bit set per lane.
pub fn to_bitmask(self) -> [u8; crate::LaneCount::<LANES>::BITMASK_LEN] {
self.0.to_bitmask()
}
impl<Element, const LANES: usize> Mask<Element, LANES>
where
Element: MaskElement,
LaneCount<LANES>: SupportedLaneCount,
{
/// Construct a mask by setting all lanes to the given value.
pub fn splat(value: bool) -> Self {
Self(mask_impl::Mask::splat(value))
}
/// Convert a bitmask to a mask.
pub fn from_bitmask(bitmask: [u8; crate::LaneCount::<LANES>::BITMASK_LEN]) -> Self {
Self(<$inner_ty>::from_bitmask(bitmask))
}
/// Converts an array to a SIMD vector.
pub fn from_array(array: [bool; LANES]) -> Self {
let mut vector = Self::splat(false);
for (i, v) in array.iter().enumerate() {
vector.set(i, *v);
}
vector
}
// vector/array conversion
impl<const LANES: usize> From<[bool; LANES]> for $name<LANES>
where
crate::LaneCount<LANES>: crate::SupportedLaneCount,
{
fn from(array: [bool; LANES]) -> Self {
Self::from_array(array)
}
/// Converts a SIMD vector to an array.
pub fn to_array(self) -> [bool; LANES] {
let mut array = [false; LANES];
for (i, v) in array.iter_mut().enumerate() {
*v = self.test(i);
}
array
}
impl <const LANES: usize> From<$name<LANES>> for [bool; LANES]
where
crate::LaneCount<LANES>: crate::SupportedLaneCount,
{
fn from(vector: $name<LANES>) -> Self {
vector.to_array()
}
}
/// Converts a vector of integers to a mask, where 0 represents `false` and -1
/// represents `true`.
///
/// # Safety
/// All lanes must be either 0 or -1.
#[inline]
pub unsafe fn from_int_unchecked(value: Simd<Element, LANES>) -> Self {
Self(mask_impl::Mask::from_int_unchecked(value))
}
impl<const LANES: usize> Copy for $name<LANES>
where
crate::LaneCount<LANES>: crate::SupportedLaneCount,
{}
/// Converts a vector of integers to a mask, where 0 represents `false` and -1
/// represents `true`.
///
/// # Panics
/// Panics if any lane is not 0 or -1.
#[inline]
pub fn from_int(value: Simd<Element, LANES>) -> Self {
assert!(Element::valid(value), "all values must be either 0 or -1",);
unsafe { Self::from_int_unchecked(value) }
}
impl<const LANES: usize> Clone for $name<LANES>
where
crate::LaneCount<LANES>: crate::SupportedLaneCount,
{
#[inline]
fn clone(&self) -> Self {
*self
}
}
/// Converts the mask to a vector of integers, where 0 represents `false` and -1
/// represents `true`.
#[inline]
pub fn to_int(self) -> Simd<Element, LANES> {
self.0.to_int()
}
impl<const LANES: usize> Default for $name<LANES>
where
crate::LaneCount<LANES>: crate::SupportedLaneCount,
{
#[inline]
fn default() -> Self {
Self::splat(false)
}
}
/// Tests the value of the specified lane.
///
/// # Safety
/// `lane` must be less than `LANES`.
#[inline]
pub unsafe fn test_unchecked(&self, lane: usize) -> bool {
self.0.test_unchecked(lane)
}
impl<const LANES: usize> PartialEq for $name<LANES>
where
crate::LaneCount<LANES>: crate::SupportedLaneCount,
{
#[inline]
fn eq(&self, other: &Self) -> bool {
self.0 == other.0
}
}
/// Tests the value of the specified lane.
///
/// # Panics
/// Panics if `lane` is greater than or equal to the number of lanes in the vector.
#[inline]
pub fn test(&self, lane: usize) -> bool {
assert!(lane < LANES, "lane index out of range");
unsafe { self.test_unchecked(lane) }
}
impl<const LANES: usize> PartialOrd for $name<LANES>
where
crate::LaneCount<LANES>: crate::SupportedLaneCount,
{
#[inline]
fn partial_cmp(&self, other: &Self) -> Option<core::cmp::Ordering> {
self.0.partial_cmp(&other.0)
}
}
/// Sets the value of the specified lane.
///
/// # Safety
/// `lane` must be less than `LANES`.
#[inline]
pub unsafe fn set_unchecked(&mut self, lane: usize, value: bool) {
self.0.set_unchecked(lane, value);
}
impl<const LANES: usize> core::fmt::Debug for $name<LANES>
where
crate::LaneCount<LANES>: crate::SupportedLaneCount,
{
fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result {
f.debug_list()
.entries((0..LANES).map(|lane| self.test(lane)))
.finish()
}
/// Sets the value of the specified lane.
///
/// # Panics
/// Panics if `lane` is greater than or equal to the number of lanes in the vector.
#[inline]
pub fn set(&mut self, lane: usize, value: bool) {
assert!(lane < LANES, "lane index out of range");
unsafe {
self.set_unchecked(lane, value);
}
}
impl<const LANES: usize> core::ops::BitAnd for $name<LANES>
where
crate::LaneCount<LANES>: crate::SupportedLaneCount,
{
type Output = Self;
#[inline]
fn bitand(self, rhs: Self) -> Self {
Self(self.0 & rhs.0)
}
}
/// Convert this mask to a bitmask, with one bit set per lane.
pub fn to_bitmask(self) -> [u8; LaneCount::<LANES>::BITMASK_LEN] {
self.0.to_bitmask()
}
impl<const LANES: usize> core::ops::BitAnd<bool> for $name<LANES>
where
crate::LaneCount<LANES>: crate::SupportedLaneCount,
{
type Output = Self;
#[inline]
fn bitand(self, rhs: bool) -> Self {
self & Self::splat(rhs)
}
}
/// Convert a bitmask to a mask.
pub fn from_bitmask(bitmask: [u8; LaneCount::<LANES>::BITMASK_LEN]) -> Self {
Self(mask_impl::Mask::from_bitmask(bitmask))
}
impl<const LANES: usize> core::ops::BitAnd<$name<LANES>> for bool
where
crate::LaneCount<LANES>: crate::SupportedLaneCount,
{
type Output = $name<LANES>;
#[inline]
fn bitand(self, rhs: $name<LANES>) -> $name<LANES> {
$name::<LANES>::splat(self) & rhs
}
}
/// Returns true if any lane is set, or false otherwise.
#[inline]
pub fn any(self) -> bool {
self.0.any()
}
impl<const LANES: usize> core::ops::BitOr for $name<LANES>
where
crate::LaneCount<LANES>: crate::SupportedLaneCount,
{
type Output = Self;
#[inline]
fn bitor(self, rhs: Self) -> Self {
Self(self.0 | rhs.0)
}
}
/// Returns true if all lanes are set, or false otherwise.
#[inline]
pub fn all(self) -> bool {
self.0.all()
}
}
impl<const LANES: usize> core::ops::BitOr<bool> for $name<LANES>
where
crate::LaneCount<LANES>: crate::SupportedLaneCount,
{
type Output = Self;
#[inline]
fn bitor(self, rhs: bool) -> Self {
self | Self::splat(rhs)
}
}
// vector/array conversion
impl<Element, const LANES: usize> From<[bool; LANES]> for Mask<Element, LANES>
where
Element: MaskElement,
LaneCount<LANES>: SupportedLaneCount,
{
fn from(array: [bool; LANES]) -> Self {
Self::from_array(array)
}
}
impl<const LANES: usize> core::ops::BitOr<$name<LANES>> for bool
where
crate::LaneCount<LANES>: crate::SupportedLaneCount,
{
type Output = $name<LANES>;
#[inline]
fn bitor(self, rhs: $name<LANES>) -> $name<LANES> {
$name::<LANES>::splat(self) | rhs
}
}
// Mask -> boolean-array conversion.
impl<Element: MaskElement, const LANES: usize> From<Mask<Element, LANES>> for [bool; LANES]
where
    LaneCount<LANES>: SupportedLaneCount,
{
    /// Unpacks the mask into a plain boolean array, one `bool` per lane.
    fn from(mask: Mask<Element, LANES>) -> Self {
        mask.to_array()
    }
}
impl<const LANES: usize> core::ops::BitXor for $name<LANES>
where
crate::LaneCount<LANES>: crate::SupportedLaneCount,
{
type Output = Self;
#[inline]
fn bitxor(self, rhs: Self) -> Self::Output {
Self(self.0 ^ rhs.0)
}
}
impl<Element, const LANES: usize> Default for Mask<Element, LANES>
where
Element: MaskElement,
LaneCount<LANES>: SupportedLaneCount,
{
#[inline]
fn default() -> Self {
Self::splat(false)
}
}
impl<const LANES: usize> core::ops::BitXor<bool> for $name<LANES>
where
crate::LaneCount<LANES>: crate::SupportedLaneCount,
{
type Output = Self;
#[inline]
fn bitxor(self, rhs: bool) -> Self::Output {
self ^ Self::splat(rhs)
}
}
impl<Element, const LANES: usize> PartialEq for Mask<Element, LANES>
where
    Element: MaskElement + PartialEq,
    LaneCount<LANES>: SupportedLaneCount,
{
    /// Two masks compare equal when their inner representations do.
    #[inline]
    fn eq(&self, other: &Self) -> bool {
        self.0.eq(&other.0)
    }
}
impl<const LANES: usize> core::ops::BitXor<$name<LANES>> for bool
where
crate::LaneCount<LANES>: crate::SupportedLaneCount,
{
type Output = $name<LANES>;
#[inline]
fn bitxor(self, rhs: $name<LANES>) -> Self::Output {
$name::<LANES>::splat(self) ^ rhs
}
}
impl<Element, const LANES: usize> PartialOrd for Mask<Element, LANES>
where
    Element: MaskElement + PartialOrd,
    LaneCount<LANES>: SupportedLaneCount,
{
    /// Ordering is delegated to the inner mask representation.
    #[inline]
    fn partial_cmp(&self, other: &Self) -> Option<core::cmp::Ordering> {
        PartialOrd::partial_cmp(&self.0, &other.0)
    }
}
impl<const LANES: usize> core::ops::Not for $name<LANES>
where
crate::LaneCount<LANES>: crate::SupportedLaneCount,
{
type Output = $name<LANES>;
#[inline]
fn not(self) -> Self::Output {
Self(!self.0)
}
}
impl<Element, const LANES: usize> core::fmt::Debug for Mask<Element, LANES>
where
    Element: MaskElement + core::fmt::Debug,
    LaneCount<LANES>: SupportedLaneCount,
{
    /// Renders the mask as a list of `bool`s, one entry per lane.
    fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result {
        let mut list = f.debug_list();
        for lane in 0..LANES {
            list.entry(&self.test(lane));
        }
        list.finish()
    }
}
impl<const LANES: usize> core::ops::BitAndAssign for $name<LANES>
where
crate::LaneCount<LANES>: crate::SupportedLaneCount,
{
#[inline]
fn bitand_assign(&mut self, rhs: Self) {
self.0 = self.0 & rhs.0;
}
}
impl<Element, const LANES: usize> core::ops::BitAnd for Mask<Element, LANES>
where
Element: MaskElement,
LaneCount<LANES>: SupportedLaneCount,
{
type Output = Self;
#[inline]
fn bitand(self, rhs: Self) -> Self {
Self(self.0 & rhs.0)
}
}
impl<const LANES: usize> core::ops::BitAndAssign<bool> for $name<LANES>
where
crate::LaneCount<LANES>: crate::SupportedLaneCount,
{
#[inline]
fn bitand_assign(&mut self, rhs: bool) {
*self &= Self::splat(rhs);
}
}
impl<Element, const LANES: usize> core::ops::BitAnd<bool> for Mask<Element, LANES>
where
    Element: MaskElement,
    LaneCount<LANES>: SupportedLaneCount,
{
    type Output = Self;
    /// ANDs every lane with a single `bool`.
    #[inline]
    fn bitand(self, other: bool) -> Self {
        let broadcast = Self::splat(other);
        self & broadcast
    }
}
impl<const LANES: usize> core::ops::BitOrAssign for $name<LANES>
where
crate::LaneCount<LANES>: crate::SupportedLaneCount,
{
#[inline]
fn bitor_assign(&mut self, rhs: Self) {
self.0 = self.0 | rhs.0;
}
}
impl<Element, const LANES: usize> core::ops::BitAnd<Mask<Element, LANES>> for bool
where
    Element: MaskElement,
    LaneCount<LANES>: SupportedLaneCount,
{
    type Output = Mask<Element, LANES>;
    /// ANDs a single `bool` with every lane of a mask.
    #[inline]
    fn bitand(self, mask: Mask<Element, LANES>) -> Mask<Element, LANES> {
        Mask::<Element, LANES>::splat(self) & mask
    }
}
impl<const LANES: usize> core::ops::BitOrAssign<bool> for $name<LANES>
where
crate::LaneCount<LANES>: crate::SupportedLaneCount,
{
#[inline]
fn bitor_assign(&mut self, rhs: bool) {
*self |= Self::splat(rhs);
}
}
impl<Element, const LANES: usize> core::ops::BitOr for Mask<Element, LANES>
where
Element: MaskElement,
LaneCount<LANES>: SupportedLaneCount,
{
type Output = Self;
#[inline]
fn bitor(self, rhs: Self) -> Self {
Self(self.0 | rhs.0)
}
}
impl<const LANES: usize> core::ops::BitXorAssign for $name<LANES>
where
crate::LaneCount<LANES>: crate::SupportedLaneCount,
{
#[inline]
fn bitxor_assign(&mut self, rhs: Self) {
self.0 = self.0 ^ rhs.0;
}
}
impl<Element, const LANES: usize> core::ops::BitOr<bool> for Mask<Element, LANES>
where
    Element: MaskElement,
    LaneCount<LANES>: SupportedLaneCount,
{
    type Output = Self;
    /// ORs every lane with a single `bool`.
    #[inline]
    fn bitor(self, other: bool) -> Self {
        let broadcast = Self::splat(other);
        self | broadcast
    }
}
impl<const LANES: usize> core::ops::BitXorAssign<bool> for $name<LANES>
where
crate::LaneCount<LANES>: crate::SupportedLaneCount,
{
#[inline]
fn bitxor_assign(&mut self, rhs: bool) {
*self ^= Self::splat(rhs);
}
}
};
impl<Element, const LANES: usize> core::ops::BitOr<Mask<Element, LANES>> for bool
where
    Element: MaskElement,
    LaneCount<LANES>: SupportedLaneCount,
{
    type Output = Mask<Element, LANES>;
    /// ORs a single `bool` with every lane of a mask.
    #[inline]
    fn bitor(self, mask: Mask<Element, LANES>) -> Mask<Element, LANES> {
        Mask::<Element, LANES>::splat(self) | mask
    }
}
define_opaque_mask! {
/// Mask for vectors with `LANES` 8-bit elements.
///
/// The layout of this type is unspecified.
struct Mask8<const LANES: usize>(mask_impl::Mask8<LANES>);
@bits SimdI8
impl<Element, const LANES: usize> core::ops::BitXor for Mask<Element, LANES>
where
Element: MaskElement,
LaneCount<LANES>: SupportedLaneCount,
{
type Output = Self;
#[inline]
fn bitxor(self, rhs: Self) -> Self::Output {
Self(self.0 ^ rhs.0)
}
}
define_opaque_mask! {
/// Mask for vectors with `LANES` 16-bit elements.
///
/// The layout of this type is unspecified.
struct Mask16<const LANES: usize>(mask_impl::Mask16<LANES>);
@bits SimdI16
impl<Element, const LANES: usize> core::ops::BitXor<bool> for Mask<Element, LANES>
where
    Element: MaskElement,
    LaneCount<LANES>: SupportedLaneCount,
{
    type Output = Self;
    /// XORs every lane with a single `bool`.
    #[inline]
    fn bitxor(self, other: bool) -> Self::Output {
        let broadcast = Self::splat(other);
        self ^ broadcast
    }
}
define_opaque_mask! {
/// Mask for vectors with `LANES` 32-bit elements.
///
/// The layout of this type is unspecified.
struct Mask32<const LANES: usize>(mask_impl::Mask32<LANES>);
@bits SimdI32
impl<Element, const LANES: usize> core::ops::BitXor<Mask<Element, LANES>> for bool
where
    Element: MaskElement,
    LaneCount<LANES>: SupportedLaneCount,
{
    type Output = Mask<Element, LANES>;
    /// XORs a single `bool` with every lane of a mask.
    #[inline]
    fn bitxor(self, mask: Mask<Element, LANES>) -> Self::Output {
        Mask::<Element, LANES>::splat(self) ^ mask
    }
}
define_opaque_mask! {
/// Mask for vectors with `LANES` 64-bit elements.
///
/// The layout of this type is unspecified.
struct Mask64<const LANES: usize>(mask_impl::Mask64<LANES>);
@bits SimdI64
impl<Element, const LANES: usize> core::ops::Not for Mask<Element, LANES>
where
Element: MaskElement,
LaneCount<LANES>: SupportedLaneCount,
{
type Output = Mask<Element, LANES>;
#[inline]
fn not(self) -> Self::Output {
Self(!self.0)
}
}
define_opaque_mask! {
/// Mask for vectors with `LANES` pointer-width elements.
///
/// The layout of this type is unspecified.
struct MaskSize<const LANES: usize>(mask_impl::MaskSize<LANES>);
@bits SimdIsize
impl<Element, const LANES: usize> core::ops::BitAndAssign for Mask<Element, LANES>
where
    Element: MaskElement,
    LaneCount<LANES>: SupportedLaneCount,
{
    /// In-place lane-wise AND.
    #[inline]
    fn bitand_assign(&mut self, other: Self) {
        *self = *self & other;
    }
}
impl<Element, const LANES: usize> core::ops::BitAndAssign<bool> for Mask<Element, LANES>
where
    Element: MaskElement,
    LaneCount<LANES>: SupportedLaneCount,
{
    /// In-place AND of every lane with a single `bool`.
    #[inline]
    fn bitand_assign(&mut self, other: bool) {
        *self = *self & Self::splat(other);
    }
}
impl<Element, const LANES: usize> core::ops::BitOrAssign for Mask<Element, LANES>
where
    Element: MaskElement,
    LaneCount<LANES>: SupportedLaneCount,
{
    /// In-place lane-wise OR.
    #[inline]
    fn bitor_assign(&mut self, other: Self) {
        *self = *self | other;
    }
}
impl<Element, const LANES: usize> core::ops::BitOrAssign<bool> for Mask<Element, LANES>
where
    Element: MaskElement,
    LaneCount<LANES>: SupportedLaneCount,
{
    /// In-place OR of every lane with a single `bool`.
    #[inline]
    fn bitor_assign(&mut self, other: bool) {
        *self = *self | Self::splat(other);
    }
}
impl<Element, const LANES: usize> core::ops::BitXorAssign for Mask<Element, LANES>
where
    Element: MaskElement,
    LaneCount<LANES>: SupportedLaneCount,
{
    /// In-place lane-wise XOR.
    #[inline]
    fn bitxor_assign(&mut self, other: Self) {
        *self = *self ^ other;
    }
}
impl<Element, const LANES: usize> core::ops::BitXorAssign<bool> for Mask<Element, LANES>
where
    Element: MaskElement,
    LaneCount<LANES>: SupportedLaneCount,
{
    /// In-place XOR of every lane with a single `bool`.
    #[inline]
    fn bitxor_assign(&mut self, other: bool) {
        *self = *self ^ Self::splat(other);
    }
}
// Convenience aliases binding `Mask` to each supported element width.
/// A SIMD mask of `LANES` 8-bit values.
pub type Mask8<const LANES: usize> = Mask<i8, LANES>;
/// A SIMD mask of `LANES` 16-bit values.
pub type Mask16<const LANES: usize> = Mask<i16, LANES>;
/// A SIMD mask of `LANES` 32-bit values.
pub type Mask32<const LANES: usize> = Mask<i32, LANES>;
/// A SIMD mask of `LANES` 64-bit values.
pub type Mask64<const LANES: usize> = Mask<i64, LANES>;
/// A SIMD mask of `LANES` pointer-width values.
pub type MaskSize<const LANES: usize> = Mask<isize, LANES>;
/// Vector of eight 8-bit masks
pub type mask8x8 = Mask8<8>;
......@@ -488,7 +529,7 @@ impl<const LANES: usize> From<$from<LANES>> for $to<LANES>
crate::LaneCount<LANES>: crate::SupportedLaneCount,
{
fn from(value: $from<LANES>) -> Self {
Self(value.0.into())
Self(value.0.convert())
}
}
)*
......
use crate::{LaneCount, SupportedLaneCount};
/// Helper trait for limiting int conversion types
pub trait ConvertToInt {}
impl<const LANES: usize> ConvertToInt for crate::SimdI8<LANES> where
LaneCount<LANES>: SupportedLaneCount
{
}
impl<const LANES: usize> ConvertToInt for crate::SimdI16<LANES> where
LaneCount<LANES>: SupportedLaneCount
{
}
impl<const LANES: usize> ConvertToInt for crate::SimdI32<LANES> where
LaneCount<LANES>: SupportedLaneCount
{
}
impl<const LANES: usize> ConvertToInt for crate::SimdI64<LANES> where
LaneCount<LANES>: SupportedLaneCount
{
}
impl<const LANES: usize> ConvertToInt for crate::SimdIsize<LANES> where
LaneCount<LANES>: SupportedLaneCount
{
}
use crate::{LaneCount, MaskElement, Simd, SupportedLaneCount};
use core::marker::PhantomData;
/// A mask where each lane is represented by a single bit.
#[repr(transparent)]
pub struct BitMask<const LANES: usize>(<LaneCount<LANES> as SupportedLaneCount>::BitMask)
/// A mask where each lane is one bit, packed into the integer bitmask
/// type chosen by `SupportedLaneCount::BitMask`.
pub struct Mask<Element, const LANES: usize>(
    <LaneCount<LANES> as SupportedLaneCount>::BitMask,
    // Ties the mask to its logical element type without storing a value.
    PhantomData<Element>,
)
where
    Element: MaskElement,
    LaneCount<LANES>: SupportedLaneCount;
impl<const LANES: usize> Copy for BitMask<LANES> where LaneCount<LANES>: SupportedLaneCount {}
// Bit-packed masks are plain data; copying is a bitwise copy.
impl<Element: MaskElement, const LANES: usize> Copy for Mask<Element, LANES> where
    LaneCount<LANES>: SupportedLaneCount
{
}
impl<const LANES: usize> Clone for BitMask<LANES>
impl<Element, const LANES: usize> Clone for Mask<Element, LANES>
where
Element: MaskElement,
LaneCount<LANES>: SupportedLaneCount,
{
fn clone(&self) -> Self {
......@@ -40,8 +28,9 @@ fn clone(&self) -> Self {
}
}
impl<const LANES: usize> PartialEq for BitMask<LANES>
impl<Element, const LANES: usize> PartialEq for Mask<Element, LANES>
where
Element: MaskElement,
LaneCount<LANES>: SupportedLaneCount,
{
fn eq(&self, other: &Self) -> bool {
......@@ -49,8 +38,9 @@ fn eq(&self, other: &Self) -> bool {
}
}
impl<const LANES: usize> PartialOrd for BitMask<LANES>
impl<Element, const LANES: usize> PartialOrd for Mask<Element, LANES>
where
Element: MaskElement,
LaneCount<LANES>: SupportedLaneCount,
{
fn partial_cmp(&self, other: &Self) -> Option<core::cmp::Ordering> {
......@@ -58,10 +48,16 @@ fn partial_cmp(&self, other: &Self) -> Option<core::cmp::Ordering> {
}
}
impl<const LANES: usize> Eq for BitMask<LANES> where LaneCount<LANES>: SupportedLaneCount {}
// Equality on the packed bits is total, so `Eq` holds.
impl<Element: MaskElement, const LANES: usize> Eq for Mask<Element, LANES> where
    LaneCount<LANES>: SupportedLaneCount
{
}
impl<const LANES: usize> Ord for BitMask<LANES>
impl<Element, const LANES: usize> Ord for Mask<Element, LANES>
where
Element: MaskElement,
LaneCount<LANES>: SupportedLaneCount,
{
fn cmp(&self, other: &Self) -> core::cmp::Ordering {
......@@ -69,8 +65,9 @@ fn cmp(&self, other: &Self) -> core::cmp::Ordering {
}
}
impl<const LANES: usize> BitMask<LANES>
impl<Element, const LANES: usize> Mask<Element, LANES>
where
Element: MaskElement,
LaneCount<LANES>: SupportedLaneCount,
{
#[inline]
......@@ -84,7 +81,7 @@ pub fn splat(value: bool) -> Self {
if LANES % 8 > 0 {
*mask.as_mut().last_mut().unwrap() &= u8::MAX >> (8 - LANES % 8);
}
Self(mask)
Self(mask, PhantomData)
}
#[inline]
......@@ -98,33 +95,28 @@ pub unsafe fn set_unchecked(&mut self, lane: usize, value: bool) {
}
#[inline]
pub fn to_int<V>(self) -> V
where
V: ConvertToInt + Default + core::ops::Not<Output = V>,
{
pub fn to_int(self) -> Simd<Element, LANES> {
unsafe {
let mask: <LaneCount<LANES> as SupportedLaneCount>::IntBitMask =
core::mem::transmute_copy(&self);
crate::intrinsics::simd_select_bitmask(mask, !V::default(), V::default())
crate::intrinsics::simd_select_bitmask(
mask,
Simd::splat(Element::TRUE),
Simd::splat(Element::FALSE),
)
}
}
#[inline]
pub unsafe fn from_int_unchecked<V>(value: V) -> Self
where
V: crate::Vector,
{
pub unsafe fn from_int_unchecked(value: Simd<Element, LANES>) -> Self {
// TODO remove the transmute when rustc is more flexible
assert_eq!(
core::mem::size_of::<<crate::LaneCount::<LANES> as crate::SupportedLaneCount>::BitMask>(
),
core::mem::size_of::<
<crate::LaneCount::<LANES> as crate::SupportedLaneCount>::IntBitMask,
>(),
core::mem::size_of::<<LaneCount::<LANES> as SupportedLaneCount>::BitMask>(),
core::mem::size_of::<<LaneCount::<LANES> as SupportedLaneCount>::IntBitMask>(),
);
let mask: <LaneCount<LANES> as SupportedLaneCount>::IntBitMask =
crate::intrinsics::simd_bitmask(value);
Self(core::mem::transmute_copy(&mask))
Self(core::mem::transmute_copy(&mask), PhantomData)
}
#[inline]
......@@ -136,7 +128,15 @@ pub unsafe fn from_int_unchecked<V>(value: V) -> Self
#[inline]
pub fn from_bitmask(bitmask: [u8; LaneCount::<LANES>::BITMASK_LEN]) -> Self {
// Safety: these are the same type and we are laundering the generic
Self(unsafe { core::mem::transmute_copy(&bitmask) })
Self(unsafe { core::mem::transmute_copy(&bitmask) }, PhantomData)
}
#[inline]
pub fn convert<T>(self) -> Mask<T, LANES>
where
T: MaskElement,
{
unsafe { core::mem::transmute_copy(&self) }
}
#[inline]
......@@ -150,10 +150,11 @@ pub fn all(self) -> bool {
}
}
impl<const LANES: usize> core::ops::BitAnd for BitMask<LANES>
impl<Element, const LANES: usize> core::ops::BitAnd for Mask<Element, LANES>
where
Element: MaskElement,
LaneCount<LANES>: SupportedLaneCount,
<LaneCount<LANES> as SupportedLaneCount>::BitMask: Default + AsRef<[u8]> + AsMut<[u8]>,
<LaneCount<LANES> as SupportedLaneCount>::BitMask: AsRef<[u8]> + AsMut<[u8]>,
{
type Output = Self;
#[inline]
......@@ -165,10 +166,11 @@ fn bitand(mut self, rhs: Self) -> Self {
}
}
impl<const LANES: usize> core::ops::BitOr for BitMask<LANES>
impl<Element, const LANES: usize> core::ops::BitOr for Mask<Element, LANES>
where
Element: MaskElement,
LaneCount<LANES>: SupportedLaneCount,
<LaneCount<LANES> as SupportedLaneCount>::BitMask: Default + AsRef<[u8]> + AsMut<[u8]>,
<LaneCount<LANES> as SupportedLaneCount>::BitMask: AsRef<[u8]> + AsMut<[u8]>,
{
type Output = Self;
#[inline]
......@@ -180,8 +182,9 @@ fn bitor(mut self, rhs: Self) -> Self {
}
}
impl<const LANES: usize> core::ops::BitXor for BitMask<LANES>
impl<Element, const LANES: usize> core::ops::BitXor for Mask<Element, LANES>
where
Element: MaskElement,
LaneCount<LANES>: SupportedLaneCount,
{
type Output = Self;
......@@ -194,8 +197,9 @@ fn bitxor(mut self, rhs: Self) -> Self::Output {
}
}
impl<const LANES: usize> core::ops::Not for BitMask<LANES>
impl<Element, const LANES: usize> core::ops::Not for Mask<Element, LANES>
where
Element: MaskElement,
LaneCount<LANES>: SupportedLaneCount,
{
type Output = Self;
......@@ -210,9 +214,3 @@ fn not(mut self) -> Self::Output {
self
}
}
pub type Mask8<const LANES: usize> = BitMask<LANES>;
pub type Mask16<const LANES: usize> = BitMask<LANES>;
pub type Mask32<const LANES: usize> = BitMask<LANES>;
pub type Mask64<const LANES: usize> = BitMask<LANES>;
pub type MaskSize<const LANES: usize> = BitMask<LANES>;
//! Masks that take up full SIMD vector registers.
macro_rules! define_mask {
{
$(#[$attr:meta])*
struct $name:ident<const $lanes:ident: usize>(
crate::$type:ident<$lanes2:ident>
);
} => {
$(#[$attr])*
#[repr(transparent)]
pub struct $name<const $lanes: usize>(crate::$type<$lanes>)
where
crate::LaneCount<$lanes>: crate::SupportedLaneCount;
impl_full_mask_reductions! { $name, $type }
impl<const LANES: usize> Copy for $name<LANES>
where
crate::LaneCount<LANES>: crate::SupportedLaneCount,
{}
impl<const LANES: usize> Clone for $name<LANES>
where
crate::LaneCount<LANES>: crate::SupportedLaneCount,
{
#[inline]
fn clone(&self) -> Self {
*self
}
}
impl<const LANES: usize> PartialEq for $name<LANES>
where
crate::LaneCount<LANES>: crate::SupportedLaneCount,
{
fn eq(&self, other: &Self) -> bool {
self.0 == other.0
}
}
impl<const LANES: usize> PartialOrd for $name<LANES>
where
crate::LaneCount<LANES>: crate::SupportedLaneCount,
{
fn partial_cmp(&self, other: &Self) -> Option<core::cmp::Ordering> {
self.0.partial_cmp(&other.0)
}
}
impl<const LANES: usize> Eq for $name<LANES>
where
crate::LaneCount<LANES>: crate::SupportedLaneCount,
{}
use super::MaskElement;
use crate::{LaneCount, Simd, SupportedLaneCount};
#[repr(transparent)]
/// A mask backed by a full SIMD vector: each lane is an `Element`
/// expected to be either `Element::TRUE` (-1) or `Element::FALSE` (0).
pub struct Mask<Element, const LANES: usize>(Simd<Element, LANES>)
where
    Element: MaskElement,
    LaneCount<LANES>: SupportedLaneCount;
// Full-vector masks are plain data; copying is a bitwise copy.
impl<Element: MaskElement, const LANES: usize> Copy for Mask<Element, LANES> where
    LaneCount<LANES>: SupportedLaneCount
{
}
impl<const LANES: usize> Ord for $name<LANES>
where
crate::LaneCount<LANES>: crate::SupportedLaneCount,
{
fn cmp(&self, other: &Self) -> core::cmp::Ordering {
self.0.cmp(&other.0)
}
}
impl<Element: MaskElement, const LANES: usize> Clone for Mask<Element, LANES>
where
    LaneCount<LANES>: SupportedLaneCount,
{
    /// Returns a copy of the mask.
    #[inline]
    fn clone(&self) -> Self {
        // `Mask` is `Copy`, so cloning is just a dereference.
        *self
    }
}
impl<const LANES: usize> $name<LANES>
where
crate::LaneCount<LANES>: crate::SupportedLaneCount,
{
pub fn splat(value: bool) -> Self {
Self(
<crate::$type<LANES>>::splat(
if value {
-1
} else {
0
}
),
)
}
impl<Element, const LANES: usize> PartialEq for Mask<Element, LANES>
where
    Element: MaskElement + PartialEq,
    LaneCount<LANES>: SupportedLaneCount,
{
    /// Two masks compare equal when their backing vectors do.
    fn eq(&self, other: &Self) -> bool {
        PartialEq::eq(&self.0, &other.0)
    }
}
#[inline]
pub unsafe fn test_unchecked(&self, lane: usize) -> bool {
self.0[lane] == -1
}
impl<Element, const LANES: usize> PartialOrd for Mask<Element, LANES>
where
    Element: MaskElement + PartialOrd,
    LaneCount<LANES>: SupportedLaneCount,
{
    /// Ordering is delegated to the backing vector.
    fn partial_cmp(&self, other: &Self) -> Option<core::cmp::Ordering> {
        PartialOrd::partial_cmp(&self.0, &other.0)
    }
}
#[inline]
pub unsafe fn set_unchecked(&mut self, lane: usize, value: bool) {
self.0[lane] = if value {
-1
} else {
0
}
}
// Equality is total whenever the element's equality is.
impl<Element: MaskElement + Eq, const LANES: usize> Eq for Mask<Element, LANES> where
    LaneCount<LANES>: SupportedLaneCount
{
}
#[inline]
pub fn to_int(self) -> crate::$type<LANES> {
self.0
}
impl<Element, const LANES: usize> Ord for Mask<Element, LANES>
where
    Element: MaskElement + Ord,
    LaneCount<LANES>: SupportedLaneCount,
{
    /// Total ordering, delegated to the backing vector.
    fn cmp(&self, other: &Self) -> core::cmp::Ordering {
        Ord::cmp(&self.0, &other.0)
    }
}
#[inline]
pub unsafe fn from_int_unchecked(value: crate::$type<LANES>) -> Self {
Self(value)
}
impl<Element, const LANES: usize> Mask<Element, LANES>
where
Element: MaskElement,
LaneCount<LANES>: SupportedLaneCount,
{
pub fn splat(value: bool) -> Self {
Self(Simd::splat(if value {
Element::TRUE
} else {
Element::FALSE
}))
}
#[inline]
pub fn to_bitmask(self) -> [u8; crate::LaneCount::<LANES>::BITMASK_LEN] {
unsafe {
// TODO remove the transmute when rustc can use arrays of u8 as bitmasks
assert_eq!(
core::mem::size_of::<<crate::LaneCount::<LANES> as crate::SupportedLaneCount>::IntBitMask>(),
crate::LaneCount::<LANES>::BITMASK_LEN,
);
let bitmask: <crate::LaneCount::<LANES> as crate::SupportedLaneCount>::IntBitMask = crate::intrinsics::simd_bitmask(self.0);
let mut bitmask: [u8; crate::LaneCount::<LANES>::BITMASK_LEN] = core::mem::transmute_copy(&bitmask);
#[inline]
pub unsafe fn test_unchecked(&self, lane: usize) -> bool {
Element::eq(self.0[lane], Element::TRUE)
}
// There is a bug where LLVM appears to implement this operation with the wrong
// bit order.
// TODO fix this in a better way
if cfg!(any(target_arch = "mips", target_arch = "mips64")) {
for x in bitmask.as_mut() {
*x = x.reverse_bits();
}
}
#[inline]
pub unsafe fn set_unchecked(&mut self, lane: usize, value: bool) {
self.0[lane] = if value { Element::TRUE } else { Element::FALSE }
}
bitmask
}
}
#[inline]
pub fn to_int(self) -> Simd<Element, LANES> {
self.0
}
#[inline]
pub fn from_bitmask(mut bitmask: [u8; crate::LaneCount::<LANES>::BITMASK_LEN]) -> Self {
unsafe {
// There is a bug where LLVM appears to implement this operation with the wrong
// bit order.
// TODO fix this in a better way
if cfg!(any(target_arch = "mips", target_arch = "mips64")) {
for x in bitmask.as_mut() {
*x = x.reverse_bits();
}
}
#[inline]
pub unsafe fn from_int_unchecked(value: Simd<Element, LANES>) -> Self {
Self(value)
}
// TODO remove the transmute when rustc can use arrays of u8 as bitmasks
assert_eq!(
core::mem::size_of::<<crate::LaneCount::<LANES> as crate::SupportedLaneCount>::IntBitMask>(),
crate::LaneCount::<LANES>::BITMASK_LEN,
);
let bitmask: <crate::LaneCount::<LANES> as crate::SupportedLaneCount>::IntBitMask = core::mem::transmute_copy(&bitmask);
/// Converts this mask to a mask with a different element type, preserving
/// the logical value of each lane.
#[inline]
pub fn convert<T>(self) -> Mask<T, LANES>
where
    T: MaskElement,
{
    // SAFETY: assumes mask lanes cast losslessly between mask element
    // types via `simd_cast` — NOTE(review): relies on the invariant that
    // lanes only ever hold the TRUE/FALSE patterns.
    let cast = unsafe { crate::intrinsics::simd_cast(self.0) };
    Mask(cast)
}
Self::from_int_unchecked(crate::intrinsics::simd_select_bitmask(
bitmask,
Self::splat(true).to_int(),
Self::splat(false).to_int(),
))
#[inline]
pub fn to_bitmask(self) -> [u8; LaneCount::<LANES>::BITMASK_LEN] {
unsafe {
// TODO remove the transmute when rustc can use arrays of u8 as bitmasks
assert_eq!(
core::mem::size_of::<<LaneCount::<LANES> as SupportedLaneCount>::IntBitMask>(),
LaneCount::<LANES>::BITMASK_LEN,
);
let bitmask: <LaneCount<LANES> as SupportedLaneCount>::IntBitMask =
crate::intrinsics::simd_bitmask(self.0);
let mut bitmask: [u8; LaneCount::<LANES>::BITMASK_LEN] =
core::mem::transmute_copy(&bitmask);
// There is a bug where LLVM appears to implement this operation with the wrong
// bit order.
// TODO fix this in a better way
if cfg!(any(target_arch = "mips", target_arch = "mips64")) {
for x in bitmask.as_mut() {
*x = x.reverse_bits();
}
}
}
impl<const LANES: usize> core::convert::From<$name<LANES>> for crate::$type<LANES>
where
crate::LaneCount<LANES>: crate::SupportedLaneCount,
{
fn from(value: $name<LANES>) -> Self {
value.0
}
}
impl<const LANES: usize> core::ops::BitAnd for $name<LANES>
where
crate::LaneCount<LANES>: crate::SupportedLaneCount,
{
type Output = Self;
#[inline]
fn bitand(self, rhs: Self) -> Self {
Self(self.0 & rhs.0)
}
bitmask
}
}
impl<const LANES: usize> core::ops::BitOr for $name<LANES>
where
crate::LaneCount<LANES>: crate::SupportedLaneCount,
{
type Output = Self;
#[inline]
fn bitor(self, rhs: Self) -> Self {
Self(self.0 | rhs.0)
#[inline]
pub fn from_bitmask(mut bitmask: [u8; LaneCount::<LANES>::BITMASK_LEN]) -> Self {
unsafe {
// There is a bug where LLVM appears to implement this operation with the wrong
// bit order.
// TODO fix this in a better way
if cfg!(any(target_arch = "mips", target_arch = "mips64")) {
for x in bitmask.as_mut() {
*x = x.reverse_bits();
}
}
}
impl<const LANES: usize> core::ops::BitXor for $name<LANES>
where
crate::LaneCount<LANES>: crate::SupportedLaneCount,
{
type Output = Self;
#[inline]
fn bitxor(self, rhs: Self) -> Self::Output {
Self(self.0 ^ rhs.0)
}
}
// TODO remove the transmute when rustc can use arrays of u8 as bitmasks
assert_eq!(
core::mem::size_of::<<LaneCount::<LANES> as SupportedLaneCount>::IntBitMask>(),
LaneCount::<LANES>::BITMASK_LEN,
);
let bitmask: <LaneCount<LANES> as SupportedLaneCount>::IntBitMask =
core::mem::transmute_copy(&bitmask);
impl<const LANES: usize> core::ops::Not for $name<LANES>
where
crate::LaneCount<LANES>: crate::SupportedLaneCount,
{
type Output = Self;
#[inline]
fn not(self) -> Self::Output {
Self(!self.0)
}
Self::from_int_unchecked(crate::intrinsics::simd_select_bitmask(
bitmask,
Self::splat(true).to_int(),
Self::splat(false).to_int(),
))
}
}
}
define_mask! {
/// A mask equivalent to [SimdI8](crate::SimdI8), where all bits in the lane must be either set
/// or unset.
struct Mask8<const LANES: usize>(crate::SimdI8<LANES>);
}
define_mask! {
/// A mask equivalent to [SimdI16](crate::SimdI16), where all bits in the lane must be either set
/// or unset.
struct Mask16<const LANES: usize>(crate::SimdI16<LANES>);
impl<Element, const LANES: usize> core::convert::From<Mask<Element, LANES>> for Simd<Element, LANES>
where
Element: MaskElement,
LaneCount<LANES>: SupportedLaneCount,
{
fn from(value: Mask<Element, LANES>) -> Self {
value.0
}
}
define_mask! {
/// A mask equivalent to [SimdI32](crate::SimdI32), where all bits in the lane must be either set
/// or unset.
struct Mask32<const LANES: usize>(crate::SimdI32<LANES>);
impl<Element, const LANES: usize> core::ops::BitAnd for Mask<Element, LANES>
where
    Element: MaskElement,
    LaneCount<LANES>: SupportedLaneCount,
{
    type Output = Self;

    /// Lanewise logical AND of two masks.
    #[inline]
    fn bitand(self, rhs: Self) -> Self {
        // SAFETY: assumes both operands hold only valid mask lanes, so a
        // lanewise AND produces valid mask lanes — NOTE(review): confirm
        // the TRUE/FALSE bit-pattern invariant.
        let lanes = unsafe { crate::intrinsics::simd_and(self.0, rhs.0) };
        Self(lanes)
    }
}
define_mask! {
/// A mask equivalent to [SimdI64](crate::SimdI64), where all bits in the lane must be either set
/// or unset.
struct Mask64<const LANES: usize>(crate::SimdI64<LANES>);
impl<Element, const LANES: usize> core::ops::BitOr for Mask<Element, LANES>
where
    Element: MaskElement,
    LaneCount<LANES>: SupportedLaneCount,
{
    type Output = Self;

    /// Lanewise logical OR of two masks.
    #[inline]
    fn bitor(self, rhs: Self) -> Self {
        // SAFETY: assumes both operands hold only valid mask lanes, so a
        // lanewise OR produces valid mask lanes — NOTE(review): confirm
        // the TRUE/FALSE bit-pattern invariant.
        let lanes = unsafe { crate::intrinsics::simd_or(self.0, rhs.0) };
        Self(lanes)
    }
}
define_mask! {
/// A mask equivalent to [SimdIsize](crate::SimdIsize), where all bits in the lane must be either set
/// or unset.
struct MaskSize<const LANES: usize>(crate::SimdIsize<LANES>);
impl<Element, const LANES: usize> core::ops::BitXor for Mask<Element, LANES>
where
    Element: MaskElement,
    LaneCount<LANES>: SupportedLaneCount,
{
    type Output = Self;

    /// Lanewise logical XOR of two masks.
    #[inline]
    fn bitxor(self, rhs: Self) -> Self {
        // SAFETY: assumes both operands hold only valid mask lanes, so a
        // lanewise XOR produces valid mask lanes — NOTE(review): confirm
        // the TRUE/FALSE bit-pattern invariant.
        let lanes = unsafe { crate::intrinsics::simd_xor(self.0, rhs.0) };
        Self(lanes)
    }
}
macro_rules! impl_from {
{ $from:ident ($from_inner:ident) => $($to:ident ($to_inner:ident)),* } => {
$(
impl<const LANES: usize> From<$from<LANES>> for $to<LANES>
where
crate::LaneCount<LANES>: crate::SupportedLaneCount,
{
fn from(value: $from<LANES>) -> Self {
let mut new = Self::splat(false);
for i in 0..LANES {
unsafe { new.set_unchecked(i, value.test_unchecked(i)) }
}
new
}
}
)*
impl<Element, const LANES: usize> core::ops::Not for Mask<Element, LANES>
where
    Element: MaskElement,
    LaneCount<LANES>: SupportedLaneCount,
{
    type Output = Self;

    /// Lanewise logical negation, implemented as XOR against an all-true
    /// mask (which flips every lane).
    #[inline]
    fn not(self) -> Self::Output {
        let all_true = Self::splat(true);
        all_true ^ self
    }
}
// Generate `From` conversions between every pair of legacy mask widths.
// Each conversion copies lane-by-lane via `test_unchecked`/`set_unchecked`
// (see the `impl_from!` macro definition above).
impl_from! { Mask8 (SimdI8) => Mask16 (SimdI16), Mask32 (SimdI32), Mask64 (SimdI64), MaskSize (SimdIsize) }
impl_from! { Mask16 (SimdI16) => Mask32 (SimdI32), Mask64 (SimdI64), MaskSize (SimdIsize), Mask8 (SimdI8) }
impl_from! { Mask32 (SimdI32) => Mask64 (SimdI64), MaskSize (SimdIsize), Mask8 (SimdI8), Mask16 (SimdI16) }
impl_from! { Mask64 (SimdI64) => MaskSize (SimdIsize), Mask8 (SimdI8), Mask16 (SimdI16), Mask32 (SimdI32) }
impl_from! { MaskSize (SimdIsize) => Mask8 (SimdI8), Mask16 (SimdI16), Mask32 (SimdI32), Mask64 (SimdI64) }
// Expand the `any`/`all` reductions for the generic mask type.
impl_full_mask_reductions! {}
use crate::{LaneCount, SupportedLaneCount};
use crate::{LaneCount, Simd, SimdElement, SupportedLaneCount};
impl<I, Element, const LANES: usize> core::ops::Index<I> for Simd<Element, LANES>
where
    Element: SimdElement,
    LaneCount<LANES>: SupportedLaneCount,
    I: core::slice::SliceIndex<[Element]>,
{
    type Output = I::Output;

    /// Indexes the vector's lanes through its array representation;
    /// accepts anything a slice accepts (usize, ranges, ...).
    fn index(&self, index: I) -> &Self::Output {
        let lanes: &[Element] = self.as_array();
        &lanes[index]
    }
}
impl<I, Element, const LANES: usize> core::ops::IndexMut<I> for Simd<Element, LANES>
where
    Element: SimdElement,
    LaneCount<LANES>: SupportedLaneCount,
    I: core::slice::SliceIndex<[Element]>,
{
    /// Mutably indexes the vector's lanes through its array representation;
    /// accepts anything a slice accepts (usize, ranges, ...).
    fn index_mut(&mut self, index: I) -> &mut Self::Output {
        let lanes: &mut [Element] = self.as_mut_array();
        &mut lanes[index]
    }
}
/// Checks if the right-hand side argument of a left- or right-shift would cause overflow.
fn invalid_shift_rhs<T>(rhs: T) -> bool
......@@ -191,31 +214,6 @@ fn neg(self) -> Self::Output {
}
};
// Macro arm: generates `Index`/`IndexMut` impls for a concrete vector type
// `$type` with scalar `$scalar`, forwarding to the type's slice view via
// `as_ref`/`as_mut`. (Legacy arm — the generic `Index` impl on `Simd`
// supersedes per-type expansion.)
{ impl Index for $type:ident, $scalar:ty } => {
impl<I, const LANES: usize> core::ops::Index<I> for crate::$type<LANES>
where
LaneCount<LANES>: SupportedLaneCount,
I: core::slice::SliceIndex<[$scalar]>,
{
type Output = I::Output;
fn index(&self, index: I) -> &Self::Output {
// Coerce to a slice so any `SliceIndex` argument (usize, ranges) works.
let slice: &[_] = self.as_ref();
&slice[index]
}
}
impl<I, const LANES: usize> core::ops::IndexMut<I> for crate::$type<LANES>
where
LaneCount<LANES>: SupportedLaneCount,
I: core::slice::SliceIndex<[$scalar]>,
{
fn index_mut(&mut self, index: I) -> &mut Self::Output {
let slice: &mut [_] = self.as_mut();
&mut slice[index]
}
}
};
// generic binary op with assignment when output is `Self`
{ @binary $type:ident, $scalar:ty, $trait:ident :: $trait_fn:ident, $assign_trait:ident :: $assign_trait_fn:ident, $intrinsic:ident } => {
impl_ref_ops! {
......@@ -301,7 +299,6 @@ fn $assign_trait_fn(&mut self, rhs: $scalar) {
impl_op! { impl Div for $vector, $scalar }
impl_op! { impl Rem for $vector, $scalar }
impl_op! { impl Neg for $vector, $scalar }
impl_op! { impl Index for $vector, $scalar }
)*
)*
};
......@@ -319,7 +316,6 @@ fn $assign_trait_fn(&mut self, rhs: $scalar) {
impl_op! { impl BitOr for $vector, $scalar }
impl_op! { impl BitXor for $vector, $scalar }
impl_op! { impl Not for $vector, $scalar }
impl_op! { impl Index for $vector, $scalar }
// Integers panic on divide by 0
impl_ref_ops! {
......
......@@ -103,10 +103,11 @@ pub fn horizontal_min(self) -> $scalar {
}
macro_rules! impl_full_mask_reductions {
{ $name:ident, $bits_ty:ident } => {
impl<const LANES: usize> $name<LANES>
{} => {
impl<Element, const LANES: usize> Mask<Element, LANES>
where
crate::LaneCount<LANES>: crate::SupportedLaneCount,
Element: MaskElement,
LaneCount<LANES>: SupportedLaneCount,
{
#[inline]
pub fn any(self) -> bool {
......@@ -120,24 +121,3 @@ pub fn all(self) -> bool {
}
}
}
// Legacy macro: expands `any`/`all` reductions on an opaque mask type
// `$name` by delegating to the inner mask's own `any`/`all`.
// NOTE(review): the `$bits_ty` parameter is matched but never used in the
// expansion.
macro_rules! impl_opaque_mask_reductions {
{ $name:ident, $bits_ty:ident } => {
impl<const LANES: usize> $name<LANES>
where
crate::LaneCount<LANES>: crate::SupportedLaneCount,
{
/// Returns true if any lane is set, or false otherwise.
#[inline]
pub fn any(self) -> bool {
self.0.any()
}
/// Returns true if all lanes are set, or false otherwise.
#[inline]
pub fn all(self) -> bool {
self.0.all()
}
}
}
}
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册