
Add remaining instructions #1250


Merged · 5 commits · Nov 10, 2021
Changes from 3 commits
434 changes: 370 additions & 64 deletions crates/core_arch/src/aarch64/neon/generated.rs

Large diffs are not rendered by default.

140 changes: 140 additions & 0 deletions crates/core_arch/src/aarch64/neon/mod.rs
@@ -3208,6 +3208,97 @@ pub unsafe fn vsriq_n_p64<const N: i32>(a: poly64x2_t, b: poly64x2_t) -> poly64x
transmute(vsriq_n_s64_(transmute(a), transmute(b), N))
}

/// SM3 hash TT1A computation (SM3TT1A); `IMM2` selects a 32-bit lane of `c`
#[inline]
#[target_feature(enable = "neon,sm4")]
#[cfg_attr(test, assert_instr(sm3tt1a, IMM2 = 0))]
#[rustc_legacy_const_generics(3)]
pub unsafe fn vsm3tt1aq_u32<const IMM2: i32>(
a: uint32x4_t,
b: uint32x4_t,
c: uint32x4_t,
) -> uint32x4_t {
static_assert_imm2!(IMM2);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.crypto.sm3tt1a")]
fn vsm3tt1aq_u32_(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t, imm2: i64) -> uint32x4_t;
}
vsm3tt1aq_u32_(a, b, c, IMM2 as i64)
}

/// SM3 hash TT1B computation (SM3TT1B); `IMM2` selects a 32-bit lane of `c`
#[inline]
#[target_feature(enable = "neon,sm4")]
#[cfg_attr(test, assert_instr(sm3tt1b, IMM2 = 0))]
#[rustc_legacy_const_generics(3)]
pub unsafe fn vsm3tt1bq_u32<const IMM2: i32>(
a: uint32x4_t,
b: uint32x4_t,
c: uint32x4_t,
) -> uint32x4_t {
static_assert_imm2!(IMM2);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.crypto.sm3tt1b")]
fn vsm3tt1bq_u32_(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t, imm2: i64) -> uint32x4_t;
}
vsm3tt1bq_u32_(a, b, c, IMM2 as i64)
}

/// SM3 hash TT2A computation (SM3TT2A); `IMM2` selects a 32-bit lane of `c`
#[inline]
#[target_feature(enable = "neon,sm4")]
#[cfg_attr(test, assert_instr(sm3tt2a, IMM2 = 0))]
#[rustc_legacy_const_generics(3)]
pub unsafe fn vsm3tt2aq_u32<const IMM2: i32>(
a: uint32x4_t,
b: uint32x4_t,
c: uint32x4_t,
) -> uint32x4_t {
static_assert_imm2!(IMM2);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.crypto.sm3tt2a")]
fn vsm3tt2aq_u32_(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t, imm2: i64) -> uint32x4_t;
}
vsm3tt2aq_u32_(a, b, c, IMM2 as i64)
}

/// SM3 hash TT2B computation (SM3TT2B); `IMM2` selects a 32-bit lane of `c`
#[inline]
#[target_feature(enable = "neon,sm4")]
#[cfg_attr(test, assert_instr(sm3tt2b, IMM2 = 0))]
#[rustc_legacy_const_generics(3)]
pub unsafe fn vsm3tt2bq_u32<const IMM2: i32>(
a: uint32x4_t,
b: uint32x4_t,
c: uint32x4_t,
) -> uint32x4_t {
static_assert_imm2!(IMM2);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.crypto.sm3tt2b")]
fn vsm3tt2bq_u32_(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t, imm2: i64) -> uint32x4_t;
}
vsm3tt2bq_u32_(a, b, c, IMM2 as i64)
}
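
A note on usage: each of the four vsm3tt* intrinsics takes a 2-bit const generic that selects which 32-bit lane of `c` feeds the round computation, which is what static_assert_imm2! enforces. A minimal calling sketch (hypothetical wrapper, assuming an SM4-capable target):

    // Hypothetical helper: one TT1A step using lane 0 of `c`.
    // IMM2 must be a const in 0..=3; anything else fails to compile.
    unsafe fn sm3_tt1a_lane0(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t {
        vsm3tt1aq_u32::<0>(a, b, c)
    }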

/// Exclusive OR and rotate
#[inline]
#[target_feature(enable = "neon,sha3")]
#[cfg_attr(test, assert_instr(xar, IMM6 = 0))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vxarq_u64<const IMM6: i32>(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
static_assert_imm6!(IMM6);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.crypto.xar")]
fn vxarq_u64_(a: uint64x2_t, b: uint64x2_t, n: i64) -> uint64x2_t;
}
vxarq_u64_(a, b, IMM6 as i64)
}
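
Semantically, XAR computes a per-lane rotate of an exclusive-or: each 64-bit lane of the result is (a ^ b) rotated right by IMM6. A scalar model for intuition (a sketch, not part of the crate):

    // Models one 64-bit lane of vxarq_u64::<IMM6>(a, b).
    fn xar_lane(a: u64, b: u64, imm6: u32) -> u64 {
        (a ^ b).rotate_right(imm6)
    }

This matches the test below: with a rotation of 0, the lanes are simply 1 ^ 3 == 2 and 2 ^ 4 == 6.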

#[cfg(test)]
mod tests {
use crate::core_arch::aarch64::test_support::*;
@@ -4866,6 +4957,55 @@ mod tests {
assert_eq!(vals[1], 1.);
assert_eq!(vals[2], 2.);
}

#[simd_test(enable = "neon,sm4")]
unsafe fn test_vsm3tt1aq_u32() {
let a: u32x4 = u32x4::new(1, 2, 3, 4);
let b: u32x4 = u32x4::new(1, 2, 3, 4);
let c: u32x4 = u32x4::new(1, 2, 3, 4);
let e: u32x4 = u32x4::new(2, 1536, 4, 16395);
let r: u32x4 = transmute(vsm3tt1aq_u32::<0>(transmute(a), transmute(b), transmute(c)));
assert_eq!(r, e);
}

#[simd_test(enable = "neon,sm4")]
unsafe fn test_vsm3tt1bq_u32() {
let a: u32x4 = u32x4::new(1, 2, 3, 4);
let b: u32x4 = u32x4::new(1, 2, 3, 4);
let c: u32x4 = u32x4::new(1, 2, 3, 4);
let e: u32x4 = u32x4::new(2, 1536, 4, 16392);
let r: u32x4 = transmute(vsm3tt1bq_u32::<0>(transmute(a), transmute(b), transmute(c)));
assert_eq!(r, e);
}

#[simd_test(enable = "neon,sm4")]
unsafe fn test_vsm3tt2aq_u32() {
let a: u32x4 = u32x4::new(1, 2, 3, 4);
let b: u32x4 = u32x4::new(1, 2, 3, 4);
let c: u32x4 = u32x4::new(1, 2, 3, 4);
let e: u32x4 = u32x4::new(2, 1572864, 4, 1447435);
let r: u32x4 = transmute(vsm3tt2aq_u32::<0>(transmute(a), transmute(b), transmute(c)));
assert_eq!(r, e);
}

#[simd_test(enable = "neon,sm4")]
unsafe fn test_vsm3tt2bq_u32() {
let a: u32x4 = u32x4::new(1, 2, 3, 4);
let b: u32x4 = u32x4::new(1, 2, 3, 4);
let c: u32x4 = u32x4::new(1, 2, 3, 4);
let e: u32x4 = u32x4::new(2, 1572864, 4, 1052680);
let r: u32x4 = transmute(vsm3tt2bq_u32::<0>(transmute(a), transmute(b), transmute(c)));
assert_eq!(r, e);
}

#[simd_test(enable = "neon,sha3")]
unsafe fn test_vxarq_u64() {
let a: u64x2 = u64x2::new(1, 2);
let b: u64x2 = u64x2::new(3, 4);
let e: u64x2 = u64x2::new(2, 6);
let r: u64x2 = transmute(vxarq_u64::<0>(transmute(a), transmute(b)));
assert_eq!(r, e);
}
}

#[cfg(test)]
86 changes: 86 additions & 0 deletions crates/core_arch/src/arm_shared/neon/mod.rs
@@ -4806,6 +4806,63 @@ pub unsafe fn vpadalq_u32(a: uint64x2_t, b: uint32x4_t) -> uint64x2_t {
}
}

/// Signed 8-bit integer matrix multiply-accumulate
#[inline]
#[target_feature(enable = "neon,i8mm")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(smmla))]
pub unsafe fn vmmlaq_s32(a: int32x4_t, b: int8x16_t, c: int8x16_t) -> int32x4_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.smmla.v4i32.v16i8")]
#[cfg_attr(
target_arch = "aarch64",
link_name = "llvm.aarch64.neon.smmla.v4i32.v16i8"
)]
fn vmmlaq_s32_(a: int32x4_t, b: int8x16_t, c: int8x16_t) -> int32x4_t;
}
vmmlaq_s32_(a, b, c)
}

/// Unsigned 8-bit integer matrix multiply-accumulate
#[inline]
#[target_feature(enable = "neon,i8mm")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ummla))]
pub unsafe fn vmmlaq_u32(a: uint32x4_t, b: uint8x16_t, c: uint8x16_t) -> uint32x4_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.ummla.v4i32.v16i8")]
#[cfg_attr(
target_arch = "aarch64",
link_name = "llvm.aarch64.neon.ummla.v4i32.v16i8"
)]
fn vmmlaq_u32_(a: uint32x4_t, b: uint8x16_t, c: uint8x16_t) -> uint32x4_t;
}
vmmlaq_u32_(a, b, c)
}

/// Unsigned and signed 8-bit integer matrix multiply-accumulate
#[inline]
#[target_feature(enable = "neon,i8mm")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(usmmla))]
pub unsafe fn vusmmlaq_s32(a: int32x4_t, b: uint8x16_t, c: int8x16_t) -> int32x4_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.usmmla.v4i32.v16i8")]
#[cfg_attr(
target_arch = "aarch64",
link_name = "llvm.aarch64.neon.usmmla.v4i32.v16i8"
)]
fn vusmmlaq_s32_(a: int32x4_t, b: uint8x16_t, c: int8x16_t) -> int32x4_t;
}
vusmmlaq_s32_(a, b, c)
}
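
For intuition: the *mmla intrinsics view each 16-byte source as a row-major 2x8 matrix and the accumulator as a row-major 2x2 matrix of 32-bit values; entry (i, j) accumulates the dot product of row i of `b` with row j of `c`. A scalar model (a sketch based on my reading of the SMMLA pseudocode, not part of the crate):

    // Models vmmlaq_s32: r[2*i + j] = a[2*i + j] + dot(row i of b, row j of c).
    fn smmla_model(a: [i32; 4], b: [i8; 16], c: [i8; 16]) -> [i32; 4] {
        let mut r = a;
        for i in 0..2 {
            for j in 0..2 {
                for k in 0..8 {
                    r[2 * i + j] += (b[8 * i + k] as i32) * (c[8 * j + k] as i32);
                }
            }
        }
        r
    }

Applied to the test vectors used below, this model yields (1168, 1421, 1888, 2334) for all three intrinsics, since every input element is non-negative and in range for both signed and unsigned interpretations.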

#[cfg(test)]
mod tests {
use super::*;
@@ -10368,6 +10425,35 @@ mod tests {
let e: u16x8 = transmute(vrev64q_p16(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon,i8mm")]
unsafe fn test_vmmlaq_s32() {
let a: i32x4 = i32x4::new(1, 3, 4, 9);
let b: i8x16 = i8x16::new(1, 21, 31, 14, 5, 6, 17, 8, 9, 13, 15, 12, 13, 19, 20, 16);
let c: i8x16 = i8x16::new(12, 22, 3, 4, 5, 56, 7, 8, 91, 10, 11, 15, 13, 14, 17, 16);
let e: i32x4 = i32x4::new(1168, 1421, 1888, 2334);
let r: i32x4 = transmute(vmmlaq_s32(transmute(a), transmute(b), transmute(c)));
assert_eq!(r, e);
}

#[simd_test(enable = "neon,i8mm")]
unsafe fn test_vmmlaq_u32() {
let a: u32x4 = u32x4::new(1, 3, 4, 9);
let b: u8x16 = u8x16::new(1, 21, 31, 14, 5, 6, 17, 8, 9, 13, 15, 12, 13, 19, 20, 16);
let c: u8x16 = u8x16::new(12, 22, 3, 4, 5, 56, 7, 8, 91, 10, 11, 15, 13, 14, 17, 16);
let e: u32x4 = u32x4::new(1168, 1421, 1888, 2334);
let r: u32x4 = transmute(vmmlaq_u32(transmute(a), transmute(b), transmute(c)));
assert_eq!(r, e);
}

#[simd_test(enable = "neon,i8mm")]
unsafe fn test_vusmmlaq_s32() {
let a: i32x4 = i32x4::new(1, 3, 4, 9);
let b: u8x16 = u8x16::new(1, 21, 31, 14, 5, 6, 17, 8, 9, 13, 15, 12, 13, 19, 20, 16);
let c: i8x16 = i8x16::new(12, 22, 3, 4, 5, 56, 7, 8, 91, 10, 11, 15, 13, 14, 17, 16);
let e: i32x4 = i32x4::new(1168, 1421, 1888, 2334);
let r: i32x4 = transmute(vusmmlaq_s32(transmute(a), transmute(b), transmute(c)));
assert_eq!(r, e);
}
}

#[cfg(all(test, target_arch = "arm", target_endian = "little"))]
3 changes: 2 additions & 1 deletion crates/core_arch/src/lib.rs
@@ -33,7 +33,8 @@
f16c_target_feature,
allow_internal_unstable,
decl_macro,
bench_black_box
bench_black_box,
asm_const
)]
#![cfg_attr(test, feature(test, abi_vectorcall))]
#![deny(clippy::missing_inline_in_public_items)]
2 changes: 2 additions & 0 deletions crates/std_detect/src/detect/arch/arm.rs
@@ -22,4 +22,6 @@ features! {
/// FEAT_AES (AES instructions)
@FEATURE: #[unstable(feature = "stdsimd", issue = "27731")] sha2: "sha2";
/// FEAT_SHA1 & FEAT_SHA256 (SHA1 & SHA2-256 instructions)
@FEATURE: #[unstable(feature = "stdsimd", issue = "27731")] i8mm: "i8mm";
/// FEAT_I8MM
}
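
With the feature registered in std_detect, callers can gate on runtime support before dispatching to the new intrinsics. A sketch (assuming a nightly toolchain, since the ARM detection macro is unstable behind the stdsimd feature):

    // Hypothetical runtime check before calling vmmlaq_s32 and friends.
    fn have_i8mm() -> bool {
        std::arch::is_arm_feature_detected!("i8mm")
    }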