
Commit 9142a8a

Support ARM crypto extension on A32/T32 (#929)
1 parent 9b9a7d5 commit 9142a8a
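
This commit moves the crypto intrinsics out of the aarch64-only module and into the shared arm module, so the AES/SHA1/SHA256 intrinsics become available on 32-bit ARM (A32/T32) as well as AArch64. As a rough sketch of what that enables, one AES round can now be written the same way on both architectures; the helper below is illustrative (not part of the commit) and assumes a nightly toolchain where core::arch::arm exports these intrinsics:

use core::arch::arm::{uint8x16_t, vaeseq_u8, vaesmcq_u8};

// One AES encryption round: AESE performs AddRoundKey + SubBytes + ShiftRows,
// AESMC performs MixColumns. (Hypothetical helper, for illustration only.)
#[target_feature(enable = "crypto")]
unsafe fn aes_round(state: uint8x16_t, round_key: uint8x16_t) -> uint8x16_t {
    vaesmcq_u8(vaeseq_u8(state, round_key))
}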


3 files changed: +48 −18 lines


crates/core_arch/src/aarch64/mod.rs

+0 −3

@@ -12,9 +12,6 @@ pub use self::v8::*;
 mod neon;
 pub use self::neon::*;

-mod crypto;
-pub use self::crypto::*;
-
 mod tme;
 pub use self::tme::*;

crates/core_arch/src/aarch64/crypto.rs renamed to crates/core_arch/src/arm/crypto.rs

+43 −15

@@ -2,35 +2,49 @@ use crate::core_arch::arm::{uint32x4_t, uint8x16_t};

 #[allow(improper_ctypes)]
 extern "C" {
-    #[link_name = "llvm.aarch64.crypto.aese"]
+    #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.crypto.aese")]
+    #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.aese")]
     fn vaeseq_u8_(data: uint8x16_t, key: uint8x16_t) -> uint8x16_t;
-    #[link_name = "llvm.aarch64.crypto.aesd"]
+    #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.crypto.aesd")]
+    #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.aesd")]
     fn vaesdq_u8_(data: uint8x16_t, key: uint8x16_t) -> uint8x16_t;
-    #[link_name = "llvm.aarch64.crypto.aesmc"]
+    #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.crypto.aesmc")]
+    #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.aesmc")]
     fn vaesmcq_u8_(data: uint8x16_t) -> uint8x16_t;
-    #[link_name = "llvm.aarch64.crypto.aesimc"]
+    #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.crypto.aesimc")]
+    #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.aesimc")]
     fn vaesimcq_u8_(data: uint8x16_t) -> uint8x16_t;

-    #[link_name = "llvm.aarch64.crypto.sha1h"]
+    #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.crypto.sha1h")]
+    #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.sha1h")]
     fn vsha1h_u32_(hash_e: u32) -> u32;
-    #[link_name = "llvm.aarch64.crypto.sha1su0"]
+    #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.crypto.sha1su0")]
+    #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.sha1su0")]
     fn vsha1su0q_u32_(w0_3: uint32x4_t, w4_7: uint32x4_t, w8_11: uint32x4_t) -> uint32x4_t;
-    #[link_name = "llvm.aarch64.crypto.sha1su1"]
+    #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.crypto.sha1su1")]
+    #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.sha1su1")]
     fn vsha1su1q_u32_(tw0_3: uint32x4_t, w12_15: uint32x4_t) -> uint32x4_t;
-    #[link_name = "llvm.aarch64.crypto.sha1c"]
+    #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.crypto.sha1c")]
+    #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.sha1c")]
     fn vsha1cq_u32_(hash_abcd: uint32x4_t, hash_e: u32, wk: uint32x4_t) -> uint32x4_t;
-    #[link_name = "llvm.aarch64.crypto.sha1p"]
+    #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.crypto.sha1p")]
+    #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.sha1p")]
     fn vsha1pq_u32_(hash_abcd: uint32x4_t, hash_e: u32, wk: uint32x4_t) -> uint32x4_t;
-    #[link_name = "llvm.aarch64.crypto.sha1m"]
+    #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.crypto.sha1m")]
+    #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.sha1m")]
     fn vsha1mq_u32_(hash_abcd: uint32x4_t, hash_e: u32, wk: uint32x4_t) -> uint32x4_t;

-    #[link_name = "llvm.aarch64.crypto.sha256h"]
+    #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.crypto.sha256h")]
+    #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.sha256h")]
     fn vsha256hq_u32_(hash_abcd: uint32x4_t, hash_efgh: uint32x4_t, wk: uint32x4_t) -> uint32x4_t;
-    #[link_name = "llvm.aarch64.crypto.sha256h2"]
+    #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.crypto.sha256h2")]
+    #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.sha256h2")]
     fn vsha256h2q_u32_(hash_efgh: uint32x4_t, hash_abcd: uint32x4_t, wk: uint32x4_t) -> uint32x4_t;
-    #[link_name = "llvm.aarch64.crypto.sha256su0"]
+    #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.crypto.sha256su0")]
+    #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.sha256su0")]
     fn vsha256su0q_u32_(w0_3: uint32x4_t, w4_7: uint32x4_t) -> uint32x4_t;
-    #[link_name = "llvm.aarch64.crypto.sha256su1"]
+    #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.crypto.sha256su1")]
+    #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.sha256su1")]
     fn vsha256su1q_u32_(tw0_3: uint32x4_t, w8_11: uint32x4_t, w12_15: uint32x4_t) -> uint32x4_t;
 }

@@ -40,6 +54,7 @@ use stdarch_test::assert_instr;
 /// AES single round encryption.
 #[inline]
 #[target_feature(enable = "crypto")]
+#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))]
 #[cfg_attr(test, assert_instr(aese))]
 pub unsafe fn vaeseq_u8(data: uint8x16_t, key: uint8x16_t) -> uint8x16_t {
     vaeseq_u8_(data, key)
@@ -48,6 +63,7 @@ pub unsafe fn vaeseq_u8(data: uint8x16_t, key: uint8x16_t) -> uint8x16_t {
 /// AES single round decryption.
 #[inline]
 #[target_feature(enable = "crypto")]
+#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))]
 #[cfg_attr(test, assert_instr(aesd))]
 pub unsafe fn vaesdq_u8(data: uint8x16_t, key: uint8x16_t) -> uint8x16_t {
     vaesdq_u8_(data, key)
@@ -56,6 +72,7 @@ pub unsafe fn vaesdq_u8(data: uint8x16_t, key: uint8x16_t) -> uint8x16_t {
 /// AES mix columns.
 #[inline]
 #[target_feature(enable = "crypto")]
+#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))]
 #[cfg_attr(test, assert_instr(aesmc))]
 pub unsafe fn vaesmcq_u8(data: uint8x16_t) -> uint8x16_t {
     vaesmcq_u8_(data)
@@ -64,6 +81,7 @@ pub unsafe fn vaesmcq_u8(data: uint8x16_t) -> uint8x16_t {
 /// AES inverse mix columns.
 #[inline]
 #[target_feature(enable = "crypto")]
+#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))]
 #[cfg_attr(test, assert_instr(aesimc))]
 pub unsafe fn vaesimcq_u8(data: uint8x16_t) -> uint8x16_t {
     vaesimcq_u8_(data)
@@ -72,6 +90,7 @@ pub unsafe fn vaesimcq_u8(data: uint8x16_t) -> uint8x16_t {
 /// SHA1 fixed rotate.
 #[inline]
 #[target_feature(enable = "crypto")]
+#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))]
 #[cfg_attr(test, assert_instr(sha1h))]
 pub unsafe fn vsha1h_u32(hash_e: u32) -> u32 {
     vsha1h_u32_(hash_e)
@@ -80,6 +99,7 @@ pub unsafe fn vsha1h_u32(hash_e: u32) -> u32 {
 /// SHA1 hash update accelerator, choose.
 #[inline]
 #[target_feature(enable = "crypto")]
+#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))]
 #[cfg_attr(test, assert_instr(sha1c))]
 pub unsafe fn vsha1cq_u32(hash_abcd: uint32x4_t, hash_e: u32, wk: uint32x4_t) -> uint32x4_t {
     vsha1cq_u32_(hash_abcd, hash_e, wk)
@@ -88,6 +108,7 @@ pub unsafe fn vsha1cq_u32(hash_abcd: uint32x4_t, hash_e: u32, wk: uint32x4_t) ->
 /// SHA1 hash update accelerator, majority.
 #[inline]
 #[target_feature(enable = "crypto")]
+#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))]
 #[cfg_attr(test, assert_instr(sha1m))]
 pub unsafe fn vsha1mq_u32(hash_abcd: uint32x4_t, hash_e: u32, wk: uint32x4_t) -> uint32x4_t {
     vsha1mq_u32_(hash_abcd, hash_e, wk)
@@ -96,6 +117,7 @@ pub unsafe fn vsha1mq_u32(hash_abcd: uint32x4_t, hash_e: u32, wk: uint32x4_t) ->
 /// SHA1 hash update accelerator, parity.
 #[inline]
 #[target_feature(enable = "crypto")]
+#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))]
 #[cfg_attr(test, assert_instr(sha1p))]
 pub unsafe fn vsha1pq_u32(hash_abcd: uint32x4_t, hash_e: u32, wk: uint32x4_t) -> uint32x4_t {
     vsha1pq_u32_(hash_abcd, hash_e, wk)
@@ -104,6 +126,7 @@ pub unsafe fn vsha1pq_u32(hash_abcd: uint32x4_t, hash_e: u32, wk: uint32x4_t) ->
 /// SHA1 schedule update accelerator, first part.
 #[inline]
 #[target_feature(enable = "crypto")]
+#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))]
 #[cfg_attr(test, assert_instr(sha1su0))]
 pub unsafe fn vsha1su0q_u32(w0_3: uint32x4_t, w4_7: uint32x4_t, w8_11: uint32x4_t) -> uint32x4_t {
     vsha1su0q_u32_(w0_3, w4_7, w8_11)
@@ -112,6 +135,7 @@ pub unsafe fn vsha1su0q_u32(w0_3: uint32x4_t, w4_7: uint32x4_t, w8_11: uint32x4_
 /// SHA1 schedule update accelerator, second part.
 #[inline]
 #[target_feature(enable = "crypto")]
+#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))]
 #[cfg_attr(test, assert_instr(sha1su1))]
 pub unsafe fn vsha1su1q_u32(tw0_3: uint32x4_t, w12_15: uint32x4_t) -> uint32x4_t {
     vsha1su1q_u32_(tw0_3, w12_15)
@@ -120,6 +144,7 @@ pub unsafe fn vsha1su1q_u32(tw0_3: uint32x4_t, w12_15: uint32x4_t) -> uint32x4_t
 /// SHA256 hash update accelerator.
 #[inline]
 #[target_feature(enable = "crypto")]
+#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))]
 #[cfg_attr(test, assert_instr(sha256h))]
 pub unsafe fn vsha256hq_u32(
     hash_abcd: uint32x4_t,
@@ -132,6 +157,7 @@ pub unsafe fn vsha256hq_u32(
 /// SHA256 hash update accelerator, upper part.
 #[inline]
 #[target_feature(enable = "crypto")]
+#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))]
 #[cfg_attr(test, assert_instr(sha256h2))]
 pub unsafe fn vsha256h2q_u32(
     hash_efgh: uint32x4_t,
@@ -144,6 +170,7 @@ pub unsafe fn vsha256h2q_u32(
 /// SHA256 schedule update accelerator, first part.
 #[inline]
 #[target_feature(enable = "crypto")]
+#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))]
 #[cfg_attr(test, assert_instr(sha256su0))]
 pub unsafe fn vsha256su0q_u32(w0_3: uint32x4_t, w4_7: uint32x4_t) -> uint32x4_t {
     vsha256su0q_u32_(w0_3, w4_7)
@@ -152,6 +179,7 @@ pub unsafe fn vsha256su0q_u32(w0_3: uint32x4_t, w4_7: uint32x4_t) -> uint32x4_t
 /// SHA256 schedule update accelerator, second part.
 #[inline]
 #[target_feature(enable = "crypto")]
+#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))]
 #[cfg_attr(test, assert_instr(sha256su1))]
 pub unsafe fn vsha256su1q_u32(
     tw0_3: uint32x4_t,
@@ -163,7 +191,7 @@ pub unsafe fn vsha256su1q_u32(
 
 #[cfg(test)]
 mod tests {
-    use crate::core_arch::{aarch64::*, simd::*};
+    use crate::core_arch::{arm::*, simd::*};
     use std::mem;
     use stdarch_test::simd_test;
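
Two patterns do the work in this file. First, a single extern declaration is bound to a different LLVM intrinsic per architecture via cfg_attr on link_name. Second, each public wrapper on 32-bit ARM additionally enables the v8 target feature, since the crypto instructions only exist in the ARMv8 A32/T32 encodings. Condensed to one intrinsic from the diff (the same shape the hunks above apply to every intrinsic in the file; the test-only assert_instr attribute is omitted here for brevity):

#[allow(improper_ctypes)]
extern "C" {
    // One declaration, two LLVM symbols, resolved by target_arch at compile time.
    #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.crypto.sha1h")]
    #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.sha1h")]
    fn vsha1h_u32_(hash_e: u32) -> u32;
}

/// SHA1 fixed rotate.
#[inline]
#[target_feature(enable = "crypto")]
// On A32/T32 the crypto feature alone is not enough; the encodings are ARMv8-only.
#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))]
pub unsafe fn vsha1h_u32(hash_e: u32) -> u32 {
    vsha1h_u32_(hash_e)
}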

crates/core_arch/src/arm/mod.rs

+5 −0

@@ -29,6 +29,11 @@ mod crc;
 #[cfg(any(target_arch = "aarch64", target_feature = "v7"))]
 pub use self::crc::*;
 
+#[cfg(any(target_arch = "aarch64", target_feature = "v7"))]
+mod crypto;
+#[cfg(any(target_arch = "aarch64", target_feature = "v7"))]
+pub use self::crypto::*;
+
 pub use crate::core_arch::acle::*;
 
 #[cfg(test)]
