@@ -2,35 +2,49 @@ use crate::core_arch::arm::{uint32x4_t, uint8x16_t};

#[allow(improper_ctypes)]
extern "C" {
-    #[link_name = "llvm.aarch64.crypto.aese"]
+    #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.crypto.aese")]
+    #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.aese")]
    fn vaeseq_u8_(data: uint8x16_t, key: uint8x16_t) -> uint8x16_t;
-    #[link_name = "llvm.aarch64.crypto.aesd"]
+    #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.crypto.aesd")]
+    #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.aesd")]
    fn vaesdq_u8_(data: uint8x16_t, key: uint8x16_t) -> uint8x16_t;
-    #[link_name = "llvm.aarch64.crypto.aesmc"]
+    #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.crypto.aesmc")]
+    #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.aesmc")]
    fn vaesmcq_u8_(data: uint8x16_t) -> uint8x16_t;
-    #[link_name = "llvm.aarch64.crypto.aesimc"]
+    #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.crypto.aesimc")]
+    #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.aesimc")]
    fn vaesimcq_u8_(data: uint8x16_t) -> uint8x16_t;

-    #[link_name = "llvm.aarch64.crypto.sha1h"]
+    #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.crypto.sha1h")]
+    #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.sha1h")]
    fn vsha1h_u32_(hash_e: u32) -> u32;
-    #[link_name = "llvm.aarch64.crypto.sha1su0"]
+    #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.crypto.sha1su0")]
+    #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.sha1su0")]
    fn vsha1su0q_u32_(w0_3: uint32x4_t, w4_7: uint32x4_t, w8_11: uint32x4_t) -> uint32x4_t;
-    #[link_name = "llvm.aarch64.crypto.sha1su1"]
+    #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.crypto.sha1su1")]
+    #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.sha1su1")]
    fn vsha1su1q_u32_(tw0_3: uint32x4_t, w12_15: uint32x4_t) -> uint32x4_t;
-    #[link_name = "llvm.aarch64.crypto.sha1c"]
+    #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.crypto.sha1c")]
+    #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.sha1c")]
    fn vsha1cq_u32_(hash_abcd: uint32x4_t, hash_e: u32, wk: uint32x4_t) -> uint32x4_t;
-    #[link_name = "llvm.aarch64.crypto.sha1p"]
+    #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.crypto.sha1p")]
+    #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.sha1p")]
    fn vsha1pq_u32_(hash_abcd: uint32x4_t, hash_e: u32, wk: uint32x4_t) -> uint32x4_t;
-    #[link_name = "llvm.aarch64.crypto.sha1m"]
+    #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.crypto.sha1m")]
+    #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.sha1m")]
    fn vsha1mq_u32_(hash_abcd: uint32x4_t, hash_e: u32, wk: uint32x4_t) -> uint32x4_t;

-    #[link_name = "llvm.aarch64.crypto.sha256h"]
+    #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.crypto.sha256h")]
+    #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.sha256h")]
    fn vsha256hq_u32_(hash_abcd: uint32x4_t, hash_efgh: uint32x4_t, wk: uint32x4_t) -> uint32x4_t;
-    #[link_name = "llvm.aarch64.crypto.sha256h2"]
+    #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.crypto.sha256h2")]
+    #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.sha256h2")]
    fn vsha256h2q_u32_(hash_efgh: uint32x4_t, hash_abcd: uint32x4_t, wk: uint32x4_t) -> uint32x4_t;
-    #[link_name = "llvm.aarch64.crypto.sha256su0"]
+    #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.crypto.sha256su0")]
+    #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.sha256su0")]
    fn vsha256su0q_u32_(w0_3: uint32x4_t, w4_7: uint32x4_t) -> uint32x4_t;
-    #[link_name = "llvm.aarch64.crypto.sha256su1"]
+    #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.crypto.sha256su1")]
+    #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.sha256su1")]
    fn vsha256su1q_u32_(tw0_3: uint32x4_t, w8_11: uint32x4_t, w12_15: uint32x4_t) -> uint32x4_t;
}
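Aside, not part of the patch: the parameter names in the block above encode the usual pairing of the SHA-256 schedule-update intrinsics, where the partial result of vsha256su0q_u32 feeds vsha256su1q_u32. A minimal sketch of that idiom through the public std::arch::aarch64 wrappers follows; it assumes current std naming, where these intrinsics sit behind the "sha2" target feature rather than the "crypto" feature used in this file.

// Sketch only: extend the SHA-256 message schedule by four words (w16..w19)
// from the previous sixteen (w0..w15), assuming current std::arch naming.
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "sha2")]
unsafe fn sha256_schedule_step(
    w0_3: std::arch::aarch64::uint32x4_t,
    w4_7: std::arch::aarch64::uint32x4_t,
    w8_11: std::arch::aarch64::uint32x4_t,
    w12_15: std::arch::aarch64::uint32x4_t,
) -> std::arch::aarch64::uint32x4_t {
    use std::arch::aarch64::{vsha256su0q_u32, vsha256su1q_u32};
    // su0 combines w0..w7; su1 folds in w8..w15 to produce the next four words.
    unsafe { vsha256su1q_u32(vsha256su0q_u32(w0_3, w4_7), w8_11, w12_15) }
}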
@@ -40,6 +54,7 @@ use stdarch_test::assert_instr;
/// AES single round encryption.
#[inline]
#[target_feature(enable = "crypto")]
+#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))]
#[cfg_attr(test, assert_instr(aese))]
pub unsafe fn vaeseq_u8(data: uint8x16_t, key: uint8x16_t) -> uint8x16_t {
    vaeseq_u8_(data, key)
@@ -48,6 +63,7 @@ pub unsafe fn vaeseq_u8(data: uint8x16_t, key: uint8x16_t) -> uint8x16_t {
/// AES single round decryption.
#[inline]
#[target_feature(enable = "crypto")]
+#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))]
#[cfg_attr(test, assert_instr(aesd))]
pub unsafe fn vaesdq_u8(data: uint8x16_t, key: uint8x16_t) -> uint8x16_t {
    vaesdq_u8_(data, key)
@@ -56,6 +72,7 @@ pub unsafe fn vaesdq_u8(data: uint8x16_t, key: uint8x16_t) -> uint8x16_t {
/// AES mix columns.
#[inline]
#[target_feature(enable = "crypto")]
+#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))]
#[cfg_attr(test, assert_instr(aesmc))]
pub unsafe fn vaesmcq_u8(data: uint8x16_t) -> uint8x16_t {
    vaesmcq_u8_(data)
@@ -64,6 +81,7 @@ pub unsafe fn vaesmcq_u8(data: uint8x16_t) -> uint8x16_t {
/// AES inverse mix columns.
#[inline]
#[target_feature(enable = "crypto")]
+#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))]
#[cfg_attr(test, assert_instr(aesimc))]
pub unsafe fn vaesimcq_u8(data: uint8x16_t) -> uint8x16_t {
    vaesimcq_u8_(data)
@@ -72,6 +90,7 @@ pub unsafe fn vaesimcq_u8(data: uint8x16_t) -> uint8x16_t {
/// SHA1 fixed rotate.
#[inline]
#[target_feature(enable = "crypto")]
+#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))]
#[cfg_attr(test, assert_instr(sha1h))]
pub unsafe fn vsha1h_u32(hash_e: u32) -> u32 {
    vsha1h_u32_(hash_e)
@@ -80,6 +99,7 @@ pub unsafe fn vsha1h_u32(hash_e: u32) -> u32 {
/// SHA1 hash update accelerator, choose.
#[inline]
#[target_feature(enable = "crypto")]
+#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))]
#[cfg_attr(test, assert_instr(sha1c))]
pub unsafe fn vsha1cq_u32(hash_abcd: uint32x4_t, hash_e: u32, wk: uint32x4_t) -> uint32x4_t {
    vsha1cq_u32_(hash_abcd, hash_e, wk)
@@ -88,6 +108,7 @@ pub unsafe fn vsha1cq_u32(hash_abcd: uint32x4_t, hash_e: u32, wk: uint32x4_t) ->
/// SHA1 hash update accelerator, majority.
#[inline]
#[target_feature(enable = "crypto")]
+#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))]
#[cfg_attr(test, assert_instr(sha1m))]
pub unsafe fn vsha1mq_u32(hash_abcd: uint32x4_t, hash_e: u32, wk: uint32x4_t) -> uint32x4_t {
    vsha1mq_u32_(hash_abcd, hash_e, wk)
@@ -96,6 +117,7 @@ pub unsafe fn vsha1mq_u32(hash_abcd: uint32x4_t, hash_e: u32, wk: uint32x4_t) ->
/// SHA1 hash update accelerator, parity.
#[inline]
#[target_feature(enable = "crypto")]
+#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))]
#[cfg_attr(test, assert_instr(sha1p))]
pub unsafe fn vsha1pq_u32(hash_abcd: uint32x4_t, hash_e: u32, wk: uint32x4_t) -> uint32x4_t {
    vsha1pq_u32_(hash_abcd, hash_e, wk)
@@ -104,6 +126,7 @@ pub unsafe fn vsha1pq_u32(hash_abcd: uint32x4_t, hash_e: u32, wk: uint32x4_t) ->
/// SHA1 schedule update accelerator, first part.
#[inline]
#[target_feature(enable = "crypto")]
+#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))]
#[cfg_attr(test, assert_instr(sha1su0))]
pub unsafe fn vsha1su0q_u32(w0_3: uint32x4_t, w4_7: uint32x4_t, w8_11: uint32x4_t) -> uint32x4_t {
    vsha1su0q_u32_(w0_3, w4_7, w8_11)
@@ -112,6 +135,7 @@ pub unsafe fn vsha1su0q_u32(w0_3: uint32x4_t, w4_7: uint32x4_t, w8_11: uint32x4_
/// SHA1 schedule update accelerator, second part.
#[inline]
#[target_feature(enable = "crypto")]
+#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))]
#[cfg_attr(test, assert_instr(sha1su1))]
pub unsafe fn vsha1su1q_u32(tw0_3: uint32x4_t, w12_15: uint32x4_t) -> uint32x4_t {
    vsha1su1q_u32_(tw0_3, w12_15)
@@ -120,6 +144,7 @@ pub unsafe fn vsha1su1q_u32(tw0_3: uint32x4_t, w12_15: uint32x4_t) -> uint32x4_t
/// SHA256 hash update accelerator.
#[inline]
#[target_feature(enable = "crypto")]
+#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))]
#[cfg_attr(test, assert_instr(sha256h))]
pub unsafe fn vsha256hq_u32(
    hash_abcd: uint32x4_t,
@@ -132,6 +157,7 @@ pub unsafe fn vsha256hq_u32(
/// SHA256 hash update accelerator, upper part.
#[inline]
#[target_feature(enable = "crypto")]
+#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))]
#[cfg_attr(test, assert_instr(sha256h2))]
pub unsafe fn vsha256h2q_u32(
    hash_efgh: uint32x4_t,
@@ -144,6 +170,7 @@ pub unsafe fn vsha256h2q_u32(
/// SHA256 schedule update accelerator, first part.
#[inline]
#[target_feature(enable = "crypto")]
+#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))]
#[cfg_attr(test, assert_instr(sha256su0))]
pub unsafe fn vsha256su0q_u32(w0_3: uint32x4_t, w4_7: uint32x4_t) -> uint32x4_t {
    vsha256su0q_u32_(w0_3, w4_7)
@@ -152,6 +179,7 @@ pub unsafe fn vsha256su0q_u32(w0_3: uint32x4_t, w4_7: uint32x4_t) -> uint32x4_t
/// SHA256 schedule update accelerator, second part.
#[inline]
#[target_feature(enable = "crypto")]
+#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))]
#[cfg_attr(test, assert_instr(sha256su1))]
pub unsafe fn vsha256su1q_u32(
    tw0_3: uint32x4_t,
@@ -163,7 +191,7 @@ pub unsafe fn vsha256su1q_u32(

#[cfg(test)]
mod tests {
-    use crate::core_arch::{aarch64::*, simd::*};
+    use crate::core_arch::{arm::*, simd::*};
    use std::mem;
    use stdarch_test::simd_test;
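For reference, a usage sketch, not part of the patch, of how the AES intrinsics bound here are reached from user code through the public std::arch::aarch64 API on a 64-bit target. The "aes" feature name and the runtime-detection macro reflect current std rather than the "crypto" feature used above.

// Sketch only: one AES forward round via the public AArch64 wrappers.
#[cfg(target_arch = "aarch64")]
mod aes_demo {
    use std::arch::aarch64::{uint8x16_t, vaeseq_u8, vaesmcq_u8, vdupq_n_u8};

    // AESE performs AddRoundKey + SubBytes + ShiftRows; AESMC performs MixColumns.
    #[target_feature(enable = "aes")]
    unsafe fn aes_round(block: uint8x16_t, round_key: uint8x16_t) -> uint8x16_t {
        unsafe { vaesmcq_u8(vaeseq_u8(block, round_key)) }
    }

    pub fn run() {
        if std::arch::is_aarch64_feature_detected!("aes") {
            // Dummy inputs; a real cipher would feed expanded round keys here.
            let out = unsafe { aes_round(vdupq_n_u8(0x42), vdupq_n_u8(0x00)) };
            let _ = out;
        }
    }
}

fn main() {
    #[cfg(target_arch = "aarch64")]
    aes_demo::run();
}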