Skip to content

Commit 1fabed5

Browse files
committed
c_void -> *mut u8
1 parent 3fc4c61 commit 1fabed5

File tree

5 files changed

+26
-48
lines changed

5 files changed

+26
-48
lines changed

coresimd/src/runtime/x86.rs

+3-5
Original file line numberDiff line numberDiff line change
@@ -364,8 +364,7 @@ pub fn detect_features() -> usize {
364364
enable(extended_features_ebx, 5, __Feature::avx2);
365365

366366
// For AVX-512 the OS also needs to support saving/restoring
367-
// the extended state, only then we enable
368-
// AVX-512 support:
367+
// the extended state; only then do we enable AVX-512 support:
369368
if os_avx512_support {
370369
enable(extended_features_ebx, 16, __Feature::avx512f);
371370
enable(extended_features_ebx, 17, __Feature::avx512dq);
@@ -384,8 +383,8 @@ pub fn detect_features() -> usize {
384383
}
385384
}
386385

387-
// Processor Extended State Enumeration Sub-leaf (EAX = 0DH, ECX =
388-
// 1)
386+
// Processor Extended State Enumeration Sub-leaf
387+
// (EAX = 0DH, ECX = 1)
389388
if max_basic_leaf >= 0xd {
390389
let CpuidResult {
391390
eax: proc_extended_state1_eax,
@@ -417,7 +416,6 @@ pub fn detect_features() -> usize {
417416

418417
#[cfg(test)]
419418
mod tests {
420-
#[cfg(feature = "std")]
421419
#[test]
422420
fn runtime_detection_x86_nocapture() {
423421
println!("sse: {:?}", cfg_feature_enabled!("sse"));

coresimd/src/x86/i586/sse.rs

+2-3
Original file line numberDiff line numberDiff line change
@@ -6,7 +6,6 @@ use core::ptr;
66
use simd_llvm::simd_shuffle4;
77
use v128::*;
88
use v64::f32x2;
9-
use x86::c_void;
109

1110
#[cfg(test)]
1211
use stdsimd_test::assert_instr;
@@ -1572,7 +1571,7 @@ pub const _MM_HINT_NTA: i8 = 0;
15721571
#[cfg_attr(test, assert_instr(prefetcht1, strategy = _MM_HINT_T1))]
15731572
#[cfg_attr(test, assert_instr(prefetcht2, strategy = _MM_HINT_T2))]
15741573
#[cfg_attr(test, assert_instr(prefetchnta, strategy = _MM_HINT_NTA))]
1575-
pub unsafe fn _mm_prefetch(p: *const c_void, strategy: i8) {
1574+
pub unsafe fn _mm_prefetch(p: *const u8, strategy: i8) {
15761575
// The `strategy` must be a compile-time constant, so we use a short form
15771576
// of `constify_imm8!` for now.
15781577
// We use the `llvm.prefetch` intrinsic with `rw` = 0 (read), and
@@ -1684,7 +1683,7 @@ extern "C" {
16841683
#[link_name = "llvm.x86.sse.ldmxcsr"]
16851684
fn ldmxcsr(p: *const i8);
16861685
#[link_name = "llvm.prefetch"]
1687-
fn prefetch(p: *const c_void, rw: i32, loc: i32, ty: i32);
1686+
fn prefetch(p: *const u8, rw: i32, loc: i32, ty: i32);
16881687
#[link_name = "llvm.x86.sse.cmp.ss"]
16891688
fn cmpss(a: f32x4, b: f32x4, imm8: i8) -> f32x4;
16901689
}

coresimd/src/x86/i586/sse2.rs

+3-5
Original file line numberDiff line numberDiff line change
@@ -8,7 +8,6 @@ use core::ptr;
88

99
use simd_llvm::{simd_cast, simd_shuffle16, simd_shuffle2, simd_shuffle4,
1010
simd_shuffle8};
11-
use x86::c_void;
1211
use x86::__m128i;
1312
use v128::*;
1413
use v64::*;
@@ -29,7 +28,7 @@ pub unsafe fn _mm_pause() {
2928
#[inline(always)]
3029
#[target_feature = "+sse2"]
3130
#[cfg_attr(test, assert_instr(clflush))]
32-
pub unsafe fn _mm_clflush(p: *mut c_void) {
31+
pub unsafe fn _mm_clflush(p: *mut u8) {
3332
clflush(p)
3433
}
3534

@@ -1989,7 +1988,7 @@ extern "C" {
19891988
#[link_name = "llvm.x86.sse2.pause"]
19901989
fn pause();
19911990
#[link_name = "llvm.x86.sse2.clflush"]
1992-
fn clflush(p: *mut c_void);
1991+
fn clflush(p: *mut u8);
19931992
#[link_name = "llvm.x86.sse2.lfence"]
19941993
fn lfence();
19951994
#[link_name = "llvm.x86.sse2.mfence"]
@@ -2142,7 +2141,6 @@ extern "C" {
21422141

21432142
#[cfg(test)]
21442143
mod tests {
2145-
use super::c_void;
21462144
use stdsimd_test::simd_test;
21472145
use test::black_box; // Used to inhibit constant-folding.
21482146

@@ -2158,7 +2156,7 @@ mod tests {
21582156
#[simd_test = "sse2"]
21592157
unsafe fn _mm_clflush() {
21602158
let x = 0;
2161-
sse2::_mm_clflush(&x as *const _ as *mut c_void);
2159+
sse2::_mm_clflush(&x as *const _ as *mut u8);
21622160
}
21632161

21642162
#[simd_test = "sse2"]

coresimd/src/x86/i586/xsave.rs

+18-21
Original file line numberDiff line numberDiff line change
@@ -5,22 +5,20 @@
55
#[cfg(test)]
66
use stdsimd_test::assert_instr;
77

8-
use x86::c_void;
9-
108
#[allow(improper_ctypes)]
119
extern "C" {
1210
#[link_name = "llvm.x86.xsave"]
1311
fn xsave(p: *mut i8, hi: i32, lo: i32) -> ();
1412
#[link_name = "llvm.x86.xrstor"]
15-
fn xrstor(p: *const c_void, hi: i32, lo: i32) -> ();
13+
fn xrstor(p: *const u8, hi: i32, lo: i32) -> ();
1614
#[link_name = "llvm.x86.xsetbv"]
1715
fn xsetbv(v: i32, hi: i32, lo: i32) -> ();
1816
#[link_name = "llvm.x86.xgetbv"]
1917
fn xgetbv(x: i32) -> i64;
2018
#[link_name = "llvm.x86.xsave64"]
2119
fn xsave64(p: *mut i8, hi: i32, lo: i32) -> ();
2220
#[link_name = "llvm.x86.xrstor64"]
23-
fn xrstor64(p: *const c_void, hi: i32, lo: i32) -> ();
21+
fn xrstor64(p: *const u8, hi: i32, lo: i32) -> ();
2422
#[link_name = "llvm.x86.xsaveopt"]
2523
fn xsaveopt(p: *mut i8, hi: i32, lo: i32) -> ();
2624
#[link_name = "llvm.x86.xsaveopt64"]
@@ -34,9 +32,9 @@ extern "C" {
3432
#[link_name = "llvm.x86.xsaves64"]
3533
fn xsaves64(p: *mut i8, hi: i32, lo: i32) -> ();
3634
#[link_name = "llvm.x86.xrstors"]
37-
fn xrstors(p: *const c_void, hi: i32, lo: i32) -> ();
35+
fn xrstors(p: *const u8, hi: i32, lo: i32) -> ();
3836
#[link_name = "llvm.x86.xrstors64"]
39-
fn xrstors64(p: *const c_void, hi: i32, lo: i32) -> ();
37+
fn xrstors64(p: *const u8, hi: i32, lo: i32) -> ();
4038
}
4139

4240
/// Perform a full or partial save of the enabled processor states to memory at
@@ -50,7 +48,7 @@ extern "C" {
5048
#[inline(always)]
5149
#[target_feature = "+xsave"]
5250
#[cfg_attr(test, assert_instr(xsave))]
53-
pub unsafe fn _xsave(mem_addr: *mut c_void, save_mask: u64) -> () {
51+
pub unsafe fn _xsave(mem_addr: *mut u8, save_mask: u64) -> () {
5452
xsave(mem_addr as *mut i8, (save_mask >> 32) as i32, save_mask as i32);
5553
}
5654

@@ -63,7 +61,7 @@ pub unsafe fn _xsave(mem_addr: *mut c_void, save_mask: u64) -> () {
6361
#[inline(always)]
6462
#[target_feature = "+xsave"]
6563
#[cfg_attr(test, assert_instr(xrstor))]
66-
pub unsafe fn _xrstor(mem_addr: *const c_void, rs_mask: u64) -> () {
64+
pub unsafe fn _xrstor(mem_addr: *const u8, rs_mask: u64) -> () {
6765
xrstor(mem_addr, (rs_mask >> 32) as i32, rs_mask as i32);
6866
}
6967

@@ -104,7 +102,7 @@ pub unsafe fn _xgetbv(xcr_no: u32) -> u64 {
104102
#[target_feature = "+xsave"]
105103
#[cfg_attr(test, assert_instr(xsave64))]
106104
#[cfg(not(target_arch = "x86"))]
107-
pub unsafe fn _xsave64(mem_addr: *mut c_void, save_mask: u64) -> () {
105+
pub unsafe fn _xsave64(mem_addr: *mut u8, save_mask: u64) -> () {
108106
xsave64(mem_addr as *mut i8, (save_mask >> 32) as i32, save_mask as i32);
109107
}
110108

@@ -118,7 +116,7 @@ pub unsafe fn _xsave64(mem_addr: *mut c_void, save_mask: u64) -> () {
118116
#[target_feature = "+xsave"]
119117
#[cfg_attr(test, assert_instr(xrstor64))]
120118
#[cfg(not(target_arch = "x86"))]
121-
pub unsafe fn _xrstor64(mem_addr: *const c_void, rs_mask: u64) -> () {
119+
pub unsafe fn _xrstor64(mem_addr: *const u8, rs_mask: u64) -> () {
122120
xrstor64(mem_addr, (rs_mask >> 32) as i32, rs_mask as i32);
123121
}
124122

@@ -132,7 +130,7 @@ pub unsafe fn _xrstor64(mem_addr: *const c_void, rs_mask: u64) -> () {
132130
#[inline(always)]
133131
#[target_feature = "+xsave,+xsaveopt"]
134132
#[cfg_attr(test, assert_instr(xsaveopt))]
135-
pub unsafe fn _xsaveopt(mem_addr: *mut c_void, save_mask: u64) -> () {
133+
pub unsafe fn _xsaveopt(mem_addr: *mut u8, save_mask: u64) -> () {
136134
xsaveopt(mem_addr as *mut i8, (save_mask >> 32) as i32, save_mask as i32);
137135
}
138136

@@ -147,7 +145,7 @@ pub unsafe fn _xsaveopt(mem_addr: *mut c_void, save_mask: u64) -> () {
147145
#[target_feature = "+xsave,+xsaveopt"]
148146
#[cfg_attr(test, assert_instr(xsaveopt64))]
149147
#[cfg(not(target_arch = "x86"))]
150-
pub unsafe fn _xsaveopt64(mem_addr: *mut c_void, save_mask: u64) -> () {
148+
pub unsafe fn _xsaveopt64(mem_addr: *mut u8, save_mask: u64) -> () {
151149
xsaveopt64(
152150
mem_addr as *mut i8,
153151
(save_mask >> 32) as i32,
@@ -164,7 +162,7 @@ pub unsafe fn _xsaveopt64(mem_addr: *mut c_void, save_mask: u64) -> () {
164162
#[inline(always)]
165163
#[target_feature = "+xsave,+xsavec"]
166164
#[cfg_attr(test, assert_instr(xsavec))]
167-
pub unsafe fn _xsavec(mem_addr: *mut c_void, save_mask: u64) -> () {
165+
pub unsafe fn _xsavec(mem_addr: *mut u8, save_mask: u64) -> () {
168166
xsavec(mem_addr as *mut i8, (save_mask >> 32) as i32, save_mask as i32);
169167
}
170168

@@ -178,7 +176,7 @@ pub unsafe fn _xsavec(mem_addr: *mut c_void, save_mask: u64) -> () {
178176
#[target_feature = "+xsave,+xsavec"]
179177
#[cfg_attr(test, assert_instr(xsavec64))]
180178
#[cfg(not(target_arch = "x86"))]
181-
pub unsafe fn _xsavec64(mem_addr: *mut c_void, save_mask: u64) -> () {
179+
pub unsafe fn _xsavec64(mem_addr: *mut u8, save_mask: u64) -> () {
182180
xsavec64(mem_addr as *mut i8, (save_mask >> 32) as i32, save_mask as i32);
183181
}
184182

@@ -192,7 +190,7 @@ pub unsafe fn _xsavec64(mem_addr: *mut c_void, save_mask: u64) -> () {
192190
#[inline(always)]
193191
#[target_feature = "+xsave,+xsaves"]
194192
#[cfg_attr(test, assert_instr(xsaves))]
195-
pub unsafe fn _xsaves(mem_addr: *mut c_void, save_mask: u64) -> () {
193+
pub unsafe fn _xsaves(mem_addr: *mut u8, save_mask: u64) -> () {
196194
xsaves(mem_addr as *mut i8, (save_mask >> 32) as i32, save_mask as i32);
197195
}
198196

@@ -207,7 +205,7 @@ pub unsafe fn _xsaves(mem_addr: *mut c_void, save_mask: u64) -> () {
207205
#[target_feature = "+xsave,+xsaves"]
208206
#[cfg_attr(test, assert_instr(xsaves64))]
209207
#[cfg(not(target_arch = "x86"))]
210-
pub unsafe fn _xsaves64(mem_addr: *mut c_void, save_mask: u64) -> () {
208+
pub unsafe fn _xsaves64(mem_addr: *mut u8, save_mask: u64) -> () {
211209
xsaves64(mem_addr as *mut i8, (save_mask >> 32) as i32, save_mask as i32);
212210
}
213211

@@ -223,7 +221,7 @@ pub unsafe fn _xsaves64(mem_addr: *mut c_void, save_mask: u64) -> () {
223221
#[inline(always)]
224222
#[target_feature = "+xsave,+xsaves"]
225223
#[cfg_attr(test, assert_instr(xrstors))]
226-
pub unsafe fn _xrstors(mem_addr: *const c_void, rs_mask: u64) -> () {
224+
pub unsafe fn _xrstors(mem_addr: *const u8, rs_mask: u64) -> () {
227225
xrstors(mem_addr, (rs_mask >> 32) as i32, rs_mask as i32);
228226
}
229227

@@ -240,13 +238,12 @@ pub unsafe fn _xrstors(mem_addr: *const c_void, rs_mask: u64) -> () {
240238
#[target_feature = "+xsave,+xsaves"]
241239
#[cfg_attr(test, assert_instr(xrstors64))]
242240
#[cfg(not(target_arch = "x86"))]
243-
pub unsafe fn _xrstors64(mem_addr: *const c_void, rs_mask: u64) -> () {
241+
pub unsafe fn _xrstors64(mem_addr: *const u8, rs_mask: u64) -> () {
244242
xrstors64(mem_addr, (rs_mask >> 32) as i32, rs_mask as i32);
245243
}
246244

247245
#[cfg(test)]
248246
mod tests {
249-
use x86::c_void;
250247
use x86::i586::xsave;
251248
use stdsimd_test::simd_test;
252249
use std::fmt;
@@ -260,8 +257,8 @@ mod tests {
260257
fn new() -> Buffer {
261258
Buffer { data: [0; 1024] }
262259
}
263-
fn ptr(&mut self) -> *mut c_void {
264-
&mut self.data[0] as *mut _ as *mut c_void
260+
fn ptr(&mut self) -> *mut u8 {
261+
&mut self.data[0] as *mut _ as *mut u8
265262
}
266263
}
267264

coresimd/src/x86/mod.rs

-14
Original file line numberDiff line numberDiff line change
@@ -32,17 +32,3 @@ pub type __m128i = ::v128::i8x16;
3232
/// 256-bit wide signed integer vector type
3333
#[allow(non_camel_case_types)]
3434
pub type __m256i = ::v256::i8x32;
35-
36-
37-
/// `C`'s `void` type.
38-
#[cfg(not(feature = "std"))]
39-
#[allow(non_camel_case_types)]
40-
#[repr(u8)]
41-
pub enum c_void {
42-
#[doc(hidden)] __variant1,
43-
#[doc(hidden)] __variant2,
44-
}
45-
46-
// FIXME: we should not depend on std for this
47-
#[cfg(feature = "std")]
48-
use std::os::raw::c_void;

0 commit comments

Comments
 (0)