@@ -87,33 +87,32 @@ struct DescriptorRing {
 	// Controlling variables for the ring
 	//
 	/// Where to insert available descriptors next
-	write_index: usize,
+	write_index: u16,
 	/// How many descriptors can be inserted
-	capacity: usize,
+	capacity: u16,
 	/// Where to expect the next used descriptor by the device
-	poll_index: usize,
+	poll_index: u16,
 	/// See Virtio specification v1.1. - 2.7.1
 	drv_wc: WrapCount,
 	dev_wc: WrapCount,
 }

 impl DescriptorRing {
 	fn new(size: u16) -> Self {
-		let size = usize::from(size);
-
 		// Allocate heap memory via a vec, leak and cast
-		let _mem_len =
-			(size * core::mem::size_of::<Descriptor>()).align_up(BasePageSize::SIZE as usize);
+		let _mem_len = (usize::from(size) * core::mem::size_of::<Descriptor>())
+			.align_up(BasePageSize::SIZE as usize);
 		let ptr = ptr::with_exposed_provenance_mut(crate::mm::allocate(_mem_len, true).0 as usize);

-		let ring: &'static mut [Descriptor] = unsafe { core::slice::from_raw_parts_mut(ptr, size) };
+		let ring: &'static mut [Descriptor] =
+			unsafe { core::slice::from_raw_parts_mut(ptr, size.into()) };

 		// Descriptor ID's run from 1 to size_of_queue. In order to index directly into the
 		// reference ring via an ID it is much easier to simply have an array of size = size_of_queue + 1
 		// and do not care about the first element being unused.
 		// `Box` is not Clone, so neither is `None::<Box<_>>`. Hence, we need to produce `None`s with a closure.
 		let tkn_ref_ring = core::iter::repeat_with(|| None)
-			.take(size + 1)
+			.take((size + 1).into())
 			.collect::<Vec<_>>()
 			.into_boxed_slice();

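Aside, not part of the diff: narrowing the indices from `usize` to `u16` is safe because the Virtio specification caps a packed virtqueue at 2^15 descriptors, so every ring index (and the queue size itself) fits into a `u16` without overflow. Below is a minimal, standalone sketch of the wrap-around index arithmetic the ring relies on; `RING_SIZE` and the `Cursor` type are invented for illustration and are not part of the driver.

```rust
// Standalone sketch (not driver code): `u16` is wide enough for packed-ring
// indices because the queue size is at most 2^15 descriptors.
const RING_SIZE: u16 = 256; // at most 2^15 per the spec

struct Cursor {
    position: u16,
    wrap: bool, // driver ring wrap counter, toggled on every wrap-around
}

impl Cursor {
    fn increment(&mut self) {
        // Same `(index + 1) % modulo` pattern the diff uses; `position + 1`
        // is at most 2^15 and therefore cannot overflow a u16.
        self.position = (self.position + 1) % RING_SIZE;
        if self.position == 0 {
            self.wrap = !self.wrap;
        }
    }
}

fn main() {
    let mut c = Cursor { position: RING_SIZE - 1, wrap: true };
    c.increment();
    assert_eq!(c.position, 0);
    assert!(!c.wrap);
}
```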
@@ -141,12 +140,12 @@ impl DescriptorRing {
 		}
 	}

-	fn push_batch(&mut self, tkn_lst: Vec<TransferToken>) -> (usize, u8) {
+	fn push_batch(&mut self, tkn_lst: Vec<TransferToken>) -> (u16, u8) {
 		// Catch empty push, in order to allow zero initialized first_ctrl_settings struct
 		// which will be overwritten in the first iteration of the for-loop
 		assert!(!tkn_lst.is_empty());

-		let mut first_ctrl_settings: (usize, u16, WrapCount) = (0, 0, WrapCount::new());
+		let mut first_ctrl_settings: (u16, u16, WrapCount) = (0, 0, WrapCount::new());
 		let mut first_buffer = None;

 		for (i, tkn) in tkn_lst.into_iter().enumerate() {
@@ -275,13 +274,14 @@ impl DescriptorRing {
 		// The driver performs a suitable memory barrier to ensure the device sees the updated descriptor table and available ring before the next step.
 		// See Virtio specification v1.1. - 2.7.21
 		fence(Ordering::SeqCst);
-		self.ring[first_ctrl_settings.0].flags |= first_ctrl_settings.2.as_flags_avail().into();
+		self.ring[usize::from(first_ctrl_settings.0)].flags |=
+			first_ctrl_settings.2.as_flags_avail().into();

 		// Converting a boolean as u8 is fine
 		(first_ctrl_settings.0, first_ctrl_settings.2 .0 as u8)
 	}

-	fn push(&mut self, tkn: TransferToken) -> (usize, u8) {
+	fn push(&mut self, tkn: TransferToken) -> (u16, u8) {
 		// Check length and if it fits. This should always be true due to the restriction of
 		// the memory pool, but to be sure.
 		assert!(tkn.buff_tkn.as_ref().unwrap().num_consuming_descr() <= self.capacity);
@@ -405,7 +405,7 @@ impl DescriptorRing {
 		WriteCtrl {
 			start: self.write_index,
 			position: self.write_index,
-			modulo: self.ring.len(),
+			modulo: u16::try_from(self.ring.len()).unwrap(),
 			wrap_at_init: self.drv_wc,
 			buff_id: 0,

@@ -418,7 +418,7 @@ impl DescriptorRing {
 	fn get_read_ctrler(&mut self) -> ReadCtrl<'_> {
 		ReadCtrl {
 			position: self.poll_index,
-			modulo: self.ring.len(),
+			modulo: u16::try_from(self.ring.len()).unwrap(),

 			desc_ring: self,
 		}
@@ -427,8 +427,8 @@ impl DescriptorRing {

 struct ReadCtrl<'a> {
 	/// Poll index of the ring at init of ReadCtrl
-	position: usize,
-	modulo: usize,
+	position: u16,
+	modulo: u16,

 	desc_ring: &'a mut DescriptorRing,
 }
@@ -438,10 +438,10 @@ impl<'a> ReadCtrl<'a> {
 	/// updating the queue and returns the respective TransferToken.
 	fn poll_next(&mut self) -> Option<Box<TransferToken>> {
 		// Check if descriptor has been marked used.
-		if self.desc_ring.ring[self.position].flags.get() & WrapCount::flag_mask()
+		if self.desc_ring.ring[usize::from(self.position)].flags.get() & WrapCount::flag_mask()
 			== self.desc_ring.dev_wc.as_flags_used()
 		{
-			let buff_id = usize::from(self.desc_ring.ring[self.position].buff_id);
+			let buff_id = usize::from(self.desc_ring.ring[usize::from(self.position)].buff_id);
 			let mut tkn = self.desc_ring.tkn_ref_ring[buff_id].take().expect(
 				"The buff_id is incorrect or the reference to the TransferToken was misplaced.",
 			);
@@ -472,7 +472,7 @@ impl<'a> ReadCtrl<'a> {
 			// INFO:
 			// Due to the behaviour of the currently used devices and the virtio code from the Linux kernel, we assume that devices do NOT set this
 			// flag correctly upon writes. Hence we omit it, in order to receive data.
-			let write_len = self.desc_ring.ring[self.position].len;
+			let write_len = self.desc_ring.ring[usize::from(self.position)].len;

 			match (send_buff, recv_buff) {
 				(Some(send_buff), Some(recv_buff)) => {
@@ -624,7 +624,8 @@ impl<'a> ReadCtrl<'a> {
 		// self.desc_ring.ring[self.position].address = 0;
 		// self.desc_ring.ring[self.position].len = 0;
 		// self.desc_ring.ring[self.position].buff_id = 0;
-		self.desc_ring.ring[self.position].flags = self.desc_ring.dev_wc.as_flags_used().into();
+		self.desc_ring.ring[usize::from(self.position)].flags =
+			self.desc_ring.dev_wc.as_flags_used().into();
 	}

 	/// Updates the accessible len of the memory areas accessible by the drivers to be consistent with
@@ -673,7 +674,7 @@ impl<'a> ReadCtrl<'a> {
 		}

 		// Increment capacity as we have one more free now!
-		assert!(self.desc_ring.capacity <= self.desc_ring.ring.len());
+		assert!(self.desc_ring.capacity <= u16::try_from(self.desc_ring.ring.len()).unwrap());
 		self.desc_ring.capacity += 1;

 		self.desc_ring.poll_index = (self.desc_ring.poll_index + 1) % self.modulo;
@@ -688,11 +689,11 @@ struct WriteCtrl<'a> {
 	/// Where did the write of the buffer start in the descriptor ring.
 	/// This is important, as we must make this descriptor available
 	/// last.
-	start: usize,
+	start: u16,
 	/// Where to write next. This should always be equal to the Ring's
 	/// write_next field.
-	position: usize,
-	modulo: usize,
+	position: u16,
+	modulo: u16,
 	/// What was the WrapCount at the first write position
 	/// Important in order to set the right avail and used flags
 	wrap_at_init: WrapCount,
@@ -733,7 +734,7 @@ impl<'a> WriteCtrl<'a> {
 		// This also sets the buff_id for the WriteCtrl struct to the ID of the first
 		// descriptor.
 		if self.start == self.position {
-			let desc_ref = &mut self.desc_ring.ring[self.position];
+			let desc_ref = &mut self.desc_ring.ring[usize::from(self.position)];
 			desc_ref
 				.address
 				.set(paging::virt_to_phys(VirtAddr::from(mem_desc.ptr as u64)).into());
@@ -747,7 +748,7 @@ impl<'a> WriteCtrl<'a> {
 			self.buff_id = mem_desc.id.as_ref().unwrap().0;
 			self.incrmt();
 		} else {
-			let desc_ref = &mut self.desc_ring.ring[self.position];
+			let desc_ref = &mut self.desc_ring.ring[usize::from(self.position)];
 			desc_ref
 				.address
 				.set(paging::virt_to_phys(VirtAddr::from(mem_desc.ptr as u64)).into());
@@ -775,7 +776,8 @@ impl<'a> WriteCtrl<'a> {
 		// The driver performs a suitable memory barrier to ensure the device sees the updated descriptor table and available ring before the next step.
 		// See Virtio specification v1.1. - 2.7.21
 		fence(Ordering::SeqCst);
-		self.desc_ring.ring[self.start].flags |= self.wrap_at_init.as_flags_avail().into();
+		self.desc_ring.ring[usize::from(self.start)].flags |=
+			self.wrap_at_init.as_flags_avail().into();
 	}
 }

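Aside, not part of the diff: the `|=` after the fence publishes the first descriptor of a chain by flipping its avail/used flag bits. Per Virtio 1.1, section 2.7.1, AVAIL is bit 7 and USED is bit 15 of the descriptor flags, and the driver marks a descriptor available by setting AVAIL to its current wrap counter and USED to the inverse. A small sketch of that encoding follows; `flags_avail` is a hypothetical stand-in for what `WrapCount::as_flags_avail()` presumably computes.

```rust
// Standalone sketch (not driver code): avail/used bit layout in a packed-ring
// descriptor's flags word (Virtio 1.1, 2.7.1).
const VIRTQ_DESC_F_AVAIL: u16 = 1 << 7;
const VIRTQ_DESC_F_USED: u16 = 1 << 15;

/// Hypothetical stand-in: mark a descriptor available by setting AVAIL to the
/// driver's wrap counter and USED to the inverted wrap counter.
fn flags_avail(wrap_counter: bool) -> u16 {
    if wrap_counter {
        VIRTQ_DESC_F_AVAIL
    } else {
        VIRTQ_DESC_F_USED
    }
}

fn main() {
    assert_eq!(flags_avail(true), 0x0080); // AVAIL = 1, USED = 0
    assert_eq!(flags_avail(false), 0x8000); // AVAIL = 0, USED = 1
}
```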
@@ -898,15 +900,15 @@ impl DevNotif {
 		self.raw.flags & (1 << 0) == 0
 	}

-	fn is_notif_specfic(&self, next_off: usize, next_wrap: u8) -> bool {
+	fn is_notif_specfic(&self, next_off: u16, next_wrap: u8) -> bool {
 		if self.f_notif_idx {
 			if self.raw.flags & 1 << 1 == 2 {
 				// as u16 is okay for usize, as size of queue is restricted to 2^15
 				// it is also okay to just lose the upper 8 bits, as we only check the LSB in the second clause.
 				let desc_event_off = self.raw.event & !(1 << 15);
 				let desc_event_wrap = (self.raw.event >> 15) as u8;

-				desc_event_off == next_off as u16 && desc_event_wrap == next_wrap
+				desc_event_off == next_off && desc_event_wrap == next_wrap
 			} else {
 				false
 			}
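Aside, not part of the diff: `is_notif_specfic` decodes the device's event suppression field, whose low 15 bits carry the descriptor offset the device wants to be notified about and whose top bit carries the expected wrap counter. A standalone sketch of that split, mirroring the masking above; `split_event` is a made-up helper, not driver code.

```rust
// Standalone sketch (not driver code): split a packed-queue event suppression
// `event` field into a 15-bit descriptor offset and a 1-bit wrap counter.
fn split_event(event: u16) -> (u16, u8) {
    let desc_event_off = event & !(1 << 15); // bits 0..=14: ring offset
    let desc_event_wrap = (event >> 15) as u8; // bit 15: wrap counter
    (desc_event_off, desc_event_wrap)
}

fn main() {
    assert_eq!(split_event(3 | (1 << 15)), (3, 1)); // offset 3, wrap bit set
    assert_eq!(split_event(42), (42, 0)); // offset 42, wrap bit clear
}
```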
@@ -965,13 +967,13 @@ impl Virtq for PackedVq {
 		if notif {
 			self.drv_event
 				.borrow_mut()
-				.enable_specific(next_off as u16, next_wrap);
+				.enable_specific(next_off, next_wrap);
 		}

 		if self.dev_event.is_notif() | self.dev_event.is_notif_specfic(next_off, next_wrap) {
 			let index = self.index.0.to_le_bytes();
 			let mut index = index.iter();
-			let det_notif_data: u16 = (next_off as u16) & !(1 << 15);
+			let det_notif_data: u16 = next_off & !(1 << 15);
 			let flags = (det_notif_data | (u16::from(next_wrap) << 15)).to_le_bytes();
 			let mut flags = flags.iter();
 			let mut notif_data: [u8; 4] = [0, 0, 0, 0];
@@ -1007,13 +1009,13 @@ impl Virtq for PackedVq {
 		if notif {
 			self.drv_event
 				.borrow_mut()
-				.enable_specific(next_off as u16, next_wrap);
+				.enable_specific(next_off, next_wrap);
 		}

 		if self.dev_event.is_notif() {
 			let index = self.index.0.to_le_bytes();
 			let mut index = index.iter();
-			let det_notif_data: u16 = (next_off as u16) & !(1 << 15);
+			let det_notif_data: u16 = next_off & !(1 << 15);
 			let flags = (det_notif_data | (u16::from(next_wrap) << 15)).to_le_bytes();
 			let mut flags = flags.iter();
 			let mut notif_data: [u8; 4] = [0, 0, 0, 0];
@@ -1036,13 +1038,13 @@ impl Virtq for PackedVq {
 		if notif {
 			self.drv_event
 				.borrow_mut()
-				.enable_specific(next_off as u16, next_wrap);
+				.enable_specific(next_off, next_wrap);
 		}

 		if self.dev_event.is_notif() {
 			let index = self.index.0.to_le_bytes();
 			let mut index = index.iter();
-			let det_notif_data: u16 = (next_off as u16) & !(1 << 15);
+			let det_notif_data: u16 = next_off & !(1 << 15);
 			let flags = (det_notif_data | (u16::from(next_wrap) << 15)).to_le_bytes();
 			let mut flags = flags.iter();
 			let mut notif_data: [u8; 4] = [0, 0, 0, 0];
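Aside, not part of the diff: all three notification paths above assemble the same 4-byte payload: the queue index as a little-endian `u16`, followed by a little-endian `u16` holding the next descriptor offset in bits 0..=14 and the wrap counter in bit 15. A sketch of that packing with a hypothetical `notif_data` helper; whether the driver only sends this extended payload when the notification-data feature is negotiated is an assumption here.

```rust
// Standalone sketch (not driver code): pack the notification payload built in
// the hunks above into a 4-byte buffer.
fn notif_data(vq_index: u16, next_off: u16, next_wrap: u8) -> [u8; 4] {
    let det_notif_data = next_off & !(1 << 15); // keep only the 15-bit offset
    let flags = det_notif_data | (u16::from(next_wrap) << 15); // wrap counter in bit 15
    let mut out = [0u8; 4];
    out[..2].copy_from_slice(&vq_index.to_le_bytes());
    out[2..].copy_from_slice(&flags.to_le_bytes());
    out
}

fn main() {
    // Queue 1, next descriptor offset 5, wrap counter set.
    assert_eq!(notif_data(1, 5, 1), [0x01, 0x00, 0x05, 0x80]);
}
```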