use std::mem::ManuallyDrop;
use std::num::Wrapping;

-use vm_memory::{FileOffset, GuestMemoryRegion, MemoryRegionAddress, MmapRegion};
+use vm_memory::{AtomicAccess, GuestMemoryError, GuestMemoryRegion, MemoryRegionAddress};
+
+use std::io::{Read, Write};
+use std::mem::MaybeUninit;
+use vm_memory::ByteValued;

use super::*;

+pub struct StubRegion {
+    buffer: *mut u8,
+    region_len: usize,
+    region_start: GuestAddress,
+}
+
+// A stub region that implements the GuestMemoryRegion and Bytes traits. This is
+// used to create a single memory region for testing purposes in Kani. It allows
+// us to simulate a memory region without needing a full memory management
+// implementation. The region is backed by a raw pointer to a buffer, which is
+// allocated and leaked to ensure a stable address. The region supports reading
+// and writing bytes, objects, and slices. It also provides methods to convert
+// between guest addresses and memory region addresses, and to check offsets
+// within the region.
+impl StubRegion {
+    pub fn new(buf_ptr: *mut u8, buf_len: usize, start_offset: u64) -> Self {
+        Self {
+            buffer: buf_ptr,
+            region_len: buf_len,
+            region_start: GuestAddress(start_offset),
+        }
+    }
+
+    fn to_region_addr(&self, addr: GuestAddress) -> Option<MemoryRegionAddress> {
+        let offset = addr
+            .raw_value()
+            .checked_sub(self.region_start.raw_value())?;
+        if offset < self.region_len as u64 {
+            Some(MemoryRegionAddress(offset))
+        } else {
+            None
+        }
+    }
+
+    fn checked_offset(
+        &self,
+        addr: MemoryRegionAddress,
+        count: usize,
+    ) -> Option<MemoryRegionAddress> {
+        let end = addr.0.checked_add(count as u64)?;
+        if end <= self.region_len as u64 {
+            Some(MemoryRegionAddress(end))
+        } else {
+            None
+        }
+    }
+}
+
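
A rough usage sketch, illustrative only and not part of the diff: assuming the backing buffer is leaked (here via `Box::leak`, standing in for however the harness keeps it alive) so that the raw pointer stays valid, the stub can be built and queried like this:

    let buf: &'static mut [u8] = Box::leak(vec![0u8; 32].into_boxed_slice());
    let region = StubRegion::new(buf.as_mut_ptr(), buf.len(), 0x100);
    // Guest address 0x104 lands 4 bytes into the 32-byte region...
    assert_eq!(
        region.to_region_addr(GuestAddress(0x104)),
        Some(MemoryRegionAddress(0x4))
    );
    // ...while an address past the end of the region maps to nothing.
    assert_eq!(region.to_region_addr(GuestAddress(0x200)), None);
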
+impl GuestMemoryRegion for StubRegion {
+    type B = ();
+
+    fn len(&self) -> <GuestAddress as vm_memory::AddressValue>::V {
+        self.region_len.try_into().unwrap()
+    }
+
+    fn start_addr(&self) -> GuestAddress {
+        self.region_start
+    }
+
+    fn bitmap(&self) -> &Self::B {
+        // For Kani, we do not need a bitmap, so we return an empty tuple.
+        &()
+    }
+}
+
+// Implements the Bytes trait for StubRegion, allowing it to read and write
+// bytes and objects. This is used to simulate memory operations in Kani proofs.
+impl Bytes<MemoryRegionAddress> for StubRegion {
+    type E = GuestMemoryError;
+
+    fn write(&self, buf: &[u8], addr: MemoryRegionAddress) -> Result<usize, Self::E> {
+        let offset = addr.0 as usize;
+        let end = offset
+            .checked_add(buf.len())
+            .ok_or(GuestMemoryError::InvalidGuestAddress(GuestAddress(addr.0)))?;
+        if end > self.region_len as usize {
+            return Err(GuestMemoryError::InvalidGuestAddress(GuestAddress(addr.0)));
+        }
+        unsafe {
+            std::ptr::copy_nonoverlapping(buf.as_ptr(), self.buffer.add(offset), buf.len());
+        }
+        Ok(buf.len())
+    }
+
+    fn read(&self, buf: &mut [u8], addr: MemoryRegionAddress) -> Result<usize, Self::E> {
+        let offset = addr.0 as usize;
+        let end = offset
+            .checked_add(buf.len())
+            .ok_or(GuestMemoryError::InvalidGuestAddress(GuestAddress(addr.0)))?;
+        if end > self.region_len as usize {
+            return Err(GuestMemoryError::InvalidGuestAddress(GuestAddress(addr.0)));
+        }
+        unsafe {
+            std::ptr::copy_nonoverlapping(self.buffer.add(offset), buf.as_mut_ptr(), buf.len());
+        }
+        Ok(buf.len())
+    }
+
+    fn write_slice(&self, buf: &[u8], addr: MemoryRegionAddress) -> Result<(), Self::E> {
+        self.write(buf, addr).map(|_| ())
+    }
+
+    fn read_slice(&self, buf: &mut [u8], addr: MemoryRegionAddress) -> Result<(), Self::E> {
+        self.read(buf, addr).map(|_| ())
+    }
+
+    fn read_from<F: Read>(
+        &self,
+        addr: MemoryRegionAddress,
+        src: &mut F,
+        count: usize,
+    ) -> Result<usize, Self::E> {
+        let mut temp = vec![0u8; count];
+        src.read_exact(&mut temp)
+            .map_err(|_| GuestMemoryError::PartialBuffer {
+                expected: count,
+                completed: 0,
+            })?;
+        self.write(&temp, addr)
+    }
+
+    fn read_exact_from<F: Read>(
+        &self,
+        addr: MemoryRegionAddress,
+        src: &mut F,
+        count: usize,
+    ) -> Result<(), Self::E> {
+        let mut temp = vec![0u8; count];
+        src.read_exact(&mut temp)
+            .map_err(|_| GuestMemoryError::PartialBuffer {
+                expected: count,
+                completed: 0,
+            })?;
+        self.write_slice(&temp, addr)
+    }
+
+    fn read_obj<T: ByteValued>(&self, addr: MemoryRegionAddress) -> Result<T, Self::E> {
+        let size = std::mem::size_of::<T>();
+        let offset = addr.0 as usize;
+        let end = offset
+            .checked_add(size)
+            .ok_or(GuestMemoryError::InvalidGuestAddress(GuestAddress(addr.0)))?;
+        if end > self.region_len as usize {
+            return Err(GuestMemoryError::InvalidGuestAddress(GuestAddress(addr.0)));
+        }
+        let mut result: T = unsafe { MaybeUninit::<T>::zeroed().assume_init() };
+        unsafe {
+            std::ptr::copy_nonoverlapping(
+                self.buffer.add(offset),
+                (&mut result as *mut T) as *mut u8,
+                size,
+            );
+        }
+        Ok(result)
+    }
+
+    fn write_to<F: Write>(
+        &self,
+        addr: MemoryRegionAddress,
+        dst: &mut F,
+        count: usize,
+    ) -> Result<usize, Self::E> {
+        let offset = addr.0 as usize;
+        let end = offset
+            .checked_add(count)
+            .ok_or(GuestMemoryError::InvalidGuestAddress(GuestAddress(addr.0)))?;
+        if end > self.region_len as usize {
+            return Err(GuestMemoryError::InvalidGuestAddress(GuestAddress(addr.0)));
+        }
+        unsafe {
+            let slice = std::slice::from_raw_parts(self.buffer.add(offset), count);
+            dst.write_all(slice)
+                .map_err(|_| GuestMemoryError::PartialBuffer {
+                    expected: count,
+                    completed: 0,
+                })?;
+        }
+        Ok(count)
+    }
+
+    fn write_obj<T: ByteValued>(&self, val: T, addr: MemoryRegionAddress) -> Result<(), Self::E> {
+        let size = std::mem::size_of::<T>();
+        let offset = addr.0 as usize;
+        let end = offset
+            .checked_add(size)
+            .ok_or(GuestMemoryError::InvalidGuestAddress(GuestAddress(addr.0)))?;
+        if end > self.region_len as usize {
+            return Err(GuestMemoryError::InvalidGuestAddress(GuestAddress(addr.0)));
+        }
+        let bytes = val.as_slice();
+        unsafe {
+            std::ptr::copy_nonoverlapping(bytes.as_ptr(), self.buffer.add(offset), size);
+        }
+        Ok(())
+    }
+
+    fn write_all_to<F: Write>(
+        &self,
+        addr: MemoryRegionAddress,
+        dst: &mut F,
+        count: usize,
+    ) -> Result<(), Self::E> {
+        self.write_to(addr, dst, count).map(|_| ())
+    }
+
+    fn store<T: AtomicAccess>(
+        &self,
+        val: T,
+        addr: MemoryRegionAddress,
+        _order: Ordering,
+    ) -> Result<(), Self::E> {
+        let size = std::mem::size_of::<T>();
+        let offset = addr.0 as usize;
+        let end = offset
+            .checked_add(size)
+            .ok_or(GuestMemoryError::InvalidGuestAddress(GuestAddress(addr.0)))?;
+        if end > self.region_len as usize {
+            return Err(GuestMemoryError::InvalidGuestAddress(GuestAddress(addr.0)));
+        }
+        let bytes = val.as_slice();
+        unsafe {
+            std::ptr::copy_nonoverlapping(bytes.as_ptr(), self.buffer.add(offset), size);
+        }
+        Ok(())
+    }
+
+    fn load<T: AtomicAccess>(
+        &self,
+        addr: MemoryRegionAddress,
+        _order: Ordering,
+    ) -> Result<T, Self::E> {
+        let size = std::mem::size_of::<T>();
+        let offset = addr.0 as usize;
+        let end = offset
+            .checked_add(size)
+            .ok_or(GuestMemoryError::InvalidGuestAddress(GuestAddress(addr.0)))?;
+        if end > self.region_len as usize {
+            return Err(GuestMemoryError::InvalidGuestAddress(GuestAddress(addr.0)));
+        }
+        unsafe {
+            let slice = std::slice::from_raw_parts(self.buffer.add(offset), size);
+            T::from_slice(slice)
+                .ok_or_else(|| GuestMemoryError::InvalidGuestAddress(GuestAddress(addr.0)))
+                .copied()
+        }
+    }
+}
+
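
As a complement to the byte-level proof below, here is a sketch (not part of the diff) of how typed values round-trip through the `Bytes` impl above; any `ByteValued` type is copied to and from the backing buffer byte-for-byte, and out-of-bounds accesses fail instead of touching memory outside the region:

    let buf: &'static mut [u8] = Box::leak(vec![0u8; 16].into_boxed_slice());
    let region = StubRegion::new(buf.as_mut_ptr(), buf.len(), 0);
    region.write_obj(0xdead_beef_u32, MemoryRegionAddress(8)).unwrap();
    assert_eq!(region.read_obj::<u32>(MemoryRegionAddress(8)).unwrap(), 0xdead_beef_u32);
    // A u64 starting at offset 12 would run past the 16-byte buffer, so it is rejected.
    assert!(region.write_obj(0u64, MemoryRegionAddress(12)).is_err());
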
+// A proof that writing to a StubRegion and reading back yields the same bytes.
+#[kani::proof]
+#[kani::unwind(0)]
+fn verify_stubregion_write_read() {
+    // Prepare a buffer and a StubRegion backed by it.
+    let mut buffer = kani::vec::exact_vec::<u8, 16>();
+    let region = StubRegion::new(buffer.as_mut_ptr(), buffer.len(), 0);
+
+    // Arbitrary bytes to write.
+    let bytes: [u8; 16] = kani::any();
+    // Write the bytes at a nondeterministic offset that keeps the write in bounds.
+    let write_offset: usize = kani::any();
+    kani::assume(write_offset <= buffer.len() - 16);
+    assert!(region
+        .write(&bytes, MemoryRegionAddress(write_offset as u64))
+        .is_ok());
+
+    // Read back into a new buffer.
+    let mut readback = kani::vec::exact_vec::<u8, 16>();
+    assert!(region
+        .read(&mut readback, MemoryRegionAddress(write_offset as u64))
+        .is_ok());
+
+    // Choose a nondeterministic index and check that both buffers match there.
+    let idx: usize = kani::any();
+    kani::assume(idx < 16);
+    assert_eq!(bytes[idx], readback[idx]);
+}
+
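
Assuming the usual Kani workflow for this crate, the harness above can be checked on its own with something like:

    cargo kani --harness verify_stubregion_write_read
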
/// A made-for-kani version of `vm_memory::GuestMemoryMmap`. Unlike the real
/// `GuestMemoryMmap`, which manages a list of regions and then does a binary
/// search to determine which region a specific read or write request goes to,
@@ -14,11 +295,11 @@ use super::*;
/// meaning we can use `kani::unwind(0)` instead of `kani::unwind(2)`. Functionally,
/// it works identically to `GuestMemoryMmap` with only a single contained region.
pub struct SingleRegionGuestMemory {
-    the_region: vm_memory::GuestRegionMmap,
+    the_region: StubRegion,
}

impl GuestMemory for SingleRegionGuestMemory {
-    type R = vm_memory::GuestRegionMmap;
+    type R = StubRegion;

    fn num_regions(&self) -> usize {
        1
@@ -64,27 +345,20 @@ impl GuestMemory for SingleRegionGuestMemory {

impl kani::Arbitrary for SingleRegionGuestMemory {
    fn any() -> Self {
-        guest_memory(
-            ManuallyDrop::new(kani::vec::exact_vec::<u8, GUEST_MEMORY_SIZE>()).as_mut_ptr(),
-        )
+        let memory =
+            ManuallyDrop::new(kani::vec::exact_vec::<u8, GUEST_MEMORY_SIZE>()).as_mut_ptr();
+        let size = GUEST_MEMORY_SIZE;
+        let start = GUEST_MEMORY_BASE;
+        Self {
+            the_region: StubRegion::new(memory, size, start),
+        }
    }
}
pub struct ProofContext {
    pub queue: Queue,
    pub memory: SingleRegionGuestMemory,
}

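
Since `SingleRegionGuestMemory` implements `kani::Arbitrary`, a harness can conjure guest memory nondeterministically. A rough sketch (not part of the diff, and assuming the blanket `Bytes<GuestAddress>` impl that vm-memory provides for `GuestMemory` implementors):

    let memory: SingleRegionGuestMemory = kani::any();
    // All accesses are routed to the single StubRegion defined above.
    memory.write_obj(0u16, GuestAddress(GUEST_MEMORY_BASE)).unwrap();
    assert_eq!(memory.read_obj::<u16>(GuestAddress(GUEST_MEMORY_BASE)).unwrap(), 0);
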
-pub struct MmapRegionStub {
-    _addr: *mut u8,
-    _size: usize,
-    _bitmap: (),
-    _file_offset: Option<FileOffset>,
-    _prot: i32,
-    _flags: i32,
-    _owned: bool,
-    _hugetlbfs: Option<bool>,
-}
-
/// We start the first guest memory region at an offset so that harnesses using
/// Queue::any() will be exposed to queue segments both before and after valid guest memory.
/// This is conforming to MockSplitQueue::new() that uses `0` as starting address of the
@@ -97,43 +371,6 @@ const GUEST_MEMORY_BASE: u64 = 0;
// able to change its address, as it is 16-byte aligned.
const GUEST_MEMORY_SIZE: usize = QUEUE_END as usize + 30;

-fn guest_memory(memory: *mut u8) -> SingleRegionGuestMemory {
-    // Ideally, we'd want to do
-    // let region = unsafe {MmapRegionBuilder::new(GUEST_MEMORY_SIZE)
-    //     .with_raw_mmap_pointer(bytes.as_mut_ptr())
-    //     .build()
-    //     .unwrap()};
-    // However, .build() calls to .build_raw(), which contains a call to libc::sysconf.
-    // Since kani 0.34.0, stubbing out foreign functions is supported, but due to the rust
-    // standard library using a special version of the libc crate, it runs into some problems
-    // [1]. Even if we work around those problems, we run into performance problems [2].
-    // Therefore, for now we stick to this ugly transmute hack (which only works because
-    // the kani compiler will never re-order fields, so we can treat repr(Rust) as repr(C)).
-    //
-    // [1]: https://github.com/model-checking/kani/issues/2673
-    // [2]: https://github.com/model-checking/kani/issues/2538
-    let region_stub = MmapRegionStub {
-        _addr: memory,
-        _size: GUEST_MEMORY_SIZE,
-        _bitmap: Default::default(),
-        _file_offset: None,
-        _prot: 0,
-        _flags: libc::MAP_ANONYMOUS | libc::MAP_PRIVATE,
-        _owned: false,
-        _hugetlbfs: None,
-    };
-
-    let region: MmapRegion<()> = unsafe { std::mem::transmute(region_stub) };
-
-    let guest_region =
-        vm_memory::GuestRegionMmap::new(region, GuestAddress(GUEST_MEMORY_BASE)).unwrap();
-
-    // Use a single memory region for guests of size < 2GB
-    SingleRegionGuestMemory {
-        the_region: guest_region,
-    }
-}
-
const MAX_QUEUE_SIZE: u16 = 4;

// Constants describing the in-memory layout of a queue of size MAX_QUEUE_SIZE starting