@@ -2202,7 +2202,7 @@ pub fn new_vcpu(
22022202 vcpu,
22032203 kvm_run_ptr,
22042204 coalesced_mmio_ring : None ,
2205- dirty_log_ring : dirty_log_ring ,
2205+ dirty_log_ring,
22062206 }
22072207}
22082208
@@ -2874,6 +2874,144 @@ mod tests {
28742874 }
28752875 }
28762876
/// Runs a small real-mode guest with the dirty log ring enabled and verifies
/// that exactly the pages the guest wrote are reported dirty — either through
/// the per-vCPU dirty ring iterator or, when the kernel additionally requires
/// it, through the slot's classic dirty bitmap.
#[cfg(target_arch = "x86_64")]
#[test]
fn test_run_code_dirty_log_ring() {
    use std::io::Write;

    let kvm = Kvm::new().unwrap();
    let mut vm = kvm.create_vm().unwrap();

    // Enable the dirty log ring before registering any memory slot.
    // `need_bitmap` reports whether dirty pages may also land in the slot's
    // dirty bitmap and must be collected from there as well (checked in the
    // `Hlt` arm below).
    let need_bitmap = vm.enable_dirty_log_ring(None).unwrap();

    // This example is based on https://lwn.net/Articles/658511/
    #[rustfmt::skip]
    let code = [
        0xba, 0xf8, 0x03,             /* mov $0x3f8, %dx */
        0x00, 0xd8,                   /* add %bl, %al */
        0x04, b'0',                   /* add $'0', %al */
        0xee,                         /* out %al, %dx */
        0xec,                         /* in %dx, %al */
        0xc6, 0x06, 0x00, 0x80, 0x00, /* movb $0, (0x8000); 0x8000 is outside the
                                         memslot, so this generates an MMIO Write. */
        0x8a, 0x16, 0x00, 0x80,       /* movb (0x8000), %dl; This generates an MMIO Read. */
        0xc6, 0x06, 0x00, 0x20, 0x00, /* movb $0, (0x2000); Dirty one page in guest mem. */
        0xf4,                         /* hlt */
    ];
    // RIP values observed while single-stepping the first three instructions
    // (each value is the address of the *next* instruction to execute).
    let expected_rips: [u64; 3] = [0x1003, 0x1005, 0x1007];

    let mem_size = 0x4000;
    let load_addr = mmap_anonymous(mem_size).as_ptr();
    let guest_addr: u64 = 0x1000;
    let slot: u32 = 0;
    // One slot covering guest physical 0x1000..0x5000, with dirty-page
    // logging enabled. Note 0x8000 is deliberately NOT covered.
    let mem_region = kvm_userspace_memory_region {
        slot,
        guest_phys_addr: guest_addr,
        memory_size: mem_size as u64,
        userspace_addr: load_addr as u64,
        flags: KVM_MEM_LOG_DIRTY_PAGES,
    };
    unsafe {
        vm.set_user_memory_region(mem_region).unwrap();
    }

    unsafe {
        // Get a mutable slice of `mem_size` from `load_addr`.
        // This is safe because we mapped it before.
        let mut slice = std::slice::from_raw_parts_mut(load_addr, mem_size);
        slice.write_all(&code).unwrap();
    }

    let mut vcpu_fd = vm.create_vcpu(0).unwrap();

    // Run flat (base 0, selector 0) so that guest-physical addresses in the
    // code above are used as-is.
    let mut vcpu_sregs = vcpu_fd.get_sregs().unwrap();
    assert_ne!(vcpu_sregs.cs.base, 0);
    assert_ne!(vcpu_sregs.cs.selector, 0);
    vcpu_sregs.cs.base = 0;
    vcpu_sregs.cs.selector = 0;
    vcpu_fd.set_sregs(&vcpu_sregs).unwrap();

    let mut vcpu_regs = vcpu_fd.get_regs().unwrap();
    // Set the Instruction Pointer to the guest address where we loaded the code.
    vcpu_regs.rip = guest_addr;
    // rax/rbx seed the `add %bl, %al` so the `out` below emits b'5' (2+3+'0').
    vcpu_regs.rax = 2;
    vcpu_regs.rbx = 3;
    vcpu_regs.rflags = 2;
    vcpu_fd.set_regs(&vcpu_regs).unwrap();

    // Single-step the vCPU so the first few instructions produce Debug exits
    // with predictable RIPs.
    let mut debug_struct = kvm_guest_debug {
        control: KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_SINGLESTEP,
        pad: 0,
        arch: kvm_guest_debug_arch {
            debugreg: [0, 0, 0, 0, 0, 0, 0, 0],
        },
    };
    vcpu_fd.set_guest_debug(&debug_struct).unwrap();

    let mut instr_idx = 0;
    loop {
        match vcpu_fd.run().expect("run failed") {
            VcpuExit::IoIn(addr, data) => {
                // From the `in %dx, %al` at port 0x3f8.
                assert_eq!(addr, 0x3f8);
                assert_eq!(data.len(), 1);
            }
            VcpuExit::IoOut(addr, data) => {
                // From the `out %al, %dx`: al = 2 + 3 + '0' = '5'.
                assert_eq!(addr, 0x3f8);
                assert_eq!(data.len(), 1);
                assert_eq!(data[0], b'5');
            }
            VcpuExit::MmioRead(addr, data) => {
                // Byte read of unmapped 0x8000.
                assert_eq!(addr, 0x8000);
                assert_eq!(data.len(), 1);
            }
            VcpuExit::MmioWrite(addr, data) => {
                // Byte write of 0 to unmapped 0x8000 — does NOT dirty a page.
                assert_eq!(addr, 0x8000);
                assert_eq!(data.len(), 1);
                assert_eq!(data[0], 0);
            }
            VcpuExit::Debug(debug) => {
                if instr_idx == expected_rips.len() - 1 {
                    // Disabling debugging/single-stepping
                    debug_struct.control = 0;
                    vcpu_fd.set_guest_debug(&debug_struct).unwrap();
                } else if instr_idx >= expected_rips.len() {
                    unreachable!();
                }
                let vcpu_regs = vcpu_fd.get_regs().unwrap();
                assert_eq!(vcpu_regs.rip, expected_rips[instr_idx]);
                // Exception 1 is #DB (debug).
                assert_eq!(debug.exception, 1);
                assert_eq!(debug.pc, expected_rips[instr_idx]);
                // Check the low 16 bits of DR6 (single-step sets BS, bit 14;
                // the rest is the architectural fixed pattern).
                let mask = (1 << 16) - 1;
                assert_eq!(debug.dr6 & mask, 0b100111111110000);
                // Bit 10 in DR7 is always 1
                assert_eq!(debug.dr7, 1 << 10);
                instr_idx += 1;
            }
            VcpuExit::Hlt => {
                // The code snippet dirties 2 pages:
                // * one when the code itself is loaded in memory;
                // * and one more from the `movb` that writes to address
                //   0x2000 (the store to 0x8000 is MMIO, not slot memory,
                //   so it does not dirty a page).

                // Dirty pages may be reported via the ring, the bitmap
                // (when `need_bitmap`), or split between both; count the
                // union of the two sources.
                let dirty_pages: u32 =
                    u32::try_from(vcpu_fd.dirty_log_ring_iter().unwrap().count()).unwrap()
                        + if need_bitmap {
                            let dirty_pages_bitmap = vm.get_dirty_log(slot, mem_size).unwrap();
                            dirty_pages_bitmap
                                .into_iter()
                                .map(|page| page.count_ones())
                                .sum()
                        } else {
                            0
                        };
                assert_eq!(dirty_pages, 2);
                break;
            }
            r => panic!("unexpected exit reason: {:?}", r),
        }
    }
}
3014+
28773015 #[ test]
28783016 #[ cfg( target_arch = "aarch64" ) ]
28793017 fn test_get_preferred_target ( ) {
0 commit comments