-
Notifications
You must be signed in to change notification settings - Fork 264
QEMU issues #1858
Copy link
Copy link
Open
Description
I encountered 2 issues with qemu while using drakvuf (built from source, using xen 4.20.1 and drakvuf-sandbox):
-
using qemu v9 (version included with xen since 4.20)
`top` showed >90% CPU usage by the qemu-system-i386 process, and `perf top` showed >90% of CPU time spent in the function `xen_invalidate_map_cache_single`
this seems to be fixed in qemu v10 -
using v8 (qemu-xen 4.19.1) or v10.2.0 (upstream) qemu sometimes entered a deadlock. This may be hard to reproduce but if someone also encounters this issue applying these patches helped on my system:
https://lore.kernel.org/qemu-devel/d69ad42912db00f9520a7205c4d2bae6efca5279.1687430874.git.lukasstraub2@web.de/
some information from trying to debug the deadlock:
pwndbg> bt
#0 0x00007f0447b6a356 in __ppoll (fds=0x55c48a01ad30, nfds=1, timeout=<optimized out>, timeout@entry=0x7ffecdff6bc0, sigmask=sigmask@entry=0x0) at ../sysdeps/unix/sysv/linux/ppoll.c:42
#1 0x000055c44e33db6e in ppoll (__ss=0x0, __timeout=0x7ffecdff6bc0, __nfds=<optimized out>, __fds=<optimized out>) at /usr/include/x86_64-linux-gnu/bits/poll2.h:64
#2 qemu_poll_ns (fds=<optimized out>, nfds=<optimized out>, timeout=timeout@entry=579068529131) at ../qemu-xen-dir-remote/util/qemu-timer.c:351
#3 0x000055c44e325c99 in fdmon_poll_wait (ctx=0x55c489db5820, ready_list=0x7ffecdff6c68, timeout=579068529131) at ../qemu-xen-dir-remote/util/fdmon-poll.c:80
#4 0x000055c44e32516d in aio_poll (ctx=0x55c489db5820, blocking=blocking@entry=true) at ../qemu-xen-dir-remote/util/aio-posix.c:680
#5 0x000055c44e224ee4 in bdrv_drain_all_begin () at ../qemu-xen-dir-remote/block/io.c:527
#6 bdrv_drain_all_begin () at ../qemu-xen-dir-remote/block/io.c:506
#7 0x000055c44e225232 in bdrv_drain_all () at ../qemu-xen-dir-remote/block/io.c:576
#8 0x000055c44e061460 in xen_invalidate_map_cache () at ../qemu-xen-dir-remote/hw/i386/xen/xen-mapcache.c:497
#9 0x000055c44e05e4dd in handle_ioreq (state=state@entry=0x55c489db6db0, req=req@entry=0x7ffecdff6dc0) at ../qemu-xen-dir-remote/hw/i386/xen/xen-hvm.c:1064
#10 0x000055c44e060253 in cpu_handle_ioreq (opaque=0x55c489db6db0) at ../qemu-xen-dir-remote/hw/i386/xen/xen-hvm.c:1165
#11 0x000055c44e324888 in aio_dispatch_handler (ctx=ctx@entry=0x55c489dad710, node=0x55c48aca66e0) at ../qemu-xen-dir-remote/util/aio-posix.c:380
#12 0x000055c44e325032 in aio_dispatch_handlers (ctx=0x55c489dad710) at ../qemu-xen-dir-remote/util/aio-posix.c:423
#13 aio_dispatch (ctx=0x55c489dad710) at ../qemu-xen-dir-remote/util/aio-posix.c:433
#14 0x000055c44e339b4e in aio_ctx_dispatch (source=<optimized out>, callback=<optimized out>, user_data=<optimized out>) at ../qemu-xen-dir-remote/util/async.c:358
#15 0x00007f0447f3c709 in g_main_context_dispatch () from /lib/x86_64-linux-gnu/libglib-2.0.so.0
#16 0x000055c44e33b508 in glib_pollfds_poll () at ../qemu-xen-dir-remote/util/main-loop.c:290
#17 os_host_main_loop_wait (timeout=0) at ../qemu-xen-dir-remote/util/main-loop.c:313
#18 main_loop_wait (nonblocking=nonblocking@entry=0) at ../qemu-xen-dir-remote/util/main-loop.c:592
#19 0x000055c44dfbd857 in qemu_main_loop () at ../qemu-xen-dir-remote/softmmu/runstate.c:731
#20 0x000055c44dde4716 in qemu_default_main () at ../qemu-xen-dir-remote/softmmu/main.c:37
#21 0x00007f0447a9524a in __libc_start_call_main (main=main@entry=0x55c44dddfb70 <main>, argc=argc@entry=47, argv=argv@entry=0x7ffecdff7128) at ../sysdeps/nptl/libc_start_call_main.h:58
#22 0x00007f0447a95305 in __libc_start_main_impl (main=0x55c44dddfb70 <main>, argc=47, argv=0x7ffecdff7128, init=<optimized out>, fini=<optimized out>, rtld_fini=<optimized out>, stack_end=0x7ffecdff7118)
at ../csu/libc-start.c:360
#23 0x000055c44dde4641 in _start ()
pwndbg> p *all_bdrv_states.tqh_first->parents.lh_first->klass
$14 = {
stay_at_node = false,
parent_is_bds = false,
inherit_options = 0x55a6e0dfb8bb <blk_root_inherit_options>,
change_media = 0x55a6e1234ff0 <blk_root_change_media>,
get_parent_desc = 0x55a6e1234cf0 <blk_root_get_parent_desc>,
activate = 0x55a6e12349a0 <blk_root_activate>,
inactivate = 0x55a6e1233910 <blk_root_inactivate>,
attach = 0x55a6e1233800 <blk_root_attach>,
detach = 0x55a6e12336f0 <blk_root_detach>,
update_filename = 0x0,
change_aio_ctx = 0x55a6e1233b40 <blk_root_change_aio_ctx>,
resize = 0x55a6e12335c0 <blk_root_resize>,
get_name = 0x55a6e12335a0 <blk_root_get_name>,
get_parent_aio_context = 0x55a6e1233df0 <blk_root_get_parent_aio_context>,
drained_begin = 0x55a6e1233690 <blk_root_drained_begin>,
drained_end = 0x55a6e12335f0 <blk_root_drained_end>,
drained_poll = 0x55a6e1233aa0 <blk_root_drained_poll>
}
pwndbg> p *(BlockBackend*)all_bdrv_states.tqh_first->parents.lh_first->opaque
$18 = {
name = 0x55a70a456c90 "ide0-hd0",
refcnt = 2,
root = 0x55a70a4575c0,
ctx = 0x55a70a1f2820,
legacy_dinfo = 0x55a70a448ad0,
link = {
tqe_next = 0x55a70a47bbb0,
tqe_circ = {
tql_next = 0x55a70a47bbb0,
tql_prev = 0x55a6e1bb9900 <block_backends>
}
},
monitor_link = {
tqe_next = 0x55a70a47bbb0,
tqe_circ = {
tql_next = 0x55a70a47bbb0,
tql_prev = 0x55a6e1bb98f0 <monitor_block_backends>
}
},
public = {
throttle_group_member = {
aio_context = 0x0,
throttled_reqs_lock = {
locked = 0,
ctx = 0x0,
from_push = {
slh_first = 0x0
},
to_pop = {
slh_first = 0x0
},
handoff = 0,
sequence = 0,
holder = 0x0
},
throttled_reqs = {{
    entries = {
      sqh_first = 0x0,
      sqh_last = 0x0
    }
  }, {
    entries = {
      sqh_first = 0x0,
      sqh_last = 0x0
    }
  }},
io_limits_disabled = 1,
restart_pending = 0,
throttle_state = 0x0,
throttle_timers = {
timers = {0x0, 0x0},
clock_type = QEMU_CLOCK_REALTIME,
read_timer_cb = 0x0,
write_timer_cb = 0x0,
timer_opaque = 0x0
},
pending_reqs = {0, 0},
round_robin = {
le_next = 0x0,
le_prev = 0x0
}
}
},
dev = 0x55a70a3542c0,
dev_ops = 0x55a6e16b6100 <ide_hd_block_ops>,
dev_opaque = 0x55a70ab85bd8,
root_state = {
open_flags = 0,
detect_zeroes = BLOCKDEV_DETECT_ZEROES_OPTIONS_OFF
},
enable_write_cache = true,
stats = {
lock = {
lock = {
__data = {
__lock = 0,
__count = 0,
__owner = 0,
__nusers = 0,
__kind = 0,
__spins = 0,
__elision = 0,
__list = {
__prev = 0x0,
__next = 0x0
}
},
__size = '\000' <repeats 39 times>,
__align = 0
},
initialized = true
},
nr_bytes = {0, 157676544, 25410048, 0, 9474048},
nr_ops = {0, 5770, 1538, 144, 93},
invalid_ops = {0, 0, 0, 0, 0},
failed_ops = {0, 0, 0, 0, 0},
total_time_ns = {0, 2838578373, 1144327452, 604792065, 662217},
merged = {0, 0, 0, 0, 0},
last_access_time_ns = 130702766168680,
intervals = {
slh_first = 0x0
},
account_invalid = true,
account_failed = true,
latency_histogram = {{
nbins = 0,
boundaries = 0x0,
bins = 0x0
}, {
nbins = 0,
boundaries = 0x0,
bins = 0x0
}, {
nbins = 0,
boundaries = 0x0,
bins = 0x0
}, {
nbins = 0,
boundaries = 0x0,
bins = 0x0
}, {
nbins = 0,
boundaries = 0x0,
bins = 0x0
}}
},
on_read_error = BLOCKDEV_ON_ERROR_REPORT,
on_write_error = BLOCKDEV_ON_ERROR_ENOSPC,
iostatus_enabled = true,
iostatus = BLOCK_DEVICE_IO_STATUS_OK,
perm = 3,
shared_perm = 13,
disable_perm = false,
allow_aio_context_change = false,
allow_write_beyond_eof = false,
remove_bs_notifiers = {
notifiers = {
lh_first = 0x0
}
},
insert_bs_notifiers = {
notifiers = {
lh_first = 0x0
}
},
aio_notifiers = {
lh_first = 0x0
},
quiesce_counter = 1,
queued_requests = {
entries = {
sqh_first = 0x55a70b0e3320,
sqh_last = 0x55a70b0e3358
}
},
disable_request_queuing = false,
vmsh = 0x0,
force_allow_inactivate = false,
in_flight = 1
}
pwndbg> p *(BlockBackendAIOCB*)0x55a70b0e3320
$19 = {
common = {
aiocb_info = 0x55a6e1236600 <blk_aio_pdiscard_entry>,
bs = 0x55a70a243000,
cb = 0x0,
opaque = 0x0,
refcnt = 0
},
blk = 0x55a70a1f2820,
ret = 0
}
Reactions are currently unavailable
Metadata
Metadata
Assignees
Labels
No labels