On Fri, Nov 21, 2025 at 05:59:02PM +0100, Laurent Vivier wrote:
With the recent addition of multiqueue support to passt's vhost-user implementation, we need test coverage to validate it. So far, the test infrastructure has only exercised single-queue configurations.
Add a VHOST_USER_MQ environment variable to control the number of queue pairs. When set to values greater than 1, the setup scripts pass --max-qpairs to passt and configure QEMU's vhost-user netdev with the corresponding queues= parameter.
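For illustration, with VHOST_USER=1 and VHOST_USER_MQ=8 the setup code ends up building command lines roughly like the following (options abbreviated, not the literal invocations from the scripts):

  # passt: request 8 queue pairs explicitly
  ./passt ... --vhost-user --max-qpairs 8

  # QEMU: matching queues= on the netdev, mq=true on the device
  qemu-system-${QEMU_ARCH} ... \
    -chardev socket,id=c,path=${STATESETUP}/passt.socket \
    -netdev vhost-user,id=v,chardev=c,queues=8 \
    -device virtio-net,netdev=v,mq=true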
The test suite now runs an additional set of tests with 8 queue pairs to exercise the multiqueue paths across all protocols (TCP, UDP, ICMP) and services (DHCP, NDP). Note that the guest kernel will only enable as many queues as there are vCPUs.
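Inside the guest, the channel count can be inspected (and raised) with ethtool; the interface name below is only a placeholder:

  # current and maximum number of combined channels for the virtio-net NIC
  ethtool -l eth0
  # request all 8 queue pairs (bounded by what the device advertises)
  ethtool -L eth0 combined 8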
Signed-off-by: Laurent Vivier
---
 test/lib/setup | 58 +++++++++++++++++++++++++++++++++++++++-----------
 test/run       | 23 ++++++++++++++++++++
 2 files changed, 69 insertions(+), 12 deletions(-)

diff --git a/test/lib/setup b/test/lib/setup
index 5994598744a3..2af34d670473 100755
--- a/test/lib/setup
+++ b/test/lib/setup
@@ -18,6 +18,8 @@ VCPUS="$( [ $(nproc) -ge 8 ] && echo 6 || echo $(( $(nproc) / 2 + 1 )) )"
 MEM_KIB="$(sed -n 's/MemTotal:[ ]*\([0-9]*\) kB/\1/p' /proc/meminfo)"
 QEMU_ARCH="$(uname -m)"
 [ "${QEMU_ARCH}" = "i686" ] && QEMU_ARCH=i386
+VHOST_USER=0
+VHOST_USER_MQ=1

 # setup_build() - Set up pane layout for build tests
 setup_build() {
@@ -45,7 +47,8 @@ setup_passt() {
 	[ ${PCAP} -eq 1 ] && __opts="${__opts} -p ${LOGDIR}/passt.pcap"
 	[ ${DEBUG} -eq 1 ] && __opts="${__opts} -d"
 	[ ${TRACE} -eq 1 ] && __opts="${__opts} --trace"
-	[ ${VHOST_USER} -eq 1 ] && __opts="${__opts} --vhost-user"
+	[ ${VHOST_USER} -eq 1 ] && __opts="${__opts} --vhost-user" && \
+		[ ${VHOST_USER_MQ} -gt 1 ] && __opts="${__opts} --max-qpairs ${VHOST_USER_MQ}"

 	context_run passt "make clean"
 	context_run passt "make valgrind"
@@ -59,10 +62,18 @@ setup_passt() {
 		__vmem="$(((${__vmem} + 500) / 1000))G"
 		__qemu_netdev=" \
 		  -chardev socket,id=c,path=${STATESETUP}/passt.socket \
-		  -netdev vhost-user,id=v,chardev=c \
-		  -device virtio-net,netdev=v \
 		  -object memory-backend-memfd,id=m,share=on,size=${__vmem} \
 		  -numa node,memdev=m"
+
+		if [ ${VHOST_USER_MQ} -gt 1 ]; then
+			__qemu_netdev="${__qemu_netdev} \
+			  -device virtio-net,netdev=v,mq=true \
+			  -netdev vhost-user,id=v,chardev=c,queues=${VHOST_USER_MQ}"
+		else
+			__qemu_netdev="${__qemu_netdev} \
+			  -device virtio-net,netdev=v \
+			  -netdev vhost-user,id=v,chardev=c"
Is there a difference for qemu between omitting queues= and using queues=1? If not, we can simplify this. For the passt option it's worth explicitly not setting it in the single-queue case, so that we exercise both the default and the explicit command line option. But exercising qemu's options is not our concern, so we can use queues=1 if it means the same thing as omitting it entirely. Otherwise LGTM.
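Something like this, say (untested sketch, assuming queues=1 really is equivalent to leaving queues= out and that mq=off matches the device default):

	# hypothetical helper: mq= simply follows the queue pair count
	__mq="$([ ${VHOST_USER_MQ} -gt 1 ] && echo on || echo off)"
	__qemu_netdev="${__qemu_netdev} \
	  -netdev vhost-user,id=v,chardev=c,queues=${VHOST_USER_MQ} \
	  -device virtio-net,netdev=v,mq=${__mq}"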
+		fi
 	else
 		__qemu_netdev="-device virtio-net-pci,netdev=s \
 		  -netdev stream,id=s,server=off,addr.type=unix,addr.path=${STATESETUP}/passt.socket"
@@ -155,7 +166,8 @@ setup_passt_in_ns() {
 	[ ${PCAP} -eq 1 ] && __opts="${__opts} -p ${LOGDIR}/passt_in_pasta.pcap"
 	[ ${DEBUG} -eq 1 ] && __opts="${__opts} -d"
 	[ ${TRACE} -eq 1 ] && __opts="${__opts} --trace"
-	[ ${VHOST_USER} -eq 1 ] && __opts="${__opts} --vhost-user"
+	[ ${VHOST_USER} -eq 1 ] && __opts="${__opts} --vhost-user" && \
+		[ ${VHOST_USER_MQ} -gt 1 ] && __opts="${__opts} --max-qpairs ${VHOST_USER_MQ}"

 	if [ ${VALGRIND} -eq 1 ]; then
 		context_run passt "make clean"
@@ -173,10 +185,18 @@ setup_passt_in_ns() {
 		__vmem="$(((${__vmem} + 500) / 1000))G"
 		__qemu_netdev=" \
 		  -chardev socket,id=c,path=${STATESETUP}/passt.socket \
-		  -netdev vhost-user,id=v,chardev=c \
-		  -device virtio-net,netdev=v \
 		  -object memory-backend-memfd,id=m,share=on,size=${__vmem} \
 		  -numa node,memdev=m"
+
+		if [ ${VHOST_USER_MQ} -gt 1 ]; then
+			__qemu_netdev="${__qemu_netdev} \
+			  -device virtio-net,netdev=v,mq=true \
+			  -netdev vhost-user,id=v,chardev=c,queues=${VHOST_USER_MQ}"
+		else
+			__qemu_netdev="${__qemu_netdev} \
+			  -device virtio-net,netdev=v \
+			  -netdev vhost-user,id=v,chardev=c"
+		fi
 	else
 		__qemu_netdev="-device virtio-net-pci,netdev=s \
 		  -netdev stream,id=s,server=off,addr.type=unix,addr.path=${STATESETUP}/passt.socket"
@@ -241,7 +261,8 @@ setup_two_guests() {
 	[ ${PCAP} -eq 1 ] && __opts="${__opts} -p ${LOGDIR}/passt_1.pcap"
 	[ ${DEBUG} -eq 1 ] && __opts="${__opts} -d"
 	[ ${TRACE} -eq 1 ] && __opts="${__opts} --trace"
-	[ ${VHOST_USER} -eq 1 ] && __opts="${__opts} --vhost-user"
+	[ ${VHOST_USER} -eq 1 ] && __opts="${__opts} --vhost-user" && \
+		[ ${VHOST_USER_MQ} -gt 1 ] && __opts="${__opts} --max-qpairs ${VHOST_USER_MQ}"

 	context_run_bg passt_1 "./passt -s ${STATESETUP}/passt_1.socket -P ${STATESETUP}/passt_1.pid -f ${__opts} --fqdn fqdn1.passt.test -H hostname1 -t 10001 -u 10001"
 	wait_for [ -f "${STATESETUP}/passt_1.pid" ]
@@ -250,7 +271,8 @@ setup_two_guests() {
 	[ ${PCAP} -eq 1 ] && __opts="${__opts} -p ${LOGDIR}/passt_2.pcap"
 	[ ${DEBUG} -eq 1 ] && __opts="${__opts} -d"
 	[ ${TRACE} -eq 1 ] && __opts="${__opts} --trace"
-	[ ${VHOST_USER} -eq 1 ] && __opts="${__opts} --vhost-user"
+	[ ${VHOST_USER} -eq 1 ] && __opts="${__opts} --vhost-user" && \
+		[ ${VHOST_USER_MQ} -gt 1 ] && __opts="${__opts} --max-qpairs ${VHOST_USER_MQ}"

 	context_run_bg passt_2 "./passt -s ${STATESETUP}/passt_2.socket -P ${STATESETUP}/passt_2.pid -f ${__opts} --hostname hostname2 --fqdn fqdn2 -t 10004 -u 10004"
 	wait_for [ -f "${STATESETUP}/passt_2.pid" ]
@@ -260,16 +282,28 @@ setup_two_guests() {
 		__vmem="$(((${__vmem} + 500) / 1000))G"
 		__qemu_netdev1=" \
 		  -chardev socket,id=c,path=${STATESETUP}/passt_1.socket \
-		  -netdev vhost-user,id=v,chardev=c \
-		  -device virtio-net,netdev=v \
 		  -object memory-backend-memfd,id=m,share=on,size=${__vmem} \
 		  -numa node,memdev=m"
 		__qemu_netdev2=" \
 		  -chardev socket,id=c,path=${STATESETUP}/passt_2.socket \
-		  -netdev vhost-user,id=v,chardev=c \
-		  -device virtio-net,netdev=v \
 		  -object memory-backend-memfd,id=m,share=on,size=${__vmem} \
 		  -numa node,memdev=m"
+
+		if [ ${VHOST_USER_MQ} -gt 1 ]; then
+			__qemu_netdev1="${__qemu_netdev1} \
+			  -device virtio-net,netdev=v,mq=true \
+			  -netdev vhost-user,id=v,chardev=c,queues=${VHOST_USER_MQ}"
+			__qemu_netdev2="${__qemu_netdev2} \
+			  -device virtio-net,netdev=v,mq=true \
+			  -netdev vhost-user,id=v,chardev=c,queues=${VHOST_USER_MQ}"
+		else
+			__qemu_netdev1="${__qemu_netdev1} \
+			  -device virtio-net,netdev=v \
+			  -netdev vhost-user,id=v,chardev=c"
+			__qemu_netdev2="${__qemu_netdev2} \
+			  -device virtio-net,netdev=v \
+			  -netdev vhost-user,id=v,chardev=c"
+		fi
 	else
 		__qemu_netdev1="-device virtio-net-pci,netdev=s \
 		  -netdev stream,id=s,server=off,addr.type=unix,addr.path=${STATESETUP}/passt_1.socket"
diff --git a/test/run b/test/run
index f858e5586847..652cc12b1234 100755
--- a/test/run
+++ b/test/run
@@ -190,6 +190,29 @@ run() {
 	test passt_vu_in_ns/shutdown
 	teardown passt_in_ns

+	VHOST_USER=1
+	VHOST_USER_MQ=8
+	setup passt_in_ns
+	test passt_vu/ndp
+	test passt_vu_in_ns/dhcp
+	test passt_vu_in_ns/icmp
+	test passt_vu_in_ns/tcp
+	test passt_vu_in_ns/udp
+	test passt_vu_in_ns/shutdown
+	teardown passt_in_ns
+
+	setup two_guests
+	test two_guests_vu/basic
+	teardown two_guests
+
+	setup passt_in_ns
+	test passt_vu/ndp
+	test passt_vu_in_ns/dhcp
+	test perf/passt_vu_tcp
+	test perf/passt_vu_udp
+	test passt_vu_in_ns/shutdown
+	teardown passt_in_ns
+
 	# TODO: Make those faster by at least pre-installing gcc and make on
 	# non-x86 images, then re-enable.
 	skip_distro() {
-- 
2.51.0
-- 
David Gibson (he or they)      | I'll have my music baroque, and my code
david AT gibson.dropbear.id.au | minimalist, thank you, not the other way
                               | around.
http://www.ozlabs.org/~dgibson