On Fri, Apr 03, 2026 at 06:38:11PM +0200, Laurent Vivier wrote:
> The previous per-protocol padding done by vu_pad() in tcp_vu.c and
> udp_vu.c was only correct for single-buffer frames: it assumed the
> padding area always fell within the first iov, writing past its end
> with a plain memset().
> 
> It also required each caller to compute MAX(..., ETH_ZLEN + VNET_HLEN)
> for vu_collect() and to call vu_pad() at the right point, duplicating
> the minimum-size logic across protocols.
> 
> Move the Ethernet minimum size enforcement into vu_collect() itself,
> so that enough buffer space is always reserved for padding regardless
> of the requested frame size.
> 
> Rewrite vu_pad() to take a full iovec array and use iov_memset(),
> making it safe for multi-buffer (mergeable rx buffer) frames.
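
For anyone following along: the scatter-aware fill this relies on
walks the array roughly as sketched below. This is only the shape of
it, not the exact iov.c code; MIN() as in util.h and the usual
<string.h>/<sys/uio.h> definitions are assumed:

    void iov_memset(const struct iovec *iov, size_t iov_cnt,
                    size_t offset, int c, size_t length)
    {
            size_t i;

            for (i = 0; i < iov_cnt && length; i++) {
                    size_t n;

                    if (offset >= iov[i].iov_len) {
                            /* buffer lies entirely before @offset */
                            offset -= iov[i].iov_len;
                            continue;
                    }

                    /* fill as much of this buffer as needed */
                    n = MIN(iov[i].iov_len - offset, length);
                    memset((char *)iov[i].iov_base + offset, c, n);
                    length -= n;
                    offset = 0; /* later buffers start at the top */
            }
    }

So padding that starts in one guest buffer and spills into the next is
handled naturally, which is exactly what the old single-buffer
memset() could not do.
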
> In tcp_vu_sock_recv(), replace iov_truncate() with iov_skip_bytes():
> now that all consumers receive explicit data lengths, truncating the
> iovecs is no longer needed.  In tcp_vu_data_from_sock(), cap each
> frame's data length against the remaining bytes actually received
> from the socket, so that the last partial frame gets correct headers
> and sequence number advancement.
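
Restating the capping from the tcp_vu.c hunk below, with a worked
example:

    dlen = frame_size - hdrlen; /* space this frame can carry */
    if (dlen > len)
            dlen = len;         /* last frame: only what is left */
    len -= dlen;                /* socket bytes still to place */

With len = 100 bytes read from the socket and two collected frames
able to carry 64 data bytes each, the frames get dlen = 64 and
dlen = 36, and seq_to_tap advances by exactly 100 rather than by the
reserved 128.
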
> Signed-off-by: Laurent Vivier
> ---
>  iov.c       |  1 -
>  tcp_vu.c    | 34 ++++++++++++++++++----------------
>  udp_vu.c    | 14 ++++++++------
>  vu_common.c | 31 +++++++++++++++----------------
>  vu_common.h |  2 +-
>  5 files changed, 42 insertions(+), 40 deletions(-)
> 
> diff --git a/iov.c b/iov.c
> index dabc4f1ceea3..28c6d40d2986 100644
> --- a/iov.c
> +++ b/iov.c
> @@ -180,7 +180,6 @@ size_t iov_truncate(struct iovec *iov, size_t iov_cnt, size_t size)
>   * Will write less than @length bytes if it runs out of space in
>   * the iov
>   */
> -/* cppcheck-suppress unusedFunction */
>  void iov_memset(const struct iovec *iov, size_t iov_cnt, size_t offset,
>                  int c, size_t length)
>  {
> diff --git a/tcp_vu.c b/tcp_vu.c
> index 8c1894dca7fe..2dfe14485eee 100644
> --- a/tcp_vu.c
> +++ b/tcp_vu.c
> @@ -88,7 +88,7 @@ int tcp_vu_send_flag(const struct ctx *c, struct tcp_tap_conn *conn, int flags)
>          elem_cnt = vu_collect(vdev, vq, &flags_elem[0], 1,
>                                &flags_iov[0], 1, NULL,
> -                              MAX(hdrlen + sizeof(*opts), ETH_ZLEN + VNET_HLEN), NULL);
> +                              hdrlen + sizeof(*opts), NULL);
>          if (elem_cnt != 1)
>                  return -1;
> 
> @@ -128,8 +128,6 @@ int tcp_vu_send_flag(const struct ctx *c, struct tcp_tap_conn *conn, int flags)
>                  return ret;
>          }
> 
> -        l2len = hdrlen + optlen - VNET_HLEN;
> -        iov_truncate(&flags_iov[0], 1, l2len + VNET_HLEN);
>          payload = IOV_TAIL(flags_elem[0].in_sg, 1, hdrlen);
> 
>          if (flags & KEEPALIVE)
> @@ -138,17 +136,17 @@ int tcp_vu_send_flag(const struct ctx *c, struct tcp_tap_conn *conn, int flags)
>          tcp_fill_headers(c, conn, eh, ip4h, ip6h, th, &payload,
>                           optlen, NULL, seq, !*c->pcap);
> 
> -        vu_pad(&flags_elem[0].in_sg[0], l2len);
> -
> +        vu_pad(flags_elem[0].in_sg, 1, hdrlen + optlen);

Is there a reason not to fold vu_pad() into vu_flush()?
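
Something like the sketch below is what I have in mind -- purely
hypothetical, and it assumes one in_sg entry per element, which the
"one iovec per element" note in the udp_vu.c hunk suggests; the real
signature might want the frame's iovec array passed in instead:

    void vu_flush(const struct vu_dev *vdev, struct vu_virtq *vq,
                  struct vu_virtq_element *elem, int elem_cnt,
                  size_t frame_len)
    {
            struct iovec iov[VIRTQUEUE_MAX_SIZE];
            int i;

            /* Rebuild a flat iovec view of the frame's buffers and
             * pad before anything is committed to the used ring, so
             * no caller can forget the vu_pad() step.
             */
            for (i = 0; i < elem_cnt; i++)
                    iov[i] = elem[i].in_sg[0];
            vu_pad(iov, elem_cnt, frame_len);

            /* ... existing vu_flush() body, unchanged ... */
    }

That would keep the pad-before-flush ordering in one place instead of
at four call sites.
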
>          vu_flush(vdev, vq, flags_elem, 1, hdrlen + optlen);
> 
> +        l2len = hdrlen + optlen - VNET_HLEN;
>          if (*c->pcap)
>                  pcap_iov(&flags_elem[0].in_sg[0], 1, VNET_HLEN, l2len);
> 
>          if (flags & DUP_ACK) {
>                  elem_cnt = vu_collect(vdev, vq, &flags_elem[1], 1,
>                                        &flags_iov[1], 1, NULL,
> -                                      flags_elem[0].in_sg[0].iov_len, NULL);
> +                                      hdrlen + optlen, NULL);
>                  if (elem_cnt == 1 &&
>                      flags_elem[1].in_sg[0].iov_len >=
>                      flags_elem[0].in_sg[0].iov_len) {
> @@ -213,7 +211,7 @@ static ssize_t tcp_vu_sock_recv(const struct ctx *c, struct vu_virtq *vq,
>                           ARRAY_SIZE(elem) - elem_cnt,
>                           &iov_vu[DISCARD_IOV_NUM + iov_used],
>                           VIRTQUEUE_MAX_SIZE - iov_used, &in_total,
> -                         MAX(MIN(mss, fillsize) + hdrlen, ETH_ZLEN + VNET_HLEN),
> +                         MIN(mss, fillsize) + hdrlen,
>                           &frame_size);
>                  if (cnt == 0)
>                          break;
> @@ -249,8 +247,11 @@ static ssize_t tcp_vu_sock_recv(const struct ctx *c, struct vu_virtq *vq,
>          if (!peek_offset_cap)
>                  ret -= already_sent;
> 
> -        /* adjust iov number and length of the last iov */
> -        i = iov_truncate(&iov_vu[DISCARD_IOV_NUM], iov_used, ret);
> +        i = iov_skip_bytes(&iov_vu[DISCARD_IOV_NUM], iov_used,
> +                           MAX(hdrlen + ret, VNET_HLEN + ETH_ZLEN),
> +                           NULL);
> +        if ((size_t)i < iov_used)
> +                i++;
> 
>          /* adjust head count */
>          while (*head_cnt > 0 && head[*head_cnt - 1] >= i)
> @@ -447,11 +448,13 @@ int tcp_vu_data_from_sock(const struct ctx *c, struct tcp_tap_conn *conn)
>                  size_t frame_size = iov_size(iov, buf_cnt);
>                  bool push = i == head_cnt - 1;
>                  ssize_t dlen;
> -                size_t l2len;
> 
>                  assert(frame_size >= hdrlen);
> 
>                  dlen = frame_size - hdrlen;
> +                if (dlen > len)
> +                        dlen = len;
> +                len -= dlen;
> 
>                  /* The IPv4 header checksum varies only with dlen */
>                  if (previous_dlen != dlen)
> @@ -460,14 +463,13 @@ int tcp_vu_data_from_sock(const struct ctx *c, struct tcp_tap_conn *conn)
>                  tcp_vu_prepare(c, conn, iov, buf_cnt, dlen, &check,
>                                 !*c->pcap, push);
> 
> -                /* Pad first/single buffer only, it's at least ETH_ZLEN long */
> -                l2len = dlen + hdrlen - VNET_HLEN;
> -                vu_pad(iov, l2len);
> -
> +                vu_pad(elem[head[i]].in_sg, buf_cnt, dlen + hdrlen);
>                  vu_flush(vdev, vq, &elem[head[i]], buf_cnt, dlen + hdrlen);
> 
> -                if (*c->pcap)
> -                        pcap_iov(iov, buf_cnt, VNET_HLEN, l2len);
> +                if (*c->pcap) {
> +                        pcap_iov(iov, buf_cnt, VNET_HLEN,
> +                                 dlen + hdrlen - VNET_HLEN);
> +                }
> 
>                  conn->seq_to_tap += dlen;
>          }
> 
> diff --git a/udp_vu.c b/udp_vu.c
> index 4641f42eb5c4..30af64034516 100644
> --- a/udp_vu.c
> +++ b/udp_vu.c
> @@ -65,7 +65,7 @@ static size_t udp_vu_hdrlen(bool v6)
>  static ssize_t udp_vu_sock_recv(struct iovec *iov, size_t *cnt, int s, bool v6)
>  {
>          struct msghdr msg = { 0 };
> -        size_t hdrlen, l2len;
> +        size_t hdrlen, iov_used;
>          ssize_t dlen;
> 
>          /* compute L2 header length */
> @@ -88,11 +88,12 @@ static ssize_t udp_vu_sock_recv(struct iovec *iov, size_t *cnt, int s, bool v6)
>          iov[0].iov_base = (char *)iov[0].iov_base - hdrlen;
>          iov[0].iov_len += hdrlen;
> 
> -        *cnt = iov_truncate(iov, *cnt, dlen + hdrlen);
> -
> -        /* pad frame to 60 bytes: first buffer is at least ETH_ZLEN long */
> -        l2len = dlen + hdrlen - VNET_HLEN;
> -        vu_pad(&iov[0], l2len);
> +        iov_used = iov_skip_bytes(iov, *cnt,
> +                                  MAX(dlen + hdrlen, VNET_HLEN + ETH_ZLEN),
> +                                  NULL);
> +        if (iov_used < *cnt)
> +                iov_used++;
> +        *cnt = iov_used;        /* one iovec per element */
> 
>          return dlen;
>  }
> 
> @@ -234,6 +235,7 @@ void udp_vu_sock_to_tap(const struct ctx *c, int s, int n, flow_sidx_t tosidx)
>                          pcap_iov(iov_vu, iov_cnt, VNET_HLEN,
>                                   hdrlen + dlen - VNET_HLEN);
>                  }
> +                vu_pad(iov_vu, iov_cnt, hdrlen + dlen);
>                  vu_flush(vdev, vq, elem, elem_used, hdrlen + dlen);
>                  vu_queue_notify(vdev, vq);
>          }
> 
> diff --git a/vu_common.c b/vu_common.c
> index 704e908aa02c..d07f584f228a 100644
> --- a/vu_common.c
> +++ b/vu_common.c
> @@ -74,6 +74,7 @@ int vu_collect(const struct vu_dev *vdev, struct vu_virtq *vq,
>          size_t current_iov = 0;
>          int elem_cnt = 0;
> 
> +        size = MAX(size, ETH_ZLEN /* Ethernet minimum size */ + VNET_HLEN);
>          while (current_size < size && elem_cnt < max_elem &&
>                 current_iov < max_in_sg) {
>                  int ret;
> @@ -261,29 +262,27 @@ int vu_send_single(const struct ctx *c, const void *buf, size_t size)
>                  return -1;
>          }
> 
> -        size += VNET_HLEN;
>          elem_cnt = vu_collect(vdev, vq, elem, ARRAY_SIZE(elem), in_sg,
> -                              ARRAY_SIZE(in_sg), &in_total, size, &total);
> -        if (elem_cnt == 0 || total < size) {
> +                              ARRAY_SIZE(in_sg), &in_total, VNET_HLEN + size,
> +                              &total);
> +        if (elem_cnt == 0 || total < VNET_HLEN + size) {
>                  debug("vu_send_single: no space to send the data "
>                        "elem_cnt %d size %zu", elem_cnt, total);
>                  goto err;
>          }
> 
> -        total -= VNET_HLEN;
> -
> -        /* copy data from the buffer to the iovec */
> -        iov_from_buf(in_sg, in_total, VNET_HLEN, buf, total);
> +        iov_from_buf(in_sg, in_total, VNET_HLEN, buf, size);
> 
>          if (*c->pcap)
>                  pcap_iov(in_sg, in_total, VNET_HLEN, size);
> 
> +        vu_pad(in_sg, in_total, VNET_HLEN + size);
>          vu_flush(vdev, vq, elem, elem_cnt, VNET_HLEN + size);
>          vu_queue_notify(vdev, vq);
> 
> -        trace("vhost-user sent %zu", total);
> +        trace("vhost-user sent %zu", size);
> 
> -        return total;
> +        return size;
>  err:
>          for (i = 0; i < elem_cnt; i++)
>                  vu_queue_detach_element(vq);
> @@ -292,15 +291,15 @@ err:
>  }
> 
>  /**
> - * vu_pad() - Pad 802.3 frame to minimum length (60 bytes) if needed
> - * @iov:        Buffer in iovec array where end of 802.3 frame is stored
> - * @l2len:      Layer-2 length already filled in frame
> + * vu_pad() - Pad short frames to minimum Ethernet length and truncate iovec
> + * @iov:        Pointer to iovec array
> + * @cnt:        Number of entries in @iov
> + * @frame_len:  Data length in @iov (including virtio-net header)
>   */
> -void vu_pad(struct iovec *iov, size_t l2len)
> +void vu_pad(const struct iovec *iov, size_t cnt, size_t frame_len)
>  {
> -        if (l2len >= ETH_ZLEN)
> -                return;
> +        size_t min_frame_len = ETH_ZLEN + VNET_HLEN;
> 
> -        memset((char *)iov->iov_base + iov->iov_len, 0, ETH_ZLEN - l2len);
> -        iov->iov_len += ETH_ZLEN - l2len;
> +        if (frame_len < min_frame_len)
> +                iov_memset(iov, cnt, frame_len, 0, min_frame_len - frame_len);
>  }
> diff --git a/vu_common.h b/vu_common.h
> index 77d1849e6115..51f70084a7cb 100644
> --- a/vu_common.h
> +++ b/vu_common.h
> @@ -44,6 +44,6 @@ void vu_flush(const struct vu_dev *vdev, struct vu_virtq *vq,
>  void vu_kick_cb(struct vu_dev *vdev, union epoll_ref ref,
>                  const struct timespec *now);
>  int vu_send_single(const struct ctx *c, const void *buf, size_t size);
> -void vu_pad(struct iovec *iov, size_t l2len);
> +void vu_pad(const struct iovec *iov, size_t cnt, size_t frame_len);
> 
>  #endif /* VU_COMMON_H */
> -- 
> 2.53.0
-- 
David Gibson (he or they)          | I'll have my music baroque, and my code
david AT gibson.dropbear.id.au     | minimalist, thank you, not the other way
                                   | around.
http://www.ozlabs.org/~dgibson