tcp_l2_buf_fill_headers() only calls tcp_fill_headers4() or
tcp_fill_headers6() according to the connection IP version. We can
inline these calls in tcp_data_to_tap(), which already has a switch on
the IP version. In tcp_send_flag(), this will make it easier to
separate the common code from the buffer/vhost-user specific parts.

Signed-off-by: Laurent Vivier <lvivier@redhat.com>
---
 tcp.c | 54 +++++++++++++++++++-----------------------------------
 1 file changed, 19 insertions(+), 35 deletions(-)

diff --git a/tcp.c b/tcp.c
index 21d0af061aec..e948014c611d 100644
--- a/tcp.c
+++ b/tcp.c
@@ -1405,37 +1405,6 @@ static size_t tcp_fill_headers6(const struct ctx *c,
 	return l4len;
 }
 
-/**
- * tcp_l2_buf_fill_headers() - Fill 802.3, IP, TCP headers in pre-cooked buffers
- * @c:		Execution context
- * @conn:	Connection pointer
- * @iov:	Pointer to an array of iovec of TCP pre-cooked buffers
- * @dlen:	TCP payload length
- * @check:	Checksum, if already known
- * @seq:	Sequence number for this segment
- *
- * Return: IP payload length, host order
- */
-static size_t tcp_l2_buf_fill_headers(const struct ctx *c,
-				      const struct tcp_tap_conn *conn,
-				      struct iovec *iov, size_t dlen,
-				      const uint16_t *check, uint32_t seq)
-{
-	const struct in_addr *a4 = inany_v4(&conn->faddr);
-
-	if (a4) {
-		return tcp_fill_headers4(c, conn, iov[TCP_IOV_TAP].iov_base,
-					 iov[TCP_IOV_IP].iov_base,
-					 iov[TCP_IOV_PAYLOAD].iov_base, dlen,
-					 check, seq);
-	}
-
-	return tcp_fill_headers6(c, conn, iov[TCP_IOV_TAP].iov_base,
-				 iov[TCP_IOV_IP].iov_base,
-				 iov[TCP_IOV_PAYLOAD].iov_base, dlen,
-				 seq);
-}
-
 /**
  * tcp_update_seqack_wnd() - Update ACK sequence and window to guest/tap
  * @c:		Execution context
@@ -1646,8 +1615,17 @@ static int tcp_send_flag(struct ctx *c, struct tcp_tap_conn *conn, int flags)
 	th->syn = !!(flags & SYN);
 	th->fin = !!(flags & FIN);
 
-	l4len = tcp_l2_buf_fill_headers(c, conn, iov, optlen, NULL,
-					conn->seq_to_tap);
+	if (CONN_V4(conn)) {
+		l4len = tcp_fill_headers4(c, conn, iov[TCP_IOV_TAP].iov_base,
+					  iov[TCP_IOV_IP].iov_base,
+					  iov[TCP_IOV_PAYLOAD].iov_base, optlen,
+					  NULL, conn->seq_to_tap);
+	} else {
+		l4len = tcp_fill_headers6(c, conn, iov[TCP_IOV_TAP].iov_base,
+					  iov[TCP_IOV_IP].iov_base,
+					  iov[TCP_IOV_PAYLOAD].iov_base, optlen,
+					  conn->seq_to_tap);
+	}
 	iov[TCP_IOV_PAYLOAD].iov_len = l4len;
 
 	if (th->ack) {
@@ -2146,7 +2124,10 @@ static void tcp_data_to_tap(const struct ctx *c, struct tcp_tap_conn *conn,
 		tcp4_seq_update[tcp4_payload_used].len = dlen;
 
 		iov = tcp4_l2_iov[tcp4_payload_used++];
-		l4len = tcp_l2_buf_fill_headers(c, conn, iov, dlen, check, seq);
+		l4len = tcp_fill_headers4(c, conn, iov[TCP_IOV_TAP].iov_base,
+					  iov[TCP_IOV_IP].iov_base,
+					  iov[TCP_IOV_PAYLOAD].iov_base, dlen,
+					  check, seq);
 		iov[TCP_IOV_PAYLOAD].iov_len = l4len;
 		if (tcp4_payload_used > TCP_FRAMES_MEM - 1)
 			tcp_payload_flush(c);
@@ -2155,7 +2136,10 @@ static void tcp_data_to_tap(const struct ctx *c, struct tcp_tap_conn *conn,
 		tcp6_seq_update[tcp6_payload_used].len = dlen;
 
 		iov = tcp6_l2_iov[tcp6_payload_used++];
-		l4len = tcp_l2_buf_fill_headers(c, conn, iov, dlen, NULL, seq);
+		l4len = tcp_fill_headers6(c, conn, iov[TCP_IOV_TAP].iov_base,
+					  iov[TCP_IOV_IP].iov_base,
+					  iov[TCP_IOV_PAYLOAD].iov_base, dlen,
+					  seq);
 		iov[TCP_IOV_PAYLOAD].iov_len = l4len;
 		if (tcp6_payload_used > TCP_FRAMES_MEM - 1)
 			tcp_payload_flush(c);
-- 
2.44.0
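
For readers who want the shape of the change in isolation, here is a
minimal, self-contained sketch of the pattern the patch removes and
inlines. The struct and the filler functions below are simplified
stand-ins invented for illustration only; the real
tcp_fill_headers4()/tcp_fill_headers6() take the execution context,
connection and pre-cooked buffer pointers shown in the hunks above.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Simplified stand-in for a connection: only tracks the IP version. */
struct conn {
	bool v4;
};

/* Stand-in for tcp_fill_headers4(): the real one fills Ethernet, IPv4
 * and TCP headers in the pre-cooked buffer and computes checksums. */
static size_t fill_headers4(const struct conn *conn, size_t dlen)
{
	(void)conn;
	return dlen + 20;	/* L4 length: TCP header (no options) + payload */
}

/* Stand-in for tcp_fill_headers6(): same, but with an IPv6 header. */
static size_t fill_headers6(const struct conn *conn, size_t dlen)
{
	(void)conn;
	return dlen + 20;
}

/* Before the patch: a wrapper whose only job is to pick the filler
 * matching the connection's IP version, as tcp_l2_buf_fill_headers()
 * did via inany_v4(). */
static size_t fill_headers(const struct conn *conn, size_t dlen)
{
	if (conn->v4)
		return fill_headers4(conn, dlen);
	return fill_headers6(conn, dlen);
}

int main(void)
{
	struct conn c4 = { .v4 = true }, c6 = { .v4 = false };

	/* Old call sites: the dispatch is hidden behind the wrapper. */
	printf("wrapper: %zu %zu\n",
	       fill_headers(&c4, 100), fill_headers(&c6, 100));

	/* New call sites: callers that already know, or already branch
	 * on, the IP version call the right filler directly, as
	 * tcp_send_flag() and tcp_data_to_tap() do in the patch. */
	printf("inline:  %zu %zu\n",
	       fill_headers4(&c4, 100), fill_headers6(&c6, 100));

	return 0;
}

Dropping the wrapper keeps the IPv4 and IPv6 paths visible at each call
site, which is what makes it easier to later split the common code from
the buffer- and vhost-user-specific parts mentioned in the commit
message.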