Commit 3fea89a9 authored by Will Deacon
Browse files

kvmtool: virtio-net: fix VIRTIO_NET_F_MRG_RXBUF usage in rx thread



When merging virtio-net buffers using the VIRTIO_NET_F_MRG_RXBUF feature,
the first buffer added to the used ring should indicate the total number
of buffers used to hold the packet. Unfortunately, kvmtool has a number
of issues when constructing these merged buffers:

  - Commit 5131332e3f1a ("kvmtool: convert net backend to support
    bi-endianness") introduced a strange loop counter, which resulted in
    hdr->num_buffers being set redundantly the first time round

  - When adding the buffers to the ring, we actually add them one-by-one,
    allowing the guest to see the header before we've inserted the rest
    of the data buffers...

  - ... which is made worse because we non-atomically increment the
    num_buffers count in the header each time we insert a new data buffer

Consequently, the guest quickly becomes confused in its net rx code and
the whole thing grinds to a halt. This is easily exemplified by trying
to boot a root filesystem over NFS, which seldom succeeds.

This patch resolves the issues by allowing us to insert items into the
used ring without updating the index. Once the full payload has been
added and num_buffers corresponds to the total size, we *then* publish
the buffers to the guest.

Cc: Marc Zyngier <marc.zyngier@arm.com>
Cc: Sasha Levin <sasha.levin@oracle.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>
parent 9a8af7e3
......@@ -112,6 +112,8 @@ static inline bool virt_queue__available(struct virt_queue *vq)
return virtio_guest_to_host_u16(vq, vq->vring.avail->idx) != vq->last_avail_idx;
}
void virt_queue__used_idx_advance(struct virt_queue *queue, u16 jump);
struct vring_used_elem * virt_queue__set_used_elem_no_update(struct virt_queue *queue, u32 head, u32 len, u16 offset);
struct vring_used_elem *virt_queue__set_used_elem(struct virt_queue *queue, u32 head, u32 len);
bool virtio_queue__should_signal(struct virt_queue *vq);
......
......@@ -21,22 +21,17 @@ const char* virtio_trans_name(enum virtio_trans trans)
return "unknown";
}
struct vring_used_elem *virt_queue__set_used_elem(struct virt_queue *queue, u32 head, u32 len)
void virt_queue__used_idx_advance(struct virt_queue *queue, u16 jump)
{
struct vring_used_elem *used_elem;
u16 idx = virtio_guest_to_host_u16(queue, queue->vring.used->idx);
used_elem = &queue->vring.used->ring[idx % queue->vring.num];
used_elem->id = virtio_host_to_guest_u32(queue, head);
used_elem->len = virtio_host_to_guest_u32(queue, len);
/*
* Use wmb to assure that used elem was updated with head and len.
* We need a wmb here since we can't advance idx unless we're ready
* to pass the used element to the guest.
*/
wmb();
idx++;
idx += jump;
queue->vring.used->idx = virtio_host_to_guest_u16(queue, idx);
/*
......@@ -45,6 +40,29 @@ struct vring_used_elem *virt_queue__set_used_elem(struct virt_queue *queue, u32
* an updated idx.
*/
wmb();
}
/*
 * Write a used-ring element 'offset' slots past the current used index
 * WITHOUT publishing it: the guest-visible used->idx is left untouched,
 * so the entry only becomes observable once the caller advances the
 * index (see virt_queue__used_idx_advance). Returns the element written.
 */
struct vring_used_elem *
virt_queue__set_used_elem_no_update(struct virt_queue *queue, u32 head,
				    u32 len, u16 offset)
{
	u16 slot;
	struct vring_used_elem *elem;

	slot = virtio_guest_to_host_u16(queue, queue->vring.used->idx) + offset;
	elem = &queue->vring.used->ring[slot % queue->vring.num];
	elem->id = virtio_host_to_guest_u32(queue, head);
	elem->len = virtio_host_to_guest_u32(queue, len);

	return elem;
}
/*
 * Add a single used element and publish it to the guest immediately:
 * the no-update variant writes the entry at the current index, then the
 * used index is advanced by one to make it visible.
 */
struct vring_used_elem *virt_queue__set_used_elem(struct virt_queue *queue, u32 head, u32 len)
{
	struct vring_used_elem *elem =
		virt_queue__set_used_elem_no_update(queue, head, len, 0);

	virt_queue__used_idx_advance(queue, 1);

	return elem;
}
......
......@@ -80,14 +80,12 @@ static void virtio_net_fix_tx_hdr(struct virtio_net_hdr *hdr, struct net_dev *nd
hdr->csum_offset = virtio_guest_to_host_u16(&ndev->vdev, hdr->csum_offset);
}
static void virtio_net_fix_rx_hdr(struct virtio_net_hdr_mrg_rxbuf *hdr, struct net_dev *ndev)
static void virtio_net_fix_rx_hdr(struct virtio_net_hdr *hdr, struct net_dev *ndev)
{
hdr->hdr.hdr_len = virtio_host_to_guest_u16(&ndev->vdev, hdr->hdr.hdr_len);
hdr->hdr.gso_size = virtio_host_to_guest_u16(&ndev->vdev, hdr->hdr.gso_size);
hdr->hdr.csum_start = virtio_host_to_guest_u16(&ndev->vdev, hdr->hdr.csum_start);
hdr->hdr.csum_offset = virtio_host_to_guest_u16(&ndev->vdev, hdr->hdr.csum_offset);
if (has_virtio_feature(ndev, VIRTIO_NET_F_MRG_RXBUF))
hdr->num_buffers = virtio_host_to_guest_u16(&ndev->vdev, hdr->num_buffers);
hdr->hdr_len = virtio_host_to_guest_u16(&ndev->vdev, hdr->hdr_len);
hdr->gso_size = virtio_host_to_guest_u16(&ndev->vdev, hdr->gso_size);
hdr->csum_start = virtio_host_to_guest_u16(&ndev->vdev, hdr->csum_start);
hdr->csum_offset = virtio_host_to_guest_u16(&ndev->vdev, hdr->csum_offset);
}
static void *virtio_net_rx_thread(void *p)
......@@ -123,7 +121,7 @@ static void *virtio_net_rx_thread(void *p)
.iov_len = sizeof(buffer),
};
struct virtio_net_hdr_mrg_rxbuf *hdr;
int i;
u16 num_buffers;
len = ndev->ops->rx(&dummy_iov, 1, ndev);
if (len < 0) {
......@@ -132,7 +130,7 @@ static void *virtio_net_rx_thread(void *p)
goto out_err;
}
copied = i = 0;
copied = num_buffers = 0;
head = virt_queue__get_iov(vq, iov, &out, &in, kvm);
hdr = iov[0].iov_base;
while (copied < len) {
......@@ -140,19 +138,20 @@ static void *virtio_net_rx_thread(void *p)
memcpy_toiovec(iov, buffer + copied, iovsize);
copied += iovsize;
if (i++ == 0)
virtio_net_fix_rx_hdr(hdr, ndev);
if (has_virtio_feature(ndev, VIRTIO_NET_F_MRG_RXBUF)) {
u16 num_buffers = virtio_guest_to_host_u16(vq, hdr->num_buffers);
hdr->num_buffers = virtio_host_to_guest_u16(vq, num_buffers + 1);
}
virt_queue__set_used_elem(vq, head, iovsize);
virt_queue__set_used_elem_no_update(vq, head, iovsize, num_buffers++);
if (copied == len)
break;
while (!virt_queue__available(vq))
sleep(0);
head = virt_queue__get_iov(vq, iov, &out, &in, kvm);
}
virtio_net_fix_rx_hdr(&hdr->hdr, ndev);
if (has_virtio_feature(ndev, VIRTIO_NET_F_MRG_RXBUF))
hdr->num_buffers = virtio_host_to_guest_u16(vq, num_buffers);
virt_queue__used_idx_advance(vq, num_buffers);
/* We should interrupt guest right now, otherwise latency is huge. */
if (virtio_queue__should_signal(vq))
ndev->vdev.ops->signal_vq(kvm, &ndev->vdev, id);
......
Supports Markdown
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment