Commit 7f5ffaf5 authored by Asias He, committed by Will Deacon

kvm tools: Respect ISR status in virtio header



Inject an IRQ into the guest only when the ISR status bit is low, which
means the guest has read the ISR status register and the device has
cleared the bit as a side effect of that read.

This avoids a lot of unnecessary IRQ injections from the device into
the guest.

Netperf tests show that this patch changes:

the host-to-guest bandwidth
from 2866.27 Mbps (cpu 33.96%) to 5548.87 Mbps (cpu 53.87%),

the guest-to-host bandwidth
from 1408.86 Mbps (cpu 99.9%) to 1301.29 Mbps (cpu 99.9%).

The bottleneck for the guest-to-host bandwidth is guest CPU power.
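
To make the handshake concrete, here is a small standalone sketch of the
gating this patch introduces. It is not kvmtool code: device_isr,
trigger_irq, raise_irq_line and guest_read_isr are illustrative stand-ins
for the patched virt_queue__trigger_irq(), kvm__irq_line() and the
VIRTIO_PCI_ISR read handler.

/*
 * Standalone sketch of the ISR-gated interrupt injection described above.
 * All names here are illustrative stand-ins, not kvmtool APIs.
 */
#include <stdio.h>
#include <stdint.h>

#define VIRTIO_IRQ_LOW	0
#define VIRTIO_IRQ_HIGH	1

static uint8_t device_isr = VIRTIO_IRQ_LOW;

/* Stand-in for kvm__irq_line(): just log the interrupt line change. */
static void raise_irq_line(int level)
{
	printf("irq line -> %d\n", level);
}

/*
 * Device side, mirroring virt_queue__trigger_irq(): only inject when the
 * guest has already consumed (and thereby cleared) the previous ISR bit.
 * While the bit is still high, further triggers are coalesced.
 */
static void trigger_irq(void)
{
	if (device_isr == VIRTIO_IRQ_LOW) {
		device_isr = VIRTIO_IRQ_HIGH;
		raise_irq_line(VIRTIO_IRQ_HIGH);
	}
}

/*
 * Guest side, mirroring the VIRTIO_PCI_ISR read handler: reading ISR
 * returns its value, deasserts the interrupt line and clears the bit,
 * which re-arms the device for the next injection.
 */
static uint8_t guest_read_isr(void)
{
	uint8_t val = device_isr;

	raise_irq_line(VIRTIO_IRQ_LOW);
	device_isr = VIRTIO_IRQ_LOW;

	return val;
}

int main(void)
{
	trigger_irq();	/* first buffer: injects an interrupt       */
	trigger_irq();	/* coalesced, ISR bit is still high         */
	trigger_irq();	/* coalesced as well                        */
	printf("ISR = %u\n", guest_read_isr());	/* guest handles it */
	trigger_irq();	/* ISR is low again, so a new injection     */

	return 0;
}

Three back-to-back triggers produce a single injection; only after the
guest reads ISR does the next trigger assert the line again, which is
where the saved injections in the numbers above come from.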

Signed-off-by: Asias He <asias.hejun@gmail.com>
Signed-off-by: Pekka Enberg <penberg@kernel.org>
parent 31638bca
@@ -8,6 +8,9 @@
 #include "kvm/kvm.h"
 
+#define VIRTIO_IRQ_LOW		0
+#define VIRTIO_IRQ_HIGH		1
+
 struct virt_queue {
 	struct vring	vring;
 	u32		pfn;
@@ -37,4 +40,6 @@ struct vring_used_elem *virt_queue__set_used_elem(struct virt_queue *queue, u32
 u16 virt_queue__get_iov(struct virt_queue *queue, struct iovec iov[], u16 *out, u16 *in, struct kvm *kvm);
+void virt_queue__trigger_irq(struct virt_queue *vq, int irq, u8 *isr, struct kvm *kvm);
 
 #endif /* KVM__VIRTIO_H */
@@ -57,3 +57,11 @@ u16 virt_queue__get_iov(struct virt_queue *queue, struct iovec iov[], u16 *out,
 
 	return head;
 }
+
+void virt_queue__trigger_irq(struct virt_queue *vq, int irq, u8 *isr, struct kvm *kvm)
+{
+	if (*isr == VIRTIO_IRQ_LOW) {
+		*isr = VIRTIO_IRQ_HIGH;
+		kvm__irq_line(kvm, irq, VIRTIO_IRQ_HIGH);
+	}
+}
@@ -35,6 +35,7 @@ struct net_device {
 	u32			guest_features;
 	u16			config_vector;
 	u8			status;
+	u8			isr;
 	u16			queue_selector;
 
 	pthread_t		io_rx_thread;
@@ -88,8 +89,9 @@ static void *virtio_net_rx_thread(void *p)
 			head = virt_queue__get_iov(vq, iov, &out, &in, self);
 			len = readv(net_device.tap_fd, iov, in);
 			virt_queue__set_used_elem(vq, head, len);
 
 			/* We should interrupt guest right now, otherwise latency is huge. */
-			kvm__irq_line(self, VIRTIO_NET_IRQ, 1);
+			virt_queue__trigger_irq(vq, VIRTIO_NET_IRQ, &net_device.isr, self);
 		}
 	}
@@ -123,7 +125,8 @@ static void *virtio_net_tx_thread(void *p)
 			virt_queue__set_used_elem(vq, head, len);
 		}
 
-		kvm__irq_line(self, VIRTIO_NET_IRQ, 1);
+		virt_queue__trigger_irq(vq, VIRTIO_NET_IRQ, &net_device.isr, self);
 	}
 
 	pthread_exit(NULL);
@@ -175,8 +178,9 @@ static bool virtio_net_pci_io_in(struct kvm *self, u16 port, void *data, int siz
 		ioport__write8(data, net_device.status);
 		break;
 	case VIRTIO_PCI_ISR:
-		ioport__write8(data, 0x1);
-		kvm__irq_line(self, VIRTIO_NET_IRQ, 0);
+		ioport__write8(data, net_device.isr);
+		kvm__irq_line(self, VIRTIO_NET_IRQ, VIRTIO_IRQ_LOW);
+		net_device.isr = VIRTIO_IRQ_LOW;
 		break;
 	case VIRTIO_MSI_CONFIG_VECTOR:
 		ioport__write16(data, net_device.config_vector);