Commit ad346c2e authored by Jean-Philippe Brucker's avatar Jean-Philippe Brucker Committed by Will Deacon
Browse files

virtio: Add exit_vq() callback



Virtio allows to reset individual virtqueues. For legacy devices, it's
done by writing an address of 0 into the PFN register. Modern devices have
an "enable" register. Add an exit_vq() callback to all devices. A lot more
work is required by each device to clean up their virtqueue state, and by
the core to reset things like MSI routes and ioeventfds.
Signed-off-by: Jean-Philippe Brucker <jean-philippe.brucker@arm.com>
Signed-off-by: Julien Thierry <julien.thierry@arm.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>
parent 53fbb17b
......@@ -51,6 +51,7 @@ struct virt_queue {
u16 last_used_signalled;
u16 endian;
bool use_event_idx;
bool enabled;
};
/*
......@@ -187,6 +188,7 @@ struct virtio_ops {
int (*get_vq_count)(struct kvm *kvm, void *dev);
int (*init_vq)(struct kvm *kvm, void *dev, u32 vq, u32 page_size,
u32 align, u32 pfn);
void (*exit_vq)(struct kvm *kvm, void *dev, u32 vq);
int (*notify_vq)(struct kvm *kvm, void *dev, u32 vq);
struct virt_queue *(*get_vq)(struct kvm *kvm, void *dev, u32 vq);
int (*get_size_vq)(struct kvm *kvm, void *dev, u32 vq);
......@@ -217,8 +219,11 @@ static inline void virtio_init_device_vq(struct virtio_device *vdev,
{
vq->endian = vdev->endian;
vq->use_event_idx = (vdev->features & VIRTIO_RING_F_EVENT_IDX);
vq->enabled = true;
}
void virtio_exit_vq(struct kvm *kvm, struct virtio_device *vdev, void *dev,
int num);
void virtio_set_guest_features(struct kvm *kvm, struct virtio_device *vdev,
void *dev, u32 features);
void virtio_notify_status(struct kvm *kvm, struct virtio_device *vdev,
......
......@@ -166,6 +166,16 @@ u16 virt_queue__get_inout_iov(struct kvm *kvm, struct virt_queue *queue,
return head;
}
void virtio_exit_vq(struct kvm *kvm, struct virtio_device *vdev,
void *dev, int num)
{
struct virt_queue *vq = vdev->ops->get_vq(kvm, dev, num);
if (vq->enabled && vdev->ops->exit_vq)
vdev->ops->exit_vq(kvm, dev, num);
memset(vq, 0, sizeof(*vq));
}
int virtio__get_dev_specific_field(int offset, bool msix, u32 *config_off)
{
if (msix) {
......
......@@ -79,6 +79,15 @@ int virtio_mmio_signal_vq(struct kvm *kvm, struct virtio_device *vdev, u32 vq)
return 0;
}
/*
 * Tear down one virtqueue of a virtio-mmio device: drop the ioeventfd
 * registered on the QUEUE_NOTIFY doorbell, then reset the queue state
 * through the common virtio_exit_vq() path.
 */
static void virtio_mmio_exit_vq(struct kvm *kvm, struct virtio_device *vdev,
				int vq)
{
	struct virtio_mmio *mmio = vdev->virtio;

	ioeventfd__del_event(mmio->addr + VIRTIO_MMIO_QUEUE_NOTIFY, vq);
	virtio_exit_vq(kvm, vdev, mmio->dev, vq);
}
int virtio_mmio_signal_config(struct kvm *kvm, struct virtio_device *vdev)
{
struct virtio_mmio *vmmio = vdev->virtio;
......@@ -188,12 +197,17 @@ static void virtio_mmio_config_out(struct kvm_cpu *vcpu,
break;
case VIRTIO_MMIO_QUEUE_PFN:
val = ioport__read32(data);
virtio_mmio_init_ioeventfd(vmmio->kvm, vdev, vmmio->hdr.queue_sel);
vdev->ops->init_vq(vmmio->kvm, vmmio->dev,
vmmio->hdr.queue_sel,
vmmio->hdr.guest_page_size,
vmmio->hdr.queue_align,
val);
if (val) {
virtio_mmio_init_ioeventfd(vmmio->kvm, vdev,
vmmio->hdr.queue_sel);
vdev->ops->init_vq(vmmio->kvm, vmmio->dev,
vmmio->hdr.queue_sel,
vmmio->hdr.guest_page_size,
vmmio->hdr.queue_align,
val);
} else {
virtio_mmio_exit_vq(kvm, vdev, vmmio->hdr.queue_sel);
}
break;
case VIRTIO_MMIO_QUEUE_NOTIFY:
val = ioport__read32(data);
......
......@@ -72,6 +72,16 @@ free_ioport_evt:
return r;
}
/*
 * Tear down one virtqueue of a virtio-pci device. The notify doorbell is
 * reachable both through the PIO BAR and the MMIO BAR, so remove the
 * ioeventfd from each before resetting the queue via the common path.
 */
static void virtio_pci_exit_vq(struct kvm *kvm, struct virtio_device *vdev,
			       int vq)
{
	struct virtio_pci *pci = vdev->virtio;

	ioeventfd__del_event(pci->port_addr + VIRTIO_PCI_QUEUE_NOTIFY, vq);
	ioeventfd__del_event(pci->mmio_addr + VIRTIO_PCI_QUEUE_NOTIFY, vq);
	virtio_exit_vq(kvm, vdev, pci->dev, vq);
}
static inline bool virtio_pci__msix_enabled(struct virtio_pci *vpci)
{
return vpci->pci_hdr.msix.ctrl & cpu_to_le16(PCI_MSIX_FLAGS_ENABLE);
......@@ -270,10 +280,15 @@ static bool virtio_pci__io_out(struct ioport *ioport, struct kvm_cpu *vcpu, u16
break;
case VIRTIO_PCI_QUEUE_PFN:
val = ioport__read32(data);
virtio_pci__init_ioeventfd(kvm, vdev, vpci->queue_selector);
vdev->ops->init_vq(kvm, vpci->dev, vpci->queue_selector,
1 << VIRTIO_PCI_QUEUE_ADDR_SHIFT,
VIRTIO_PCI_VRING_ALIGN, val);
if (val) {
virtio_pci__init_ioeventfd(kvm, vdev,
vpci->queue_selector);
vdev->ops->init_vq(kvm, vpci->dev, vpci->queue_selector,
1 << VIRTIO_PCI_QUEUE_ADDR_SHIFT,
VIRTIO_PCI_VRING_ALIGN, val);
} else {
virtio_pci_exit_vq(kvm, vdev, vpci->queue_selector);
}
break;
case VIRTIO_PCI_QUEUE_SEL:
vpci->queue_selector = ioport__read16(data);
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment.