#ifndef KVM__VIRTIO_H
#define KVM__VIRTIO_H

#include <endian.h>

#include <linux/virtio_ring.h>
#include <linux/virtio_pci.h>

#include <linux/types.h>
#include <linux/virtio_config.h>
#include <sys/uio.h>

#include "kvm/barrier.h"
#include "kvm/kvm.h"

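/* Line levels for pin-based interrupt injection (see kvm__irq_line()). */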
#define VIRTIO_IRQ_LOW		0
#define VIRTIO_IRQ_HIGH		1

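/*
 * Values returned by virtio__get_dev_specific_field() to classify a
 * config-space offset: MSI-X vector area or device-specific configuration.
 */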
#define VIRTIO_PCI_O_CONFIG	0
#define VIRTIO_PCI_O_MSIX	1

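/* Endianness of the guest vring, from the host's point of view. */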
#define VIRTIO_ENDIAN_LE	(1 << 0)
#define VIRTIO_ENDIAN_BE	(1 << 1)

#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
#define VIRTIO_ENDIAN_HOST VIRTIO_ENDIAN_LE
#else
#define VIRTIO_ENDIAN_HOST VIRTIO_ENDIAN_BE
#endif

/* Reserved status bits */
#define VIRTIO_CONFIG_S_MASK \
	(VIRTIO_CONFIG_S_ACKNOWLEDGE |	\
	 VIRTIO_CONFIG_S_DRIVER |	\
	 VIRTIO_CONFIG_S_DRIVER_OK |	\
	 VIRTIO_CONFIG_S_FEATURES_OK |	\
	 VIRTIO_CONFIG_S_FAILED)

/* Kvmtool-internal status bits, kept above the guest-visible 8-bit status */
/* Start the device */
#define VIRTIO__STATUS_START		(1 << 8)
/* Stop the device */
#define VIRTIO__STATUS_STOP		(1 << 9)

struct virt_queue {
	struct vring	vring;
	u32		pfn;		/* guest page frame number of the ring */
	/* The last_avail_idx field is an index into the ->ring array of
	   struct vring_avail. It's where we expect the next request
	   index to be. */
	u16		last_avail_idx;
	u16		last_used_signalled;	/* used idx at last guest signal */
	u16		endian;		/* VIRTIO_ENDIAN_LE or VIRTIO_ENDIAN_BE */
	bool		use_event_idx;	/* VIRTIO_RING_F_EVENT_IDX negotiated */
	bool		enabled;
};

/*
 * The default policy is not to cope with a guest endianness that differs
 * from the host's. This also avoids breaking architectures that have no
 * need to support such a configuration.
 */
#ifndef VIRTIO_RING_ENDIAN
#define VIRTIO_RING_ENDIAN VIRTIO_ENDIAN_HOST
#endif

#if VIRTIO_RING_ENDIAN != VIRTIO_ENDIAN_HOST

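/*
 * Accessors converting 16/32/64-bit values between host endianness and
 * the guest ring endianness recorded in vq->endian.
 */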
static inline __u16 __virtio_g2h_u16(u16 endian, __u16 val)
{
	return (endian == VIRTIO_ENDIAN_LE) ? le16toh(val) : be16toh(val);
}

static inline __u16 __virtio_h2g_u16(u16 endian, __u16 val)
{
	return (endian == VIRTIO_ENDIAN_LE) ? htole16(val) : htobe16(val);
}

static inline __u32 __virtio_g2h_u32(u16 endian, __u32 val)
{
	return (endian == VIRTIO_ENDIAN_LE) ? le32toh(val) : be32toh(val);
}

static inline __u32 __virtio_h2g_u32(u16 endian, __u32 val)
{
	return (endian == VIRTIO_ENDIAN_LE) ? htole32(val) : htobe32(val);
}

static inline __u64 __virtio_g2h_u64(u16 endian, __u64 val)
{
	return (endian == VIRTIO_ENDIAN_LE) ? le64toh(val) : be64toh(val);
}

static inline __u64 __virtio_h2g_u64(u16 endian, __u64 val)
{
	return (endian == VIRTIO_ENDIAN_LE) ? htole64(val) : htobe64(val);
}

#define virtio_guest_to_host_u16(x, v)	__virtio_g2h_u16((x)->endian, (v))
#define virtio_host_to_guest_u16(x, v)	__virtio_h2g_u16((x)->endian, (v))
#define virtio_guest_to_host_u32(x, v)	__virtio_g2h_u32((x)->endian, (v))
#define virtio_host_to_guest_u32(x, v)	__virtio_h2g_u32((x)->endian, (v))
#define virtio_guest_to_host_u64(x, v)	__virtio_g2h_u64((x)->endian, (v))
#define virtio_host_to_guest_u64(x, v)	__virtio_h2g_u64((x)->endian, (v))

#else

#define virtio_guest_to_host_u16(x, v)	(v)
#define virtio_host_to_guest_u16(x, v)	(v)
#define virtio_guest_to_host_u32(x, v)	(v)
#define virtio_host_to_guest_u32(x, v)	(v)
#define virtio_guest_to_host_u64(x, v)	(v)
#define virtio_host_to_guest_u64(x, v)	(v)

#endif

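/*
 * Pop the next available descriptor head index from the avail ring,
 * advancing last_avail_idx. Callers should check virt_queue__available()
 * first; this function does not check for an empty ring.
 */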
static inline u16 virt_queue__pop(struct virt_queue *queue)
{
	__u16 guest_idx;

	/*
	 * The guest updates the avail index after writing the ring entry.
	 * Ensure that we read the updated entry once virt_queue__available()
	 * observes the new index.
	 */
	rmb();

	guest_idx = queue->vring.avail->ring[queue->last_avail_idx++ % queue->vring.num];
	return virtio_guest_to_host_u16(queue, guest_idx);
}

static inline struct vring_desc *virt_queue__get_desc(struct virt_queue *queue, u16 desc_ndx)
{
	return &queue->vring.desc[desc_ndx];
}

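/*
 * Returns true when the guest has made new buffers available. When
 * VIRTIO_RING_F_EVENT_IDX has been negotiated, this also publishes
 * last_avail_idx in the avail event field, telling the driver when the
 * device next needs to be notified.
 */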
static inline bool virt_queue__available(struct virt_queue *vq)
{
	u16 last_avail_idx = virtio_host_to_guest_u16(vq, vq->last_avail_idx);

	if (!vq->vring.avail)
		return 0;

	if (vq->use_event_idx) {
		vring_avail_event(&vq->vring) = last_avail_idx;
		/*
		 * After the driver writes a new avail index, it reads the event
		 * index to see if we need any notification. Ensure that it
		 * reads the updated index, or else we'll miss the notification.
		 */
		mb();
	}

	return vq->vring.avail->idx != last_avail_idx;
}

void virt_queue__used_idx_advance(struct virt_queue *queue, u16 jump);
struct vring_used_elem *virt_queue__set_used_elem_no_update(struct virt_queue *queue, u32 head, u32 len, u16 offset);
struct vring_used_elem *virt_queue__set_used_elem(struct virt_queue *queue, u32 head, u32 len);

bool virtio_queue__should_signal(struct virt_queue *vq);
u16 virt_queue__get_iov(struct virt_queue *vq, struct iovec iov[],
			u16 *out, u16 *in, struct kvm *kvm);
u16 virt_queue__get_head_iov(struct virt_queue *vq, struct iovec iov[],
			     u16 *out, u16 *in, u16 head, struct kvm *kvm);
u16 virt_queue__get_inout_iov(struct kvm *kvm, struct virt_queue *queue,
			      struct iovec in_iov[], struct iovec out_iov[],
			      u16 *in, u16 *out);
int virtio__get_dev_specific_field(int offset, bool msix, u32 *config_off);
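/*
 * Typical device-side request handling, e.g. from a notify_vq callback
 * (a sketch: "vq_id" and "process()" are illustrative placeholders, and
 * error handling is omitted):
 *
 *	u16 head, out, in;
 *
 *	while (virt_queue__available(vq)) {
 *		head = virt_queue__get_iov(vq, iov, &out, &in, kvm);
 *		len = process(iov, out, in);
 *		virt_queue__set_used_elem(vq, head, len);
 *	}
 *	if (virtio_queue__should_signal(vq))
 *		vdev->ops->signal_vq(kvm, vdev, vq_id);
 */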

enum virtio_trans {
	VIRTIO_PCI,
	VIRTIO_MMIO,
};

struct virtio_device {
	bool			use_vhost;
	void			*virtio;	/* transport layer data (PCI or MMIO) */
	struct virtio_ops	*ops;
	u16			endian;
	u32			features;
	u32			status;
};

struct virtio_ops {
	u8 *(*get_config)(struct kvm *kvm, void *dev);
	u32 (*get_host_features)(struct kvm *kvm, void *dev);
	void (*set_guest_features)(struct kvm *kvm, void *dev, u32 features);
	int (*get_vq_count)(struct kvm *kvm, void *dev);
	int (*init_vq)(struct kvm *kvm, void *dev, u32 vq, u32 page_size,
		       u32 align, u32 pfn);
	void (*exit_vq)(struct kvm *kvm, void *dev, u32 vq);
	int (*notify_vq)(struct kvm *kvm, void *dev, u32 vq);
	struct virt_queue *(*get_vq)(struct kvm *kvm, void *dev, u32 vq);
	int (*get_size_vq)(struct kvm *kvm, void *dev, u32 vq);
	int (*set_size_vq)(struct kvm *kvm, void *dev, u32 vq, int size);
	void (*notify_vq_gsi)(struct kvm *kvm, void *dev, u32 vq, u32 gsi);
	void (*notify_vq_eventfd)(struct kvm *kvm, void *dev, u32 vq, u32 efd);
	int (*signal_vq)(struct kvm *kvm, struct virtio_device *vdev, u32 queueid);
	int (*signal_config)(struct kvm *kvm, struct virtio_device *vdev);
	void (*notify_status)(struct kvm *kvm, void *dev, u32 status);
	int (*init)(struct kvm *kvm, void *dev, struct virtio_device *vdev,
		    int device_id, int subsys_id, int class);
	int (*exit)(struct kvm *kvm, struct virtio_device *vdev);
};

int virtio_init(struct kvm *kvm, void *dev, struct virtio_device *vdev,
		struct virtio_ops *ops, enum virtio_trans trans,
		int device_id, int subsys_id, int class);
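/*
 * Registering a device (a sketch: the struct, callback and ID names are
 * illustrative, not taken from a real device):
 *
 *	static struct virtio_ops foo_ops = {
 *		.get_config		= foo_get_config,
 *		.get_host_features	= foo_get_host_features,
 *		...
 *	};
 *
 *	virtio_init(kvm, &fdev, &fdev.vdev, &foo_ops, VIRTIO_PCI,
 *		    VIRTIO_ID_FOO, SUBSYS_ID_FOO, PCI_CLASS_FOO);
 */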
int virtio_compat_add_message(const char *device, const char *config);
const char *virtio_trans_name(enum virtio_trans trans);

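/*
 * Translate the ring's guest page frame number, as written by the driver,
 * into a host virtual address.
 */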
static inline void *virtio_get_vq(struct kvm *kvm, u32 pfn, u32 page_size)
{
	return guest_flat_to_host(kvm, (u64)pfn * page_size);
}

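/*
 * Propagate device-wide settings (endianness, negotiated event index) to a
 * queue, typically from the transport's init_vq implementation.
 */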
static inline void virtio_init_device_vq(struct virtio_device *vdev,
					 struct virt_queue *vq)
{
	vq->endian = vdev->endian;
	vq->use_event_idx = (vdev->features & VIRTIO_RING_F_EVENT_IDX);
	vq->enabled = true;
}

void virtio_exit_vq(struct kvm *kvm, struct virtio_device *vdev, void *dev,
		    int num);
void virtio_set_guest_features(struct kvm *kvm, struct virtio_device *vdev,
			       void *dev, u32 features);
void virtio_notify_status(struct kvm *kvm, struct virtio_device *vdev,
			  void *dev, u8 status);

#endif /* KVM__VIRTIO_H */