#include "kvm/devices.h"
#include "kvm/virtio-mmio.h"
#include "kvm/ioeventfd.h"
#include "kvm/ioport.h"
#include "kvm/virtio.h"
#include "kvm/kvm.h"
#include "kvm/kvm-cpu.h"
#include "kvm/irq.h"
#include "kvm/fdt.h"

#include <linux/virtio_mmio.h>
#include <string.h>

/* Next free address inside the guest-physical virtio-mmio window. */
static u32 virtio_mmio_io_space_blocks = KVM_VIRTIO_MMIO_AREA;

/*
 * Carve the next @size bytes out of the virtio-mmio I/O window and
 * return the base address of the newly allocated slot.
 */
static u32 virtio_mmio_get_io_space_block(u32 size)
{
	u32 base = virtio_mmio_io_space_blocks;

	virtio_mmio_io_space_blocks = base + size;
	return base;
}

/*
 * Fired when the guest kicks VIRTIO_MMIO_QUEUE_NOTIFY via the
 * ioeventfd; forwards the kick to the device's queue handler.
 */
static void virtio_mmio_ioevent_callback(struct kvm *kvm, void *param)
{
	struct virtio_mmio_ioevent_param *ioev = param;
	struct virtio_mmio *vmmio = ioev->vdev->virtio;

	ioev->vdev->ops->notify_vq(kvm, vmmio->dev, ioev->vq);
}

/*
 * Wire an eventfd to guest writes of @vq into the QUEUE_NOTIFY
 * register, so queue kicks bypass the slow MMIO exit path.
 * Returns 0 on success or a negative error from ioeventfd__add_event().
 */
static int virtio_mmio_init_ioeventfd(struct kvm *kvm,
				      struct virtio_device *vdev, u32 vq)
{
	struct virtio_mmio *vmmio = vdev->virtio;
	struct ioevent ioevent;
	int flags, err;

	vmmio->ioeventfds[vq] = (struct virtio_mmio_ioevent_param) {
		.vdev		= vdev,
		.vq		= vq,
	};

	ioevent = (struct ioevent) {
		.io_addr	= vmmio->addr + VIRTIO_MMIO_QUEUE_NOTIFY,
		.io_len		= sizeof(u32),
		.fn		= virtio_mmio_ioevent_callback,
		.fn_ptr		= &vmmio->ioeventfds[vq],
		.datamatch	= vq,
		.fn_kvm		= kvm,
		.fd		= eventfd(0, 0),
	};

	/*
	 * With vhost the eventfd is polled by the host kernel, so no
	 * userspace polling is needed; otherwise we must poll it ourselves.
	 */
	flags = vdev->use_vhost ? 0 : IOEVENTFD_FLAG_USER_POLL;

	err = ioeventfd__add_event(&ioevent, flags);
	if (err)
		return err;

	/* Let devices that care (e.g. vhost backends) learn the fd. */
	if (vdev->ops->notify_vq_eventfd)
		vdev->ops->notify_vq_eventfd(kvm, vmmio->dev, vq, ioevent.fd);

	return 0;
}

/* Raise a used-ring interrupt for @vq towards the guest. */
int virtio_mmio_signal_vq(struct kvm *kvm, struct virtio_device *vdev, u32 vq)
{
	struct virtio_mmio *vmmio = vdev->virtio;

	/* Latch the vring bit, then pulse the device's IRQ line. */
	vmmio->hdr.interrupt_state |= VIRTIO_MMIO_INT_VRING;
	kvm__irq_trigger(vmmio->kvm, vmmio->irq);

	return 0;
}

82
83
84
85
86
87
88
89
90
static void virtio_mmio_exit_vq(struct kvm *kvm, struct virtio_device *vdev,
				int vq)
{
	struct virtio_mmio *vmmio = vdev->virtio;

	ioeventfd__del_event(vmmio->addr + VIRTIO_MMIO_QUEUE_NOTIFY, vq);
	virtio_exit_vq(kvm, vdev, vmmio->dev, vq);
}

Asias He's avatar
Asias He committed
91
92
93
94
95
96
97
98
99
100
int virtio_mmio_signal_config(struct kvm *kvm, struct virtio_device *vdev)
{
	struct virtio_mmio *vmmio = vdev->virtio;

	vmmio->hdr.interrupt_state |= VIRTIO_MMIO_INT_CONFIG;
	kvm__irq_trigger(vmmio->kvm, vmmio->irq);

	return 0;
}

101
102
static void virtio_mmio_device_specific(struct kvm_cpu *vcpu,
					u64 addr, u8 *data, u32 len,
Asias He's avatar
Asias He committed
103
104
105
106
107
108
109
					u8 is_write, struct virtio_device *vdev)
{
	struct virtio_mmio *vmmio = vdev->virtio;
	u32 i;

	for (i = 0; i < len; i++) {
		if (is_write)
110
111
			vdev->ops->get_config(vmmio->kvm, vmmio->dev)[addr + i] =
					      *(u8 *)data + i;
Asias He's avatar
Asias He committed
112
113
		else
			data[i] = vdev->ops->get_config(vmmio->kvm,
114
							vmmio->dev)[addr + i];
Asias He's avatar
Asias He committed
115
116
117
	}
}

118
119
static void virtio_mmio_config_in(struct kvm_cpu *vcpu,
				  u64 addr, void *data, u32 len,
Asias He's avatar
Asias He committed
120
121
122
				  struct virtio_device *vdev)
{
	struct virtio_mmio *vmmio = vdev->virtio;
123
	struct virt_queue *vq;
Asias He's avatar
Asias He committed
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
	u32 val = 0;

	switch (addr) {
	case VIRTIO_MMIO_MAGIC_VALUE:
	case VIRTIO_MMIO_VERSION:
	case VIRTIO_MMIO_DEVICE_ID:
	case VIRTIO_MMIO_VENDOR_ID:
	case VIRTIO_MMIO_STATUS:
	case VIRTIO_MMIO_INTERRUPT_STATUS:
		ioport__write32(data, *(u32 *)(((void *)&vmmio->hdr) + addr));
		break;
	case VIRTIO_MMIO_HOST_FEATURES:
		if (vmmio->hdr.host_features_sel == 0)
			val = vdev->ops->get_host_features(vmmio->kvm,
							   vmmio->dev);
		ioport__write32(data, val);
		break;
	case VIRTIO_MMIO_QUEUE_PFN:
142
143
144
		vq = vdev->ops->get_vq(vmmio->kvm, vmmio->dev,
				       vmmio->hdr.queue_sel);
		ioport__write32(data, vq->pfn);
Asias He's avatar
Asias He committed
145
146
147
148
149
150
151
152
153
154
155
		break;
	case VIRTIO_MMIO_QUEUE_NUM_MAX:
		val = vdev->ops->get_size_vq(vmmio->kvm, vmmio->dev,
					     vmmio->hdr.queue_sel);
		ioport__write32(data, val);
		break;
	default:
		break;
	}
}

156
157
static void virtio_mmio_config_out(struct kvm_cpu *vcpu,
				   u64 addr, void *data, u32 len,
Asias He's avatar
Asias He committed
158
159
160
				   struct virtio_device *vdev)
{
	struct virtio_mmio *vmmio = vdev->virtio;
161
	struct kvm *kvm = vmmio->kvm;
Asias He's avatar
Asias He committed
162
163
164
165
166
167
168
169
170
	u32 val = 0;

	switch (addr) {
	case VIRTIO_MMIO_HOST_FEATURES_SEL:
	case VIRTIO_MMIO_GUEST_FEATURES_SEL:
	case VIRTIO_MMIO_QUEUE_SEL:
		val = ioport__read32(data);
		*(u32 *)(((void *)&vmmio->hdr) + addr) = val;
		break;
171
172
	case VIRTIO_MMIO_STATUS:
		vmmio->hdr.status = ioport__read32(data);
173
174
		if (!vmmio->hdr.status) /* Sample endianness on reset */
			vdev->endian = kvm_cpu__get_endianness(vcpu);
175
		virtio_notify_status(kvm, vdev, vmmio->dev, vmmio->hdr.status);
176
		break;
Asias He's avatar
Asias He committed
177
178
179
	case VIRTIO_MMIO_GUEST_FEATURES:
		if (vmmio->hdr.guest_features_sel == 0) {
			val = ioport__read32(data);
180
181
			virtio_set_guest_features(vmmio->kvm, vdev,
						  vmmio->dev, val);
Asias He's avatar
Asias He committed
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
		}
		break;
	case VIRTIO_MMIO_GUEST_PAGE_SIZE:
		val = ioport__read32(data);
		vmmio->hdr.guest_page_size = val;
		break;
	case VIRTIO_MMIO_QUEUE_NUM:
		val = ioport__read32(data);
		vmmio->hdr.queue_num = val;
		vdev->ops->set_size_vq(vmmio->kvm, vmmio->dev,
				       vmmio->hdr.queue_sel, val);
		break;
	case VIRTIO_MMIO_QUEUE_ALIGN:
		val = ioport__read32(data);
		vmmio->hdr.queue_align = val;
		break;
	case VIRTIO_MMIO_QUEUE_PFN:
		val = ioport__read32(data);
200
201
202
203
204
205
206
207
208
209
210
		if (val) {
			virtio_mmio_init_ioeventfd(vmmio->kvm, vdev,
						   vmmio->hdr.queue_sel);
			vdev->ops->init_vq(vmmio->kvm, vmmio->dev,
					   vmmio->hdr.queue_sel,
					   vmmio->hdr.guest_page_size,
					   vmmio->hdr.queue_align,
					   val);
		} else {
			virtio_mmio_exit_vq(kvm, vdev, vmmio->hdr.queue_sel);
		}
Asias He's avatar
Asias He committed
211
212
213
214
215
216
217
218
219
220
221
222
223
224
		break;
	case VIRTIO_MMIO_QUEUE_NOTIFY:
		val = ioport__read32(data);
		vdev->ops->notify_vq(vmmio->kvm, vmmio->dev, val);
		break;
	case VIRTIO_MMIO_INTERRUPT_ACK:
		val = ioport__read32(data);
		vmmio->hdr.interrupt_state &= ~val;
		break;
	default:
		break;
	};
}

225
226
static void virtio_mmio_mmio_callback(struct kvm_cpu *vcpu,
				      u64 addr, u8 *data, u32 len,
Asias He's avatar
Asias He committed
227
228
229
230
231
232
233
234
				      u8 is_write, void *ptr)
{
	struct virtio_device *vdev = ptr;
	struct virtio_mmio *vmmio = vdev->virtio;
	u32 offset = addr - vmmio->addr;

	if (offset >= VIRTIO_MMIO_CONFIG) {
		offset -= VIRTIO_MMIO_CONFIG;
235
		virtio_mmio_device_specific(vcpu, offset, data, len, is_write, ptr);
Asias He's avatar
Asias He committed
236
237
238
239
		return;
	}

	if (is_write)
240
		virtio_mmio_config_out(vcpu, offset, data, len, ptr);
Asias He's avatar
Asias He committed
241
	else
242
		virtio_mmio_config_in(vcpu, offset, data, len, ptr);
Asias He's avatar
Asias He committed
243
244
}

245
246
#ifdef CONFIG_HAS_LIBFDT
#define DEVICE_NAME_MAX_LEN 32
247
248
249
250
251
252
static
void generate_virtio_mmio_fdt_node(void *fdt,
				   struct device_header *dev_hdr,
				   void (*generate_irq_prop)(void *fdt,
							     u8 irq,
							     enum irq_type))
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
{
	char dev_name[DEVICE_NAME_MAX_LEN];
	struct virtio_mmio *vmmio = container_of(dev_hdr,
						 struct virtio_mmio,
						 dev_hdr);
	u64 addr = vmmio->addr;
	u64 reg_prop[] = {
		cpu_to_fdt64(addr),
		cpu_to_fdt64(VIRTIO_MMIO_IO_SIZE),
	};

	snprintf(dev_name, DEVICE_NAME_MAX_LEN, "virtio@%llx", addr);

	_FDT(fdt_begin_node(fdt, dev_name));
	_FDT(fdt_property_string(fdt, "compatible", "virtio,mmio"));
	_FDT(fdt_property(fdt, "reg", reg_prop, sizeof(reg_prop)));
269
	_FDT(fdt_property(fdt, "dma-coherent", NULL, 0));
270
	generate_irq_prop(fdt, vmmio->irq, IRQ_TYPE_EDGE_RISING);
271
272
273
274
275
276
277
278
279
280
281
282
	_FDT(fdt_end_node(fdt));
}
#else
static void generate_virtio_mmio_fdt_node(void *fdt,
					  struct device_header *dev_hdr,
					  void (*generate_irq_prop)(void *fdt,
								    u8 irq))
{
	die("Unable to generate device tree nodes without libfdt\n");
}
#endif

283
284
285
286
287
288
289
290
void virtio_mmio_assign_irq(struct device_header *dev_hdr)
{
	struct virtio_mmio *vmmio = container_of(dev_hdr,
						 struct virtio_mmio,
						 dev_hdr);
	vmmio->irq = irq__alloc_line();
}

Asias He's avatar
Asias He committed
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
int virtio_mmio_init(struct kvm *kvm, void *dev, struct virtio_device *vdev,
		     int device_id, int subsys_id, int class)
{
	struct virtio_mmio *vmmio = vdev->virtio;

	vmmio->addr	= virtio_mmio_get_io_space_block(VIRTIO_MMIO_IO_SIZE);
	vmmio->kvm	= kvm;
	vmmio->dev	= dev;

	kvm__register_mmio(kvm, vmmio->addr, VIRTIO_MMIO_IO_SIZE,
			   false, virtio_mmio_mmio_callback, vdev);

	vmmio->hdr = (struct virtio_mmio_hdr) {
		.magic		= {'v', 'i', 'r', 't'},
		.version	= 1,
306
		.device_id	= subsys_id,
Asias He's avatar
Asias He committed
307
308
309
310
		.vendor_id	= 0x4d564b4c , /* 'LKVM' */
		.queue_num_max	= 256,
	};

311
312
	vmmio->dev_hdr = (struct device_header) {
		.bus_type	= DEVICE_BUS_MMIO,
313
		.data		= generate_virtio_mmio_fdt_node,
314
315
316
	};

	device__register(&vmmio->dev_hdr);
Asias He's avatar
Asias He committed
317
318
319
320
321
322
323

	/*
	 * Instantiate guest virtio-mmio devices using kernel command line
	 * (or module) parameter, e.g
	 *
	 * virtio_mmio.devices=0x200@0xd2000000:5,0x200@0xd2000200:6
	 */
324
	pr_info("virtio-mmio.devices=0x%x@0x%x:%d\n", VIRTIO_MMIO_IO_SIZE, vmmio->addr, vmmio->irq);
Asias He's avatar
Asias He committed
325
326
327
328

	return 0;
}

329
330
331
332
333
334
335
336
337
338
339
int virtio_mmio_reset(struct kvm *kvm, struct virtio_device *vdev)
{
	int vq;
	struct virtio_mmio *vmmio = vdev->virtio;

	for (vq = 0; vq < vdev->ops->get_vq_count(kvm, vmmio->dev); vq++)
		virtio_mmio_exit_vq(kvm, vdev, vq);

	return 0;
}

Asias He's avatar
Asias He committed
340
341
342
343
int virtio_mmio_exit(struct kvm *kvm, struct virtio_device *vdev)
{
	struct virtio_mmio *vmmio = vdev->virtio;

344
	virtio_mmio_reset(kvm, vdev);
Asias He's avatar
Asias He committed
345
346
347
348
	kvm__deregister_mmio(kvm, vmmio->addr);

	return 0;
}