/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This module enables machines with Intel VT-x extensions to run virtual
 * machines without emulation or binary translation.
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Avi Kivity   <avi@qumranet.com>
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#include "irq.h"
#include "mmu.h"
#include "cpuid.h"

#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/sched.h>
#include <linux/moduleparam.h>
#include <linux/ftrace_event.h>
#include <linux/slab.h>
#include <linux/tboot.h>
#include "kvm_cache_regs.h"
#include "x86.h"

#include <asm/io.h>
#include <asm/desc.h>
#include <asm/vmx.h>
#include <asm/virtext.h>
#include <asm/mce.h>
#include <asm/i387.h>
#include <asm/xcr.h>
#include <asm/perf_event.h>

#include "trace.h"

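/*
 * __ex()/__ex_clear() wrap a VMX instruction with the fault-on-reboot
 * fixup from asm/kvm_host.h: if the instruction faults because VMX was
 * already turned off by an emergency reboot, execution continues instead
 * of oopsing; the _clear variant additionally zeroes the named register
 * in the fixup path so callers see a harmless value. (Descriptive note;
 * see __kvm_handle_fault_on_reboot() for the authoritative definition.)
 */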
#define __ex(x) __kvm_handle_fault_on_reboot(x)
#define __ex_clear(x, reg) \
	____kvm_handle_fault_on_reboot(x, "xor " reg " , " reg)

MODULE_AUTHOR("Qumranet");
MODULE_LICENSE("GPL");

static bool __read_mostly enable_vpid = 1;
module_param_named(vpid, enable_vpid, bool, 0444);

static bool __read_mostly flexpriority_enabled = 1;
module_param_named(flexpriority, flexpriority_enabled, bool, S_IRUGO);

static bool __read_mostly enable_ept = 1;
module_param_named(ept, enable_ept, bool, S_IRUGO);

static bool __read_mostly enable_unrestricted_guest = 1;
module_param_named(unrestricted_guest,
			enable_unrestricted_guest, bool, S_IRUGO);

static bool __read_mostly emulate_invalid_guest_state = 0;
module_param(emulate_invalid_guest_state, bool, S_IRUGO);

static bool __read_mostly vmm_exclusive = 1;
module_param(vmm_exclusive, bool, S_IRUGO);

static bool __read_mostly fasteoi = 1;
module_param(fasteoi, bool, S_IRUGO);

/*
 * If nested=1, nested virtualization is supported, i.e., guests may use
 * VMX and act as hypervisors for their own guests. If nested=0, guests may
 * not use VMX instructions.
 */
static bool __read_mostly nested = 0;
module_param(nested, bool, S_IRUGO);

#define KVM_GUEST_CR0_MASK_UNRESTRICTED_GUEST				\
	(X86_CR0_WP | X86_CR0_NE | X86_CR0_NW | X86_CR0_CD)
#define KVM_GUEST_CR0_MASK						\
	(KVM_GUEST_CR0_MASK_UNRESTRICTED_GUEST | X86_CR0_PG | X86_CR0_PE)
#define KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST				\
	(X86_CR0_WP | X86_CR0_NE)
#define KVM_VM_CR0_ALWAYS_ON						\
	(KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST | X86_CR0_PG | X86_CR0_PE)
#define KVM_CR4_GUEST_OWNED_BITS				      \
	(X86_CR4_PVI | X86_CR4_DE | X86_CR4_PCE | X86_CR4_OSFXSR      \
	 | X86_CR4_OSXMMEXCPT)

#define KVM_PMODE_VM_CR4_ALWAYS_ON (X86_CR4_PAE | X86_CR4_VMXE)
#define KVM_RMODE_VM_CR4_ALWAYS_ON (X86_CR4_VME | X86_CR4_PAE | X86_CR4_VMXE)

#define RMODE_GUEST_OWNED_EFLAGS_BITS (~(X86_EFLAGS_IOPL | X86_EFLAGS_VM))

/*
 * These 2 parameters are used to configure the controls for Pause-Loop
 * Exiting:
 * ple_gap:    upper bound on the amount of time between two successive
 *             executions of PAUSE in a loop. Also indicates whether PLE is
 *             enabled.
 *             According to tests, this time is usually smaller than 128
 *             cycles.
 * ple_window: upper bound on the amount of time a guest is allowed to execute
 *             in a PAUSE loop. Tests indicate that most spinlocks are held for
 *             less than 2^12 cycles.
 * Time is measured based on a counter that runs at the same rate as the TSC,
 * refer to SDM volume 3b, sections 21.6.13 and 22.1.3.
 */
#define KVM_VMX_DEFAULT_PLE_GAP    128
#define KVM_VMX_DEFAULT_PLE_WINDOW 4096
static int ple_gap = KVM_VMX_DEFAULT_PLE_GAP;
module_param(ple_gap, int, S_IRUGO);

static int ple_window = KVM_VMX_DEFAULT_PLE_WINDOW;
module_param(ple_window, int, S_IRUGO);
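/*
 * Both knobs are consumed at vCPU setup time; roughly (simplified sketch):
 *
 *	if (ple_gap) {
 *		vmcs_write32(PLE_GAP, ple_gap);
 *		vmcs_write32(PLE_WINDOW, ple_window);
 *	}
 *
 * so a guest PAUSE loop that runs longer than ple_window cycles triggers a
 * PAUSE_INSTRUCTION VM exit, and ple_gap == 0 leaves PLE disabled.
 */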

#define NR_AUTOLOAD_MSRS 8
#define VMCS02_POOL_SIZE 1

struct vmcs {
	u32 revision_id;
	u32 abort;
	char data[0];
};

/*
 * Track a VMCS that may be loaded on a certain CPU. If it is (cpu!=-1), also
 * remember whether it was VMLAUNCHed, and maintain a linked list of all VMCSs
 * loaded on this CPU (so we can clear them if the CPU goes down).
 */
struct loaded_vmcs {
	struct vmcs *vmcs;
	int cpu;
	int launched;
	struct list_head loaded_vmcss_on_cpu_link;
};

struct shared_msr_entry {
	unsigned index;
	u64 data;
	u64 mask;
};

/*
 * struct vmcs12 describes the state that our guest hypervisor (L1) keeps for a
 * single nested guest (L2), hence the name vmcs12. Any VMX implementation has
 * a VMCS structure, and vmcs12 is our emulated VMX's VMCS. This structure is
 * stored in guest memory specified by VMPTRLD, but is opaque to the guest,
 * which must access it using VMREAD/VMWRITE/VMCLEAR instructions.
 * More than one of these structures may exist, if L1 runs multiple L2 guests.
 * nested_vmx_run() will use the data here to build a vmcs02: a VMCS for the
 * underlying hardware which will be used to run L2.
 * This structure is packed to ensure that its layout is identical across
 * machines (necessary for live migration).
 * If there are changes in this struct, VMCS12_REVISION must be changed.
 */
typedef u64 natural_width;
struct __packed vmcs12 {
	/* According to the Intel spec, a VMCS region must start with the
	 * following two fields. Then follow implementation-specific data.
	 */
	u32 revision_id;
	u32 abort;

	u32 launch_state; /* set to 0 by VMCLEAR, to 1 by VMLAUNCH */
	u32 padding[7]; /* room for future expansion */

	u64 io_bitmap_a;
	u64 io_bitmap_b;
	u64 msr_bitmap;
	u64 vm_exit_msr_store_addr;
	u64 vm_exit_msr_load_addr;
	u64 vm_entry_msr_load_addr;
	u64 tsc_offset;
	u64 virtual_apic_page_addr;
	u64 apic_access_addr;
	u64 ept_pointer;
	u64 guest_physical_address;
	u64 vmcs_link_pointer;
	u64 guest_ia32_debugctl;
	u64 guest_ia32_pat;
	u64 guest_ia32_efer;
	u64 guest_ia32_perf_global_ctrl;
	u64 guest_pdptr0;
	u64 guest_pdptr1;
	u64 guest_pdptr2;
	u64 guest_pdptr3;
	u64 host_ia32_pat;
	u64 host_ia32_efer;
	u64 host_ia32_perf_global_ctrl;
	u64 padding64[8]; /* room for future expansion */
	/*
	 * To allow migration of L1 (complete with its L2 guests) between
	 * machines of different natural widths (32 or 64 bit), we cannot have
	 * unsigned long fields with no explicit size. We use u64 (aliased
	 * natural_width) instead. Luckily, x86 is little-endian.
	 */
	natural_width cr0_guest_host_mask;
	natural_width cr4_guest_host_mask;
	natural_width cr0_read_shadow;
	natural_width cr4_read_shadow;
	natural_width cr3_target_value0;
	natural_width cr3_target_value1;
	natural_width cr3_target_value2;
	natural_width cr3_target_value3;
	natural_width exit_qualification;
	natural_width guest_linear_address;
	natural_width guest_cr0;
	natural_width guest_cr3;
	natural_width guest_cr4;
	natural_width guest_es_base;
	natural_width guest_cs_base;
	natural_width guest_ss_base;
	natural_width guest_ds_base;
	natural_width guest_fs_base;
	natural_width guest_gs_base;
	natural_width guest_ldtr_base;
	natural_width guest_tr_base;
	natural_width guest_gdtr_base;
	natural_width guest_idtr_base;
	natural_width guest_dr7;
	natural_width guest_rsp;
	natural_width guest_rip;
	natural_width guest_rflags;
	natural_width guest_pending_dbg_exceptions;
	natural_width guest_sysenter_esp;
	natural_width guest_sysenter_eip;
	natural_width host_cr0;
	natural_width host_cr3;
	natural_width host_cr4;
	natural_width host_fs_base;
	natural_width host_gs_base;
	natural_width host_tr_base;
	natural_width host_gdtr_base;
	natural_width host_idtr_base;
	natural_width host_ia32_sysenter_esp;
	natural_width host_ia32_sysenter_eip;
	natural_width host_rsp;
	natural_width host_rip;
	natural_width paddingl[8]; /* room for future expansion */
	u32 pin_based_vm_exec_control;
	u32 cpu_based_vm_exec_control;
	u32 exception_bitmap;
	u32 page_fault_error_code_mask;
	u32 page_fault_error_code_match;
	u32 cr3_target_count;
	u32 vm_exit_controls;
	u32 vm_exit_msr_store_count;
	u32 vm_exit_msr_load_count;
	u32 vm_entry_controls;
	u32 vm_entry_msr_load_count;
	u32 vm_entry_intr_info_field;
	u32 vm_entry_exception_error_code;
	u32 vm_entry_instruction_len;
	u32 tpr_threshold;
	u32 secondary_vm_exec_control;
	u32 vm_instruction_error;
	u32 vm_exit_reason;
	u32 vm_exit_intr_info;
	u32 vm_exit_intr_error_code;
	u32 idt_vectoring_info_field;
	u32 idt_vectoring_error_code;
	u32 vm_exit_instruction_len;
	u32 vmx_instruction_info;
	u32 guest_es_limit;
	u32 guest_cs_limit;
	u32 guest_ss_limit;
	u32 guest_ds_limit;
	u32 guest_fs_limit;
	u32 guest_gs_limit;
	u32 guest_ldtr_limit;
	u32 guest_tr_limit;
	u32 guest_gdtr_limit;
	u32 guest_idtr_limit;
	u32 guest_es_ar_bytes;
	u32 guest_cs_ar_bytes;
	u32 guest_ss_ar_bytes;
	u32 guest_ds_ar_bytes;
	u32 guest_fs_ar_bytes;
	u32 guest_gs_ar_bytes;
	u32 guest_ldtr_ar_bytes;
	u32 guest_tr_ar_bytes;
	u32 guest_interruptibility_info;
	u32 guest_activity_state;
	u32 guest_sysenter_cs;
	u32 host_ia32_sysenter_cs;
	u32 padding32[8]; /* room for future expansion */
	u16 virtual_processor_id;
	u16 guest_es_selector;
	u16 guest_cs_selector;
	u16 guest_ss_selector;
	u16 guest_ds_selector;
	u16 guest_fs_selector;
	u16 guest_gs_selector;
	u16 guest_ldtr_selector;
	u16 guest_tr_selector;
	u16 host_es_selector;
	u16 host_cs_selector;
	u16 host_ss_selector;
	u16 host_ds_selector;
	u16 host_fs_selector;
	u16 host_gs_selector;
	u16 host_tr_selector;
};

/*
 * VMCS12_REVISION is an arbitrary id that should be changed if the content or
 * layout of struct vmcs12 is changed. MSR_IA32_VMX_BASIC returns this id, and
 * VMPTRLD verifies that the VMCS region that L1 is loading contains this id.
 */
#define VMCS12_REVISION 0x11e57ed0

/*
 * VMCS12_SIZE is the number of bytes L1 should allocate for the VMXON region
 * and any VMCS region. Although only sizeof(struct vmcs12) bytes are used
 * by the current implementation, 4K are reserved to avoid future
 * complications.
 */
#define VMCS12_SIZE 0x1000

/* Used to remember the last vmcs02 used for some recently used vmcs12s */
struct vmcs02_list {
	struct list_head list;
	gpa_t vmptr;
	struct loaded_vmcs vmcs02;
};

/*
 * The nested_vmx structure is part of vcpu_vmx, and holds information we need
 * for correct emulation of VMX (i.e., nested VMX) on this vcpu.
 */
struct nested_vmx {
	/* Has the level1 guest done vmxon? */
	bool vmxon;

	/* The guest-physical address of the current VMCS L1 keeps for L2 */
	gpa_t current_vmptr;
	/* The host-usable pointer to the above */
	struct page *current_vmcs12_page;
	struct vmcs12 *current_vmcs12;

	/* vmcs02_list cache of VMCSs recently used to run L2 guests */
	struct list_head vmcs02_pool;
	int vmcs02_num;
	u64 vmcs01_tsc_offset;
	/* L2 must run next, and mustn't decide to exit to L1. */
	bool nested_run_pending;
	/*
	 * Guest pages referred to in vmcs02 with host-physical pointers, so
	 * we must keep them pinned while L2 runs.
	 */
	struct page *apic_access_page;
};

struct vcpu_vmx {
	struct kvm_vcpu       vcpu;
	unsigned long         host_rsp;
	u8                    fail;
	u8                    cpl;
	bool                  nmi_known_unmasked;
	u32                   exit_intr_info;
	u32                   idt_vectoring_info;
	ulong                 rflags;
	struct shared_msr_entry *guest_msrs;
	int                   nmsrs;
	int                   save_nmsrs;
#ifdef CONFIG_X86_64
	u64 		      msr_host_kernel_gs_base;
	u64 		      msr_guest_kernel_gs_base;
#endif
	/*
	 * loaded_vmcs points to the VMCS currently used in this vcpu. For a
	 * non-nested (L1) guest, it always points to vmcs01. For a nested
	 * guest (L2), it points to a different VMCS.
	 */
	struct loaded_vmcs    vmcs01;
	struct loaded_vmcs   *loaded_vmcs;
	bool                  __launched; /* temporary, used in vmx_vcpu_run */
	struct msr_autoload {
		unsigned nr;
		struct vmx_msr_entry guest[NR_AUTOLOAD_MSRS];
		struct vmx_msr_entry host[NR_AUTOLOAD_MSRS];
	} msr_autoload;
	struct {
		int           loaded;
		u16           fs_sel, gs_sel, ldt_sel;
		int           gs_ldt_reload_needed;
		int           fs_reload_needed;
	} host_state;
	struct {
		int vm86_active;
		ulong save_rflags;
		struct kvm_save_segment {
			u16 selector;
			unsigned long base;
			u32 limit;
			u32 ar;
		} tr, es, ds, fs, gs;
	} rmode;
	struct {
		u32 bitmask; /* 4 bits per segment (1 bit per field) */
		struct kvm_save_segment seg[8];
	} segment_cache;
	int vpid;
	bool emulation_required;

	/* Support for vnmi-less CPUs */
	int soft_vnmi_blocked;
	ktime_t entry_time;
	s64 vnmi_blocked_time;
	u32 exit_reason;

	bool rdtscp_enabled;

	/* Support for a guest hypervisor (nested VMX) */
	struct nested_vmx nested;
};

enum segment_cache_field {
	SEG_FIELD_SEL = 0,
	SEG_FIELD_BASE = 1,
	SEG_FIELD_LIMIT = 2,
	SEG_FIELD_AR = 3,

	SEG_FIELD_NR = 4
};
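/*
 * Guest segment reads go through vcpu_vmx.segment_cache: a helper first
 * tests the per-field bit in segment_cache.bitmask and only falls back to
 * a (more expensive) VMREAD when the bit is clear, caching the result;
 * clearing bitmask invalidates the whole cache when guest segment state
 * may have changed.
 */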

static inline struct vcpu_vmx *to_vmx(struct kvm_vcpu *vcpu)
{
	return container_of(vcpu, struct vcpu_vmx, vcpu);
}

#define VMCS12_OFFSET(x) offsetof(struct vmcs12, x)
#define FIELD(number, name)	[number] = VMCS12_OFFSET(name)
#define FIELD64(number, name)	[number] = VMCS12_OFFSET(name), \
				[number##_HIGH] = VMCS12_OFFSET(name)+4

static unsigned short vmcs_field_to_offset_table[] = {
	FIELD(VIRTUAL_PROCESSOR_ID, virtual_processor_id),
	FIELD(GUEST_ES_SELECTOR, guest_es_selector),
	FIELD(GUEST_CS_SELECTOR, guest_cs_selector),
	FIELD(GUEST_SS_SELECTOR, guest_ss_selector),
	FIELD(GUEST_DS_SELECTOR, guest_ds_selector),
	FIELD(GUEST_FS_SELECTOR, guest_fs_selector),
	FIELD(GUEST_GS_SELECTOR, guest_gs_selector),
	FIELD(GUEST_LDTR_SELECTOR, guest_ldtr_selector),
	FIELD(GUEST_TR_SELECTOR, guest_tr_selector),
	FIELD(HOST_ES_SELECTOR, host_es_selector),
	FIELD(HOST_CS_SELECTOR, host_cs_selector),
	FIELD(HOST_SS_SELECTOR, host_ss_selector),
	FIELD(HOST_DS_SELECTOR, host_ds_selector),
	FIELD(HOST_FS_SELECTOR, host_fs_selector),
	FIELD(HOST_GS_SELECTOR, host_gs_selector),
	FIELD(HOST_TR_SELECTOR, host_tr_selector),
	FIELD64(IO_BITMAP_A, io_bitmap_a),
	FIELD64(IO_BITMAP_B, io_bitmap_b),
	FIELD64(MSR_BITMAP, msr_bitmap),
	FIELD64(VM_EXIT_MSR_STORE_ADDR, vm_exit_msr_store_addr),
	FIELD64(VM_EXIT_MSR_LOAD_ADDR, vm_exit_msr_load_addr),
	FIELD64(VM_ENTRY_MSR_LOAD_ADDR, vm_entry_msr_load_addr),
	FIELD64(TSC_OFFSET, tsc_offset),
	FIELD64(VIRTUAL_APIC_PAGE_ADDR, virtual_apic_page_addr),
	FIELD64(APIC_ACCESS_ADDR, apic_access_addr),
	FIELD64(EPT_POINTER, ept_pointer),
	FIELD64(GUEST_PHYSICAL_ADDRESS, guest_physical_address),
	FIELD64(VMCS_LINK_POINTER, vmcs_link_pointer),
	FIELD64(GUEST_IA32_DEBUGCTL, guest_ia32_debugctl),
	FIELD64(GUEST_IA32_PAT, guest_ia32_pat),
	FIELD64(GUEST_IA32_EFER, guest_ia32_efer),
	FIELD64(GUEST_IA32_PERF_GLOBAL_CTRL, guest_ia32_perf_global_ctrl),
	FIELD64(GUEST_PDPTR0, guest_pdptr0),
	FIELD64(GUEST_PDPTR1, guest_pdptr1),
	FIELD64(GUEST_PDPTR2, guest_pdptr2),
	FIELD64(GUEST_PDPTR3, guest_pdptr3),
	FIELD64(HOST_IA32_PAT, host_ia32_pat),
	FIELD64(HOST_IA32_EFER, host_ia32_efer),
	FIELD64(HOST_IA32_PERF_GLOBAL_CTRL, host_ia32_perf_global_ctrl),
	FIELD(PIN_BASED_VM_EXEC_CONTROL, pin_based_vm_exec_control),
	FIELD(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control),
	FIELD(EXCEPTION_BITMAP, exception_bitmap),
	FIELD(PAGE_FAULT_ERROR_CODE_MASK, page_fault_error_code_mask),
	FIELD(PAGE_FAULT_ERROR_CODE_MATCH, page_fault_error_code_match),
	FIELD(CR3_TARGET_COUNT, cr3_target_count),
	FIELD(VM_EXIT_CONTROLS, vm_exit_controls),
	FIELD(VM_EXIT_MSR_STORE_COUNT, vm_exit_msr_store_count),
	FIELD(VM_EXIT_MSR_LOAD_COUNT, vm_exit_msr_load_count),
	FIELD(VM_ENTRY_CONTROLS, vm_entry_controls),
	FIELD(VM_ENTRY_MSR_LOAD_COUNT, vm_entry_msr_load_count),
	FIELD(VM_ENTRY_INTR_INFO_FIELD, vm_entry_intr_info_field),
	FIELD(VM_ENTRY_EXCEPTION_ERROR_CODE, vm_entry_exception_error_code),
	FIELD(VM_ENTRY_INSTRUCTION_LEN, vm_entry_instruction_len),
	FIELD(TPR_THRESHOLD, tpr_threshold),
	FIELD(SECONDARY_VM_EXEC_CONTROL, secondary_vm_exec_control),
	FIELD(VM_INSTRUCTION_ERROR, vm_instruction_error),
	FIELD(VM_EXIT_REASON, vm_exit_reason),
	FIELD(VM_EXIT_INTR_INFO, vm_exit_intr_info),
	FIELD(VM_EXIT_INTR_ERROR_CODE, vm_exit_intr_error_code),
	FIELD(IDT_VECTORING_INFO_FIELD, idt_vectoring_info_field),
	FIELD(IDT_VECTORING_ERROR_CODE, idt_vectoring_error_code),
	FIELD(VM_EXIT_INSTRUCTION_LEN, vm_exit_instruction_len),
	FIELD(VMX_INSTRUCTION_INFO, vmx_instruction_info),
	FIELD(GUEST_ES_LIMIT, guest_es_limit),
	FIELD(GUEST_CS_LIMIT, guest_cs_limit),
	FIELD(GUEST_SS_LIMIT, guest_ss_limit),
	FIELD(GUEST_DS_LIMIT, guest_ds_limit),
	FIELD(GUEST_FS_LIMIT, guest_fs_limit),
	FIELD(GUEST_GS_LIMIT, guest_gs_limit),
	FIELD(GUEST_LDTR_LIMIT, guest_ldtr_limit),
	FIELD(GUEST_TR_LIMIT, guest_tr_limit),
	FIELD(GUEST_GDTR_LIMIT, guest_gdtr_limit),
	FIELD(GUEST_IDTR_LIMIT, guest_idtr_limit),
	FIELD(GUEST_ES_AR_BYTES, guest_es_ar_bytes),
	FIELD(GUEST_CS_AR_BYTES, guest_cs_ar_bytes),
	FIELD(GUEST_SS_AR_BYTES, guest_ss_ar_bytes),
	FIELD(GUEST_DS_AR_BYTES, guest_ds_ar_bytes),
	FIELD(GUEST_FS_AR_BYTES, guest_fs_ar_bytes),
	FIELD(GUEST_GS_AR_BYTES, guest_gs_ar_bytes),
	FIELD(GUEST_LDTR_AR_BYTES, guest_ldtr_ar_bytes),
	FIELD(GUEST_TR_AR_BYTES, guest_tr_ar_bytes),
	FIELD(GUEST_INTERRUPTIBILITY_INFO, guest_interruptibility_info),
	FIELD(GUEST_ACTIVITY_STATE, guest_activity_state),
	FIELD(GUEST_SYSENTER_CS, guest_sysenter_cs),
	FIELD(HOST_IA32_SYSENTER_CS, host_ia32_sysenter_cs),
	FIELD(CR0_GUEST_HOST_MASK, cr0_guest_host_mask),
	FIELD(CR4_GUEST_HOST_MASK, cr4_guest_host_mask),
	FIELD(CR0_READ_SHADOW, cr0_read_shadow),
	FIELD(CR4_READ_SHADOW, cr4_read_shadow),
	FIELD(CR3_TARGET_VALUE0, cr3_target_value0),
	FIELD(CR3_TARGET_VALUE1, cr3_target_value1),
	FIELD(CR3_TARGET_VALUE2, cr3_target_value2),
	FIELD(CR3_TARGET_VALUE3, cr3_target_value3),
	FIELD(EXIT_QUALIFICATION, exit_qualification),
	FIELD(GUEST_LINEAR_ADDRESS, guest_linear_address),
	FIELD(GUEST_CR0, guest_cr0),
	FIELD(GUEST_CR3, guest_cr3),
	FIELD(GUEST_CR4, guest_cr4),
	FIELD(GUEST_ES_BASE, guest_es_base),
	FIELD(GUEST_CS_BASE, guest_cs_base),
	FIELD(GUEST_SS_BASE, guest_ss_base),
	FIELD(GUEST_DS_BASE, guest_ds_base),
	FIELD(GUEST_FS_BASE, guest_fs_base),
	FIELD(GUEST_GS_BASE, guest_gs_base),
	FIELD(GUEST_LDTR_BASE, guest_ldtr_base),
	FIELD(GUEST_TR_BASE, guest_tr_base),
	FIELD(GUEST_GDTR_BASE, guest_gdtr_base),
	FIELD(GUEST_IDTR_BASE, guest_idtr_base),
	FIELD(GUEST_DR7, guest_dr7),
	FIELD(GUEST_RSP, guest_rsp),
	FIELD(GUEST_RIP, guest_rip),
	FIELD(GUEST_RFLAGS, guest_rflags),
	FIELD(GUEST_PENDING_DBG_EXCEPTIONS, guest_pending_dbg_exceptions),
	FIELD(GUEST_SYSENTER_ESP, guest_sysenter_esp),
	FIELD(GUEST_SYSENTER_EIP, guest_sysenter_eip),
	FIELD(HOST_CR0, host_cr0),
	FIELD(HOST_CR3, host_cr3),
	FIELD(HOST_CR4, host_cr4),
	FIELD(HOST_FS_BASE, host_fs_base),
	FIELD(HOST_GS_BASE, host_gs_base),
	FIELD(HOST_TR_BASE, host_tr_base),
	FIELD(HOST_GDTR_BASE, host_gdtr_base),
	FIELD(HOST_IDTR_BASE, host_idtr_base),
	FIELD(HOST_IA32_SYSENTER_ESP, host_ia32_sysenter_esp),
	FIELD(HOST_IA32_SYSENTER_EIP, host_ia32_sysenter_eip),
	FIELD(HOST_RSP, host_rsp),
	FIELD(HOST_RIP, host_rip),
};
static const int max_vmcs_field = ARRAY_SIZE(vmcs_field_to_offset_table);

static inline short vmcs_field_to_offset(unsigned long field)
{
	if (field >= max_vmcs_field || vmcs_field_to_offset_table[field] == 0)
		return -1;
	return vmcs_field_to_offset_table[field];
}
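/*
 * Simplified sketch (not the actual VMREAD handler) of how the table above
 * is used when emulating an L1 VMREAD of, say, GUEST_RIP:
 *
 *	short offset = vmcs_field_to_offset(GUEST_RIP);
 *	if (offset < 0)
 *		... field not supported ...
 *	value = *(natural_width *)((char *)get_vmcs12(vcpu) + offset);
 *
 * i.e. the value comes from the software-defined vmcs12 image in guest
 * memory, not from the hardware VMCS.
 */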

static inline struct vmcs12 *get_vmcs12(struct kvm_vcpu *vcpu)
{
	return to_vmx(vcpu)->nested.current_vmcs12;
}

static struct page *nested_get_page(struct kvm_vcpu *vcpu, gpa_t addr)
{
	struct page *page = gfn_to_page(vcpu->kvm, addr >> PAGE_SHIFT);
	if (is_error_page(page)) {
		kvm_release_page_clean(page);
		return NULL;
	}
	return page;
}

static void nested_release_page(struct page *page)
{
	kvm_release_page_dirty(page);
}

static void nested_release_page_clean(struct page *page)
{
	kvm_release_page_clean(page);
}

static u64 construct_eptp(unsigned long root_hpa);
static void kvm_cpu_vmxon(u64 addr);
static void kvm_cpu_vmxoff(void);
static void vmx_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3);
static int vmx_set_tss_addr(struct kvm *kvm, unsigned int addr);

static DEFINE_PER_CPU(struct vmcs *, vmxarea);
static DEFINE_PER_CPU(struct vmcs *, current_vmcs);
/*
 * We maintain a per-CPU linked-list of VMCS loaded on that CPU. This is needed
 * when a CPU is brought down, and we need to VMCLEAR all VMCSs loaded on it.
 */
static DEFINE_PER_CPU(struct list_head, loaded_vmcss_on_cpu);
static DEFINE_PER_CPU(struct desc_ptr, host_gdt);

static unsigned long *vmx_io_bitmap_a;
static unsigned long *vmx_io_bitmap_b;
static unsigned long *vmx_msr_bitmap_legacy;
static unsigned long *vmx_msr_bitmap_longmode;

static bool cpu_has_load_ia32_efer;
static bool cpu_has_load_perf_global_ctrl;

static DECLARE_BITMAP(vmx_vpid_bitmap, VMX_NR_VPIDS);
static DEFINE_SPINLOCK(vmx_vpid_lock);

static struct vmcs_config {
	int size;
	int order;
	u32 revision_id;
	u32 pin_based_exec_ctrl;
	u32 cpu_based_exec_ctrl;
	u32 cpu_based_2nd_exec_ctrl;
	u32 vmexit_ctrl;
	u32 vmentry_ctrl;
} vmcs_config;

static struct vmx_capability {
	u32 ept;
	u32 vpid;
} vmx_capability;

#define VMX_SEGMENT_FIELD(seg)					\
	[VCPU_SREG_##seg] = {                                   \
		.selector = GUEST_##seg##_SELECTOR,		\
		.base = GUEST_##seg##_BASE,		   	\
		.limit = GUEST_##seg##_LIMIT,		   	\
		.ar_bytes = GUEST_##seg##_AR_BYTES,	   	\
	}

static struct kvm_vmx_segment_field {
	unsigned selector;
	unsigned base;
	unsigned limit;
	unsigned ar_bytes;
} kvm_vmx_segment_fields[] = {
	VMX_SEGMENT_FIELD(CS),
	VMX_SEGMENT_FIELD(DS),
	VMX_SEGMENT_FIELD(ES),
	VMX_SEGMENT_FIELD(FS),
	VMX_SEGMENT_FIELD(GS),
	VMX_SEGMENT_FIELD(SS),
	VMX_SEGMENT_FIELD(TR),
	VMX_SEGMENT_FIELD(LDTR),
};
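/*
 * This table maps each KVM segment register index (VCPU_SREG_*) to the VMCS
 * field encodings for that segment, so segment accessors can be written
 * generically, e.g. (sketch):
 *
 *	struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];
 *	vmcs_write16(sf->selector, var->selector);
 */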

static u64 host_efer;

static void ept_save_pdptrs(struct kvm_vcpu *vcpu);

/*
 * Keep MSR_STAR at the end, as setup_msrs() will try to optimize it
 * away by decrementing the array size.
 */
static const u32 vmx_msr_index[] = {
#ifdef CONFIG_X86_64
	MSR_SYSCALL_MASK, MSR_LSTAR, MSR_CSTAR,
#endif
	MSR_EFER, MSR_TSC_AUX, MSR_STAR,
};
#define NR_VMX_MSR ARRAY_SIZE(vmx_msr_index)

static inline bool is_page_fault(u32 intr_info)
{
	return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK |
			     INTR_INFO_VALID_MASK)) ==
		(INTR_TYPE_HARD_EXCEPTION | PF_VECTOR | INTR_INFO_VALID_MASK);
}

static inline bool is_no_device(u32 intr_info)
{
	return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK |
			     INTR_INFO_VALID_MASK)) ==
		(INTR_TYPE_HARD_EXCEPTION | NM_VECTOR | INTR_INFO_VALID_MASK);
}

static inline bool is_invalid_opcode(u32 intr_info)
{
	return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK |
			     INTR_INFO_VALID_MASK)) ==
		(INTR_TYPE_HARD_EXCEPTION | UD_VECTOR | INTR_INFO_VALID_MASK);
}

static inline bool is_external_interrupt(u32 intr_info)
{
	return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VALID_MASK))
		== (INTR_TYPE_EXT_INTR | INTR_INFO_VALID_MASK);
}

static inline bool is_machine_check(u32 intr_info)
{
	return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK |
			     INTR_INFO_VALID_MASK)) ==
		(INTR_TYPE_HARD_EXCEPTION | MC_VECTOR | INTR_INFO_VALID_MASK);
}

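/*
 * The cpu_has_*() helpers below do not probe hardware at run time; they
 * test the vmcs_config/vmx_capability snapshots that setup_vmcs_config()
 * fills in from the MSR_IA32_VMX_* capability MSRs during module init.
 */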
static inline bool cpu_has_vmx_msr_bitmap(void)
{
	return vmcs_config.cpu_based_exec_ctrl & CPU_BASED_USE_MSR_BITMAPS;
}

static inline bool cpu_has_vmx_tpr_shadow(void)
{
	return vmcs_config.cpu_based_exec_ctrl & CPU_BASED_TPR_SHADOW;
}

static inline bool vm_need_tpr_shadow(struct kvm *kvm)
{
	return (cpu_has_vmx_tpr_shadow()) && (irqchip_in_kernel(kvm));
}

static inline bool cpu_has_secondary_exec_ctrls(void)
{
	return vmcs_config.cpu_based_exec_ctrl &
		CPU_BASED_ACTIVATE_SECONDARY_CONTROLS;
}

static inline bool cpu_has_vmx_virtualize_apic_accesses(void)
{
	return vmcs_config.cpu_based_2nd_exec_ctrl &
		SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
}

static inline bool cpu_has_vmx_flexpriority(void)
{
	return cpu_has_vmx_tpr_shadow() &&
		cpu_has_vmx_virtualize_apic_accesses();
}

static inline bool cpu_has_vmx_ept_execute_only(void)
{
	return vmx_capability.ept & VMX_EPT_EXECUTE_ONLY_BIT;
}

static inline bool cpu_has_vmx_eptp_uncacheable(void)
{
	return vmx_capability.ept & VMX_EPTP_UC_BIT;
}

static inline bool cpu_has_vmx_eptp_writeback(void)
{
	return vmx_capability.ept & VMX_EPTP_WB_BIT;
}

static inline bool cpu_has_vmx_ept_2m_page(void)
{
	return vmx_capability.ept & VMX_EPT_2MB_PAGE_BIT;
}

static inline bool cpu_has_vmx_ept_1g_page(void)
{
	return vmx_capability.ept & VMX_EPT_1GB_PAGE_BIT;
}

static inline bool cpu_has_vmx_ept_4levels(void)
{
	return vmx_capability.ept & VMX_EPT_PAGE_WALK_4_BIT;
}

static inline bool cpu_has_vmx_invept_individual_addr(void)
{
	return vmx_capability.ept & VMX_EPT_EXTENT_INDIVIDUAL_BIT;
}

static inline bool cpu_has_vmx_invept_context(void)
{
	return vmx_capability.ept & VMX_EPT_EXTENT_CONTEXT_BIT;
}

static inline bool cpu_has_vmx_invept_global(void)
{
	return vmx_capability.ept & VMX_EPT_EXTENT_GLOBAL_BIT;
}

static inline bool cpu_has_vmx_invvpid_single(void)
{
	return vmx_capability.vpid & VMX_VPID_EXTENT_SINGLE_CONTEXT_BIT;
}

static inline bool cpu_has_vmx_invvpid_global(void)
{
	return vmx_capability.vpid & VMX_VPID_EXTENT_GLOBAL_CONTEXT_BIT;
}

static inline bool cpu_has_vmx_ept(void)
{
	return vmcs_config.cpu_based_2nd_exec_ctrl &
		SECONDARY_EXEC_ENABLE_EPT;
}

static inline bool cpu_has_vmx_unrestricted_guest(void)
{
	return vmcs_config.cpu_based_2nd_exec_ctrl &
		SECONDARY_EXEC_UNRESTRICTED_GUEST;
}

static inline bool cpu_has_vmx_ple(void)
{
	return vmcs_config.cpu_based_2nd_exec_ctrl &
		SECONDARY_EXEC_PAUSE_LOOP_EXITING;
}

static inline bool vm_need_virtualize_apic_accesses(struct kvm *kvm)
{
	return flexpriority_enabled && irqchip_in_kernel(kvm);
}

static inline bool cpu_has_vmx_vpid(void)
{
	return vmcs_config.cpu_based_2nd_exec_ctrl &
		SECONDARY_EXEC_ENABLE_VPID;
}

static inline bool cpu_has_vmx_rdtscp(void)
{
	return vmcs_config.cpu_based_2nd_exec_ctrl &
		SECONDARY_EXEC_RDTSCP;
}

static inline bool cpu_has_virtual_nmis(void)
{
	return vmcs_config.pin_based_exec_ctrl & PIN_BASED_VIRTUAL_NMIS;
}

static inline bool cpu_has_vmx_wbinvd_exit(void)
{
	return vmcs_config.cpu_based_2nd_exec_ctrl &
		SECONDARY_EXEC_WBINVD_EXITING;
}

static inline bool report_flexpriority(void)
{
	return flexpriority_enabled;
}

static inline bool nested_cpu_has(struct vmcs12 *vmcs12, u32 bit)
{
	return vmcs12->cpu_based_vm_exec_control & bit;
}

static inline bool nested_cpu_has2(struct vmcs12 *vmcs12, u32 bit)
{
	return (vmcs12->cpu_based_vm_exec_control &
			CPU_BASED_ACTIVATE_SECONDARY_CONTROLS) &&
		(vmcs12->secondary_vm_exec_control & bit);
}

static inline bool nested_cpu_has_virtual_nmis(struct vmcs12 *vmcs12,
	struct kvm_vcpu *vcpu)
{
	return vmcs12->pin_based_vm_exec_control & PIN_BASED_VIRTUAL_NMIS;
}

static inline bool is_exception(u32 intr_info)
{
	return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VALID_MASK))
		== (INTR_TYPE_HARD_EXCEPTION | INTR_INFO_VALID_MASK);
}

static void nested_vmx_vmexit(struct kvm_vcpu *vcpu);
static void nested_vmx_entry_failure(struct kvm_vcpu *vcpu,
			struct vmcs12 *vmcs12,
			u32 reason, unsigned long qualification);

static int __find_msr_index(struct vcpu_vmx *vmx, u32 msr)
{
	int i;

	for (i = 0; i < vmx->nmsrs; ++i)
		if (vmx_msr_index[vmx->guest_msrs[i].index] == msr)
			return i;
	return -1;
}

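/*
 * __invvpid()/__invept() pass the CPU a 128-bit descriptor in memory (the
 * "operand" struct) plus the invalidation type in a register, matching the
 * INVVPID/INVEPT instruction formats; the trailing "ja 1f; ud2" turns a
 * failed invalidation (CF or ZF set) into an invalid-opcode crash rather
 * than silently continuing with stale translations.
 */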
static inline void __invvpid(int ext, u16 vpid, gva_t gva)
{
    struct {
	u64 vpid : 16;
	u64 rsvd : 48;
	u64 gva;
    } operand = { vpid, 0, gva };

    asm volatile (__ex(ASM_VMX_INVVPID)
		  /* CF==1 or ZF==1 --> rc = -1 */
		  "; ja 1f ; ud2 ; 1:"
		  : : "a"(&operand), "c"(ext) : "cc", "memory");
}

static inline void __invept(int ext, u64 eptp, gpa_t gpa)
{
	struct {
		u64 eptp, gpa;
	} operand = {eptp, gpa};

	asm volatile (__ex(ASM_VMX_INVEPT)
			/* CF==1 or ZF==1 --> rc = -1 */
			"; ja 1f ; ud2 ; 1:\n"
			: : "a" (&operand), "c" (ext) : "cc", "memory");
}

static struct shared_msr_entry *find_msr_entry(struct vcpu_vmx *vmx, u32 msr)
{
	int i;

	i = __find_msr_index(vmx, msr);
	if (i >= 0)
		return &vmx->guest_msrs[i];
	return NULL;
}

static void vmcs_clear(struct vmcs *vmcs)
{
	u64 phys_addr = __pa(vmcs);
	u8 error;

	asm volatile (__ex(ASM_VMX_VMCLEAR_RAX) "; setna %0"
		      : "=qm"(error) : "a"(&phys_addr), "m"(phys_addr)
		      : "cc", "memory");
	if (error)
		printk(KERN_ERR "kvm: vmclear fail: %p/%llx\n",
		       vmcs, phys_addr);
}

static inline void loaded_vmcs_init(struct loaded_vmcs *loaded_vmcs)
{
	vmcs_clear(loaded_vmcs->vmcs);
	loaded_vmcs->cpu = -1;
	loaded_vmcs->launched = 0;
}

static void vmcs_load(struct vmcs *vmcs)
{
	u64 phys_addr = __pa(vmcs);
	u8 error;

	asm volatile (__ex(ASM_VMX_VMPTRLD_RAX) "; setna %0"
			: "=qm"(error) : "a"(&phys_addr), "m"(phys_addr)
			: "cc", "memory");
	if (error)
		printk(KERN_ERR "kvm: vmptrld %p/%llx failed\n",
		       vmcs, phys_addr);
}

static void __loaded_vmcs_clear(void *arg)
{
	struct loaded_vmcs *loaded_vmcs = arg;
	int cpu = raw_smp_processor_id();

	if (loaded_vmcs->cpu != cpu)
		return; /* vcpu migration can race with cpu offline */
	if (per_cpu(current_vmcs, cpu) == loaded_vmcs->vmcs)
		per_cpu(current_vmcs, cpu) = NULL;
	list_del(&loaded_vmcs->loaded_vmcss_on_cpu_link);
	loaded_vmcs_init(loaded_vmcs);
}

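/*
 * A VMCS can only reliably be VMCLEARed on the CPU that last had it loaded
 * (its working state may still be cached there), hence the cross-CPU call
 * below instead of a local vmcs_clear().
 */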
static void loaded_vmcs_clear(struct loaded_vmcs *loaded_vmcs)
{
	if (loaded_vmcs->cpu != -1)
		smp_call_function_single(
			loaded_vmcs->cpu, __loaded_vmcs_clear, loaded_vmcs, 1);
}

static inline void vpid_sync_vcpu_single(struct vcpu_vmx *vmx)
{
	if (vmx->vpid == 0)
		return;

	if (cpu_has_vmx_invvpid_single())
		__invvpid(VMX_VPID_EXTENT_SINGLE_CONTEXT, vmx->vpid, 0);
}

static inline void vpid_sync_vcpu_global(void)
{
	if (cpu_has_vmx_invvpid_global())
		__invvpid(VMX_VPID_EXTENT_ALL_CONTEXT, 0, 0);
}