/*
 * Xen mmu operations
 *
 * This file contains the various mmu fetch and update operations.
 * The most important job they must perform is the mapping between the
 * domain's pfns and the machine's mfns.
 *
 * Xen allows guests to directly update the pagetable, in a controlled
 * fashion.  In other words, the guest modifies the same pagetable
 * that the CPU actually uses, which eliminates the overhead of having
 * a separate shadow pagetable.
 *
 * In order to allow this, it falls on the guest domain to map its
 * notion of a "physical" pfn - which is just a domain-local frame
 * number - into a real "machine address" which the CPU's MMU can
 * use.
 *
 * A pgd_t/pmd_t/pte_t will typically contain an mfn, and so can be
 * inserted directly into the pagetable.  When creating a new
 * pte/pmd/pgd, it converts the passed pfn into an mfn.  Conversely,
 * when reading the content back with __(pgd|pmd|pte)_val, it converts
 * the mfn back into a pfn.
 *
 * The other constraint is that all pages which make up a pagetable
 * must be mapped read-only in the guest.  This prevents uncontrolled
 * guest updates to the pagetable.  Xen strictly enforces this, and
 * will disallow any pagetable update which will end up mapping a
 * pagetable page RW, and will disallow using any writable page as a
 * pagetable.
 *
 * Naively, when loading %cr3 with the base of a new pagetable, Xen
 * would need to validate the whole pagetable before going on.
 * Naturally, this is quite slow.  The solution is to "pin" a
 * pagetable, which enforces all the constraints on the pagetable even
 * when it is not actively in use.  This means that Xen can be assured
 * that it is still valid when you do load it into %cr3, and doesn't
 * need to revalidate it.
 *
 * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007
 */
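
/*
 * Illustrative sketch (not part of the original file): how the p2m
 * translation described above shows up when a pte is built and read
 * back.  The pfn value is made up; mfn_pte(), pte_mfn() and
 * pfn_to_mfn() are the helpers this file already relies on.
 *
 *	unsigned long pfn = 0x1000;		(guest-local frame)
 *	unsigned long mfn = pfn_to_mfn(pfn);	(p2m lookup)
 *	pte_t pte = mfn_pte(mfn, PAGE_KERNEL);	(the pte stores the mfn)
 *	BUG_ON(pte_mfn(pte) != mfn);		(reading back yields the mfn)
 */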
#include <linux/sched.h>
#include <linux/highmem.h>
#include <linux/debugfs.h>
#include <linux/bug.h>
#include <linux/vmalloc.h>
#include <linux/module.h>
#include <linux/gfp.h>
#include <linux/memblock.h>
#include <linux/seq_file.h>

#include <trace/events/xen.h>

#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/fixmap.h>
#include <asm/mmu_context.h>
#include <asm/setup.h>
#include <asm/paravirt.h>
#include <asm/e820.h>
#include <asm/linkage.h>
#include <asm/page.h>
#include <asm/init.h>
#include <asm/pat.h>
#include <asm/smp.h>

#include <asm/xen/hypercall.h>
#include <asm/xen/hypervisor.h>

#include <xen/xen.h>
#include <xen/page.h>
#include <xen/interface/xen.h>
#include <xen/interface/hvm/hvm_op.h>
#include <xen/interface/version.h>
#include <xen/interface/memory.h>
#include <xen/hvc-console.h>

#include "multicalls.h"
#include "mmu.h"
#include "debugfs.h"

/*
 * Protects atomic reservation decrease/increase against concurrent increases.
 * Also protects non-atomic updates of current_pages and balloon lists.
 */
DEFINE_SPINLOCK(xen_reservation_lock);

/*
 * Identity map, in addition to plain kernel map.  This needs to be
 * large enough to allocate page table pages to allocate the rest.
 * Each page can map 2MB.
 */
#define LEVEL1_IDENT_ENTRIES	(PTRS_PER_PTE * 4)
static RESERVE_BRK_ARRAY(pte_t, level1_ident_pgt, LEVEL1_IDENT_ENTRIES);

#ifdef CONFIG_X86_64
/* l3 pud for userspace vsyscall mapping */
static pud_t level3_user_vsyscall[PTRS_PER_PUD] __page_aligned_bss;
#endif /* CONFIG_X86_64 */

/*
 * Note about cr3 (pagetable base) values:
 *
 * xen_cr3 contains the current logical cr3 value; it contains the
 * last set cr3.  This may not be the current effective cr3, because
 * its update may be being lazily deferred.  However, a vcpu looking
 * at its own cr3 can use this value knowing that everything will
 * be self-consistent.
 *
 * xen_current_cr3 contains the actual vcpu cr3; it is set once the
 * hypercall to set the vcpu cr3 is complete (so it may be a little
 * out of date, but it will never be set early).  If one vcpu is
 * looking at another vcpu's cr3 value, it should use this variable.
 */
DEFINE_PER_CPU(unsigned long, xen_cr3);	 /* cr3 stored as physaddr */
DEFINE_PER_CPU(unsigned long, xen_current_cr3);	 /* actual vcpu cr3 */
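
/*
 * Illustrative sketch (not part of the original file): per the comment
 * above, a vcpu inspecting another vcpu's pagetable base should read
 * xen_current_cr3 rather than xen_cr3, e.g.
 *
 *	unsigned long cr3 = per_cpu(xen_current_cr3, cpu);
 *
 * where 'cpu' is a hypothetical remote cpu number; a vcpu's own,
 * possibly still-deferred, value lives in its xen_cr3.
 */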


/*
 * Just beyond the highest usermode address.  STACK_TOP_MAX has a
 * redzone above it, so round it up to a PGD boundary.
 */
#define USER_LIMIT	((STACK_TOP_MAX + PGDIR_SIZE - 1) & PGDIR_MASK)

unsigned long arbitrary_virt_to_mfn(void *vaddr)
{
	xmaddr_t maddr = arbitrary_virt_to_machine(vaddr);

	return PFN_DOWN(maddr.maddr);
}

xmaddr_t arbitrary_virt_to_machine(void *vaddr)
{
	unsigned long address = (unsigned long)vaddr;
	unsigned int level;
	pte_t *pte;
	unsigned offset;

	/*
	 * if the PFN is in the linear mapped vaddr range, we can just use
	 * the (quick) virt_to_machine() p2m lookup
	 */
	if (virt_addr_valid(vaddr))
		return virt_to_machine(vaddr);

	/* otherwise we have to do a (slower) full page-table walk */

	pte = lookup_address(address, &level);
	BUG_ON(pte == NULL);
	offset = address & ~PAGE_MASK;
	return XMADDR(((phys_addr_t)pte_mfn(*pte) << PAGE_SHIFT) + offset);
}
EXPORT_SYMBOL_GPL(arbitrary_virt_to_machine);
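
/*
 * Usage sketch (illustrative, not from the original file): callers may
 * pass any kernel virtual address - lowmem, vmalloc or fixmap - and get
 * the corresponding machine address back:
 *
 *	xmaddr_t maddr = arbitrary_virt_to_machine(vaddr);
 *	unsigned long mfn = PFN_DOWN(maddr.maddr);
 *
 * which is exactly what arbitrary_virt_to_mfn() above does.
 */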

void make_lowmem_page_readonly(void *vaddr)
{
	pte_t *pte, ptev;
	unsigned long address = (unsigned long)vaddr;
	unsigned int level;

	pte = lookup_address(address, &level);
	if (pte == NULL)
		return;		/* vaddr missing */

	ptev = pte_wrprotect(*pte);

	if (HYPERVISOR_update_va_mapping(address, ptev, 0))
		BUG();
}

void make_lowmem_page_readwrite(void *vaddr)
{
	pte_t *pte, ptev;
	unsigned long address = (unsigned long)vaddr;
	unsigned int level;

	pte = lookup_address(address, &level);
	if (pte == NULL)
		return;		/* vaddr missing */

	ptev = pte_mkwrite(*pte);

	if (HYPERVISOR_update_va_mapping(address, ptev, 0))
		BUG();
}


static bool xen_page_pinned(void *ptr)
{
	struct page *page = virt_to_page(ptr);

	return PagePinned(page);
}

void xen_set_domain_pte(pte_t *ptep, pte_t pteval, unsigned domid)
{
	struct multicall_space mcs;
	struct mmu_update *u;

	trace_xen_mmu_set_domain_pte(ptep, pteval, domid);

	mcs = xen_mc_entry(sizeof(*u));
	u = mcs.args;

	/* ptep might be kmapped when using 32-bit HIGHPTE */
	u->ptr = virt_to_machine(ptep).maddr;
	u->val = pte_val_ma(pteval);

	MULTI_mmu_update(mcs.mc, mcs.args, 1, NULL, domid);

	xen_mc_issue(PARAVIRT_LAZY_MMU);
}
EXPORT_SYMBOL_GPL(xen_set_domain_pte);

static void xen_extend_mmu_update(const struct mmu_update *update)
{
	struct multicall_space mcs;
	struct mmu_update *u;

	mcs = xen_mc_extend_args(__HYPERVISOR_mmu_update, sizeof(*u));

	if (mcs.mc != NULL) {
		mcs.mc->args[1]++;
	} else {
		mcs = __xen_mc_entry(sizeof(*u));
		MULTI_mmu_update(mcs.mc, mcs.args, 1, NULL, DOMID_SELF);
	}

	u = mcs.args;
	*u = *update;
}
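
/*
 * Illustrative note (not part of the original file): callers are
 * expected to bracket xen_extend_mmu_update()/xen_extend_mmuext_op()
 * with a multicall batch, roughly
 *
 *	xen_mc_batch();
 *	u.ptr = ...;  u.val = ...;
 *	xen_extend_mmu_update(&u);
 *	xen_mc_issue(PARAVIRT_LAZY_MMU);
 *
 * as xen_set_pmd_hyper() and xen_set_pud_hyper() below do.
 */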

static void xen_extend_mmuext_op(const struct mmuext_op *op)
{
	struct multicall_space mcs;
	struct mmuext_op *u;

	mcs = xen_mc_extend_args(__HYPERVISOR_mmuext_op, sizeof(*u));

	if (mcs.mc != NULL) {
		mcs.mc->args[1]++;
	} else {
		mcs = __xen_mc_entry(sizeof(*u));
		MULTI_mmuext_op(mcs.mc, mcs.args, 1, NULL, DOMID_SELF);
	}

	u = mcs.args;
	*u = *op;
}

static void xen_set_pmd_hyper(pmd_t *ptr, pmd_t val)
{
	struct mmu_update u;

	preempt_disable();

	xen_mc_batch();

	/* ptr may be ioremapped for 64-bit pagetable setup */
	u.ptr = arbitrary_virt_to_machine(ptr).maddr;
	u.val = pmd_val_ma(val);
	xen_extend_mmu_update(&u);

	xen_mc_issue(PARAVIRT_LAZY_MMU);

	preempt_enable();
}

static void xen_set_pmd(pmd_t *ptr, pmd_t val)
{
	trace_xen_mmu_set_pmd(ptr, val);

	/* If page is not pinned, we can just update the entry
	   directly */
	if (!xen_page_pinned(ptr)) {
		*ptr = val;
		return;
	}

	xen_set_pmd_hyper(ptr, val);
}

/*
 * Associate a virtual page frame with a given physical page frame
 * and protection flags for that frame.
 */
void set_pte_mfn(unsigned long vaddr, unsigned long mfn, pgprot_t flags)
{
	set_pte_vaddr(vaddr, mfn_pte(mfn, flags));
}

static bool xen_batched_set_pte(pte_t *ptep, pte_t pteval)
{
	struct mmu_update u;

	if (paravirt_get_lazy_mode() != PARAVIRT_LAZY_MMU)
		return false;

	xen_mc_batch();

	u.ptr = virt_to_machine(ptep).maddr | MMU_NORMAL_PT_UPDATE;
	u.val = pte_val_ma(pteval);
	xen_extend_mmu_update(&u);

	xen_mc_issue(PARAVIRT_LAZY_MMU);

	return true;
}
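
/*
 * Illustrative sketch (not part of the original file): the lazy-MMU
 * check above means batching only happens inside a region such as
 *
 *	arch_enter_lazy_mmu_mode();
 *	set_pte_at(mm, addr, ptep, pteval);	(repeated over a range)
 *	arch_leave_lazy_mmu_mode();
 *
 * outside such a region xen_batched_set_pte() returns false and the
 * pte is written directly.  arch_enter/leave_lazy_mmu_mode() are the
 * generic hooks that switch paravirt_get_lazy_mode() to
 * PARAVIRT_LAZY_MMU under Xen.
 */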

static inline void __xen_set_pte(pte_t *ptep, pte_t pteval)
{
	if (!xen_batched_set_pte(ptep, pteval))
		native_set_pte(ptep, pteval);
}

static void xen_set_pte(pte_t *ptep, pte_t pteval)
{
	trace_xen_mmu_set_pte(ptep, pteval);
	__xen_set_pte(ptep, pteval);
}

static void xen_set_pte_at(struct mm_struct *mm, unsigned long addr,
		    pte_t *ptep, pte_t pteval)
{
	trace_xen_mmu_set_pte_at(mm, addr, ptep, pteval);
	__xen_set_pte(ptep, pteval);
}

pte_t xen_ptep_modify_prot_start(struct mm_struct *mm,
				 unsigned long addr, pte_t *ptep)
{
	/* Just return the pte as-is.  We preserve the bits on commit */
	trace_xen_mmu_ptep_modify_prot_start(mm, addr, ptep, *ptep);
	return *ptep;
}

void xen_ptep_modify_prot_commit(struct mm_struct *mm, unsigned long addr,
				 pte_t *ptep, pte_t pte)
{
	struct mmu_update u;

	trace_xen_mmu_ptep_modify_prot_commit(mm, addr, ptep, pte);
	xen_mc_batch();

	u.ptr = virt_to_machine(ptep).maddr | MMU_PT_UPDATE_PRESERVE_AD;
	u.val = pte_val_ma(pte);
	xen_extend_mmu_update(&u);

	xen_mc_issue(PARAVIRT_LAZY_MMU);
}
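
/*
 * Illustrative sketch (not part of the original file): generic mm code
 * drives the two hooks above as a read-modify-write pair, roughly
 *
 *	pte = ptep_modify_prot_start(mm, addr, ptep);
 *	pte = pte_wrprotect(pte);		(or some other change)
 *	ptep_modify_prot_commit(mm, addr, ptep, pte);
 *
 * so the MMU_PT_UPDATE_PRESERVE_AD commit keeps any accessed/dirty
 * bits the hardware set in between.
 */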

/* Assume pteval_t is equivalent to all the other *val_t types. */
static pteval_t pte_mfn_to_pfn(pteval_t val)
{
	if (val & _PAGE_PRESENT) {
		unsigned long mfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT;
		pteval_t flags = val & PTE_FLAGS_MASK;
		val = ((pteval_t)mfn_to_pfn(mfn) << PAGE_SHIFT) | flags;
	}

	return val;
}

static pteval_t pte_pfn_to_mfn(pteval_t val)
{
	if (val & _PAGE_PRESENT) {
		unsigned long pfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT;
		pteval_t flags = val & PTE_FLAGS_MASK;
		unsigned long mfn;

		if (!xen_feature(XENFEAT_auto_translated_physmap))
			mfn = get_phys_to_machine(pfn);
		else
			mfn = pfn;
		/*
		 * If there's no mfn for the pfn, then just create an
		 * empty non-present pte.  Unfortunately this loses
		 * information about the original pfn, so
		 * pte_mfn_to_pfn is asymmetric.
		 */
		if (unlikely(mfn == INVALID_P2M_ENTRY)) {
			mfn = 0;
			flags = 0;
		} else {
			/*
			 * It is important to do this test _after_ the
			 * INVALID_P2M_ENTRY check, as INVALID_P2M_ENTRY &
			 * IDENTITY_FRAME_BIT resolves to true.
			 */
			mfn &= ~FOREIGN_FRAME_BIT;
			if (mfn & IDENTITY_FRAME_BIT) {
				mfn &= ~IDENTITY_FRAME_BIT;
				flags |= _PAGE_IOMAP;
			}
		}
		val = ((pteval_t)mfn << PAGE_SHIFT) | flags;
	}

	return val;
}

static pteval_t iomap_pte(pteval_t val)
{
	if (val & _PAGE_PRESENT) {
		unsigned long pfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT;
		pteval_t flags = val & PTE_FLAGS_MASK;

		/* We assume the pte frame number is a MFN, so
		   just use it as-is. */
		val = ((pteval_t)pfn << PAGE_SHIFT) | flags;
	}

	return val;
}

static pteval_t xen_pte_val(pte_t pte)
{
	pteval_t pteval = pte.pte;
#if 0
	/* If this is a WC pte, convert back from Xen WC to Linux WC */
	if ((pteval & (_PAGE_PAT | _PAGE_PCD | _PAGE_PWT)) == _PAGE_PAT) {
		WARN_ON(!pat_enabled);
		pteval = (pteval & ~_PAGE_PAT) | _PAGE_PWT;
	}
#endif
	if (xen_initial_domain() && (pteval & _PAGE_IOMAP))
		return pteval;

	return pte_mfn_to_pfn(pteval);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_pte_val);

static pgdval_t xen_pgd_val(pgd_t pgd)
{
	return pte_mfn_to_pfn(pgd.pgd);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_pgd_val);

/*
 * Xen's PAT setup is part of its ABI, though I assume entries 6 & 7
 * are reserved for now, to correspond to the Intel-reserved PAT
 * types.
 *
 * We expect Linux's PAT set as follows:
 *
 * Idx  PTE flags        Linux    Xen    Default
 * 0                     WB       WB     WB
 * 1            PWT      WC       WT     WT
 * 2        PCD          UC-      UC-    UC-
 * 3        PCD PWT      UC       UC     UC
 * 4    PAT              WB       WC     WB
 * 5    PAT     PWT      WC       WP     WT
 * 6    PAT PCD          UC-      UC     UC-
 * 7    PAT PCD PWT      UC       UC     UC
 */

void xen_set_pat(u64 pat)
{
	/* We expect Linux to use a PAT setting of
	 * UC UC- WC WB (ignoring the PAT flag) */
	WARN_ON(pat != 0x0007010600070106ull);
}

static pte_t xen_make_pte(pteval_t pte)
{
	phys_addr_t addr = (pte & PTE_PFN_MASK);
#if 0
	/* If Linux is trying to set a WC pte, then map to the Xen WC.
	 * If _PAGE_PAT is set, then it probably means it is really
	 * _PAGE_PSE, so avoid fiddling with the PAT mapping and hope
	 * things work out OK...
	 *
	 * (We should never see kernel mappings with _PAGE_PSE set,
	 * but we could see hugetlbfs mappings, I think.).
	 */
	if (pat_enabled && !WARN_ON(pte & _PAGE_PAT)) {
		if ((pte & (_PAGE_PCD | _PAGE_PWT)) == _PAGE_PWT)
			pte = (pte & ~(_PAGE_PCD | _PAGE_PWT)) | _PAGE_PAT;
	}
#endif
	/*
	 * Unprivileged domains are allowed to do IOMAPpings for
	 * PCI passthrough, but not map ISA space.  The ISA
	 * mappings are just dummy local mappings to keep other
	 * parts of the kernel happy.
	 */
	if (unlikely(pte & _PAGE_IOMAP) &&
	    (xen_initial_domain() || addr >= ISA_END_ADDRESS)) {
		pte = iomap_pte(pte);
	} else {
		pte &= ~_PAGE_IOMAP;
		pte = pte_pfn_to_mfn(pte);
	}

	return native_make_pte(pte);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_make_pte);

static pgd_t xen_make_pgd(pgdval_t pgd)
{
	pgd = pte_pfn_to_mfn(pgd);
	return native_make_pgd(pgd);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_make_pgd);

static pmdval_t xen_pmd_val(pmd_t pmd)
{
	return pte_mfn_to_pfn(pmd.pmd);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_pmd_val);

static void xen_set_pud_hyper(pud_t *ptr, pud_t val)
{
	struct mmu_update u;

	preempt_disable();

	xen_mc_batch();

	/* ptr may be ioremapped for 64-bit pagetable setup */
	u.ptr = arbitrary_virt_to_machine(ptr).maddr;
	u.val = pud_val_ma(val);
	xen_extend_mmu_update(&u);

	xen_mc_issue(PARAVIRT_LAZY_MMU);

	preempt_enable();
}

static void xen_set_pud(pud_t *ptr, pud_t val)
{
	trace_xen_mmu_set_pud(ptr, val);

	/* If page is not pinned, we can just update the entry
	   directly */
	if (!xen_page_pinned(ptr)) {
		*ptr = val;
		return;
	}

	xen_set_pud_hyper(ptr, val);
}

#ifdef CONFIG_X86_PAE
static void xen_set_pte_atomic(pte_t *ptep, pte_t pte)
{
	trace_xen_mmu_set_pte_atomic(ptep, pte);
	set_64bit((u64 *)ptep, native_pte_val(pte));
}

static void xen_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	trace_xen_mmu_pte_clear(mm, addr, ptep);
	if (!xen_batched_set_pte(ptep, native_make_pte(0)))
		native_pte_clear(mm, addr, ptep);
}

static void xen_pmd_clear(pmd_t *pmdp)
{
	trace_xen_mmu_pmd_clear(pmdp);
	set_pmd(pmdp, __pmd(0));
}
#endif	/* CONFIG_X86_PAE */

static pmd_t xen_make_pmd(pmdval_t pmd)
{
	pmd = pte_pfn_to_mfn(pmd);
	return native_make_pmd(pmd);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_make_pmd);

#if PAGETABLE_LEVELS == 4
static pudval_t xen_pud_val(pud_t pud)
{
	return pte_mfn_to_pfn(pud.pud);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_pud_val);

static pud_t xen_make_pud(pudval_t pud)
{
	pud = pte_pfn_to_mfn(pud);

	return native_make_pud(pud);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_make_pud);

static pgd_t *xen_get_user_pgd(pgd_t *pgd)
{
	pgd_t *pgd_page = (pgd_t *)(((unsigned long)pgd) & PAGE_MASK);
	unsigned offset = pgd - pgd_page;
	pgd_t *user_ptr = NULL;

	if (offset < pgd_index(USER_LIMIT)) {
		struct page *page = virt_to_page(pgd_page);
		user_ptr = (pgd_t *)page->private;
		if (user_ptr)
			user_ptr += offset;
	}

	return user_ptr;
}

static void __xen_set_pgd_hyper(pgd_t *ptr, pgd_t val)
{
	struct mmu_update u;

	u.ptr = virt_to_machine(ptr).maddr;
	u.val = pgd_val_ma(val);
	xen_extend_mmu_update(&u);
}

/*
 * Raw hypercall-based set_pgd, intended for in early boot before
 * there's a page structure.  This implies:
 *  1. The only existing pagetable is the kernel's
 *  2. It is always pinned
 *  3. It has no user pagetable attached to it
 */
static void __init xen_set_pgd_hyper(pgd_t *ptr, pgd_t val)
{
	preempt_disable();

	xen_mc_batch();

	__xen_set_pgd_hyper(ptr, val);

	xen_mc_issue(PARAVIRT_LAZY_MMU);

	preempt_enable();
}

static void xen_set_pgd(pgd_t *ptr, pgd_t val)
{
	pgd_t *user_ptr = xen_get_user_pgd(ptr);

	trace_xen_mmu_set_pgd(ptr, user_ptr, val);

	/* If page is not pinned, we can just update the entry
	   directly */
	if (!xen_page_pinned(ptr)) {
		*ptr = val;
		if (user_ptr) {
			WARN_ON(xen_page_pinned(user_ptr));
			*user_ptr = val;
		}
		return;
	}

	/* If it's pinned, then we can at least batch the kernel and
	   user updates together. */
	xen_mc_batch();

	__xen_set_pgd_hyper(ptr, val);
	if (user_ptr)
		__xen_set_pgd_hyper(user_ptr, val);

	xen_mc_issue(PARAVIRT_LAZY_MMU);
}
#endif	/* PAGETABLE_LEVELS == 4 */

/*
 * (Yet another) pagetable walker.  This one is intended for pinning a
 * pagetable.  This means that it walks a pagetable and calls the
 * callback function on each page it finds making up the page table,
 * at every level.  It walks the entire pagetable, but it only bothers
 * pinning pte pages which are below limit.  In the normal case this
 * will be STACK_TOP_MAX, but at boot we need to pin up to
 * FIXADDR_TOP.
 *
 * For 32-bit the important bit is that we don't pin beyond there,
 * because then we start getting into Xen's ptes.
 *
 * For 64-bit, we must skip the Xen hole in the middle of the address
 * space, just after the big x86-64 virtual hole.
 */
static int __xen_pgd_walk(struct mm_struct *mm, pgd_t *pgd,
			  int (*func)(struct mm_struct *mm, struct page *,
				      enum pt_level),
			  unsigned long limit)
{
	int flush = 0;
	unsigned hole_low, hole_high;
	unsigned pgdidx_limit, pudidx_limit, pmdidx_limit;
	unsigned pgdidx, pudidx, pmdidx;

	/* The limit is the last byte to be touched */
	limit--;
	BUG_ON(limit >= FIXADDR_TOP);

	if (xen_feature(XENFEAT_auto_translated_physmap))
		return 0;

	/*
	 * 64-bit has a great big hole in the middle of the address
	 * space, which contains the Xen mappings.  On 32-bit these
	 * will end up making a zero-sized hole and so is a no-op.
	 */
	hole_low = pgd_index(USER_LIMIT);
	hole_high = pgd_index(PAGE_OFFSET);

	pgdidx_limit = pgd_index(limit);
#if PTRS_PER_PUD > 1
	pudidx_limit = pud_index(limit);
#else
	pudidx_limit = 0;
#endif
#if PTRS_PER_PMD > 1
	pmdidx_limit = pmd_index(limit);
#else
	pmdidx_limit = 0;
#endif

	for (pgdidx = 0; pgdidx <= pgdidx_limit; pgdidx++) {
		pud_t *pud;

		if (pgdidx >= hole_low && pgdidx < hole_high)
			continue;

		if (!pgd_val(pgd[pgdidx]))
			continue;

		pud = pud_offset(&pgd[pgdidx], 0);

		if (PTRS_PER_PUD > 1) /* not folded */
			flush |= (*func)(mm, virt_to_page(pud), PT_PUD);

		for (pudidx = 0; pudidx < PTRS_PER_PUD; pudidx++) {
			pmd_t *pmd;

			if (pgdidx == pgdidx_limit &&
			    pudidx > pudidx_limit)
				goto out;

			if (pud_none(pud[pudidx]))
				continue;

			pmd = pmd_offset(&pud[pudidx], 0);

			if (PTRS_PER_PMD > 1) /* not folded */
				flush |= (*func)(mm, virt_to_page(pmd), PT_PMD);

			for (pmdidx = 0; pmdidx < PTRS_PER_PMD; pmdidx++) {
				struct page *pte;

				if (pgdidx == pgdidx_limit &&
				    pudidx == pudidx_limit &&
				    pmdidx > pmdidx_limit)
					goto out;

				if (pmd_none(pmd[pmdidx]))
					continue;

				pte = pmd_page(pmd[pmdidx]);
				flush |= (*func)(mm, pte, PT_PTE);
			}
		}
	}

out:
	/* Do the top level last, so that the callbacks can use it as
	   a cue to do final things like tlb flushes. */
	flush |= (*func)(mm, virt_to_page(pgd), PT_PGD);

	return flush;
}

static int xen_pgd_walk(struct mm_struct *mm,
			int (*func)(struct mm_struct *mm, struct page *,
				    enum pt_level),
			unsigned long limit)
{
	return __xen_pgd_walk(mm, mm->pgd, func, limit);
}

/* If we're using split pte locks, then take the page's lock and
   return a pointer to it.  Otherwise return NULL. */
static spinlock_t *xen_pte_lock(struct page *page, struct mm_struct *mm)
{
	spinlock_t *ptl = NULL;

#if USE_SPLIT_PTLOCKS
	ptl = __pte_lockptr(page);
	spin_lock_nest_lock(ptl, &mm->page_table_lock);
#endif

	return ptl;
}

static void xen_pte_unlock(void *v)
{
	spinlock_t *ptl = v;
	spin_unlock(ptl);
}

static void xen_do_pin(unsigned level, unsigned long pfn)
{
	struct mmuext_op op;

	op.cmd = level;
	op.arg1.mfn = pfn_to_mfn(pfn);

	xen_extend_mmuext_op(&op);
}

static int xen_pin_page(struct mm_struct *mm, struct page *page,
			enum pt_level level)
{
	unsigned pgfl = TestSetPagePinned(page);
	int flush;

	if (pgfl)
		flush = 0;		/* already pinned */
	else if (PageHighMem(page))
		/* kmaps need flushing if we found an unpinned
		   highpage */
		flush = 1;
	else {
		void *pt = lowmem_page_address(page);
		unsigned long pfn = page_to_pfn(page);
		struct multicall_space mcs = __xen_mc_entry(0);
		spinlock_t *ptl;

		flush = 0;

		/*
		 * We need to hold the pagetable lock between the time
		 * we make the pagetable RO and when we actually pin
		 * it.  If we don't, then other users may come in and
		 * attempt to update the pagetable by writing it,
		 * which will fail because the memory is RO but not
		 * pinned, so Xen won't do the trap'n'emulate.
		 *
		 * If we're using split pte locks, we can't hold the
		 * entire pagetable's worth of locks during the
		 * traverse, because we may wrap the preempt count (8
		 * bits).  The solution is to mark RO and pin each PTE
		 * page while holding the lock.  This means the number
		 * of locks we end up holding is never more than a
		 * batch size (~32 entries, at present).
		 *
		 * If we're not using split pte locks, we needn't pin
		 * the PTE pages independently, because we're
		 * protected by the overall pagetable lock.
		 */
		ptl = NULL;
		if (level == PT_PTE)
			ptl = xen_pte_lock(page, mm);

		MULTI_update_va_mapping(mcs.mc, (unsigned long)pt,
					pfn_pte(pfn, PAGE_KERNEL_RO),
					level == PT_PGD ? UVMF_TLB_FLUSH : 0);

		if (ptl) {
			xen_do_pin(MMUEXT_PIN_L1_TABLE, pfn);

			/* Queue a deferred unlock for when this batch
			   is completed. */
			xen_mc_callback(xen_pte_unlock, ptl);
		}
	}

	return flush;
}

/* This is called just after a mm has been created, but it has not
   been used yet.  We need to make sure that its pagetable is all
   read-only, and can be pinned. */
static void __xen_pgd_pin(struct mm_struct *mm, pgd_t *pgd)
{
	trace_xen_mmu_pgd_pin(mm, pgd);

	xen_mc_batch();

	if (__xen_pgd_walk(mm, pgd, xen_pin_page, USER_LIMIT)) {
		/* re-enable interrupts for flushing */
		xen_mc_issue(0);

		kmap_flush_unused();

		xen_mc_batch();
	}

#ifdef CONFIG_X86_64
	{
		pgd_t *user_pgd = xen_get_user_pgd(pgd);

		xen_do_pin(MMUEXT_PIN_L4_TABLE, PFN_DOWN(__pa(pgd)));

		if (user_pgd) {
			xen_pin_page(mm, virt_to_page(user_pgd), PT_PGD);
			xen_do_pin(MMUEXT_PIN_L4_TABLE,
				   PFN_DOWN(__pa(user_pgd)));
		}
	}
#else /* CONFIG_X86_32 */
#ifdef CONFIG_X86_PAE
	/* Need to make sure unshared kernel PMD is pinnable */
	xen_pin_page(mm, pgd_page(pgd[pgd_index(TASK_SIZE)]),
		     PT_PMD);
#endif
	xen_do_pin(MMUEXT_PIN_L3_TABLE, PFN_DOWN(__pa(pgd)));
#endif /* CONFIG_X86_64 */
	xen_mc_issue(0);
}

static void xen_pgd_pin(struct mm_struct *mm)
{
	__xen_pgd_pin(mm, mm->pgd);
}

/*
 * On save, we need to pin all pagetables to make sure they get their
 * mfns turned into pfns.  Search the list for any unpinned pgds and pin
 * them (unpinned pgds are not currently in use, probably because the
 * process is under construction or destruction).
 *
 * Expected to be called in stop_machine() ("equivalent to taking
 * every spinlock in the system"), so the locking doesn't really
 * matter all that much.
 */
void xen_mm_pin_all(void)
{
	struct page *page;

	spin_lock(&pgd_lock);

	list_for_each_entry(page, &pgd_list, lru) {
		if (!PagePinned(page)) {
			__xen_pgd_pin(&init_mm, (pgd_t *)page_address(page));
			SetPageSavePinned(page);
		}
	}

	spin_unlock(&pgd_lock);
}

/*
 * The init_mm pagetable is really pinned as soon as its created, but
 * that's before we have page structures to store the bits.  So do all
 * the book-keeping now.
 */
static int __init xen_mark_pinned(struct mm_struct *mm, struct page *page,
				  enum pt_level level)
{
	SetPagePinned(page);
	return 0;
}

static void __init xen_mark_init_mm_pinned(void)
{
	xen_pgd_walk(&init_mm, xen_mark_pinned, FIXADDR_TOP);
}

static int xen_unpin_page(struct mm_struct *mm, struct page *page,
			  enum pt_level level)
{
	unsigned pgfl = TestClearPagePinned(page);

	if (pgfl && !PageHighMem(page)) {
		void *pt = lowmem_page_address(page);
		unsigned long pfn = page_to_pfn(page);
		spinlock_t *ptl = NULL;
		struct multicall_space mcs;

		/*
		 * Do the converse to pin_page.  If we're using split
		 * pte locks, we must be holding the lock while
		 * the pte page is unpinned but still RO to prevent
		 * concurrent updates from seeing it in this
		 * partially-pinned state.
		 */
		if (level == PT_PTE) {
			ptl = xen_pte_lock(page, mm);

			if (ptl)
				xen_do_pin(MMUEXT_UNPIN_TABLE, pfn);
		}

		mcs = __xen_mc_entry(0);

		MULTI_update_va_mapping(mcs.mc, (unsigned long)pt,
					pfn_pte(pfn, PAGE_KERNEL),
					level == PT_PGD ? UVMF_TLB_FLUSH : 0);

		if (ptl) {
			/* unlock when batch completed */
			xen_mc_callback(xen_pte_unlock, ptl);
		}
	}

	return 0;		/* never need to flush on unpin */
}

/* Release a pagetable's pages back as normal RW */
static void __xen_pgd_unpin(struct mm_struct *mm, pgd_t *pgd)
{
	trace_xen_mmu_pgd_unpin(mm, pgd);

	xen_mc_batch();

	xen_do_pin(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd)));