/* SPDX-License-Identifier: GPL-2.0-or-later */
#ifndef _ASM_POWERPC_BOOK3S_64_MMU_HASH_H_
#define _ASM_POWERPC_BOOK3S_64_MMU_HASH_H_
/*
 * PowerPC64 memory management structures
 *
 * Dave Engebretsen & Mike Corrigan <{engebret|mikejc}@us.ibm.com>
 *   PPC64 rework.
 */

#include <asm/page.h>
#include <asm/bug.h>
#include <asm/asm-const.h>

/*
 * This is necessary to get the definition of PGTABLE_RANGE which we
 * need for various slices related matters. Note that this isn't the
 * complete pgtable.h but only a portion of it.
 */
#include <asm/book3s/64/pgtable.h>
#include <asm/task_size_64.h>
#include <asm/cpu_has_feature.h>

/*
 * SLB
 */

#define SLB_NUM_BOLTED		2
#define SLB_CACHE_ENTRIES	8
#define SLB_MIN_SIZE		32

/* Bits in the SLB ESID word */
#define SLB_ESID_V		ASM_CONST(0x0000000008000000) /* valid */

/* Bits in the SLB VSID word */
#define SLB_VSID_SHIFT		12
#define SLB_VSID_SHIFT_256M	SLB_VSID_SHIFT
#define SLB_VSID_SHIFT_1T	24
#define SLB_VSID_SSIZE_SHIFT	62
#define SLB_VSID_B		ASM_CONST(0xc000000000000000)
#define SLB_VSID_B_256M		ASM_CONST(0x0000000000000000)
#define SLB_VSID_B_1T		ASM_CONST(0x4000000000000000)
#define SLB_VSID_KS		ASM_CONST(0x0000000000000800)
#define SLB_VSID_KP		ASM_CONST(0x0000000000000400)
#define SLB_VSID_N		ASM_CONST(0x0000000000000200) /* no-execute */
#define SLB_VSID_L		ASM_CONST(0x0000000000000100)
#define SLB_VSID_C		ASM_CONST(0x0000000000000080) /* class */
#define SLB_VSID_LP		ASM_CONST(0x0000000000000030)
#define SLB_VSID_LP_00		ASM_CONST(0x0000000000000000)
#define SLB_VSID_LP_01		ASM_CONST(0x0000000000000010)
#define SLB_VSID_LP_10		ASM_CONST(0x0000000000000020)
#define SLB_VSID_LP_11		ASM_CONST(0x0000000000000030)
#define SLB_VSID_LLP		(SLB_VSID_L|SLB_VSID_LP)

#define SLB_VSID_KERNEL		(SLB_VSID_KP)
#define SLB_VSID_USER		(SLB_VSID_KP|SLB_VSID_KS|SLB_VSID_C)

#define SLBIE_C			(0x08000000)
#define SLBIE_SSIZE_SHIFT	25

/*
 * Hash table
 */

#define HPTES_PER_GROUP 8

#define HPTE_V_SSIZE_SHIFT	62
#define HPTE_V_AVPN_SHIFT	7
#define HPTE_V_COMMON_BITS	ASM_CONST(0x000fffffffffffff)
#define HPTE_V_AVPN		ASM_CONST(0x3fffffffffffff80)
#define HPTE_V_AVPN_3_0		ASM_CONST(0x000fffffffffff80)
#define HPTE_V_AVPN_VAL(x)	(((x) & HPTE_V_AVPN) >> HPTE_V_AVPN_SHIFT)
#define HPTE_V_COMPARE(x,y)	(!(((x) ^ (y)) & 0xffffffffffffff80UL))
#define HPTE_V_BOLTED		ASM_CONST(0x0000000000000010)
#define HPTE_V_LOCK		ASM_CONST(0x0000000000000008)
#define HPTE_V_LARGE		ASM_CONST(0x0000000000000004)
#define HPTE_V_SECONDARY	ASM_CONST(0x0000000000000002)
#define HPTE_V_VALID		ASM_CONST(0x0000000000000001)

/*
 * ISA 3.0 has a different HPTE format.
 */
#define HPTE_R_3_0_SSIZE_SHIFT	58
#define HPTE_R_3_0_SSIZE_MASK	(3ull << HPTE_R_3_0_SSIZE_SHIFT)
#define HPTE_R_PP0		ASM_CONST(0x8000000000000000)
#define HPTE_R_TS		ASM_CONST(0x4000000000000000)
#define HPTE_R_KEY_HI		ASM_CONST(0x3000000000000000)
#define HPTE_R_KEY_BIT0		ASM_CONST(0x2000000000000000)
#define HPTE_R_KEY_BIT1		ASM_CONST(0x1000000000000000)
#define HPTE_R_RPN_SHIFT	12
#define HPTE_R_RPN		ASM_CONST(0x0ffffffffffff000)
#define HPTE_R_RPN_3_0		ASM_CONST(0x01fffffffffff000)
#define HPTE_R_PP		ASM_CONST(0x0000000000000003)
#define HPTE_R_PPP		ASM_CONST(0x8000000000000003)
#define HPTE_R_N		ASM_CONST(0x0000000000000004)
#define HPTE_R_G		ASM_CONST(0x0000000000000008)
#define HPTE_R_M		ASM_CONST(0x0000000000000010)
#define HPTE_R_I		ASM_CONST(0x0000000000000020)
#define HPTE_R_W		ASM_CONST(0x0000000000000040)
#define HPTE_R_WIMG		ASM_CONST(0x0000000000000078)
#define HPTE_R_C		ASM_CONST(0x0000000000000080)
#define HPTE_R_R		ASM_CONST(0x0000000000000100)
#define HPTE_R_KEY_LO		ASM_CONST(0x0000000000000e00)
#define HPTE_R_KEY_BIT2		ASM_CONST(0x0000000000000800)
#define HPTE_R_KEY_BIT3		ASM_CONST(0x0000000000000400)
#define HPTE_R_KEY_BIT4		ASM_CONST(0x0000000000000200)
#define HPTE_R_KEY		(HPTE_R_KEY_LO | HPTE_R_KEY_HI)

#define HPTE_V_1TB_SEG		ASM_CONST(0x4000000000000000)
#define HPTE_V_VRMA_MASK	ASM_CONST(0x4001ffffff000000)

/* Values for PP (assumes Ks=0, Kp=1) */
#define PP_RWXX	0	/* Supervisor read/write, User none */
#define PP_RWRX 1	/* Supervisor read/write, User read */
#define PP_RWRW 2	/* Supervisor read/write, User read/write */
#define PP_RXRX 3	/* Supervisor read,       User read */
#define PP_RXXX	(HPTE_R_PP0 | 2)	/* Supervisor read, user none */

/* Fields for tlbiel instruction in architecture 2.06 */
#define TLBIEL_INVAL_SEL_MASK	0xc00	/* invalidation selector */
#define  TLBIEL_INVAL_PAGE	0x000	/* invalidate a single page */
#define  TLBIEL_INVAL_SET_LPID	0x800	/* invalidate a set for current LPID */
#define  TLBIEL_INVAL_SET	0xc00	/* invalidate a set for all LPIDs */
#define TLBIEL_INVAL_SET_MASK	0xfff000	/* set number to inval. */
#define TLBIEL_INVAL_SET_SHIFT	12

#define POWER7_TLB_SETS		128	/* # sets in POWER7 TLB */
#define POWER8_TLB_SETS		512	/* # sets in POWER8 TLB */
#define POWER9_TLB_SETS_HASH	256	/* # sets in POWER9 TLB Hash mode */
#define POWER9_TLB_SETS_RADIX	128	/* # sets in POWER9 TLB Radix mode */

#ifndef __ASSEMBLY__

struct mmu_hash_ops {
	void            (*hpte_invalidate)(unsigned long slot,
					   unsigned long vpn,
					   int bpsize, int apsize,
					   int ssize, int local);
	long		(*hpte_updatepp)(unsigned long slot,
					 unsigned long newpp,
					 unsigned long vpn,
					 int bpsize, int apsize,
					 int ssize, unsigned long flags);
	void            (*hpte_updateboltedpp)(unsigned long newpp,
					       unsigned long ea,
					       int psize, int ssize);
	long		(*hpte_insert)(unsigned long hpte_group,
				       unsigned long vpn,
				       unsigned long prpn,
				       unsigned long rflags,
				       unsigned long vflags,
				       int psize, int apsize,
				       int ssize);
	long		(*hpte_remove)(unsigned long hpte_group);
	int             (*hpte_removebolted)(unsigned long ea,
					     int psize, int ssize);
	void		(*flush_hash_range)(unsigned long number, int local);
	void		(*hugepage_invalidate)(unsigned long vsid,
					       unsigned long addr,
					       unsigned char *hpte_slot_array,
					       int psize, int ssize, int local);
	int		(*resize_hpt)(unsigned long shift);
	/*
	 * Special for kexec.
	 * To be called in real mode with interrupts disabled. No locks are
	 * taken, so concurrent access on pre-POWER5 hardware could result
	 * in a deadlock.
	 * The linear mapping is destroyed as well.
	 */
	void		(*hpte_clear_all)(void);
};
extern struct mmu_hash_ops mmu_hash_ops;
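
/*
 * Illustrative dispatch sketch (an editorial addition, not kernel code): the
 * platform backend (native, pSeries, ...) fills in mmu_hash_ops at boot and
 * the generic hash fault path calls through it, roughly:
 *
 *	slot = mmu_hash_ops.hpte_insert(hpte_group, vpn, pa, rflags,
 *					vflags, psize, apsize, ssize);
 *	if (slot == -1)
 *		// primary group full: retry with the secondary group, or
 *		// evict an entry via mmu_hash_ops.hpte_remove()
 */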

struct hash_pte {
	__be64 v;
	__be64 r;
};

extern struct hash_pte *htab_address;
extern unsigned long htab_size_bytes;
extern unsigned long htab_hash_mask;

static inline int shift_to_mmu_psize(unsigned int shift)
{
	int psize;

	for (psize = 0; psize < MMU_PAGE_COUNT; ++psize)
		if (mmu_psize_defs[psize].shift == shift)
			return psize;
	return -1;
}

static inline unsigned int mmu_psize_to_shift(unsigned int mmu_psize)
{
	if (mmu_psize_defs[mmu_psize].shift)
		return mmu_psize_defs[mmu_psize].shift;
	BUG();
}

static inline unsigned int ap_to_shift(unsigned long ap)
{
	int psize;

	for (psize = 0; psize < MMU_PAGE_COUNT; psize++) {
		if (mmu_psize_defs[psize].ap == ap)
			return mmu_psize_defs[psize].shift;
	}

	return -1;
}

static inline unsigned long get_sllp_encoding(int psize)
{
	unsigned long sllp;

	sllp = ((mmu_psize_defs[psize].sllp & SLB_VSID_L) >> 6) |
		((mmu_psize_defs[psize].sllp & SLB_VSID_LP) >> 4);
	return sllp;
}
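
/*
 * Worked example (illustrative; assumes a base page size whose sllp is
 * SLB_VSID_L | SLB_VSID_LP_01, as is typical for 64K pages): this packs
 * (0x100 >> 6) | (0x010 >> 4) into 0b101, i.e. the 3-bit L||LP form used
 * where an actual page size encoding is needed, e.g. by tlbie/tlbiel.
 */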

#endif /* __ASSEMBLY__ */

/*
 * Segment sizes.
 * These are the values used by hardware in the B field of
 * SLB entries and the first dword of MMU hashtable entries.
 * The B field is 2 bits; the values 2 and 3 are unused and reserved.
 */
#define MMU_SEGSIZE_256M	0
#define MMU_SEGSIZE_1T		1

/*
 * Encode the page number shift.
 * In order to fit the 78 bit VA in a 64 bit variable we shift the VA by
 * 12 bits. This enables us to address up to a 76 bit VA.
 * For the hpt hash from a VA we can ignore the page size bits of the VA and
 * for hpte encoding we ignore up to 23 bits of the VA. So ignoring the lower
 * 12 bits ensures we work in all cases including 4k page size.
 */
#define VPN_SHIFT	12

/*
 * HPTE Large Page (LP) details
 */
#define LP_SHIFT	12
#define LP_BITS		8
#define LP_MASK(i)	((0xFF >> (i)) << LP_SHIFT)

#ifndef __ASSEMBLY__

static inline int slb_vsid_shift(int ssize)
{
	if (ssize == MMU_SEGSIZE_256M)
		return SLB_VSID_SHIFT;
	return SLB_VSID_SHIFT_1T;
}

static inline int segment_shift(int ssize)
{
	if (ssize == MMU_SEGSIZE_256M)
		return SID_SHIFT;
	return SID_SHIFT_1T;
}
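
/*
 * Hedged sketch (added for illustration; mirrors what the SLB management
 * code does, with "flags" and "index" as hypothetical locals): an SLB entry
 * is the pair written by slbmte, built from the helpers above:
 *
 *	vsid_data = (vsid << slb_vsid_shift(ssize)) | flags |
 *		    ((unsigned long)ssize << SLB_VSID_SSIZE_SHIFT);
 *	esid_data = (ea & ~((1UL << segment_shift(ssize)) - 1)) |
 *		    SLB_ESID_V | index;
 *	asm volatile("slbmte %0,%1" : : "r" (vsid_data), "r" (esid_data));
 */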

/*
 * This array is indexed by the LP field of the HPTE second dword.
 * Since this field may contain some RPN bits, some entries are
 * replicated so that we get the same value irrespective of RPN.
 * The top 4 bits are the page size index (MMU_PAGE_*) for the
 * actual page size, the bottom 4 bits are the base page size.
 */
extern u8 hpte_page_sizes[1 << LP_BITS];

static inline unsigned long __hpte_page_size(unsigned long h, unsigned long l,
					     bool is_base_size)
{
	unsigned int i, lp;

	if (!(h & HPTE_V_LARGE))
		return 1ul << 12;

	/* Look at the 8 bit LP value */
	lp = (l >> LP_SHIFT) & ((1 << LP_BITS) - 1);
	i = hpte_page_sizes[lp];
	if (!i)
		return 0;
	if (!is_base_size)
		i >>= 4;
	return 1ul << mmu_psize_defs[i & 0xf].shift;
}

static inline unsigned long hpte_page_size(unsigned long h, unsigned long l)
{
	return __hpte_page_size(h, l, 0);
}

static inline unsigned long hpte_base_page_size(unsigned long h, unsigned long l)
{
	return __hpte_page_size(h, l, 1);
}
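
/*
 * Illustrative decode sketch (an editorial addition): given a hash PTE read
 * back from the table in the old format, both sizes can be recovered from
 * the two dwords:
 *
 *	v = be64_to_cpu(hptep->v);
 *	r = be64_to_cpu(hptep->r);
 *	actual = hpte_page_size(v, r);		// e.g. 1UL << 24 for 16M
 *	base   = hpte_base_page_size(v, r);	// may differ, e.g. 64K
 */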

/*
 * The current system page and segment sizes
 */
extern int mmu_kernel_ssize;
extern int mmu_highuser_ssize;
extern u16 mmu_slb_size;
extern unsigned long tce_alloc_start, tce_alloc_end;

/*
 * If the processor supports 64k normal pages but not 64k cache
 * inhibited pages, we have to be prepared to switch processes
 * to use 4k pages when they create cache-inhibited mappings.
 * If this is the case, mmu_ci_restrictions will be set to 1.
 */
extern int mmu_ci_restrictions;

/*
 * This computes the AVPN and B fields of the first dword of a HPTE,
 * for use when we want to match an existing PTE.  The bottom 7 bits
 * of the returned value are zero.
 */
static inline unsigned long hpte_encode_avpn(unsigned long vpn, int psize,
					     int ssize)
{
	unsigned long v;
	/*
	 * The AVA field omits the low-order 23 bits of the 78-bit VA.
	 * These bits are not needed in the PTE, because the
	 * low-order b of these bits are part of the byte offset
	 * into the virtual page and, if b < 23, the high-order
	 * 23-b of these bits are always used in selecting the
	 * PTEGs to be searched
	 */
	v = (vpn >> (23 - VPN_SHIFT)) & ~(mmu_psize_defs[psize].avpnm);
	v <<= HPTE_V_AVPN_SHIFT;
	v |= ((unsigned long) ssize) << HPTE_V_SSIZE_SHIFT;
	return v;
}

/*
 * ISA v3.0 defines a new HPTE format, which differs from the old
 * format in having smaller AVPN and ARPN fields, and the B field
 * in the second dword instead of the first.
 */
static inline unsigned long hpte_old_to_new_v(unsigned long v)
{
	/* trim AVPN, drop B */
	return v & HPTE_V_COMMON_BITS;
}

static inline unsigned long hpte_old_to_new_r(unsigned long v, unsigned long r)
{
	/* move B field from 1st to 2nd dword, trim ARPN */
	return (r & ~HPTE_R_3_0_SSIZE_MASK) |
		(((v) >> HPTE_V_SSIZE_SHIFT) << HPTE_R_3_0_SSIZE_SHIFT);
}

static inline unsigned long hpte_new_to_old_v(unsigned long v, unsigned long r)
{
	/* insert B field */
	return (v & HPTE_V_COMMON_BITS) |
		((r & HPTE_R_3_0_SSIZE_MASK) <<
		 (HPTE_V_SSIZE_SHIFT - HPTE_R_3_0_SSIZE_SHIFT));
}

static inline unsigned long hpte_new_to_old_r(unsigned long r)
{
	/* clear out B field */
	return r & ~HPTE_R_3_0_SSIZE_MASK;
}

static inline unsigned long hpte_get_old_v(struct hash_pte *hptep)
{
	unsigned long hpte_v;

	hpte_v = be64_to_cpu(hptep->v);
	if (cpu_has_feature(CPU_FTR_ARCH_300))
		hpte_v = hpte_new_to_old_v(hpte_v, be64_to_cpu(hptep->r));
	return hpte_v;
}

/*
 * This function sets the AVPN and L fields of the HPTE appropriately
 * using the base page size and actual page size.
 */
static inline unsigned long hpte_encode_v(unsigned long vpn, int base_psize,
					  int actual_psize, int ssize)
{
	unsigned long v;
	v = hpte_encode_avpn(vpn, base_psize, ssize);
	if (actual_psize != MMU_PAGE_4K)
		v |= HPTE_V_LARGE;
	return v;
}

/*
 * This function sets the ARPN and LP fields of the HPTE appropriately
 * for the page size. We assume the pa is already "clean", that is, properly
 * aligned for the requested page size.
 */
static inline unsigned long hpte_encode_r(unsigned long pa, int base_psize,
					  int actual_psize)
{
	/* A 4K page needs no special encoding */
	if (actual_psize == MMU_PAGE_4K)
		return pa & HPTE_R_RPN;
	else {
		unsigned int penc = mmu_psize_defs[base_psize].penc[actual_psize];
		unsigned int shift = mmu_psize_defs[actual_psize].shift;
		return (pa & ~((1ul << shift) - 1)) | (penc << LP_SHIFT);
	}
}
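
/*
 * Hedged assembly sketch (mirrors the pattern used by hpte_insert
 * implementations; rflags/vflags contents are illustrative): a full HPTE is
 * built from the two encode helpers, then converted if running on ISA 3.0:
 *
 *	hpte_v = hpte_encode_v(vpn, base_psize, actual_psize, ssize) |
 *		 vflags | HPTE_V_VALID;
 *	hpte_r = hpte_encode_r(pa, base_psize, actual_psize) | rflags;
 *	if (cpu_has_feature(CPU_FTR_ARCH_300)) {
 *		hpte_r = hpte_old_to_new_r(hpte_v, hpte_r);
 *		hpte_v = hpte_old_to_new_v(hpte_v);
 *	}
 */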

/*
 * Build a VPN_SHIFT-bit shifted VA given the VSID, EA and segment size.
 */
static inline unsigned long hpt_vpn(unsigned long ea,
				    unsigned long vsid, int ssize)
{
	unsigned long mask;
	int s_shift = segment_shift(ssize);

	mask = (1ul << (s_shift - VPN_SHIFT)) - 1;
	return (vsid << (s_shift - VPN_SHIFT)) | ((ea >> VPN_SHIFT) & mask);
}
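
/*
 * Worked example (illustrative): for a 256M segment, segment_shift() is
 * SID_SHIFT (28), so the mask keeps 28 - 12 = 16 EA bits and
 *
 *	vpn = (vsid << 16) | ((ea >> 12) & 0xffff);
 *
 * i.e. the VA shifted right by VPN_SHIFT with the VSID in the high bits.
 */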

/*
 * This hashes a virtual address
 */
static inline unsigned long hpt_hash(unsigned long vpn,
				     unsigned int shift, int ssize)
{
	unsigned long mask;
	unsigned long hash, vsid;

	/* VPN_SHIFT can be at most 12 */
	if (ssize == MMU_SEGSIZE_256M) {
		mask = (1ul << (SID_SHIFT - VPN_SHIFT)) - 1;
		hash = (vpn >> (SID_SHIFT - VPN_SHIFT)) ^
			((vpn & mask) >> (shift - VPN_SHIFT));
	} else {
		mask = (1ul << (SID_SHIFT_1T - VPN_SHIFT)) - 1;
		vsid = vpn >> (SID_SHIFT_1T - VPN_SHIFT);
		hash = vsid ^ (vsid << 25) ^
			((vpn & mask) >> (shift - VPN_SHIFT));
	}
	return hash & 0x7fffffffffUL;
}
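
/*
 * Illustrative follow-on (matches how callers typically use this hash): the
 * primary PTE group index comes from masking with htab_hash_mask, and the
 * secondary group from the complemented hash:
 *
 *	hash = hpt_hash(vpn, mmu_psize_defs[psize].shift, ssize);
 *	hpte_group = (hash & htab_hash_mask) * HPTES_PER_GROUP;	 // primary
 *	hpte_group = (~hash & htab_hash_mask) * HPTES_PER_GROUP; // secondary
 */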

#define HPTE_LOCAL_UPDATE	0x1
#define HPTE_NOHPTE_UPDATE	0x2

extern int __hash_page_4K(unsigned long ea, unsigned long access,
			  unsigned long vsid, pte_t *ptep, unsigned long trap,
			  unsigned long flags, int ssize, int subpage_prot);
extern int __hash_page_64K(unsigned long ea, unsigned long access,
			   unsigned long vsid, pte_t *ptep, unsigned long trap,
			   unsigned long flags, int ssize);
struct mm_struct;
unsigned int hash_page_do_lazy_icache(unsigned int pp, pte_t pte, int trap);
extern int hash_page_mm(struct mm_struct *mm, unsigned long ea,
			unsigned long access, unsigned long trap,
			unsigned long flags);
extern int hash_page(unsigned long ea, unsigned long access, unsigned long trap,
		     unsigned long dsisr);
int __hash_page_huge(unsigned long ea, unsigned long access, unsigned long vsid,
		     pte_t *ptep, unsigned long trap, unsigned long flags,
		     int ssize, unsigned int shift, unsigned int mmu_psize);
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
extern int __hash_page_thp(unsigned long ea, unsigned long access,
			   unsigned long vsid, pmd_t *pmdp, unsigned long trap,
			   unsigned long flags, int ssize, unsigned int psize);
#else
static inline int __hash_page_thp(unsigned long ea, unsigned long access,
				  unsigned long vsid, pmd_t *pmdp,
				  unsigned long trap, unsigned long flags,
				  int ssize, unsigned int psize)
{
	BUG();
	return -1;
}
#endif
extern void hash_failure_debug(unsigned long ea, unsigned long access,
			       unsigned long vsid, unsigned long trap,
			       int ssize, int psize, int lpsize,
			       unsigned long pte);
extern int htab_bolt_mapping(unsigned long vstart, unsigned long vend,
			     unsigned long pstart, unsigned long prot,
			     int psize, int ssize);
int htab_remove_mapping(unsigned long vstart, unsigned long vend,
			int psize, int ssize);
extern void pseries_add_gpage(u64 addr, u64 page_size, unsigned long number_of_pages);
extern void demote_segment_4k(struct mm_struct *mm, unsigned long addr);

extern void hash__setup_new_exec(void);

#ifdef CONFIG_PPC_PSERIES
void hpte_init_pseries(void);
#else
static inline void hpte_init_pseries(void) { }
#endif

extern void hpte_init_native(void);

struct slb_entry {
	u64	esid;
	u64	vsid;
};

extern void slb_initialize(void);
void slb_flush_and_restore_bolted(void);
void slb_flush_all_realmode(void);
void __slb_restore_bolted_realmode(void);
void slb_restore_bolted_realmode(void);
void slb_save_contents(struct slb_entry *slb_ptr);
void slb_dump_contents(struct slb_entry *slb_ptr);

extern void slb_vmalloc_update(void);
extern void slb_set_size(u16 size);
#endif /* __ASSEMBLY__ */

/*
 * VSID allocation (256MB segment)
 *
 * We first generate a 37-bit "proto-VSID". Proto-VSIDs are generated
 * from mmu context id and effective segment id of the address.
 *
 * For user processes max context id is limited to MAX_USER_CONTEXT.
 * See get_user_context() for more details.
 *
 * For kernel space, see get_kernel_context().
 *
 * The proto-VSIDs are then scrambled into real VSIDs with the
 * multiplicative hash:
 *
 *	VSID = (proto-VSID * VSID_MULTIPLIER) % VSID_MODULUS
 *
 * VSID_MULTIPLIER is prime, so in particular it is
 * co-prime to VSID_MODULUS, making this a 1:1 scrambling function.
 * Because the modulus is 2^n-1 we can compute it efficiently without
 * a divide or extra multiply (see below). The scramble function gives
 * robust scattering in the hash table (at least based on some initial
 * results).
 *
 * We use VSID 0 to indicate an invalid VSID. That means we can't use context id
 * 0, because a context id of 0 and an EA of 0 gives a proto-VSID of 0, which
 * will produce a VSID of 0.
 *
 * We also need to avoid the last segment of the last context, because that
 * would give a protovsid of 0x1fffffffff. That will result in a VSID 0
 * because of the modulo operation in vsid scramble.
 */

/*
 * The maximum VA bits we support as of now is 68 bits. We want a 19 bit
 * context ID.
 * Restrictions:
 * The GPU has the restriction of not being able to access beyond 128TB
 * (47 bit effective address). We also cannot do more than a 20 bit PID.
 * For p4 and p5, which can only do a 65 bit VA, we restrict our CONTEXT_BITS
 * to 16 bits (ie, we can only have 2^16 pids at the same time).
 */
#define VA_BITS			68
#define CONTEXT_BITS		19
#define ESID_BITS		(VA_BITS - (SID_SHIFT + CONTEXT_BITS))
#define ESID_BITS_1T		(VA_BITS - (SID_SHIFT_1T + CONTEXT_BITS))

#define ESID_BITS_MASK		((1 << ESID_BITS) - 1)
#define ESID_BITS_1T_MASK	((1 << ESID_BITS_1T) - 1)

/*
 * Now certain configs support MAX_PHYSMEM more than 512TB. Hence we will need
 * to use more than one context for linear mapping the kernel.
 * For vmalloc and memmap, we use just one context with 512TB. With a 64 byte
 * struct page size, we need only 32TB of memmap for 2PB (51 bits, MAX_PHYSMEM_BITS).
 */
#if (MAX_PHYSMEM_BITS > MAX_EA_BITS_PER_CONTEXT)
#define MAX_KERNEL_CTX_CNT	(1UL << (MAX_PHYSMEM_BITS - MAX_EA_BITS_PER_CONTEXT))
#else
#define MAX_KERNEL_CTX_CNT	1
#endif
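
/*
 * For example (illustrative; both values are config-dependent): with
 * MAX_PHYSMEM_BITS = 51 and MAX_EA_BITS_PER_CONTEXT = 49 this gives
 * MAX_KERNEL_CTX_CNT = 1UL << (51 - 49) = 4 contexts to linear-map 2PB
 * at 512TB per context.
 */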

#define MAX_VMALLOC_CTX_CNT	1
#define MAX_IO_CTX_CNT		1
#define MAX_VMEMMAP_CTX_CNT	1

/*
 * 256MB segment
 * The proto-VSID space has 2^(CONTEXT_BITS + ESID_BITS) - 1 segments
 * available for user + kernel mapping. VSID 0 is reserved as invalid, contexts
 * 1-4 are used for kernel mapping. Each segment contains 2^28 bytes. Each
 * context maps 2^49 bytes (512TB).
 *
 * We also need to avoid the last segment of the last context, because that
 * would give a protovsid of 0x1fffffffff. That will result in a VSID 0
 * because of the modulo operation in vsid scramble.
 */
#define MAX_USER_CONTEXT	((ASM_CONST(1) << CONTEXT_BITS) - 2)

// The + 2 accounts for INVALID_REGION and 1 more to avoid overlap with kernel
#define MIN_USER_CONTEXT	(MAX_KERNEL_CTX_CNT + MAX_VMALLOC_CTX_CNT + \
				 MAX_IO_CTX_CNT + MAX_VMEMMAP_CTX_CNT + 2)

/*
 * For platforms that support only a 65 bit VA we limit the context bits
 */
#define MAX_USER_CONTEXT_65BIT_VA ((ASM_CONST(1) << (65 - (SID_SHIFT + ESID_BITS))) - 2)

/*
 * This should be computed such that protovsid * vsid_multiplier
 * doesn't overflow 64 bits. The vsid_multiplier should also be
 * co-prime to vsid_modulus. We also need to make sure that the number
 * of bits in the multiplied result (dividend) is less than twice the number of
 * protovsid bits for our modulus optimization to work.
 *
 * The table below shows the current values used.
 * |-------+------------+----------------------+------------+--------------------|
 * |       | Prime Bits | proto VSID_BITS_65VA | Total Bits | 2* proto VSID_BITS |
 * |-------+------------+----------------------+------------+--------------------|
 * | 1T    |         24 |                   25 |         49 |                 50 |
 * |-------+------------+----------------------+------------+--------------------|
 * | 256MB |         24 |                   37 |         61 |                 74 |
 * |-------+------------+----------------------+------------+--------------------|
 *
 * |-------+------------+----------------------+------------+--------------------|
 * |       | Prime Bits | proto VSID_BITS_68VA | Total Bits | 2* proto VSID_BITS |
 * |-------+------------+----------------------+------------+--------------------|
 * | 1T    |         24 |                   28 |         52 |                 56 |
 * |-------+------------+----------------------+------------+--------------------|
 * | 256MB |         24 |                   40 |         64 |                 80 |
 * |-------+------------+----------------------+------------+--------------------|
 *
 */
#define VSID_MULTIPLIER_256M	ASM_CONST(12538073)	/* 24-bit prime */
#define VSID_BITS_256M		(VA_BITS - SID_SHIFT)
#define VSID_BITS_65_256M	(65 - SID_SHIFT)
/*
 * Modular multiplicative inverse of VSID_MULTIPLIER under modulo VSID_MODULUS
 */
#define VSID_MULINV_256M	ASM_CONST(665548017062)

#define VSID_MULTIPLIER_1T	ASM_CONST(12538073)	/* 24-bit prime */
#define VSID_BITS_1T		(VA_BITS - SID_SHIFT_1T)
#define VSID_BITS_65_1T		(65 - SID_SHIFT_1T)
#define VSID_MULINV_1T		ASM_CONST(209034062)

/* 1TB VSID reserved for VRMA */
#define VRMA_VSID	0x1ffffffUL
#define USER_VSID_RANGE	(1UL << (ESID_BITS + SID_SHIFT))

/* 4 bits per slice and we have one slice per 1TB */
#define SLICE_ARRAY_SIZE	(H_PGTABLE_RANGE >> 41)
#define LOW_SLICE_ARRAY_SZ	(BITS_PER_LONG / BITS_PER_BYTE)
#define TASK_SLICE_ARRAY_SZ(x)	((x)->hash_context->slb_addr_limit >> 41)
#ifndef __ASSEMBLY__

#ifdef CONFIG_PPC_SUBPAGE_PROT
/*
 * For the sub-page protection option, we extend the PGD with one of
 * these.  Basically we have a 3-level tree, with the top level being
 * the protptrs array.  To optimize speed and memory consumption when
 * only addresses < 4GB are being protected, pointers to the first
 * four pages of sub-page protection words are stored in the low_prot
 * array.
 * Each page of sub-page protection words protects 1GB (4 bytes
 * protects 64k).  For the 3-level tree, each page of pointers then
 * protects 8TB.
 */
struct subpage_prot_table {
	unsigned long maxaddr;	/* only addresses < this are protected */
	unsigned int **protptrs[(TASK_SIZE_USER64 >> 43)];
	unsigned int *low_prot[4];
};

#define SBP_L1_BITS		(PAGE_SHIFT - 2)
#define SBP_L2_BITS		(PAGE_SHIFT - 3)
#define SBP_L1_COUNT		(1 << SBP_L1_BITS)
#define SBP_L2_COUNT		(1 << SBP_L2_BITS)
#define SBP_L2_SHIFT		(PAGE_SHIFT + SBP_L1_BITS)
#define SBP_L3_SHIFT		(SBP_L2_SHIFT + SBP_L2_BITS)

extern void subpage_prot_free(struct mm_struct *mm);
#else
static inline void subpage_prot_free(struct mm_struct *mm) {}
#endif /* CONFIG_PPC_SUBPAGE_PROT */

/*
 * One bit per slice. We have lower slices which cover 256MB segments
 * up to the 4G range. That gets us 16 low slices. For the rest we track slices
 * in 1TB size.
 */
struct slice_mask {
	u64 low_slices;
	DECLARE_BITMAP(high_slices, SLICE_NUM_HIGH);
};

struct hash_mm_context {
	u16 user_psize; /* page size index */

	/* SLB page size encodings */
	unsigned char low_slices_psize[LOW_SLICE_ARRAY_SZ];
	unsigned char high_slices_psize[SLICE_ARRAY_SIZE];
	unsigned long slb_addr_limit;
#ifdef CONFIG_PPC_64K_PAGES
	struct slice_mask mask_64k;
#endif
	struct slice_mask mask_4k;
#ifdef CONFIG_HUGETLB_PAGE
	struct slice_mask mask_16m;
	struct slice_mask mask_16g;
#endif

#ifdef CONFIG_PPC_SUBPAGE_PROT
	struct subpage_prot_table *spt;
#endif /* CONFIG_PPC_SUBPAGE_PROT */
};

#if 0
/*
 * The code below is equivalent to this function for arguments
 * < 2^VSID_BITS, which is all this should ever be called
 * with.  However gcc is not clever enough to compute the
 * modulus (2^n-1) without a second multiply.
 */
#define vsid_scramble(protovsid, size) \
	((((protovsid) * VSID_MULTIPLIER_##size) % VSID_MODULUS_##size))

/* simplified form avoiding mod operation */
#define vsid_scramble(protovsid, size) \
	({								 \
		unsigned long x;					 \
		x = (protovsid) * VSID_MULTIPLIER_##size;		 \
		x = (x >> VSID_BITS_##size) + (x & VSID_MODULUS_##size); \
		(x + ((x+1) >> VSID_BITS_##size)) & VSID_MODULUS_##size; \
	})

#else /* 1 */
static inline unsigned long vsid_scramble(unsigned long protovsid,
				  unsigned long vsid_multiplier, int vsid_bits)
{
	unsigned long vsid;
	unsigned long vsid_modulus = ((1UL << vsid_bits) - 1);
	/*
	 * We have the same multiplier for both 256M and 1T segments now
	 */
	vsid = protovsid * vsid_multiplier;
	vsid = (vsid >> vsid_bits) + (vsid & vsid_modulus);
	return (vsid + ((vsid + 1) >> vsid_bits)) & vsid_modulus;
}
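
/*
 * Worked example of the mod 2^n - 1 folding trick above (illustrative, with
 * tiny numbers): take vsid_bits = 4, so vsid_modulus = 15. For a product
 * x = protovsid * vsid_multiplier = 31: (31 >> 4) + (31 & 15) = 16, and the
 * correction step gives (16 + ((16 + 1) >> 4)) & 15 = 17 & 15 = 1, which
 * equals 31 % 15.
 */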

#endif /* 1 */

/* Returns the segment size indicator for a user address */
static inline int user_segment_size(unsigned long addr)
{
	/* Use 1T segments if possible for addresses >= 1T */
	if (addr >= (1UL << SID_SHIFT_1T))
		return mmu_highuser_ssize;
	return MMU_SEGSIZE_256M;
}

static inline unsigned long get_vsid(unsigned long context, unsigned long ea,
				     int ssize)
{
	unsigned long va_bits = VA_BITS;
	unsigned long vsid_bits;
	unsigned long protovsid;

	/*
	 * Bad address. We return VSID 0 for that
	 */
	if ((ea & EA_MASK) >= H_PGTABLE_RANGE)
		return 0;

	if (!mmu_has_feature(MMU_FTR_68_BIT_VA))
		va_bits = 65;

	if (ssize == MMU_SEGSIZE_256M) {
		vsid_bits = va_bits - SID_SHIFT;
		protovsid = (context << ESID_BITS) |
			((ea >> SID_SHIFT) & ESID_BITS_MASK);
		return vsid_scramble(protovsid, VSID_MULTIPLIER_256M, vsid_bits);
	}
	/* 1T segment */
	vsid_bits = va_bits - SID_SHIFT_1T;
	protovsid = (context << ESID_BITS_1T) |
		((ea >> SID_SHIFT_1T) & ESID_BITS_1T_MASK);
	return vsid_scramble(protovsid, VSID_MULTIPLIER_1T, vsid_bits);
}

/*
 * For kernel space, we use context ids as below.
 * The range is 512TB per context.
 *
 * 0x00001 -  [ 0xc000000000000000 - 0xc001ffffffffffff]
 * 0x00002 -  [ 0xc002000000000000 - 0xc003ffffffffffff]
 * 0x00003 -  [ 0xc004000000000000 - 0xc005ffffffffffff]
 * 0x00004 -  [ 0xc006000000000000 - 0xc007ffffffffffff]
 *
 * vmap, IO, vmemmap
 *
 * 0x00005 -  [ 0xc008000000000000 - 0xc009ffffffffffff]
 * 0x00006 -  [ 0xc00a000000000000 - 0xc00bffffffffffff]
 * 0x00007 -  [ 0xc00c000000000000 - 0xc00dffffffffffff]
 *
 */
static inline unsigned long get_kernel_context(unsigned long ea)
{
	unsigned long region_id = get_region_id(ea);
	unsigned long ctx;
	/*
	 * Depending on the kernel config, a kernel region can have one
	 * or more contexts.
	 */
	if (region_id == LINEAR_MAP_REGION_ID) {
		/*
		 * We already verified that ea is not beyond the addr limit.
		 */
		ctx = 1 + ((ea & EA_MASK) >> MAX_EA_BITS_PER_CONTEXT);
	} else
		ctx = region_id + MAX_KERNEL_CTX_CNT - 1;
	return ctx;
}
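
/*
 * Worked example (illustrative, assuming MAX_EA_BITS_PER_CONTEXT = 49, which
 * is config-dependent): for ea = 0xc002000000000000 in the linear map region,
 * (ea & EA_MASK) >> 49 = 1, so ctx = 2, matching the table above.
 */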

/*
 * This is only valid for addresses >= PAGE_OFFSET
 */
static inline unsigned long get_kernel_vsid(unsigned long ea, int ssize)
{
	unsigned long context;

	if (!is_kernel_addr(ea))
		return 0;

	context = get_kernel_context(ea);
	return get_vsid(context, ea, ssize);
}
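
/*
 * Hedged end-to-end sketch (added for illustration): translating a kernel EA
 * down to a hash table group ties the helpers in this header together:
 *
 *	vsid = get_kernel_vsid(ea, mmu_kernel_ssize);
 *	vpn  = hpt_vpn(ea, vsid, mmu_kernel_ssize);
 *	hash = hpt_hash(vpn, shift, mmu_kernel_ssize);	// shift = page shift
 *	hpte_group = (hash & htab_hash_mask) * HPTES_PER_GROUP;
 */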

unsigned htab_shift_for_mem_size(unsigned long mem_size);

#endif /* __ASSEMBLY__ */

#endif /* _ASM_POWERPC_BOOK3S_64_MMU_HASH_H_ */