/*
 *  PowerPC version
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Derived from "arch/i386/mm/fault.c"
 *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  Modified by Cort Dougan and Paul Mackerras.
 *
 *  Modified for PPC64 by Dave Engebretsen (engebret@ibm.com)
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 */

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/highmem.h>
#include <linux/extable.h>
#include <linux/kprobes.h>
#include <linux/kdebug.h>
#include <linux/perf_event.h>
#include <linux/ratelimit.h>
#include <linux/context_tracking.h>
#include <linux/hugetlb.h>
#include <linux/uaccess.h>

#include <asm/firmware.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
#include <asm/siginfo.h>
#include <asm/debug.h>

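/*
 * Give the registered pre-handlers (kprobes, the in-kernel debugger) a
 * chance to claim the fault before the normal page fault path runs;
 * returns true if one of them handled it.
 */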
static inline bool notify_page_fault(struct pt_regs *regs)
{
	bool ret = false;

#ifdef CONFIG_KPROBES
	/* kprobe_running() needs smp_processor_id() */
	if (!user_mode(regs)) {
		preempt_disable();
		if (kprobe_running() && kprobe_fault_handler(regs, 11))
			ret = true;
		preempt_enable();
	}
#endif /* CONFIG_KPROBES */

	if (unlikely(debugger_fault_handler(regs)))
		ret = true;

	return ret;
}

/*
 * Check whether the instruction at regs->nip is a store using
 * an update addressing form which will update r1.
 */
static bool store_updates_sp(struct pt_regs *regs)
{
	unsigned int inst;

	if (get_user(inst, (unsigned int __user *)regs->nip))
		return false;
	/* check for 1 in the rA field */
	if (((inst >> 16) & 0x1f) != 1)
		return false;
	/* check major opcode */
	switch (inst >> 26) {
	case 37:	/* stwu */
	case 39:	/* stbu */
	case 45:	/* sthu */
	case 53:	/* stfsu */
	case 55:	/* stfdu */
		return true;
	case 62:	/* std or stdu */
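		/* DS form: the two low bits select std (0) vs stdu (1) */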
		return (inst & 3) == 1;
	case 31:
		/* check minor opcode */
		switch ((inst >> 1) & 0x3ff) {
		case 181:	/* stdux */
		case 183:	/* stwux */
		case 247:	/* stbux */
		case 439:	/* sthux */
		case 695:	/* stfsux */
		case 759:	/* stfdux */
			return true;
		}
	}
	return false;
}
/*
 * do_page_fault error handling helpers
 */

static int
__bad_area_nosemaphore(struct pt_regs *regs, unsigned long address, int si_code)
{
	/*
	 * If we are in kernel mode, bail out with a SEGV; this will
	 * be caught by the assembly which will restore the non-volatile
	 * registers before calling bad_page_fault()
	 */
	if (!user_mode(regs))
		return SIGSEGV;

	_exception(SIGSEGV, regs, si_code, address);

	return 0;
}

static noinline int bad_area_nosemaphore(struct pt_regs *regs, unsigned long address)
{
	return __bad_area_nosemaphore(regs, address, SEGV_MAPERR);
}

static int __bad_area(struct pt_regs *regs, unsigned long address, int si_code)
{
	struct mm_struct *mm = current->mm;

	/*
	 * Something tried to access memory that isn't in our memory map.
	 * Fix it, but check whether it's kernel or user first.
	 */
	up_read(&mm->mmap_sem);

	return __bad_area_nosemaphore(regs, address, si_code);
}

static noinline int bad_area(struct pt_regs *regs, unsigned long address)
{
	return __bad_area(regs, address, SEGV_MAPERR);
}

static noinline int bad_access(struct pt_regs *regs, unsigned long address)
{
	return __bad_area(regs, address, SEGV_ACCERR);
}

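/*
 * Deliver a SIGBUS for the faulting address, filling in the
 * machine-check (hwpoison) siginfo details when memory failure
 * reporting is configured.
 */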
static int do_sigbus(struct pt_regs *regs, unsigned long address,
		     unsigned int fault)
{
	siginfo_t info;
	unsigned int lsb = 0;

	if (!user_mode(regs))
		return SIGBUS;

	current->thread.trap_nr = BUS_ADRERR;
	info.si_signo = SIGBUS;
	info.si_errno = 0;
	info.si_code = BUS_ADRERR;
	info.si_addr = (void __user *)address;
#ifdef CONFIG_MEMORY_FAILURE
	if (fault & (VM_FAULT_HWPOISON|VM_FAULT_HWPOISON_LARGE)) {
		pr_err("MCE: Killing %s:%d due to hardware memory corruption fault at %lx\n",
			current->comm, current->pid, address);
		info.si_code = BUS_MCEERR_AR;
	}

	if (fault & VM_FAULT_HWPOISON_LARGE)
		lsb = hstate_index_to_shift(VM_FAULT_GET_HINDEX(fault));
	if (fault & VM_FAULT_HWPOISON)
		lsb = PAGE_SHIFT;
#endif
	info.si_addr_lsb = lsb;
	force_sig_info(SIGBUS, &info, current);
	return 0;
}

static int mm_fault_error(struct pt_regs *regs, unsigned long addr, int fault)
{
	/*
	 * Kernel page fault interrupted by SIGKILL. We have no reason to
	 * continue processing.
	 */
	if (fatal_signal_pending(current) && !user_mode(regs))
		return SIGKILL;

	/* Out of memory */
	if (fault & VM_FAULT_OOM) {
		/*
		 * We ran out of memory, or some other thing happened to us that
		 * made us unable to handle the page fault gracefully.
		 */
		if (!user_mode(regs))
			return SIGSEGV;
		pagefault_out_of_memory();
	} else {
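		/* Report the failure as SIGBUS (incl. hwpoison) or SIGSEGV */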
		if (fault & (VM_FAULT_SIGBUS|VM_FAULT_HWPOISON|
			     VM_FAULT_HWPOISON_LARGE))
			return do_sigbus(regs, addr, fault);
		else if (fault & VM_FAULT_SIGSEGV)
			return bad_area_nosemaphore(regs, addr);
		else
			BUG();
	}
	return 0;
}

/* Is this a bad kernel fault? */
static bool bad_kernel_fault(bool is_exec, unsigned long error_code,
			     unsigned long address)
{
	if (is_exec && (error_code & (DSISR_NOEXEC_OR_G | DSISR_KEYFAULT))) {
		printk_ratelimited(KERN_CRIT "kernel tried to execute"
				   " exec-protected page (%lx) -"
				   "exploit attempt? (uid: %d)\n",
				   address, from_kuid(&init_user_ns,
						      current_uid()));
	}
	return is_exec || (address >= TASK_SIZE);
}

static bool bad_stack_expansion(struct pt_regs *regs, unsigned long address,
				struct vm_area_struct *vma,
				bool store_update_sp)
{
	/*
	 * N.B. The POWER/Open ABI allows programs to access up to
	 * 288 bytes below the stack pointer.
	 * The kernel signal delivery code writes up to about 1.5kB
	 * below the stack pointer (r1) before decrementing it.
	 * The exec code can write slightly over 640kB to the stack
	 * before setting the user r1.  Thus we allow the stack to
	 * expand to 1MB without further checks.
	 */
	if (address + 0x100000 < vma->vm_end) {
		/* get user regs even if this fault is in kernel mode */
		struct pt_regs *uregs = current->thread.regs;
		if (uregs == NULL)
			return true;

		/*
		 * A user-mode access to an address a long way below
		 * the stack pointer is only valid if the instruction
		 * is one which would update the stack pointer to the
		 * address accessed if the instruction completed,
		 * i.e. either stwu rs,n(r1) or stwux rs,r1,rb
		 * (or the byte, halfword, float or double forms).
		 *
		 * If we don't check this then any write to the area
		 * between the last mapped region and the stack will
		 * expand the stack rather than segfaulting.
		 */
		if (address + 2048 < uregs->gpr[1] && !store_update_sp)
			return true;
	}
	return false;
}

static bool access_error(bool is_write, bool is_exec,
			 struct vm_area_struct *vma)
{
	/*
	 * Allow execution from readable areas if the MMU does not
	 * provide separate controls over reading and executing.
	 *
	 * Note: this check used not to be enabled for 4xx/BookE.
	 * It is now, as I/D cache coherency for these is done at
	 * set_pte_at() time and there is no reason why the test
	 * below wouldn't be valid on those processors. This -may-
	 * break programs compiled with a really old ABI, though.
	 */
	if (is_exec) {
		return !(vma->vm_flags & VM_EXEC) &&
			(cpu_has_feature(CPU_FTR_NOEXECUTE) ||
			 !(vma->vm_flags & (VM_READ | VM_WRITE)));
	}

	if (is_write) {
		if (unlikely(!(vma->vm_flags & VM_WRITE)))
			return true;
		return false;
	}

	if (unlikely(!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE))))
		return true;

	return false;
}

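/*
 * Under Cooperative Memory Overcommit (CMO) firmware, account each
 * page-in in the lppaca so the hypervisor can track paging activity.
 */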
#ifdef CONFIG_PPC_SMLPAR
static inline void cmo_account_page_fault(void)
{
	if (firmware_has_feature(FW_FEATURE_CMO)) {
		u32 page_ins;

		preempt_disable();
		page_ins = be32_to_cpu(get_lppaca()->page_ins);
		page_ins += 1 << PAGE_FACTOR;
		get_lppaca()->page_ins = cpu_to_be32(page_ins);
		preempt_enable();
	}
}
#else
static inline void cmo_account_page_fault(void) { }
#endif /* CONFIG_PPC_SMLPAR */

#ifdef CONFIG_PPC_STD_MMU
static void sanity_check_fault(bool is_write, unsigned long error_code)
{
	/*
	 * For hash translation mode, we should never get a
	 * PROTFAULT. Any update to pte to reduce access will result in us
	 * removing the hash page table entry, thus resulting in a DSISR_NOHPTE
	 * fault instead of DSISR_PROTFAULT.
	 *
	 * A pte update to relax the access will not result in a hash page table
	 * entry invalidation and hence can result in DSISR_PROTFAULT.
	 * ptep_set_access_flags() doesn't do an hpte flush. This is why we have
	 * the special !is_write in the conditional below.
	 *
	 * For platforms that don't support a coherent icache but do support
	 * a per-page noexec bit, we set things up so that the D/I cache
	 * sync happens via a fault. But that is handled by the low-level
	 * hash fault code (hash_page_do_lazy_icache()) and we should not
	 * reach here in that case.
	 *
	 * For a wrong access that can result in a PROTFAULT, the vma->vm_flags
	 * check should handle it and we should fall through to the bad_area
	 * handling correctly.
	 *
	 * For embedded processors with per-page exec support that don't
	 * support a coherent icache, we do get a PROTFAULT and we handle the
	 * D/I cache sync in set_pte_at() while taking the noexec/prot fault.
	 * Hence this WARN_ON is conditional on the server MMU.
	 *
	 * For radix, we can get a prot fault in the autonuma case, because
	 * the radix page table will have the pages marked no-access for user.
	 */
	if (!radix_enabled() && !is_write)
		WARN_ON_ONCE(error_code & DSISR_PROTFAULT);
}
#else
static void sanity_check_fault(bool is_write, unsigned long error_code) { }
#endif /* CONFIG_PPC_STD_MMU */

/*
 * Define the correct "is_write" bit in error_code based
 * on the processor family
 */
#if (defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
#define page_fault_is_write(__err)	((__err) & ESR_DST)
#define page_fault_is_bad(__err)	(0)
#else
#define page_fault_is_write(__err)	((__err) & DSISR_ISSTORE)
#if defined(CONFIG_PPC_8xx)
#define page_fault_is_bad(__err)	((__err) & DSISR_NOEXEC_OR_G)
#elif defined(CONFIG_PPC64)
#define page_fault_is_bad(__err)	((__err) & DSISR_BAD_FAULT_64S)
#else
#define page_fault_is_bad(__err)	((__err) & DSISR_BAD_FAULT_32S)
#endif
#endif

/*
 * For 600- and 800-family processors, the error_code parameter is DSISR
 * for a data fault, SRR1 for an instruction fault. For 400-family processors
 * the error_code parameter is ESR for a data fault, 0 for an instruction
 * fault.
 * For 64-bit processors, the error_code parameter is
 *  - DSISR for a non-SLB data access fault,
 *  - SRR1 & 0x08000000 for a non-SLB instruction access fault
 *  - 0 for any SLB fault.
 *
 * The return value is 0 if the fault was handled, or the signal
 * number if this is a kernel fault that can't be handled here.
 */
static int __do_page_fault(struct pt_regs *regs, unsigned long address,
			   unsigned long error_code)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm = current->mm;
	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
	int is_exec = TRAP(regs) == 0x400;
	int is_user = user_mode(regs);
	int is_write = page_fault_is_write(error_code);
	int fault, major = 0;
	bool store_update_sp = false;

	if (notify_page_fault(regs))
		return 0;

	if (unlikely(page_fault_is_bad(error_code))) {
		if (is_user) {
			_exception(SIGBUS, regs, BUS_OBJERR, address);
			return 0;
		}
		return SIGBUS;
	}

	/* Additional sanity check(s) */
	sanity_check_fault(is_write, error_code);

	/*
	 * The kernel should never take an execute fault nor should it
	 * take a page fault to a kernel address.
	 */
	if (unlikely(!is_user && bad_kernel_fault(is_exec, error_code, address)))
		return SIGSEGV;

	/*
	 * If we're in an interrupt, have no user context, or are running
	 * in a region with pagefaults disabled, then we must not take the fault.
	 */
	if (unlikely(faulthandler_disabled() || !mm)) {
		if (is_user)
			printk_ratelimited(KERN_ERR "Page fault in user mode"
					   " with faulthandler_disabled()=%d"
					   " mm=%p\n",
					   faulthandler_disabled(), mm);
		return bad_area_nosemaphore(regs, address);
	}

	/* We restore the interrupt state now */
	if (!arch_irq_disabled_regs(regs))
		local_irq_enable();

	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);

	/*
	 * We want to do this outside mmap_sem, because reading code around nip
	 * can result in a fault, which will cause a deadlock when called with
	 * mmap_sem held
	 */
	if (is_write && is_user)
		store_update_sp = store_updates_sp(regs);

	if (is_user)
		flags |= FAULT_FLAG_USER;
	if (is_write)
		flags |= FAULT_FLAG_WRITE;
	if (is_exec)
		flags |= FAULT_FLAG_INSTRUCTION;

	/* When running in the kernel we expect faults to occur only to
	 * addresses in user space.  All other faults represent errors in the
	 * kernel and should generate an OOPS.  Unfortunately, in the case of an
	 * erroneous fault occurring in a code path which already holds mmap_sem
	 * we will deadlock attempting to validate the fault against the
	 * address space.  Luckily the kernel only validly references user
	 * space from well defined areas of code, which are listed in the
	 * exceptions table.
	 *
	 * As the vast majority of faults will be valid we will only perform
	 * the source reference check when there is a possibility of a deadlock.
	 * Attempt to lock the address space; if we cannot, we then validate the
	 * source.  If this is invalid we can skip the address space check,
	 * thus avoiding the deadlock.
	 */
	if (unlikely(!down_read_trylock(&mm->mmap_sem))) {
		if (!is_user && !search_exception_tables(regs->nip))
			return bad_area_nosemaphore(regs, address);

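	/* The retry path re-takes mmap_sem, which was dropped on VM_FAULT_RETRY */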
retry:
		down_read(&mm->mmap_sem);
	} else {
		/*
		 * The above down_read_trylock() might have succeeded in
		 * which case we'll have missed the might_sleep() from
		 * down_read():
		 */
		might_sleep();
	}

	vma = find_vma(mm, address);
	if (unlikely(!vma))
		return bad_area(regs, address);
	if (likely(vma->vm_start <= address))
		goto good_area;
	if (unlikely(!(vma->vm_flags & VM_GROWSDOWN)))
		return bad_area(regs, address);

	/* The stack is being expanded, check if it's valid */
	if (unlikely(bad_stack_expansion(regs, address, vma, store_update_sp)))
		return bad_area(regs, address);

	/* Try to expand it */
	if (unlikely(expand_stack(vma, address)))
		return bad_area(regs, address);

good_area:
	if (unlikely(access_error(is_write, is_exec, vma)))
		return bad_access(regs, address);

	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
	fault = handle_mm_fault(vma, address, flags);
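	/* Remember across retries whether any attempt was a major fault */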
	major |= fault & VM_FAULT_MAJOR;

	/*
	 * Handle the retry right now; the mmap_sem has been released in
	 * that case.
	 */
	if (unlikely(fault & VM_FAULT_RETRY)) {
		/* We retry only once */
		if (flags & FAULT_FLAG_ALLOW_RETRY) {
			/*
			 * Clear FAULT_FLAG_ALLOW_RETRY to avoid any risk
			 * of starvation.
			 */
			flags &= ~FAULT_FLAG_ALLOW_RETRY;
			flags |= FAULT_FLAG_TRIED;
			if (!fatal_signal_pending(current))
				goto retry;
		}

		/*
		 * User mode? Just return to handle the fatal exception; otherwise
		 * return to bad_page_fault.
		 */
		return is_user ? 0 : SIGBUS;
	}

	up_read(&current->mm->mmap_sem);

	if (unlikely(fault & VM_FAULT_ERROR))
		return mm_fault_error(regs, address, fault);

	/*
	 * Major/minor page fault accounting.
	 */
	if (major) {
		current->maj_flt++;
		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs, address);
		cmo_account_page_fault();
	} else {
		current->min_flt++;
		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs, address);
	}
	return 0;
}
NOKPROBE_SYMBOL(__do_page_fault);

int do_page_fault(struct pt_regs *regs, unsigned long address,
		  unsigned long error_code)
{
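	/* Tell the context-tracking code we may have entered from user mode */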
	enum ctx_state prev_state = exception_enter();
	int rc = __do_page_fault(regs, address, error_code);
	exception_exit(prev_state);
	return rc;
}
NOKPROBE_SYMBOL(do_page_fault);

/*
 * bad_page_fault is called when we have a bad access from the kernel.
 * It is called from the DSI and ISI handlers in head.S and from some
 * of the procedures in traps.c.
 */
void bad_page_fault(struct pt_regs *regs, unsigned long address, int sig)
{
	const struct exception_table_entry *entry;

	/* Are we prepared to handle this fault?  */
	if ((entry = search_exception_tables(regs->nip)) != NULL) {
		regs->nip = extable_fixup(entry);
		return;
	}

	/* kernel has accessed a bad area */

	switch (regs->trap) {
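	/*
	 * regs->trap holds the exception vector: 0x300/0x380 data access
	 * (including SLB), 0x400/0x480 instruction access (including SLB),
	 * 0x600 alignment.
	 */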
	case 0x300:
	case 0x380:
		printk(KERN_ALERT "Unable to handle kernel paging request for "
			"data at address 0x%08lx\n", regs->dar);
		break;
	case 0x400:
	case 0x480:
		printk(KERN_ALERT "Unable to handle kernel paging request for "
			"instruction fetch\n");
		break;
	case 0x600:
		printk(KERN_ALERT "Unable to handle kernel paging request for "
			"unaligned access at address 0x%08lx\n", regs->dar);
		break;
	default:
		printk(KERN_ALERT "Unable to handle kernel paging request for "
			"unknown fault\n");
		break;
	}
	printk(KERN_ALERT "Faulting instruction address: 0x%08lx\n",
		regs->nip);

	if (task_stack_end_corrupted(current))
		printk(KERN_ALERT "Thread overran stack, or stack corrupted\n");

	die("Kernel access of bad area", regs, sig);
}