/*
 * Low-level exception handling
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2004 - 2008 by Tensilica Inc.
 * Copyright (C) 2015 Cadence Design Systems Inc.
 *
 * Chris Zankel <chris@zankel.net>
 *
 */

#include <linux/linkage.h>
#include <asm/asm-offsets.h>
#include <asm/asmmacro.h>
#include <asm/processor.h>
#include <asm/coprocessor.h>
#include <asm/thread_info.h>
#include <asm/asm-uaccess.h>
#include <asm/unistd.h>
#include <asm/ptrace.h>
#include <asm/current.h>
#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/signal.h>
#include <asm/tlbflush.h>
#include <variant/tie-asm.h>

/* Unimplemented features. */

#undef KERNEL_STACK_OVERFLOW_CHECK

/* Not well tested.
 *
 * - fast_coprocessor
 */

/*
 * Macro to find first bit set in WINDOWBASE from the left + 1
 *
 * 100....0 -> 1
 * 010....0 -> 2
 * 000....1 -> WSBITS
 */

	.macro ffs_ws bit mask

#if XCHAL_HAVE_NSA
	nsau    \bit, \mask			# 32-WSBITS ... 31 (32 iff 0)
	addi    \bit, \bit, WSBITS - 32 + 1   	# topmost bit set -> return 1
#else
	movi    \bit, WSBITS
#if WSBITS > 16
	_bltui  \mask, 0x10000, 99f
	addi    \bit, \bit, -16
	extui   \mask, \mask, 16, 16
#endif
#if WSBITS > 8
99:	_bltui  \mask, 0x100, 99f
	addi    \bit, \bit, -8
	srli    \mask, \mask, 8
#endif
99:	_bltui  \mask, 0x10, 99f
	addi    \bit, \bit, -4
	srli    \mask, \mask, 4
99:	_bltui  \mask, 0x4, 99f
	addi    \bit, \bit, -2
	srli    \mask, \mask, 2
99:	_bltui  \mask, 0x2, 99f
	addi    \bit, \bit, -1
99:

#endif
	.endm
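
/* Worked example (illustrative, not from the original source): with
 * WSBITS = 16 and \mask = 0x4 (only bit 2 set), the NSA path computes
 * nsau = 29 and then 29 + 16 - 32 + 1 = 14: counting from the left of
 * the 16-bit field, bit 2 is the 14th position. The boundary cases from
 * the comment above hold: 1 << (WSBITS - 1) -> 1 and a mask of 1 -> WSBITS.
 */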


	.macro	irq_save flags tmp
#if XTENSA_FAKE_NMI
#if defined(CONFIG_DEBUG_KERNEL) && (LOCKLEVEL | TOPLEVEL) >= XCHAL_DEBUGLEVEL
	rsr	\flags, ps
	extui	\tmp, \flags, PS_INTLEVEL_SHIFT, PS_INTLEVEL_WIDTH
	bgei	\tmp, LOCKLEVEL, 99f
	rsil	\tmp, LOCKLEVEL
99:
#else
	movi	\tmp, LOCKLEVEL
	rsr	\flags, ps
	or	\flags, \flags, \tmp
	xsr	\flags, ps
	rsync
#endif
#else
	rsil	\flags, LOCKLEVEL
#endif
	.endm
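
/* Hypothetical usage sketch (not part of the original source): all three
 * variants above leave the previous PS in \flags, so a critical section
 * would look like
 *
 *	irq_save a2, a3		# a2 = old PS, IRQs masked to LOCKLEVEL
 *	...			# critical section
 *	wsr	a2, ps		# restore previous interrupt level
 *	rsync
 */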

/* ----------------- DEFAULT FIRST LEVEL EXCEPTION HANDLERS ----------------- */

/*
 * First-level exception handler for user exceptions.
 * Save some special registers, extra states and all registers in the AR
 * register file that were in use in the user task, and jump to the common
 * exception code.
 * We save SAR (used to calculate WMASK), and WB and WS (we don't have to
 * save them for kernel exceptions).
 *
 * Entry condition for user_exception:
 *
 *   a0:	trashed, original value saved on stack (PT_AREG0)
 *   a1:	a1
 *   a2:	new stack pointer, original value in depc
 *   a3:	a3
 *   depc:	a2, original value saved on stack (PT_DEPC)
 *   excsave1:	dispatch table
 *
 *   PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
 *	     <  VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
 *
 * Entry condition for _user_exception:
 *
 *   a0-a3 and depc have been saved to PT_AREG0...PT_AREG3 and PT_DEPC
 *   excsave has been restored, and
 *   stack pointer (a1) has been set.
 *
 * Note: _user_exception might be at an odd address. Don't use call0..call12
 */

	.literal_position

ENTRY(user_exception)

	/* Save a1, a2, a3, and set SP. */

	rsr	a0, depc
	s32i	a1, a2, PT_AREG1
	s32i	a0, a2, PT_AREG2
	s32i	a3, a2, PT_AREG3
	mov	a1, a2

	.globl _user_exception
_user_exception:

	/* Save SAR and turn off single stepping */

	movi	a2, 0
	wsr	a2, depc		# terminate user stack trace with 0
	rsr	a3, sar
	xsr	a2, icountlevel
	s32i	a3, a1, PT_SAR
	s32i	a2, a1, PT_ICOUNTLEVEL

#if XCHAL_HAVE_THREADPTR
	rur	a2, threadptr
	s32i	a2, a1, PT_THREADPTR
#endif

	/* Rotate ws so that the current windowbase is at bit0. */
	/* Assume ws = xxwww1yyyy. Rotate ws right, so that a2 = yyyyxxwww1 */

	rsr	a2, windowbase
	rsr	a3, windowstart
	ssr	a2
	s32i	a2, a1, PT_WINDOWBASE
	s32i	a3, a1, PT_WINDOWSTART
	slli	a2, a3, 32-WSBITS
	src	a2, a3, a2
	srli	a2, a2, 32-WSBITS
	s32i	a2, a1, PT_WMASK	# needed for restoring registers
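
	/* Illustrative example (not from the original source): with
	 * WSBITS = 8, WINDOWBASE = 3 and WINDOWSTART = 0b01101000, the
	 * slli/src/srli sequence above rotates WINDOWSTART right by
	 * WINDOWBASE, giving a2 = 0b00001101: the current frame's bit lands
	 * at bit 0 and the other live frames follow above it.
	 */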

	/* Save only live registers. */

	_bbsi.l	a2, 1, 1f
	s32i	a4, a1, PT_AREG4
	s32i	a5, a1, PT_AREG5
	s32i	a6, a1, PT_AREG6
	s32i	a7, a1, PT_AREG7
	_bbsi.l	a2, 2, 1f
	s32i	a8, a1, PT_AREG8
	s32i	a9, a1, PT_AREG9
	s32i	a10, a1, PT_AREG10
	s32i	a11, a1, PT_AREG11
	_bbsi.l	a2, 3, 1f
	s32i	a12, a1, PT_AREG12
	s32i	a13, a1, PT_AREG13
	s32i	a14, a1, PT_AREG14
	s32i	a15, a1, PT_AREG15
	_bnei	a2, 1, 1f		# only one valid frame?

	/* Only one valid frame, skip saving regs. */

	j	2f

	/* Save the remaining registers.
	 * We have to save all registers up to the first '1' from
	 * the right, except the current frame (bit 0).
	 * Assume a2 is:  001001000110001
	 * All register frames starting from the top field to the marked '1'
	 * must be saved.
	 */

1:	addi	a3, a2, -1		# eliminate '1' in bit 0: yyyyxxww0
	neg	a3, a3			# yyyyxxww0 -> YYYYXXWW1+1
	and	a3, a3, a2		# max. only one bit is set

	/* Find number of frames to save */

	ffs_ws	a0, a3			# number of frames to the '1' from left

	/* Store information into WMASK:
	 * bits 0..3: xxx1 masked lower 4 bits of the rotated windowstart,
	 * bits 4...: number of valid 4-register frames
	 */

	slli	a3, a0, 4		# number of frames to save in bits 8..4
	extui	a2, a2, 0, 4		# mask for the first 16 registers
	or	a2, a3, a2
	s32i	a2, a1, PT_WMASK	# needed when we restore the reg-file
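
	/* Illustrative example (not from the original source), continuing
	 * with rotated a2 = 0b00001101 and WSBITS = 8: addi/neg/and isolate
	 * the lowest '1' above bit 0 (bit 2), ffs_ws counts 6 frames from
	 * the left, and WMASK becomes (6 << 4) | 0b1101 = 0x6d.
	 */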

	/* Save 4 registers at a time */

1:	rotw	-1
	s32i	a0, a5, PT_AREG_END - 16
	s32i	a1, a5, PT_AREG_END - 12
	s32i	a2, a5, PT_AREG_END - 8
	s32i	a3, a5, PT_AREG_END - 4
	addi	a0, a4, -1
	addi	a1, a5, -16
	_bnez	a0, 1b

	/* WINDOWBASE still in SAR! */

	rsr	a2, sar			# original WINDOWBASE
	movi	a3, 1
	ssl	a2
	sll	a3, a3
	wsr	a3, windowstart		# set corresponding WINDOWSTART bit
	wsr	a2, windowbase		# and WINDOWBASE
	rsync
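
	/* Illustrative note (not from the original source): ssl/sll compute
	 * a3 = 1 << WINDOWBASE, e.g. WINDOWBASE = 3 -> WINDOWSTART = 0b1000,
	 * so only the frame we are currently running in is marked live.
	 */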

	/* We are back to the original stack pointer (a1) */

2:	/* Now, jump to the common exception handler. */

	j	common_exception

ENDPROC(user_exception)

/*
 * First-level exception handler for kernel exceptions
 * Save special registers and the live window frame.
 * Note: Even though we change the stack pointer, we don't have to do a
 *	 MOVSP here, as we do that when we return from the exception.
 *	 (See comment in the kernel exception exit code)
 *
 * Entry condition for kernel_exception:
 *
 *   a0:	trashed, original value saved on stack (PT_AREG0)
 *   a1:	a1
 *   a2:	new stack pointer, original in DEPC
 *   a3:	a3
 *   depc:	a2, original value saved on stack (PT_DEPC)
 *   excsave_1:	dispatch table
 *
 *   PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
 *	     <  VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
 *
 * Entry condition for _kernel_exception:
 *
 *   a0-a3 and depc have been saved to PT_AREG0...PT_AREG3 and PT_DEPC
 *   excsave has been restored, and
 *   stack pointer (a1) has been set.
 *
 * Note: _kernel_exception might be at an odd address. Don't use call0..call12
 */

ENTRY(kernel_exception)

	/* Save a1, a2, a3, and set SP. */

	rsr	a0, depc		# get a2
	s32i	a1, a2, PT_AREG1
	s32i	a0, a2, PT_AREG2
	s32i	a3, a2, PT_AREG3
	mov	a1, a2

	.globl _kernel_exception
_kernel_exception:

	/* Save SAR and turn off single stepping */

	movi	a2, 0
	rsr	a3, sar
	xsr	a2, icountlevel
	s32i	a3, a1, PT_SAR
	s32i	a2, a1, PT_ICOUNTLEVEL

	/* Rotate ws so that the current windowbase is at bit0. */
	/* Assume ws = xxwww1yyyy. Rotate ws right, so that a2 = yyyyxxwww1 */

	rsr	a2, windowbase		# don't need to save these, we only
	rsr	a3, windowstart		# need shifted windowstart: windowmask
	ssr	a2
	slli	a2, a3, 32-WSBITS
	src	a2, a3, a2
	srli	a2, a2, 32-WSBITS
	s32i	a2, a1, PT_WMASK	# needed for kernel_exception_exit

	/* Save only the live window-frame */

	_bbsi.l	a2, 1, 1f
	s32i	a4, a1, PT_AREG4
	s32i	a5, a1, PT_AREG5
	s32i	a6, a1, PT_AREG6
	s32i	a7, a1, PT_AREG7
	_bbsi.l	a2, 2, 1f
	s32i	a8, a1, PT_AREG8
	s32i	a9, a1, PT_AREG9
	s32i	a10, a1, PT_AREG10
	s32i	a11, a1, PT_AREG11
	_bbsi.l	a2, 3, 1f
	s32i	a12, a1, PT_AREG12
	s32i	a13, a1, PT_AREG13
	s32i	a14, a1, PT_AREG14
	s32i	a15, a1, PT_AREG15

	_bnei	a2, 1, 1f

	/* Copy spill slots of a0 and a1 to imitate movsp
	 * in order to keep exception stack continuous
	 */
	l32i	a3, a1, PT_SIZE
	l32i	a0, a1, PT_SIZE + 4
	s32e	a3, a1, -16
	s32e	a0, a1, -12
1:
	l32i	a0, a1, PT_AREG0	# restore saved a0
	wsr	a0, depc

#ifdef KERNEL_STACK_OVERFLOW_CHECK

	/*  Stack overflow check, for debugging  */
	extui	a2, a1, TASK_SIZE_BITS,XX
	movi	a3, SIZE??
	_bge	a2, a3, out_of_stack_panic

#endif

/*
 * This is the common exception handler.
 * We get here from the user exception handler or simply by falling through
 * from the kernel exception handler.
 * Save the remaining special registers, switch to kernel mode, and jump
 * to the second-level exception handler.
 *
 */

common_exception:

	/* Save some registers, disable loops and clear the syscall flag. */

	rsr	a2, debugcause
	rsr	a3, epc1
	s32i	a2, a1, PT_DEBUGCAUSE
	s32i	a3, a1, PT_PC

	movi	a2, NO_SYSCALL
	rsr	a3, excvaddr
	s32i	a2, a1, PT_SYSCALL
	movi	a2, 0
	s32i	a3, a1, PT_EXCVADDR
#if XCHAL_HAVE_LOOPS
	xsr	a2, lcount
	s32i	a2, a1, PT_LCOUNT
#endif

	/* It is now safe to restore the EXC_TABLE_FIXUP variable. */

	rsr	a2, exccause
	movi	a3, 0
	rsr	a0, excsave1
	s32i	a2, a1, PT_EXCCAUSE
	s32i	a3, a0, EXC_TABLE_FIXUP

	/* All unrecoverable states are saved on stack, now, and a1 is valid.
	 * Now we can allow exceptions again. In case we've got an interrupt,
	 * PS.INTLEVEL is set to LOCKLEVEL, disabling further interrupts;
	 * otherwise it's left unchanged.
	 *
	 * Set PS(EXCM = 0, UM = 0, RING = 0, OWB = 0, WOE = 1, INTLEVEL = X)
	 */

	rsr	a3, ps
	s32i	a3, a1, PT_PS		# save ps

#if XTENSA_FAKE_NMI
	/* Correct PS needs to be saved in the PT_PS:
	 * - in case of exception or level-1 interrupt it's in the PS,
	 *   and is already saved.
	 * - in case of medium level interrupt it's in the excsave2.
	 */
	movi	a0, EXCCAUSE_MAPPED_NMI
	extui	a3, a3, PS_INTLEVEL_SHIFT, PS_INTLEVEL_WIDTH
	beq	a2, a0, .Lmedium_level_irq
	bnei	a2, EXCCAUSE_LEVEL1_INTERRUPT, .Lexception
	beqz	a3, .Llevel1_irq	# level-1 IRQ sets ps.intlevel to 0

.Lmedium_level_irq:
	rsr	a0, excsave2
	s32i	a0, a1, PT_PS		# save medium-level interrupt ps
	bgei	a3, LOCKLEVEL, .Lexception

.Llevel1_irq:
	movi	a3, LOCKLEVEL

.Lexception:
	movi	a0, PS_WOE_MASK
	or	a3, a3, a0
#else
	addi	a2, a2, -EXCCAUSE_LEVEL1_INTERRUPT
	movi	a0, LOCKLEVEL
	extui	a3, a3, PS_INTLEVEL_SHIFT, PS_INTLEVEL_WIDTH
					# a3 = PS.INTLEVEL
	moveqz	a3, a0, a2		# a3 = LOCKLEVEL iff interrupt
	movi	a2, PS_WOE_MASK
	or	a3, a3, a2
	rsr	a2, exccause
#endif

	/* restore return address (or 0 if return to userspace) */
	rsr	a0, depc
	wsr	a3, ps
	rsync				# PS.WOE => rsync => overflow

	/* Save lbeg, lend */
#if XCHAL_HAVE_LOOPS
	rsr	a4, lbeg
	rsr	a3, lend
	s32i	a4, a1, PT_LBEG
	s32i	a3, a1, PT_LEND
#endif

	/* Save SCOMPARE1 */

#if XCHAL_HAVE_S32C1I
	rsr     a3, scompare1
	s32i    a3, a1, PT_SCOMPARE1
#endif

	/* Save optional registers. */

	save_xtregs_opt a1 a3 a4 a5 a6 a7 PT_XTREGS_OPT
	/* Go to second-level dispatcher. Set up parameters to pass to the
	 * exception handler and call the exception handler.
	 */

	rsr	a4, excsave1
	mov	a6, a1			# pass stack frame
	mov	a7, a2			# pass EXCCAUSE
	addx4	a4, a2, a4
	l32i	a4, a4, EXC_TABLE_DEFAULT		# load handler
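
	/* Illustrative note (not from the original source): addx4 formed
	 * a4 = excsave1 + EXCCAUSE * 4, so the load above fetches
	 * *(exc_table + EXC_TABLE_DEFAULT + EXCCAUSE * 4), the default
	 * second-level handler registered for this exception cause.
	 */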

	/* Call the second-level handler */

	callx4	a4

	/* Jump here for exception exit */
	.global common_exception_return
common_exception_return:

#if XTENSA_FAKE_NMI
	l32i	a2, a1, PT_EXCCAUSE
	movi	a3, EXCCAUSE_MAPPED_NMI
	beq	a2, a3, .LNMIexit
#endif
1:
	irq_save a2, a3
#ifdef CONFIG_TRACE_IRQFLAGS
	call4	trace_hardirqs_off
#endif

	/* Jump if we are returning from kernel exceptions. */

	l32i	a3, a1, PT_PS
	GET_THREAD_INFO(a2, a1)
	l32i	a4, a2, TI_FLAGS
	_bbci.l	a3, PS_UM_BIT, 6f

	/* Specific to a user exception exit:
	 * We need to check some flags for signal handling and rescheduling,
	 * and have to restore WB and WS, extra states, and all registers
	 * in the register file that were in use in the user task.
	 * Note that we don't disable interrupts here.
	 */

	_bbsi.l	a4, TIF_NEED_RESCHED, 3f
	_bbsi.l	a4, TIF_NOTIFY_RESUME, 2f
	_bbci.l	a4, TIF_SIGPENDING, 5f

2:	l32i	a4, a1, PT_DEPC
	bgeui	a4, VALID_DOUBLE_EXCEPTION_ADDRESS, 4f

	/* Call do_signal() */

#ifdef CONFIG_TRACE_IRQFLAGS
	call4	trace_hardirqs_on
#endif
	rsil	a2, 0
	mov	a6, a1
	call4	do_notify_resume	# int do_notify_resume(struct pt_regs*)
	j	1b

3:	/* Reschedule */

#ifdef CONFIG_TRACE_IRQFLAGS
	call4	trace_hardirqs_on
#endif
	rsil	a2, 0
	call4	schedule	# void schedule (void)
	j	1b

#ifdef CONFIG_PREEMPT
6:
	_bbci.l	a4, TIF_NEED_RESCHED, 4f

	/* Check current_thread_info->preempt_count */

	l32i	a4, a2, TI_PRE_COUNT
	bnez	a4, 4f
	call4	preempt_schedule_irq
	j	1b
#endif

#if XTENSA_FAKE_NMI
.LNMIexit:
	l32i	a3, a1, PT_PS
	_bbci.l	a3, PS_UM_BIT, 4f
#endif

5:
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	_bbci.l	a4, TIF_DB_DISABLED, 7f
	call4	restore_dbreak
7:
#endif
#ifdef CONFIG_DEBUG_TLB_SANITY
	l32i	a4, a1, PT_DEPC
	bgeui	a4, VALID_DOUBLE_EXCEPTION_ADDRESS, 4f
	call4	check_tlb_sanity
#endif
6:
4:
#ifdef CONFIG_TRACE_IRQFLAGS
	extui	a4, a3, PS_INTLEVEL_SHIFT, PS_INTLEVEL_WIDTH
	bgei	a4, LOCKLEVEL, 1f
	call4	trace_hardirqs_on
1:
#endif
	/* Restore optional registers. */

	load_xtregs_opt a1 a2 a4 a5 a6 a7 PT_XTREGS_OPT

	/* Restore SCOMPARE1 */

#if XCHAL_HAVE_S32C1I
	l32i    a2, a1, PT_SCOMPARE1
	wsr     a2, scompare1
#endif
	wsr	a3, ps		/* disable interrupts */

	_bbci.l	a3, PS_UM_BIT, kernel_exception_exit

user_exception_exit:

	/* Restore the state of the task and return from the exception. */

	/* Switch to the user thread WINDOWBASE. Save SP temporarily in DEPC */

	l32i	a2, a1, PT_WINDOWBASE
	l32i	a3, a1, PT_WINDOWSTART
	wsr	a1, depc		# use DEPC as temp storage
	wsr	a3, windowstart		# restore WINDOWSTART
	ssr	a2			# preserve user's WB in the SAR
	wsr	a2, windowbase		# switch to user's saved WB
	rsync
	rsr	a1, depc		# restore stack pointer

	l32i	a2, a1, PT_WMASK	# register frames saved (in bits 4...9)
	rotw	-1			# we restore a4..a7
	_bltui	a6, 16, 1f		# only have to restore current window?

	/* The working registers are a0 and a3.  We are restoring to
	 * a4..a7.  Be careful not to destroy what we have just restored.
	 * Note: wmask has the format YYYYM:
	 *       Y: number of registers saved in groups of 4
	 *       M: 4 bit mask of first 16 registers
	 */

	mov	a2, a6
	mov	a3, a5

2:	rotw	-1			# a0..a3 become a4..a7
	addi	a3, a7, -4*4		# next iteration
	addi	a2, a6, -16		# decrementing Y in WMASK
	l32i	a4, a3, PT_AREG_END + 0
	l32i	a5, a3, PT_AREG_END + 4
	l32i	a6, a3, PT_AREG_END + 8
	l32i	a7, a3, PT_AREG_END + 12
	_bgeui	a2, 16, 2b

	/* Clear unrestored registers (don't leak anything to user-land) */

1:	rsr	a0, windowbase
	rsr	a3, sar
	sub	a3, a0, a3
	beqz	a3, 2f
	extui	a3, a3, 0, WBBITS

1:	rotw	-1
	addi	a3, a7, -1
	movi	a4, 0
	movi	a5, 0
	movi	a6, 0
	movi	a7, 0
	bgei	a3, 1, 1b

	/* We are back where we were when we started.
	 * Note: a2 still contains WMASK (if we've returned to the original
	 *	 frame where we had loaded a2), or at least the lower 4 bits
	 *	 (if we have restored WSBITS-1 frames).
	 */

2:
#if XCHAL_HAVE_THREADPTR
	l32i	a3, a1, PT_THREADPTR
	wur	a3, threadptr
#endif

	j	common_exception_exit

	/* This is the kernel exception exit.
	 * We avoided doing a MOVSP when we entered the exception, but we
	 * have to do it here.
	 */

kernel_exception_exit:

	/* Check if we have to do a movsp.
	 *
	 * We only have to do a movsp if the previous window-frame has
	 * been spilled to the *temporary* exception stack instead of the
	 * task's stack. This is the case if the corresponding bit in
	 * WINDOWSTART for the previous window-frame was set before
	 * (not spilled) but is zero now (spilled).
	 * If this bit is zero, all other bits except the one for the
	 * current window frame are also zero. So, we can use a simple test:
	 * 'and' WINDOWSTART and WINDOWSTART-1:
	 *
	 *  (XXXXXX1[0]* - 1) AND XXXXXX1[0]* = XXXXXX0[0]*
	 *
	 * The result is zero only if one bit was set.
	 *
	 * (Note: We might have gone through several task switches before
	 *        we come back to the current task, so WINDOWBASE might be
	 *        different from the time the exception occurred.)
	 */
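
	/* Illustrative example (not from the original source): with
	 * WINDOWSTART = 0b000100 (only the current frame live), the AND
	 * below gives 0b000100 & 0b000011 = 0, so the previous frame was
	 * spilled and we must do the movsp; with WINDOWSTART = 0b010100 it
	 * gives 0b010000 != 0 and the movsp can be skipped.
	 */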

	/* Test WINDOWSTART before and after the exception.
	 * We actually have WMASK, so we only have to test if it is 1 or not.
	 */

	l32i	a2, a1, PT_WMASK
	_beqi	a2, 1, common_exception_exit	# Spilled before exception, jump

	/* Test WINDOWSTART now. If spilled, do the movsp */

	rsr     a3, windowstart
	addi	a0, a3, -1
	and     a3, a3, a0
	_bnez	a3, common_exception_exit

	/* Do a movsp (we returned from a call4, so we have at least a0..a7) */

	addi    a0, a1, -16
	l32i    a3, a0, 0
	l32i    a4, a0, 4
	s32i    a3, a1, PT_SIZE+0
	s32i    a4, a1, PT_SIZE+4
	l32i    a3, a0, 8
	l32i    a4, a0, 12
	s32i    a3, a1, PT_SIZE+8
	s32i    a4, a1, PT_SIZE+12

	/* Common exception exit.
	 * We restore the special register and the current window frame, and
	 * return from the exception.
	 *
	 * Note: We expect a2 to hold PT_WMASK
	 */

common_exception_exit:

	/* Restore address registers. */

	_bbsi.l	a2, 1, 1f
	l32i	a4,  a1, PT_AREG4
	l32i	a5,  a1, PT_AREG5
	l32i	a6,  a1, PT_AREG6
	l32i	a7,  a1, PT_AREG7
	_bbsi.l	a2, 2, 1f
	l32i	a8,  a1, PT_AREG8
	l32i	a9,  a1, PT_AREG9
	l32i	a10, a1, PT_AREG10
	l32i	a11, a1, PT_AREG11
	_bbsi.l	a2, 3, 1f
	l32i	a12, a1, PT_AREG12
	l32i	a13, a1, PT_AREG13
	l32i	a14, a1, PT_AREG14
	l32i	a15, a1, PT_AREG15

	/* Restore PC, SAR */

1:	l32i	a2, a1, PT_PC
	l32i	a3, a1, PT_SAR
	wsr	a2, epc1
	wsr	a3, sar

	/* Restore LBEG, LEND, LCOUNT */
#if XCHAL_HAVE_LOOPS
	l32i	a2, a1, PT_LBEG
	l32i	a3, a1, PT_LEND
	wsr	a2, lbeg
	l32i	a2, a1, PT_LCOUNT
	wsr	a3, lend
	wsr	a2, lcount
#endif

	/* We control single stepping through the ICOUNTLEVEL register. */

	l32i	a2, a1, PT_ICOUNTLEVEL
	movi	a3, -2
	wsr	a2, icountlevel
	wsr	a3, icount
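
	/* Note (our reading of the ICOUNT semantics, not from the original
	 * source): ICOUNT counts up for instructions executed below
	 * ICOUNTLEVEL and raises a debug exception when it overflows, so
	 * writing -2 re-arms a pending single step for a traced task.
	 */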
	/* Check if it was double exception. */

	l32i	a0, a1, PT_DEPC
	l32i	a3, a1, PT_AREG3
	l32i	a2, a1, PT_AREG2
	_bgeui	a0, VALID_DOUBLE_EXCEPTION_ADDRESS, 1f

	/* Restore a0...a3 and return */

	l32i	a0, a1, PT_AREG0
	l32i	a1, a1, PT_AREG1
	rfe

1: 	wsr	a0, depc
	l32i	a0, a1, PT_AREG0
	l32i	a1, a1, PT_AREG1
	rfde

ENDPROC(kernel_exception)

/*
 * Debug exception handler.
 *
 * Currently, we don't support KGDB, so only user applications can be debugged.
 *
 * When we get here, a0 is trashed and saved to excsave[debuglevel]
 */

	.literal_position

ENTRY(debug_exception)

	rsr	a0, SREG_EPS + XCHAL_DEBUGLEVEL
	bbsi.l	a0, PS_EXCM_BIT, 1f	# exception mode

	/* Set EPC1 and EXCCAUSE */

	wsr	a2, depc		# save a2 temporarily
	rsr	a2, SREG_EPC + XCHAL_DEBUGLEVEL
	wsr	a2, epc1

	movi	a2, EXCCAUSE_MAPPED_DEBUG
	wsr	a2, exccause

	/* Restore PS to the value before the debug exc but with PS.EXCM set.*/

	movi	a2, 1 << PS_EXCM_BIT
	or	a2, a0, a2
	wsr	a2, ps

	/* Switch to kernel/user stack, restore jump vector, and save a0 */

	bbsi.l	a2, PS_UM_BIT, 2f	# jump if user mode

	addi	a2, a1, -16-PT_SIZE	# assume kernel stack
3:
	l32i	a0, a3, DT_DEBUG_SAVE
	s32i	a1, a2, PT_AREG1
	s32i	a0, a2, PT_AREG0
	movi	a0, 0
	s32i	a0, a2, PT_DEPC		# mark it as a regular exception
	xsr	a3, SREG_EXCSAVE + XCHAL_DEBUGLEVEL
	xsr	a0, depc
	s32i	a3, a2, PT_AREG3
	s32i	a0, a2, PT_AREG2
	mov	a1, a2

	/* Debug exception is handled as an exception, so interrupts will
	 * likely be enabled in the common exception handler. Disable
	 * preemption if we have HW breakpoints to preserve DEBUGCAUSE.DBNUM
	 * meaning.
	 */
#if defined(CONFIG_PREEMPT_COUNT) && defined(CONFIG_HAVE_HW_BREAKPOINT)
	GET_THREAD_INFO(a2, a1)
	l32i	a3, a2, TI_PRE_COUNT
	addi	a3, a3, 1
	s32i	a3, a2, TI_PRE_COUNT
#endif

	rsr	a2, ps
	bbsi.l	a2, PS_UM_BIT, _user_exception
	j	_kernel_exception

2:	rsr	a2, excsave1
	l32i	a2, a2, EXC_TABLE_KSTK	# load kernel stack pointer
	j	3b

#ifdef CONFIG_HAVE_HW_BREAKPOINT
	/* Debug exception while in exception mode. This may happen when
	 * window overflow/underflow handler or fast exception handler hits
	 * data breakpoint, in which case save and disable all data
	 * breakpoints, single-step faulting instruction and restore data
	 * breakpoints.
	 */
1:
	bbci.l	a0, PS_UM_BIT, 1b	# jump if kernel mode

	rsr	a0, debugcause
	bbsi.l	a0, DEBUGCAUSE_DBREAK_BIT, .Ldebug_save_dbreak

	.set	_index, 0
	.rept	XCHAL_NUM_DBREAK
	l32i	a0, a3, DT_DBREAKC_SAVE + _index * 4
	wsr	a0, SREG_DBREAKC + _index
	.set	_index, _index + 1
	.endr

	l32i	a0, a3, DT_ICOUNT_LEVEL_SAVE
	wsr	a0, icountlevel

	l32i	a0, a3, DT_ICOUNT_SAVE
	xsr	a0, icount

	l32i	a0, a3, DT_DEBUG_SAVE
	xsr	a3, SREG_EXCSAVE + XCHAL_DEBUGLEVEL
	rfi	XCHAL_DEBUGLEVEL

.Ldebug_save_dbreak:
	.set	_index, 0
	.rept	XCHAL_NUM_DBREAK
	movi	a0, 0
	xsr	a0, SREG_DBREAKC + _index
	s32i	a0, a3, DT_DBREAKC_SAVE + _index * 4
	.set	_index, _index + 1
	.endr
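
	/* Illustrative note (not from the original source): the .rept block
	 * above expands to one movi/xsr/s32i group per DBREAKC register,
	 * disabling each data breakpoint while saving its old control word
	 * into the debug-save area pointed to by a3.
	 */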

	movi	a0, XCHAL_EXCM_LEVEL + 1
	xsr	a0, icountlevel
	s32i	a0, a3, DT_ICOUNT_LEVEL_SAVE

	movi	a0, 0xfffffffe
	xsr	a0, icount
	s32i	a0, a3, DT_ICOUNT_SAVE

	l32i	a0, a3, DT_DEBUG_SAVE
	xsr	a3, SREG_EXCSAVE + XCHAL_DEBUGLEVEL
	rfi	XCHAL_DEBUGLEVEL
#else
	/* Debug exception while in exception mode. Should not happen. */
1:	j	1b	// FIXME!!
#endif

ENDPROC(debug_exception)

/*
 * We get here in case of an unrecoverable exception.
 * The only thing we can do is to be nice and print a panic message.
 * We only produce a single stack frame for panic, so ???
 *
 *
 * Entry conditions:
 *
 *   - a0 contains the caller address; original value saved in excsave1.
 *   - the original a0 contains a valid return address (backtrace) or 0.
 *   - a2 contains a valid stackpointer
 *
 * Notes:
 *
 *   - If the stack pointer could be invalid, the caller has to setup a
 *     dummy stack pointer (e.g. the stack of the init_task)
 *
 *   - If the return address could be invalid, the caller has to set it
 *     to 0, so the backtrace would stop.
 *
 */
	.align 4
unrecoverable_text:
	.ascii "Unrecoverable error in exception handler\0"

	.literal_position

ENTRY(unrecoverable_exception)

	movi	a0, 1
	movi	a1, 0

	wsr	a0, windowstart
	wsr	a1, windowbase
	rsync

	movi	a1, PS_WOE_MASK | LOCKLEVEL
	wsr	a1, ps
	rsync

	movi	a1, init_task
	movi	a0, 0
	addi	a1, a1, PT_REGS_OFFSET

	movi	a6, unrecoverable_text
	call4	panic

1:	j	1b

ENDPROC(unrecoverable_exception)

/* -------------------------- FAST EXCEPTION HANDLERS ----------------------- */

/*
 * Fast-handler for alloca exceptions
 *
 *  The ALLOCA handler is entered when user code executes the MOVSP
 *  instruction and the caller's frame is not in the register file.
 *
 * This algorithm was taken from Ross Morley's RTOS Porting Layer:
 *
 *    /home/ross/rtos/porting/XtensaRTOS-PortingLayer-20090507/xtensa_vectors.S
 *
 * It leverages the existing window spill/fill routines and their support for
 * double exceptions. The 'movsp' instruction will only cause an exception if
 * the next window needs to be loaded. In fact this ALLOCA exception may be
 * replaced at some point by changing the hardware to do an underflow exception
 * of the proper size instead.
 *
 * This algorithm simply backs out the register changes started by the user
 * exception handler, makes it appear that we have started a window underflow
 * by rotating the window back and then setting the old window base (OWB) in
 * the 'ps' register with the rolled back window base. The 'movsp' instruction
 * will be re-executed and this time, since the next window frame is in the
 * active AR registers, it won't cause an exception.
 *
 * If the WindowUnderflow code gets a TLB miss, the page will get mapped
 * and the partial WindowUnderflow will be handled in the double exception
 * handler.
 *
 * Entry condition:
 *
 *   a0:	trashed, original value saved on stack (PT_AREG0)
 *   a1:	a1
 *   a2:	new stack pointer, original in DEPC
 *   a3:	a3
 *   depc:	a2, original value saved on stack (PT_DEPC)
 *   excsave_1:	dispatch table
 *
 *   PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
 *	     <  VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
 */

ENTRY(fast_alloca)
	rsr	a0, windowbase
	rotw	-1
	rsr	a2, ps
	extui	a3, a2, PS_OWB_SHIFT, PS_OWB_WIDTH
	xor	a3, a3, a4
	l32i	a4, a6, PT_AREG0
	l32i	a1, a6, PT_DEPC
	rsr	a6, depc
	wsr	a1, depc
	slli	a3, a3, PS_OWB_SHIFT
	xor	a2, a2, a3
	wsr	a2, ps
	rsync
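
	/* Illustrative note (not from the original source): a4 now holds the
	 * user's saved a0, i.e. the return address of the frame that must be
	 * reloaded. Its two topmost bits encode the call size (01 = call4,
	 * 10 = call8, 11 = call12), so the branches below pick the matching
	 * window-underflow handler.
	 */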

	_bbci.l	a4, 31, 4f
	rotw	-1
	_bbci.l	a8, 30, 8f
	rotw	-1
	j	_WindowUnderflow12
8:	j	_WindowUnderflow8
4:	j	_WindowUnderflow4
ENDPROC(fast_alloca)

#ifdef CONFIG_USER_ABI_CALL0_PROBE
/*
 * fast illegal instruction handler.
 *
 * This is used to fix up user PS.WOE on the exception caused
 * by the first opcode related to register window. If PS.WOE is
 * already set it goes directly to the common user exception handler.
 *
 * Entry condition:
 *
 *   a0:	trashed, original value saved on stack (PT_AREG0)
 *   a1:	a1
 *   a2:	new stack pointer, original in DEPC
 *   a3:	a3
 *   depc:	a2, original value saved on stack (PT_DEPC)
 *   excsave_1:	dispatch table
 */

ENTRY(fast_illegal_instruction_user)

	rsr	a0, ps
	bbsi.l	a0, PS_WOE_BIT, user_exception
	s32i	a3, a2, PT_AREG3
	movi	a3, PS_WOE_MASK
	or	a0, a0, a3
	wsr	a0, ps
	l32i	a3, a2, PT_AREG3
	l32i	a0, a2, PT_AREG0
	rsr	a2, depc
	rfe

ENDPROC(fast_illegal_instruction_user)
#endif

/*
 * fast system calls.
 *
 * WARNING:  The kernel doesn't save the entire user context before
 * handling a fast system call.  These functions are small and short,
 * usually offering some functionality not available to user tasks.
 *
 * BE CAREFUL TO PRESERVE THE USER'S CONTEXT.
 *
 * Entry condition:
 *
 *   a0:	trashed, original value saved on stack (PT_AREG0)
 *   a1:	a1
 *   a2:	new stack pointer, original in DEPC
 *   a3:	a3
 *   depc:	a2, original value saved on stack (PT_DEPC)
 *   excsave_1:	dispatch table
 */

ENTRY(fast_syscall_user)

	/* Skip syscall. */

	rsr	a0, epc1
	addi	a0, a0, 3		# skip the 3-byte syscall instruction
	wsr	a0, epc1

	l32i	a0, a2, PT_DEPC
	bgeui	a0, VALID_DOUBLE_EXCEPTION_ADDRESS, fast_syscall_unrecoverable

	rsr	a0, depc			# get syscall-nr
	_beqz	a0, fast_syscall_spill_registers
	_beqi	a0, __NR_xtensa, fast_syscall_xtensa

	j	user_exception

ENDPROC(fast_syscall_user)

ENTRY(fast_syscall_unrecoverable)

	/* Restore all states. */

	l32i    a0, a2, PT_AREG0        # restore a0
	xsr     a2, depc                # restore a2, depc

	wsr     a0, excsave1
	call0	unrecoverable_exception

ENDPROC(fast_syscall_unrecoverable)

/*
 * sysxtensa syscall handler
 *
 * int sysxtensa (SYS_XTENSA_ATOMIC_SET,     ptr, val,    unused);
 * int sysxtensa (SYS_XTENSA_ATOMIC_ADD,     ptr, val,    unused);
 * int sysxtensa (SYS_XTENSA_ATOMIC_EXG_ADD, ptr, val,    unused);
 * int sysxtensa (SYS_XTENSA_ATOMIC_CMP_SWP, ptr, oldval, newval);
 *        a2            a6                   a3    a4      a5
 *
 * Entry condition:
 *
 *   a0:	a2 (syscall-nr), original value saved on stack (PT_AREG0)
 *   a1:	a1
 *   a2:	new stack pointer, original in a0 and DEPC
 *   a3:	a3
 *   a4..a15:	unchanged
 *   depc:	a2, original value saved on stack (PT_DEPC)
 *   excsave_1:	dispatch table
 *
 *   PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
 *	     <  VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
 *
 * Note: we don't have to save a2; a2 holds the return value
 */
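
/* Hypothetical user-side sketch (not part of the original source): to
 * atomically add 1 to the word at some user pointer through this fast
 * path, userspace would issue
 *
 *	movi	a2, __NR_xtensa
 *	movi	a6, SYS_XTENSA_ATOMIC_ADD
 *	mov	a3, <pointer register>
 *	movi	a4, 1
 *	syscall
 *
 * and pick up the return value in a2, per the convention above.
 */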

	.literal_position

#ifdef CONFIG_FAST_SYSCALL_XTENSA

ENTRY(fast_syscall_xtensa)

	s32i	a7, a2, PT_AREG7	# we need an additional register
	movi	a7, 4			# sizeof(unsigned int)
	access_ok a3, a7, a0, a2, .Leac	# a0: scratch reg, a2: sp

	_bgeui	a6, SYS_XTENSA_COUNT, .Lill
	_bnei	a6, SYS_XTENSA_ATOMIC_CMP_SWP, .Lnswp

	/* Fall through for ATOMIC_CMP_SWP. */

.Lswp:	/* Atomic compare and swap */

EX(.Leac) l32i	a0, a3, 0		# read old value
	bne	a0, a4, 1f		# same as old value? jump
EX(.Leac) s32i	a5, a3, 0		# different, modify value
	l32i	a7, a2, PT_AREG7	# restore a7
	l32i	a0, a2, PT_AREG0	# restore a0
	movi	a2, 1			# and return 1
	rfe

1:	l32i	a7, a2, PT_AREG7	# restore a7
	l32i	a0, a2, PT_AREG0	# restore a0
	movi	a2, 0			# return 0 (note that we cannot set
	rfe

.Lnswp:	/* Atomic set, add, and exg_add. */

EX(.Leac) l32i	a7, a3, 0		# orig
	addi	a6, a6, -SYS_XTENSA_ATOMIC_SET
	add	a0, a4, a7		# + arg
	moveqz	a0, a4, a6		# set
	addi	a6, a6, SYS_XTENSA_ATOMIC_SET
EX(.Leac) s32i	a0, a3, 0		# write new value
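
	/* Illustrative note (not from the original source): a6 was biased by
	 * -SYS_XTENSA_ATOMIC_SET so that moveqz selects the bare argument
	 * (a4) for ATOMIC_SET (a6 == 0) and keeps old + arg (a0) for
	 * ATOMIC_ADD and ATOMIC_EXG_ADD; the bias was then undone.
	 */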

	mov	a0, a2
	mov	a2, a7
	l32i	a7, a0, PT_AREG7	# restore a7
	l32i	a0, a0, PT_AREG0	# restore a0
	rfe

.Leac:	l32i	a7, a2, PT_AREG7	# restore a7
	l32i	a0, a2, PT_AREG0	# restore a0
	movi	a2, -EFAULT
	rfe

.Lill:	l32i	a7, a2, PT_AREG7	# restore a7
	l32i	a0, a2, PT_AREG0	# restore a0
	movi	a2, -EINVAL
	rfe

ENDPROC(fast_syscall_xtensa)

#else /* CONFIG_FAST_SYSCALL_XTENSA */

ENTRY(fast_syscall_xtensa)

	l32i    a0, a2, PT_AREG0        # restore a0
	movi	a2, -ENOSYS
	rfe

ENDPROC(fast_syscall_xtensa)

#endif /* CONFIG_FAST_SYSCALL_XTENSA */


/* fast_syscall_spill_registers.
 *
 * Entry condition:
 *
 *   a0:	trashed, original value saved on stack (PT_AREG0)
 *   a1:	a1
 *   a2:	new stack pointer, original in DEPC
 *   a3:	a3
 *   depc:	a2, original value saved on stack (PT_DEPC)
 *   excsave_1:	dispatch table
 *
 * Note: We assume the stack pointer is EXC_TABLE_KSTK in the fixup handler.
 */

#ifdef CONFIG_FAST_SYSCALL_SPILL_REGISTERS

ENTRY(fast_syscall_spill_registers)

	/* Register a FIXUP handler (pass current wb as a parameter) */

	xsr	a3, excsave1
	movi	a0, fast_syscall_spill_registers_fixup
	s32i	a0, a3, EXC_TABLE_FIXUP
	rsr	a0, windowbase
	s32i	a0, a3, EXC_TABLE_PARAM
	xsr	a3, excsave1		# restore a3 and excsave_1

	/* Save a3, a4 and SAR on stack. */

	rsr	a0, sar
	s32i	a3, a2, PT_AREG3
	s32i	a0, a2, PT_SAR

	/* The spill routine might clobber a4, a7, a8, a11, a12, and a15. */

	s32i	a4, a2, PT_AREG4
	s32i	a7, a2, PT_AREG7
	s32i	a8, a2, PT_AREG8
	s32i	a11, a2, PT_AREG11
	s32i	a12, a2, PT_AREG12
	s32i	a15, a2, PT_AREG15
