/*
 * linux/kernel/irq/chip.c
 *
 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
 * Copyright (C) 2005-2006, Thomas Gleixner, Russell King
 *
 * This file contains the core interrupt handling code, for irq-chip
 * based architectures.
 *
 * Detailed information is available in Documentation/core-api/genericirq.rst
 */

#include <linux/irq.h>
#include <linux/msi.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/irqdomain.h>

#include <trace/events/irq.h>

#include "internals.h"

static irqreturn_t bad_chained_irq(int irq, void *dev_id)
{
	WARN_ONCE(1, "Chained irq %d should not call an action\n", irq);
	return IRQ_NONE;
}

/*
 * Chained handlers should never call action on their IRQ. This default
 * action will emit a warning if that happens.
 */
struct irqaction chained_action = {
	.handler = bad_chained_irq,
};

/**
 *	irq_set_chip - set the irq chip for an irq
 *	@irq:	irq number
 *	@chip:	pointer to irq chip description structure
 */
int irq_set_chip(unsigned int irq, struct irq_chip *chip)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);

	if (!desc)
		return -EINVAL;

	if (!chip)
		chip = &no_irq_chip;

	desc->irq_data.chip = chip;
	irq_put_desc_unlock(desc, flags);
	/*
	 * For !CONFIG_SPARSE_IRQ make the irq show up in
	 * allocated_irqs.
	 */
	irq_mark_irq(irq);
	return 0;
}
EXPORT_SYMBOL(irq_set_chip);

/**
 *	irq_set_irq_type - set the irq trigger type for an irq
 *	@irq:	irq number
 *	@type:	IRQ_TYPE_{LEVEL,EDGE}_* value - see include/linux/irq.h
 */
int irq_set_irq_type(unsigned int irq, unsigned int type)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
	int ret = 0;

	if (!desc)
		return -EINVAL;

	ret = __irq_set_trigger(desc, type);
	irq_put_desc_busunlock(desc, flags);
	return ret;
}
EXPORT_SYMBOL(irq_set_irq_type);

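/*
 * Illustrative sketch (not part of this file): a driver that needs a
 * specific trigger configuration for a board-level interrupt line can
 * combine irq_set_irq_type() with a regular request_irq(). The irq number,
 * handler and device name below are made up for the example.
 *
 *	ret = irq_set_irq_type(irq, IRQ_TYPE_EDGE_RISING);
 *	if (ret)
 *		return ret;
 *	ret = request_irq(irq, example_isr, 0, "example-dev", dev);
 *
 * Most drivers instead pass IRQF_TRIGGER_* flags to request_irq() and let
 * the core end up in __irq_set_trigger() on their behalf.
 */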
/**
 *	irq_set_handler_data - set irq handler data for an irq
 *	@irq:	Interrupt number
 *	@data:	Pointer to interrupt specific data
 *
 *	Set the hardware irq controller data for an irq
 */
int irq_set_handler_data(unsigned int irq, void *data)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);

	if (!desc)
		return -EINVAL;
	desc->irq_common_data.handler_data = data;
	irq_put_desc_unlock(desc, flags);
	return 0;
}
EXPORT_SYMBOL(irq_set_handler_data);

/**
 *	irq_set_msi_desc_off - set MSI descriptor data for an irq at offset
 *	@irq_base:	Interrupt number base
 *	@irq_offset:	Interrupt number offset
 *	@entry:		Pointer to MSI descriptor data
 *
 *	Set the MSI descriptor entry for an irq at offset
 */
int irq_set_msi_desc_off(unsigned int irq_base, unsigned int irq_offset,
			 struct msi_desc *entry)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq_base + irq_offset, &flags, IRQ_GET_DESC_CHECK_GLOBAL);

	if (!desc)
		return -EINVAL;
	desc->irq_common_data.msi_desc = entry;
	if (entry && !irq_offset)
		entry->irq = irq_base;
	irq_put_desc_unlock(desc, flags);
	return 0;
}

/**
 *	irq_set_msi_desc - set MSI descriptor data for an irq
 *	@irq:	Interrupt number
 *	@entry:	Pointer to MSI descriptor data
 *
 *	Set the MSI descriptor entry for an irq
 */
int irq_set_msi_desc(unsigned int irq, struct msi_desc *entry)
{
	return irq_set_msi_desc_off(irq, 0, entry);
}

/**
 *	irq_set_chip_data - set irq chip data for an irq
 *	@irq:	Interrupt number
 *	@data:	Pointer to chip specific data
 *
 *	Set the hardware irq chip data for an irq
 */
int irq_set_chip_data(unsigned int irq, void *data)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);

	if (!desc)
		return -EINVAL;
	desc->irq_data.chip_data = data;
	irq_put_desc_unlock(desc, flags);
	return 0;
}
EXPORT_SYMBOL(irq_set_chip_data);

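/*
 * Illustrative sketch (not part of this file): the usual place to wire up
 * the chip, chip data and flow handler for a line is an irq_domain ->map()
 * callback. "example_chip" and "example_priv" are hypothetical names.
 *
 *	static int example_irq_map(struct irq_domain *d, unsigned int virq,
 *				   irq_hw_number_t hw)
 *	{
 *		struct example_priv *priv = d->host_data;
 *
 *		irq_set_chip_and_handler(virq, &example_chip, handle_level_irq);
 *		irq_set_chip_data(virq, priv);
 *		return 0;
 *	}
 *
 * The chip callbacks can later retrieve the pointer with
 * irq_data_get_irq_chip_data().
 */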
struct irq_data *irq_get_irq_data(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	return desc ? &desc->irq_data : NULL;
}
EXPORT_SYMBOL_GPL(irq_get_irq_data);

static void irq_state_clr_disabled(struct irq_desc *desc)
{
	irqd_clear(&desc->irq_data, IRQD_IRQ_DISABLED);
}

static void irq_state_clr_masked(struct irq_desc *desc)
{
	irqd_clear(&desc->irq_data, IRQD_IRQ_MASKED);
}

static void irq_state_clr_started(struct irq_desc *desc)
{
	irqd_clear(&desc->irq_data, IRQD_IRQ_STARTED);
}

static void irq_state_set_started(struct irq_desc *desc)
{
	irqd_set(&desc->irq_data, IRQD_IRQ_STARTED);
}

enum {
	IRQ_STARTUP_NORMAL,
	IRQ_STARTUP_MANAGED,
	IRQ_STARTUP_ABORT,
};

#ifdef CONFIG_SMP
static int
__irq_startup_managed(struct irq_desc *desc, struct cpumask *aff, bool force)
{
	struct irq_data *d = irq_desc_get_irq_data(desc);

	if (!irqd_affinity_is_managed(d))
		return IRQ_STARTUP_NORMAL;

	irqd_clr_managed_shutdown(d);

	if (cpumask_any_and(aff, cpu_online_mask) >= nr_cpu_ids) {
		/*
		 * Catch code which fiddles with enable_irq() on a managed
		 * and potentially shutdown IRQ. Chained interrupt
		 * installment or irq auto probing should not happen on
		 * managed irqs either.
		 */
		if (WARN_ON_ONCE(force))
			return IRQ_STARTUP_ABORT;
		/*
		 * The interrupt was requested, but there is no online CPU
		 * in its affinity mask. Put it into managed shutdown
		 * state and let the cpu hotplug mechanism start it up once
		 * a CPU in the mask becomes available.
		 */
		return IRQ_STARTUP_ABORT;
	}
	/*
	 * Managed interrupts have reserved resources, so this should not
	 * happen.
	 */
	if (WARN_ON(irq_domain_activate_irq(d, false)))
		return IRQ_STARTUP_ABORT;
	return IRQ_STARTUP_MANAGED;
}
#else
static __always_inline int
__irq_startup_managed(struct irq_desc *desc, struct cpumask *aff, bool force)
{
	return IRQ_STARTUP_NORMAL;
}
#endif

static int __irq_startup(struct irq_desc *desc)
{
	struct irq_data *d = irq_desc_get_irq_data(desc);
	int ret = 0;

	/* Warn if this interrupt is not activated but try nevertheless */
	WARN_ON_ONCE(!irqd_is_activated(d));

	if (d->chip->irq_startup) {
		ret = d->chip->irq_startup(d);
		irq_state_clr_disabled(desc);
		irq_state_clr_masked(desc);
	} else {
		irq_enable(desc);
	}
	irq_state_set_started(desc);
	return ret;
}

int irq_startup(struct irq_desc *desc, bool resend, bool force)
{
	struct irq_data *d = irq_desc_get_irq_data(desc);
	struct cpumask *aff = irq_data_get_affinity_mask(d);
	int ret = 0;

	desc->depth = 0;

	if (irqd_is_started(d)) {
		irq_enable(desc);
	} else {
		switch (__irq_startup_managed(desc, aff, force)) {
		case IRQ_STARTUP_NORMAL:
			ret = __irq_startup(desc);
			irq_setup_affinity(desc);
			break;
		case IRQ_STARTUP_MANAGED:
			irq_do_set_affinity(d, aff, false);
			ret = __irq_startup(desc);
			break;
		case IRQ_STARTUP_ABORT:
			irqd_set_managed_shutdown(d);
			return 0;
		}
	}
	if (resend)
		check_irq_resend(desc);

	return ret;
}

int irq_activate(struct irq_desc *desc)
{
	struct irq_data *d = irq_desc_get_irq_data(desc);

	if (!irqd_affinity_is_managed(d))
		return irq_domain_activate_irq(d, false);
	return 0;
}

int irq_activate_and_startup(struct irq_desc *desc, bool resend)
{
	if (WARN_ON(irq_activate(desc)))
		return 0;
	return irq_startup(desc, resend, IRQ_START_FORCE);
}

static void __irq_disable(struct irq_desc *desc, bool mask);

void irq_shutdown(struct irq_desc *desc)
{
	if (irqd_is_started(&desc->irq_data)) {
		desc->depth = 1;
		if (desc->irq_data.chip->irq_shutdown) {
			desc->irq_data.chip->irq_shutdown(&desc->irq_data);
			irq_state_set_disabled(desc);
			irq_state_set_masked(desc);
		} else {
			__irq_disable(desc, true);
		}
		irq_state_clr_started(desc);
	}
	/*
	 * This must be called even if the interrupt was never started up,
	 * because the activation can happen before the interrupt is
	 * available for request/startup. It has its own state tracking so
	 * it's safe to call it unconditionally.
	 */
	irq_domain_deactivate_irq(&desc->irq_data);
}

void irq_enable(struct irq_desc *desc)
{
	if (!irqd_irq_disabled(&desc->irq_data)) {
		unmask_irq(desc);
	} else {
		irq_state_clr_disabled(desc);
		if (desc->irq_data.chip->irq_enable) {
			desc->irq_data.chip->irq_enable(&desc->irq_data);
			irq_state_clr_masked(desc);
		} else {
			unmask_irq(desc);
		}
	}
}

static void __irq_disable(struct irq_desc *desc, bool mask)
{
	if (irqd_irq_disabled(&desc->irq_data)) {
		if (mask)
			mask_irq(desc);
	} else {
		irq_state_set_disabled(desc);
		if (desc->irq_data.chip->irq_disable) {
			desc->irq_data.chip->irq_disable(&desc->irq_data);
			irq_state_set_masked(desc);
		} else if (mask) {
			mask_irq(desc);
		}
	}
}

/**
 * irq_disable - Mark interrupt disabled
 * @desc:	irq descriptor which should be disabled
 *
 * If the chip does not implement the irq_disable callback, we
 * use a lazy disable approach. That means we mark the interrupt
 * disabled, but leave the hardware unmasked. That's an
 * optimization because we avoid the hardware access for the
 * common case where no interrupt happens after we marked it
 * disabled. If an interrupt happens, then the interrupt flow
 * handler masks the line at the hardware level and marks it
 * pending.
 *
 * If the interrupt chip does not implement the irq_disable callback,
 * a driver can disable the lazy approach for a particular irq line by
 * calling 'irq_set_status_flags(irq, IRQ_DISABLE_UNLAZY)'. This can
 * be used for devices which cannot disable the interrupt at the
 * device level under certain circumstances and have to use
 * disable_irq[_nosync] instead.
 */
void irq_disable(struct irq_desc *desc)
{
	__irq_disable(desc, irq_settings_disable_unlazy(desc));
}

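/*
 * Illustrative sketch (not part of this file): a driver that cannot
 * tolerate the lazy-disable optimization, e.g. because the device keeps
 * the line asserted while it is supposed to be disabled, can opt out per
 * irq line before requesting it. The irq number and names are placeholders.
 *
 *	irq_set_status_flags(irq, IRQ_DISABLE_UNLAZY);
 *	ret = request_irq(irq, example_isr, IRQF_TRIGGER_LOW,
 *			  "example-dev", dev);
 *
 * With the flag set, irq_disable() above calls __irq_disable() with
 * mask == true, so disable_irq[_nosync]() really masks the line at the
 * irq chip instead of only marking it disabled.
 */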
void irq_percpu_enable(struct irq_desc *desc, unsigned int cpu)
{
	if (desc->irq_data.chip->irq_enable)
		desc->irq_data.chip->irq_enable(&desc->irq_data);
	else
		desc->irq_data.chip->irq_unmask(&desc->irq_data);
	cpumask_set_cpu(cpu, desc->percpu_enabled);
}

void irq_percpu_disable(struct irq_desc *desc, unsigned int cpu)
{
	if (desc->irq_data.chip->irq_disable)
		desc->irq_data.chip->irq_disable(&desc->irq_data);
	else
		desc->irq_data.chip->irq_mask(&desc->irq_data);
	cpumask_clear_cpu(cpu, desc->percpu_enabled);
}

static inline void mask_ack_irq(struct irq_desc *desc)
{
	if (desc->irq_data.chip->irq_mask_ack) {
		desc->irq_data.chip->irq_mask_ack(&desc->irq_data);
		irq_state_set_masked(desc);
	} else {
		mask_irq(desc);
		if (desc->irq_data.chip->irq_ack)
			desc->irq_data.chip->irq_ack(&desc->irq_data);
	}
}

void mask_irq(struct irq_desc *desc)
{
	if (irqd_irq_masked(&desc->irq_data))
		return;

	if (desc->irq_data.chip->irq_mask) {
		desc->irq_data.chip->irq_mask(&desc->irq_data);
		irq_state_set_masked(desc);
	}
}

void unmask_irq(struct irq_desc *desc)
{
	if (!irqd_irq_masked(&desc->irq_data))
		return;

	if (desc->irq_data.chip->irq_unmask) {
		desc->irq_data.chip->irq_unmask(&desc->irq_data);
		irq_state_clr_masked(desc);
	}
}

void unmask_threaded_irq(struct irq_desc *desc)
{
	struct irq_chip *chip = desc->irq_data.chip;

	if (chip->flags & IRQCHIP_EOI_THREADED)
		chip->irq_eoi(&desc->irq_data);

	unmask_irq(desc);
}

/*
 *	handle_nested_irq - Handle a nested irq from an irq thread
 *	@irq:	the interrupt number
 *
 *	Handle interrupts which are nested into a threaded interrupt
 *	handler. The handler function is called inside the calling
 *	thread's context.
 */
void handle_nested_irq(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irqaction *action;
	irqreturn_t action_ret;

	might_sleep();

	raw_spin_lock_irq(&desc->lock);

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);

	action = desc->action;
	if (unlikely(!action || irqd_irq_disabled(&desc->irq_data))) {
		desc->istate |= IRQS_PENDING;
		goto out_unlock;
	}

	kstat_incr_irqs_this_cpu(desc);
	irqd_set(&desc->irq_data, IRQD_IRQ_INPROGRESS);
	raw_spin_unlock_irq(&desc->lock);

	action_ret = IRQ_NONE;
	for_each_action_of_desc(desc, action)
		action_ret |= action->thread_fn(action->irq, action->dev_id);

	if (!noirqdebug)
		note_interrupt(desc, action_ret);

	raw_spin_lock_irq(&desc->lock);
	irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);

out_unlock:
	raw_spin_unlock_irq(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_nested_irq);

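/*
 * Illustrative sketch (not part of this file): handle_nested_irq() is
 * typically called from the threaded handler of a slow-bus (e.g. I2C)
 * interrupt controller, once per child interrupt found in its status
 * register. All names prefixed with "example_" are hypothetical.
 *
 *	static irqreturn_t example_expander_thread(int irq, void *data)
 *	{
 *		struct example_expander *chip = data;
 *		unsigned long status = example_read_status(chip);
 *		int bit;
 *
 *		for_each_set_bit(bit, &status, chip->nr_irqs)
 *			handle_nested_irq(irq_find_mapping(chip->domain, bit));
 *
 *		return IRQ_HANDLED;
 *	}
 *
 * The child interrupts themselves are requested with
 * request_threaded_irq() and marked via irq_set_nested_thread().
 */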
static bool irq_check_poll(struct irq_desc *desc)
{
	if (!(desc->istate & IRQS_POLL_INPROGRESS))
		return false;
	return irq_wait_for_poll(desc);
}

static bool irq_may_run(struct irq_desc *desc)
{
	unsigned int mask = IRQD_IRQ_INPROGRESS | IRQD_WAKEUP_ARMED;

	/*
	 * If the interrupt is not in progress and is not an armed
	 * wakeup interrupt, proceed.
	 */
	if (!irqd_has_set(&desc->irq_data, mask))
		return true;

	/*
	 * If the interrupt is an armed wakeup source, mark it pending
	 * and suspended, disable it and notify the pm core about the
	 * event.
	 */
	if (irq_pm_check_wakeup(desc))
		return false;

	/*
	 * Handle a potential concurrent poll on a different core.
	 */
	return irq_check_poll(desc);
}

/**
 *	handle_simple_irq - Simple and software-decoded IRQs.
 *	@desc:	the interrupt description structure for this irq
 *
 *	Simple interrupts are either sent from a demultiplexing interrupt
 *	handler or come from hardware, where no interrupt hardware control
 *	is necessary.
 *
 *	Note: The caller is expected to handle the ack, clear, mask and
 *	unmask issues if necessary.
 */
void handle_simple_irq(struct irq_desc *desc)
{
	raw_spin_lock(&desc->lock);

	if (!irq_may_run(desc))
		goto out_unlock;

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);

	if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
		desc->istate |= IRQS_PENDING;
		goto out_unlock;
	}

	kstat_incr_irqs_this_cpu(desc);
	handle_irq_event(desc);

out_unlock:
	raw_spin_unlock(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_simple_irq);

/**
 *	handle_untracked_irq - Simple and software-decoded IRQs.
 *	@desc:	the interrupt description structure for this irq
 *
 *	Untracked interrupts are sent from a demultiplexing interrupt
 *	handler when the demultiplexer does not know which device in its
 *	multiplexed irq domain generated the interrupt. IRQs handled
 *	through here are not subjected to stats tracking, randomness, or
 *	spurious interrupt detection.
 *
 *	Note: Like handle_simple_irq, the caller is expected to handle
 *	the ack, clear, mask and unmask issues if necessary.
 */
void handle_untracked_irq(struct irq_desc *desc)
{
	unsigned int flags = 0;

	raw_spin_lock(&desc->lock);

	if (!irq_may_run(desc))
		goto out_unlock;

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);

	if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
		desc->istate |= IRQS_PENDING;
		goto out_unlock;
	}

	desc->istate &= ~IRQS_PENDING;
	irqd_set(&desc->irq_data, IRQD_IRQ_INPROGRESS);
	raw_spin_unlock(&desc->lock);

	__handle_irq_event_percpu(desc, &flags);

	raw_spin_lock(&desc->lock);
	irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);

out_unlock:
	raw_spin_unlock(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_untracked_irq);

/*
 * Called unconditionally from handle_level_irq() and only for oneshot
 * interrupts from handle_fasteoi_irq()
 */
static void cond_unmask_irq(struct irq_desc *desc)
{
	/*
	 * We need to unmask in the following cases:
	 * - Standard level irq (IRQF_ONESHOT is not set)
	 * - Oneshot irq which did not wake the thread (caused by a
	 *   spurious interrupt or a primary handler handling it
	 *   completely).
	 */
	if (!irqd_irq_disabled(&desc->irq_data) &&
	    irqd_irq_masked(&desc->irq_data) && !desc->threads_oneshot)
		unmask_irq(desc);
}

/**
 *	handle_level_irq - Level type irq handler
 *	@desc:	the interrupt description structure for this irq
 *
 *	Level type interrupts are active as long as the hardware line has
 *	the active level. This may require masking the interrupt and
 *	unmasking it after the associated handler has acknowledged the
 *	device, so the interrupt line is back to inactive.
 */
void handle_level_irq(struct irq_desc *desc)
{
	raw_spin_lock(&desc->lock);
	mask_ack_irq(desc);

	if (!irq_may_run(desc))
		goto out_unlock;

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);

	/*
	 * If it's disabled or no action is available,
	 * keep it masked and get out of here.
	 */
	if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
		desc->istate |= IRQS_PENDING;
		goto out_unlock;
	}

	kstat_incr_irqs_this_cpu(desc);
	handle_irq_event(desc);

	cond_unmask_irq(desc);

out_unlock:
	raw_spin_unlock(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_level_irq);

#ifdef CONFIG_IRQ_PREFLOW_FASTEOI
static inline void preflow_handler(struct irq_desc *desc)
{
	if (desc->preflow_handler)
		desc->preflow_handler(&desc->irq_data);
}
#else
static inline void preflow_handler(struct irq_desc *desc) { }
#endif

static void cond_unmask_eoi_irq(struct irq_desc *desc, struct irq_chip *chip)
{
	if (!(desc->istate & IRQS_ONESHOT)) {
		chip->irq_eoi(&desc->irq_data);
		return;
	}
	/*
	 * We need to unmask in the following cases:
	 * - Oneshot irq which did not wake the thread (caused by a
	 *   spurious interrupt or a primary handler handling it
	 *   completely).
	 */
	if (!irqd_irq_disabled(&desc->irq_data) &&
	    irqd_irq_masked(&desc->irq_data) && !desc->threads_oneshot) {
		chip->irq_eoi(&desc->irq_data);
		unmask_irq(desc);
	} else if (!(chip->flags & IRQCHIP_EOI_THREADED)) {
		chip->irq_eoi(&desc->irq_data);
	}
}

/**
 *	handle_fasteoi_irq - irq handler for transparent controllers
 *	@desc:	the interrupt description structure for this irq
 *
 *	Only a single callback will be issued to the chip: an ->eoi()
 *	call when the interrupt has been serviced. This enables support
 *	for modern forms of interrupt handlers, which handle the flow
 *	details in hardware, transparently.
 */
void handle_fasteoi_irq(struct irq_desc *desc)
{
	struct irq_chip *chip = desc->irq_data.chip;

	raw_spin_lock(&desc->lock);

	if (!irq_may_run(desc))
		goto out;

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);

	/*
	 * If it's disabled or no action is available,
	 * then mask it and get out of here:
	 */
	if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
		desc->istate |= IRQS_PENDING;
		mask_irq(desc);
		goto out;
	}

	kstat_incr_irqs_this_cpu(desc);
	if (desc->istate & IRQS_ONESHOT)
		mask_irq(desc);

	preflow_handler(desc);
	handle_irq_event(desc);

	cond_unmask_eoi_irq(desc, chip);

	raw_spin_unlock(&desc->lock);
	return;
out:
	if (!(chip->flags & IRQCHIP_EOI_IF_HANDLED))
		chip->irq_eoi(&desc->irq_data);
	raw_spin_unlock(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_fasteoi_irq);

/**
 *	handle_edge_irq - edge type IRQ handler
 *	@desc:	the interrupt description structure for this irq
 *
 *	Interrupts occur on the falling and/or rising edge of a hardware
 *	signal. The occurrence is latched into the irq controller hardware
 *	and must be acked in order to be reenabled. After the ack another
 *	interrupt can happen on the same source even before the first one
 *	is handled by the associated event handler. If this happens it
 *	might be necessary to disable (mask) the interrupt depending on the
 *	controller hardware. This requires reenabling the interrupt inside
 *	the loop which handles the interrupts which have arrived while
 *	the handler was running. If all pending interrupts are handled, the
 *	loop is left.
 */
void handle_edge_irq(struct irq_desc *desc)
{
	raw_spin_lock(&desc->lock);

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);

	if (!irq_may_run(desc)) {
		desc->istate |= IRQS_PENDING;
		mask_ack_irq(desc);
		goto out_unlock;
	}

	/*
	 * If it's disabled or no action is available then mask it and get
	 * out of here.
	 */
	if (irqd_irq_disabled(&desc->irq_data) || !desc->action) {
		desc->istate |= IRQS_PENDING;
		mask_ack_irq(desc);
		goto out_unlock;
	}

	kstat_incr_irqs_this_cpu(desc);

	/* Start handling the irq */
	desc->irq_data.chip->irq_ack(&desc->irq_data);

	do {
		if (unlikely(!desc->action)) {
			mask_irq(desc);
			goto out_unlock;
		}

		/*
		 * When another irq arrived while we were handling
		 * one, we could have masked the irq.
		 * Re-enable it, if it was not disabled in the meantime.
		 */
		if (unlikely(desc->istate & IRQS_PENDING)) {
			if (!irqd_irq_disabled(&desc->irq_data) &&
			    irqd_irq_masked(&desc->irq_data))
				unmask_irq(desc);
		}

		handle_irq_event(desc);

	} while ((desc->istate & IRQS_PENDING) &&
		 !irqd_irq_disabled(&desc->irq_data));

out_unlock:
	raw_spin_unlock(&desc->lock);
}
EXPORT_SYMBOL(handle_edge_irq);

#ifdef CONFIG_IRQ_EDGE_EOI_HANDLER
/**
 *	handle_edge_eoi_irq - edge eoi type IRQ handler
 *	@desc:	the interrupt description structure for this irq
 *
 * Similar to the above handle_edge_irq, but using eoi and w/o the
 * mask/unmask logic.
 */
void handle_edge_eoi_irq(struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);

	raw_spin_lock(&desc->lock);

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);

	if (!irq_may_run(desc)) {
		desc->istate |= IRQS_PENDING;
		goto out_eoi;
	}

	/*
	 * If it's disabled or no action is available then mask it and get
	 * out of here.
	 */
	if (irqd_irq_disabled(&desc->irq_data) || !desc->action) {
		desc->istate |= IRQS_PENDING;
		goto out_eoi;
	}

	kstat_incr_irqs_this_cpu(desc);

	do {
		if (unlikely(!desc->action))
			goto out_eoi;

		handle_irq_event(desc);

	} while ((desc->istate & IRQS_PENDING) &&
		 !irqd_irq_disabled(&desc->irq_data));

out_eoi:
	chip->irq_eoi(&desc->irq_data);
	raw_spin_unlock(&desc->lock);
}
#endif

/**
 *	handle_percpu_irq - Per CPU local irq handler
 *	@desc:	the interrupt description structure for this irq
 *
 *	Per CPU interrupts on SMP machines without locking requirements
 */
void handle_percpu_irq(struct irq_desc *desc)
857
{
858
	struct irq_chip *chip = irq_desc_get_chip(desc);
859

860
	kstat_incr_irqs_this_cpu(desc);
861

862
863
	if (chip->irq_ack)
		chip->irq_ack(&desc->irq_data);
864

865
	handle_irq_event_percpu(desc);
866

867
868
	if (chip->irq_eoi)
		chip->irq_eoi(&desc->irq_data);
869
870
}

/**
 * handle_percpu_devid_irq - Per CPU local irq handler with per cpu dev ids
 * @desc:	the interrupt description structure for this irq
 *
 * Per CPU interrupts on SMP machines without locking requirements. Same as
 * handle_percpu_irq() above but with the following extras:
 *
 * action->percpu_dev_id is a pointer to percpu variables which
 * contain the real device id for the cpu on which this handler is
 * called
 */
void handle_percpu_devid_irq(struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);
	struct irqaction *action = desc->action;
	unsigned int irq = irq_desc_get_irq(desc);
	irqreturn_t res;

	kstat_incr_irqs_this_cpu(desc);

	if (chip->irq_ack)
		chip->irq_ack(&desc->irq_data);

	if (likely(action)) {
		trace_irq_handler_entry(irq, action);
		res = action->handler(irq, raw_cpu_ptr(action->percpu_dev_id));
		trace_irq_handler_exit(irq, action, res);
	} else {
		unsigned int cpu = smp_processor_id();
		bool enabled = cpumask_test_cpu(cpu, desc->percpu_enabled);

		if (enabled)
			irq_percpu_disable(desc, cpu);

		pr_err_once("Spurious%s percpu IRQ%u on CPU%u\n",
			    enabled ? " and unmasked" : "", irq, cpu);
	}

	if (chip->irq_eoi)
		chip->irq_eoi(&desc->irq_data);
}

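/*
 * Illustrative sketch (not part of this file): per-CPU interrupts such as
 * local timers are requested once with a percpu dev_id and then enabled on
 * each CPU individually. The "example_" names are hypothetical.
 *
 *	static DEFINE_PER_CPU(struct example_pcpu, example_pcpu_data);
 *
 *	ret = request_percpu_irq(irq, example_timer_isr, "example-timer",
 *				 &example_pcpu_data);
 *
 *	// then, on each CPU (e.g. from a hotplug callback):
 *	enable_percpu_irq(irq, IRQ_TYPE_NONE);
 *	// ... and later:
 *	disable_percpu_irq(irq);
 *
 * The flow handler for such lines is handle_percpu_devid_irq() above,
 * usually installed via irq_set_percpu_devid() plus
 * irq_set_chip_and_handler().
 */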
static void
__irq_do_set_handler(struct irq_desc *desc, irq_flow_handler_t handle,
		     int is_chained, const char *name)
{
	if (!handle) {
		handle = handle_bad_irq;
	} else {
		struct irq_data *irq_data = &desc->irq_data;
#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
		/*
		 * With hierarchical domains we might run into a
		 * situation where the outermost chip is not yet set
		 * up, but the inner chips are there.  Instead of
		 * bailing we install the handler, but obviously we
		 * cannot enable/startup the interrupt at this point.
		 */
		while (irq_data) {
			if (irq_data->chip != &no_irq_chip)
				break;
			/*
			 * Bail out if the outer chip is not set up
			 * and the interrupt is supposed to be started
			 * right away.
			 */
			if (WARN_ON(is_chained))
				return;
			/* Try the parent */
			irq_data = irq_data->parent_data;
		}
#endif
		if (WARN_ON(!irq_data || irq_data->chip == &no_irq_chip))
			return;
	}

	/* Uninstall? */
	if (handle == handle_bad_irq) {
		if (desc->irq_data.chip != &no_irq_chip)
			mask_ack_irq(desc);
		irq_state_set_disabled(desc);
		if (is_chained)
			desc->action = NULL;
		desc->depth = 1;
	}
	desc->handle_irq = handle;
	desc->name = name;

	if (handle != handle_bad_irq && is_chained) {
		unsigned int type = irqd_get_trigger_type(&desc->irq_data);

		/*
		 * We're about to start this interrupt immediately,
		 * hence the need to set the trigger configuration.
		 * But the .set_type callback may have overridden the
		 * flow handler, ignoring that we're dealing with a
		 * chained interrupt. Reset it immediately because we
		 * do know better.
		 */
		if (type != IRQ_TYPE_NONE) {
			__irq_set_trigger(desc, type);
			desc->handle_irq = handle;
		}

		irq_settings_set_noprobe(desc);
		irq_settings_set_norequest(desc);
		irq_settings_set_nothread(desc);
		desc->action = &chained_action;
		irq_activate_and_startup(desc, IRQ_RESEND);
	}
}

void
__irq_set_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained,
		  const char *name)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, 0);

	if (!desc)
		return;

	__irq_do_set_handler(desc, handle, is_chained, name);
	irq_put_desc_busunlock(desc, flags);
}
EXPORT_SYMBOL_GPL(__irq_set_handler);

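/*
 * Illustrative sketch (not part of this file): a GPIO or similar
 * demultiplexing controller typically installs a chained flow handler for
 * its upstream parent interrupt and then dispatches the child interrupts
 * itself. All "example_" names are hypothetical.
 *
 *	static void example_gpio_irq_handler(struct irq_desc *desc)
 *	{
 *		struct irq_chip *chip = irq_desc_get_chip(desc);
 *		struct example_gpio *gpio = irq_desc_get_handler_data(desc);
 *		unsigned long pending = example_read_pending(gpio);
 *		int bit;
 *
 *		chained_irq_enter(chip, desc);
 *		for_each_set_bit(bit, &pending, gpio->ngpio)
 *			generic_handle_irq(irq_find_mapping(gpio->domain, bit));
 *		chained_irq_exit(chip, desc);
 *	}
 *
 *	// in probe(), on the parent line:
 *	irq_set_chained_handler_and_data(parent_irq,
 *					 example_gpio_irq_handler, gpio);
 *
 * Note that chained handlers never get a struct irqaction; a request_irq()
 * on the parent line would trigger the bad_chained_irq() warning above.
 */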
void
irq_set_chained_handler_and_data(unsigned int irq, irq_flow_handler_t handle,
				 void *data)