regmap-irq.c 20.9 KB
Newer Older
1
2
3
4
5
6
7
8
9
10
11
12
/*
 * regmap based irq_chip
 *
 * Copyright 2011 Wolfson Microelectronics plc
 *
 * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

13
#include <linux/device.h>
14
#include <linux/export.h>
15
#include <linux/interrupt.h>
16
#include <linux/irq.h>
17
#include <linux/irqdomain.h>
18
#include <linux/pm_runtime.h>
19
#include <linux/regmap.h>
20
21
22
23
24
25
#include <linux/slab.h>

#include "internal.h"

/*
 * Runtime state for one registered regmap IRQ controller.  The *_buf
 * arrays shadow the hardware registers (one element per register);
 * the irq_chip callbacks update them under @lock and
 * regmap_irq_sync_unlock() writes the changes back to the hardware.
 */
struct regmap_irq_chip_data {
	struct mutex lock;		/* held from irq_bus_lock to irq_bus_sync_unlock */
	struct irq_chip irq_chip;	/* per-instance copy of regmap_irq_chip */

	struct regmap *map;
	const struct regmap_irq_chip *chip;

	int irq_base;			/* base IRQ number when using a legacy domain */
	struct irq_domain *domain;

	int irq;			/* primary (parent) interrupt */
	int wake_count;			/* net wake enables to propagate to the parent */

	void *status_reg_buf;		/* raw buffer for bulk status reads */
	unsigned int *status_buf;	/* decoded status, one word per register */
	unsigned int *mask_buf;		/* current mask state */
	unsigned int *mask_buf_def;	/* all valid mask bits per register */
	unsigned int *wake_buf;		/* wake enable state, if chip has wake_base */
	unsigned int *type_buf;		/* edge type bits, if chip has type registers */
	unsigned int *type_buf_def;	/* all valid type bits per type register */

	unsigned int irq_reg_stride;
	unsigned int type_reg_stride;
};

static inline const
struct regmap_irq *irq_to_regmap_irq(struct regmap_irq_chip_data *data,
				     int irq)
{
53
	return &data->chip->irqs[irq];
54
55
56
57
58
59
60
61
62
63
64
65
}

/* irq_bus_lock callback: serialise updates to the shadow buffers. */
static void regmap_irq_lock(struct irq_data *data)
{
	struct regmap_irq_chip_data *chip_data;

	chip_data = irq_data_get_irq_chip_data(data);
	mutex_lock(&chip_data->lock);
}

/*
 * irq_bus_sync_unlock callback: write any mask/wake/type changes
 * accumulated by the irq_chip callbacks out to the hardware, propagate
 * wake count changes to the parent interrupt, then drop the mutex
 * taken in regmap_irq_lock().
 */
static void regmap_irq_sync_unlock(struct irq_data *data)
{
	struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
	struct regmap *map = d->map;
	int i, ret;
	u32 reg;
	u32 unmask_offset;

	if (d->chip->runtime_pm) {
		ret = pm_runtime_get_sync(map->dev);
		if (ret < 0)
			dev_err(map->dev, "IRQ sync failed to resume: %d\n",
				ret);
	}

	/*
	 * If there's been a change in the mask write it back to the
	 * hardware.  We rely on the use of the regmap core cache to
	 * suppress pointless writes.
	 */
	for (i = 0; i < d->chip->num_regs; i++) {
		reg = d->chip->mask_base +
			(i * map->reg_stride * d->irq_reg_stride);
		if (d->chip->mask_invert) {
			/* Mask register uses inverted sense */
			ret = regmap_update_bits(d->map, reg,
					 d->mask_buf_def[i], ~d->mask_buf[i]);
		} else if (d->chip->unmask_base) {
			/* set mask with mask_base register */
			ret = regmap_update_bits(d->map, reg,
					d->mask_buf_def[i], ~d->mask_buf[i]);
			if (ret < 0)
				dev_err(d->map->dev,
					"Failed to sync unmasks in %x\n",
					reg);
			unmask_offset = d->chip->unmask_base -
							d->chip->mask_base;
			/* clear mask with unmask_base register */
			ret = regmap_update_bits(d->map,
					reg + unmask_offset,
					d->mask_buf_def[i],
					d->mask_buf[i]);
		} else {
			ret = regmap_update_bits(d->map, reg,
					 d->mask_buf_def[i], d->mask_buf[i]);
		}
		if (ret != 0)
			dev_err(d->map->dev, "Failed to sync masks in %x\n",
				reg);

		reg = d->chip->wake_base +
			(i * map->reg_stride * d->irq_reg_stride);
		if (d->wake_buf) {
			if (d->chip->wake_invert)
				ret = regmap_update_bits(d->map, reg,
							 d->mask_buf_def[i],
							 ~d->wake_buf[i]);
			else
				ret = regmap_update_bits(d->map, reg,
							 d->mask_buf_def[i],
							 d->wake_buf[i]);
			if (ret != 0)
				dev_err(d->map->dev,
					"Failed to sync wakes in %x: %d\n",
					reg, ret);
		}

		if (!d->chip->init_ack_masked)
			continue;
		/*
		 * Ack all the masked interrupts unconditionally,
		 * OR if there is masked interrupt which hasn't been Acked,
		 * it'll be ignored in irq handler, then may introduce irq storm
		 */
		if (d->mask_buf[i] && (d->chip->ack_base || d->chip->use_ack)) {
			reg = d->chip->ack_base +
				(i * map->reg_stride * d->irq_reg_stride);
			/* some chips ack by write 0 */
			if (d->chip->ack_invert)
				ret = regmap_write(map, reg, ~d->mask_buf[i]);
			else
				ret = regmap_write(map, reg, d->mask_buf[i]);
			if (ret != 0)
				dev_err(d->map->dev, "Failed to ack 0x%x: %d\n",
					reg, ret);
		}
	}

	/* Sync any edge type configuration changes */
	for (i = 0; i < d->chip->num_type_reg; i++) {
		if (!d->type_buf_def[i])
			continue;
		reg = d->chip->type_base +
			(i * map->reg_stride * d->type_reg_stride);
		if (d->chip->type_invert)
			ret = regmap_update_bits(d->map, reg,
				d->type_buf_def[i], ~d->type_buf[i]);
		else
			ret = regmap_update_bits(d->map, reg,
				d->type_buf_def[i], d->type_buf[i]);
		if (ret != 0)
			dev_err(d->map->dev, "Failed to sync type in %x\n",
				reg);
	}

	if (d->chip->runtime_pm)
		pm_runtime_put(map->dev);

	/* If we've changed our wakeup count propagate it to the parent */
	if (d->wake_count < 0)
		for (i = d->wake_count; i < 0; i++)
			irq_set_irq_wake(d->irq, 0);
	else if (d->wake_count > 0)
		for (i = 0; i < d->wake_count; i++)
			irq_set_irq_wake(d->irq, 1);

	d->wake_count = 0;

	mutex_unlock(&d->lock);
}

static void regmap_irq_enable(struct irq_data *data)
{
	struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
185
	struct regmap *map = d->map;
186
	const struct regmap_irq *irq_data = irq_to_regmap_irq(d, data->hwirq);
187

188
	d->mask_buf[irq_data->reg_offset / map->reg_stride] &= ~irq_data->mask;
189
190
191
192
193
}

static void regmap_irq_disable(struct irq_data *data)
{
	struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
194
	struct regmap *map = d->map;
195
	const struct regmap_irq *irq_data = irq_to_regmap_irq(d, data->hwirq);
196

197
	d->mask_buf[irq_data->reg_offset / map->reg_stride] |= irq_data->mask;
198
199
}

200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
/*
 * irq_set_type callback: record the requested edge trigger in the type
 * shadow buffer; written to the hardware in regmap_irq_sync_unlock().
 * Only edge configurations are supported; returns -EINVAL otherwise.
 */
static int regmap_irq_set_type(struct irq_data *data, unsigned int type)
{
	struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
	struct regmap *map = d->map;
	const struct regmap_irq *irq_data = irq_to_regmap_irq(d, data->hwirq);
	int reg = irq_data->type_reg_offset / map->reg_stride;
	unsigned int rising = irq_data->type_rising_mask;
	unsigned int falling = irq_data->type_falling_mask;

	/* IRQs with no configurable type bits accept any trigger */
	if (!(rising | falling))
		return 0;

	/* Clear both edge bits, then set the ones requested */
	d->type_buf[reg] &= ~(falling | rising);
	switch (type) {
	case IRQ_TYPE_EDGE_FALLING:
		d->type_buf[reg] |= falling;
		break;

	case IRQ_TYPE_EDGE_RISING:
		d->type_buf[reg] |= rising;
		break;

	case IRQ_TYPE_EDGE_BOTH:
		d->type_buf[reg] |= falling | rising;
		break;

	default:
		return -EINVAL;
	}
	return 0;
}

232
233
234
235
236
237
238
static int regmap_irq_set_wake(struct irq_data *data, unsigned int on)
{
	struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
	struct regmap *map = d->map;
	const struct regmap_irq *irq_data = irq_to_regmap_irq(d, data->hwirq);

	if (on) {
239
240
241
		if (d->wake_buf)
			d->wake_buf[irq_data->reg_offset / map->reg_stride]
				&= ~irq_data->mask;
242
243
		d->wake_count++;
	} else {
244
245
246
		if (d->wake_buf)
			d->wake_buf[irq_data->reg_offset / map->reg_stride]
				|= irq_data->mask;
247
248
249
250
251
252
		d->wake_count--;
	}

	return 0;
}

253
/*
 * Template irq_chip; copied into each regmap_irq_chip_data instance
 * (so the name can be set per chip) in regmap_add_irq_chip().
 */
static const struct irq_chip regmap_irq_chip = {
	.irq_bus_lock		= regmap_irq_lock,
	.irq_bus_sync_unlock	= regmap_irq_sync_unlock,
	.irq_disable		= regmap_irq_disable,
	.irq_enable		= regmap_irq_enable,
	.irq_set_type		= regmap_irq_set_type,
	.irq_set_wake		= regmap_irq_set_wake,
};

static irqreturn_t regmap_irq_thread(int irq, void *d)
{
	struct regmap_irq_chip_data *data = d;
Mark Brown's avatar
Mark Brown committed
265
	const struct regmap_irq_chip *chip = data->chip;
266
267
	struct regmap *map = data->map;
	int ret, i;
268
	bool handled = false;
269
	u32 reg;
270

271
272
273
	if (chip->handle_pre_irq)
		chip->handle_pre_irq(chip->irq_drv_data);

274
275
276
277
278
	if (chip->runtime_pm) {
		ret = pm_runtime_get_sync(map->dev);
		if (ret < 0) {
			dev_err(map->dev, "IRQ thread failed to resume: %d\n",
				ret);
279
			pm_runtime_put(map->dev);
280
			goto exit;
281
282
283
		}
	}

284
285
286
287
	/*
	 * Read in the statuses, using a single bulk read if possible
	 * in order to reduce the I/O overheads.
	 */
288
	if (!map->use_single_read && map->reg_stride == 1 &&
289
290
291
292
	    data->irq_reg_stride == 1) {
		u8 *buf8 = data->status_reg_buf;
		u16 *buf16 = data->status_reg_buf;
		u32 *buf32 = data->status_reg_buf;
293

294
295
296
297
298
		BUG_ON(!data->status_reg_buf);

		ret = regmap_bulk_read(map, chip->status_base,
				       data->status_reg_buf,
				       chip->num_regs);
299
300
		if (ret != 0) {
			dev_err(map->dev, "Failed to read IRQ status: %d\n",
301
				ret);
302
			goto exit;
303
		}
304
305
306
307
308
309
310
311
312
313
314
315
316
317

		for (i = 0; i < data->chip->num_regs; i++) {
			switch (map->format.val_bytes) {
			case 1:
				data->status_buf[i] = buf8[i];
				break;
			case 2:
				data->status_buf[i] = buf16[i];
				break;
			case 4:
				data->status_buf[i] = buf32[i];
				break;
			default:
				BUG();
318
				goto exit;
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
			}
		}

	} else {
		for (i = 0; i < data->chip->num_regs; i++) {
			ret = regmap_read(map, chip->status_base +
					  (i * map->reg_stride
					   * data->irq_reg_stride),
					  &data->status_buf[i]);

			if (ret != 0) {
				dev_err(map->dev,
					"Failed to read IRQ status: %d\n",
					ret);
				if (chip->runtime_pm)
					pm_runtime_put(map->dev);
335
				goto exit;
336
337
			}
		}
338
	}
339

340
341
342
343
344
345
346
347
	/*
	 * Ignore masked IRQs and ack if we need to; we ack early so
	 * there is no race between handling and acknowleding the
	 * interrupt.  We assume that typically few of the interrupts
	 * will fire simultaneously so don't worry about overhead from
	 * doing a write per register.
	 */
	for (i = 0; i < data->chip->num_regs; i++) {
348
349
		data->status_buf[i] &= ~data->mask_buf[i];

350
		if (data->status_buf[i] && (chip->ack_base || chip->use_ack)) {
351
352
353
			reg = chip->ack_base +
				(i * map->reg_stride * data->irq_reg_stride);
			ret = regmap_write(map, reg, data->status_buf[i]);
354
355
			if (ret != 0)
				dev_err(map->dev, "Failed to ack 0x%x: %d\n",
356
					reg, ret);
357
358
359
360
		}
	}

	for (i = 0; i < chip->num_irqs; i++) {
361
362
		if (data->status_buf[chip->irqs[i].reg_offset /
				     map->reg_stride] & chip->irqs[i].mask) {
363
			handle_nested_irq(irq_find_mapping(data->domain, i));
364
			handled = true;
365
366
367
		}
	}

368
369
370
	if (chip->runtime_pm)
		pm_runtime_put(map->dev);

371
372
373
374
exit:
	if (chip->handle_post_irq)
		chip->handle_post_irq(chip->irq_drv_data);

375
376
377
378
	if (handled)
		return IRQ_HANDLED;
	else
		return IRQ_NONE;
379
380
}

381
382
383
384
385
386
/*
 * irq_domain map callback: wire a freshly created virq up to this
 * chip's irq_chip as a nested-threaded interrupt under the primary IRQ.
 */
static int regmap_irq_map(struct irq_domain *h, unsigned int virq,
			  irq_hw_number_t hw)
{
	struct regmap_irq_chip_data *chip_data = h->host_data;

	irq_set_chip_data(virq, chip_data);
	irq_set_chip(virq, &chip_data->irq_chip);
	irq_set_nested_thread(virq, 1);
	irq_set_parent(virq, chip_data->irq);
	irq_set_noprobe(virq);

	return 0;
}

395
/* Domain ops: map child interrupts via regmap_irq_map() above. */
static const struct irq_domain_ops regmap_domain_ops = {
	.map	= regmap_irq_map,
	.xlate	= irq_domain_xlate_onetwocell,
};

400
/**
 * regmap_add_irq_chip() - Use standard regmap IRQ controller handling
 *
 * @map: The regmap for the device.
 * @irq: The IRQ the device uses to signal interrupts.
 * @irq_flags: The IRQF_ flags to use for the primary interrupt.
 * @irq_base: Allocate at specific IRQ number if irq_base > 0.
 * @chip: Configuration for the interrupt controller.
 * @data: Runtime data structure for the controller, allocated on success.
 *
 * Returns 0 on success or an errno on failure.
 *
 * In order for this to be efficient the chip really should use a
 * register cache.  The chip driver is responsible for restoring the
 * register values used by the IRQ controller over suspend and resume.
 */
int regmap_add_irq_chip(struct regmap *map, int irq, int irq_flags,
			int irq_base, const struct regmap_irq_chip *chip,
			struct regmap_irq_chip_data **data)
{
	struct regmap_irq_chip_data *d;
	int i;
	int ret = -ENOMEM;
	u32 reg;
	u32 unmask_offset;

	if (chip->num_regs <= 0)
		return -EINVAL;

	/* Sanity-check the IRQ layout against the register layout */
	for (i = 0; i < chip->num_irqs; i++) {
		if (chip->irqs[i].reg_offset % map->reg_stride)
			return -EINVAL;
		if (chip->irqs[i].reg_offset / map->reg_stride >=
		    chip->num_regs)
			return -EINVAL;
	}

	if (irq_base) {
		irq_base = irq_alloc_descs(irq_base, 0, chip->num_irqs, 0);
		if (irq_base < 0) {
			dev_warn(map->dev, "Failed to allocate IRQs: %d\n",
				 irq_base);
			return irq_base;
		}
	}

	d = kzalloc(sizeof(*d), GFP_KERNEL);
	if (!d)
		return -ENOMEM;

	/* One shadow word per status/mask register */
	d->status_buf = kcalloc(chip->num_regs, sizeof(unsigned int),
				GFP_KERNEL);
	if (!d->status_buf)
		goto err_alloc;

	d->mask_buf = kcalloc(chip->num_regs, sizeof(unsigned int),
			      GFP_KERNEL);
	if (!d->mask_buf)
		goto err_alloc;

	d->mask_buf_def = kcalloc(chip->num_regs, sizeof(unsigned int),
				  GFP_KERNEL);
	if (!d->mask_buf_def)
		goto err_alloc;

	if (chip->wake_base) {
		d->wake_buf = kcalloc(chip->num_regs, sizeof(unsigned int),
				      GFP_KERNEL);
		if (!d->wake_buf)
			goto err_alloc;
	}

	if (chip->num_type_reg) {
		d->type_buf_def = kcalloc(chip->num_type_reg,
					sizeof(unsigned int), GFP_KERNEL);
		if (!d->type_buf_def)
			goto err_alloc;

		d->type_buf = kcalloc(chip->num_type_reg, sizeof(unsigned int),
				      GFP_KERNEL);
		if (!d->type_buf)
			goto err_alloc;
	}

	/* Per-instance copy of the template chip so the name can differ */
	d->irq_chip = regmap_irq_chip;
	d->irq_chip.name = chip->name;
	d->irq = irq;
	d->map = map;
	d->chip = chip;
	d->irq_base = irq_base;

	if (chip->irq_reg_stride)
		d->irq_reg_stride = chip->irq_reg_stride;
	else
		d->irq_reg_stride = 1;

	if (chip->type_reg_stride)
		d->type_reg_stride = chip->type_reg_stride;
	else
		d->type_reg_stride = 1;

	/* Raw buffer for bulk status reads, when the layout permits them */
	if (!map->use_single_read && map->reg_stride == 1 &&
	    d->irq_reg_stride == 1) {
		d->status_reg_buf = kmalloc_array(chip->num_regs,
						  map->format.val_bytes,
						  GFP_KERNEL);
		if (!d->status_reg_buf)
			goto err_alloc;
	}

	mutex_init(&d->lock);

	for (i = 0; i < chip->num_irqs; i++)
		d->mask_buf_def[chip->irqs[i].reg_offset / map->reg_stride]
			|= chip->irqs[i].mask;

	/* Mask all the interrupts by default */
	for (i = 0; i < chip->num_regs; i++) {
		d->mask_buf[i] = d->mask_buf_def[i];
		reg = chip->mask_base +
			(i * map->reg_stride * d->irq_reg_stride);
		if (chip->mask_invert)
			ret = regmap_update_bits(map, reg,
					 d->mask_buf[i], ~d->mask_buf[i]);
		else if (d->chip->unmask_base) {
			/* Separate unmask register: mask via its offset */
			unmask_offset = d->chip->unmask_base -
					d->chip->mask_base;
			ret = regmap_update_bits(d->map,
					reg + unmask_offset,
					d->mask_buf[i],
					d->mask_buf[i]);
		} else
			ret = regmap_update_bits(map, reg,
					 d->mask_buf[i], d->mask_buf[i]);
		if (ret != 0) {
			dev_err(map->dev, "Failed to set masks in 0x%x: %d\n",
				reg, ret);
			goto err_alloc;
		}

		if (!chip->init_ack_masked)
			continue;

		/* Ack masked but set interrupts */
		reg = chip->status_base +
			(i * map->reg_stride * d->irq_reg_stride);
		ret = regmap_read(map, reg, &d->status_buf[i]);
		if (ret != 0) {
			dev_err(map->dev, "Failed to read IRQ status: %d\n",
				ret);
			goto err_alloc;
		}

		if (d->status_buf[i] && (chip->ack_base || chip->use_ack)) {
			reg = chip->ack_base +
				(i * map->reg_stride * d->irq_reg_stride);
			if (chip->ack_invert)
				ret = regmap_write(map, reg,
					~(d->status_buf[i] & d->mask_buf[i]));
			else
				ret = regmap_write(map, reg,
					d->status_buf[i] & d->mask_buf[i]);
			if (ret != 0) {
				dev_err(map->dev, "Failed to ack 0x%x: %d\n",
					reg, ret);
				goto err_alloc;
			}
		}
	}

	/* Wake is disabled by default */
	if (d->wake_buf) {
		for (i = 0; i < chip->num_regs; i++) {
			d->wake_buf[i] = d->mask_buf_def[i];
			reg = chip->wake_base +
				(i * map->reg_stride * d->irq_reg_stride);

			if (chip->wake_invert)
				ret = regmap_update_bits(map, reg,
							 d->mask_buf_def[i],
							 0);
			else
				ret = regmap_update_bits(map, reg,
							 d->mask_buf_def[i],
							 d->wake_buf[i]);
			if (ret != 0) {
				dev_err(map->dev, "Failed to set masks in 0x%x: %d\n",
					reg, ret);
				goto err_alloc;
			}
		}
	}

	/* Reset any configurable edge type registers to their defaults */
	if (chip->num_type_reg) {
		for (i = 0; i < chip->num_irqs; i++) {
			reg = chip->irqs[i].type_reg_offset / map->reg_stride;
			d->type_buf_def[reg] |= chip->irqs[i].type_rising_mask |
					chip->irqs[i].type_falling_mask;
		}
		for (i = 0; i < chip->num_type_reg; ++i) {
			if (!d->type_buf_def[i])
				continue;

			reg = chip->type_base +
				(i * map->reg_stride * d->type_reg_stride);
			if (chip->type_invert)
				ret = regmap_update_bits(map, reg,
					d->type_buf_def[i], 0xFF);
			else
				ret = regmap_update_bits(map, reg,
					d->type_buf_def[i], 0x0);
			if (ret != 0) {
				dev_err(map->dev,
					"Failed to set type in 0x%x: %x\n",
					reg, ret);
				goto err_alloc;
			}
		}
	}

	if (irq_base)
		d->domain = irq_domain_add_legacy(map->dev->of_node,
						  chip->num_irqs, irq_base, 0,
						  &regmap_domain_ops, d);
	else
		d->domain = irq_domain_add_linear(map->dev->of_node,
						  chip->num_irqs,
						  &regmap_domain_ops, d);
	if (!d->domain) {
		dev_err(map->dev, "Failed to create IRQ domain\n");
		ret = -ENOMEM;
		goto err_alloc;
	}

	ret = request_threaded_irq(irq, NULL, regmap_irq_thread,
				   irq_flags | IRQF_ONESHOT,
				   chip->name, d);
	if (ret != 0) {
		dev_err(map->dev, "Failed to request IRQ %d for %s: %d\n",
			irq, chip->name, ret);
		goto err_domain;
	}

	*data = d;

	return 0;

err_domain:
	/* Should really dispose of the domain but... */
err_alloc:
	kfree(d->type_buf);
	kfree(d->type_buf_def);
	kfree(d->wake_buf);
	kfree(d->mask_buf_def);
	kfree(d->mask_buf);
	kfree(d->status_buf);
	kfree(d->status_reg_buf);
	kfree(d);
	return ret;
}
EXPORT_SYMBOL_GPL(regmap_add_irq_chip);

/**
 * regmap_del_irq_chip() - Stop interrupt handling for a regmap IRQ chip
 *
 * @irq: Primary IRQ for the device
 * @d: &regmap_irq_chip_data allocated by regmap_add_irq_chip()
 *
 * This function also disposes of all mapped IRQs on the chip.
 */
void regmap_del_irq_chip(int irq, struct regmap_irq_chip_data *d)
{
	unsigned int virq;
	int hwirq;

	if (!d)
		return;

	/* Stop the primary handler before tearing down the domain */
	free_irq(irq, d);

	/* Dispose all virtual irq from irq domain before removing it */
	for (hwirq = 0; hwirq < d->chip->num_irqs; hwirq++) {
		/* Ignore hwirq if holes in the IRQ list */
		if (!d->chip->irqs[hwirq].mask)
			continue;

		/*
		 * Find the virtual irq of hwirq on chip and if it is
		 * there then dispose it
		 */
		virq = irq_find_mapping(d->domain, hwirq);
		if (virq)
			irq_dispose_mapping(virq);
	}

	irq_domain_remove(d->domain);
	kfree(d->type_buf);
	kfree(d->type_buf_def);
	kfree(d->wake_buf);
	kfree(d->mask_buf_def);
	kfree(d->mask_buf);
	kfree(d->status_reg_buf);
	kfree(d->status_buf);
	kfree(d);
}
EXPORT_SYMBOL_GPL(regmap_del_irq_chip);
706

707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
/* devres release callback: tear down the managed regmap IRQ chip. */
static void devm_regmap_irq_chip_release(struct device *dev, void *res)
{
	struct regmap_irq_chip_data **ptr = res;

	regmap_del_irq_chip((*ptr)->irq, *ptr);
}

/* devres match callback: select the resource holding @data. */
static int devm_regmap_irq_chip_match(struct device *dev, void *res, void *data)
{
	struct regmap_irq_chip_data **r = res;

	/* WARN_ON() returns its condition, so this folds check and warn */
	if (WARN_ON(!r || !*r))
		return 0;

	return *r == data;
}

/**
 * devm_regmap_add_irq_chip() - Resource manager regmap_add_irq_chip()
 *
 * @dev: The device pointer on which irq_chip belongs to.
 * @map: The regmap for the device.
 * @irq: The IRQ the device uses to signal interrupts
 * @irq_flags: The IRQF_ flags to use for the primary interrupt.
 * @irq_base: Allocate at specific IRQ number if irq_base > 0.
 * @chip: Configuration for the interrupt controller.
 * @data: Runtime data structure for the controller, allocated on success
 *
 * Returns 0 on success or an errno on failure.
 *
 * The &regmap_irq_chip_data will be automatically released when the device is
 * unbound.
 */
int devm_regmap_add_irq_chip(struct device *dev, struct regmap *map, int irq,
			     int irq_flags, int irq_base,
			     const struct regmap_irq_chip *chip,
			     struct regmap_irq_chip_data **data)
{
	struct regmap_irq_chip_data **dr, *chip_data;
	int rc;

	dr = devres_alloc(devm_regmap_irq_chip_release, sizeof(*dr),
			  GFP_KERNEL);
	if (!dr)
		return -ENOMEM;

	rc = regmap_add_irq_chip(map, irq, irq_flags, irq_base, chip,
				 &chip_data);
	if (rc < 0) {
		devres_free(dr);
		return rc;
	}

	*dr = chip_data;
	devres_add(dev, dr);
	*data = chip_data;
	return 0;
}
EXPORT_SYMBOL_GPL(devm_regmap_add_irq_chip);

/**
 * devm_regmap_del_irq_chip() - Resource managed regmap_del_irq_chip()
 *
 * @dev: Device for which which resource was allocated.
 * @irq: Primary IRQ for the device.
 * @data: &regmap_irq_chip_data allocated by regmap_add_irq_chip().
 *
 * A resource managed version of regmap_del_irq_chip().
 */
void devm_regmap_del_irq_chip(struct device *dev, int irq,
			      struct regmap_irq_chip_data *data)
{
	int rc;

	WARN_ON(irq != data->irq);
	rc = devres_release(dev, devm_regmap_irq_chip_release,
			    devm_regmap_irq_chip_match, data);
	/* WARN_ON(0) is a no-op, so no separate nonzero check is needed */
	WARN_ON(rc);
}
EXPORT_SYMBOL_GPL(devm_regmap_del_irq_chip);

792
/**
793
 * regmap_irq_chip_get_base() - Retrieve interrupt base for a regmap IRQ chip
794
 *
795
 * @data: regmap irq controller to operate on.
796
 *
797
 * Useful for drivers to request their own IRQs.
798
799
800
 */
int regmap_irq_chip_get_base(struct regmap_irq_chip_data *data)
{
801
	WARN_ON(!data->irq_base);
802
803
804
	return data->irq_base;
}
EXPORT_SYMBOL_GPL(regmap_irq_chip_get_base);
805
806

/**
807
 * regmap_irq_get_virq() - Map an interrupt on a chip to a virtual IRQ
808
 *
809
810
 * @data: regmap irq controller to operate on.
 * @irq: index of the interrupt requested in the chip IRQs.
811
 *
812
 * Useful for drivers to request their own IRQs.
813
814
815
 */
int regmap_irq_get_virq(struct regmap_irq_chip_data *data, int irq)
{
816
817
818
819
	/* Handle holes in the IRQ list */
	if (!data->chip->irqs[irq].mask)
		return -EINVAL;

820
821
822
	return irq_create_mapping(data->domain, irq);
}
EXPORT_SYMBOL_GPL(regmap_irq_get_virq);
823
824

/**
 * regmap_irq_get_domain() - Retrieve the irq_domain for the chip
 *
 * @data: regmap_irq controller to operate on.
 *
 * Useful for drivers to request their own IRQs and for integration
 * with subsystems.  For ease of integration NULL is accepted as a
 * domain, allowing devices to just call this even if no domain is
 * allocated.
 */
struct irq_domain *regmap_irq_get_domain(struct regmap_irq_chip_data *data)
{
	return data ? data->domain : NULL;
}
EXPORT_SYMBOL_GPL(regmap_irq_get_domain);