regmap-irq.c 20.6 KB
Newer Older
1
2
3
4
5
6
7
8
9
10
11
12
/*
 * regmap based irq_chip
 *
 * Copyright 2011 Wolfson Microelectronics plc
 *
 * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

13
#include <linux/device.h>
14
#include <linux/export.h>
15
#include <linux/interrupt.h>
16
#include <linux/irq.h>
17
#include <linux/irqdomain.h>
18
#include <linux/pm_runtime.h>
19
#include <linux/regmap.h>
20
21
22
23
24
25
#include <linux/slab.h>

#include "internal.h"

/* Runtime state for one regmap-based interrupt controller instance */
struct regmap_irq_chip_data {
	struct mutex lock;		/* serialises buffered IRQ ops vs. sync */
	struct irq_chip irq_chip;	/* per-instance copy so .name can differ */

	struct regmap *map;		/* underlying register map */
	const struct regmap_irq_chip *chip;	/* static chip description */

	int irq_base;			/* first Linux IRQ for legacy domains, 0 otherwise */
	struct irq_domain *domain;	/* hwirq <-> virq mapping */

	int irq;			/* primary (chained) interrupt */
	int wake_count;			/* net wake enables pending propagation */

	void *status_reg_buf;		/* raw bulk-read buffer, val_bytes per register */
	unsigned int *status_buf;	/* decoded per-register status */
	unsigned int *mask_buf;		/* current mask state, synced on bus unlock */
	unsigned int *mask_buf_def;	/* union of all mask bits per register */
	unsigned int *wake_buf;		/* wake enable state, only if chip->wake_base */

	unsigned int *type_buf;		/* IRQ trigger-type bits, synced on bus unlock */
	unsigned int *type_buf_def;	/* union of all type bits per type register */

	unsigned int irq_reg_stride;	/* register step between IRQ banks */
	unsigned int type_reg_stride;	/* register step between type banks */
};

/* Look up the static per-IRQ description for a hardware IRQ index */
static inline const
struct regmap_irq *irq_to_regmap_irq(struct regmap_irq_chip_data *data,
				     int irq)
{
	return &data->chip->irqs[irq];
}

/*
 * irq_bus_lock callback: taken by the IRQ core before enable/disable/
 * set_type/set_wake; changes are buffered under this lock and written
 * to hardware in regmap_irq_sync_unlock().
 */
static void regmap_irq_lock(struct irq_data *data)
{
	struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);

	mutex_lock(&d->lock);
}

/*
 * irq_bus_sync_unlock callback: flush all buffered mask/wake/type
 * changes to the hardware, propagate any pending wake-count delta to
 * the parent interrupt, then release the bus lock.
 */
static void regmap_irq_sync_unlock(struct irq_data *data)
{
	struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
	struct regmap *map = d->map;
	int i, ret;
	u32 reg;
	u32 unmask_offset;

	/* Resume the device for the register writes below, if needed */
	if (d->chip->runtime_pm) {
		ret = pm_runtime_get_sync(map->dev);
		if (ret < 0)
			dev_err(map->dev, "IRQ sync failed to resume: %d\n",
				ret);
	}

	/*
	 * If there's been a change in the mask write it back to the
	 * hardware.  We rely on the use of the regmap core cache to
	 * suppress pointless writes.
	 */
	for (i = 0; i < d->chip->num_regs; i++) {
		reg = d->chip->mask_base +
			(i * map->reg_stride * d->irq_reg_stride);
		if (d->chip->mask_invert) {
			/* 1 = enabled: write the inverted mask state */
			ret = regmap_update_bits(d->map, reg,
					 d->mask_buf_def[i], ~d->mask_buf[i]);
		} else if (d->chip->unmask_base) {
			/* set mask with mask_base register */
			ret = regmap_update_bits(d->map, reg,
					d->mask_buf_def[i], ~d->mask_buf[i]);
			if (ret < 0)
				dev_err(d->map->dev,
					"Failed to sync unmasks in %x\n",
					reg);
			unmask_offset = d->chip->unmask_base -
							d->chip->mask_base;
			/* clear mask with unmask_base register */
			ret = regmap_update_bits(d->map,
					reg + unmask_offset,
					d->mask_buf_def[i],
					d->mask_buf[i]);
		} else {
			ret = regmap_update_bits(d->map, reg,
					 d->mask_buf_def[i], d->mask_buf[i]);
		}
		if (ret != 0)
			dev_err(d->map->dev, "Failed to sync masks in %x\n",
				reg);

		/* reg is computed unconditionally but only used if wake_buf */
		reg = d->chip->wake_base +
			(i * map->reg_stride * d->irq_reg_stride);
		if (d->wake_buf) {
			if (d->chip->wake_invert)
				ret = regmap_update_bits(d->map, reg,
							 d->mask_buf_def[i],
							 ~d->wake_buf[i]);
			else
				ret = regmap_update_bits(d->map, reg,
							 d->mask_buf_def[i],
							 d->wake_buf[i]);
			if (ret != 0)
				dev_err(d->map->dev,
					"Failed to sync wakes in %x: %d\n",
					reg, ret);
		}

		if (!d->chip->init_ack_masked)
			continue;
		/*
		 * Ack all the masked interrupts unconditionally,
		 * OR if there is masked interrupt which hasn't been Acked,
		 * it'll be ignored in irq handler, then may introduce irq storm
		 */
		if (d->mask_buf[i] && (d->chip->ack_base || d->chip->use_ack)) {
			reg = d->chip->ack_base +
				(i * map->reg_stride * d->irq_reg_stride);
			/* some chips ack by write 0 */
			if (d->chip->ack_invert)
				ret = regmap_write(map, reg, ~d->mask_buf[i]);
			else
				ret = regmap_write(map, reg, d->mask_buf[i]);
			if (ret != 0)
				dev_err(d->map->dev, "Failed to ack 0x%x: %d\n",
					reg, ret);
		}
	}

	/* Write back any buffered trigger-type configuration */
	for (i = 0; i < d->chip->num_type_reg; i++) {
		if (!d->type_buf_def[i])
			continue;
		reg = d->chip->type_base +
			(i * map->reg_stride * d->type_reg_stride);
		if (d->chip->type_invert)
			ret = regmap_update_bits(d->map, reg,
				d->type_buf_def[i], ~d->type_buf[i]);
		else
			ret = regmap_update_bits(d->map, reg,
				d->type_buf_def[i], d->type_buf[i]);
		if (ret != 0)
			dev_err(d->map->dev, "Failed to sync type in %x\n",
				reg);
	}

	if (d->chip->runtime_pm)
		pm_runtime_put(map->dev);

	/* If we've changed our wakeup count propagate it to the parent */
	if (d->wake_count < 0)
		for (i = d->wake_count; i < 0; i++)
			irq_set_irq_wake(d->irq, 0);
	else if (d->wake_count > 0)
		for (i = 0; i < d->wake_count; i++)
			irq_set_irq_wake(d->irq, 1);

	d->wake_count = 0;

	mutex_unlock(&d->lock);
}

static void regmap_irq_enable(struct irq_data *data)
{
	struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
	struct regmap *map = d->map;
	const struct regmap_irq *irq_data = irq_to_regmap_irq(d, data->hwirq);

	/*
	 * Only clear the mask bit in the buffer; the hardware write is
	 * deferred to regmap_irq_sync_unlock().
	 */
	d->mask_buf[irq_data->reg_offset / map->reg_stride] &= ~irq_data->mask;
}

static void regmap_irq_disable(struct irq_data *data)
{
	struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
	struct regmap *map = d->map;
	const struct regmap_irq *irq_data = irq_to_regmap_irq(d, data->hwirq);

	/* Buffered only; written back in regmap_irq_sync_unlock() */
	d->mask_buf[irq_data->reg_offset / map->reg_stride] |= irq_data->mask;
}

200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
/*
 * Buffer the requested edge trigger type for this IRQ; the type
 * registers are written out in regmap_irq_sync_unlock().  Returns 0 on
 * success, -EINVAL for trigger types the chip cannot express.
 */
static int regmap_irq_set_type(struct irq_data *data, unsigned int type)
{
	struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
	struct regmap *map = d->map;
	const struct regmap_irq *irq_data = irq_to_regmap_irq(d, data->hwirq);
	int reg = irq_data->type_reg_offset / map->reg_stride;
	unsigned int both = irq_data->type_rising_mask |
			    irq_data->type_falling_mask;

	/* IRQs with no type bits have nothing to configure */
	if (!both)
		return 0;

	/* Start from a clean slate for this IRQ's type bits */
	d->type_buf[reg] &= ~both;

	if (type == IRQ_TYPE_EDGE_FALLING)
		d->type_buf[reg] |= irq_data->type_falling_mask;
	else if (type == IRQ_TYPE_EDGE_RISING)
		d->type_buf[reg] |= irq_data->type_rising_mask;
	else if (type == IRQ_TYPE_EDGE_BOTH)
		d->type_buf[reg] |= both;
	else
		return -EINVAL;

	return 0;
}

232
233
234
235
236
237
238
static int regmap_irq_set_wake(struct irq_data *data, unsigned int on)
{
	struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
	struct regmap *map = d->map;
	const struct regmap_irq *irq_data = irq_to_regmap_irq(d, data->hwirq);

	if (on) {
239
240
241
		if (d->wake_buf)
			d->wake_buf[irq_data->reg_offset / map->reg_stride]
				&= ~irq_data->mask;
242
243
		d->wake_count++;
	} else {
244
245
246
		if (d->wake_buf)
			d->wake_buf[irq_data->reg_offset / map->reg_stride]
				|= irq_data->mask;
247
248
249
250
251
252
		d->wake_count--;
	}

	return 0;
}

253
/* Template irq_chip; copied per instance so .name can be customised */
static const struct irq_chip regmap_irq_chip = {
	.irq_bus_lock		= regmap_irq_lock,
	.irq_bus_sync_unlock	= regmap_irq_sync_unlock,
	.irq_disable		= regmap_irq_disable,
	.irq_enable		= regmap_irq_enable,
	.irq_set_type		= regmap_irq_set_type,
	.irq_set_wake		= regmap_irq_set_wake,
};

static irqreturn_t regmap_irq_thread(int irq, void *d)
{
	struct regmap_irq_chip_data *data = d;
Mark Brown's avatar
Mark Brown committed
265
	const struct regmap_irq_chip *chip = data->chip;
266
267
	struct regmap *map = data->map;
	int ret, i;
268
	bool handled = false;
269
	u32 reg;
270

271
272
273
274
275
	if (chip->runtime_pm) {
		ret = pm_runtime_get_sync(map->dev);
		if (ret < 0) {
			dev_err(map->dev, "IRQ thread failed to resume: %d\n",
				ret);
276
			pm_runtime_put(map->dev);
277
278
279
280
			return IRQ_NONE;
		}
	}

281
282
283
284
	/*
	 * Read in the statuses, using a single bulk read if possible
	 * in order to reduce the I/O overheads.
	 */
285
	if (!map->use_single_read && map->reg_stride == 1 &&
286
287
288
289
	    data->irq_reg_stride == 1) {
		u8 *buf8 = data->status_reg_buf;
		u16 *buf16 = data->status_reg_buf;
		u32 *buf32 = data->status_reg_buf;
290

291
292
293
294
295
		BUG_ON(!data->status_reg_buf);

		ret = regmap_bulk_read(map, chip->status_base,
				       data->status_reg_buf,
				       chip->num_regs);
296
297
		if (ret != 0) {
			dev_err(map->dev, "Failed to read IRQ status: %d\n",
298
				ret);
299
300
			return IRQ_NONE;
		}
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334

		for (i = 0; i < data->chip->num_regs; i++) {
			switch (map->format.val_bytes) {
			case 1:
				data->status_buf[i] = buf8[i];
				break;
			case 2:
				data->status_buf[i] = buf16[i];
				break;
			case 4:
				data->status_buf[i] = buf32[i];
				break;
			default:
				BUG();
				return IRQ_NONE;
			}
		}

	} else {
		for (i = 0; i < data->chip->num_regs; i++) {
			ret = regmap_read(map, chip->status_base +
					  (i * map->reg_stride
					   * data->irq_reg_stride),
					  &data->status_buf[i]);

			if (ret != 0) {
				dev_err(map->dev,
					"Failed to read IRQ status: %d\n",
					ret);
				if (chip->runtime_pm)
					pm_runtime_put(map->dev);
				return IRQ_NONE;
			}
		}
335
	}
336

337
338
339
340
341
342
343
344
	/*
	 * Ignore masked IRQs and ack if we need to; we ack early so
	 * there is no race between handling and acknowleding the
	 * interrupt.  We assume that typically few of the interrupts
	 * will fire simultaneously so don't worry about overhead from
	 * doing a write per register.
	 */
	for (i = 0; i < data->chip->num_regs; i++) {
345
346
		data->status_buf[i] &= ~data->mask_buf[i];

347
		if (data->status_buf[i] && (chip->ack_base || chip->use_ack)) {
348
349
350
			reg = chip->ack_base +
				(i * map->reg_stride * data->irq_reg_stride);
			ret = regmap_write(map, reg, data->status_buf[i]);
351
352
			if (ret != 0)
				dev_err(map->dev, "Failed to ack 0x%x: %d\n",
353
					reg, ret);
354
355
356
357
		}
	}

	for (i = 0; i < chip->num_irqs; i++) {
358
359
		if (data->status_buf[chip->irqs[i].reg_offset /
				     map->reg_stride] & chip->irqs[i].mask) {
360
			handle_nested_irq(irq_find_mapping(data->domain, i));
361
			handled = true;
362
363
364
		}
	}

365
366
367
	if (chip->runtime_pm)
		pm_runtime_put(map->dev);

368
369
370
371
	if (handled)
		return IRQ_HANDLED;
	else
		return IRQ_NONE;
372
373
}

374
375
376
377
378
379
/* irq_domain map callback: wire a freshly created virq to this chip */
static int regmap_irq_map(struct irq_domain *h, unsigned int virq,
			  irq_hw_number_t hw)
{
	struct regmap_irq_chip_data *data = h->host_data;

	irq_set_chip_data(virq, data);
	irq_set_chip(virq, &data->irq_chip);
	/* Handlers run from the chained IRQ thread, so mark them nested */
	irq_set_nested_thread(virq, 1);
	irq_set_noprobe(virq);

	return 0;
}

387
/* Domain ops: standard two-cell DT translation, custom map */
static const struct irq_domain_ops regmap_domain_ops = {
	.map	= regmap_irq_map,
	.xlate	= irq_domain_xlate_twocell,
};

392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
/**
 * regmap_add_irq_chip(): Use standard regmap IRQ controller handling
 *
 * @map:       The regmap for the device.
 * @irq:       The IRQ the device uses to signal interrupts
 * @irq_flags: The IRQF_ flags to use for the primary interrupt.
 * @irq_base:  Allocate specific IRQ numbers if non-zero (legacy domain);
 *             otherwise a linear domain is used.
 * @chip:      Configuration for the interrupt controller.
 * @data:      Runtime data structure for the controller, allocated on success
 *
 * Returns 0 on success or an errno on failure.
 *
 * In order for this to be efficient the chip really should use a
 * register cache.  The chip driver is responsible for restoring the
 * register values used by the IRQ controller over suspend and resume.
 */
int regmap_add_irq_chip(struct regmap *map, int irq, int irq_flags,
			int irq_base, const struct regmap_irq_chip *chip,
			struct regmap_irq_chip_data **data)
{
	struct regmap_irq_chip_data *d;
	int i;
	int ret = -ENOMEM;
	u32 reg;
	u32 unmask_offset;

	if (chip->num_regs <= 0)
		return -EINVAL;

	/* Every IRQ's register offset must be stride-aligned and in range */
	for (i = 0; i < chip->num_irqs; i++) {
		if (chip->irqs[i].reg_offset % map->reg_stride)
			return -EINVAL;
		if (chip->irqs[i].reg_offset / map->reg_stride >=
		    chip->num_regs)
			return -EINVAL;
	}

	if (irq_base) {
		irq_base = irq_alloc_descs(irq_base, 0, chip->num_irqs, 0);
		if (irq_base < 0) {
			dev_warn(map->dev, "Failed to allocate IRQs: %d\n",
				 irq_base);
			return irq_base;
		}
	}

	d = kzalloc(sizeof(*d), GFP_KERNEL);
	if (!d)
		return -ENOMEM;

	d->status_buf = kcalloc(chip->num_regs, sizeof(unsigned int),
				GFP_KERNEL);
	if (!d->status_buf)
		goto err_alloc;

	d->mask_buf = kcalloc(chip->num_regs, sizeof(unsigned int),
			      GFP_KERNEL);
	if (!d->mask_buf)
		goto err_alloc;

	d->mask_buf_def = kcalloc(chip->num_regs, sizeof(unsigned int),
				  GFP_KERNEL);
	if (!d->mask_buf_def)
		goto err_alloc;

	/* Wake tracking only exists for chips with a wake register bank */
	if (chip->wake_base) {
		d->wake_buf = kcalloc(chip->num_regs, sizeof(unsigned int),
				      GFP_KERNEL);
		if (!d->wake_buf)
			goto err_alloc;
	}

	/* Trigger-type buffers only for chips with type registers */
	if (chip->num_type_reg) {
		d->type_buf_def = kcalloc(chip->num_type_reg,
					sizeof(unsigned int), GFP_KERNEL);
		if (!d->type_buf_def)
			goto err_alloc;

		d->type_buf = kcalloc(chip->num_type_reg, sizeof(unsigned int),
				      GFP_KERNEL);
		if (!d->type_buf)
			goto err_alloc;
	}

	/* Copy the template chip so the name can be per-instance */
	d->irq_chip = regmap_irq_chip;
	d->irq_chip.name = chip->name;
	d->irq = irq;
	d->map = map;
	d->chip = chip;
	d->irq_base = irq_base;

	if (chip->irq_reg_stride)
		d->irq_reg_stride = chip->irq_reg_stride;
	else
		d->irq_reg_stride = 1;

	if (chip->type_reg_stride)
		d->type_reg_stride = chip->type_reg_stride;
	else
		d->type_reg_stride = 1;

	/* Same condition as the bulk-read fast path in the IRQ thread */
	if (!map->use_single_read && map->reg_stride == 1 &&
	    d->irq_reg_stride == 1) {
		d->status_reg_buf = kmalloc_array(chip->num_regs,
						  map->format.val_bytes,
						  GFP_KERNEL);
		if (!d->status_reg_buf)
			goto err_alloc;
	}

	mutex_init(&d->lock);

	for (i = 0; i < chip->num_irqs; i++)
		d->mask_buf_def[chip->irqs[i].reg_offset / map->reg_stride]
			|= chip->irqs[i].mask;

	/* Mask all the interrupts by default */
	for (i = 0; i < chip->num_regs; i++) {
		d->mask_buf[i] = d->mask_buf_def[i];
		reg = chip->mask_base +
			(i * map->reg_stride * d->irq_reg_stride);
		if (chip->mask_invert)
			ret = regmap_update_bits(map, reg,
					 d->mask_buf[i], ~d->mask_buf[i]);
		else if (d->chip->unmask_base) {
			/* Separate unmask register: set all mask bits there */
			unmask_offset = d->chip->unmask_base -
					d->chip->mask_base;
			ret = regmap_update_bits(d->map,
					reg + unmask_offset,
					d->mask_buf[i],
					d->mask_buf[i]);
		} else
			ret = regmap_update_bits(map, reg,
					 d->mask_buf[i], d->mask_buf[i]);
		if (ret != 0) {
			dev_err(map->dev, "Failed to set masks in 0x%x: %d\n",
				reg, ret);
			goto err_alloc;
		}

		if (!chip->init_ack_masked)
			continue;

		/* Ack masked but set interrupts */
		reg = chip->status_base +
			(i * map->reg_stride * d->irq_reg_stride);
		ret = regmap_read(map, reg, &d->status_buf[i]);
		if (ret != 0) {
			dev_err(map->dev, "Failed to read IRQ status: %d\n",
				ret);
			goto err_alloc;
		}

		if (d->status_buf[i] && (chip->ack_base || chip->use_ack)) {
			reg = chip->ack_base +
				(i * map->reg_stride * d->irq_reg_stride);
			if (chip->ack_invert)
				ret = regmap_write(map, reg,
					~(d->status_buf[i] & d->mask_buf[i]));
			else
				ret = regmap_write(map, reg,
					d->status_buf[i] & d->mask_buf[i]);
			if (ret != 0) {
				dev_err(map->dev, "Failed to ack 0x%x: %d\n",
					reg, ret);
				goto err_alloc;
			}
		}
	}

	/* Wake is disabled by default */
	if (d->wake_buf) {
		for (i = 0; i < chip->num_regs; i++) {
			d->wake_buf[i] = d->mask_buf_def[i];
			reg = chip->wake_base +
				(i * map->reg_stride * d->irq_reg_stride);

			if (chip->wake_invert)
				ret = regmap_update_bits(map, reg,
							 d->mask_buf_def[i],
							 0);
			else
				ret = regmap_update_bits(map, reg,
							 d->mask_buf_def[i],
							 d->wake_buf[i]);
			if (ret != 0) {
				dev_err(map->dev, "Failed to set masks in 0x%x: %d\n",
					reg, ret);
				goto err_alloc;
			}
		}
	}

	/* Default all trigger-type registers to their reset-ish state */
	if (chip->num_type_reg) {
		for (i = 0; i < chip->num_irqs; i++) {
			reg = chip->irqs[i].type_reg_offset / map->reg_stride;
			d->type_buf_def[reg] |= chip->irqs[i].type_rising_mask |
					chip->irqs[i].type_falling_mask;
		}
		for (i = 0; i < chip->num_type_reg; ++i) {
			if (!d->type_buf_def[i])
				continue;

			reg = chip->type_base +
				(i * map->reg_stride * d->type_reg_stride);
			if (chip->type_invert)
				ret = regmap_update_bits(map, reg,
					d->type_buf_def[i], 0xFF);
			else
				ret = regmap_update_bits(map, reg,
					d->type_buf_def[i], 0x0);
			if (ret != 0) {
				dev_err(map->dev,
					"Failed to set type in 0x%x: %x\n",
					reg, ret);
				goto err_alloc;
			}
		}
	}

	if (irq_base)
		d->domain = irq_domain_add_legacy(map->dev->of_node,
						  chip->num_irqs, irq_base, 0,
						  &regmap_domain_ops, d);
	else
		d->domain = irq_domain_add_linear(map->dev->of_node,
						  chip->num_irqs,
						  &regmap_domain_ops, d);
	if (!d->domain) {
		dev_err(map->dev, "Failed to create IRQ domain\n");
		ret = -ENOMEM;
		goto err_alloc;
	}

	/* ONESHOT keeps the line masked until the thread completes */
	ret = request_threaded_irq(irq, NULL, regmap_irq_thread,
				   irq_flags | IRQF_ONESHOT,
				   chip->name, d);
	if (ret != 0) {
		dev_err(map->dev, "Failed to request IRQ %d for %s: %d\n",
			irq, chip->name, ret);
		goto err_domain;
	}

	*data = d;

	return 0;

err_domain:
	/* Should really dispose of the domain but... */
err_alloc:
	kfree(d->type_buf);
	kfree(d->type_buf_def);
	kfree(d->wake_buf);
	kfree(d->mask_buf_def);
	kfree(d->mask_buf);
	kfree(d->status_buf);
	kfree(d->status_reg_buf);
	kfree(d);
	return ret;
}
EXPORT_SYMBOL_GPL(regmap_add_irq_chip);

/**
 * regmap_del_irq_chip(): Stop interrupt handling for a regmap IRQ chip
 *
 * @irq: Primary IRQ for the device
 * @d:   regmap_irq_chip_data allocated by regmap_add_irq_chip()
 *
 * This function also disposes of all mapped IRQs on the chip before
 * removing the domain.  Safe to call with a NULL @d.
 */
void regmap_del_irq_chip(int irq, struct regmap_irq_chip_data *d)
{
	unsigned int virq;
	int hwirq;

	if (!d)
		return;

	free_irq(irq, d);

	/* Dispose all virtual irq from irq domain before removing it */
	for (hwirq = 0; hwirq < d->chip->num_irqs; hwirq++) {
		/* Ignore hwirq if holes in the IRQ list */
		if (!d->chip->irqs[hwirq].mask)
			continue;

		/*
		 * Find the virtual irq of hwirq on chip and if it is
		 * there then dispose it
		 */
		virq = irq_find_mapping(d->domain, hwirq);
		if (virq)
			irq_dispose_mapping(virq);
	}

	irq_domain_remove(d->domain);
	kfree(d->type_buf);
	kfree(d->type_buf_def);
	kfree(d->wake_buf);
	kfree(d->mask_buf_def);
	kfree(d->mask_buf);
	kfree(d->status_reg_buf);
	kfree(d->status_buf);
	kfree(d);
}
EXPORT_SYMBOL_GPL(regmap_del_irq_chip);
697

698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
/* devres destructor: tears down a chip added via devm_regmap_add_irq_chip() */
static void devm_regmap_irq_chip_release(struct device *dev, void *res)
{
	struct regmap_irq_chip_data *d = *(struct regmap_irq_chip_data **)res;

	regmap_del_irq_chip(d->irq, d);
}

/* devres match callback: true when the tracked chip data equals @data */
static int devm_regmap_irq_chip_match(struct device *dev, void *res, void *data)
{
	struct regmap_irq_chip_data **r = res;

	if (WARN_ON(!r || !*r))
		return 0;

	return *r == data;
}

/**
 * devm_regmap_add_irq_chip(): Resource managed regmap_add_irq_chip()
 *
 * @dev:       The device pointer on which irq_chip belongs to.
 * @map:       The regmap for the device.
 * @irq:       The IRQ the device uses to signal interrupts
 * @irq_flags: The IRQF_ flags to use for the primary interrupt.
 * @irq_base:  Allocate specific IRQ numbers if non-zero.
 * @chip:      Configuration for the interrupt controller.
 * @data:      Runtime data structure for the controller, allocated on success
 *
 * Returns 0 on success or an errno on failure.
 *
 * The regmap_irq_chip data is automatically released when the device is
 * unbound.
 */
int devm_regmap_add_irq_chip(struct device *dev, struct regmap *map, int irq,
			     int irq_flags, int irq_base,
			     const struct regmap_irq_chip *chip,
			     struct regmap_irq_chip_data **data)
{
	struct regmap_irq_chip_data **ptr, *d;
	int ret;

	/* Allocate the devres tracker first so registration can't outlive it */
	ptr = devres_alloc(devm_regmap_irq_chip_release, sizeof(*ptr),
			   GFP_KERNEL);
	if (!ptr)
		return -ENOMEM;

	ret = regmap_add_irq_chip(map, irq, irq_flags, irq_base,
				  chip, &d);
	if (ret < 0) {
		devres_free(ptr);
		return ret;
	}

	*ptr = d;
	devres_add(dev, ptr);
	*data = d;
	return 0;
}
EXPORT_SYMBOL_GPL(devm_regmap_add_irq_chip);

/**
 * devm_regmap_del_irq_chip(): Resource managed regmap_del_irq_chip()
 *
 * @dev:  Device for which the resource was allocated.
 * @irq:  Primary IRQ for the device
 * @data: regmap_irq_chip_data allocated by regmap_add_irq_chip()
 *
 * Triggers the devres release, which calls regmap_del_irq_chip().
 */
void devm_regmap_del_irq_chip(struct device *dev, int irq,
			      struct regmap_irq_chip_data *data)
{
	int rc;

	WARN_ON(irq != data->irq);

	rc = devres_release(dev, devm_regmap_irq_chip_release,
			    devm_regmap_irq_chip_match, data);
	WARN_ON(rc != 0);
}
EXPORT_SYMBOL_GPL(devm_regmap_del_irq_chip);

780
781
782
783
784
785
786
787
788
/**
 * regmap_irq_chip_get_base(): Retrieve interrupt base for a regmap IRQ chip
 *
 * Useful for drivers to request their own IRQs.
 *
 * @data: regmap_irq controller to operate on.
 *
 * Only meaningful for chips registered with a non-zero irq_base; the
 * WARN_ON flags callers relying on a base with a linear domain.
 */
int regmap_irq_chip_get_base(struct regmap_irq_chip_data *data)
{
	WARN_ON(!data->irq_base);
	return data->irq_base;
}
EXPORT_SYMBOL_GPL(regmap_irq_chip_get_base);
793
794
795
796
797
798
799
800
801
802
803

/**
 * regmap_irq_get_virq(): Map an interrupt on a chip to a virtual IRQ
 *
 * Useful for drivers to request their own IRQs.
 *
 * @data: regmap_irq controller to operate on.
 * @irq: index of the interrupt requested in the chip IRQs
 *
 * Returns the virq, or -EINVAL for an index with no IRQ behind it.
 */
int regmap_irq_get_virq(struct regmap_irq_chip_data *data, int irq)
{
	/* Handle holes in the IRQ list */
	if (!data->chip->irqs[irq].mask)
		return -EINVAL;

	return irq_create_mapping(data->domain, irq);
}
EXPORT_SYMBOL_GPL(regmap_irq_get_virq);
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829

/**
 * regmap_irq_get_domain(): Retrieve the irq_domain for the chip
 *
 * Useful for drivers to request their own IRQs and for integration
 * with subsystems.  For ease of integration NULL is accepted as a
 * domain, allowing devices to just call this even if no domain is
 * allocated.
 *
 * @data: regmap_irq controller to operate on, may be NULL.
 */
struct irq_domain *regmap_irq_get_domain(struct regmap_irq_chip_data *data)
{
	return data ? data->domain : NULL;
}
EXPORT_SYMBOL_GPL(regmap_irq_get_domain);