regmap-irq.c 24.7 KB
Newer Older
1
2
3
4
5
6
7
// SPDX-License-Identifier: GPL-2.0
//
// regmap based irq_chip
//
// Copyright 2011 Wolfson Microelectronics plc
//
// Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
8

9
#include <linux/device.h>
10
#include <linux/export.h>
11
#include <linux/interrupt.h>
12
#include <linux/irq.h>
13
#include <linux/irqdomain.h>
14
#include <linux/pm_runtime.h>
15
#include <linux/regmap.h>
16
17
18
19
20
21
#include <linux/slab.h>

#include "internal.h"

/*
 * Runtime state for one regmap-based interrupt controller instance,
 * allocated by regmap_add_irq_chip() and freed by regmap_del_irq_chip().
 */
struct regmap_irq_chip_data {
	struct mutex lock;		/* taken in irq_bus_lock, dropped in irq_bus_sync_unlock */
	struct irq_chip irq_chip;	/* per-instance copy of regmap_irq_chip template */

	struct regmap *map;			/* register map used for all HW accesses */
	const struct regmap_irq_chip *chip;	/* static description supplied by the driver */

	int irq_base;			/* first allocated descriptor when legacy mapping is used */
	struct irq_domain *domain;	/* linear or legacy domain for the sub-IRQs */

	int irq;			/* primary (parent) IRQ line */
	int wake_count;			/* net irq_set_wake() delta to push to the parent on unlock */

	void *status_reg_buf;		/* raw bulk-read buffer (val_bytes-sized elements) */
	unsigned int *main_status_buf;	/* per-main-register status, only when chip->num_main_regs */

	unsigned int *status_buf;	/* decoded per-register status */
	unsigned int *mask_buf;		/* current mask state, written back on sync_unlock */
	unsigned int *mask_buf_def;	/* union of all irq masks per register (write width) */
	unsigned int *wake_buf;		/* wake mask state, only when chip->wake_base */

	unsigned int *type_buf;		/* pending irq type bits per type register */
	unsigned int *type_buf_def;	/* default/readback type bits per type register */

	unsigned int irq_reg_stride;	/* register step between successive status/mask regs */
	unsigned int type_reg_stride;	/* register step between successive type regs */

	/* set by regmap_irq_enable() for clear_on_unmask chips; makes the
	 * next sync_unlock read (and thereby clear) all status registers */
	bool clear_status:1;
};

/* Map a hwirq index to its static per-IRQ description in the chip table. */
static inline const
struct regmap_irq *irq_to_regmap_irq(struct regmap_irq_chip_data *data,
				     int irq)
{
	return &data->chip->irqs[irq];
}

/*
 * irq_bus_lock callback: serialise updates to the cached mask/type/wake
 * buffers.  The matching unlock (regmap_irq_sync_unlock) flushes the
 * buffers to hardware before releasing the mutex.
 */
static void regmap_irq_lock(struct irq_data *data)
{
	struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);

	mutex_lock(&d->lock);
}

62
63
64
65
66
67
68
69
70
71
/*
 * Update @mask bits of @reg to @val.  Chips with write-only mask
 * registers cannot be read back, so use regmap_write_bits() (forces the
 * write) instead of the read-modify-write regmap_update_bits().
 */
static int regmap_irq_update_bits(struct regmap_irq_chip_data *d,
				  unsigned int reg, unsigned int mask,
				  unsigned int val)
{
	if (d->chip->mask_writeonly)
		return regmap_write_bits(d->map, reg, mask, val);
	else
		return regmap_update_bits(d->map, reg, mask, val);
}

72
73
74
/*
 * irq_bus_sync_unlock callback: push all cached mask/wake/type changes
 * out to the hardware, ack anything that needs acking, propagate wake
 * count changes to the parent IRQ and finally drop the bus lock taken
 * in regmap_irq_lock().
 */
static void regmap_irq_sync_unlock(struct irq_data *data)
{
	struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
	struct regmap *map = d->map;
	int i, ret;
	u32 reg;
	u32 unmask_offset;
	u32 val;

	if (d->chip->runtime_pm) {
		ret = pm_runtime_get_sync(map->dev);
		if (ret < 0)
			dev_err(map->dev, "IRQ sync failed to resume: %d\n",
				ret);
	}

	if (d->clear_status) {
		/* For clear_on_unmask chips a read of the status register
		 * clears pending state; discard the value read. */
		for (i = 0; i < d->chip->num_regs; i++) {
			reg = d->chip->status_base +
				(i * map->reg_stride * d->irq_reg_stride);

			ret = regmap_read(map, reg, &val);
			if (ret)
				dev_err(d->map->dev,
					"Failed to clear the interrupt status bits\n");
		}

		d->clear_status = false;
	}

	/*
	 * If there's been a change in the mask write it back to the
	 * hardware.  We rely on the use of the regmap core cache to
	 * suppress pointless writes.
	 */
	for (i = 0; i < d->chip->num_regs; i++) {
		if (!d->chip->mask_base)
			continue;

		reg = d->chip->mask_base +
			(i * map->reg_stride * d->irq_reg_stride);
		if (d->chip->mask_invert) {
			ret = regmap_irq_update_bits(d, reg,
					 d->mask_buf_def[i], ~d->mask_buf[i]);
		} else if (d->chip->unmask_base) {
			/* set mask with mask_base register */
			ret = regmap_irq_update_bits(d, reg,
					d->mask_buf_def[i], ~d->mask_buf[i]);
			if (ret < 0)
				dev_err(d->map->dev,
					"Failed to sync unmasks in %x\n",
					reg);
			unmask_offset = d->chip->unmask_base -
							d->chip->mask_base;
			/* clear mask with unmask_base register */
			ret = regmap_irq_update_bits(d,
					reg + unmask_offset,
					d->mask_buf_def[i],
					d->mask_buf[i]);
		} else {
			ret = regmap_irq_update_bits(d, reg,
					 d->mask_buf_def[i], d->mask_buf[i]);
		}
		if (ret != 0)
			dev_err(d->map->dev, "Failed to sync masks in %x\n",
				reg);

		reg = d->chip->wake_base +
			(i * map->reg_stride * d->irq_reg_stride);
		if (d->wake_buf) {
			if (d->chip->wake_invert)
				ret = regmap_irq_update_bits(d, reg,
							 d->mask_buf_def[i],
							 ~d->wake_buf[i]);
			else
				ret = regmap_irq_update_bits(d, reg,
							 d->mask_buf_def[i],
							 d->wake_buf[i]);
			if (ret != 0)
				dev_err(d->map->dev,
					"Failed to sync wakes in %x: %d\n",
					reg, ret);
		}

		if (!d->chip->init_ack_masked)
			continue;
		/*
		 * Ack all the masked interrupts unconditionally,
		 * OR if there is masked interrupt which hasn't been Acked,
		 * it'll be ignored in irq handler, then may introduce irq storm
		 */
		if (d->mask_buf[i] && (d->chip->ack_base || d->chip->use_ack)) {
			reg = d->chip->ack_base +
				(i * map->reg_stride * d->irq_reg_stride);
			/* some chips ack by write 0 */
			if (d->chip->ack_invert)
				ret = regmap_write(map, reg, ~d->mask_buf[i]);
			else
				ret = regmap_write(map, reg, d->mask_buf[i]);
			if (ret != 0)
				dev_err(d->map->dev, "Failed to ack 0x%x: %d\n",
					reg, ret);
		}
	}

	/* Don't update the type bits if we're using mask bits for irq type. */
	if (!d->chip->type_in_mask) {
		for (i = 0; i < d->chip->num_type_reg; i++) {
			if (!d->type_buf_def[i])
				continue;
			reg = d->chip->type_base +
				(i * map->reg_stride * d->type_reg_stride);
			if (d->chip->type_invert)
				ret = regmap_irq_update_bits(d, reg,
					d->type_buf_def[i], ~d->type_buf[i]);
			else
				ret = regmap_irq_update_bits(d, reg,
					d->type_buf_def[i], d->type_buf[i]);
			if (ret != 0)
				dev_err(d->map->dev, "Failed to sync type in %x\n",
					reg);
		}
	}

	if (d->chip->runtime_pm)
		pm_runtime_put(map->dev);

	/* If we've changed our wakeup count propagate it to the parent */
	if (d->wake_count < 0)
		for (i = d->wake_count; i < 0; i++)
			irq_set_irq_wake(d->irq, 0);
	else if (d->wake_count > 0)
		for (i = 0; i < d->wake_count; i++)
			irq_set_irq_wake(d->irq, 1);

	d->wake_count = 0;

	mutex_unlock(&d->lock);
}

177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
	/* Don't update the type bits if we're using mask bits for irq type. */
	if (!d->chip->type_in_mask) {
		for (i = 0; i < d->chip->num_type_reg; i++) {
			if (!d->type_buf_def[i])
				continue;
			reg = d->chip->type_base +
				(i * map->reg_stride * d->type_reg_stride);
			if (d->chip->type_invert)
				ret = regmap_irq_update_bits(d, reg,
					d->type_buf_def[i], ~d->type_buf[i]);
			else
				ret = regmap_irq_update_bits(d, reg,
					d->type_buf_def[i], d->type_buf[i]);
			if (ret != 0)
				dev_err(d->map->dev, "Failed to sync type in %x\n",
					reg);
		}
194
195
	}

196
197
198
	if (d->chip->runtime_pm)
		pm_runtime_put(map->dev);

199
200
201
202
203
204
205
206
207
208
	/* If we've changed our wakeup count propagate it to the parent */
	if (d->wake_count < 0)
		for (i = d->wake_count; i < 0; i++)
			irq_set_irq_wake(d->irq, 0);
	else if (d->wake_count > 0)
		for (i = 0; i < d->wake_count; i++)
			irq_set_irq_wake(d->irq, 1);

	d->wake_count = 0;

209
210
211
212
213
214
	mutex_unlock(&d->lock);
}

/*
 * irq_enable callback: clear the interrupt's bit(s) in the cached mask
 * buffer.  The actual hardware write happens later in
 * regmap_irq_sync_unlock().
 */
static void regmap_irq_enable(struct irq_data *data)
{
	struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
	struct regmap *map = d->map;
	const struct regmap_irq *irq_data = irq_to_regmap_irq(d, data->hwirq);
	unsigned int mask, type;

	type = irq_data->type.type_falling_val | irq_data->type.type_rising_val;

	/*
	 * The type_in_mask flag means that the underlying hardware uses
	 * separate mask bits for rising and falling edge interrupts, but
	 * we want to make them into a single virtual interrupt with
	 * configurable edge.
	 *
	 * If the interrupt we're enabling defines the falling or rising
	 * masks then instead of using the regular mask bits for this
	 * interrupt, use the value previously written to the type buffer
	 * at the corresponding offset in regmap_irq_set_type().
	 */
	if (d->chip->type_in_mask && type)
		mask = d->type_buf[irq_data->reg_offset / map->reg_stride];
	else
		mask = irq_data->mask;

	if (d->chip->clear_on_unmask)
		d->clear_status = true;

	d->mask_buf[irq_data->reg_offset / map->reg_stride] &= ~mask;
}

/*
 * irq_disable callback: set the interrupt's bit in the cached mask
 * buffer; flushed to hardware in regmap_irq_sync_unlock().
 */
static void regmap_irq_disable(struct irq_data *data)
{
	struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
	struct regmap *map = d->map;
	const struct regmap_irq *irq_data = irq_to_regmap_irq(d, data->hwirq);

	d->mask_buf[irq_data->reg_offset / map->reg_stride] |= irq_data->mask;
}

252
253
254
255
256
/*
 * irq_set_type callback: record the requested trigger type in the
 * cached type buffer.  Types the chip does not declare in
 * types_supported are silently accepted (return 0) without any change;
 * unknown types are rejected with -EINVAL.  Hardware is updated later
 * in regmap_irq_sync_unlock() (or via the mask path for type_in_mask).
 */
static int regmap_irq_set_type(struct irq_data *data, unsigned int type)
{
	struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
	struct regmap *map = d->map;
	const struct regmap_irq *irq_data = irq_to_regmap_irq(d, data->hwirq);
	int reg;
	const struct regmap_irq_type *t = &irq_data->type;

	if ((t->types_supported & type) != type)
		return 0;

	reg = t->type_reg_offset / map->reg_stride;

	/* Clear the old type bits first; use the explicit register mask if
	 * the chip provides one, otherwise the union of all type values. */
	if (t->type_reg_mask)
		d->type_buf[reg] &= ~t->type_reg_mask;
	else
		d->type_buf[reg] &= ~(t->type_falling_val |
				      t->type_rising_val |
				      t->type_level_low_val |
				      t->type_level_high_val);
	switch (type) {
	case IRQ_TYPE_EDGE_FALLING:
		d->type_buf[reg] |= t->type_falling_val;
		break;

	case IRQ_TYPE_EDGE_RISING:
		d->type_buf[reg] |= t->type_rising_val;
		break;

	case IRQ_TYPE_EDGE_BOTH:
		d->type_buf[reg] |= (t->type_falling_val |
					t->type_rising_val);
		break;

	case IRQ_TYPE_LEVEL_HIGH:
		d->type_buf[reg] |= t->type_level_high_val;
		break;

	case IRQ_TYPE_LEVEL_LOW:
		d->type_buf[reg] |= t->type_level_low_val;
		break;

	default:
		return -EINVAL;
	}
	return 0;
}

299
300
301
302
303
304
305
/*
 * irq_set_wake callback: track per-IRQ wake state in the cached wake
 * buffer (if the chip has one) and count the net change so
 * regmap_irq_sync_unlock() can forward it to the parent IRQ.
 * Note the wake buffer uses mask semantics: bit clear == wake enabled.
 */
static int regmap_irq_set_wake(struct irq_data *data, unsigned int on)
{
	struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
	struct regmap *map = d->map;
	const struct regmap_irq *irq_data = irq_to_regmap_irq(d, data->hwirq);

	if (on) {
		if (d->wake_buf)
			d->wake_buf[irq_data->reg_offset / map->reg_stride]
				&= ~irq_data->mask;
		d->wake_count++;
	} else {
		if (d->wake_buf)
			d->wake_buf[irq_data->reg_offset / map->reg_stride]
				|= irq_data->mask;
		d->wake_count--;
	}

	return 0;
}

320
/*
 * Template irq_chip; copied into each regmap_irq_chip_data instance so
 * the name can be set per-chip in regmap_add_irq_chip().
 */
static const struct irq_chip regmap_irq_chip = {
	.irq_bus_lock		= regmap_irq_lock,
	.irq_bus_sync_unlock	= regmap_irq_sync_unlock,
	.irq_disable		= regmap_irq_disable,
	.irq_enable		= regmap_irq_enable,
	.irq_set_type		= regmap_irq_set_type,
	.irq_set_wake		= regmap_irq_set_wake,
};

329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
/*
 * Read the sub-status register(s) behind bit @b of a main status
 * register into data->status_buf.  With no sub_reg_offsets table a
 * linear main-bit -> status-register mapping is assumed; otherwise each
 * offset listed for the bit is read.  Returns 0 or the first regmap
 * error.
 */
static inline int read_sub_irq_data(struct regmap_irq_chip_data *data,
					   unsigned int b)
{
	const struct regmap_irq_chip *chip = data->chip;
	struct regmap *map = data->map;
	struct regmap_irq_sub_irq_map *subreg;
	int i, ret = 0;

	if (!chip->sub_reg_offsets) {
		/* Assume linear mapping */
		ret = regmap_read(map, chip->status_base +
				  (b * map->reg_stride * data->irq_reg_stride),
				   &data->status_buf[b]);
	} else {
		subreg = &chip->sub_reg_offsets[b];
		for (i = 0; i < subreg->num_regs; i++) {
			unsigned int offset = subreg->offset[i];

			ret = regmap_read(map, chip->status_base + offset,
					  &data->status_buf[offset]);
			if (ret)
				break;
		}
	}
	return ret;
}

356
357
358
/*
 * Threaded handler for the primary IRQ: read all status registers
 * (choosing the cheapest strategy the chip allows), ack pending
 * interrupts, then dispatch each unmasked pending bit as a nested IRQ.
 * Returns IRQ_HANDLED if at least one sub-IRQ was dispatched.
 */
static irqreturn_t regmap_irq_thread(int irq, void *d)
{
	struct regmap_irq_chip_data *data = d;
	const struct regmap_irq_chip *chip = data->chip;
	struct regmap *map = data->map;
	int ret, i;
	bool handled = false;
	u32 reg;

	if (chip->handle_pre_irq)
		chip->handle_pre_irq(chip->irq_drv_data);

	if (chip->runtime_pm) {
		ret = pm_runtime_get_sync(map->dev);
		if (ret < 0) {
			dev_err(map->dev, "IRQ thread failed to resume: %d\n",
				ret);
			/* pm_runtime_get_sync bumps the usage count even on
			 * failure, so the put at exit: is still balanced */
			goto exit;
		}
	}

	/*
	 * Read only registers with active IRQs if the chip has 'main status
	 * register'. Else read in the statuses, using a single bulk read if
	 * possible in order to reduce the I/O overheads.
	 */
	if (chip->num_main_regs) {
		unsigned int max_main_bits;
		unsigned long size;

		size = chip->num_regs * sizeof(unsigned int);

		max_main_bits = (chip->num_main_status_bits) ?
				 chip->num_main_status_bits : chip->num_regs;
		/* Clear the status buf as we don't read all status regs */
		memset(data->status_buf, 0, size);

		/* We could support bulk read for main status registers
		 * but I don't expect to see devices with really many main
		 * status registers so let's only support single reads for the
		 * sake of simplicity. and add bulk reads only if needed
		 */
		for (i = 0; i < chip->num_main_regs; i++) {
			ret = regmap_read(map, chip->main_status +
				  (i * map->reg_stride
				   * data->irq_reg_stride),
				  &data->main_status_buf[i]);
			if (ret) {
				dev_err(map->dev,
					"Failed to read IRQ status %d\n",
					ret);
				goto exit;
			}
		}

		/* Read sub registers with active IRQs */
		for (i = 0; i < chip->num_main_regs; i++) {
			unsigned int b;
			const unsigned long mreg = data->main_status_buf[i];

			for_each_set_bit(b, &mreg, map->format.val_bytes * 8) {
				if (i * map->format.val_bytes * 8 + b >
				    max_main_bits)
					break;
				ret = read_sub_irq_data(data, b);

				if (ret != 0) {
					dev_err(map->dev,
						"Failed to read IRQ status %d\n",
						ret);
					goto exit;
				}
			}

		}
	} else if (!map->use_single_read && map->reg_stride == 1 &&
		   data->irq_reg_stride == 1) {

		/* One bulk read, then widen the raw values to unsigned int */
		u8 *buf8 = data->status_reg_buf;
		u16 *buf16 = data->status_reg_buf;
		u32 *buf32 = data->status_reg_buf;

		BUG_ON(!data->status_reg_buf);

		ret = regmap_bulk_read(map, chip->status_base,
				       data->status_reg_buf,
				       chip->num_regs);
		if (ret != 0) {
			dev_err(map->dev, "Failed to read IRQ status: %d\n",
				ret);
			goto exit;
		}

		for (i = 0; i < data->chip->num_regs; i++) {
			switch (map->format.val_bytes) {
			case 1:
				data->status_buf[i] = buf8[i];
				break;
			case 2:
				data->status_buf[i] = buf16[i];
				break;
			case 4:
				data->status_buf[i] = buf32[i];
				break;
			default:
				BUG();
				goto exit;
			}
		}

	} else {
		/* Fallback: one read per status register */
		for (i = 0; i < data->chip->num_regs; i++) {
			ret = regmap_read(map, chip->status_base +
					  (i * map->reg_stride
					   * data->irq_reg_stride),
					  &data->status_buf[i]);

			if (ret != 0) {
				dev_err(map->dev,
					"Failed to read IRQ status: %d\n",
					ret);
				goto exit;
			}
		}
	}

	/*
	 * Ignore masked IRQs and ack if we need to; we ack early so
	 * there is no race between handling and acknowleding the
	 * interrupt.  We assume that typically few of the interrupts
	 * will fire simultaneously so don't worry about overhead from
	 * doing a write per register.
	 */
	for (i = 0; i < data->chip->num_regs; i++) {
		data->status_buf[i] &= ~data->mask_buf[i];

		if (data->status_buf[i] && (chip->ack_base || chip->use_ack)) {
			reg = chip->ack_base +
				(i * map->reg_stride * data->irq_reg_stride);
			ret = regmap_write(map, reg, data->status_buf[i]);
			if (ret != 0)
				dev_err(map->dev, "Failed to ack 0x%x: %d\n",
					reg, ret);
		}
	}

	for (i = 0; i < chip->num_irqs; i++) {
		if (data->status_buf[chip->irqs[i].reg_offset /
				     map->reg_stride] & chip->irqs[i].mask) {
			handle_nested_irq(irq_find_mapping(data->domain, i));
			handled = true;
		}
	}

exit:
	if (chip->runtime_pm)
		pm_runtime_put(map->dev);

	if (chip->handle_post_irq)
		chip->handle_post_irq(chip->irq_drv_data);

	if (handled)
		return IRQ_HANDLED;
	else
		return IRQ_NONE;
}

524
525
526
527
528
529
/*
 * irq_domain map callback: wire a newly created virq to this chip's
 * irq_chip and data, mark it as a nested-thread IRQ parented to the
 * primary line, and exclude it from autoprobing.
 */
static int regmap_irq_map(struct irq_domain *h, unsigned int virq,
			  irq_hw_number_t hw)
{
	struct regmap_irq_chip_data *data = h->host_data;

	irq_set_chip_data(virq, data);
	irq_set_chip(virq, &data->irq_chip);
	irq_set_nested_thread(virq, 1);
	irq_set_parent(virq, data->irq);
	irq_set_noprobe(virq);

	return 0;
}

538
/* Domain ops; one- and two-cell DT interrupt specifiers are accepted. */
static const struct irq_domain_ops regmap_domain_ops = {
	.map	= regmap_irq_map,
	.xlate	= irq_domain_xlate_onetwocell,
};

543
/**
544
 * regmap_add_irq_chip() - Use standard regmap IRQ controller handling
545
 *
546
547
548
549
550
551
 * @map: The regmap for the device.
 * @irq: The IRQ the device uses to signal interrupts.
 * @irq_flags: The IRQF_ flags to use for the primary interrupt.
 * @irq_base: Allocate at specific IRQ number if irq_base > 0.
 * @chip: Configuration for the interrupt controller.
 * @data: Runtime data structure for the controller, allocated on success.
552
553
554
555
556
557
558
559
 *
 * Returns 0 on success or an errno on failure.
 *
 * In order for this to be efficient the chip really should use a
 * register cache.  The chip driver is responsible for restoring the
 * register values used by the IRQ controller over suspend and resume.
 */
int regmap_add_irq_chip(struct regmap *map, int irq, int irq_flags,
Mark Brown's avatar
Mark Brown committed
560
			int irq_base, const struct regmap_irq_chip *chip,
561
562
563
			struct regmap_irq_chip_data **data)
{
	struct regmap_irq_chip_data *d;
564
	int i;
565
	int ret = -ENOMEM;
566
	int num_type_reg;
567
	u32 reg;
568
	u32 unmask_offset;
569

570
571
572
	if (chip->num_regs <= 0)
		return -EINVAL;

573
574
575
	if (chip->clear_on_unmask && (chip->ack_base || chip->use_ack))
		return -EINVAL;

576
577
578
579
580
581
582
583
	for (i = 0; i < chip->num_irqs; i++) {
		if (chip->irqs[i].reg_offset % map->reg_stride)
			return -EINVAL;
		if (chip->irqs[i].reg_offset / map->reg_stride >=
		    chip->num_regs)
			return -EINVAL;
	}

584
585
586
587
588
589
590
	if (irq_base) {
		irq_base = irq_alloc_descs(irq_base, 0, chip->num_irqs, 0);
		if (irq_base < 0) {
			dev_warn(map->dev, "Failed to allocate IRQs: %d\n",
				 irq_base);
			return irq_base;
		}
591
592
593
594
595
596
	}

	d = kzalloc(sizeof(*d), GFP_KERNEL);
	if (!d)
		return -ENOMEM;

597
598
599
600
601
602
603
604
605
	if (chip->num_main_regs) {
		d->main_status_buf = kcalloc(chip->num_main_regs,
					     sizeof(unsigned int),
					     GFP_KERNEL);

		if (!d->main_status_buf)
			goto err_alloc;
	}

lixiubo's avatar
lixiubo committed
606
	d->status_buf = kcalloc(chip->num_regs, sizeof(unsigned int),
607
608
609
610
				GFP_KERNEL);
	if (!d->status_buf)
		goto err_alloc;

lixiubo's avatar
lixiubo committed
611
	d->mask_buf = kcalloc(chip->num_regs, sizeof(unsigned int),
612
613
614
615
			      GFP_KERNEL);
	if (!d->mask_buf)
		goto err_alloc;

lixiubo's avatar
lixiubo committed
616
	d->mask_buf_def = kcalloc(chip->num_regs, sizeof(unsigned int),
617
618
619
620
				  GFP_KERNEL);
	if (!d->mask_buf_def)
		goto err_alloc;

621
	if (chip->wake_base) {
lixiubo's avatar
lixiubo committed
622
		d->wake_buf = kcalloc(chip->num_regs, sizeof(unsigned int),
623
624
625
626
627
				      GFP_KERNEL);
		if (!d->wake_buf)
			goto err_alloc;
	}

628
629
630
631
	num_type_reg = chip->type_in_mask ? chip->num_regs : chip->num_type_reg;
	if (num_type_reg) {
		d->type_buf_def = kcalloc(num_type_reg,
					  sizeof(unsigned int), GFP_KERNEL);
632
633
634
		if (!d->type_buf_def)
			goto err_alloc;

635
		d->type_buf = kcalloc(num_type_reg, sizeof(unsigned int),
636
637
638
639
640
				      GFP_KERNEL);
		if (!d->type_buf)
			goto err_alloc;
	}

641
	d->irq_chip = regmap_irq_chip;
642
	d->irq_chip.name = chip->name;
643
	d->irq = irq;
644
645
646
	d->map = map;
	d->chip = chip;
	d->irq_base = irq_base;
647
648
649
650
651
652

	if (chip->irq_reg_stride)
		d->irq_reg_stride = chip->irq_reg_stride;
	else
		d->irq_reg_stride = 1;

653
654
655
656
657
	if (chip->type_reg_stride)
		d->type_reg_stride = chip->type_reg_stride;
	else
		d->type_reg_stride = 1;

658
	if (!map->use_single_read && map->reg_stride == 1 &&
659
	    d->irq_reg_stride == 1) {
660
661
662
		d->status_reg_buf = kmalloc_array(chip->num_regs,
						  map->format.val_bytes,
						  GFP_KERNEL);
663
664
665
666
		if (!d->status_reg_buf)
			goto err_alloc;
	}

667
668
669
	mutex_init(&d->lock);

	for (i = 0; i < chip->num_irqs; i++)
670
		d->mask_buf_def[chip->irqs[i].reg_offset / map->reg_stride]
671
672
673
674
675
			|= chip->irqs[i].mask;

	/* Mask all the interrupts by default */
	for (i = 0; i < chip->num_regs; i++) {
		d->mask_buf[i] = d->mask_buf_def[i];
676
677
678
		if (!chip->mask_base)
			continue;

679
680
		reg = chip->mask_base +
			(i * map->reg_stride * d->irq_reg_stride);
681
		if (chip->mask_invert)
682
			ret = regmap_irq_update_bits(d, reg,
683
					 d->mask_buf[i], ~d->mask_buf[i]);
684
685
686
		else if (d->chip->unmask_base) {
			unmask_offset = d->chip->unmask_base -
					d->chip->mask_base;
687
			ret = regmap_irq_update_bits(d,
688
689
690
691
					reg + unmask_offset,
					d->mask_buf[i],
					d->mask_buf[i]);
		} else
692
			ret = regmap_irq_update_bits(d, reg,
693
					 d->mask_buf[i], d->mask_buf[i]);
694
695
		if (ret != 0) {
			dev_err(map->dev, "Failed to set masks in 0x%x: %d\n",
696
				reg, ret);
697
698
			goto err_alloc;
		}
699
700
701
702
703
704
705
706
707
708
709
710
711
712

		if (!chip->init_ack_masked)
			continue;

		/* Ack masked but set interrupts */
		reg = chip->status_base +
			(i * map->reg_stride * d->irq_reg_stride);
		ret = regmap_read(map, reg, &d->status_buf[i]);
		if (ret != 0) {
			dev_err(map->dev, "Failed to read IRQ status: %d\n",
				ret);
			goto err_alloc;
		}

713
		if (d->status_buf[i] && (chip->ack_base || chip->use_ack)) {
714
715
			reg = chip->ack_base +
				(i * map->reg_stride * d->irq_reg_stride);
716
717
718
719
720
			if (chip->ack_invert)
				ret = regmap_write(map, reg,
					~(d->status_buf[i] & d->mask_buf[i]));
			else
				ret = regmap_write(map, reg,
721
722
723
724
725
726
727
					d->status_buf[i] & d->mask_buf[i]);
			if (ret != 0) {
				dev_err(map->dev, "Failed to ack 0x%x: %d\n",
					reg, ret);
				goto err_alloc;
			}
		}
728
729
	}

730
731
732
733
734
735
	/* Wake is disabled by default */
	if (d->wake_buf) {
		for (i = 0; i < chip->num_regs; i++) {
			d->wake_buf[i] = d->mask_buf_def[i];
			reg = chip->wake_base +
				(i * map->reg_stride * d->irq_reg_stride);
736
737

			if (chip->wake_invert)
738
				ret = regmap_irq_update_bits(d, reg,
739
740
741
							 d->mask_buf_def[i],
							 0);
			else
742
				ret = regmap_irq_update_bits(d, reg,
743
744
							 d->mask_buf_def[i],
							 d->wake_buf[i]);
745
746
747
748
749
750
751
752
			if (ret != 0) {
				dev_err(map->dev, "Failed to set masks in 0x%x: %d\n",
					reg, ret);
				goto err_alloc;
			}
		}
	}

753
	if (chip->num_type_reg && !chip->type_in_mask) {
754
755
756
		for (i = 0; i < chip->num_type_reg; ++i) {
			reg = chip->type_base +
				(i * map->reg_stride * d->type_reg_stride);
757
758
759
760
761
762
763
764

			ret = regmap_read(map, reg, &d->type_buf_def[i]);

			if (d->chip->type_invert)
				d->type_buf_def[i] = ~d->type_buf_def[i];

			if (ret) {
				dev_err(map->dev, "Failed to get type defaults at 0x%x: %d\n",
765
766
767
768
769
770
					reg, ret);
				goto err_alloc;
			}
		}
	}

771
772
773
774
775
776
777
778
779
780
781
782
	if (irq_base)
		d->domain = irq_domain_add_legacy(map->dev->of_node,
						  chip->num_irqs, irq_base, 0,
						  &regmap_domain_ops, d);
	else
		d->domain = irq_domain_add_linear(map->dev->of_node,
						  chip->num_irqs,
						  &regmap_domain_ops, d);
	if (!d->domain) {
		dev_err(map->dev, "Failed to create IRQ domain\n");
		ret = -ENOMEM;
		goto err_alloc;
783
784
	}

785
786
	ret = request_threaded_irq(irq, NULL, regmap_irq_thread,
				   irq_flags | IRQF_ONESHOT,
787
788
				   chip->name, d);
	if (ret != 0) {
789
790
		dev_err(map->dev, "Failed to request IRQ %d for %s: %d\n",
			irq, chip->name, ret);
791
		goto err_domain;
792
793
	}

794
795
	*data = d;

796
797
	return 0;

798
799
err_domain:
	/* Should really dispose of the domain but... */
800
err_alloc:
801
802
	kfree(d->type_buf);
	kfree(d->type_buf_def);
803
	kfree(d->wake_buf);
804
805
806
	kfree(d->mask_buf_def);
	kfree(d->mask_buf);
	kfree(d->status_buf);
807
	kfree(d->status_reg_buf);
808
809
810
811
812
813
	kfree(d);
	return ret;
}
EXPORT_SYMBOL_GPL(regmap_add_irq_chip);

/**
 * regmap_del_irq_chip() - Stop interrupt handling for a regmap IRQ chip
 *
 * @irq: Primary IRQ for the device
 * @d: &regmap_irq_chip_data allocated by regmap_add_irq_chip()
 *
 * This function also disposes of all mapped IRQs on the chip.
 */
void regmap_del_irq_chip(int irq, struct regmap_irq_chip_data *d)
{
	unsigned int virq;
	int hwirq;

	if (!d)
		return;

	free_irq(irq, d);

	/* Dispose all virtual irq from irq domain before removing it */
	for (hwirq = 0; hwirq < d->chip->num_irqs; hwirq++) {
		/* Ignore hwirq if holes in the IRQ list */
		if (!d->chip->irqs[hwirq].mask)
			continue;

		/*
		 * Find the virtual irq of hwirq on chip and if it is
		 * there then dispose it
		 */
		virq = irq_find_mapping(d->domain, hwirq);
		if (virq)
			irq_dispose_mapping(virq);
	}

	irq_domain_remove(d->domain);
	kfree(d->type_buf);
	kfree(d->type_buf_def);
	kfree(d->wake_buf);
	kfree(d->mask_buf_def);
	kfree(d->mask_buf);
	kfree(d->status_reg_buf);
	/* fix: main_status_buf (allocated for chips with a main status
	 * register) was previously leaked here */
	kfree(d->main_status_buf);
	kfree(d->status_buf);
	kfree(d);
}
EXPORT_SYMBOL_GPL(regmap_del_irq_chip);
857

858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
/* devres release callback: tear down the chip registered by
 * devm_regmap_add_irq_chip() when the owning device is unbound. */
static void devm_regmap_irq_chip_release(struct device *dev, void *res)
{
	struct regmap_irq_chip_data *d = *(struct regmap_irq_chip_data **)res;

	regmap_del_irq_chip(d->irq, d);
}

/* devres match callback: select the resource whose stored pointer is
 * @data.  A NULL resource or NULL stored pointer indicates corruption,
 * so warn and report no match. */
static int devm_regmap_irq_chip_match(struct device *dev, void *res, void *data)
{
	struct regmap_irq_chip_data **r = res;

	/* WARN_ON() evaluates to its condition, so this folds the original
	 * "check then warn" pair into a single guard */
	if (WARN_ON(!r || !*r))
		return 0;

	return *r == data;
}

/**
 * devm_regmap_add_irq_chip() - Resource manager regmap_add_irq_chip()
 *
 * @dev: The device pointer on which irq_chip belongs to.
 * @map: The regmap for the device.
 * @irq: The IRQ the device uses to signal interrupts
 * @irq_flags: The IRQF_ flags to use for the primary interrupt.
 * @irq_base: Allocate at specific IRQ number if irq_base > 0.
 * @chip: Configuration for the interrupt controller.
 * @data: Runtime data structure for the controller, allocated on success
 *
 * Returns 0 on success or an errno on failure.
 *
 * The &regmap_irq_chip_data will be automatically released when the device is
 * unbound.
 */
int devm_regmap_add_irq_chip(struct device *dev, struct regmap *map, int irq,
			     int irq_flags, int irq_base,
			     const struct regmap_irq_chip *chip,
			     struct regmap_irq_chip_data **data)
{
	struct regmap_irq_chip_data **ptr, *d;
	int ret;

	ptr = devres_alloc(devm_regmap_irq_chip_release, sizeof(*ptr),
			   GFP_KERNEL);
	if (!ptr)
		return -ENOMEM;

	ret = regmap_add_irq_chip(map, irq, irq_flags, irq_base,
				  chip, &d);
	if (ret < 0) {
		devres_free(ptr);
		return ret;
	}

	*ptr = d;
	devres_add(dev, ptr);
	*data = d;
	return 0;
}
EXPORT_SYMBOL_GPL(devm_regmap_add_irq_chip);

/**
 * devm_regmap_del_irq_chip() - Resource managed regmap_del_irq_chip()
 *
 * @dev: Device for which which resource was allocated.
 * @irq: Primary IRQ for the device.
 * @data: &regmap_irq_chip_data allocated by regmap_add_irq_chip().
 *
 * A resource managed version of regmap_del_irq_chip().
 */
void devm_regmap_del_irq_chip(struct device *dev, int irq,
			      struct regmap_irq_chip_data *data)
{
	int rc;

	WARN_ON(irq != data->irq);
	rc = devres_release(dev, devm_regmap_irq_chip_release,
			    devm_regmap_irq_chip_match, data);
	/* WARN_ON(0) is a no-op, so no separate rc != 0 check is needed */
	WARN_ON(rc);
}
EXPORT_SYMBOL_GPL(devm_regmap_del_irq_chip);

943
/**
 * regmap_irq_chip_get_base() - Retrieve interrupt base for a regmap IRQ chip
 *
 * @data: regmap irq controller to operate on.
 *
 * Useful for drivers to request their own IRQs.  Only meaningful for
 * chips registered with a non-zero irq_base (legacy domain); warns
 * otherwise.
 */
int regmap_irq_chip_get_base(struct regmap_irq_chip_data *data)
{
	WARN_ON(!data->irq_base);
	return data->irq_base;
}
EXPORT_SYMBOL_GPL(regmap_irq_chip_get_base);
956
957

/**
 * regmap_irq_get_virq() - Map an interrupt on a chip to a virtual IRQ
 *
 * @data: regmap irq controller to operate on.
 * @irq: index of the interrupt requested in the chip IRQs.
 *
 * Useful for drivers to request their own IRQs.  Returns -EINVAL for
 * hole entries (zero mask) in the chip's IRQ table.
 */
int regmap_irq_get_virq(struct regmap_irq_chip_data *data, int irq)
{
	/* Handle holes in the IRQ list */
	if (!data->chip->irqs[irq].mask)
		return -EINVAL;

	return irq_create_mapping(data->domain, irq);
}
EXPORT_SYMBOL_GPL(regmap_irq_get_virq);
974
975

/**
 * regmap_irq_get_domain() - Retrieve the irq_domain for the chip
 *
 * @data: regmap_irq controller to operate on.
 *
 * Useful for drivers to request their own IRQs and for integration
 * with subsystems.  For ease of integration NULL is accepted as a
 * domain, allowing devices to just call this even if no domain is
 * allocated.
 */
struct irq_domain *regmap_irq_get_domain(struct regmap_irq_chip_data *data)
{
	return data ? data->domain : NULL;
}
EXPORT_SYMBOL_GPL(regmap_irq_get_domain);