/*
 * regmap based irq_chip
 *
 * Copyright 2011 Wolfson Microelectronics plc
 *
 * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/device.h>
#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
#include <linux/slab.h>

#include "internal.h"

struct regmap_irq_chip_data {
	struct mutex lock;
26
	struct irq_chip irq_chip;
27
28

	struct regmap *map;
Mark Brown's avatar
Mark Brown committed
29
	const struct regmap_irq_chip *chip;
30
31

	int irq_base;
32
	struct irq_domain *domain;
33

34
35
36
	int irq;
	int wake_count;

37
	void *status_reg_buf;
38
39
40
	unsigned int *status_buf;
	unsigned int *mask_buf;
	unsigned int *mask_buf_def;
41
	unsigned int *wake_buf;
42
43

	unsigned int irq_reg_stride;
44
45
46
47
48
49
};

static inline const
struct regmap_irq *irq_to_regmap_irq(struct regmap_irq_chip_data *data,
				     int irq)
{
50
	return &data->chip->irqs[irq];
51
52
53
54
55
56
57
58
59
60
61
62
}

/*
 * irq_bus_lock callback: take the chip mutex so that mask/wake state can
 * be updated atomically; regmap_irq_sync_unlock() writes it back and drops
 * the lock.
 */
static void regmap_irq_lock(struct irq_data *data)
{
	struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);

	mutex_lock(&d->lock);
}

static void regmap_irq_sync_unlock(struct irq_data *data)
{
	struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
63
	struct regmap *map = d->map;
64
	int i, ret;
65
	u32 reg;
66
	u32 unmask_offset;
67

68
69
70
71
72
73
74
	if (d->chip->runtime_pm) {
		ret = pm_runtime_get_sync(map->dev);
		if (ret < 0)
			dev_err(map->dev, "IRQ sync failed to resume: %d\n",
				ret);
	}

75
76
77
78
79
80
	/*
	 * If there's been a change in the mask write it back to the
	 * hardware.  We rely on the use of the regmap core cache to
	 * suppress pointless writes.
	 */
	for (i = 0; i < d->chip->num_regs; i++) {
81
82
		reg = d->chip->mask_base +
			(i * map->reg_stride * d->irq_reg_stride);
83
		if (d->chip->mask_invert) {
84
85
			ret = regmap_update_bits(d->map, reg,
					 d->mask_buf_def[i], ~d->mask_buf[i]);
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
		} else if (d->chip->unmask_base) {
			/* set mask with mask_base register */
			ret = regmap_update_bits(d->map, reg,
					d->mask_buf_def[i], ~d->mask_buf[i]);
			if (ret < 0)
				dev_err(d->map->dev,
					"Failed to sync unmasks in %x\n",
					reg);
			unmask_offset = d->chip->unmask_base -
							d->chip->mask_base;
			/* clear mask with unmask_base register */
			ret = regmap_update_bits(d->map,
					reg + unmask_offset,
					d->mask_buf_def[i],
					d->mask_buf[i]);
		} else {
102
			ret = regmap_update_bits(d->map, reg,
103
					 d->mask_buf_def[i], d->mask_buf[i]);
104
		}
105
106
		if (ret != 0)
			dev_err(d->map->dev, "Failed to sync masks in %x\n",
107
				reg);
108
109
110
111

		reg = d->chip->wake_base +
			(i * map->reg_stride * d->irq_reg_stride);
		if (d->wake_buf) {
112
113
114
115
116
117
118
119
			if (d->chip->wake_invert)
				ret = regmap_update_bits(d->map, reg,
							 d->mask_buf_def[i],
							 ~d->wake_buf[i]);
			else
				ret = regmap_update_bits(d->map, reg,
							 d->mask_buf_def[i],
							 d->wake_buf[i]);
120
121
122
123
124
			if (ret != 0)
				dev_err(d->map->dev,
					"Failed to sync wakes in %x: %d\n",
					reg, ret);
		}
125
126
127
128

		if (!d->chip->init_ack_masked)
			continue;
		/*
dashsriram's avatar
dashsriram committed
129
		 * Ack all the masked interrupts unconditionally,
130
131
132
		 * OR if there is masked interrupt which hasn't been Acked,
		 * it'll be ignored in irq handler, then may introduce irq storm
		 */
133
		if (d->mask_buf[i] && (d->chip->ack_base || d->chip->use_ack)) {
134
135
136
137
138
139
140
			reg = d->chip->ack_base +
				(i * map->reg_stride * d->irq_reg_stride);
			ret = regmap_write(map, reg, d->mask_buf[i]);
			if (ret != 0)
				dev_err(d->map->dev, "Failed to ack 0x%x: %d\n",
					reg, ret);
		}
141
142
	}

143
144
145
	if (d->chip->runtime_pm)
		pm_runtime_put(map->dev);

146
147
148
149
150
151
152
153
154
155
	/* If we've changed our wakeup count propagate it to the parent */
	if (d->wake_count < 0)
		for (i = d->wake_count; i < 0; i++)
			irq_set_irq_wake(d->irq, 0);
	else if (d->wake_count > 0)
		for (i = 0; i < d->wake_count; i++)
			irq_set_irq_wake(d->irq, 1);

	d->wake_count = 0;

156
157
158
159
160
161
	mutex_unlock(&d->lock);
}

static void regmap_irq_enable(struct irq_data *data)
{
	struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
162
	struct regmap *map = d->map;
163
	const struct regmap_irq *irq_data = irq_to_regmap_irq(d, data->hwirq);
164

165
	d->mask_buf[irq_data->reg_offset / map->reg_stride] &= ~irq_data->mask;
166
167
168
169
170
}

static void regmap_irq_disable(struct irq_data *data)
{
	struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
171
	struct regmap *map = d->map;
172
	const struct regmap_irq *irq_data = irq_to_regmap_irq(d, data->hwirq);
173

174
	d->mask_buf[irq_data->reg_offset / map->reg_stride] |= irq_data->mask;
175
176
}

177
178
179
180
181
182
183
static int regmap_irq_set_wake(struct irq_data *data, unsigned int on)
{
	struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
	struct regmap *map = d->map;
	const struct regmap_irq *irq_data = irq_to_regmap_irq(d, data->hwirq);

	if (on) {
184
185
186
		if (d->wake_buf)
			d->wake_buf[irq_data->reg_offset / map->reg_stride]
				&= ~irq_data->mask;
187
188
		d->wake_count++;
	} else {
189
190
191
		if (d->wake_buf)
			d->wake_buf[irq_data->reg_offset / map->reg_stride]
				|= irq_data->mask;
192
193
194
195
196
197
		d->wake_count--;
	}

	return 0;
}

198
static const struct irq_chip regmap_irq_chip = {
199
200
201
202
	.irq_bus_lock		= regmap_irq_lock,
	.irq_bus_sync_unlock	= regmap_irq_sync_unlock,
	.irq_disable		= regmap_irq_disable,
	.irq_enable		= regmap_irq_enable,
203
	.irq_set_wake		= regmap_irq_set_wake,
204
205
206
207
208
};

static irqreturn_t regmap_irq_thread(int irq, void *d)
{
	struct regmap_irq_chip_data *data = d;
Mark Brown's avatar
Mark Brown committed
209
	const struct regmap_irq_chip *chip = data->chip;
210
211
	struct regmap *map = data->map;
	int ret, i;
212
	bool handled = false;
213
	u32 reg;
214

215
216
217
218
219
	if (chip->runtime_pm) {
		ret = pm_runtime_get_sync(map->dev);
		if (ret < 0) {
			dev_err(map->dev, "IRQ thread failed to resume: %d\n",
				ret);
220
			pm_runtime_put(map->dev);
221
222
223
224
			return IRQ_NONE;
		}
	}

225
226
227
228
	/*
	 * Read in the statuses, using a single bulk read if possible
	 * in order to reduce the I/O overheads.
	 */
229
	if (!map->use_single_read && map->reg_stride == 1 &&
230
231
232
233
	    data->irq_reg_stride == 1) {
		u8 *buf8 = data->status_reg_buf;
		u16 *buf16 = data->status_reg_buf;
		u32 *buf32 = data->status_reg_buf;
234

235
236
237
238
239
		BUG_ON(!data->status_reg_buf);

		ret = regmap_bulk_read(map, chip->status_base,
				       data->status_reg_buf,
				       chip->num_regs);
240
241
		if (ret != 0) {
			dev_err(map->dev, "Failed to read IRQ status: %d\n",
242
				ret);
243
244
			return IRQ_NONE;
		}
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278

		for (i = 0; i < data->chip->num_regs; i++) {
			switch (map->format.val_bytes) {
			case 1:
				data->status_buf[i] = buf8[i];
				break;
			case 2:
				data->status_buf[i] = buf16[i];
				break;
			case 4:
				data->status_buf[i] = buf32[i];
				break;
			default:
				BUG();
				return IRQ_NONE;
			}
		}

	} else {
		for (i = 0; i < data->chip->num_regs; i++) {
			ret = regmap_read(map, chip->status_base +
					  (i * map->reg_stride
					   * data->irq_reg_stride),
					  &data->status_buf[i]);

			if (ret != 0) {
				dev_err(map->dev,
					"Failed to read IRQ status: %d\n",
					ret);
				if (chip->runtime_pm)
					pm_runtime_put(map->dev);
				return IRQ_NONE;
			}
		}
279
	}
280

281
282
283
284
285
286
287
288
	/*
	 * Ignore masked IRQs and ack if we need to; we ack early so
	 * there is no race between handling and acknowleding the
	 * interrupt.  We assume that typically few of the interrupts
	 * will fire simultaneously so don't worry about overhead from
	 * doing a write per register.
	 */
	for (i = 0; i < data->chip->num_regs; i++) {
289
290
		data->status_buf[i] &= ~data->mask_buf[i];

291
		if (data->status_buf[i] && (chip->ack_base || chip->use_ack)) {
292
293
294
			reg = chip->ack_base +
				(i * map->reg_stride * data->irq_reg_stride);
			ret = regmap_write(map, reg, data->status_buf[i]);
295
296
			if (ret != 0)
				dev_err(map->dev, "Failed to ack 0x%x: %d\n",
297
					reg, ret);
298
299
300
301
		}
	}

	for (i = 0; i < chip->num_irqs; i++) {
302
303
		if (data->status_buf[chip->irqs[i].reg_offset /
				     map->reg_stride] & chip->irqs[i].mask) {
304
			handle_nested_irq(irq_find_mapping(data->domain, i));
305
			handled = true;
306
307
308
		}
	}

309
310
311
	if (chip->runtime_pm)
		pm_runtime_put(map->dev);

312
313
314
315
	if (handled)
		return IRQ_HANDLED;
	else
		return IRQ_NONE;
316
317
}

318
319
320
321
322
323
static int regmap_irq_map(struct irq_domain *h, unsigned int virq,
			  irq_hw_number_t hw)
{
	struct regmap_irq_chip_data *data = h->host_data;

	irq_set_chip_data(virq, data);
324
	irq_set_chip(virq, &data->irq_chip);
325
326
327
328
329
330
	irq_set_nested_thread(virq, 1);
	irq_set_noprobe(virq);

	return 0;
}

331
static const struct irq_domain_ops regmap_domain_ops = {
332
333
334
335
	.map	= regmap_irq_map,
	.xlate	= irq_domain_xlate_twocell,
};

336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
/**
 * regmap_add_irq_chip(): Use standard regmap IRQ controller handling
 *
 * map:       The regmap for the device.
 * irq:       The IRQ the device uses to signal interrupts
 * irq_flags: The IRQF_ flags to use for the primary interrupt.
 * chip:      Configuration for the interrupt controller.
 * data:      Runtime data structure for the controller, allocated on success
 *
 * Returns 0 on success or an errno on failure.
 *
 * In order for this to be efficient the chip really should use a
 * register cache.  The chip driver is responsible for restoring the
 * register values used by the IRQ controller over suspend and resume.
 */
int regmap_add_irq_chip(struct regmap *map, int irq, int irq_flags,
Mark Brown's avatar
Mark Brown committed
352
			int irq_base, const struct regmap_irq_chip *chip,
353
354
355
			struct regmap_irq_chip_data **data)
{
	struct regmap_irq_chip_data *d;
356
	int i;
357
	int ret = -ENOMEM;
358
	u32 reg;
359
	u32 unmask_offset;
360

361
362
363
	if (chip->num_regs <= 0)
		return -EINVAL;

364
365
366
367
368
369
370
371
	for (i = 0; i < chip->num_irqs; i++) {
		if (chip->irqs[i].reg_offset % map->reg_stride)
			return -EINVAL;
		if (chip->irqs[i].reg_offset / map->reg_stride >=
		    chip->num_regs)
			return -EINVAL;
	}

372
373
374
375
376
377
378
	if (irq_base) {
		irq_base = irq_alloc_descs(irq_base, 0, chip->num_irqs, 0);
		if (irq_base < 0) {
			dev_warn(map->dev, "Failed to allocate IRQs: %d\n",
				 irq_base);
			return irq_base;
		}
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
	}

	d = kzalloc(sizeof(*d), GFP_KERNEL);
	if (!d)
		return -ENOMEM;

	d->status_buf = kzalloc(sizeof(unsigned int) * chip->num_regs,
				GFP_KERNEL);
	if (!d->status_buf)
		goto err_alloc;

	d->mask_buf = kzalloc(sizeof(unsigned int) * chip->num_regs,
			      GFP_KERNEL);
	if (!d->mask_buf)
		goto err_alloc;

	d->mask_buf_def = kzalloc(sizeof(unsigned int) * chip->num_regs,
				  GFP_KERNEL);
	if (!d->mask_buf_def)
		goto err_alloc;

400
401
402
403
404
405
406
	if (chip->wake_base) {
		d->wake_buf = kzalloc(sizeof(unsigned int) * chip->num_regs,
				      GFP_KERNEL);
		if (!d->wake_buf)
			goto err_alloc;
	}

407
	d->irq_chip = regmap_irq_chip;
408
	d->irq_chip.name = chip->name;
409
	d->irq = irq;
410
411
412
	d->map = map;
	d->chip = chip;
	d->irq_base = irq_base;
413
414
415
416
417
418

	if (chip->irq_reg_stride)
		d->irq_reg_stride = chip->irq_reg_stride;
	else
		d->irq_reg_stride = 1;

419
	if (!map->use_single_read && map->reg_stride == 1 &&
420
421
422
423
424
425
426
	    d->irq_reg_stride == 1) {
		d->status_reg_buf = kmalloc(map->format.val_bytes *
					    chip->num_regs, GFP_KERNEL);
		if (!d->status_reg_buf)
			goto err_alloc;
	}

427
428
429
	mutex_init(&d->lock);

	for (i = 0; i < chip->num_irqs; i++)
430
		d->mask_buf_def[chip->irqs[i].reg_offset / map->reg_stride]
431
432
433
434
435
			|= chip->irqs[i].mask;

	/* Mask all the interrupts by default */
	for (i = 0; i < chip->num_regs; i++) {
		d->mask_buf[i] = d->mask_buf_def[i];
436
437
		reg = chip->mask_base +
			(i * map->reg_stride * d->irq_reg_stride);
438
439
440
		if (chip->mask_invert)
			ret = regmap_update_bits(map, reg,
					 d->mask_buf[i], ~d->mask_buf[i]);
441
442
443
444
445
446
447
448
		else if (d->chip->unmask_base) {
			unmask_offset = d->chip->unmask_base -
					d->chip->mask_base;
			ret = regmap_update_bits(d->map,
					reg + unmask_offset,
					d->mask_buf[i],
					d->mask_buf[i]);
		} else
449
			ret = regmap_update_bits(map, reg,
450
					 d->mask_buf[i], d->mask_buf[i]);
451
452
		if (ret != 0) {
			dev_err(map->dev, "Failed to set masks in 0x%x: %d\n",
453
				reg, ret);
454
455
			goto err_alloc;
		}
456
457
458
459
460
461
462
463
464
465
466
467
468
469

		if (!chip->init_ack_masked)
			continue;

		/* Ack masked but set interrupts */
		reg = chip->status_base +
			(i * map->reg_stride * d->irq_reg_stride);
		ret = regmap_read(map, reg, &d->status_buf[i]);
		if (ret != 0) {
			dev_err(map->dev, "Failed to read IRQ status: %d\n",
				ret);
			goto err_alloc;
		}

470
		if (d->status_buf[i] && (chip->ack_base || chip->use_ack)) {
471
472
473
474
475
476
477
478
479
480
			reg = chip->ack_base +
				(i * map->reg_stride * d->irq_reg_stride);
			ret = regmap_write(map, reg,
					d->status_buf[i] & d->mask_buf[i]);
			if (ret != 0) {
				dev_err(map->dev, "Failed to ack 0x%x: %d\n",
					reg, ret);
				goto err_alloc;
			}
		}
481
482
	}

483
484
485
486
487
488
	/* Wake is disabled by default */
	if (d->wake_buf) {
		for (i = 0; i < chip->num_regs; i++) {
			d->wake_buf[i] = d->mask_buf_def[i];
			reg = chip->wake_base +
				(i * map->reg_stride * d->irq_reg_stride);
489
490
491
492
493
494
495
496
497

			if (chip->wake_invert)
				ret = regmap_update_bits(map, reg,
							 d->mask_buf_def[i],
							 0);
			else
				ret = regmap_update_bits(map, reg,
							 d->mask_buf_def[i],
							 d->wake_buf[i]);
498
499
500
501
502
503
504
505
			if (ret != 0) {
				dev_err(map->dev, "Failed to set masks in 0x%x: %d\n",
					reg, ret);
				goto err_alloc;
			}
		}
	}

506
507
508
509
510
511
512
513
514
515
516
517
	if (irq_base)
		d->domain = irq_domain_add_legacy(map->dev->of_node,
						  chip->num_irqs, irq_base, 0,
						  &regmap_domain_ops, d);
	else
		d->domain = irq_domain_add_linear(map->dev->of_node,
						  chip->num_irqs,
						  &regmap_domain_ops, d);
	if (!d->domain) {
		dev_err(map->dev, "Failed to create IRQ domain\n");
		ret = -ENOMEM;
		goto err_alloc;
518
519
	}

520
521
	ret = request_threaded_irq(irq, NULL, regmap_irq_thread,
				   irq_flags | IRQF_ONESHOT,
522
523
				   chip->name, d);
	if (ret != 0) {
524
525
		dev_err(map->dev, "Failed to request IRQ %d for %s: %d\n",
			irq, chip->name, ret);
526
		goto err_domain;
527
528
	}

529
530
	*data = d;

531
532
	return 0;

533
534
err_domain:
	/* Should really dispose of the domain but... */
535
err_alloc:
536
	kfree(d->wake_buf);
537
538
539
	kfree(d->mask_buf_def);
	kfree(d->mask_buf);
	kfree(d->status_buf);
540
	kfree(d->status_reg_buf);
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
	kfree(d);
	return ret;
}
EXPORT_SYMBOL_GPL(regmap_add_irq_chip);

/**
 * regmap_del_irq_chip(): Stop interrupt handling for a regmap IRQ chip
 *
 * @irq: Primary IRQ for the device
 * @d:   regmap_irq_chip_data allocated by regmap_add_irq_chip()
 */
void regmap_del_irq_chip(int irq, struct regmap_irq_chip_data *d)
{
	if (!d)
		return;

	/* Free the primary IRQ before tearing down the domain it feeds */
	free_irq(irq, d);
	irq_domain_remove(d->domain);
	kfree(d->wake_buf);
	kfree(d->mask_buf_def);
	kfree(d->mask_buf);
	kfree(d->status_reg_buf);
	kfree(d->status_buf);
	kfree(d);
}
EXPORT_SYMBOL_GPL(regmap_del_irq_chip);

/**
 * regmap_irq_chip_get_base(): Retrieve interrupt base for a regmap IRQ chip
 *
 * Useful for drivers to request their own IRQs.
 *
 * @data: regmap_irq controller to operate on.
 */
int regmap_irq_chip_get_base(struct regmap_irq_chip_data *data)
{
577
	WARN_ON(!data->irq_base);
578
579
580
	return data->irq_base;
}
EXPORT_SYMBOL_GPL(regmap_irq_chip_get_base);
581
582
583
584
585
586
587
588
589
590
591

/**
 * regmap_irq_get_virq(): Map an interrupt on a chip to a virtual IRQ
 *
 * Useful for drivers to request their own IRQs.
 *
 * @data: regmap_irq controller to operate on.
 * @irq: index of the interrupt requested in the chip IRQs
 */
int regmap_irq_get_virq(struct regmap_irq_chip_data *data, int irq)
{
592
593
594
595
	/* Handle holes in the IRQ list */
	if (!data->chip->irqs[irq].mask)
		return -EINVAL;

596
597
598
	return irq_create_mapping(data->domain, irq);
}
EXPORT_SYMBOL_GPL(regmap_irq_get_virq);
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617

/**
 * regmap_irq_get_domain(): Retrieve the irq_domain for the chip
 *
 * Useful for drivers to request their own IRQs and for integration
 * with subsystems.  For ease of integration NULL is accepted as a
 * domain, allowing devices to just call this even if no domain is
 * allocated.
 *
 * @data: regmap_irq controller to operate on.
 */
struct irq_domain *regmap_irq_get_domain(struct regmap_irq_chip_data *data)
{
	/* Tolerate a NULL controller so callers need no guard of their own */
	return data ? data->domain : NULL;
}
EXPORT_SYMBOL_GPL(regmap_irq_get_domain);