// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2013-2017 ARM Limited, All Rights Reserved.
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#include <linux/acpi.h>
#include <linux/acpi_iort.h>
#include <linux/bitmap.h>
#include <linux/cpu.h>
#include <linux/crash_dump.h>
#include <linux/delay.h>
#include <linux/dma-iommu.h>
#include <linux/efi.h>
#include <linux/interrupt.h>
#include <linux/irqdomain.h>
#include <linux/list.h>
#include <linux/log2.h>
#include <linux/memblock.h>
#include <linux/mm.h>
#include <linux/msi.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_pci.h>
#include <linux/of_platform.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/syscore_ops.h>

#include <linux/irqchip.h>
#include <linux/irqchip/arm-gic-v3.h>
#include <linux/irqchip/arm-gic-v4.h>

#include <asm/cputype.h>
#include <asm/exception.h>

#include "irq-gic-common.h"

#define ITS_FLAGS_CMDQ_NEEDS_FLUSHING		(1ULL << 0)
#define ITS_FLAGS_WORKAROUND_CAVIUM_22375	(1ULL << 1)
#define ITS_FLAGS_WORKAROUND_CAVIUM_23144	(1ULL << 2)
#define ITS_FLAGS_SAVE_SUSPEND_STATE		(1ULL << 3)

#define RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING	(1 << 0)
#define RDIST_FLAGS_RD_TABLES_PREALLOCATED	(1 << 1)

static u32 lpi_id_bits;

/*
 * We allocate memory for PROPBASE to cover 2 ^ lpi_id_bits LPIs to
 * deal with (one configuration byte per interrupt). PENDBASE has to
 * be 64kB aligned (one bit per LPI, plus 8192 bits for SPI/PPI/SGI).
 */
#define LPI_NRBITS		lpi_id_bits
#define LPI_PROPBASE_SZ		ALIGN(BIT(LPI_NRBITS), SZ_64K)
#define LPI_PENDBASE_SZ		ALIGN(BIT(LPI_NRBITS) / 8, SZ_64K)

#define LPI_PROP_DEFAULT_PRIO	GICD_INT_DEF_PRI

/*
 * Collection structure - just an ID, and a redistributor address to
 * ping. We use one per CPU as a bag of interrupts assigned to this
 * CPU.
 */
struct its_collection {
	u64			target_address;
	u16			col_id;
};

/*
 * The ITS_BASER structure - contains memory information, cached
 * value of BASER register configuration and ITS page size.
 */
struct its_baser {
	void		*base;
	u64		val;
	u32		order;
	u32		psz;
};

struct its_device;

/*
 * The ITS structure - contains most of the infrastructure, with the
 * top-level MSI domain, the command queue, the collections, and the
 * list of devices writing to it.
 *
 * dev_alloc_lock has to be taken for device allocations, while the
 * spinlock must be taken to parse data structures such as the device
 * list.
 */
struct its_node {
	raw_spinlock_t		lock;
	struct mutex		dev_alloc_lock;
	struct list_head	entry;
	void __iomem		*base;
	phys_addr_t		phys_base;
	struct its_cmd_block	*cmd_base;
	struct its_cmd_block	*cmd_write;
	struct its_baser	tables[GITS_BASER_NR_REGS];
	struct its_collection	*collections;
	struct fwnode_handle	*fwnode_handle;
	u64			(*get_msi_base)(struct its_device *its_dev);
	u64			cbaser_save;
	u32			ctlr_save;
	struct list_head	its_device_list;
	u64			flags;
	unsigned long		list_nr;
	u32			ite_size;
	u32			device_ids;
	int			numa_node;
	unsigned int		msi_domain_flags;
	u32			pre_its_base; /* for Socionext Synquacer */
	bool			is_v4;
	int			vlpi_redist_offset;
};

#define ITS_ITT_ALIGN		SZ_256

/* The maximum number of VPEID bits supported by VLPI commands */
#define ITS_MAX_VPEID_BITS	(16)
#define ITS_MAX_VPEID		(1 << (ITS_MAX_VPEID_BITS))

/* Convert page order to size in bytes */
#define PAGE_ORDER_TO_SIZE(o)	(PAGE_SIZE << (o))

struct event_lpi_map {
	unsigned long		*lpi_map;
	u16			*col_map;
	irq_hw_number_t		lpi_base;
	int			nr_lpis;
	struct mutex		vlpi_lock;
	struct its_vm		*vm;
	struct its_vlpi_map	*vlpi_maps;
	int			nr_vlpis;
};

/*
 * The ITS view of a device - belongs to an ITS, owns an interrupt
 * translation table, and a list of interrupts. If some of its
 * LPIs are injected into a guest (GICv4), the event_map.vm field
 * indicates which one.
 */
struct its_device {
	struct list_head	entry;
	struct its_node		*its;
	struct event_lpi_map	event_map;
	void			*itt;
	u32			nr_ites;
	u32			device_id;
	bool			shared;
};

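/*
 * GICv4 doorbell proxy: on redistributors without DirectLPI, vPE
 * doorbells are generated through events of this fake ITS device.
 */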
static struct {
	raw_spinlock_t		lock;
	struct its_device	*dev;
	struct its_vpe		**vpes;
	int			next_victim;
} vpe_proxy;

static LIST_HEAD(its_nodes);
static DEFINE_RAW_SPINLOCK(its_lock);
static struct rdists *gic_rdists;
static struct irq_domain *its_parent;

static unsigned long its_list_map;
static u16 vmovp_seq_num;
static DEFINE_RAW_SPINLOCK(vmovp_lock);

static DEFINE_IDA(its_vpeid_ida);

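/* Per-CPU redistributor accessors, plus the RD and VLPI register frames */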
#define gic_data_rdist()		(raw_cpu_ptr(gic_rdists->rdist))
#define gic_data_rdist_cpu(cpu)		(per_cpu_ptr(gic_rdists->rdist, cpu))
#define gic_data_rdist_rd_base()	(gic_data_rdist()->rd_base)
#define gic_data_rdist_vlpi_base()	(gic_data_rdist_rd_base() + SZ_128K)

static struct its_collection *dev_event_to_col(struct its_device *its_dev,
					       u32 event)
{
	struct its_node *its = its_dev->its;

	return its->collections + its_dev->event_map.col_map[event];
}

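/*
 * A collection target (RDbase) is always 64kB aligned, so a non-zero
 * low 16 bits means the collection is corrupted and must not be used.
 */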
static struct its_collection *valid_col(struct its_collection *col)
{
	if (WARN_ON_ONCE(col->target_address & GENMASK_ULL(15, 0)))
		return NULL;

	return col;
}

static struct its_vpe *valid_vpe(struct its_node *its, struct its_vpe *vpe)
{
	if (valid_col(its->collections + vpe->col_idx))
		return vpe;

	return NULL;
}

/*
 * ITS command descriptors - parameters to be encoded in a command
 * block.
 */
struct its_cmd_desc {
	union {
		struct {
			struct its_device *dev;
			u32 event_id;
		} its_inv_cmd;

		struct {
			struct its_device *dev;
			u32 event_id;
		} its_clear_cmd;

		struct {
			struct its_device *dev;
			u32 event_id;
		} its_int_cmd;

		struct {
			struct its_device *dev;
			int valid;
		} its_mapd_cmd;

		struct {
			struct its_collection *col;
			int valid;
		} its_mapc_cmd;

		struct {
			struct its_device *dev;
			u32 phys_id;
			u32 event_id;
		} its_mapti_cmd;

		struct {
			struct its_device *dev;
			struct its_collection *col;
			u32 event_id;
		} its_movi_cmd;

		struct {
			struct its_device *dev;
			u32 event_id;
		} its_discard_cmd;

		struct {
			struct its_collection *col;
		} its_invall_cmd;

		struct {
			struct its_vpe *vpe;
		} its_vinvall_cmd;

		struct {
			struct its_vpe *vpe;
			struct its_collection *col;
			bool valid;
		} its_vmapp_cmd;

		struct {
			struct its_vpe *vpe;
			struct its_device *dev;
			u32 virt_id;
			u32 event_id;
			bool db_enabled;
		} its_vmapti_cmd;

		struct {
			struct its_vpe *vpe;
			struct its_device *dev;
			u32 event_id;
			bool db_enabled;
		} its_vmovi_cmd;

		struct {
			struct its_vpe *vpe;
			struct its_collection *col;
			u16 seq_num;
			u16 its_list;
		} its_vmovp_cmd;
	};
};

/*
 * The ITS command block, which is what the ITS actually parses.
 */
struct its_cmd_block {
	u64	raw_cmd[4];
};

#define ITS_CMD_QUEUE_SZ		SZ_64K
#define ITS_CMD_QUEUE_NR_ENTRIES	(ITS_CMD_QUEUE_SZ / sizeof(struct its_cmd_block))

typedef struct its_collection *(*its_cmd_builder_t)(struct its_node *,
						    struct its_cmd_block *,
						    struct its_cmd_desc *);

typedef struct its_vpe *(*its_cmd_vbuilder_t)(struct its_node *,
					      struct its_cmd_block *,
					      struct its_cmd_desc *);

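/* Set bits [h:l] of a raw command word to val, clearing them first */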
static void its_mask_encode(u64 *raw_cmd, u64 val, int h, int l)
{
	u64 mask = GENMASK_ULL(h, l);
	*raw_cmd &= ~mask;
	*raw_cmd |= (val << l) & mask;
}

static void its_encode_cmd(struct its_cmd_block *cmd, u8 cmd_nr)
{
	its_mask_encode(&cmd->raw_cmd[0], cmd_nr, 7, 0);
}

static void its_encode_devid(struct its_cmd_block *cmd, u32 devid)
{
	its_mask_encode(&cmd->raw_cmd[0], devid, 63, 32);
}

static void its_encode_event_id(struct its_cmd_block *cmd, u32 id)
{
	its_mask_encode(&cmd->raw_cmd[1], id, 31, 0);
}

static void its_encode_phys_id(struct its_cmd_block *cmd, u32 phys_id)
{
	its_mask_encode(&cmd->raw_cmd[1], phys_id, 63, 32);
}

static void its_encode_size(struct its_cmd_block *cmd, u8 size)
{
	its_mask_encode(&cmd->raw_cmd[1], size, 4, 0);
}

static void its_encode_itt(struct its_cmd_block *cmd, u64 itt_addr)
{
	its_mask_encode(&cmd->raw_cmd[2], itt_addr >> 8, 51, 8);
}

static void its_encode_valid(struct its_cmd_block *cmd, int valid)
{
	its_mask_encode(&cmd->raw_cmd[2], !!valid, 63, 63);
}

static void its_encode_target(struct its_cmd_block *cmd, u64 target_addr)
{
	its_mask_encode(&cmd->raw_cmd[2], target_addr >> 16, 51, 16);
}

static void its_encode_collection(struct its_cmd_block *cmd, u16 col)
{
	its_mask_encode(&cmd->raw_cmd[2], col, 15, 0);
}

static void its_encode_vpeid(struct its_cmd_block *cmd, u16 vpeid)
{
	its_mask_encode(&cmd->raw_cmd[1], vpeid, 47, 32);
}

static void its_encode_virt_id(struct its_cmd_block *cmd, u32 virt_id)
{
	its_mask_encode(&cmd->raw_cmd[2], virt_id, 31, 0);
}

static void its_encode_db_phys_id(struct its_cmd_block *cmd, u32 db_phys_id)
{
	its_mask_encode(&cmd->raw_cmd[2], db_phys_id, 63, 32);
}

static void its_encode_db_valid(struct its_cmd_block *cmd, bool db_valid)
{
	its_mask_encode(&cmd->raw_cmd[2], db_valid, 0, 0);
}

static void its_encode_seq_num(struct its_cmd_block *cmd, u16 seq_num)
{
	its_mask_encode(&cmd->raw_cmd[0], seq_num, 47, 32);
}

static void its_encode_its_list(struct its_cmd_block *cmd, u16 its_list)
{
	its_mask_encode(&cmd->raw_cmd[1], its_list, 15, 0);
}

static void its_encode_vpt_addr(struct its_cmd_block *cmd, u64 vpt_pa)
{
	its_mask_encode(&cmd->raw_cmd[3], vpt_pa >> 16, 51, 16);
}

static void its_encode_vpt_size(struct its_cmd_block *cmd, u8 vpt_size)
{
	its_mask_encode(&cmd->raw_cmd[3], vpt_size, 4, 0);
}

static inline void its_fixup_cmd(struct its_cmd_block *cmd)
{
	/* Let's fixup BE commands */
	cmd->raw_cmd[0] = cpu_to_le64(cmd->raw_cmd[0]);
	cmd->raw_cmd[1] = cpu_to_le64(cmd->raw_cmd[1]);
	cmd->raw_cmd[2] = cpu_to_le64(cmd->raw_cmd[2]);
	cmd->raw_cmd[3] = cpu_to_le64(cmd->raw_cmd[3]);
}

static struct its_collection *its_build_mapd_cmd(struct its_node *its,
						 struct its_cmd_block *cmd,
						 struct its_cmd_desc *desc)
{
	unsigned long itt_addr;
	u8 size = ilog2(desc->its_mapd_cmd.dev->nr_ites);

	itt_addr = virt_to_phys(desc->its_mapd_cmd.dev->itt);
	itt_addr = ALIGN(itt_addr, ITS_ITT_ALIGN);

	its_encode_cmd(cmd, GITS_CMD_MAPD);
	its_encode_devid(cmd, desc->its_mapd_cmd.dev->device_id);
	its_encode_size(cmd, size - 1);
	its_encode_itt(cmd, itt_addr);
	its_encode_valid(cmd, desc->its_mapd_cmd.valid);

	its_fixup_cmd(cmd);

	return NULL;
}

static struct its_collection *its_build_mapc_cmd(struct its_node *its,
						 struct its_cmd_block *cmd,
						 struct its_cmd_desc *desc)
{
	its_encode_cmd(cmd, GITS_CMD_MAPC);
	its_encode_collection(cmd, desc->its_mapc_cmd.col->col_id);
	its_encode_target(cmd, desc->its_mapc_cmd.col->target_address);
	its_encode_valid(cmd, desc->its_mapc_cmd.valid);

	its_fixup_cmd(cmd);

	return desc->its_mapc_cmd.col;
}

static struct its_collection *its_build_mapti_cmd(struct its_node *its,
						  struct its_cmd_block *cmd,
						  struct its_cmd_desc *desc)
{
	struct its_collection *col;

	col = dev_event_to_col(desc->its_mapti_cmd.dev,
			       desc->its_mapti_cmd.event_id);

	its_encode_cmd(cmd, GITS_CMD_MAPTI);
	its_encode_devid(cmd, desc->its_mapti_cmd.dev->device_id);
	its_encode_event_id(cmd, desc->its_mapti_cmd.event_id);
	its_encode_phys_id(cmd, desc->its_mapti_cmd.phys_id);
	its_encode_collection(cmd, col->col_id);

	its_fixup_cmd(cmd);

	return valid_col(col);
}

static struct its_collection *its_build_movi_cmd(struct its_node *its,
						 struct its_cmd_block *cmd,
						 struct its_cmd_desc *desc)
{
	struct its_collection *col;

	col = dev_event_to_col(desc->its_movi_cmd.dev,
			       desc->its_movi_cmd.event_id);

	its_encode_cmd(cmd, GITS_CMD_MOVI);
	its_encode_devid(cmd, desc->its_movi_cmd.dev->device_id);
	its_encode_event_id(cmd, desc->its_movi_cmd.event_id);
	its_encode_collection(cmd, desc->its_movi_cmd.col->col_id);

	its_fixup_cmd(cmd);

	return valid_col(col);
}

static struct its_collection *its_build_discard_cmd(struct its_node *its,
						    struct its_cmd_block *cmd,
						    struct its_cmd_desc *desc)
{
	struct its_collection *col;

	col = dev_event_to_col(desc->its_discard_cmd.dev,
			       desc->its_discard_cmd.event_id);

	its_encode_cmd(cmd, GITS_CMD_DISCARD);
	its_encode_devid(cmd, desc->its_discard_cmd.dev->device_id);
	its_encode_event_id(cmd, desc->its_discard_cmd.event_id);

	its_fixup_cmd(cmd);

	return valid_col(col);
}

static struct its_collection *its_build_inv_cmd(struct its_node *its,
						struct its_cmd_block *cmd,
						struct its_cmd_desc *desc)
{
	struct its_collection *col;

	col = dev_event_to_col(desc->its_inv_cmd.dev,
			       desc->its_inv_cmd.event_id);

	its_encode_cmd(cmd, GITS_CMD_INV);
	its_encode_devid(cmd, desc->its_inv_cmd.dev->device_id);
	its_encode_event_id(cmd, desc->its_inv_cmd.event_id);

	its_fixup_cmd(cmd);

	return valid_col(col);
}

static struct its_collection *its_build_int_cmd(struct its_node *its,
						struct its_cmd_block *cmd,
						struct its_cmd_desc *desc)
{
	struct its_collection *col;

	col = dev_event_to_col(desc->its_int_cmd.dev,
			       desc->its_int_cmd.event_id);

	its_encode_cmd(cmd, GITS_CMD_INT);
	its_encode_devid(cmd, desc->its_int_cmd.dev->device_id);
	its_encode_event_id(cmd, desc->its_int_cmd.event_id);

	its_fixup_cmd(cmd);

	return valid_col(col);
}

static struct its_collection *its_build_clear_cmd(struct its_node *its,
						  struct its_cmd_block *cmd,
						  struct its_cmd_desc *desc)
{
	struct its_collection *col;

	col = dev_event_to_col(desc->its_clear_cmd.dev,
			       desc->its_clear_cmd.event_id);

	its_encode_cmd(cmd, GITS_CMD_CLEAR);
	its_encode_devid(cmd, desc->its_clear_cmd.dev->device_id);
	its_encode_event_id(cmd, desc->its_clear_cmd.event_id);

	its_fixup_cmd(cmd);

	return valid_col(col);
}

static struct its_collection *its_build_invall_cmd(struct its_node *its,
						   struct its_cmd_block *cmd,
						   struct its_cmd_desc *desc)
{
	its_encode_cmd(cmd, GITS_CMD_INVALL);
	its_encode_collection(cmd, desc->its_invall_cmd.col->col_id);

	its_fixup_cmd(cmd);

	return NULL;
}

static struct its_vpe *its_build_vinvall_cmd(struct its_node *its,
					     struct its_cmd_block *cmd,
					     struct its_cmd_desc *desc)
{
	its_encode_cmd(cmd, GITS_CMD_VINVALL);
	its_encode_vpeid(cmd, desc->its_vinvall_cmd.vpe->vpe_id);

	its_fixup_cmd(cmd);

	return valid_vpe(its, desc->its_vinvall_cmd.vpe);
}

static struct its_vpe *its_build_vmapp_cmd(struct its_node *its,
					   struct its_cmd_block *cmd,
					   struct its_cmd_desc *desc)
{
	unsigned long vpt_addr;
	u64 target;

	vpt_addr = virt_to_phys(page_address(desc->its_vmapp_cmd.vpe->vpt_page));
	target = desc->its_vmapp_cmd.col->target_address + its->vlpi_redist_offset;

	its_encode_cmd(cmd, GITS_CMD_VMAPP);
	its_encode_vpeid(cmd, desc->its_vmapp_cmd.vpe->vpe_id);
	its_encode_valid(cmd, desc->its_vmapp_cmd.valid);
	its_encode_target(cmd, target);
	its_encode_vpt_addr(cmd, vpt_addr);
	its_encode_vpt_size(cmd, LPI_NRBITS - 1);

	its_fixup_cmd(cmd);

	return valid_vpe(its, desc->its_vmapp_cmd.vpe);
}

static struct its_vpe *its_build_vmapti_cmd(struct its_node *its,
					    struct its_cmd_block *cmd,
					    struct its_cmd_desc *desc)
{
	u32 db;

	if (desc->its_vmapti_cmd.db_enabled)
		db = desc->its_vmapti_cmd.vpe->vpe_db_lpi;
	else
		db = 1023;

	its_encode_cmd(cmd, GITS_CMD_VMAPTI);
	its_encode_devid(cmd, desc->its_vmapti_cmd.dev->device_id);
	its_encode_vpeid(cmd, desc->its_vmapti_cmd.vpe->vpe_id);
	its_encode_event_id(cmd, desc->its_vmapti_cmd.event_id);
	its_encode_db_phys_id(cmd, db);
	its_encode_virt_id(cmd, desc->its_vmapti_cmd.virt_id);

	its_fixup_cmd(cmd);

	return valid_vpe(its, desc->its_vmapti_cmd.vpe);
}

static struct its_vpe *its_build_vmovi_cmd(struct its_node *its,
					   struct its_cmd_block *cmd,
					   struct its_cmd_desc *desc)
{
	u32 db;

	if (desc->its_vmovi_cmd.db_enabled)
		db = desc->its_vmovi_cmd.vpe->vpe_db_lpi;
	else
		db = 1023;

	its_encode_cmd(cmd, GITS_CMD_VMOVI);
	its_encode_devid(cmd, desc->its_vmovi_cmd.dev->device_id);
	its_encode_vpeid(cmd, desc->its_vmovi_cmd.vpe->vpe_id);
	its_encode_event_id(cmd, desc->its_vmovi_cmd.event_id);
	its_encode_db_phys_id(cmd, db);
	its_encode_db_valid(cmd, true);

	its_fixup_cmd(cmd);

	return valid_vpe(its, desc->its_vmovi_cmd.vpe);
}

static struct its_vpe *its_build_vmovp_cmd(struct its_node *its,
					   struct its_cmd_block *cmd,
					   struct its_cmd_desc *desc)
{
	u64 target;

	target = desc->its_vmovp_cmd.col->target_address + its->vlpi_redist_offset;
	its_encode_cmd(cmd, GITS_CMD_VMOVP);
	its_encode_seq_num(cmd, desc->its_vmovp_cmd.seq_num);
	its_encode_its_list(cmd, desc->its_vmovp_cmd.its_list);
	its_encode_vpeid(cmd, desc->its_vmovp_cmd.vpe->vpe_id);
	its_encode_target(cmd, target);

	its_fixup_cmd(cmd);

	return valid_vpe(its, desc->its_vmovp_cmd.vpe);
}

static u64 its_cmd_ptr_to_offset(struct its_node *its,
				 struct its_cmd_block *ptr)
{
	return (ptr - its->cmd_base) * sizeof(*ptr);
}

static int its_queue_full(struct its_node *its)
{
	int widx;
	int ridx;

	widx = its->cmd_write - its->cmd_base;
	ridx = readl_relaxed(its->base + GITS_CREADR) / sizeof(struct its_cmd_block);

	/* This is incredibly unlikely to happen, unless the ITS locks up. */
	if (((widx + 1) % ITS_CMD_QUEUE_NR_ENTRIES) == ridx)
		return 1;

	return 0;
}

static struct its_cmd_block *its_allocate_entry(struct its_node *its)
{
	struct its_cmd_block *cmd;
	u32 count = 1000000;	/* 1s! */

	while (its_queue_full(its)) {
		count--;
		if (!count) {
			pr_err_ratelimited("ITS queue not draining\n");
			return NULL;
		}
		cpu_relax();
		udelay(1);
	}

	cmd = its->cmd_write++;

	/* Handle queue wrapping */
	if (its->cmd_write == (its->cmd_base + ITS_CMD_QUEUE_NR_ENTRIES))
		its->cmd_write = its->cmd_base;

	/* Clear command  */
	cmd->raw_cmd[0] = 0;
	cmd->raw_cmd[1] = 0;
	cmd->raw_cmd[2] = 0;
	cmd->raw_cmd[3] = 0;

	return cmd;
}

static struct its_cmd_block *its_post_commands(struct its_node *its)
{
	u64 wr = its_cmd_ptr_to_offset(its, its->cmd_write);

	writel_relaxed(wr, its->base + GITS_CWRITER);

	return its->cmd_write;
}

static void its_flush_cmd(struct its_node *its, struct its_cmd_block *cmd)
{
	/*
	 * Make sure the commands written to memory are observable by
	 * the ITS.
	 */
	if (its->flags & ITS_FLAGS_CMDQ_NEEDS_FLUSHING)
		gic_flush_dcache_to_poc(cmd, sizeof(*cmd));
	else
		dsb(ishst);
}

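/*
 * Wait for GITS_CREADR to advance past 'to', linearizing the indices so
 * that command queue wrap-around is handled correctly.
 */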
static int its_wait_for_range_completion(struct its_node *its,
					 u64	prev_idx,
					 struct its_cmd_block *to)
{
	u64 rd_idx, to_idx, linear_idx;
	u32 count = 1000000;	/* 1s! */

	/* Linearize to_idx if the command set has wrapped around */
	to_idx = its_cmd_ptr_to_offset(its, to);
	if (to_idx < prev_idx)
		to_idx += ITS_CMD_QUEUE_SZ;

	linear_idx = prev_idx;

	while (1) {
		s64 delta;

		rd_idx = readl_relaxed(its->base + GITS_CREADR);

		/*
		 * Compute the read pointer progress, taking the
		 * potential wrap-around into account.
		 */
		delta = rd_idx - prev_idx;
		if (rd_idx < prev_idx)
			delta += ITS_CMD_QUEUE_SZ;

		linear_idx += delta;
		if (linear_idx >= to_idx)
			break;

		count--;
		if (!count) {
			pr_err_ratelimited("ITS queue timeout (%llu %llu)\n",
					   to_idx, linear_idx);
			return -1;
		}
		prev_idx = rd_idx;
		cpu_relax();
		udelay(1);
	}

	return 0;
}

/* Warning, macro hell follows */
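/*
 * BUILD_SINGLE_CMD_FUNC() expands into a function that queues a single
 * command built by 'builder', chases it with a SYNC/VSYNC on whatever
 * object the builder returned, and waits for the read pointer to move
 * past both.
 */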
#define BUILD_SINGLE_CMD_FUNC(name, buildtype, synctype, buildfn)	\
void name(struct its_node *its,						\
	  buildtype builder,						\
	  struct its_cmd_desc *desc)					\
{									\
	struct its_cmd_block *cmd, *sync_cmd, *next_cmd;		\
	synctype *sync_obj;						\
	unsigned long flags;						\
	u64 rd_idx;							\
									\
	raw_spin_lock_irqsave(&its->lock, flags);			\
									\
	cmd = its_allocate_entry(its);					\
	if (!cmd) {		/* We're soooooo screewed... */		\
		raw_spin_unlock_irqrestore(&its->lock, flags);		\
		return;							\
	}								\
	sync_obj = builder(its, cmd, desc);				\
	its_flush_cmd(its, cmd);					\
									\
	if (sync_obj) {							\
		sync_cmd = its_allocate_entry(its);			\
		if (!sync_cmd)						\
			goto post;					\
									\
		buildfn(its, sync_cmd, sync_obj);			\
		its_flush_cmd(its, sync_cmd);				\
	}								\
									\
post:									\
	rd_idx = readl_relaxed(its->base + GITS_CREADR);		\
	next_cmd = its_post_commands(its);				\
	raw_spin_unlock_irqrestore(&its->lock, flags);			\
									\
	if (its_wait_for_range_completion(its, rd_idx, next_cmd))	\
		pr_err_ratelimited("ITS cmd %ps failed\n", builder);	\
}

static void its_build_sync_cmd(struct its_node *its,
			       struct its_cmd_block *sync_cmd,
			       struct its_collection *sync_col)
{
	its_encode_cmd(sync_cmd, GITS_CMD_SYNC);
	its_encode_target(sync_cmd, sync_col->target_address);

	its_fixup_cmd(sync_cmd);
}

static BUILD_SINGLE_CMD_FUNC(its_send_single_command, its_cmd_builder_t,
			     struct its_collection, its_build_sync_cmd)

static void its_build_vsync_cmd(struct its_node *its,
				struct its_cmd_block *sync_cmd,
				struct its_vpe *sync_vpe)
{
	its_encode_cmd(sync_cmd, GITS_CMD_VSYNC);
	its_encode_vpeid(sync_cmd, sync_vpe->vpe_id);

	its_fixup_cmd(sync_cmd);
}

static BUILD_SINGLE_CMD_FUNC(its_send_single_vcommand, its_cmd_vbuilder_t,
			     struct its_vpe, its_build_vsync_cmd)

static void its_send_int(struct its_device *dev, u32 event_id)
{
	struct its_cmd_desc desc;

	desc.its_int_cmd.dev = dev;
	desc.its_int_cmd.event_id = event_id;

	its_send_single_command(dev->its, its_build_int_cmd, &desc);
}

static void its_send_clear(struct its_device *dev, u32 event_id)
{
	struct its_cmd_desc desc;

	desc.its_clear_cmd.dev = dev;
	desc.its_clear_cmd.event_id = event_id;

	its_send_single_command(dev->its, its_build_clear_cmd, &desc);
}

static void its_send_inv(struct its_device *dev, u32 event_id)
{
	struct its_cmd_desc desc;

	desc.its_inv_cmd.dev = dev;
	desc.its_inv_cmd.event_id = event_id;

	its_send_single_command(dev->its, its_build_inv_cmd, &desc);
}

static void its_send_mapd(struct its_device *dev, int valid)
{
	struct its_cmd_desc desc;

	desc.its_mapd_cmd.dev = dev;
	desc.its_mapd_cmd.valid = !!valid;

	its_send_single_command(dev->its, its_build_mapd_cmd, &desc);
}

static void its_send_mapc(struct its_node *its, struct its_collection *col,
			  int valid)
{
	struct its_cmd_desc desc;

	desc.its_mapc_cmd.col = col;
	desc.its_mapc_cmd.valid = !!valid;

	its_send_single_command(its, its_build_mapc_cmd, &desc);
}

static void its_send_mapti(struct its_device *dev, u32 irq_id, u32 id)
{
	struct its_cmd_desc desc;

	desc.its_mapti_cmd.dev = dev;
	desc.its_mapti_cmd.phys_id = irq_id;
	desc.its_mapti_cmd.event_id = id;

	its_send_single_command(dev->its, its_build_mapti_cmd, &desc);
}

static void its_send_movi(struct its_device *dev,
			  struct its_collection *col, u32 id)
{
	struct its_cmd_desc desc;

	desc.its_movi_cmd.dev = dev;
	desc.its_movi_cmd.col = col;
	desc.its_movi_cmd.event_id = id;

	its_send_single_command(dev->its, its_build_movi_cmd, &desc);
}

static void its_send_discard(struct its_device *dev, u32 id)
{
	struct its_cmd_desc desc;

	desc.its_discard_cmd.dev = dev;
	desc.its_discard_cmd.event_id = id;

	its_send_single_command(dev->its, its_build_discard_cmd, &desc);
}

static void its_send_invall(struct its_node *its, struct its_collection *col)
{
	struct its_cmd_desc desc;

	desc.its_invall_cmd.col = col;

	its_send_single_command(its, its_build_invall_cmd, &desc);
}

static void its_send_vmapti(struct its_device *dev, u32 id)
{
	struct its_vlpi_map *map = &dev->event_map.vlpi_maps[id];
	struct its_cmd_desc desc;

	desc.its_vmapti_cmd.vpe = map->vpe;
	desc.its_vmapti_cmd.dev = dev;
	desc.its_vmapti_cmd.virt_id = map->vintid;
	desc.its_vmapti_cmd.event_id = id;
	desc.its_vmapti_cmd.db_enabled = map->db_enabled;

	its_send_single_vcommand(dev->its, its_build_vmapti_cmd, &desc);
}

static void its_send_vmovi(struct its_device *dev, u32 id)
{
	struct its_vlpi_map *map = &dev->event_map.vlpi_maps[id];
	struct its_cmd_desc desc;

	desc.its_vmovi_cmd.vpe = map->vpe;
	desc.its_vmovi_cmd.dev = dev;
	desc.its_vmovi_cmd.event_id = id;
	desc.its_vmovi_cmd.db_enabled = map->db_enabled;

	its_send_single_vcommand(dev->its, its_build_vmovi_cmd, &desc);
}

static void its_send_vmapp(struct its_node *its,
			   struct its_vpe *vpe, bool valid)
{
	struct its_cmd_desc desc;

	desc.its_vmapp_cmd.vpe = vpe;
	desc.its_vmapp_cmd.valid = valid;
	desc.its_vmapp_cmd.col = &its->collections[vpe->col_idx];

	its_send_single_vcommand(its, its_build_vmapp_cmd, &desc);
}

static void its_send_vmovp(struct its_vpe *vpe)
{
	struct its_cmd_desc desc;
	struct its_node *its;
	unsigned long flags;
	int col_id = vpe->col_idx;

	desc.its_vmovp_cmd.vpe = vpe;
	desc.its_vmovp_cmd.its_list = (u16)its_list_map;

	if (!its_list_map) {
		its = list_first_entry(&its_nodes, struct its_node, entry);
		desc.its_vmovp_cmd.seq_num = 0;
		desc.its_vmovp_cmd.col = &its->collections[col_id];
		its_send_single_vcommand(its, its_build_vmovp_cmd, &desc);
		return;
	}

	/*
	 * Yet another marvel of the architecture. If using the
	 * its_list "feature", we need to make sure that all ITSs
	 * receive all VMOVP commands in the same order. The only way
	 * to guarantee this is to make vmovp a serialization point.
	 *
	 * Wall <-- Head.
	 */
	raw_spin_lock_irqsave(&vmovp_lock, flags);

	desc.its_vmovp_cmd.seq_num = vmovp_seq_num++;

	/* Emit VMOVPs */
	list_for_each_entry(its, &its_nodes, entry) {
		if (!its->is_v4)
			continue;

		if (!vpe->its_vm->vlpi_count[its->list_nr])
			continue;

		desc.its_vmovp_cmd.col = &its->collections[col_id];
		its_send_single_vcommand(its, its_build_vmovp_cmd, &desc);
	}

	raw_spin_unlock_irqrestore(&vmovp_lock, flags);
}

static void its_send_vinvall(struct its_node *its, struct its_vpe *vpe)
{
	struct its_cmd_desc desc;

	desc.its_vinvall_cmd.vpe = vpe;
	its_send_single_vcommand(its, its_build_vinvall_cmd, &desc);
}

/*
 * irqchip functions - assumes MSI, mostly.
 */

static inline u32 its_get_event_id(struct irq_data *d)
{
	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
	return d->hwirq - its_dev->event_map.lpi_base;
}

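/*
 * Update the configuration byte (priority/enable) of an LPI, either in
 * the physical property table or, for an interrupt forwarded to a vCPU,
 * in the guest's virtual property table.
 */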
static void lpi_write_config(struct irq_data *d, u8 clr, u8 set)
{
	irq_hw_number_t hwirq;
	void *va;
	u8 *cfg;

	if (irqd_is_forwarded_to_vcpu(d)) {
		struct its_device *its_dev = irq_data_get_irq_chip_data(d);
		u32 event = its_get_event_id(d);
		struct its_vlpi_map *map;

		va = page_address(its_dev->event_map.vm->vprop_page);
		map = &its_dev->event_map.vlpi_maps[event];
		hwirq = map->vintid;

		/* Remember the updated property */
		map->properties &= ~clr;
		map->properties |= set | LPI_PROP_GROUP1;
	} else {
		va = gic_rdists->prop_table_va;
		hwirq = d->hwirq;
	}

	cfg = va + hwirq - 8192;
	*cfg &= ~clr;
	*cfg |= set | LPI_PROP_GROUP1;

	/*
	 * Make the above write visible to the redistributors.
	 * And yes, we're flushing exactly: One. Single. Byte.
	 * Humpf...
	 */
	if (gic_rdists->flags & RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING)
		gic_flush_dcache_to_poc(cfg, sizeof(*cfg));
	else
		dsb(ishst);
}

static void lpi_update_config(struct irq_data *d, u8 clr, u8 set)
{
	struct its_device *its_dev = irq_data_get_irq_chip_data(d);

	lpi_write_config(d, clr, set);
	its_send_inv(its_dev, its_get_event_id(d));
}

static void its_vlpi_set_doorbell(struct irq_data *d, bool enable)
{
	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
	u32 event = its_get_event_id(d);

	if (its_dev->event_map.vlpi_maps[event].db_enabled == enable)
		return;

	its_dev->event_map.vlpi_maps[event].db_enabled = enable;

	/*
	 * More fun with the architecture:
	 *
	 * Ideally, we'd issue a VMAPTI to set the doorbell to its LPI
	 * value or to 1023, depending on the enable bit. But that
	 * would be issuing a mapping for an /existing/ DevID+EventID
	 * pair, which is UNPREDICTABLE. Instead, let's issue a VMOVI
	 * to the /same/ vPE, using this opportunity to adjust the
	 * doorbell. Mouahahahaha. We loves it, Precious.
	 */
	its_send_vmovi(its_dev, event);
}

static void its_mask_irq(struct irq_data *d)
{
	if (irqd_is_forwarded_to_vcpu(d))
		its_vlpi_set_doorbell(d, false);

	lpi_update_config(d, LPI_PROP_ENABLED, 0);
}

static void its_unmask_irq(struct irq_data *d)
{
	if (irqd_is_forwarded_to_vcpu(d))
		its_vlpi_set_doorbell(d, true);

	lpi_update_config(d, 0, LPI_PROP_ENABLED);
}

static int its_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
			    bool force)
{
	unsigned int cpu;
	const struct cpumask *cpu_mask = cpu_online_mask;
	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
	struct its_collection *target_col;
	u32 id = its_get_event_id(d);

	/* A forwarded interrupt should use irq_set_vcpu_affinity */
	if (irqd_is_forwarded_to_vcpu(d))
		return -EINVAL;

	/* LPIs cannot be routed to a redistributor on a foreign node */
	if (its_dev->its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144) {
		if (its_dev->its->numa_node >= 0) {
			cpu_mask = cpumask_of_node(its_dev->its->numa_node);
			if (!cpumask_intersects(mask_val, cpu_mask))
				return -EINVAL;
		}
	}

	cpu = cpumask_any_and(mask_val, cpu_mask);

	if (cpu >= nr_cpu_ids)
		return -EINVAL;

	/* don't set the affinity when the target cpu is same as current one */
	if (cpu != its_dev->event_map.col_map[id]) {
		target_col = &its_dev->its->collections[cpu];
		its_send_movi(its_dev, target_col, id);
		its_dev->event_map.col_map[id] = cpu;
		irq_data_update_effective_affinity(d, cpumask_of(cpu));
	}

	return IRQ_SET_MASK_OK_DONE;
}

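/* Default doorbell address: the GITS_TRANSLATER frame of the owning ITS */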
static u64 its_irq_get_msi_base(struct its_device *its_dev)
{
	struct its_node *its = its_dev->its;

	return its->phys_base + GITS_TRANSLATER;
}

static void its_irq_compose_msi_msg(struct irq_data *d, struct msi_msg *msg)
{
	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
	struct its_node *its;
	u64 addr;

	its = its_dev->its;
	addr = its->get_msi_base(its_dev);

	msg->address_lo		= lower_32_bits(addr);
	msg->address_hi		= upper_32_bits(addr);
	msg->data		= its_get_event_id(d);

	iommu_dma_compose_msi_msg(irq_data_get_msi_desc(d), msg);
}

static int its_irq_set_irqchip_state(struct irq_data *d,
				     enum irqchip_irq_state which,
				     bool state)
{
	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
	u32 event = its_get_event_id(d);

	if (which != IRQCHIP_STATE_PENDING)
		return -EINVAL;

	if (state)
		its_send_int(its_dev, event);
	else
		its_send_clear(its_dev, event);

	return 0;
}

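/*
 * Make sure all the vPEs of a VM are mapped on this ITS before any of
 * its VLPIs are routed here; only needed when the ITS list is in use.
 */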
static void its_map_vm(struct its_node *its, struct its_vm *vm)
{
	unsigned long flags;

	/* Not using the ITS list? Everything is always mapped. */
	if (!its_list_map)
		return;

	raw_spin_lock_irqsave(&vmovp_lock, flags);

	/*
	 * If the VM wasn't mapped yet, iterate over the vpes and get
	 * them mapped now.
	 */
	vm->vlpi_count[its->list_nr]++;

	if (vm->vlpi_count[its->list_nr] == 1) {
		int i;

		for (i = 0; i < vm->nr_vpes; i++) {
			struct its_vpe *vpe = vm->vpes[i];
			struct irq_data *d = irq_get_irq_data(vpe->irq);

			/* Map the VPE to the first possible CPU */
			vpe->col_idx = cpumask_first(cpu_online_mask);
			its_send_vmapp(its, vpe, true);
			its_send_vinvall(its, vpe);
			irq_data_update_effective_affinity(d, cpumask_of(vpe->col_idx));
		}
	}

	raw_spin_unlock_irqrestore(&vmovp_lock, flags);
}

static void its_unmap_vm(struct its_node *its, struct its_vm *vm)
{
	unsigned long flags;

	/* Not using the ITS list? Everything is always mapped. */
	if (!its_list_map)
		return;

	raw_spin_lock_irqsave(&vmovp_lock, flags);

	if (!--vm->vlpi_count[its->list_nr]) {
		int i;

		for (i = 0; i < vm->nr_vpes; i++)
			its_send_vmapp(its, vm->vpes[i], false);
	}

	raw_spin_unlock_irqrestore(&vmovp_lock, flags);
}

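/*
 * Turn a physical LPI into a VLPI injected into the guest described by
 * info->map (irq_set_vcpu_affinity path).
 */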
static int its_vlpi_map(struct irq_data *d, struct its_cmd_info *info)
{
	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
	u32 event = its_get_event_id(d);
	int ret = 0;

	if (!info->map)
		return -EINVAL;

	mutex_lock(&its_dev->event_map.vlpi_lock);

	if (!its_dev->event_map.vm) {
		struct its_vlpi_map *maps;

		maps = kcalloc(its_dev->event_map.nr_lpis, sizeof(*maps),
			       GFP_KERNEL);
		if (!maps) {
			ret = -ENOMEM;
			goto out;
		}

		its_dev->event_map.vm = info->map->vm;
		its_dev->event_map.vlpi_maps = maps;
	} else if (its_dev->event_map.vm != info->map->vm) {
		ret = -EINVAL;
		goto out;
	}

	/* Get our private copy of the mapping information */
	its_dev->event_map.vlpi_maps[event] = *info->map;

	if (irqd_is_forwarded_to_vcpu(d)) {
		/* Already mapped, move it around */
		its_send_vmovi(its_dev, event);
	} else {
		/* Ensure all the VPEs are mapped on this ITS */
		its_map_vm(its_dev->its, info->map->vm);

		/*
		 * Flag the interrupt as forwarded so that we can
		 * start poking the virtual property table.
		 */
		irqd_set_forwarded_to_vcpu(d);

		/* Write out the property to the prop table */
		lpi_write_config(d, 0xff, info->map->properties);

		/* Drop the physical mapping */
		its_send_discard(its_dev, event);

		/* and install the virtual one */
		its_send_vmapti(its_dev, event);

		/* Increment the number of VLPIs */
		its_dev->event_map.nr_vlpis++;
	}

out:
	mutex_unlock(&its_dev->event_map.vlpi_lock);
	return ret;
}

static int its_vlpi_get(struct irq_data *d, struct its_cmd_info *info)
{
	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
	u32 event = its_get_event_id(d);
	int ret = 0;

	mutex_lock(&its_dev->event_map.vlpi_lock);

	if (!its_dev->event_map.vm ||
	    !its_dev->event_map.vlpi_maps[event].vm) {
		ret = -EINVAL;
		goto out;
	}

	/* Copy our mapping information to the incoming request */
	*info->map = its_dev->event_map.vlpi_maps[event];

out:
	mutex_unlock(&its_dev->event_map.vlpi_lock);
	return ret;
}

static int its_vlpi_unmap(struct irq_data *d)
{
	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
	u32 event = its_get_event_id(d);
	int ret = 0;

	mutex_lock(&its_dev->event_map.vlpi_lock);

	if (!its_dev->event_map.vm || !irqd_is_forwarded_to_vcpu(d)) {
		ret = -EINVAL;
		goto out;
	}

	/* Drop the virtual mapping */
	its_send_discard(its_dev, event);

	/* and restore the physical one */
	irqd_clr_forwarded_to_vcpu(d);
	its_send_mapti(its_dev, d->hwirq, event);
	lpi_update_config(d, 0xff, (LPI_PROP_DEFAULT_PRIO |
				    LPI_PROP_ENABLED |
				    LPI_PROP_GROUP1));