// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2013-2017 ARM Limited, All Rights Reserved.
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#include <linux/acpi.h>
#include <linux/acpi_iort.h>
#include <linux/bitfield.h>
#include <linux/bitmap.h>
#include <linux/cpu.h>
#include <linux/crash_dump.h>
#include <linux/delay.h>
#include <linux/dma-iommu.h>
#include <linux/efi.h>
#include <linux/interrupt.h>
#include <linux/irqdomain.h>
#include <linux/list.h>
#include <linux/log2.h>
#include <linux/memblock.h>
#include <linux/mm.h>
#include <linux/msi.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_pci.h>
#include <linux/of_platform.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/syscore_ops.h>

#include <linux/irqchip.h>
#include <linux/irqchip/arm-gic-v3.h>
#include <linux/irqchip/arm-gic-v4.h>

#include <asm/cputype.h>
#include <asm/exception.h>

#include "irq-gic-common.h"

#define ITS_FLAGS_CMDQ_NEEDS_FLUSHING		(1ULL << 0)
#define ITS_FLAGS_WORKAROUND_CAVIUM_22375	(1ULL << 1)
#define ITS_FLAGS_WORKAROUND_CAVIUM_23144	(1ULL << 2)
#define ITS_FLAGS_SAVE_SUSPEND_STATE		(1ULL << 3)

#define RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING	(1 << 0)
#define RDIST_FLAGS_RD_TABLES_PREALLOCATED	(1 << 1)

static u32 lpi_id_bits;

/*
 * We allocate memory for PROPBASE to cover 2 ^ lpi_id_bits LPIs to
 * deal with (one configuration byte per interrupt). PENDBASE has to
 * be 64kB aligned (one bit per LPI, plus 8192 bits for SPI/PPI/SGI).
 */
#define LPI_NRBITS		lpi_id_bits
#define LPI_PROPBASE_SZ		ALIGN(BIT(LPI_NRBITS), SZ_64K)
#define LPI_PENDBASE_SZ		ALIGN(BIT(LPI_NRBITS) / 8, SZ_64K)

#define LPI_PROP_DEFAULT_PRIO	GICD_INT_DEF_PRI

/*
 * Collection structure - just an ID, and a redistributor address to
 * ping. We use one per CPU as a bag of interrupts assigned to this
 * CPU.
 */
struct its_collection {
	u64			target_address;
	u16			col_id;
};

/*
 * The ITS_BASER structure - contains memory information, cached
 * value of BASER register configuration and ITS page size.
 */
struct its_baser {
	void		*base;
	u64		val;
	u32		order;
	u32		psz;
};

struct its_device;

/*
 * The ITS structure - contains most of the infrastructure, with the
 * top-level MSI domain, the command queue, the collections, and the
 * list of devices writing to it.
 *
 * dev_alloc_lock has to be taken for device allocations, while the
 * spinlock must be taken to parse data structures such as the device
 * list.
 */
struct its_node {
	raw_spinlock_t		lock;
	struct mutex		dev_alloc_lock;
	struct list_head	entry;
	void __iomem		*base;
	phys_addr_t		phys_base;
	struct its_cmd_block	*cmd_base;
	struct its_cmd_block	*cmd_write;
	struct its_baser	tables[GITS_BASER_NR_REGS];
	struct its_collection	*collections;
	struct fwnode_handle	*fwnode_handle;
	u64			(*get_msi_base)(struct its_device *its_dev);
	u64			typer;
	u64			cbaser_save;
	u32			ctlr_save;
	u32			mpidr;
	struct list_head	its_device_list;
	u64			flags;
	unsigned long		list_nr;
	int			numa_node;
	unsigned int		msi_domain_flags;
	u32			pre_its_base; /* for Socionext Synquacer */
	int			vlpi_redist_offset;
};

#define is_v4(its)		(!!((its)->typer & GITS_TYPER_VLPIS))
#define is_v4_1(its)		(!!((its)->typer & GITS_TYPER_VMAPP))
#define device_ids(its)		(FIELD_GET(GITS_TYPER_DEVBITS, (its)->typer) + 1)

#define ITS_ITT_ALIGN		SZ_256

/* The maximum number of VPEID bits supported by VLPI commands */
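/*
 * 16 bits by default; on GICv4.1, if GICD_TYPER2.VIL is set, the
 * usable width is GICD_TYPER2.VID + 1 instead.
 */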
#define ITS_MAX_VPEID_BITS						\
	({								\
		int nvpeid = 16;					\
		if (gic_rdists->has_rvpeid &&				\
		    gic_rdists->gicd_typer2 & GICD_TYPER2_VIL)		\
			nvpeid = 1 + (gic_rdists->gicd_typer2 &		\
				      GICD_TYPER2_VID);			\
									\
		nvpeid;							\
	})
#define ITS_MAX_VPEID		(1 << (ITS_MAX_VPEID_BITS))

/* Convert page order to size in bytes */
#define PAGE_ORDER_TO_SIZE(o)	(PAGE_SIZE << (o))

struct event_lpi_map {
	unsigned long		*lpi_map;
	u16			*col_map;
	irq_hw_number_t		lpi_base;
	int			nr_lpis;
	raw_spinlock_t		vlpi_lock;
	struct its_vm		*vm;
	struct its_vlpi_map	*vlpi_maps;
	int			nr_vlpis;
};

/*
 * The ITS view of a device - belongs to an ITS, owns an interrupt
 * translation table, and a list of interrupts. If some of its
 * LPIs are injected into a guest (GICv4), the event_map.vm field
 * indicates which one.
 */
struct its_device {
	struct list_head	entry;
	struct its_node		*its;
	struct event_lpi_map	event_map;
	void			*itt;
	u32			nr_ites;
	u32			device_id;
	bool			shared;
};

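/*
 * Proxy device for VPE doorbells on systems without DirectLPI:
 * doorbells are mapped as events of this device, and next_victim
 * selects which slot to recycle when they are all in use.
 */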
static struct {
	raw_spinlock_t		lock;
	struct its_device	*dev;
	struct its_vpe		**vpes;
	int			next_victim;
} vpe_proxy;

static LIST_HEAD(its_nodes);
static DEFINE_RAW_SPINLOCK(its_lock);
static struct rdists *gic_rdists;
static struct irq_domain *its_parent;

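/* Bitmap of ITSs that take part in the ITSList feature (GICv4) */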
static unsigned long its_list_map;
static u16 vmovp_seq_num;
static DEFINE_RAW_SPINLOCK(vmovp_lock);

static DEFINE_IDA(its_vpeid_ida);

#define gic_data_rdist()		(raw_cpu_ptr(gic_rdists->rdist))
#define gic_data_rdist_cpu(cpu)		(per_cpu_ptr(gic_rdists->rdist, cpu))
#define gic_data_rdist_rd_base()	(gic_data_rdist()->rd_base)
#define gic_data_rdist_vlpi_base()	(gic_data_rdist_rd_base() + SZ_128K)

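/*
 * Build the ITSList for a VM: the set of v4 ITSs that currently have
 * VLPIs mapped for it, as the 16bit bitmap a VMOVP command expects.
 */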
static u16 get_its_list(struct its_vm *vm)
{
	struct its_node *its;
	unsigned long its_list = 0;

	list_for_each_entry(its, &its_nodes, entry) {
		if (!is_v4(its))
			continue;

		if (vm->vlpi_count[its->list_nr])
			__set_bit(its->list_nr, &its_list);
	}

	return (u16)its_list;
}

static inline u32 its_get_event_id(struct irq_data *d)
{
	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
	return d->hwirq - its_dev->event_map.lpi_base;
}

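/* Find the collection an event is routed to via the per-device map */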
static struct its_collection *dev_event_to_col(struct its_device *its_dev,
					       u32 event)
{
	struct its_node *its = its_dev->its;

	return its->collections + its_dev->event_map.col_map[event];
}

static struct its_vlpi_map *dev_event_to_vlpi_map(struct its_device *its_dev,
					       u32 event)
{
	if (WARN_ON_ONCE(event >= its_dev->event_map.nr_lpis))
		return NULL;

	return &its_dev->event_map.vlpi_maps[event];
}

static struct its_vlpi_map *get_vlpi_map(struct irq_data *d)
{
	if (irqd_is_forwarded_to_vcpu(d)) {
		struct its_device *its_dev = irq_data_get_irq_chip_data(d);
		u32 event = its_get_event_id(d);

		return dev_event_to_vlpi_map(its_dev, event);
	}

	return NULL;
}

static int irq_to_cpuid(struct irq_data *d)
{
	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
	struct its_vlpi_map *map = get_vlpi_map(d);

	if (map)
		return map->vpe->col_idx;

	return its_dev->event_map.col_map[its_get_event_id(d)];
}

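/*
 * A collection's target address always has its low 16 bits clear
 * (a 64kB-aligned redistributor address, or a CPU number shifted
 * left by 16), so anything else indicates corruption.
 */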
static struct its_collection *valid_col(struct its_collection *col)
{
	if (WARN_ON_ONCE(col->target_address & GENMASK_ULL(15, 0)))
		return NULL;

	return col;
}

static struct its_vpe *valid_vpe(struct its_node *its, struct its_vpe *vpe)
{
	if (valid_col(its->collections + vpe->col_idx))
		return vpe;

	return NULL;
}

/*
 * ITS command descriptors - parameters to be encoded in a command
 * block.
 */
struct its_cmd_desc {
	union {
		struct {
			struct its_device *dev;
			u32 event_id;
		} its_inv_cmd;

		struct {
			struct its_device *dev;
			u32 event_id;
		} its_clear_cmd;

		struct {
			struct its_device *dev;
			u32 event_id;
		} its_int_cmd;

		struct {
			struct its_device *dev;
			int valid;
		} its_mapd_cmd;

		struct {
			struct its_collection *col;
			int valid;
		} its_mapc_cmd;

		struct {
			struct its_device *dev;
			u32 phys_id;
			u32 event_id;
		} its_mapti_cmd;

		struct {
			struct its_device *dev;
			struct its_collection *col;
			u32 event_id;
		} its_movi_cmd;

		struct {
			struct its_device *dev;
			u32 event_id;
		} its_discard_cmd;

		struct {
			struct its_collection *col;
		} its_invall_cmd;

		struct {
			struct its_vpe *vpe;
		} its_vinvall_cmd;

		struct {
			struct its_vpe *vpe;
			struct its_collection *col;
			bool valid;
		} its_vmapp_cmd;

		struct {
			struct its_vpe *vpe;
			struct its_device *dev;
			u32 virt_id;
			u32 event_id;
			bool db_enabled;
		} its_vmapti_cmd;

		struct {
			struct its_vpe *vpe;
			struct its_device *dev;
			u32 event_id;
			bool db_enabled;
		} its_vmovi_cmd;

		struct {
			struct its_vpe *vpe;
			struct its_collection *col;
			u16 seq_num;
			u16 its_list;
		} its_vmovp_cmd;

		struct {
			struct its_vpe *vpe;
		} its_invdb_cmd;
	};
};

/*
 * The ITS command block, which is what the ITS actually parses.
 */
struct its_cmd_block {
	union {
		u64	raw_cmd[4];
		__le64	raw_cmd_le[4];
	};
};

#define ITS_CMD_QUEUE_SZ		SZ_64K
#define ITS_CMD_QUEUE_NR_ENTRIES	(ITS_CMD_QUEUE_SZ / sizeof(struct its_cmd_block))

typedef struct its_collection *(*its_cmd_builder_t)(struct its_node *,
						    struct its_cmd_block *,
						    struct its_cmd_desc *);

typedef struct its_vpe *(*its_cmd_vbuilder_t)(struct its_node *,
					      struct its_cmd_block *,
					      struct its_cmd_desc *);

static void its_mask_encode(u64 *raw_cmd, u64 val, int h, int l)
{
	u64 mask = GENMASK_ULL(h, l);
	*raw_cmd &= ~mask;
	*raw_cmd |= (val << l) & mask;
}

static void its_encode_cmd(struct its_cmd_block *cmd, u8 cmd_nr)
{
	its_mask_encode(&cmd->raw_cmd[0], cmd_nr, 7, 0);
}

static void its_encode_devid(struct its_cmd_block *cmd, u32 devid)
{
	its_mask_encode(&cmd->raw_cmd[0], devid, 63, 32);
}

static void its_encode_event_id(struct its_cmd_block *cmd, u32 id)
{
	its_mask_encode(&cmd->raw_cmd[1], id, 31, 0);
}

static void its_encode_phys_id(struct its_cmd_block *cmd, u32 phys_id)
{
	its_mask_encode(&cmd->raw_cmd[1], phys_id, 63, 32);
}

static void its_encode_size(struct its_cmd_block *cmd, u8 size)
{
	its_mask_encode(&cmd->raw_cmd[1], size, 4, 0);
}

static void its_encode_itt(struct its_cmd_block *cmd, u64 itt_addr)
{
	its_mask_encode(&cmd->raw_cmd[2], itt_addr >> 8, 51, 8);
}

static void its_encode_valid(struct its_cmd_block *cmd, int valid)
{
	its_mask_encode(&cmd->raw_cmd[2], !!valid, 63, 63);
}

static void its_encode_target(struct its_cmd_block *cmd, u64 target_addr)
{
	its_mask_encode(&cmd->raw_cmd[2], target_addr >> 16, 51, 16);
}

static void its_encode_collection(struct its_cmd_block *cmd, u16 col)
{
	its_mask_encode(&cmd->raw_cmd[2], col, 15, 0);
}

static void its_encode_vpeid(struct its_cmd_block *cmd, u16 vpeid)
{
	its_mask_encode(&cmd->raw_cmd[1], vpeid, 47, 32);
}

static void its_encode_virt_id(struct its_cmd_block *cmd, u32 virt_id)
{
	its_mask_encode(&cmd->raw_cmd[2], virt_id, 31, 0);
}

static void its_encode_db_phys_id(struct its_cmd_block *cmd, u32 db_phys_id)
{
	its_mask_encode(&cmd->raw_cmd[2], db_phys_id, 63, 32);
}

static void its_encode_db_valid(struct its_cmd_block *cmd, bool db_valid)
{
	its_mask_encode(&cmd->raw_cmd[2], db_valid, 0, 0);
}

static void its_encode_seq_num(struct its_cmd_block *cmd, u16 seq_num)
{
	its_mask_encode(&cmd->raw_cmd[0], seq_num, 47, 32);
}

static void its_encode_its_list(struct its_cmd_block *cmd, u16 its_list)
{
	its_mask_encode(&cmd->raw_cmd[1], its_list, 15, 0);
}

static void its_encode_vpt_addr(struct its_cmd_block *cmd, u64 vpt_pa)
{
	its_mask_encode(&cmd->raw_cmd[3], vpt_pa >> 16, 51, 16);
}

static void its_encode_vpt_size(struct its_cmd_block *cmd, u8 vpt_size)
{
	its_mask_encode(&cmd->raw_cmd[3], vpt_size, 4, 0);
}

static void its_encode_vconf_addr(struct its_cmd_block *cmd, u64 vconf_pa)
{
	its_mask_encode(&cmd->raw_cmd[0], vconf_pa >> 16, 51, 16);
}

static void its_encode_alloc(struct its_cmd_block *cmd, bool alloc)
{
	its_mask_encode(&cmd->raw_cmd[0], alloc, 8, 8);
}

static void its_encode_ptz(struct its_cmd_block *cmd, bool ptz)
{
	its_mask_encode(&cmd->raw_cmd[0], ptz, 9, 9);
}

static void its_encode_vmapp_default_db(struct its_cmd_block *cmd,
					u32 vpe_db_lpi)
{
	its_mask_encode(&cmd->raw_cmd[1], vpe_db_lpi, 31, 0);
}

static void its_encode_vmovp_default_db(struct its_cmd_block *cmd,
					u32 vpe_db_lpi)
{
	its_mask_encode(&cmd->raw_cmd[3], vpe_db_lpi, 31, 0);
}

static void its_encode_db(struct its_cmd_block *cmd, bool db)
{
	its_mask_encode(&cmd->raw_cmd[2], db, 63, 63);
}

static inline void its_fixup_cmd(struct its_cmd_block *cmd)
{
	/* Let's fixup BE commands */
	cmd->raw_cmd_le[0] = cpu_to_le64(cmd->raw_cmd[0]);
	cmd->raw_cmd_le[1] = cpu_to_le64(cmd->raw_cmd[1]);
	cmd->raw_cmd_le[2] = cpu_to_le64(cmd->raw_cmd[2]);
	cmd->raw_cmd_le[3] = cpu_to_le64(cmd->raw_cmd[3]);
}

static struct its_collection *its_build_mapd_cmd(struct its_node *its,
						 struct its_cmd_block *cmd,
						 struct its_cmd_desc *desc)
{
	unsigned long itt_addr;
	u8 size = ilog2(desc->its_mapd_cmd.dev->nr_ites);

	itt_addr = virt_to_phys(desc->its_mapd_cmd.dev->itt);
	itt_addr = ALIGN(itt_addr, ITS_ITT_ALIGN);

	its_encode_cmd(cmd, GITS_CMD_MAPD);
	its_encode_devid(cmd, desc->its_mapd_cmd.dev->device_id);
	its_encode_size(cmd, size - 1);
	its_encode_itt(cmd, itt_addr);
	its_encode_valid(cmd, desc->its_mapd_cmd.valid);

	its_fixup_cmd(cmd);

	return NULL;
}

static struct its_collection *its_build_mapc_cmd(struct its_node *its,
						 struct its_cmd_block *cmd,
						 struct its_cmd_desc *desc)
{
	its_encode_cmd(cmd, GITS_CMD_MAPC);
	its_encode_collection(cmd, desc->its_mapc_cmd.col->col_id);
	its_encode_target(cmd, desc->its_mapc_cmd.col->target_address);
	its_encode_valid(cmd, desc->its_mapc_cmd.valid);

	its_fixup_cmd(cmd);

	return desc->its_mapc_cmd.col;
}

static struct its_collection *its_build_mapti_cmd(struct its_node *its,
						  struct its_cmd_block *cmd,
						  struct its_cmd_desc *desc)
{
	struct its_collection *col;

	col = dev_event_to_col(desc->its_mapti_cmd.dev,
			       desc->its_mapti_cmd.event_id);

	its_encode_cmd(cmd, GITS_CMD_MAPTI);
	its_encode_devid(cmd, desc->its_mapti_cmd.dev->device_id);
	its_encode_event_id(cmd, desc->its_mapti_cmd.event_id);
	its_encode_phys_id(cmd, desc->its_mapti_cmd.phys_id);
	its_encode_collection(cmd, col->col_id);

	its_fixup_cmd(cmd);

	return valid_col(col);
}

static struct its_collection *its_build_movi_cmd(struct its_node *its,
						 struct its_cmd_block *cmd,
						 struct its_cmd_desc *desc)
{
	struct its_collection *col;

	col = dev_event_to_col(desc->its_movi_cmd.dev,
			       desc->its_movi_cmd.event_id);

	its_encode_cmd(cmd, GITS_CMD_MOVI);
	its_encode_devid(cmd, desc->its_movi_cmd.dev->device_id);
	its_encode_event_id(cmd, desc->its_movi_cmd.event_id);
	its_encode_collection(cmd, desc->its_movi_cmd.col->col_id);

	its_fixup_cmd(cmd);

	return valid_col(col);
}

static struct its_collection *its_build_discard_cmd(struct its_node *its,
						    struct its_cmd_block *cmd,
						    struct its_cmd_desc *desc)
{
	struct its_collection *col;

	col = dev_event_to_col(desc->its_discard_cmd.dev,
			       desc->its_discard_cmd.event_id);

	its_encode_cmd(cmd, GITS_CMD_DISCARD);
	its_encode_devid(cmd, desc->its_discard_cmd.dev->device_id);
	its_encode_event_id(cmd, desc->its_discard_cmd.event_id);

	its_fixup_cmd(cmd);

	return valid_col(col);
}

static struct its_collection *its_build_inv_cmd(struct its_node *its,
						struct its_cmd_block *cmd,
						struct its_cmd_desc *desc)
{
	struct its_collection *col;

	col = dev_event_to_col(desc->its_inv_cmd.dev,
			       desc->its_inv_cmd.event_id);

	its_encode_cmd(cmd, GITS_CMD_INV);
	its_encode_devid(cmd, desc->its_inv_cmd.dev->device_id);
	its_encode_event_id(cmd, desc->its_inv_cmd.event_id);

	its_fixup_cmd(cmd);

	return valid_col(col);
}

static struct its_collection *its_build_int_cmd(struct its_node *its,
						struct its_cmd_block *cmd,
						struct its_cmd_desc *desc)
{
	struct its_collection *col;

	col = dev_event_to_col(desc->its_int_cmd.dev,
			       desc->its_int_cmd.event_id);

	its_encode_cmd(cmd, GITS_CMD_INT);
	its_encode_devid(cmd, desc->its_int_cmd.dev->device_id);
	its_encode_event_id(cmd, desc->its_int_cmd.event_id);

	its_fixup_cmd(cmd);

	return valid_col(col);
}

static struct its_collection *its_build_clear_cmd(struct its_node *its,
						  struct its_cmd_block *cmd,
						  struct its_cmd_desc *desc)
{
	struct its_collection *col;

	col = dev_event_to_col(desc->its_clear_cmd.dev,
			       desc->its_clear_cmd.event_id);

	its_encode_cmd(cmd, GITS_CMD_CLEAR);
	its_encode_devid(cmd, desc->its_clear_cmd.dev->device_id);
	its_encode_event_id(cmd, desc->its_clear_cmd.event_id);

	its_fixup_cmd(cmd);

	return valid_col(col);
}

static struct its_collection *its_build_invall_cmd(struct its_node *its,
						   struct its_cmd_block *cmd,
						   struct its_cmd_desc *desc)
{
	its_encode_cmd(cmd, GITS_CMD_INVALL);
	its_encode_collection(cmd, desc->its_invall_cmd.col->col_id);

	its_fixup_cmd(cmd);

	return NULL;
}

static struct its_vpe *its_build_vinvall_cmd(struct its_node *its,
					     struct its_cmd_block *cmd,
					     struct its_cmd_desc *desc)
{
	its_encode_cmd(cmd, GITS_CMD_VINVALL);
	its_encode_vpeid(cmd, desc->its_vinvall_cmd.vpe->vpe_id);

	its_fixup_cmd(cmd);

	return valid_vpe(its, desc->its_vinvall_cmd.vpe);
}

static struct its_vpe *its_build_vmapp_cmd(struct its_node *its,
					   struct its_cmd_block *cmd,
					   struct its_cmd_desc *desc)
{
	unsigned long vpt_addr, vconf_addr;
	u64 target;
	bool alloc;

	its_encode_cmd(cmd, GITS_CMD_VMAPP);
	its_encode_vpeid(cmd, desc->its_vmapp_cmd.vpe->vpe_id);
	its_encode_valid(cmd, desc->its_vmapp_cmd.valid);

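	/*
	 * On an unmap, v4.1 wants to know whether this was the last
	 * VMAPP for the vPE, so it can release the vPE state (hence
	 * the reference count kept in vmapp_count).
	 */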
	if (!desc->its_vmapp_cmd.valid) {
		if (is_v4_1(its)) {
			alloc = !atomic_dec_return(&desc->its_vmapp_cmd.vpe->vmapp_count);
			its_encode_alloc(cmd, alloc);
		}

		goto out;
	}

	vpt_addr = virt_to_phys(page_address(desc->its_vmapp_cmd.vpe->vpt_page));
	target = desc->its_vmapp_cmd.col->target_address + its->vlpi_redist_offset;

	its_encode_target(cmd, target);
	its_encode_vpt_addr(cmd, vpt_addr);
	its_encode_vpt_size(cmd, LPI_NRBITS - 1);

	if (!is_v4_1(its))
		goto out;

	vconf_addr = virt_to_phys(page_address(desc->its_vmapp_cmd.vpe->its_vm->vprop_page));

	alloc = !atomic_fetch_inc(&desc->its_vmapp_cmd.vpe->vmapp_count);

	its_encode_alloc(cmd, alloc);

	/* We can only signal PTZ when alloc==1. Why do we have two bits? */
	its_encode_ptz(cmd, alloc);
	its_encode_vconf_addr(cmd, vconf_addr);
	its_encode_vmapp_default_db(cmd, desc->its_vmapp_cmd.vpe->vpe_db_lpi);

out:
	its_fixup_cmd(cmd);

	return valid_vpe(its, desc->its_vmapp_cmd.vpe);
}

static struct its_vpe *its_build_vmapti_cmd(struct its_node *its,
					    struct its_cmd_block *cmd,
					    struct its_cmd_desc *desc)
{
	u32 db;

	if (!is_v4_1(its) && desc->its_vmapti_cmd.db_enabled)
		db = desc->its_vmapti_cmd.vpe->vpe_db_lpi;
	else
		db = 1023;

	its_encode_cmd(cmd, GITS_CMD_VMAPTI);
	its_encode_devid(cmd, desc->its_vmapti_cmd.dev->device_id);
	its_encode_vpeid(cmd, desc->its_vmapti_cmd.vpe->vpe_id);
	its_encode_event_id(cmd, desc->its_vmapti_cmd.event_id);
	its_encode_db_phys_id(cmd, db);
	its_encode_virt_id(cmd, desc->its_vmapti_cmd.virt_id);

	its_fixup_cmd(cmd);

	return valid_vpe(its, desc->its_vmapti_cmd.vpe);
}

static struct its_vpe *its_build_vmovi_cmd(struct its_node *its,
					   struct its_cmd_block *cmd,
					   struct its_cmd_desc *desc)
{
	u32 db;

	if (!is_v4_1(its) && desc->its_vmovi_cmd.db_enabled)
		db = desc->its_vmovi_cmd.vpe->vpe_db_lpi;
	else
		db = 1023;

	its_encode_cmd(cmd, GITS_CMD_VMOVI);
	its_encode_devid(cmd, desc->its_vmovi_cmd.dev->device_id);
	its_encode_vpeid(cmd, desc->its_vmovi_cmd.vpe->vpe_id);
	its_encode_event_id(cmd, desc->its_vmovi_cmd.event_id);
	its_encode_db_phys_id(cmd, db);
	its_encode_db_valid(cmd, true);

	its_fixup_cmd(cmd);

	return valid_vpe(its, desc->its_vmovi_cmd.vpe);
}

static struct its_vpe *its_build_vmovp_cmd(struct its_node *its,
					   struct its_cmd_block *cmd,
					   struct its_cmd_desc *desc)
{
	u64 target;

	target = desc->its_vmovp_cmd.col->target_address + its->vlpi_redist_offset;
	its_encode_cmd(cmd, GITS_CMD_VMOVP);
	its_encode_seq_num(cmd, desc->its_vmovp_cmd.seq_num);
	its_encode_its_list(cmd, desc->its_vmovp_cmd.its_list);
	its_encode_vpeid(cmd, desc->its_vmovp_cmd.vpe->vpe_id);
	its_encode_target(cmd, target);

	if (is_v4_1(its)) {
		its_encode_db(cmd, true);
		its_encode_vmovp_default_db(cmd, desc->its_vmovp_cmd.vpe->vpe_db_lpi);
	}

	its_fixup_cmd(cmd);

	return valid_vpe(its, desc->its_vmovp_cmd.vpe);
}

static struct its_vpe *its_build_vinv_cmd(struct its_node *its,
					  struct its_cmd_block *cmd,
					  struct its_cmd_desc *desc)
{
	struct its_vlpi_map *map;

	map = dev_event_to_vlpi_map(desc->its_inv_cmd.dev,
				    desc->its_inv_cmd.event_id);

	its_encode_cmd(cmd, GITS_CMD_INV);
	its_encode_devid(cmd, desc->its_inv_cmd.dev->device_id);
	its_encode_event_id(cmd, desc->its_inv_cmd.event_id);

	its_fixup_cmd(cmd);

	return valid_vpe(its, map->vpe);
}

static struct its_vpe *its_build_vint_cmd(struct its_node *its,
					  struct its_cmd_block *cmd,
					  struct its_cmd_desc *desc)
{
	struct its_vlpi_map *map;

	map = dev_event_to_vlpi_map(desc->its_int_cmd.dev,
				    desc->its_int_cmd.event_id);

	its_encode_cmd(cmd, GITS_CMD_INT);
	its_encode_devid(cmd, desc->its_int_cmd.dev->device_id);
	its_encode_event_id(cmd, desc->its_int_cmd.event_id);

	its_fixup_cmd(cmd);

	return valid_vpe(its, map->vpe);
}

static struct its_vpe *its_build_vclear_cmd(struct its_node *its,
					    struct its_cmd_block *cmd,
					    struct its_cmd_desc *desc)
{
	struct its_vlpi_map *map;

	map = dev_event_to_vlpi_map(desc->its_clear_cmd.dev,
				    desc->its_clear_cmd.event_id);

	its_encode_cmd(cmd, GITS_CMD_CLEAR);
	its_encode_devid(cmd, desc->its_clear_cmd.dev->device_id);
	its_encode_event_id(cmd, desc->its_clear_cmd.event_id);

	its_fixup_cmd(cmd);

	return valid_vpe(its, map->vpe);
}

static struct its_vpe *its_build_invdb_cmd(struct its_node *its,
					   struct its_cmd_block *cmd,
					   struct its_cmd_desc *desc)
{
	if (WARN_ON(!is_v4_1(its)))
		return NULL;

	its_encode_cmd(cmd, GITS_CMD_INVDB);
	its_encode_vpeid(cmd, desc->its_invdb_cmd.vpe->vpe_id);

	its_fixup_cmd(cmd);

	return valid_vpe(its, desc->its_invdb_cmd.vpe);
}

static u64 its_cmd_ptr_to_offset(struct its_node *its,
				 struct its_cmd_block *ptr)
{
	return (ptr - its->cmd_base) * sizeof(*ptr);
}

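/*
 * The command queue is a circular buffer: GITS_CWRITER tracks where
 * we insert commands, GITS_CREADR how far the ITS has consumed them.
 * The queue is full when the write pointer sits one slot behind the
 * read pointer.
 */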
static int its_queue_full(struct its_node *its)
{
	int widx;
	int ridx;

	widx = its->cmd_write - its->cmd_base;
	ridx = readl_relaxed(its->base + GITS_CREADR) / sizeof(struct its_cmd_block);

	/* This is incredibly unlikely to happen, unless the ITS locks up. */
	if (((widx + 1) % ITS_CMD_QUEUE_NR_ENTRIES) == ridx)
		return 1;

	return 0;
}

static struct its_cmd_block *its_allocate_entry(struct its_node *its)
{
	struct its_cmd_block *cmd;
	u32 count = 1000000;	/* 1s! */

	while (its_queue_full(its)) {
		count--;
		if (!count) {
			pr_err_ratelimited("ITS queue not draining\n");
			return NULL;
		}
		cpu_relax();
		udelay(1);
	}

	cmd = its->cmd_write++;

	/* Handle queue wrapping */
	if (its->cmd_write == (its->cmd_base + ITS_CMD_QUEUE_NR_ENTRIES))
		its->cmd_write = its->cmd_base;

	/* Clear command */
	cmd->raw_cmd[0] = 0;
	cmd->raw_cmd[1] = 0;
	cmd->raw_cmd[2] = 0;
	cmd->raw_cmd[3] = 0;

	return cmd;
}

static struct its_cmd_block *its_post_commands(struct its_node *its)
{
	u64 wr = its_cmd_ptr_to_offset(its, its->cmd_write);

	writel_relaxed(wr, its->base + GITS_CWRITER);

	return its->cmd_write;
}

static void its_flush_cmd(struct its_node *its, struct its_cmd_block *cmd)
{
	/*
	 * Make sure the commands written to memory are observable by
	 * the ITS.
	 */
	if (its->flags & ITS_FLAGS_CMDQ_NEEDS_FLUSHING)
		gic_flush_dcache_to_poc(cmd, sizeof(*cmd));
	else
		dsb(ishst);
}

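/*
 * Wait until the read pointer has moved past @to. Offsets are
 * linearized so that a queue wrap-around isn't mistaken for
 * completion.
 */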
static int its_wait_for_range_completion(struct its_node *its,
					 u64	prev_idx,
					 struct its_cmd_block *to)
{
	u64 rd_idx, to_idx, linear_idx;
	u32 count = 1000000;	/* 1s! */

	/* Linearize to_idx if the command set has wrapped around */
	to_idx = its_cmd_ptr_to_offset(its, to);
	if (to_idx < prev_idx)
		to_idx += ITS_CMD_QUEUE_SZ;

	linear_idx = prev_idx;

	while (1) {
		s64 delta;

		rd_idx = readl_relaxed(its->base + GITS_CREADR);

		/*
		 * Compute the read pointer progress, taking the
		 * potential wrap-around into account.
		 */
		delta = rd_idx - prev_idx;
		if (rd_idx < prev_idx)
			delta += ITS_CMD_QUEUE_SZ;

		linear_idx += delta;
		if (linear_idx >= to_idx)
			break;

		count--;
		if (!count) {
			pr_err_ratelimited("ITS queue timeout (%llu %llu)\n",
					   to_idx, linear_idx);
			return -1;
		}
		prev_idx = rd_idx;
		cpu_relax();
		udelay(1);
	}

	return 0;
}

/* Warning, macro hell follows */
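/*
 * This generates a function that queues the command built by
 * 'builder', appends a SYNC/VSYNC if the builder returned something
 * to synchronize on, posts the lot, and waits for the ITS to consume
 * it.
 */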
#define BUILD_SINGLE_CMD_FUNC(name, buildtype, synctype, buildfn)	\
void name(struct its_node *its,						\
	  buildtype builder,						\
	  struct its_cmd_desc *desc)					\
{									\
	struct its_cmd_block *cmd, *sync_cmd, *next_cmd;		\
	synctype *sync_obj;						\
	unsigned long flags;						\
	u64 rd_idx;							\
									\
	raw_spin_lock_irqsave(&its->lock, flags);			\
									\
	cmd = its_allocate_entry(its);					\
	if (!cmd) {		/* We're soooooo screwed... */		\
		raw_spin_unlock_irqrestore(&its->lock, flags);		\
		return;							\
	}								\
	sync_obj = builder(its, cmd, desc);				\
	its_flush_cmd(its, cmd);					\
									\
	if (sync_obj) {							\
		sync_cmd = its_allocate_entry(its);			\
		if (!sync_cmd)						\
			goto post;					\
									\
		buildfn(its, sync_cmd, sync_obj);			\
		its_flush_cmd(its, sync_cmd);				\
	}								\
									\
post:									\
	rd_idx = readl_relaxed(its->base + GITS_CREADR);		\
	next_cmd = its_post_commands(its);				\
	raw_spin_unlock_irqrestore(&its->lock, flags);			\
									\
	if (its_wait_for_range_completion(its, rd_idx, next_cmd))	\
		pr_err_ratelimited("ITS cmd %ps failed\n", builder);	\
}

static void its_build_sync_cmd(struct its_node *its,
			       struct its_cmd_block *sync_cmd,
			       struct its_collection *sync_col)
{
	its_encode_cmd(sync_cmd, GITS_CMD_SYNC);
	its_encode_target(sync_cmd, sync_col->target_address);

	its_fixup_cmd(sync_cmd);
}

static BUILD_SINGLE_CMD_FUNC(its_send_single_command, its_cmd_builder_t,
			     struct its_collection, its_build_sync_cmd)

static void its_build_vsync_cmd(struct its_node *its,
				struct its_cmd_block *sync_cmd,
				struct its_vpe *sync_vpe)
{
	its_encode_cmd(sync_cmd, GITS_CMD_VSYNC);
	its_encode_vpeid(sync_cmd, sync_vpe->vpe_id);

	its_fixup_cmd(sync_cmd);
}

static BUILD_SINGLE_CMD_FUNC(its_send_single_vcommand, its_cmd_vbuilder_t,
			     struct its_vpe, its_build_vsync_cmd)

static void its_send_int(struct its_device *dev, u32 event_id)
{
	struct its_cmd_desc desc;

	desc.its_int_cmd.dev = dev;
	desc.its_int_cmd.event_id = event_id;

	its_send_single_command(dev->its, its_build_int_cmd, &desc);
}

static void its_send_clear(struct its_device *dev, u32 event_id)
{
	struct its_cmd_desc desc;

	desc.its_clear_cmd.dev = dev;
	desc.its_clear_cmd.event_id = event_id;

	its_send_single_command(dev->its, its_build_clear_cmd, &desc);
}

static void its_send_inv(struct its_device *dev, u32 event_id)
{
	struct its_cmd_desc desc;

	desc.its_inv_cmd.dev = dev;
	desc.its_inv_cmd.event_id = event_id;

	its_send_single_command(dev->its, its_build_inv_cmd, &desc);
}

static void its_send_mapd(struct its_device *dev, int valid)
{
	struct its_cmd_desc desc;

	desc.its_mapd_cmd.dev = dev;
	desc.its_mapd_cmd.valid = !!valid;

	its_send_single_command(dev->its, its_build_mapd_cmd, &desc);
}

static void its_send_mapc(struct its_node *its, struct its_collection *col,
			  int valid)
{
	struct its_cmd_desc desc;

	desc.its_mapc_cmd.col = col;
	desc.its_mapc_cmd.valid = !!valid;

	its_send_single_command(its, its_build_mapc_cmd, &desc);
}

static void its_send_mapti(struct its_device *dev, u32 irq_id, u32 id)
{
	struct its_cmd_desc desc;

	desc.its_mapti_cmd.dev = dev;
	desc.its_mapti_cmd.phys_id = irq_id;
	desc.its_mapti_cmd.event_id = id;

	its_send_single_command(dev->its, its_build_mapti_cmd, &desc);
}

static void its_send_movi(struct its_device *dev,
			  struct its_collection *col, u32 id)
{
	struct its_cmd_desc desc;

	desc.its_movi_cmd.dev = dev;
	desc.its_movi_cmd.col = col;
	desc.its_movi_cmd.event_id = id;

	its_send_single_command(dev->its, its_build_movi_cmd, &desc);
}

static void its_send_discard(struct its_device *dev, u32 id)
{
	struct its_cmd_desc desc;

	desc.its_discard_cmd.dev = dev;
	desc.its_discard_cmd.event_id = id;

	its_send_single_command(dev->its, its_build_discard_cmd, &desc);
}

static void its_send_invall(struct its_node *its, struct its_collection *col)
{
	struct its_cmd_desc desc;

	desc.its_invall_cmd.col = col;

	its_send_single_command(its, its_build_invall_cmd, &desc);
}

static void its_send_vmapti(struct its_device *dev, u32 id)
{
	struct its_vlpi_map *map = dev_event_to_vlpi_map(dev, id);
	struct its_cmd_desc desc;

	desc.its_vmapti_cmd.vpe = map->vpe;
	desc.its_vmapti_cmd.dev = dev;
	desc.its_vmapti_cmd.virt_id = map->vintid;
	desc.its_vmapti_cmd.event_id = id;
	desc.its_vmapti_cmd.db_enabled = map->db_enabled;

	its_send_single_vcommand(dev->its, its_build_vmapti_cmd, &desc);
}

static void its_send_vmovi(struct its_device *dev, u32 id)
{
	struct its_vlpi_map *map = dev_event_to_vlpi_map(dev, id);
	struct its_cmd_desc desc;

	desc.its_vmovi_cmd.vpe = map->vpe;
	desc.its_vmovi_cmd.dev = dev;
	desc.its_vmovi_cmd.event_id = id;
	desc.its_vmovi_cmd.db_enabled = map->db_enabled;

	its_send_single_vcommand(dev->its, its_build_vmovi_cmd, &desc);
}

static void its_send_vmapp(struct its_node *its,
			   struct its_vpe *vpe, bool valid)
{
	struct its_cmd_desc desc;

	desc.its_vmapp_cmd.vpe = vpe;
	desc.its_vmapp_cmd.valid = valid;
	desc.its_vmapp_cmd.col = &its->collections[vpe->col_idx];

	its_send_single_vcommand(its, its_build_vmapp_cmd, &desc);
}

static void its_send_vmovp(struct its_vpe *vpe)
{
	struct its_cmd_desc desc = {};
	struct its_node *its;
	unsigned long flags;
	int col_id = vpe->col_idx;

	desc.its_vmovp_cmd.vpe = vpe;

	if (!its_list_map) {
		its = list_first_entry(&its_nodes, struct its_node, entry);
		desc.its_vmovp_cmd.col = &its->collections[col_id];
		its_send_single_vcommand(its, its_build_vmovp_cmd, &desc);
		return;
	}

	/*
	 * Yet another marvel of the architecture. If using the
	 * its_list "feature", we need to make sure that all ITSs
	 * receive all VMOVP commands in the same order. The only way
	 * to guarantee this is to make vmovp a serialization point.
	 *
	 * Wall <-- Head.
	 */
	raw_spin_lock_irqsave(&vmovp_lock, flags);

	desc.its_vmovp_cmd.seq_num = vmovp_seq_num++;
	desc.its_vmovp_cmd.its_list = get_its_list(vpe->its_vm);

	/* Emit VMOVPs */
	list_for_each_entry(its, &its_nodes, entry) {
		if (!is_v4(its))
			continue;

		if (!vpe->its_vm->vlpi_count[its->list_nr])
			continue;

		desc.its_vmovp_cmd.col = &its->collections[col_id];
		its_send_single_vcommand(its, its_build_vmovp_cmd, &desc);
	}

	raw_spin_unlock_irqrestore(&vmovp_lock, flags);
}

static void its_send_vinvall(struct its_node *its, struct its_vpe *vpe)
{
	struct its_cmd_desc desc;

	desc.its_vinvall_cmd.vpe = vpe;
	its_send_single_vcommand(its, its_build_vinvall_cmd, &desc);
}

static void its_send_vinv(struct its_device *dev, u32 event_id)
{
	struct its_cmd_desc desc;

	/*
	 * There is no real VINV command. This is just a normal INV,
	 * with a VSYNC instead of a SYNC.
	 */
	desc.its_inv_cmd.dev = dev;
	desc.its_inv_cmd.event_id = event_id;

	its_send_single_vcommand(dev->its, its_build_vinv_cmd, &desc);
}

static void its_send_vint(struct its_device *dev, u32 event_id)
{
	struct its_cmd_desc desc;

	/*
	 * There is no real VINT command. This is just a normal INT,
	 * with a VSYNC instead of a SYNC.
	 */
	desc.its_int_cmd.dev = dev;
	desc.its_int_cmd.event_id = event_id;

	its_send_single_vcommand(dev->its, its_build_vint_cmd, &desc);
}

static void its_send_vclear(struct its_device *dev, u32 event_id)
{
	struct its_cmd_desc desc;

	/*
	 * There is no real VCLEAR command. This is just a normal CLEAR,
	 * with a VSYNC instead of a SYNC.
	 */
	desc.its_clear_cmd.dev = dev;
	desc.its_clear_cmd.event_id = event_id;

	its_send_single_vcommand(dev->its, its_build_vclear_cmd, &desc);
}

static void its_send_invdb(struct its_node *its, struct its_vpe *vpe)
{
	struct its_cmd_desc desc;

	desc.its_invdb_cmd.vpe = vpe;
	its_send_single_vcommand(its, its_build_invdb_cmd, &desc);
}

/*
 * irqchip functions - assumes MSI, mostly.
 */
static void lpi_write_config(struct irq_data *d, u8 clr, u8 set)
{
	struct its_vlpi_map *map = get_vlpi_map(d);
	irq_hw_number_t hwirq;
	void *va;
	u8 *cfg;

	if (map) {
		va = page_address(map->vm->vprop_page);
		hwirq = map->vintid;

		/* Remember the updated property */
		map->properties &= ~clr;
		map->properties |= set | LPI_PROP_GROUP1;
	} else {
		va = gic_rdists->prop_table_va;
		hwirq = d->hwirq;
	}

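	/*
	 * One configuration byte per interrupt; LPI INTIDs start at
	 * 8192, hence the offset into the property table.
	 */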
	cfg = va + hwirq - 8192;
	*cfg &= ~clr;
	*cfg |= set | LPI_PROP_GROUP1;

	/*
	 * Make the above write visible to the redistributors.
	 * And yes, we're flushing exactly: One. Single. Byte.
	 * Humpf...
	 */
	if (gic_rdists->flags & RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING)
		gic_flush_dcache_to_poc(cfg, sizeof(*cfg));
	else
		dsb(ishst);
}

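/* Poll GICR_SYNCR until prior writes to the redistributor have taken effect */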
static void wait_for_syncr(void __iomem *rdbase)
{
	while (gic_read_lpir(rdbase + GICR_SYNCR) & 1)
		cpu_relax();
}

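/*
 * With DirectLPI (or on GICv4.1 for vLPIs), the invalidation can be
 * issued straight at the redistributor via GICR_INVLPIR instead of
 * an ITS INV command.
 */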
static void direct_lpi_inv(struct irq_data *d)
{
	struct its_vlpi_map *map = get_vlpi_map(d);
	void __iomem *rdbase;
	u64 val;

	if (map) {
		struct its_device *its_dev = irq_data_get_irq_chip_data(d);

		WARN_ON(!is_v4_1(its_dev->its));

		val  = GICR_INVLPIR_V;
		val |= FIELD_PREP(GICR_INVLPIR_VPEID, map->vpe->vpe_id);
		val |= FIELD_PREP(GICR_INVLPIR_INTID, map->vintid);
	} else {
		val = d->hwirq;