// SPDX-License-Identifier: GPL-2.0+
/*
 * Procedures for creating, accessing and interpreting the device tree.
 *
 * Paul Mackerras	August 1996.
 * Copyright (C) 1996-2005 Paul Mackerras.
 *
 *  Adapted for 64bit PowerPC by Dave Engebretsen and Peter Bergner.
 *    {engebret|bergner}@us.ibm.com
 *
 *  Adapted for sparc and sparc64 by David S. Miller davem@davemloft.net
 *
 *  Reconsolidated from arch/x/kernel/prom.c by Stephen Rothwell and
 *  Grant Likely.
 */

#define pr_fmt(fmt)	"OF: " fmt

#include <linux/console.h>
#include <linux/ctype.h>
#include <linux/cpu.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_graph.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/proc_fs.h>

#include "of_private.h"

LIST_HEAD(aliases_lookup);

struct device_node *of_root;
EXPORT_SYMBOL(of_root);
struct device_node *of_chosen;
struct device_node *of_aliases;
struct device_node *of_stdout;
static const char *of_stdout_options;

struct kset *of_kset;

/*
 * Used to protect the of_aliases, to hold off addition of nodes to sysfs.
 * This mutex must be held whenever modifications are being made to the
 * device tree. The of_{attach,detach}_node() and
 * of_{add,remove,update}_property() helpers make sure this happens.
 */
DEFINE_MUTEX(of_mutex);

/* use when traversing tree through the child, sibling,
 * or parent members of struct device_node.
 */
DEFINE_RAW_SPINLOCK(devtree_lock);

int of_n_addr_cells(struct device_node *np)
{
	u32 cells;

	do {
		if (np->parent)
			np = np->parent;
		if (!of_property_read_u32(np, "#address-cells", &cells))
			return cells;
	} while (np->parent);
	/* No #address-cells property for the root node */
	return OF_ROOT_NODE_ADDR_CELLS_DEFAULT;
}
EXPORT_SYMBOL(of_n_addr_cells);

int of_n_size_cells(struct device_node *np)
{
	u32 cells;

	do {
		if (np->parent)
			np = np->parent;
		if (!of_property_read_u32(np, "#size-cells", &cells))
			return cells;
	} while (np->parent);
	/* No #size-cells property for the root node */
	return OF_ROOT_NODE_SIZE_CELLS_DEFAULT;
}
EXPORT_SYMBOL(of_n_size_cells);

#ifdef CONFIG_NUMA
int __weak of_node_to_nid(struct device_node *np)
{
	return NUMA_NO_NODE;
}
#endif

static struct device_node **phandle_cache;
static u32 phandle_cache_mask;

/*
 * Assumptions behind phandle_cache implementation:
 *   - phandle property values are in a contiguous range of 1..n
 *
 * If the assumptions do not hold, then
 *   - the phandle lookup overhead reduction provided by the cache
 *     will likely be less
 */
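/*
 * Build (or rebuild) the phandle cache: count the phandles present in the
 * tree, size the cache to the next power of two and index each node by
 * its masked phandle value. Runs under devtree_lock with interrupts off,
 * hence the GFP_ATOMIC allocation.
 */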
void of_populate_phandle_cache(void)
{
	unsigned long flags;
	u32 cache_entries;
	struct device_node *np;
	u32 phandles = 0;

	raw_spin_lock_irqsave(&devtree_lock, flags);

	kfree(phandle_cache);
	phandle_cache = NULL;

	for_each_of_allnodes(np)
		if (np->phandle && np->phandle != OF_PHANDLE_ILLEGAL)
			phandles++;

	cache_entries = roundup_pow_of_two(phandles);
	phandle_cache_mask = cache_entries - 1;

	phandle_cache = kcalloc(cache_entries, sizeof(*phandle_cache),
				GFP_ATOMIC);
	if (!phandle_cache)
		goto out;

	for_each_of_allnodes(np)
		if (np->phandle && np->phandle != OF_PHANDLE_ILLEGAL)
			phandle_cache[np->phandle & phandle_cache_mask] = np;

out:
	raw_spin_unlock_irqrestore(&devtree_lock, flags);
}

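/*
 * Drop the phandle cache. With CONFIG_MODULES disabled this is also run
 * automatically as a late initcall (see below).
 */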
int of_free_phandle_cache(void)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&devtree_lock, flags);

	kfree(phandle_cache);
	phandle_cache = NULL;

	raw_spin_unlock_irqrestore(&devtree_lock, flags);

	return 0;
}
#if !defined(CONFIG_MODULES)
late_initcall_sync(of_free_phandle_cache);
#endif

void __init of_core_init(void)
{
	struct device_node *np;

	of_populate_phandle_cache();

	/* Create the kset, and register existing nodes */
	mutex_lock(&of_mutex);
	of_kset = kset_create_and_add("devicetree", NULL, firmware_kobj);
	if (!of_kset) {
		mutex_unlock(&of_mutex);
		pr_err("failed to register existing nodes\n");
		return;
	}
	for_each_of_allnodes(np)
		__of_attach_node_sysfs(np);
	mutex_unlock(&of_mutex);

	/* Symlink in /proc as required by userspace ABI */
	if (of_root)
		proc_symlink("device-tree", NULL, "/sys/firmware/devicetree/base");
}

static struct property *__of_find_property(const struct device_node *np,
					   const char *name, int *lenp)
{
	struct property *pp;

	if (!np)
		return NULL;

	for (pp = np->properties; pp; pp = pp->next) {
		if (of_prop_cmp(pp->name, name) == 0) {
			if (lenp)
				*lenp = pp->length;
			break;
		}
	}

	return pp;
}

struct property *of_find_property(const struct device_node *np,
				  const char *name,
				  int *lenp)
{
	struct property *pp;
	unsigned long flags;

	raw_spin_lock_irqsave(&devtree_lock, flags);
	pp = __of_find_property(np, name, lenp);
	raw_spin_unlock_irqrestore(&devtree_lock, flags);

	return pp;
}
EXPORT_SYMBOL(of_find_property);

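/*
 * Advance a depth-first walk over every node in the tree. Takes no
 * references and no locks; callers are expected to hold devtree_lock.
 */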
struct device_node *__of_find_all_nodes(struct device_node *prev)
{
	struct device_node *np;
	if (!prev) {
		np = of_root;
	} else if (prev->child) {
		np = prev->child;
	} else {
		/* Walk back up looking for a sibling, or the end of the structure */
		np = prev;
		while (np->parent && !np->sibling)
			np = np->parent;
		np = np->sibling; /* Might be null at the end of the tree */
	}
	return np;
}

/**
 * of_find_all_nodes - Get next node in global list
 * @prev:	Previous node or NULL to start iteration
 *		of_node_put() will be called on it
 *
 * Returns a node pointer with refcount incremented, use
 * of_node_put() on it when done.
 */
struct device_node *of_find_all_nodes(struct device_node *prev)
{
	struct device_node *np;
	unsigned long flags;

	raw_spin_lock_irqsave(&devtree_lock, flags);
	np = __of_find_all_nodes(prev);
	of_node_get(np);
	of_node_put(prev);
	raw_spin_unlock_irqrestore(&devtree_lock, flags);
	return np;
}
EXPORT_SYMBOL(of_find_all_nodes);

/*
 * Find a property with a given name for a given node
 * and return the value.
 */
const void *__of_get_property(const struct device_node *np,
			      const char *name, int *lenp)
{
	struct property *pp = __of_find_property(np, name, lenp);

	return pp ? pp->value : NULL;
}

/*
 * Find a property with a given name for a given node
 * and return the value.
 */
const void *of_get_property(const struct device_node *np, const char *name,
			    int *lenp)
{
	struct property *pp = of_find_property(np, name, lenp);

	return pp ? pp->value : NULL;
}
EXPORT_SYMBOL(of_get_property);

/*
 * arch_match_cpu_phys_id - Match the given logical CPU and physical id
 *
 * @cpu: logical cpu index of a core/thread
 * @phys_id: physical identifier of a core/thread
 *
 * CPU logical to physical index mapping is architecture specific.
 * However this __weak function provides a default match of physical
 * id to logical cpu index. The phys_id provided here is usually a value read
 * from the device tree, which must match the hardware's internal registers.
 *
 * Returns true if the physical identifier and the logical cpu index
 * correspond to the same core/thread, false otherwise.
 */
bool __weak arch_match_cpu_phys_id(int cpu, u64 phys_id)
{
	return (u32)phys_id == cpu;
}

/**
 * Checks if the given "prop_name" property holds the physical id of the
 * core/thread corresponding to the logical cpu 'cpu'. If 'thread' is not
 * NULL, local thread number within the core is returned in it.
 */
static bool __of_find_n_match_cpu_property(struct device_node *cpun,
			const char *prop_name, int cpu, unsigned int *thread)
{
	const __be32 *cell;
	int ac, prop_len, tid;
	u64 hwid;

	ac = of_n_addr_cells(cpun);
	cell = of_get_property(cpun, prop_name, &prop_len);
	if (!cell || !ac)
		return false;
	prop_len /= sizeof(*cell) * ac;
	for (tid = 0; tid < prop_len; tid++) {
		hwid = of_read_number(cell, ac);
		if (arch_match_cpu_phys_id(cpu, hwid)) {
			if (thread)
				*thread = tid;
			return true;
		}
		cell += ac;
	}
	return false;
}

/*
 * arch_find_n_match_cpu_physical_id - See if the given device node is
 * for the cpu corresponding to logical cpu 'cpu'.  Return true if so,
 * else false.  If 'thread' is non-NULL, the local thread number within the
 * core is returned in it.
 */
bool __weak arch_find_n_match_cpu_physical_id(struct device_node *cpun,
					      int cpu, unsigned int *thread)
{
	/* Check for non-standard "ibm,ppc-interrupt-server#s" property
	 * for thread ids on PowerPC. If it doesn't exist fallback to
	 * standard "reg" property.
	 */
	if (IS_ENABLED(CONFIG_PPC) &&
	    __of_find_n_match_cpu_property(cpun,
					   "ibm,ppc-interrupt-server#s",
					   cpu, thread))
		return true;

	return __of_find_n_match_cpu_property(cpun, "reg", cpu, thread);
}

/**
 * of_get_cpu_node - Get device node associated with the given logical CPU
 *
 * @cpu: CPU number(logical index) for which device node is required
 * @thread: if not NULL, local thread number within the physical core is
 *          returned
 *
 * The main purpose of this function is to retrieve the device node for the
 * given logical CPU index. It should be used to initialize the of_node in
 * the cpu device. Once the of_node in the cpu device is populated, all
 * further references can use that instead.
 *
 * CPU logical to physical index mapping is architecture specific and is built
 * before booting secondary cores. This function uses arch_match_cpu_phys_id
 * which can be overridden by architecture specific implementation.
 *
 * Returns a node pointer for the logical cpu with refcount incremented, use
 * of_node_put() on it when done. Returns NULL if not found.
 */
struct device_node *of_get_cpu_node(int cpu, unsigned int *thread)
{
	struct device_node *cpun;

	for_each_node_by_type(cpun, "cpu") {
		if (arch_find_n_match_cpu_physical_id(cpun, cpu, thread))
			return cpun;
	}
	return NULL;
}
EXPORT_SYMBOL(of_get_cpu_node);

/**
 * of_cpu_node_to_id: Get the logical CPU number for a given device_node
 *
 * @cpu_node: Pointer to the device_node for CPU.
 *
 * Returns the logical CPU number of the given CPU device_node.
 * Returns -ENODEV if the CPU is not found.
 */
int of_cpu_node_to_id(struct device_node *cpu_node)
{
	int cpu;
	bool found = false;
	struct device_node *np;

	for_each_possible_cpu(cpu) {
		np = of_cpu_device_node_get(cpu);
		found = (cpu_node == np);
		of_node_put(np);
		if (found)
			return cpu;
	}

	return -ENODEV;
}
EXPORT_SYMBOL(of_cpu_node_to_id);

/**
 * __of_device_is_compatible() - Check if the node matches given constraints
 * @device: pointer to node
 * @compat: required compatible string, NULL or "" for any match
 * @type: required device_type value, NULL or "" for any match
 * @name: required node name, NULL or "" for any match
 *
 * Checks if the given @compat, @type and @name strings match the
 * properties of the given @device. A constraint can be skipped by
 * passing NULL or an empty string as the constraint.
 *
 * Returns 0 for no match, and a positive integer on match. The return
 * value is a relative score with larger values indicating better
 * matches. The score is weighted for the most specific compatible value
 * to get the highest score. Matching type is next, followed by matching
 * name. Practically speaking, this results in the following priority
 * order for matches:
 *
 * 1. specific compatible && type && name
 * 2. specific compatible && type
 * 3. specific compatible && name
 * 4. specific compatible
 * 5. general compatible && type && name
 * 6. general compatible && type
 * 7. general compatible && name
 * 8. general compatible
 * 9. type && name
 * 10. type
 * 11. name
 */
static int __of_device_is_compatible(const struct device_node *device,
				     const char *compat, const char *type, const char *name)
{
	struct property *prop;
	const char *cp;
	int index = 0, score = 0;

	/* Compatible match has highest priority */
	if (compat && compat[0]) {
		prop = __of_find_property(device, "compatible", NULL);
		for (cp = of_prop_next_string(prop, NULL); cp;
		     cp = of_prop_next_string(prop, cp), index++) {
			if (of_compat_cmp(cp, compat, strlen(compat)) == 0) {
				score = INT_MAX/2 - (index << 2);
				break;
			}
		}
		if (!score)
			return 0;
	}

	/* Matching type is better than matching name */
	if (type && type[0]) {
		if (!device->type || of_node_cmp(type, device->type))
			return 0;
		score += 2;
	}

	/* Matching name is a bit better than not */
	if (name && name[0]) {
		if (!device->name || of_node_cmp(name, device->name))
			return 0;
		score++;
	}

	return score;
}

/** Checks if the given "compat" string matches one of the strings in
 * the device's "compatible" property
 */
int of_device_is_compatible(const struct device_node *device,
		const char *compat)
{
	unsigned long flags;
	int res;

	raw_spin_lock_irqsave(&devtree_lock, flags);
	res = __of_device_is_compatible(device, compat, NULL, NULL);
	raw_spin_unlock_irqrestore(&devtree_lock, flags);
	return res;
}
EXPORT_SYMBOL(of_device_is_compatible);

/** Checks if the device is compatible with any of the entries in
 *  a NULL terminated array of strings. Returns the best match
 *  score or 0.
 */
int of_device_compatible_match(struct device_node *device,
			       const char *const *compat)
{
	unsigned int tmp, score = 0;

	if (!compat)
		return 0;

	while (*compat) {
		tmp = of_device_is_compatible(device, *compat);
		if (tmp > score)
			score = tmp;
		compat++;
	}

	return score;
}

/**
 * of_machine_is_compatible - Test root of device tree for a given compatible value
 * @compat: compatible string to look for in root node's compatible property.
 *
 * Returns a positive integer if the root node has the given value in its
 * compatible property.
 */
int of_machine_is_compatible(const char *compat)
{
	struct device_node *root;
	int rc = 0;

	root = of_find_node_by_path("/");
	if (root) {
		rc = of_device_is_compatible(root, compat);
		of_node_put(root);
	}
	return rc;
}
EXPORT_SYMBOL(of_machine_is_compatible);

/**
 *  __of_device_is_available - check if a device is available for use
 *
 *  @device: Node to check for availability, with locks already held
 *
 *  Returns true if the status property is absent or set to "okay" or "ok",
 *  false otherwise
 */
static bool __of_device_is_available(const struct device_node *device)
{
	const char *status;
	int statlen;

	if (!device)
		return false;

	status = __of_get_property(device, "status", &statlen);
	if (status == NULL)
		return true;

	if (statlen > 0) {
		if (!strcmp(status, "okay") || !strcmp(status, "ok"))
			return true;
	}

	return false;
}

/**
 *  of_device_is_available - check if a device is available for use
 *
 *  @device: Node to check for availability
 *
 *  Returns true if the status property is absent or set to "okay" or "ok",
 *  false otherwise
 */
bool of_device_is_available(const struct device_node *device)
{
	unsigned long flags;
	bool res;

	raw_spin_lock_irqsave(&devtree_lock, flags);
	res = __of_device_is_available(device);
	raw_spin_unlock_irqrestore(&devtree_lock, flags);
	return res;

}
EXPORT_SYMBOL(of_device_is_available);

/**
 *  of_device_is_big_endian - check if a device has BE registers
 *
 *  @device: Node to check for endianness
 *
 *  Returns true if the device has a "big-endian" property, or if the kernel
 *  was compiled for BE *and* the device has a "native-endian" property.
 *  Returns false otherwise.
 *
 *  Callers would nominally use ioread32be/iowrite32be if
 *  of_device_is_big_endian() == true, or readl/writel otherwise.
 */
bool of_device_is_big_endian(const struct device_node *device)
{
	if (of_property_read_bool(device, "big-endian"))
		return true;
	if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN) &&
	    of_property_read_bool(device, "native-endian"))
		return true;
	return false;
}
EXPORT_SYMBOL(of_device_is_big_endian);

/**
 *	of_get_parent - Get a node's parent if any
 *	@node:	Node to get parent
 *
 *	Returns a node pointer with refcount incremented, use
 *	of_node_put() on it when done.
 */
struct device_node *of_get_parent(const struct device_node *node)
{
	struct device_node *np;
	unsigned long flags;

	if (!node)
		return NULL;

	raw_spin_lock_irqsave(&devtree_lock, flags);
	np = of_node_get(node->parent);
	raw_spin_unlock_irqrestore(&devtree_lock, flags);
	return np;
}
EXPORT_SYMBOL(of_get_parent);

/**
 *	of_get_next_parent - Iterate to a node's parent
 *	@node:	Node to get parent of
 *
 *	This is like of_get_parent() except that it drops the
 *	refcount on the passed node, making it suitable for iterating
 *	through a node's parents.
 *
 *	Returns a node pointer with refcount incremented, use
 *	of_node_put() on it when done.
 */
struct device_node *of_get_next_parent(struct device_node *node)
{
	struct device_node *parent;
	unsigned long flags;

	if (!node)
		return NULL;

	raw_spin_lock_irqsave(&devtree_lock, flags);
	parent = of_node_get(node->parent);
	of_node_put(node);
	raw_spin_unlock_irqrestore(&devtree_lock, flags);
	return parent;
}
EXPORT_SYMBOL(of_get_next_parent);

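/*
 * Return the child of @node that follows @prev (or the first child when
 * @prev is NULL), taking a reference on it and dropping the one held on
 * @prev. Caller must hold devtree_lock.
 */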
static struct device_node *__of_get_next_child(const struct device_node *node,
						struct device_node *prev)
{
	struct device_node *next;

	if (!node)
		return NULL;

	next = prev ? prev->sibling : node->child;
	for (; next; next = next->sibling)
		if (of_node_get(next))
			break;
	of_node_put(prev);
	return next;
}
#define __for_each_child_of_node(parent, child) \
	for (child = __of_get_next_child(parent, NULL); child != NULL; \
	     child = __of_get_next_child(parent, child))

/**
 *	of_get_next_child - Iterate over a node's children
 *	@node:	parent node
 *	@prev:	previous child of the parent node, or NULL to get first
 *
 *	Returns a node pointer with refcount incremented, use of_node_put() on
 *	it when done. Returns NULL when prev is the last child. Decrements the
 *	refcount of prev.
 */
struct device_node *of_get_next_child(const struct device_node *node,
	struct device_node *prev)
{
	struct device_node *next;
	unsigned long flags;

	raw_spin_lock_irqsave(&devtree_lock, flags);
	next = __of_get_next_child(node, prev);
	raw_spin_unlock_irqrestore(&devtree_lock, flags);
	return next;
}
EXPORT_SYMBOL(of_get_next_child);

/**
 *	of_get_next_available_child - Find the next available child node
 *	@node:	parent node
 *	@prev:	previous child of the parent node, or NULL to get first
 *
 *      This function is like of_get_next_child(), except that it
 *      automatically skips any disabled nodes (i.e. status = "disabled").
 */
struct device_node *of_get_next_available_child(const struct device_node *node,
	struct device_node *prev)
{
	struct device_node *next;
	unsigned long flags;

	if (!node)
		return NULL;

	raw_spin_lock_irqsave(&devtree_lock, flags);
	next = prev ? prev->sibling : node->child;
	for (; next; next = next->sibling) {
		if (!__of_device_is_available(next))
			continue;
		if (of_node_get(next))
			break;
	}
	of_node_put(prev);
	raw_spin_unlock_irqrestore(&devtree_lock, flags);
	return next;
}
EXPORT_SYMBOL(of_get_next_available_child);

/**
 * of_get_compatible_child - Find compatible child node
 * @parent:	parent node
 * @compatible:	compatible string
 *
 * Lookup child node whose compatible property contains the given compatible
 * string.
 *
 * Returns a node pointer with refcount incremented, use of_node_put() on it
 * when done; or NULL if not found.
 */
struct device_node *of_get_compatible_child(const struct device_node *parent,
				const char *compatible)
{
	struct device_node *child;

	for_each_child_of_node(parent, child) {
		if (of_device_is_compatible(child, compatible))
			break;
	}

	return child;
}
EXPORT_SYMBOL(of_get_compatible_child);

/**
 *	of_get_child_by_name - Find the child node by name for a given parent
 *	@node:	parent node
 *	@name:	child name to look for.
 *
 *      This function looks for the child node matching the given name.
 *
 *	Returns a node pointer if found, with refcount incremented, use
 *	of_node_put() on it when done.
 *	Returns NULL if node is not found.
 */
struct device_node *of_get_child_by_name(const struct device_node *node,
				const char *name)
{
	struct device_node *child;

	for_each_child_of_node(node, child)
		if (child->name && (of_node_cmp(child->name, name) == 0))
			break;
	return child;
}
EXPORT_SYMBOL(of_get_child_by_name);

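/*
 * Match a single path component (up to the next '/' or ':') against the
 * names of @parent's children and return the matching child, if any.
 */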
struct device_node *__of_find_node_by_path(struct device_node *parent,
						const char *path)
{
	struct device_node *child;
	int len;

	len = strcspn(path, "/:");
	if (!len)
		return NULL;

	__for_each_child_of_node(parent, child) {
		const char *name = kbasename(child->full_name);

		if (strncmp(path, name, len) == 0 && (strlen(name) == len))
			return child;
	}
	return NULL;
}

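/*
 * Walk down from @node one path component at a time, stopping at the end
 * of @path or at an optional ':' options separator. The reference on the
 * node passed in is dropped as the walk descends.
 */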
struct device_node *__of_find_node_by_full_path(struct device_node *node,
						const char *path)
{
	const char *separator = strchr(path, ':');

	while (node && *path == '/') {
		struct device_node *tmp = node;

		path++; /* Increment past '/' delimiter */
		node = __of_find_node_by_path(node, path);
		of_node_put(tmp);
		path = strchrnul(path, '/');
		if (separator && separator < path)
			break;
	}
	return node;
}

/**
 *	of_find_node_opts_by_path - Find a node matching a full OF path
 *	@path: Either the full path to match, or if the path does not
 *	       start with '/', the name of a property of the /aliases
 *	       node (an alias).  In the case of an alias, the node
 *	       matching the alias' value will be returned.
 *	@opts: Address of a pointer into which to store the start of
 *	       an options string appended to the end of the path with
 *	       a ':' separator.
 *
 *	Valid paths:
 *		/foo/bar	Full path
 *		foo		Valid alias
 *		foo/bar		Valid alias + relative path
 *
 *	Returns a node pointer with refcount incremented, use
 *	of_node_put() on it when done.
 */
struct device_node *of_find_node_opts_by_path(const char *path, const char **opts)
{
	struct device_node *np = NULL;
	struct property *pp;
	unsigned long flags;
	const char *separator = strchr(path, ':');

	if (opts)
		*opts = separator ? separator + 1 : NULL;

	if (strcmp(path, "/") == 0)
		return of_node_get(of_root);

	/* The path could begin with an alias */
	if (*path != '/') {
		int len;
		const char *p = separator;

		if (!p)
			p = strchrnul(path, '/');
		len = p - path;

		/* of_aliases must not be NULL */
		if (!of_aliases)
			return NULL;

		for_each_property_of_node(of_aliases, pp) {
			if (strlen(pp->name) == len && !strncmp(pp->name, path, len)) {
				np = of_find_node_by_path(pp->value);
				break;
			}
		}
		if (!np)
			return NULL;
		path = p;
	}

	/* Step down the tree matching path components */
	raw_spin_lock_irqsave(&devtree_lock, flags);
	if (!np)
		np = of_node_get(of_root);
	np = __of_find_node_by_full_path(np, path);
	raw_spin_unlock_irqrestore(&devtree_lock, flags);
	return np;
}
EXPORT_SYMBOL(of_find_node_opts_by_path);
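
/*
 * Illustrative example (not part of the original source): resolve the node
 * behind a "serial0" alias and pick up any options appended after the ':'
 * separator, as found in stdout-path values such as "serial0:115200n8".
 *
 *	const char *opts;
 *	struct device_node *np;
 *
 *	np = of_find_node_opts_by_path("serial0:115200n8", &opts);
 *	if (np) {
 *		pr_debug("console options: %s\n", opts);
 *		of_node_put(np);
 *	}
 */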

/**
 *	of_find_node_by_name - Find a node by its "name" property
 *	@from:	The node to start searching from or NULL; the node
 *		you pass will not be searched, only the next one
 *		will. Typically, you pass what the previous call
 *		returned. of_node_put() will be called on @from.
 *	@name:	The name string to match against
 *
 *	Returns a node pointer with refcount incremented, use
 *	of_node_put() on it when done.
 */
struct device_node *of_find_node_by_name(struct device_node *from,
	const char *name)
{
	struct device_node *np;
	unsigned long flags;

	raw_spin_lock_irqsave(&devtree_lock, flags);
	for_each_of_allnodes_from(from, np)
		if (np->name && (of_node_cmp(np->name, name) == 0)
		    && of_node_get(np))
			break;
	of_node_put(from);
	raw_spin_unlock_irqrestore(&devtree_lock, flags);
	return np;
}
EXPORT_SYMBOL(of_find_node_by_name);

/**
 *	of_find_node_by_type - Find a node by its "device_type" property
 *	@from:	The node to start searching from, or NULL to start searching
 *		the entire device tree. The node you pass will not be
 *		searched, only the next one will; typically, you pass
 *		what the previous call returned. of_node_put() will be
 *		called on from for you.
 *	@type:	The type string to match against
 *
 *	Returns a node pointer with refcount incremented, use
 *	of_node_put() on it when done.
 */
struct device_node *of_find_node_by_type(struct device_node *from,
	const char *type)
{
	struct device_node *np;
	unsigned long flags;

	raw_spin_lock_irqsave(&devtree_lock, flags);
	for_each_of_allnodes_from(from, np)
		if (np->type && (of_node_cmp(np->type, type) == 0)
		    && of_node_get(np))
			break;
	of_node_put(from);
	raw_spin_unlock_irqrestore(&devtree_lock, flags);
	return np;
}
EXPORT_SYMBOL(of_find_node_by_type);

/**
 *	of_find_compatible_node - Find a node based on type and one of the
 *                                tokens in its "compatible" property
 *	@from:		The node to start searching from or NULL, the node
 *			you pass will not be searched, only the next one
 *			will; typically, you pass what the previous call
 *			returned. of_node_put() will be called on it
 *	@type:		The type string to match "device_type" or NULL to ignore
 *	@compatible:	The string to match to one of the tokens in the device
 *			"compatible" list.
 *
 *	Returns a node pointer with refcount incremented, use
 *	of_node_put() on it when done.
 */
struct device_node *of_find_compatible_node(struct device_node *from,
	const char *type, const char *compatible)
{
	struct device_node *np;
	unsigned long flags;

	raw_spin_lock_irqsave(&devtree_lock, flags);
	for_each_of_allnodes_from(from, np)
		if (__of_device_is_compatible(np, compatible, type, NULL) &&
		    of_node_get(np))
			break;
	of_node_put(from);
	raw_spin_unlock_irqrestore(&devtree_lock, flags);
	return np;
}
EXPORT_SYMBOL(of_find_compatible_node);

/**
 *	of_find_node_with_property - Find a node which has a property with
 *                                   the given name.
 *	@from:		The node to start searching from or NULL, the node
 *			you pass will not be searched, only the next one
 *			will; typically, you pass what the previous call
 *			returned. of_node_put() will be called on it
 *	@prop_name:	The name of the property to look for.
 *
 *	Returns a node pointer with refcount incremented, use
 *	of_node_put() on it when done.
 */
struct device_node *of_find_node_with_property(struct device_node *from,
	const char *prop_name)
{
	struct device_node *np;
	struct property *pp;
	unsigned long flags;

	raw_spin_lock_irqsave(&devtree_lock, flags);
	for_each_of_allnodes_from(from, np) {
		for (pp = np->properties; pp; pp = pp->next) {
			if (of_prop_cmp(pp->name, prop_name) == 0) {
				of_node_get(np);
				goto out;
			}
		}
	}
out:
	of_node_put(from);
	raw_spin_unlock_irqrestore(&devtree_lock, flags);
	return np;
}
EXPORT_SYMBOL(of_find_node_with_property);

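/*
 * Score each entry of @matches against @node and return the entry with
 * the best (most specific) score, or NULL if nothing matches. Caller
 * must hold devtree_lock.
 */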
static
const struct of_device_id *__of_match_node(const struct of_device_id *matches,
					   const struct device_node *node)
{
	const struct of_device_id *best_match = NULL;
	int score, best_score = 0;

	if (!matches)
		return NULL;

	for (; matches->name[0] || matches->type[0] || matches->compatible[0]; matches++) {
		score = __of_device_is_compatible(node, matches->compatible,
						  matches->type, matches->name);
		if (score > best_score) {
			best_match = matches;
			best_score = score;
		}
	}

	return best_match;
}

/**
 * of_match_node - Tell if a device_node has a matching of_match structure
 *	@matches:	array of of device match structures to search in
 *	@node:		the of device structure to match against
 *
 *	Low level utility function used by device matching.
 */
const struct of_device_id *of_match_node(const struct of_device_id *matches,
					 const struct device_node *node)
{
	const struct of_device_id *match;
	unsigned long flags;

	raw_spin_lock_irqsave(&devtree_lock, flags);
	match = __of_match_node(matches, node);
	raw_spin_unlock_irqrestore(&devtree_lock, flags);
	return match;
}
EXPORT_SYMBOL(of_match_node);

/**
 *	of_find_matching_node_and_match - Find a node based on an of_device_id
 *					  match table.
 *	@from:		The node to start searching from or NULL, the node
 *			you pass will not be searched, only the next one
 *			will; typically, you pass what the previous call
 *			returned. of_node_put() will be called on it
 *	@matches:	array of of device match structures to search in
 *	@match:		Updated to point at the matches entry which matched
 *
 *	Returns a node pointer with refcount incremented, use
 *	of_node_put() on it when done.
 */
struct device_node *of_find_matching_node_and_match(struct device_node *from,
					const struct of_device_id *matches,
					const struct of_device_id **match)
{
	struct device_node *np;
	const struct of_device_id *m;
	unsigned long flags;

	if (match)
		*match = NULL;

	raw_spin_lock_irqsave(&devtree_lock, flags);
	for_each_of_allnodes_from(from, np) {
		m = __of_match_node(matches, np);
		if (m && of_node_get(np)) {
			if (match)
				*match = m;
			break;
		}
	}
	of_node_put(from);
	raw_spin_unlock_irqrestore(&devtree_lock, flags);
	return np;
}
EXPORT_SYMBOL(of_find_matching_node_and_match);

/**
 * of_modalias_node - Lookup appropriate modalias for a device node
 * @node:	pointer to a device tree node
 * @modalias:	Pointer to buffer that modalias value will be copied into
 * @len:	Length of modalias value
 *
 * Based on the value of the compatible property, this routine will attempt
 * to choose an appropriate modalias value for a particular device tree node.
 * It does this by stripping the manufacturer prefix (as delimited by a ',')
 * from the first entry in the compatible list property.
 *
 * This routine returns 0 on success, <0 on failure.
 */
int of_modalias_node(struct device_node *node, char *modalias, int len)
{
	const char *compatible, *p;
	int cplen;

	compatible = of_get_property(node, "compatible", &cplen);
	if (!compatible || strlen(compatible) > cplen)
		return -ENODEV;
	p = strchr(compatible, ',');
	strlcpy(modalias, p ? p + 1 : compatible, len);
	return 0;
}
EXPORT_SYMBOL_GPL(of_modalias_node);

/**
 * of_find_node_by_phandle - Find a node given a phandle
 * @handle:	phandle of the node to find
 *
 * Returns a node pointer with refcount incremented, use
 * of_node_put() on it when done.
 */
struct device_node *of_find_node_by_phandle(phandle handle)
{
	struct device_node *np = NULL;
	unsigned long flags;
	phandle masked_handle;

	if (!handle)
		return NULL;

	raw_spin_lock_irqsave(&devtree_lock, flags);

	masked_handle = handle & phandle_cache_mask;

	if (phandle_cache) {
		if (phandle_cache[masked_handle] &&
		    handle == phandle_cache[masked_handle]->phandle)
			np = phandle_cache[masked_handle];
	}

	if (!np) {
		for_each_of_allnodes(np)
			if (np->phandle == handle) {
				if (phandle_cache)
					phandle_cache[masked_handle] = np;
				break;
			}
	}

	of_node_get(np);
	raw_spin_unlock_irqrestore(&devtree_lock, flags);
	return np;
}
EXPORT_SYMBOL(of_find_node_by_phandle);

void of_print_phandle_args(const char *msg, const struct of_phandle_args *args)
{
	int i;
	printk("%s %pOF", msg, args->np);
	for (i = 0; i < args->args_count; i++) {
		const char delim = i ? ',' : ':';

		pr_cont("%c%08x", delim, args->args[i]);
	}
	pr_cont("\n");
}

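/*
 * Prime @it for walking the phandle+args entries of @list_name in @np.
 * @cells_name (for example "#gpio-cells") names the property on each
 * provider that gives the argument count; when it is NULL, @cell_count
 * is used as a fixed count instead. Callers typically advance the
 * iterator with of_phandle_iterator_next().
 */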
int of_phandle_iterator_init(struct of_phandle_iterator *it,
		const struct device_node *np,
		const char *list_name,
		const char *cells_name,
		int cell_count)
{
	const __be32 *list;
	int size;

	memset(it, 0, sizeof(*it));

	list = of_get_property(np, list_name, &size);
	if (!list)
		return -ENOENT;

	it->cells_name = cells_name;
	it->cell_count = cell_count;
	it->parent = np;
	it->list_end = list + size / sizeof(*list);
	it->phandle_end = list;
	it->cur = list;

	return 0;
}
EXPORT_SYMBOL_GPL(of_phandle_iterator_init);

int of_phandle_iterator_next(struct of_phandle_iterator *it)
{
	uint32_t count = 0;

	if (it->node) {
		of_node_put(it->node);
		it->node = NULL;
	}

	if (!it->cur || it->phandle_end >= it->list_end)
		return -ENOENT;

	it->cur = it->phandle_end;

	/* If phandle is 0, then it is an empty entry with no arguments. */
	it->phandle = be32_to_cpup(it->cur++);

	if (it->phandle) {

		/*
		 * Find the provider node and parse the #*-cells property to
		 * determine the argument length.
		 */
		it->node = of_find_node_by_phandle(it->phandle);

		if (it->cells_name) {
			if (!it->node) {
				pr_err("%pOF: could not find phandle\n",
				       it->parent);
				goto err;
			}

			if (of_property_read_u32(it->node, it->cells_name,
						 &count)) {
				pr_err("%pOF: could not get %s for %pOF\n",
				       it->parent,
				       it->cells_name,
				       it->node);
				goto err;
			}
		} else {
			count = it->cell_count;
		}

		/*
		 * Make sure that the arguments actually fit in the remaining
		 * property data length
		 */
		if (it->cur + count > it->list_end) {
			pr_err("%pOF: arguments longer than property\n",
			       it->parent);
			goto err;
		}
	}

	it->phandle_end = it->cur + count;
	it->cur_count = count;

	return 0;

err:
	if (it->node) {
		of_node_put(it->node);
		it->node = NULL;
	}

	return -EINVAL;
}
EXPORT_SYMBOL_GPL(of_phandle_iterator_next);
