// SPDX-License-Identifier: GPL-2.0
/* Copyright 2018 Marty E. Plummer <hanetzer@startmail.com> */
/* Copyright 2019 Linaro, Ltd., Rob Herring <robh@kernel.org> */
/* Copyright 2019 Collabora ltd. */

#include <linux/module.h>
#include <linux/of_platform.h>
#include <linux/pagemap.h>
#include <linux/pm_runtime.h>
#include <drm/panfrost_drm.h>
#include <drm/drm_drv.h>
#include <drm/drm_ioctl.h>
#include <drm/drm_syncobj.h>
#include <drm/drm_utils.h>

#include "panfrost_device.h"
#include "panfrost_gem.h"
#include "panfrost_mmu.h"
#include "panfrost_job.h"
#include "panfrost_gpu.h"
21
#include "panfrost_perfcnt.h"
22

23
24
25
/*
 * When set (via the "unstable_ioctls" module parameter), ioctls gated by
 * panfrost_unstable_ioctl_check() (e.g. the perfcnt ioctls) become usable.
 * Registered with module_param_unsafe() because enabling it taints the kernel.
 */
static bool unstable_ioctls;
module_param_unsafe(unstable_ioctls, bool, 0600);

26
27
28
29
30
31
32
33
/*
 * Handler for DRM_IOCTL_PANFROST_GET_PARAM: copy one value from the GPU
 * feature registers cached in pfdev->features back to userspace.
 * Returns 0 on success, -EINVAL on a non-zero pad field or unknown param.
 */
static int panfrost_ioctl_get_param(struct drm_device *ddev, void *data, struct drm_file *file)
{
	struct drm_panfrost_get_param *param = data;
	struct panfrost_device *pfdev = ddev->dev_private;

	/* pad is reserved and must be zero. */
	if (param->pad != 0)
		return -EINVAL;

/* Expand to one switch case returning a scalar feature field. */
#define PANFROST_FEATURE(name, member)			\
	case DRM_PANFROST_PARAM_ ## name:		\
		param->value = pfdev->features.member;	\
		break
/*
 * Expand to a case range covering params name0..name<max>, indexing into
 * an array-valued feature field.
 */
#define PANFROST_FEATURE_ARRAY(name, member, max)			\
	case DRM_PANFROST_PARAM_ ## name ## 0 ...			\
		DRM_PANFROST_PARAM_ ## name ## max:			\
		param->value = pfdev->features.member[param->param -	\
			DRM_PANFROST_PARAM_ ## name ## 0];		\
		break

	switch (param->param) {
		PANFROST_FEATURE(GPU_PROD_ID, id);
		PANFROST_FEATURE(GPU_REVISION, revision);
		PANFROST_FEATURE(SHADER_PRESENT, shader_present);
		PANFROST_FEATURE(TILER_PRESENT, tiler_present);
		PANFROST_FEATURE(L2_PRESENT, l2_present);
		PANFROST_FEATURE(STACK_PRESENT, stack_present);
		PANFROST_FEATURE(AS_PRESENT, as_present);
		PANFROST_FEATURE(JS_PRESENT, js_present);
		PANFROST_FEATURE(L2_FEATURES, l2_features);
		PANFROST_FEATURE(CORE_FEATURES, core_features);
		PANFROST_FEATURE(TILER_FEATURES, tiler_features);
		PANFROST_FEATURE(MEM_FEATURES, mem_features);
		PANFROST_FEATURE(MMU_FEATURES, mmu_features);
		PANFROST_FEATURE(THREAD_FEATURES, thread_features);
		PANFROST_FEATURE(MAX_THREADS, max_threads);
		PANFROST_FEATURE(THREAD_MAX_WORKGROUP_SZ,
				thread_max_workgroup_sz);
		PANFROST_FEATURE(THREAD_MAX_BARRIER_SZ,
				thread_max_barrier_sz);
		PANFROST_FEATURE(COHERENCY_FEATURES, coherency_features);
		PANFROST_FEATURE(AFBC_FEATURES, afbc_features);
		PANFROST_FEATURE_ARRAY(TEXTURE_FEATURES, texture_features, 3);
		PANFROST_FEATURE_ARRAY(JS_FEATURES, js_features, 15);
		PANFROST_FEATURE(NR_CORE_GROUPS, nr_core_groups);
		PANFROST_FEATURE(THREAD_TLS_ALLOC, thread_tls_alloc);
	default:
		return -EINVAL;
	}

	return 0;
}

/*
 * Handler for DRM_IOCTL_PANFROST_CREATE_BO: allocate a GEM buffer object,
 * map it into this file's GPU address space, and return the handle and
 * GPU VA offset to userspace.
 */
static int panfrost_ioctl_create_bo(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct panfrost_file_priv *priv = file->driver_priv;
	struct panfrost_gem_object *bo;
	struct drm_panfrost_create_bo *args = data;
	struct panfrost_gem_mapping *mapping;

	/* Zero-sized BOs, reserved pad bits and unknown flags are rejected. */
	if (!args->size || args->pad ||
	    (args->flags & ~(PANFROST_BO_NOEXEC | PANFROST_BO_HEAP)))
		return -EINVAL;

	/* Heaps should never be executable */
	if ((args->flags & PANFROST_BO_HEAP) &&
	    !(args->flags & PANFROST_BO_NOEXEC))
		return -EINVAL;

	bo = panfrost_gem_create_with_handle(file, dev, args->size, args->flags,
					     &args->handle);
	if (IS_ERR(bo))
		return PTR_ERR(bo);

	/*
	 * Look up the mapping created for this file.  On failure drop the
	 * local reference; the handle installed above still owns one.
	 */
	mapping = panfrost_gem_mapping_get(bo, priv);
	if (!mapping) {
		drm_gem_object_put(&bo->base.base);
		return -EINVAL;
	}

	/* Report the GPU virtual address (in bytes) of the mapping. */
	args->offset = mapping->mmnode.start << PAGE_SHIFT;
	panfrost_gem_mapping_put(mapping);

	return 0;
}

/**
 * panfrost_lookup_bos() - Sets up job->bo[] with the GEM objects
 * referenced by the job.
 * @dev: DRM device
 * @file_priv: DRM file for this fd
 * @args: IOCTL args
 * @job: job being set up
 *
 * Resolve handles from userspace to BOs and attach them to job.
 *
 * Note that this function doesn't need to unreference the BOs on
 * failure, because that will happen at panfrost_job_cleanup() time.
 */
static int
panfrost_lookup_bos(struct drm_device *dev,
		  struct drm_file *file_priv,
		  struct drm_panfrost_submit *args,
		  struct panfrost_job *job)
{
	struct panfrost_file_priv *priv = file_priv->driver_priv;
	struct panfrost_gem_object *bo;
	unsigned int i;
	int ret;

	job->bo_count = args->bo_handle_count;

	/* A job with no BOs is valid; nothing to resolve. */
	if (!job->bo_count)
		return 0;

	/* Zeroed so partially-filled arrays are safe to clean up. */
	job->implicit_fences = kvmalloc_array(job->bo_count,
				  sizeof(struct dma_fence *),
				  GFP_KERNEL | __GFP_ZERO);
	if (!job->implicit_fences)
		return -ENOMEM;

	ret = drm_gem_objects_lookup(file_priv,
				     (void __user *)(uintptr_t)args->bo_handles,
				     job->bo_count, &job->bos);
	if (ret)
		return ret;

	job->mappings = kvmalloc_array(job->bo_count,
				       sizeof(struct panfrost_gem_mapping *),
				       GFP_KERNEL | __GFP_ZERO);
	if (!job->mappings)
		return -ENOMEM;

	for (i = 0; i < job->bo_count; i++) {
		struct panfrost_gem_mapping *mapping;

		bo = to_panfrost_bo(job->bos[i]);
		mapping = panfrost_gem_mapping_get(bo, priv);
		if (!mapping) {
			/* Partial state is released by panfrost_job_cleanup(). */
			ret = -EINVAL;
			break;
		}

		/*
		 * Presumably pins the BO against madvise/purging while the
		 * job is in flight; dropped again at job cleanup time.
		 */
		atomic_inc(&bo->gpu_usecount);
		job->mappings[i] = mapping;
	}

	return ret;
}

/**
 * panfrost_copy_in_sync() - Sets up job->in_fences[] with the sync objects
 * referenced by the job.
 * @dev: DRM device
 * @file_priv: DRM file for this fd
 * @args: IOCTL args
 * @job: job being set up
 *
 * Resolve syncobjs from userspace to fences and attach them to job.
 *
 * Note that this function doesn't need to unreference the fences on
 * failure, because that will happen at panfrost_job_cleanup() time.
 */
static int
panfrost_copy_in_sync(struct drm_device *dev,
		  struct drm_file *file_priv,
		  struct drm_panfrost_submit *args,
		  struct panfrost_job *job)
{
	u32 *handles;
	int ret = 0;
	int i;

	job->in_fence_count = args->in_sync_count;

	if (!job->in_fence_count)
		return 0;

	/* Zeroed so cleanup can safely walk a partially-filled array. */
	job->in_fences = kvmalloc_array(job->in_fence_count,
					sizeof(struct dma_fence *),
					GFP_KERNEL | __GFP_ZERO);
	if (!job->in_fences) {
		DRM_DEBUG("Failed to allocate job in fences\n");
		return -ENOMEM;
	}

	/* Temporary kernel copy of the userspace handle array. */
	handles = kvmalloc_array(job->in_fence_count, sizeof(u32), GFP_KERNEL);
	if (!handles) {
		ret = -ENOMEM;
		DRM_DEBUG("Failed to allocate incoming syncobj handles\n");
		goto fail;
	}

	if (copy_from_user(handles,
			   (void __user *)(uintptr_t)args->in_syncs,
			   job->in_fence_count * sizeof(u32))) {
		ret = -EFAULT;
		DRM_DEBUG("Failed to copy in syncobj handles\n");
		goto fail;
	}

	for (i = 0; i < job->in_fence_count; i++) {
		ret = drm_syncobj_find_fence(file_priv, handles[i], 0, 0,
					     &job->in_fences[i]);
		/*
		 * NOTE(review): only -EINVAL aborts the loop; other errors
		 * leave a NULL entry in in_fences[] and are dropped when a
		 * later iteration succeeds — presumably the job code
		 * tolerates NULL entries, but verify this is intentional.
		 */
		if (ret == -EINVAL)
			goto fail;
	}

fail:
	kvfree(handles);
	return ret;
}

/*
 * Handler for DRM_IOCTL_PANFROST_SUBMIT: build a panfrost_job from the
 * ioctl args (job chain address, requirements, BOs, in-syncobjs), push it
 * to the scheduler, and optionally install the render-done fence into the
 * caller-supplied output syncobj.
 */
static int panfrost_ioctl_submit(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct panfrost_device *pfdev = dev->dev_private;
	struct drm_panfrost_submit *args = data;
	struct drm_syncobj *sync_out = NULL;
	struct panfrost_job *job;
	int ret = 0;

	/* A job chain GPU address is mandatory. */
	if (!args->jc)
		return -EINVAL;

	/* Only the fragment-shader requirement bit is currently supported. */
	if (args->requirements && args->requirements != PANFROST_JD_REQ_FS)
		return -EINVAL;

	if (args->out_sync > 0) {
		sync_out = drm_syncobj_find(file, args->out_sync);
		if (!sync_out)
			return -ENODEV;
	}

	job = kzalloc(sizeof(*job), GFP_KERNEL);
	if (!job) {
		ret = -ENOMEM;
		goto fail_out_sync;
	}

	kref_init(&job->refcount);

	job->pfdev = pfdev;
	job->jc = args->jc;
	job->requirements = args->requirements;
	job->flush_id = panfrost_gpu_get_latest_flush_id(pfdev);
	job->file_priv = file->driver_priv;

	/* Failures below rely on panfrost_job_put() to release partial state. */
	ret = panfrost_copy_in_sync(dev, file, args, job);
	if (ret)
		goto fail_job;

	ret = panfrost_lookup_bos(dev, file, args, job);
	if (ret)
		goto fail_job;

	ret = panfrost_job_push(job);
	if (ret)
		goto fail_job;

	/* Update the return sync object for the job */
	if (sync_out)
		drm_syncobj_replace_fence(sync_out, job->render_done_fence);

fail_job:
	/* Drops the submit path's reference; scheduler may hold its own. */
	panfrost_job_put(job);
fail_out_sync:
	if (sync_out)
		drm_syncobj_put(sync_out);

	return ret;
}

static int
panfrost_ioctl_wait_bo(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	long ret;
	struct drm_panfrost_wait_bo *args = data;
	struct drm_gem_object *gem_obj;
	unsigned long timeout = drm_timeout_abs_to_jiffies(args->timeout_ns);

	if (args->pad)
		return -EINVAL;

	gem_obj = drm_gem_object_lookup(file_priv, args->handle);
	if (!gem_obj)
		return -ENOENT;

315
	ret = dma_resv_wait_timeout(gem_obj->resv, true, true, timeout);
316
317
318
	if (!ret)
		ret = timeout ? -ETIMEDOUT : -EBUSY;

319
	drm_gem_object_put(gem_obj);
320
321
322
323
324
325
326
327

	return ret;
}

/*
 * Handler for DRM_IOCTL_PANFROST_MMAP_BO: create (or reuse) the fake mmap
 * offset for a BO so userspace can mmap() it through the DRM fd.
 */
static int panfrost_ioctl_mmap_bo(struct drm_device *dev, void *data,
		      struct drm_file *file_priv)
{
	struct drm_panfrost_mmap_bo *args = data;
	struct drm_gem_object *gem_obj;
	int ret;

	/* No mmap flags are defined yet. */
	if (args->flags != 0) {
		DRM_INFO("unknown mmap_bo flags: %d\n", args->flags);
		return -EINVAL;
	}

	gem_obj = drm_gem_object_lookup(file_priv, args->handle);
	if (!gem_obj) {
		DRM_DEBUG("Failed to look up GEM BO %d\n", args->handle);
		return -ENOENT;
	}

	/* Don't allow mmapping of heap objects as pages are not pinned. */
	if (to_panfrost_bo(gem_obj)->is_heap) {
		ret = -EINVAL;
		goto out;
	}

	ret = drm_gem_create_mmap_offset(gem_obj);
	if (ret == 0)
		args->offset = drm_vma_node_offset_addr(&gem_obj->vma_node);

out:
	drm_gem_object_put(gem_obj);
	return ret;
}

/*
 * Handler for DRM_IOCTL_PANFROST_GET_BO_OFFSET: return the GPU virtual
 * address at which a BO is mapped in this file's address space.
 */
static int panfrost_ioctl_get_bo_offset(struct drm_device *dev, void *data,
			    struct drm_file *file_priv)
{
	struct panfrost_file_priv *priv = file_priv->driver_priv;
	struct drm_panfrost_get_bo_offset *args = data;
	struct panfrost_gem_mapping *mapping;
	struct drm_gem_object *gem_obj;
	struct panfrost_gem_object *bo;

	gem_obj = drm_gem_object_lookup(file_priv, args->handle);
	if (!gem_obj) {
		DRM_DEBUG("Failed to look up GEM BO %d\n", args->handle);
		return -ENOENT;
	}
	bo = to_panfrost_bo(gem_obj);

	/* The mapping holds its own reference; the lookup ref can go now. */
	mapping = panfrost_gem_mapping_get(bo, priv);
	drm_gem_object_put(gem_obj);

	/* No mapping for this file means the BO isn't mapped on its AS. */
	if (!mapping)
		return -EINVAL;

	/* GPU VA in bytes. */
	args->offset = mapping->mmnode.start << PAGE_SHIFT;
	panfrost_gem_mapping_put(mapping);
	return 0;
}

384
385
386
/*
 * Handler for DRM_IOCTL_PANFROST_MADVISE: mark a BO as needed or
 * purgeable and keep the shrinker list in sync.
 *
 * Lock order: pfdev->shrinker_lock outer, bo->mappings.lock inner —
 * must match the shrinker side.
 */
static int panfrost_ioctl_madvise(struct drm_device *dev, void *data,
				  struct drm_file *file_priv)
{
	struct panfrost_file_priv *priv = file_priv->driver_priv;
	struct drm_panfrost_madvise *args = data;
	struct panfrost_device *pfdev = dev->dev_private;
	struct drm_gem_object *gem_obj;
	struct panfrost_gem_object *bo;
	int ret = 0;

	gem_obj = drm_gem_object_lookup(file_priv, args->handle);
	if (!gem_obj) {
		DRM_DEBUG("Failed to look up GEM BO %d\n", args->handle);
		return -ENOENT;
	}

	bo = to_panfrost_bo(gem_obj);

	mutex_lock(&pfdev->shrinker_lock);
	mutex_lock(&bo->mappings.lock);
	if (args->madv == PANFROST_MADV_DONTNEED) {
		struct panfrost_gem_mapping *first;

		first = list_first_entry(&bo->mappings.list,
					 struct panfrost_gem_mapping,
					 node);

		/*
		 * If we want to mark the BO purgeable, there must be only one
		 * user: the caller FD.
		 * We could do something smarter and mark the BO purgeable only
		 * when all its users have marked it purgeable, but globally
		 * visible/shared BOs are likely to never be marked purgeable
		 * anyway, so let's not bother.
		 */
		if (!list_is_singular(&bo->mappings.list) ||
		    WARN_ON_ONCE(first->mmu != &priv->mmu)) {
			ret = -EINVAL;
			goto out_unlock_mappings;
		}
	}

	/* retained tells userspace whether the backing pages still exist. */
	args->retained = drm_gem_shmem_madvise(gem_obj, args->madv);

	if (args->retained) {
		/* Track purgeable BOs on the device-wide shrinker list. */
		if (args->madv == PANFROST_MADV_DONTNEED)
			list_add_tail(&bo->base.madv_list,
				      &pfdev->shrinker_list);
		else if (args->madv == PANFROST_MADV_WILLNEED)
			list_del_init(&bo->base.madv_list);
	}

out_unlock_mappings:
	mutex_unlock(&bo->mappings.lock);
	mutex_unlock(&pfdev->shrinker_lock);

	drm_gem_object_put(gem_obj);
	return ret;
}

444
445
446
447
448
449
450
451
/*
 * Gate for ioctls that are not yet part of the stable uapi: they are only
 * reachable when the "unstable_ioctls" module parameter is set.
 * Returns 0 when allowed, -ENOSYS otherwise.
 */
int panfrost_unstable_ioctl_check(void)
{
	return unstable_ioctls ? 0 : -ENOSYS;
}

452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
/* Address-space constants expressed in pages (the drm_mm works in PFNs). */
#define PFN_4G		(SZ_4G >> PAGE_SHIFT)
#define PFN_4G_MASK	(PFN_4G - 1)
#define PFN_16M		(SZ_16M >> PAGE_SHIFT)

/*
 * drm_mm color_adjust callback: shrink the candidate [start, end) range for
 * executable BOs so the allocation never starts/ends on a 4GB boundary and
 * never crosses one.  The color carries the BO flags (PANFROST_BO_NOEXEC).
 */
static void panfrost_drm_mm_color_adjust(const struct drm_mm_node *node,
					 unsigned long color,
					 u64 *start, u64 *end)
{
	/* Executable buffers can't start or end on a 4GB boundary */
	if (!(color & PANFROST_BO_NOEXEC)) {
		u64 next_seg;

		if ((*start & PFN_4G_MASK) == 0)
			(*start)++;

		if ((*end & PFN_4G_MASK) == 0)
			(*end)--;

		/*
		 * If the next 4GB boundary is within 16MB, jump past it —
		 * presumably to keep headroom clear of the boundary; confirm
		 * against the Midgard executable-region hardware restriction.
		 */
		next_seg = ALIGN(*start, PFN_4G);
		if (next_seg - *start <= PFN_16M)
			*start = next_seg + 1;

		/* Clamp the range so it stays inside one 4GB segment. */
		*end = min(*end, ALIGN(*start, PFN_4G) - 1);
	}
}

478
479
480
/*
 * Per-fd open hook: allocate the file private data, set up its private
 * GPU VA allocator (drm_mm) and page tables, and register with the job
 * scheduler.  Everything is torn down in panfrost_postclose().
 */
static int
panfrost_open(struct drm_device *dev, struct drm_file *file)
{
	int ret;
	struct panfrost_device *pfdev = dev->dev_private;
	struct panfrost_file_priv *panfrost_priv;

	panfrost_priv = kzalloc(sizeof(*panfrost_priv), GFP_KERNEL);
	if (!panfrost_priv)
		return -ENOMEM;

	panfrost_priv->pfdev = pfdev;
	file->driver_priv = panfrost_priv;

	spin_lock_init(&panfrost_priv->mm_lock);

	/* 4G enough for now. can be 48-bit */
	drm_mm_init(&panfrost_priv->mm, SZ_32M >> PAGE_SHIFT, (SZ_4G - SZ_32M) >> PAGE_SHIFT);
	panfrost_priv->mm.color_adjust = panfrost_drm_mm_color_adjust;

	ret = panfrost_mmu_pgtable_alloc(panfrost_priv);
	if (ret)
		goto err_pgtable;

	ret = panfrost_job_open(panfrost_priv);
	if (ret)
		goto err_job;

	return 0;

err_job:
	panfrost_mmu_pgtable_free(panfrost_priv);
err_pgtable:
	drm_mm_takedown(&panfrost_priv->mm);
	kfree(panfrost_priv);
	return ret;
}

/*
 * Per-fd close hook: undo panfrost_open() and release any perfcnt state
 * this file held.  Runs after all of the file's mappings are gone.
 */
static void
panfrost_postclose(struct drm_device *dev, struct drm_file *file)
{
	struct panfrost_file_priv *panfrost_priv = file->driver_priv;

	/* Release the perf counters if this file was the active user. */
	panfrost_perfcnt_close(file);
	panfrost_job_close(panfrost_priv);

	panfrost_mmu_pgtable_free(panfrost_priv);
	drm_mm_takedown(&panfrost_priv->mm);
	kfree(panfrost_priv);
}

/*
 * Ioctl dispatch table.  All entries are DRM_RENDER_ALLOW so they are
 * usable on render nodes.
 */
static const struct drm_ioctl_desc panfrost_drm_driver_ioctls[] = {
#define PANFROST_IOCTL(n, func, flags) \
	DRM_IOCTL_DEF_DRV(PANFROST_##n, panfrost_ioctl_##func, flags)

	PANFROST_IOCTL(SUBMIT,		submit,		DRM_RENDER_ALLOW),
	PANFROST_IOCTL(WAIT_BO,		wait_bo,	DRM_RENDER_ALLOW),
	PANFROST_IOCTL(CREATE_BO,	create_bo,	DRM_RENDER_ALLOW),
	PANFROST_IOCTL(MMAP_BO,		mmap_bo,	DRM_RENDER_ALLOW),
	PANFROST_IOCTL(GET_PARAM,	get_param,	DRM_RENDER_ALLOW),
	PANFROST_IOCTL(GET_BO_OFFSET,	get_bo_offset,	DRM_RENDER_ALLOW),
	/* Perfcnt ioctls are unstable; gated by panfrost_unstable_ioctl_check(). */
	PANFROST_IOCTL(PERFCNT_ENABLE,	perfcnt_enable,	DRM_RENDER_ALLOW),
	PANFROST_IOCTL(PERFCNT_DUMP,	perfcnt_dump,	DRM_RENDER_ALLOW),
	PANFROST_IOCTL(MADVISE,		madvise,	DRM_RENDER_ALLOW),
};

/* Standard GEM file operations (mmap, poll, ioctl dispatch, ...). */
DEFINE_DRM_GEM_FOPS(panfrost_drm_driver_fops);

/*
 * Panfrost driver version:
 * - 1.0 - initial interface
 * - 1.1 - adds HEAP and NOEXEC flags for CREATE_BO
 * - 1.2 - adds AFBC_FEATURES query
 */
static const struct drm_driver panfrost_drm_driver = {
	.driver_features	= DRIVER_RENDER | DRIVER_GEM | DRIVER_SYNCOBJ,
	.open			= panfrost_open,
	.postclose		= panfrost_postclose,
	.ioctls			= panfrost_drm_driver_ioctls,
	.num_ioctls		= ARRAY_SIZE(panfrost_drm_driver_ioctls),
	.fops			= &panfrost_drm_driver_fops,
	.name			= "panfrost",
	.desc			= "panfrost DRM",
	.date			= "20180908",
	.major			= 1,
	.minor			= 2,

	/* PRIME buffer sharing support. */
	.gem_create_object	= panfrost_gem_create_object,
	.prime_handle_to_fd	= drm_gem_prime_handle_to_fd,
	.prime_fd_to_handle	= drm_gem_prime_fd_to_handle,
	.gem_prime_import_sg_table = panfrost_gem_prime_import_sg_table,
	.gem_prime_mmap		= drm_gem_prime_mmap,
};

/*
 * Platform driver probe: allocate and initialise the panfrost device,
 * bring up runtime PM with autosuspend, register the DRM device, and
 * hook up the shmem shrinker.
 */
static int panfrost_probe(struct platform_device *pdev)
{
	struct panfrost_device *pfdev;
	struct drm_device *ddev;
	int err;

	pfdev = devm_kzalloc(&pdev->dev, sizeof(*pfdev), GFP_KERNEL);
	if (!pfdev)
		return -ENOMEM;

	pfdev->pdev = pdev;
	pfdev->dev = &pdev->dev;

	platform_set_drvdata(pdev, pfdev);

	/* Per-SoC quirks/supply data from the matched OF compatible. */
	pfdev->comp = of_device_get_match_data(&pdev->dev);
	if (!pfdev->comp)
		return -ENODEV;

	pfdev->coherent = device_get_dma_attr(&pdev->dev) == DEV_DMA_COHERENT;

	/* Allocate and initialize the DRM device. */
	ddev = drm_dev_alloc(&panfrost_drm_driver, &pdev->dev);
	if (IS_ERR(ddev))
		return PTR_ERR(ddev);

	ddev->dev_private = pfdev;
	pfdev->ddev = ddev;

	mutex_init(&pfdev->shrinker_lock);
	INIT_LIST_HEAD(&pfdev->shrinker_list);

	err = panfrost_device_init(pfdev);
	if (err) {
		if (err != -EPROBE_DEFER)
			dev_err(&pdev->dev, "Fatal error during GPU init\n");
		goto err_out0;
	}

	pm_runtime_set_active(pfdev->dev);
	pm_runtime_mark_last_busy(pfdev->dev);
	pm_runtime_enable(pfdev->dev);
	pm_runtime_set_autosuspend_delay(pfdev->dev, 50); /* ~3 frames */
	pm_runtime_use_autosuspend(pfdev->dev);

	/*
	 * Register the DRM device with the core and the connectors with
	 * sysfs
	 */
	err = drm_dev_register(ddev, 0);
	if (err < 0)
		goto err_out1;

	panfrost_gem_shrinker_init(ddev);

	return 0;

err_out1:
	/* Disable PM before fini so the device can't autosuspend mid-teardown. */
	pm_runtime_disable(pfdev->dev);
	panfrost_device_fini(pfdev);
	pm_runtime_set_suspended(pfdev->dev);
err_out0:
	drm_dev_put(ddev);
	return err;
}

/*
 * Platform driver remove: unregister from DRM, tear down the shrinker,
 * power the device up once more so hardware teardown is safe, then
 * finalise and drop the DRM device reference.
 */
static int panfrost_remove(struct platform_device *pdev)
{
	struct panfrost_device *pfdev = platform_get_drvdata(pdev);
	struct drm_device *ddev = pfdev->ddev;

	drm_dev_unregister(ddev);
	panfrost_gem_shrinker_cleanup(ddev);

	/* Ensure the GPU is powered while panfrost_device_fini() runs. */
	pm_runtime_get_sync(pfdev->dev);
	pm_runtime_disable(pfdev->dev);
	panfrost_device_fini(pfdev);
	pm_runtime_set_suspended(pfdev->dev);

	drm_dev_put(ddev);
	return 0;
}

/* Generic Mali: a single "mali" regulator and one optional power domain. */
static const char * const default_supplies[] = { "mali" };
static const struct panfrost_compatible default_data = {
	.num_supplies = ARRAY_SIZE(default_supplies),
	.supply_names = default_supplies,
	.num_pm_domains = 1, /* optional */
	.pm_domain_names = NULL,
};

/* Amlogic SoCs need a vendor quirk applied during GPU power-on. */
static const struct panfrost_compatible amlogic_data = {
	.num_supplies = ARRAY_SIZE(default_supplies),
	.supply_names = default_supplies,
	.vendor_quirk = panfrost_gpu_amlogic_quirk,
};

669
670
671
672
673
674
675
const char * const mediatek_mt8183_supplies[] = { "mali", "sram" };
const char * const mediatek_mt8183_pm_domains[] = { "core0", "core1", "core2" };
static const struct panfrost_compatible mediatek_mt8183_data = {
	.num_supplies = ARRAY_SIZE(mediatek_mt8183_supplies),
	.supply_names = mediatek_mt8183_supplies,
	.num_pm_domains = ARRAY_SIZE(mediatek_mt8183_pm_domains),
	.pm_domain_names = mediatek_mt8183_pm_domains,
676
	.set_opp = panfrost_mt8183_set_opp,
677
678
};

/* Device-tree match table; .data selects the per-SoC compatible info. */
static const struct of_device_id dt_match[] = {
	/* Set first to probe before the generic compatibles */
	{ .compatible = "amlogic,meson-gxm-mali",
	  .data = &amlogic_data, },
	{ .compatible = "amlogic,meson-g12a-mali",
	  .data = &amlogic_data, },
	{ .compatible = "arm,mali-t604", .data = &default_data, },
	{ .compatible = "arm,mali-t624", .data = &default_data, },
	{ .compatible = "arm,mali-t628", .data = &default_data, },
	{ .compatible = "arm,mali-t720", .data = &default_data, },
	{ .compatible = "arm,mali-t760", .data = &default_data, },
	{ .compatible = "arm,mali-t820", .data = &default_data, },
	{ .compatible = "arm,mali-t830", .data = &default_data, },
	{ .compatible = "arm,mali-t860", .data = &default_data, },
	{ .compatible = "arm,mali-t880", .data = &default_data, },
	{ .compatible = "arm,mali-bifrost", .data = &default_data, },
	{ .compatible = "mediatek,mt8183-mali", .data = &mediatek_mt8183_data },
	{}
};
MODULE_DEVICE_TABLE(of, dt_match);

/* System sleep is routed through runtime PM force-suspend/resume. */
static const struct dev_pm_ops panfrost_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, pm_runtime_force_resume)
	SET_RUNTIME_PM_OPS(panfrost_device_suspend, panfrost_device_resume, NULL)
};

/* Platform driver glue; module_platform_driver() generates init/exit. */
static struct platform_driver panfrost_driver = {
	.probe		= panfrost_probe,
	.remove		= panfrost_remove,
	.driver		= {
		.name	= "panfrost",
		.pm	= &panfrost_pm_ops,
		.of_match_table = dt_match,
	},
};
module_platform_driver(panfrost_driver);

MODULE_AUTHOR("Panfrost Project Developers");
MODULE_DESCRIPTION("Panfrost DRM Driver");
MODULE_LICENSE("GPL v2");