// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012 Avionic Design GmbH
 * Copyright (C) 2012-2016 NVIDIA CORPORATION.  All rights reserved.
 */

#include <linux/bitops.h>
#include <linux/host1x.h>
#include <linux/idr.h>
#include <linux/iommu.h>
#include <linux/module.h>
#include <linux/platform_device.h>

#include <drm/drm_aperture.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_debugfs.h>
#include <drm/drm_drv.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_ioctl.h>
#include <drm/drm_prime.h>
#include <drm/drm_vblank.h>

#include "dc.h"
#include "drm.h"
#include "gem.h"
#include "uapi.h"

#define DRIVER_NAME "tegra"
#define DRIVER_DESC "NVIDIA Tegra graphics"
#define DRIVER_DATE "20120330"
#define DRIVER_MAJOR 1
#define DRIVER_MINOR 0
#define DRIVER_PATCHLEVEL 0

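/*
 * CARVEOUT_SZ is the amount of IOVA space reserved for allocations that are
 * not backed by GEM objects (firmware images, for example), and 16383 is
 * assumed to be the largest fetch count that a CDMA GATHER opcode can encode.
 */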
#define CARVEOUT_SZ SZ_64M
#define CDMA_GATHER_FETCHES_MAX_NB 16383

static int tegra_atomic_check(struct drm_device *drm,
			      struct drm_atomic_state *state)
{
	int err;

	err = drm_atomic_helper_check(drm, state);
	if (err < 0)
		return err;

	return tegra_display_hub_atomic_check(drm, state);
}

static const struct drm_mode_config_funcs tegra_drm_mode_config_funcs = {
	.fb_create = tegra_fb_create,
#ifdef CONFIG_DRM_FBDEV_EMULATION
	.output_poll_changed = drm_fb_helper_output_poll_changed,
#endif
	.atomic_check = tegra_atomic_check,
	.atomic_commit = drm_atomic_helper_commit,
};

static void tegra_atomic_post_commit(struct drm_device *drm,
				     struct drm_atomic_state *old_state)
{
	struct drm_crtc_state *old_crtc_state __maybe_unused;
	struct drm_crtc *crtc;
	unsigned int i;

	for_each_old_crtc_in_state(old_state, crtc, old_crtc_state, i)
		tegra_crtc_atomic_post_commit(crtc, old_state);
}

static void tegra_atomic_commit_tail(struct drm_atomic_state *old_state)
{
	struct drm_device *drm = old_state->dev;
	struct tegra_drm *tegra = drm->dev_private;

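	/*
	 * SoCs with a display hub (Tegra186 and later) need the hub state to
	 * be committed after the CRTCs have been disabled but before the
	 * planes are committed, so the generic commit-tail helper cannot be
	 * used.
	 */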
	if (tegra->hub) {
		bool fence_cookie = dma_fence_begin_signalling();

		drm_atomic_helper_commit_modeset_disables(drm, old_state);
		tegra_display_hub_atomic_commit(drm, old_state);
		drm_atomic_helper_commit_planes(drm, old_state, 0);
		drm_atomic_helper_commit_modeset_enables(drm, old_state);
		drm_atomic_helper_commit_hw_done(old_state);
		dma_fence_end_signalling(fence_cookie);
		drm_atomic_helper_wait_for_vblanks(drm, old_state);
		drm_atomic_helper_cleanup_planes(drm, old_state);
	} else {
		drm_atomic_helper_commit_tail_rpm(old_state);
	}

	tegra_atomic_post_commit(drm, old_state);
}

static const struct drm_mode_config_helper_funcs
tegra_drm_mode_config_helpers = {
	.atomic_commit_tail = tegra_atomic_commit_tail,
};

static int tegra_drm_open(struct drm_device *drm, struct drm_file *filp)
{
	struct tegra_drm_file *fpriv;

	fpriv = kzalloc(sizeof(*fpriv), GFP_KERNEL);
	if (!fpriv)
		return -ENOMEM;

	idr_init_base(&fpriv->legacy_contexts, 1);
	xa_init_flags(&fpriv->contexts, XA_FLAGS_ALLOC1);
	xa_init(&fpriv->syncpoints);
	mutex_init(&fpriv->lock);
	filp->driver_priv = fpriv;

	return 0;
}

static void tegra_drm_context_free(struct tegra_drm_context *context)
{
	context->client->ops->close_channel(context);
	kfree(context);
}

static int host1x_reloc_copy_from_user(struct host1x_reloc *dest,
				       struct drm_tegra_reloc __user *src,
				       struct drm_device *drm,
				       struct drm_file *file)
{
	u32 cmdbuf, target;
	int err;

	err = get_user(cmdbuf, &src->cmdbuf.handle);
	if (err < 0)
		return err;

	err = get_user(dest->cmdbuf.offset, &src->cmdbuf.offset);
	if (err < 0)
		return err;

	err = get_user(target, &src->target.handle);
	if (err < 0)
		return err;

	err = get_user(dest->target.offset, &src->target.offset);
	if (err < 0)
		return err;

	err = get_user(dest->shift, &src->shift);
	if (err < 0)
		return err;

	dest->flags = HOST1X_RELOC_READ | HOST1X_RELOC_WRITE;

	dest->cmdbuf.bo = tegra_gem_lookup(file, cmdbuf);
	if (!dest->cmdbuf.bo)
		return -ENOENT;

	dest->target.bo = tegra_gem_lookup(file, target);
	if (!dest->target.bo)
		return -ENOENT;

	return 0;
}

int tegra_drm_submit(struct tegra_drm_context *context,
		     struct drm_tegra_submit *args, struct drm_device *drm,
		     struct drm_file *file)
{
	struct host1x_client *client = &context->client->base;
	unsigned int num_cmdbufs = args->num_cmdbufs;
	unsigned int num_relocs = args->num_relocs;
	struct drm_tegra_cmdbuf __user *user_cmdbufs;
	struct drm_tegra_reloc __user *user_relocs;
	struct drm_tegra_syncpt __user *user_syncpt;
	struct drm_tegra_syncpt syncpt;
	struct host1x *host1x = dev_get_drvdata(drm->dev->parent);
	struct drm_gem_object **refs;
	struct host1x_syncpt *sp = NULL;
	struct host1x_job *job;
	unsigned int num_refs;
	int err;

	user_cmdbufs = u64_to_user_ptr(args->cmdbufs);
	user_relocs = u64_to_user_ptr(args->relocs);
	user_syncpt = u64_to_user_ptr(args->syncpts);

	/* We don't yet support more than one syncpt_incr struct per submit */
	if (args->num_syncpts != 1)
		return -EINVAL;

	/* We don't yet support waitchks */
	if (args->num_waitchks != 0)
		return -EINVAL;

	job = host1x_job_alloc(context->channel, args->num_cmdbufs,
			       args->num_relocs, false);
	if (!job)
		return -ENOMEM;

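	/*
	 * Jobs submitted through this legacy UAPI are serialized on the
	 * channel and use the channel-based syncpoint recovery mechanism if
	 * they time out.
	 */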
	job->num_relocs = args->num_relocs;
	job->client = client;
	job->class = client->class;
	job->serialize = true;
	job->syncpt_recovery = true;

	/*
	 * Track referenced BOs so that they can be unreferenced after the
	 * submission is complete.
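	 * Each relocation references both a cmdbuf BO and a target BO, so it
	 * accounts for two entries.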
	 */
	num_refs = num_cmdbufs + num_relocs * 2;

	refs = kmalloc_array(num_refs, sizeof(*refs), GFP_KERNEL);
	if (!refs) {
		err = -ENOMEM;
		goto put;
	}

	/* reuse as an iterator later */
	num_refs = 0;

	while (num_cmdbufs) {
		struct drm_tegra_cmdbuf cmdbuf;
		struct host1x_bo *bo;
		struct tegra_bo *obj;
		u64 offset;

		if (copy_from_user(&cmdbuf, user_cmdbufs, sizeof(cmdbuf))) {
			err = -EFAULT;
			goto fail;
		}

		/*
		 * The maximum number of CDMA gather fetches is 16383; a
		 * higher value means the word count is malformed.
		 */
		if (cmdbuf.words > CDMA_GATHER_FETCHES_MAX_NB) {
			err = -EINVAL;
			goto fail;
		}

		bo = tegra_gem_lookup(file, cmdbuf.handle);
		if (!bo) {
			err = -ENOENT;
			goto fail;
		}

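		/* compute the gather's end offset in 64 bits to avoid overflow */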
		offset = (u64)cmdbuf.offset + (u64)cmdbuf.words * sizeof(u32);
		obj = host1x_to_tegra_bo(bo);
		refs[num_refs++] = &obj->gem;

		/*
		 * The gather buffer base address must be 4-byte aligned;
		 * an unaligned offset is malformed and causes command
		 * stream corruption when the buffer address is relocated.
		 */
		if (offset & 3 || offset > obj->gem.size) {
			err = -EINVAL;
			goto fail;
		}

		host1x_job_add_gather(job, bo, cmdbuf.words, cmdbuf.offset);
		num_cmdbufs--;
		user_cmdbufs++;
	}

	/* copy and resolve relocations from submit */
	while (num_relocs--) {
		struct host1x_reloc *reloc;
		struct tegra_bo *obj;

		err = host1x_reloc_copy_from_user(&job->relocs[num_relocs],
						  &user_relocs[num_relocs], drm,
						  file);
		if (err < 0)
			goto fail;

		reloc = &job->relocs[num_relocs];
		obj = host1x_to_tegra_bo(reloc->cmdbuf.bo);
		refs[num_refs++] = &obj->gem;

		/*
		 * An unaligned cmdbuf offset would cause an unaligned write
		 * while patching the relocations, corrupting the command
		 * stream.
		 */
		if (reloc->cmdbuf.offset & 3 ||
		    reloc->cmdbuf.offset >= obj->gem.size) {
			err = -EINVAL;
			goto fail;
		}

		obj = host1x_to_tegra_bo(reloc->target.bo);
		refs[num_refs++] = &obj->gem;

		if (reloc->target.offset >= obj->gem.size) {
			err = -EINVAL;
			goto fail;
		}
	}

	if (copy_from_user(&syncpt, user_syncpt, sizeof(syncpt))) {
		err = -EFAULT;
		goto fail;
	}

	/* Syncpoint ref will be dropped on job release. */
	sp = host1x_syncpt_get_by_id(host1x, syncpt.id);
	if (!sp) {
		err = -ENOENT;
		goto fail;
	}

	job->is_addr_reg = context->client->ops->is_addr_reg;
	job->is_valid_class = context->client->ops->is_valid_class;
	job->syncpt_incrs = syncpt.incrs;
	job->syncpt = sp;
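	/* the timeout is given in milliseconds and capped at 10 seconds */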
	job->timeout = 10000;

	if (args->timeout && args->timeout < 10000)
		job->timeout = args->timeout;

	err = host1x_job_pin(job, context->client->base.dev);
	if (err)
		goto fail;

	err = host1x_job_submit(job);
	if (err) {
		host1x_job_unpin(job);
		goto fail;
	}

	args->fence = job->syncpt_end;
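	/*
	 * Note that execution falls through here on success, too: the BO
	 * references taken above are dropped in either case.
	 */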

fail:
	while (num_refs--)
		drm_gem_object_put(refs[num_refs]);

	kfree(refs);

put:
	host1x_job_put(job);
	return err;
}

#ifdef CONFIG_DRM_TEGRA_STAGING
static int tegra_gem_create(struct drm_device *drm, void *data,
			    struct drm_file *file)
{
	struct drm_tegra_gem_create *args = data;
	struct tegra_bo *bo;

	bo = tegra_bo_create_with_handle(file, drm, args->size, args->flags,
					 &args->handle);
	if (IS_ERR(bo))
		return PTR_ERR(bo);

	return 0;
}

static int tegra_gem_mmap(struct drm_device *drm, void *data,
			  struct drm_file *file)
{
	struct drm_tegra_gem_mmap *args = data;
	struct drm_gem_object *gem;
	struct tegra_bo *bo;

	gem = drm_gem_object_lookup(file, args->handle);
	if (!gem)
		return -EINVAL;

	bo = to_tegra_bo(gem);

	args->offset = drm_vma_node_offset_addr(&bo->gem.vma_node);

	drm_gem_object_put(gem);

	return 0;
}

static int tegra_syncpt_read(struct drm_device *drm, void *data,
			     struct drm_file *file)
{
	struct host1x *host = dev_get_drvdata(drm->dev->parent);
	struct drm_tegra_syncpt_read *args = data;
	struct host1x_syncpt *sp;

	sp = host1x_syncpt_get_by_id_noref(host, args->id);
	if (!sp)
		return -EINVAL;

	args->value = host1x_syncpt_read_min(sp);
	return 0;
}

static int tegra_syncpt_incr(struct drm_device *drm, void *data,
			     struct drm_file *file)
{
	struct host1x *host1x = dev_get_drvdata(drm->dev->parent);
	struct drm_tegra_syncpt_incr *args = data;
	struct host1x_syncpt *sp;

	sp = host1x_syncpt_get_by_id_noref(host1x, args->id);
	if (!sp)
		return -EINVAL;

	return host1x_syncpt_incr(sp);
}

static int tegra_syncpt_wait(struct drm_device *drm, void *data,
			     struct drm_file *file)
{
	struct host1x *host1x = dev_get_drvdata(drm->dev->parent);
	struct drm_tegra_syncpt_wait *args = data;
	struct host1x_syncpt *sp;

	sp = host1x_syncpt_get_by_id_noref(host1x, args->id);
	if (!sp)
		return -EINVAL;

	return host1x_syncpt_wait(sp, args->thresh,
				  msecs_to_jiffies(args->timeout),
				  &args->value);
}

static int tegra_client_open(struct tegra_drm_file *fpriv,
			     struct tegra_drm_client *client,
			     struct tegra_drm_context *context)
{
	int err;

	err = client->ops->open_channel(client, context);
	if (err < 0)
		return err;

	err = idr_alloc(&fpriv->legacy_contexts, context, 1, 0, GFP_KERNEL);
	if (err < 0) {
		client->ops->close_channel(context);
		return err;
	}

	context->client = client;
	context->id = err;

	return 0;
}

static int tegra_open_channel(struct drm_device *drm, void *data,
			      struct drm_file *file)
{
	struct tegra_drm_file *fpriv = file->driver_priv;
	struct tegra_drm *tegra = drm->dev_private;
	struct drm_tegra_open_channel *args = data;
	struct tegra_drm_context *context;
	struct tegra_drm_client *client;
	int err = -ENODEV;

	context = kzalloc(sizeof(*context), GFP_KERNEL);
	if (!context)
		return -ENOMEM;

	mutex_lock(&fpriv->lock);

	list_for_each_entry(client, &tegra->clients, list)
		if (client->base.class == args->client) {
			err = tegra_client_open(fpriv, client, context);
			if (err < 0)
				break;

			args->context = context->id;
			break;
		}

	if (err < 0)
		kfree(context);

	mutex_unlock(&fpriv->lock);
	return err;
}

static int tegra_close_channel(struct drm_device *drm, void *data,
			       struct drm_file *file)
{
	struct tegra_drm_file *fpriv = file->driver_priv;
	struct drm_tegra_close_channel *args = data;
	struct tegra_drm_context *context;
	int err = 0;

	mutex_lock(&fpriv->lock);

	context = idr_find(&fpriv->legacy_contexts, args->context);
	if (!context) {
		err = -EINVAL;
		goto unlock;
	}

	idr_remove(&fpriv->legacy_contexts, context->id);
	tegra_drm_context_free(context);

unlock:
	mutex_unlock(&fpriv->lock);
	return err;
}

static int tegra_get_syncpt(struct drm_device *drm, void *data,
			    struct drm_file *file)
{
	struct tegra_drm_file *fpriv = file->driver_priv;
	struct drm_tegra_get_syncpt *args = data;
	struct tegra_drm_context *context;
	struct host1x_syncpt *syncpt;
	int err = 0;

	mutex_lock(&fpriv->lock);

	context = idr_find(&fpriv->legacy_contexts, args->context);
	if (!context) {
		err = -ENODEV;
		goto unlock;
	}

	if (args->index >= context->client->base.num_syncpts) {
		err = -EINVAL;
		goto unlock;
	}

	syncpt = context->client->base.syncpts[args->index];
	args->id = host1x_syncpt_id(syncpt);

unlock:
	mutex_unlock(&fpriv->lock);
	return err;
}

static int tegra_submit(struct drm_device *drm, void *data,
			struct drm_file *file)
{
	struct tegra_drm_file *fpriv = file->driver_priv;
	struct drm_tegra_submit *args = data;
	struct tegra_drm_context *context;
	int err;

	mutex_lock(&fpriv->lock);

	context = idr_find(&fpriv->legacy_contexts, args->context);
	if (!context) {
		err = -ENODEV;
		goto unlock;
	}

	err = context->client->ops->submit(context, args, drm, file);

unlock:
	mutex_unlock(&fpriv->lock);
	return err;
}

static int tegra_get_syncpt_base(struct drm_device *drm, void *data,
				 struct drm_file *file)
{
	struct tegra_drm_file *fpriv = file->driver_priv;
	struct drm_tegra_get_syncpt_base *args = data;
	struct tegra_drm_context *context;
	struct host1x_syncpt_base *base;
	struct host1x_syncpt *syncpt;
	int err = 0;

	mutex_lock(&fpriv->lock);

	context = idr_find(&fpriv->legacy_contexts, args->context);
	if (!context) {
		err = -ENODEV;
		goto unlock;
	}

	if (args->syncpt >= context->client->base.num_syncpts) {
		err = -EINVAL;
		goto unlock;
	}

	syncpt = context->client->base.syncpts[args->syncpt];

	base = host1x_syncpt_get_base(syncpt);
	if (!base) {
		err = -ENXIO;
		goto unlock;
	}

	args->id = host1x_syncpt_base_id(base);

unlock:
	mutex_unlock(&fpriv->lock);
	return err;
}

static int tegra_gem_set_tiling(struct drm_device *drm, void *data,
				struct drm_file *file)
{
	struct drm_tegra_gem_set_tiling *args = data;
	enum tegra_bo_tiling_mode mode;
	struct drm_gem_object *gem;
	unsigned long value = 0;
	struct tegra_bo *bo;

	switch (args->mode) {
	case DRM_TEGRA_GEM_TILING_MODE_PITCH:
		mode = TEGRA_BO_TILING_MODE_PITCH;

		if (args->value != 0)
			return -EINVAL;

		break;

	case DRM_TEGRA_GEM_TILING_MODE_TILED:
		mode = TEGRA_BO_TILING_MODE_TILED;

		if (args->value != 0)
			return -EINVAL;

		break;

	case DRM_TEGRA_GEM_TILING_MODE_BLOCK:
		mode = TEGRA_BO_TILING_MODE_BLOCK;

		if (args->value > 5)
			return -EINVAL;

		value = args->value;
		break;

	default:
		return -EINVAL;
	}

	gem = drm_gem_object_lookup(file, args->handle);
	if (!gem)
		return -ENOENT;

	bo = to_tegra_bo(gem);

	bo->tiling.mode = mode;
	bo->tiling.value = value;

	drm_gem_object_put(gem);

	return 0;
}

static int tegra_gem_get_tiling(struct drm_device *drm, void *data,
				struct drm_file *file)
{
	struct drm_tegra_gem_get_tiling *args = data;
	struct drm_gem_object *gem;
	struct tegra_bo *bo;
	int err = 0;

	gem = drm_gem_object_lookup(file, args->handle);
	if (!gem)
		return -ENOENT;

	bo = to_tegra_bo(gem);

	switch (bo->tiling.mode) {
	case TEGRA_BO_TILING_MODE_PITCH:
		args->mode = DRM_TEGRA_GEM_TILING_MODE_PITCH;
		args->value = 0;
		break;

	case TEGRA_BO_TILING_MODE_TILED:
		args->mode = DRM_TEGRA_GEM_TILING_MODE_TILED;
		args->value = 0;
		break;

	case TEGRA_BO_TILING_MODE_BLOCK:
		args->mode = DRM_TEGRA_GEM_TILING_MODE_BLOCK;
		args->value = bo->tiling.value;
		break;

	default:
		err = -EINVAL;
		break;
	}

	drm_gem_object_put(gem);

	return err;
}

static int tegra_gem_set_flags(struct drm_device *drm, void *data,
			       struct drm_file *file)
{
	struct drm_tegra_gem_set_flags *args = data;
	struct drm_gem_object *gem;
	struct tegra_bo *bo;

	if (args->flags & ~DRM_TEGRA_GEM_FLAGS)
		return -EINVAL;

	gem = drm_gem_object_lookup(file, args->handle);
	if (!gem)
		return -ENOENT;

	bo = to_tegra_bo(gem);
	bo->flags = 0;

	if (args->flags & DRM_TEGRA_GEM_BOTTOM_UP)
		bo->flags |= TEGRA_BO_BOTTOM_UP;

	drm_gem_object_put(gem);

	return 0;
}

static int tegra_gem_get_flags(struct drm_device *drm, void *data,
			       struct drm_file *file)
{
	struct drm_tegra_gem_get_flags *args = data;
	struct drm_gem_object *gem;
	struct tegra_bo *bo;

	gem = drm_gem_object_lookup(file, args->handle);
	if (!gem)
		return -ENOENT;

	bo = to_tegra_bo(gem);
	args->flags = 0;

	if (bo->flags & TEGRA_BO_BOTTOM_UP)
		args->flags |= DRM_TEGRA_GEM_BOTTOM_UP;

	drm_gem_object_put(gem);

	return 0;
}
#endif

static const struct drm_ioctl_desc tegra_drm_ioctls[] = {
#ifdef CONFIG_DRM_TEGRA_STAGING
	DRM_IOCTL_DEF_DRV(TEGRA_CHANNEL_OPEN, tegra_drm_ioctl_channel_open,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(TEGRA_CHANNEL_CLOSE, tegra_drm_ioctl_channel_close,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(TEGRA_CHANNEL_MAP, tegra_drm_ioctl_channel_map,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(TEGRA_CHANNEL_UNMAP, tegra_drm_ioctl_channel_unmap,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(TEGRA_CHANNEL_SUBMIT, tegra_drm_ioctl_channel_submit,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(TEGRA_SYNCPOINT_ALLOCATE, tegra_drm_ioctl_syncpoint_allocate,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(TEGRA_SYNCPOINT_FREE, tegra_drm_ioctl_syncpoint_free,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(TEGRA_SYNCPOINT_WAIT, tegra_drm_ioctl_syncpoint_wait,
			  DRM_RENDER_ALLOW),

	DRM_IOCTL_DEF_DRV(TEGRA_GEM_CREATE, tegra_gem_create, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(TEGRA_GEM_MMAP, tegra_gem_mmap, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(TEGRA_SYNCPT_READ, tegra_syncpt_read,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(TEGRA_SYNCPT_INCR, tegra_syncpt_incr,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(TEGRA_SYNCPT_WAIT, tegra_syncpt_wait,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(TEGRA_OPEN_CHANNEL, tegra_open_channel,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(TEGRA_CLOSE_CHANNEL, tegra_close_channel,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(TEGRA_GET_SYNCPT, tegra_get_syncpt,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(TEGRA_SUBMIT, tegra_submit,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(TEGRA_GET_SYNCPT_BASE, tegra_get_syncpt_base,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(TEGRA_GEM_SET_TILING, tegra_gem_set_tiling,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(TEGRA_GEM_GET_TILING, tegra_gem_get_tiling,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(TEGRA_GEM_SET_FLAGS, tegra_gem_set_flags,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(TEGRA_GEM_GET_FLAGS, tegra_gem_get_flags,
			  DRM_RENDER_ALLOW),
#endif
};

static const struct file_operations tegra_drm_fops = {
	.owner = THIS_MODULE,
	.open = drm_open,
	.release = drm_release,
	.unlocked_ioctl = drm_ioctl,
	.mmap = tegra_drm_mmap,
	.poll = drm_poll,
	.read = drm_read,
	.compat_ioctl = drm_compat_ioctl,
	.llseek = noop_llseek,
};

static int tegra_drm_context_cleanup(int id, void *p, void *data)
{
	struct tegra_drm_context *context = p;

	tegra_drm_context_free(context);

	return 0;
}

static void tegra_drm_postclose(struct drm_device *drm, struct drm_file *file)
{
	struct tegra_drm_file *fpriv = file->driver_priv;

	mutex_lock(&fpriv->lock);
	idr_for_each(&fpriv->legacy_contexts, tegra_drm_context_cleanup, NULL);
	tegra_drm_uapi_close_file(fpriv);
	mutex_unlock(&fpriv->lock);

	idr_destroy(&fpriv->legacy_contexts);
	mutex_destroy(&fpriv->lock);
	kfree(fpriv);
}

#ifdef CONFIG_DEBUG_FS
static int tegra_debugfs_framebuffers(struct seq_file *s, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)s->private;
	struct drm_device *drm = node->minor->dev;
	struct drm_framebuffer *fb;

	mutex_lock(&drm->mode_config.fb_lock);

	list_for_each_entry(fb, &drm->mode_config.fb_list, head) {
		seq_printf(s, "%3d: user size: %d x %d, depth %d, %d bpp, refcount %d\n",
			   fb->base.id, fb->width, fb->height,
			   fb->format->depth,
			   fb->format->cpp[0] * 8,
			   drm_framebuffer_read_refcount(fb));
	}

	mutex_unlock(&drm->mode_config.fb_lock);

	return 0;
}

static int tegra_debugfs_iova(struct seq_file *s, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)s->private;
	struct drm_device *drm = node->minor->dev;
	struct tegra_drm *tegra = drm->dev_private;
	struct drm_printer p = drm_seq_file_printer(s);

	if (tegra->domain) {
		mutex_lock(&tegra->mm_lock);
		drm_mm_print(&tegra->mm, &p);
		mutex_unlock(&tegra->mm_lock);
	}

	return 0;
}

static struct drm_info_list tegra_debugfs_list[] = {
	{ "framebuffers", tegra_debugfs_framebuffers, 0 },
	{ "iova", tegra_debugfs_iova, 0 },
};

static void tegra_debugfs_init(struct drm_minor *minor)
{
	drm_debugfs_create_files(tegra_debugfs_list,
				 ARRAY_SIZE(tegra_debugfs_list),
				 minor->debugfs_root, minor);
}
#endif

static const struct drm_driver tegra_drm_driver = {
	.driver_features = DRIVER_MODESET | DRIVER_GEM |
			   DRIVER_ATOMIC | DRIVER_RENDER | DRIVER_SYNCOBJ,
	.open = tegra_drm_open,
	.postclose = tegra_drm_postclose,
	.lastclose = drm_fb_helper_lastclose,

#if defined(CONFIG_DEBUG_FS)
	.debugfs_init = tegra_debugfs_init,
#endif

	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
	.gem_prime_import = tegra_gem_prime_import,

	.dumb_create = tegra_bo_dumb_create,

	.ioctls = tegra_drm_ioctls,
	.num_ioctls = ARRAY_SIZE(tegra_drm_ioctls),
	.fops = &tegra_drm_fops,

	.name = DRIVER_NAME,
	.desc = DRIVER_DESC,
	.date = DRIVER_DATE,
	.major = DRIVER_MAJOR,
	.minor = DRIVER_MINOR,
	.patchlevel = DRIVER_PATCHLEVEL,
};

int tegra_drm_register_client(struct tegra_drm *tegra,
			      struct tegra_drm_client *client)
{
	/*
	 * When MLOCKs are implemented, change to allocate a shared channel
	 * only when MLOCKs are disabled.
	 */
	client->shared_channel = host1x_channel_request(&client->base);
	if (!client->shared_channel)
		return -EBUSY;

	mutex_lock(&tegra->clients_lock);
	list_add_tail(&client->list, &tegra->clients);
	client->drm = tegra;
	mutex_unlock(&tegra->clients_lock);

	return 0;
}

int tegra_drm_unregister_client(struct tegra_drm *tegra,
				struct tegra_drm_client *client)
{
	mutex_lock(&tegra->clients_lock);
	list_del_init(&client->list);
	client->drm = NULL;
	mutex_unlock(&tegra->clients_lock);

	if (client->shared_channel)
		host1x_channel_put(client->shared_channel);

	return 0;
}

int host1x_client_iommu_attach(struct host1x_client *client)
{
	struct iommu_domain *domain = iommu_get_domain_for_dev(client->dev);
	struct drm_device *drm = dev_get_drvdata(client->host);
	struct tegra_drm *tegra = drm->dev_private;
	struct iommu_group *group = NULL;
	int err;

	/*
	 * If the host1x client is already attached to an IOMMU domain that is
	 * not the shared IOMMU domain, don't try to attach it to a different
	 * domain. This allows using the IOMMU-backed DMA API.
	 */
	if (domain && domain != tegra->domain)
		return 0;

	if (tegra->domain) {
		group = iommu_group_get(client->dev);
		if (!group)
			return -ENODEV;

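		/*
		 * The group may already be attached to the shared domain if
		 * another client in the same IOMMU group attached it first.
		 */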
		if (domain != tegra->domain) {
			err = iommu_attach_group(tegra->domain, group);
			if (err < 0) {
				iommu_group_put(group);
				return err;
			}
		}

		tegra->use_explicit_iommu = true;
	}

	client->group = group;

	return 0;
}

void host1x_client_iommu_detach(struct host1x_client *client)
{
	struct drm_device *drm = dev_get_drvdata(client->host);
	struct tegra_drm *tegra = drm->dev_private;
	struct iommu_domain *domain;

	if (client->group) {
		/*
		 * Devices that are part of the same group may no longer be
		 * attached to a domain at this point because their group may
		 * have been detached by an earlier client.
		 */
		domain = iommu_get_domain_for_dev(client->dev);
		if (domain)
			iommu_detach_group(tegra->domain, client->group);

		iommu_group_put(client->group);
		client->group = NULL;
	}
}

void *tegra_drm_alloc(struct tegra_drm *tegra, size_t size, dma_addr_t *dma)
{
	struct iova *alloc;
	void *virt;
	gfp_t gfp;
	int err;

	if (tegra->domain)
		size = iova_align(&tegra->carveout.domain, size);
	else
		size = PAGE_ALIGN(size);

	gfp = GFP_KERNEL | __GFP_ZERO;
	if (!tegra->domain) {
		/*
		 * Many units only support 32-bit addresses, even on 64-bit
		 * SoCs. If there is no IOMMU to translate into a 32-bit IO
		 * virtual address space, force allocations to be in the
		 * lower 32-bit range.
		 */
		gfp |= GFP_DMA;
	}

	virt = (void *)__get_free_pages(gfp, get_order(size));
	if (!virt)
		return ERR_PTR(-ENOMEM);

	if (!tegra->domain) {
		/*
		 * If IOMMU is disabled, devices address physical memory
		 * directly.
		 */
		*dma = virt_to_phys(virt);
		return virt;
	}

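	/* allocate a size-aligned IOVA range below the carveout limit */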
	alloc = alloc_iova(&tegra->carveout.domain,
			   size >> tegra->carveout.shift,
			   tegra->carveout.limit, true);
	if (!alloc) {
		err = -EBUSY;
		goto free_pages;
	}

	*dma = iova_dma_addr(&tegra->carveout.domain, alloc);
	err = iommu_map(tegra->domain, *dma, virt_to_phys(virt),
			size, IOMMU_READ | IOMMU_WRITE);
	if (err < 0)
		goto free_iova;

	return virt;

free_iova:
	__free_iova(&tegra->carveout.domain, alloc);
free_pages:
	free_pages((unsigned long)virt, get_order(size));

	return ERR_PTR(err);
}

void tegra_drm_free(struct tegra_drm *tegra, size_t size, void *virt,
		    dma_addr_t dma)
{
	if (tegra->domain)
		size = iova_align(&tegra->carveout.domain, size);
	else
		size = PAGE_ALIGN(size);

	if (tegra->domain) {
		iommu_unmap(tegra->domain, dma, size);
		free_iova(&tegra->carveout.domain,
			  iova_pfn(&tegra->carveout.domain, dma));
	}

	free_pages((unsigned long)virt, get_order(size));
}

static bool host1x_drm_wants_iommu(struct host1x_device *dev)
{
	struct host1x *host1x = dev_get_drvdata(dev->dev.parent);
	struct iommu_domain *domain;

	/*
	 * If the Tegra DRM clients are backed by an IOMMU, push buffers are
	 * likely to be allocated beyond the 32-bit boundary if sufficient
	 * system memory is available. This is problematic on earlier Tegra
	 * generations where host1x supports a maximum of 32 address bits in
	 * the GATHER opcode. In this case, unless host1x is behind an IOMMU
	 * as well it won't be able to process buffers allocated beyond the
	 * 32-bit boundary.
	 *
	 * The DMA API will use bounce buffers in this case, so that could
	 * perhaps still be made to work, even if less efficient, but there
	 * is another catch: in order to perform cache maintenance on pages
	 * allocated for discontiguous buffers we need to map and unmap the
	 * SG table representing these buffers. This is fine for something
	 * small like a push buffer, but it exhausts the bounce buffer pool
	 * (typically on the order of a few MiB) for framebuffers (many MiB
	 * for any modern resolution).
	 *
	 * Work around this by making sure that Tegra DRM clients only use
	 * an IOMMU if the parent host1x also uses an IOMMU.
	 *
	 * Note that there's still a small gap here that we don't cover: if
	 * the DMA API is backed by an IOMMU there's no way to control which
	 * device is attached to an IOMMU and which isn't, except via wiring
	 * up the device tree appropriately. This is considered a problem
	 * of integration, so care must be taken for the DT to be consistent.
	 */
	domain = iommu_get_domain_for_dev(dev->dev.parent);

	/*
	 * Tegra20 and Tegra30 don't support addressing memory beyond the
	 * 32-bit boundary, so the regular GATHER opcodes will always be
	 * sufficient and whether or not the host1x is attached to an IOMMU
	 * doesn't matter.
	 */
	if (!domain && host1x_get_dma_mask(host1x) <= DMA_BIT_MASK(32))
		return true;

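	/* otherwise, only use an IOMMU if the parent host1x uses one, too */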
	return domain != NULL;
}

static int host1x_drm_probe(struct host1x_device *dev)
{
	struct tegra_drm *tegra;
	struct drm_device *drm;
	int err;

	drm = drm_dev_alloc(&tegra_drm_driver, &dev->dev);
	if (IS_ERR(drm))
		return PTR_ERR(drm);

	tegra = kzalloc(sizeof(*tegra), GFP_KERNEL);
	if (!tegra) {
		err = -ENOMEM;
		goto put;
	}

	if (host1x_drm_wants_iommu(dev) && iommu_present(&platform_bus_type)) {
		tegra->domain = iommu_domain_alloc(&platform_bus_type);
		if (!tegra->domain) {
			err = -ENOMEM;
			goto free;
		}

		err = iova_cache_get();
		if (err < 0)
			goto domain;
	}

	mutex_init(&tegra->clients_lock);
	INIT_LIST_HEAD(&tegra->clients);

	dev_set_drvdata(&dev->dev, drm);
	drm->dev_private = tegra;
	tegra->drm = drm;

	drm_mode_config_init(drm);

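	/*
	 * The maximum dimensions are left at zero here; the display
	 * controllers fill in the real limits during host1x_device_init().
	 */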
	drm->mode_config.min_width = 0;
	drm->mode_config.min_height = 0;
	drm->mode_config.max_width = 0;
	drm->mode_config.max_height = 0;

	drm->mode_config.normalize_zpos = true;

	drm->mode_config.funcs = &tegra_drm_mode_config_funcs;
	drm->mode_config.helper_private = &tegra_drm_mode_config_helpers;

	err = tegra_drm_fb_prepare(drm);
	if (err < 0)
		goto config;

	drm_kms_helper_poll_init(drm);

	err = host1x_device_init(dev);
	if (err < 0)
		goto fbdev;

	/*
	 * Now that all display controllers have been initialized, the
	 * maximum supported resolution is known and the bitmasks for the
	 * horizontal and vertical bitfields can be computed.
	 */
	tegra->hmask = drm->mode_config.max_width - 1;
	tegra->vmask = drm->mode_config.max_height - 1;

	if (tegra->use_explicit_iommu) {
		u64 carveout_start, carveout_end, gem_start, gem_end;
		u64 dma_mask = dma_get_mask(&dev->dev);
		dma_addr_t start, end;
		unsigned long order;

		start = tegra->domain->geometry.aperture_start & dma_mask;
		end = tegra->domain->geometry.aperture_end & dma_mask;

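		/*
		 * Split the aperture: GEM allocations at the bottom, with
		 * the top CARVEOUT_SZ bytes reserved for the carveout.
		 */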
		gem_start = start;
		gem_end = end - CARVEOUT_SZ;
		carveout_start = gem_end + 1;
		carveout_end = end;

		order = __ffs(tegra->domain->pgsize_bitmap);
		init_iova_domain(&tegra->carveout.domain, 1UL << order,
				 carveout_start >> order);

		tegra->carveout.shift = iova_shift(&tegra->carveout.domain);
		tegra->carveout.limit = carveout_end >> tegra->carveout.shift;

		drm_mm_init(&tegra->mm, gem_start, gem_end - gem_start + 1);
		mutex_init(&tegra->mm_lock);

		DRM_DEBUG_DRIVER("IOMMU apertures:\n");
		DRM_DEBUG_DRIVER("  GEM: %#llx-%#llx\n", gem_start, gem_end);
		DRM_DEBUG_DRIVER("  Carveout: %#llx-%#llx\n", carveout_start,
				 carveout_end);
	} else if (tegra->domain) {
		iommu_domain_free(tegra->domain);
		tegra->domain = NULL;
		iova_cache_put();
	}

	if (tegra->hub) {
		err = tegra_display_hub_prepare(tegra->hub);
		if (err < 0)
			goto device;
	}

	/* syncpoints are used for full 32-bit hardware VBLANK counters */
	drm->max_vblank_count = 0xffffffff;

	err = drm_vblank_init(drm, drm->mode_config.num_crtc);
	if (err < 0)
		goto hub;

	drm_mode_config_reset(drm);

	err = drm_aperture_remove_framebuffers(false, &tegra_drm_driver);
	if (err < 0)
		goto hub;

	err = tegra_drm_fb_init(drm);
	if (err < 0)
		goto hub;

	err = drm_dev_register(drm, 0);
	if (err < 0)
		goto fb;

	return 0;

fb:
	tegra_drm_fb_exit(drm);
hub:
	if (tegra->hub)
		tegra_display_hub_cleanup(tegra->hub);
device:
	if (tegra->domain) {
		mutex_destroy(&tegra->mm_lock);
		drm_mm_takedown(&tegra->mm);
		put_iova_domain(&tegra->carveout.domain);
		iova_cache_put();
	}

	host1x_device_exit(dev);
fbdev:
	drm_kms_helper_poll_fini(drm);
	tegra_drm_fb_free(drm);
config:
	drm_mode_config_cleanup(drm);
domain:
	if (tegra->domain)
		iommu_domain_free(tegra->domain);
free:
	kfree(tegra);
put:
	drm_dev_put(drm);
	return err;
}

static int host1x_drm_remove(struct host1x_device *dev)
{
	struct drm_device *drm = dev_get_drvdata(&dev->dev);
	struct tegra_drm *tegra = drm->dev_private;
	int err;

	drm_dev_unregister(drm);

	drm_kms_helper_poll_fini(drm);
	tegra_drm_fb_exit(drm);
	drm_atomic_helper_shutdown(drm);
	drm_mode_config_cleanup(drm);

	if (tegra->hub)
		tegra_display_hub_cleanup(tegra->hub);

	err = host1x_device_exit(dev);
	if (err < 0)
		dev_err(&dev->dev, "host1x device cleanup failed: %d\n", err);

	if (tegra->domain) {
		mutex_destroy(&tegra->mm_lock);
		drm_mm_takedown(&tegra->mm);
		put_iova_domain(&tegra->carveout.domain);
		iova_cache_put();
		iommu_domain_free(tegra->domain);
	}

	kfree(tegra);
	drm_dev_put(drm);

	return 0;
}

#ifdef CONFIG_PM_SLEEP
static int host1x_drm_suspend(struct device *dev)
{
	struct drm_device *drm = dev_get_drvdata(dev);

	return drm_mode_config_helper_suspend(drm);
}

static int host1x_drm_resume(struct device *dev)
{
	struct drm_device *drm = dev_get_drvdata(dev);

	return drm_mode_config_helper_resume(drm);
}
#endif

static SIMPLE_DEV_PM_OPS(host1x_drm_pm_ops, host1x_drm_suspend,
			 host1x_drm_resume);

static const struct of_device_id host1x_drm_subdevs[] = {
	{ .compatible = "nvidia,tegra20-dc", },
	{ .compatible = "nvidia,tegra20-hdmi", },
	{ .compatible = "nvidia,tegra20-gr2d", },
	{ .compatible = "nvidia,tegra20-gr3d", },
	{ .compatible = "nvidia,tegra30-dc", },
	{ .compatible = "nvidia,tegra30-hdmi", },
	{ .compatible = "nvidia,tegra30-gr2d", },
	{ .compatible = "nvidia,tegra30-gr3d", },
	{ .compatible = "nvidia,tegra114-dc", },
	{ .compatible = "nvidia,tegra114-dsi", },
	{ .compatible = "nvidia,tegra114-hdmi", },
	{ .compatible = "nvidia,tegra114-gr2d", },
	{ .compatible = "nvidia,tegra114-gr3d", },
	{ .compatible = "nvidia,tegra124-dc", },
	{ .compatible = "nvidia,tegra124-sor", },
	{ .compatible = "nvidia,tegra124-hdmi", },
	{ .compatible = "nvidia,tegra124-dsi", },
	{ .compatible = "nvidia,tegra124-vic", },
	{ .compatible = "nvidia,tegra132-dsi", },
	{ .compatible = "nvidia,tegra210-dc", },
	{ .compatible = "nvidia,tegra210-dsi", },
	{ .compatible = "nvidia,tegra210-sor", },
	{ .compatible = "nvidia,tegra210-sor1", },
	{ .compatible = "nvidia,tegra210-vic", },
	{ .compatible = "nvidia,tegra186-display", },
	{ .compatible = "nvidia,tegra186-dc", },
	{ .compatible = "nvidia,tegra186-sor", },
	{ .compatible = "nvidia,tegra186-sor1", },
	{ .compatible = "nvidia,tegra186-vic", },
	{ .compatible = "nvidia,tegra194-display", },
	{ .compatible = "nvidia,tegra194-dc", },
	{ .compatible = "nvidia,tegra194-sor", },
	{ .compatible = "nvidia,tegra194-vic", },
	{ /* sentinel */ }
};

static struct host1x_driver host1x_drm_driver = {
	.driver = {
		.name = "drm",
		.pm = &host1x_drm_pm_ops,
	},
	.probe = host1x_drm_probe,
	.remove = host1x_drm_remove,
	.subdevs = host1x_drm_subdevs,
};

static struct platform_driver * const drivers[] = {
	&tegra_display_hub_driver,
	&tegra_dc_driver,
	&tegra_hdmi_driver,
	&tegra_dsi_driver,
	&tegra_dpaux_driver,
	&tegra_sor_driver,
	&tegra_gr2d_driver,
	&tegra_gr3d_driver,
	&tegra_vic_driver,
};

static int __init host1x_drm_init(void)
{
	int err;

	err = host1x_driver_register(&host1x_drm_driver);
	if (err < 0)
		return err;

	err = platform_register_drivers(drivers, ARRAY_SIZE(drivers));
	if (err < 0)
		goto unregister_host1x;

	return 0;

unregister_host1x:
	host1x_driver_unregister(&host1x_drm_driver);
	return err;
}
module_init(host1x_drm_init);

static void __exit host1x_drm_exit(void)
{
	platform_unregister_drivers(drivers, ARRAY_SIZE(drivers));
	host1x_driver_unregister(&host1x_drm_driver);
}
module_exit(host1x_drm_exit);

MODULE_AUTHOR("Thierry Reding <thierry.reding@avionic-design.de>");
MODULE_DESCRIPTION("NVIDIA Tegra DRM driver");
MODULE_LICENSE("GPL v2");