// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2011 Texas Instruments Incorporated - https://www.ti.com/
 * Author: Rob Clark <rob@ti.com>
 */

#include <linux/dma-mapping.h>
#include <linux/platform_device.h>
#include <linux/sort.h>
#include <linux/sys_soc.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_bridge.h>
#include <drm/drm_bridge_connector.h>
#include <drm/drm_drv.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_file.h>
#include <drm/drm_ioctl.h>
#include <drm/drm_panel.h>
#include <drm/drm_prime.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>

#include "omap_dmm_tiler.h"
#include "omap_drv.h"

#define DRIVER_NAME		MODULE_NAME
#define DRIVER_DESC		"OMAP DRM"
#define DRIVER_DATE		"20110917"
#define DRIVER_MAJOR		1
#define DRIVER_MINOR		0
#define DRIVER_PATCHLEVEL	0

/*
 * mode config funcs
 */

/* Notes about mapping DSS and DRM entities:
 *    CRTC:       overlay
 *    encoder:    manager.. with some extension to allow one primary CRTC
 *                and zero or more video CRTCs to be mapped to one encoder?
 *    connector:  dssdev.. a manager can be attached to/detached from
 *                different devices
 */

static void omap_atomic_wait_for_completion(struct drm_device *dev,
					    struct drm_atomic_state *old_state)
{
	struct drm_crtc_state *new_crtc_state;
	struct drm_crtc *crtc;
	unsigned int i;
	int ret;

	for_each_new_crtc_in_state(old_state, crtc, new_crtc_state, i) {
		if (!new_crtc_state->active)
			continue;

		ret = omap_crtc_wait_pending(crtc);

		if (!ret)
			dev_warn(dev->dev,
				 "atomic complete timeout (pipe %u)!\n", i);
	}
}

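/*
 * Custom commit tail: the generic helper commits planes before enabling the
 * new modeset, but (as explained in the comments below) the DISPC overlay
 * configuration depends on the video timings already being in the hardware.
 * So on everything except OMAP3 the enable step runs first, and the helper
 * waits for it to take effect before the planes are committed.
 */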
static void omap_atomic_commit_tail(struct drm_atomic_state *old_state)
{
	struct drm_device *dev = old_state->dev;
	struct omap_drm_private *priv = dev->dev_private;
	bool fence_cookie = dma_fence_begin_signalling();

	dispc_runtime_get(priv->dispc);

	/* Apply the atomic update. */
	drm_atomic_helper_commit_modeset_disables(dev, old_state);

	if (priv->omaprev != 0x3430) {
		/* With the current dss dispc implementation we have to enable
		 * the new modeset before we can commit planes. The dispc ovl
		 * configuration relies on the video mode configuration being
		 * written into the HW when the ovl configuration is
		 * calculated.
		 *
		 * This approach is not ideal because after a mode change the
		 * plane update is executed only after the first vblank
		 * interrupt. The dispc implementation should be fixed so that
		 * it is able to use uncommitted drm state information.
		 */
		drm_atomic_helper_commit_modeset_enables(dev, old_state);
		omap_atomic_wait_for_completion(dev, old_state);

		drm_atomic_helper_commit_planes(dev, old_state, 0);
	} else {
		/*
		 * OMAP3 DSS seems to have issues with the work-around above,
		 * resulting in endless sync lost errors if a crtc is enabled
		 * without a plane. For now, skip the WA for OMAP3.
		 */
		drm_atomic_helper_commit_planes(dev, old_state, 0);

		drm_atomic_helper_commit_modeset_enables(dev, old_state);
	}

	drm_atomic_helper_commit_hw_done(old_state);

	dma_fence_end_signalling(fence_cookie);

	/*
	 * Wait for completion of the page flips to ensure that old buffers
	 * can't be touched by the hardware anymore before cleaning up planes.
	 */
	omap_atomic_wait_for_completion(dev, old_state);

	drm_atomic_helper_cleanup_planes(dev, old_state);

	dispc_runtime_put(priv->dispc);
}

static int drm_atomic_state_normalized_zpos_cmp(const void *a, const void *b)
{
	const struct drm_plane_state *sa = *(struct drm_plane_state **)a;
	const struct drm_plane_state *sb = *(struct drm_plane_state **)b;

	if (sa->normalized_zpos != sb->normalized_zpos)
		return sa->normalized_zpos - sb->normalized_zpos;
	else
		return sa->plane->base.id - sb->plane->base.id;
}

/*
 * This replaces drm_atomic_normalize_zpos() to handle the dual-overlay case.
 *
 * Since both halves need to appear side by side, the zpos is recalculated
 * when dealing with dual-overlay cases so that the zpos of the other planes
 * stays consistent.
 */
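/*
 * Illustrative example of the renumbering performed below (a sketch, not
 * code from this driver): with three planes at zpos 0, 1, 2 where the plane
 * at zpos 0 is a dual-overlay plane, the normalized zpos values become
 * 0, 2, 3 -- the dual-overlay plane consumes two hardware overlays, so every
 * plane above it is shifted up by one slot.
 */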
static int omap_atomic_update_normalize_zpos(struct drm_device *dev,
					     struct drm_atomic_state *state)
{
	struct drm_crtc *crtc;
	struct drm_crtc_state *old_state, *new_state;
	struct drm_plane *plane;
	int c, i, n, inc;
	int total_planes = dev->mode_config.num_total_plane;
	struct drm_plane_state **states;
	int ret = 0;

	states = kmalloc_array(total_planes, sizeof(*states), GFP_KERNEL);
	if (!states)
		return -ENOMEM;

	for_each_oldnew_crtc_in_state(state, crtc, old_state, new_state, c) {
		if (old_state->plane_mask == new_state->plane_mask &&
		    !new_state->zpos_changed)
			continue;

		/* Reset the plane count and index for every CRTC. */
		n = 0;

		/*
		 * The normalization process might create new states for
		 * planes whose normalized_zpos has to be recalculated.
		 */
		drm_for_each_plane_mask(plane, dev, new_state->plane_mask) {
			struct drm_plane_state *plane_state =
				drm_atomic_get_plane_state(new_state->state,
							   plane);
			if (IS_ERR(plane_state)) {
				ret = PTR_ERR(plane_state);
				goto done;
			}
			states[n++] = plane_state;
		}

		sort(states, n, sizeof(*states),
		     drm_atomic_state_normalized_zpos_cmp, NULL);

		for (i = 0, inc = 0; i < n; i++) {
			plane = states[i]->plane;

			states[i]->normalized_zpos = i + inc;
			DRM_DEBUG_ATOMIC("[PLANE:%d:%s] updated normalized zpos value %d\n",
					 plane->base.id, plane->name,
					 states[i]->normalized_zpos);

			if (is_omap_plane_dual_overlay(states[i]))
				inc++;
		}
		new_state->zpos_changed = true;
	}

done:
	kfree(states);
	return ret;
}

static int omap_atomic_check(struct drm_device *dev,
			     struct drm_atomic_state *state)
{
	int ret;

	ret = drm_atomic_helper_check(dev, state);
	if (ret)
		return ret;

	if (dev->mode_config.normalize_zpos) {
		ret = omap_atomic_update_normalize_zpos(dev, state);
		if (ret)
			return ret;
	}

	return 0;
}

static const struct drm_mode_config_helper_funcs omap_mode_config_helper_funcs = {
	.atomic_commit_tail = omap_atomic_commit_tail,
};

static const struct drm_mode_config_funcs omap_mode_config_funcs = {
	.fb_create = omap_framebuffer_create,
	.output_poll_changed = drm_fb_helper_output_poll_changed,
	.atomic_check = omap_atomic_check,
	.atomic_commit = drm_atomic_helper_commit,
};

/* Global/shared object state funcs */

/*
 * This is a helper that returns the private state currently in operation.
 * Note that this would return the "old_state" if called in the atomic check
 * path, and the "new_state" after the atomic swap has been done.
 */
struct omap_global_state *
omap_get_existing_global_state(struct omap_drm_private *priv)
{
	return to_omap_global_state(priv->glob_obj.state);
}

/*
 * This acquires the modeset lock set aside for global state and creates
 * a new duplicated private object state.
 */
struct omap_global_state *__must_check
omap_get_global_state(struct drm_atomic_state *s)
{
	struct omap_drm_private *priv = s->dev->dev_private;
	struct drm_private_state *priv_state;

	priv_state = drm_atomic_get_private_obj_state(s, &priv->glob_obj);
	if (IS_ERR(priv_state))
		return ERR_CAST(priv_state);

	return to_omap_global_state(priv_state);
}

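/*
 * Typical usage (a sketch; the callers live in other files of this driver):
 * code running inside an atomic transaction that needs to modify the shared
 * state calls omap_get_global_state(state) to obtain a writable copy tracked
 * by the drm_atomic_state, while code running outside the atomic path peeks
 * at the committed state with omap_get_existing_global_state(priv).
 */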
static struct drm_private_state *
omap_global_duplicate_state(struct drm_private_obj *obj)
{
	struct omap_global_state *state;

	state = kmemdup(obj->state, sizeof(*state), GFP_KERNEL);
	if (!state)
		return NULL;

	__drm_atomic_helper_private_obj_duplicate_state(obj, &state->base);

	return &state->base;
}

static void omap_global_destroy_state(struct drm_private_obj *obj,
				      struct drm_private_state *state)
{
	struct omap_global_state *omap_state = to_omap_global_state(state);

	kfree(omap_state);
}

static const struct drm_private_state_funcs omap_global_state_funcs = {
	.atomic_duplicate_state = omap_global_duplicate_state,
	.atomic_destroy_state = omap_global_destroy_state,
};

static int omap_global_obj_init(struct drm_device *dev)
{
	struct omap_drm_private *priv = dev->dev_private;
	struct omap_global_state *state;

	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (!state)
		return -ENOMEM;

	drm_atomic_private_obj_init(dev, &priv->glob_obj, &state->base,
				    &omap_global_state_funcs);
	return 0;
}

static void omap_global_obj_fini(struct omap_drm_private *priv)
{
	drm_atomic_private_obj_fini(&priv->glob_obj);
}

static void omap_disconnect_pipelines(struct drm_device *ddev)
{
	struct omap_drm_private *priv = ddev->dev_private;
	unsigned int i;

	for (i = 0; i < priv->num_pipes; i++) {
		struct omap_drm_pipeline *pipe = &priv->pipes[i];

		omapdss_device_disconnect(NULL, pipe->output);

		omapdss_device_put(pipe->output);
		pipe->output = NULL;
	}

	memset(&priv->channels, 0, sizeof(priv->channels));

	priv->num_pipes = 0;
}

static int omap_connect_pipelines(struct drm_device *ddev)
{
	struct omap_drm_private *priv = ddev->dev_private;
	struct omap_dss_device *output = NULL;
	int r;

	for_each_dss_output(output) {
		r = omapdss_device_connect(priv->dss, NULL, output);
		if (r == -EPROBE_DEFER) {
			omapdss_device_put(output);
			return r;
		} else if (r) {
			dev_warn(output->dev, "could not connect output %s\n",
				 output->name);
		} else {
			struct omap_drm_pipeline *pipe;

			pipe = &priv->pipes[priv->num_pipes++];
			pipe->output = omapdss_device_get(output);

			if (priv->num_pipes == ARRAY_SIZE(priv->pipes)) {
				/* To balance the 'for_each_dss_output' loop */
				omapdss_device_put(output);
				break;
			}
		}
	}

	return 0;
}

static int omap_compare_pipelines(const void *a, const void *b)
{
	const struct omap_drm_pipeline *pipe1 = a;
	const struct omap_drm_pipeline *pipe2 = b;

	if (pipe1->alias_id > pipe2->alias_id)
		return 1;
	else if (pipe1->alias_id < pipe2->alias_id)
		return -1;
	return 0;
}

static int omap_modeset_init_properties(struct drm_device *dev)
{
	struct omap_drm_private *priv = dev->dev_private;
	unsigned int num_planes = dispc_get_num_ovls(priv->dispc);

	priv->zorder_prop = drm_property_create_range(dev, 0, "zorder", 0,
						      num_planes - 1);
	if (!priv->zorder_prop)
		return -ENOMEM;

	return 0;
}

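/*
 * omap_display_id() below resolves the user-visible display index from a
 * "display" alias in the device tree, taken from the last bridge in the
 * chain (typically the connector node). A board DT would provide something
 * like the following (node names are only an example, not taken from this
 * driver):
 *
 *	aliases {
 *		display0 = &lcd0;
 *		display1 = &hdmi0;
 *	};
 *
 * of_alias_get_id(node, "display") then returns 0 or 1 for those nodes.
 */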
static int omap_display_id(struct omap_dss_device *output)
{
	struct device_node *node = NULL;

	if (output->bridge) {
		struct drm_bridge *bridge = output->bridge;

		while (drm_bridge_get_next_bridge(bridge))
			bridge = drm_bridge_get_next_bridge(bridge);

		node = bridge->of_node;
	}

	return node ? of_alias_get_id(node, "display") : -ENODEV;
}

static int omap_modeset_init(struct drm_device *dev)
{
	struct omap_drm_private *priv = dev->dev_private;
	int num_ovls = dispc_get_num_ovls(priv->dispc);
	int num_mgrs = dispc_get_num_mgrs(priv->dispc);
	unsigned int i;
	int ret;
	u32 plane_crtc_mask;

	if (!omapdss_stack_is_ready())
		return -EPROBE_DEFER;

	ret = omap_modeset_init_properties(dev);
	if (ret < 0)
		return ret;

	/*
	 * This function creates exactly one connector, encoder, crtc,
	 * and primary plane for each connected dss-device. Each
	 * connector->encoder->crtc chain is expected to be separate
	 * and each crtc is connected to a single dss-channel. If the
	 * configuration does not match the expectations or exceeds
	 * the available resources, the configuration is rejected.
	 */
	ret = omap_connect_pipelines(dev);
	if (ret < 0)
		return ret;

	if (priv->num_pipes > num_mgrs || priv->num_pipes > num_ovls) {
		dev_err(dev->dev, "%s(): Too many connected displays\n",
			__func__);
		return -EINVAL;
	}

	/* Create all planes first. They can all be put on any CRTC. */
	plane_crtc_mask = (1 << priv->num_pipes) - 1;

	for (i = 0; i < num_ovls; i++) {
		enum drm_plane_type type = i < priv->num_pipes
					 ? DRM_PLANE_TYPE_PRIMARY
					 : DRM_PLANE_TYPE_OVERLAY;
		struct drm_plane *plane;

		if (WARN_ON(priv->num_planes >= ARRAY_SIZE(priv->planes)))
			return -EINVAL;

		plane = omap_plane_init(dev, i, type, plane_crtc_mask);
		if (IS_ERR(plane))
			return PTR_ERR(plane);

		priv->planes[priv->num_planes++] = plane;
	}

	/*
	 * Create the encoders, attach the bridges and get the pipeline alias
	 * IDs.
	 */
	for (i = 0; i < priv->num_pipes; i++) {
		struct omap_drm_pipeline *pipe = &priv->pipes[i];
		int id;

		pipe->encoder = omap_encoder_init(dev, pipe->output);
		if (!pipe->encoder)
			return -ENOMEM;

		if (pipe->output->bridge) {
			ret = drm_bridge_attach(pipe->encoder,
						pipe->output->bridge, NULL,
						DRM_BRIDGE_ATTACH_NO_CONNECTOR);
			if (ret < 0)
				return ret;
		}

		id = omap_display_id(pipe->output);
		pipe->alias_id = id >= 0 ? id : i;
	}

	/* Sort the pipelines by DT aliases. */
	sort(priv->pipes, priv->num_pipes, sizeof(priv->pipes[0]),
	     omap_compare_pipelines, NULL);

	/*
	 * Populate the pipeline lookup table by DISPC channel. Only one
	 * display is allowed per channel.
	 */
	for (i = 0; i < priv->num_pipes; ++i) {
		struct omap_drm_pipeline *pipe = &priv->pipes[i];
		enum omap_channel channel = pipe->output->dispc_channel;

		if (WARN_ON(priv->channels[channel] != NULL))
			return -EINVAL;

		priv->channels[channel] = pipe;
	}

	/* Create the connectors and CRTCs. */
	for (i = 0; i < priv->num_pipes; i++) {
		struct omap_drm_pipeline *pipe = &priv->pipes[i];
		struct drm_encoder *encoder = pipe->encoder;
		struct drm_crtc *crtc;

		pipe->connector = drm_bridge_connector_init(dev, encoder);
		if (IS_ERR(pipe->connector)) {
			dev_err(priv->dev,
				"unable to create bridge connector for %s\n",
				pipe->output->name);
			return PTR_ERR(pipe->connector);
		}

		drm_connector_attach_encoder(pipe->connector, encoder);

		crtc = omap_crtc_init(dev, pipe, priv->planes[i]);
		if (IS_ERR(crtc))
			return PTR_ERR(crtc);

		encoder->possible_crtcs = 1 << i;
		pipe->crtc = crtc;
	}

	DBG("registered %u planes, %u crtcs/encoders/connectors\n",
	    priv->num_planes, priv->num_pipes);

	dev->mode_config.min_width = 8;
	dev->mode_config.min_height = 2;

	/*
	 * Note: these values are used for multiple independent things:
	 * connector mode filtering, buffer sizes, crtc sizes...
	 * Use big enough values here to cover all use cases, and do more
	 * specific checking in the respective code paths.
	 */
	dev->mode_config.max_width = 8192;
	dev->mode_config.max_height = 8192;

	/* We want the zpos to be normalized. */
	dev->mode_config.normalize_zpos = true;

	dev->mode_config.funcs = &omap_mode_config_funcs;
	dev->mode_config.helper_private = &omap_mode_config_helper_funcs;

	drm_mode_config_reset(dev);

	omap_drm_irq_install(dev);

	return 0;
}

static void omap_modeset_fini(struct drm_device *ddev)
{
	omap_drm_irq_uninstall(ddev);

	drm_mode_config_cleanup(ddev);
}

/*
 * drm ioctl funcs
 */

static int ioctl_get_param(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct omap_drm_private *priv = dev->dev_private;
	struct drm_omap_param *args = data;

	DBG("%p: param=%llu", dev, args->param);

	switch (args->param) {
	case OMAP_PARAM_CHIPSET_ID:
		args->value = priv->omaprev;
		break;
	default:
		DBG("unknown parameter %lld", args->param);
		return -EINVAL;
	}

	return 0;
}

#define OMAP_BO_USER_MASK	0x00ffffff	/* flags settable by userspace */

static int ioctl_gem_new(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	struct drm_omap_gem_new *args = data;
	u32 flags = args->flags & OMAP_BO_USER_MASK;

	VERB("%p:%p: size=0x%08x, flags=%08x", dev, file_priv,
	     args->size.bytes, flags);

	return omap_gem_new_handle(dev, file_priv, args->size, flags,
				   &args->handle);
}

static int ioctl_gem_info(struct drm_device *dev, void *data,
			  struct drm_file *file_priv)
{
	struct drm_omap_gem_info *args = data;
	struct drm_gem_object *obj;
	int ret = 0;

	VERB("%p:%p: handle=%d", dev, file_priv, args->handle);

	obj = drm_gem_object_lookup(file_priv, args->handle);
	if (!obj)
		return -ENOENT;

	args->size = omap_gem_mmap_size(obj);
	args->offset = omap_gem_mmap_offset(obj);

	drm_gem_object_put(obj);

	return ret;
}

static const struct drm_ioctl_desc ioctls[DRM_COMMAND_END - DRM_COMMAND_BASE] = {
	DRM_IOCTL_DEF_DRV(OMAP_GET_PARAM, ioctl_get_param,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(OMAP_SET_PARAM, drm_invalid_op,
			  DRM_AUTH | DRM_MASTER | DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(OMAP_GEM_NEW, ioctl_gem_new,
			  DRM_RENDER_ALLOW),
	/* Deprecated, to be removed. */
	DRM_IOCTL_DEF_DRV(OMAP_GEM_CPU_PREP, drm_noop,
			  DRM_RENDER_ALLOW),
	/* Deprecated, to be removed. */
	DRM_IOCTL_DEF_DRV(OMAP_GEM_CPU_FINI, drm_noop,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(OMAP_GEM_INFO, ioctl_gem_info,
			  DRM_RENDER_ALLOW),
};

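/*
 * Userspace reaches the table above through the generic DRM ioctl path.
 * A minimal sketch of allocating a buffer via OMAP_GEM_NEW (assuming the
 * uapi definitions from <drm/omap_drm.h>; error handling omitted):
 *
 *	struct drm_omap_gem_new req = {
 *		.size.bytes = 4096,
 *		.flags = OMAP_BO_WC,
 *	};
 *	int fd = open("/dev/dri/card0", O_RDWR);
 *
 *	if (ioctl(fd, DRM_IOCTL_OMAP_GEM_NEW, &req) == 0)
 *		printf("GEM handle %u\n", req.handle);
 *
 * The returned handle can then be passed to OMAP_GEM_INFO to obtain the
 * mmap offset and size.
 */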
/*
 * drm driver funcs
 */

static int dev_open(struct drm_device *dev, struct drm_file *file)
{
	file->driver_priv = NULL;

	DBG("open: dev=%p, file=%p", dev, file);

	return 0;
}

static const struct file_operations omapdriver_fops = {
	.owner = THIS_MODULE,
	.open = drm_open,
	.unlocked_ioctl = drm_ioctl,
	.compat_ioctl = drm_compat_ioctl,
	.release = drm_release,
	.mmap = omap_gem_mmap,
	.poll = drm_poll,
	.read = drm_read,
	.llseek = noop_llseek,
};

static const struct drm_driver omap_drm_driver = {
	.driver_features = DRIVER_MODESET | DRIVER_GEM |
		DRIVER_ATOMIC | DRIVER_RENDER,
	.open = dev_open,
	.lastclose = drm_fb_helper_lastclose,
#ifdef CONFIG_DEBUG_FS
	.debugfs_init = omap_debugfs_init,
#endif
	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
	.gem_prime_import = omap_gem_prime_import,
	.dumb_create = omap_gem_dumb_create,
	.dumb_map_offset = omap_gem_dumb_map_offset,
	.ioctls = ioctls,
	.num_ioctls = DRM_OMAP_NUM_IOCTLS,
	.fops = &omapdriver_fops,
	.name = DRIVER_NAME,
	.desc = DRIVER_DESC,
	.date = DRIVER_DATE,
	.major = DRIVER_MAJOR,
	.minor = DRIVER_MINOR,
	.patchlevel = DRIVER_PATCHLEVEL,
};

static const struct soc_device_attribute omapdrm_soc_devices[] = {
	{ .family = "OMAP3", .data = (void *)0x3430 },
	{ .family = "OMAP4", .data = (void *)0x4430 },
	{ .family = "OMAP5", .data = (void *)0x5430 },
	{ .family = "DRA7",  .data = (void *)0x0752 },
	{ /* sentinel */ }
};

static int omapdrm_init(struct omap_drm_private *priv, struct device *dev)
{
	const struct soc_device_attribute *soc;
	struct dss_pdata *pdata = dev->platform_data;
	struct drm_device *ddev;
	int ret;

	DBG("%s", dev_name(dev));

	if (drm_firmware_drivers_only())
		return -ENODEV;

	/* Allocate and initialize the DRM device. */
	ddev = drm_dev_alloc(&omap_drm_driver, dev);
	if (IS_ERR(ddev))
		return PTR_ERR(ddev);

	priv->ddev = ddev;
	ddev->dev_private = priv;

	priv->dev = dev;
	priv->dss = pdata->dss;
	priv->dispc = dispc_get_dispc(priv->dss);

	priv->dss->mgr_ops_priv = priv;

	soc = soc_device_match(omapdrm_soc_devices);
	priv->omaprev = soc ? (uintptr_t)soc->data : 0;
	priv->wq = alloc_ordered_workqueue("omapdrm", 0);

	mutex_init(&priv->list_lock);
	INIT_LIST_HEAD(&priv->obj_list);

	/* Get memory bandwidth limits */
	priv->max_bandwidth = dispc_get_memory_bandwidth_limit(priv->dispc);

	omap_gem_init(ddev);

	drm_mode_config_init(ddev);

	ret = omap_global_obj_init(ddev);
	if (ret)
		goto err_gem_deinit;

	ret = omap_hwoverlays_init(priv);
	if (ret)
		goto err_free_priv_obj;

	ret = omap_modeset_init(ddev);
	if (ret) {
		dev_err(priv->dev, "omap_modeset_init failed: ret=%d\n", ret);
		goto err_free_overlays;
	}

	/* Initialize vblank handling, start with all CRTCs disabled. */
	ret = drm_vblank_init(ddev, priv->num_pipes);
	if (ret) {
		dev_err(priv->dev, "could not init vblank\n");
		goto err_cleanup_modeset;
	}

	omap_fbdev_init(ddev);

	drm_kms_helper_poll_init(ddev);

	/*
	 * Register the DRM device with the core and the connectors with
	 * sysfs.
	 */
	ret = drm_dev_register(ddev, 0);
	if (ret)
		goto err_cleanup_helpers;

	return 0;

err_cleanup_helpers:
	drm_kms_helper_poll_fini(ddev);

	omap_fbdev_fini(ddev);
err_cleanup_modeset:
	omap_modeset_fini(ddev);
err_free_overlays:
	omap_hwoverlays_destroy(priv);
err_free_priv_obj:
	omap_global_obj_fini(priv);
err_gem_deinit:
	drm_mode_config_cleanup(ddev);
	omap_gem_deinit(ddev);
	destroy_workqueue(priv->wq);
	omap_disconnect_pipelines(ddev);
	drm_dev_put(ddev);
	return ret;
}

static void omapdrm_cleanup(struct omap_drm_private *priv)
{
	struct drm_device *ddev = priv->ddev;

	DBG("");

	drm_dev_unregister(ddev);

	drm_kms_helper_poll_fini(ddev);

	omap_fbdev_fini(ddev);

	drm_atomic_helper_shutdown(ddev);

	omap_modeset_fini(ddev);
	omap_hwoverlays_destroy(priv);
	omap_global_obj_fini(priv);
	drm_mode_config_cleanup(ddev);
	omap_gem_deinit(ddev);

	destroy_workqueue(priv->wq);

	omap_disconnect_pipelines(ddev);

	drm_dev_put(ddev);
}

static int pdev_probe(struct platform_device *pdev)
{
	struct omap_drm_private *priv;
	int ret;

	ret = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
	if (ret) {
		dev_err(&pdev->dev, "Failed to set the DMA mask\n");
		return ret;
	}

	/* Allocate and initialize the driver private structure. */
	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	platform_set_drvdata(pdev, priv);

	ret = omapdrm_init(priv, &pdev->dev);
	if (ret < 0)
		kfree(priv);

	return ret;
}

static int pdev_remove(struct platform_device *pdev)
{
	struct omap_drm_private *priv = platform_get_drvdata(pdev);

	omapdrm_cleanup(priv);
	kfree(priv);

	return 0;
}

#ifdef CONFIG_PM_SLEEP
static int omap_drm_suspend(struct device *dev)
{
	struct omap_drm_private *priv = dev_get_drvdata(dev);
	struct drm_device *drm_dev = priv->ddev;

	return drm_mode_config_helper_suspend(drm_dev);
}

static int omap_drm_resume(struct device *dev)
{
	struct omap_drm_private *priv = dev_get_drvdata(dev);
	struct drm_device *drm_dev = priv->ddev;

	drm_mode_config_helper_resume(drm_dev);

	return omap_gem_resume(drm_dev);
}
#endif

static SIMPLE_DEV_PM_OPS(omapdrm_pm_ops, omap_drm_suspend, omap_drm_resume);

static struct platform_driver pdev = {
	.driver = {
		.name = "omapdrm",
		.pm = &omapdrm_pm_ops,
	},
	.probe = pdev_probe,
	.remove = pdev_remove,
};

static struct platform_driver * const drivers[] = {
	&omap_dmm_driver,
	&pdev,
};

static int __init omap_drm_init(void)
{
	int r;

	DBG("init");

	r = omap_dss_init();
	if (r)
		return r;

	r = platform_register_drivers(drivers, ARRAY_SIZE(drivers));
	if (r) {
		omap_dss_exit();
		return r;
	}

	return 0;
}

static void __exit omap_drm_fini(void)
{
	DBG("fini");

	platform_unregister_drivers(drivers, ARRAY_SIZE(drivers));

	omap_dss_exit();
}

module_init(omap_drm_init);
module_exit(omap_drm_fini);

MODULE_AUTHOR("Rob Clark <rob@ti.com>");
MODULE_AUTHOR("Tomi Valkeinen <tomi.valkeinen@ti.com>");
MODULE_DESCRIPTION("OMAP DRM Display Driver");
MODULE_ALIAS("platform:" DRIVER_NAME);
MODULE_LICENSE("GPL v2");