/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

/**
 * DOC: atomic plane helpers
 *
 * The functions here are used by the atomic plane helper functions to
 * implement legacy plane updates (i.e., drm_plane->update_plane() and
 * drm_plane->disable_plane()). This allows plane updates to use the
 * atomic state infrastructure and perform plane updates as separate
 * prepare/check/commit/cleanup steps.
 */

#include <drm/drm_atomic_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_plane_helper.h>

#include "i915_trace.h"
#include "intel_atomic_plane.h"
#include "intel_cdclk.h"
#include "intel_display_types.h"
#include "intel_fb_pin.h"
#include "intel_pm.h"
#include "intel_sprite.h"
#include "gt/intel_rps.h"

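/*
 * Zero the plane state and restore the non-zero defaults
 * (currently just scaler_id, which uses -1 for "no scaler").
 */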
static void intel_plane_state_reset(struct intel_plane_state *plane_state,
				    struct intel_plane *plane)
{
	memset(plane_state, 0, sizeof(*plane_state));

	__drm_atomic_helper_plane_state_reset(&plane_state->uapi, &plane->base);

	plane_state->scaler_id = -1;
}

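/**
 * intel_plane_alloc - allocate a plane and its initial state
 *
 * Allocates a zeroed struct intel_plane together with a matching
 * intel_plane_state, resets the state, and links it to the plane.
 *
 * Returns: The new plane, or an ERR_PTR() on allocation failure.
 */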
struct intel_plane *intel_plane_alloc(void)
{
	struct intel_plane_state *plane_state;
	struct intel_plane *plane;

	plane = kzalloc(sizeof(*plane), GFP_KERNEL);
	if (!plane)
		return ERR_PTR(-ENOMEM);

	plane_state = kzalloc(sizeof(*plane_state), GFP_KERNEL);
	if (!plane_state) {
		kfree(plane);
		return ERR_PTR(-ENOMEM);
	}

	intel_plane_state_reset(plane_state, plane);

	plane->base.state = &plane_state->uapi;

	return plane;
}

void intel_plane_free(struct intel_plane *plane)
{
	intel_plane_destroy_state(&plane->base, plane->base.state);
	kfree(plane);
}

/**
 * intel_plane_duplicate_state - duplicate plane state
 * @plane: drm plane
 *
 * Allocates and returns a copy of the plane state (both common and
 * Intel-specific) for the specified plane.
 *
 * Returns: The newly allocated plane state, or NULL on failure.
 */
struct drm_plane_state *
intel_plane_duplicate_state(struct drm_plane *plane)
{
	struct intel_plane_state *intel_state;

	intel_state = to_intel_plane_state(plane->state);
	intel_state = kmemdup(intel_state, sizeof(*intel_state), GFP_KERNEL);

	if (!intel_state)
		return NULL;

	__drm_atomic_helper_plane_duplicate_state(plane, &intel_state->uapi);

	intel_state->ggtt_vma = NULL;
	intel_state->dpt_vma = NULL;
	intel_state->flags = 0;

	/* add reference to fb */
	if (intel_state->hw.fb)
		drm_framebuffer_get(intel_state->hw.fb);

	return &intel_state->uapi;
}

/**
 * intel_plane_destroy_state - destroy plane state
 * @plane: drm plane
 * @state: state object to destroy
 *
 * Destroys the plane state (both common and Intel-specific) for the
 * specified plane.
 */
void
intel_plane_destroy_state(struct drm_plane *plane,
			  struct drm_plane_state *state)
{
	struct intel_plane_state *plane_state = to_intel_plane_state(state);

	drm_WARN_ON(plane->dev, plane_state->ggtt_vma);
	drm_WARN_ON(plane->dev, plane_state->dpt_vma);

	__drm_atomic_helper_plane_destroy_state(&plane_state->uapi);
	if (plane_state->hw.fb)
		drm_framebuffer_put(plane_state->hw.fb);
	kfree(plane_state);
}

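/*
 * Scale @rate by the ratio of the source size (16.16 fixed point)
 * to the destination size. Since the destination is clamped to the
 * source size first, only downscaling increases the rate; upscaling
 * leaves it unchanged.
 */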
unsigned int intel_adjusted_rate(const struct drm_rect *src,
				 const struct drm_rect *dst,
				 unsigned int rate)
{
	unsigned int src_w, src_h, dst_w, dst_h;

	src_w = drm_rect_width(src) >> 16;
	src_h = drm_rect_height(src) >> 16;
	dst_w = drm_rect_width(dst);
	dst_h = drm_rect_height(dst);

	/* Downscaling limits the maximum pixel rate */
	dst_w = min(src_w, dst_w);
	dst_h = min(src_h, dst_h);

	return DIV_ROUND_UP_ULL(mul_u32_u32(rate, src_w * src_h),
				dst_w * dst_h);
}

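/*
 * Pixel rate actually consumed by the plane: the crtc pixel rate
 * adjusted for the plane's downscaling factor.
 */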
unsigned int intel_plane_pixel_rate(const struct intel_crtc_state *crtc_state,
				    const struct intel_plane_state *plane_state)
{
	/*
	 * Note we don't check for plane visibility here as
	 * we want to use this when calculating the cursor
	 * watermarks even if the cursor is fully offscreen.
	 * That depends on the src/dst rectangles being
	 * correctly populated whenever the watermark code
	 * considers the cursor to be visible, whether or not
	 * it is actually visible.
	 *
	 * See: intel_wm_plane_visible() and intel_check_cursor()
	 */

	return intel_adjusted_rate(&plane_state->uapi.src,
				   &plane_state->uapi.dst,
				   crtc_state->pixel_rate);
}

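/*
 * Data rate consumed by the plane: the adjusted pixel rate multiplied
 * by the bytes per pixel. Invisible planes consume no data rate.
 */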
unsigned int intel_plane_data_rate(const struct intel_crtc_state *crtc_state,
				   const struct intel_plane_state *plane_state)
{
	const struct drm_framebuffer *fb = plane_state->hw.fb;
	unsigned int cpp;
	unsigned int pixel_rate;

	if (!plane_state->uapi.visible)
		return 0;

	pixel_rate = intel_plane_pixel_rate(crtc_state, plane_state);

	cpp = fb->format->cpp[0];

	/*
	 * Based on HSD#:1408715493
	 * NV12 cpp == 4, P010 cpp == 8
	 *
	 * FIXME what is the logic behind this?
	 */
	if (fb->format->is_yuv && fb->format->num_planes > 1)
		cpp *= 4;

	return pixel_rate * cpp;
}

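/**
 * intel_plane_calc_min_cdclk - determine the plane's cdclk requirement
 * @state: the atomic state
 * @plane: the plane to check
 * @need_cdclk_calc: set to true if a full cdclk recomputation is needed
 *
 * Computes the minimum cdclk the plane requires and stores it in the new
 * crtc state. *@need_cdclk_calc is only raised when the new requirement
 * exceeds both the old per-plane requirement and the current cdclk state
 * for the pipe.
 *
 * Returns: 0 on success, or a negative error code.
 */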
int intel_plane_calc_min_cdclk(struct intel_atomic_state *state,
			       struct intel_plane *plane,
			       bool *need_cdclk_calc)
{
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	const struct intel_plane_state *plane_state =
		intel_atomic_get_new_plane_state(state, plane);
	struct intel_crtc *crtc = to_intel_crtc(plane_state->hw.crtc);
	const struct intel_cdclk_state *cdclk_state;
	const struct intel_crtc_state *old_crtc_state;
	struct intel_crtc_state *new_crtc_state;

	if (!plane_state->uapi.visible || !plane->min_cdclk)
		return 0;

	old_crtc_state = intel_atomic_get_old_crtc_state(state, crtc);
	new_crtc_state = intel_atomic_get_new_crtc_state(state, crtc);

	new_crtc_state->min_cdclk[plane->id] =
		plane->min_cdclk(new_crtc_state, plane_state);

	/*
	 * No need to check against the cdclk state if
	 * the min cdclk for the plane doesn't increase.
	 *
	 * I.e. we only ever increase the cdclk due to plane
	 * requirements. This can reduce back and forth
	 * display blinking due to constant cdclk changes.
	 */
	if (new_crtc_state->min_cdclk[plane->id] <=
	    old_crtc_state->min_cdclk[plane->id])
		return 0;

	cdclk_state = intel_atomic_get_cdclk_state(state);
	if (IS_ERR(cdclk_state))
		return PTR_ERR(cdclk_state);

	/*
	 * No need to recalculate the cdclk state if
	 * the min cdclk for the pipe doesn't increase.
	 *
	 * I.e. we only ever increase the cdclk due to plane
	 * requirements. This can reduce back and forth
	 * display blinking due to constant cdclk changes.
	 */
	if (new_crtc_state->min_cdclk[plane->id] <=
	    cdclk_state->min_cdclk[crtc->pipe])
		return 0;

	drm_dbg_kms(&dev_priv->drm,
		    "[PLANE:%d:%s] min cdclk (%d kHz) > [CRTC:%d:%s] min cdclk (%d kHz)\n",
		    plane->base.base.id, plane->base.name,
		    new_crtc_state->min_cdclk[plane->id],
		    crtc->base.base.id, crtc->base.name,
		    cdclk_state->min_cdclk[crtc->pipe]);
	*need_cdclk_calc = true;

	return 0;
}

static void intel_plane_clear_hw_state(struct intel_plane_state *plane_state)
{
	if (plane_state->hw.fb)
		drm_framebuffer_put(plane_state->hw.fb);

	memset(&plane_state->hw, 0, sizeof(plane_state->hw));
}

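/**
 * intel_plane_copy_uapi_to_hw_state - copy uapi state into the hw state
 * @plane_state: the plane state to fill in
 * @from_plane_state: the uapi-level plane state to copy from
 * @crtc: the crtc the plane is actually enabled on
 *
 * Translates the uapi plane state into the hw plane state the rest of
 * the driver operates on. @crtc rather than uapi.crtc is stored in
 * hw.crtc so that bigjoiner slave planes end up pointing at the slave
 * crtc instead of the master.
 */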
void intel_plane_copy_uapi_to_hw_state(struct intel_plane_state *plane_state,
				       const struct intel_plane_state *from_plane_state,
				       struct intel_crtc *crtc)
{
	intel_plane_clear_hw_state(plane_state);

	/*
	 * For the bigjoiner slave, uapi.crtc will point at
	 * the master crtc. So we explicitly assign the right
	 * slave crtc to hw.crtc. uapi.crtc != NULL simply indicates
	 * the plane is logically enabled on the uapi level.
	 */
	plane_state->hw.crtc = from_plane_state->uapi.crtc ? &crtc->base : NULL;

	plane_state->hw.fb = from_plane_state->uapi.fb;
	if (plane_state->hw.fb)
		drm_framebuffer_get(plane_state->hw.fb);

	plane_state->hw.alpha = from_plane_state->uapi.alpha;
	plane_state->hw.pixel_blend_mode =
		from_plane_state->uapi.pixel_blend_mode;
	plane_state->hw.rotation = from_plane_state->uapi.rotation;
	plane_state->hw.color_encoding = from_plane_state->uapi.color_encoding;
	plane_state->hw.color_range = from_plane_state->uapi.color_range;
	plane_state->hw.scaling_filter = from_plane_state->uapi.scaling_filter;

	plane_state->uapi.src = drm_plane_state_src(&from_plane_state->uapi);
	plane_state->uapi.dst = drm_plane_state_dest(&from_plane_state->uapi);
}

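/*
 * Copy the hw state wholesale from another plane state, taking a new
 * reference on the framebuffer.
 */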
void intel_plane_copy_hw_state(struct intel_plane_state *plane_state,
			       const struct intel_plane_state *from_plane_state)
{
	intel_plane_clear_hw_state(plane_state);

	memcpy(&plane_state->hw, &from_plane_state->hw,
	       sizeof(plane_state->hw));

	if (plane_state->hw.fb)
		drm_framebuffer_get(plane_state->hw.fb);
}

void intel_plane_set_invisible(struct intel_crtc_state *crtc_state,
			       struct intel_plane_state *plane_state)
{
	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);

	crtc_state->active_planes &= ~BIT(plane->id);
	crtc_state->nv12_planes &= ~BIT(plane->id);
	crtc_state->c8_planes &= ~BIT(plane->id);
	crtc_state->data_rate[plane->id] = 0;
	crtc_state->min_cdclk[plane->id] = 0;

	plane_state->uapi.visible = false;
}

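/*
 * Check one plane against the given old and new crtc states, and update
 * the plane bitmasks (enabled/active/nv12/c8/update) and the per-plane
 * data rate in the new crtc state accordingly.
 */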
int intel_plane_atomic_check_with_state(const struct intel_crtc_state *old_crtc_state,
					struct intel_crtc_state *new_crtc_state,
					const struct intel_plane_state *old_plane_state,
					struct intel_plane_state *new_plane_state)
{
	struct intel_plane *plane = to_intel_plane(new_plane_state->uapi.plane);
	const struct drm_framebuffer *fb = new_plane_state->hw.fb;
	int ret;

	intel_plane_set_invisible(new_crtc_state, new_plane_state);
	new_crtc_state->enabled_planes &= ~BIT(plane->id);

	if (!new_plane_state->hw.crtc && !old_plane_state->hw.crtc)
		return 0;

	ret = plane->check_plane(new_crtc_state, new_plane_state);
	if (ret)
		return ret;

	if (fb)
		new_crtc_state->enabled_planes |= BIT(plane->id);

	/* FIXME pre-g4x don't work like this */
	if (new_plane_state->uapi.visible)
		new_crtc_state->active_planes |= BIT(plane->id);

	if (new_plane_state->uapi.visible &&
	    intel_format_info_is_yuv_semiplanar(fb->format, fb->modifier))
		new_crtc_state->nv12_planes |= BIT(plane->id);

	if (new_plane_state->uapi.visible &&
	    fb->format->format == DRM_FORMAT_C8)
		new_crtc_state->c8_planes |= BIT(plane->id);

	if (new_plane_state->uapi.visible || old_plane_state->uapi.visible)
		new_crtc_state->update_planes |= BIT(plane->id);

	new_crtc_state->data_rate[plane->id] =
		intel_plane_data_rate(new_crtc_state, new_plane_state);

	return intel_plane_atomic_calc_changes(old_crtc_state, new_crtc_state,
					       old_plane_state, new_plane_state);
}

static struct intel_plane *
intel_crtc_get_plane(struct intel_crtc *crtc, enum plane_id plane_id)
{
	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
	struct intel_plane *plane;

	for_each_intel_plane_on_crtc(&i915->drm, crtc, plane) {
		if (plane->id == plane_id)
			return plane;
	}

	return NULL;
}

int intel_plane_atomic_check(struct intel_atomic_state *state,
			     struct intel_plane *plane)
{
	struct drm_i915_private *i915 = to_i915(state->base.dev);
	struct intel_plane_state *new_plane_state =
		intel_atomic_get_new_plane_state(state, plane);
	const struct intel_plane_state *old_plane_state =
		intel_atomic_get_old_plane_state(state, plane);
	const struct intel_plane_state *new_master_plane_state;
	struct intel_crtc *crtc = intel_get_crtc_for_pipe(i915, plane->pipe);
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);

	if (new_crtc_state && new_crtc_state->bigjoiner_slave) {
		struct intel_plane *master_plane =
			intel_crtc_get_plane(new_crtc_state->bigjoiner_linked_crtc,
					     plane->id);

		new_master_plane_state =
			intel_atomic_get_new_plane_state(state, master_plane);
	} else {
		new_master_plane_state = new_plane_state;
	}

	intel_plane_copy_uapi_to_hw_state(new_plane_state,
					  new_master_plane_state,
					  crtc);

	new_plane_state->uapi.visible = false;
	if (!new_crtc_state)
		return 0;

	return intel_plane_atomic_check_with_state(old_crtc_state,
						   new_crtc_state,
						   old_plane_state,
						   new_plane_state);
}

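/*
 * Pick the next plane from *update_mask whose new ddb allocation does
 * not overlap the ddb entries still occupied by the other planes. The
 * entries arrays track current occupancy and are updated as each plane
 * is handed out, so committing planes in this order avoids transient
 * ddb overlaps while the allocations are being rearranged.
 */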
static struct intel_plane *
skl_next_plane_to_commit(struct intel_atomic_state *state,
			 struct intel_crtc *crtc,
			 struct skl_ddb_entry entries_y[I915_MAX_PLANES],
			 struct skl_ddb_entry entries_uv[I915_MAX_PLANES],
			 unsigned int *update_mask)
{
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct intel_plane_state *plane_state;
	struct intel_plane *plane;
	int i;

	if (*update_mask == 0)
		return NULL;

	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
		enum plane_id plane_id = plane->id;

		if (crtc->pipe != plane->pipe ||
		    !(*update_mask & BIT(plane_id)))
			continue;

		if (skl_ddb_allocation_overlaps(&crtc_state->wm.skl.plane_ddb_y[plane_id],
						entries_y,
						I915_MAX_PLANES, plane_id) ||
		    skl_ddb_allocation_overlaps(&crtc_state->wm.skl.plane_ddb_uv[plane_id],
						entries_uv,
						I915_MAX_PLANES, plane_id))
			continue;

		*update_mask &= ~BIT(plane_id);
		entries_y[plane_id] = crtc_state->wm.skl.plane_ddb_y[plane_id];
		entries_uv[plane_id] = crtc_state->wm.skl.plane_ddb_uv[plane_id];

		return plane;
	}

	/* should never happen */
	drm_WARN_ON(state->base.dev, 1);

	return NULL;
}

void intel_update_plane(struct intel_plane *plane,
			const struct intel_crtc_state *crtc_state,
			const struct intel_plane_state *plane_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);

	trace_intel_update_plane(&plane->base, crtc);

	if (crtc_state->uapi.async_flip && plane->async_flip)
		plane->async_flip(plane, crtc_state, plane_state, true);
	else
		plane->update_plane(plane, crtc_state, plane_state);
}

void intel_disable_plane(struct intel_plane *plane,
			 const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);

	trace_intel_disable_plane(&plane->base, crtc);
	plane->disable_plane(plane, crtc_state);
}

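/*
 * Commit all planes on the crtc, in the overlap-free order chosen by
 * skl_next_plane_to_commit(). The entries arrays start out tracking the
 * old ddb allocations and are updated as each plane is committed.
 */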
void skl_update_planes_on_crtc(struct intel_atomic_state *state,
			       struct intel_crtc *crtc)
{
	struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct skl_ddb_entry entries_y[I915_MAX_PLANES];
	struct skl_ddb_entry entries_uv[I915_MAX_PLANES];
	u32 update_mask = new_crtc_state->update_planes;
	struct intel_plane *plane;

	memcpy(entries_y, old_crtc_state->wm.skl.plane_ddb_y,
	       sizeof(old_crtc_state->wm.skl.plane_ddb_y));
	memcpy(entries_uv, old_crtc_state->wm.skl.plane_ddb_uv,
	       sizeof(old_crtc_state->wm.skl.plane_ddb_uv));

	while ((plane = skl_next_plane_to_commit(state, crtc,
						 entries_y, entries_uv,
						 &update_mask))) {
		struct intel_plane_state *new_plane_state =
			intel_atomic_get_new_plane_state(state, plane);

		if (new_plane_state->uapi.visible ||
		    new_plane_state->planar_slave) {
			intel_update_plane(plane, new_crtc_state, new_plane_state);
		} else {
			intel_disable_plane(plane, new_crtc_state);
		}
	}
}

void i9xx_update_planes_on_crtc(struct intel_atomic_state *state,
				struct intel_crtc *crtc)
{
	struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	u32 update_mask = new_crtc_state->update_planes;
	struct intel_plane_state *new_plane_state;
	struct intel_plane *plane;
	int i;

	for_each_new_intel_plane_in_state(state, plane, new_plane_state, i) {
		if (crtc->pipe != plane->pipe ||
		    !(update_mask & BIT(plane->id)))
			continue;

		if (new_plane_state->uapi.visible)
			intel_update_plane(plane, new_crtc_state, new_plane_state);
		else
			intel_disable_plane(plane, new_crtc_state);
	}
}

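/**
 * intel_atomic_plane_check_clipping - check scaling and clip the plane
 * @plane_state: the plane state to check
 * @crtc_state: the crtc state the plane belongs to
 * @min_scale: minimum allowed scaling factor (16.16 fixed point)
 * @max_scale: maximum allowed scaling factor (16.16 fixed point)
 * @can_position: is it legal to position the plane such that it
 *                doesn't cover the entire crtc?
 *
 * Checks that the plane's scaling factors lie within @min_scale and
 * @max_scale, clips the source and destination rectangles to the pipe
 * source size, and computes the plane's visibility.
 *
 * Returns: 0 on success, or a negative error code.
 */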
int intel_atomic_plane_check_clipping(struct intel_plane_state *plane_state,
				      struct intel_crtc_state *crtc_state,
				      int min_scale, int max_scale,
				      bool can_position)
{
	struct drm_framebuffer *fb = plane_state->hw.fb;
	struct drm_rect *src = &plane_state->uapi.src;
	struct drm_rect *dst = &plane_state->uapi.dst;
	unsigned int rotation = plane_state->hw.rotation;
	struct drm_rect clip = {};
	int hscale, vscale;

	if (!fb) {
		plane_state->uapi.visible = false;
		return 0;
	}

	drm_rect_rotate(src, fb->width << 16, fb->height << 16, rotation);

	/* Check scaling */
	hscale = drm_rect_calc_hscale(src, dst, min_scale, max_scale);
	vscale = drm_rect_calc_vscale(src, dst, min_scale, max_scale);
	if (hscale < 0 || vscale < 0) {
		DRM_DEBUG_KMS("Invalid scaling of plane\n");
		drm_rect_debug_print("src: ", src, true);
		drm_rect_debug_print("dst: ", dst, false);
		return -ERANGE;
	}

	if (crtc_state->hw.enable) {
		clip.x2 = crtc_state->pipe_src_w;
		clip.y2 = crtc_state->pipe_src_h;
	}

	/* right side of the image is on the slave crtc, adjust dst to match */
	if (crtc_state->bigjoiner_slave)
		drm_rect_translate(dst, -crtc_state->pipe_src_w, 0);

	/*
	 * FIXME: This might need further adjustment for seamless scaling
	 * with phase information, for the 2p2 and 2p1 scenarios.
	 */
	plane_state->uapi.visible = drm_rect_clip_scaled(src, dst, &clip);

	drm_rect_rotate_inv(src, fb->width << 16, fb->height << 16, rotation);

	if (!can_position && plane_state->uapi.visible &&
	    !drm_rect_equals(dst, &clip)) {
		DRM_DEBUG_KMS("Plane must cover entire CRTC\n");
		drm_rect_debug_print("dst: ", dst, false);
		drm_rect_debug_print("clip: ", &clip, false);
		return -EINVAL;
	}

	return 0;
}

struct wait_rps_boost {
	struct wait_queue_entry wait;

	struct drm_crtc *crtc;
	struct i915_request *request;
};

static int do_rps_boost(struct wait_queue_entry *_wait,
			unsigned mode, int sync, void *key)
{
	struct wait_rps_boost *wait = container_of(_wait, typeof(*wait), wait);
	struct i915_request *rq = wait->request;

	/*
	 * If we missed the vblank, but the request is already running it
	 * is reasonable to assume that it will complete before the next
	 * vblank without our intervention, so leave RPS alone.
	 */
	if (!i915_request_started(rq))
		intel_rps_boost(rq);
	i915_request_put(rq);

	drm_crtc_vblank_put(wait->crtc);

	list_del(&wait->wait.entry);
	kfree(wait);
	return 1;
}

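/*
 * Queue a one-shot entry on the crtc's vblank waitqueue that boosts the
 * GPU frequency if the request backing @fence has not started running by
 * the next vblank. Only i915 fences on display version 6+ are considered.
 */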
static void add_rps_boost_after_vblank(struct drm_crtc *crtc,
				       struct dma_fence *fence)
{
	struct wait_rps_boost *wait;

	if (!dma_fence_is_i915(fence))
		return;

	if (DISPLAY_VER(to_i915(crtc->dev)) < 6)
		return;

	if (drm_crtc_vblank_get(crtc))
		return;

	wait = kmalloc(sizeof(*wait), GFP_KERNEL);
	if (!wait) {
		drm_crtc_vblank_put(crtc);
		return;
	}

	wait->request = to_request(dma_fence_get(fence));
	wait->crtc = crtc;

	wait->wait.func = do_rps_boost;
	wait->wait.flags = 0;

	add_wait_queue(drm_crtc_vblank_waitqueue(crtc), &wait->wait);
}

/**
 * intel_prepare_plane_fb - Prepare fb for usage on plane
 * @_plane: drm plane to prepare for
 * @_new_plane_state: the plane state being prepared
 *
 * Prepares a framebuffer for usage on a display plane. Generally this
 * involves pinning the underlying object and updating the frontbuffer tracking
 * bits. Some older platforms need special physical address handling for
 * cursor planes.
 *
 * Returns 0 on success, negative error code on failure.
 */
static int
intel_prepare_plane_fb(struct drm_plane *_plane,
		       struct drm_plane_state *_new_plane_state)
{
	struct i915_sched_attr attr = { .priority = I915_PRIORITY_DISPLAY };
	struct intel_plane *plane = to_intel_plane(_plane);
	struct intel_plane_state *new_plane_state =
		to_intel_plane_state(_new_plane_state);
	struct intel_atomic_state *state =
		to_intel_atomic_state(new_plane_state->uapi.state);
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	const struct intel_plane_state *old_plane_state =
		intel_atomic_get_old_plane_state(state, plane);
	struct drm_i915_gem_object *obj = intel_fb_obj(new_plane_state->hw.fb);
	struct drm_i915_gem_object *old_obj = intel_fb_obj(old_plane_state->hw.fb);
	int ret;

	if (old_obj) {
		const struct intel_crtc_state *crtc_state =
			intel_atomic_get_new_crtc_state(state,
							to_intel_crtc(old_plane_state->hw.crtc));

		/* Big Hammer, we also need to ensure that any pending
		 * MI_WAIT_FOR_EVENT inside a user batch buffer on the
		 * current scanout is retired before unpinning the old
		 * framebuffer. Note that we rely on userspace rendering
		 * into the buffer attached to the pipe they are waiting
		 * on. If not, userspace generates a GPU hang with IPEHR
		 * pointing to the MI_WAIT_FOR_EVENT.
		 *
		 * This should only fail upon a hung GPU, in which case we
		 * can safely continue.
		 */
		if (intel_crtc_needs_modeset(crtc_state)) {
			ret = i915_sw_fence_await_reservation(&state->commit_ready,
							      old_obj->base.resv, NULL,
							      false, 0,
							      GFP_KERNEL);
			if (ret < 0)
				return ret;
		}
	}

	if (new_plane_state->uapi.fence) { /* explicit fencing */
		i915_gem_fence_wait_priority(new_plane_state->uapi.fence,
					     &attr);
		ret = i915_sw_fence_await_dma_fence(&state->commit_ready,
						    new_plane_state->uapi.fence,
						    i915_fence_timeout(dev_priv),
						    GFP_KERNEL);
		if (ret < 0)
			return ret;
	}

	if (!obj)
		return 0;

	ret = intel_plane_pin_fb(new_plane_state);
	if (ret)
		return ret;

	i915_gem_object_wait_priority(obj, 0, &attr);

	if (!new_plane_state->uapi.fence) { /* implicit fencing */
		struct dma_fence *fence;

		ret = i915_sw_fence_await_reservation(&state->commit_ready,
						      obj->base.resv, NULL,
						      false,
						      i915_fence_timeout(dev_priv),
						      GFP_KERNEL);
		if (ret < 0)
			goto unpin_fb;

		fence = dma_resv_get_excl_unlocked(obj->base.resv);
		if (fence) {
			add_rps_boost_after_vblank(new_plane_state->hw.crtc,
						   fence);
			dma_fence_put(fence);
		}
	} else {
		add_rps_boost_after_vblank(new_plane_state->hw.crtc,
					   new_plane_state->uapi.fence);
	}

	/*
	 * We declare pageflips to be interactive and so merit a small bias
	 * towards upclocking to deliver the frame on time. By only changing
	 * the RPS thresholds to sample more regularly and aim for higher
	 * clocks we can hopefully deliver low power workloads (like kodi)
	 * that are not quite steady state without resorting to forcing
	 * maximum clocks following a vblank miss (see do_rps_boost()).
	 */
	if (!state->rps_interactive) {
		intel_rps_mark_interactive(&dev_priv->gt.rps, true);
		state->rps_interactive = true;
	}

	return 0;

unpin_fb:
	intel_plane_unpin_fb(new_plane_state);

	return ret;
}

/**
 * intel_cleanup_plane_fb - Cleans up an fb after plane use
 * @plane: drm plane to clean up for
 * @_old_plane_state: the state from the previous modeset
 *
 * Cleans up a framebuffer that has just been removed from a plane.
 */
static void
intel_cleanup_plane_fb(struct drm_plane *plane,
		       struct drm_plane_state *_old_plane_state)
{
	struct intel_plane_state *old_plane_state =
		to_intel_plane_state(_old_plane_state);
	struct intel_atomic_state *state =
		to_intel_atomic_state(old_plane_state->uapi.state);
	struct drm_i915_private *dev_priv = to_i915(plane->dev);
	struct drm_i915_gem_object *obj = intel_fb_obj(old_plane_state->hw.fb);

	if (!obj)
		return;

	if (state->rps_interactive) {
		intel_rps_mark_interactive(&dev_priv->gt.rps, false);
		state->rps_interactive = false;
	}

	/* Should only be called after a successful intel_prepare_plane_fb()! */
	intel_plane_unpin_fb(old_plane_state);
}

static const struct drm_plane_helper_funcs intel_plane_helper_funcs = {
	.prepare_fb = intel_prepare_plane_fb,
	.cleanup_fb = intel_cleanup_plane_fb,
};

void intel_plane_helper_add(struct intel_plane *plane)
{
	drm_plane_helper_add(&plane->base, &intel_plane_helper_funcs);
}