1 // SPDX-License-Identifier: MIT
2 /*
3 * Copyright © 2020 Intel Corporation
4 */
5
6 #include "intel_atomic.h"
7 #include "intel_ddi.h"
8 #include "intel_de.h"
9 #include "intel_display_types.h"
10 #include "intel_fdi.h"
11 #include "intel_sbi.h"
12
/*
 * Warn (via I915_STATE_WARN) if the FDI TX enable state for @pipe does not
 * match the expected @state.
 */
static void assert_fdi_tx(struct drm_i915_private *dev_priv,
			  enum pipe pipe, bool state)
{
	bool cur_state;

	if (HAS_DDI(dev_priv)) {
		/*
		 * DDI does not have a specific FDI_TX register.
		 *
		 * FDI is never fed from EDP transcoder
		 * so pipe->transcoder cast is fine here.
		 */
		enum transcoder cpu_transcoder = (enum transcoder)pipe;
		cur_state = intel_de_read(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder)) & TRANS_DDI_FUNC_ENABLE;
	} else {
		cur_state = intel_de_read(dev_priv, FDI_TX_CTL(pipe)) & FDI_TX_ENABLE;
	}
	I915_STATE_WARN(cur_state != state,
			"FDI TX state assertion failure (expected %s, current %s)\n",
			onoff(state), onoff(cur_state));
}
34
/* Assert that FDI TX is enabled on @pipe. */
void assert_fdi_tx_enabled(struct drm_i915_private *i915, enum pipe pipe)
{
	assert_fdi_tx(i915, pipe, true);
}
39
/* Assert that FDI TX is disabled on @pipe. */
void assert_fdi_tx_disabled(struct drm_i915_private *i915, enum pipe pipe)
{
	assert_fdi_tx(i915, pipe, false);
}
44
/*
 * Warn (via I915_STATE_WARN) if the PCH FDI RX enable state for @pipe does
 * not match the expected @state.
 */
static void assert_fdi_rx(struct drm_i915_private *dev_priv,
			  enum pipe pipe, bool state)
{
	bool cur_state;

	cur_state = intel_de_read(dev_priv, FDI_RX_CTL(pipe)) & FDI_RX_ENABLE;
	I915_STATE_WARN(cur_state != state,
			"FDI RX state assertion failure (expected %s, current %s)\n",
			onoff(state), onoff(cur_state));
}
55
/* Assert that PCH FDI RX is enabled on @pipe. */
void assert_fdi_rx_enabled(struct drm_i915_private *i915, enum pipe pipe)
{
	assert_fdi_rx(i915, pipe, true);
}
60
/* Assert that PCH FDI RX is disabled on @pipe. */
void assert_fdi_rx_disabled(struct drm_i915_private *i915, enum pipe pipe)
{
	assert_fdi_rx(i915, pipe, false);
}
65
/*
 * Warn if the FDI TX PLL for @pipe is not enabled. No-op on platforms
 * where the PLL is not under driver control (see comments below).
 */
void assert_fdi_tx_pll_enabled(struct drm_i915_private *i915,
			       enum pipe pipe)
{
	bool cur_state;

	/* ILK FDI PLL is always enabled */
	if (IS_IRONLAKE(i915))
		return;

	/* On Haswell, DDI ports are responsible for the FDI PLL setup */
	if (HAS_DDI(i915))
		return;

	cur_state = intel_de_read(i915, FDI_TX_CTL(pipe)) & FDI_TX_PLL_ENABLE;
	I915_STATE_WARN(!cur_state, "FDI TX PLL assertion failure, should be active but is disabled\n");
}
82
/*
 * Warn if the PCH FDI RX PLL enable state for @pipe does not match the
 * expected @state.
 */
static void assert_fdi_rx_pll(struct drm_i915_private *i915,
			      enum pipe pipe, bool state)
{
	bool cur_state;

	cur_state = intel_de_read(i915, FDI_RX_CTL(pipe)) & FDI_RX_PLL_ENABLE;
	I915_STATE_WARN(cur_state != state,
			"FDI RX PLL assertion failure (expected %s, current %s)\n",
			onoff(state), onoff(cur_state));
}
93
/* Assert that the PCH FDI RX PLL is enabled on @pipe. */
void assert_fdi_rx_pll_enabled(struct drm_i915_private *i915, enum pipe pipe)
{
	assert_fdi_rx_pll(i915, pipe, true);
}
98
/* Assert that the PCH FDI RX PLL is disabled on @pipe. */
void assert_fdi_rx_pll_disabled(struct drm_i915_private *i915, enum pipe pipe)
{
	assert_fdi_rx_pll(i915, pipe, false);
}
103
/*
 * Run FDI link training for @crtc by dispatching to the platform-specific
 * implementation installed in dev_priv->fdi_funcs.
 */
void intel_fdi_link_train(struct intel_crtc *crtc,
			  const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	dev_priv->fdi_funcs->fdi_link_train(crtc, crtc_state);
}
111
112 /* units of 100MHz */
pipe_required_fdi_lanes(struct intel_crtc_state * crtc_state)113 static int pipe_required_fdi_lanes(struct intel_crtc_state *crtc_state)
114 {
115 if (crtc_state->hw.enable && crtc_state->has_pch_encoder)
116 return crtc_state->fdi_lanes;
117
118 return 0;
119 }
120
/*
 * Validate the FDI lane count requested in @pipe_config for @pipe against
 * the platform's lane budget, including lanes shared between pipes B and C
 * on three-pipe (IVB-style) parts.
 *
 * Returns 0 if the config is acceptable, -EINVAL if it cannot be
 * satisfied, or an error propagated from intel_atomic_get_crtc_state()
 * (e.g. -EDEADLK, which the caller must forward for atomic backoff).
 */
static int ilk_check_fdi_lanes(struct drm_device *dev, enum pipe pipe,
			       struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_atomic_state *state = pipe_config->uapi.state;
	struct intel_crtc *other_crtc;
	struct intel_crtc_state *other_crtc_state;

	drm_dbg_kms(&dev_priv->drm,
		    "checking fdi config on pipe %c, lanes %i\n",
		    pipe_name(pipe), pipe_config->fdi_lanes);
	if (pipe_config->fdi_lanes > 4) {
		drm_dbg_kms(&dev_priv->drm,
			    "invalid fdi lane config on pipe %c: %i lanes\n",
			    pipe_name(pipe), pipe_config->fdi_lanes);
		return -EINVAL;
	}

	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
		/* HSW/BDW FDI supports at most 2 lanes. */
		if (pipe_config->fdi_lanes > 2) {
			drm_dbg_kms(&dev_priv->drm,
				    "only 2 lanes on haswell, required: %i lanes\n",
				    pipe_config->fdi_lanes);
			return -EINVAL;
		} else {
			return 0;
		}
	}

	/* Two-pipe parts have no lane sharing to worry about. */
	if (INTEL_NUM_PIPES(dev_priv) == 2)
		return 0;

	/* Ivybridge 3 pipe is really complicated */
	switch (pipe) {
	case PIPE_A:
		return 0;
	case PIPE_B:
		if (pipe_config->fdi_lanes <= 2)
			return 0;

		/* >2 lanes on B requires pipe C's FDI to be unused. */
		other_crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_C);
		other_crtc_state =
			intel_atomic_get_crtc_state(state, other_crtc);
		if (IS_ERR(other_crtc_state))
			return PTR_ERR(other_crtc_state);

		if (pipe_required_fdi_lanes(other_crtc_state) > 0) {
			drm_dbg_kms(&dev_priv->drm,
				    "invalid shared fdi lane config on pipe %c: %i lanes\n",
				    pipe_name(pipe), pipe_config->fdi_lanes);
			return -EINVAL;
		}
		return 0;
	case PIPE_C:
		if (pipe_config->fdi_lanes > 2) {
			drm_dbg_kms(&dev_priv->drm,
				    "only 2 lanes on pipe %c: required %i lanes\n",
				    pipe_name(pipe), pipe_config->fdi_lanes);
			return -EINVAL;
		}

		/* Pipe C shares lanes with B: B must use <= 2 of them. */
		other_crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_B);
		other_crtc_state =
			intel_atomic_get_crtc_state(state, other_crtc);
		if (IS_ERR(other_crtc_state))
			return PTR_ERR(other_crtc_state);

		if (pipe_required_fdi_lanes(other_crtc_state) > 2) {
			drm_dbg_kms(&dev_priv->drm,
				    "fdi link B uses too many lanes to enable link C\n");
			return -EINVAL;
		}
		return 0;
	default:
		MISSING_CASE(pipe);
		return 0;
	}
}
199
/*
 * Cache the FDI PLL frequency in i915->fdi_pll_freq. On Ironlake it is
 * derived from the BIOS-programmed feedback divider; SNB/IVB use a fixed
 * value. Other platforms are left untouched (no FDI PLL to track).
 */
void intel_fdi_pll_freq_update(struct drm_i915_private *i915)
{
	if (IS_IRONLAKE(i915)) {
		u32 fdi_pll_clk =
			intel_de_read(i915, FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK;

		/* +2 offset matches the hardware encoding of the divider field */
		i915->fdi_pll_freq = (fdi_pll_clk + 2) * 10000;
	} else if (IS_SANDYBRIDGE(i915) || IS_IVYBRIDGE(i915)) {
		i915->fdi_pll_freq = 270000;
	} else {
		return;
	}

	drm_dbg(&i915->drm, "FDI PLL freq=%d\n", i915->fdi_pll_freq);
}
215
intel_fdi_link_freq(struct drm_i915_private * i915,const struct intel_crtc_state * pipe_config)216 int intel_fdi_link_freq(struct drm_i915_private *i915,
217 const struct intel_crtc_state *pipe_config)
218 {
219 if (HAS_DDI(i915))
220 return pipe_config->port_clock; /* SPLL */
221 else
222 return i915->fdi_pll_freq;
223 }
224
/*
 * Compute the FDI lane count and M/N values for @pipe_config.
 *
 * If the lane check fails with -EINVAL, the pipe bpp is reduced by one
 * step (2 bits per channel, 3 channels) and the computation is retried,
 * down to a minimum of 6 bpc. Returns 0 on success, -EDEADLK for atomic
 * backoff, -EAGAIN when the bpp was reduced and a recompute is needed,
 * or the lane-check error.
 */
int ilk_fdi_compute_config(struct intel_crtc *crtc,
			   struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *i915 = to_i915(dev);
	const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
	int lane, link_bw, fdi_dotclock, ret;
	bool needs_recompute = false;

retry:
	/* FDI is a binary signal running at ~2.7GHz, encoding
	 * each output octet as 10 bits. The actual frequency
	 * is stored as a divider into a 100MHz clock, and the
	 * mode pixel clock is stored in units of 1KHz.
	 * Hence the bw of each lane in terms of the mode signal
	 * is:
	 */
	link_bw = intel_fdi_link_freq(i915, pipe_config);

	fdi_dotclock = adjusted_mode->crtc_clock;

	lane = ilk_get_lanes_required(fdi_dotclock, link_bw,
				      pipe_config->pipe_bpp);

	pipe_config->fdi_lanes = lane;

	intel_link_compute_m_n(pipe_config->pipe_bpp, lane, fdi_dotclock,
			       link_bw, &pipe_config->fdi_m_n, false, false);

	ret = ilk_check_fdi_lanes(dev, crtc->pipe, pipe_config);
	if (ret == -EDEADLK)
		return ret;

	if (ret == -EINVAL && pipe_config->pipe_bpp > 6*3) {
		/* Reduce bpp one step (2 bits x 3 channels) and retry. */
		pipe_config->pipe_bpp -= 2*3;
		drm_dbg_kms(&i915->drm,
			    "fdi link bw constraint, reducing pipe bpp to %i\n",
			    pipe_config->pipe_bpp);
		needs_recompute = true;
		pipe_config->bw_constrained = true;

		goto retry;
	}

	if (needs_recompute)
		return -EAGAIN;

	return ret;
}
274
/*
 * Set or clear the FDI B/C lane bifurcation bit in SOUTH_CHICKEN1.
 *
 * The bit must only be flipped while both FDI B and FDI C receivers are
 * disabled (warned below). No-op if the bit already matches @enable.
 */
static void cpt_set_fdi_bc_bifurcation(struct drm_i915_private *dev_priv, bool enable)
{
	u32 temp;

	temp = intel_de_read(dev_priv, SOUTH_CHICKEN1);
	if (!!(temp & FDI_BC_BIFURCATION_SELECT) == enable)
		return;

	/* Changing bifurcation with either RX enabled would be invalid. */
	drm_WARN_ON(&dev_priv->drm,
		    intel_de_read(dev_priv, FDI_RX_CTL(PIPE_B)) &
		    FDI_RX_ENABLE);
	drm_WARN_ON(&dev_priv->drm,
		    intel_de_read(dev_priv, FDI_RX_CTL(PIPE_C)) &
		    FDI_RX_ENABLE);

	temp &= ~FDI_BC_BIFURCATION_SELECT;
	if (enable)
		temp |= FDI_BC_BIFURCATION_SELECT;

	drm_dbg_kms(&dev_priv->drm, "%sabling fdi C rx\n",
		    enable ? "en" : "dis");
	intel_de_write(dev_priv, SOUTH_CHICKEN1, temp);
	intel_de_posting_read(dev_priv, SOUTH_CHICKEN1);
}
299
ivb_update_fdi_bc_bifurcation(const struct intel_crtc_state * crtc_state)300 static void ivb_update_fdi_bc_bifurcation(const struct intel_crtc_state *crtc_state)
301 {
302 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
303 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
304
305 switch (crtc->pipe) {
306 case PIPE_A:
307 break;
308 case PIPE_B:
309 if (crtc_state->fdi_lanes > 2)
310 cpt_set_fdi_bc_bifurcation(dev_priv, false);
311 else
312 cpt_set_fdi_bc_bifurcation(dev_priv, true);
313
314 break;
315 case PIPE_C:
316 cpt_set_fdi_bc_bifurcation(dev_priv, true);
317
318 break;
319 default:
320 MISSING_CASE(crtc->pipe);
321 }
322 }
323
/*
 * Switch the FDI link out of training patterns into the normal pixel
 * sending state, on both the CPU TX and PCH RX sides.
 */
void intel_fdi_normal_train(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 temp;

	/* enable normal train */
	reg = FDI_TX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	if (IS_IVYBRIDGE(dev_priv)) {
		/* NONE_IVB doubles as the pattern field mask: clear then set */
		temp &= ~FDI_LINK_TRAIN_NONE_IVB;
		temp |= FDI_LINK_TRAIN_NONE_IVB | FDI_TX_ENHANCE_FRAME_ENABLE;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE;
	}
	intel_de_write(dev_priv, reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	if (HAS_PCH_CPT(dev_priv)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_NORMAL_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_NONE;
	}
	intel_de_write(dev_priv, reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE);

	/* wait one idle pattern time */
	intel_de_posting_read(dev_priv, reg);
	udelay(1000);

	/* IVB wants error correction enabled */
	if (IS_IVYBRIDGE(dev_priv))
		intel_de_write(dev_priv, reg,
			       intel_de_read(dev_priv, reg) | FDI_FS_ERRC_ENABLE | FDI_FE_ERRC_ENABLE);
}
364
/* The FDI link training functions for ILK/Ibexpeak. */
/*
 * Train the FDI link in two phases: pattern 1 until bit lock, then
 * pattern 2 until symbol lock, polling FDI_RX_IIR up to 5 times per
 * phase. Failures are logged but not propagated.
 */
static void ilk_fdi_link_train(struct intel_crtc *crtc,
			       const struct intel_crtc_state *crtc_state)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 temp, tries;

	/*
	 * Write the TU size bits before fdi link training, so that error
	 * detection works.
	 */
	intel_de_write(dev_priv, FDI_RX_TUSIZE1(pipe),
		       intel_de_read(dev_priv, PIPE_DATA_M1(pipe)) & TU_SIZE_MASK);

	/* FDI needs bits from pipe first */
	assert_transcoder_enabled(dev_priv, crtc_state->cpu_transcoder);

	/* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit
	   for train result */
	reg = FDI_RX_IMR(pipe);
	temp = intel_de_read(dev_priv, reg);
	temp &= ~FDI_RX_SYMBOL_LOCK;
	temp &= ~FDI_RX_BIT_LOCK;
	intel_de_write(dev_priv, reg, temp);
	intel_de_read(dev_priv, reg);
	udelay(150);

	/* enable CPU FDI TX and PCH FDI RX */
	reg = FDI_TX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	temp &= ~FDI_DP_PORT_WIDTH_MASK;
	temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	intel_de_write(dev_priv, reg, temp | FDI_TX_ENABLE);

	reg = FDI_RX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	intel_de_write(dev_priv, reg, temp | FDI_RX_ENABLE);

	intel_de_posting_read(dev_priv, reg);
	udelay(150);

	/* Ironlake workaround, enable clock pointer after FDI enable*/
	intel_de_write(dev_priv, FDI_RX_CHICKEN(pipe),
		       FDI_RX_PHASE_SYNC_POINTER_OVR);
	intel_de_write(dev_priv, FDI_RX_CHICKEN(pipe),
		       FDI_RX_PHASE_SYNC_POINTER_OVR | FDI_RX_PHASE_SYNC_POINTER_EN);

	/* Poll for bit lock; writing the bit back acks/clears it. */
	reg = FDI_RX_IIR(pipe);
	for (tries = 0; tries < 5; tries++) {
		temp = intel_de_read(dev_priv, reg);
		drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);

		if ((temp & FDI_RX_BIT_LOCK)) {
			drm_dbg_kms(&dev_priv->drm, "FDI train 1 done.\n");
			intel_de_write(dev_priv, reg, temp | FDI_RX_BIT_LOCK);
			break;
		}
	}
	if (tries == 5)
		drm_err(&dev_priv->drm, "FDI train 1 fail!\n");

	/* Train 2 */
	reg = FDI_TX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_2;
	intel_de_write(dev_priv, reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_2;
	intel_de_write(dev_priv, reg, temp);

	intel_de_posting_read(dev_priv, reg);
	udelay(150);

	/* Poll for symbol lock, same ack-by-writeback scheme as above. */
	reg = FDI_RX_IIR(pipe);
	for (tries = 0; tries < 5; tries++) {
		temp = intel_de_read(dev_priv, reg);
		drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);

		if (temp & FDI_RX_SYMBOL_LOCK) {
			intel_de_write(dev_priv, reg,
				       temp | FDI_RX_SYMBOL_LOCK);
			drm_dbg_kms(&dev_priv->drm, "FDI train 2 done.\n");
			break;
		}
	}
	if (tries == 5)
		drm_err(&dev_priv->drm, "FDI train 2 fail!\n");

	drm_dbg_kms(&dev_priv->drm, "FDI train done\n");

}
467
/*
 * Voltage-swing / pre-emphasis settings (FDI_TX_CTL field values, per the
 * macro names) tried in order by the SNB and IVB-manual training loops.
 */
static const int snb_b_fdi_train_param[] = {
	FDI_LINK_TRAIN_400MV_0DB_SNB_B,
	FDI_LINK_TRAIN_400MV_6DB_SNB_B,
	FDI_LINK_TRAIN_600MV_3_5DB_SNB_B,
	FDI_LINK_TRAIN_800MV_0DB_SNB_B,
};
474
/* The FDI link training functions for SNB/Cougarpoint. */
/*
 * Two-phase training (pattern 1 for bit lock, pattern 2 for symbol lock),
 * stepping through the snb_b_fdi_train_param vswing/emphasis table and
 * retrying each setting up to 5 times before moving to the next.
 */
static void gen6_fdi_link_train(struct intel_crtc *crtc,
				const struct intel_crtc_state *crtc_state)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 temp, i, retry;

	/*
	 * Write the TU size bits before fdi link training, so that error
	 * detection works.
	 */
	intel_de_write(dev_priv, FDI_RX_TUSIZE1(pipe),
		       intel_de_read(dev_priv, PIPE_DATA_M1(pipe)) & TU_SIZE_MASK);

	/* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit
	   for train result */
	reg = FDI_RX_IMR(pipe);
	temp = intel_de_read(dev_priv, reg);
	temp &= ~FDI_RX_SYMBOL_LOCK;
	temp &= ~FDI_RX_BIT_LOCK;
	intel_de_write(dev_priv, reg, temp);

	intel_de_posting_read(dev_priv, reg);
	udelay(150);

	/* enable CPU FDI TX and PCH FDI RX */
	reg = FDI_TX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	temp &= ~FDI_DP_PORT_WIDTH_MASK;
	temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
	/* SNB-B */
	temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
	intel_de_write(dev_priv, reg, temp | FDI_TX_ENABLE);

	intel_de_write(dev_priv, FDI_RX_MISC(pipe),
		       FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);

	reg = FDI_RX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	if (HAS_PCH_CPT(dev_priv)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_PATTERN_1;
	}
	intel_de_write(dev_priv, reg, temp | FDI_RX_ENABLE);

	intel_de_posting_read(dev_priv, reg);
	udelay(150);

	/* Phase 1: try each vswing/emphasis until bit lock is seen. */
	for (i = 0; i < 4; i++) {
		reg = FDI_TX_CTL(pipe);
		temp = intel_de_read(dev_priv, reg);
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		temp |= snb_b_fdi_train_param[i];
		intel_de_write(dev_priv, reg, temp);

		intel_de_posting_read(dev_priv, reg);
		udelay(500);

		for (retry = 0; retry < 5; retry++) {
			reg = FDI_RX_IIR(pipe);
			temp = intel_de_read(dev_priv, reg);
			drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);
			if (temp & FDI_RX_BIT_LOCK) {
				/* Ack the status bit by writing it back. */
				intel_de_write(dev_priv, reg,
					       temp | FDI_RX_BIT_LOCK);
				drm_dbg_kms(&dev_priv->drm,
					    "FDI train 1 done.\n");
				break;
			}
			udelay(50);
		}
		if (retry < 5)
			break;
	}
	if (i == 4)
		drm_err(&dev_priv->drm, "FDI train 1 fail!\n");

	/* Train 2 */
	reg = FDI_TX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_2;
	if (IS_SANDYBRIDGE(dev_priv)) {
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		/* SNB-B */
		temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
	}
	intel_de_write(dev_priv, reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	if (HAS_PCH_CPT(dev_priv)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_PATTERN_2;
	}
	intel_de_write(dev_priv, reg, temp);

	intel_de_posting_read(dev_priv, reg);
	udelay(150);

	/* Phase 2: same sweep, this time waiting for symbol lock. */
	for (i = 0; i < 4; i++) {
		reg = FDI_TX_CTL(pipe);
		temp = intel_de_read(dev_priv, reg);
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		temp |= snb_b_fdi_train_param[i];
		intel_de_write(dev_priv, reg, temp);

		intel_de_posting_read(dev_priv, reg);
		udelay(500);

		for (retry = 0; retry < 5; retry++) {
			reg = FDI_RX_IIR(pipe);
			temp = intel_de_read(dev_priv, reg);
			drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);
			if (temp & FDI_RX_SYMBOL_LOCK) {
				intel_de_write(dev_priv, reg,
					       temp | FDI_RX_SYMBOL_LOCK);
				drm_dbg_kms(&dev_priv->drm,
					    "FDI train 2 done.\n");
				break;
			}
			udelay(50);
		}
		if (retry < 5)
			break;
	}
	if (i == 4)
		drm_err(&dev_priv->drm, "FDI train 2 fail!\n");

	drm_dbg_kms(&dev_priv->drm, "FDI train done.\n");
}
618
/* Manual link training for Ivy Bridge A0 parts */
/*
 * Unlike the SNB path, each vswing/emphasis setting is tried twice
 * (j/2 indexes the table), and TX/RX are fully disabled and re-enabled
 * between attempts. Success in phase 2 jumps straight to train_done.
 */
static void ivb_manual_fdi_link_train(struct intel_crtc *crtc,
				      const struct intel_crtc_state *crtc_state)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 temp, i, j;

	ivb_update_fdi_bc_bifurcation(crtc_state);

	/*
	 * Write the TU size bits before fdi link training, so that error
	 * detection works.
	 */
	intel_de_write(dev_priv, FDI_RX_TUSIZE1(pipe),
		       intel_de_read(dev_priv, PIPE_DATA_M1(pipe)) & TU_SIZE_MASK);

	/* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit
	   for train result */
	reg = FDI_RX_IMR(pipe);
	temp = intel_de_read(dev_priv, reg);
	temp &= ~FDI_RX_SYMBOL_LOCK;
	temp &= ~FDI_RX_BIT_LOCK;
	intel_de_write(dev_priv, reg, temp);

	intel_de_posting_read(dev_priv, reg);
	udelay(150);

	drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR before link train 0x%x\n",
		    intel_de_read(dev_priv, FDI_RX_IIR(pipe)));

	/* Try each vswing and preemphasis setting twice before moving on */
	for (j = 0; j < ARRAY_SIZE(snb_b_fdi_train_param) * 2; j++) {
		/* disable first in case we need to retry */
		reg = FDI_TX_CTL(pipe);
		temp = intel_de_read(dev_priv, reg);
		temp &= ~(FDI_LINK_TRAIN_AUTO | FDI_LINK_TRAIN_NONE_IVB);
		temp &= ~FDI_TX_ENABLE;
		intel_de_write(dev_priv, reg, temp);

		reg = FDI_RX_CTL(pipe);
		temp = intel_de_read(dev_priv, reg);
		temp &= ~FDI_LINK_TRAIN_AUTO;
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp &= ~FDI_RX_ENABLE;
		intel_de_write(dev_priv, reg, temp);

		/* enable CPU FDI TX and PCH FDI RX */
		reg = FDI_TX_CTL(pipe);
		temp = intel_de_read(dev_priv, reg);
		temp &= ~FDI_DP_PORT_WIDTH_MASK;
		temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
		temp |= FDI_LINK_TRAIN_PATTERN_1_IVB;
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		temp |= snb_b_fdi_train_param[j/2];
		temp |= FDI_COMPOSITE_SYNC;
		intel_de_write(dev_priv, reg, temp | FDI_TX_ENABLE);

		intel_de_write(dev_priv, FDI_RX_MISC(pipe),
			       FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);

		reg = FDI_RX_CTL(pipe);
		temp = intel_de_read(dev_priv, reg);
		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
		temp |= FDI_COMPOSITE_SYNC;
		intel_de_write(dev_priv, reg, temp | FDI_RX_ENABLE);

		intel_de_posting_read(dev_priv, reg);
		udelay(1); /* should be 0.5us */

		/* Poll for bit lock; the second IIR read gives one more chance. */
		for (i = 0; i < 4; i++) {
			reg = FDI_RX_IIR(pipe);
			temp = intel_de_read(dev_priv, reg);
			drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);

			if (temp & FDI_RX_BIT_LOCK ||
			    (intel_de_read(dev_priv, reg) & FDI_RX_BIT_LOCK)) {
				intel_de_write(dev_priv, reg,
					       temp | FDI_RX_BIT_LOCK);
				drm_dbg_kms(&dev_priv->drm,
					    "FDI train 1 done, level %i.\n",
					    i);
				break;
			}
			udelay(1); /* should be 0.5us */
		}
		if (i == 4) {
			drm_dbg_kms(&dev_priv->drm,
				    "FDI train 1 fail on vswing %d\n", j / 2);
			continue;
		}

		/* Train 2 */
		reg = FDI_TX_CTL(pipe);
		temp = intel_de_read(dev_priv, reg);
		temp &= ~FDI_LINK_TRAIN_NONE_IVB;
		temp |= FDI_LINK_TRAIN_PATTERN_2_IVB;
		intel_de_write(dev_priv, reg, temp);

		reg = FDI_RX_CTL(pipe);
		temp = intel_de_read(dev_priv, reg);
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
		intel_de_write(dev_priv, reg, temp);

		intel_de_posting_read(dev_priv, reg);
		udelay(2); /* should be 1.5us */

		/* Poll for symbol lock; success ends training entirely. */
		for (i = 0; i < 4; i++) {
			reg = FDI_RX_IIR(pipe);
			temp = intel_de_read(dev_priv, reg);
			drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);

			if (temp & FDI_RX_SYMBOL_LOCK ||
			    (intel_de_read(dev_priv, reg) & FDI_RX_SYMBOL_LOCK)) {
				intel_de_write(dev_priv, reg,
					       temp | FDI_RX_SYMBOL_LOCK);
				drm_dbg_kms(&dev_priv->drm,
					    "FDI train 2 done, level %i.\n",
					    i);
				goto train_done;
			}
			udelay(2); /* should be 1.5us */
		}
		if (i == 4)
			drm_dbg_kms(&dev_priv->drm,
				    "FDI train 2 fail on vswing %d\n", j / 2);
	}

train_done:
	drm_dbg_kms(&dev_priv->drm, "FDI train done.\n");
}
753
754 /* Starting with Haswell, different DDI ports can work in FDI mode for
755 * connection to the PCH-located connectors. For this, it is necessary to train
756 * both the DDI port and PCH receiver for the desired DDI buffer settings.
757 *
758 * The recommended port to work in FDI mode is DDI E, which we use here. Also,
759 * please note that when FDI mode is active on DDI E, it shares 2 lines with
760 * DDI A (which is used for eDP)
761 */
/*
 * HSW FDI link training via DDI E auto-training: enable the PCH RX PLL
 * and SPLL-derived port clock, then step through the DDI buffer
 * translation entries (each tried twice) until DP_TP_STATUS reports
 * auto-train done. On final failure things are left enabled on purpose
 * (see comment in the loop).
 */
void hsw_fdi_link_train(struct intel_encoder *encoder,
			const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	u32 temp, i, rx_ctl_val;
	int n_entries;

	encoder->get_buf_trans(encoder, crtc_state, &n_entries);

	hsw_prepare_dp_ddi_buffers(encoder, crtc_state);

	/* Set the FDI_RX_MISC pwrdn lanes and the 2 workarounds listed at the
	 * mode set "sequence for CRT port" document:
	 * - TP1 to TP2 time with the default value
	 * - FDI delay to 90h
	 *
	 * WaFDIAutoLinkSetTimingOverrride:hsw
	 */
	intel_de_write(dev_priv, FDI_RX_MISC(PIPE_A),
		       FDI_RX_PWRDN_LANE1_VAL(2) | FDI_RX_PWRDN_LANE0_VAL(2) | FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);

	/* Enable the PCH Receiver FDI PLL */
	rx_ctl_val = dev_priv->fdi_rx_config | FDI_RX_ENHANCE_FRAME_ENABLE |
		     FDI_RX_PLL_ENABLE |
		     FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
	intel_de_write(dev_priv, FDI_RX_CTL(PIPE_A), rx_ctl_val);
	intel_de_posting_read(dev_priv, FDI_RX_CTL(PIPE_A));
	udelay(220);

	/* Switch from Rawclk to PCDclk */
	rx_ctl_val |= FDI_PCDCLK;
	intel_de_write(dev_priv, FDI_RX_CTL(PIPE_A), rx_ctl_val);

	/* Configure Port Clock Select */
	drm_WARN_ON(&dev_priv->drm, crtc_state->shared_dpll->info->id != DPLL_ID_SPLL);
	intel_ddi_enable_clock(encoder, crtc_state);

	/* Start the training iterating through available voltages and emphasis,
	 * testing each value twice. */
	for (i = 0; i < n_entries * 2; i++) {
		/* Configure DP_TP_CTL with auto-training */
		intel_de_write(dev_priv, DP_TP_CTL(PORT_E),
			       DP_TP_CTL_FDI_AUTOTRAIN |
			       DP_TP_CTL_ENHANCED_FRAME_ENABLE |
			       DP_TP_CTL_LINK_TRAIN_PAT1 |
			       DP_TP_CTL_ENABLE);

		/* Configure and enable DDI_BUF_CTL for DDI E with next voltage.
		 * DDI E does not support port reversal, the functionality is
		 * achieved on the PCH side in FDI_RX_CTL, so no need to set the
		 * port reversal bit */
		intel_de_write(dev_priv, DDI_BUF_CTL(PORT_E),
			       DDI_BUF_CTL_ENABLE | ((crtc_state->fdi_lanes - 1) << 1) | DDI_BUF_TRANS_SELECT(i / 2));
		intel_de_posting_read(dev_priv, DDI_BUF_CTL(PORT_E));

		udelay(600);

		/* Program PCH FDI Receiver TU */
		intel_de_write(dev_priv, FDI_RX_TUSIZE1(PIPE_A), TU_SIZE(64));

		/* Enable PCH FDI Receiver with auto-training */
		rx_ctl_val |= FDI_RX_ENABLE | FDI_LINK_TRAIN_AUTO;
		intel_de_write(dev_priv, FDI_RX_CTL(PIPE_A), rx_ctl_val);
		intel_de_posting_read(dev_priv, FDI_RX_CTL(PIPE_A));

		/* Wait for FDI receiver lane calibration */
		udelay(30);

		/* Unset FDI_RX_MISC pwrdn lanes */
		temp = intel_de_read(dev_priv, FDI_RX_MISC(PIPE_A));
		temp &= ~(FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK);
		intel_de_write(dev_priv, FDI_RX_MISC(PIPE_A), temp);
		intel_de_posting_read(dev_priv, FDI_RX_MISC(PIPE_A));

		/* Wait for FDI auto training time */
		udelay(5);

		temp = intel_de_read(dev_priv, DP_TP_STATUS(PORT_E));
		if (temp & DP_TP_STATUS_AUTOTRAIN_DONE) {
			drm_dbg_kms(&dev_priv->drm,
				    "FDI link training done on step %d\n", i);
			break;
		}

		/*
		 * Leave things enabled even if we failed to train FDI.
		 * Results in less fireworks from the state checker.
		 */
		if (i == n_entries * 2 - 1) {
			drm_err(&dev_priv->drm, "FDI link training failed!\n");
			break;
		}

		/* Attempt failed: tear down RX/TX before the next iteration. */
		rx_ctl_val &= ~FDI_RX_ENABLE;
		intel_de_write(dev_priv, FDI_RX_CTL(PIPE_A), rx_ctl_val);
		intel_de_posting_read(dev_priv, FDI_RX_CTL(PIPE_A));

		temp = intel_de_read(dev_priv, DDI_BUF_CTL(PORT_E));
		temp &= ~DDI_BUF_CTL_ENABLE;
		intel_de_write(dev_priv, DDI_BUF_CTL(PORT_E), temp);
		intel_de_posting_read(dev_priv, DDI_BUF_CTL(PORT_E));

		/* Disable DP_TP_CTL and FDI_RX_CTL and retry */
		temp = intel_de_read(dev_priv, DP_TP_CTL(PORT_E));
		temp &= ~(DP_TP_CTL_ENABLE | DP_TP_CTL_LINK_TRAIN_MASK);
		temp |= DP_TP_CTL_LINK_TRAIN_PAT1;
		intel_de_write(dev_priv, DP_TP_CTL(PORT_E), temp);
		intel_de_posting_read(dev_priv, DP_TP_CTL(PORT_E));

		intel_wait_ddi_buf_idle(dev_priv, PORT_E);

		/* Reset FDI_RX_MISC pwrdn lanes */
		temp = intel_de_read(dev_priv, FDI_RX_MISC(PIPE_A));
		temp &= ~(FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK);
		temp |= FDI_RX_PWRDN_LANE1_VAL(2) | FDI_RX_PWRDN_LANE0_VAL(2);
		intel_de_write(dev_priv, FDI_RX_MISC(PIPE_A), temp);
		intel_de_posting_read(dev_priv, FDI_RX_MISC(PIPE_A));
	}

	/* Enable normal pixel sending for FDI */
	intel_de_write(dev_priv, DP_TP_CTL(PORT_E),
		       DP_TP_CTL_FDI_AUTOTRAIN |
		       DP_TP_CTL_LINK_TRAIN_NORMAL |
		       DP_TP_CTL_ENHANCED_FRAME_ENABLE |
		       DP_TP_CTL_ENABLE);
}
889
/*
 * Enable the PCH FDI RX PLL and the CPU FDI TX PLL for this crtc,
 * switching the receiver from the raw clock to PCDclk, with the
 * warmup delays required between steps.
 */
void ilk_fdi_pll_enable(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 temp;

	/* enable PCH FDI RX PLL, wait warmup plus DMI latency */
	reg = FDI_RX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	temp &= ~(FDI_DP_PORT_WIDTH_MASK | (0x7 << 16));
	temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
	/* mirror the pipe's bpc setting into bits 18:16 of FDI RX */
	temp |= (intel_de_read(dev_priv, PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
	intel_de_write(dev_priv, reg, temp | FDI_RX_PLL_ENABLE);

	intel_de_posting_read(dev_priv, reg);
	udelay(200);

	/* Switch from Rawclk to PCDclk */
	temp = intel_de_read(dev_priv, reg);
	intel_de_write(dev_priv, reg, temp | FDI_PCDCLK);

	intel_de_posting_read(dev_priv, reg);
	udelay(200);

	/* Enable CPU FDI TX PLL, always on for Ironlake */
	reg = FDI_TX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	if ((temp & FDI_TX_PLL_ENABLE) == 0) {
		intel_de_write(dev_priv, reg, temp | FDI_TX_PLL_ENABLE);

		intel_de_posting_read(dev_priv, reg);
		udelay(100);
	}
}
926
/*
 * Disable the FDI PLLs for @crtc: switch the receiver back to the raw
 * clock, then turn off the TX PLL and finally the RX PLL, waiting for
 * the clocks to settle.
 */
void ilk_fdi_pll_disable(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 temp;

	/* Switch from PCDclk to Rawclk */
	reg = FDI_RX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	intel_de_write(dev_priv, reg, temp & ~FDI_PCDCLK);

	/* Disable CPU FDI TX PLL */
	reg = FDI_TX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	intel_de_write(dev_priv, reg, temp & ~FDI_TX_PLL_ENABLE);

	intel_de_posting_read(dev_priv, reg);
	udelay(100);

	reg = FDI_RX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	intel_de_write(dev_priv, reg, temp & ~FDI_RX_PLL_ENABLE);

	/* Wait for the clocks to turn off. */
	intel_de_posting_read(dev_priv, reg);
	udelay(100);
}
956
/*
 * Disable the FDI link for @crtc: turn off CPU TX then PCH RX, apply the
 * Ibexpeak clock-pointer workaround, and leave both sides parked on
 * training pattern 1 ready for the next enable.
 */
void ilk_fdi_disable(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 temp;

	/* disable CPU FDI tx and PCH FDI rx */
	reg = FDI_TX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	intel_de_write(dev_priv, reg, temp & ~FDI_TX_ENABLE);
	intel_de_posting_read(dev_priv, reg);

	reg = FDI_RX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	temp &= ~(0x7 << 16);
	/* keep FDI RX bpc (bits 18:16) in sync with PIPECONF */
	temp |= (intel_de_read(dev_priv, PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
	intel_de_write(dev_priv, reg, temp & ~FDI_RX_ENABLE);

	intel_de_posting_read(dev_priv, reg);
	udelay(100);

	/* Ironlake workaround, disable clock pointer after downing FDI */
	if (HAS_PCH_IBX(dev_priv))
		intel_de_write(dev_priv, FDI_RX_CHICKEN(pipe),
			       FDI_RX_PHASE_SYNC_POINTER_OVR);

	/* still set train pattern 1 */
	reg = FDI_TX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	intel_de_write(dev_priv, reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	if (HAS_PCH_CPT(dev_priv)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_PATTERN_1;
	}
	/* BPC in FDI rx is consistent with that in PIPECONF */
	temp &= ~(0x07 << 16);
	temp |= (intel_de_read(dev_priv, PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
	intel_de_write(dev_priv, reg, temp);

	intel_de_posting_read(dev_priv, reg);
	udelay(100);
}
1008
/*
 * lpt_fdi_reset_mphy - pulse the FDI mPHY reset
 * @dev_priv: i915 device
 *
 * Assert the mPHY IOSF sideband reset via SOUTH_CHICKEN2, wait up to
 * 100us for the status bit to confirm the assertion, then de-assert
 * it and wait for the status bit to clear again.  Timeouts are only
 * logged; the caller proceeds regardless.
 */
static void lpt_fdi_reset_mphy(struct drm_i915_private *dev_priv)
{
	u32 tmp;

	/* Assert the reset. */
	tmp = intel_de_read(dev_priv, SOUTH_CHICKEN2);
	tmp |= FDI_MPHY_IOSFSB_RESET_CTL;
	intel_de_write(dev_priv, SOUTH_CHICKEN2, tmp);

	if (wait_for_us(intel_de_read(dev_priv, SOUTH_CHICKEN2) &
			FDI_MPHY_IOSFSB_RESET_STATUS, 100))
		drm_err(&dev_priv->drm, "FDI mPHY reset assert timeout\n");

	/* Release the reset. */
	tmp = intel_de_read(dev_priv, SOUTH_CHICKEN2);
	tmp &= ~FDI_MPHY_IOSFSB_RESET_CTL;
	intel_de_write(dev_priv, SOUTH_CHICKEN2, tmp);

	if (wait_for_us((intel_de_read(dev_priv, SOUTH_CHICKEN2) &
			 FDI_MPHY_IOSFSB_RESET_STATUS) == 0, 100))
		drm_err(&dev_priv->drm, "FDI mPHY reset de-assert timeout\n");
}
1029
1030 /* WaMPhyProgramming:hsw */
lpt_fdi_program_mphy(struct drm_i915_private * dev_priv)1031 void lpt_fdi_program_mphy(struct drm_i915_private *dev_priv)
1032 {
1033 u32 tmp;
1034
1035 lpt_fdi_reset_mphy(dev_priv);
1036
1037 tmp = intel_sbi_read(dev_priv, 0x8008, SBI_MPHY);
1038 tmp &= ~(0xFF << 24);
1039 tmp |= (0x12 << 24);
1040 intel_sbi_write(dev_priv, 0x8008, tmp, SBI_MPHY);
1041
1042 tmp = intel_sbi_read(dev_priv, 0x2008, SBI_MPHY);
1043 tmp |= (1 << 11);
1044 intel_sbi_write(dev_priv, 0x2008, tmp, SBI_MPHY);
1045
1046 tmp = intel_sbi_read(dev_priv, 0x2108, SBI_MPHY);
1047 tmp |= (1 << 11);
1048 intel_sbi_write(dev_priv, 0x2108, tmp, SBI_MPHY);
1049
1050 tmp = intel_sbi_read(dev_priv, 0x206C, SBI_MPHY);
1051 tmp |= (1 << 24) | (1 << 21) | (1 << 18);
1052 intel_sbi_write(dev_priv, 0x206C, tmp, SBI_MPHY);
1053
1054 tmp = intel_sbi_read(dev_priv, 0x216C, SBI_MPHY);
1055 tmp |= (1 << 24) | (1 << 21) | (1 << 18);
1056 intel_sbi_write(dev_priv, 0x216C, tmp, SBI_MPHY);
1057
1058 tmp = intel_sbi_read(dev_priv, 0x2080, SBI_MPHY);
1059 tmp &= ~(7 << 13);
1060 tmp |= (5 << 13);
1061 intel_sbi_write(dev_priv, 0x2080, tmp, SBI_MPHY);
1062
1063 tmp = intel_sbi_read(dev_priv, 0x2180, SBI_MPHY);
1064 tmp &= ~(7 << 13);
1065 tmp |= (5 << 13);
1066 intel_sbi_write(dev_priv, 0x2180, tmp, SBI_MPHY);
1067
1068 tmp = intel_sbi_read(dev_priv, 0x208C, SBI_MPHY);
1069 tmp &= ~0xFF;
1070 tmp |= 0x1C;
1071 intel_sbi_write(dev_priv, 0x208C, tmp, SBI_MPHY);
1072
1073 tmp = intel_sbi_read(dev_priv, 0x218C, SBI_MPHY);
1074 tmp &= ~0xFF;
1075 tmp |= 0x1C;
1076 intel_sbi_write(dev_priv, 0x218C, tmp, SBI_MPHY);
1077
1078 tmp = intel_sbi_read(dev_priv, 0x2098, SBI_MPHY);
1079 tmp &= ~(0xFF << 16);
1080 tmp |= (0x1C << 16);
1081 intel_sbi_write(dev_priv, 0x2098, tmp, SBI_MPHY);
1082
1083 tmp = intel_sbi_read(dev_priv, 0x2198, SBI_MPHY);
1084 tmp &= ~(0xFF << 16);
1085 tmp |= (0x1C << 16);
1086 intel_sbi_write(dev_priv, 0x2198, tmp, SBI_MPHY);
1087
1088 tmp = intel_sbi_read(dev_priv, 0x20C4, SBI_MPHY);
1089 tmp |= (1 << 27);
1090 intel_sbi_write(dev_priv, 0x20C4, tmp, SBI_MPHY);
1091
1092 tmp = intel_sbi_read(dev_priv, 0x21C4, SBI_MPHY);
1093 tmp |= (1 << 27);
1094 intel_sbi_write(dev_priv, 0x21C4, tmp, SBI_MPHY);
1095
1096 tmp = intel_sbi_read(dev_priv, 0x20EC, SBI_MPHY);
1097 tmp &= ~(0xF << 28);
1098 tmp |= (4 << 28);
1099 intel_sbi_write(dev_priv, 0x20EC, tmp, SBI_MPHY);
1100
1101 tmp = intel_sbi_read(dev_priv, 0x21EC, SBI_MPHY);
1102 tmp &= ~(0xF << 28);
1103 tmp |= (4 << 28);
1104 intel_sbi_write(dev_priv, 0x21EC, tmp, SBI_MPHY);
1105 }
1106
/*
 * Per-platform FDI link training vtables, selected by
 * intel_fdi_init_hook() below.
 */
static const struct intel_fdi_funcs ilk_funcs = {
	.fdi_link_train = ilk_fdi_link_train,
};

static const struct intel_fdi_funcs gen6_funcs = {
	.fdi_link_train = gen6_fdi_link_train,
};

static const struct intel_fdi_funcs ivb_funcs = {
	.fdi_link_train = ivb_manual_fdi_link_train,
};
1118
1119 void
intel_fdi_init_hook(struct drm_i915_private * dev_priv)1120 intel_fdi_init_hook(struct drm_i915_private *dev_priv)
1121 {
1122 if (IS_IRONLAKE(dev_priv)) {
1123 dev_priv->fdi_funcs = &ilk_funcs;
1124 } else if (IS_SANDYBRIDGE(dev_priv)) {
1125 dev_priv->fdi_funcs = &gen6_funcs;
1126 } else if (IS_IVYBRIDGE(dev_priv)) {
1127 /* FIXME: detect B0+ stepping and use auto training */
1128 dev_priv->fdi_funcs = &ivb_funcs;
1129 }
1130 }
1131