1 /*
2 * Copyright © 2008-2015 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23
24 #include "intel_display_types.h"
25 #include "intel_dp.h"
26 #include "intel_dp_link_training.h"
27
28
intel_dp_reset_lttpr_common_caps(struct intel_dp * intel_dp)29 static void intel_dp_reset_lttpr_common_caps(struct intel_dp *intel_dp)
30 {
31 memset(intel_dp->lttpr_common_caps, 0, sizeof(intel_dp->lttpr_common_caps));
32 }
33
intel_dp_reset_lttpr_count(struct intel_dp * intel_dp)34 static void intel_dp_reset_lttpr_count(struct intel_dp *intel_dp)
35 {
36 intel_dp->lttpr_common_caps[DP_PHY_REPEATER_CNT -
37 DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV] = 0;
38 }
39
intel_dp_phy_name(enum drm_dp_phy dp_phy,char * buf,size_t buf_size)40 static const char *intel_dp_phy_name(enum drm_dp_phy dp_phy,
41 char *buf, size_t buf_size)
42 {
43 if (dp_phy == DP_PHY_DPRX)
44 snprintf(buf, buf_size, "DPRX");
45 else
46 snprintf(buf, buf_size, "LTTPR %d", dp_phy - DP_PHY_LTTPR1 + 1);
47
48 return buf;
49 }
50
/*
 * Return the cached per-PHY capability buffer for the given LTTPR PHY.
 * Only valid for LTTPR PHYs (not DP_PHY_DPRX).
 */
static u8 *intel_dp_lttpr_phy_caps(struct intel_dp *intel_dp,
				   enum drm_dp_phy dp_phy)
{
	return intel_dp->lttpr_phy_caps[dp_phy - DP_PHY_LTTPR1];
}
56
intel_dp_read_lttpr_phy_caps(struct intel_dp * intel_dp,enum drm_dp_phy dp_phy)57 static void intel_dp_read_lttpr_phy_caps(struct intel_dp *intel_dp,
58 enum drm_dp_phy dp_phy)
59 {
60 struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
61 u8 *phy_caps = intel_dp_lttpr_phy_caps(intel_dp, dp_phy);
62 char phy_name[10];
63
64 intel_dp_phy_name(dp_phy, phy_name, sizeof(phy_name));
65
66 if (drm_dp_read_lttpr_phy_caps(&intel_dp->aux, dp_phy, phy_caps) < 0) {
67 drm_dbg_kms(&dp_to_i915(intel_dp)->drm,
68 "[ENCODER:%d:%s][%s] failed to read the PHY caps\n",
69 encoder->base.base.id, encoder->base.name, phy_name);
70 return;
71 }
72
73 drm_dbg_kms(&dp_to_i915(intel_dp)->drm,
74 "[ENCODER:%d:%s][%s] PHY capabilities: %*ph\n",
75 encoder->base.base.id, encoder->base.name, phy_name,
76 (int)sizeof(intel_dp->lttpr_phy_caps[0]),
77 phy_caps);
78 }
79
intel_dp_read_lttpr_common_caps(struct intel_dp * intel_dp)80 static bool intel_dp_read_lttpr_common_caps(struct intel_dp *intel_dp)
81 {
82 struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
83 struct drm_i915_private *i915 = to_i915(encoder->base.dev);
84
85 if (intel_dp_is_edp(intel_dp))
86 return false;
87
88 /*
89 * Detecting LTTPRs must be avoided on platforms with an AUX timeout
90 * period < 3.2ms. (see DP Standard v2.0, 2.11.2, 3.6.6.1).
91 */
92 if (DISPLAY_VER(i915) < 10 || IS_GEMINILAKE(i915))
93 return false;
94
95 if (drm_dp_read_lttpr_common_caps(&intel_dp->aux,
96 intel_dp->lttpr_common_caps) < 0)
97 goto reset_caps;
98
99 drm_dbg_kms(&dp_to_i915(intel_dp)->drm,
100 "[ENCODER:%d:%s] LTTPR common capabilities: %*ph\n",
101 encoder->base.base.id, encoder->base.name,
102 (int)sizeof(intel_dp->lttpr_common_caps),
103 intel_dp->lttpr_common_caps);
104
105 /* The minimum value of LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV is 1.4 */
106 if (intel_dp->lttpr_common_caps[0] < 0x14)
107 goto reset_caps;
108
109 return true;
110
111 reset_caps:
112 intel_dp_reset_lttpr_common_caps(intel_dp);
113 return false;
114 }
115
116 static bool
intel_dp_set_lttpr_transparent_mode(struct intel_dp * intel_dp,bool enable)117 intel_dp_set_lttpr_transparent_mode(struct intel_dp *intel_dp, bool enable)
118 {
119 u8 val = enable ? DP_PHY_REPEATER_MODE_TRANSPARENT :
120 DP_PHY_REPEATER_MODE_NON_TRANSPARENT;
121
122 return drm_dp_dpcd_write(&intel_dp->aux, DP_PHY_REPEATER_MODE, &val, 1) == 1;
123 }
124
/*
 * Detect LTTPRs between source and sink and, if any are found, switch them to
 * non-transparent link training mode.
 *
 * Returns the number of LTTPRs in non-transparent mode, or 0 when none were
 * detected / detection failed / the switch to non-transparent mode failed (in
 * which case training proceeds in transparent mode).
 */
static int intel_dp_init_lttpr(struct intel_dp *intel_dp)
{
	struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
	int lttpr_count;
	int i;

	if (!intel_dp_read_lttpr_common_caps(intel_dp))
		return 0;

	/* A negative count indicates an invalid/unsupported repeater count field. */
	lttpr_count = drm_dp_lttpr_count(intel_dp->lttpr_common_caps);
	/*
	 * Prevent setting LTTPR transparent mode explicitly if no LTTPRs are
	 * detected as this breaks link training at least on the Dell WD19TB
	 * dock.
	 */
	if (lttpr_count == 0)
		return 0;

	/*
	 * See DP Standard v2.0 3.6.6.1. about the explicit disabling of
	 * non-transparent mode and the disable->enable non-transparent mode
	 * sequence.
	 */
	intel_dp_set_lttpr_transparent_mode(intel_dp, true);

	/*
	 * In case of unsupported number of LTTPRs or failing to switch to
	 * non-transparent mode fall-back to transparent link training mode,
	 * still taking into account any LTTPR common lane- rate/count limits.
	 */
	if (lttpr_count < 0)
		return 0;

	if (!intel_dp_set_lttpr_transparent_mode(intel_dp, false)) {
		drm_dbg_kms(&i915->drm,
			    "[ENCODER:%d:%s] Switching to LTTPR non-transparent LT mode failed, fall-back to transparent mode\n",
			    encoder->base.base.id, encoder->base.name);

		/* Re-enable transparent mode and forget the stale count. */
		intel_dp_set_lttpr_transparent_mode(intel_dp, true);
		intel_dp_reset_lttpr_count(intel_dp);

		return 0;
	}

	/* Cache the per-PHY capabilities of each detected LTTPR. */
	for (i = 0; i < lttpr_count; i++)
		intel_dp_read_lttpr_phy_caps(intel_dp, DP_PHY_LTTPR(i));

	return lttpr_count;
}
175
176 /**
177 * intel_dp_init_lttpr_and_dprx_caps - detect LTTPR and DPRX caps, init the LTTPR link training mode
178 * @intel_dp: Intel DP struct
179 *
180 * Read the LTTPR common and DPRX capabilities and switch to non-transparent
181 * link training mode if any is detected and read the PHY capabilities for all
182 * detected LTTPRs. In case of an LTTPR detection error or if the number of
183 * LTTPRs is more than is supported (8), fall back to the no-LTTPR,
184 * transparent mode link training mode.
185 *
186 * Returns:
187 * >0 if LTTPRs were detected and the non-transparent LT mode was set. The
188 * DPRX capabilities are read out.
189 * 0 if no LTTPRs or more than 8 LTTPRs were detected or in case of a
190 * detection failure and the transparent LT mode was set. The DPRX
191 * capabilities are read out.
192 * <0 Reading out the DPRX capabilities failed.
193 */
intel_dp_init_lttpr_and_dprx_caps(struct intel_dp * intel_dp)194 int intel_dp_init_lttpr_and_dprx_caps(struct intel_dp *intel_dp)
195 {
196 int lttpr_count = intel_dp_init_lttpr(intel_dp);
197
198 /* The DPTX shall read the DPRX caps after LTTPR detection. */
199 if (drm_dp_read_dpcd_caps(&intel_dp->aux, intel_dp->dpcd)) {
200 intel_dp_reset_lttpr_common_caps(intel_dp);
201 return -EIO;
202 }
203
204 return lttpr_count;
205 }
206
/*
 * Max voltage swing level allowed in combination with the given pre-emphasis
 * level: their sum may not exceed level 3.
 */
static u8 dp_voltage_max(u8 preemph)
{
	switch (preemph & DP_TRAIN_PRE_EMPHASIS_MASK) {
	case DP_TRAIN_PRE_EMPH_LEVEL_2:
		return DP_TRAIN_VOLTAGE_SWING_LEVEL_1;
	case DP_TRAIN_PRE_EMPH_LEVEL_1:
		return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
	case DP_TRAIN_PRE_EMPH_LEVEL_0:
		return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
	case DP_TRAIN_PRE_EMPH_LEVEL_3:
	default:
		return DP_TRAIN_VOLTAGE_SWING_LEVEL_0;
	}
}
221
intel_dp_lttpr_voltage_max(struct intel_dp * intel_dp,enum drm_dp_phy dp_phy)222 static u8 intel_dp_lttpr_voltage_max(struct intel_dp *intel_dp,
223 enum drm_dp_phy dp_phy)
224 {
225 const u8 *phy_caps = intel_dp_lttpr_phy_caps(intel_dp, dp_phy);
226
227 if (drm_dp_lttpr_voltage_swing_level_3_supported(phy_caps))
228 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
229 else
230 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
231 }
232
intel_dp_lttpr_preemph_max(struct intel_dp * intel_dp,enum drm_dp_phy dp_phy)233 static u8 intel_dp_lttpr_preemph_max(struct intel_dp *intel_dp,
234 enum drm_dp_phy dp_phy)
235 {
236 const u8 *phy_caps = intel_dp_lttpr_phy_caps(intel_dp, dp_phy);
237
238 if (drm_dp_lttpr_pre_emphasis_level_3_supported(phy_caps))
239 return DP_TRAIN_PRE_EMPH_LEVEL_3;
240 else
241 return DP_TRAIN_PRE_EMPH_LEVEL_2;
242 }
243
244 static bool
intel_dp_phy_is_downstream_of_source(struct intel_dp * intel_dp,enum drm_dp_phy dp_phy)245 intel_dp_phy_is_downstream_of_source(struct intel_dp *intel_dp,
246 enum drm_dp_phy dp_phy)
247 {
248 struct drm_i915_private *i915 = dp_to_i915(intel_dp);
249 int lttpr_count = drm_dp_lttpr_count(intel_dp->lttpr_common_caps);
250
251 drm_WARN_ON_ONCE(&i915->drm, lttpr_count <= 0 && dp_phy != DP_PHY_DPRX);
252
253 return lttpr_count <= 0 || dp_phy == DP_PHY_LTTPR(lttpr_count - 1);
254 }
255
intel_dp_phy_voltage_max(struct intel_dp * intel_dp,const struct intel_crtc_state * crtc_state,enum drm_dp_phy dp_phy)256 static u8 intel_dp_phy_voltage_max(struct intel_dp *intel_dp,
257 const struct intel_crtc_state *crtc_state,
258 enum drm_dp_phy dp_phy)
259 {
260 struct drm_i915_private *i915 = dp_to_i915(intel_dp);
261 u8 voltage_max;
262
263 /*
264 * Get voltage_max from the DPTX_PHY (source or LTTPR) upstream from
265 * the DPRX_PHY we train.
266 */
267 if (intel_dp_phy_is_downstream_of_source(intel_dp, dp_phy))
268 voltage_max = intel_dp->voltage_max(intel_dp, crtc_state);
269 else
270 voltage_max = intel_dp_lttpr_voltage_max(intel_dp, dp_phy + 1);
271
272 drm_WARN_ON_ONCE(&i915->drm,
273 voltage_max != DP_TRAIN_VOLTAGE_SWING_LEVEL_2 &&
274 voltage_max != DP_TRAIN_VOLTAGE_SWING_LEVEL_3);
275
276 return voltage_max;
277 }
278
intel_dp_phy_preemph_max(struct intel_dp * intel_dp,enum drm_dp_phy dp_phy)279 static u8 intel_dp_phy_preemph_max(struct intel_dp *intel_dp,
280 enum drm_dp_phy dp_phy)
281 {
282 struct drm_i915_private *i915 = dp_to_i915(intel_dp);
283 u8 preemph_max;
284
285 /*
286 * Get preemph_max from the DPTX_PHY (source or LTTPR) upstream from
287 * the DPRX_PHY we train.
288 */
289 if (intel_dp_phy_is_downstream_of_source(intel_dp, dp_phy))
290 preemph_max = intel_dp->preemph_max(intel_dp);
291 else
292 preemph_max = intel_dp_lttpr_preemph_max(intel_dp, dp_phy + 1);
293
294 drm_WARN_ON_ONCE(&i915->drm,
295 preemph_max != DP_TRAIN_PRE_EMPH_LEVEL_2 &&
296 preemph_max != DP_TRAIN_PRE_EMPH_LEVEL_3);
297
298 return preemph_max;
299 }
300
/*
 * Whether per-lane signal levels can be used for the given PHY. The source's
 * own transmitter (the PHY downstream of the source) uses common levels for
 * all lanes.
 */
static bool has_per_lane_signal_levels(struct intel_dp *intel_dp,
				       enum drm_dp_phy dp_phy)
{
	return !intel_dp_phy_is_downstream_of_source(intel_dp, dp_phy);
}
306
307 /* 128b/132b */
intel_dp_get_lane_adjust_tx_ffe_preset(struct intel_dp * intel_dp,const struct intel_crtc_state * crtc_state,enum drm_dp_phy dp_phy,const u8 link_status[DP_LINK_STATUS_SIZE],int lane)308 static u8 intel_dp_get_lane_adjust_tx_ffe_preset(struct intel_dp *intel_dp,
309 const struct intel_crtc_state *crtc_state,
310 enum drm_dp_phy dp_phy,
311 const u8 link_status[DP_LINK_STATUS_SIZE],
312 int lane)
313 {
314 u8 tx_ffe = 0;
315
316 if (has_per_lane_signal_levels(intel_dp, dp_phy)) {
317 lane = min(lane, crtc_state->lane_count - 1);
318 tx_ffe = drm_dp_get_adjust_tx_ffe_preset(link_status, lane);
319 } else {
320 for (lane = 0; lane < crtc_state->lane_count; lane++)
321 tx_ffe = max(tx_ffe, drm_dp_get_adjust_tx_ffe_preset(link_status, lane));
322 }
323
324 return tx_ffe;
325 }
326
327 /* 8b/10b */
/*
 * Compute the combined vswing | pre-emphasis training-set byte for @lane from
 * the sink's adjustment requests, clamped to the limits of the upstream
 * transmitter. Without per-lane levels the max request across all lanes is
 * used. The MAX_*_REACHED flags are set when a value got clamped.
 */
static u8 intel_dp_get_lane_adjust_vswing_preemph(struct intel_dp *intel_dp,
						  const struct intel_crtc_state *crtc_state,
						  enum drm_dp_phy dp_phy,
						  const u8 link_status[DP_LINK_STATUS_SIZE],
						  int lane)
{
	u8 v = 0;
	u8 p = 0;
	u8 voltage_max;
	u8 preemph_max;

	if (has_per_lane_signal_levels(intel_dp, dp_phy)) {
		lane = min(lane, crtc_state->lane_count - 1);

		v = drm_dp_get_adjust_request_voltage(link_status, lane);
		p = drm_dp_get_adjust_request_pre_emphasis(link_status, lane);
	} else {
		for (lane = 0; lane < crtc_state->lane_count; lane++) {
			v = max(v, drm_dp_get_adjust_request_voltage(link_status, lane));
			p = max(p, drm_dp_get_adjust_request_pre_emphasis(link_status, lane));
		}
	}

	/* Clamp pre-emphasis first: the vswing limit below depends on it. */
	preemph_max = intel_dp_phy_preemph_max(intel_dp, dp_phy);
	if (p >= preemph_max)
		p = preemph_max | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;

	/* dp_voltage_max() masks out the MAX_PRE_EMPHASIS_REACHED flag in p. */
	v = min(v, dp_voltage_max(p));

	voltage_max = intel_dp_phy_voltage_max(intel_dp, crtc_state, dp_phy);
	if (v >= voltage_max)
		v = voltage_max | DP_TRAIN_MAX_SWING_REACHED;

	return v | p;
}
363
/*
 * Compute the training-set byte for @lane: TX FFE preset for 128b/132b
 * (UHBR) links, vswing/pre-emphasis for 8b/10b links.
 */
static u8 intel_dp_get_lane_adjust_train(struct intel_dp *intel_dp,
					 const struct intel_crtc_state *crtc_state,
					 enum drm_dp_phy dp_phy,
					 const u8 link_status[DP_LINK_STATUS_SIZE],
					 int lane)
{
	if (!intel_dp_is_uhbr(crtc_state))
		return intel_dp_get_lane_adjust_vswing_preemph(intel_dp, crtc_state,
							       dp_phy, link_status, lane);

	return intel_dp_get_lane_adjust_tx_ffe_preset(intel_dp, crtc_state,
						      dp_phy, link_status, lane);
}
377
/*
 * Debug-print helpers expanding the per-lane adjustment requests in a
 * link_status buffer into four printf arguments (one per lane), matching
 * the "%d/%d/%d/%d" TRAIN_REQ_FMT format string.
 */
#define TRAIN_REQ_FMT "%d/%d/%d/%d"
#define _TRAIN_REQ_VSWING_ARGS(link_status, lane) \
	(drm_dp_get_adjust_request_voltage((link_status), (lane)) >> DP_TRAIN_VOLTAGE_SWING_SHIFT)
#define TRAIN_REQ_VSWING_ARGS(link_status) \
	_TRAIN_REQ_VSWING_ARGS(link_status, 0), \
	_TRAIN_REQ_VSWING_ARGS(link_status, 1), \
	_TRAIN_REQ_VSWING_ARGS(link_status, 2), \
	_TRAIN_REQ_VSWING_ARGS(link_status, 3)
#define _TRAIN_REQ_PREEMPH_ARGS(link_status, lane) \
	(drm_dp_get_adjust_request_pre_emphasis((link_status), (lane)) >> DP_TRAIN_PRE_EMPHASIS_SHIFT)
#define TRAIN_REQ_PREEMPH_ARGS(link_status) \
	_TRAIN_REQ_PREEMPH_ARGS(link_status, 0), \
	_TRAIN_REQ_PREEMPH_ARGS(link_status, 1), \
	_TRAIN_REQ_PREEMPH_ARGS(link_status, 2), \
	_TRAIN_REQ_PREEMPH_ARGS(link_status, 3)
#define _TRAIN_REQ_TX_FFE_ARGS(link_status, lane) \
	drm_dp_get_adjust_tx_ffe_preset((link_status), (lane))
#define TRAIN_REQ_TX_FFE_ARGS(link_status) \
	_TRAIN_REQ_TX_FFE_ARGS(link_status, 0), \
	_TRAIN_REQ_TX_FFE_ARGS(link_status, 1), \
	_TRAIN_REQ_TX_FFE_ARGS(link_status, 2), \
	_TRAIN_REQ_TX_FFE_ARGS(link_status, 3)
400
/*
 * Recompute intel_dp->train_set for all four lanes from the sink's
 * adjustment requests in @link_status, clamped to the limits of the
 * transmitter upstream of @dp_phy, and log the raw requests.
 */
void
intel_dp_get_adjust_train(struct intel_dp *intel_dp,
			  const struct intel_crtc_state *crtc_state,
			  enum drm_dp_phy dp_phy,
			  const u8 link_status[DP_LINK_STATUS_SIZE])
{
	struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
	char phy_name[10];
	int lane;

	if (intel_dp_is_uhbr(crtc_state)) {
		drm_dbg_kms(&i915->drm, "[ENCODER:%d:%s][%s] 128b/132b, lanes: %d, "
			    "TX FFE request: " TRAIN_REQ_FMT "\n",
			    encoder->base.base.id, encoder->base.name,
			    intel_dp_phy_name(dp_phy, phy_name, sizeof(phy_name)),
			    crtc_state->lane_count,
			    TRAIN_REQ_TX_FFE_ARGS(link_status));
	} else {
		drm_dbg_kms(&i915->drm, "[ENCODER:%d:%s][%s] 8b/10b, lanes: %d, "
			    "vswing request: " TRAIN_REQ_FMT ", "
			    "pre-emphasis request: " TRAIN_REQ_FMT "\n",
			    encoder->base.base.id, encoder->base.name,
			    intel_dp_phy_name(dp_phy, phy_name, sizeof(phy_name)),
			    crtc_state->lane_count,
			    TRAIN_REQ_VSWING_ARGS(link_status),
			    TRAIN_REQ_PREEMPH_ARGS(link_status));
	}

	/* All 4 train_set entries are filled in, even for lane counts < 4. */
	for (lane = 0; lane < 4; lane++)
		intel_dp->train_set[lane] =
			intel_dp_get_lane_adjust_train(intel_dp, crtc_state,
						       dp_phy, link_status, lane);
}
435
intel_dp_training_pattern_set_reg(struct intel_dp * intel_dp,enum drm_dp_phy dp_phy)436 static int intel_dp_training_pattern_set_reg(struct intel_dp *intel_dp,
437 enum drm_dp_phy dp_phy)
438 {
439 return dp_phy == DP_PHY_DPRX ?
440 DP_TRAINING_PATTERN_SET :
441 DP_TRAINING_PATTERN_SET_PHY_REPEATER(dp_phy);
442 }
443
/*
 * Program the given training pattern on the source, then write the pattern
 * together with the current per-lane train_set levels to the sink's DPCD in
 * a single transfer.
 *
 * Returns true if the DPCD write transferred all bytes.
 */
static bool
intel_dp_set_link_train(struct intel_dp *intel_dp,
			const struct intel_crtc_state *crtc_state,
			enum drm_dp_phy dp_phy,
			u8 dp_train_pat)
{
	int reg = intel_dp_training_pattern_set_reg(intel_dp, dp_phy);
	u8 buf[sizeof(intel_dp->train_set) + 1];
	int len;

	intel_dp_program_link_training_pattern(intel_dp, crtc_state,
					       dp_phy, dp_train_pat);

	buf[0] = dp_train_pat;
	/* DP_TRAINING_LANEx_SET follow DP_TRAINING_PATTERN_SET */
	memcpy(buf + 1, intel_dp->train_set, crtc_state->lane_count);
	len = crtc_state->lane_count + 1;

	return drm_dp_dpcd_write(&intel_dp->aux, reg, buf, len) == len;
}
464
/*
 * Return a single character naming the given training pattern for debug
 * output: '1'-'3', '4', or '?' for unknown values.
 */
static char dp_training_pattern_name(u8 train_pat)
{
	switch (train_pat) {
	case DP_TRAINING_PATTERN_1:
	case DP_TRAINING_PATTERN_2:
	case DP_TRAINING_PATTERN_3:
		/* Patterns 1-3 map directly from their numeric value. */
		return '0' + train_pat;
	case DP_TRAINING_PATTERN_4:
		/*
		 * NOTE(review): TPS4 is handled separately, presumably because
		 * its define is not numerically 4 - confirm against the DPCD
		 * defines.
		 */
		return '4';
	default:
		MISSING_CASE(train_pat);
		return '?';
	}
}
479
/*
 * Program the given training pattern on the source side via the platform
 * set_link_train() hook, logging the pattern unless it's the disable value.
 */
void
intel_dp_program_link_training_pattern(struct intel_dp *intel_dp,
				       const struct intel_crtc_state *crtc_state,
				       enum drm_dp_phy dp_phy,
				       u8 dp_train_pat)
{
	struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
	u8 train_pat = intel_dp_training_pattern_symbol(dp_train_pat);
	char phy_name[10];

	if (train_pat != DP_TRAINING_PATTERN_DISABLE)
		drm_dbg_kms(&i915->drm,
			    "[ENCODER:%d:%s][%s] Using DP training pattern TPS%c\n",
			    encoder->base.base.id, encoder->base.name,
			    intel_dp_phy_name(dp_phy, phy_name, sizeof(phy_name)),
			    dp_training_pattern_name(train_pat));

	intel_dp->set_link_train(intel_dp, crtc_state, dp_train_pat);
}
500
/*
 * Debug-print helpers expanding a 4-entry train_set array into printf
 * arguments for the "%d%s/%d%s/%d%s/%d%s" TRAIN_SET_FMT format string: a
 * level per lane plus a "(max)" suffix when the max-reached flag is set
 * (always empty for TX FFE, which has no such flag).
 */
#define TRAIN_SET_FMT "%d%s/%d%s/%d%s/%d%s"
#define _TRAIN_SET_VSWING_ARGS(train_set) \
	((train_set) & DP_TRAIN_VOLTAGE_SWING_MASK) >> DP_TRAIN_VOLTAGE_SWING_SHIFT, \
	(train_set) & DP_TRAIN_MAX_SWING_REACHED ? "(max)" : ""
#define TRAIN_SET_VSWING_ARGS(train_set) \
	_TRAIN_SET_VSWING_ARGS((train_set)[0]), \
	_TRAIN_SET_VSWING_ARGS((train_set)[1]), \
	_TRAIN_SET_VSWING_ARGS((train_set)[2]), \
	_TRAIN_SET_VSWING_ARGS((train_set)[3])
#define _TRAIN_SET_PREEMPH_ARGS(train_set) \
	((train_set) & DP_TRAIN_PRE_EMPHASIS_MASK) >> DP_TRAIN_PRE_EMPHASIS_SHIFT, \
	(train_set) & DP_TRAIN_MAX_PRE_EMPHASIS_REACHED ? "(max)" : ""
#define TRAIN_SET_PREEMPH_ARGS(train_set) \
	_TRAIN_SET_PREEMPH_ARGS((train_set)[0]), \
	_TRAIN_SET_PREEMPH_ARGS((train_set)[1]), \
	_TRAIN_SET_PREEMPH_ARGS((train_set)[2]), \
	_TRAIN_SET_PREEMPH_ARGS((train_set)[3])
#define _TRAIN_SET_TX_FFE_ARGS(train_set) \
	((train_set) & DP_TX_FFE_PRESET_VALUE_MASK), ""
#define TRAIN_SET_TX_FFE_ARGS(train_set) \
	_TRAIN_SET_TX_FFE_ARGS((train_set)[0]), \
	_TRAIN_SET_TX_FFE_ARGS((train_set)[1]), \
	_TRAIN_SET_TX_FFE_ARGS((train_set)[2]), \
	_TRAIN_SET_TX_FFE_ARGS((train_set)[3])
525
/*
 * Log the current train_set levels and program them on the source's own
 * transmitter, but only when @dp_phy is directly downstream of the source -
 * levels for other PHYs are programmed by the upstream LTTPR via DPCD.
 */
void intel_dp_set_signal_levels(struct intel_dp *intel_dp,
				const struct intel_crtc_state *crtc_state,
				enum drm_dp_phy dp_phy)
{
	struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
	char phy_name[10];

	if (intel_dp_is_uhbr(crtc_state)) {
		drm_dbg_kms(&i915->drm, "[ENCODER:%d:%s][%s] 128b/132b, lanes: %d, "
			    "TX FFE presets: " TRAIN_SET_FMT "\n",
			    encoder->base.base.id, encoder->base.name,
			    intel_dp_phy_name(dp_phy, phy_name, sizeof(phy_name)),
			    crtc_state->lane_count,
			    TRAIN_SET_TX_FFE_ARGS(intel_dp->train_set));
	} else {
		drm_dbg_kms(&i915->drm, "[ENCODER:%d:%s][%s] 8b/10b, lanes: %d, "
			    "vswing levels: " TRAIN_SET_FMT ", "
			    "pre-emphasis levels: " TRAIN_SET_FMT "\n",
			    encoder->base.base.id, encoder->base.name,
			    intel_dp_phy_name(dp_phy, phy_name, sizeof(phy_name)),
			    crtc_state->lane_count,
			    TRAIN_SET_VSWING_ARGS(intel_dp->train_set),
			    TRAIN_SET_PREEMPH_ARGS(intel_dp->train_set));
	}

	if (intel_dp_phy_is_downstream_of_source(intel_dp, dp_phy))
		encoder->set_signal_levels(encoder, crtc_state);
}
555
/*
 * Start link training from scratch: zero the train_set levels, program them
 * on the source, then enable the given training pattern. Returns true if
 * writing the pattern and levels to the sink succeeded.
 */
static bool
intel_dp_reset_link_train(struct intel_dp *intel_dp,
			  const struct intel_crtc_state *crtc_state,
			  enum drm_dp_phy dp_phy,
			  u8 dp_train_pat)
{
	memset(intel_dp->train_set, 0, sizeof(intel_dp->train_set));
	intel_dp_set_signal_levels(intel_dp, crtc_state, dp_phy);
	return intel_dp_set_link_train(intel_dp, crtc_state, dp_phy, dp_train_pat);
}
566
567 static bool
intel_dp_update_link_train(struct intel_dp * intel_dp,const struct intel_crtc_state * crtc_state,enum drm_dp_phy dp_phy)568 intel_dp_update_link_train(struct intel_dp *intel_dp,
569 const struct intel_crtc_state *crtc_state,
570 enum drm_dp_phy dp_phy)
571 {
572 int reg = dp_phy == DP_PHY_DPRX ?
573 DP_TRAINING_LANE0_SET :
574 DP_TRAINING_LANE0_SET_PHY_REPEATER(dp_phy);
575 int ret;
576
577 intel_dp_set_signal_levels(intel_dp, crtc_state, dp_phy);
578
579 ret = drm_dp_dpcd_write(&intel_dp->aux, reg,
580 intel_dp->train_set, crtc_state->lane_count);
581
582 return ret == crtc_state->lane_count;
583 }
584
585 /* 128b/132b */
/*
 * Whether the lane's TX FFE preset is at the maximum encodable value
 * (all preset bits set).
 */
static bool intel_dp_lane_max_tx_ffe_reached(u8 train_set_lane)
{
	return (train_set_lane & DP_TX_FFE_PRESET_VALUE_MASK) ==
		DP_TX_FFE_PRESET_VALUE_MASK;
}
591
592 /*
593 * 8b/10b
594 *
595 * FIXME: The DP spec is very confusing here, also the Link CTS spec seems to
596 * have self contradicting tests around this area.
597 *
598 * In lieu of better ideas let's just stop when we've reached the max supported
599 * vswing with its max pre-emphasis, which is either 2+1 or 3+0 depending on
600 * whether vswing level 3 is supported or not.
601 */
intel_dp_lane_max_vswing_reached(u8 train_set_lane)602 static bool intel_dp_lane_max_vswing_reached(u8 train_set_lane)
603 {
604 u8 v = (train_set_lane & DP_TRAIN_VOLTAGE_SWING_MASK) >>
605 DP_TRAIN_VOLTAGE_SWING_SHIFT;
606 u8 p = (train_set_lane & DP_TRAIN_PRE_EMPHASIS_MASK) >>
607 DP_TRAIN_PRE_EMPHASIS_SHIFT;
608
609 if ((train_set_lane & DP_TRAIN_MAX_SWING_REACHED) == 0)
610 return false;
611
612 if (v + p != 3)
613 return false;
614
615 return true;
616 }
617
intel_dp_link_max_vswing_reached(struct intel_dp * intel_dp,const struct intel_crtc_state * crtc_state)618 static bool intel_dp_link_max_vswing_reached(struct intel_dp *intel_dp,
619 const struct intel_crtc_state *crtc_state)
620 {
621 int lane;
622
623 for (lane = 0; lane < crtc_state->lane_count; lane++) {
624 u8 train_set_lane = intel_dp->train_set[lane];
625
626 if (intel_dp_is_uhbr(crtc_state)) {
627 if (!intel_dp_lane_max_tx_ffe_reached(train_set_lane))
628 return false;
629 } else {
630 if (!intel_dp_lane_max_vswing_reached(train_set_lane))
631 return false;
632 }
633 }
634
635 return true;
636 }
637
638 /*
639 * Prepare link training by configuring the link parameters. On DDI platforms
640 * also enable the port here.
641 */
static bool
intel_dp_prepare_link_train(struct intel_dp *intel_dp,
			    const struct intel_crtc_state *crtc_state)
{
	struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
	u8 link_config[2];
	u8 link_bw, rate_select;

	/* Optional platform hook, e.g. for enabling the port on DDI. */
	if (intel_dp->prepare_link_retrain)
		intel_dp->prepare_link_retrain(intel_dp, crtc_state);

	/* Either link_bw or rate_select is non-zero, never both. */
	intel_dp_compute_rate(intel_dp, crtc_state->port_clock,
			      &link_bw, &rate_select);

	if (link_bw)
		drm_dbg_kms(&i915->drm,
			    "[ENCODER:%d:%s] Using LINK_BW_SET value %02x\n",
			    encoder->base.base.id, encoder->base.name, link_bw);
	else
		drm_dbg_kms(&i915->drm,
			    "[ENCODER:%d:%s] Using LINK_RATE_SET value %02x\n",
			    encoder->base.base.id, encoder->base.name, rate_select);

	/* Write the link configuration data */
	link_config[0] = link_bw;
	link_config[1] = crtc_state->lane_count;
	if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
		link_config[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
	drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_BW_SET, link_config, 2);

	/* eDP 1.4 rate select method. */
	if (!link_bw)
		drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_RATE_SET,
				  &rate_select, 1);

	/* DOWNSPREAD_CTRL and MAIN_LINK_CHANNEL_CODING_SET are consecutive. */
	link_config[0] = crtc_state->vrr.enable ? DP_MSA_TIMING_PAR_IGNORE_EN : 0;
	link_config[1] = intel_dp_is_uhbr(crtc_state) ?
		DP_SET_ANSI_128B132B : DP_SET_ANSI_8B10B;
	drm_dp_dpcd_write(&intel_dp->aux, DP_DOWNSPREAD_CTRL, link_config, 2);

	return true;
}
685
/*
 * Wait the clock recovery interval appropriate for the given PHY: the
 * DPCD-derived delay for the DPRX, the LTTPR-specific delay otherwise.
 */
static void intel_dp_link_training_clock_recovery_delay(struct intel_dp *intel_dp,
							enum drm_dp_phy dp_phy)
{
	if (dp_phy == DP_PHY_DPRX)
		drm_dp_link_train_clock_recovery_delay(&intel_dp->aux, intel_dp->dpcd);
	else
		drm_dp_lttpr_link_train_clock_recovery_delay();
}
694
intel_dp_adjust_request_changed(const struct intel_crtc_state * crtc_state,const u8 old_link_status[DP_LINK_STATUS_SIZE],const u8 new_link_status[DP_LINK_STATUS_SIZE])695 static bool intel_dp_adjust_request_changed(const struct intel_crtc_state *crtc_state,
696 const u8 old_link_status[DP_LINK_STATUS_SIZE],
697 const u8 new_link_status[DP_LINK_STATUS_SIZE])
698 {
699 int lane;
700
701 for (lane = 0; lane < crtc_state->lane_count; lane++) {
702 u8 old, new;
703
704 if (intel_dp_is_uhbr(crtc_state)) {
705 old = drm_dp_get_adjust_tx_ffe_preset(old_link_status, lane);
706 new = drm_dp_get_adjust_tx_ffe_preset(new_link_status, lane);
707 } else {
708 old = drm_dp_get_adjust_request_voltage(old_link_status, lane) |
709 drm_dp_get_adjust_request_pre_emphasis(old_link_status, lane);
710 new = drm_dp_get_adjust_request_voltage(new_link_status, lane) |
711 drm_dp_get_adjust_request_pre_emphasis(new_link_status, lane);
712 }
713
714 if (old != new)
715 return true;
716 }
717
718 return false;
719 }
720
/*
 * Dump the first 6 link status bytes (lane status, alignment/sink status and
 * adjustment requests) for the given PHY to the debug log.
 */
static void
intel_dp_dump_link_status(struct intel_dp *intel_dp, enum drm_dp_phy dp_phy,
			  const u8 link_status[DP_LINK_STATUS_SIZE])
{
	struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
	char phy_name[10];

	drm_dbg_kms(&i915->drm,
		    "[ENCODER:%d:%s][%s] ln0_1:0x%x ln2_3:0x%x align:0x%x sink:0x%x adj_req0_1:0x%x adj_req2_3:0x%x\n",
		    encoder->base.base.id, encoder->base.name,
		    intel_dp_phy_name(dp_phy, phy_name, sizeof(phy_name)),
		    link_status[0], link_status[1], link_status[2],
		    link_status[3], link_status[4], link_status[5]);
}
736
737 /*
738 * Perform the link training clock recovery phase on the given DP PHY using
739 * training pattern 1.
740 */
/*
 * Perform the clock recovery phase of link training on the given PHY using
 * training pattern 1, adjusting the signal levels as requested by the sink
 * until clock recovery succeeds or one of the give-up conditions is hit.
 *
 * Returns true on successful clock recovery, false on any AUX failure, on 5
 * retries with unchanged adjustment requests, on reaching max voltage swing,
 * or after max_cr_tries iterations.
 */
static bool
intel_dp_link_training_clock_recovery(struct intel_dp *intel_dp,
				      const struct intel_crtc_state *crtc_state,
				      enum drm_dp_phy dp_phy)
{
	struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
	u8 old_link_status[DP_LINK_STATUS_SIZE] = {};
	int voltage_tries, cr_tries, max_cr_tries;
	u8 link_status[DP_LINK_STATUS_SIZE];
	bool max_vswing_reached = false;
	char phy_name[10];

	intel_dp_phy_name(dp_phy, phy_name, sizeof(phy_name));

	/* clock recovery */
	if (!intel_dp_reset_link_train(intel_dp, crtc_state, dp_phy,
				       DP_TRAINING_PATTERN_1 |
				       DP_LINK_SCRAMBLING_DISABLE)) {
		drm_err(&i915->drm, "[ENCODER:%d:%s][%s] Failed to enable link training\n",
			encoder->base.base.id, encoder->base.name, phy_name);
		return false;
	}

	/*
	 * The DP 1.4 spec defines the max clock recovery retries value
	 * as 10 but for pre-DP 1.4 devices we set a very tolerant
	 * retry limit of 80 (4 voltage levels x 4 preemphasis levels x
	 * x 5 identical voltage retries). Since the previous specs didn't
	 * define a limit and created the possibility of an infinite loop
	 * we want to prevent any sync from triggering that corner case.
	 */
	if (intel_dp->dpcd[DP_DPCD_REV] >= DP_DPCD_REV_14)
		max_cr_tries = 10;
	else
		max_cr_tries = 80;

	voltage_tries = 1;
	for (cr_tries = 0; cr_tries < max_cr_tries; ++cr_tries) {
		intel_dp_link_training_clock_recovery_delay(intel_dp, dp_phy);

		if (drm_dp_dpcd_read_phy_link_status(&intel_dp->aux, dp_phy,
						     link_status) < 0) {
			drm_err(&i915->drm, "[ENCODER:%d:%s][%s] Failed to get link status\n",
				encoder->base.base.id, encoder->base.name, phy_name);
			return false;
		}

		if (drm_dp_clock_recovery_ok(link_status, crtc_state->lane_count)) {
			drm_dbg_kms(&i915->drm,
				    "[ENCODER:%d:%s][%s] Clock recovery OK\n",
				    encoder->base.base.id, encoder->base.name, phy_name);
			return true;
		}

		/* Give up if the sink kept requesting the same levels. */
		if (voltage_tries == 5) {
			intel_dp_dump_link_status(intel_dp, dp_phy, link_status);
			drm_dbg_kms(&i915->drm,
				    "[ENCODER:%d:%s][%s] Same voltage tried 5 times\n",
				    encoder->base.base.id, encoder->base.name, phy_name);
			return false;
		}

		/* Give up once max vswing was tried (set in the previous iteration). */
		if (max_vswing_reached) {
			intel_dp_dump_link_status(intel_dp, dp_phy, link_status);
			drm_dbg_kms(&i915->drm,
				    "[ENCODER:%d:%s][%s] Max Voltage Swing reached\n",
				    encoder->base.base.id, encoder->base.name, phy_name);
			return false;
		}

		/* Update training set as requested by target */
		intel_dp_get_adjust_train(intel_dp, crtc_state, dp_phy,
					  link_status);
		if (!intel_dp_update_link_train(intel_dp, crtc_state, dp_phy)) {
			drm_err(&i915->drm,
				"[ENCODER:%d:%s][%s] Failed to update link training\n",
				encoder->base.base.id, encoder->base.name, phy_name);
			return false;
		}

		/* Count consecutive iterations with unchanged adjustment requests. */
		if (!intel_dp_adjust_request_changed(crtc_state, old_link_status, link_status))
			++voltage_tries;
		else
			voltage_tries = 1;

		memcpy(old_link_status, link_status, sizeof(link_status));

		if (intel_dp_link_max_vswing_reached(intel_dp, crtc_state))
			max_vswing_reached = true;
	}

	intel_dp_dump_link_status(intel_dp, dp_phy, link_status);
	drm_err(&i915->drm,
		"[ENCODER:%d:%s][%s] Failed clock recovery %d times, giving up!\n",
		encoder->base.base.id, encoder->base.name, phy_name, max_cr_tries);

	return false;
}
840
841 /*
842 * Pick Training Pattern Sequence (TPS) for channel equalization. 128b/132b TPS2
843 * for UHBR+, TPS4 for HBR3 or for 1.4 devices that support it, TPS3 for HBR2 or
844 * 1.2 devices that support it, TPS2 otherwise.
845 */
intel_dp_training_pattern(struct intel_dp * intel_dp,const struct intel_crtc_state * crtc_state,enum drm_dp_phy dp_phy)846 static u32 intel_dp_training_pattern(struct intel_dp *intel_dp,
847 const struct intel_crtc_state *crtc_state,
848 enum drm_dp_phy dp_phy)
849 {
850 struct drm_i915_private *i915 = dp_to_i915(intel_dp);
851 bool source_tps3, sink_tps3, source_tps4, sink_tps4;
852
853 /* UHBR+ use separate 128b/132b TPS2 */
854 if (intel_dp_is_uhbr(crtc_state))
855 return DP_TRAINING_PATTERN_2;
856
857 /*
858 * TPS4 support is mandatory for all downstream devices that
859 * support HBR3. There are no known eDP panels that support
860 * TPS4 as of Feb 2018 as per VESA eDP_v1.4b_E1 specification.
861 * LTTPRs must support TPS4.
862 */
863 source_tps4 = intel_dp_source_supports_tps4(i915);
864 sink_tps4 = dp_phy != DP_PHY_DPRX ||
865 drm_dp_tps4_supported(intel_dp->dpcd);
866 if (source_tps4 && sink_tps4) {
867 return DP_TRAINING_PATTERN_4;
868 } else if (crtc_state->port_clock == 810000) {
869 if (!source_tps4)
870 drm_dbg_kms(&i915->drm,
871 "8.1 Gbps link rate without source TPS4 support\n");
872 if (!sink_tps4)
873 drm_dbg_kms(&i915->drm,
874 "8.1 Gbps link rate without sink TPS4 support\n");
875 }
876
877 /*
878 * TPS3 support is mandatory for downstream devices that
879 * support HBR2. However, not all sinks follow the spec.
880 */
881 source_tps3 = intel_dp_source_supports_tps3(i915);
882 sink_tps3 = dp_phy != DP_PHY_DPRX ||
883 drm_dp_tps3_supported(intel_dp->dpcd);
884 if (source_tps3 && sink_tps3) {
885 return DP_TRAINING_PATTERN_3;
886 } else if (crtc_state->port_clock >= 540000) {
887 if (!source_tps3)
888 drm_dbg_kms(&i915->drm,
889 ">=5.4/6.48 Gbps link rate without source TPS3 support\n");
890 if (!sink_tps3)
891 drm_dbg_kms(&i915->drm,
892 ">=5.4/6.48 Gbps link rate without sink TPS3 support\n");
893 }
894
895 return DP_TRAINING_PATTERN_2;
896 }
897
898 static void
intel_dp_link_training_channel_equalization_delay(struct intel_dp * intel_dp,enum drm_dp_phy dp_phy)899 intel_dp_link_training_channel_equalization_delay(struct intel_dp *intel_dp,
900 enum drm_dp_phy dp_phy)
901 {
902 if (dp_phy == DP_PHY_DPRX) {
903 drm_dp_link_train_channel_eq_delay(&intel_dp->aux, intel_dp->dpcd);
904 } else {
905 const u8 *phy_caps = intel_dp_lttpr_phy_caps(intel_dp, dp_phy);
906
907 drm_dp_lttpr_link_train_channel_eq_delay(&intel_dp->aux, phy_caps);
908 }
909 }
910
/*
 * Perform the link training channel equalization phase on the given DP PHY
 * using one of training pattern 2, 3 or 4 depending on the source and
 * sink capabilities.
 *
 * Returns true if channel equalization succeeded within 5 attempts, false
 * on AUX failure, loss of clock recovery, or exhaustion of the retries.
 */
static bool
intel_dp_link_training_channel_equalization(struct intel_dp *intel_dp,
					    const struct intel_crtc_state *crtc_state,
					    enum drm_dp_phy dp_phy)
{
	struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
	int tries;
	u32 training_pattern;
	u8 link_status[DP_LINK_STATUS_SIZE];
	bool channel_eq = false;
	char phy_name[10];	/* filled by intel_dp_phy_name() for log output */

	intel_dp_phy_name(dp_phy, phy_name, sizeof(phy_name));

	training_pattern = intel_dp_training_pattern(intel_dp, crtc_state, dp_phy);
	/* Scrambling is disabled for TPS2/3 and enabled for TPS4 */
	if (training_pattern != DP_TRAINING_PATTERN_4)
		training_pattern |= DP_LINK_SCRAMBLING_DISABLE;

	/* channel equalization */
	if (!intel_dp_set_link_train(intel_dp, crtc_state, dp_phy,
				     training_pattern)) {
		drm_err(&i915->drm,
			"[ENCODER:%d:%s][%s] Failed to start channel equalization\n",
			encoder->base.base.id, encoder->base.name,
			phy_name);
		return false;
	}

	for (tries = 0; tries < 5; tries++) {
		/* Honor the mandated wait before polling link status. */
		intel_dp_link_training_channel_equalization_delay(intel_dp,
								  dp_phy);
		if (drm_dp_dpcd_read_phy_link_status(&intel_dp->aux, dp_phy,
						     link_status) < 0) {
			drm_err(&i915->drm,
				"[ENCODER:%d:%s][%s] Failed to get link status\n",
				encoder->base.base.id, encoder->base.name, phy_name);
			break;
		}

		/* Make sure clock is still ok */
		if (!drm_dp_clock_recovery_ok(link_status,
					      crtc_state->lane_count)) {
			intel_dp_dump_link_status(intel_dp, dp_phy, link_status);
			drm_dbg_kms(&i915->drm,
				    "[ENCODER:%d:%s][%s] Clock recovery check failed, cannot "
				    "continue channel equalization\n",
				    encoder->base.base.id, encoder->base.name, phy_name);
			break;
		}

		if (drm_dp_channel_eq_ok(link_status,
					 crtc_state->lane_count)) {
			channel_eq = true;
			drm_dbg_kms(&i915->drm,
				    "[ENCODER:%d:%s][%s] Channel EQ done. DP Training successful\n",
				    encoder->base.base.id, encoder->base.name, phy_name);
			break;
		}

		/* Update training set as requested by target */
		intel_dp_get_adjust_train(intel_dp, crtc_state, dp_phy,
					  link_status);
		if (!intel_dp_update_link_train(intel_dp, crtc_state, dp_phy)) {
			drm_err(&i915->drm,
				"[ENCODER:%d:%s][%s] Failed to update link training\n",
				encoder->base.base.id, encoder->base.name, phy_name);
			break;
		}
	}

	/* Try 5 times, else fail and try at lower BW */
	if (tries == 5) {
		intel_dp_dump_link_status(intel_dp, dp_phy, link_status);
		drm_dbg_kms(&i915->drm,
			    "[ENCODER:%d:%s][%s] Channel equalization failed 5 times\n",
			    encoder->base.base.id, encoder->base.name, phy_name);
	}

	return channel_eq;
}
998
intel_dp_disable_dpcd_training_pattern(struct intel_dp * intel_dp,enum drm_dp_phy dp_phy)999 static bool intel_dp_disable_dpcd_training_pattern(struct intel_dp *intel_dp,
1000 enum drm_dp_phy dp_phy)
1001 {
1002 int reg = intel_dp_training_pattern_set_reg(intel_dp, dp_phy);
1003 u8 val = DP_TRAINING_PATTERN_DISABLE;
1004
1005 return drm_dp_dpcd_write(&intel_dp->aux, reg, &val, 1) == 1;
1006 }
1007
1008 /**
1009 * intel_dp_stop_link_train - stop link training
1010 * @intel_dp: DP struct
1011 * @crtc_state: state for CRTC attached to the encoder
1012 *
1013 * Stop the link training of the @intel_dp port, disabling the training
1014 * pattern in the sink's DPCD, and disabling the test pattern symbol
1015 * generation on the port.
1016 *
1017 * What symbols are output on the port after this point is
1018 * platform specific: On DDI/VLV/CHV platforms it will be the idle pattern
1019 * with the pipe being disabled, on older platforms it's HW specific if/how an
1020 * idle pattern is generated, as the pipe is already enabled here for those.
1021 *
1022 * This function must be called after intel_dp_start_link_train().
1023 */
void intel_dp_stop_link_train(struct intel_dp *intel_dp,
			      const struct intel_crtc_state *crtc_state)
{
	/* Mark training as completed before touching the sink/port state. */
	intel_dp->link_trained = true;

	/* Clear the training pattern in the sink's DPCD first ... */
	intel_dp_disable_dpcd_training_pattern(intel_dp, DP_PHY_DPRX);
	/* ... then stop the source from emitting a training pattern. */
	intel_dp_program_link_training_pattern(intel_dp, crtc_state, DP_PHY_DPRX,
					       DP_TRAINING_PATTERN_DISABLE);
}
1033
1034 static bool
intel_dp_link_train_phy(struct intel_dp * intel_dp,const struct intel_crtc_state * crtc_state,enum drm_dp_phy dp_phy)1035 intel_dp_link_train_phy(struct intel_dp *intel_dp,
1036 const struct intel_crtc_state *crtc_state,
1037 enum drm_dp_phy dp_phy)
1038 {
1039 struct intel_connector *connector = intel_dp->attached_connector;
1040 struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
1041 char phy_name[10];
1042 bool ret = false;
1043
1044 if (!intel_dp_link_training_clock_recovery(intel_dp, crtc_state, dp_phy))
1045 goto out;
1046
1047 if (!intel_dp_link_training_channel_equalization(intel_dp, crtc_state, dp_phy))
1048 goto out;
1049
1050 ret = true;
1051
1052 out:
1053 drm_dbg_kms(&dp_to_i915(intel_dp)->drm,
1054 "[CONNECTOR:%d:%s][ENCODER:%d:%s][%s] Link Training %s at link rate = %d, lane count = %d\n",
1055 connector->base.base.id, connector->base.name,
1056 encoder->base.base.id, encoder->base.name,
1057 intel_dp_phy_name(dp_phy, phy_name, sizeof(phy_name)),
1058 ret ? "passed" : "failed",
1059 crtc_state->port_clock, crtc_state->lane_count);
1060
1061 return ret;
1062 }
1063
intel_dp_schedule_fallback_link_training(struct intel_dp * intel_dp,const struct intel_crtc_state * crtc_state)1064 static void intel_dp_schedule_fallback_link_training(struct intel_dp *intel_dp,
1065 const struct intel_crtc_state *crtc_state)
1066 {
1067 struct intel_connector *intel_connector = intel_dp->attached_connector;
1068 struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
1069
1070 if (intel_dp->hobl_active) {
1071 drm_dbg_kms(&dp_to_i915(intel_dp)->drm,
1072 "[ENCODER:%d:%s] Link Training failed with HOBL active, "
1073 "not enabling it from now on",
1074 encoder->base.base.id, encoder->base.name);
1075 intel_dp->hobl_failed = true;
1076 } else if (intel_dp_get_link_train_fallback_values(intel_dp,
1077 crtc_state->port_clock,
1078 crtc_state->lane_count)) {
1079 return;
1080 }
1081
1082 /* Schedule a Hotplug Uevent to userspace to start modeset */
1083 schedule_work(&intel_connector->modeset_retry_work);
1084 }
1085
1086 /* Perform the link training on all LTTPRs and the DPRX on a link. */
1087 static bool
intel_dp_link_train_all_phys(struct intel_dp * intel_dp,const struct intel_crtc_state * crtc_state,int lttpr_count)1088 intel_dp_link_train_all_phys(struct intel_dp *intel_dp,
1089 const struct intel_crtc_state *crtc_state,
1090 int lttpr_count)
1091 {
1092 bool ret = true;
1093 int i;
1094
1095 intel_dp_prepare_link_train(intel_dp, crtc_state);
1096
1097 for (i = lttpr_count - 1; i >= 0; i--) {
1098 enum drm_dp_phy dp_phy = DP_PHY_LTTPR(i);
1099
1100 ret = intel_dp_link_train_phy(intel_dp, crtc_state, dp_phy);
1101 intel_dp_disable_dpcd_training_pattern(intel_dp, dp_phy);
1102
1103 if (!ret)
1104 break;
1105 }
1106
1107 if (ret)
1108 ret = intel_dp_link_train_phy(intel_dp, crtc_state, DP_PHY_DPRX);
1109
1110 if (intel_dp->set_idle_link_train)
1111 intel_dp->set_idle_link_train(intel_dp, crtc_state);
1112
1113 return ret;
1114 }
1115
1116 /**
1117 * intel_dp_start_link_train - start link training
1118 * @intel_dp: DP struct
1119 * @crtc_state: state for CRTC attached to the encoder
1120 *
1121 * Start the link training of the @intel_dp port, scheduling a fallback
1122 * retraining with reduced link rate/lane parameters if the link training
1123 * fails.
1124 * After calling this function intel_dp_stop_link_train() must be called.
1125 */
intel_dp_start_link_train(struct intel_dp * intel_dp,const struct intel_crtc_state * crtc_state)1126 void intel_dp_start_link_train(struct intel_dp *intel_dp,
1127 const struct intel_crtc_state *crtc_state)
1128 {
1129 /*
1130 * TODO: Reiniting LTTPRs here won't be needed once proper connector
1131 * HW state readout is added.
1132 */
1133 int lttpr_count = intel_dp_init_lttpr_and_dprx_caps(intel_dp);
1134
1135 if (lttpr_count < 0)
1136 /* Still continue with enabling the port and link training. */
1137 lttpr_count = 0;
1138
1139 if (!intel_dp_link_train_all_phys(intel_dp, crtc_state, lttpr_count))
1140 intel_dp_schedule_fallback_link_training(intel_dp, crtc_state);
1141 }
1142