1 /*
2 * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33 #include <linux/clocksource.h>
34 #include <linux/highmem.h>
35 #include <linux/ptp_clock_kernel.h>
36 #include <rdma/mlx5-abi.h>
37 #include "lib/eq.h"
38 #include "en.h"
39 #include "clock.h"
40
/* Shift for the internal cyclecounter (see mlx5_timecounter_init()) */
enum {
	MLX5_CYCLES_SHIFT = 23
};

/* MTPPS pin_mode field values */
enum {
	MLX5_PIN_MODE_IN = 0x0,
	MLX5_PIN_MODE_OUT = 0x1,
};

/* MTPPS output pattern field values */
enum {
	MLX5_OUT_PATTERN_PULSE = 0x0,
	MLX5_OUT_PATTERN_PERIODIC = 0x1,
};

/* MTPPSE event generation modes (misspelling REPETETIVE kept; used below) */
enum {
	MLX5_EVENT_MODE_DISABLE = 0x0,
	MLX5_EVENT_MODE_REPETETIVE = 0x1,
	MLX5_EVENT_MODE_ONCE_TILL_ARM = 0x2,
};

/* MTPPS field_select bits: select which fields a register write applies */
enum {
	MLX5_MTPPS_FS_ENABLE = BIT(0x0),
	MLX5_MTPPS_FS_PATTERN = BIT(0x2),
	MLX5_MTPPS_FS_PIN_MODE = BIT(0x3),
	MLX5_MTPPS_FS_TIME_STAMP = BIT(0x4),
	MLX5_MTPPS_FS_OUT_PULSE_DURATION = BIT(0x5),
	MLX5_MTPPS_FS_ENH_OUT_PER_ADJ = BIT(0x7),
	MLX5_MTPPS_FS_NPPS_PERIOD = BIT(0x9),
	MLX5_MTPPS_FS_OUT_PULSE_DURATION_NS = BIT(0xa),
};

/* MTUTC ADJUST_TIME limits; the extended range is capability-gated
 * (see mlx5_is_mtutc_time_adj_cap()). Units: ns, per adjtime usage.
 */
enum {
	MLX5_MTUTC_OPERATION_ADJUST_TIME_MIN = S16_MIN,
	MLX5_MTUTC_OPERATION_ADJUST_TIME_MAX = S16_MAX,
	MLX5_MTUTC_OPERATION_ADJUST_TIME_EXTENDED_MIN = -200000,
	MLX5_MTUTC_OPERATION_ADJUST_TIME_EXTENDED_MAX = 200000,
};
78
mlx5_real_time_mode(struct mlx5_core_dev * mdev)79 static bool mlx5_real_time_mode(struct mlx5_core_dev *mdev)
80 {
81 return (mlx5_is_real_time_rq(mdev) || mlx5_is_real_time_sq(mdev));
82 }
83
mlx5_npps_real_time_supported(struct mlx5_core_dev * mdev)84 static bool mlx5_npps_real_time_supported(struct mlx5_core_dev *mdev)
85 {
86 return (mlx5_real_time_mode(mdev) &&
87 MLX5_CAP_MCAM_FEATURE(mdev, npps_period) &&
88 MLX5_CAP_MCAM_FEATURE(mdev, out_pulse_duration_ns));
89 }
90
mlx5_modify_mtutc_allowed(struct mlx5_core_dev * mdev)91 static bool mlx5_modify_mtutc_allowed(struct mlx5_core_dev *mdev)
92 {
93 return MLX5_CAP_MCAM_FEATURE(mdev, ptpcyc2realtime_modify);
94 }
95
mlx5_is_mtutc_time_adj_cap(struct mlx5_core_dev * mdev,s64 delta)96 static bool mlx5_is_mtutc_time_adj_cap(struct mlx5_core_dev *mdev, s64 delta)
97 {
98 s64 min = MLX5_MTUTC_OPERATION_ADJUST_TIME_MIN;
99 s64 max = MLX5_MTUTC_OPERATION_ADJUST_TIME_MAX;
100
101 if (MLX5_CAP_MCAM_FEATURE(mdev, mtutc_time_adjustment_extended_range)) {
102 min = MLX5_MTUTC_OPERATION_ADJUST_TIME_EXTENDED_MIN;
103 max = MLX5_MTUTC_OPERATION_ADJUST_TIME_EXTENDED_MAX;
104 }
105
106 if (delta < min || delta > max)
107 return false;
108
109 return true;
110 }
111
/* Write the MTUTC register (access_reg with write=1). Fails with
 * -EOPNOTSUPP when the register is not exposed by MCAM.
 */
static int mlx5_set_mtutc(struct mlx5_core_dev *dev, u32 *mtutc, u32 size)
{
	u32 out[MLX5_ST_SZ_DW(mtutc_reg)] = {};
	bool supported = MLX5_CAP_MCAM_REG(dev, mtutc);

	if (!supported)
		return -EOPNOTSUPP;

	return mlx5_core_access_reg(dev, mtutc, size, out, sizeof(out),
				    MLX5_REG_MTUTC, 0, 1);
}
122
/* Read the device's 64-bit time from the init segment BAR: either the UTC
 * real-time clock or the free-running internal timer, per @real_time.
 *
 * The value is exposed as two 32-bit MMIO words, so the high word is
 * sampled before and after the low word; a change means the low word
 * wrapped between reads and must be re-read. @sts (may be NULL) brackets
 * the low-word read for PTP_SYS_OFFSET_EXTENDED-style system timestamps.
 */
static u64 mlx5_read_time(struct mlx5_core_dev *dev,
			  struct ptp_system_timestamp *sts,
			  bool real_time)
{
	u32 timer_h, timer_h1, timer_l;

	timer_h = ioread32be(real_time ? &dev->iseg->real_time_h :
			     &dev->iseg->internal_timer_h);
	ptp_read_system_prets(sts);
	timer_l = ioread32be(real_time ? &dev->iseg->real_time_l :
			     &dev->iseg->internal_timer_l);
	ptp_read_system_postts(sts);
	timer_h1 = ioread32be(real_time ? &dev->iseg->real_time_h :
			      &dev->iseg->internal_timer_h);
	if (timer_h != timer_h1) {
		/* wrap around: timer_h1 already matches the new high word,
		 * so only the low word needs to be read again
		 */
		ptp_read_system_prets(sts);
		timer_l = ioread32be(real_time ? &dev->iseg->real_time_l :
				     &dev->iseg->internal_timer_l);
		ptp_read_system_postts(sts);
	}

	/* real-time values are converted to ns via REAL_TIME_TO_NS;
	 * internal-timer values are returned as raw cycles
	 */
	return real_time ? REAL_TIME_TO_NS(timer_h1, timer_l) :
	       (u64)timer_l | (u64)timer_h1 << 32;
}
148
read_internal_timer(const struct cyclecounter * cc)149 static u64 read_internal_timer(const struct cyclecounter *cc)
150 {
151 struct mlx5_timer *timer = container_of(cc, struct mlx5_timer, cycles);
152 struct mlx5_clock *clock = container_of(timer, struct mlx5_clock, timer);
153 struct mlx5_core_dev *mdev = container_of(clock, struct mlx5_core_dev,
154 clock);
155
156 return mlx5_read_time(mdev, NULL, false) & cc->mask;
157 }
158
/* Mirror the SW timecounter state into the clock-info page shared with
 * mlx5_ib / userspace so cycles can be converted to ns without a syscall.
 *
 * Callers hold clock->lock. The sign field acts like a seqcount: it is
 * made odd (KERNEL_UPDATING) before the update and advanced by two after,
 * so lock-free readers can detect and retry a torn read.
 */
static void mlx5_update_clock_info_page(struct mlx5_core_dev *mdev)
{
	struct mlx5_ib_clock_info *clock_info = mdev->clock_info;
	struct mlx5_clock *clock = &mdev->clock;
	struct mlx5_timer *timer;
	u32 sign;

	/* page allocation may have failed at init; nothing to publish then */
	if (!clock_info)
		return;

	sign = smp_load_acquire(&clock_info->sign);
	smp_store_mb(clock_info->sign,
		     sign | MLX5_IB_CLOCK_INFO_KERNEL_UPDATING);

	timer = &clock->timer;
	clock_info->cycles = timer->tc.cycle_last;
	clock_info->mult = timer->cycles.mult;
	clock_info->nsec = timer->tc.nsec;
	clock_info->frac = timer->tc.frac;

	smp_store_release(&clock_info->sign,
			  sign + MLX5_IB_CLOCK_INFO_KERNEL_UPDATING * 2);
}
182
/* Work item scheduled from the PPS event notifier: for every pin with a
 * pending start timestamp (pps_info.start[i] != 0), program that timestamp
 * into MTPPS so the next output event fires. Each pending value is consumed
 * (zeroed) under clock->lock before the register write.
 */
static void mlx5_pps_out(struct work_struct *work)
{
	struct mlx5_pps *pps_info = container_of(work, struct mlx5_pps,
						 out_work);
	struct mlx5_clock *clock = container_of(pps_info, struct mlx5_clock,
						pps_info);
	struct mlx5_core_dev *mdev = container_of(clock, struct mlx5_core_dev,
						  clock);
	u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {0};
	unsigned long flags;
	int i;

	for (i = 0; i < clock->ptp_info.n_pins; i++) {
		u64 tstart;

		/* atomically claim this pin's pending timestamp */
		write_seqlock_irqsave(&clock->lock, flags);
		tstart = clock->pps_info.start[i];
		clock->pps_info.start[i] = 0;
		write_sequnlock_irqrestore(&clock->lock, flags);
		if (!tstart)
			continue;

		MLX5_SET(mtpps_reg, in, pin, i);
		MLX5_SET64(mtpps_reg, in, time_stamp, tstart);
		MLX5_SET(mtpps_reg, in, field_select, MLX5_MTPPS_FS_TIME_STAMP);
		/* NOTE(review): write status is ignored here — best effort */
		mlx5_set_mtpps(mdev, in, sizeof(in));
	}
}
211
/* Periodic work that reads the timecounter before the HW cycle counter
 * wraps, keeping the SW time monotonic, and republishes the shared clock
 * info page. Re-arms itself with the period computed in
 * mlx5_init_overflow_period().
 */
static void mlx5_timestamp_overflow(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);
	struct mlx5_core_dev *mdev;
	struct mlx5_timer *timer;
	struct mlx5_clock *clock;
	unsigned long flags;

	timer = container_of(dwork, struct mlx5_timer, overflow_work);
	clock = container_of(timer, struct mlx5_clock, timer);
	mdev = container_of(clock, struct mlx5_core_dev, clock);

	write_seqlock_irqsave(&clock->lock, flags);
	timecounter_read(&timer->tc);
	mlx5_update_clock_info_page(mdev);
	write_sequnlock_irqrestore(&clock->lock, flags);
	schedule_delayed_work(&timer->overflow_work, timer->overflow_period);
}
230
mlx5_ptp_settime_real_time(struct mlx5_core_dev * mdev,const struct timespec64 * ts)231 static int mlx5_ptp_settime_real_time(struct mlx5_core_dev *mdev,
232 const struct timespec64 *ts)
233 {
234 u32 in[MLX5_ST_SZ_DW(mtutc_reg)] = {};
235
236 if (!mlx5_modify_mtutc_allowed(mdev))
237 return 0;
238
239 if (ts->tv_sec < 0 || ts->tv_sec > U32_MAX ||
240 ts->tv_nsec < 0 || ts->tv_nsec > NSEC_PER_SEC)
241 return -EINVAL;
242
243 MLX5_SET(mtutc_reg, in, operation, MLX5_MTUTC_OPERATION_SET_TIME_IMMEDIATE);
244 MLX5_SET(mtutc_reg, in, utc_sec, ts->tv_sec);
245 MLX5_SET(mtutc_reg, in, utc_nsec, ts->tv_nsec);
246
247 return mlx5_set_mtutc(mdev, in, sizeof(in));
248 }
249
/* ptp_clock_info.settime64: set the firmware real-time clock (when MTUTC
 * modification is supported) and re-seed the SW timecounter to @ts.
 */
static int mlx5_ptp_settime(struct ptp_clock_info *ptp, const struct timespec64 *ts)
{
	struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock, ptp_info);
	struct mlx5_timer *timer = &clock->timer;
	struct mlx5_core_dev *mdev;
	unsigned long flags;
	int err;

	mdev = container_of(clock, struct mlx5_core_dev, clock);
	err = mlx5_ptp_settime_real_time(mdev, ts);
	if (err)
		return err;

	write_seqlock_irqsave(&clock->lock, flags);
	timecounter_init(&timer->tc, &timer->cycles, timespec64_to_ns(ts));
	mlx5_update_clock_info_page(mdev);
	write_sequnlock_irqrestore(&clock->lock, flags);

	return 0;
}
270
271 static
mlx5_ptp_gettimex_real_time(struct mlx5_core_dev * mdev,struct ptp_system_timestamp * sts)272 struct timespec64 mlx5_ptp_gettimex_real_time(struct mlx5_core_dev *mdev,
273 struct ptp_system_timestamp *sts)
274 {
275 struct timespec64 ts;
276 u64 time;
277
278 time = mlx5_read_time(mdev, sts, true);
279 ts = ns_to_timespec64(time);
280 return ts;
281 }
282
mlx5_ptp_gettimex(struct ptp_clock_info * ptp,struct timespec64 * ts,struct ptp_system_timestamp * sts)283 static int mlx5_ptp_gettimex(struct ptp_clock_info *ptp, struct timespec64 *ts,
284 struct ptp_system_timestamp *sts)
285 {
286 struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock, ptp_info);
287 struct mlx5_timer *timer = &clock->timer;
288 struct mlx5_core_dev *mdev;
289 unsigned long flags;
290 u64 cycles, ns;
291
292 mdev = container_of(clock, struct mlx5_core_dev, clock);
293 if (mlx5_real_time_mode(mdev)) {
294 *ts = mlx5_ptp_gettimex_real_time(mdev, sts);
295 goto out;
296 }
297
298 write_seqlock_irqsave(&clock->lock, flags);
299 cycles = mlx5_read_time(mdev, sts, false);
300 ns = timecounter_cyc2time(&timer->tc, cycles);
301 write_sequnlock_irqrestore(&clock->lock, flags);
302 *ts = ns_to_timespec64(ns);
303 out:
304 return 0;
305 }
306
/* Apply a signed ns offset to the firmware real-time clock. Offsets beyond
 * the MTUTC adjustment range fall back to an absolute read-add-set sequence
 * (note: that path is not atomic with respect to the running clock).
 */
static int mlx5_ptp_adjtime_real_time(struct mlx5_core_dev *mdev, s64 delta)
{
	u32 in[MLX5_ST_SZ_DW(mtutc_reg)] = {};

	if (!mlx5_modify_mtutc_allowed(mdev))
		return 0;

	/* HW time adjustment range is checked. If out of range, settime instead */
	if (!mlx5_is_mtutc_time_adj_cap(mdev, delta)) {
		struct timespec64 ts;
		s64 ns;

		ts = mlx5_ptp_gettimex_real_time(mdev, NULL);
		ns = timespec64_to_ns(&ts) + delta;
		ts = ns_to_timespec64(ns);
		return mlx5_ptp_settime_real_time(mdev, &ts);
	}

	MLX5_SET(mtutc_reg, in, operation, MLX5_MTUTC_OPERATION_ADJUST_TIME);
	MLX5_SET(mtutc_reg, in, time_adjustment, delta);

	return mlx5_set_mtutc(mdev, in, sizeof(in));
}
330
/* ptp_clock_info.adjtime: shift the firmware real-time clock (if supported)
 * and the SW timecounter by @delta ns.
 */
static int mlx5_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
{
	struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock, ptp_info);
	struct mlx5_timer *timer = &clock->timer;
	struct mlx5_core_dev *mdev;
	unsigned long flags;
	int err;

	mdev = container_of(clock, struct mlx5_core_dev, clock);

	err = mlx5_ptp_adjtime_real_time(mdev, delta);
	if (err)
		return err;
	write_seqlock_irqsave(&clock->lock, flags);
	timecounter_adjtime(&timer->tc, delta);
	mlx5_update_clock_info_page(mdev);
	write_sequnlock_irqrestore(&clock->lock, flags);

	return 0;
}
351
/* ptp_clock_info.adjphase: a phase offset is applied as a plain time
 * adjustment, but only when it fits the MTUTC adjustment range; otherwise
 * -ERANGE lets the PTP core know the offset cannot be absorbed here.
 */
static int mlx5_ptp_adjphase(struct ptp_clock_info *ptp, s32 delta)
{
	struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock, ptp_info);
	struct mlx5_core_dev *mdev = container_of(clock, struct mlx5_core_dev,
						  clock);

	return mlx5_is_mtutc_time_adj_cap(mdev, delta) ?
	       mlx5_ptp_adjtime(ptp, delta) : -ERANGE;
}
364
/* Adjust the firmware real-time clock frequency. Devices advertising
 * mtutc_freq_adj_units take the scaled-ppm value as-is; older devices only
 * understand ppb, so the value is converted first.
 */
static int mlx5_ptp_freq_adj_real_time(struct mlx5_core_dev *mdev, long scaled_ppm)
{
	u32 in[MLX5_ST_SZ_DW(mtutc_reg)] = {};

	if (!mlx5_modify_mtutc_allowed(mdev))
		return 0;

	MLX5_SET(mtutc_reg, in, operation, MLX5_MTUTC_OPERATION_ADJUST_FREQ_UTC);

	if (MLX5_CAP_MCAM_FEATURE(mdev, mtutc_freq_adj_units)) {
		MLX5_SET(mtutc_reg, in, freq_adj_units,
			 MLX5_MTUTC_FREQ_ADJ_UNITS_SCALED_PPM);
		MLX5_SET(mtutc_reg, in, freq_adjustment, scaled_ppm);
	} else {
		MLX5_SET(mtutc_reg, in, freq_adj_units, MLX5_MTUTC_FREQ_ADJ_UNITS_PPB);
		MLX5_SET(mtutc_reg, in, freq_adjustment, scaled_ppm_to_ppb(scaled_ppm));
	}

	return mlx5_set_mtutc(mdev, in, sizeof(in));
}
385
/* ptp_clock_info.adjfine: scale the cyclecounter multiplier by @scaled_ppm
 * (ppm with a 16-bit fractional part) relative to the nominal multiplier,
 * and mirror the adjustment into the firmware real-time clock if supported.
 */
static int mlx5_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
{
	struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock, ptp_info);
	struct mlx5_timer *timer = &clock->timer;
	struct mlx5_core_dev *mdev;
	unsigned long flags;
	u32 mult;
	int err;

	mdev = container_of(clock, struct mlx5_core_dev, clock);

	err = mlx5_ptp_freq_adj_real_time(mdev, scaled_ppm);
	if (err)
		return err;

	/* always derived from the unmodified nominal mult, so adjustments
	 * do not accumulate rounding error
	 */
	mult = (u32)adjust_by_scaled_ppm(timer->nominal_c_mult, scaled_ppm);

	write_seqlock_irqsave(&clock->lock, flags);
	/* fold elapsed time at the old rate before switching multiplier */
	timecounter_read(&timer->tc);
	timer->cycles.mult = mult;
	mlx5_update_clock_info_page(mdev);
	write_sequnlock_irqrestore(&clock->lock, flags);

	return 0;
}
411
/* ptp_clock_info.enable handler for external-timestamp (EXTTS) requests:
 * validate flags and pin assignment, program pin mode and edge pattern via
 * MTPPS, then arm (or disarm) event generation via MTPPSE.
 */
static int mlx5_extts_configure(struct ptp_clock_info *ptp,
				struct ptp_clock_request *rq,
				int on)
{
	struct mlx5_clock *clock =
			container_of(ptp, struct mlx5_clock, ptp_info);
	struct mlx5_core_dev *mdev =
			container_of(clock, struct mlx5_core_dev, clock);
	u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {0};
	u32 field_select = 0;
	u8 pin_mode = 0;
	u8 pattern = 0;
	int pin = -1;
	int err = 0;

	if (!MLX5_PPS_CAP(mdev))
		return -EOPNOTSUPP;

	/* Reject requests with unsupported flags */
	if (rq->extts.flags & ~(PTP_ENABLE_FEATURE |
				PTP_RISING_EDGE |
				PTP_FALLING_EDGE |
				PTP_STRICT_FLAGS))
		return -EOPNOTSUPP;

	/* Reject requests to enable time stamping on both edges. */
	if ((rq->extts.flags & PTP_STRICT_FLAGS) &&
	    (rq->extts.flags & PTP_ENABLE_FEATURE) &&
	    (rq->extts.flags & PTP_EXTTS_EDGES) == PTP_EXTTS_EDGES)
		return -EOPNOTSUPP;

	if (rq->extts.index >= clock->ptp_info.n_pins)
		return -EINVAL;

	pin = ptp_find_pin(clock->ptp, PTP_PF_EXTTS, rq->extts.index);
	if (pin < 0)
		return -EBUSY;

	if (on) {
		pin_mode = MLX5_PIN_MODE_IN;
		/* pattern: 1 when timestamping falling edges, else 0 */
		pattern = !!(rq->extts.flags & PTP_FALLING_EDGE);
		field_select = MLX5_MTPPS_FS_PIN_MODE |
			       MLX5_MTPPS_FS_PATTERN |
			       MLX5_MTPPS_FS_ENABLE;
	} else {
		field_select = MLX5_MTPPS_FS_ENABLE;
	}

	MLX5_SET(mtpps_reg, in, pin, pin);
	MLX5_SET(mtpps_reg, in, pin_mode, pin_mode);
	MLX5_SET(mtpps_reg, in, pattern, pattern);
	MLX5_SET(mtpps_reg, in, enable, on);
	MLX5_SET(mtpps_reg, in, field_select, field_select);

	err = mlx5_set_mtpps(mdev, in, sizeof(in));
	if (err)
		return err;

	/* on == 1 arms repetitive event mode; on == 0 yields DISABLE (0) */
	return mlx5_set_mtppse(mdev, pin, 0,
			       MLX5_EVENT_MODE_REPETETIVE & on);
}
473
/* Translate an absolute SW-clock time (@target_ns) into the corresponding
 * free-running counter value: sample the counter, compute the ns distance
 * to the target, and scale back to cycles with the current mult/shift.
 *
 * NOTE(review): nsec_delta is u64, so this presumes target_ns is ahead of
 * the current time — callers pass a future second boundary; confirm if new
 * callers are added.
 */
static u64 find_target_cycles(struct mlx5_core_dev *mdev, s64 target_ns)
{
	struct mlx5_clock *clock = &mdev->clock;
	u64 cycles_now, cycles_delta;
	u64 nsec_now, nsec_delta;
	struct mlx5_timer *timer;
	unsigned long flags;

	timer = &clock->timer;

	cycles_now = mlx5_read_time(mdev, NULL, false);
	write_seqlock_irqsave(&clock->lock, flags);
	nsec_now = timecounter_cyc2time(&timer->tc, cycles_now);
	nsec_delta = target_ns - nsec_now;
	cycles_delta = div64_u64(nsec_delta << timer->cycles.shift,
				 timer->cycles.mult);
	write_sequnlock_irqrestore(&clock->lock, flags);

	return cycles_now + cycles_delta;
}
494
perout_conf_internal_timer(struct mlx5_core_dev * mdev,s64 sec)495 static u64 perout_conf_internal_timer(struct mlx5_core_dev *mdev, s64 sec)
496 {
497 struct timespec64 ts = {};
498 s64 target_ns;
499
500 ts.tv_sec = sec;
501 target_ns = timespec64_to_ns(&ts);
502
503 return find_target_cycles(mdev, target_ns);
504 }
505
perout_conf_real_time(s64 sec,u32 nsec)506 static u64 perout_conf_real_time(s64 sec, u32 nsec)
507 {
508 return (u64)nsec | (u64)sec << 32;
509 }
510
/* Validate a 1PPS periodic-output request and compute its start timestamp.
 * Only a 1-second period is supported; note the '(ns >> 1)' comparison also
 * admits exactly 1s + 1ns because the shift drops the low bit. The produced
 * timestamp is in real-time sec<<32 format or internal-timer cycles,
 * depending on @real_time.
 */
static int perout_conf_1pps(struct mlx5_core_dev *mdev, struct ptp_clock_request *rq,
			    u64 *time_stamp, bool real_time)
{
	struct timespec64 ts;
	s64 ns;

	ts.tv_nsec = rq->perout.period.nsec;
	ts.tv_sec = rq->perout.period.sec;
	ns = timespec64_to_ns(&ts);

	if ((ns >> 1) != 500000000LL)
		return -EINVAL;

	*time_stamp = real_time ? perout_conf_real_time(rq->perout.start.sec, 0) :
		      perout_conf_internal_timer(mdev, rq->perout.start.sec);

	return 0;
}
529
/* Maximum value representable in the MTPPS out_pulse_duration_ns field */
#define MLX5_MAX_PULSE_DURATION (BIT(__mlx5_bit_sz(mtpps_reg, out_pulse_duration_ns)) - 1)

/* Derive the output pulse width (ns) for an NPPS request: the explicit
 * on-time when PTP_PEROUT_DUTY_CYCLE is set, otherwise half the period.
 * Rejects values outside [min_out_pulse_duration_ns, field max].
 */
static int mlx5_perout_conf_out_pulse_duration(struct mlx5_core_dev *mdev,
					       struct ptp_clock_request *rq,
					       u32 *out_pulse_duration_ns)
{
	struct mlx5_pps *pps_info = &mdev->clock.pps_info;
	u32 out_pulse_duration;
	struct timespec64 ts;

	if (rq->perout.flags & PTP_PEROUT_DUTY_CYCLE) {
		ts.tv_sec = rq->perout.on.sec;
		ts.tv_nsec = rq->perout.on.nsec;
		out_pulse_duration = (u32)timespec64_to_ns(&ts);
	} else {
		/* out_pulse_duration_ns should be up to 50% of the
		 * pulse period as default
		 */
		ts.tv_sec = rq->perout.period.sec;
		ts.tv_nsec = rq->perout.period.nsec;
		out_pulse_duration = (u32)timespec64_to_ns(&ts) >> 1;
	}

	if (out_pulse_duration < pps_info->min_out_pulse_duration_ns ||
	    out_pulse_duration > MLX5_MAX_PULSE_DURATION) {
		mlx5_core_err(mdev, "NPPS pulse duration %u is not in [%llu, %lu]\n",
			      out_pulse_duration, pps_info->min_out_pulse_duration_ns,
			      MLX5_MAX_PULSE_DURATION);
		return -EINVAL;
	}
	*out_pulse_duration_ns = out_pulse_duration;

	return 0;
}
563
/* Build the MTPPS parameters for an N-pulses-per-second request on a
 * real-time capable device: validate the period against the device minimum,
 * encode period/start in real-time format, compute the pulse duration, and
 * extend @field_select with the NPPS-specific fields.
 */
static int perout_conf_npps_real_time(struct mlx5_core_dev *mdev, struct ptp_clock_request *rq,
				      u32 *field_select, u32 *out_pulse_duration_ns,
				      u64 *period, u64 *time_stamp)
{
	struct mlx5_pps *pps_info = &mdev->clock.pps_info;
	struct ptp_clock_time *time = &rq->perout.start;
	struct timespec64 ts;

	ts.tv_sec = rq->perout.period.sec;
	ts.tv_nsec = rq->perout.period.nsec;
	if (timespec64_to_ns(&ts) < pps_info->min_npps_period) {
		mlx5_core_err(mdev, "NPPS period is lower than minimal npps period %llu\n",
			      pps_info->min_npps_period);
		return -EINVAL;
	}
	*period = perout_conf_real_time(rq->perout.period.sec, rq->perout.period.nsec);

	if (mlx5_perout_conf_out_pulse_duration(mdev, rq, out_pulse_duration_ns))
		return -EINVAL;

	*time_stamp = perout_conf_real_time(time->sec, time->nsec);
	*field_select |= MLX5_MTPPS_FS_NPPS_PERIOD |
			 MLX5_MTPPS_FS_OUT_PULSE_DURATION_NS;

	return 0;
}
590
mlx5_perout_verify_flags(struct mlx5_core_dev * mdev,unsigned int flags)591 static bool mlx5_perout_verify_flags(struct mlx5_core_dev *mdev, unsigned int flags)
592 {
593 return ((!mlx5_npps_real_time_supported(mdev) && flags) ||
594 (mlx5_npps_real_time_supported(mdev) && flags & ~PTP_PEROUT_DUTY_CYCLE));
595 }
596
mlx5_perout_configure(struct ptp_clock_info * ptp,struct ptp_clock_request * rq,int on)597 static int mlx5_perout_configure(struct ptp_clock_info *ptp,
598 struct ptp_clock_request *rq,
599 int on)
600 {
601 struct mlx5_clock *clock =
602 container_of(ptp, struct mlx5_clock, ptp_info);
603 struct mlx5_core_dev *mdev =
604 container_of(clock, struct mlx5_core_dev, clock);
605 bool rt_mode = mlx5_real_time_mode(mdev);
606 u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {0};
607 u32 out_pulse_duration_ns = 0;
608 u32 field_select = 0;
609 u64 npps_period = 0;
610 u64 time_stamp = 0;
611 u8 pin_mode = 0;
612 u8 pattern = 0;
613 int pin = -1;
614 int err = 0;
615
616 if (!MLX5_PPS_CAP(mdev))
617 return -EOPNOTSUPP;
618
619 /* Reject requests with unsupported flags */
620 if (mlx5_perout_verify_flags(mdev, rq->perout.flags))
621 return -EOPNOTSUPP;
622
623 if (rq->perout.index >= clock->ptp_info.n_pins)
624 return -EINVAL;
625
626 field_select = MLX5_MTPPS_FS_ENABLE;
627 pin = ptp_find_pin(clock->ptp, PTP_PF_PEROUT, rq->perout.index);
628 if (pin < 0)
629 return -EBUSY;
630
631 if (on) {
632 bool rt_mode = mlx5_real_time_mode(mdev);
633
634 pin_mode = MLX5_PIN_MODE_OUT;
635 pattern = MLX5_OUT_PATTERN_PERIODIC;
636
637 if (rt_mode && rq->perout.start.sec > U32_MAX)
638 return -EINVAL;
639
640 field_select |= MLX5_MTPPS_FS_PIN_MODE |
641 MLX5_MTPPS_FS_PATTERN |
642 MLX5_MTPPS_FS_TIME_STAMP;
643
644 if (mlx5_npps_real_time_supported(mdev))
645 err = perout_conf_npps_real_time(mdev, rq, &field_select,
646 &out_pulse_duration_ns, &npps_period,
647 &time_stamp);
648 else
649 err = perout_conf_1pps(mdev, rq, &time_stamp, rt_mode);
650 if (err)
651 return err;
652 }
653
654 MLX5_SET(mtpps_reg, in, pin, pin);
655 MLX5_SET(mtpps_reg, in, pin_mode, pin_mode);
656 MLX5_SET(mtpps_reg, in, pattern, pattern);
657 MLX5_SET(mtpps_reg, in, enable, on);
658 MLX5_SET64(mtpps_reg, in, time_stamp, time_stamp);
659 MLX5_SET(mtpps_reg, in, field_select, field_select);
660 MLX5_SET64(mtpps_reg, in, npps_period, npps_period);
661 MLX5_SET(mtpps_reg, in, out_pulse_duration_ns, out_pulse_duration_ns);
662 err = mlx5_set_mtpps(mdev, in, sizeof(in));
663 if (err)
664 return err;
665
666 if (rt_mode)
667 return 0;
668
669 return mlx5_set_mtppse(mdev, pin, 0,
670 MLX5_EVENT_MODE_REPETETIVE & on);
671 }
672
mlx5_pps_configure(struct ptp_clock_info * ptp,struct ptp_clock_request * rq,int on)673 static int mlx5_pps_configure(struct ptp_clock_info *ptp,
674 struct ptp_clock_request *rq,
675 int on)
676 {
677 struct mlx5_clock *clock =
678 container_of(ptp, struct mlx5_clock, ptp_info);
679
680 clock->pps_info.enabled = !!on;
681 return 0;
682 }
683
mlx5_ptp_enable(struct ptp_clock_info * ptp,struct ptp_clock_request * rq,int on)684 static int mlx5_ptp_enable(struct ptp_clock_info *ptp,
685 struct ptp_clock_request *rq,
686 int on)
687 {
688 switch (rq->type) {
689 case PTP_CLK_REQ_EXTTS:
690 return mlx5_extts_configure(ptp, rq, on);
691 case PTP_CLK_REQ_PEROUT:
692 return mlx5_perout_configure(ptp, rq, on);
693 case PTP_CLK_REQ_PPS:
694 return mlx5_pps_configure(ptp, rq, on);
695 default:
696 return -EOPNOTSUPP;
697 }
698 return 0;
699 }
700
/* MTPPS cap_pin_x_mode bits: which functions a pin supports */
enum {
	MLX5_MTPPS_REG_CAP_PIN_X_MODE_SUPPORT_PPS_IN = BIT(0),
	MLX5_MTPPS_REG_CAP_PIN_X_MODE_SUPPORT_PPS_OUT = BIT(1),
};
705
/* ptp_clock_info.verify: report whether @pin supports the requested
 * function, based on the pin capabilities read from MTPPS at init.
 * Returns 0 when supported, nonzero otherwise; @chan is unused.
 */
static int mlx5_ptp_verify(struct ptp_clock_info *ptp, unsigned int pin,
			   enum ptp_pin_function func, unsigned int chan)
{
	struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock,
						ptp_info);

	switch (func) {
	case PTP_PF_NONE:
		return 0;
	case PTP_PF_EXTTS:
		return !(clock->pps_info.pin_caps[pin] &
			 MLX5_MTPPS_REG_CAP_PIN_X_MODE_SUPPORT_PPS_IN);
	case PTP_PF_PEROUT:
		return !(clock->pps_info.pin_caps[pin] &
			 MLX5_MTPPS_REG_CAP_PIN_X_MODE_SUPPORT_PPS_OUT);
	default:
		return -EOPNOTSUPP;
	}
}
725
/* Template ptp_clock_info copied into each mlx5_clock. n_pins, pps and the
 * enable/verify callbacks stay zero/NULL here and are filled in by
 * mlx5_init_pin_config() when the device exposes PPS pins; n_ext_ts and
 * n_per_out are set from MTPPS caps in mlx5_get_pps_caps().
 */
static const struct ptp_clock_info mlx5_ptp_clock_info = {
	.owner = THIS_MODULE,
	.name = "mlx5_ptp",
	.max_adj = 50000000,
	.n_alarm = 0,
	.n_ext_ts = 0,
	.n_per_out = 0,
	.n_pins = 0,
	.pps = 0,
	.adjfine = mlx5_ptp_adjfine,
	.adjphase = mlx5_ptp_adjphase,
	.adjtime = mlx5_ptp_adjtime,
	.gettimex64 = mlx5_ptp_gettimex,
	.settime64 = mlx5_ptp_settime,
	.enable = NULL,
	.verify = NULL,
};
743
/* Read back the MTPPS register contents for a single pin into @mtpps. */
static int mlx5_query_mtpps_pin_mode(struct mlx5_core_dev *mdev, u8 pin,
				     u32 *mtpps, u32 mtpps_size)
{
	u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {};

	MLX5_SET(mtpps_reg, in, pin, pin);
	return mlx5_core_access_reg(mdev, in, sizeof(in), mtpps, mtpps_size,
				    MLX5_REG_MTPPS, 0, 0);
}
754
/* Map a pin's current HW state (from MTPPS) to a PTP pin function.
 * Disabled pins, query failures, and unknown modes report PTP_PF_NONE.
 */
static int mlx5_get_pps_pin_mode(struct mlx5_clock *clock, u8 pin)
{
	struct mlx5_core_dev *mdev = container_of(clock, struct mlx5_core_dev, clock);

	u32 out[MLX5_ST_SZ_DW(mtpps_reg)] = {};
	int err;

	err = mlx5_query_mtpps_pin_mode(mdev, pin, out, sizeof(out));
	if (err || !MLX5_GET(mtpps_reg, out, enable))
		return PTP_PF_NONE;

	switch (MLX5_GET(mtpps_reg, out, pin_mode)) {
	case MLX5_PIN_MODE_IN:
		return PTP_PF_EXTTS;
	case MLX5_PIN_MODE_OUT:
		return PTP_PF_PEROUT;
	default:
		return PTP_PF_NONE;
	}
}
776
/* Allocate and populate the PTP pin descriptors for the device's PPS pins,
 * seeding each pin's function from the current MTPPS state. On allocation
 * failure the clock is left without pins: enable/verify stay NULL from the
 * template and pps stays 0.
 */
static void mlx5_init_pin_config(struct mlx5_clock *clock)
{
	int i;

	if (!clock->ptp_info.n_pins)
		return;

	clock->ptp_info.pin_config =
			kcalloc(clock->ptp_info.n_pins,
				sizeof(*clock->ptp_info.pin_config),
				GFP_KERNEL);
	if (!clock->ptp_info.pin_config)
		return;
	clock->ptp_info.enable = mlx5_ptp_enable;
	clock->ptp_info.verify = mlx5_ptp_verify;
	clock->ptp_info.pps = 1;

	for (i = 0; i < clock->ptp_info.n_pins; i++) {
		snprintf(clock->ptp_info.pin_config[i].name,
			 sizeof(clock->ptp_info.pin_config[i].name),
			 "mlx5_pps%d", i);
		clock->ptp_info.pin_config[i].index = i;
		clock->ptp_info.pin_config[i].func = mlx5_get_pps_pin_mode(clock, i);
		clock->ptp_info.pin_config[i].chan = 0;
	}
}
803
/* Query MTPPS capabilities: pin counts, NPPS minimum period and pulse
 * duration (capability-gated, stored as 2^log values), and the per-pin
 * supported-mode bitmaps.
 *
 * NOTE(review): the mlx5_query_mtpps() return status is ignored; on failure
 * the zeroed 'out' yields zero caps, which disables PPS features.
 */
static void mlx5_get_pps_caps(struct mlx5_core_dev *mdev)
{
	struct mlx5_clock *clock = &mdev->clock;
	u32 out[MLX5_ST_SZ_DW(mtpps_reg)] = {0};

	mlx5_query_mtpps(mdev, out, sizeof(out));

	clock->ptp_info.n_pins = MLX5_GET(mtpps_reg, out,
					  cap_number_of_pps_pins);
	clock->ptp_info.n_ext_ts = MLX5_GET(mtpps_reg, out,
					    cap_max_num_of_pps_in_pins);
	clock->ptp_info.n_per_out = MLX5_GET(mtpps_reg, out,
					     cap_max_num_of_pps_out_pins);

	if (MLX5_CAP_MCAM_FEATURE(mdev, npps_period))
		clock->pps_info.min_npps_period = 1 << MLX5_GET(mtpps_reg, out,
								cap_log_min_npps_period);
	if (MLX5_CAP_MCAM_FEATURE(mdev, out_pulse_duration_ns))
		clock->pps_info.min_out_pulse_duration_ns = 1 << MLX5_GET(mtpps_reg, out,
									  cap_log_min_out_pulse_duration_ns);

	clock->pps_info.pin_caps[0] = MLX5_GET(mtpps_reg, out, cap_pin_0_mode);
	clock->pps_info.pin_caps[1] = MLX5_GET(mtpps_reg, out, cap_pin_1_mode);
	clock->pps_info.pin_caps[2] = MLX5_GET(mtpps_reg, out, cap_pin_2_mode);
	clock->pps_info.pin_caps[3] = MLX5_GET(mtpps_reg, out, cap_pin_3_mode);
	clock->pps_info.pin_caps[4] = MLX5_GET(mtpps_reg, out, cap_pin_4_mode);
	clock->pps_info.pin_caps[5] = MLX5_GET(mtpps_reg, out, cap_pin_5_mode);
	clock->pps_info.pin_caps[6] = MLX5_GET(mtpps_reg, out, cap_pin_6_mode);
	clock->pps_info.pin_caps[7] = MLX5_GET(mtpps_reg, out, cap_pin_7_mode);
}
834
ts_next_sec(struct timespec64 * ts)835 static void ts_next_sec(struct timespec64 *ts)
836 {
837 ts->tv_sec += 1;
838 ts->tv_nsec = 0;
839 }
840
perout_conf_next_event_timer(struct mlx5_core_dev * mdev,struct mlx5_clock * clock)841 static u64 perout_conf_next_event_timer(struct mlx5_core_dev *mdev,
842 struct mlx5_clock *clock)
843 {
844 struct timespec64 ts;
845 s64 target_ns;
846
847 mlx5_ptp_gettimex(&clock->ptp_info, &ts, NULL);
848 ts_next_sec(&ts);
849 target_ns = timespec64_to_ns(&ts);
850
851 return find_target_cycles(mdev, target_ns);
852 }
853
/* EQ notifier for firmware PPS events. For an EXTTS pin, the HW timestamp
 * is converted (real-time or timecounter path) and forwarded to the PTP
 * core — as a PPSUSR event when PPS is enabled, else as an EXTTS event.
 * For a PEROUT pin, the next second boundary is computed and handed to
 * out_work, which reprograms the pin's next event timestamp.
 */
static int mlx5_pps_event(struct notifier_block *nb,
			  unsigned long type, void *data)
{
	struct mlx5_clock *clock = mlx5_nb_cof(nb, struct mlx5_clock, pps_nb);
	struct ptp_clock_event ptp_event;
	struct mlx5_eqe *eqe = data;
	int pin = eqe->data.pps.pin;
	struct mlx5_core_dev *mdev;
	unsigned long flags;
	u64 ns;

	mdev = container_of(clock, struct mlx5_core_dev, clock);

	switch (clock->ptp_info.pin_config[pin].func) {
	case PTP_PF_EXTTS:
		ptp_event.index = pin;
		ptp_event.timestamp = mlx5_real_time_mode(mdev) ?
			mlx5_real_time_cyc2time(clock,
						be64_to_cpu(eqe->data.pps.time_stamp)) :
			mlx5_timecounter_cyc2time(clock,
						  be64_to_cpu(eqe->data.pps.time_stamp));
		if (clock->pps_info.enabled) {
			ptp_event.type = PTP_CLOCK_PPSUSR;
			ptp_event.pps_times.ts_real =
					ns_to_timespec64(ptp_event.timestamp);
		} else {
			ptp_event.type = PTP_CLOCK_EXTTS;
		}
		/* TODO: clock->ptp can be NULL if ptp_clock_register fails */
		ptp_clock_event(clock->ptp, &ptp_event);
		break;
	case PTP_PF_PEROUT:
		ns = perout_conf_next_event_timer(mdev, clock);
		write_seqlock_irqsave(&clock->lock, flags);
		clock->pps_info.start[pin] = ns;
		write_sequnlock_irqrestore(&clock->lock, flags);
		schedule_work(&clock->pps_info.out_work);
		break;
	default:
		mlx5_core_err(mdev, " Unhandled clock PPS event, func %d\n",
			      clock->ptp_info.pin_config[pin].func);
	}

	return NOTIFY_OK;
}
899
/* Set up the cyclecounter from the device clock frequency capability
 * (41-bit counter, fixed shift, khz-derived mult) and initialize the
 * timecounter to the current wall-clock time.
 */
static void mlx5_timecounter_init(struct mlx5_core_dev *mdev)
{
	struct mlx5_clock *clock = &mdev->clock;
	struct mlx5_timer *timer = &clock->timer;
	u32 dev_freq;

	dev_freq = MLX5_CAP_GEN(mdev, device_frequency_khz);
	timer->cycles.read = read_internal_timer;
	timer->cycles.shift = MLX5_CYCLES_SHIFT;
	timer->cycles.mult = clocksource_khz2mult(dev_freq,
						  timer->cycles.shift);
	/* nominal_c_mult is the adjfine baseline; cycles.mult may drift */
	timer->nominal_c_mult = timer->cycles.mult;
	timer->cycles.mask = CLOCKSOURCE_MASK(41);

	timecounter_init(&timer->tc, &timer->cycles,
			 ktime_to_ns(ktime_get_real()));
}
917
/* Compute how often mlx5_timestamp_overflow() must run so the timecounter
 * is refreshed before the HW counter wraps, then schedule the first run.
 */
static void mlx5_init_overflow_period(struct mlx5_clock *clock)
{
	struct mlx5_core_dev *mdev = container_of(clock, struct mlx5_core_dev, clock);
	struct mlx5_ib_clock_info *clock_info = mdev->clock_info;
	struct mlx5_timer *timer = &clock->timer;
	u64 overflow_cycles;
	u64 frac = 0;
	u64 ns;

	/* Calculate period in seconds to call the overflow watchdog - to make
	 * sure counter is checked at least twice every wrap around.
	 * The period is calculated as the minimum between max HW cycles count
	 * (The clock source mask) and max amount of cycles that can be
	 * multiplied by clock multiplier where the result doesn't exceed
	 * 64bits.
	 */
	overflow_cycles = div64_u64(~0ULL >> 1, timer->cycles.mult);
	overflow_cycles = min(overflow_cycles, div_u64(timer->cycles.mask, 3));

	ns = cyclecounter_cyc2ns(&timer->cycles, overflow_cycles,
				 frac, &frac);
	/* convert ns to jiffies for schedule_delayed_work() */
	do_div(ns, NSEC_PER_SEC / HZ);
	timer->overflow_period = ns;

	INIT_DELAYED_WORK(&timer->overflow_work, mlx5_timestamp_overflow);
	if (timer->overflow_period)
		schedule_delayed_work(&timer->overflow_work, 0);
	else
		mlx5_core_warn(mdev,
			       "invalid overflow period, overflow_work is not scheduled\n");

	/* export the period so userspace readers can refresh accordingly */
	if (clock_info)
		clock_info->overflow_period = timer->overflow_period;
}
952
/* Allocate and seed the page exported to mlx5_ib/userspace for cycles->ns
 * conversion. Allocation failure is non-fatal: mdev->clock_info stays NULL
 * and mlx5_update_clock_info_page() becomes a no-op.
 */
static void mlx5_init_clock_info(struct mlx5_core_dev *mdev)
{
	struct mlx5_clock *clock = &mdev->clock;
	struct mlx5_ib_clock_info *info;
	struct mlx5_timer *timer;

	mdev->clock_info = (struct mlx5_ib_clock_info *)get_zeroed_page(GFP_KERNEL);
	if (!mdev->clock_info) {
		mlx5_core_warn(mdev, "Failed to allocate IB clock info page\n");
		return;
	}

	info = mdev->clock_info;
	timer = &clock->timer;

	info->nsec = timer->tc.nsec;
	info->cycles = timer->tc.cycle_last;
	info->mask = timer->cycles.mask;
	info->mult = timer->nominal_c_mult;
	info->shift = timer->cycles.shift;
	info->frac = timer->tc.frac;
}
975
/* Initialize the free-running timer path: timecounter, shared clock-info
 * page, overflow work, and the PTP info template. In real-time mode the
 * firmware clock is additionally seeded from the kernel's wall clock.
 */
static void mlx5_init_timer_clock(struct mlx5_core_dev *mdev)
{
	struct mlx5_clock *clock = &mdev->clock;

	mlx5_timecounter_init(mdev);
	mlx5_init_clock_info(mdev);
	mlx5_init_overflow_period(clock);
	clock->ptp_info = mlx5_ptp_clock_info;

	if (mlx5_real_time_mode(mdev)) {
		struct timespec64 ts;

		ktime_get_real_ts64(&ts);
		mlx5_ptp_settime(&clock->ptp_info, &ts);
	}
}
992
mlx5_init_pps(struct mlx5_core_dev * mdev)993 static void mlx5_init_pps(struct mlx5_core_dev *mdev)
994 {
995 struct mlx5_clock *clock = &mdev->clock;
996
997 if (!MLX5_PPS_CAP(mdev))
998 return;
999
1000 mlx5_get_pps_caps(mdev);
1001 mlx5_init_pin_config(clock);
1002 }
1003
/* Top-level HW clock init: set up the timer/timecounter, 1PPS structures,
 * register the PTP clock device, and subscribe to PPS EQ events. Bails out
 * early when the device reports no clock frequency. A failed
 * ptp_clock_register() is tolerated: clock->ptp stays NULL.
 */
void mlx5_init_clock(struct mlx5_core_dev *mdev)
{
	struct mlx5_clock *clock = &mdev->clock;

	if (!MLX5_CAP_GEN(mdev, device_frequency_khz)) {
		mlx5_core_warn(mdev, "invalid device_frequency_khz, aborting HW clock init\n");
		return;
	}

	seqlock_init(&clock->lock);
	mlx5_init_timer_clock(mdev);
	INIT_WORK(&clock->pps_info.out_work, mlx5_pps_out);

	/* Configure the PHC */
	clock->ptp_info = mlx5_ptp_clock_info;

	/* Initialize 1PPS data structures */
	mlx5_init_pps(mdev);

	clock->ptp = ptp_clock_register(&clock->ptp_info,
					&mdev->pdev->dev);
	if (IS_ERR(clock->ptp)) {
		mlx5_core_warn(mdev, "ptp_clock_register failed %ld\n",
			       PTR_ERR(clock->ptp));
		clock->ptp = NULL;
	}

	MLX5_NB_INIT(&clock->pps_nb, mlx5_pps_event, PPS_EVENT);
	mlx5_eq_notifier_register(mdev, &clock->pps_nb);
}
1034
/* Tear down the HW clock in reverse init order: unregister the event
 * notifier and PTP device, flush pending work, then release the shared
 * clock-info page and pin configuration. A no-op when init bailed out for
 * lack of a clock frequency.
 */
void mlx5_cleanup_clock(struct mlx5_core_dev *mdev)
{
	struct mlx5_clock *clock = &mdev->clock;

	if (!MLX5_CAP_GEN(mdev, device_frequency_khz))
		return;

	mlx5_eq_notifier_unregister(mdev, &clock->pps_nb);
	if (clock->ptp) {
		ptp_clock_unregister(clock->ptp);
		clock->ptp = NULL;
	}

	/* no new events can queue work now; safe to cancel */
	cancel_work_sync(&clock->pps_info.out_work);
	cancel_delayed_work_sync(&clock->timer.overflow_work);

	if (mdev->clock_info) {
		free_page((unsigned long)mdev->clock_info);
		mdev->clock_info = NULL;
	}

	kfree(clock->ptp_info.pin_config);
}
1058