clock.c
/*
 * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/clocksource.h>
#include <linux/highmem.h>
#include <rdma/mlx5-abi.h>
#include "lib/eq.h"
#include "en.h"
#include "clock.h"

enum {
	MLX5_CYCLES_SHIFT	= 23
};

enum {
	MLX5_PIN_MODE_IN		= 0x0,
	MLX5_PIN_MODE_OUT		= 0x1,
};

enum {
	MLX5_OUT_PATTERN_PULSE		= 0x0,
	MLX5_OUT_PATTERN_PERIODIC	= 0x1,
};

enum {
	MLX5_EVENT_MODE_DISABLE	= 0x0,
	MLX5_EVENT_MODE_REPETETIVE	= 0x1,
	MLX5_EVENT_MODE_ONCE_TILL_ARM	= 0x2,
};

enum {
	MLX5_MTPPS_FS_ENABLE			= BIT(0x0),
	MLX5_MTPPS_FS_PATTERN			= BIT(0x2),
	MLX5_MTPPS_FS_PIN_MODE			= BIT(0x3),
	MLX5_MTPPS_FS_TIME_STAMP		= BIT(0x4),
	MLX5_MTPPS_FS_OUT_PULSE_DURATION	= BIT(0x5),
	MLX5_MTPPS_FS_ENH_OUT_PER_ADJ		= BIT(0x7),
};

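/* Read callback for the cyclecounter: sample the device's free-running
 * internal timer and mask the result to the counter width.
 */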
static u64 read_internal_timer(const struct cyclecounter *cc)
{
	struct mlx5_clock *clock = container_of(cc, struct mlx5_clock, cycles);
	struct mlx5_core_dev *mdev = container_of(clock, struct mlx5_core_dev,
						  clock);

	return mlx5_read_internal_timer(mdev, NULL) & cc->mask;
}

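/* Publish the current timecounter state to the page shared with
 * userspace. The sign field doubles as a sequence counter: it is
 * marked as updating before the write and advanced past the updating
 * bit afterwards, letting readers detect and retry torn reads.
 */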
static void mlx5_update_clock_info_page(struct mlx5_core_dev *mdev)
{
	struct mlx5_ib_clock_info *clock_info = mdev->clock_info;
	struct mlx5_clock *clock = &mdev->clock;
	u32 sign;

	if (!clock_info)
		return;

	sign = smp_load_acquire(&clock_info->sign);
	smp_store_mb(clock_info->sign,
		     sign | MLX5_IB_CLOCK_INFO_KERNEL_UPDATING);

	clock_info->cycles = clock->tc.cycle_last;
	clock_info->mult   = clock->cycles.mult;
	clock_info->nsec   = clock->tc.nsec;
	clock_info->frac   = clock->tc.frac;

	smp_store_release(&clock_info->sign,
			  sign + MLX5_IB_CLOCK_INFO_KERNEL_UPDATING * 2);
}

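/* Worker that re-arms pending periodic outputs: for each pin with a
 * stored start time, clear the pending value under the clock seqlock
 * and program that timestamp into the MTPPS register.
 */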
static void mlx5_pps_out(struct work_struct *work)
{
	struct mlx5_pps *pps_info = container_of(work, struct mlx5_pps,
						 out_work);
	struct mlx5_clock *clock = container_of(pps_info, struct mlx5_clock,
						pps_info);
	struct mlx5_core_dev *mdev = container_of(clock, struct mlx5_core_dev,
						  clock);
	u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {0};
	unsigned long flags;
	int i;

	for (i = 0; i < clock->ptp_info.n_pins; i++) {
		u64 tstart;

		write_seqlock_irqsave(&clock->lock, flags);
		tstart = clock->pps_info.start[i];
		clock->pps_info.start[i] = 0;
		write_sequnlock_irqrestore(&clock->lock, flags);
		if (!tstart)
			continue;

		MLX5_SET(mtpps_reg, in, pin, i);
		MLX5_SET64(mtpps_reg, in, time_stamp, tstart);
		MLX5_SET(mtpps_reg, in, field_select, MLX5_MTPPS_FS_TIME_STAMP);
		mlx5_set_mtpps(mdev, in, sizeof(in));
	}
}

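/* Overflow watchdog: periodically read the timecounter so the cycle
 * counter is observed at least twice per wrap-around, refresh the
 * shared clock info page, and re-schedule itself.
 */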
static void mlx5_timestamp_overflow(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);
	struct mlx5_clock *clock = container_of(dwork, struct mlx5_clock,
						overflow_work);
	unsigned long flags;

	write_seqlock_irqsave(&clock->lock, flags);
	timecounter_read(&clock->tc);
	mlx5_update_clock_info_page(clock->mdev);
	write_sequnlock_irqrestore(&clock->lock, flags);
	schedule_delayed_work(&clock->overflow_work, clock->overflow_period);
}

static int mlx5_ptp_settime(struct ptp_clock_info *ptp,
			    const struct timespec64 *ts)
{
	struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock,
						 ptp_info);
	u64 ns = timespec64_to_ns(ts);
	unsigned long flags;

	write_seqlock_irqsave(&clock->lock, flags);
	timecounter_init(&clock->tc, &clock->cycles, ns);
	mlx5_update_clock_info_page(clock->mdev);
	write_sequnlock_irqrestore(&clock->lock, flags);

	return 0;
}

static int mlx5_ptp_gettimex(struct ptp_clock_info *ptp, struct timespec64 *ts,
			     struct ptp_system_timestamp *sts)
{
	struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock,
						ptp_info);
	struct mlx5_core_dev *mdev = container_of(clock, struct mlx5_core_dev,
						  clock);
	unsigned long flags;
	u64 cycles, ns;

	write_seqlock_irqsave(&clock->lock, flags);
	cycles = mlx5_read_internal_timer(mdev, sts);
	ns = timecounter_cyc2time(&clock->tc, cycles);
	write_sequnlock_irqrestore(&clock->lock, flags);

	*ts = ns_to_timespec64(ns);

	return 0;
}

static int mlx5_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
{
	struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock,
						ptp_info);
	unsigned long flags;

	write_seqlock_irqsave(&clock->lock, flags);
	timecounter_adjtime(&clock->tc, delta);
	mlx5_update_clock_info_page(clock->mdev);
	write_sequnlock_irqrestore(&clock->lock, flags);

	return 0;
}

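/* Frequency adjustment in parts per billion: derive the multiplier
 * correction as diff = nominal_c_mult * |delta| / 10^9 and apply it
 * relative to the nominal multiplier, so adjustments do not compound.
 */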
static int mlx5_ptp_adjfreq(struct ptp_clock_info *ptp, s32 delta)
{
	u64 adj;
	u32 diff;
	unsigned long flags;
	int neg_adj = 0;
	struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock,
						ptp_info);

	if (delta < 0) {
		neg_adj = 1;
		delta = -delta;
	}

	adj = clock->nominal_c_mult;
	adj *= delta;
	diff = div_u64(adj, 1000000000ULL);

	write_seqlock_irqsave(&clock->lock, flags);
	timecounter_read(&clock->tc);
	clock->cycles.mult = neg_adj ? clock->nominal_c_mult - diff :
				       clock->nominal_c_mult + diff;
	mlx5_update_clock_info_page(clock->mdev);
	write_sequnlock_irqrestore(&clock->lock, flags);

	return 0;
}

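/* Configure a pin for external timestamping (EXTTS): validate the
 * request flags, select the edge via the pattern field, program the
 * pin through MTPPS and (dis)arm event reporting through MTPPSE.
 */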
static int mlx5_extts_configure(struct ptp_clock_info *ptp,
				struct ptp_clock_request *rq,
				int on)
{
	struct mlx5_clock *clock =
			container_of(ptp, struct mlx5_clock, ptp_info);
	struct mlx5_core_dev *mdev =
			container_of(clock, struct mlx5_core_dev, clock);
	u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {0};
	u32 field_select = 0;
	u8 pin_mode = 0;
	u8 pattern = 0;
	int pin = -1;
	int err = 0;

	if (!MLX5_PPS_CAP(mdev))
		return -EOPNOTSUPP;

	/* Reject requests with unsupported flags */
	if (rq->extts.flags & ~(PTP_ENABLE_FEATURE |
				PTP_RISING_EDGE |
				PTP_FALLING_EDGE |
				PTP_STRICT_FLAGS))
		return -EOPNOTSUPP;

	/* Reject requests to enable time stamping on both edges. */
	if ((rq->extts.flags & PTP_STRICT_FLAGS) &&
	    (rq->extts.flags & PTP_ENABLE_FEATURE) &&
	    (rq->extts.flags & PTP_EXTTS_EDGES) == PTP_EXTTS_EDGES)
		return -EOPNOTSUPP;

	if (rq->extts.index >= clock->ptp_info.n_pins)
		return -EINVAL;

	if (on) {
		pin = ptp_find_pin(clock->ptp, PTP_PF_EXTTS, rq->extts.index);
		if (pin < 0)
			return -EBUSY;
		pin_mode = MLX5_PIN_MODE_IN;
		pattern = !!(rq->extts.flags & PTP_FALLING_EDGE);
		field_select = MLX5_MTPPS_FS_PIN_MODE |
			       MLX5_MTPPS_FS_PATTERN |
			       MLX5_MTPPS_FS_ENABLE;
	} else {
		pin = rq->extts.index;
		field_select = MLX5_MTPPS_FS_ENABLE;
	}

	MLX5_SET(mtpps_reg, in, pin, pin);
	MLX5_SET(mtpps_reg, in, pin_mode, pin_mode);
	MLX5_SET(mtpps_reg, in, pattern, pattern);
	MLX5_SET(mtpps_reg, in, enable, on);
	MLX5_SET(mtpps_reg, in, field_select, field_select);

	err = mlx5_set_mtpps(mdev, in, sizeof(in));
	if (err)
		return err;

	return mlx5_set_mtppse(mdev, pin, 0,
			       MLX5_EVENT_MODE_REPETETIVE & on);
}

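/* Configure a pin for periodic output (PEROUT). Only a period of
 * exactly one second is supported; the requested start time is
 * converted from nanoseconds to an absolute device cycle count that
 * is handed to the hardware in the MTPPS time_stamp field.
 */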
static int mlx5_perout_configure(struct ptp_clock_info *ptp,
				 struct ptp_clock_request *rq,
				 int on)
{
	struct mlx5_clock *clock =
			container_of(ptp, struct mlx5_clock, ptp_info);
	struct mlx5_core_dev *mdev =
			container_of(clock, struct mlx5_core_dev, clock);
	u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {0};
	u64 nsec_now, nsec_delta, time_stamp = 0;
	u64 cycles_now, cycles_delta;
	struct timespec64 ts;
	unsigned long flags;
	u32 field_select = 0;
	u8 pin_mode = 0;
	u8 pattern = 0;
	int pin = -1;
	int err = 0;
	s64 ns;

	if (!MLX5_PPS_CAP(mdev))
		return -EOPNOTSUPP;

	/* Reject requests with unsupported flags */
	if (rq->perout.flags)
		return -EOPNOTSUPP;

	if (rq->perout.index >= clock->ptp_info.n_pins)
		return -EINVAL;

	if (on) {
		pin = ptp_find_pin(clock->ptp, PTP_PF_PEROUT,
				   rq->perout.index);
		if (pin < 0)
			return -EBUSY;

		pin_mode = MLX5_PIN_MODE_OUT;
		pattern = MLX5_OUT_PATTERN_PERIODIC;
		ts.tv_sec = rq->perout.period.sec;
		ts.tv_nsec = rq->perout.period.nsec;
		ns = timespec64_to_ns(&ts);

		if ((ns >> 1) != 500000000LL)
			return -EINVAL;

		ts.tv_sec = rq->perout.start.sec;
		ts.tv_nsec = rq->perout.start.nsec;
		ns = timespec64_to_ns(&ts);
		cycles_now = mlx5_read_internal_timer(mdev, NULL);
		write_seqlock_irqsave(&clock->lock, flags);
		nsec_now = timecounter_cyc2time(&clock->tc, cycles_now);
		nsec_delta = ns - nsec_now;
		cycles_delta = div64_u64(nsec_delta << clock->cycles.shift,
					 clock->cycles.mult);
		write_sequnlock_irqrestore(&clock->lock, flags);
		time_stamp = cycles_now + cycles_delta;
		field_select = MLX5_MTPPS_FS_PIN_MODE |
			       MLX5_MTPPS_FS_PATTERN |
			       MLX5_MTPPS_FS_ENABLE |
			       MLX5_MTPPS_FS_TIME_STAMP;
	} else {
		pin = rq->perout.index;
		field_select = MLX5_MTPPS_FS_ENABLE;
	}

	MLX5_SET(mtpps_reg, in, pin, pin);
	MLX5_SET(mtpps_reg, in, pin_mode, pin_mode);
	MLX5_SET(mtpps_reg, in, pattern, pattern);
	MLX5_SET(mtpps_reg, in, enable, on);
	MLX5_SET64(mtpps_reg, in, time_stamp, time_stamp);
	MLX5_SET(mtpps_reg, in, field_select, field_select);

	err = mlx5_set_mtpps(mdev, in, sizeof(in));
	if (err)
		return err;

	return mlx5_set_mtppse(mdev, pin, 0,
			       MLX5_EVENT_MODE_REPETETIVE & on);
}

static int mlx5_pps_configure(struct ptp_clock_info *ptp,
			      struct ptp_clock_request *rq,
			      int on)
{
	struct mlx5_clock *clock =
			container_of(ptp, struct mlx5_clock, ptp_info);

	clock->pps_info.enabled = !!on;
	return 0;
}

static int mlx5_ptp_enable(struct ptp_clock_info *ptp,
			   struct ptp_clock_request *rq,
			   int on)
{
	switch (rq->type) {
	case PTP_CLK_REQ_EXTTS:
		return mlx5_extts_configure(ptp, rq, on);
	case PTP_CLK_REQ_PEROUT:
		return mlx5_perout_configure(ptp, rq, on);
	case PTP_CLK_REQ_PPS:
		return mlx5_pps_configure(ptp, rq, on);
	default:
		return -EOPNOTSUPP;
	}
	return 0;
}

static int mlx5_ptp_verify(struct ptp_clock_info *ptp, unsigned int pin,
			   enum ptp_pin_function func, unsigned int chan)
{
	return (func == PTP_PF_PHYSYNC) ? -EOPNOTSUPP : 0;
}

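/* Template ptp_clock_info. Pin counts and the enable/verify callbacks
 * are zero/NULL here and filled in at init time once the hardware
 * capabilities are known.
 */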
static const struct ptp_clock_info mlx5_ptp_clock_info = {
	.owner		= THIS_MODULE,
	.name		= "mlx5_p2p",
	.max_adj	= 100000000,
	.n_alarm	= 0,
	.n_ext_ts	= 0,
	.n_per_out	= 0,
	.n_pins		= 0,
	.pps		= 0,
	.adjfreq	= mlx5_ptp_adjfreq,
	.adjtime	= mlx5_ptp_adjtime,
	.gettimex64	= mlx5_ptp_gettimex,
	.settime64	= mlx5_ptp_settime,
	.enable		= NULL,
	.verify		= NULL,
};

static int mlx5_init_pin_config(struct mlx5_clock *clock)
{
	int i;

	clock->ptp_info.pin_config =
			kcalloc(clock->ptp_info.n_pins,
				sizeof(*clock->ptp_info.pin_config),
				GFP_KERNEL);
	if (!clock->ptp_info.pin_config)
		return -ENOMEM;
	clock->ptp_info.enable = mlx5_ptp_enable;
	clock->ptp_info.verify = mlx5_ptp_verify;
	clock->ptp_info.pps = 1;

	for (i = 0; i < clock->ptp_info.n_pins; i++) {
		snprintf(clock->ptp_info.pin_config[i].name,
			 sizeof(clock->ptp_info.pin_config[i].name),
			 "mlx5_pps%d", i);
		clock->ptp_info.pin_config[i].index = i;
		clock->ptp_info.pin_config[i].func = PTP_PF_NONE;
		clock->ptp_info.pin_config[i].chan = i;
	}

	return 0;
}

static void mlx5_get_pps_caps(struct mlx5_core_dev *mdev)
{
	struct mlx5_clock *clock = &mdev->clock;
	u32 out[MLX5_ST_SZ_DW(mtpps_reg)] = {0};

	mlx5_query_mtpps(mdev, out, sizeof(out));

	clock->ptp_info.n_pins = MLX5_GET(mtpps_reg, out,
					  cap_number_of_pps_pins);
	clock->ptp_info.n_ext_ts = MLX5_GET(mtpps_reg, out,
					    cap_max_num_of_pps_in_pins);
	clock->ptp_info.n_per_out = MLX5_GET(mtpps_reg, out,
					     cap_max_num_of_pps_out_pins);

	clock->pps_info.pin_caps[0] = MLX5_GET(mtpps_reg, out, cap_pin_0_mode);
	clock->pps_info.pin_caps[1] = MLX5_GET(mtpps_reg, out, cap_pin_1_mode);
	clock->pps_info.pin_caps[2] = MLX5_GET(mtpps_reg, out, cap_pin_2_mode);
	clock->pps_info.pin_caps[3] = MLX5_GET(mtpps_reg, out, cap_pin_3_mode);
	clock->pps_info.pin_caps[4] = MLX5_GET(mtpps_reg, out, cap_pin_4_mode);
	clock->pps_info.pin_caps[5] = MLX5_GET(mtpps_reg, out, cap_pin_5_mode);
	clock->pps_info.pin_caps[6] = MLX5_GET(mtpps_reg, out, cap_pin_6_mode);
	clock->pps_info.pin_caps[7] = MLX5_GET(mtpps_reg, out, cap_pin_7_mode);
}

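/* Notifier for device PPS events. EXTTS pins forward the hardware
 * timestamp to the PTP core (as a PPSUSR event when 1PPS is enabled);
 * PEROUT pins compute the cycle count of the next full second and
 * defer re-arming the output to mlx5_pps_out.
 */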
static int mlx5_pps_event(struct notifier_block *nb,
			  unsigned long type, void *data)
{
	struct mlx5_clock *clock = mlx5_nb_cof(nb, struct mlx5_clock, pps_nb);
	struct mlx5_core_dev *mdev = clock->mdev;
	struct ptp_clock_event ptp_event;
	u64 cycles_now, cycles_delta;
	u64 nsec_now, nsec_delta, ns;
	struct mlx5_eqe *eqe = data;
	int pin = eqe->data.pps.pin;
	struct timespec64 ts;
	unsigned long flags;

	switch (clock->ptp_info.pin_config[pin].func) {
	case PTP_PF_EXTTS:
		ptp_event.index = pin;
		ptp_event.timestamp = timecounter_cyc2time(&clock->tc,
					be64_to_cpu(eqe->data.pps.time_stamp));
		if (clock->pps_info.enabled) {
			ptp_event.type = PTP_CLOCK_PPSUSR;
			ptp_event.pps_times.ts_real =
					ns_to_timespec64(ptp_event.timestamp);
		} else {
			ptp_event.type = PTP_CLOCK_EXTTS;
		}
		/* TODO: clock->ptp can be NULL if ptp_clock_register fails */
		ptp_clock_event(clock->ptp, &ptp_event);
		break;
	case PTP_PF_PEROUT:
		mlx5_ptp_gettimex(&clock->ptp_info, &ts, NULL);
		cycles_now = mlx5_read_internal_timer(mdev, NULL);
		ts.tv_sec += 1;
		ts.tv_nsec = 0;
		ns = timespec64_to_ns(&ts);
		write_seqlock_irqsave(&clock->lock, flags);
		nsec_now = timecounter_cyc2time(&clock->tc, cycles_now);
		nsec_delta = ns - nsec_now;
		cycles_delta = div64_u64(nsec_delta << clock->cycles.shift,
					 clock->cycles.mult);
		clock->pps_info.start[pin] = cycles_now + cycles_delta;
		schedule_work(&clock->pps_info.out_work);
		write_sequnlock_irqrestore(&clock->lock, flags);
		break;
	default:
		mlx5_core_err(mdev, "Unhandled clock PPS event, func %d\n",
			      clock->ptp_info.pin_config[pin].func);
	}

	return NOTIFY_OK;
}

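/* Set up the hardware clock: build a cyclecounter from the reported
 * device frequency, start the overflow watchdog, export the clock
 * parameters to userspace, register the PTP clock and subscribe to
 * PPS events.
 */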
void mlx5_init_clock(struct mlx5_core_dev *mdev)
{
	struct mlx5_clock *clock = &mdev->clock;
	u64 overflow_cycles;
	u64 ns;
	u64 frac = 0;
	u32 dev_freq;

	dev_freq = MLX5_CAP_GEN(mdev, device_frequency_khz);
	if (!dev_freq) {
		mlx5_core_warn(mdev, "invalid device_frequency_khz, aborting HW clock init\n");
		return;
	}
	seqlock_init(&clock->lock);
	clock->cycles.read = read_internal_timer;
	clock->cycles.shift = MLX5_CYCLES_SHIFT;
	clock->cycles.mult = clocksource_khz2mult(dev_freq,
						  clock->cycles.shift);
	clock->nominal_c_mult = clock->cycles.mult;
	clock->cycles.mask = CLOCKSOURCE_MASK(41);
	clock->mdev = mdev;

	timecounter_init(&clock->tc, &clock->cycles,
			 ktime_to_ns(ktime_get_real()));

	/* Calculate the period (in jiffies) for the overflow watchdog - to
	 * make sure the counter is checked at least twice every wrap around.
	 * The period is calculated as the minimum between max HW cycles count
	 * (The clock source mask) and max amount of cycles that can be
	 * multiplied by clock multiplier where the result doesn't exceed
	 * 64bits.
	 */
	overflow_cycles = div64_u64(~0ULL >> 1, clock->cycles.mult);
	overflow_cycles = min(overflow_cycles, div_u64(clock->cycles.mask, 3));

	ns = cyclecounter_cyc2ns(&clock->cycles, overflow_cycles,
				 frac, &frac);
	do_div(ns, NSEC_PER_SEC / HZ);
	clock->overflow_period = ns;

	mdev->clock_info =
		(struct mlx5_ib_clock_info *)get_zeroed_page(GFP_KERNEL);
	if (mdev->clock_info) {
		mdev->clock_info->nsec = clock->tc.nsec;
		mdev->clock_info->cycles = clock->tc.cycle_last;
		mdev->clock_info->mask = clock->cycles.mask;
		mdev->clock_info->mult = clock->nominal_c_mult;
		mdev->clock_info->shift = clock->cycles.shift;
		mdev->clock_info->frac = clock->tc.frac;
		mdev->clock_info->overflow_period = clock->overflow_period;
	}

	INIT_WORK(&clock->pps_info.out_work, mlx5_pps_out);
	INIT_DELAYED_WORK(&clock->overflow_work, mlx5_timestamp_overflow);
	if (clock->overflow_period)
		schedule_delayed_work(&clock->overflow_work, 0);
	else
		mlx5_core_warn(mdev, "invalid overflow period, overflow_work is not scheduled\n");

	/* Configure the PHC */
	clock->ptp_info = mlx5_ptp_clock_info;

	/* Initialize 1PPS data structures */
	if (MLX5_PPS_CAP(mdev))
		mlx5_get_pps_caps(mdev);
	if (clock->ptp_info.n_pins)
		mlx5_init_pin_config(clock);

	clock->ptp = ptp_clock_register(&clock->ptp_info,
					&mdev->pdev->dev);
	if (IS_ERR(clock->ptp)) {
		mlx5_core_warn(mdev, "ptp_clock_register failed %ld\n",
			       PTR_ERR(clock->ptp));
		clock->ptp = NULL;
	}

	MLX5_NB_INIT(&clock->pps_nb, mlx5_pps_event, PPS_EVENT);
	mlx5_eq_notifier_register(mdev, &clock->pps_nb);
}

void mlx5_cleanup_clock(struct mlx5_core_dev *mdev)
{
	struct mlx5_clock *clock = &mdev->clock;

	if (!MLX5_CAP_GEN(mdev, device_frequency_khz))
		return;

	mlx5_eq_notifier_unregister(mdev, &clock->pps_nb);
	if (clock->ptp) {
		ptp_clock_unregister(clock->ptp);
		clock->ptp = NULL;
	}

	cancel_work_sync(&clock->pps_info.out_work);
	cancel_delayed_work_sync(&clock->overflow_work);

	if (mdev->clock_info) {
		free_page((unsigned long)mdev->clock_info);
		mdev->clock_info = NULL;
	}

	kfree(clock->ptp_info.pin_config);
}