// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * PTP 1588 clock support - character device implementation.
 *
 * Copyright (C) 2010 OMICRON electronics GmbH
 */
#include <linux/module.h>
#include <linux/posix-clock.h>
#include <linux/poll.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/timekeeping.h>

#include <linux/nospec.h>

#include "ptp_private.h"
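
/*
 * Ask the driver to disable whatever auxiliary function (external
 * timestamping or periodic output) currently occupies the given channel,
 * so that the pin can be reassigned.
 */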
static int ptp_disable_pinfunc(struct ptp_clock_info *ops,
			       enum ptp_pin_function func, unsigned int chan)
{
	struct ptp_clock_request rq;
	int err = 0;

	memset(&rq, 0, sizeof(rq));

	switch (func) {
	case PTP_PF_NONE:
		break;
	case PTP_PF_EXTTS:
		rq.type = PTP_CLK_REQ_EXTTS;
		rq.extts.index = chan;
		err = ops->enable(ops, &rq, 0);
		break;
	case PTP_PF_PEROUT:
		rq.type = PTP_CLK_REQ_PEROUT;
		rq.perout.index = chan;
		err = ops->enable(ops, &rq, 0);
		break;
	case PTP_PF_PHYSYNC:
		break;
	default:
		return -EINVAL;
	}

	return err;
}

int ptp_set_pinfunc(struct ptp_clock *ptp, unsigned int pin,
		    enum ptp_pin_function func, unsigned int chan)
{
	struct ptp_clock_info *info = ptp->info;
	struct ptp_pin_desc *pin1 = NULL, *pin2 = &info->pin_config[pin];
	unsigned int i;

	/* Check to see if any other pin previously had this function. */
	for (i = 0; i < info->n_pins; i++) {
		if (info->pin_config[i].func == func &&
		    info->pin_config[i].chan == chan) {
			pin1 = &info->pin_config[i];
			break;
		}
	}

	/* Check the desired function and channel. */
	switch (func) {
	case PTP_PF_NONE:
		break;
	case PTP_PF_EXTTS:
		if (chan >= info->n_ext_ts)
			return -EINVAL;
		break;
	case PTP_PF_PEROUT:
		if (chan >= info->n_per_out)
			return -EINVAL;
		break;
	case PTP_PF_PHYSYNC:
		if (chan != 0)
			return -EINVAL;
		break;
	default:
		return -EINVAL;
	}

	if (info->verify(info, pin, func, chan)) {
		pr_err("driver cannot use function %u on pin %u\n", func, pin);
		return -EOPNOTSUPP;
	}

	/* Disable whatever function was previously assigned. */
	if (pin1) {
		ptp_disable_pinfunc(info, func, chan);
		pin1->func = PTP_PF_NONE;
		pin1->chan = 0;
	}
	ptp_disable_pinfunc(info, pin2->func, pin2->chan);
	pin2->func = func;
	pin2->chan = chan;

	return 0;
}

int ptp_open(struct posix_clock *pc, fmode_t fmode)
{
	return 0;
}
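
/*
 * Handle both the original and the "2" variants of the PTP ioctls; where
 * applicable, the newer variants validate flags and reserved fields
 * strictly instead of silently masking them off.
 */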

long ptp_ioctl(struct posix_clock *pc, unsigned int cmd, unsigned long arg)
{
	struct ptp_clock *ptp = container_of(pc, struct ptp_clock, clock);
	struct ptp_sys_offset_extended *extoff = NULL;
	struct ptp_sys_offset_precise precise_offset;
	struct system_device_crosststamp xtstamp;
	struct ptp_clock_info *ops = ptp->info;
	struct ptp_sys_offset *sysoff = NULL;
	struct ptp_system_timestamp sts;
	struct ptp_clock_request req;
	struct ptp_clock_caps caps;
	struct ptp_clock_time *pct;
	unsigned int i, pin_index;
	struct ptp_pin_desc pd;
	struct timespec64 ts;
	int enable, err = 0;

	switch (cmd) {

	case PTP_CLOCK_GETCAPS:
	case PTP_CLOCK_GETCAPS2:
		memset(&caps, 0, sizeof(caps));

		caps.max_adj = ptp->info->max_adj;
		caps.n_alarm = ptp->info->n_alarm;
		caps.n_ext_ts = ptp->info->n_ext_ts;
		caps.n_per_out = ptp->info->n_per_out;
		caps.pps = ptp->info->pps;
		caps.n_pins = ptp->info->n_pins;
		caps.cross_timestamping = ptp->info->getcrosststamp != NULL;
		caps.adjust_phase = ptp->info->adjphase != NULL &&
				    ptp->info->getmaxphase != NULL;
		if (caps.adjust_phase)
			caps.max_phase_adj = ptp->info->getmaxphase(ptp->info);
		if (copy_to_user((void __user *)arg, &caps, sizeof(caps)))
			err = -EFAULT;
		break;
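
	/*
	 * External timestamp requests: the REQUEST2 variant sets
	 * PTP_STRICT_FLAGS so that drivers reject flag combinations they
	 * do not actually support.
	 */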

	case PTP_EXTTS_REQUEST:
	case PTP_EXTTS_REQUEST2:
		memset(&req, 0, sizeof(req));

		if (copy_from_user(&req.extts, (void __user *)arg,
				   sizeof(req.extts))) {
			err = -EFAULT;
			break;
		}
		if (cmd == PTP_EXTTS_REQUEST2) {
			/* Tell the drivers to check the flags carefully. */
			req.extts.flags |= PTP_STRICT_FLAGS;
			/* Make sure no reserved bit is set. */
			if ((req.extts.flags & ~PTP_EXTTS_VALID_FLAGS) ||
			    req.extts.rsv[0] || req.extts.rsv[1]) {
				err = -EINVAL;
				break;
			}
			/* Ensure one of the rising/falling edge bits is set. */
			if ((req.extts.flags & PTP_ENABLE_FEATURE) &&
			    (req.extts.flags & PTP_EXTTS_EDGES) == 0) {
				err = -EINVAL;
				break;
			}
		} else if (cmd == PTP_EXTTS_REQUEST) {
			req.extts.flags &= PTP_EXTTS_V1_VALID_FLAGS;
			req.extts.rsv[0] = 0;
			req.extts.rsv[1] = 0;
		}
		if (req.extts.index >= ops->n_ext_ts) {
			err = -EINVAL;
			break;
		}
		req.type = PTP_CLK_REQ_EXTTS;
		enable = req.extts.flags & PTP_ENABLE_FEATURE ? 1 : 0;
		if (mutex_lock_interruptible(&ptp->pincfg_mux))
			return -ERESTARTSYS;
		err = ops->enable(ops, &req, enable);
		mutex_unlock(&ptp->pincfg_mux);
		break;
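
	/*
	 * Periodic output requests: validate the optional duty cycle and
	 * phase fields before handing the request to the driver.
	 */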

	case PTP_PEROUT_REQUEST:
	case PTP_PEROUT_REQUEST2:
		memset(&req, 0, sizeof(req));

		if (copy_from_user(&req.perout, (void __user *)arg,
				   sizeof(req.perout))) {
			err = -EFAULT;
			break;
		}
		if (cmd == PTP_PEROUT_REQUEST2) {
			struct ptp_perout_request *perout = &req.perout;

			if (perout->flags & ~PTP_PEROUT_VALID_FLAGS) {
				err = -EINVAL;
				break;
			}
			/*
			 * The "on" field has undefined meaning if
			 * PTP_PEROUT_DUTY_CYCLE isn't set; treat it as
			 * reserved, which must be zero.
			 */
			if (!(perout->flags & PTP_PEROUT_DUTY_CYCLE) &&
			    (perout->rsv[0] || perout->rsv[1] ||
			     perout->rsv[2] || perout->rsv[3])) {
				err = -EINVAL;
				break;
			}
			if (perout->flags & PTP_PEROUT_DUTY_CYCLE) {
				/* The on time must not exceed the period. */
				if (perout->on.sec > perout->period.sec ||
				    (perout->on.sec == perout->period.sec &&
				     perout->on.nsec > perout->period.nsec)) {
					err = -ERANGE;
					break;
				}
			}
			if (perout->flags & PTP_PEROUT_PHASE) {
				/*
				 * The phase is specified modulo the period,
				 * so anything equal to or larger than one
				 * period is invalid.
				 */
				if (perout->phase.sec > perout->period.sec ||
				    (perout->phase.sec == perout->period.sec &&
				     perout->phase.nsec >= perout->period.nsec)) {
					err = -ERANGE;
					break;
				}
			}
		} else if (cmd == PTP_PEROUT_REQUEST) {
			req.perout.flags &= PTP_PEROUT_V1_VALID_FLAGS;
			req.perout.rsv[0] = 0;
			req.perout.rsv[1] = 0;
			req.perout.rsv[2] = 0;
			req.perout.rsv[3] = 0;
		}
		if (req.perout.index >= ops->n_per_out) {
			err = -EINVAL;
			break;
		}
		req.type = PTP_CLK_REQ_PEROUT;
		enable = req.perout.period.sec || req.perout.period.nsec;
		if (mutex_lock_interruptible(&ptp->pincfg_mux))
			return -ERESTARTSYS;
		err = ops->enable(ops, &req, enable);
		mutex_unlock(&ptp->pincfg_mux);
		break;

	case PTP_ENABLE_PPS:
	case PTP_ENABLE_PPS2:
		memset(&req, 0, sizeof(req));

		if (!capable(CAP_SYS_TIME))
			return -EPERM;
		req.type = PTP_CLK_REQ_PPS;
		enable = arg ? 1 : 0;
		if (mutex_lock_interruptible(&ptp->pincfg_mux))
			return -ERESTARTSYS;
		err = ops->enable(ops, &req, enable);
		mutex_unlock(&ptp->pincfg_mux);
		break;
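
	/*
	 * Report a device/system cross timestamp taken by the driver's
	 * getcrosststamp() callback, converted to the ptp_clock_time
	 * layout expected by user space.
	 */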

	case PTP_SYS_OFFSET_PRECISE:
	case PTP_SYS_OFFSET_PRECISE2:
		if (!ptp->info->getcrosststamp) {
			err = -EOPNOTSUPP;
			break;
		}
		err = ptp->info->getcrosststamp(ptp->info, &xtstamp);
		if (err)
			break;

		memset(&precise_offset, 0, sizeof(precise_offset));
		ts = ktime_to_timespec64(xtstamp.device);
		precise_offset.device.sec = ts.tv_sec;
		precise_offset.device.nsec = ts.tv_nsec;
		ts = ktime_to_timespec64(xtstamp.sys_realtime);
		precise_offset.sys_realtime.sec = ts.tv_sec;
		precise_offset.sys_realtime.nsec = ts.tv_nsec;
		ts = ktime_to_timespec64(xtstamp.sys_monoraw);
		precise_offset.sys_monoraw.sec = ts.tv_sec;
		precise_offset.sys_monoraw.nsec = ts.tv_nsec;
		if (copy_to_user((void __user *)arg, &precise_offset,
				 sizeof(precise_offset)))
			err = -EFAULT;
		break;
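
	/*
	 * For each sample, gettimex64() sandwiches the device clock read
	 * between two system timestamps (pre_ts and post_ts), giving user
	 * space a bound on the read latency.
	 */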

	case PTP_SYS_OFFSET_EXTENDED:
	case PTP_SYS_OFFSET_EXTENDED2:
		if (!ptp->info->gettimex64) {
			err = -EOPNOTSUPP;
			break;
		}
		extoff = memdup_user((void __user *)arg, sizeof(*extoff));
		if (IS_ERR(extoff)) {
			err = PTR_ERR(extoff);
			extoff = NULL;
			break;
		}
		if (extoff->n_samples > PTP_MAX_SAMPLES ||
		    extoff->rsv[0] || extoff->rsv[1] || extoff->rsv[2]) {
			err = -EINVAL;
			break;
		}
		for (i = 0; i < extoff->n_samples; i++) {
			err = ptp->info->gettimex64(ptp->info, &ts, &sts);
			if (err)
				goto out;
			extoff->ts[i][0].sec = sts.pre_ts.tv_sec;
			extoff->ts[i][0].nsec = sts.pre_ts.tv_nsec;
			extoff->ts[i][1].sec = ts.tv_sec;
			extoff->ts[i][1].nsec = ts.tv_nsec;
			extoff->ts[i][2].sec = sts.post_ts.tv_sec;
			extoff->ts[i][2].nsec = sts.post_ts.tv_nsec;
		}
		if (copy_to_user((void __user *)arg, extoff, sizeof(*extoff)))
			err = -EFAULT;
		break;
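
	/*
	 * The legacy sampling method: alternate system and device clock
	 * readings, producing 2 * n_samples + 1 timestamps from which user
	 * space can estimate the offset between the two clocks.
	 */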

	case PTP_SYS_OFFSET:
	case PTP_SYS_OFFSET2:
		sysoff = memdup_user((void __user *)arg, sizeof(*sysoff));
		if (IS_ERR(sysoff)) {
			err = PTR_ERR(sysoff);
			sysoff = NULL;
			break;
		}
		if (sysoff->n_samples > PTP_MAX_SAMPLES) {
			err = -EINVAL;
			break;
		}
		pct = &sysoff->ts[0];
		for (i = 0; i < sysoff->n_samples; i++) {
			ktime_get_real_ts64(&ts);
			pct->sec = ts.tv_sec;
			pct->nsec = ts.tv_nsec;
			pct++;
			if (ops->gettimex64)
				err = ops->gettimex64(ops, &ts, NULL);
			else
				err = ops->gettime64(ops, &ts);
			if (err)
				goto out;
			pct->sec = ts.tv_sec;
			pct->nsec = ts.tv_nsec;
			pct++;
		}
		ktime_get_real_ts64(&ts);
		pct->sec = ts.tv_sec;
		pct->nsec = ts.tv_nsec;
		if (copy_to_user((void __user *)arg, sysoff, sizeof(*sysoff)))
			err = -EFAULT;
		break;

	case PTP_PIN_GETFUNC:
	case PTP_PIN_GETFUNC2:
		if (copy_from_user(&pd, (void __user *)arg, sizeof(pd))) {
			err = -EFAULT;
			break;
		}
		if ((pd.rsv[0] || pd.rsv[1] || pd.rsv[2] ||
		     pd.rsv[3] || pd.rsv[4]) &&
		    cmd == PTP_PIN_GETFUNC2) {
			err = -EINVAL;
			break;
		} else if (cmd == PTP_PIN_GETFUNC) {
			pd.rsv[0] = 0;
			pd.rsv[1] = 0;
			pd.rsv[2] = 0;
			pd.rsv[3] = 0;
			pd.rsv[4] = 0;
		}
		pin_index = pd.index;
		if (pin_index >= ops->n_pins) {
			err = -EINVAL;
			break;
		}
		pin_index = array_index_nospec(pin_index, ops->n_pins);
		if (mutex_lock_interruptible(&ptp->pincfg_mux))
			return -ERESTARTSYS;
		pd = ops->pin_config[pin_index];
		mutex_unlock(&ptp->pincfg_mux);
		if (!err && copy_to_user((void __user *)arg, &pd, sizeof(pd)))
			err = -EFAULT;
		break;

	case PTP_PIN_SETFUNC:
	case PTP_PIN_SETFUNC2:
		if (copy_from_user(&pd, (void __user *)arg, sizeof(pd))) {
			err = -EFAULT;
			break;
		}
		if ((pd.rsv[0] || pd.rsv[1] || pd.rsv[2] ||
		     pd.rsv[3] || pd.rsv[4]) &&
		    cmd == PTP_PIN_SETFUNC2) {
			err = -EINVAL;
			break;
		} else if (cmd == PTP_PIN_SETFUNC) {
			pd.rsv[0] = 0;
			pd.rsv[1] = 0;
			pd.rsv[2] = 0;
			pd.rsv[3] = 0;
			pd.rsv[4] = 0;
		}
		pin_index = pd.index;
		if (pin_index >= ops->n_pins) {
			err = -EINVAL;
			break;
		}
		pin_index = array_index_nospec(pin_index, ops->n_pins);
		if (mutex_lock_interruptible(&ptp->pincfg_mux))
			return -ERESTARTSYS;
		err = ptp_set_pinfunc(ptp, pin_index, pd.func, pd.chan);
		mutex_unlock(&ptp->pincfg_mux);
		break;

	default:
		err = -ENOTTY;
		break;
	}

out:
	kfree(extoff);
	kfree(sysoff);
	return err;
}

__poll_t ptp_poll(struct posix_clock *pc, struct file *fp, poll_table *wait)
{
	struct ptp_clock *ptp = container_of(pc, struct ptp_clock, clock);

	poll_wait(fp, &ptp->tsev_wq, wait);

	return queue_cnt(&ptp->tsevq) ? EPOLLIN : 0;
}

#define EXTTS_BUFSIZE (PTP_BUF_TIMESTAMPS * sizeof(struct ptp_extts_event))
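
/*
 * Copy queued external timestamp events to user space, blocking until at
 * least one event is available or the clock is being removed. Reads are
 * done in whole multiples of struct ptp_extts_event and are capped at
 * EXTTS_BUFSIZE per call.
 */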

ssize_t ptp_read(struct posix_clock *pc,
		 uint rdflags, char __user *buf, size_t cnt)
{
	struct ptp_clock *ptp = container_of(pc, struct ptp_clock, clock);
	struct timestamp_event_queue *queue = &ptp->tsevq;
	struct ptp_extts_event *event;
	unsigned long flags;
	size_t qcnt, i;
	int result;

	if (cnt % sizeof(struct ptp_extts_event) != 0)
		return -EINVAL;

	if (cnt > EXTTS_BUFSIZE)
		cnt = EXTTS_BUFSIZE;

	cnt = cnt / sizeof(struct ptp_extts_event);

	if (mutex_lock_interruptible(&ptp->tsevq_mux))
		return -ERESTARTSYS;

	if (wait_event_interruptible(ptp->tsev_wq,
				     ptp->defunct || queue_cnt(queue))) {
		mutex_unlock(&ptp->tsevq_mux);
		return -ERESTARTSYS;
	}

	if (ptp->defunct) {
		mutex_unlock(&ptp->tsevq_mux);
		return -ENODEV;
	}

	event = kmalloc(EXTTS_BUFSIZE, GFP_KERNEL);
	if (!event) {
		mutex_unlock(&ptp->tsevq_mux);
		return -ENOMEM;
	}

	spin_lock_irqsave(&queue->lock, flags);

	qcnt = queue_cnt(queue);

	if (cnt > qcnt)
		cnt = qcnt;

	for (i = 0; i < cnt; i++) {
		event[i] = queue->buf[queue->head];
		queue->head = (queue->head + 1) % PTP_MAX_TIMESTAMPS;
	}

	spin_unlock_irqrestore(&queue->lock, flags);

	cnt = cnt * sizeof(struct ptp_extts_event);

	mutex_unlock(&ptp->tsevq_mux);

	result = cnt;
	if (copy_to_user(buf, event, cnt))
		result = -EFAULT;

	kfree(event);
	return result;
}