// SPDX-License-Identifier: GPL-2.0
/*
 * Intel Quadrature Encoder Peripheral driver
 *
 * Copyright (C) 2019-2021 Intel Corporation
 *
 * Author: Felipe Balbi (Intel)
 * Author: Jarkko Nikula <jarkko.nikula@linux.intel.com>
 * Author: Raymond Tan <raymond.tan@intel.com>
 */
#include <linux/counter.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/pm_runtime.h>
/* MMIO register offsets (BAR 0) */
#define INTEL_QEPCON			0x00
#define INTEL_QEPFLT			0x04
#define INTEL_QEPCOUNT			0x08
#define INTEL_QEPMAX			0x0c
#define INTEL_QEPWDT			0x10
#define INTEL_QEPCAPDIV			0x14
#define INTEL_QEPCNTR			0x18
#define INTEL_QEPCAPBUF			0x1c
#define INTEL_QEPINT_STAT		0x20
#define INTEL_QEPINT_MASK		0x24

/* Control register (QEPCON) bits */
#define INTEL_QEPCON_EN			BIT(0)
#define INTEL_QEPCON_FLT_EN		BIT(1)
#define INTEL_QEPCON_EDGE_A		BIT(2)
#define INTEL_QEPCON_EDGE_B		BIT(3)
#define INTEL_QEPCON_EDGE_INDX		BIT(4)
#define INTEL_QEPCON_SWPAB		BIT(5)
#define INTEL_QEPCON_OP_MODE		BIT(6)
#define INTEL_QEPCON_PH_ERR		BIT(7)
#define INTEL_QEPCON_COUNT_RST_MODE	BIT(8)
#define INTEL_QEPCON_INDX_GATING_MASK	GENMASK(10, 9)
#define INTEL_QEPCON_INDX_GATING(n)	(((n) & 3) << 9)
#define INTEL_QEPCON_INDX_PAL_PBL	INTEL_QEPCON_INDX_GATING(0)
#define INTEL_QEPCON_INDX_PAL_PBH	INTEL_QEPCON_INDX_GATING(1)
#define INTEL_QEPCON_INDX_PAH_PBL	INTEL_QEPCON_INDX_GATING(2)
#define INTEL_QEPCON_INDX_PAH_PBH	INTEL_QEPCON_INDX_GATING(3)
#define INTEL_QEPCON_CAP_MODE		BIT(11)
#define INTEL_QEPCON_FIFO_THRE_MASK	GENMASK(14, 12)
#define INTEL_QEPCON_FIFO_THRE(n)	((((n) - 1) & 7) << 12)
#define INTEL_QEPCON_FIFO_EMPTY		BIT(15)

/* Filter register (QEPFLT): spike filter max count field */
#define INTEL_QEPFLT_MAX_COUNT(n)	((n) & 0x1fffff)

/* Interrupt status/mask register bits */
#define INTEL_QEPINT_FIFOCRIT		BIT(5)
#define INTEL_QEPINT_FIFOENTRY		BIT(4)
#define INTEL_QEPINT_QEPDIR		BIT(3)
#define INTEL_QEPINT_QEPRST_UP		BIT(2)
#define INTEL_QEPINT_QEPRST_DOWN	BIT(1)
#define INTEL_QEPINT_WDT		BIT(0)

#define INTEL_QEPINT_MASK_ALL		GENMASK(5, 0)

/* Peripheral clock period, used to convert spike filter length to ns */
#define INTEL_QEP_CLK_PERIOD_NS		10
/*
 * Declare a read/write counter extension attribute named after _name and
 * backed by the _name_read()/_name_write() sysfs handlers below.
 */
#define INTEL_QEP_COUNTER_EXT_RW(_name)		\
{						\
	.name = #_name,				\
	.read = _name##_read,			\
	.write = _name##_write,			\
}
73 struct counter_device counter;
78 /* Context save registers */
84 static inline u32 intel_qep_readl(struct intel_qep *qep, u32 offset)
86 return readl(qep->regs + offset);
89 static inline void intel_qep_writel(struct intel_qep *qep,
90 u32 offset, u32 value)
92 writel(value, qep->regs + offset);
95 static void intel_qep_init(struct intel_qep *qep)
99 reg = intel_qep_readl(qep, INTEL_QEPCON);
100 reg &= ~INTEL_QEPCON_EN;
101 intel_qep_writel(qep, INTEL_QEPCON, reg);
102 qep->enabled = false;
104 * Make sure peripheral is disabled by flushing the write with
107 reg = intel_qep_readl(qep, INTEL_QEPCON);
109 reg &= ~(INTEL_QEPCON_OP_MODE | INTEL_QEPCON_FLT_EN);
110 reg |= INTEL_QEPCON_EDGE_A | INTEL_QEPCON_EDGE_B |
111 INTEL_QEPCON_EDGE_INDX | INTEL_QEPCON_COUNT_RST_MODE;
112 intel_qep_writel(qep, INTEL_QEPCON, reg);
113 intel_qep_writel(qep, INTEL_QEPINT_MASK, INTEL_QEPINT_MASK_ALL);
116 static int intel_qep_count_read(struct counter_device *counter,
117 struct counter_count *count,
120 struct intel_qep *const qep = counter->priv;
122 pm_runtime_get_sync(qep->dev);
123 *val = intel_qep_readl(qep, INTEL_QEPCOUNT);
124 pm_runtime_put(qep->dev);
129 static const enum counter_function intel_qep_count_functions[] = {
130 COUNTER_FUNCTION_QUADRATURE_X4,
133 static int intel_qep_function_get(struct counter_device *counter,
134 struct counter_count *count,
142 static const enum counter_synapse_action intel_qep_synapse_actions[] = {
143 COUNTER_SYNAPSE_ACTION_BOTH_EDGES,
146 static int intel_qep_action_get(struct counter_device *counter,
147 struct counter_count *count,
148 struct counter_synapse *synapse,
155 static const struct counter_ops intel_qep_counter_ops = {
156 .count_read = intel_qep_count_read,
157 .function_get = intel_qep_function_get,
158 .action_get = intel_qep_action_get,
/* Declare one counter signal with a fixed id and human-readable name */
#define INTEL_QEP_SIGNAL(_id, _name) {		\
	.id = (_id),				\
	.name = (_name),			\
}
166 static struct counter_signal intel_qep_signals[] = {
167 INTEL_QEP_SIGNAL(0, "Phase A"),
168 INTEL_QEP_SIGNAL(1, "Phase B"),
169 INTEL_QEP_SIGNAL(2, "Index"),
/* Declare one synapse connecting a signal to the (fixed) action list */
#define INTEL_QEP_SYNAPSE(_signal_id) {				\
	.actions_list = intel_qep_synapse_actions,		\
	.num_actions = ARRAY_SIZE(intel_qep_synapse_actions),	\
	.signal = &intel_qep_signals[(_signal_id)],		\
}
178 static struct counter_synapse intel_qep_count_synapses[] = {
179 INTEL_QEP_SYNAPSE(0),
180 INTEL_QEP_SYNAPSE(1),
181 INTEL_QEP_SYNAPSE(2),
184 static ssize_t ceiling_read(struct counter_device *counter,
185 struct counter_count *count,
186 void *priv, char *buf)
188 struct intel_qep *qep = counter->priv;
191 pm_runtime_get_sync(qep->dev);
192 reg = intel_qep_readl(qep, INTEL_QEPMAX);
193 pm_runtime_put(qep->dev);
195 return sysfs_emit(buf, "%u\n", reg);
198 static ssize_t ceiling_write(struct counter_device *counter,
199 struct counter_count *count,
200 void *priv, const char *buf, size_t len)
202 struct intel_qep *qep = counter->priv;
206 ret = kstrtou32(buf, 0, &max);
210 mutex_lock(&qep->lock);
216 pm_runtime_get_sync(qep->dev);
217 intel_qep_writel(qep, INTEL_QEPMAX, max);
218 pm_runtime_put(qep->dev);
222 mutex_unlock(&qep->lock);
226 static ssize_t enable_read(struct counter_device *counter,
227 struct counter_count *count,
228 void *priv, char *buf)
230 struct intel_qep *qep = counter->priv;
232 return sysfs_emit(buf, "%u\n", qep->enabled);
235 static ssize_t enable_write(struct counter_device *counter,
236 struct counter_count *count,
237 void *priv, const char *buf, size_t len)
239 struct intel_qep *qep = counter->priv;
244 ret = kstrtobool(buf, &val);
248 mutex_lock(&qep->lock);
249 changed = val ^ qep->enabled;
253 pm_runtime_get_sync(qep->dev);
254 reg = intel_qep_readl(qep, INTEL_QEPCON);
256 /* Enable peripheral and keep runtime PM always on */
257 reg |= INTEL_QEPCON_EN;
258 pm_runtime_get_noresume(qep->dev);
260 /* Let runtime PM be idle and disable peripheral */
261 pm_runtime_put_noidle(qep->dev);
262 reg &= ~INTEL_QEPCON_EN;
264 intel_qep_writel(qep, INTEL_QEPCON, reg);
265 pm_runtime_put(qep->dev);
269 mutex_unlock(&qep->lock);
273 static ssize_t spike_filter_ns_read(struct counter_device *counter,
274 struct counter_count *count,
275 void *priv, char *buf)
277 struct intel_qep *qep = counter->priv;
280 pm_runtime_get_sync(qep->dev);
281 reg = intel_qep_readl(qep, INTEL_QEPCON);
282 if (!(reg & INTEL_QEPCON_FLT_EN)) {
283 pm_runtime_put(qep->dev);
284 return sysfs_emit(buf, "0\n");
286 reg = INTEL_QEPFLT_MAX_COUNT(intel_qep_readl(qep, INTEL_QEPFLT));
287 pm_runtime_put(qep->dev);
289 return sysfs_emit(buf, "%u\n", (reg + 2) * INTEL_QEP_CLK_PERIOD_NS);
292 static ssize_t spike_filter_ns_write(struct counter_device *counter,
293 struct counter_count *count,
294 void *priv, const char *buf, size_t len)
296 struct intel_qep *qep = counter->priv;
301 ret = kstrtou32(buf, 0, &length);
306 * Spike filter length is (MAX_COUNT + 2) clock periods.
307 * Disable filter when userspace writes 0, enable for valid
308 * nanoseconds values and error out otherwise.
310 length /= INTEL_QEP_CLK_PERIOD_NS;
314 } else if (length >= 2) {
321 if (length > INTEL_QEPFLT_MAX_COUNT(length))
324 mutex_lock(&qep->lock);
330 pm_runtime_get_sync(qep->dev);
331 reg = intel_qep_readl(qep, INTEL_QEPCON);
333 reg |= INTEL_QEPCON_FLT_EN;
335 reg &= ~INTEL_QEPCON_FLT_EN;
336 intel_qep_writel(qep, INTEL_QEPFLT, length);
337 intel_qep_writel(qep, INTEL_QEPCON, reg);
338 pm_runtime_put(qep->dev);
342 mutex_unlock(&qep->lock);
346 static ssize_t preset_enable_read(struct counter_device *counter,
347 struct counter_count *count,
348 void *priv, char *buf)
350 struct intel_qep *qep = counter->priv;
353 pm_runtime_get_sync(qep->dev);
354 reg = intel_qep_readl(qep, INTEL_QEPCON);
355 pm_runtime_put(qep->dev);
356 return sysfs_emit(buf, "%u\n", !(reg & INTEL_QEPCON_COUNT_RST_MODE));
359 static ssize_t preset_enable_write(struct counter_device *counter,
360 struct counter_count *count,
361 void *priv, const char *buf, size_t len)
363 struct intel_qep *qep = counter->priv;
368 ret = kstrtobool(buf, &val);
372 mutex_lock(&qep->lock);
378 pm_runtime_get_sync(qep->dev);
379 reg = intel_qep_readl(qep, INTEL_QEPCON);
381 reg &= ~INTEL_QEPCON_COUNT_RST_MODE;
383 reg |= INTEL_QEPCON_COUNT_RST_MODE;
385 intel_qep_writel(qep, INTEL_QEPCON, reg);
386 pm_runtime_put(qep->dev);
390 mutex_unlock(&qep->lock);
395 static const struct counter_count_ext intel_qep_count_ext[] = {
396 INTEL_QEP_COUNTER_EXT_RW(ceiling),
397 INTEL_QEP_COUNTER_EXT_RW(enable),
398 INTEL_QEP_COUNTER_EXT_RW(spike_filter_ns),
399 INTEL_QEP_COUNTER_EXT_RW(preset_enable)
402 static struct counter_count intel_qep_counter_count[] = {
405 .name = "Channel 1 Count",
406 .functions_list = intel_qep_count_functions,
407 .num_functions = ARRAY_SIZE(intel_qep_count_functions),
408 .synapses = intel_qep_count_synapses,
409 .num_synapses = ARRAY_SIZE(intel_qep_count_synapses),
410 .ext = intel_qep_count_ext,
411 .num_ext = ARRAY_SIZE(intel_qep_count_ext),
415 static int intel_qep_probe(struct pci_dev *pci, const struct pci_device_id *id)
417 struct intel_qep *qep;
418 struct device *dev = &pci->dev;
422 qep = devm_kzalloc(dev, sizeof(*qep), GFP_KERNEL);
426 ret = pcim_enable_device(pci);
432 ret = pcim_iomap_regions(pci, BIT(0), pci_name(pci));
436 regs = pcim_iomap_table(pci)[0];
442 mutex_init(&qep->lock);
445 pci_set_drvdata(pci, qep);
447 qep->counter.name = pci_name(pci);
448 qep->counter.parent = dev;
449 qep->counter.ops = &intel_qep_counter_ops;
450 qep->counter.counts = intel_qep_counter_count;
451 qep->counter.num_counts = ARRAY_SIZE(intel_qep_counter_count);
452 qep->counter.signals = intel_qep_signals;
453 qep->counter.num_signals = ARRAY_SIZE(intel_qep_signals);
454 qep->counter.priv = qep;
455 qep->enabled = false;
458 pm_runtime_allow(dev);
460 return devm_counter_register(&pci->dev, &qep->counter);
463 static void intel_qep_remove(struct pci_dev *pci)
465 struct intel_qep *qep = pci_get_drvdata(pci);
466 struct device *dev = &pci->dev;
468 pm_runtime_forbid(dev);
472 intel_qep_writel(qep, INTEL_QEPCON, 0);
475 static int __maybe_unused intel_qep_suspend(struct device *dev)
477 struct pci_dev *pdev = to_pci_dev(dev);
478 struct intel_qep *qep = pci_get_drvdata(pdev);
480 qep->qepcon = intel_qep_readl(qep, INTEL_QEPCON);
481 qep->qepflt = intel_qep_readl(qep, INTEL_QEPFLT);
482 qep->qepmax = intel_qep_readl(qep, INTEL_QEPMAX);
487 static int __maybe_unused intel_qep_resume(struct device *dev)
489 struct pci_dev *pdev = to_pci_dev(dev);
490 struct intel_qep *qep = pci_get_drvdata(pdev);
493 * Make sure peripheral is disabled when restoring registers and
494 * control register bits that are writable only when the peripheral
497 intel_qep_writel(qep, INTEL_QEPCON, 0);
498 intel_qep_readl(qep, INTEL_QEPCON);
500 intel_qep_writel(qep, INTEL_QEPFLT, qep->qepflt);
501 intel_qep_writel(qep, INTEL_QEPMAX, qep->qepmax);
502 intel_qep_writel(qep, INTEL_QEPINT_MASK, INTEL_QEPINT_MASK_ALL);
504 /* Restore all other control register bits except enable status */
505 intel_qep_writel(qep, INTEL_QEPCON, qep->qepcon & ~INTEL_QEPCON_EN);
506 intel_qep_readl(qep, INTEL_QEPCON);
508 /* Restore enable status */
509 intel_qep_writel(qep, INTEL_QEPCON, qep->qepcon);
static UNIVERSAL_DEV_PM_OPS(intel_qep_pm_ops,
			    intel_qep_suspend, intel_qep_resume, NULL);
517 static const struct pci_device_id intel_qep_id_table[] = {
519 { PCI_VDEVICE(INTEL, 0x4bc3), },
520 { PCI_VDEVICE(INTEL, 0x4b81), },
521 { PCI_VDEVICE(INTEL, 0x4b82), },
522 { PCI_VDEVICE(INTEL, 0x4b83), },
523 { } /* Terminating Entry */
525 MODULE_DEVICE_TABLE(pci, intel_qep_id_table);
527 static struct pci_driver intel_qep_driver = {
529 .id_table = intel_qep_id_table,
530 .probe = intel_qep_probe,
531 .remove = intel_qep_remove,
533 .pm = &intel_qep_pm_ops,
537 module_pci_driver(intel_qep_driver);
539 MODULE_AUTHOR("Felipe Balbi (Intel)");
540 MODULE_AUTHOR("Jarkko Nikula <jarkko.nikula@linux.intel.com>");
541 MODULE_AUTHOR("Raymond Tan <raymond.tan@intel.com>");
542 MODULE_LICENSE("GPL");
543 MODULE_DESCRIPTION("Intel Quadrature Encoder Peripheral driver");