// SPDX-License-Identifier: GPL-2.0
/*
 * Intel Quadrature Encoder Peripheral driver
 *
 * Copyright (C) 2019-2021 Intel Corporation
 *
 * Author: Felipe Balbi (Intel)
 * Author: Jarkko Nikula <[email protected]>
 * Author: Raymond Tan <[email protected]>
 */
#include <linux/counter.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/pm_runtime.h>

/* Register offsets from the MMIO base (BAR 0) */
#define INTEL_QEPCON			0x00
#define INTEL_QEPFLT			0x04
#define INTEL_QEPCOUNT			0x08
#define INTEL_QEPMAX			0x0c
#define INTEL_QEPWDT			0x10
#define INTEL_QEPCAPDIV			0x14
#define INTEL_QEPCNTR			0x18
#define INTEL_QEPCAPBUF			0x1c
#define INTEL_QEPINT_STAT		0x20
#define INTEL_QEPINT_MASK		0x24

/* QEPCON (control register) bits */
#define INTEL_QEPCON_EN			BIT(0)
#define INTEL_QEPCON_FLT_EN		BIT(1)
#define INTEL_QEPCON_EDGE_A		BIT(2)
#define INTEL_QEPCON_EDGE_B		BIT(3)
#define INTEL_QEPCON_EDGE_INDX		BIT(4)
#define INTEL_QEPCON_SWPAB		BIT(5)
#define INTEL_QEPCON_OP_MODE		BIT(6)
#define INTEL_QEPCON_PH_ERR		BIT(7)
#define INTEL_QEPCON_COUNT_RST_MODE	BIT(8)
#define INTEL_QEPCON_INDX_GATING_MASK	GENMASK(10, 9)
#define INTEL_QEPCON_INDX_GATING(n)	(((n) & 3) << 9)
#define INTEL_QEPCON_INDX_PAL_PBL	INTEL_QEPCON_INDX_GATING(0)
#define INTEL_QEPCON_INDX_PAL_PBH	INTEL_QEPCON_INDX_GATING(1)
#define INTEL_QEPCON_INDX_PAH_PBL	INTEL_QEPCON_INDX_GATING(2)
#define INTEL_QEPCON_INDX_PAH_PBH	INTEL_QEPCON_INDX_GATING(3)
#define INTEL_QEPCON_CAP_MODE		BIT(11)
/* FIFO threshold field encodes (n - 1), i.e. 0 means one entry */
#define INTEL_QEPCON_FIFO_THRE_MASK	GENMASK(14, 12)
#define INTEL_QEPCON_FIFO_THRE(n)	((((n) - 1) & 7) << 12)
#define INTEL_QEPCON_FIFO_EMPTY		BIT(15)

/* QEPFLT: spike filter max count is a 21-bit field */
#define INTEL_QEPFLT_MAX_COUNT(n)	((n) & 0x1fffff)

/* QEPINT (interrupt status/mask) bits */
#define INTEL_QEPINT_FIFOCRIT		BIT(5)
#define INTEL_QEPINT_FIFOENTRY		BIT(4)
#define INTEL_QEPINT_QEPDIR		BIT(3)
#define INTEL_QEPINT_QEPRST_UP		BIT(2)
#define INTEL_QEPINT_QEPRST_DOWN	BIT(1)
#define INTEL_QEPINT_WDT		BIT(0)

#define INTEL_QEPINT_MASK_ALL		GENMASK(5, 0)

/* Peripheral clock period, used to convert filter counts to nanoseconds */
#define INTEL_QEP_CLK_PERIOD_NS		10
/*
 * struct intel_qep - per-device driver state
 * @lock:    serializes configuration writes against enable/disable
 * @dev:     device used for runtime PM get/put calls
 * @regs:    MMIO base of the QEP register block
 * @enabled: cached INTEL_QEPCON_EN state; configuration writes are
 *           refused (-EBUSY) while the counter is enabled
 * @qepcon, @qepflt, @qepmax: register context saved by suspend and
 *           restored by resume
 */
struct intel_qep {
	struct mutex lock;
	struct device *dev;
	void __iomem *regs;
	bool enabled;
	/* Context save registers */
	u32 qepcon;
	u32 qepflt;
	u32 qepmax;
};
/* Read a 32-bit QEP register at @offset from the MMIO base. */
static inline u32 intel_qep_readl(struct intel_qep *qep, u32 offset)
{
	return readl(qep->regs + offset);
}
/* Write @value to the 32-bit QEP register at @offset from the MMIO base. */
static inline void intel_qep_writel(struct intel_qep *qep,
				    u32 offset, u32 value)
{
	writel(value, qep->regs + offset);
}
/*
 * Bring the peripheral into a known, disabled default state: counting
 * disabled, spike filter off, both-edge detection on phase A/B and the
 * index signal, counter reset driven by the ceiling (QEPMAX), and all
 * interrupts masked.
 */
static void intel_qep_init(struct intel_qep *qep)
{
	u32 reg;

	reg = intel_qep_readl(qep, INTEL_QEPCON);
	reg &= ~INTEL_QEPCON_EN;
	intel_qep_writel(qep, INTEL_QEPCON, reg);
	qep->enabled = false;
	/*
	 * Make sure peripheral is disabled by flushing the write with
	 * a dummy read
	 */
	reg = intel_qep_readl(qep, INTEL_QEPCON);

	/* Clear capture mode and filter enable, then set edge/reset defaults */
	reg &= ~(INTEL_QEPCON_OP_MODE | INTEL_QEPCON_FLT_EN);
	reg |= INTEL_QEPCON_EDGE_A | INTEL_QEPCON_EDGE_B |
	       INTEL_QEPCON_EDGE_INDX | INTEL_QEPCON_COUNT_RST_MODE;
	intel_qep_writel(qep, INTEL_QEPCON, reg);
	intel_qep_writel(qep, INTEL_QEPINT_MASK, INTEL_QEPINT_MASK_ALL);
}
/*
 * Counter core callback: read the current position from QEPCOUNT.
 * The device is runtime-resumed only for the duration of the access.
 */
static int intel_qep_count_read(struct counter_device *counter,
				struct counter_count *count, u64 *val)
{
	struct intel_qep *const qep = counter_priv(counter);

	pm_runtime_get_sync(qep->dev);
	*val = intel_qep_readl(qep, INTEL_QEPCOUNT);
	pm_runtime_put(qep->dev);

	return 0;
}
/* The hardware only supports x4 quadrature decoding. */
static const enum counter_function intel_qep_count_functions[] = {
	COUNTER_FUNCTION_QUADRATURE_X4,
};
/* Counter core callback: the count function is fixed to quadrature x4. */
static int intel_qep_function_read(struct counter_device *counter,
				   struct counter_count *count,
				   enum counter_function *function)
{
	*function = COUNTER_FUNCTION_QUADRATURE_X4;

	return 0;
}
/* Every input signal triggers on both edges; no other action is supported. */
static const enum counter_synapse_action intel_qep_synapse_actions[] = {
	COUNTER_SYNAPSE_ACTION_BOTH_EDGES,
};
/* Counter core callback: the synapse action is fixed to both-edges. */
static int intel_qep_action_read(struct counter_device *counter,
				 struct counter_count *count,
				 struct counter_synapse *synapse,
				 enum counter_synapse_action *action)
{
	*action = COUNTER_SYNAPSE_ACTION_BOTH_EDGES;
	return 0;
}
/* Read-only ops: function and action are hardware-fixed, so no writers. */
static const struct counter_ops intel_qep_counter_ops = {
	.count_read = intel_qep_count_read,
	.function_read = intel_qep_function_read,
	.action_read = intel_qep_action_read,
};
/* Helper to declare a counter input signal with an id and display name. */
#define INTEL_QEP_SIGNAL(_id, _name) {				\
	.id = (_id),						\
	.name = (_name),					\
}

/* Quadrature phase A/B inputs plus the index pulse. */
static struct counter_signal intel_qep_signals[] = {
	INTEL_QEP_SIGNAL(0, "Phase A"),
	INTEL_QEP_SIGNAL(1, "Phase B"),
	INTEL_QEP_SIGNAL(2, "Index"),
};
/* Helper binding a signal to the single supported (both-edges) action. */
#define INTEL_QEP_SYNAPSE(_signal_id) {				\
	.actions_list = intel_qep_synapse_actions,		\
	.num_actions = ARRAY_SIZE(intel_qep_synapse_actions),	\
	.signal = &intel_qep_signals[(_signal_id)],		\
}

/* One synapse per input signal: Phase A, Phase B, Index. */
static struct counter_synapse intel_qep_count_synapses[] = {
	INTEL_QEP_SYNAPSE(0),
	INTEL_QEP_SYNAPSE(1),
	INTEL_QEP_SYNAPSE(2),
};
/*
 * Counter core callback: report the configured ceiling (QEPMAX).
 * The device is runtime-resumed around the register access.
 */
static int intel_qep_ceiling_read(struct counter_device *counter,
				  struct counter_count *count, u64 *ceiling)
{
	struct intel_qep *priv = counter_priv(counter);

	pm_runtime_get_sync(priv->dev);
	*ceiling = intel_qep_readl(priv, INTEL_QEPMAX);
	pm_runtime_put(priv->dev);

	return 0;
}
/*
 * Counter core callback: set the counter ceiling (QEPMAX).
 *
 * Returns -ERANGE for values that do not fit the 32-bit register and
 * -EBUSY if the counter is currently enabled (the register is only
 * reconfigured while disabled).
 */
static int intel_qep_ceiling_write(struct counter_device *counter,
				   struct counter_count *count, u64 max)
{
	struct intel_qep *qep = counter_priv(counter);
	int ret = 0;

	/* Intel QEP ceiling configuration only supports 32-bit values */
	if (max != (u32)max)
		return -ERANGE;

	mutex_lock(&qep->lock);
	if (qep->enabled) {
		ret = -EBUSY;
		goto out;
	}

	pm_runtime_get_sync(qep->dev);
	intel_qep_writel(qep, INTEL_QEPMAX, max);
	pm_runtime_put(qep->dev);

out:
	mutex_unlock(&qep->lock);
	return ret;
}
/*
 * Counter core callback: report whether counting is enabled.
 * Reads the cached state, so no register access or PM handling needed.
 */
static int intel_qep_enable_read(struct counter_device *counter,
				 struct counter_count *count, u8 *enable)
{
	struct intel_qep *priv = counter_priv(counter);

	*enable = priv->enabled ? 1 : 0;

	return 0;
}
/*
 * Counter core callback: enable or disable counting (INTEL_QEPCON_EN).
 *
 * While enabled, an extra runtime PM usage count is held (via
 * pm_runtime_get_noresume) so the device cannot suspend and lose the
 * running count; the matching pm_runtime_put_noidle drops it again on
 * disable. The surrounding get_sync/put pair only covers the register
 * access itself.
 */
static int intel_qep_enable_write(struct counter_device *counter,
				  struct counter_count *count, u8 val)
{
	struct intel_qep *qep = counter_priv(counter);
	u32 reg;
	bool changed;

	mutex_lock(&qep->lock);
	/* Nothing to do if the requested state matches the cached state */
	changed = val ^ qep->enabled;
	if (!changed)
		goto out;

	pm_runtime_get_sync(qep->dev);
	reg = intel_qep_readl(qep, INTEL_QEPCON);
	if (val) {
		/* Enable peripheral and keep runtime PM always on */
		reg |= INTEL_QEPCON_EN;
		pm_runtime_get_noresume(qep->dev);
	} else {
		/* Let runtime PM be idle and disable peripheral */
		pm_runtime_put_noidle(qep->dev);
		reg &= ~INTEL_QEPCON_EN;
	}
	intel_qep_writel(qep, INTEL_QEPCON, reg);
	pm_runtime_put(qep->dev);
	qep->enabled = val;

out:
	mutex_unlock(&qep->lock);
	return 0;
}
/*
 * Counter core callback: report the spike filter length in nanoseconds.
 *
 * Returns 0 ns (leaving *length untouched from its zeroed default) when
 * the filter is disabled; otherwise the length is
 * (QEPFLT.MAX_COUNT + 2) clock periods, mirroring the write path.
 */
static int intel_qep_spike_filter_ns_read(struct counter_device *counter,
					  struct counter_count *count,
					  u64 *length)
{
	struct intel_qep *qep = counter_priv(counter);
	u32 reg;

	pm_runtime_get_sync(qep->dev);
	reg = intel_qep_readl(qep, INTEL_QEPCON);
	if (!(reg & INTEL_QEPCON_FLT_EN)) {
		/* Filter disabled: report a zero-length filter */
		pm_runtime_put(qep->dev);
		return 0;
	}
	reg = INTEL_QEPFLT_MAX_COUNT(intel_qep_readl(qep, INTEL_QEPFLT));
	pm_runtime_put(qep->dev);

	*length = (reg + 2) * INTEL_QEP_CLK_PERIOD_NS;

	return 0;
}
/*
 * Counter core callback: configure the spike filter length (in ns).
 *
 * A value of 0 disables the filter. Otherwise the requested length is
 * converted to clock periods and programmed as (periods - 2) into
 * QEPFLT, since the hardware filter spans (MAX_COUNT + 2) periods.
 * Returns -EINVAL for a non-zero length shorter than two clock periods,
 * -ERANGE if the count does not fit the 21-bit field, and -EBUSY while
 * the counter is enabled.
 */
static int intel_qep_spike_filter_ns_write(struct counter_device *counter,
					   struct counter_count *count,
					   u64 length)
{
	struct intel_qep *qep = counter_priv(counter);
	u32 reg;
	bool enable;
	int ret = 0;

	/*
	 * Spike filter length is (MAX_COUNT + 2) clock periods.
	 * Disable filter when userspace writes 0, enable for valid
	 * nanoseconds values and error out otherwise.
	 */
	do_div(length, INTEL_QEP_CLK_PERIOD_NS);
	if (length == 0) {
		enable = false;
		length = 0;
	} else if (length >= 2) {
		enable = true;
		length -= 2;
	} else {
		return -EINVAL;
	}

	/* Reject counts that overflow the 21-bit MAX_COUNT field */
	if (length > INTEL_QEPFLT_MAX_COUNT(length))
		return -ERANGE;

	mutex_lock(&qep->lock);
	if (qep->enabled) {
		ret = -EBUSY;
		goto out;
	}

	pm_runtime_get_sync(qep->dev);
	reg = intel_qep_readl(qep, INTEL_QEPCON);
	if (enable)
		reg |= INTEL_QEPCON_FLT_EN;
	else
		reg &= ~INTEL_QEPCON_FLT_EN;
	intel_qep_writel(qep, INTEL_QEPFLT, length);
	intel_qep_writel(qep, INTEL_QEPCON, reg);
	pm_runtime_put(qep->dev);

out:
	mutex_unlock(&qep->lock);
	return ret;
}
static int intel_qep_preset_enable_read(struct counter_device *counter,
325
struct counter_count *count,
326
u8 *preset_enable)
327
{
328
struct intel_qep *qep = counter_priv(counter);
329
u32 reg;
330
331
pm_runtime_get_sync(qep->dev);
332
reg = intel_qep_readl(qep, INTEL_QEPCON);
333
pm_runtime_put(qep->dev);
334
335
*preset_enable = !(reg & INTEL_QEPCON_COUNT_RST_MODE);
336
337
return 0;
338
}
339
340
/*
 * Counter core callback: enable or disable preset mode by toggling the
 * (inverted-polarity) COUNT_RST_MODE bit. Refused with -EBUSY while the
 * counter is running; the device is runtime-resumed for the update.
 */
static int intel_qep_preset_enable_write(struct counter_device *counter,
					 struct counter_count *count, u8 val)
{
	struct intel_qep *priv = counter_priv(counter);
	int ret = 0;
	u32 con;

	mutex_lock(&priv->lock);
	if (priv->enabled) {
		ret = -EBUSY;
		goto out;
	}

	pm_runtime_get_sync(priv->dev);
	con = intel_qep_readl(priv, INTEL_QEPCON);
	/* Bit clear means preset enabled, bit set means ceiling-reset mode */
	con = val ? (con & ~INTEL_QEPCON_COUNT_RST_MODE)
		  : (con | INTEL_QEPCON_COUNT_RST_MODE);

	intel_qep_writel(priv, INTEL_QEPCON, con);
	pm_runtime_put(priv->dev);

out:
	mutex_unlock(&priv->lock);

	return ret;
}
/* Extension attributes exposed on the count: enable, ceiling, preset
 * mode, and the custom spike filter length in nanoseconds.
 */
static struct counter_comp intel_qep_count_ext[] = {
	COUNTER_COMP_ENABLE(intel_qep_enable_read, intel_qep_enable_write),
	COUNTER_COMP_CEILING(intel_qep_ceiling_read, intel_qep_ceiling_write),
	COUNTER_COMP_PRESET_ENABLE(intel_qep_preset_enable_read,
				   intel_qep_preset_enable_write),
	COUNTER_COMP_COUNT_U64("spike_filter_ns",
			       intel_qep_spike_filter_ns_read,
			       intel_qep_spike_filter_ns_write),
};
/* The device exposes a single count channel wired to all three signals. */
static struct counter_count intel_qep_counter_count[] = {
	{
		.id = 0,
		.name = "Channel 1 Count",
		.functions_list = intel_qep_count_functions,
		.num_functions = ARRAY_SIZE(intel_qep_count_functions),
		.synapses = intel_qep_count_synapses,
		.num_synapses = ARRAY_SIZE(intel_qep_count_synapses),
		.ext = intel_qep_count_ext,
		.num_ext = ARRAY_SIZE(intel_qep_count_ext),
	},
};
/*
 * PCI probe: map BAR 0, reset the peripheral to its disabled defaults,
 * enable runtime PM, and register the counter device. All resources are
 * device-managed, so there is no explicit unwind path here.
 */
static int intel_qep_probe(struct pci_dev *pci, const struct pci_device_id *id)
{
	struct counter_device *counter;
	struct intel_qep *qep;
	struct device *dev = &pci->dev;
	void __iomem *regs;
	int ret;

	counter = devm_counter_alloc(dev, sizeof(*qep));
	if (!counter)
		return -ENOMEM;
	qep = counter_priv(counter);

	ret = pcim_enable_device(pci);
	if (ret)
		return ret;

	pci_set_master(pci);

	regs = pcim_iomap_region(pci, 0, pci_name(pci));
	if (IS_ERR(regs))
		return PTR_ERR(regs);

	qep->dev = dev;
	qep->regs = regs;
	mutex_init(&qep->lock);

	intel_qep_init(qep);
	pci_set_drvdata(pci, qep);

	counter->name = pci_name(pci);
	counter->parent = dev;
	counter->ops = &intel_qep_counter_ops;
	counter->counts = intel_qep_counter_count;
	counter->num_counts = ARRAY_SIZE(intel_qep_counter_count);
	counter->signals = intel_qep_signals;
	counter->num_signals = ARRAY_SIZE(intel_qep_signals);
	qep->enabled = false;

	/*
	 * Drop the probe-time usage count and let the device runtime
	 * suspend while the counter is not enabled.
	 */
	pm_runtime_put(dev);
	pm_runtime_allow(dev);

	ret = devm_counter_add(&pci->dev, counter);
	if (ret < 0)
		return dev_err_probe(&pci->dev, ret, "Failed to add counter\n");

	return 0;
}
/*
 * PCI remove: disable the peripheral with runtime PM active.
 *
 * If counting is enabled, enable_write() is still holding the extra
 * usage count it took with pm_runtime_get_noresume(); otherwise take a
 * reference here so the final QEPCON write happens on an active device.
 * Neither reference is dropped, as the device is going away.
 */
static void intel_qep_remove(struct pci_dev *pci)
{
	struct intel_qep *qep = pci_get_drvdata(pci);
	struct device *dev = &pci->dev;

	pm_runtime_forbid(dev);
	if (!qep->enabled)
		pm_runtime_get(dev);

	intel_qep_writel(qep, INTEL_QEPCON, 0);
}
/*
 * PM suspend: save the configuration registers (QEPCON/QEPFLT/QEPMAX)
 * so resume can reprogram the peripheral. The count itself is not
 * preserved.
 */
static int __maybe_unused intel_qep_suspend(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct intel_qep *qep = pci_get_drvdata(pdev);

	qep->qepcon = intel_qep_readl(qep, INTEL_QEPCON);
	qep->qepflt = intel_qep_readl(qep, INTEL_QEPFLT);
	qep->qepmax = intel_qep_readl(qep, INTEL_QEPMAX);

	return 0;
}
/*
 * PM resume: restore the register context saved by suspend. Order
 * matters: the peripheral must be disabled while QEPFLT/QEPMAX and the
 * disabled-only QEPCON bits are written, and only then re-enabled.
 */
static int __maybe_unused intel_qep_resume(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct intel_qep *qep = pci_get_drvdata(pdev);

	/*
	 * Make sure peripheral is disabled when restoring registers and
	 * control register bits that are writable only when the peripheral
	 * is disabled
	 */
	intel_qep_writel(qep, INTEL_QEPCON, 0);
	intel_qep_readl(qep, INTEL_QEPCON);	/* flush the disable write */

	intel_qep_writel(qep, INTEL_QEPFLT, qep->qepflt);
	intel_qep_writel(qep, INTEL_QEPMAX, qep->qepmax);
	intel_qep_writel(qep, INTEL_QEPINT_MASK, INTEL_QEPINT_MASK_ALL);

	/* Restore all other control register bits except enable status */
	intel_qep_writel(qep, INTEL_QEPCON, qep->qepcon & ~INTEL_QEPCON_EN);
	intel_qep_readl(qep, INTEL_QEPCON);	/* flush before enabling */

	/* Restore enable status */
	intel_qep_writel(qep, INTEL_QEPCON, qep->qepcon);

	return 0;
}
/* Same suspend/resume handlers for system sleep and runtime PM. */
static UNIVERSAL_DEV_PM_OPS(intel_qep_pm_ops,
			    intel_qep_suspend, intel_qep_resume, NULL);
/* Supported PCI IDs: the QEP instances on Elkhart Lake (EHL). */
static const struct pci_device_id intel_qep_id_table[] = {
	/* EHL */
	{ PCI_VDEVICE(INTEL, 0x4bc3), },
	{ PCI_VDEVICE(INTEL, 0x4b81), },
	{ PCI_VDEVICE(INTEL, 0x4b82), },
	{ PCI_VDEVICE(INTEL, 0x4b83), },
	{  } /* Terminating Entry */
};
MODULE_DEVICE_TABLE(pci, intel_qep_id_table);
static struct pci_driver intel_qep_driver = {
	.name = "intel-qep",
	.id_table = intel_qep_id_table,
	.probe = intel_qep_probe,
	.remove = intel_qep_remove,
	.driver = {
		.pm = &intel_qep_pm_ops,
	}
};

module_pci_driver(intel_qep_driver);

MODULE_AUTHOR("Felipe Balbi (Intel)");
MODULE_AUTHOR("Jarkko Nikula <[email protected]>");
MODULE_AUTHOR("Raymond Tan <[email protected]>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Intel Quadrature Encoder Peripheral driver");
MODULE_IMPORT_NS("COUNTER");