GitHub Repository: torvalds/linux
Path: blob/master/drivers/char/ipmi/kcs_bmc_aspeed.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2015-2018, Intel Corporation.
 */

#define pr_fmt(fmt) "aspeed-kcs-bmc: " fmt

#include <linux/atomic.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/poll.h>
#include <linux/regmap.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/timer.h>

#include "kcs_bmc_device.h"


#define DEVICE_NAME "ast-kcs-bmc"

#define KCS_CHANNEL_MAX 4

/*
 * Field class descriptions
 *
 * LPCyE	Enable LPC channel y
 * IBFIEy	Input Buffer Full IRQ Enable for LPC channel y
 * IRQxEy	Assert SerIRQ x for LPC channel y (Deprecated, use IDyIRQX, IRQXEy)
 * IDyIRQX	Use the specified 4-bit SerIRQ for LPC channel y
 * SELyIRQX	SerIRQ polarity for LPC channel y (low: 0, high: 1)
 * IRQXEy	Assert the SerIRQ specified in IDyIRQX for LPC channel y
 */

#define LPC_TYIRQX_LOW 0b00
#define LPC_TYIRQX_HIGH 0b01
#define LPC_TYIRQX_RSVD 0b10
#define LPC_TYIRQX_RISING 0b11

#define LPC_HICR0 0x000
#define LPC_HICR0_LPC3E BIT(7)
#define LPC_HICR0_LPC2E BIT(6)
#define LPC_HICR0_LPC1E BIT(5)
#define LPC_HICR2 0x008
#define LPC_HICR2_IBFIE3 BIT(3)
#define LPC_HICR2_IBFIE2 BIT(2)
#define LPC_HICR2_IBFIE1 BIT(1)
#define LPC_HICR4 0x010
#define LPC_HICR4_LADR12AS BIT(7)
#define LPC_HICR4_KCSENBL BIT(2)
#define LPC_SIRQCR0 0x070
/* IRQ{12,1}E1 are deprecated as of AST2600 A3 but necessary for prior chips */
#define LPC_SIRQCR0_IRQ12E1 BIT(1)
#define LPC_SIRQCR0_IRQ1E1 BIT(0)
#define LPC_HICR5 0x080
#define LPC_HICR5_ID3IRQX_MASK GENMASK(23, 20)
#define LPC_HICR5_ID3IRQX_SHIFT 20
#define LPC_HICR5_ID2IRQX_MASK GENMASK(19, 16)
#define LPC_HICR5_ID2IRQX_SHIFT 16
#define LPC_HICR5_SEL3IRQX BIT(15)
#define LPC_HICR5_IRQXE3 BIT(14)
#define LPC_HICR5_SEL2IRQX BIT(13)
#define LPC_HICR5_IRQXE2 BIT(12)
#define LPC_LADR3H 0x014
#define LPC_LADR3L 0x018
#define LPC_LADR12H 0x01C
#define LPC_LADR12L 0x020
#define LPC_IDR1 0x024
#define LPC_IDR2 0x028
#define LPC_IDR3 0x02C
#define LPC_ODR1 0x030
#define LPC_ODR2 0x034
#define LPC_ODR3 0x038
#define LPC_STR1 0x03C
#define LPC_STR2 0x040
#define LPC_STR3 0x044
#define LPC_HICRB 0x100
#define LPC_HICRB_EN16LADR2 BIT(5)
#define LPC_HICRB_EN16LADR1 BIT(4)
#define LPC_HICRB_IBFIE4 BIT(1)
#define LPC_HICRB_LPC4E BIT(0)
#define LPC_HICRC 0x104
#define LPC_HICRC_ID4IRQX_MASK GENMASK(7, 4)
#define LPC_HICRC_ID4IRQX_SHIFT 4
#define LPC_HICRC_TY4IRQX_MASK GENMASK(3, 2)
#define LPC_HICRC_TY4IRQX_SHIFT 2
#define LPC_HICRC_OBF4_AUTO_CLR BIT(1)
#define LPC_HICRC_IRQXE4 BIT(0)
#define LPC_LADR4 0x110
#define LPC_IDR4 0x114
#define LPC_ODR4 0x118
#define LPC_STR4 0x11C
#define LPC_LSADR12 0x120
#define LPC_LSADR12_LSADR2_MASK GENMASK(31, 16)
#define LPC_LSADR12_LSADR2_SHIFT 16
#define LPC_LSADR12_LSADR1_MASK GENMASK(15, 0)
#define LPC_LSADR12_LSADR1_SHIFT 0

#define OBE_POLL_PERIOD (HZ / 2)

enum aspeed_kcs_irq_mode {
	aspeed_kcs_irq_none,
	aspeed_kcs_irq_serirq,
};

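/*
 * Per-device driver state: the LPC syscon regmap shared with other LPC
 * sub-devices, the upstream (BMC-to-host) SerIRQ configuration, and the timer
 * used to emulate an "output buffer empty" event by polling STR.
 */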
struct aspeed_kcs_bmc {
	struct kcs_bmc_device kcs_bmc;

	struct regmap *map;

	struct {
		enum aspeed_kcs_irq_mode mode;
		int id;
	} upstream_irq;

	struct {
		spinlock_t lock;
		bool remove;
		struct timer_list timer;
	} obe;
};

static inline struct aspeed_kcs_bmc *to_aspeed_kcs_bmc(struct kcs_bmc_device *kcs_bmc)
{
	return container_of(kcs_bmc, struct aspeed_kcs_bmc, kcs_bmc);
}

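/*
 * Register accessors: the KCS registers live in the LPC controller's register
 * space, which is shared with other LPC functions, so all accesses go through
 * the parent syscon regmap rather than a private MMIO mapping.
 */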
static u8 aspeed_kcs_inb(struct kcs_bmc_device *kcs_bmc, u32 reg)
{
	struct aspeed_kcs_bmc *priv = to_aspeed_kcs_bmc(kcs_bmc);
	u32 val = 0;
	int rc;

	rc = regmap_read(priv->map, reg, &val);
	WARN(rc != 0, "regmap_read() failed: %d\n", rc);

	return rc == 0 ? (u8) val : 0;
}

static void aspeed_kcs_outb(struct kcs_bmc_device *kcs_bmc, u32 reg, u8 data)
{
	struct aspeed_kcs_bmc *priv = to_aspeed_kcs_bmc(kcs_bmc);
	int rc;

	rc = regmap_write(priv->map, reg, data);
	WARN(rc != 0, "regmap_write() failed: %d\n", rc);

	/* Trigger the upstream IRQ on ODR writes, if enabled */

	switch (reg) {
	case LPC_ODR1:
	case LPC_ODR2:
	case LPC_ODR3:
	case LPC_ODR4:
		break;
	default:
		return;
	}

	if (priv->upstream_irq.mode != aspeed_kcs_irq_serirq)
		return;

	switch (kcs_bmc->channel) {
	case 1:
		switch (priv->upstream_irq.id) {
		case 12:
			regmap_update_bits(priv->map, LPC_SIRQCR0, LPC_SIRQCR0_IRQ12E1,
					   LPC_SIRQCR0_IRQ12E1);
			break;
		case 1:
			regmap_update_bits(priv->map, LPC_SIRQCR0, LPC_SIRQCR0_IRQ1E1,
					   LPC_SIRQCR0_IRQ1E1);
			break;
		default:
			break;
		}
		break;
	case 2:
		regmap_update_bits(priv->map, LPC_HICR5, LPC_HICR5_IRQXE2, LPC_HICR5_IRQXE2);
		break;
	case 3:
		regmap_update_bits(priv->map, LPC_HICR5, LPC_HICR5_IRQXE3, LPC_HICR5_IRQXE3);
		break;
	case 4:
		regmap_update_bits(priv->map, LPC_HICRC, LPC_HICRC_IRQXE4, LPC_HICRC_IRQXE4);
		break;
	default:
		break;
	}
}

static void aspeed_kcs_updateb(struct kcs_bmc_device *kcs_bmc, u32 reg, u8 mask, u8 val)
{
	struct aspeed_kcs_bmc *priv = to_aspeed_kcs_bmc(kcs_bmc);
	int rc;

	rc = regmap_update_bits(priv->map, reg, mask, val);
	WARN(rc != 0, "regmap_update_bits() failed: %d\n", rc);
}

/*
 * We write D for the Data register address and C for the Cmd/Status register
 * address; the default mappings are:
 *
 * 1. Only the D address is given:
 *   A. KCS1/KCS2 (D/C: X/X+4)
 *      D/C: CA0h/CA4h
 *      D/C: CA8h/CACh
 *   B. KCS3 (D/C: XX2h/XX3h)
 *      D/C: CA2h/CA3h
 *   C. KCS4 (D/C: X/X+1)
 *      D/C: CA4h/CA5h
 *
 * 2. Both the D/C addresses are given:
 *   A. KCS1/KCS2/KCS4 (D/C: X/Y)
 *      D/C: CA0h/CA1h
 *      D/C: CA8h/CA9h
 *      D/C: CA4h/CA5h
 *   B. KCS3 (D/C: XX2h/XX3h)
 *      D/C: CA2h/CA3h
 */
static int aspeed_kcs_set_address(struct kcs_bmc_device *kcs_bmc, u32 addrs[2], int nr_addrs)
{
	struct aspeed_kcs_bmc *priv = to_aspeed_kcs_bmc(kcs_bmc);

	if (WARN_ON(nr_addrs < 1 || nr_addrs > 2))
		return -EINVAL;

	switch (priv->kcs_bmc.channel) {
	case 1:
		regmap_update_bits(priv->map, LPC_HICR4, LPC_HICR4_LADR12AS, 0);
		regmap_write(priv->map, LPC_LADR12H, addrs[0] >> 8);
		regmap_write(priv->map, LPC_LADR12L, addrs[0] & 0xFF);
		if (nr_addrs == 2) {
			regmap_update_bits(priv->map, LPC_LSADR12, LPC_LSADR12_LSADR1_MASK,
					   addrs[1] << LPC_LSADR12_LSADR1_SHIFT);

			regmap_update_bits(priv->map, LPC_HICRB, LPC_HICRB_EN16LADR1,
					   LPC_HICRB_EN16LADR1);
		}
		break;

	case 2:
		regmap_update_bits(priv->map, LPC_HICR4, LPC_HICR4_LADR12AS, LPC_HICR4_LADR12AS);
		regmap_write(priv->map, LPC_LADR12H, addrs[0] >> 8);
		regmap_write(priv->map, LPC_LADR12L, addrs[0] & 0xFF);
		if (nr_addrs == 2) {
			regmap_update_bits(priv->map, LPC_LSADR12, LPC_LSADR12_LSADR2_MASK,
					   addrs[1] << LPC_LSADR12_LSADR2_SHIFT);

			regmap_update_bits(priv->map, LPC_HICRB, LPC_HICRB_EN16LADR2,
					   LPC_HICRB_EN16LADR2);
		}
		break;

	case 3:
		if (nr_addrs == 2) {
			dev_err(priv->kcs_bmc.dev,
				"Channel 3 only supports inferred status IO address\n");
			return -EINVAL;
		}

		regmap_write(priv->map, LPC_LADR3H, addrs[0] >> 8);
		regmap_write(priv->map, LPC_LADR3L, addrs[0] & 0xFF);
		break;

	case 4:
		if (nr_addrs == 1)
			regmap_write(priv->map, LPC_LADR4, ((addrs[0] + 1) << 16) | addrs[0]);
		else
			regmap_write(priv->map, LPC_LADR4, (addrs[1] << 16) | addrs[0]);

		break;

	default:
		return -EINVAL;
	}

	return 0;
}

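/* Translate a devicetree IRQ trigger type into the LPC_TYIRQX encoding */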
static inline int aspeed_kcs_map_serirq_type(u32 dt_type)
{
	switch (dt_type) {
	case IRQ_TYPE_EDGE_RISING:
		return LPC_TYIRQX_RISING;
	case IRQ_TYPE_LEVEL_HIGH:
		return LPC_TYIRQX_HIGH;
	case IRQ_TYPE_LEVEL_LOW:
		return LPC_TYIRQX_LOW;
	default:
		return -EINVAL;
	}
}

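/*
 * Record and program the BMC-to-host SerIRQ configuration for the channel.
 * Channels 2-4 have their SerIRQ ID and polarity/type fields programmed here;
 * the IRQ itself is asserted from aspeed_kcs_outb() on ODR writes. Channel 1
 * relies on the legacy IRQ1E1/IRQ12E1 controls, also driven from outb().
 */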
static int aspeed_kcs_config_upstream_irq(struct aspeed_kcs_bmc *priv, u32 id, u32 dt_type)
{
	unsigned int mask, val, hw_type;
	int ret;

	if (id > 15)
		return -EINVAL;

	ret = aspeed_kcs_map_serirq_type(dt_type);
	if (ret < 0)
		return ret;
	hw_type = ret;

	priv->upstream_irq.mode = aspeed_kcs_irq_serirq;
	priv->upstream_irq.id = id;

	switch (priv->kcs_bmc.channel) {
	case 1:
		/* Needs IRQxE1 rather than (ID1IRQX, SEL1IRQX, IRQXE1) before AST2600 A3 */
		break;
	case 2:
		if (!(hw_type == LPC_TYIRQX_LOW || hw_type == LPC_TYIRQX_HIGH))
			return -EINVAL;

		mask = LPC_HICR5_SEL2IRQX | LPC_HICR5_ID2IRQX_MASK;
		val = (id << LPC_HICR5_ID2IRQX_SHIFT);
		val |= (hw_type == LPC_TYIRQX_HIGH) ? LPC_HICR5_SEL2IRQX : 0;
		regmap_update_bits(priv->map, LPC_HICR5, mask, val);

		break;
	case 3:
		if (!(hw_type == LPC_TYIRQX_LOW || hw_type == LPC_TYIRQX_HIGH))
			return -EINVAL;

		mask = LPC_HICR5_SEL3IRQX | LPC_HICR5_ID3IRQX_MASK;
		val = (id << LPC_HICR5_ID3IRQX_SHIFT);
		val |= (hw_type == LPC_TYIRQX_HIGH) ? LPC_HICR5_SEL3IRQX : 0;
		regmap_update_bits(priv->map, LPC_HICR5, mask, val);

		break;
	case 4:
		mask = LPC_HICRC_ID4IRQX_MASK | LPC_HICRC_TY4IRQX_MASK | LPC_HICRC_OBF4_AUTO_CLR;
		val = (id << LPC_HICRC_ID4IRQX_SHIFT) | (hw_type << LPC_HICRC_TY4IRQX_SHIFT);
		regmap_update_bits(priv->map, LPC_HICRC, mask, val);
		break;
	default:
		dev_warn(priv->kcs_bmc.dev,
			 "SerIRQ configuration not supported on KCS channel %d\n",
			 priv->kcs_bmc.channel);
		return -EINVAL;
	}

	return 0;
}

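/* Enable or disable the LPC channel (and, for channel 3, the KCS function) */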
static void aspeed_kcs_enable_channel(struct kcs_bmc_device *kcs_bmc, bool enable)
{
	struct aspeed_kcs_bmc *priv = to_aspeed_kcs_bmc(kcs_bmc);

	switch (kcs_bmc->channel) {
	case 1:
		regmap_update_bits(priv->map, LPC_HICR0, LPC_HICR0_LPC1E, enable * LPC_HICR0_LPC1E);
		return;
	case 2:
		regmap_update_bits(priv->map, LPC_HICR0, LPC_HICR0_LPC2E, enable * LPC_HICR0_LPC2E);
		return;
	case 3:
		regmap_update_bits(priv->map, LPC_HICR0, LPC_HICR0_LPC3E, enable * LPC_HICR0_LPC3E);
		regmap_update_bits(priv->map, LPC_HICR4,
				   LPC_HICR4_KCSENBL, enable * LPC_HICR4_KCSENBL);
		return;
	case 4:
		regmap_update_bits(priv->map, LPC_HICRB, LPC_HICRB_LPC4E, enable * LPC_HICRB_LPC4E);
		return;
	default:
		pr_warn("%s: Unsupported channel: %d", __func__, kcs_bmc->channel);
		return;
	}
}

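/*
 * Timer callback emulating an "output buffer empty" interrupt: the hardware
 * only interrupts on IBF, so while an OBE event is wanted we periodically poll
 * STR until the host has drained OBF, then notify the KCS core.
 */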
static void aspeed_kcs_check_obe(struct timer_list *timer)
{
	struct aspeed_kcs_bmc *priv = container_of(timer, struct aspeed_kcs_bmc, obe.timer);
	unsigned long flags;
	u8 str;

	spin_lock_irqsave(&priv->obe.lock, flags);
	if (priv->obe.remove) {
		spin_unlock_irqrestore(&priv->obe.lock, flags);
		return;
	}

	str = aspeed_kcs_inb(&priv->kcs_bmc, priv->kcs_bmc.ioreg.str);
	if (str & KCS_BMC_STR_OBF) {
		mod_timer(timer, jiffies + OBE_POLL_PERIOD);
		spin_unlock_irqrestore(&priv->obe.lock, flags);
		return;
	}
	spin_unlock_irqrestore(&priv->obe.lock, flags);

	kcs_bmc_handle_event(&priv->kcs_bmc);
}

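/*
 * (Un)mask event delivery. IBF has a real per-channel interrupt enable bit;
 * OBE has none, so enabling it first polls STR briefly for OBF to clear and
 * otherwise falls back to the slow obe.timer poll.
 */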
static void aspeed_kcs_irq_mask_update(struct kcs_bmc_device *kcs_bmc, u8 mask, u8 state)
{
	struct aspeed_kcs_bmc *priv = to_aspeed_kcs_bmc(kcs_bmc);
	int rc;
	u8 str;

	/* We don't have an OBE IRQ, emulate it */
	if (mask & KCS_BMC_EVENT_TYPE_OBE) {
		if (KCS_BMC_EVENT_TYPE_OBE & state) {
			/*
			 * Given we don't have an OBE IRQ, delay by polling briefly to see if we can
			 * observe such an event before returning to the caller. This is not
			 * incorrect because OBF may have already become clear before enabling the
			 * IRQ if we had one, under which circumstance no event will be propagated
			 * anyway.
			 *
			 * The onus is on the client to perform a race-free check that it hasn't
			 * missed the event.
			 */
			rc = read_poll_timeout_atomic(aspeed_kcs_inb, str,
						      !(str & KCS_BMC_STR_OBF), 1, 100, false,
						      &priv->kcs_bmc, priv->kcs_bmc.ioreg.str);
			/* Time for the slow path? */
			if (rc == -ETIMEDOUT)
				mod_timer(&priv->obe.timer, jiffies + OBE_POLL_PERIOD);
		} else {
			timer_delete(&priv->obe.timer);
		}
	}

	if (mask & KCS_BMC_EVENT_TYPE_IBF) {
		const bool enable = !!(state & KCS_BMC_EVENT_TYPE_IBF);

		switch (kcs_bmc->channel) {
		case 1:
			regmap_update_bits(priv->map, LPC_HICR2, LPC_HICR2_IBFIE1,
					   enable * LPC_HICR2_IBFIE1);
			return;
		case 2:
			regmap_update_bits(priv->map, LPC_HICR2, LPC_HICR2_IBFIE2,
					   enable * LPC_HICR2_IBFIE2);
			return;
		case 3:
			regmap_update_bits(priv->map, LPC_HICR2, LPC_HICR2_IBFIE3,
					   enable * LPC_HICR2_IBFIE3);
			return;
		case 4:
			regmap_update_bits(priv->map, LPC_HICRB, LPC_HICRB_IBFIE4,
					   enable * LPC_HICRB_IBFIE4);
			return;
		default:
			pr_warn("%s: Unsupported channel: %d", __func__, kcs_bmc->channel);
			return;
		}
	}
}

static const struct kcs_bmc_device_ops aspeed_kcs_ops = {
	.irq_mask_update = aspeed_kcs_irq_mask_update,
	.io_inputb = aspeed_kcs_inb,
	.io_outputb = aspeed_kcs_outb,
	.io_updateb = aspeed_kcs_updateb,
};

static irqreturn_t aspeed_kcs_irq(int irq, void *arg)
{
	struct kcs_bmc_device *kcs_bmc = arg;

	return kcs_bmc_handle_event(kcs_bmc);
}

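/* Request the host-to-BMC interrupt from the LPC controller (shared with other LPC functions) */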
static int aspeed_kcs_config_downstream_irq(struct kcs_bmc_device *kcs_bmc,
					    struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	int irq;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	return devm_request_irq(dev, irq, aspeed_kcs_irq, IRQF_SHARED,
				dev_name(dev), kcs_bmc);
}

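/* IDR/ODR/STR register offsets for each of the four KCS channels */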
static const struct kcs_ioreg ast_kcs_bmc_ioregs[KCS_CHANNEL_MAX] = {
	{ .idr = LPC_IDR1, .odr = LPC_ODR1, .str = LPC_STR1 },
	{ .idr = LPC_IDR2, .odr = LPC_ODR2, .str = LPC_STR2 },
	{ .idr = LPC_IDR3, .odr = LPC_ODR3, .str = LPC_STR3 },
	{ .idr = LPC_IDR4, .odr = LPC_ODR4, .str = LPC_STR4 },
};

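/*
 * Infer the KCS channel number from the devicetree 'reg' property: the three
 * register offsets must exactly match one of the known IDR/ODR/STR tuples.
 */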
static int aspeed_kcs_of_get_channel(struct platform_device *pdev)
{
	struct device_node *np;
	struct kcs_ioreg ioreg;
	const __be32 *reg;
	int i;

	np = pdev->dev.of_node;

	/* Don't translate addresses, we want offsets for the regmaps */
	reg = of_get_address(np, 0, NULL, NULL);
	if (!reg)
		return -EINVAL;
	ioreg.idr = be32_to_cpup(reg);

	reg = of_get_address(np, 1, NULL, NULL);
	if (!reg)
		return -EINVAL;
	ioreg.odr = be32_to_cpup(reg);

	reg = of_get_address(np, 2, NULL, NULL);
	if (!reg)
		return -EINVAL;
	ioreg.str = be32_to_cpup(reg);

	for (i = 0; i < ARRAY_SIZE(ast_kcs_bmc_ioregs); i++) {
		if (!memcmp(&ast_kcs_bmc_ioregs[i], &ioreg, sizeof(ioreg)))
			return i + 1;
	}
	return -EINVAL;
}

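/*
 * Read the host I/O address(es) from 'aspeed,lpc-io-reg': one entry gives the
 * data address (status address inferred), two entries give data and status.
 * Returns the number of addresses read, or a negative error code.
 */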
static int
aspeed_kcs_of_get_io_address(struct platform_device *pdev, u32 addrs[2])
{
	int rc;

	rc = of_property_read_variable_u32_array(pdev->dev.of_node,
						 "aspeed,lpc-io-reg",
						 addrs, 1, 2);
	if (rc < 0) {
		dev_err(&pdev->dev, "No valid 'aspeed,lpc-io-reg' configured\n");
		return rc;
	}

	if (addrs[0] > 0xffff) {
		dev_err(&pdev->dev, "Invalid data address in 'aspeed,lpc-io-reg'\n");
		return -EINVAL;
	}

	if (rc == 2 && addrs[1] > 0xffff) {
		dev_err(&pdev->dev, "Invalid status address in 'aspeed,lpc-io-reg'\n");
		return -EINVAL;
	}

	return rc;
}

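/*
 * Probe: validate the parent LPC binding, determine the channel and host I/O
 * addresses from devicetree, wire up the downstream and (optional) upstream
 * IRQs, then hand the device to the KCS BMC core via kcs_bmc_add_device().
 */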
static int aspeed_kcs_probe(struct platform_device *pdev)
{
	struct kcs_bmc_device *kcs_bmc;
	struct aspeed_kcs_bmc *priv;
	struct device_node *np;
	bool have_upstream_irq;
	u32 upstream_irq[2];
	int rc, channel;
	int nr_addrs;
	u32 addrs[2];

	np = pdev->dev.of_node->parent;
	if (!of_device_is_compatible(np, "aspeed,ast2400-lpc-v2") &&
	    !of_device_is_compatible(np, "aspeed,ast2500-lpc-v2") &&
	    !of_device_is_compatible(np, "aspeed,ast2600-lpc-v2")) {
		dev_err(&pdev->dev, "unsupported LPC device binding\n");
		return -ENODEV;
	}

	channel = aspeed_kcs_of_get_channel(pdev);
	if (channel < 0)
		return channel;

	nr_addrs = aspeed_kcs_of_get_io_address(pdev, addrs);
	if (nr_addrs < 0)
		return nr_addrs;

	np = pdev->dev.of_node;
	rc = of_property_read_u32_array(np, "aspeed,lpc-interrupts", upstream_irq, 2);
	if (rc && rc != -EINVAL)
		return -EINVAL;

	have_upstream_irq = !rc;

	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	kcs_bmc = &priv->kcs_bmc;
	kcs_bmc->dev = &pdev->dev;
	kcs_bmc->channel = channel;
	kcs_bmc->ioreg = ast_kcs_bmc_ioregs[channel - 1];
	kcs_bmc->ops = &aspeed_kcs_ops;

	priv->map = syscon_node_to_regmap(pdev->dev.parent->of_node);
	if (IS_ERR(priv->map)) {
		dev_err(&pdev->dev, "Couldn't get regmap\n");
		return -ENODEV;
	}

	spin_lock_init(&priv->obe.lock);
	priv->obe.remove = false;
	timer_setup(&priv->obe.timer, aspeed_kcs_check_obe, 0);

	rc = aspeed_kcs_set_address(kcs_bmc, addrs, nr_addrs);
	if (rc)
		return rc;

	/* Host to BMC IRQ */
	rc = aspeed_kcs_config_downstream_irq(kcs_bmc, pdev);
	if (rc)
		return rc;

	/* BMC to Host IRQ */
	if (have_upstream_irq) {
		rc = aspeed_kcs_config_upstream_irq(priv, upstream_irq[0], upstream_irq[1]);
		if (rc < 0)
			return rc;
	} else {
		priv->upstream_irq.mode = aspeed_kcs_irq_none;
	}

	platform_set_drvdata(pdev, priv);

	aspeed_kcs_irq_mask_update(kcs_bmc, (KCS_BMC_EVENT_TYPE_IBF | KCS_BMC_EVENT_TYPE_OBE), 0);
	aspeed_kcs_enable_channel(kcs_bmc, true);

	rc = kcs_bmc_add_device(&priv->kcs_bmc);
	if (rc) {
		dev_warn(&pdev->dev, "Failed to register channel %d: %d\n", kcs_bmc->channel, rc);
		return rc;
	}

	dev_info(&pdev->dev, "Initialised channel %d at 0x%x\n",
		 kcs_bmc->channel, addrs[0]);

	return 0;
}

static void aspeed_kcs_remove(struct platform_device *pdev)
{
	struct aspeed_kcs_bmc *priv = platform_get_drvdata(pdev);
	struct kcs_bmc_device *kcs_bmc = &priv->kcs_bmc;

	kcs_bmc_remove_device(kcs_bmc);

	aspeed_kcs_enable_channel(kcs_bmc, false);
	aspeed_kcs_irq_mask_update(kcs_bmc, (KCS_BMC_EVENT_TYPE_IBF | KCS_BMC_EVENT_TYPE_OBE), 0);

	/* Make sure it's properly dead */
	spin_lock_irq(&priv->obe.lock);
	priv->obe.remove = true;
	spin_unlock_irq(&priv->obe.lock);
	timer_delete_sync(&priv->obe.timer);
}

static const struct of_device_id ast_kcs_bmc_match[] = {
	{ .compatible = "aspeed,ast2400-kcs-bmc-v2" },
	{ .compatible = "aspeed,ast2500-kcs-bmc-v2" },
	{ .compatible = "aspeed,ast2600-kcs-bmc" },
	{ }
};
MODULE_DEVICE_TABLE(of, ast_kcs_bmc_match);

static struct platform_driver ast_kcs_bmc_driver = {
	.driver = {
		.name = DEVICE_NAME,
		.of_match_table = ast_kcs_bmc_match,
	},
	.probe = aspeed_kcs_probe,
	.remove = aspeed_kcs_remove,
};
module_platform_driver(ast_kcs_bmc_driver);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Haiyue Wang <[email protected]>");
MODULE_AUTHOR("Andrew Jeffery <[email protected]>");
MODULE_DESCRIPTION("Aspeed device interface to the KCS BMC device");