Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
torvalds
GitHub Repository: torvalds/linux
Path: blob/master/drivers/hsi/clients/ssi_protocol.c
26282 views
1
// SPDX-License-Identifier: GPL-2.0-only
2
/*
3
* ssi_protocol.c
4
*
5
* Implementation of the SSI McSAAB improved protocol.
6
*
7
* Copyright (C) 2010 Nokia Corporation. All rights reserved.
8
* Copyright (C) 2013 Sebastian Reichel <[email protected]>
9
*
10
* Contact: Carlos Chinea <[email protected]>
11
*/
12
13
#include <linux/atomic.h>
14
#include <linux/clk.h>
15
#include <linux/device.h>
16
#include <linux/err.h>
17
#include <linux/if_ether.h>
18
#include <linux/if_arp.h>
19
#include <linux/if_phonet.h>
20
#include <linux/init.h>
21
#include <linux/irq.h>
22
#include <linux/list.h>
23
#include <linux/module.h>
24
#include <linux/netdevice.h>
25
#include <linux/notifier.h>
26
#include <linux/scatterlist.h>
27
#include <linux/skbuff.h>
28
#include <linux/slab.h>
29
#include <linux/spinlock.h>
30
#include <linux/timer.h>
31
#include <linux/hsi/hsi.h>
32
#include <linux/hsi/ssi_protocol.h>
33
34
#define SSIP_TXQUEUE_LEN 100
35
#define SSIP_MAX_MTU 65535
36
#define SSIP_DEFAULT_MTU 4000
37
#define PN_MEDIA_SOS 21
38
#define SSIP_MIN_PN_HDR 6 /* FIXME: Revisit */
39
#define SSIP_WDTOUT 2000 /* FIXME: has to be 500 msecs */
40
#define SSIP_KATOUT 15 /* 15 msecs */
41
#define SSIP_MAX_CMDS 5 /* Number of pre-allocated commands buffers */
42
#define SSIP_BYTES_TO_FRAMES(x) ((((x) - 1) >> 2) + 1)
43
#define SSIP_CMT_LOADER_SYNC 0x11223344
44
/*
45
* SSI protocol command definitions
46
*/
47
#define SSIP_COMMAND(data) ((data) >> 28)
48
#define SSIP_PAYLOAD(data) ((data) & 0xfffffff)
49
/* Commands */
50
#define SSIP_SW_BREAK 0
51
#define SSIP_BOOTINFO_REQ 1
52
#define SSIP_BOOTINFO_RESP 2
53
#define SSIP_WAKETEST_RESULT 3
54
#define SSIP_START_TRANS 4
55
#define SSIP_READY 5
56
/* Payloads */
57
#define SSIP_DATA_VERSION(data) ((data) & 0xff)
58
#define SSIP_LOCAL_VERID 1
59
#define SSIP_WAKETEST_OK 0
60
#define SSIP_WAKETEST_FAILED 1
61
#define SSIP_PDU_LENGTH(data) (((data) >> 8) & 0xffff)
62
#define SSIP_MSG_ID(data) ((data) & 0xff)
63
/* Generic Command */
64
#define SSIP_CMD(cmd, payload) (((cmd) << 28) | ((payload) & 0xfffffff))
65
/* Commands for the control channel */
66
#define SSIP_BOOTINFO_REQ_CMD(ver) \
67
SSIP_CMD(SSIP_BOOTINFO_REQ, SSIP_DATA_VERSION(ver))
68
#define SSIP_BOOTINFO_RESP_CMD(ver) \
69
SSIP_CMD(SSIP_BOOTINFO_RESP, SSIP_DATA_VERSION(ver))
70
#define SSIP_START_TRANS_CMD(pdulen, id) \
71
SSIP_CMD(SSIP_START_TRANS, (((pdulen) << 8) | SSIP_MSG_ID(id)))
72
#define SSIP_READY_CMD SSIP_CMD(SSIP_READY, 0)
73
#define SSIP_SWBREAK_CMD SSIP_CMD(SSIP_SW_BREAK, 0)
74
75
#define SSIP_WAKETEST_FLAG 0
76
77
/*
 * Main state machine states:
 * INIT      - driver bound, no handshake started yet
 * HANDSHAKE - BOOTINFO exchange / wake line test in progress
 * ACTIVE    - modem (CMT) online, data transfers allowed
 */
enum {
	INIT,
	HANDSHAKE,
	ACTIVE,
};
83
84
/*
 * Send state machine states:
 * SEND_IDLE       - TX wake line down, nothing queued
 * WAIT4READY      - TX wake raised, waiting for peer READY command
 * SEND_READY      - peer signalled READY, transfer may start
 * SENDING         - data transfer in progress
 * SENDING_SWBREAK - trailing SW break command in flight
 */
enum {
	SEND_IDLE,
	WAIT4READY,
	SEND_READY,
	SENDING,
	SENDING_SWBREAK,
};
92
93
/*
 * Receive state machine states:
 * RECV_IDLE  - RX wake line down, nothing expected
 * RECV_READY - READY sent to peer, waiting for START_TRANS
 * RECEIVING  - data transfer in progress
 */
enum {
	RECV_IDLE,
	RECV_READY,
	RECEIVING,
};
99
100
/**
 * struct ssi_protocol - SSI protocol (McSAAB) data
 * @main_state: Main state machine (INIT/HANDSHAKE/ACTIVE)
 * @send_state: TX state machine
 * @recv_state: RX state machine
 * @flags: Flags, currently only used to follow wake line test
 * @rxid: RX data id, checked against incoming START_TRANS ids
 * @txid: TX data id, stamped into outgoing START_TRANS commands
 * @txqueue_len: TX queue length
 * @tx_wd: TX watchdog
 * @rx_wd: RX watchdog
 * @keep_alive: Workaround for SSI HW bug
 * @lock: To serialize access to this struct
 * @netdev: Phonet network device
 * @txqueue: TX data queue
 * @cmdqueue: Queue of free commands
 * @work: &struct work_struct for scheduled work
 * @cl: HSI client own reference
 * @link: Link for ssip_list
 * @tx_usecnt: Refcount to keep track the slaves that use the wake line
 * @channel_id_cmd: HSI channel id for command stream
 * @channel_id_data: HSI channel id for data stream
 */
struct ssi_protocol {
	unsigned int main_state;
	unsigned int send_state;
	unsigned int recv_state;
	unsigned long flags;
	u8 rxid;
	u8 txid;
	unsigned int txqueue_len;
	struct timer_list tx_wd;
	struct timer_list rx_wd;
	struct timer_list keep_alive; /* wake-up workaround */
	spinlock_t lock;
	struct net_device *netdev;
	struct list_head txqueue;
	struct list_head cmdqueue;
	struct work_struct work;
	struct hsi_client *cl;
	struct list_head link;
	atomic_t tx_usecnt;
	int channel_id_cmd;
	int channel_id_data;
};
145
146
/* List of ssi protocol instances */
147
static LIST_HEAD(ssip_list);
148
149
static void ssip_rxcmd_complete(struct hsi_msg *msg);
150
151
/* Store a 32-bit McSAAB command word into a command message buffer. */
static inline void ssip_set_cmd(struct hsi_msg *msg, u32 cmd)
{
	u32 *buf = sg_virt(msg->sgt.sgl);

	*buf = cmd;
}
158
159
static inline u32 ssip_get_cmd(struct hsi_msg *msg)
160
{
161
u32 *data;
162
163
data = sg_virt(msg->sgt.sgl);
164
165
return *data;
166
}
167
168
/*
 * Map an sk_buff (linear head + page frags) onto the scatterlist of an
 * HSI message.  The message must have been allocated with exactly
 * nr_frags + 1 scatterlist entries.
 */
static void ssip_skb_to_msg(struct sk_buff *skb, struct hsi_msg *msg)
{
	skb_frag_t *frag;
	struct scatterlist *sg;
	int i;

	BUG_ON(msg->sgt.nents != (unsigned int)(skb_shinfo(skb)->nr_frags + 1));

	sg = msg->sgt.sgl;
	/* First entry covers the linear part of the skb */
	sg_set_buf(sg, skb->data, skb_headlen(skb));
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		sg = sg_next(sg);
		BUG_ON(!sg);
		frag = &skb_shinfo(skb)->frags[i];
		sg_set_page(sg, skb_frag_page(frag), skb_frag_size(frag),
			    skb_frag_off(frag));
	}
}
186
187
/*
 * Destructor for data messages: frees both the skb stored in
 * msg->context and the HSI message itself.
 */
static void ssip_free_data(struct hsi_msg *msg)
{
	struct sk_buff *skb;

	skb = msg->context;
	pr_debug("free data: msg %p context %p skb %p\n", msg, msg->context,
		 skb);
	/* Clear destructor so hsi_free_msg() does not re-enter us */
	msg->destructor = NULL;
	dev_kfree_skb(skb);
	hsi_free_msg(msg);
}
198
199
/*
 * Allocate an HSI data message wrapping @skb (one sg entry per frag
 * plus one for the linear head).  The message takes ownership of the
 * skb: ssip_free_data() releases both.  Returns NULL on allocation
 * failure.
 */
static struct hsi_msg *ssip_alloc_data(struct ssi_protocol *ssi,
				       struct sk_buff *skb, gfp_t flags)
{
	struct hsi_msg *msg;

	msg = hsi_alloc_msg(skb_shinfo(skb)->nr_frags + 1, flags);
	if (!msg)
		return NULL;
	ssip_skb_to_msg(skb, msg);
	msg->destructor = ssip_free_data;
	msg->channel = ssi->channel_id_data;
	msg->context = skb;

	return msg;
}
214
215
/*
 * Return a command message to the free-command pool.  Also used as the
 * completion/destructor callback for simple command writes.
 */
static inline void ssip_release_cmd(struct hsi_msg *msg)
{
	struct ssi_protocol *ssi = hsi_client_drvdata(msg->cl);

	dev_dbg(&msg->cl->device, "Release cmd 0x%08x\n", ssip_get_cmd(msg));
	spin_lock_bh(&ssi->lock);
	list_add_tail(&msg->link, &ssi->cmdqueue);
	spin_unlock_bh(&ssi->lock);
}
224
225
/*
 * Take a pre-allocated command message from the pool.  The pool is
 * sized (SSIP_MAX_CMDS) so that it can never run dry in correct
 * operation, hence the BUG_ON rather than an error path.
 */
static struct hsi_msg *ssip_claim_cmd(struct ssi_protocol *ssi)
{
	struct hsi_msg *msg;

	BUG_ON(list_empty(&ssi->cmdqueue));

	spin_lock_bh(&ssi->lock);
	msg = list_first_entry(&ssi->cmdqueue, struct hsi_msg, link);
	list_del(&msg->link);
	spin_unlock_bh(&ssi->lock);
	/* Default destructor returns the command to the pool */
	msg->destructor = ssip_release_cmd;

	return msg;
}
239
240
/* Free every pooled command message and its kmalloc'ed 4-byte buffer. */
static void ssip_free_cmds(struct ssi_protocol *ssi)
{
	struct hsi_msg *msg, *tmp;

	list_for_each_entry_safe(msg, tmp, &ssi->cmdqueue, link) {
		list_del(&msg->link);
		msg->destructor = NULL;
		kfree(sg_virt(msg->sgt.sgl));
		hsi_free_msg(msg);
	}
}
251
252
/*
 * Pre-allocate SSIP_MAX_CMDS single-word command messages into the
 * free pool.  On any allocation failure the partially built pool is
 * torn down and -ENOMEM is returned.
 */
static int ssip_alloc_cmds(struct ssi_protocol *ssi)
{
	struct hsi_msg *msg;
	u32 *buf;
	unsigned int i;

	for (i = 0; i < SSIP_MAX_CMDS; i++) {
		msg = hsi_alloc_msg(1, GFP_KERNEL);
		if (!msg)
			goto out;
		buf = kmalloc(sizeof(*buf), GFP_KERNEL);
		if (!buf) {
			hsi_free_msg(msg);
			goto out;
		}
		sg_init_one(msg->sgt.sgl, buf, sizeof(*buf));
		msg->channel = ssi->channel_id_cmd;
		list_add_tail(&msg->link, &ssi->cmdqueue);
	}

	return 0;
out:
	ssip_free_cmds(ssi);

	return -ENOMEM;
}
278
279
/*
 * Transition the RX state machine and (re)arm or stop the RX watchdog
 * and keep-alive timers accordingly.  Callers hold ssi->lock.
 */
static void ssip_set_rxstate(struct ssi_protocol *ssi, unsigned int state)
{
	ssi->recv_state = state;
	switch (state) {
	case RECV_IDLE:
		timer_delete(&ssi->rx_wd);
		/* Keep-alive only stops once both directions are idle */
		if (ssi->send_state == SEND_IDLE)
			timer_delete(&ssi->keep_alive);
		break;
	case RECV_READY:
		/* CMT speech workaround */
		if (atomic_read(&ssi->tx_usecnt))
			break;
		fallthrough;
	case RECEIVING:
		mod_timer(&ssi->keep_alive, jiffies +
			  msecs_to_jiffies(SSIP_KATOUT));
		mod_timer(&ssi->rx_wd, jiffies + msecs_to_jiffies(SSIP_WDTOUT));
		break;
	default:
		break;
	}
}
302
303
/*
 * Transition the TX state machine and (re)arm or stop the TX watchdog
 * and keep-alive timers accordingly.  Callers hold ssi->lock.
 */
static void ssip_set_txstate(struct ssi_protocol *ssi, unsigned int state)
{
	ssi->send_state = state;
	switch (state) {
	case SEND_IDLE:
	case SEND_READY:
		timer_delete(&ssi->tx_wd);
		/* Keep-alive only stops once both directions are idle */
		if (ssi->recv_state == RECV_IDLE)
			timer_delete(&ssi->keep_alive);
		break;
	case WAIT4READY:
	case SENDING:
	case SENDING_SWBREAK:
		mod_timer(&ssi->keep_alive,
			  jiffies + msecs_to_jiffies(SSIP_KATOUT));
		mod_timer(&ssi->tx_wd, jiffies + msecs_to_jiffies(SSIP_WDTOUT));
		break;
	default:
		break;
	}
}
324
325
/*
 * Find the McSAAB client sharing the same HSI port (device parent) as
 * @slave.  Returns ERR_PTR(-ENODEV) when no instance matches.
 */
struct hsi_client *ssip_slave_get_master(struct hsi_client *slave)
{
	struct hsi_client *master = ERR_PTR(-ENODEV);
	struct ssi_protocol *ssi;

	list_for_each_entry(ssi, &ssip_list, link)
		if (slave->device.parent == ssi->cl->device.parent) {
			master = ssi->cl;
			break;
		}

	return master;
}
EXPORT_SYMBOL_GPL(ssip_slave_get_master);
339
340
/*
 * A slave client (e.g. cmt-speech) wants the TX wake line up.  Raise it
 * if we were idle and bump the shared use count.
 */
int ssip_slave_start_tx(struct hsi_client *master)
{
	struct ssi_protocol *ssi = hsi_client_drvdata(master);

	dev_dbg(&master->device, "start TX %d\n", atomic_read(&ssi->tx_usecnt));
	spin_lock_bh(&ssi->lock);
	if (ssi->send_state == SEND_IDLE) {
		ssip_set_txstate(ssi, WAIT4READY);
		hsi_start_tx(master);
	}
	spin_unlock_bh(&ssi->lock);
	atomic_inc(&ssi->tx_usecnt);

	return 0;
}
EXPORT_SYMBOL_GPL(ssip_slave_start_tx);
356
357
/*
 * Counterpart of ssip_slave_start_tx(): drop the shared use count and,
 * when it reaches zero with no transfer in flight, lower the TX wake
 * line.
 */
int ssip_slave_stop_tx(struct hsi_client *master)
{
	struct ssi_protocol *ssi = hsi_client_drvdata(master);

	WARN_ON_ONCE(atomic_read(&ssi->tx_usecnt) == 0);

	if (atomic_dec_and_test(&ssi->tx_usecnt)) {
		spin_lock_bh(&ssi->lock);
		if ((ssi->send_state == SEND_READY) ||
		    (ssi->send_state == WAIT4READY)) {
			ssip_set_txstate(ssi, SEND_IDLE);
			hsi_stop_tx(master);
		}
		spin_unlock_bh(&ssi->lock);
	}
	dev_dbg(&master->device, "stop TX %d\n", atomic_read(&ssi->tx_usecnt));

	return 0;
}
EXPORT_SYMBOL_GPL(ssip_slave_stop_tx);
377
378
/* Report whether the Phonet interface of the master client is up. */
int ssip_slave_running(struct hsi_client *master)
{
	struct ssi_protocol *ssi = hsi_client_drvdata(master);
	return netif_running(ssi->netdev);
}
EXPORT_SYMBOL_GPL(ssip_slave_running);
384
385
static void ssip_reset(struct hsi_client *cl)
386
{
387
struct ssi_protocol *ssi = hsi_client_drvdata(cl);
388
struct list_head *head, *tmp;
389
struct hsi_msg *msg;
390
391
if (netif_running(ssi->netdev))
392
netif_carrier_off(ssi->netdev);
393
hsi_flush(cl);
394
spin_lock_bh(&ssi->lock);
395
if (ssi->send_state != SEND_IDLE)
396
hsi_stop_tx(cl);
397
spin_unlock_bh(&ssi->lock);
398
if (test_and_clear_bit(SSIP_WAKETEST_FLAG, &ssi->flags))
399
ssi_waketest(cl, 0); /* FIXME: To be removed */
400
spin_lock_bh(&ssi->lock);
401
timer_delete(&ssi->rx_wd);
402
timer_delete(&ssi->tx_wd);
403
timer_delete(&ssi->keep_alive);
404
cancel_work_sync(&ssi->work);
405
ssi->main_state = 0;
406
ssi->send_state = 0;
407
ssi->recv_state = 0;
408
ssi->flags = 0;
409
ssi->rxid = 0;
410
ssi->txid = 0;
411
list_for_each_safe(head, tmp, &ssi->txqueue) {
412
msg = list_entry(head, struct hsi_msg, link);
413
dev_dbg(&cl->device, "Pending TX data\n");
414
list_del(head);
415
ssip_free_data(msg);
416
}
417
ssi->txqueue_len = 0;
418
spin_unlock_bh(&ssi->lock);
419
}
420
421
/* Dump the complete protocol state to the log for error diagnosis. */
static void ssip_dump_state(struct hsi_client *cl)
{
	struct ssi_protocol *ssi = hsi_client_drvdata(cl);
	struct hsi_msg *msg;

	spin_lock_bh(&ssi->lock);
	dev_err(&cl->device, "Main state: %d\n", ssi->main_state);
	dev_err(&cl->device, "Recv state: %d\n", ssi->recv_state);
	dev_err(&cl->device, "Send state: %d\n", ssi->send_state);
	dev_err(&cl->device, "CMT %s\n", (ssi->main_state == ACTIVE) ?
	        "Online" : "Offline");
	dev_err(&cl->device, "Wake test %d\n",
	        test_bit(SSIP_WAKETEST_FLAG, &ssi->flags));
	dev_err(&cl->device, "Data RX id: %d\n", ssi->rxid);
	dev_err(&cl->device, "Data TX id: %d\n", ssi->txid);

	list_for_each_entry(msg, &ssi->txqueue, link)
		dev_err(&cl->device, "pending TX data (%p)\n", msg);
	spin_unlock_bh(&ssi->lock);
}
441
442
/*
 * Fatal protocol error handler: dump state, reset everything and
 * re-arm the command receiver so a new handshake can begin.
 */
static void ssip_error(struct hsi_client *cl)
{
	struct ssi_protocol *ssi = hsi_client_drvdata(cl);
	struct hsi_msg *msg;

	ssip_dump_state(cl);
	ssip_reset(cl);
	msg = ssip_claim_cmd(ssi);
	msg->complete = ssip_rxcmd_complete;
	hsi_async_read(cl, msg);
}
453
454
/*
 * Keep-alive timer callback (SSI HW bug workaround): keeps re-arming
 * itself while any transfer is pending, stops once both directions go
 * idle.
 */
static void ssip_keep_alive(struct timer_list *t)
{
	struct ssi_protocol *ssi = timer_container_of(ssi, t, keep_alive);
	struct hsi_client *cl = ssi->cl;

	dev_dbg(&cl->device, "Keep alive kick in: m(%d) r(%d) s(%d)\n",
		ssi->main_state, ssi->recv_state, ssi->send_state);

	spin_lock(&ssi->lock);
	if (ssi->recv_state == RECV_IDLE)
		switch (ssi->send_state) {
		case SEND_READY:
			if (atomic_read(&ssi->tx_usecnt) == 0)
				break;
			fallthrough;
			/*
			 * Workaround for cmt-speech: in that case
			 * we rely on audio timers.
			 */
		case SEND_IDLE:
			spin_unlock(&ssi->lock);
			return;
		}
	mod_timer(&ssi->keep_alive, jiffies + msecs_to_jiffies(SSIP_KATOUT));
	spin_unlock(&ssi->lock);
}
480
481
/* RX watchdog timer callback: treat an RX timeout as a fatal error. */
static void ssip_rx_wd(struct timer_list *t)
{
	struct ssi_protocol *ssi = timer_container_of(ssi, t, rx_wd);
	struct hsi_client *cl = ssi->cl;

	dev_err(&cl->device, "Watchdog triggered\n");
	ssip_error(cl);
}
489
490
/* TX watchdog timer callback: treat a TX timeout as a fatal error. */
static void ssip_tx_wd(struct timer_list *t)
{
	struct ssi_protocol *ssi = timer_container_of(ssi, t, tx_wd);
	struct hsi_client *cl = ssi->cl;

	dev_err(&cl->device, "Watchdog triggered\n");
	ssip_error(cl);
}
498
499
/*
 * Kick off the handshake: send BOOT INFO REQ with our protocol version
 * and queue a command read for the peer's reply.
 */
static void ssip_send_bootinfo_req_cmd(struct hsi_client *cl)
{
	struct ssi_protocol *ssi = hsi_client_drvdata(cl);
	struct hsi_msg *msg;

	dev_dbg(&cl->device, "Issuing BOOT INFO REQ command\n");
	msg = ssip_claim_cmd(ssi);
	ssip_set_cmd(msg, SSIP_BOOTINFO_REQ_CMD(SSIP_LOCAL_VERID));
	msg->complete = ssip_release_cmd;
	hsi_async_write(cl, msg);
	dev_dbg(&cl->device, "Issuing RX command\n");
	msg = ssip_claim_cmd(ssi);
	msg->complete = ssip_rxcmd_complete;
	hsi_async_read(cl, msg);
}
514
515
/*
 * RX wake line went up: acknowledge with a READY command once we are
 * ACTIVE and not already in RECV_READY.
 */
static void ssip_start_rx(struct hsi_client *cl)
{
	struct ssi_protocol *ssi = hsi_client_drvdata(cl);
	struct hsi_msg *msg;

	dev_dbg(&cl->device, "RX start M(%d) R(%d)\n", ssi->main_state,
		ssi->recv_state);
	spin_lock_bh(&ssi->lock);
	/*
	 * We can have two UP events in a row due to a short low
	 * high transition. Therefore we need to ignore the second UP event.
	 */
	if ((ssi->main_state != ACTIVE) || (ssi->recv_state == RECV_READY)) {
		spin_unlock_bh(&ssi->lock);
		return;
	}
	ssip_set_rxstate(ssi, RECV_READY);
	spin_unlock_bh(&ssi->lock);

	msg = ssip_claim_cmd(ssi);
	ssip_set_cmd(msg, SSIP_READY_CMD);
	msg->complete = ssip_release_cmd;
	dev_dbg(&cl->device, "Send READY\n");
	hsi_async_write(cl, msg);
}
540
541
/* RX wake line went down: return the RX state machine to idle. */
static void ssip_stop_rx(struct hsi_client *cl)
{
	struct ssi_protocol *ssi = hsi_client_drvdata(cl);

	dev_dbg(&cl->device, "RX stop M(%d)\n", ssi->main_state);
	spin_lock_bh(&ssi->lock);
	if (likely(ssi->main_state == ACTIVE))
		ssip_set_rxstate(ssi, RECV_IDLE);
	spin_unlock_bh(&ssi->lock);
}
551
552
/*
 * Destructor for a START_TRANS command: also frees the data message
 * stashed in msg->context, then returns the command to the pool.
 */
static void ssip_free_strans(struct hsi_msg *msg)
{
	ssip_free_data(msg->context);
	ssip_release_cmd(msg);
}
557
558
/*
 * START_TRANS command went out: release it and start the actual data
 * write it announced.
 */
static void ssip_strans_complete(struct hsi_msg *msg)
{
	struct hsi_client *cl = msg->cl;
	struct ssi_protocol *ssi = hsi_client_drvdata(cl);
	struct hsi_msg *data;

	data = msg->context;
	ssip_release_cmd(msg);
	spin_lock_bh(&ssi->lock);
	ssip_set_txstate(ssi, SENDING);
	spin_unlock_bh(&ssi->lock);
	hsi_async_write(cl, data);
}
571
572
/*
 * Dequeue the next TX data message and announce it to the peer with a
 * START_TRANS command (length in 32-bit frames plus running msg id).
 * The data itself is written from ssip_strans_complete().
 */
static int ssip_xmit(struct hsi_client *cl)
{
	struct ssi_protocol *ssi = hsi_client_drvdata(cl);
	struct hsi_msg *msg, *dmsg;
	struct sk_buff *skb;

	spin_lock_bh(&ssi->lock);
	if (list_empty(&ssi->txqueue)) {
		spin_unlock_bh(&ssi->lock);
		return 0;
	}
	dmsg = list_first_entry(&ssi->txqueue, struct hsi_msg, link);
	list_del(&dmsg->link);
	ssi->txqueue_len--;
	spin_unlock_bh(&ssi->lock);

	msg = ssip_claim_cmd(ssi);
	skb = dmsg->context;
	msg->context = dmsg;
	msg->complete = ssip_strans_complete;
	msg->destructor = ssip_free_strans;

	spin_lock_bh(&ssi->lock);
	ssip_set_cmd(msg, SSIP_START_TRANS_CMD(SSIP_BYTES_TO_FRAMES(skb->len),
					       ssi->txid));
	ssi->txid++;
	ssip_set_txstate(ssi, SENDING);
	spin_unlock_bh(&ssi->lock);

	dev_dbg(&cl->device, "Send STRANS (%d frames)\n",
		SSIP_BYTES_TO_FRAMES(skb->len));

	return hsi_async_write(cl, msg);
}
606
607
/* In soft IRQ context */
/*
 * Hand a received PDU up to the Phonet stack: validate minimum header,
 * restore the length field to host order, strip the media byte.
 */
static void ssip_pn_rx(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;

	if (unlikely(!netif_running(dev))) {
		dev_dbg(&dev->dev, "Drop RX packet\n");
		dev->stats.rx_dropped++;
		dev_kfree_skb(skb);
		return;
	}
	if (unlikely(!pskb_may_pull(skb, SSIP_MIN_PN_HDR))) {
		dev_dbg(&dev->dev, "Error drop RX packet\n");
		dev->stats.rx_errors++;
		dev->stats.rx_length_errors++;
		dev_kfree_skb(skb);
		return;
	}
	dev->stats.rx_packets++;
	dev->stats.rx_bytes += skb->len;

	/* length field is exchanged in network byte order */
	((u16 *)skb->data)[2] = ntohs(((u16 *)skb->data)[2]);
	dev_dbg(&dev->dev, "RX length fixed (%04x -> %u)\n",
		((u16 *)skb->data)[2], ntohs(((u16 *)skb->data)[2]));

	skb->protocol = htons(ETH_P_PHONET);
	skb_reset_mac_header(skb);
	/* Drop the leading media byte before handing to Phonet */
	__skb_pull(skb, 1);
	netif_rx(skb);
}
638
639
/*
 * Data read finished: on success pass the skb to the Phonet stack and
 * free the bare message; on error tear everything down.
 */
static void ssip_rx_data_complete(struct hsi_msg *msg)
{
	struct hsi_client *cl = msg->cl;
	struct ssi_protocol *ssi = hsi_client_drvdata(cl);
	struct sk_buff *skb;

	if (msg->status == HSI_STATUS_ERROR) {
		dev_err(&cl->device, "RX data error\n");
		ssip_free_data(msg);
		ssip_error(cl);
		return;
	}
	timer_delete(&ssi->rx_wd); /* FIXME: Revisit */
	skb = msg->context;
	ssip_pn_rx(skb);
	/* skb ownership moved to the network stack; free only the msg */
	hsi_free_msg(msg);
}
656
657
/*
 * Handle a BOOTINFO_REQ from the modem: enter HANDSHAKE, start the
 * wake line test and answer with BOOTINFO_RESP.  A request while
 * ACTIVE means the modem restarted, so reset first.
 */
static void ssip_rx_bootinforeq(struct hsi_client *cl, u32 cmd)
{
	struct ssi_protocol *ssi = hsi_client_drvdata(cl);
	struct hsi_msg *msg;

	/* Workaround: Ignore CMT Loader message leftover */
	if (cmd == SSIP_CMT_LOADER_SYNC)
		return;

	switch (ssi->main_state) {
	case ACTIVE:
		dev_err(&cl->device, "Boot info req on active state\n");
		ssip_error(cl);
		fallthrough;
	case INIT:
	case HANDSHAKE:
		spin_lock_bh(&ssi->lock);
		ssi->main_state = HANDSHAKE;
		spin_unlock_bh(&ssi->lock);

		if (!test_and_set_bit(SSIP_WAKETEST_FLAG, &ssi->flags))
			ssi_waketest(cl, 1); /* FIXME: To be removed */

		spin_lock_bh(&ssi->lock);
		/* Start boot handshake watchdog */
		mod_timer(&ssi->tx_wd, jiffies + msecs_to_jiffies(SSIP_WDTOUT));
		spin_unlock_bh(&ssi->lock);
		dev_dbg(&cl->device, "Send BOOTINFO_RESP\n");
		if (SSIP_DATA_VERSION(cmd) != SSIP_LOCAL_VERID)
			dev_warn(&cl->device, "boot info req verid mismatch\n");
		msg = ssip_claim_cmd(ssi);
		ssip_set_cmd(msg, SSIP_BOOTINFO_RESP_CMD(SSIP_LOCAL_VERID));
		msg->complete = ssip_release_cmd;
		hsi_async_write(cl, msg);
		break;
	default:
		dev_dbg(&cl->device, "Wrong state M(%d)\n", ssi->main_state);
		break;
	}
}
697
698
/*
 * Handle the modem's BOOTINFO_RESP: while still handshaking, re-arm
 * the boot watchdog; once ACTIVE the response is stale and ignored.
 */
static void ssip_rx_bootinforesp(struct hsi_client *cl, u32 cmd)
{
	struct ssi_protocol *ssi = hsi_client_drvdata(cl);

	if (SSIP_DATA_VERSION(cmd) != SSIP_LOCAL_VERID)
		dev_warn(&cl->device, "boot info resp verid mismatch\n");

	spin_lock_bh(&ssi->lock);
	if (ssi->main_state != ACTIVE)
		/* Use tx_wd as a boot watchdog in non ACTIVE state */
		mod_timer(&ssi->tx_wd, jiffies + msecs_to_jiffies(SSIP_WDTOUT));
	else
		dev_dbg(&cl->device, "boot info resp ignored M(%d)\n",
			ssi->main_state);
	spin_unlock_bh(&ssi->lock);
}
714
715
/*
 * Handle the WAKETEST_RESULT command: on success the handshake is
 * complete and the interface goes online; on failure reset.
 */
static void ssip_rx_waketest(struct hsi_client *cl, u32 cmd)
{
	struct ssi_protocol *ssi = hsi_client_drvdata(cl);
	unsigned int wkres = SSIP_PAYLOAD(cmd);

	spin_lock_bh(&ssi->lock);
	if (ssi->main_state != HANDSHAKE) {
		dev_dbg(&cl->device, "wake lines test ignored M(%d)\n",
			ssi->main_state);
		spin_unlock_bh(&ssi->lock);
		return;
	}
	spin_unlock_bh(&ssi->lock);

	if (test_and_clear_bit(SSIP_WAKETEST_FLAG, &ssi->flags))
		ssi_waketest(cl, 0); /* FIXME: To be removed */

	spin_lock_bh(&ssi->lock);
	ssi->main_state = ACTIVE;
	timer_delete(&ssi->tx_wd); /* Stop boot handshake timer */
	spin_unlock_bh(&ssi->lock);

	dev_notice(&cl->device, "WAKELINES TEST %s\n",
		   wkres & SSIP_WAKETEST_FAILED ? "FAILED" : "OK");
	if (wkres & SSIP_WAKETEST_FAILED) {
		ssip_error(cl);
		return;
	}
	dev_dbg(&cl->device, "CMT is ONLINE\n");
	netif_wake_queue(ssi->netdev);
	netif_carrier_on(ssi->netdev);
}
747
748
/*
 * Handle a READY command from the peer: if we were waiting for it,
 * move to SEND_READY and start transmitting queued data.
 */
static void ssip_rx_ready(struct hsi_client *cl)
{
	struct ssi_protocol *ssi = hsi_client_drvdata(cl);

	spin_lock_bh(&ssi->lock);
	if (unlikely(ssi->main_state != ACTIVE)) {
		dev_dbg(&cl->device, "READY on wrong state: S(%d) M(%d)\n",
			ssi->send_state, ssi->main_state);
		spin_unlock_bh(&ssi->lock);
		return;
	}
	if (ssi->send_state != WAIT4READY) {
		dev_dbg(&cl->device, "Ignore spurious READY command\n");
		spin_unlock_bh(&ssi->lock);
		return;
	}
	ssip_set_txstate(ssi, SEND_READY);
	spin_unlock_bh(&ssi->lock);
	ssip_xmit(cl);
}
768
769
/*
 * Handle a START_TRANS command: validate state and message id, then
 * allocate an skb of the announced length (in 32-bit frames) and queue
 * the data read.  Any failure is fatal and triggers ssip_error().
 */
static void ssip_rx_strans(struct hsi_client *cl, u32 cmd)
{
	struct ssi_protocol *ssi = hsi_client_drvdata(cl);
	struct sk_buff *skb;
	struct hsi_msg *msg;
	int len = SSIP_PDU_LENGTH(cmd);

	dev_dbg(&cl->device, "RX strans: %d frames\n", len);
	spin_lock_bh(&ssi->lock);
	if (unlikely(ssi->main_state != ACTIVE)) {
		dev_err(&cl->device, "START TRANS wrong state: S(%d) M(%d)\n",
			ssi->send_state, ssi->main_state);
		spin_unlock_bh(&ssi->lock);
		return;
	}
	ssip_set_rxstate(ssi, RECEIVING);
	if (unlikely(SSIP_MSG_ID(cmd) != ssi->rxid)) {
		dev_err(&cl->device, "START TRANS id %d expected %d\n",
			SSIP_MSG_ID(cmd), ssi->rxid);
		spin_unlock_bh(&ssi->lock);
		goto out1;
	}
	ssi->rxid++;
	spin_unlock_bh(&ssi->lock);
	skb = netdev_alloc_skb(ssi->netdev, len * 4);
	if (unlikely(!skb)) {
		dev_err(&cl->device, "No memory for rx skb\n");
		goto out1;
	}
	skb_put(skb, len * 4);
	msg = ssip_alloc_data(ssi, skb, GFP_ATOMIC);
	if (unlikely(!msg)) {
		dev_err(&cl->device, "No memory for RX data msg\n");
		goto out2;
	}
	msg->complete = ssip_rx_data_complete;
	hsi_async_read(cl, msg);

	return;
out2:
	dev_kfree_skb(skb);
out1:
	ssip_error(cl);
}
813
814
/*
 * Command read completion: re-arm the command receiver immediately,
 * then dispatch the received command to its handler.
 */
static void ssip_rxcmd_complete(struct hsi_msg *msg)
{
	struct hsi_client *cl = msg->cl;
	u32 cmd = ssip_get_cmd(msg);
	unsigned int cmdid = SSIP_COMMAND(cmd);

	if (msg->status == HSI_STATUS_ERROR) {
		dev_err(&cl->device, "RX error detected\n");
		ssip_release_cmd(msg);
		ssip_error(cl);
		return;
	}
	/* Reuse the same message for the next command read */
	hsi_async_read(cl, msg);
	dev_dbg(&cl->device, "RX cmd: 0x%08x\n", cmd);
	switch (cmdid) {
	case SSIP_SW_BREAK:
		/* Ignored */
		break;
	case SSIP_BOOTINFO_REQ:
		ssip_rx_bootinforeq(cl, cmd);
		break;
	case SSIP_BOOTINFO_RESP:
		ssip_rx_bootinforesp(cl, cmd);
		break;
	case SSIP_WAKETEST_RESULT:
		ssip_rx_waketest(cl, cmd);
		break;
	case SSIP_START_TRANS:
		ssip_rx_strans(cl, cmd);
		break;
	case SSIP_READY:
		ssip_rx_ready(cl);
		break;
	default:
		dev_warn(&cl->device, "command 0x%08x not supported\n", cmd);
		break;
	}
}
852
853
/*
 * SW break command went out: if the queue is empty, settle the TX
 * state (dropping the wake line unless a slave still holds it);
 * otherwise keep transmitting.  Always wake the netdev queue.
 */
static void ssip_swbreak_complete(struct hsi_msg *msg)
{
	struct hsi_client *cl = msg->cl;
	struct ssi_protocol *ssi = hsi_client_drvdata(cl);

	ssip_release_cmd(msg);
	spin_lock_bh(&ssi->lock);
	if (list_empty(&ssi->txqueue)) {
		if (atomic_read(&ssi->tx_usecnt)) {
			/* cmt-speech still needs the wake line up */
			ssip_set_txstate(ssi, SEND_READY);
		} else {
			ssip_set_txstate(ssi, SEND_IDLE);
			hsi_stop_tx(cl);
		}
		spin_unlock_bh(&ssi->lock);
	} else {
		spin_unlock_bh(&ssi->lock);
		ssip_xmit(cl);
	}
	netif_wake_queue(ssi->netdev);
}
874
875
/*
 * Data write completion: send a SW break if the queue drained,
 * otherwise send the next queued message.  The data message is freed
 * on every path.
 */
static void ssip_tx_data_complete(struct hsi_msg *msg)
{
	struct hsi_client *cl = msg->cl;
	struct ssi_protocol *ssi = hsi_client_drvdata(cl);
	struct hsi_msg *cmsg;

	if (msg->status == HSI_STATUS_ERROR) {
		dev_err(&cl->device, "TX data error\n");
		ssip_error(cl);
		goto out;
	}
	spin_lock_bh(&ssi->lock);
	if (list_empty(&ssi->txqueue)) {
		ssip_set_txstate(ssi, SENDING_SWBREAK);
		spin_unlock_bh(&ssi->lock);
		cmsg = ssip_claim_cmd(ssi);
		ssip_set_cmd(cmsg, SSIP_SWBREAK_CMD);
		cmsg->complete = ssip_swbreak_complete;
		dev_dbg(&cl->device, "Send SWBREAK\n");
		hsi_async_write(cl, cmsg);
	} else {
		spin_unlock_bh(&ssi->lock);
		ssip_xmit(cl);
	}
out:
	ssip_free_data(msg);
}
902
903
static void ssip_port_event(struct hsi_client *cl, unsigned long event)
904
{
905
switch (event) {
906
case HSI_EVENT_START_RX:
907
ssip_start_rx(cl);
908
break;
909
case HSI_EVENT_STOP_RX:
910
ssip_stop_rx(cl);
911
break;
912
default:
913
return;
914
}
915
}
916
917
/*
 * ndo_open: claim and configure the HSI port, start the wake line test
 * and initiate the boot handshake with the modem.
 */
static int ssip_pn_open(struct net_device *dev)
{
	struct hsi_client *cl = to_hsi_client(dev->dev.parent);
	struct ssi_protocol *ssi = hsi_client_drvdata(cl);
	int err;

	err = hsi_claim_port(cl, 1);
	if (err < 0) {
		dev_err(&cl->device, "SSI port already claimed\n");
		return err;
	}
	err = hsi_register_port_event(cl, ssip_port_event);
	if (err < 0) {
		dev_err(&cl->device, "Register HSI port event failed (%d)\n",
			err);
		hsi_release_port(cl);
		return err;
	}
	dev_dbg(&cl->device, "Configuring SSI port\n");
	hsi_setup(cl);

	if (!test_and_set_bit(SSIP_WAKETEST_FLAG, &ssi->flags))
		ssi_waketest(cl, 1); /* FIXME: To be removed */

	spin_lock_bh(&ssi->lock);
	ssi->main_state = HANDSHAKE;
	spin_unlock_bh(&ssi->lock);

	ssip_send_bootinfo_req_cmd(cl);

	return 0;
}
949
950
/* ndo_stop: reset the protocol state and release the HSI port. */
static int ssip_pn_stop(struct net_device *dev)
{
	struct hsi_client *cl = to_hsi_client(dev->dev.parent);

	ssip_reset(cl);
	hsi_unregister_port_event(cl);
	hsi_release_port(cl);

	return 0;
}
960
961
/* Deferred transmit: run ssip_xmit() from process context. */
static void ssip_xmit_work(struct work_struct *work)
{
	struct ssi_protocol *ssi =
				container_of(work, struct ssi_protocol, work);
	struct hsi_client *cl = ssi->cl;

	ssip_xmit(cl);
}
969
970
/*
 * ndo_start_xmit: validate and 32-bit-pad the Phonet skb, convert the
 * length field to network order, wrap it in an HSI data message and
 * queue it; raise the wake line or kick the xmit work as needed.
 */
static netdev_tx_t ssip_pn_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct hsi_client *cl = to_hsi_client(dev->dev.parent);
	struct ssi_protocol *ssi = hsi_client_drvdata(cl);
	struct hsi_msg *msg;

	if ((skb->protocol != htons(ETH_P_PHONET)) ||
	    (skb->len < SSIP_MIN_PN_HDR))
		goto drop;
	/* Pad to 32-bits - FIXME: Revisit*/
	if ((skb->len & 3) && skb_pad(skb, 4 - (skb->len & 3)))
		goto inc_dropped;

	/*
	 * Modem sends Phonet messages over SSI with its own endianness.
	 * Assume that modem has the same endianness as we do.
	 */
	if (skb_cow_head(skb, 0))
		goto drop;

	/* length field is exchanged in network byte order */
	((u16 *)skb->data)[2] = htons(((u16 *)skb->data)[2]);

	msg = ssip_alloc_data(ssi, skb, GFP_ATOMIC);
	if (!msg) {
		dev_dbg(&cl->device, "Dropping tx data: No memory\n");
		goto drop;
	}
	msg->complete = ssip_tx_data_complete;

	spin_lock_bh(&ssi->lock);
	if (unlikely(ssi->main_state != ACTIVE)) {
		spin_unlock_bh(&ssi->lock);
		dev_dbg(&cl->device, "Dropping tx data: CMT is OFFLINE\n");
		goto drop2;
	}
	list_add_tail(&msg->link, &ssi->txqueue);
	ssi->txqueue_len++;
	if (dev->tx_queue_len < ssi->txqueue_len) {
		dev_info(&cl->device, "TX queue full %d\n", ssi->txqueue_len);
		netif_stop_queue(dev);
	}
	if (ssi->send_state == SEND_IDLE) {
		ssip_set_txstate(ssi, WAIT4READY);
		spin_unlock_bh(&ssi->lock);
		dev_dbg(&cl->device, "Start TX qlen %d\n", ssi->txqueue_len);
		hsi_start_tx(cl);
	} else if (ssi->send_state == SEND_READY) {
		/* Needed for cmt-speech workaround */
		dev_dbg(&cl->device, "Start TX on SEND READY qlen %d\n",
			ssi->txqueue_len);
		spin_unlock_bh(&ssi->lock);
		schedule_work(&ssi->work);
	} else {
		spin_unlock_bh(&ssi->lock);
	}
	/*
	 * NOTE(review): the skb is read here after the lock is dropped;
	 * if the message completes very quickly the skb may already be
	 * freed by the completion path — looks racy, verify upstream.
	 */
	dev->stats.tx_packets++;
	dev->stats.tx_bytes += skb->len;

	return NETDEV_TX_OK;
drop2:
	hsi_free_msg(msg);
drop:
	dev_kfree_skb(skb);
inc_dropped:
	dev->stats.tx_dropped++;

	return NETDEV_TX_OK;
}
1039
1040
/* CMT reset event handler: treat a modem reset as a fatal error. */
void ssip_reset_event(struct hsi_client *master)
{
	struct ssi_protocol *ssi = hsi_client_drvdata(master);
	dev_err(&ssi->cl->device, "CMT reset detected!\n");
	ssip_error(ssi->cl);
}
EXPORT_SYMBOL_GPL(ssip_reset_event);
1048
1049
/* Netdev callbacks for the Phonet interface */
static const struct net_device_ops ssip_pn_ops = {
	.ndo_open	= ssip_pn_open,
	.ndo_stop	= ssip_pn_stop,
	.ndo_start_xmit	= ssip_pn_xmit,
};
1054
1055
/* alloc_netdev() setup callback: configure the Phonet point-to-point dev. */
static void ssip_pn_setup(struct net_device *dev)
{
	static const u8 addr = PN_MEDIA_SOS;

	dev->features		= 0;
	dev->netdev_ops		= &ssip_pn_ops;
	dev->type		= ARPHRD_PHONET;
	dev->flags		= IFF_POINTOPOINT | IFF_NOARP;
	dev->mtu		= SSIP_DEFAULT_MTU;
	dev->hard_header_len	= 1;
	dev->addr_len		= 1;
	dev_addr_set(dev, &addr);
	dev->tx_queue_len	= SSIP_TXQUEUE_LEN;

	dev->needs_free_netdev	= true;
	dev->header_ops		= &phonet_header_ops;
}
1072
1073
/*
 * Driver probe: allocate and initialise the protocol instance, resolve
 * the command and data HSI channels, pre-allocate the command pool and
 * register the Phonet network device.
 */
static int ssi_protocol_probe(struct device *dev)
{
	static const char ifname[] = "phonet%d";
	struct hsi_client *cl = to_hsi_client(dev);
	struct ssi_protocol *ssi;
	int err;

	ssi = kzalloc(sizeof(*ssi), GFP_KERNEL);
	if (!ssi)
		return -ENOMEM;

	spin_lock_init(&ssi->lock);
	timer_setup(&ssi->rx_wd, ssip_rx_wd, TIMER_DEFERRABLE);
	timer_setup(&ssi->tx_wd, ssip_tx_wd, TIMER_DEFERRABLE);
	timer_setup(&ssi->keep_alive, ssip_keep_alive, 0);
	INIT_LIST_HEAD(&ssi->txqueue);
	INIT_LIST_HEAD(&ssi->cmdqueue);
	atomic_set(&ssi->tx_usecnt, 0);
	hsi_client_set_drvdata(cl, ssi);
	ssi->cl = cl;
	INIT_WORK(&ssi->work, ssip_xmit_work);

	ssi->channel_id_cmd = hsi_get_channel_id_by_name(cl, "mcsaab-control");
	if (ssi->channel_id_cmd < 0) {
		err = ssi->channel_id_cmd;
		dev_err(dev, "Could not get cmd channel (%d)\n", err);
		goto out;
	}

	ssi->channel_id_data = hsi_get_channel_id_by_name(cl, "mcsaab-data");
	if (ssi->channel_id_data < 0) {
		err = ssi->channel_id_data;
		dev_err(dev, "Could not get data channel (%d)\n", err);
		goto out;
	}

	err = ssip_alloc_cmds(ssi);
	if (err < 0) {
		dev_err(dev, "No memory for commands\n");
		goto out;
	}

	ssi->netdev = alloc_netdev(0, ifname, NET_NAME_UNKNOWN, ssip_pn_setup);
	if (!ssi->netdev) {
		dev_err(dev, "No memory for netdev\n");
		err = -ENOMEM;
		goto out1;
	}

	/* MTU range: 6 - 65535 */
	ssi->netdev->min_mtu = PHONET_MIN_MTU;
	ssi->netdev->max_mtu = SSIP_MAX_MTU;

	SET_NETDEV_DEV(ssi->netdev, dev);
	/* Carrier stays off until the handshake completes */
	netif_carrier_off(ssi->netdev);
	err = register_netdev(ssi->netdev);
	if (err < 0) {
		dev_err(dev, "Register netdev failed (%d)\n", err);
		goto out2;
	}

	list_add(&ssi->link, &ssip_list);

	dev_dbg(dev, "channel configuration: cmd=%d, data=%d\n",
		ssi->channel_id_cmd, ssi->channel_id_data);

	return 0;
out2:
	free_netdev(ssi->netdev);
out1:
	ssip_free_cmds(ssi);
out:
	kfree(ssi);

	return err;
}
1149
1150
/* Driver remove: unwind everything done in ssi_protocol_probe(). */
static int ssi_protocol_remove(struct device *dev)
{
	struct hsi_client *cl = to_hsi_client(dev);
	struct ssi_protocol *ssi = hsi_client_drvdata(cl);

	list_del(&ssi->link);
	unregister_netdev(ssi->netdev);
	ssip_free_cmds(ssi);
	hsi_client_set_drvdata(cl, NULL);
	kfree(ssi);

	return 0;
}
1163
1164
/* HSI client driver binding for the "ssi-protocol" device */
static struct hsi_client_driver ssip_driver = {
	.driver = {
		.name	= "ssi-protocol",
		.owner	= THIS_MODULE,
		.probe	= ssi_protocol_probe,
		.remove	= ssi_protocol_remove,
	},
};
1172
1173
/* Module init: register the HSI client driver. */
static int __init ssip_init(void)
{
	pr_info("SSI protocol aka McSAAB added\n");

	return hsi_register_client_driver(&ssip_driver);
}
module_init(ssip_init);
1180
1181
/* Module exit: unregister the HSI client driver. */
static void __exit ssip_exit(void)
{
	hsi_unregister_client_driver(&ssip_driver);
	pr_info("SSI protocol driver removed\n");
}
module_exit(ssip_exit);
1187
1188
MODULE_ALIAS("hsi:ssi-protocol");
1189
MODULE_AUTHOR("Carlos Chinea <[email protected]>");
1190
MODULE_AUTHOR("Remi Denis-Courmont <[email protected]>");
1191
MODULE_DESCRIPTION("SSI protocol improved aka McSAAB");
1192
MODULE_LICENSE("GPL");
1193
1194