GitHub Repository: torvalds/linux
Path: blob/master/drivers/bluetooth/hci_h5.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *
 * Bluetooth HCI Three-wire UART driver
 *
 * Copyright (C) 2012 Intel Corporation
 */

#include <linux/acpi.h>
#include <linux/bitrev.h>
#include <linux/crc-ccitt.h>
#include <linux/errno.h>
#include <linux/gpio/consumer.h>
#include <linux/kernel.h>
#include <linux/mod_devicetable.h>
#include <linux/of.h>
#include <linux/pm_runtime.h>
#include <linux/serdev.h>
#include <linux/skbuff.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

#include "btrtl.h"
#include "hci_uart.h"

#define SUSPEND_TIMEOUT_MS 6000

#define HCI_3WIRE_ACK_PKT 0
#define HCI_3WIRE_LINK_PKT 15

/* Sliding window size */
#define H5_TX_WIN_MAX 4

#define H5_ACK_TIMEOUT msecs_to_jiffies(250)
#define H5_SYNC_TIMEOUT msecs_to_jiffies(100)

/*
 * Maximum Three-wire packet:
 * 4 byte header + max value for 12-bit length + 2 bytes for CRC
 */
#define H5_MAX_LEN (4 + 0xfff + 2)

/* Convenience macros for reading Three-wire header values */
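/* Header layout (4 bytes): byte 0 carries the 3-bit seq and ack numbers
 * plus the CRC-present and reliable-packet flags, byte 1 holds the packet
 * type and the low nibble of the length, byte 2 the upper 8 length bits,
 * and byte 3 a checksum chosen so that the four header bytes sum to 0xff.
 */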
#define H5_HDR_SEQ(hdr) ((hdr)[0] & 0x07)
#define H5_HDR_ACK(hdr) (((hdr)[0] >> 3) & 0x07)
#define H5_HDR_CRC(hdr) (((hdr)[0] >> 6) & 0x01)
#define H5_HDR_RELIABLE(hdr) (((hdr)[0] >> 7) & 0x01)
#define H5_HDR_PKT_TYPE(hdr) ((hdr)[1] & 0x0f)
#define H5_HDR_LEN(hdr) ((((hdr)[1] >> 4) & 0x0f) + ((hdr)[2] << 4))

#define SLIP_DELIMITER 0xc0
#define SLIP_ESC 0xdb
#define SLIP_ESC_DELIM 0xdc
#define SLIP_ESC_ESC 0xdd

/* H5 state flags */
enum {
	H5_RX_ESC, /* SLIP escape mode */
	H5_TX_ACK_REQ, /* Pending ack to send */
	H5_WAKEUP_DISABLE, /* Device cannot wake host */
	H5_HW_FLOW_CONTROL, /* Use HW flow control */
	H5_CRC, /* Use CRC */
};

struct h5 {
	/* Must be the first member, hci_serdev.c expects this. */
	struct hci_uart serdev_hu;

	struct sk_buff_head unack; /* Unack'ed packets queue */
	struct sk_buff_head rel; /* Reliable packets queue */
	struct sk_buff_head unrel; /* Unreliable packets queue */

	unsigned long flags;

	struct sk_buff *rx_skb; /* Receive buffer */
	size_t rx_pending; /* Expecting more bytes */
	u8 rx_ack; /* Last ack number received */

	int (*rx_func)(struct hci_uart *hu, u8 c);

	struct timer_list timer; /* Retransmission timer */
	struct hci_uart *hu; /* Parent HCI UART */

	u8 tx_seq; /* Next seq number to send */
	u8 tx_ack; /* Next ack number to send */
	u8 tx_win; /* Sliding window size */

	enum {
		H5_UNINITIALIZED,
		H5_INITIALIZED,
		H5_ACTIVE,
	} state;

	enum {
		H5_AWAKE,
		H5_SLEEPING,
		H5_WAKING_UP,
	} sleep;

	const struct h5_vnd *vnd;
	const char *id;

	struct gpio_desc *enable_gpio;
	struct gpio_desc *device_wake_gpio;
};

enum h5_driver_info {
	H5_INFO_WAKEUP_DISABLE = BIT(0),
};

struct h5_vnd {
	int (*setup)(struct h5 *h5);
	void (*open)(struct h5 *h5);
	void (*close)(struct h5 *h5);
	int (*suspend)(struct h5 *h5);
	int (*resume)(struct h5 *h5);
	const struct acpi_gpio_mapping *acpi_gpio_map;
	int sizeof_priv;
};

struct h5_device_data {
	uint32_t driver_info;
	struct h5_vnd *vnd;
};

static void h5_reset_rx(struct h5 *h5);

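/* Queue a link-control message (sync/config/sleep handshake) on the
 * unreliable queue; it is sent out on the next TX run.
 */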
static void h5_link_control(struct hci_uart *hu, const void *data, size_t len)
{
	struct h5 *h5 = hu->priv;
	struct sk_buff *nskb;

	nskb = alloc_skb(3, GFP_ATOMIC);
	if (!nskb)
		return;

	hci_skb_pkt_type(nskb) = HCI_3WIRE_LINK_PKT;

	skb_put_data(nskb, data, len);

	skb_queue_tail(&h5->unrel, nskb);
}

static u8 h5_cfg_field(struct h5 *h5)
{
	/* Sliding window size (first 3 bits) and CRC request (fifth bit). */
	return (h5->tx_win & 0x07) | 0x10;
}

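/* Combined sync/config and retransmission timer: re-sends the link
 * establishment messages until the link is active, and once active puts
 * any unacked reliable packets back on the reliable queue for resend.
 */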
static void h5_timed_event(struct timer_list *t)
{
	const unsigned char sync_req[] = { 0x01, 0x7e };
	unsigned char conf_req[3] = { 0x03, 0xfc };
	struct h5 *h5 = timer_container_of(h5, t, timer);
	struct hci_uart *hu = h5->hu;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("%s", hu->hdev->name);

	if (h5->state == H5_UNINITIALIZED)
		h5_link_control(hu, sync_req, sizeof(sync_req));

	if (h5->state == H5_INITIALIZED) {
		conf_req[2] = h5_cfg_field(h5);
		h5_link_control(hu, conf_req, sizeof(conf_req));
	}

	if (h5->state != H5_ACTIVE) {
		mod_timer(&h5->timer, jiffies + H5_SYNC_TIMEOUT);
		goto wakeup;
	}

	if (h5->sleep != H5_AWAKE) {
		h5->sleep = H5_SLEEPING;
		goto wakeup;
	}

	BT_DBG("hu %p retransmitting %u pkts", hu, h5->unack.qlen);

	spin_lock_irqsave_nested(&h5->unack.lock, flags, SINGLE_DEPTH_NESTING);

	while ((skb = __skb_dequeue_tail(&h5->unack)) != NULL) {
		h5->tx_seq = (h5->tx_seq - 1) & 0x07;
		skb_queue_head(&h5->rel, skb);
	}

	spin_unlock_irqrestore(&h5->unack.lock, flags);

wakeup:
	hci_uart_tx_wakeup(hu);
}

static void h5_peer_reset(struct hci_uart *hu)
{
	struct h5 *h5 = hu->priv;

	bt_dev_err(hu->hdev, "Peer device has reset");

	h5->state = H5_UNINITIALIZED;

	timer_delete(&h5->timer);

	skb_queue_purge(&h5->rel);
	skb_queue_purge(&h5->unrel);
	skb_queue_purge(&h5->unack);

	h5->tx_seq = 0;
	h5->tx_ack = 0;

	/* Send reset request to upper stack */
	hci_reset_dev(hu->hdev);
}

static int h5_open(struct hci_uart *hu)
{
	struct h5 *h5;

	BT_DBG("hu %p", hu);

	if (hu->serdev) {
		h5 = serdev_device_get_drvdata(hu->serdev);
	} else {
		h5 = kzalloc(sizeof(*h5), GFP_KERNEL);
		if (!h5)
			return -ENOMEM;
	}

	hu->priv = h5;
	h5->hu = hu;

	skb_queue_head_init(&h5->unack);
	skb_queue_head_init(&h5->rel);
	skb_queue_head_init(&h5->unrel);

	h5_reset_rx(h5);

	timer_setup(&h5->timer, h5_timed_event, 0);

	h5->tx_win = H5_TX_WIN_MAX;

	if (h5->vnd && h5->vnd->open)
		h5->vnd->open(h5);

	set_bit(HCI_UART_INIT_PENDING, &hu->hdev_flags);

	/*
	 * Wait one jiffy because the UART layer won't set HCI_UART_PROTO_READY,
	 * which allows us to send link packets, until this function returns.
	 */
	mod_timer(&h5->timer, jiffies + 1);

	return 0;
}

static int h5_close(struct hci_uart *hu)
{
	struct h5 *h5 = hu->priv;

	timer_delete_sync(&h5->timer);

	skb_queue_purge(&h5->unack);
	skb_queue_purge(&h5->rel);
	skb_queue_purge(&h5->unrel);

	kfree_skb(h5->rx_skb);
	h5->rx_skb = NULL;

	if (h5->vnd && h5->vnd->close)
		h5->vnd->close(h5);

	if (!hu->serdev)
		kfree(h5);

	return 0;
}

static int h5_setup(struct hci_uart *hu)
{
	struct h5 *h5 = hu->priv;

	if (h5->vnd && h5->vnd->setup)
		return h5->vnd->setup(h5);

	return 0;
}

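/* Drop every packet on the unack queue that the peer has now acknowledged. */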
static void h5_pkt_cull(struct h5 *h5)
{
	struct sk_buff *skb, *tmp;
	unsigned long flags;
	int i, to_remove;
	u8 seq;

	spin_lock_irqsave(&h5->unack.lock, flags);

	to_remove = skb_queue_len(&h5->unack);
	if (to_remove == 0)
		goto unlock;

	seq = h5->tx_seq;

	while (to_remove > 0) {
		if (h5->rx_ack == seq)
			break;

		to_remove--;
		seq = (seq - 1) & 0x07;
	}

	if (seq != h5->rx_ack)
		BT_ERR("Controller acked invalid packet");

	i = 0;
	skb_queue_walk_safe(&h5->unack, skb, tmp) {
		if (i++ >= to_remove)
			break;

		__skb_unlink(skb, &h5->unack);
		dev_kfree_skb_irq(skb);
	}

	if (skb_queue_empty(&h5->unack))
		timer_delete(&h5->timer);

unlock:
	spin_unlock_irqrestore(&h5->unack.lock, flags);
}

static void h5_handle_internal_rx(struct hci_uart *hu)
{
	struct h5 *h5 = hu->priv;
	const unsigned char sync_req[] = { 0x01, 0x7e };
	const unsigned char sync_rsp[] = { 0x02, 0x7d };
	unsigned char conf_req[3] = { 0x03, 0xfc };
	const unsigned char conf_rsp[] = { 0x04, 0x7b };
	const unsigned char wakeup_req[] = { 0x05, 0xfa };
	const unsigned char woken_req[] = { 0x06, 0xf9 };
	const unsigned char sleep_req[] = { 0x07, 0x78 };
	const unsigned char *hdr = h5->rx_skb->data;
	const unsigned char *data = &h5->rx_skb->data[4];

	BT_DBG("%s", hu->hdev->name);

	if (H5_HDR_PKT_TYPE(hdr) != HCI_3WIRE_LINK_PKT)
		return;

	if (H5_HDR_LEN(hdr) < 2)
		return;

	conf_req[2] = h5_cfg_field(h5);

	if (memcmp(data, sync_req, 2) == 0) {
		if (h5->state == H5_ACTIVE)
			h5_peer_reset(hu);
		h5_link_control(hu, sync_rsp, 2);
	} else if (memcmp(data, sync_rsp, 2) == 0) {
		if (h5->state == H5_ACTIVE)
			h5_peer_reset(hu);
		h5->state = H5_INITIALIZED;
		h5_link_control(hu, conf_req, 3);
	} else if (memcmp(data, conf_req, 2) == 0) {
		h5_link_control(hu, conf_rsp, 2);
		h5_link_control(hu, conf_req, 3);
	} else if (memcmp(data, conf_rsp, 2) == 0) {
		if (H5_HDR_LEN(hdr) > 2) {
			h5->tx_win = (data[2] & 0x07);
			assign_bit(H5_CRC, &h5->flags, data[2] & 0x10);
		}
		BT_DBG("Three-wire init complete. tx_win %u", h5->tx_win);
		h5->state = H5_ACTIVE;
		hci_uart_init_ready(hu);
		return;
	} else if (memcmp(data, sleep_req, 2) == 0) {
		BT_DBG("Peer went to sleep");
		h5->sleep = H5_SLEEPING;
		return;
	} else if (memcmp(data, woken_req, 2) == 0) {
		BT_DBG("Peer woke up");
		h5->sleep = H5_AWAKE;
	} else if (memcmp(data, wakeup_req, 2) == 0) {
		BT_DBG("Peer requested wakeup");
		h5_link_control(hu, woken_req, 2);
		h5->sleep = H5_AWAKE;
	} else {
		BT_DBG("Link Control: 0x%02hhx 0x%02hhx", data[0], data[1]);
		return;
	}

	hci_uart_tx_wakeup(hu);
}

static void h5_complete_rx_pkt(struct hci_uart *hu)
{
	struct h5 *h5 = hu->priv;
	const unsigned char *hdr = h5->rx_skb->data;

	if (H5_HDR_RELIABLE(hdr)) {
		h5->tx_ack = (h5->tx_ack + 1) % 8;
		set_bit(H5_TX_ACK_REQ, &h5->flags);
		hci_uart_tx_wakeup(hu);
	}

	h5->rx_ack = H5_HDR_ACK(hdr);

	h5_pkt_cull(h5);

	switch (H5_HDR_PKT_TYPE(hdr)) {
	case HCI_EVENT_PKT:
	case HCI_ACLDATA_PKT:
	case HCI_SCODATA_PKT:
	case HCI_ISODATA_PKT:
		hci_skb_pkt_type(h5->rx_skb) = H5_HDR_PKT_TYPE(hdr);

		/* Remove Three-wire header */
		skb_pull(h5->rx_skb, 4);

		hci_recv_frame(hu->hdev, h5->rx_skb);
		h5->rx_skb = NULL;

		break;

	default:
		h5_handle_internal_rx(hu);
		break;
	}

	h5_reset_rx(h5);
}

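/* Receive state machine: h5->rx_func steps from delimiter hunting to packet
 * start, 4-byte header, payload and (when requested) a 2-byte CRC, with
 * rx_pending counting the bytes still expected for the current stage.
 */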
static int h5_rx_crc(struct hci_uart *hu, unsigned char c)
{
	struct h5 *h5 = hu->priv;
	const unsigned char *hdr = h5->rx_skb->data;
	u16 crc;
	__be16 crc_be;

	crc = crc_ccitt(0xffff, hdr, 4 + H5_HDR_LEN(hdr));
	crc = bitrev16(crc);

	crc_be = cpu_to_be16(crc);

	if (memcmp(&crc_be, hdr + 4 + H5_HDR_LEN(hdr), 2) != 0) {
		bt_dev_err(hu->hdev, "Received packet with invalid CRC");
		h5_reset_rx(h5);
	} else {
		/* Remove CRC bytes */
		skb_trim(h5->rx_skb, 4 + H5_HDR_LEN(hdr));
		h5_complete_rx_pkt(hu);
	}

	return 0;
}

static int h5_rx_payload(struct hci_uart *hu, unsigned char c)
{
	struct h5 *h5 = hu->priv;
	const unsigned char *hdr = h5->rx_skb->data;

	if (H5_HDR_CRC(hdr)) {
		h5->rx_func = h5_rx_crc;
		h5->rx_pending = 2;
	} else {
		h5_complete_rx_pkt(hu);
	}

	return 0;
}

static int h5_rx_3wire_hdr(struct hci_uart *hu, unsigned char c)
{
	struct h5 *h5 = hu->priv;
	const unsigned char *hdr = h5->rx_skb->data;

	BT_DBG("%s rx: seq %u ack %u crc %u rel %u type %u len %u",
	       hu->hdev->name, H5_HDR_SEQ(hdr), H5_HDR_ACK(hdr),
	       H5_HDR_CRC(hdr), H5_HDR_RELIABLE(hdr), H5_HDR_PKT_TYPE(hdr),
	       H5_HDR_LEN(hdr));

	if (((hdr[0] + hdr[1] + hdr[2] + hdr[3]) & 0xff) != 0xff) {
		bt_dev_err(hu->hdev, "Invalid header checksum");
		h5_reset_rx(h5);
		return 0;
	}

	if (H5_HDR_RELIABLE(hdr) && H5_HDR_SEQ(hdr) != h5->tx_ack) {
		bt_dev_err(hu->hdev, "Out-of-order packet arrived (%u != %u)",
			   H5_HDR_SEQ(hdr), h5->tx_ack);
		set_bit(H5_TX_ACK_REQ, &h5->flags);
		hci_uart_tx_wakeup(hu);
		h5_reset_rx(h5);
		return 0;
	}

	if (h5->state != H5_ACTIVE &&
	    H5_HDR_PKT_TYPE(hdr) != HCI_3WIRE_LINK_PKT) {
		bt_dev_err(hu->hdev, "Non-link packet received in non-active state");
		h5_reset_rx(h5);
		return 0;
	}

	h5->rx_func = h5_rx_payload;
	h5->rx_pending = H5_HDR_LEN(hdr);

	return 0;
}

static int h5_rx_pkt_start(struct hci_uart *hu, unsigned char c)
{
	struct h5 *h5 = hu->priv;

	if (c == SLIP_DELIMITER)
		return 1;

	h5->rx_func = h5_rx_3wire_hdr;
	h5->rx_pending = 4;

	h5->rx_skb = bt_skb_alloc(H5_MAX_LEN, GFP_ATOMIC);
	if (!h5->rx_skb) {
		bt_dev_err(hu->hdev, "Can't allocate mem for new packet");
		h5_reset_rx(h5);
		return -ENOMEM;
	}

	h5->rx_skb->dev = (void *)hu->hdev;

	return 0;
}

static int h5_rx_delimiter(struct hci_uart *hu, unsigned char c)
{
	struct h5 *h5 = hu->priv;

	if (c == SLIP_DELIMITER)
		h5->rx_func = h5_rx_pkt_start;

	return 1;
}

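/* Undo SLIP escaping for one incoming byte and append the result to rx_skb. */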
static void h5_unslip_one_byte(struct h5 *h5, unsigned char c)
{
	const u8 delim = SLIP_DELIMITER, esc = SLIP_ESC;
	const u8 *byte = &c;

	if (!test_bit(H5_RX_ESC, &h5->flags) && c == SLIP_ESC) {
		set_bit(H5_RX_ESC, &h5->flags);
		return;
	}

	if (test_and_clear_bit(H5_RX_ESC, &h5->flags)) {
		switch (c) {
		case SLIP_ESC_DELIM:
			byte = &delim;
			break;
		case SLIP_ESC_ESC:
			byte = &esc;
			break;
		default:
			BT_ERR("Invalid esc byte 0x%02hhx", c);
			h5_reset_rx(h5);
			return;
		}
	}

	skb_put_data(h5->rx_skb, byte, 1);
	h5->rx_pending--;

	BT_DBG("unslipped 0x%02hhx, rx_pending %zu", *byte, h5->rx_pending);
}

static void h5_reset_rx(struct h5 *h5)
{
	if (h5->rx_skb) {
		kfree_skb(h5->rx_skb);
		h5->rx_skb = NULL;
	}

	h5->rx_func = h5_rx_delimiter;
	h5->rx_pending = 0;
	clear_bit(H5_RX_ESC, &h5->flags);
	clear_bit(H5_CRC, &h5->flags);
}

static int h5_recv(struct hci_uart *hu, const void *data, int count)
{
	struct h5 *h5 = hu->priv;
	const unsigned char *ptr = data;

	BT_DBG("%s pending %zu count %d", hu->hdev->name, h5->rx_pending,
	       count);

	while (count > 0) {
		int processed;

		if (h5->rx_pending > 0) {
			if (*ptr == SLIP_DELIMITER) {
				bt_dev_err(hu->hdev, "Too short H5 packet");
				h5_reset_rx(h5);
				continue;
			}

			h5_unslip_one_byte(h5, *ptr);

			ptr++; count--;
			continue;
		}

		processed = h5->rx_func(hu, *ptr);
		if (processed < 0)
			return processed;

		ptr += processed;
		count -= processed;
	}

	if (hu->serdev) {
		pm_runtime_get(&hu->serdev->dev);
		pm_runtime_put_autosuspend(&hu->serdev->dev);
	}

	return 0;
}

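/* TX entry point from the HCI UART core: reliable packet types (ACL and
 * command) go on the rel queue, SCO/ISO on the unrel queue; the actual
 * three-wire framing happens later in h5_dequeue().
 */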
static int h5_enqueue(struct hci_uart *hu, struct sk_buff *skb)
{
	struct h5 *h5 = hu->priv;

	if (skb->len > 0xfff) {
		bt_dev_err(hu->hdev, "Packet too long (%u bytes)", skb->len);
		kfree_skb(skb);
		return 0;
	}

	if (h5->state != H5_ACTIVE) {
		bt_dev_err(hu->hdev, "Ignoring HCI data in non-active state");
		kfree_skb(skb);
		return 0;
	}

	switch (hci_skb_pkt_type(skb)) {
	case HCI_ACLDATA_PKT:
	case HCI_COMMAND_PKT:
		skb_queue_tail(&h5->rel, skb);
		break;

	case HCI_SCODATA_PKT:
	case HCI_ISODATA_PKT:
		skb_queue_tail(&h5->unrel, skb);
		break;

	default:
		bt_dev_err(hu->hdev, "Unknown packet type %u", hci_skb_pkt_type(skb));
		kfree_skb(skb);
		break;
	}

	if (hu->serdev) {
		pm_runtime_get_sync(&hu->serdev->dev);
		pm_runtime_put_autosuspend(&hu->serdev->dev);
	}

	return 0;
}

static void h5_slip_delim(struct sk_buff *skb)
{
	const char delim = SLIP_DELIMITER;

	skb_put_data(skb, &delim, 1);
}

static void h5_slip_one_byte(struct sk_buff *skb, u8 c)
{
	const char esc_delim[2] = { SLIP_ESC, SLIP_ESC_DELIM };
	const char esc_esc[2] = { SLIP_ESC, SLIP_ESC_ESC };

	switch (c) {
	case SLIP_DELIMITER:
		skb_put_data(skb, &esc_delim, 2);
		break;
	case SLIP_ESC:
		skb_put_data(skb, &esc_esc, 2);
		break;
	default:
		skb_put_data(skb, &c, 1);
	}
}

static bool valid_packet_type(u8 type)
{
	switch (type) {
	case HCI_ACLDATA_PKT:
	case HCI_COMMAND_PKT:
	case HCI_SCODATA_PKT:
	case HCI_ISODATA_PKT:
	case HCI_3WIRE_LINK_PKT:
	case HCI_3WIRE_ACK_PKT:
		return true;
	default:
		return false;
	}
}

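/* Build a fully SLIP-framed three-wire packet: opening delimiter, 4-byte
 * header carrying seq/ack, the escaped payload, an optional bit-reversed
 * CRC-CCITT and a closing delimiter.
 */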
static struct sk_buff *h5_prepare_pkt(struct hci_uart *hu, u8 pkt_type,
				      const u8 *data, size_t len)
{
	struct h5 *h5 = hu->priv;
	struct sk_buff *nskb;
	u8 hdr[4];
	u16 crc;
	int i;

	if (!valid_packet_type(pkt_type)) {
		bt_dev_err(hu->hdev, "Unknown packet type %u", pkt_type);
		return NULL;
	}

	/*
	 * Max len of packet: (original len + 4 (H5 hdr) + 2 (crc)) * 2
	 * (because bytes 0xc0 and 0xdb are escaped, worst case is when
	 * the packet is all made of 0xc0 and 0xdb) + 2 (0xc0
	 * delimiters at start and end).
	 */
	nskb = alloc_skb((len + 6) * 2 + 2, GFP_ATOMIC);
	if (!nskb)
		return NULL;

	hci_skb_pkt_type(nskb) = pkt_type;

	h5_slip_delim(nskb);

	hdr[0] = h5->tx_ack << 3;
	clear_bit(H5_TX_ACK_REQ, &h5->flags);

	/* Reliable packet? */
	if (pkt_type == HCI_ACLDATA_PKT || pkt_type == HCI_COMMAND_PKT) {
		hdr[0] |= 1 << 7;
		hdr[0] |= (test_bit(H5_CRC, &h5->flags) && 1) << 6;
		hdr[0] |= h5->tx_seq;
		h5->tx_seq = (h5->tx_seq + 1) % 8;
	}

	hdr[1] = pkt_type | ((len & 0x0f) << 4);
	hdr[2] = len >> 4;
	hdr[3] = ~((hdr[0] + hdr[1] + hdr[2]) & 0xff);

	BT_DBG("%s tx: seq %u ack %u crc %u rel %u type %u len %u",
	       hu->hdev->name, H5_HDR_SEQ(hdr), H5_HDR_ACK(hdr),
	       H5_HDR_CRC(hdr), H5_HDR_RELIABLE(hdr), H5_HDR_PKT_TYPE(hdr),
	       H5_HDR_LEN(hdr));

	for (i = 0; i < 4; i++)
		h5_slip_one_byte(nskb, hdr[i]);

	for (i = 0; i < len; i++)
		h5_slip_one_byte(nskb, data[i]);

	if (H5_HDR_CRC(hdr)) {
		crc = crc_ccitt(0xffff, hdr, 4);
		crc = crc_ccitt(crc, data, len);
		crc = bitrev16(crc);

		h5_slip_one_byte(nskb, (crc >> 8) & 0xff);
		h5_slip_one_byte(nskb, crc & 0xff);
	}

	h5_slip_delim(nskb);

	return nskb;
}

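/* Pick the next frame to send: wake a sleeping peer first, then unreliable
 * packets, then reliable packets as long as the sliding window has room,
 * and finally a standalone ACK if one is still owed.
 */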
static struct sk_buff *h5_dequeue(struct hci_uart *hu)
{
	struct h5 *h5 = hu->priv;
	unsigned long flags;
	struct sk_buff *skb, *nskb;

	if (h5->sleep != H5_AWAKE) {
		const unsigned char wakeup_req[] = { 0x05, 0xfa };

		if (h5->sleep == H5_WAKING_UP)
			return NULL;

		h5->sleep = H5_WAKING_UP;
		BT_DBG("Sending wakeup request");

		mod_timer(&h5->timer, jiffies + HZ / 100);
		return h5_prepare_pkt(hu, HCI_3WIRE_LINK_PKT, wakeup_req, 2);
	}

	skb = skb_dequeue(&h5->unrel);
	if (skb) {
		nskb = h5_prepare_pkt(hu, hci_skb_pkt_type(skb),
				      skb->data, skb->len);
		if (nskb) {
			kfree_skb(skb);
			return nskb;
		}

		skb_queue_head(&h5->unrel, skb);
		bt_dev_err(hu->hdev, "Could not dequeue pkt because alloc_skb failed");
	}

	spin_lock_irqsave_nested(&h5->unack.lock, flags, SINGLE_DEPTH_NESTING);

	if (h5->unack.qlen >= h5->tx_win)
		goto unlock;

	skb = skb_dequeue(&h5->rel);
	if (skb) {
		nskb = h5_prepare_pkt(hu, hci_skb_pkt_type(skb),
				      skb->data, skb->len);
		if (nskb) {
			__skb_queue_tail(&h5->unack, skb);
			mod_timer(&h5->timer, jiffies + H5_ACK_TIMEOUT);
			spin_unlock_irqrestore(&h5->unack.lock, flags);
			return nskb;
		}

		skb_queue_head(&h5->rel, skb);
		bt_dev_err(hu->hdev, "Could not dequeue pkt because alloc_skb failed");
	}

unlock:
	spin_unlock_irqrestore(&h5->unack.lock, flags);

	if (test_bit(H5_TX_ACK_REQ, &h5->flags))
		return h5_prepare_pkt(hu, HCI_3WIRE_ACK_PKT, NULL, 0);

	return NULL;
}

static int h5_flush(struct hci_uart *hu)
{
	BT_DBG("hu %p", hu);
	return 0;
}

static const struct hci_uart_proto h5p = {
	.id = HCI_UART_3WIRE,
	.name = "Three-wire (H5)",
	.open = h5_open,
	.close = h5_close,
	.setup = h5_setup,
	.recv = h5_recv,
	.enqueue = h5_enqueue,
	.dequeue = h5_dequeue,
	.flush = h5_flush,
};

static int h5_serdev_probe(struct serdev_device *serdev)
{
	struct device *dev = &serdev->dev;
	struct h5 *h5;
	const struct h5_device_data *data;

	h5 = devm_kzalloc(dev, sizeof(*h5), GFP_KERNEL);
	if (!h5)
		return -ENOMEM;

	h5->hu = &h5->serdev_hu;
	h5->serdev_hu.serdev = serdev;
	serdev_device_set_drvdata(serdev, h5);

	if (has_acpi_companion(dev)) {
		const struct acpi_device_id *match;

		match = acpi_match_device(dev->driver->acpi_match_table, dev);
		if (!match)
			return -ENODEV;

		data = (const struct h5_device_data *)match->driver_data;
		h5->vnd = data->vnd;
		h5->id = (char *)match->id;

		if (h5->vnd->acpi_gpio_map)
			devm_acpi_dev_add_driver_gpios(dev,
						       h5->vnd->acpi_gpio_map);
	} else {
		data = of_device_get_match_data(dev);
		if (!data)
			return -ENODEV;

		h5->vnd = data->vnd;
	}

	if (data->driver_info & H5_INFO_WAKEUP_DISABLE)
		set_bit(H5_WAKEUP_DISABLE, &h5->flags);

	h5->enable_gpio = devm_gpiod_get_optional(dev, "enable", GPIOD_OUT_LOW);
	if (IS_ERR(h5->enable_gpio))
		return PTR_ERR(h5->enable_gpio);

	h5->device_wake_gpio = devm_gpiod_get_optional(dev, "device-wake",
						       GPIOD_OUT_LOW);
	if (IS_ERR(h5->device_wake_gpio))
		return PTR_ERR(h5->device_wake_gpio);

	return hci_uart_register_device_priv(&h5->serdev_hu, &h5p,
					     h5->vnd->sizeof_priv);
}

static void h5_serdev_remove(struct serdev_device *serdev)
{
	struct h5 *h5 = serdev_device_get_drvdata(serdev);

	hci_uart_unregister_device(&h5->serdev_hu);
}

static int __maybe_unused h5_serdev_suspend(struct device *dev)
{
	struct h5 *h5 = dev_get_drvdata(dev);
	int ret = 0;

	if (h5->vnd && h5->vnd->suspend)
		ret = h5->vnd->suspend(h5);

	return ret;
}

static int __maybe_unused h5_serdev_resume(struct device *dev)
{
	struct h5 *h5 = dev_get_drvdata(dev);
	int ret = 0;

	if (h5->vnd && h5->vnd->resume)
		ret = h5->vnd->resume(h5);

	return ret;
}

#ifdef CONFIG_BT_HCIUART_RTL
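/* Realtek vendor setup: query the UART settings for the detected chip,
 * switch the controller baud rate with a vendor HCI command, bring the
 * host serdev in line, then download the firmware.
 */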
static int h5_btrtl_setup(struct h5 *h5)
{
	struct btrtl_device_info *btrtl_dev;
	struct sk_buff *skb;
	__le32 baudrate_data;
	u32 device_baudrate;
	unsigned int controller_baudrate;
	bool flow_control;
	int err;

	btrtl_dev = btrtl_initialize(h5->hu->hdev, h5->id);
	if (IS_ERR(btrtl_dev))
		return PTR_ERR(btrtl_dev);

	err = btrtl_get_uart_settings(h5->hu->hdev, btrtl_dev,
				      &controller_baudrate, &device_baudrate,
				      &flow_control);
	if (err)
		goto out_free;

	baudrate_data = cpu_to_le32(device_baudrate);
	skb = __hci_cmd_sync(h5->hu->hdev, 0xfc17, sizeof(baudrate_data),
			     &baudrate_data, HCI_INIT_TIMEOUT);
	if (IS_ERR(skb)) {
		rtl_dev_err(h5->hu->hdev, "set baud rate command failed\n");
		err = PTR_ERR(skb);
		goto out_free;
	} else {
		kfree_skb(skb);
	}
	/* Give the device some time to set up the new baudrate. */
	usleep_range(10000, 20000);

	serdev_device_set_baudrate(h5->hu->serdev, controller_baudrate);
	serdev_device_set_flow_control(h5->hu->serdev, flow_control);

	if (flow_control)
		set_bit(H5_HW_FLOW_CONTROL, &h5->flags);

	err = btrtl_download_firmware(h5->hu->hdev, btrtl_dev);
	/* Give the device some time before the hci-core sends it a reset */
	usleep_range(10000, 20000);
	if (err)
		goto out_free;

	btrtl_set_quirks(h5->hu->hdev, btrtl_dev);

out_free:
	btrtl_free(btrtl_dev);

	return err;
}

static void h5_btrtl_open(struct h5 *h5)
{
	/*
	 * Since h5_btrtl_resume() does a device_reprobe() the suspend handling
	 * done by the hci_suspend_notifier is not necessary; it actually causes
	 * delays and a bunch of errors to get logged, so disable it.
	 */
	if (test_bit(H5_WAKEUP_DISABLE, &h5->flags))
		set_bit(HCI_UART_NO_SUSPEND_NOTIFIER, &h5->hu->flags);

	/* Devices always start with these fixed parameters */
	serdev_device_set_flow_control(h5->hu->serdev, false);
	serdev_device_set_parity(h5->hu->serdev, SERDEV_PARITY_EVEN);
	serdev_device_set_baudrate(h5->hu->serdev, 115200);

	if (!test_bit(H5_WAKEUP_DISABLE, &h5->flags)) {
		pm_runtime_set_active(&h5->hu->serdev->dev);
		pm_runtime_use_autosuspend(&h5->hu->serdev->dev);
		pm_runtime_set_autosuspend_delay(&h5->hu->serdev->dev,
						 SUSPEND_TIMEOUT_MS);
		pm_runtime_enable(&h5->hu->serdev->dev);
	}

	/* The controller needs reset to startup */
	gpiod_set_value_cansleep(h5->enable_gpio, 0);
	gpiod_set_value_cansleep(h5->device_wake_gpio, 0);
	msleep(100);

	/* The controller needs up to 500ms to wakeup */
	gpiod_set_value_cansleep(h5->enable_gpio, 1);
	gpiod_set_value_cansleep(h5->device_wake_gpio, 1);
	msleep(500);
}

static void h5_btrtl_close(struct h5 *h5)
{
	if (!test_bit(H5_WAKEUP_DISABLE, &h5->flags))
		pm_runtime_disable(&h5->hu->serdev->dev);

	gpiod_set_value_cansleep(h5->device_wake_gpio, 0);
	gpiod_set_value_cansleep(h5->enable_gpio, 0);
}

/* Suspend/resume support. On many devices the RTL BT device loses power during
 * suspend/resume, causing it to lose its firmware and all state. So we simply
 * turn it off on suspend and reprobe on resume. This mirrors how RTL devices
 * are handled in the USB driver, where the BTUSB_WAKEUP_DISABLE is used which
 * also causes a reprobe on resume.
 */
static int h5_btrtl_suspend(struct h5 *h5)
{
	serdev_device_set_flow_control(h5->hu->serdev, false);
	gpiod_set_value_cansleep(h5->device_wake_gpio, 0);

	if (test_bit(H5_WAKEUP_DISABLE, &h5->flags))
		gpiod_set_value_cansleep(h5->enable_gpio, 0);

	return 0;
}

struct h5_btrtl_reprobe {
	struct device *dev;
	struct work_struct work;
};

static void h5_btrtl_reprobe_worker(struct work_struct *work)
{
	struct h5_btrtl_reprobe *reprobe =
		container_of(work, struct h5_btrtl_reprobe, work);
	int ret;

	ret = device_reprobe(reprobe->dev);
	if (ret && ret != -EPROBE_DEFER)
		dev_err(reprobe->dev, "Reprobe error %d\n", ret);

	put_device(reprobe->dev);
	kfree(reprobe);
	module_put(THIS_MODULE);
}

static int h5_btrtl_resume(struct h5 *h5)
{
	if (test_bit(H5_WAKEUP_DISABLE, &h5->flags)) {
		struct h5_btrtl_reprobe *reprobe;

		reprobe = kzalloc(sizeof(*reprobe), GFP_KERNEL);
		if (!reprobe)
			return -ENOMEM;

		__module_get(THIS_MODULE);

		INIT_WORK(&reprobe->work, h5_btrtl_reprobe_worker);
		reprobe->dev = get_device(&h5->hu->serdev->dev);
		queue_work(system_long_wq, &reprobe->work);
	} else {
		gpiod_set_value_cansleep(h5->device_wake_gpio, 1);

		if (test_bit(H5_HW_FLOW_CONTROL, &h5->flags))
			serdev_device_set_flow_control(h5->hu->serdev, true);
	}

	return 0;
}

static const struct acpi_gpio_params btrtl_device_wake_gpios = { 0, 0, false };
static const struct acpi_gpio_params btrtl_enable_gpios = { 1, 0, false };
static const struct acpi_gpio_params btrtl_host_wake_gpios = { 2, 0, false };
static const struct acpi_gpio_mapping acpi_btrtl_gpios[] = {
	{ "device-wake-gpios", &btrtl_device_wake_gpios, 1 },
	{ "enable-gpios", &btrtl_enable_gpios, 1 },
	{ "host-wake-gpios", &btrtl_host_wake_gpios, 1 },
	{},
};

static struct h5_vnd rtl_vnd = {
	.setup = h5_btrtl_setup,
	.open = h5_btrtl_open,
	.close = h5_btrtl_close,
	.suspend = h5_btrtl_suspend,
	.resume = h5_btrtl_resume,
	.acpi_gpio_map = acpi_btrtl_gpios,
	.sizeof_priv = sizeof(struct btrealtek_data),
};

static const struct h5_device_data h5_data_rtl8822cs = {
	.vnd = &rtl_vnd,
};

static const struct h5_device_data h5_data_rtl8723bs = {
	.driver_info = H5_INFO_WAKEUP_DISABLE,
	.vnd = &rtl_vnd,
};
#endif

#ifdef CONFIG_ACPI
static const struct acpi_device_id h5_acpi_match[] = {
#ifdef CONFIG_BT_HCIUART_RTL
	{ "OBDA0623", (kernel_ulong_t)&h5_data_rtl8723bs },
	{ "OBDA8723", (kernel_ulong_t)&h5_data_rtl8723bs },
#endif
	{ },
};
MODULE_DEVICE_TABLE(acpi, h5_acpi_match);
#endif

static const struct dev_pm_ops h5_serdev_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(h5_serdev_suspend, h5_serdev_resume)
	SET_RUNTIME_PM_OPS(h5_serdev_suspend, h5_serdev_resume, NULL)
};

static const struct of_device_id rtl_bluetooth_of_match[] = {
#ifdef CONFIG_BT_HCIUART_RTL
	{ .compatible = "realtek,rtl8822cs-bt",
	  .data = (const void *)&h5_data_rtl8822cs },
	{ .compatible = "realtek,rtl8723bs-bt",
	  .data = (const void *)&h5_data_rtl8723bs },
	{ .compatible = "realtek,rtl8723cs-bt",
	  .data = (const void *)&h5_data_rtl8723bs },
	{ .compatible = "realtek,rtl8723ds-bt",
	  .data = (const void *)&h5_data_rtl8723bs },
#endif
	{ },
};
MODULE_DEVICE_TABLE(of, rtl_bluetooth_of_match);

static struct serdev_device_driver h5_serdev_driver = {
	.probe = h5_serdev_probe,
	.remove = h5_serdev_remove,
	.driver = {
		.name = "hci_uart_h5",
		.acpi_match_table = ACPI_PTR(h5_acpi_match),
		.pm = &h5_serdev_pm_ops,
		.of_match_table = rtl_bluetooth_of_match,
	},
};

int __init h5_init(void)
{
	serdev_device_driver_register(&h5_serdev_driver);
	return hci_uart_register_proto(&h5p);
}

int __exit h5_deinit(void)
{
	serdev_device_driver_unregister(&h5_serdev_driver);
	return hci_uart_unregister_proto(&h5p);
}