Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
freebsd
GitHub Repository: freebsd/freebsd-src
Path: blob/main/sys/contrib/dev/athk/ath10k/htt.h
105545 views
1
/* SPDX-License-Identifier: ISC */
2
/*
3
* Copyright (c) 2005-2011 Atheros Communications Inc.
4
* Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
5
* Copyright (c) 2018, The Linux Foundation. All rights reserved.
6
* Copyright (c) 2021, 2023-2024 Qualcomm Innovation Center, Inc. All rights reserved.
7
*/
8
9
#ifndef _HTT_H_
10
#define _HTT_H_
11
12
#include <linux/bug.h>
13
#include <linux/interrupt.h>
14
#include <linux/dmapool.h>
15
#include <linux/hashtable.h>
16
#include <linux/kfifo.h>
17
#include <net/mac80211.h>
18
#if defined(__FreeBSD__)
19
#include <linux/wait.h>
20
#endif
21
22
#include "htc.h"
23
#include "hw.h"
24
#include "rx_desc.h"
25
26
/* Bitmask values for the upload_types/reset_types masks of htt_stats_req.
 * Only the low 24 bits are meaningful on the wire (3-byte fields).
 */
enum htt_dbg_stats_type {
	HTT_DBG_STATS_WAL_PDEV_TXRX = 1 << 0,
	HTT_DBG_STATS_RX_REORDER    = 1 << 1,
	HTT_DBG_STATS_RX_RATE_INFO  = 1 << 2,
	HTT_DBG_STATS_TX_PPDU_LOG   = 1 << 3,
	HTT_DBG_STATS_TX_RATE_INFO  = 1 << 4,
	/* bits 5-23 currently reserved */

	HTT_DBG_NUM_STATS /* keep this last */
};
36
37
/* Message type IDs carried in htt_cmd_hdr.msg_type for host->target HTT. */
enum htt_h2t_msg_type { /* host-to-target */
	HTT_H2T_MSG_TYPE_VERSION_REQ        = 0,
	HTT_H2T_MSG_TYPE_TX_FRM             = 1,
	HTT_H2T_MSG_TYPE_RX_RING_CFG        = 2,
	HTT_H2T_MSG_TYPE_STATS_REQ          = 3,
	HTT_H2T_MSG_TYPE_SYNC               = 4,
	HTT_H2T_MSG_TYPE_AGGR_CFG           = 5,
	HTT_H2T_MSG_TYPE_FRAG_DESC_BANK_CFG = 6,

	/* This command is used for sending management frames in HTT < 3.0.
	 * HTT >= 3.0 uses TX_FRM for everything.
	 */
	HTT_H2T_MSG_TYPE_MGMT_TX            = 7,
	HTT_H2T_MSG_TYPE_TX_FETCH_RESP      = 11,

	HTT_H2T_NUM_MSGS /* keep this last */
};
54
55
/* Common one-byte header prepended to every host->target HTT message. */
struct htt_cmd_hdr {
	u8 msg_type; /* %htt_h2t_msg_type */
} __packed;

/* VERSION_REQ payload: pad out to a full 4-byte word after the cmd header. */
struct htt_ver_req {
	u8 pad[sizeof(u32) - sizeof(struct htt_cmd_hdr)];
} __packed;
62
63
/*
 * HTT tx MSDU descriptor
 *
 * The HTT tx MSDU descriptor is created by the host HTT SW for each
 * tx MSDU.  The HTT tx MSDU descriptor contains the information that
 * the target firmware needs for the FW's tx processing, particularly
 * for creating the HW msdu descriptor.
 * The same HTT tx descriptor is used for HL and LL systems, though
 * a few fields within the tx descriptor are used only by LL or
 * only by HL.
 * The HTT tx descriptor is defined in two manners: by a struct with
 * bitfields, and by a series of [dword offset, bit mask, bit shift]
 * definitions.
 * The target should use the struct def, for simplicity and clarity,
 * but the host shall use the bit-mask + bit-shift defs, to be endian-
 * neutral.  Specifically, the host shall use the get/set macros built
 * around the mask + shift defs.
 */
/* One scatter/gather fragment: 32-bit (dword) or 48-bit (tword) paddr form. */
struct htt_data_tx_desc_frag {
	union {
		struct double_word_addr {
			__le32 paddr;
			__le32 len;
		} __packed dword_addr;
		struct triple_word_addr {
			__le32 paddr_lo;
			__le16 paddr_hi;
			__le16 len_16;
		} __packed tword_addr;
	} __packed;
} __packed;
94
95
/* Extension descriptor (32-bit targets): TSO state plus up to 6 fragments. */
struct htt_msdu_ext_desc {
	__le32 tso_flag[3];
	__le16 ip_identification;
	u8     flags; /* %HTT_MSDU_EXT_DESC_FLAG_ */
	u8     reserved;
	struct htt_data_tx_desc_frag frags[6];
};

/* Extension descriptor (64-bit targets): wider tso_flag array, 64-bit flag
 * positions (see *_64 defines below).
 */
struct htt_msdu_ext_desc_64 {
	__le32 tso_flag[5];
	__le16 ip_identification;
	u8     flags;
	u8     reserved;
	struct htt_data_tx_desc_frag frags[6];
};

#define	HTT_MSDU_EXT_DESC_FLAG_IPV4_CSUM_ENABLE		BIT(0)
#define	HTT_MSDU_EXT_DESC_FLAG_UDP_IPV4_CSUM_ENABLE	BIT(1)
#define	HTT_MSDU_EXT_DESC_FLAG_UDP_IPV6_CSUM_ENABLE	BIT(2)
#define	HTT_MSDU_EXT_DESC_FLAG_TCP_IPV4_CSUM_ENABLE	BIT(3)
#define	HTT_MSDU_EXT_DESC_FLAG_TCP_IPV6_CSUM_ENABLE	BIT(4)

/* All checksum-offload enables at once (32-bit descriptor layout). */
#define HTT_MSDU_CHECKSUM_ENABLE (HTT_MSDU_EXT_DESC_FLAG_IPV4_CSUM_ENABLE \
				 | HTT_MSDU_EXT_DESC_FLAG_UDP_IPV4_CSUM_ENABLE \
				 | HTT_MSDU_EXT_DESC_FLAG_UDP_IPV6_CSUM_ENABLE \
				 | HTT_MSDU_EXT_DESC_FLAG_TCP_IPV4_CSUM_ENABLE \
				 | HTT_MSDU_EXT_DESC_FLAG_TCP_IPV6_CSUM_ENABLE)

#define	HTT_MSDU_EXT_DESC_FLAG_IPV4_CSUM_ENABLE_64	BIT(16)
#define	HTT_MSDU_EXT_DESC_FLAG_UDP_IPV4_CSUM_ENABLE_64	BIT(17)
#define	HTT_MSDU_EXT_DESC_FLAG_UDP_IPV6_CSUM_ENABLE_64	BIT(18)
#define	HTT_MSDU_EXT_DESC_FLAG_TCP_IPV4_CSUM_ENABLE_64	BIT(19)
#define	HTT_MSDU_EXT_DESC_FLAG_TCP_IPV6_CSUM_ENABLE_64	BIT(20)
#define	HTT_MSDU_EXT_DESC_FLAG_PARTIAL_CSUM_ENABLE_64	BIT(21)

/* All checksum-offload enables at once (64-bit descriptor layout).
 * Note: PARTIAL_CSUM (bit 21) is intentionally not included.
 */
#define HTT_MSDU_CHECKSUM_ENABLE_64  (HTT_MSDU_EXT_DESC_FLAG_IPV4_CSUM_ENABLE_64 \
				     | HTT_MSDU_EXT_DESC_FLAG_UDP_IPV4_CSUM_ENABLE_64 \
				     | HTT_MSDU_EXT_DESC_FLAG_UDP_IPV6_CSUM_ENABLE_64 \
				     | HTT_MSDU_EXT_DESC_FLAG_TCP_IPV4_CSUM_ENABLE_64 \
				     | HTT_MSDU_EXT_DESC_FLAG_TCP_IPV6_CSUM_ENABLE_64)
135
136
/* Bits of htt_data_tx_desc.flags0 (low 5 flag bits + 3-bit pkt_type field). */
enum htt_data_tx_desc_flags0 {
	HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT = 1 << 0,
	HTT_DATA_TX_DESC_FLAGS0_NO_AGGR         = 1 << 1,
	HTT_DATA_TX_DESC_FLAGS0_NO_ENCRYPT      = 1 << 2,
	HTT_DATA_TX_DESC_FLAGS0_NO_CLASSIFY     = 1 << 3,
	HTT_DATA_TX_DESC_FLAGS0_RSVD0           = 1 << 4
#define HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE_MASK 0xE0
#define HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE_LSB 5
};

/* Bits of htt_data_tx_desc.flags1: vdev id (5:0), ext tid (10:6), flags. */
enum htt_data_tx_desc_flags1 {
#define HTT_DATA_TX_DESC_FLAGS1_VDEV_ID_BITS 6
#define HTT_DATA_TX_DESC_FLAGS1_VDEV_ID_MASK 0x003F
#define HTT_DATA_TX_DESC_FLAGS1_VDEV_ID_LSB  0
#define HTT_DATA_TX_DESC_FLAGS1_EXT_TID_BITS 5
#define HTT_DATA_TX_DESC_FLAGS1_EXT_TID_MASK 0x07C0
#define HTT_DATA_TX_DESC_FLAGS1_EXT_TID_LSB  6
	HTT_DATA_TX_DESC_FLAGS1_POSTPONED        = 1 << 11,
	HTT_DATA_TX_DESC_FLAGS1_MORE_IN_BATCH    = 1 << 12,
	HTT_DATA_TX_DESC_FLAGS1_CKSUM_L3_OFFLOAD = 1 << 13,
	HTT_DATA_TX_DESC_FLAGS1_CKSUM_L4_OFFLOAD = 1 << 14,
	HTT_DATA_TX_DESC_FLAGS1_TX_COMPLETE      = 1 << 15
};

/* Accessors for fields of the TX_CREDIT_UPDATE_IND message word. */
#define HTT_TX_CREDIT_DELTA_ABS_M      0xffff0000
#define HTT_TX_CREDIT_DELTA_ABS_S      16
#define HTT_TX_CREDIT_DELTA_ABS_GET(word) \
	    (((word) & HTT_TX_CREDIT_DELTA_ABS_M) >> HTT_TX_CREDIT_DELTA_ABS_S)

#define HTT_TX_CREDIT_SIGN_BIT_M       0x00000100
#define HTT_TX_CREDIT_SIGN_BIT_S       8
#define HTT_TX_CREDIT_SIGN_BIT_GET(word) \
	    (((word) & HTT_TX_CREDIT_SIGN_BIT_M) >> HTT_TX_CREDIT_SIGN_BIT_S)

/* Special ext_tid values beyond the 0-15 QoS TIDs. */
enum htt_data_tx_ext_tid {
	HTT_DATA_TX_EXT_TID_NON_QOS_MCAST_BCAST = 16,
	HTT_DATA_TX_EXT_TID_MGMT                = 17,
	HTT_DATA_TX_EXT_TID_INVALID             = 31
};

#define HTT_INVALID_PEERID 0xFFFF
177
178
/*
179
* htt_data_tx_desc - used for data tx path
180
*
181
* Note: vdev_id irrelevant for pkt_type == raw and no_classify == 1.
182
* ext_tid: for qos-data frames (0-15), see %HTT_DATA_TX_EXT_TID_
183
* for special kinds of tids
184
* postponed: only for HL hosts. indicates if this is a resend
185
* (HL hosts manage queues on the host )
186
* more_in_batch: only for HL hosts. indicates if more packets are
187
* pending. this allows target to wait and aggregate
188
* freq: 0 means home channel of given vdev. intended for offchannel
189
*/
190
struct htt_data_tx_desc {
191
u8 flags0; /* %HTT_DATA_TX_DESC_FLAGS0_ */
192
__le16 flags1; /* %HTT_DATA_TX_DESC_FLAGS1_ */
193
__le16 len;
194
__le16 id;
195
__le32 frags_paddr;
196
union {
197
__le32 peerid;
198
struct {
199
__le16 peerid;
200
__le16 freq;
201
} __packed offchan_tx;
202
} __packed;
203
u8 prefetch[0]; /* start of frame, for FW classification engine */
204
} __packed;
205
206
struct htt_data_tx_desc_64 {
207
u8 flags0; /* %HTT_DATA_TX_DESC_FLAGS0_ */
208
__le16 flags1; /* %HTT_DATA_TX_DESC_FLAGS1_ */
209
__le16 len;
210
__le16 id;
211
__le64 frags_paddr;
212
union {
213
__le32 peerid;
214
struct {
215
__le16 peerid;
216
__le16 freq;
217
} __packed offchan_tx;
218
} __packed;
219
u8 prefetch[0]; /* start of frame, for FW classification engine */
220
} __packed;
221
222
/* RX ring config flags: which rx_desc sections / frame classes the FW
 * should deposit into the ring (htt_rx_ring_setup_ring*.flags).
 */
enum htt_rx_ring_flags {
	HTT_RX_RING_FLAGS_MAC80211_HDR = 1 << 0,
	HTT_RX_RING_FLAGS_MSDU_PAYLOAD = 1 << 1,
	HTT_RX_RING_FLAGS_PPDU_START   = 1 << 2,
	HTT_RX_RING_FLAGS_PPDU_END     = 1 << 3,
	HTT_RX_RING_FLAGS_MPDU_START   = 1 << 4,
	HTT_RX_RING_FLAGS_MPDU_END     = 1 << 5,
	HTT_RX_RING_FLAGS_MSDU_START   = 1 << 6,
	HTT_RX_RING_FLAGS_MSDU_END     = 1 << 7,
	HTT_RX_RING_FLAGS_RX_ATTENTION = 1 << 8,
	HTT_RX_RING_FLAGS_FRAG_INFO    = 1 << 9,
	HTT_RX_RING_FLAGS_UNICAST_RX   = 1 << 10,
	HTT_RX_RING_FLAGS_MULTICAST_RX = 1 << 11,
	HTT_RX_RING_FLAGS_CTRL_RX      = 1 << 12,
	HTT_RX_RING_FLAGS_MGMT_RX      = 1 << 13,
	HTT_RX_RING_FLAGS_NULL_RX      = 1 << 14,
	HTT_RX_RING_FLAGS_PHY_DATA_RX  = 1 << 15
};

#define HTT_RX_RING_SIZE_MIN 128
#define HTT_RX_RING_SIZE_MAX 2048
#define HTT_RX_RING_SIZE HTT_RX_RING_SIZE_MAX
#define HTT_RX_RING_FILL_LEVEL (((HTT_RX_RING_SIZE) / 2) - 1)
#define HTT_RX_RING_FILL_LEVEL_DUAL_MAC (HTT_RX_RING_SIZE - 1)
246
247
/* Where the FW should place each rx_desc section within a ring buffer. */
struct htt_rx_ring_rx_desc_offsets {
	/* the following offsets are in 4-byte units */
	__le16 mac80211_hdr_offset;
	__le16 msdu_payload_offset;
	__le16 ppdu_start_offset;
	__le16 ppdu_end_offset;
	__le16 mpdu_start_offset;
	__le16 mpdu_end_offset;
	__le16 msdu_start_offset;
	__le16 msdu_end_offset;
	__le16 rx_attention_offset;
	__le16 frag_info_offset;
} __packed;

/* Per-ring config for RX_RING_CFG, 32-bit physical addresses. */
struct htt_rx_ring_setup_ring32 {
	__le32 fw_idx_shadow_reg_paddr;
	__le32 rx_ring_base_paddr;
	__le16 rx_ring_len; /* in 4-byte words */
	__le16 rx_ring_bufsize; /* rx skb size - in bytes */
	__le16 flags; /* %HTT_RX_RING_FLAGS_ */
	__le16 fw_idx_init_val;

	struct htt_rx_ring_rx_desc_offsets offsets;
} __packed;

/* Per-ring config for RX_RING_CFG, 64-bit physical addresses. */
struct htt_rx_ring_setup_ring64 {
	__le64 fw_idx_shadow_reg_paddr;
	__le64 rx_ring_base_paddr;
	__le16 rx_ring_len; /* in 4-byte words */
	__le16 rx_ring_bufsize; /* rx skb size - in bytes */
	__le16 flags; /* %HTT_RX_RING_FLAGS_ */
	__le16 fw_idx_init_val;

	struct htt_rx_ring_rx_desc_offsets offsets;
} __packed;

struct htt_rx_ring_setup_hdr {
	u8 num_rings; /* supported values: 1, 2 */
	__le16 rsvd0;
} __packed;

/* Full RX_RING_CFG message bodies; rings[] has hdr.num_rings elements. */
struct htt_rx_ring_setup_32 {
	struct htt_rx_ring_setup_hdr hdr;
	struct htt_rx_ring_setup_ring32 rings[];
} __packed;

struct htt_rx_ring_setup_64 {
	struct htt_rx_ring_setup_hdr hdr;
	struct htt_rx_ring_setup_ring64 rings[];
} __packed;
297
298
/*
 * htt_stats_req - request target to send specified statistics
 *
 * @msg_type: hardcoded %HTT_H2T_MSG_TYPE_STATS_REQ
 * @upload_types: see %htt_dbg_stats_type. this is 24bit field actually
 *	so make sure its little-endian.
 * @reset_types: see %htt_dbg_stats_type. this is 24bit field actually
 *	so make sure its little-endian.
 * @cfg_val: stat_type specific configuration
 * @stat_type: see %htt_dbg_stats_type
 * @cookie_lsb: used for confirmation message from target->host
 * @cookie_msb: ditto as %cookie
 */
struct htt_stats_req {
	u8 upload_types[3];
	u8 rsvd0;
	u8 reset_types[3];
	struct {
		u8 mpdu_bytes;
		u8 mpdu_num_msdus;
		u8 msdu_bytes;
	} __packed;
	u8 stat_type;
	__le32 cookie_lsb;
	__le32 cookie_msb;
} __packed;

#define HTT_STATS_REQ_CFG_STAT_TYPE_INVALID 0xff
#define HTT_STATS_BIT_MASK GENMASK(16, 0)
327
328
/*
 * htt_oob_sync_req - request out-of-band sync
 *
 * The HTT SYNC tells the target to suspend processing of subsequent
 * HTT host-to-target messages until some other target agent locally
 * informs the target HTT FW that the current sync counter is equal to
 * or greater than (in a modulo sense) the sync counter specified in
 * the SYNC message.
 *
 * This allows other host-target components to synchronize their operation
 * with HTT, e.g. to ensure that tx frames don't get transmitted until a
 * security key has been downloaded to and activated by the target.
 * In the absence of any explicit synchronization counter value
 * specification, the target HTT FW will use zero as the default current
 * sync value.
 *
 * The HTT target FW will suspend its host->target message processing as long
 * as 0 < (in-band sync counter - out-of-band sync counter) & 0xff < 128.
 */
struct htt_oob_sync_req {
	u8 sync_count;
	__le16 rsvd0;
} __packed;

/* A-MPDU/A-MSDU aggregation limits (HTT_H2T_MSG_TYPE_AGGR_CFG). */
struct htt_aggr_conf {
	u8 max_num_ampdu_subframes;
	/* amsdu_subframes is limited by 0x1F mask */
	u8 max_num_amsdu_subframes;
} __packed;

/* v2 layout adds a reserved byte after the two limit fields. */
struct htt_aggr_conf_v2 {
	u8 max_num_ampdu_subframes;
	/* amsdu_subframes is limited by 0x1F mask */
	u8 max_num_amsdu_subframes;
	u8 reserved;
} __packed;
364
365
#define HTT_MGMT_FRM_HDR_DOWNLOAD_LEN 32

/* QCA99X0-specific trailer on the mgmt tx descriptor (tx rate override). */
struct htt_mgmt_tx_desc_qca99x0 {
	__le32 rate;
} __packed;

/* HTT_H2T_MSG_TYPE_MGMT_TX payload (HTT < 3.0 management tx path).
 * hdr[] carries the first HTT_MGMT_FRM_HDR_DOWNLOAD_LEN bytes of the frame.
 */
struct htt_mgmt_tx_desc {
	u8 pad[sizeof(u32) - sizeof(struct htt_cmd_hdr)];
	__le32 msdu_paddr;
	__le32 desc_id;
	__le32 len;
	__le32 vdev_id;
	u8 hdr[HTT_MGMT_FRM_HDR_DOWNLOAD_LEN];
	union {
		struct htt_mgmt_tx_desc_qca99x0 qca99x0;
	} __packed;
} __packed;

/* Completion status reported for a mgmt tx descriptor. */
enum htt_mgmt_tx_status {
	HTT_MGMT_TX_STATUS_OK    = 0,
	HTT_MGMT_TX_STATUS_RETRY = 1,
	HTT_MGMT_TX_STATUS_DROP  = 2
};
387
388
/*=== target -> host messages ===============================================*/

/* T2H message IDs as numbered by "main" firmware; remapped at runtime to
 * the abstract %htt_t2h_msg_type values.
 */
enum htt_main_t2h_msg_type {
	HTT_MAIN_T2H_MSG_TYPE_VERSION_CONF             = 0x0,
	HTT_MAIN_T2H_MSG_TYPE_RX_IND                   = 0x1,
	HTT_MAIN_T2H_MSG_TYPE_RX_FLUSH                 = 0x2,
	HTT_MAIN_T2H_MSG_TYPE_PEER_MAP                 = 0x3,
	HTT_MAIN_T2H_MSG_TYPE_PEER_UNMAP               = 0x4,
	HTT_MAIN_T2H_MSG_TYPE_RX_ADDBA                 = 0x5,
	HTT_MAIN_T2H_MSG_TYPE_RX_DELBA                 = 0x6,
	HTT_MAIN_T2H_MSG_TYPE_TX_COMPL_IND             = 0x7,
	HTT_MAIN_T2H_MSG_TYPE_PKTLOG                   = 0x8,
	HTT_MAIN_T2H_MSG_TYPE_STATS_CONF               = 0x9,
	HTT_MAIN_T2H_MSG_TYPE_RX_FRAG_IND              = 0xa,
	HTT_MAIN_T2H_MSG_TYPE_SEC_IND                  = 0xb,
	HTT_MAIN_T2H_MSG_TYPE_TX_INSPECT_IND           = 0xd,
	HTT_MAIN_T2H_MSG_TYPE_MGMT_TX_COMPL_IND        = 0xe,
	HTT_MAIN_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND     = 0xf,
	HTT_MAIN_T2H_MSG_TYPE_RX_PN_IND                = 0x10,
	HTT_MAIN_T2H_MSG_TYPE_RX_OFFLOAD_DELIVER_IND   = 0x11,
	HTT_MAIN_T2H_MSG_TYPE_TEST,
	/* keep this last */
	HTT_MAIN_T2H_NUM_MSGS
};
412
413
/* T2H message IDs as numbered by 10.x firmware. */
enum htt_10x_t2h_msg_type {
	HTT_10X_T2H_MSG_TYPE_VERSION_CONF      = 0x0,
	HTT_10X_T2H_MSG_TYPE_RX_IND            = 0x1,
	HTT_10X_T2H_MSG_TYPE_RX_FLUSH          = 0x2,
	HTT_10X_T2H_MSG_TYPE_PEER_MAP          = 0x3,
	HTT_10X_T2H_MSG_TYPE_PEER_UNMAP        = 0x4,
	HTT_10X_T2H_MSG_TYPE_RX_ADDBA          = 0x5,
	HTT_10X_T2H_MSG_TYPE_RX_DELBA          = 0x6,
	HTT_10X_T2H_MSG_TYPE_TX_COMPL_IND      = 0x7,
	HTT_10X_T2H_MSG_TYPE_PKTLOG            = 0x8,
	HTT_10X_T2H_MSG_TYPE_STATS_CONF        = 0x9,
	HTT_10X_T2H_MSG_TYPE_RX_FRAG_IND       = 0xa,
	HTT_10X_T2H_MSG_TYPE_SEC_IND           = 0xb,
	HTT_10X_T2H_MSG_TYPE_RC_UPDATE_IND     = 0xc,
	HTT_10X_T2H_MSG_TYPE_TX_INSPECT_IND    = 0xd,
	HTT_10X_T2H_MSG_TYPE_TEST              = 0xe,
	HTT_10X_T2H_MSG_TYPE_CHAN_CHANGE       = 0xf,
	HTT_10X_T2H_MSG_TYPE_AGGR_CONF         = 0x11,
	HTT_10X_T2H_MSG_TYPE_STATS_NOUPLOAD    = 0x12,
	HTT_10X_T2H_MSG_TYPE_MGMT_TX_COMPL_IND = 0x13,
	/* keep this last */
	HTT_10X_T2H_NUM_MSGS
};
436
437
/* T2H message IDs as numbered by TLV firmware. */
enum htt_tlv_t2h_msg_type {
	HTT_TLV_T2H_MSG_TYPE_VERSION_CONF             = 0x0,
	HTT_TLV_T2H_MSG_TYPE_RX_IND                   = 0x1,
	HTT_TLV_T2H_MSG_TYPE_RX_FLUSH                 = 0x2,
	HTT_TLV_T2H_MSG_TYPE_PEER_MAP                 = 0x3,
	HTT_TLV_T2H_MSG_TYPE_PEER_UNMAP               = 0x4,
	HTT_TLV_T2H_MSG_TYPE_RX_ADDBA                 = 0x5,
	HTT_TLV_T2H_MSG_TYPE_RX_DELBA                 = 0x6,
	HTT_TLV_T2H_MSG_TYPE_TX_COMPL_IND             = 0x7,
	HTT_TLV_T2H_MSG_TYPE_PKTLOG                   = 0x8,
	HTT_TLV_T2H_MSG_TYPE_STATS_CONF               = 0x9,
	HTT_TLV_T2H_MSG_TYPE_RX_FRAG_IND              = 0xa,
	HTT_TLV_T2H_MSG_TYPE_SEC_IND                  = 0xb,
	HTT_TLV_T2H_MSG_TYPE_RC_UPDATE_IND            = 0xc, /* deprecated */
	HTT_TLV_T2H_MSG_TYPE_TX_INSPECT_IND           = 0xd,
	HTT_TLV_T2H_MSG_TYPE_MGMT_TX_COMPL_IND        = 0xe,
	HTT_TLV_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND     = 0xf,
	HTT_TLV_T2H_MSG_TYPE_RX_PN_IND                = 0x10,
	HTT_TLV_T2H_MSG_TYPE_RX_OFFLOAD_DELIVER_IND   = 0x11,
	HTT_TLV_T2H_MSG_TYPE_RX_IN_ORD_PADDR_IND      = 0x12,
	/* 0x13 reserved */
	HTT_TLV_T2H_MSG_TYPE_WDI_IPA_OP_RESPONSE      = 0x14,
	HTT_TLV_T2H_MSG_TYPE_CHAN_CHANGE              = 0x15,
	HTT_TLV_T2H_MSG_TYPE_RX_OFLD_PKT_ERR          = 0x16,
	HTT_TLV_T2H_MSG_TYPE_TEST,
	/* keep this last */
	HTT_TLV_T2H_NUM_MSGS
};
465
466
/* T2H message IDs as numbered by 10.4 firmware. */
enum htt_10_4_t2h_msg_type {
	HTT_10_4_T2H_MSG_TYPE_VERSION_CONF           = 0x0,
	HTT_10_4_T2H_MSG_TYPE_RX_IND                 = 0x1,
	HTT_10_4_T2H_MSG_TYPE_RX_FLUSH               = 0x2,
	HTT_10_4_T2H_MSG_TYPE_PEER_MAP               = 0x3,
	HTT_10_4_T2H_MSG_TYPE_PEER_UNMAP             = 0x4,
	HTT_10_4_T2H_MSG_TYPE_RX_ADDBA               = 0x5,
	HTT_10_4_T2H_MSG_TYPE_RX_DELBA               = 0x6,
	HTT_10_4_T2H_MSG_TYPE_TX_COMPL_IND           = 0x7,
	HTT_10_4_T2H_MSG_TYPE_PKTLOG                 = 0x8,
	HTT_10_4_T2H_MSG_TYPE_STATS_CONF             = 0x9,
	HTT_10_4_T2H_MSG_TYPE_RX_FRAG_IND            = 0xa,
	HTT_10_4_T2H_MSG_TYPE_SEC_IND                = 0xb,
	HTT_10_4_T2H_MSG_TYPE_RC_UPDATE_IND          = 0xc,
	HTT_10_4_T2H_MSG_TYPE_TX_INSPECT_IND         = 0xd,
	HTT_10_4_T2H_MSG_TYPE_MGMT_TX_COMPL_IND      = 0xe,
	HTT_10_4_T2H_MSG_TYPE_CHAN_CHANGE            = 0xf,
	HTT_10_4_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND   = 0x10,
	HTT_10_4_T2H_MSG_TYPE_RX_PN_IND              = 0x11,
	HTT_10_4_T2H_MSG_TYPE_RX_OFFLOAD_DELIVER_IND = 0x12,
	HTT_10_4_T2H_MSG_TYPE_TEST                   = 0x13,
	HTT_10_4_T2H_MSG_TYPE_EN_STATS               = 0x14,
	HTT_10_4_T2H_MSG_TYPE_AGGR_CONF              = 0x15,
	HTT_10_4_T2H_MSG_TYPE_TX_FETCH_IND           = 0x16,
	HTT_10_4_T2H_MSG_TYPE_TX_FETCH_CONFIRM       = 0x17,
	HTT_10_4_T2H_MSG_TYPE_STATS_NOUPLOAD         = 0x18,
	/* 0x19 to 0x2f are reserved */
	HTT_10_4_T2H_MSG_TYPE_TX_MODE_SWITCH_IND     = 0x30,
	HTT_10_4_T2H_MSG_TYPE_PEER_STATS	     = 0x31,
	/* keep this last */
	HTT_10_4_T2H_NUM_MSGS
};
498
499
/* Firmware-independent (abstract) T2H message types; the per-firmware
 * enums above are translated into these values.
 */
enum htt_t2h_msg_type {
	HTT_T2H_MSG_TYPE_VERSION_CONF,
	HTT_T2H_MSG_TYPE_RX_IND,
	HTT_T2H_MSG_TYPE_RX_FLUSH,
	HTT_T2H_MSG_TYPE_PEER_MAP,
	HTT_T2H_MSG_TYPE_PEER_UNMAP,
	HTT_T2H_MSG_TYPE_RX_ADDBA,
	HTT_T2H_MSG_TYPE_RX_DELBA,
	HTT_T2H_MSG_TYPE_TX_COMPL_IND,
	HTT_T2H_MSG_TYPE_PKTLOG,
	HTT_T2H_MSG_TYPE_STATS_CONF,
	HTT_T2H_MSG_TYPE_RX_FRAG_IND,
	HTT_T2H_MSG_TYPE_SEC_IND,
	HTT_T2H_MSG_TYPE_RC_UPDATE_IND,
	HTT_T2H_MSG_TYPE_TX_INSPECT_IND,
	HTT_T2H_MSG_TYPE_MGMT_TX_COMPLETION,
	HTT_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND,
	HTT_T2H_MSG_TYPE_RX_PN_IND,
	HTT_T2H_MSG_TYPE_RX_OFFLOAD_DELIVER_IND,
	HTT_T2H_MSG_TYPE_RX_IN_ORD_PADDR_IND,
	HTT_T2H_MSG_TYPE_WDI_IPA_OP_RESPONSE,
	HTT_T2H_MSG_TYPE_CHAN_CHANGE,
	HTT_T2H_MSG_TYPE_RX_OFLD_PKT_ERR,
	HTT_T2H_MSG_TYPE_AGGR_CONF,
	HTT_T2H_MSG_TYPE_STATS_NOUPLOAD,
	HTT_T2H_MSG_TYPE_TEST,
	HTT_T2H_MSG_TYPE_EN_STATS,
	HTT_T2H_MSG_TYPE_TX_FETCH_IND,
	HTT_T2H_MSG_TYPE_TX_FETCH_CONFIRM,
	HTT_T2H_MSG_TYPE_TX_MODE_SWITCH_IND,
	HTT_T2H_MSG_TYPE_PEER_STATS,
	/* keep this last */
	HTT_T2H_NUM_MSGS
};
533
534
/*
 * htt_resp_hdr - header for target-to-host messages
 *
 * msg_type: see htt_t2h_msg_type
 */
struct htt_resp_hdr {
	u8 msg_type;
} __packed;

#define HTT_RESP_HDR_MSG_TYPE_OFFSET 0
#define HTT_RESP_HDR_MSG_TYPE_MASK   0xff
#define HTT_RESP_HDR_MSG_TYPE_LSB    0

/* htt_ver_resp - response sent for htt_ver_req */
struct htt_ver_resp {
	u8 minor;
	u8 major;
	u8 rsvd0;
} __packed;

#define HTT_MGMT_TX_CMPL_FLAG_ACK_RSSI BIT(0)

#define HTT_MGMT_TX_CMPL_INFO_ACK_RSSI_MASK	GENMASK(7, 0)

/* T2H MGMT_TX_COMPL_IND payload; info carries ACK RSSI when the
 * HTT_MGMT_TX_CMPL_FLAG_ACK_RSSI flag is set.
 */
struct htt_mgmt_tx_completion {
	u8 rsvd0;
	u8 rsvd1;
	u8 flags;
	__le32 desc_id;
	__le32 status;
	__le32 ppdu_id;
	__le32 info;
} __packed;
567
568
/* Field layout of htt_rx_indication_hdr.info0/info1. */
#define HTT_RX_INDICATION_INFO0_EXT_TID_MASK  (0x1F)
#define HTT_RX_INDICATION_INFO0_EXT_TID_LSB   (0)
#define HTT_RX_INDICATION_INFO0_FLUSH_VALID   (1 << 5)
#define HTT_RX_INDICATION_INFO0_RELEASE_VALID (1 << 6)
#define HTT_RX_INDICATION_INFO0_PPDU_DURATION BIT(7)

#define HTT_RX_INDICATION_INFO1_FLUSH_START_SEQNO_MASK   0x0000003F
#define HTT_RX_INDICATION_INFO1_FLUSH_START_SEQNO_LSB    0
#define HTT_RX_INDICATION_INFO1_FLUSH_END_SEQNO_MASK     0x00000FC0
#define HTT_RX_INDICATION_INFO1_FLUSH_END_SEQNO_LSB      6
#define HTT_RX_INDICATION_INFO1_RELEASE_START_SEQNO_MASK 0x0003F000
#define HTT_RX_INDICATION_INFO1_RELEASE_START_SEQNO_LSB  12
#define HTT_RX_INDICATION_INFO1_RELEASE_END_SEQNO_MASK   0x00FC0000
#define HTT_RX_INDICATION_INFO1_RELEASE_END_SEQNO_LSB    18
#define HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES_MASK     0xFF000000
#define HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES_LSB      24

/* Flags for htt_data_tx_completion.flags2 / related append payloads. */
#define HTT_TX_CMPL_FLAG_DATA_RSSI		BIT(0)
#define HTT_TX_CMPL_FLAG_PPID_PRESENT		BIT(1)
#define HTT_TX_CMPL_FLAG_PA_PRESENT		BIT(2)
#define HTT_TX_CMPL_FLAG_PPDU_DURATION_PRESENT	BIT(3)

#define HTT_TX_DATA_RSSI_ENABLE_WCN3990		BIT(3)
#define HTT_TX_DATA_APPEND_RETRIES		BIT(0)
#define HTT_TX_DATA_APPEND_TIMESTAMP		BIT(1)

/* Fixed header at the front of every RX_IND message. */
struct htt_rx_indication_hdr {
	u8 info0; /* %HTT_RX_INDICATION_INFO0_ */
	__le16 peer_id;
	__le32 info1; /* %HTT_RX_INDICATION_INFO1_ */
} __packed;

/* Field layout of htt_rx_indication_ppdu.info0/info1/info2. */
#define HTT_RX_INDICATION_INFO0_PHY_ERR_VALID    (1 << 0)
#define HTT_RX_INDICATION_INFO0_LEGACY_RATE_MASK (0x1E)
#define HTT_RX_INDICATION_INFO0_LEGACY_RATE_LSB  (1)
#define HTT_RX_INDICATION_INFO0_LEGACY_RATE_CCK  (1 << 5)
#define HTT_RX_INDICATION_INFO0_END_VALID        (1 << 6)
#define HTT_RX_INDICATION_INFO0_START_VALID      (1 << 7)

#define HTT_RX_INDICATION_INFO1_VHT_SIG_A1_MASK    0x00FFFFFF
#define HTT_RX_INDICATION_INFO1_VHT_SIG_A1_LSB     0
#define HTT_RX_INDICATION_INFO1_PREAMBLE_TYPE_MASK 0xFF000000
#define HTT_RX_INDICATION_INFO1_PREAMBLE_TYPE_LSB  24

#define HTT_RX_INDICATION_INFO2_VHT_SIG_A1_MASK 0x00FFFFFF
#define HTT_RX_INDICATION_INFO2_VHT_SIG_A1_LSB  0
#define HTT_RX_INDICATION_INFO2_SERVICE_MASK    0xFF000000
#define HTT_RX_INDICATION_INFO2_SERVICE_LSB     24
616
617
/* Legacy rate codes reported in INFO0_LEGACY_RATE. The OFDM and CCK
 * codes share numeric values; INFO0_LEGACY_RATE_CCK selects the table.
 */
enum htt_rx_legacy_rate {
	HTT_RX_OFDM_48 = 0,
	HTT_RX_OFDM_24 = 1,
	HTT_RX_OFDM_12,
	HTT_RX_OFDM_6,
	HTT_RX_OFDM_54,
	HTT_RX_OFDM_36,
	HTT_RX_OFDM_18,
	HTT_RX_OFDM_9,

	/* long preamble */
	HTT_RX_CCK_11_LP = 0,
	HTT_RX_CCK_5_5_LP = 1,
	HTT_RX_CCK_2_LP,
	HTT_RX_CCK_1_LP,
	/* short preamble */
	HTT_RX_CCK_11_SP,
	HTT_RX_CCK_5_5_SP,
	HTT_RX_CCK_2_SP
};

enum htt_rx_legacy_rate_type {
	HTT_RX_LEGACY_RATE_OFDM = 0,
	HTT_RX_LEGACY_RATE_CCK
};

/* Preamble type reported in INFO1_PREAMBLE_TYPE. */
enum htt_rx_preamble_type {
	HTT_RX_LEGACY        = 0x4,
	HTT_RX_HT            = 0x8,
	HTT_RX_HT_WITH_TXBF  = 0x9,
	HTT_RX_VHT           = 0xC,
	HTT_RX_VHT_WITH_TXBF = 0xD,
};
650
651
/*
 * Fields: phy_err_valid, phy_err_code, tsf,
 * usec_timestamp, sub_usec_timestamp
 * ..are valid only if end_valid == 1.
 *
 * Fields: rssi_chains, legacy_rate_type,
 * legacy_rate_cck, preamble_type, service,
 * vht_sig_*
 * ..are valid only if start_valid == 1;
 */
struct htt_rx_indication_ppdu {
	u8 combined_rssi;
	u8 sub_usec_timestamp;
	u8 phy_err_code;
	u8 info0; /* HTT_RX_INDICATION_INFO0_ */
	struct {
		u8 pri20_db;
		u8 ext20_db;
		u8 ext40_db;
		u8 ext80_db;
	} __packed rssi_chains[4]; /* one entry per rx chain */
	__le32 tsf;
	__le32 usec_timestamp;
	__le32 info1; /* HTT_RX_INDICATION_INFO1_ */
	__le32 info2; /* HTT_RX_INDICATION_INFO2_ */
} __packed;
677
678
/* Per-MPDU-range status reported in htt_rx_indication_mpdu_range. */
enum htt_rx_mpdu_status {
	HTT_RX_IND_MPDU_STATUS_UNKNOWN = 0x0,
	HTT_RX_IND_MPDU_STATUS_OK,
	HTT_RX_IND_MPDU_STATUS_ERR_FCS,
	HTT_RX_IND_MPDU_STATUS_ERR_DUP,
	HTT_RX_IND_MPDU_STATUS_ERR_REPLAY,
	HTT_RX_IND_MPDU_STATUS_ERR_INV_PEER,
	/* only accept EAPOL frames */
	HTT_RX_IND_MPDU_STATUS_UNAUTH_PEER,
	HTT_RX_IND_MPDU_STATUS_OUT_OF_SYNC,
	/* Non-data in promiscuous mode */
	HTT_RX_IND_MPDU_STATUS_MGMT_CTRL,
	HTT_RX_IND_MPDU_STATUS_TKIP_MIC_ERR,
	HTT_RX_IND_MPDU_STATUS_DECRYPT_ERR,
	HTT_RX_IND_MPDU_STATUS_MPDU_LENGTH_ERR,
	HTT_RX_IND_MPDU_STATUS_ENCRYPT_REQUIRED_ERR,
	HTT_RX_IND_MPDU_STATUS_PRIVACY_ERR,

	/*
	 * MISC: discard for unspecified reasons.
	 * Leave this enum value last.
	 */
	HTT_RX_IND_MPDU_STATUS_ERR_MISC = 0xFF
};

/* One run of MPDUs sharing a status, within an RX_IND message. */
struct htt_rx_indication_mpdu_range {
	u8 mpdu_count;
	u8 mpdu_range_status; /* %htt_rx_mpdu_status */
	u8 pad0;
	u8 pad1;
} __packed;

/* Sits between the ppdu section and the variable-length fw descriptor. */
struct htt_rx_indication_prefix {
	__le16 fw_rx_desc_bytes;
	u8 pad0;
	u8 pad1;
} __packed;
715
716
/* Full T2H RX_IND message (low-latency variant). */
struct htt_rx_indication {
	struct htt_rx_indication_hdr hdr;
	struct htt_rx_indication_ppdu ppdu;
	struct htt_rx_indication_prefix prefix;

	/*
	 * the following fields are both dynamically sized, so
	 * take care addressing them
	 */

	/* the size of this is %fw_rx_desc_bytes */
	struct fw_rx_desc_base fw_desc;

	/*
	 * %mpdu_ranges starts after &%prefix + roundup(%fw_rx_desc_bytes, 4)
	 * and has %num_mpdu_ranges elements.
	 */
	struct htt_rx_indication_mpdu_range mpdu_ranges[];
} __packed;

/* High latency version of the RX indication */
struct htt_rx_indication_hl {
	struct htt_rx_indication_hdr hdr;
	struct htt_rx_indication_ppdu ppdu;
	struct htt_rx_indication_prefix prefix;
	struct fw_rx_desc_hl fw_desc;
	struct htt_rx_indication_mpdu_range mpdu_ranges[];
} __packed;

/* HL rx descriptor: attention info plus up to a 128-bit packet number. */
struct htt_hl_rx_desc {
	__le32 info;
	__le32 pn_31_0;
	union {
		struct {
			__le16 pn_47_32;
			__le16 pn_63_48;
		} pn16;
		__le32 pn_63_32;
	} u0;
	__le32 pn_95_64;
	__le32 pn_127_96;
} __packed;
758
759
static inline struct htt_rx_indication_mpdu_range *
760
htt_rx_ind_get_mpdu_ranges(struct htt_rx_indication *rx_ind)
761
{
762
#if defined(__linux__)
763
void *ptr = rx_ind;
764
#elif defined(__FreeBSD__)
765
u8 *ptr = (void *)rx_ind;
766
#endif
767
768
ptr += sizeof(rx_ind->hdr)
769
+ sizeof(rx_ind->ppdu)
770
+ sizeof(rx_ind->prefix)
771
+ roundup(__le16_to_cpu(rx_ind->prefix.fw_rx_desc_bytes), 4);
772
#if defined(__linux__)
773
return ptr;
774
#elif defined(__FreeBSD__)
775
return ((void *)ptr);
776
#endif
777
}
778
779
static inline struct htt_rx_indication_mpdu_range *
780
htt_rx_ind_get_mpdu_ranges_hl(struct htt_rx_indication_hl *rx_ind)
781
{
782
#if defined(__linux__)
783
void *ptr = rx_ind;
784
#elif defined(__FreeBSD__)
785
u8 *ptr = (void *)rx_ind;
786
#endif
787
788
ptr += sizeof(rx_ind->hdr)
789
+ sizeof(rx_ind->ppdu)
790
+ sizeof(rx_ind->prefix)
791
+ sizeof(rx_ind->fw_desc);
792
#if defined(__linux__)
793
return ptr;
794
#elif defined(__FreeBSD__)
795
return ((void *)ptr);
796
#endif
797
}
798
799
enum htt_rx_flush_mpdu_status {
	HTT_RX_FLUSH_MPDU_DISCARD = 0,
	HTT_RX_FLUSH_MPDU_REORDER = 1,
};

/*
 * htt_rx_flush - discard or reorder given range of mpdus
 *
 * Note: host must check if all sequence numbers between
 *	[seq_num_start, seq_num_end-1] are valid.
 */
struct htt_rx_flush {
	__le16 peer_id;
	u8 tid;
	u8 rsvd0;
	u8 mpdu_status; /* %htt_rx_flush_mpdu_status */
	u8 seq_num_start; /* it is 6 LSBs of 802.11 seq no */
	u8 seq_num_end; /* it is 6 LSBs of 802.11 seq no */
};

/* T2H PEER_MAP: associates a peer MAC address with a firmware peer_id. */
struct htt_rx_peer_map {
	u8 vdev_id;
	__le16 peer_id;
	u8 addr[6];
	u8 rsvd0;
	u8 rsvd1;
} __packed;

/* T2H PEER_UNMAP: releases a previously mapped peer_id. */
struct htt_rx_peer_unmap {
	u8 rsvd0;
	__le16 peer_id;
} __packed;
831
832
enum htt_txrx_sec_cast_type {
	HTT_TXRX_SEC_MCAST = 0,
	HTT_TXRX_SEC_UCAST
};

enum htt_rx_pn_check_type {
	HTT_RX_NON_PN_CHECK = 0,
	HTT_RX_PN_CHECK
};

enum htt_rx_tkip_demic_type {
	HTT_RX_NON_TKIP_MIC = 0,
	HTT_RX_TKIP_MIC
};

/* Cipher types reported in the SEC_IND message. */
enum htt_security_types {
	HTT_SECURITY_NONE,
	HTT_SECURITY_WEP128,
	HTT_SECURITY_WEP104,
	HTT_SECURITY_WEP40,
	HTT_SECURITY_TKIP,
	HTT_SECURITY_TKIP_NOMIC,
	HTT_SECURITY_AES_CCMP,
	HTT_SECURITY_WAPI,

	HTT_NUM_SECURITY_TYPES /* keep this last! */
};

#define ATH10K_HTT_TXRX_PEER_SECURITY_MAX 2
#define ATH10K_TXRX_NUM_EXT_TIDS 19
#define ATH10K_TXRX_NON_QOS_TID 16

/* Layout of htt_security_indication.flags: 7-bit type + unicast bit. */
enum htt_security_flags {
#define HTT_SECURITY_TYPE_MASK 0x7F
#define HTT_SECURITY_TYPE_LSB  0
	HTT_SECURITY_IS_UNICAST = 1 << 7
};
869
870
/* T2H SEC_IND: key material/state for a peer. */
struct htt_security_indication {
	union {
		/* dont use bitfields; undefined behaviour */
		u8 flags; /* %htt_security_flags */
		struct {
			u8 security_type:7, /* %htt_security_types */
			   is_unicast:1;
		} __packed;
	} __packed;
	__le16 peer_id;
	u8 michael_key[8];
	u8 wapi_rsc[16];
} __packed;

/* Layout of the info0 word shared by htt_rx_addba/htt_rx_delba. */
#define HTT_RX_BA_INFO0_TID_MASK     0x000F
#define HTT_RX_BA_INFO0_TID_LSB      0
#define HTT_RX_BA_INFO0_PEER_ID_MASK 0xFFF0
#define HTT_RX_BA_INFO0_PEER_ID_LSB  4

struct htt_rx_addba {
	u8 window_size;
	__le16 info0; /* %HTT_RX_BA_INFO0_ */
} __packed;

struct htt_rx_delba {
	u8 rsvd0;
	__le16 info0; /* %HTT_RX_BA_INFO0_ */
} __packed;
898
899
/* Status codes carried in the low bits of htt_data_tx_completion.flags. */
enum htt_data_tx_status {
	HTT_DATA_TX_STATUS_OK            = 0,
	HTT_DATA_TX_STATUS_DISCARD       = 1,
	HTT_DATA_TX_STATUS_NO_ACK        = 2,
	HTT_DATA_TX_STATUS_POSTPONE      = 3 /* HL only */
};

/* Layout of htt_data_tx_completion.flags: status (2:0), tid (6:3), valid. */
enum htt_data_tx_flags {
#define HTT_DATA_TX_STATUS_MASK 0x07
#define HTT_DATA_TX_STATUS_LSB  0
#define HTT_DATA_TX_TID_MASK    0x78
#define HTT_DATA_TX_TID_LSB     3
	HTT_DATA_TX_TID_INVALID = 1 << 7
};

#define HTT_TX_COMPL_INV_MSDU_ID 0xFFFF

/* Optional per-MSDU retry-count record appended to TX_COMPL_IND. */
struct htt_append_retries {
	__le16 msdu_id;
	u8 tx_retries;
	u8 flag;
} __packed;

/* Optional extended completion info: retries, timestamp, per-MSDU RSSI. */
struct htt_data_tx_completion_ext {
	struct htt_append_retries a_retries;
	__le32 t_stamp;
	__le16 msdus_rssi[];
} __packed;
927
928
/*
 * @brief target -> host TX completion indication message definition
 *
 * @details
 * The following diagram shows the format of the TX completion indication sent
 * from the target to the host
 *
 *          |31 28|27|26|25|24|23        16| 15 |14 11|10   8|7          0|
 *          |-------------------------------------------------------------|
 * header:  |rsvd |A2|TP|A1|A0|     num    | t_i| tid |status|  msg_type  |
 *          |-------------------------------------------------------------|
 * payload: |            MSDU1 ID          |         MSDU0 ID             |
 *          |-------------------------------------------------------------|
 *          :            MSDU3 ID          :         MSDU2 ID             :
 *          |-------------------------------------------------------------|
 *          |               struct htt_tx_compl_ind_append_retries        |
 *          |- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -|
 *          |               struct htt_tx_compl_ind_append_tx_tstamp      |
 *          |- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -|
 *          |           MSDU1 ACK RSSI     |        MSDU0 ACK RSSI        |
 *          |-------------------------------------------------------------|
 *          :           MSDU3 ACK RSSI     :        MSDU2 ACK RSSI        :
 *          |- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -|
 *    -msg_type
 *     Bits 7:0
 *     Purpose: identifies this as HTT TX completion indication
 *    -status
 *     Bits 10:8
 *     Purpose: the TX completion status of payload fragmentations descriptors
 *     Value: could be HTT_TX_COMPL_IND_STAT_OK or HTT_TX_COMPL_IND_STAT_DISCARD
 *    -tid
 *     Bits 14:11
 *     Purpose: the tid associated with those fragmentation descriptors. It is
 *     valid or not, depending on the tid_invalid bit.
 *     Value: 0 to 15
 *    -tid_invalid
 *     Bits 15:15
 *     Purpose: this bit indicates whether the tid field is valid or not
 *     Value: 0 indicates valid, 1 indicates invalid
 *    -num
 *     Bits 23:16
 *     Purpose: the number of payload in this indication
 *     Value: 1 to 255
 *    -A0 = append
 *     Bits 24:24
 *     Purpose: append the struct htt_tx_compl_ind_append_retries which contains
 *     the number of tx retries for one MSDU at the end of this message
 *     Value: 0 indicates no appending, 1 indicates appending
 *    -A1 = append1
 *     Bits 25:25
 *     Purpose: Append the struct htt_tx_compl_ind_append_tx_tstamp which
 *     contains the timestamp info for each TX msdu id in payload.
 *     Value: 0 indicates no appending, 1 indicates appending
 *    -TP = MSDU tx power presence
 *     Bits 26:26
 *     Purpose: Indicate whether the TX_COMPL_IND includes a tx power report
 *     for each MSDU referenced by the TX_COMPL_IND message.
 *     The order of the per-MSDU tx power reports matches the order
 *     of the MSDU IDs.
 *     Value: 0 indicates not appending, 1 indicates appending
 *    -A2 = append2
 *     Bits 27:27
 *     Purpose: Indicate whether data ACK RSSI is appended for each MSDU in
 *     TX_COMP_IND message. The order of the per-MSDU ACK RSSI report
 *     matches the order of the MSDU IDs.
 *     The ACK RSSI values are valid when status is COMPLETE_OK (and
 *     this append2 bit is set).
 *     Value: 0 indicates not appending, 1 indicates appending
 */

struct htt_data_tx_completion {
	union {
		u8 flags;
		struct {
			u8 status:3,
			   tid:4,
			   tid_invalid:1;
		} __packed;
	} __packed;
	u8 num_msdus;
	u8 flags2; /* HTT_TX_CMPL_FLAG_DATA_RSSI */
	__le16 msdus[]; /* variable length based on %num_msdus */
} __packed;
1011
1012
#define HTT_TX_PPDU_DUR_INFO0_PEER_ID_MASK GENMASK(15, 0)
1013
#define HTT_TX_PPDU_DUR_INFO0_TID_MASK GENMASK(20, 16)
1014
1015
struct htt_data_tx_ppdu_dur {
1016
__le32 info0; /* HTT_TX_PPDU_DUR_INFO0_ */
1017
__le32 tx_duration; /* in usecs */
1018
} __packed;
1019
1020
#define HTT_TX_COMPL_PPDU_DUR_INFO0_NUM_ENTRIES_MASK GENMASK(7, 0)
1021
1022
struct htt_data_tx_compl_ppdu_dur {
1023
__le32 info0; /* HTT_TX_COMPL_PPDU_DUR_INFO0_ */
1024
struct htt_data_tx_ppdu_dur ppdu_dur[];
1025
} __packed;
1026
1027
struct htt_tx_compl_ind_base {
1028
u32 hdr;
1029
u16 payload[1/*or more*/];
1030
} __packed;
1031
1032
struct htt_rc_tx_done_params {
1033
u32 rate_code;
1034
u32 rate_code_flags;
1035
u32 flags;
1036
u32 num_enqued; /* 1 for non-AMPDU */
1037
u32 num_retries;
1038
u32 num_failed; /* for AMPDU */
1039
u32 ack_rssi;
1040
u32 time_stamp;
1041
u32 is_probe;
1042
};
1043
1044
struct htt_rc_update {
1045
u8 vdev_id;
1046
__le16 peer_id;
1047
u8 addr[6];
1048
u8 num_elems;
1049
u8 rsvd0;
1050
struct htt_rc_tx_done_params params[]; /* variable length %num_elems */
1051
} __packed;
1052
1053
/* see htt_rx_indication for similar fields and descriptions */
1054
struct htt_rx_fragment_indication {
1055
union {
1056
u8 info0; /* %HTT_RX_FRAG_IND_INFO0_ */
1057
struct {
1058
u8 ext_tid:5,
1059
flush_valid:1;
1060
} __packed;
1061
} __packed;
1062
__le16 peer_id;
1063
__le32 info1; /* %HTT_RX_FRAG_IND_INFO1_ */
1064
__le16 fw_rx_desc_bytes;
1065
__le16 rsvd0;
1066
1067
u8 fw_msdu_rx_desc[];
1068
} __packed;
1069
1070
#define ATH10K_IEEE80211_EXTIV BIT(5)
1071
#define ATH10K_IEEE80211_TKIP_MICLEN 8 /* trailing MIC */
1072
1073
#define HTT_RX_FRAG_IND_INFO0_HEADER_LEN 16
1074
1075
#define HTT_RX_FRAG_IND_INFO0_EXT_TID_MASK 0x1F
1076
#define HTT_RX_FRAG_IND_INFO0_EXT_TID_LSB 0
1077
#define HTT_RX_FRAG_IND_INFO0_FLUSH_VALID_MASK 0x20
1078
#define HTT_RX_FRAG_IND_INFO0_FLUSH_VALID_LSB 5
1079
1080
#define HTT_RX_FRAG_IND_INFO1_FLUSH_SEQ_NUM_START_MASK 0x0000003F
1081
#define HTT_RX_FRAG_IND_INFO1_FLUSH_SEQ_NUM_START_LSB 0
1082
#define HTT_RX_FRAG_IND_INFO1_FLUSH_SEQ_NUM_END_MASK 0x00000FC0
1083
#define HTT_RX_FRAG_IND_INFO1_FLUSH_SEQ_NUM_END_LSB 6
1084
1085
struct htt_rx_pn_ind {
1086
__le16 peer_id;
1087
u8 tid;
1088
u8 seqno_start;
1089
u8 seqno_end;
1090
u8 pn_ie_count;
1091
u8 reserved;
1092
u8 pn_ies[];
1093
} __packed;
1094
1095
struct htt_rx_offload_msdu {
1096
__le16 msdu_len;
1097
__le16 peer_id;
1098
u8 vdev_id;
1099
u8 tid;
1100
u8 fw_desc;
1101
u8 payload[];
1102
} __packed;
1103
1104
struct htt_rx_offload_ind {
1105
u8 reserved;
1106
__le16 msdu_count;
1107
} __packed;
1108
1109
struct htt_rx_in_ord_msdu_desc {
1110
__le32 msdu_paddr;
1111
__le16 msdu_len;
1112
u8 fw_desc;
1113
u8 reserved;
1114
} __packed;
1115
1116
struct htt_rx_in_ord_msdu_desc_ext {
1117
__le64 msdu_paddr;
1118
__le16 msdu_len;
1119
u8 fw_desc;
1120
u8 reserved;
1121
} __packed;
1122
1123
struct htt_rx_in_ord_ind {
1124
u8 info;
1125
__le16 peer_id;
1126
u8 vdev_id;
1127
u8 reserved;
1128
__le16 msdu_count;
1129
union {
1130
DECLARE_FLEX_ARRAY(struct htt_rx_in_ord_msdu_desc,
1131
msdu_descs32);
1132
DECLARE_FLEX_ARRAY(struct htt_rx_in_ord_msdu_desc_ext,
1133
msdu_descs64);
1134
} __packed;
1135
} __packed;
1136
1137
#define HTT_RX_IN_ORD_IND_INFO_TID_MASK 0x0000001f
1138
#define HTT_RX_IN_ORD_IND_INFO_TID_LSB 0
1139
#define HTT_RX_IN_ORD_IND_INFO_OFFLOAD_MASK 0x00000020
1140
#define HTT_RX_IN_ORD_IND_INFO_OFFLOAD_LSB 5
1141
#define HTT_RX_IN_ORD_IND_INFO_FRAG_MASK 0x00000040
1142
#define HTT_RX_IN_ORD_IND_INFO_FRAG_LSB 6
1143
1144
/*
1145
* target -> host test message definition
1146
*
1147
* The following field definitions describe the format of the test
1148
* message sent from the target to the host.
1149
* The message consists of a 4-octet header, followed by a variable
1150
* number of 32-bit integer values, followed by a variable number
1151
* of 8-bit character values.
1152
*
1153
* |31 16|15 8|7 0|
1154
* |-----------------------------------------------------------|
1155
* | num chars | num ints | msg type |
1156
* |-----------------------------------------------------------|
1157
* | int 0 |
1158
* |-----------------------------------------------------------|
1159
* | int 1 |
1160
* |-----------------------------------------------------------|
1161
* | ... |
1162
* |-----------------------------------------------------------|
1163
* | char 3 | char 2 | char 1 | char 0 |
1164
* |-----------------------------------------------------------|
1165
* | | | ... | char 4 |
1166
* |-----------------------------------------------------------|
1167
* - MSG_TYPE
1168
* Bits 7:0
1169
* Purpose: identifies this as a test message
1170
* Value: HTT_MSG_TYPE_TEST
1171
* - NUM_INTS
1172
* Bits 15:8
1173
* Purpose: indicate how many 32-bit integers follow the message header
1174
* - NUM_CHARS
1175
* Bits 31:16
1176
* Purpose: indicate how many 8-bit characters follow the series of integers
1177
*/
1178
struct htt_rx_test {
1179
u8 num_ints;
1180
__le16 num_chars;
1181
1182
/* payload consists of 2 lists:
1183
* a) num_ints * sizeof(__le32)
1184
* b) num_chars * sizeof(u8) aligned to 4bytes
1185
*/
1186
u8 payload[];
1187
} __packed;
1188
1189
static inline __le32 *htt_rx_test_get_ints(struct htt_rx_test *rx_test)
1190
{
1191
return (__le32 *)rx_test->payload;
1192
}
1193
1194
static inline u8 *htt_rx_test_get_chars(struct htt_rx_test *rx_test)
1195
{
1196
return rx_test->payload + (rx_test->num_ints * sizeof(__le32));
1197
}
1198
1199
/*
1200
* target -> host packet log message
1201
*
1202
* The following field definitions describe the format of the packet log
1203
* message sent from the target to the host.
1204
* The message consists of a 4-octet header,followed by a variable number
1205
* of 32-bit character values.
1206
*
1207
* |31 24|23 16|15 8|7 0|
1208
* |-----------------------------------------------------------|
1209
* | | | | msg type |
1210
* |-----------------------------------------------------------|
1211
* | payload |
1212
* |-----------------------------------------------------------|
1213
* - MSG_TYPE
1214
* Bits 7:0
1215
* Purpose: identifies this as a test message
1216
* Value: HTT_MSG_TYPE_PACKETLOG
1217
*/
1218
struct htt_pktlog_msg {
1219
u8 pad[3];
1220
u8 payload[];
1221
} __packed;
1222
1223
struct htt_dbg_stats_rx_reorder_stats {
1224
/* Non QoS MPDUs received */
1225
__le32 deliver_non_qos;
1226
1227
/* MPDUs received in-order */
1228
__le32 deliver_in_order;
1229
1230
/* Flush due to reorder timer expired */
1231
__le32 deliver_flush_timeout;
1232
1233
/* Flush due to move out of window */
1234
__le32 deliver_flush_oow;
1235
1236
/* Flush due to DELBA */
1237
__le32 deliver_flush_delba;
1238
1239
/* MPDUs dropped due to FCS error */
1240
__le32 fcs_error;
1241
1242
/* MPDUs dropped due to monitor mode non-data packet */
1243
__le32 mgmt_ctrl;
1244
1245
/* MPDUs dropped due to invalid peer */
1246
__le32 invalid_peer;
1247
1248
/* MPDUs dropped due to duplication (non aggregation) */
1249
__le32 dup_non_aggr;
1250
1251
/* MPDUs dropped due to processed before */
1252
__le32 dup_past;
1253
1254
/* MPDUs dropped due to duplicate in reorder queue */
1255
__le32 dup_in_reorder;
1256
1257
/* Reorder timeout happened */
1258
__le32 reorder_timeout;
1259
1260
/* invalid bar ssn */
1261
__le32 invalid_bar_ssn;
1262
1263
/* reorder reset due to bar ssn */
1264
__le32 ssn_reset;
1265
};
1266
1267
struct htt_dbg_stats_wal_tx_stats {
1268
/* Num HTT cookies queued to dispatch list */
1269
__le32 comp_queued;
1270
1271
/* Num HTT cookies dispatched */
1272
__le32 comp_delivered;
1273
1274
/* Num MSDU queued to WAL */
1275
__le32 msdu_enqued;
1276
1277
/* Num MPDU queue to WAL */
1278
__le32 mpdu_enqued;
1279
1280
/* Num MSDUs dropped by WMM limit */
1281
__le32 wmm_drop;
1282
1283
/* Num Local frames queued */
1284
__le32 local_enqued;
1285
1286
/* Num Local frames done */
1287
__le32 local_freed;
1288
1289
/* Num queued to HW */
1290
__le32 hw_queued;
1291
1292
/* Num PPDU reaped from HW */
1293
__le32 hw_reaped;
1294
1295
/* Num underruns */
1296
__le32 underrun;
1297
1298
/* Num PPDUs cleaned up in TX abort */
1299
__le32 tx_abort;
1300
1301
/* Num MPDUs requeued by SW */
1302
__le32 mpdus_requeued;
1303
1304
/* excessive retries */
1305
__le32 tx_ko;
1306
1307
/* data hw rate code */
1308
__le32 data_rc;
1309
1310
/* Scheduler self triggers */
1311
__le32 self_triggers;
1312
1313
/* frames dropped due to excessive sw retries */
1314
__le32 sw_retry_failure;
1315
1316
/* illegal rate phy errors */
1317
__le32 illgl_rate_phy_err;
1318
1319
/* wal pdev continuous xretry */
1320
__le32 pdev_cont_xretry;
1321
1322
/* wal pdev continuous xretry */
1323
__le32 pdev_tx_timeout;
1324
1325
/* wal pdev resets */
1326
__le32 pdev_resets;
1327
1328
__le32 phy_underrun;
1329
1330
/* MPDU is more than txop limit */
1331
__le32 txop_ovf;
1332
} __packed;
1333
1334
struct htt_dbg_stats_wal_rx_stats {
1335
/* Cnts any change in ring routing mid-ppdu */
1336
__le32 mid_ppdu_route_change;
1337
1338
/* Total number of statuses processed */
1339
__le32 status_rcvd;
1340
1341
/* Extra frags on rings 0-3 */
1342
__le32 r0_frags;
1343
__le32 r1_frags;
1344
__le32 r2_frags;
1345
__le32 r3_frags;
1346
1347
/* MSDUs / MPDUs delivered to HTT */
1348
__le32 htt_msdus;
1349
__le32 htt_mpdus;
1350
1351
/* MSDUs / MPDUs delivered to local stack */
1352
__le32 loc_msdus;
1353
__le32 loc_mpdus;
1354
1355
/* AMSDUs that have more MSDUs than the status ring size */
1356
__le32 oversize_amsdu;
1357
1358
/* Number of PHY errors */
1359
__le32 phy_errs;
1360
1361
/* Number of PHY errors drops */
1362
__le32 phy_err_drop;
1363
1364
/* Number of mpdu errors - FCS, MIC, ENC etc. */
1365
__le32 mpdu_errs;
1366
} __packed;
1367
1368
struct htt_dbg_stats_wal_peer_stats {
1369
__le32 dummy; /* REMOVE THIS ONCE REAL PEER STAT COUNTERS ARE ADDED */
1370
} __packed;
1371
1372
struct htt_dbg_stats_wal_pdev_txrx {
1373
struct htt_dbg_stats_wal_tx_stats tx_stats;
1374
struct htt_dbg_stats_wal_rx_stats rx_stats;
1375
struct htt_dbg_stats_wal_peer_stats peer_stats;
1376
} __packed;
1377
1378
struct htt_dbg_stats_rx_rate_info {
1379
__le32 mcs[10];
1380
__le32 sgi[10];
1381
__le32 nss[4];
1382
__le32 stbc[10];
1383
__le32 bw[3];
1384
__le32 pream[6];
1385
__le32 ldpc;
1386
__le32 txbf;
1387
};
1388
1389
/*
 * htt_dbg_stats_status -
 * present -     The requested stats have been delivered in full.
 *               This indicates that either the stats information was contained
 *               in its entirety within this message, or else this message
 *               completes the delivery of the requested stats info that was
 *               partially delivered through earlier STATS_CONF messages.
 * partial -     The requested stats have been delivered in part.
 *               One or more subsequent STATS_CONF messages with the same
 *               cookie value will be sent to deliver the remainder of the
 *               information.
 * error -       The requested stats could not be delivered, for example due
 *               to a shortage of memory to construct a message holding the
 *               requested stats.
 * invalid -     The requested stat type is either not recognized, or the
 *               target is configured to not gather the stats type in question.
 * - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
 * series_done - This special value indicates that no further stats info
 *               elements are present within a series of stats info elems
 *               (within a stats upload confirmation message).
 */
enum htt_dbg_stats_status {
	HTT_DBG_STATS_STATUS_PRESENT     = 0,
	HTT_DBG_STATS_STATUS_PARTIAL     = 1,
	HTT_DBG_STATS_STATUS_ERROR       = 2,
	HTT_DBG_STATS_STATUS_INVALID     = 3,
	HTT_DBG_STATS_STATUS_SERIES_DONE = 7
};
1417
1418
/*
1419
* host -> target FRAG DESCRIPTOR/MSDU_EXT DESC bank
1420
*
1421
* The following field definitions describe the format of the HTT host
1422
* to target frag_desc/msdu_ext bank configuration message.
1423
* The message contains the based address and the min and max id of the
1424
* MSDU_EXT/FRAG_DESC that will be used by the HTT to map MSDU DESC and
1425
* MSDU_EXT/FRAG_DESC.
1426
* HTT will use id in HTT descriptor instead sending the frag_desc_ptr.
1427
* For QCA988X HW the firmware will use fragment_desc_ptr but in WIFI2.0
1428
* the hardware does the mapping/translation.
1429
*
1430
* Total banks that can be configured is configured to 16.
1431
*
1432
* This should be called before any TX has be initiated by the HTT
1433
*
1434
* |31 16|15 8|7 5|4 0|
1435
* |------------------------------------------------------------|
1436
* | DESC_SIZE | NUM_BANKS | RES |SWP|pdev| msg type |
1437
* |------------------------------------------------------------|
1438
* | BANK0_BASE_ADDRESS |
1439
* |------------------------------------------------------------|
1440
* | ... |
1441
* |------------------------------------------------------------|
1442
* | BANK15_BASE_ADDRESS |
1443
* |------------------------------------------------------------|
1444
* | BANK0_MAX_ID | BANK0_MIN_ID |
1445
* |------------------------------------------------------------|
1446
* | ... |
1447
* |------------------------------------------------------------|
1448
* | BANK15_MAX_ID | BANK15_MIN_ID |
1449
* |------------------------------------------------------------|
1450
* Header fields:
1451
* - MSG_TYPE
1452
* Bits 7:0
1453
* Value: 0x6
1454
* - BANKx_BASE_ADDRESS
1455
* Bits 31:0
1456
* Purpose: Provide a mechanism to specify the base address of the MSDU_EXT
1457
* bank physical/bus address.
1458
* - BANKx_MIN_ID
1459
* Bits 15:0
1460
* Purpose: Provide a mechanism to specify the min index that needs to
1461
* mapped.
1462
* - BANKx_MAX_ID
1463
* Bits 31:16
1464
* Purpose: Provide a mechanism to specify the max index that needs to
1465
*
1466
*/
1467
struct htt_frag_desc_bank_id {
1468
__le16 bank_min_id;
1469
__le16 bank_max_id;
1470
} __packed;
1471
1472
/* real is 16 but it wouldn't fit in the max htt message size
1473
* so we use a conservatively safe value for now
1474
*/
1475
#define HTT_FRAG_DESC_BANK_MAX 4
1476
1477
#define HTT_FRAG_DESC_BANK_CFG_INFO_PDEV_ID_MASK 0x03
1478
#define HTT_FRAG_DESC_BANK_CFG_INFO_PDEV_ID_LSB 0
1479
#define HTT_FRAG_DESC_BANK_CFG_INFO_SWAP BIT(2)
1480
#define HTT_FRAG_DESC_BANK_CFG_INFO_Q_STATE_VALID BIT(3)
1481
#define HTT_FRAG_DESC_BANK_CFG_INFO_Q_STATE_DEPTH_TYPE_MASK BIT(4)
1482
#define HTT_FRAG_DESC_BANK_CFG_INFO_Q_STATE_DEPTH_TYPE_LSB 4
1483
1484
/* Unit in which host queue depth is reported to firmware. */
enum htt_q_depth_type {
	HTT_Q_DEPTH_TYPE_BYTES = 0,
	HTT_Q_DEPTH_TYPE_MSDUS = 1,
};

#define HTT_TX_Q_STATE_NUM_PEERS	(TARGET_10_4_NUM_QCACHE_PEERS_MAX + \
					 TARGET_10_4_NUM_VDEVS)
#define HTT_TX_Q_STATE_NUM_TIDS		8
#define HTT_TX_Q_STATE_ENTRY_SIZE	1
#define HTT_TX_Q_STATE_ENTRY_MULTIPLIER	0
1494
1495
/**
1496
* struct htt_q_state_conf - part of htt_frag_desc_bank_cfg for host q state config
1497
*
1498
* Defines host q state format and behavior. See htt_q_state.
1499
*
1500
* @paddr: Queue physical address
1501
* @num_peers: Number of supported peers
1502
* @num_tids: Number of supported TIDs
1503
* @record_size: Defines the size of each host q entry in bytes. In practice
1504
* however firmware (at least 10.4.3-00191) ignores this host
1505
* configuration value and uses hardcoded value of 1.
1506
* @record_multiplier: This is valid only when q depth type is MSDUs. It
1507
* defines the exponent for the power of 2 multiplication.
1508
* @pad: struct padding for 32-bit alignment
1509
*/
1510
struct htt_q_state_conf {
1511
__le32 paddr;
1512
__le16 num_peers;
1513
__le16 num_tids;
1514
u8 record_size;
1515
u8 record_multiplier;
1516
u8 pad[2];
1517
} __packed;
1518
1519
struct htt_frag_desc_bank_cfg32 {
1520
u8 info; /* HTT_FRAG_DESC_BANK_CFG_INFO_ */
1521
u8 num_banks;
1522
u8 desc_size;
1523
__le32 bank_base_addrs[HTT_FRAG_DESC_BANK_MAX];
1524
struct htt_frag_desc_bank_id bank_id[HTT_FRAG_DESC_BANK_MAX];
1525
struct htt_q_state_conf q_state;
1526
} __packed;
1527
1528
struct htt_frag_desc_bank_cfg64 {
1529
u8 info; /* HTT_FRAG_DESC_BANK_CFG_INFO_ */
1530
u8 num_banks;
1531
u8 desc_size;
1532
__le64 bank_base_addrs[HTT_FRAG_DESC_BANK_MAX];
1533
struct htt_frag_desc_bank_id bank_id[HTT_FRAG_DESC_BANK_MAX];
1534
struct htt_q_state_conf q_state;
1535
} __packed;
1536
1537
#define HTT_TX_Q_STATE_ENTRY_COEFFICIENT 128
1538
#define HTT_TX_Q_STATE_ENTRY_FACTOR_MASK 0x3f
1539
#define HTT_TX_Q_STATE_ENTRY_FACTOR_LSB 0
1540
#define HTT_TX_Q_STATE_ENTRY_EXP_MASK 0xc0
1541
#define HTT_TX_Q_STATE_ENTRY_EXP_LSB 6
1542
1543
/**
1544
* struct htt_q_state - shared between host and firmware via DMA
1545
*
1546
* This structure is used for the host to expose it's software queue state to
1547
* firmware so that its rate control can schedule fetch requests for optimized
1548
* performance. This is most notably used for MU-MIMO aggregation when multiple
1549
* MU clients are connected.
1550
*
1551
* @count: Each element defines the host queue depth. When q depth type was
1552
* configured as HTT_Q_DEPTH_TYPE_BYTES then each entry is defined as:
1553
* FACTOR * 128 * 8^EXP (see HTT_TX_Q_STATE_ENTRY_FACTOR_MASK and
1554
* HTT_TX_Q_STATE_ENTRY_EXP_MASK). When q depth type was configured as
1555
* HTT_Q_DEPTH_TYPE_MSDUS the number of packets is scaled by 2 **
1556
* record_multiplier (see htt_q_state_conf).
1557
* @map: Used by firmware to quickly check which host queues are not empty. It
1558
* is a bitmap simply saying.
1559
* @seq: Used by firmware to quickly check if the host queues were updated
1560
* since it last checked.
1561
*
1562
* FIXME: Is the q_state map[] size calculation really correct?
1563
*/
1564
struct htt_q_state {
1565
u8 count[HTT_TX_Q_STATE_NUM_TIDS][HTT_TX_Q_STATE_NUM_PEERS];
1566
u32 map[HTT_TX_Q_STATE_NUM_TIDS][(HTT_TX_Q_STATE_NUM_PEERS + 31) / 32];
1567
__le32 seq;
1568
} __packed;
1569
1570
#define HTT_TX_FETCH_RECORD_INFO_PEER_ID_MASK 0x0fff
1571
#define HTT_TX_FETCH_RECORD_INFO_PEER_ID_LSB 0
1572
#define HTT_TX_FETCH_RECORD_INFO_TID_MASK 0xf000
1573
#define HTT_TX_FETCH_RECORD_INFO_TID_LSB 12
1574
1575
struct htt_tx_fetch_record {
1576
__le16 info; /* HTT_TX_FETCH_IND_RECORD_INFO_ */
1577
__le16 num_msdus;
1578
__le32 num_bytes;
1579
} __packed;
1580
1581
struct htt_tx_fetch_ind {
1582
u8 pad0;
1583
__le16 fetch_seq_num;
1584
__le32 token;
1585
__le16 num_resp_ids;
1586
__le16 num_records;
1587
union {
1588
/* ath10k_htt_get_tx_fetch_ind_resp_ids() */
1589
DECLARE_FLEX_ARRAY(__le32, resp_ids);
1590
DECLARE_FLEX_ARRAY(struct htt_tx_fetch_record, records);
1591
} __packed;
1592
} __packed;
1593
1594
static inline void *
1595
ath10k_htt_get_tx_fetch_ind_resp_ids(struct htt_tx_fetch_ind *ind)
1596
{
1597
return (void *)&ind->records[le16_to_cpu(ind->num_records)];
1598
}
1599
1600
struct htt_tx_fetch_resp {
1601
u8 pad0;
1602
__le16 resp_id;
1603
__le16 fetch_seq_num;
1604
__le16 num_records;
1605
__le32 token;
1606
struct htt_tx_fetch_record records[];
1607
} __packed;
1608
1609
struct htt_tx_fetch_confirm {
1610
u8 pad0;
1611
__le16 num_resp_ids;
1612
__le32 resp_ids[];
1613
} __packed;
1614
1615
enum htt_tx_mode_switch_mode {
	HTT_TX_MODE_SWITCH_PUSH = 0,
	HTT_TX_MODE_SWITCH_PUSH_PULL = 1,
};

#define HTT_TX_MODE_SWITCH_IND_INFO0_ENABLE		BIT(0)
#define HTT_TX_MODE_SWITCH_IND_INFO0_NUM_RECORDS_MASK	0xfffe
#define HTT_TX_MODE_SWITCH_IND_INFO0_NUM_RECORDS_LSB	1

#define HTT_TX_MODE_SWITCH_IND_INFO1_MODE_MASK		0x0003
#define HTT_TX_MODE_SWITCH_IND_INFO1_MODE_LSB		0
#define HTT_TX_MODE_SWITCH_IND_INFO1_THRESHOLD_MASK	0xfffc
#define HTT_TX_MODE_SWITCH_IND_INFO1_THRESHOLD_LSB	2

#define HTT_TX_MODE_SWITCH_RECORD_INFO0_PEER_ID_MASK	0x0fff
#define HTT_TX_MODE_SWITCH_RECORD_INFO0_PEER_ID_LSB	0
#define HTT_TX_MODE_SWITCH_RECORD_INFO0_TID_MASK	0xf000
#define HTT_TX_MODE_SWITCH_RECORD_INFO0_TID_LSB		12
1633
1634
struct htt_tx_mode_switch_record {
1635
__le16 info0; /* HTT_TX_MODE_SWITCH_RECORD_INFO0_ */
1636
__le16 num_max_msdus;
1637
} __packed;
1638
1639
struct htt_tx_mode_switch_ind {
1640
u8 pad0;
1641
__le16 info0; /* HTT_TX_MODE_SWITCH_IND_INFO0_ */
1642
__le16 info1; /* HTT_TX_MODE_SWITCH_IND_INFO1_ */
1643
u8 pad1[2];
1644
struct htt_tx_mode_switch_record records[];
1645
} __packed;
1646
1647
struct htt_channel_change {
1648
u8 pad[3];
1649
__le32 freq;
1650
__le32 center_freq1;
1651
__le32 center_freq2;
1652
__le32 phymode;
1653
} __packed;
1654
1655
struct htt_per_peer_tx_stats_ind {
1656
__le32 succ_bytes;
1657
__le32 retry_bytes;
1658
__le32 failed_bytes;
1659
u8 ratecode;
1660
u8 flags;
1661
__le16 peer_id;
1662
__le16 succ_pkts;
1663
__le16 retry_pkts;
1664
__le16 failed_pkts;
1665
__le16 tx_duration;
1666
__le32 reserved1;
1667
__le32 reserved2;
1668
} __packed;
1669
1670
struct htt_peer_tx_stats {
1671
u8 num_ppdu;
1672
u8 ppdu_len;
1673
u8 version;
1674
u8 payload[];
1675
} __packed;
1676
1677
#define ATH10K_10_2_TX_STATS_OFFSET 136
1678
#define PEER_STATS_FOR_NO_OF_PPDUS 4
1679
1680
struct ath10k_10_2_peer_tx_stats {
1681
u8 ratecode[PEER_STATS_FOR_NO_OF_PPDUS];
1682
u8 success_pkts[PEER_STATS_FOR_NO_OF_PPDUS];
1683
__le16 success_bytes[PEER_STATS_FOR_NO_OF_PPDUS];
1684
u8 retry_pkts[PEER_STATS_FOR_NO_OF_PPDUS];
1685
__le16 retry_bytes[PEER_STATS_FOR_NO_OF_PPDUS];
1686
u8 failed_pkts[PEER_STATS_FOR_NO_OF_PPDUS];
1687
__le16 failed_bytes[PEER_STATS_FOR_NO_OF_PPDUS];
1688
u8 flags[PEER_STATS_FOR_NO_OF_PPDUS];
1689
__le32 tx_duration;
1690
u8 tx_ppdu_cnt;
1691
u8 peer_id;
1692
} __packed;
1693
1694
union htt_rx_pn_t {
1695
/* WEP: 24-bit PN */
1696
u32 pn24;
1697
1698
/* TKIP or CCMP: 48-bit PN */
1699
u64 pn48;
1700
1701
/* WAPI: 128-bit PN */
1702
u64 pn128[2];
1703
};
1704
1705
struct htt_cmd {
1706
struct htt_cmd_hdr hdr;
1707
union {
1708
struct htt_ver_req ver_req;
1709
struct htt_mgmt_tx_desc mgmt_tx;
1710
struct htt_data_tx_desc data_tx;
1711
struct htt_rx_ring_setup_32 rx_setup_32;
1712
struct htt_rx_ring_setup_64 rx_setup_64;
1713
struct htt_stats_req stats_req;
1714
struct htt_oob_sync_req oob_sync_req;
1715
struct htt_aggr_conf aggr_conf;
1716
struct htt_aggr_conf_v2 aggr_conf_v2;
1717
struct htt_frag_desc_bank_cfg32 frag_desc_bank_cfg32;
1718
struct htt_frag_desc_bank_cfg64 frag_desc_bank_cfg64;
1719
struct htt_tx_fetch_resp tx_fetch_resp;
1720
};
1721
} __packed;
1722
1723
struct htt_resp {
1724
struct htt_resp_hdr hdr;
1725
union {
1726
struct htt_ver_resp ver_resp;
1727
struct htt_mgmt_tx_completion mgmt_tx_completion;
1728
struct htt_data_tx_completion data_tx_completion;
1729
struct htt_rx_indication rx_ind;
1730
struct htt_rx_indication_hl rx_ind_hl;
1731
struct htt_rx_fragment_indication rx_frag_ind;
1732
struct htt_rx_peer_map peer_map;
1733
struct htt_rx_peer_unmap peer_unmap;
1734
struct htt_rx_flush rx_flush;
1735
struct htt_rx_addba rx_addba;
1736
struct htt_rx_delba rx_delba;
1737
struct htt_security_indication security_indication;
1738
struct htt_rc_update rc_update;
1739
struct htt_rx_test rx_test;
1740
struct htt_pktlog_msg pktlog_msg;
1741
struct htt_rx_pn_ind rx_pn_ind;
1742
struct htt_rx_offload_ind rx_offload_ind;
1743
struct htt_rx_in_ord_ind rx_in_ord_ind;
1744
struct htt_tx_fetch_ind tx_fetch_ind;
1745
struct htt_tx_fetch_confirm tx_fetch_confirm;
1746
struct htt_tx_mode_switch_ind tx_mode_switch_ind;
1747
struct htt_channel_change chan_change;
1748
struct htt_peer_tx_stats peer_tx_stats;
1749
} __packed;
1750
} __packed;
1751
1752
/*** host side structures follow ***/
1753
1754
struct htt_tx_done {
1755
u16 msdu_id;
1756
u16 status;
1757
u8 ack_rssi;
1758
};
1759
1760
enum htt_tx_compl_state {
1761
HTT_TX_COMPL_STATE_NONE,
1762
HTT_TX_COMPL_STATE_ACK,
1763
HTT_TX_COMPL_STATE_NOACK,
1764
HTT_TX_COMPL_STATE_DISCARD,
1765
};
1766
1767
struct htt_peer_map_event {
1768
u8 vdev_id;
1769
u16 peer_id;
1770
u8 addr[ETH_ALEN];
1771
};
1772
1773
struct htt_peer_unmap_event {
1774
u16 peer_id;
1775
};
1776
1777
struct ath10k_htt_txbuf_32 {
1778
struct htt_data_tx_desc_frag frags[2];
1779
struct ath10k_htc_hdr htc_hdr;
1780
struct htt_cmd_hdr cmd_hdr;
1781
struct htt_data_tx_desc cmd_tx;
1782
} __packed __aligned(4);
1783
1784
struct ath10k_htt_txbuf_64 {
1785
struct htt_data_tx_desc_frag frags[2];
1786
struct ath10k_htc_hdr htc_hdr;
1787
struct htt_cmd_hdr cmd_hdr;
1788
struct htt_data_tx_desc_64 cmd_tx;
1789
} __packed __aligned(4);
1790
1791
struct ath10k_htt {
1792
struct ath10k *ar;
1793
enum ath10k_htc_ep_id eid;
1794
1795
struct sk_buff_head rx_indication_head;
1796
1797
u8 target_version_major;
1798
u8 target_version_minor;
1799
struct completion target_version_received;
1800
u8 max_num_amsdu;
1801
u8 max_num_ampdu;
1802
1803
const enum htt_t2h_msg_type *t2h_msg_types;
1804
u32 t2h_msg_types_max;
1805
1806
struct {
1807
/*
1808
* Ring of network buffer objects - This ring is
1809
* used exclusively by the host SW. This ring
1810
* mirrors the dev_addrs_ring that is shared
1811
* between the host SW and the MAC HW. The host SW
1812
* uses this netbufs ring to locate the network
1813
* buffer objects whose data buffers the HW has
1814
* filled.
1815
*/
1816
struct sk_buff **netbufs_ring;
1817
1818
/* This is used only with firmware supporting IN_ORD_IND.
1819
*
1820
* With Full Rx Reorder the HTT Rx Ring is more of a temporary
1821
* buffer ring from which buffer addresses are copied by the
1822
* firmware to MAC Rx ring. Firmware then delivers IN_ORD_IND
1823
* pointing to specific (re-ordered) buffers.
1824
*
1825
* FIXME: With kernel generic hashing functions there's a lot
1826
* of hash collisions for sk_buffs.
1827
*/
1828
bool in_ord_rx;
1829
DECLARE_HASHTABLE(skb_table, 4);
1830
1831
/*
1832
* Ring of buffer addresses -
1833
* This ring holds the "physical" device address of the
1834
* rx buffers the host SW provides for the MAC HW to
1835
* fill.
1836
*/
1837
union {
1838
__le64 *paddrs_ring_64;
1839
__le32 *paddrs_ring_32;
1840
};
1841
1842
/*
1843
* Base address of ring, as a "physical" device address
1844
* rather than a CPU address.
1845
*/
1846
dma_addr_t base_paddr;
1847
1848
/* how many elems in the ring (power of 2) */
1849
int size;
1850
1851
/* size - 1 */
1852
unsigned int size_mask;
1853
1854
/* how many rx buffers to keep in the ring */
1855
int fill_level;
1856
1857
/* how many rx buffers (full+empty) are in the ring */
1858
int fill_cnt;
1859
1860
/*
1861
* alloc_idx - where HTT SW has deposited empty buffers
1862
* This is allocated in consistent mem, so that the FW can
1863
* read this variable, and program the HW's FW_IDX reg with
1864
* the value of this shadow register.
1865
*/
1866
struct {
1867
__le32 *vaddr;
1868
dma_addr_t paddr;
1869
} alloc_idx;
1870
1871
/* where HTT SW has processed bufs filled by rx MAC DMA */
1872
struct {
1873
unsigned int msdu_payld;
1874
} sw_rd_idx;
1875
1876
/*
1877
* refill_retry_timer - timer triggered when the ring is
1878
* not refilled to the level expected
1879
*/
1880
struct timer_list refill_retry_timer;
1881
1882
/* Protects access to all rx ring buffer state variables */
1883
spinlock_t lock;
1884
} rx_ring;
1885
1886
unsigned int prefetch_len;
1887
1888
/* Protects access to pending_tx, num_pending_tx */
1889
spinlock_t tx_lock;
1890
int max_num_pending_tx;
1891
int num_pending_tx;
1892
int num_pending_mgmt_tx;
1893
struct idr pending_tx;
1894
wait_queue_head_t empty_tx_wq;
1895
1896
/* FIFO for storing tx done status {ack, no-ack, discard} and msdu id */
1897
DECLARE_KFIFO_PTR(txdone_fifo, struct htt_tx_done);
1898
1899
/* set if host-fw communication goes haywire
1900
* used to avoid further failures
1901
*/
1902
bool rx_confused;
1903
atomic_t num_mpdus_ready;
1904
1905
/* This is used to group tx/rx completions separately and process them
1906
* in batches to reduce cache stalls
1907
*/
1908
struct sk_buff_head rx_msdus_q;
1909
struct sk_buff_head rx_in_ord_compl_q;
1910
struct sk_buff_head tx_fetch_ind_q;
1911
1912
/* rx_status template */
1913
struct ieee80211_rx_status rx_status;
1914
1915
struct {
1916
dma_addr_t paddr;
1917
union {
1918
struct htt_msdu_ext_desc *vaddr_desc_32;
1919
struct htt_msdu_ext_desc_64 *vaddr_desc_64;
1920
};
1921
size_t size;
1922
} frag_desc;
1923
1924
struct {
1925
dma_addr_t paddr;
1926
union {
1927
struct ath10k_htt_txbuf_32 *vaddr_txbuff_32;
1928
struct ath10k_htt_txbuf_64 *vaddr_txbuff_64;
1929
};
1930
size_t size;
1931
} txbuf;
1932
1933
struct {
1934
bool enabled;
1935
struct htt_q_state *vaddr;
1936
dma_addr_t paddr;
1937
u16 num_push_allowed;
1938
u16 num_peers;
1939
u16 num_tids;
1940
enum htt_tx_mode_switch_mode mode;
1941
enum htt_q_depth_type type;
1942
} tx_q_state;
1943
1944
bool tx_mem_allocated;
1945
const struct ath10k_htt_tx_ops *tx_ops;
1946
const struct ath10k_htt_rx_ops *rx_ops;
1947
bool disable_tx_comp;
1948
bool bundle_tx;
1949
struct sk_buff_head tx_req_head;
1950
struct sk_buff_head tx_complete_head;
1951
};
1952
1953
struct ath10k_htt_tx_ops {
1954
int (*htt_send_rx_ring_cfg)(struct ath10k_htt *htt);
1955
int (*htt_send_frag_desc_bank_cfg)(struct ath10k_htt *htt);
1956
int (*htt_alloc_frag_desc)(struct ath10k_htt *htt);
1957
void (*htt_free_frag_desc)(struct ath10k_htt *htt);
1958
int (*htt_tx)(struct ath10k_htt *htt, enum ath10k_hw_txrx_mode txmode,
1959
struct sk_buff *msdu);
1960
int (*htt_alloc_txbuff)(struct ath10k_htt *htt);
1961
void (*htt_free_txbuff)(struct ath10k_htt *htt);
1962
int (*htt_h2t_aggr_cfg_msg)(struct ath10k_htt *htt,
1963
u8 max_subfrms_ampdu,
1964
u8 max_subfrms_amsdu);
1965
void (*htt_flush_tx)(struct ath10k_htt *htt);
1966
};
1967
1968
static inline int ath10k_htt_send_rx_ring_cfg(struct ath10k_htt *htt)
1969
{
1970
if (!htt->tx_ops->htt_send_rx_ring_cfg)
1971
return -EOPNOTSUPP;
1972
1973
return htt->tx_ops->htt_send_rx_ring_cfg(htt);
1974
}
1975
1976
static inline int ath10k_htt_send_frag_desc_bank_cfg(struct ath10k_htt *htt)
1977
{
1978
if (!htt->tx_ops->htt_send_frag_desc_bank_cfg)
1979
return -EOPNOTSUPP;
1980
1981
return htt->tx_ops->htt_send_frag_desc_bank_cfg(htt);
1982
}
1983
1984
static inline int ath10k_htt_alloc_frag_desc(struct ath10k_htt *htt)
1985
{
1986
if (!htt->tx_ops->htt_alloc_frag_desc)
1987
return -EOPNOTSUPP;
1988
1989
return htt->tx_ops->htt_alloc_frag_desc(htt);
1990
}
1991
1992
static inline void ath10k_htt_free_frag_desc(struct ath10k_htt *htt)
1993
{
1994
if (htt->tx_ops->htt_free_frag_desc)
1995
htt->tx_ops->htt_free_frag_desc(htt);
1996
}
1997
1998
static inline int ath10k_htt_tx(struct ath10k_htt *htt,
1999
enum ath10k_hw_txrx_mode txmode,
2000
struct sk_buff *msdu)
2001
{
2002
return htt->tx_ops->htt_tx(htt, txmode, msdu);
2003
}
2004
2005
static inline void ath10k_htt_flush_tx(struct ath10k_htt *htt)
2006
{
2007
if (htt->tx_ops->htt_flush_tx)
2008
htt->tx_ops->htt_flush_tx(htt);
2009
}
2010
2011
static inline int ath10k_htt_alloc_txbuff(struct ath10k_htt *htt)
2012
{
2013
if (!htt->tx_ops->htt_alloc_txbuff)
2014
return -EOPNOTSUPP;
2015
2016
return htt->tx_ops->htt_alloc_txbuff(htt);
2017
}
2018
2019
static inline void ath10k_htt_free_txbuff(struct ath10k_htt *htt)
2020
{
2021
if (htt->tx_ops->htt_free_txbuff)
2022
htt->tx_ops->htt_free_txbuff(htt);
2023
}
2024
2025
static inline int ath10k_htt_h2t_aggr_cfg_msg(struct ath10k_htt *htt,
2026
u8 max_subfrms_ampdu,
2027
u8 max_subfrms_amsdu)
2028
2029
{
2030
if (!htt->tx_ops->htt_h2t_aggr_cfg_msg)
2031
return -EOPNOTSUPP;
2032
2033
return htt->tx_ops->htt_h2t_aggr_cfg_msg(htt,
2034
max_subfrms_ampdu,
2035
max_subfrms_amsdu);
2036
}
2037
2038
/* Per-hw-family rx-path hooks. Any member may be NULL; the inline
 * wrappers below fall back to a benign default in that case.
 */
struct ath10k_htt_rx_ops {
	size_t (*htt_get_rx_ring_size)(struct ath10k_htt *htt);
	void (*htt_config_paddrs_ring)(struct ath10k_htt *htt, void *vaddr);
	void (*htt_set_paddrs_ring)(struct ath10k_htt *htt, dma_addr_t paddr,
				    int idx);
	void* (*htt_get_vaddr_ring)(struct ath10k_htt *htt);
	void (*htt_reset_paddrs_ring)(struct ath10k_htt *htt, int idx);
	bool (*htt_rx_proc_rx_frag_ind)(struct ath10k_htt *htt,
					struct htt_rx_fragment_indication *rx,
					struct sk_buff *skb);
};

static inline size_t ath10k_htt_get_rx_ring_size(struct ath10k_htt *htt)
2051
{
2052
if (!htt->rx_ops->htt_get_rx_ring_size)
2053
return 0;
2054
2055
return htt->rx_ops->htt_get_rx_ring_size(htt);
2056
}
2057
2058
static inline void ath10k_htt_config_paddrs_ring(struct ath10k_htt *htt,
2059
void *vaddr)
2060
{
2061
if (htt->rx_ops->htt_config_paddrs_ring)
2062
htt->rx_ops->htt_config_paddrs_ring(htt, vaddr);
2063
}
2064
2065
static inline void ath10k_htt_set_paddrs_ring(struct ath10k_htt *htt,
2066
dma_addr_t paddr,
2067
int idx)
2068
{
2069
if (htt->rx_ops->htt_set_paddrs_ring)
2070
htt->rx_ops->htt_set_paddrs_ring(htt, paddr, idx);
2071
}
2072
2073
static inline void *ath10k_htt_get_vaddr_ring(struct ath10k_htt *htt)
2074
{
2075
if (!htt->rx_ops->htt_get_vaddr_ring)
2076
return NULL;
2077
2078
return htt->rx_ops->htt_get_vaddr_ring(htt);
2079
}
2080
2081
static inline void ath10k_htt_reset_paddrs_ring(struct ath10k_htt *htt, int idx)
2082
{
2083
if (htt->rx_ops->htt_reset_paddrs_ring)
2084
htt->rx_ops->htt_reset_paddrs_ring(htt, idx);
2085
}
2086
2087
static inline bool ath10k_htt_rx_proc_rx_frag_ind(struct ath10k_htt *htt,
2088
struct htt_rx_fragment_indication *rx,
2089
struct sk_buff *skb)
2090
{
2091
if (!htt->rx_ops->htt_rx_proc_rx_frag_ind)
2092
return true;
2093
2094
return htt->rx_ops->htt_rx_proc_rx_frag_ind(htt, rx, skb);
2095
}
2096
2097
/* The driver strongly assumes that the rx header status is 64 bytes long,
 * so all possible rx_desc structures must respect this assumption.
 */
#define RX_HTT_HDR_STATUS_LEN 64

/* The rx descriptor structure layout is programmed via rx ring setup
 * so that FW knows how to transfer the rx descriptor to the host.
 * Unfortunately, though, QCA6174's firmware doesn't currently behave correctly
 * when modifying the structure layout of the rx descriptor beyond what it expects
 * (even if it correctly programmed during the rx ring setup).
 * Therefore we must keep two different memory layouts, abstract the rx descriptor
 * representation and use ath10k_rx_desc_ops
 * for correctly accessing rx descriptor data.
 */

/* base struct used for abstracting the rx descriptor representation */
struct htt_rx_desc {
	union {
		/* This field is filled on the host using the msdu buffer
		 * from htt_rx_indication
		 */
		struct fw_rx_desc_base fw_desc;
		u32 pad;	/* guarantees the union occupies at least 4 bytes */
	} __packed;
} __packed;

/* rx descriptor for wcn3990 and possibly extensible for newer cards
 * Buffers like this are placed on the rx ring.
 */
struct htt_rx_desc_v2 {
	struct htt_rx_desc base;
	/* hw-specific descriptor sections, in ring-programmed order */
	struct {
		struct rx_attention attention;
		struct rx_frag_info frag_info;
		struct rx_mpdu_start mpdu_start;
		struct rx_msdu_start msdu_start;
		struct rx_msdu_end msdu_end;
		struct rx_mpdu_end mpdu_end;
		struct rx_ppdu_start ppdu_start;
		struct rx_ppdu_end ppdu_end;
	} __packed;
	u8 rx_hdr_status[RX_HTT_HDR_STATUS_LEN];
	u8 msdu_payload[];	/* flexible array: frame data follows the descriptor */
};

/* QCA6174, QCA988x, QCA99x0 dedicated rx descriptor to make sure their firmware
 * works correctly. We keep a single rx descriptor for all these three
 * families of cards because from tests it seems to be the most stable solution,
 * e.g. having a rx descriptor only for QCA6174 seldom caused firmware crashes
 * during some tests.
 * Buffers like this are placed on the rx ring.
 */
struct htt_rx_desc_v1 {
	struct htt_rx_desc base;
	/* hw-specific descriptor sections (v1 variants of frag/msdu/ppdu) */
	struct {
		struct rx_attention attention;
		struct rx_frag_info_v1 frag_info;
		struct rx_mpdu_start mpdu_start;
		struct rx_msdu_start_v1 msdu_start;
		struct rx_msdu_end_v1 msdu_end;
		struct rx_mpdu_end mpdu_end;
		struct rx_ppdu_start ppdu_start;
		struct rx_ppdu_end_v1 ppdu_end;
	} __packed;
	u8 rx_hdr_status[RX_HTT_HDR_STATUS_LEN];
	u8 msdu_payload[];	/* flexible array: frame data follows the descriptor */
};

/* rx_desc abstraction */
struct ath10k_htt_rx_desc_ops {
	/* These fields are mandatory, they must be specified in any instance */

	/* sizeof() of the rx_desc structure used by this hw */
	size_t rx_desc_size;

	/* offset of msdu_payload inside the rx_desc structure used by this hw */
	size_t rx_desc_msdu_payload_offset;

	/* These fields are optional.
	 * When a field is not provided the default implementation gets used
	 * (see the ath10k_rx_desc_* operations below for more info about the defaults)
	 */
	bool (*rx_desc_get_msdu_limit_error)(struct htt_rx_desc *rxd);
	int (*rx_desc_get_l3_pad_bytes)(struct htt_rx_desc *rxd);

	/* Safely cast from a void* buffer containing an rx descriptor
	 * to the proper rx_desc structure
	 */
	struct htt_rx_desc *(*rx_desc_from_raw_buffer)(void *buff);

	/* Section-offset table reported to the fw during rx ring setup */
	void (*rx_desc_get_offsets)(struct htt_rx_ring_rx_desc_offsets *offs);

	/* Accessors for the individual descriptor sections */
	struct rx_attention *(*rx_desc_get_attention)(struct htt_rx_desc *rxd);
	struct rx_frag_info_common *(*rx_desc_get_frag_info)(struct htt_rx_desc *rxd);
	struct rx_mpdu_start *(*rx_desc_get_mpdu_start)(struct htt_rx_desc *rxd);
	struct rx_mpdu_end *(*rx_desc_get_mpdu_end)(struct htt_rx_desc *rxd);
	struct rx_msdu_start_common *(*rx_desc_get_msdu_start)(struct htt_rx_desc *rxd);
	struct rx_msdu_end_common *(*rx_desc_get_msdu_end)(struct htt_rx_desc *rxd);
	struct rx_ppdu_start *(*rx_desc_get_ppdu_start)(struct htt_rx_desc *rxd);
	struct rx_ppdu_end_common *(*rx_desc_get_ppdu_end)(struct htt_rx_desc *rxd);
	u8 *(*rx_desc_get_rx_hdr_status)(struct htt_rx_desc *rxd);
	u8 *(*rx_desc_get_msdu_payload)(struct htt_rx_desc *rxd);
};

/* Per-hw-family rx_desc op tables, defined in the corresponding .c files */
extern const struct ath10k_htt_rx_desc_ops qca988x_rx_desc_ops;
extern const struct ath10k_htt_rx_desc_ops qca99x0_rx_desc_ops;
extern const struct ath10k_htt_rx_desc_ops wcn3990_rx_desc_ops;

static inline int
2205
ath10k_htt_rx_desc_get_l3_pad_bytes(struct ath10k_hw_params *hw, struct htt_rx_desc *rxd)
2206
{
2207
if (hw->rx_desc_ops->rx_desc_get_l3_pad_bytes)
2208
return hw->rx_desc_ops->rx_desc_get_l3_pad_bytes(rxd);
2209
return 0;
2210
}
2211
2212
static inline bool
2213
ath10k_htt_rx_desc_msdu_limit_error(struct ath10k_hw_params *hw, struct htt_rx_desc *rxd)
2214
{
2215
if (hw->rx_desc_ops->rx_desc_get_msdu_limit_error)
2216
return hw->rx_desc_ops->rx_desc_get_msdu_limit_error(rxd);
2217
return false;
2218
}
2219
2220
/* The default implementation of all these getters is using the old rx_desc,
2221
* so that it is easier to define the ath10k_htt_rx_desc_ops instances.
2222
* But probably, if new wireless cards must be supported, it would be better
2223
* to switch the default implementation to the new rx_desc, since this would
2224
* make the extension easier .
2225
*/
2226
static inline struct htt_rx_desc *
2227
ath10k_htt_rx_desc_from_raw_buffer(struct ath10k_hw_params *hw, void *buff)
2228
{
2229
if (hw->rx_desc_ops->rx_desc_from_raw_buffer)
2230
return hw->rx_desc_ops->rx_desc_from_raw_buffer(buff);
2231
return &((struct htt_rx_desc_v1 *)buff)->base;
2232
}
2233
2234
/* Fill the rx descriptor section-offset table used during rx ring setup.
 * Falls back to the v1 layout offsets when the hw provides no op.
 */
static inline void
ath10k_htt_rx_desc_get_offsets(struct ath10k_hw_params *hw,
			       struct htt_rx_ring_rx_desc_offsets *off)
{
	if (hw->rx_desc_ops->rx_desc_get_offsets) {
		hw->rx_desc_ops->rx_desc_get_offsets(off);
	} else {
		/* Offsets are expressed in 32-bit words (hence the / 4). */
#define desc_offset(x) (offsetof(struct htt_rx_desc_v1, x) / 4)
		off->mac80211_hdr_offset = __cpu_to_le16(desc_offset(rx_hdr_status));
		off->msdu_payload_offset = __cpu_to_le16(desc_offset(msdu_payload));
		off->ppdu_start_offset = __cpu_to_le16(desc_offset(ppdu_start));
		off->ppdu_end_offset = __cpu_to_le16(desc_offset(ppdu_end));
		off->mpdu_start_offset = __cpu_to_le16(desc_offset(mpdu_start));
		off->mpdu_end_offset = __cpu_to_le16(desc_offset(mpdu_end));
		off->msdu_start_offset = __cpu_to_le16(desc_offset(msdu_start));
		off->msdu_end_offset = __cpu_to_le16(desc_offset(msdu_end));
		off->rx_attention_offset = __cpu_to_le16(desc_offset(attention));
		off->frag_info_offset = __cpu_to_le16(desc_offset(frag_info));
#undef desc_offset
	}
}

static inline struct rx_attention *
2257
ath10k_htt_rx_desc_get_attention(struct ath10k_hw_params *hw, struct htt_rx_desc *rxd)
2258
{
2259
struct htt_rx_desc_v1 *rx_desc;
2260
2261
if (hw->rx_desc_ops->rx_desc_get_attention)
2262
return hw->rx_desc_ops->rx_desc_get_attention(rxd);
2263
2264
rx_desc = container_of(rxd, struct htt_rx_desc_v1, base);
2265
return &rx_desc->attention;
2266
}
2267
2268
static inline struct rx_frag_info_common *
2269
ath10k_htt_rx_desc_get_frag_info(struct ath10k_hw_params *hw, struct htt_rx_desc *rxd)
2270
{
2271
struct htt_rx_desc_v1 *rx_desc;
2272
2273
if (hw->rx_desc_ops->rx_desc_get_frag_info)
2274
return hw->rx_desc_ops->rx_desc_get_frag_info(rxd);
2275
2276
rx_desc = container_of(rxd, struct htt_rx_desc_v1, base);
2277
return &rx_desc->frag_info.common;
2278
}
2279
2280
static inline struct rx_mpdu_start *
2281
ath10k_htt_rx_desc_get_mpdu_start(struct ath10k_hw_params *hw, struct htt_rx_desc *rxd)
2282
{
2283
struct htt_rx_desc_v1 *rx_desc;
2284
2285
if (hw->rx_desc_ops->rx_desc_get_mpdu_start)
2286
return hw->rx_desc_ops->rx_desc_get_mpdu_start(rxd);
2287
2288
rx_desc = container_of(rxd, struct htt_rx_desc_v1, base);
2289
return &rx_desc->mpdu_start;
2290
}
2291
2292
static inline struct rx_mpdu_end *
2293
ath10k_htt_rx_desc_get_mpdu_end(struct ath10k_hw_params *hw, struct htt_rx_desc *rxd)
2294
{
2295
struct htt_rx_desc_v1 *rx_desc;
2296
2297
if (hw->rx_desc_ops->rx_desc_get_mpdu_end)
2298
return hw->rx_desc_ops->rx_desc_get_mpdu_end(rxd);
2299
2300
rx_desc = container_of(rxd, struct htt_rx_desc_v1, base);
2301
return &rx_desc->mpdu_end;
2302
}
2303
2304
static inline struct rx_msdu_start_common *
2305
ath10k_htt_rx_desc_get_msdu_start(struct ath10k_hw_params *hw, struct htt_rx_desc *rxd)
2306
{
2307
struct htt_rx_desc_v1 *rx_desc;
2308
2309
if (hw->rx_desc_ops->rx_desc_get_msdu_start)
2310
return hw->rx_desc_ops->rx_desc_get_msdu_start(rxd);
2311
2312
rx_desc = container_of(rxd, struct htt_rx_desc_v1, base);
2313
return &rx_desc->msdu_start.common;
2314
}
2315
2316
static inline struct rx_msdu_end_common *
2317
ath10k_htt_rx_desc_get_msdu_end(struct ath10k_hw_params *hw, struct htt_rx_desc *rxd)
2318
{
2319
struct htt_rx_desc_v1 *rx_desc;
2320
2321
if (hw->rx_desc_ops->rx_desc_get_msdu_end)
2322
return hw->rx_desc_ops->rx_desc_get_msdu_end(rxd);
2323
2324
rx_desc = container_of(rxd, struct htt_rx_desc_v1, base);
2325
return &rx_desc->msdu_end.common;
2326
}
2327
2328
static inline struct rx_ppdu_start *
2329
ath10k_htt_rx_desc_get_ppdu_start(struct ath10k_hw_params *hw, struct htt_rx_desc *rxd)
2330
{
2331
struct htt_rx_desc_v1 *rx_desc;
2332
2333
if (hw->rx_desc_ops->rx_desc_get_ppdu_start)
2334
return hw->rx_desc_ops->rx_desc_get_ppdu_start(rxd);
2335
2336
rx_desc = container_of(rxd, struct htt_rx_desc_v1, base);
2337
return &rx_desc->ppdu_start;
2338
}
2339
2340
static inline struct rx_ppdu_end_common *
2341
ath10k_htt_rx_desc_get_ppdu_end(struct ath10k_hw_params *hw, struct htt_rx_desc *rxd)
2342
{
2343
struct htt_rx_desc_v1 *rx_desc;
2344
2345
if (hw->rx_desc_ops->rx_desc_get_ppdu_end)
2346
return hw->rx_desc_ops->rx_desc_get_ppdu_end(rxd);
2347
2348
rx_desc = container_of(rxd, struct htt_rx_desc_v1, base);
2349
return &rx_desc->ppdu_end.common;
2350
}
2351
2352
static inline u8 *
2353
ath10k_htt_rx_desc_get_rx_hdr_status(struct ath10k_hw_params *hw, struct htt_rx_desc *rxd)
2354
{
2355
struct htt_rx_desc_v1 *rx_desc;
2356
2357
if (hw->rx_desc_ops->rx_desc_get_rx_hdr_status)
2358
return hw->rx_desc_ops->rx_desc_get_rx_hdr_status(rxd);
2359
2360
rx_desc = container_of(rxd, struct htt_rx_desc_v1, base);
2361
return rx_desc->rx_hdr_status;
2362
}
2363
2364
static inline u8 *
2365
ath10k_htt_rx_desc_get_msdu_payload(struct ath10k_hw_params *hw, struct htt_rx_desc *rxd)
2366
{
2367
struct htt_rx_desc_v1 *rx_desc;
2368
2369
if (hw->rx_desc_ops->rx_desc_get_msdu_payload)
2370
return hw->rx_desc_ops->rx_desc_get_msdu_payload(rxd);
2371
2372
rx_desc = container_of(rxd, struct htt_rx_desc_v1, base);
2373
return rx_desc->msdu_payload;
2374
}
2375
2376
/* Bit layout of htt_rx_desc_base_hl::info (high-latency rx descriptor) */
#define HTT_RX_DESC_HL_INFO_SEQ_NUM_MASK 0x00000fff
#define HTT_RX_DESC_HL_INFO_SEQ_NUM_LSB 0
#define HTT_RX_DESC_HL_INFO_ENCRYPTED_MASK 0x00001000
#define HTT_RX_DESC_HL_INFO_ENCRYPTED_LSB 12
#define HTT_RX_DESC_HL_INFO_CHAN_INFO_PRESENT_MASK 0x00002000
#define HTT_RX_DESC_HL_INFO_CHAN_INFO_PRESENT_LSB 13
#define HTT_RX_DESC_HL_INFO_MCAST_BCAST_MASK 0x00010000
#define HTT_RX_DESC_HL_INFO_MCAST_BCAST_LSB 16
#define HTT_RX_DESC_HL_INFO_KEY_ID_OCT_MASK 0x01fe0000
#define HTT_RX_DESC_HL_INFO_KEY_ID_OCT_LSB 17

/* Minimal rx descriptor used on the high-latency path */
struct htt_rx_desc_base_hl {
	__le32 info; /* HTT_RX_DESC_HL_INFO_ */
};

/* Channel description attached to an rx indication */
struct htt_rx_chan_info {
	__le16 primary_chan_center_freq_mhz;
	__le16 contig_chan1_center_freq_mhz;
	__le16 contig_chan2_center_freq_mhz;
	u8 phy_mode;
	u8 reserved;
} __packed;

#define HTT_RX_DESC_ALIGN 8

#define HTT_MAC_ADDR_LEN 6

/*
 * FIX THIS
 * Should be: sizeof(struct htt_host_rx_desc) + max rx MSDU size,
 * rounded up to a cache line size.
 */
#define HTT_RX_BUF_SIZE 2048

/* The HTT_RX_MSDU_SIZE can't be statically computed anymore,
2411
* because it depends on the underlying device rx_desc representation
2412
*/
2413
static inline int ath10k_htt_rx_msdu_size(struct ath10k_hw_params *hw)
2414
{
2415
return HTT_RX_BUF_SIZE - (int)hw->rx_desc_ops->rx_desc_size;
2416
}
2417
2418
/* Refill a bunch of RX buffers for each refill round so that FW/HW can handle
 * aggregated traffic more nicely.
 */
#define ATH10K_HTT_MAX_NUM_REFILL 100

/*
 * DMA_MAP expects the buffer to be an integral number of cache lines.
 * Rather than checking the actual cache line size, this code makes a
 * conservative estimate of what the cache line size could be.
 */
#define HTT_LOG2_MAX_CACHE_LINE_SIZE 7	/* 2^7 = 128 */
#define HTT_MAX_CACHE_LINE_SIZE_MASK ((1 << HTT_LOG2_MAX_CACHE_LINE_SIZE) - 1)

/* These values are default in most firmware revisions and apparently are a
 * sweet spot performance wise.
 */
#define ATH10K_HTT_MAX_NUM_AMSDU_DEFAULT 3
#define ATH10K_HTT_MAX_NUM_AMPDU_DEFAULT 64

/* HTT layer setup/teardown */
int ath10k_htt_connect(struct ath10k_htt *htt);
int ath10k_htt_init(struct ath10k *ar);
int ath10k_htt_setup(struct ath10k_htt *htt);

/* tx path lifecycle */
int ath10k_htt_tx_start(struct ath10k_htt *htt);
void ath10k_htt_tx_stop(struct ath10k_htt *htt);
void ath10k_htt_tx_destroy(struct ath10k_htt *htt);
void ath10k_htt_tx_free(struct ath10k_htt *htt);

/* rx path lifecycle */
int ath10k_htt_rx_alloc(struct ath10k_htt *htt);
int ath10k_htt_rx_ring_refill(struct ath10k *ar);
void ath10k_htt_rx_free(struct ath10k_htt *htt);

/* HTC completion and target-to-host message handling */
void ath10k_htt_htc_tx_complete(struct ath10k *ar, struct sk_buff *skb);
void ath10k_htt_htc_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb);
bool ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb);
int ath10k_htt_h2t_ver_req_msg(struct ath10k_htt *htt);
int ath10k_htt_h2t_stats_req(struct ath10k_htt *htt, u32 mask, u32 reset_mask,
			     u64 cookie);
void ath10k_htt_hif_tx_complete(struct ath10k *ar, struct sk_buff *skb);
int ath10k_htt_tx_fetch_resp(struct ath10k *ar,
			     __le32 token,
			     __le16 fetch_seq_num,
			     struct htt_tx_fetch_record *records,
			     size_t num_records);
void ath10k_htt_op_ep_tx_credits(struct ath10k *ar);

/* tx queue and pending-frame accounting */
void ath10k_htt_tx_txq_update(struct ieee80211_hw *hw,
			      struct ieee80211_txq *txq);
void ath10k_htt_tx_txq_recalc(struct ieee80211_hw *hw,
			      struct ieee80211_txq *txq);
void ath10k_htt_tx_txq_sync(struct ath10k *ar);
void ath10k_htt_tx_dec_pending(struct ath10k_htt *htt);
int ath10k_htt_tx_inc_pending(struct ath10k_htt *htt);
void ath10k_htt_tx_mgmt_dec_pending(struct ath10k_htt *htt);
int ath10k_htt_tx_mgmt_inc_pending(struct ath10k_htt *htt, bool is_mgmt,
				   bool is_presp);

/* msdu id allocation and frame dispatch */
int ath10k_htt_tx_alloc_msdu_id(struct ath10k_htt *htt, struct sk_buff *skb);
void ath10k_htt_tx_free_msdu_id(struct ath10k_htt *htt, u16 msdu_id);
int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *msdu);
void ath10k_htt_rx_pktlog_completion_handler(struct ath10k *ar,
					     struct sk_buff *skb);
int ath10k_htt_txrx_compl_task(struct ath10k *ar, int budget);
int ath10k_htt_rx_hl_indication(struct ath10k *ar, int budget);
void ath10k_htt_set_tx_ops(struct ath10k_htt *htt);
void ath10k_htt_set_rx_ops(struct ath10k_htt *htt);
#endif