Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
freebsd
GitHub Repository: freebsd/freebsd-src
Path: blob/main/sys/contrib/dev/athk/ath10k/htt.h
48375 views
1
/* SPDX-License-Identifier: ISC */
2
/*
3
* Copyright (c) 2005-2011 Atheros Communications Inc.
4
* Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
5
* Copyright (c) 2018, The Linux Foundation. All rights reserved.
6
*/
7
8
#ifndef _HTT_H_
9
#define _HTT_H_
10
11
#include <linux/bug.h>
12
#include <linux/interrupt.h>
13
#include <linux/dmapool.h>
14
#include <linux/hashtable.h>
15
#include <linux/kfifo.h>
16
#include <net/mac80211.h>
17
#if defined(__FreeBSD__)
18
#include <linux/wait.h>
19
#endif
20
21
#include "htc.h"
22
#include "hw.h"
23
#include "rx_desc.h"
24
25
/* Bit flags selecting which debug statistics the host requests from the
 * target (carried in htt_stats_req.upload_types/reset_types); flags may
 * be OR'ed together.
 */
enum htt_dbg_stats_type {
	HTT_DBG_STATS_WAL_PDEV_TXRX = 1 << 0,
	HTT_DBG_STATS_RX_REORDER = 1 << 1,
	HTT_DBG_STATS_RX_RATE_INFO = 1 << 2,
	HTT_DBG_STATS_TX_PPDU_LOG = 1 << 3,
	HTT_DBG_STATS_TX_RATE_INFO = 1 << 4,
	/* bits 5-23 currently reserved */

	HTT_DBG_NUM_STATS /* keep this last */
};
35
36
/* Message IDs for host-to-target HTT messages (htt_cmd_hdr.msg_type). */
enum htt_h2t_msg_type { /* host-to-target */
	HTT_H2T_MSG_TYPE_VERSION_REQ = 0,
	HTT_H2T_MSG_TYPE_TX_FRM = 1,
	HTT_H2T_MSG_TYPE_RX_RING_CFG = 2,
	HTT_H2T_MSG_TYPE_STATS_REQ = 3,
	HTT_H2T_MSG_TYPE_SYNC = 4,
	HTT_H2T_MSG_TYPE_AGGR_CFG = 5,
	HTT_H2T_MSG_TYPE_FRAG_DESC_BANK_CFG = 6,

	/* This command is used for sending management frames in HTT < 3.0.
	 * HTT >= 3.0 uses TX_FRM for everything.
	 */
	HTT_H2T_MSG_TYPE_MGMT_TX = 7,
	HTT_H2T_MSG_TYPE_TX_FETCH_RESP = 11,

	HTT_H2T_NUM_MSGS /* keep this last */
};
53
54
/* Common one-byte header prepended to every host->target HTT command;
 * msg_type is a %htt_h2t_msg_type value.
 */
struct htt_cmd_hdr {
	u8 msg_type;
} __packed;

/* VERSION_REQ body: no payload of its own, just padding that rounds the
 * whole message (cmd header + body) up to a full 32-bit word.
 */
struct htt_ver_req {
	u8 pad[sizeof(u32) - sizeof(struct htt_cmd_hdr)];
} __packed;
61
62
/*
63
* HTT tx MSDU descriptor
64
*
65
* The HTT tx MSDU descriptor is created by the host HTT SW for each
66
* tx MSDU. The HTT tx MSDU descriptor contains the information that
67
* the target firmware needs for the FW's tx processing, particularly
68
* for creating the HW msdu descriptor.
69
* The same HTT tx descriptor is used for HL and LL systems, though
70
* a few fields within the tx descriptor are used only by LL or
71
* only by HL.
72
* The HTT tx descriptor is defined in two manners: by a struct with
73
* bitfields, and by a series of [dword offset, bit mask, bit shift]
74
* definitions.
75
* The target should use the struct def, for simplicity and clarity,
76
* but the host shall use the bit-mask + bit-shift defs, to be endian-
77
* neutral. Specifically, the host shall use the get/set macros built
78
* around the mask + shift defs.
79
*/
80
/* One scatter-gather fragment entry of the HTT tx MSDU descriptor.
 * Two on-wire layouts share the same storage: a two-word form (32-bit
 * paddr + 32-bit len) and a three-halfword form that splits a wider
 * physical address into lo/hi parts with a 16-bit length.
 */
struct htt_data_tx_desc_frag {
	union {
		struct double_word_addr {
			__le32 paddr;
			__le32 len;
		} __packed dword_addr;
		struct triple_word_addr {
			__le32 paddr_lo;
			__le16 paddr_hi;
			__le16 len_16;
		} __packed tword_addr;
	} __packed;
} __packed;
93
94
/* Extended tx MSDU descriptor (32-bit target variant): TSO state,
 * IP ID for checksum offload, %HTT_MSDU_EXT_DESC_FLAG_ flags and the
 * fragment list.
 */
struct htt_msdu_ext_desc {
	__le32 tso_flag[3];
	__le16 ip_identification;
	u8 flags;
	u8 reserved;
	struct htt_data_tx_desc_frag frags[6];
};

/* 64-bit target variant of the above; only the tso_flag array width
 * differs (flags for this layout use the _64 defines below).
 */
struct htt_msdu_ext_desc_64 {
	__le32 tso_flag[5];
	__le16 ip_identification;
	u8 flags;
	u8 reserved;
	struct htt_data_tx_desc_frag frags[6];
};
109
110
/* Checksum-offload enable bits for htt_msdu_ext_desc.flags. */
#define	HTT_MSDU_EXT_DESC_FLAG_IPV4_CSUM_ENABLE		BIT(0)
#define	HTT_MSDU_EXT_DESC_FLAG_UDP_IPV4_CSUM_ENABLE	BIT(1)
#define	HTT_MSDU_EXT_DESC_FLAG_UDP_IPV6_CSUM_ENABLE	BIT(2)
#define	HTT_MSDU_EXT_DESC_FLAG_TCP_IPV4_CSUM_ENABLE	BIT(3)
#define	HTT_MSDU_EXT_DESC_FLAG_TCP_IPV6_CSUM_ENABLE	BIT(4)

/* Convenience mask: enable every L3/L4 checksum offload at once. */
#define HTT_MSDU_CHECKSUM_ENABLE (HTT_MSDU_EXT_DESC_FLAG_IPV4_CSUM_ENABLE \
				 | HTT_MSDU_EXT_DESC_FLAG_UDP_IPV4_CSUM_ENABLE \
				 | HTT_MSDU_EXT_DESC_FLAG_UDP_IPV6_CSUM_ENABLE \
				 | HTT_MSDU_EXT_DESC_FLAG_TCP_IPV4_CSUM_ENABLE \
				 | HTT_MSDU_EXT_DESC_FLAG_TCP_IPV6_CSUM_ENABLE)

/* Same offload bits for the 64-bit descriptor layout
 * (htt_msdu_ext_desc_64); note they live in bits 16..21.
 */
#define HTT_MSDU_EXT_DESC_FLAG_IPV4_CSUM_ENABLE_64		BIT(16)
#define HTT_MSDU_EXT_DESC_FLAG_UDP_IPV4_CSUM_ENABLE_64		BIT(17)
#define HTT_MSDU_EXT_DESC_FLAG_UDP_IPV6_CSUM_ENABLE_64		BIT(18)
#define HTT_MSDU_EXT_DESC_FLAG_TCP_IPV4_CSUM_ENABLE_64		BIT(19)
#define HTT_MSDU_EXT_DESC_FLAG_TCP_IPV6_CSUM_ENABLE_64		BIT(20)
#define HTT_MSDU_EXT_DESC_FLAG_PARTIAL_CSUM_ENABLE_64		BIT(21)

/* Convenience mask for the 64-bit layout (PARTIAL bit intentionally
 * not included here).
 */
#define HTT_MSDU_CHECKSUM_ENABLE_64 (HTT_MSDU_EXT_DESC_FLAG_IPV4_CSUM_ENABLE_64 \
				    | HTT_MSDU_EXT_DESC_FLAG_UDP_IPV4_CSUM_ENABLE_64 \
				    | HTT_MSDU_EXT_DESC_FLAG_UDP_IPV6_CSUM_ENABLE_64 \
				    | HTT_MSDU_EXT_DESC_FLAG_TCP_IPV4_CSUM_ENABLE_64 \
				    | HTT_MSDU_EXT_DESC_FLAG_TCP_IPV6_CSUM_ENABLE_64)
134
135
/* Bit layout of htt_data_tx_desc.flags0 (8-bit field); bits 5-7 carry
 * the packet type via the PKT_TYPE mask/shift defines.
 */
enum htt_data_tx_desc_flags0 {
	HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT = 1 << 0,
	HTT_DATA_TX_DESC_FLAGS0_NO_AGGR = 1 << 1,
	HTT_DATA_TX_DESC_FLAGS0_NO_ENCRYPT = 1 << 2,
	HTT_DATA_TX_DESC_FLAGS0_NO_CLASSIFY = 1 << 3,
	HTT_DATA_TX_DESC_FLAGS0_RSVD0 = 1 << 4
#define HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE_MASK 0xE0
#define HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE_LSB 5
};

/* Bit layout of htt_data_tx_desc.flags1 (16-bit field); low bits hold
 * vdev id (0-5) and extended tid (6-10) via the mask/shift defines.
 */
enum htt_data_tx_desc_flags1 {
#define HTT_DATA_TX_DESC_FLAGS1_VDEV_ID_BITS 6
#define HTT_DATA_TX_DESC_FLAGS1_VDEV_ID_MASK 0x003F
#define HTT_DATA_TX_DESC_FLAGS1_VDEV_ID_LSB 0
#define HTT_DATA_TX_DESC_FLAGS1_EXT_TID_BITS 5
#define HTT_DATA_TX_DESC_FLAGS1_EXT_TID_MASK 0x07C0
#define HTT_DATA_TX_DESC_FLAGS1_EXT_TID_LSB 6
	HTT_DATA_TX_DESC_FLAGS1_POSTPONED = 1 << 11,
	HTT_DATA_TX_DESC_FLAGS1_MORE_IN_BATCH = 1 << 12,
	HTT_DATA_TX_DESC_FLAGS1_CKSUM_L3_OFFLOAD = 1 << 13,
	HTT_DATA_TX_DESC_FLAGS1_CKSUM_L4_OFFLOAD = 1 << 14,
	HTT_DATA_TX_DESC_FLAGS1_TX_COMPLETE = 1 << 15
};
158
159
/* Extractors for the tx credit update word: absolute credit delta in
 * bits 31:16, delta sign in bit 8.
 */
#define HTT_TX_CREDIT_DELTA_ABS_M      0xffff0000
#define HTT_TX_CREDIT_DELTA_ABS_S      16
#define HTT_TX_CREDIT_DELTA_ABS_GET(word) \
	    (((word) & HTT_TX_CREDIT_DELTA_ABS_M) >> HTT_TX_CREDIT_DELTA_ABS_S)

#define HTT_TX_CREDIT_SIGN_BIT_M       0x00000100
#define HTT_TX_CREDIT_SIGN_BIT_S       8
#define HTT_TX_CREDIT_SIGN_BIT_GET(word) \
	    (((word) & HTT_TX_CREDIT_SIGN_BIT_M) >> HTT_TX_CREDIT_SIGN_BIT_S)

/* Special "extended tid" values used beyond the regular QoS tids 0-15
 * (see the flags1 EXT_TID field above).
 */
enum htt_data_tx_ext_tid {
	HTT_DATA_TX_EXT_TID_NON_QOS_MCAST_BCAST = 16,
	HTT_DATA_TX_EXT_TID_MGMT                = 17,
	HTT_DATA_TX_EXT_TID_INVALID             = 31
};

/* Sentinel peer id meaning "no/unknown peer". */
#define HTT_INVALID_PEERID 0xFFFF
176
177
/*
178
* htt_data_tx_desc - used for data tx path
179
*
180
* Note: vdev_id irrelevant for pkt_type == raw and no_classify == 1.
181
* ext_tid: for qos-data frames (0-15), see %HTT_DATA_TX_EXT_TID_
182
* for special kinds of tids
183
* postponed: only for HL hosts. indicates if this is a resend
184
* (HL hosts manage queues on the host )
185
* more_in_batch: only for HL hosts. indicates if more packets are
186
* pending. this allows target to wait and aggregate
187
* freq: 0 means home channel of given vdev. intended for offchannel
188
*/
189
/* Data-path tx descriptor (32-bit paddr variant); field semantics are
 * described in the comment block above.  The trailing union overlays a
 * plain peer id with the peerid+freq pair used for offchannel tx.
 */
struct htt_data_tx_desc {
	u8 flags0; /* %HTT_DATA_TX_DESC_FLAGS0_ */
	__le16 flags1; /* %HTT_DATA_TX_DESC_FLAGS1_ */
	__le16 len;
	__le16 id;
	__le32 frags_paddr;
	union {
		__le32 peerid;
		struct {
			__le16 peerid;
			__le16 freq;
		} __packed offchan_tx;
	} __packed;
	u8 prefetch[0]; /* start of frame, for FW classification engine */
} __packed;

/* Same descriptor for 64-bit targets: only frags_paddr widens to
 * 64 bits.
 */
struct htt_data_tx_desc_64 {
	u8 flags0; /* %HTT_DATA_TX_DESC_FLAGS0_ */
	__le16 flags1; /* %HTT_DATA_TX_DESC_FLAGS1_ */
	__le16 len;
	__le16 id;
	__le64 frags_paddr;
	union {
		__le32 peerid;
		struct {
			__le16 peerid;
			__le16 freq;
		} __packed offchan_tx;
	} __packed;
	u8 prefetch[0]; /* start of frame, for FW classification engine */
} __packed;
220
221
/* Flags for the rx ring config message (htt_rx_ring_setup_ring*.flags):
 * which rx descriptor parts / frame classes the target should deliver.
 */
enum htt_rx_ring_flags {
	HTT_RX_RING_FLAGS_MAC80211_HDR = 1 << 0,
	HTT_RX_RING_FLAGS_MSDU_PAYLOAD = 1 << 1,
	HTT_RX_RING_FLAGS_PPDU_START = 1 << 2,
	HTT_RX_RING_FLAGS_PPDU_END = 1 << 3,
	HTT_RX_RING_FLAGS_MPDU_START = 1 << 4,
	HTT_RX_RING_FLAGS_MPDU_END = 1 << 5,
	HTT_RX_RING_FLAGS_MSDU_START = 1 << 6,
	HTT_RX_RING_FLAGS_MSDU_END = 1 << 7,
	HTT_RX_RING_FLAGS_RX_ATTENTION = 1 << 8,
	HTT_RX_RING_FLAGS_FRAG_INFO = 1 << 9,
	HTT_RX_RING_FLAGS_UNICAST_RX = 1 << 10,
	HTT_RX_RING_FLAGS_MULTICAST_RX = 1 << 11,
	HTT_RX_RING_FLAGS_CTRL_RX = 1 << 12,
	HTT_RX_RING_FLAGS_MGMT_RX = 1 << 13,
	HTT_RX_RING_FLAGS_NULL_RX = 1 << 14,
	HTT_RX_RING_FLAGS_PHY_DATA_RX = 1 << 15
};
239
240
/* Rx ring sizing: ring is 2048 entries; single-MAC fill target is just
 * under half the ring, dual-MAC fills the whole ring minus one.
 */
#define HTT_RX_RING_SIZE_MIN 128
#define HTT_RX_RING_SIZE_MAX 2048
#define HTT_RX_RING_SIZE HTT_RX_RING_SIZE_MAX
#define HTT_RX_RING_FILL_LEVEL (((HTT_RX_RING_SIZE) / 2) - 1)
#define HTT_RX_RING_FILL_LEVEL_DUAL_MAC (HTT_RX_RING_SIZE - 1)

/* Tells the target where each rx descriptor sub-structure sits within
 * a ring buffer entry; part of the RX_RING_CFG message.
 */
struct htt_rx_ring_rx_desc_offsets {
	/* the following offsets are in 4-byte units */
	__le16 mac80211_hdr_offset;
	__le16 msdu_payload_offset;
	__le16 ppdu_start_offset;
	__le16 ppdu_end_offset;
	__le16 mpdu_start_offset;
	__le16 mpdu_end_offset;
	__le16 msdu_start_offset;
	__le16 msdu_end_offset;
	__le16 rx_attention_offset;
	__le16 frag_info_offset;
} __packed;
259
260
/* Per-ring body of the RX_RING_CFG message, 32-bit paddr targets. */
struct htt_rx_ring_setup_ring32 {
	__le32 fw_idx_shadow_reg_paddr;
	__le32 rx_ring_base_paddr;
	__le16 rx_ring_len; /* in 4-byte words */
	__le16 rx_ring_bufsize; /* rx skb size - in bytes */
	__le16 flags; /* %HTT_RX_RING_FLAGS_ */
	__le16 fw_idx_init_val;

	struct htt_rx_ring_rx_desc_offsets offsets;
} __packed;

/* Same as above for 64-bit paddr targets (wider address fields). */
struct htt_rx_ring_setup_ring64 {
	__le64 fw_idx_shadow_reg_paddr;
	__le64 rx_ring_base_paddr;
	__le16 rx_ring_len; /* in 4-byte words */
	__le16 rx_ring_bufsize; /* rx skb size - in bytes */
	__le16 flags; /* %HTT_RX_RING_FLAGS_ */
	__le16 fw_idx_init_val;

	struct htt_rx_ring_rx_desc_offsets offsets;
} __packed;
281
282
/* Fixed header of the RX_RING_CFG message, followed by num_rings
 * per-ring bodies.
 */
struct htt_rx_ring_setup_hdr {
	u8 num_rings; /* supported values: 1, 2 */
	__le16 rsvd0;
} __packed;

/* Full RX_RING_CFG message, 32-bit ring body variant. */
struct htt_rx_ring_setup_32 {
	struct htt_rx_ring_setup_hdr hdr;
	struct htt_rx_ring_setup_ring32 rings[];
} __packed;

/* Full RX_RING_CFG message, 64-bit ring body variant. */
struct htt_rx_ring_setup_64 {
	struct htt_rx_ring_setup_hdr hdr;
	struct htt_rx_ring_setup_ring64 rings[];
} __packed;
296
297
/*
298
* htt_stats_req - request target to send specified statistics
299
*
300
* @msg_type: hardcoded %HTT_H2T_MSG_TYPE_STATS_REQ
301
* @upload_types: see %htt_dbg_stats_type. this is 24bit field actually
302
* so make sure its little-endian.
303
* @reset_types: see %htt_dbg_stats_type. this is 24bit field actually
304
* so make sure its little-endian.
305
* @cfg_val: stat_type specific configuration
306
* @stat_type: see %htt_dbg_stats_type
307
* @cookie_lsb: used for confirmation message from target->host
308
* @cookie_msb: ditto as %cookie
309
*/
310
/* STATS_REQ message body; field semantics are documented in the
 * comment block above (upload/reset types are 24-bit little-endian
 * %htt_dbg_stats_type masks).
 */
struct htt_stats_req {
	u8 upload_types[3];
	u8 rsvd0;
	u8 reset_types[3];
	struct {
		u8 mpdu_bytes;
		u8 mpdu_num_msdus;
		u8 msdu_bytes;
	} __packed;
	u8 stat_type;
	__le32 cookie_lsb;
	__le32 cookie_msb;
} __packed;

#define HTT_STATS_REQ_CFG_STAT_TYPE_INVALID 0xff
#define HTT_STATS_BIT_MASK GENMASK(16, 0)
326
327
/*
328
* htt_oob_sync_req - request out-of-band sync
329
*
330
* The HTT SYNC tells the target to suspend processing of subsequent
331
* HTT host-to-target messages until some other target agent locally
332
* informs the target HTT FW that the current sync counter is equal to
333
* or greater than (in a modulo sense) the sync counter specified in
334
* the SYNC message.
335
*
336
* This allows other host-target components to synchronize their operation
337
* with HTT, e.g. to ensure that tx frames don't get transmitted until a
338
* security key has been downloaded to and activated by the target.
339
* In the absence of any explicit synchronization counter value
340
* specification, the target HTT FW will use zero as the default current
341
* sync value.
342
*
343
* The HTT target FW will suspend its host->target message processing as long
344
* as 0 < (in-band sync counter - out-of-band sync counter) & 0xff < 128.
345
*/
346
/* SYNC message body (see the long explanation above): target suspends
 * h2t processing until its local counter catches up to sync_count.
 */
struct htt_oob_sync_req {
	u8 sync_count;
	__le16 rsvd0;
} __packed;

/* AGGR_CFG message body: aggregation limits. */
struct htt_aggr_conf {
	u8 max_num_ampdu_subframes;
	/* amsdu_subframes is limited by 0x1F mask */
	u8 max_num_amsdu_subframes;
} __packed;

/* v2 of the aggregation config with an explicit pad byte. */
struct htt_aggr_conf_v2 {
	u8 max_num_ampdu_subframes;
	/* amsdu_subframes is limited by 0x1F mask */
	u8 max_num_amsdu_subframes;
	u8 reserved;
} __packed;
363
364
/* Number of leading frame bytes copied into the MGMT_TX descriptor. */
#define HTT_MGMT_FRM_HDR_DOWNLOAD_LEN	32

/* qca99x0-specific trailer of the MGMT_TX descriptor. */
struct htt_mgmt_tx_desc_qca99x0 {
	__le32 rate;
} __packed;

/* MGMT_TX message body (HTT < 3.0 management frame path): DMA address
 * and length of the frame, host-chosen desc_id echoed back in the
 * completion, plus the first HTT_MGMT_FRM_HDR_DOWNLOAD_LEN bytes of
 * the frame header.
 */
struct htt_mgmt_tx_desc {
	u8 pad[sizeof(u32) - sizeof(struct htt_cmd_hdr)];
	__le32 msdu_paddr;
	__le32 desc_id;
	__le32 len;
	__le32 vdev_id;
	u8 hdr[HTT_MGMT_FRM_HDR_DOWNLOAD_LEN];
	union {
		struct htt_mgmt_tx_desc_qca99x0 qca99x0;
	} __packed;
} __packed;
380
381
/* Status codes reported in the management tx completion. */
enum htt_mgmt_tx_status {
	HTT_MGMT_TX_STATUS_OK    = 0,
	HTT_MGMT_TX_STATUS_RETRY = 1,
	HTT_MGMT_TX_STATUS_DROP  = 2
};
386
387
/*=== target -> host messages ===============================================*/
388
389
/* On-wire target-to-host message IDs for "main" firmware. */
enum htt_main_t2h_msg_type {
	HTT_MAIN_T2H_MSG_TYPE_VERSION_CONF             = 0x0,
	HTT_MAIN_T2H_MSG_TYPE_RX_IND                   = 0x1,
	HTT_MAIN_T2H_MSG_TYPE_RX_FLUSH                 = 0x2,
	HTT_MAIN_T2H_MSG_TYPE_PEER_MAP                 = 0x3,
	HTT_MAIN_T2H_MSG_TYPE_PEER_UNMAP               = 0x4,
	HTT_MAIN_T2H_MSG_TYPE_RX_ADDBA                 = 0x5,
	HTT_MAIN_T2H_MSG_TYPE_RX_DELBA                 = 0x6,
	HTT_MAIN_T2H_MSG_TYPE_TX_COMPL_IND             = 0x7,
	HTT_MAIN_T2H_MSG_TYPE_PKTLOG                   = 0x8,
	HTT_MAIN_T2H_MSG_TYPE_STATS_CONF               = 0x9,
	HTT_MAIN_T2H_MSG_TYPE_RX_FRAG_IND              = 0xa,
	HTT_MAIN_T2H_MSG_TYPE_SEC_IND                  = 0xb,
	HTT_MAIN_T2H_MSG_TYPE_TX_INSPECT_IND           = 0xd,
	HTT_MAIN_T2H_MSG_TYPE_MGMT_TX_COMPL_IND        = 0xe,
	HTT_MAIN_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND     = 0xf,
	HTT_MAIN_T2H_MSG_TYPE_RX_PN_IND                = 0x10,
	HTT_MAIN_T2H_MSG_TYPE_RX_OFFLOAD_DELIVER_IND   = 0x11,
	HTT_MAIN_T2H_MSG_TYPE_TEST,
	/* keep this last */
	HTT_MAIN_T2H_NUM_MSGS
};
411
412
/* On-wire target-to-host message IDs for 10.x firmware. */
enum htt_10x_t2h_msg_type {
	HTT_10X_T2H_MSG_TYPE_VERSION_CONF              = 0x0,
	HTT_10X_T2H_MSG_TYPE_RX_IND                    = 0x1,
	HTT_10X_T2H_MSG_TYPE_RX_FLUSH                  = 0x2,
	HTT_10X_T2H_MSG_TYPE_PEER_MAP                  = 0x3,
	HTT_10X_T2H_MSG_TYPE_PEER_UNMAP                = 0x4,
	HTT_10X_T2H_MSG_TYPE_RX_ADDBA                  = 0x5,
	HTT_10X_T2H_MSG_TYPE_RX_DELBA                  = 0x6,
	HTT_10X_T2H_MSG_TYPE_TX_COMPL_IND              = 0x7,
	HTT_10X_T2H_MSG_TYPE_PKTLOG                    = 0x8,
	HTT_10X_T2H_MSG_TYPE_STATS_CONF                = 0x9,
	HTT_10X_T2H_MSG_TYPE_RX_FRAG_IND               = 0xa,
	HTT_10X_T2H_MSG_TYPE_SEC_IND                   = 0xb,
	HTT_10X_T2H_MSG_TYPE_RC_UPDATE_IND             = 0xc,
	HTT_10X_T2H_MSG_TYPE_TX_INSPECT_IND            = 0xd,
	HTT_10X_T2H_MSG_TYPE_TEST                      = 0xe,
	HTT_10X_T2H_MSG_TYPE_CHAN_CHANGE               = 0xf,
	HTT_10X_T2H_MSG_TYPE_AGGR_CONF                 = 0x11,
	HTT_10X_T2H_MSG_TYPE_STATS_NOUPLOAD            = 0x12,
	HTT_10X_T2H_MSG_TYPE_MGMT_TX_COMPL_IND         = 0x13,
	/* keep this last */
	HTT_10X_T2H_NUM_MSGS
};
435
436
/* On-wire target-to-host message IDs for TLV firmware. */
enum htt_tlv_t2h_msg_type {
	HTT_TLV_T2H_MSG_TYPE_VERSION_CONF              = 0x0,
	HTT_TLV_T2H_MSG_TYPE_RX_IND                    = 0x1,
	HTT_TLV_T2H_MSG_TYPE_RX_FLUSH                  = 0x2,
	HTT_TLV_T2H_MSG_TYPE_PEER_MAP                  = 0x3,
	HTT_TLV_T2H_MSG_TYPE_PEER_UNMAP                = 0x4,
	HTT_TLV_T2H_MSG_TYPE_RX_ADDBA                  = 0x5,
	HTT_TLV_T2H_MSG_TYPE_RX_DELBA                  = 0x6,
	HTT_TLV_T2H_MSG_TYPE_TX_COMPL_IND              = 0x7,
	HTT_TLV_T2H_MSG_TYPE_PKTLOG                    = 0x8,
	HTT_TLV_T2H_MSG_TYPE_STATS_CONF                = 0x9,
	HTT_TLV_T2H_MSG_TYPE_RX_FRAG_IND               = 0xa,
	HTT_TLV_T2H_MSG_TYPE_SEC_IND                   = 0xb,
	HTT_TLV_T2H_MSG_TYPE_RC_UPDATE_IND             = 0xc, /* deprecated */
	HTT_TLV_T2H_MSG_TYPE_TX_INSPECT_IND            = 0xd,
	HTT_TLV_T2H_MSG_TYPE_MGMT_TX_COMPL_IND         = 0xe,
	HTT_TLV_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND      = 0xf,
	HTT_TLV_T2H_MSG_TYPE_RX_PN_IND                 = 0x10,
	HTT_TLV_T2H_MSG_TYPE_RX_OFFLOAD_DELIVER_IND    = 0x11,
	HTT_TLV_T2H_MSG_TYPE_RX_IN_ORD_PADDR_IND       = 0x12,
	/* 0x13 reserved */
	HTT_TLV_T2H_MSG_TYPE_WDI_IPA_OP_RESPONSE       = 0x14,
	HTT_TLV_T2H_MSG_TYPE_CHAN_CHANGE               = 0x15,
	HTT_TLV_T2H_MSG_TYPE_RX_OFLD_PKT_ERR           = 0x16,
	HTT_TLV_T2H_MSG_TYPE_TEST,
	/* keep this last */
	HTT_TLV_T2H_NUM_MSGS
};
464
465
/* On-wire target-to-host message IDs for 10.4 firmware. */
enum htt_10_4_t2h_msg_type {
	HTT_10_4_T2H_MSG_TYPE_VERSION_CONF           = 0x0,
	HTT_10_4_T2H_MSG_TYPE_RX_IND                 = 0x1,
	HTT_10_4_T2H_MSG_TYPE_RX_FLUSH               = 0x2,
	HTT_10_4_T2H_MSG_TYPE_PEER_MAP               = 0x3,
	HTT_10_4_T2H_MSG_TYPE_PEER_UNMAP             = 0x4,
	HTT_10_4_T2H_MSG_TYPE_RX_ADDBA               = 0x5,
	HTT_10_4_T2H_MSG_TYPE_RX_DELBA               = 0x6,
	HTT_10_4_T2H_MSG_TYPE_TX_COMPL_IND           = 0x7,
	HTT_10_4_T2H_MSG_TYPE_PKTLOG                 = 0x8,
	HTT_10_4_T2H_MSG_TYPE_STATS_CONF             = 0x9,
	HTT_10_4_T2H_MSG_TYPE_RX_FRAG_IND            = 0xa,
	HTT_10_4_T2H_MSG_TYPE_SEC_IND                = 0xb,
	HTT_10_4_T2H_MSG_TYPE_RC_UPDATE_IND          = 0xc,
	HTT_10_4_T2H_MSG_TYPE_TX_INSPECT_IND         = 0xd,
	HTT_10_4_T2H_MSG_TYPE_MGMT_TX_COMPL_IND      = 0xe,
	HTT_10_4_T2H_MSG_TYPE_CHAN_CHANGE            = 0xf,
	HTT_10_4_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND   = 0x10,
	HTT_10_4_T2H_MSG_TYPE_RX_PN_IND              = 0x11,
	HTT_10_4_T2H_MSG_TYPE_RX_OFFLOAD_DELIVER_IND = 0x12,
	HTT_10_4_T2H_MSG_TYPE_TEST                   = 0x13,
	HTT_10_4_T2H_MSG_TYPE_EN_STATS               = 0x14,
	HTT_10_4_T2H_MSG_TYPE_AGGR_CONF              = 0x15,
	HTT_10_4_T2H_MSG_TYPE_TX_FETCH_IND           = 0x16,
	HTT_10_4_T2H_MSG_TYPE_TX_FETCH_CONFIRM       = 0x17,
	HTT_10_4_T2H_MSG_TYPE_STATS_NOUPLOAD         = 0x18,
	/* 0x19 to 0x2f are reserved */
	HTT_10_4_T2H_MSG_TYPE_TX_MODE_SWITCH_IND     = 0x30,
	HTT_10_4_T2H_MSG_TYPE_PEER_STATS	     = 0x31,
	/* keep this last */
	HTT_10_4_T2H_NUM_MSGS
};
497
498
/* Firmware-agnostic t2h message ids (implicit sequential values, no
 * fixed on-wire encoding here).  NOTE(review): presumably the per-
 * firmware enums above are translated to these by the rx dispatch
 * code — confirm against the .c side.
 */
enum htt_t2h_msg_type {
	HTT_T2H_MSG_TYPE_VERSION_CONF,
	HTT_T2H_MSG_TYPE_RX_IND,
	HTT_T2H_MSG_TYPE_RX_FLUSH,
	HTT_T2H_MSG_TYPE_PEER_MAP,
	HTT_T2H_MSG_TYPE_PEER_UNMAP,
	HTT_T2H_MSG_TYPE_RX_ADDBA,
	HTT_T2H_MSG_TYPE_RX_DELBA,
	HTT_T2H_MSG_TYPE_TX_COMPL_IND,
	HTT_T2H_MSG_TYPE_PKTLOG,
	HTT_T2H_MSG_TYPE_STATS_CONF,
	HTT_T2H_MSG_TYPE_RX_FRAG_IND,
	HTT_T2H_MSG_TYPE_SEC_IND,
	HTT_T2H_MSG_TYPE_RC_UPDATE_IND,
	HTT_T2H_MSG_TYPE_TX_INSPECT_IND,
	HTT_T2H_MSG_TYPE_MGMT_TX_COMPLETION,
	HTT_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND,
	HTT_T2H_MSG_TYPE_RX_PN_IND,
	HTT_T2H_MSG_TYPE_RX_OFFLOAD_DELIVER_IND,
	HTT_T2H_MSG_TYPE_RX_IN_ORD_PADDR_IND,
	HTT_T2H_MSG_TYPE_WDI_IPA_OP_RESPONSE,
	HTT_T2H_MSG_TYPE_CHAN_CHANGE,
	HTT_T2H_MSG_TYPE_RX_OFLD_PKT_ERR,
	HTT_T2H_MSG_TYPE_AGGR_CONF,
	HTT_T2H_MSG_TYPE_STATS_NOUPLOAD,
	HTT_T2H_MSG_TYPE_TEST,
	HTT_T2H_MSG_TYPE_EN_STATS,
	HTT_T2H_MSG_TYPE_TX_FETCH_IND,
	HTT_T2H_MSG_TYPE_TX_FETCH_CONFIRM,
	HTT_T2H_MSG_TYPE_TX_MODE_SWITCH_IND,
	HTT_T2H_MSG_TYPE_PEER_STATS,
	/* keep this last */
	HTT_T2H_NUM_MSGS
};
532
533
/*
534
* htt_resp_hdr - header for target-to-host messages
535
*
536
* msg_type: see htt_t2h_msg_type
537
*/
538
/* One-byte header on every target->host message. */
struct htt_resp_hdr {
	u8 msg_type;
} __packed;

#define HTT_RESP_HDR_MSG_TYPE_OFFSET 0
#define HTT_RESP_HDR_MSG_TYPE_MASK 0xff
#define HTT_RESP_HDR_MSG_TYPE_LSB 0

/* htt_ver_resp - response sent for htt_ver_req */
struct htt_ver_resp {
	u8 minor;
	u8 major;
	u8 rsvd0;
} __packed;
552
553
/* flags bit 0 signals the info field carries the ACK RSSI. */
#define HTT_MGMT_TX_CMPL_FLAG_ACK_RSSI BIT(0)

#define HTT_MGMT_TX_CMPL_INFO_ACK_RSSI_MASK	GENMASK(7, 0)

/* MGMT_TX_COMPL_IND body; desc_id matches htt_mgmt_tx_desc.desc_id and
 * status is a %htt_mgmt_tx_status value.
 */
struct htt_mgmt_tx_completion {
	u8 rsvd0;
	u8 rsvd1;
	u8 flags;
	__le32 desc_id;
	__le32 status;
	__le32 ppdu_id;
	__le32 info;
} __packed;
566
567
/* Bit layout of htt_rx_indication_hdr.info0. */
#define HTT_RX_INDICATION_INFO0_EXT_TID_MASK  (0x1F)
#define HTT_RX_INDICATION_INFO0_EXT_TID_LSB   (0)
#define HTT_RX_INDICATION_INFO0_FLUSH_VALID   (1 << 5)
#define HTT_RX_INDICATION_INFO0_RELEASE_VALID (1 << 6)
#define HTT_RX_INDICATION_INFO0_PPDU_DURATION BIT(7)

/* Bit layout of htt_rx_indication_hdr.info1: reorder flush/release
 * sequence-number windows and the count of mpdu ranges that follow.
 */
#define HTT_RX_INDICATION_INFO1_FLUSH_START_SEQNO_MASK   0x0000003F
#define HTT_RX_INDICATION_INFO1_FLUSH_START_SEQNO_LSB    0
#define HTT_RX_INDICATION_INFO1_FLUSH_END_SEQNO_MASK     0x00000FC0
#define HTT_RX_INDICATION_INFO1_FLUSH_END_SEQNO_LSB      6
#define HTT_RX_INDICATION_INFO1_RELEASE_START_SEQNO_MASK 0x0003F000
#define HTT_RX_INDICATION_INFO1_RELEASE_START_SEQNO_LSB  12
#define HTT_RX_INDICATION_INFO1_RELEASE_END_SEQNO_MASK   0x00FC0000
#define HTT_RX_INDICATION_INFO1_RELEASE_END_SEQNO_LSB    18
#define HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES_MASK     0xFF000000
#define HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES_LSB      24

/* htt_data_tx_completion.flags2 bits: which optional trailers are
 * appended to the tx completion payload.
 */
#define HTT_TX_CMPL_FLAG_DATA_RSSI		BIT(0)
#define HTT_TX_CMPL_FLAG_PPID_PRESENT		BIT(1)
#define HTT_TX_CMPL_FLAG_PA_PRESENT		BIT(2)
#define HTT_TX_CMPL_FLAG_PPDU_DURATION_PRESENT	BIT(3)

#define HTT_TX_DATA_RSSI_ENABLE_WCN3990 BIT(3)
#define HTT_TX_DATA_APPEND_RETRIES BIT(0)
#define HTT_TX_DATA_APPEND_TIMESTAMP BIT(1)

/* Fixed header of the RX_IND message. */
struct htt_rx_indication_hdr {
	u8 info0; /* %HTT_RX_INDICATION_INFO0_ */
	__le16 peer_id;
	__le32 info1; /* %HTT_RX_INDICATION_INFO1_ */
} __packed;

/* Bit layout of htt_rx_indication_ppdu.info0 (PHY-level metadata). */
#define HTT_RX_INDICATION_INFO0_PHY_ERR_VALID    (1 << 0)
#define HTT_RX_INDICATION_INFO0_LEGACY_RATE_MASK (0x1E)
#define HTT_RX_INDICATION_INFO0_LEGACY_RATE_LSB  (1)
#define HTT_RX_INDICATION_INFO0_LEGACY_RATE_CCK  (1 << 5)
#define HTT_RX_INDICATION_INFO0_END_VALID        (1 << 6)
#define HTT_RX_INDICATION_INFO0_START_VALID      (1 << 7)

/* Bit layout of htt_rx_indication_ppdu.info1/info2: VHT-SIG words plus
 * preamble type and service field.
 */
#define HTT_RX_INDICATION_INFO1_VHT_SIG_A1_MASK    0x00FFFFFF
#define HTT_RX_INDICATION_INFO1_VHT_SIG_A1_LSB     0
#define HTT_RX_INDICATION_INFO1_PREAMBLE_TYPE_MASK 0xFF000000
#define HTT_RX_INDICATION_INFO1_PREAMBLE_TYPE_LSB  24

#define HTT_RX_INDICATION_INFO2_VHT_SIG_A1_MASK 0x00FFFFFF
#define HTT_RX_INDICATION_INFO2_VHT_SIG_A1_LSB  0
#define HTT_RX_INDICATION_INFO2_SERVICE_MASK    0xFF000000
#define HTT_RX_INDICATION_INFO2_SERVICE_LSB     24
615
616
/* Legacy rate codes carried in ppdu info0's LEGACY_RATE field.  Note
 * the OFDM and CCK code spaces overlap; which one applies is selected
 * by the LEGACY_RATE_CCK bit / %htt_rx_legacy_rate_type.
 */
enum htt_rx_legacy_rate {
	HTT_RX_OFDM_48 = 0,
	HTT_RX_OFDM_24 = 1,
	HTT_RX_OFDM_12,
	HTT_RX_OFDM_6,
	HTT_RX_OFDM_54,
	HTT_RX_OFDM_36,
	HTT_RX_OFDM_18,
	HTT_RX_OFDM_9,

	/* long preamble */
	HTT_RX_CCK_11_LP = 0,
	HTT_RX_CCK_5_5_LP = 1,
	HTT_RX_CCK_2_LP,
	HTT_RX_CCK_1_LP,
	/* short preamble */
	HTT_RX_CCK_11_SP,
	HTT_RX_CCK_5_5_SP,
	HTT_RX_CCK_2_SP
};

/* Selector between the two legacy rate code spaces above. */
enum htt_rx_legacy_rate_type {
	HTT_RX_LEGACY_RATE_OFDM = 0,
	HTT_RX_LEGACY_RATE_CCK
};

/* Preamble type reported in ppdu info1 (PREAMBLE_TYPE field). */
enum htt_rx_preamble_type {
	HTT_RX_LEGACY        = 0x4,
	HTT_RX_HT            = 0x8,
	HTT_RX_HT_WITH_TXBF  = 0x9,
	HTT_RX_VHT           = 0xC,
	HTT_RX_VHT_WITH_TXBF = 0xD,
};
649
650
/*
651
* Fields: phy_err_valid, phy_err_code, tsf,
652
* usec_timestamp, sub_usec_timestamp
653
* ..are valid only if end_valid == 1.
654
*
655
* Fields: rssi_chains, legacy_rate_type,
656
* legacy_rate_cck, preamble_type, service,
657
* vht_sig_*
658
* ..are valid only if start_valid == 1;
659
*/
660
/* Per-PPDU PHY metadata in the RX_IND message; validity of the fields
 * depends on start_valid/end_valid as described in the comment above.
 */
struct htt_rx_indication_ppdu {
	u8 combined_rssi;
	u8 sub_usec_timestamp;
	u8 phy_err_code;
	u8 info0; /* HTT_RX_INDICATION_INFO0_ */
	struct {
		u8 pri20_db;
		u8 ext20_db;
		u8 ext40_db;
		u8 ext80_db;
	} __packed rssi_chains[4];
	__le32 tsf;
	__le32 usec_timestamp;
	__le32 info1; /* HTT_RX_INDICATION_INFO1_ */
	__le32 info2; /* HTT_RX_INDICATION_INFO2_ */
} __packed;
676
677
/* Per-MPDU-range rx status reported in
 * htt_rx_indication_mpdu_range.mpdu_range_status.
 */
enum htt_rx_mpdu_status {
	HTT_RX_IND_MPDU_STATUS_UNKNOWN = 0x0,
	HTT_RX_IND_MPDU_STATUS_OK,
	HTT_RX_IND_MPDU_STATUS_ERR_FCS,
	HTT_RX_IND_MPDU_STATUS_ERR_DUP,
	HTT_RX_IND_MPDU_STATUS_ERR_REPLAY,
	HTT_RX_IND_MPDU_STATUS_ERR_INV_PEER,
	/* only accept EAPOL frames */
	HTT_RX_IND_MPDU_STATUS_UNAUTH_PEER,
	HTT_RX_IND_MPDU_STATUS_OUT_OF_SYNC,
	/* Non-data in promiscuous mode */
	HTT_RX_IND_MPDU_STATUS_MGMT_CTRL,
	HTT_RX_IND_MPDU_STATUS_TKIP_MIC_ERR,
	HTT_RX_IND_MPDU_STATUS_DECRYPT_ERR,
	HTT_RX_IND_MPDU_STATUS_MPDU_LENGTH_ERR,
	HTT_RX_IND_MPDU_STATUS_ENCRYPT_REQUIRED_ERR,
	HTT_RX_IND_MPDU_STATUS_PRIVACY_ERR,

	/*
	 * MISC: discard for unspecified reasons.
	 * Leave this enum value last.
	 */
	HTT_RX_IND_MPDU_STATUS_ERR_MISC = 0xFF
};
701
702
/* One entry of the trailing mpdu_ranges[] array in RX_IND: a run of
 * mpdu_count MPDUs sharing the same rx status.
 */
struct htt_rx_indication_mpdu_range {
	u8 mpdu_count;
	u8 mpdu_range_status; /* %htt_rx_mpdu_status */
	u8 pad0;
	u8 pad1;
} __packed;

/* Prefix before the variable-length fw rx descriptor in RX_IND;
 * fw_rx_desc_bytes gives the descriptor's size.
 */
struct htt_rx_indication_prefix {
	__le16 fw_rx_desc_bytes;
	u8 pad0;
	u8 pad1;
} __packed;
714
715
/* Full RX_IND message layout (low-latency variant). */
struct htt_rx_indication {
	struct htt_rx_indication_hdr hdr;
	struct htt_rx_indication_ppdu ppdu;
	struct htt_rx_indication_prefix prefix;

	/*
	 * the following fields are both dynamically sized, so
	 * take care addressing them
	 */

	/* the size of this is %fw_rx_desc_bytes */
	struct fw_rx_desc_base fw_desc;

	/*
	 * %mpdu_ranges starts after &%prefix + roundup(%fw_rx_desc_bytes, 4)
	 * and has %num_mpdu_ranges elements.
	 */
	struct htt_rx_indication_mpdu_range mpdu_ranges[];
} __packed;

/* High latency version of the RX indication; fw_desc here is the
 * fixed-size fw_rx_desc_hl, so mpdu_ranges[] starts at a fixed offset.
 */
struct htt_rx_indication_hl {
	struct htt_rx_indication_hdr hdr;
	struct htt_rx_indication_ppdu ppdu;
	struct htt_rx_indication_prefix prefix;
	struct fw_rx_desc_hl fw_desc;
	struct htt_rx_indication_mpdu_range mpdu_ranges[];
} __packed;
743
744
/* High-latency rx descriptor: info word plus a 128-bit PN, with the
 * 48..63 bit span accessible either as two 16-bit halves or as part of
 * a 32-bit word.
 */
struct htt_hl_rx_desc {
	__le32 info;
	__le32 pn_31_0;
	union {
		struct {
			__le16 pn_47_32;
			__le16 pn_63_48;
		} pn16;
		__le32 pn_63_32;
	} u0;
	__le32 pn_95_64;
	__le32 pn_127_96;
} __packed;
757
758
static inline struct htt_rx_indication_mpdu_range *
759
htt_rx_ind_get_mpdu_ranges(struct htt_rx_indication *rx_ind)
760
{
761
#if defined(__linux__)
762
void *ptr = rx_ind;
763
#elif defined(__FreeBSD__)
764
u8 *ptr = (void *)rx_ind;
765
#endif
766
767
ptr += sizeof(rx_ind->hdr)
768
+ sizeof(rx_ind->ppdu)
769
+ sizeof(rx_ind->prefix)
770
+ roundup(__le16_to_cpu(rx_ind->prefix.fw_rx_desc_bytes), 4);
771
#if defined(__linux__)
772
return ptr;
773
#elif defined(__FreeBSD__)
774
return ((void *)ptr);
775
#endif
776
}
777
778
static inline struct htt_rx_indication_mpdu_range *
779
htt_rx_ind_get_mpdu_ranges_hl(struct htt_rx_indication_hl *rx_ind)
780
{
781
#if defined(__linux__)
782
void *ptr = rx_ind;
783
#elif defined(__FreeBSD__)
784
u8 *ptr = (void *)rx_ind;
785
#endif
786
787
ptr += sizeof(rx_ind->hdr)
788
+ sizeof(rx_ind->ppdu)
789
+ sizeof(rx_ind->prefix)
790
+ sizeof(rx_ind->fw_desc);
791
#if defined(__linux__)
792
return ptr;
793
#elif defined(__FreeBSD__)
794
return ((void *)ptr);
795
#endif
796
}
797
798
/* Action the host should take on the flushed MPDU range. */
enum htt_rx_flush_mpdu_status {
	HTT_RX_FLUSH_MPDU_DISCARD = 0,
	HTT_RX_FLUSH_MPDU_REORDER = 1,
};

/*
 * htt_rx_flush - discard or reorder given range of mpdus
 *
 * Note: host must check if all sequence numbers between
 * [seq_num_start, seq_num_end-1] are valid.
 */
struct htt_rx_flush {
	__le16 peer_id;
	u8 tid;
	u8 rsvd0;
	u8 mpdu_status; /* %htt_rx_flush_mpdu_status */
	u8 seq_num_start; /* it is 6 LSBs of 802.11 seq no */
	u8 seq_num_end; /* it is 6 LSBs of 802.11 seq no */
};
817
818
/* PEER_MAP indication: associates a peer MAC address with a peer id
 * on the given vdev.
 */
struct htt_rx_peer_map {
	u8 vdev_id;
	__le16 peer_id;
	u8 addr[6];
	u8 rsvd0;
	u8 rsvd1;
} __packed;

/* PEER_UNMAP indication: the given peer id is no longer valid. */
struct htt_rx_peer_unmap {
	u8 rsvd0;
	__le16 peer_id;
} __packed;
830
831
/* Multicast vs unicast key selector. */
enum htt_txrx_sec_cast_type {
	HTT_TXRX_SEC_MCAST = 0,
	HTT_TXRX_SEC_UCAST
};

/* Whether packet-number (replay) checking applies. */
enum htt_rx_pn_check_type {
	HTT_RX_NON_PN_CHECK = 0,
	HTT_RX_PN_CHECK
};

/* Whether TKIP Michael MIC removal applies. */
enum htt_rx_tkip_demic_type {
	HTT_RX_NON_TKIP_MIC = 0,
	HTT_RX_TKIP_MIC
};
845
846
/* Cipher type reported in the security indication (flags low 7 bits,
 * see %htt_security_flags).
 */
enum htt_security_types {
	HTT_SECURITY_NONE,
	HTT_SECURITY_WEP128,
	HTT_SECURITY_WEP104,
	HTT_SECURITY_WEP40,
	HTT_SECURITY_TKIP,
	HTT_SECURITY_TKIP_NOMIC,
	HTT_SECURITY_AES_CCMP,
	HTT_SECURITY_WAPI,

	HTT_NUM_SECURITY_TYPES /* keep this last! */
};

/* Per-peer security bookkeeping limits (ucast + mcast; tids 0-18). */
#define ATH10K_HTT_TXRX_PEER_SECURITY_MAX 2
#define ATH10K_TXRX_NUM_EXT_TIDS 19
#define ATH10K_TXRX_NON_QOS_TID 16
862
863
/* Bit layout of htt_security_indication.flags: security type in the
 * low 7 bits, unicast/multicast selector in bit 7.
 */
enum htt_security_flags {
#define HTT_SECURITY_TYPE_MASK 0x7F
#define HTT_SECURITY_TYPE_LSB  0
	HTT_SECURITY_IS_UNICAST = 1 << 7
};

/* SEC_IND message: key material/state for a peer.  The union gives
 * both raw-byte and bitfield views of the flags.
 */
struct htt_security_indication {
	union {
		/* don't use bitfields; undefined behaviour */
		u8 flags; /* %htt_security_flags */
		struct {
			u8 security_type:7, /* %htt_security_types */
			   is_unicast:1;
		} __packed;
	} __packed;
	__le16 peer_id;
	u8 michael_key[8];
	u8 wapi_rsc[16];
} __packed;
882
883
/* Bit layout of the info0 word shared by ADDBA/DELBA indications:
 * tid in bits 3:0, peer id in bits 15:4.
 */
#define HTT_RX_BA_INFO0_TID_MASK     0x000F
#define HTT_RX_BA_INFO0_TID_LSB      0
#define HTT_RX_BA_INFO0_PEER_ID_MASK 0xFFF0
#define HTT_RX_BA_INFO0_PEER_ID_LSB  4

/* RX_ADDBA indication: a block-ack session opened with the given
 * reorder window size.
 */
struct htt_rx_addba {
	u8 window_size;
	__le16 info0; /* %HTT_RX_BA_INFO0_ */
} __packed;

/* RX_DELBA indication: the block-ack session was torn down. */
struct htt_rx_delba {
	u8 rsvd0;
	__le16 info0; /* %HTT_RX_BA_INFO0_ */
} __packed;
897
898
/* Per-MSDU tx completion status values. */
enum htt_data_tx_status {
	HTT_DATA_TX_STATUS_OK            = 0,
	HTT_DATA_TX_STATUS_DISCARD       = 1,
	HTT_DATA_TX_STATUS_NO_ACK        = 2,
	HTT_DATA_TX_STATUS_POSTPONE      = 3, /* HL only */
	HTT_DATA_TX_STATUS_DOWNLOAD_FAIL = 128
};

/* Bit layout of htt_data_tx_completion.flags: status in bits 2:0, tid
 * in bits 6:3, tid-invalid flag in bit 7.
 */
enum htt_data_tx_flags {
#define HTT_DATA_TX_STATUS_MASK 0x07
#define HTT_DATA_TX_STATUS_LSB  0
#define HTT_DATA_TX_TID_MASK    0x78
#define HTT_DATA_TX_TID_LSB     3
	HTT_DATA_TX_TID_INVALID = 1 << 7
};
913
914
/* Sentinel for an invalid MSDU id in the completion payload. */
#define HTT_TX_COMPL_INV_MSDU_ID 0xFFFF

/* Optional per-MSDU retry-count trailer appended to tx completions. */
struct htt_append_retries {
	__le16 msdu_id;
	u8 tx_retries;
	u8 flag;
} __packed;

/* Optional extended completion trailer: retries, timestamp and
 * per-MSDU ACK RSSI values.
 */
struct htt_data_tx_completion_ext {
	struct htt_append_retries a_retries;
	__le32 t_stamp;
	__le16 msdus_rssi[];
} __packed;
927
928
/**
929
* @brief target -> host TX completion indication message definition
930
*
931
* @details
932
* The following diagram shows the format of the TX completion indication sent
933
* from the target to the host
934
*
935
* |31 28|27|26|25|24|23 16| 15 |14 11|10 8|7 0|
936
* |-------------------------------------------------------------|
937
* header: |rsvd |A2|TP|A1|A0| num | t_i| tid |status| msg_type |
938
* |-------------------------------------------------------------|
939
* payload: | MSDU1 ID | MSDU0 ID |
940
* |-------------------------------------------------------------|
941
* : MSDU3 ID : MSDU2 ID :
942
* |-------------------------------------------------------------|
943
* | struct htt_tx_compl_ind_append_retries |
944
* |- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -|
945
* | struct htt_tx_compl_ind_append_tx_tstamp |
946
* |- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -|
947
* | MSDU1 ACK RSSI | MSDU0 ACK RSSI |
948
* |-------------------------------------------------------------|
949
* : MSDU3 ACK RSSI : MSDU2 ACK RSSI :
950
* |- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -|
951
* -msg_type
952
* Bits 7:0
953
* Purpose: identifies this as HTT TX completion indication
954
* -status
955
* Bits 10:8
956
* Purpose: the TX completion status of payload fragmentations descriptors
957
* Value: could be HTT_TX_COMPL_IND_STAT_OK or HTT_TX_COMPL_IND_STAT_DISCARD
958
* -tid
959
* Bits 14:11
960
* Purpose: the tid associated with those fragmentation descriptors. It is
961
* valid or not, depending on the tid_invalid bit.
962
* Value: 0 to 15
963
* -tid_invalid
964
* Bits 15:15
965
* Purpose: this bit indicates whether the tid field is valid or not
966
* Value: 0 indicates valid, 1 indicates invalid
967
* -num
968
* Bits 23:16
969
* Purpose: the number of payload in this indication
970
* Value: 1 to 255
971
* -A0 = append
972
* Bits 24:24
973
* Purpose: append the struct htt_tx_compl_ind_append_retries which contains
974
* the number of tx retries for one MSDU at the end of this message
975
* Value: 0 indicates no appending, 1 indicates appending
976
* -A1 = append1
977
* Bits 25:25
978
* Purpose: Append the struct htt_tx_compl_ind_append_tx_tstamp which
979
* contains the timestamp info for each TX msdu id in payload.
980
* Value: 0 indicates no appending, 1 indicates appending
981
* -TP = MSDU tx power presence
982
* Bits 26:26
983
* Purpose: Indicate whether the TX_COMPL_IND includes a tx power report
984
* for each MSDU referenced by the TX_COMPL_IND message.
985
* The order of the per-MSDU tx power reports matches the order
986
* of the MSDU IDs.
987
* Value: 0 indicates not appending, 1 indicates appending
988
* -A2 = append2
989
* Bits 27:27
990
* Purpose: Indicate whether data ACK RSSI is appended for each MSDU in
991
* TX_COMP_IND message. The order of the per-MSDU ACK RSSI report
992
* matches the order of the MSDU IDs.
993
* The ACK RSSI values are valid when status is COMPLETE_OK (and
994
* this append2 bit is set).
995
* Value: 0 indicates not appending, 1 indicates appending
996
*/
997
998
/* Target->host TX completion indication for data frames.
 * Wire format shared with firmware; see the preceding comment block for the
 * full bit layout. Do not reorder or resize fields.
 */
struct htt_data_tx_completion {
	union {
		u8 flags;
		struct {
			u8 status:3,	/* HTT_TX_COMPL_IND_STAT_* */
			   tid:4,	/* valid only when !tid_invalid */
			   tid_invalid:1;
		} __packed;
	} __packed;
	u8 num_msdus;
	u8 flags2; /* HTT_TX_CMPL_FLAG_DATA_RSSI */
	__le16 msdus[]; /* variable length based on %num_msdus */
} __packed;

/* Bit layout of htt_data_tx_ppdu_dur.info0 */
#define HTT_TX_PPDU_DUR_INFO0_PEER_ID_MASK	GENMASK(15, 0)
#define HTT_TX_PPDU_DUR_INFO0_TID_MASK		GENMASK(20, 16)

/* Per peer/TID PPDU airtime record */
struct htt_data_tx_ppdu_dur {
	__le32 info0; /* HTT_TX_PPDU_DUR_INFO0_ */
	__le32 tx_duration; /* in usecs */
} __packed;

/* Bit layout of htt_data_tx_compl_ppdu_dur.info0 */
#define HTT_TX_COMPL_PPDU_DUR_INFO0_NUM_ENTRIES_MASK	GENMASK(7, 0)

/* Container carrying NUM_ENTRIES htt_data_tx_ppdu_dur records */
struct htt_data_tx_compl_ppdu_dur {
	__le32 info0; /* HTT_TX_COMPL_PPDU_DUR_INFO0_ */
	struct htt_data_tx_ppdu_dur ppdu_dur[];
} __packed;

/* Generic view of a TX completion indication: 32-bit header followed by a
 * variable-length list of 16-bit MSDU ids. Kept as a [1] array (not a
 * flexible array member) so sizeof() keeps its historical value.
 */
struct htt_tx_compl_ind_base {
	u32 hdr;
	u16 payload[1/*or more*/];
} __packed;

/* Per-rate TX-done parameters carried inside an htt_rc_update element.
 * Host-endian u32 fields (no __le32): NOTE(review) this struct is not
 * marked __packed, unlike its htt_rc_update container.
 */
struct htt_rc_tx_done_params {
	u32 rate_code;
	u32 rate_code_flags;
	u32 flags;
	u32 num_enqued; /* 1 for non-AMPDU */
	u32 num_retries;
	u32 num_failed; /* for AMPDU */
	u32 ack_rssi;
	u32 time_stamp;
	u32 is_probe;
};

/* Target->host rate-control update for one peer */
struct htt_rc_update {
	u8 vdev_id;
	__le16 peer_id;
	u8 addr[6];		/* peer MAC address */
	u8 num_elems;
	u8 rsvd0;
	struct htt_rc_tx_done_params params[]; /* variable length %num_elems */
} __packed;

/* see htt_rx_indication for similar fields and descriptions */
struct htt_rx_fragment_indication {
	union {
		u8 info0; /* %HTT_RX_FRAG_IND_INFO0_ */
		struct {
			u8 ext_tid:5,
			   flush_valid:1;
		} __packed;
	} __packed;
	__le16 peer_id;
	__le32 info1; /* %HTT_RX_FRAG_IND_INFO1_ */
	__le16 fw_rx_desc_bytes;	/* length of fw_msdu_rx_desc[] */
	__le16 rsvd0;

	u8 fw_msdu_rx_desc[];
} __packed;

#define ATH10K_IEEE80211_EXTIV		BIT(5)
#define ATH10K_IEEE80211_TKIP_MICLEN	8	/* trailing MIC */

#define HTT_RX_FRAG_IND_INFO0_HEADER_LEN		16

/* Bit layout of htt_rx_fragment_indication.info0 */
#define HTT_RX_FRAG_IND_INFO0_EXT_TID_MASK		0x1F
#define HTT_RX_FRAG_IND_INFO0_EXT_TID_LSB		0
#define HTT_RX_FRAG_IND_INFO0_FLUSH_VALID_MASK		0x20
#define HTT_RX_FRAG_IND_INFO0_FLUSH_VALID_LSB		5

/* Bit layout of htt_rx_fragment_indication.info1 */
#define HTT_RX_FRAG_IND_INFO1_FLUSH_SEQ_NUM_START_MASK	0x0000003F
#define HTT_RX_FRAG_IND_INFO1_FLUSH_SEQ_NUM_START_LSB	0
#define HTT_RX_FRAG_IND_INFO1_FLUSH_SEQ_NUM_END_MASK	0x00000FC0
#define HTT_RX_FRAG_IND_INFO1_FLUSH_SEQ_NUM_END_LSB	6

/* Target->host PN (packet number) indication for a peer/TID */
struct htt_rx_pn_ind {
	__le16 peer_id;
	u8 tid;
	u8 seqno_start;
	u8 seqno_end;
	u8 pn_ie_count;
	u8 reserved;
	u8 pn_ies[];		/* %pn_ie_count entries */
} __packed;

/* One offloaded MSDU within an rx offload indication */
struct htt_rx_offload_msdu {
	__le16 msdu_len;
	__le16 peer_id;
	u8 vdev_id;
	u8 tid;
	u8 fw_desc;
	u8 payload[];		/* %msdu_len bytes of frame data */
} __packed;

/* Header of a target->host rx offload deliver indication;
 * followed by %msdu_count htt_rx_offload_msdu records.
 */
struct htt_rx_offload_ind {
	u8 reserved;
	__le16 msdu_count;
} __packed;

/* Per-MSDU descriptor in an in-order indication (32-bit paddr variant) */
struct htt_rx_in_ord_msdu_desc {
	__le32 msdu_paddr;
	__le16 msdu_len;
	u8 fw_desc;
	u8 reserved;
} __packed;

/* Per-MSDU descriptor in an in-order indication (64-bit paddr variant) */
struct htt_rx_in_ord_msdu_desc_ext {
	__le64 msdu_paddr;
	__le16 msdu_len;
	u8 fw_desc;
	u8 reserved;
} __packed;

/* Target->host in-order rx indication (full rx reorder firmware).
 * The union selects the 32- vs 64-bit descriptor list depending on the
 * target's addressing width.
 */
struct htt_rx_in_ord_ind {
	u8 info;		/* %HTT_RX_IN_ORD_IND_INFO_ */
	__le16 peer_id;
	u8 vdev_id;
	u8 reserved;
	__le16 msdu_count;
	union {
		DECLARE_FLEX_ARRAY(struct htt_rx_in_ord_msdu_desc,
				   msdu_descs32);
		DECLARE_FLEX_ARRAY(struct htt_rx_in_ord_msdu_desc_ext,
				   msdu_descs64);
	} __packed;
} __packed;

/* Bit layout of htt_rx_in_ord_ind.info */
#define HTT_RX_IN_ORD_IND_INFO_TID_MASK		0x0000001f
#define HTT_RX_IN_ORD_IND_INFO_TID_LSB		0
#define HTT_RX_IN_ORD_IND_INFO_OFFLOAD_MASK	0x00000020
#define HTT_RX_IN_ORD_IND_INFO_OFFLOAD_LSB	5
#define HTT_RX_IN_ORD_IND_INFO_FRAG_MASK	0x00000040
#define HTT_RX_IN_ORD_IND_INFO_FRAG_LSB		6

/*
 * target -> host test message definition
 *
 * The following field definitions describe the format of the test
 * message sent from the target to the host.
 * The message consists of a 4-octet header, followed by a variable
 * number of 32-bit integer values, followed by a variable number
 * of 8-bit character values.
 *
 * |31                         16|15           8|7            0|
 * |-----------------------------------------------------------|
 * |          num chars          |   num ints   |   msg type   |
 * |-----------------------------------------------------------|
 * |                           int 0                           |
 * |-----------------------------------------------------------|
 * |                           int 1                           |
 * |-----------------------------------------------------------|
 * |                            ...                            |
 * |-----------------------------------------------------------|
 * |    char 3    |    char 2    |    char 1    |    char 0    |
 * |-----------------------------------------------------------|
 * |              |              |      ...     |    char 4    |
 * |-----------------------------------------------------------|
 * - MSG_TYPE
 *   Bits 7:0
 *   Purpose: identifies this as a test message
 *   Value: HTT_MSG_TYPE_TEST
 * - NUM_INTS
 *   Bits 15:8
 *   Purpose: indicate how many 32-bit integers follow the message header
 * - NUM_CHARS
 *   Bits 31:16
 *   Purpose: indicate how many 8-bit characters follow the series of integers
 */
struct htt_rx_test {
	u8 num_ints;
	__le16 num_chars;

	/* payload consists of 2 lists:
	 *  a) num_ints * sizeof(__le32)
	 *  b) num_chars * sizeof(u8) aligned to 4bytes
	 */
	u8 payload[];
} __packed;

static inline __le32 *htt_rx_test_get_ints(struct htt_rx_test *rx_test)
1190
{
1191
return (__le32 *)rx_test->payload;
1192
}
1193
1194
static inline u8 *htt_rx_test_get_chars(struct htt_rx_test *rx_test)
1195
{
1196
return rx_test->payload + (rx_test->num_ints * sizeof(__le32));
1197
}
1198
1199
/*
 * target -> host packet log message
 *
 * The following field definitions describe the format of the packet log
 * message sent from the target to the host.
 * The message consists of a 4-octet header, followed by a variable number
 * of 32-bit character values.
 *
 * |31          24|23          16|15           8|7            0|
 * |-----------------------------------------------------------|
 * |              |              |              |   msg type   |
 * |-----------------------------------------------------------|
 * |                          payload                          |
 * |-----------------------------------------------------------|
 * - MSG_TYPE
 *   Bits 7:0
 *   Purpose: identifies this as a pktlog message
 *   Value: HTT_MSG_TYPE_PACKETLOG
 */
struct htt_pktlog_msg {
	u8 pad[3];
	u8 payload[];
} __packed;

/* Firmware rx reorder debug counters (HTT_DBG_STATS_RX_REORDER) */
struct htt_dbg_stats_rx_reorder_stats {
	/* Non QoS MPDUs received */
	__le32 deliver_non_qos;

	/* MPDUs received in-order */
	__le32 deliver_in_order;

	/* Flush due to reorder timer expired */
	__le32 deliver_flush_timeout;

	/* Flush due to move out of window */
	__le32 deliver_flush_oow;

	/* Flush due to DELBA */
	__le32 deliver_flush_delba;

	/* MPDUs dropped due to FCS error */
	__le32 fcs_error;

	/* MPDUs dropped due to monitor mode non-data packet */
	__le32 mgmt_ctrl;

	/* MPDUs dropped due to invalid peer */
	__le32 invalid_peer;

	/* MPDUs dropped due to duplication (non aggregation) */
	__le32 dup_non_aggr;

	/* MPDUs dropped due to processed before */
	__le32 dup_past;

	/* MPDUs dropped due to duplicate in reorder queue */
	__le32 dup_in_reorder;

	/* Reorder timeout happened */
	__le32 reorder_timeout;

	/* invalid bar ssn */
	__le32 invalid_bar_ssn;

	/* reorder reset due to bar ssn */
	__le32 ssn_reset;
};

/* Firmware WAL (wireless abstraction layer) pdev TX debug counters */
struct htt_dbg_stats_wal_tx_stats {
	/* Num HTT cookies queued to dispatch list */
	__le32 comp_queued;

	/* Num HTT cookies dispatched */
	__le32 comp_delivered;

	/* Num MSDU queued to WAL */
	__le32 msdu_enqued;

	/* Num MPDU queue to WAL */
	__le32 mpdu_enqued;

	/* Num MSDUs dropped by WMM limit */
	__le32 wmm_drop;

	/* Num Local frames queued */
	__le32 local_enqued;

	/* Num Local frames done */
	__le32 local_freed;

	/* Num queued to HW */
	__le32 hw_queued;

	/* Num PPDU reaped from HW */
	__le32 hw_reaped;

	/* Num underruns */
	__le32 underrun;

	/* Num PPDUs cleaned up in TX abort */
	__le32 tx_abort;

	/* Num MPDUs requeued by SW */
	__le32 mpdus_requeued;

	/* excessive retries */
	__le32 tx_ko;

	/* data hw rate code */
	__le32 data_rc;

	/* Scheduler self triggers */
	__le32 self_triggers;

	/* frames dropped due to excessive sw retries */
	__le32 sw_retry_failure;

	/* illegal rate phy errors */
	__le32 illgl_rate_phy_err;

	/* wal pdev continuous xretry */
	__le32 pdev_cont_xretry;

	/* wal pdev tx timeouts */
	__le32 pdev_tx_timeout;

	/* wal pdev resets */
	__le32 pdev_resets;

	__le32 phy_underrun;

	/* MPDU is more than txop limit */
	__le32 txop_ovf;
} __packed;

/* Firmware WAL pdev RX debug counters */
struct htt_dbg_stats_wal_rx_stats {
	/* Cnts any change in ring routing mid-ppdu */
	__le32 mid_ppdu_route_change;

	/* Total number of statuses processed */
	__le32 status_rcvd;

	/* Extra frags on rings 0-3 */
	__le32 r0_frags;
	__le32 r1_frags;
	__le32 r2_frags;
	__le32 r3_frags;

	/* MSDUs / MPDUs delivered to HTT */
	__le32 htt_msdus;
	__le32 htt_mpdus;

	/* MSDUs / MPDUs delivered to local stack */
	__le32 loc_msdus;
	__le32 loc_mpdus;

	/* AMSDUs that have more MSDUs than the status ring size */
	__le32 oversize_amsdu;

	/* Number of PHY errors */
	__le32 phy_errs;

	/* Number of PHY errors drops */
	__le32 phy_err_drop;

	/* Number of mpdu errors - FCS, MIC, ENC etc. */
	__le32 mpdu_errs;
} __packed;

/* Firmware WAL per-peer debug counters (placeholder) */
struct htt_dbg_stats_wal_peer_stats {
	__le32 dummy; /* REMOVE THIS ONCE REAL PEER STAT COUNTERS ARE ADDED */
} __packed;

/* Aggregate pdev tx/rx/peer WAL stats (HTT_DBG_STATS_WAL_PDEV_TXRX) */
struct htt_dbg_stats_wal_pdev_txrx {
	struct htt_dbg_stats_wal_tx_stats tx_stats;
	struct htt_dbg_stats_wal_rx_stats rx_stats;
	struct htt_dbg_stats_wal_peer_stats peer_stats;
} __packed;

/* Rx rate histogram counters (HTT_DBG_STATS_RX_RATE_INFO) */
struct htt_dbg_stats_rx_rate_info {
	__le32 mcs[10];
	__le32 sgi[10];
	__le32 nss[4];
	__le32 stbc[10];
	__le32 bw[3];
	__le32 pream[6];
	__le32 ldpc;
	__le32 txbf;
};

/*
 * htt_dbg_stats_status -
 * present -     The requested stats have been delivered in full.
 *               This indicates that either the stats information was contained
 *               in its entirety within this message, or else this message
 *               completes the delivery of the requested stats info that was
 *               partially delivered through earlier STATS_CONF messages.
 * partial -     The requested stats have been delivered in part.
 *               One or more subsequent STATS_CONF messages with the same
 *               cookie value will be sent to deliver the remainder of the
 *               information.
 * error -       The requested stats could not be delivered, for example due
 *               to a shortage of memory to construct a message holding the
 *               requested stats.
 * invalid -     The requested stat type is either not recognized, or the
 *               target is configured to not gather the stats type in question.
 * - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
 * series_done - This special value indicates that no further stats info
 *               elements are present within a series of stats info elems
 *               (within a stats upload confirmation message).
 */
enum htt_dbg_stats_status {
	HTT_DBG_STATS_STATUS_PRESENT     = 0,
	HTT_DBG_STATS_STATUS_PARTIAL     = 1,
	HTT_DBG_STATS_STATUS_ERROR       = 2,
	HTT_DBG_STATS_STATUS_INVALID     = 3,
	HTT_DBG_STATS_STATUS_SERIES_DONE = 7
};

/*
 * host -> target FRAG DESCRIPTOR/MSDU_EXT DESC bank
 *
 * The following field definitions describe the format of the HTT host
 * to target frag_desc/msdu_ext bank configuration message.
 * The message contains the base address and the min and max id of the
 * MSDU_EXT/FRAG_DESC that will be used by the HTT to map MSDU DESC and
 * MSDU_EXT/FRAG_DESC.
 * HTT will use the id in the HTT descriptor instead of sending the
 * frag_desc_ptr. For QCA988X HW the firmware will use fragment_desc_ptr
 * but in WIFI2.0 the hardware does the mapping/translation.
 *
 * Total banks that can be configured is configured to 16.
 *
 * This should be called before any TX has been initiated by the HTT
 *
 * |31                         16|15           8|7   5|4       0|
 * |------------------------------------------------------------|
 * | DESC_SIZE    |  NUM_BANKS   | RES |SWP|pdev|    msg type   |
 * |------------------------------------------------------------|
 * |                     BANK0_BASE_ADDRESS                     |
 * |------------------------------------------------------------|
 * |                            ...                             |
 * |------------------------------------------------------------|
 * |                    BANK15_BASE_ADDRESS                     |
 * |------------------------------------------------------------|
 * |       BANK0_MAX_ID          |       BANK0_MIN_ID           |
 * |------------------------------------------------------------|
 * |                            ...                             |
 * |------------------------------------------------------------|
 * |       BANK15_MAX_ID         |       BANK15_MIN_ID          |
 * |------------------------------------------------------------|
 * Header fields:
 *  - MSG_TYPE
 *    Bits 7:0
 *    Value: 0x6
 *  - BANKx_BASE_ADDRESS
 *    Bits 31:0
 *    Purpose: Provide a mechanism to specify the base address of the MSDU_EXT
 *         bank physical/bus address.
 *  - BANKx_MIN_ID
 *    Bits 15:0
 *    Purpose: Provide a mechanism to specify the min index that needs to
 *         be mapped.
 *  - BANKx_MAX_ID
 *    Bits 31:16
 *    Purpose: Provide a mechanism to specify the max index that needs to
 *         be mapped.
 */
struct htt_frag_desc_bank_id {
	__le16 bank_min_id;
	__le16 bank_max_id;
} __packed;

/* real is 16 but it wouldn't fit in the max htt message size
 * so we use a conservatively safe value for now
 */
#define HTT_FRAG_DESC_BANK_MAX 4

/* Bit layout of htt_frag_desc_bank_cfg*.info */
#define HTT_FRAG_DESC_BANK_CFG_INFO_PDEV_ID_MASK		0x03
#define HTT_FRAG_DESC_BANK_CFG_INFO_PDEV_ID_LSB			0
#define HTT_FRAG_DESC_BANK_CFG_INFO_SWAP			BIT(2)
#define HTT_FRAG_DESC_BANK_CFG_INFO_Q_STATE_VALID		BIT(3)
#define HTT_FRAG_DESC_BANK_CFG_INFO_Q_STATE_DEPTH_TYPE_MASK	BIT(4)
#define HTT_FRAG_DESC_BANK_CFG_INFO_Q_STATE_DEPTH_TYPE_LSB	4

/* How htt_q_state.count entries are interpreted (bytes vs MSDUs) */
enum htt_q_depth_type {
	HTT_Q_DEPTH_TYPE_BYTES = 0,
	HTT_Q_DEPTH_TYPE_MSDUS = 1,
};

#define HTT_TX_Q_STATE_NUM_PEERS	(TARGET_10_4_NUM_QCACHE_PEERS_MAX + \
					 TARGET_10_4_NUM_VDEVS)
#define HTT_TX_Q_STATE_NUM_TIDS		8
#define HTT_TX_Q_STATE_ENTRY_SIZE	1
#define HTT_TX_Q_STATE_ENTRY_MULTIPLIER	0

/**
 * htt_q_state_conf - part of htt_frag_desc_bank_cfg for host q state config
 *
 * Defines host q state format and behavior. See htt_q_state.
 *
 * @record_size: Defines the size of each host q entry in bytes. In practice
 *	however firmware (at least 10.4.3-00191) ignores this host
 *	configuration value and uses hardcoded value of 1.
 * @record_multiplier: This is valid only when q depth type is MSDUs. It
 *	defines the exponent for the power of 2 multiplication.
 */
struct htt_q_state_conf {
	__le32 paddr;
	__le16 num_peers;
	__le16 num_tids;
	u8 record_size;
	u8 record_multiplier;
	u8 pad[2];
} __packed;

/* Host->target frag desc bank config message (32-bit target addressing) */
struct htt_frag_desc_bank_cfg32 {
	u8 info; /* HTT_FRAG_DESC_BANK_CFG_INFO_ */
	u8 num_banks;
	u8 desc_size;
	__le32 bank_base_addrs[HTT_FRAG_DESC_BANK_MAX];
	struct htt_frag_desc_bank_id bank_id[HTT_FRAG_DESC_BANK_MAX];
	struct htt_q_state_conf q_state;
} __packed;

/* Host->target frag desc bank config message (64-bit target addressing) */
struct htt_frag_desc_bank_cfg64 {
	u8 info; /* HTT_FRAG_DESC_BANK_CFG_INFO_ */
	u8 num_banks;
	u8 desc_size;
	__le64 bank_base_addrs[HTT_FRAG_DESC_BANK_MAX];
	struct htt_frag_desc_bank_id bank_id[HTT_FRAG_DESC_BANK_MAX];
	struct htt_q_state_conf q_state;
} __packed;

#define HTT_TX_Q_STATE_ENTRY_COEFFICIENT	128
#define HTT_TX_Q_STATE_ENTRY_FACTOR_MASK	0x3f
#define HTT_TX_Q_STATE_ENTRY_FACTOR_LSB		0
#define HTT_TX_Q_STATE_ENTRY_EXP_MASK		0xc0
#define HTT_TX_Q_STATE_ENTRY_EXP_LSB		6

/**
 * htt_q_state - shared between host and firmware via DMA
 *
 * This structure is used for the host to expose its software queue state to
 * firmware so that its rate control can schedule fetch requests for optimized
 * performance. This is most notably used for MU-MIMO aggregation when multiple
 * MU clients are connected.
 *
 * @count: Each element defines the host queue depth. When q depth type was
 *	configured as HTT_Q_DEPTH_TYPE_BYTES then each entry is defined as:
 *	FACTOR * 128 * 8^EXP (see HTT_TX_Q_STATE_ENTRY_FACTOR_MASK and
 *	HTT_TX_Q_STATE_ENTRY_EXP_MASK). When q depth type was configured as
 *	HTT_Q_DEPTH_TYPE_MSDUS the number of packets is scaled by 2 **
 *	record_multiplier (see htt_q_state_conf).
 * @map: Used by firmware to quickly check which host queues are not empty;
 *	one bit per peer queue per tid.
 * @seq: Used by firmware to quickly check if the host queues were updated
 *	since it last checked.
 *
 * FIXME: Is the q_state map[] size calculation really correct?
 */
struct htt_q_state {
	u8 count[HTT_TX_Q_STATE_NUM_TIDS][HTT_TX_Q_STATE_NUM_PEERS];
	u32 map[HTT_TX_Q_STATE_NUM_TIDS][(HTT_TX_Q_STATE_NUM_PEERS + 31) / 32];
	__le32 seq;
} __packed;

/* Bit layout of htt_tx_fetch_record.info */
#define HTT_TX_FETCH_RECORD_INFO_PEER_ID_MASK	0x0fff
#define HTT_TX_FETCH_RECORD_INFO_PEER_ID_LSB	0
#define HTT_TX_FETCH_RECORD_INFO_TID_MASK	0xf000
#define HTT_TX_FETCH_RECORD_INFO_TID_LSB	12

/* One fetch request/response entry: how much a peer/tid queue may push */
struct htt_tx_fetch_record {
	__le16 info; /* HTT_TX_FETCH_IND_RECORD_INFO_ */
	__le16 num_msdus;
	__le32 num_bytes;
} __packed;

/* Target->host tx fetch indication (push-pull mode). The records array is
 * followed in the message by num_resp_ids response ids; use
 * ath10k_htt_get_tx_fetch_ind_resp_ids() to reach them.
 */
struct htt_tx_fetch_ind {
	u8 pad0;
	__le16 fetch_seq_num;
	__le32 token;
	__le16 num_resp_ids;
	__le16 num_records;
	union {
		/* ath10k_htt_get_tx_fetch_ind_resp_ids() */
		DECLARE_FLEX_ARRAY(__le32, resp_ids);
		DECLARE_FLEX_ARRAY(struct htt_tx_fetch_record, records);
	} __packed;
} __packed;

/* Return a pointer to the resp_ids list that follows the num_records
 * fetch records inside a tx fetch indication.
 */
static inline void *
ath10k_htt_get_tx_fetch_ind_resp_ids(struct htt_tx_fetch_ind *ind)
{
	return (void *)&ind->records[le16_to_cpu(ind->num_records)];
}

/* Host->target response to a tx fetch indication */
struct htt_tx_fetch_resp {
	u8 pad0;
	__le16 resp_id;
	__le16 fetch_seq_num;
	__le16 num_records;
	__le32 token;
	struct htt_tx_fetch_record records[];
} __packed;

/* Target->host confirmation of processed fetch response ids */
struct htt_tx_fetch_confirm {
	u8 pad0;
	__le16 num_resp_ids;
	__le32 resp_ids[];
} __packed;

/* TX scheduling mode requested by firmware via tx mode switch indication */
enum htt_tx_mode_switch_mode {
	HTT_TX_MODE_SWITCH_PUSH = 0,
	HTT_TX_MODE_SWITCH_PUSH_PULL = 1,
};

/* Bit layout of htt_tx_mode_switch_ind.info0/info1 */
#define HTT_TX_MODE_SWITCH_IND_INFO0_ENABLE		BIT(0)
#define HTT_TX_MODE_SWITCH_IND_INFO0_NUM_RECORDS_MASK	0xfffe
#define HTT_TX_MODE_SWITCH_IND_INFO0_NUM_RECORDS_LSB	1

#define HTT_TX_MODE_SWITCH_IND_INFO1_MODE_MASK		0x0003
#define HTT_TX_MODE_SWITCH_IND_INFO1_MODE_LSB		0
#define HTT_TX_MODE_SWITCH_IND_INFO1_THRESHOLD_MASK	0xfffc
#define HTT_TX_MODE_SWITCH_IND_INFO1_THRESHOLD_LSB	2

/* Bit layout of htt_tx_mode_switch_record.info0 */
#define HTT_TX_MODE_SWITCH_RECORD_INFO0_PEER_ID_MASK	0x0fff
#define HTT_TX_MODE_SWITCH_RECORD_INFO0_PEER_ID_LSB	0
#define HTT_TX_MODE_SWITCH_RECORD_INFO0_TID_MASK	0xf000
#define HTT_TX_MODE_SWITCH_RECORD_INFO0_TID_LSB		12

/* Per peer/tid entry of a tx mode switch indication */
struct htt_tx_mode_switch_record {
	__le16 info0; /* HTT_TX_MODE_SWITCH_RECORD_INFO0_ */
	__le16 num_max_msdus;
} __packed;

/* Target->host indication to switch tx scheduling mode (push vs push-pull) */
struct htt_tx_mode_switch_ind {
	u8 pad0;
	__le16 info0; /* HTT_TX_MODE_SWITCH_IND_INFO0_ */
	__le16 info1; /* HTT_TX_MODE_SWITCH_IND_INFO1_ */
	u8 pad1[2];
	struct htt_tx_mode_switch_record records[];
} __packed;

/* Target->host channel change notification */
struct htt_channel_change {
	u8 pad[3];
	__le32 freq;		/* NOTE(review): presumably MHz — confirm */
	__le32 center_freq1;
	__le32 center_freq2;
	__le32 phymode;
} __packed;

/* Per-peer tx stats record carried in the peer tx stats payload */
struct htt_per_peer_tx_stats_ind {
	__le32	succ_bytes;
	__le32  retry_bytes;
	__le32  failed_bytes;
	u8	ratecode;
	u8	flags;
	__le16	peer_id;
	__le16  succ_pkts;
	__le16	retry_pkts;
	__le16	failed_pkts;
	__le16	tx_duration;
	__le32	reserved1;
	__le32	reserved2;
} __packed;

/* Header of a target->host peer tx stats message; payload format
 * depends on %version.
 */
struct htt_peer_tx_stats {
	u8 num_ppdu;
	u8 ppdu_len;
	u8 version;
	u8 payload[];
} __packed;

/* Byte offset of the tx stats blob within 10.2 firmware messages */
#define	ATH10K_10_2_TX_STATS_OFFSET	136
#define	PEER_STATS_FOR_NO_OF_PPDUS	4

/* Per-peer tx stats layout used by 10.2 firmware: parallel arrays with
 * one entry per PPDU.
 */
struct ath10k_10_2_peer_tx_stats {
	u8 ratecode[PEER_STATS_FOR_NO_OF_PPDUS];
	u8 success_pkts[PEER_STATS_FOR_NO_OF_PPDUS];
	__le16 success_bytes[PEER_STATS_FOR_NO_OF_PPDUS];
	u8 retry_pkts[PEER_STATS_FOR_NO_OF_PPDUS];
	__le16 retry_bytes[PEER_STATS_FOR_NO_OF_PPDUS];
	u8 failed_pkts[PEER_STATS_FOR_NO_OF_PPDUS];
	__le16 failed_bytes[PEER_STATS_FOR_NO_OF_PPDUS];
	u8 flags[PEER_STATS_FOR_NO_OF_PPDUS];
	__le32 tx_duration;
	u8 tx_ppdu_cnt;
	u8 peer_id;
} __packed;

/* Rx packet number, sized per cipher */
union htt_rx_pn_t {
	/* WEP: 24-bit PN */
	u32 pn24;

	/* TKIP or CCMP: 48-bit PN */
	u64 pn48;

	/* WAPI: 128-bit PN */
	u64 pn128[2];
};

/* Host->target HTT command: common header plus per-msg-type body */
struct htt_cmd {
	struct htt_cmd_hdr hdr;
	union {
		struct htt_ver_req ver_req;
		struct htt_mgmt_tx_desc mgmt_tx;
		struct htt_data_tx_desc data_tx;
		struct htt_rx_ring_setup_32 rx_setup_32;
		struct htt_rx_ring_setup_64 rx_setup_64;
		struct htt_stats_req stats_req;
		struct htt_oob_sync_req oob_sync_req;
		struct htt_aggr_conf aggr_conf;
		struct htt_aggr_conf_v2 aggr_conf_v2;
		struct htt_frag_desc_bank_cfg32 frag_desc_bank_cfg32;
		struct htt_frag_desc_bank_cfg64 frag_desc_bank_cfg64;
		struct htt_tx_fetch_resp tx_fetch_resp;
	};
} __packed;

/* Target->host HTT response/indication: common header plus per-type body */
struct htt_resp {
	struct htt_resp_hdr hdr;
	union {
		struct htt_ver_resp ver_resp;
		struct htt_mgmt_tx_completion mgmt_tx_completion;
		struct htt_data_tx_completion data_tx_completion;
		struct htt_rx_indication rx_ind;
		struct htt_rx_indication_hl rx_ind_hl;
		struct htt_rx_fragment_indication rx_frag_ind;
		struct htt_rx_peer_map peer_map;
		struct htt_rx_peer_unmap peer_unmap;
		struct htt_rx_flush rx_flush;
		struct htt_rx_addba rx_addba;
		struct htt_rx_delba rx_delba;
		struct htt_security_indication security_indication;
		struct htt_rc_update rc_update;
		struct htt_rx_test rx_test;
		struct htt_pktlog_msg pktlog_msg;
		struct htt_rx_pn_ind rx_pn_ind;
		struct htt_rx_offload_ind rx_offload_ind;
		struct htt_rx_in_ord_ind rx_in_ord_ind;
		struct htt_tx_fetch_ind tx_fetch_ind;
		struct htt_tx_fetch_confirm tx_fetch_confirm;
		struct htt_tx_mode_switch_ind tx_mode_switch_ind;
		struct htt_channel_change chan_change;
		struct htt_peer_tx_stats peer_tx_stats;
	} __packed;
} __packed;

/*** host side structures follow ***/

/* Host-side record of a completed tx MSDU, queued into txdone_fifo */
struct htt_tx_done {
	u16 msdu_id;
	u16 status;	/* enum htt_tx_compl_state */
	u8 ack_rssi;
};

/* Host-side tx completion classification for htt_tx_done.status */
enum htt_tx_compl_state {
	HTT_TX_COMPL_STATE_NONE,
	HTT_TX_COMPL_STATE_ACK,
	HTT_TX_COMPL_STATE_NOACK,
	HTT_TX_COMPL_STATE_DISCARD,
};

/* Host-side decoded form of a peer map indication */
struct htt_peer_map_event {
	u8 vdev_id;
	u16 peer_id;
	u8 addr[ETH_ALEN];
};

/* Host-side decoded form of a peer unmap indication */
struct htt_peer_unmap_event {
	u16 peer_id;
};

/* DMA-mapped per-MSDU tx buffer: frags + HTC/HTT headers + tx desc
 * (32-bit addressing variant)
 */
struct ath10k_htt_txbuf_32 {
	struct htt_data_tx_desc_frag frags[2];
	struct ath10k_htc_hdr htc_hdr;
	struct htt_cmd_hdr cmd_hdr;
	struct htt_data_tx_desc cmd_tx;
} __packed __aligned(4);

/* DMA-mapped per-MSDU tx buffer (64-bit addressing variant) */
struct ath10k_htt_txbuf_64 {
	struct htt_data_tx_desc_frag frags[2];
	struct ath10k_htc_hdr htc_hdr;
	struct htt_cmd_hdr cmd_hdr;
	struct htt_data_tx_desc_64 cmd_tx;
} __packed __aligned(4);

/* Per-device HTT state: negotiated versions, the shared rx ring,
 * tx bookkeeping and the generation-specific tx/rx ops.
 */
struct ath10k_htt {
	struct ath10k *ar;
	enum ath10k_htc_ep_id eid;

	struct sk_buff_head rx_indication_head;

	u8 target_version_major;
	u8 target_version_minor;
	struct completion target_version_received;
	u8 max_num_amsdu;
	u8 max_num_ampdu;

	const enum htt_t2h_msg_type *t2h_msg_types;
	u32 t2h_msg_types_max;

	struct {
		/*
		 * Ring of network buffer objects - This ring is
		 * used exclusively by the host SW. This ring
		 * mirrors the dev_addrs_ring that is shared
		 * between the host SW and the MAC HW. The host SW
		 * uses this netbufs ring to locate the network
		 * buffer objects whose data buffers the HW has
		 * filled.
		 */
		struct sk_buff **netbufs_ring;

		/* This is used only with firmware supporting IN_ORD_IND.
		 *
		 * With Full Rx Reorder the HTT Rx Ring is more of a temporary
		 * buffer ring from which buffer addresses are copied by the
		 * firmware to MAC Rx ring. Firmware then delivers IN_ORD_IND
		 * pointing to specific (re-ordered) buffers.
		 *
		 * FIXME: With kernel generic hashing functions there's a lot
		 * of hash collisions for sk_buffs.
		 */
		bool in_ord_rx;
		DECLARE_HASHTABLE(skb_table, 4);

		/*
		 * Ring of buffer addresses -
		 * This ring holds the "physical" device address of the
		 * rx buffers the host SW provides for the MAC HW to
		 * fill.
		 */
		union {
			__le64 *paddrs_ring_64;
			__le32 *paddrs_ring_32;
		};

		/*
		 * Base address of ring, as a "physical" device address
		 * rather than a CPU address.
		 */
		dma_addr_t base_paddr;

		/* how many elems in the ring (power of 2) */
		int size;

		/* size - 1 */
		unsigned int size_mask;

		/* how many rx buffers to keep in the ring */
		int fill_level;

		/* how many rx buffers (full+empty) are in the ring */
		int fill_cnt;

		/*
		 * alloc_idx - where HTT SW has deposited empty buffers
		 * This is allocated in consistent mem, so that the FW can
		 * read this variable, and program the HW's FW_IDX reg with
		 * the value of this shadow register.
		 */
		struct {
			__le32 *vaddr;
			dma_addr_t paddr;
		} alloc_idx;

		/* where HTT SW has processed bufs filled by rx MAC DMA */
		struct {
			unsigned int msdu_payld;
		} sw_rd_idx;

		/*
		 * refill_retry_timer - timer triggered when the ring is
		 * not refilled to the level expected
		 */
		struct timer_list refill_retry_timer;

		/* Protects access to all rx ring buffer state variables */
		spinlock_t lock;
	} rx_ring;

	unsigned int prefetch_len;

	/* Protects access to pending_tx, num_pending_tx */
	spinlock_t tx_lock;
	int max_num_pending_tx;
	int num_pending_tx;
	int num_pending_mgmt_tx;
	struct idr pending_tx;
	wait_queue_head_t empty_tx_wq;

	/* FIFO for storing tx done status {ack, no-ack, discard} and msdu id */
	DECLARE_KFIFO_PTR(txdone_fifo, struct htt_tx_done);

	/* set if host-fw communication goes haywire
	 * used to avoid further failures
	 */
	bool rx_confused;
	atomic_t num_mpdus_ready;

	/* This is used to group tx/rx completions separately and process them
	 * in batches to reduce cache stalls
	 */
	struct sk_buff_head rx_msdus_q;
	struct sk_buff_head rx_in_ord_compl_q;
	struct sk_buff_head tx_fetch_ind_q;

	/* rx_status template */
	struct ieee80211_rx_status rx_status;

	struct {
		dma_addr_t paddr;
		union {
			struct htt_msdu_ext_desc *vaddr_desc_32;
			struct htt_msdu_ext_desc_64 *vaddr_desc_64;
		};
		size_t size;
	} frag_desc;

	struct {
		dma_addr_t paddr;
		union {
			struct ath10k_htt_txbuf_32 *vaddr_txbuff_32;
			struct ath10k_htt_txbuf_64 *vaddr_txbuff_64;
		};
		size_t size;
	} txbuf;

	struct {
		bool enabled;
		struct htt_q_state *vaddr;
		dma_addr_t paddr;
		u16 num_push_allowed;
		u16 num_peers;
		u16 num_tids;
		enum htt_tx_mode_switch_mode mode;
		enum htt_q_depth_type type;
	} tx_q_state;

	bool tx_mem_allocated;
	const struct ath10k_htt_tx_ops *tx_ops;
	const struct ath10k_htt_rx_ops *rx_ops;
	bool disable_tx_comp;
	bool bundle_tx;
	struct sk_buff_head tx_req_head;
	struct sk_buff_head tx_complete_head;
};

/* Generation-specific TX operations; optional ops may be NULL and are
 * guarded by the ath10k_htt_*() inline wrappers below.
 */
struct ath10k_htt_tx_ops {
	int (*htt_send_rx_ring_cfg)(struct ath10k_htt *htt);
	int (*htt_send_frag_desc_bank_cfg)(struct ath10k_htt *htt);
	int (*htt_alloc_frag_desc)(struct ath10k_htt *htt);
	void (*htt_free_frag_desc)(struct ath10k_htt *htt);
	int (*htt_tx)(struct ath10k_htt *htt, enum ath10k_hw_txrx_mode txmode,
		      struct sk_buff *msdu);
	int (*htt_alloc_txbuff)(struct ath10k_htt *htt);
	void (*htt_free_txbuff)(struct ath10k_htt *htt);
	int (*htt_h2t_aggr_cfg_msg)(struct ath10k_htt *htt,
				    u8 max_subfrms_ampdu,
				    u8 max_subfrms_amsdu);
	void (*htt_flush_tx)(struct ath10k_htt *htt);
};

static inline int ath10k_htt_send_rx_ring_cfg(struct ath10k_htt *htt)
1965
{
1966
if (!htt->tx_ops->htt_send_rx_ring_cfg)
1967
return -EOPNOTSUPP;
1968
1969
return htt->tx_ops->htt_send_rx_ring_cfg(htt);
1970
}
1971
1972
static inline int ath10k_htt_send_frag_desc_bank_cfg(struct ath10k_htt *htt)
1973
{
1974
if (!htt->tx_ops->htt_send_frag_desc_bank_cfg)
1975
return -EOPNOTSUPP;
1976
1977
return htt->tx_ops->htt_send_frag_desc_bank_cfg(htt);
1978
}
1979
1980
static inline int ath10k_htt_alloc_frag_desc(struct ath10k_htt *htt)
1981
{
1982
if (!htt->tx_ops->htt_alloc_frag_desc)
1983
return -EOPNOTSUPP;
1984
1985
return htt->tx_ops->htt_alloc_frag_desc(htt);
1986
}
1987
1988
static inline void ath10k_htt_free_frag_desc(struct ath10k_htt *htt)
1989
{
1990
if (htt->tx_ops->htt_free_frag_desc)
1991
htt->tx_ops->htt_free_frag_desc(htt);
1992
}
1993
1994
static inline int ath10k_htt_tx(struct ath10k_htt *htt,
1995
enum ath10k_hw_txrx_mode txmode,
1996
struct sk_buff *msdu)
1997
{
1998
return htt->tx_ops->htt_tx(htt, txmode, msdu);
1999
}
2000
2001
static inline void ath10k_htt_flush_tx(struct ath10k_htt *htt)
2002
{
2003
if (htt->tx_ops->htt_flush_tx)
2004
htt->tx_ops->htt_flush_tx(htt);
2005
}
2006
2007
static inline int ath10k_htt_alloc_txbuff(struct ath10k_htt *htt)
2008
{
2009
if (!htt->tx_ops->htt_alloc_txbuff)
2010
return -EOPNOTSUPP;
2011
2012
return htt->tx_ops->htt_alloc_txbuff(htt);
2013
}
2014
2015
static inline void ath10k_htt_free_txbuff(struct ath10k_htt *htt)
2016
{
2017
if (htt->tx_ops->htt_free_txbuff)
2018
htt->tx_ops->htt_free_txbuff(htt);
2019
}
2020
2021
static inline int ath10k_htt_h2t_aggr_cfg_msg(struct ath10k_htt *htt,
2022
u8 max_subfrms_ampdu,
2023
u8 max_subfrms_amsdu)
2024
2025
{
2026
if (!htt->tx_ops->htt_h2t_aggr_cfg_msg)
2027
return -EOPNOTSUPP;
2028
2029
return htt->tx_ops->htt_h2t_aggr_cfg_msg(htt,
2030
max_subfrms_ampdu,
2031
max_subfrms_amsdu);
2032
}
2033
2034
/* Generation-specific RX ring operations; optional ops may be NULL and
 * are guarded by the inline wrappers below.
 */
struct ath10k_htt_rx_ops {
	size_t (*htt_get_rx_ring_size)(struct ath10k_htt *htt);
	void (*htt_config_paddrs_ring)(struct ath10k_htt *htt, void *vaddr);
	void (*htt_set_paddrs_ring)(struct ath10k_htt *htt, dma_addr_t paddr,
				    int idx);
	void* (*htt_get_vaddr_ring)(struct ath10k_htt *htt);
	void (*htt_reset_paddrs_ring)(struct ath10k_htt *htt, int idx);
	bool (*htt_rx_proc_rx_frag_ind)(struct ath10k_htt *htt,
					struct htt_rx_fragment_indication *rx,
					struct sk_buff *skb);
};

static inline size_t ath10k_htt_get_rx_ring_size(struct ath10k_htt *htt)
2047
{
2048
if (!htt->rx_ops->htt_get_rx_ring_size)
2049
return 0;
2050
2051
return htt->rx_ops->htt_get_rx_ring_size(htt);
2052
}
2053
2054
static inline void ath10k_htt_config_paddrs_ring(struct ath10k_htt *htt,
2055
void *vaddr)
2056
{
2057
if (htt->rx_ops->htt_config_paddrs_ring)
2058
htt->rx_ops->htt_config_paddrs_ring(htt, vaddr);
2059
}
2060
2061
static inline void ath10k_htt_set_paddrs_ring(struct ath10k_htt *htt,
2062
dma_addr_t paddr,
2063
int idx)
2064
{
2065
if (htt->rx_ops->htt_set_paddrs_ring)
2066
htt->rx_ops->htt_set_paddrs_ring(htt, paddr, idx);
2067
}
2068
2069
static inline void *ath10k_htt_get_vaddr_ring(struct ath10k_htt *htt)
2070
{
2071
if (!htt->rx_ops->htt_get_vaddr_ring)
2072
return NULL;
2073
2074
return htt->rx_ops->htt_get_vaddr_ring(htt);
2075
}
2076
2077
static inline void ath10k_htt_reset_paddrs_ring(struct ath10k_htt *htt, int idx)
2078
{
2079
if (htt->rx_ops->htt_reset_paddrs_ring)
2080
htt->rx_ops->htt_reset_paddrs_ring(htt, idx);
2081
}
2082
2083
static inline bool ath10k_htt_rx_proc_rx_frag_ind(struct ath10k_htt *htt,
2084
struct htt_rx_fragment_indication *rx,
2085
struct sk_buff *skb)
2086
{
2087
if (!htt->rx_ops->htt_rx_proc_rx_frag_ind)
2088
return true;
2089
2090
return htt->rx_ops->htt_rx_proc_rx_frag_ind(htt, rx, skb);
2091
}
2092
2093
/* the driver strongly assumes that the rx header status be 64 bytes long,
 * so all possible rx_desc structures must respect this assumption.
 */
#define RX_HTT_HDR_STATUS_LEN 64

/* The rx descriptor structure layout is programmed via rx ring setup
 * so that FW knows how to transfer the rx descriptor to the host.
 * Unfortunately, though, QCA6174's firmware doesn't currently behave correctly
 * when modifying the structure layout of the rx descriptor beyond what it
 * expects (even if it is correctly programmed during the rx ring setup).
 * Therefore we must keep two different memory layouts, abstract the rx
 * descriptor representation and use ath10k_rx_desc_ops for correctly
 * accessing rx descriptor data.
 */
/* base struct used for abstracting the rx descriptor representation */
struct htt_rx_desc {
	union {
		/* This field is filled on the host using the msdu buffer
		 * from htt_rx_indication
		 */
		struct fw_rx_desc_base fw_desc;
		u32 pad;
	} __packed;
} __packed;
/* rx descriptor for wcn3990 and possibly extensible for newer cards
 * Buffers like this are placed on the rx ring.
 */
struct htt_rx_desc_v2 {
	struct htt_rx_desc base;
	struct {
		struct rx_attention attention;
		struct rx_frag_info frag_info;
		struct rx_mpdu_start mpdu_start;
		struct rx_msdu_start msdu_start;
		struct rx_msdu_end msdu_end;
		struct rx_mpdu_end mpdu_end;
		struct rx_ppdu_start ppdu_start;
		struct rx_ppdu_end ppdu_end;
	} __packed;
	/* must stay RX_HTT_HDR_STATUS_LEN (64) bytes, see note above */
	u8 rx_hdr_status[RX_HTT_HDR_STATUS_LEN];
	u8 msdu_payload[];
};
/* QCA6174, QCA988x, QCA99x0 dedicated rx descriptor to make sure their firmware
 * works correctly. We keep a single rx descriptor for all these three
 * families of cards because from tests it seems to be the most stable solution,
 * e.g. having a rx descriptor only for QCA6174 seldom caused firmware crashes
 * during some tests.
 * Buffers like this are placed on the rx ring.
 */
struct htt_rx_desc_v1 {
	struct htt_rx_desc base;
	struct {
		struct rx_attention attention;
		struct rx_frag_info_v1 frag_info;
		struct rx_mpdu_start mpdu_start;
		struct rx_msdu_start_v1 msdu_start;
		struct rx_msdu_end_v1 msdu_end;
		struct rx_mpdu_end mpdu_end;
		struct rx_ppdu_start ppdu_start;
		struct rx_ppdu_end_v1 ppdu_end;
	} __packed;
	/* must stay RX_HTT_HDR_STATUS_LEN (64) bytes, see note above */
	u8 rx_hdr_status[RX_HTT_HDR_STATUS_LEN];
	u8 msdu_payload[];
};
/* rx_desc abstraction */
struct ath10k_htt_rx_desc_ops {
	/* These fields are mandatory, they must be specified in any instance */

	/* sizeof() of the rx_desc structure used by this hw */
	size_t rx_desc_size;

	/* offset of msdu_payload inside the rx_desc structure used by this hw */
	size_t rx_desc_msdu_payload_offset;

	/* These fields are optional.
	 * When a field is not provided the default implementation gets used
	 * (see the ath10k_rx_desc_* operations below for more info about the defaults)
	 */
	bool (*rx_desc_get_msdu_limit_error)(struct htt_rx_desc *rxd);
	int (*rx_desc_get_l3_pad_bytes)(struct htt_rx_desc *rxd);

	/* Safely cast from a void* buffer containing an rx descriptor
	 * to the proper rx_desc structure
	 */
	struct htt_rx_desc *(*rx_desc_from_raw_buffer)(void *buff);

	void (*rx_desc_get_offsets)(struct htt_rx_ring_rx_desc_offsets *offs);
	struct rx_attention *(*rx_desc_get_attention)(struct htt_rx_desc *rxd);
	struct rx_frag_info_common *(*rx_desc_get_frag_info)(struct htt_rx_desc *rxd);
	struct rx_mpdu_start *(*rx_desc_get_mpdu_start)(struct htt_rx_desc *rxd);
	struct rx_mpdu_end *(*rx_desc_get_mpdu_end)(struct htt_rx_desc *rxd);
	struct rx_msdu_start_common *(*rx_desc_get_msdu_start)(struct htt_rx_desc *rxd);
	struct rx_msdu_end_common *(*rx_desc_get_msdu_end)(struct htt_rx_desc *rxd);
	struct rx_ppdu_start *(*rx_desc_get_ppdu_start)(struct htt_rx_desc *rxd);
	struct rx_ppdu_end_common *(*rx_desc_get_ppdu_end)(struct htt_rx_desc *rxd);
	u8 *(*rx_desc_get_rx_hdr_status)(struct htt_rx_desc *rxd);
	u8 *(*rx_desc_get_msdu_payload)(struct htt_rx_desc *rxd);
};
/* Per-hardware-family instances of the rx descriptor accessors */
extern const struct ath10k_htt_rx_desc_ops qca988x_rx_desc_ops;
extern const struct ath10k_htt_rx_desc_ops qca99x0_rx_desc_ops;
extern const struct ath10k_htt_rx_desc_ops wcn3990_rx_desc_ops;
static inline int
2201
ath10k_htt_rx_desc_get_l3_pad_bytes(struct ath10k_hw_params *hw, struct htt_rx_desc *rxd)
2202
{
2203
if (hw->rx_desc_ops->rx_desc_get_l3_pad_bytes)
2204
return hw->rx_desc_ops->rx_desc_get_l3_pad_bytes(rxd);
2205
return 0;
2206
}
2207
2208
static inline bool
2209
ath10k_htt_rx_desc_msdu_limit_error(struct ath10k_hw_params *hw, struct htt_rx_desc *rxd)
2210
{
2211
if (hw->rx_desc_ops->rx_desc_get_msdu_limit_error)
2212
return hw->rx_desc_ops->rx_desc_get_msdu_limit_error(rxd);
2213
return false;
2214
}
2215
2216
/* The default implementation of all these getters is using the old rx_desc,
2217
* so that it is easier to define the ath10k_htt_rx_desc_ops instances.
2218
* But probably, if new wireless cards must be supported, it would be better
2219
* to switch the default implementation to the new rx_desc, since this would
2220
* make the extension easier .
2221
*/
2222
static inline struct htt_rx_desc *
2223
ath10k_htt_rx_desc_from_raw_buffer(struct ath10k_hw_params *hw, void *buff)
2224
{
2225
if (hw->rx_desc_ops->rx_desc_from_raw_buffer)
2226
return hw->rx_desc_ops->rx_desc_from_raw_buffer(buff);
2227
return &((struct htt_rx_desc_v1 *)buff)->base;
2228
}
2229
2230
/* Fill @off with the section offsets of the hw's rx descriptor layout.
 * Without a hw-specific getter, fall back to the legacy v1 layout,
 * expressed in 32-bit words (offsetof / 4).
 */
static inline void
ath10k_htt_rx_desc_get_offsets(struct ath10k_hw_params *hw,
			       struct htt_rx_ring_rx_desc_offsets *off)
{
	if (hw->rx_desc_ops->rx_desc_get_offsets) {
		hw->rx_desc_ops->rx_desc_get_offsets(off);
	} else {
#define desc_offset(x) (offsetof(struct htt_rx_desc_v1, x) / 4)
		off->mac80211_hdr_offset = __cpu_to_le16(desc_offset(rx_hdr_status));
		off->msdu_payload_offset = __cpu_to_le16(desc_offset(msdu_payload));
		off->ppdu_start_offset = __cpu_to_le16(desc_offset(ppdu_start));
		off->ppdu_end_offset = __cpu_to_le16(desc_offset(ppdu_end));
		off->mpdu_start_offset = __cpu_to_le16(desc_offset(mpdu_start));
		off->mpdu_end_offset = __cpu_to_le16(desc_offset(mpdu_end));
		off->msdu_start_offset = __cpu_to_le16(desc_offset(msdu_start));
		off->msdu_end_offset = __cpu_to_le16(desc_offset(msdu_end));
		off->rx_attention_offset = __cpu_to_le16(desc_offset(attention));
		off->frag_info_offset = __cpu_to_le16(desc_offset(frag_info));
#undef desc_offset
	}
}
static inline struct rx_attention *
2253
ath10k_htt_rx_desc_get_attention(struct ath10k_hw_params *hw, struct htt_rx_desc *rxd)
2254
{
2255
struct htt_rx_desc_v1 *rx_desc;
2256
2257
if (hw->rx_desc_ops->rx_desc_get_attention)
2258
return hw->rx_desc_ops->rx_desc_get_attention(rxd);
2259
2260
rx_desc = container_of(rxd, struct htt_rx_desc_v1, base);
2261
return &rx_desc->attention;
2262
}
2263
2264
static inline struct rx_frag_info_common *
2265
ath10k_htt_rx_desc_get_frag_info(struct ath10k_hw_params *hw, struct htt_rx_desc *rxd)
2266
{
2267
struct htt_rx_desc_v1 *rx_desc;
2268
2269
if (hw->rx_desc_ops->rx_desc_get_frag_info)
2270
return hw->rx_desc_ops->rx_desc_get_frag_info(rxd);
2271
2272
rx_desc = container_of(rxd, struct htt_rx_desc_v1, base);
2273
return &rx_desc->frag_info.common;
2274
}
2275
2276
static inline struct rx_mpdu_start *
2277
ath10k_htt_rx_desc_get_mpdu_start(struct ath10k_hw_params *hw, struct htt_rx_desc *rxd)
2278
{
2279
struct htt_rx_desc_v1 *rx_desc;
2280
2281
if (hw->rx_desc_ops->rx_desc_get_mpdu_start)
2282
return hw->rx_desc_ops->rx_desc_get_mpdu_start(rxd);
2283
2284
rx_desc = container_of(rxd, struct htt_rx_desc_v1, base);
2285
return &rx_desc->mpdu_start;
2286
}
2287
2288
static inline struct rx_mpdu_end *
2289
ath10k_htt_rx_desc_get_mpdu_end(struct ath10k_hw_params *hw, struct htt_rx_desc *rxd)
2290
{
2291
struct htt_rx_desc_v1 *rx_desc;
2292
2293
if (hw->rx_desc_ops->rx_desc_get_mpdu_end)
2294
return hw->rx_desc_ops->rx_desc_get_mpdu_end(rxd);
2295
2296
rx_desc = container_of(rxd, struct htt_rx_desc_v1, base);
2297
return &rx_desc->mpdu_end;
2298
}
2299
2300
static inline struct rx_msdu_start_common *
2301
ath10k_htt_rx_desc_get_msdu_start(struct ath10k_hw_params *hw, struct htt_rx_desc *rxd)
2302
{
2303
struct htt_rx_desc_v1 *rx_desc;
2304
2305
if (hw->rx_desc_ops->rx_desc_get_msdu_start)
2306
return hw->rx_desc_ops->rx_desc_get_msdu_start(rxd);
2307
2308
rx_desc = container_of(rxd, struct htt_rx_desc_v1, base);
2309
return &rx_desc->msdu_start.common;
2310
}
2311
2312
static inline struct rx_msdu_end_common *
2313
ath10k_htt_rx_desc_get_msdu_end(struct ath10k_hw_params *hw, struct htt_rx_desc *rxd)
2314
{
2315
struct htt_rx_desc_v1 *rx_desc;
2316
2317
if (hw->rx_desc_ops->rx_desc_get_msdu_end)
2318
return hw->rx_desc_ops->rx_desc_get_msdu_end(rxd);
2319
2320
rx_desc = container_of(rxd, struct htt_rx_desc_v1, base);
2321
return &rx_desc->msdu_end.common;
2322
}
2323
2324
static inline struct rx_ppdu_start *
2325
ath10k_htt_rx_desc_get_ppdu_start(struct ath10k_hw_params *hw, struct htt_rx_desc *rxd)
2326
{
2327
struct htt_rx_desc_v1 *rx_desc;
2328
2329
if (hw->rx_desc_ops->rx_desc_get_ppdu_start)
2330
return hw->rx_desc_ops->rx_desc_get_ppdu_start(rxd);
2331
2332
rx_desc = container_of(rxd, struct htt_rx_desc_v1, base);
2333
return &rx_desc->ppdu_start;
2334
}
2335
2336
static inline struct rx_ppdu_end_common *
2337
ath10k_htt_rx_desc_get_ppdu_end(struct ath10k_hw_params *hw, struct htt_rx_desc *rxd)
2338
{
2339
struct htt_rx_desc_v1 *rx_desc;
2340
2341
if (hw->rx_desc_ops->rx_desc_get_ppdu_end)
2342
return hw->rx_desc_ops->rx_desc_get_ppdu_end(rxd);
2343
2344
rx_desc = container_of(rxd, struct htt_rx_desc_v1, base);
2345
return &rx_desc->ppdu_end.common;
2346
}
2347
2348
static inline u8 *
2349
ath10k_htt_rx_desc_get_rx_hdr_status(struct ath10k_hw_params *hw, struct htt_rx_desc *rxd)
2350
{
2351
struct htt_rx_desc_v1 *rx_desc;
2352
2353
if (hw->rx_desc_ops->rx_desc_get_rx_hdr_status)
2354
return hw->rx_desc_ops->rx_desc_get_rx_hdr_status(rxd);
2355
2356
rx_desc = container_of(rxd, struct htt_rx_desc_v1, base);
2357
return rx_desc->rx_hdr_status;
2358
}
2359
2360
static inline u8 *
2361
ath10k_htt_rx_desc_get_msdu_payload(struct ath10k_hw_params *hw, struct htt_rx_desc *rxd)
2362
{
2363
struct htt_rx_desc_v1 *rx_desc;
2364
2365
if (hw->rx_desc_ops->rx_desc_get_msdu_payload)
2366
return hw->rx_desc_ops->rx_desc_get_msdu_payload(rxd);
2367
2368
rx_desc = container_of(rxd, struct htt_rx_desc_v1, base);
2369
return rx_desc->msdu_payload;
2370
}
2371
2372
/* Bit layout of the 'info' word in struct htt_rx_desc_base_hl below */
#define HTT_RX_DESC_HL_INFO_SEQ_NUM_MASK 0x00000fff
#define HTT_RX_DESC_HL_INFO_SEQ_NUM_LSB 0
#define HTT_RX_DESC_HL_INFO_ENCRYPTED_MASK 0x00001000
#define HTT_RX_DESC_HL_INFO_ENCRYPTED_LSB 12
#define HTT_RX_DESC_HL_INFO_CHAN_INFO_PRESENT_MASK 0x00002000
#define HTT_RX_DESC_HL_INFO_CHAN_INFO_PRESENT_LSB 13
#define HTT_RX_DESC_HL_INFO_MCAST_BCAST_MASK 0x00010000
#define HTT_RX_DESC_HL_INFO_MCAST_BCAST_LSB 16
#define HTT_RX_DESC_HL_INFO_KEY_ID_OCT_MASK 0x01fe0000
#define HTT_RX_DESC_HL_INFO_KEY_ID_OCT_LSB 17

/* High-latency rx descriptor: a single info word decoded with the
 * HTT_RX_DESC_HL_INFO_* masks above.
 */
struct htt_rx_desc_base_hl {
	__le32 info; /* HTT_RX_DESC_HL_INFO_ */
};
/* Channel info appended to HL rx descriptors when
 * HTT_RX_DESC_HL_INFO_CHAN_INFO_PRESENT is set — TODO confirm against
 * the t2h handler.
 */
struct htt_rx_chan_info {
	__le16 primary_chan_center_freq_mhz;
	__le16 contig_chan1_center_freq_mhz;
	__le16 contig_chan2_center_freq_mhz;
	u8 phy_mode;
	u8 reserved;
} __packed;
#define HTT_RX_DESC_ALIGN 8

#define HTT_MAC_ADDR_LEN 6

/*
 * FIX THIS
 * Should be: sizeof(struct htt_host_rx_desc) + max rx MSDU size,
 * rounded up to a cache line size.
 */
#define HTT_RX_BUF_SIZE 2048
/* The HTT_RX_MSDU_SIZE can't be statically computed anymore,
2407
* because it depends on the underlying device rx_desc representation
2408
*/
2409
static inline int ath10k_htt_rx_msdu_size(struct ath10k_hw_params *hw)
2410
{
2411
return HTT_RX_BUF_SIZE - (int)hw->rx_desc_ops->rx_desc_size;
2412
}
2413
2414
/* Refill a bunch of RX buffers for each refill round so that FW/HW can handle
 * aggregated traffic more nicely.
 */
#define ATH10K_HTT_MAX_NUM_REFILL 100

/*
 * DMA_MAP expects the buffer to be an integral number of cache lines.
 * Rather than checking the actual cache line size, this code makes a
 * conservative estimate of what the cache line size could be.
 */
#define HTT_LOG2_MAX_CACHE_LINE_SIZE 7	/* 2^7 = 128 */
#define HTT_MAX_CACHE_LINE_SIZE_MASK ((1 << HTT_LOG2_MAX_CACHE_LINE_SIZE) - 1)

/* These values are default in most firmware revisions and apparently are a
 * sweet spot performance wise.
 */
#define ATH10K_HTT_MAX_NUM_AMSDU_DEFAULT 3
#define ATH10K_HTT_MAX_NUM_AMPDU_DEFAULT 64
/* HTT layer setup/teardown */
int ath10k_htt_connect(struct ath10k_htt *htt);
int ath10k_htt_init(struct ath10k *ar);
int ath10k_htt_setup(struct ath10k_htt *htt);

/* tx path lifecycle */
int ath10k_htt_tx_start(struct ath10k_htt *htt);
void ath10k_htt_tx_stop(struct ath10k_htt *htt);
void ath10k_htt_tx_destroy(struct ath10k_htt *htt);
void ath10k_htt_tx_free(struct ath10k_htt *htt);

/* rx ring lifecycle */
int ath10k_htt_rx_alloc(struct ath10k_htt *htt);
int ath10k_htt_rx_ring_refill(struct ath10k *ar);
void ath10k_htt_rx_free(struct ath10k_htt *htt);

/* HTC/t2h message plumbing */
void ath10k_htt_htc_tx_complete(struct ath10k *ar, struct sk_buff *skb);
void ath10k_htt_htc_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb);
bool ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb);
int ath10k_htt_h2t_ver_req_msg(struct ath10k_htt *htt);
int ath10k_htt_h2t_stats_req(struct ath10k_htt *htt, u32 mask, u32 reset_mask,
			     u64 cookie);
void ath10k_htt_hif_tx_complete(struct ath10k *ar, struct sk_buff *skb);
int ath10k_htt_tx_fetch_resp(struct ath10k *ar,
			     __le32 token,
			     __le16 fetch_seq_num,
			     struct htt_tx_fetch_record *records,
			     size_t num_records);
void ath10k_htt_op_ep_tx_credits(struct ath10k *ar);

/* tx queue management */
void ath10k_htt_tx_txq_update(struct ieee80211_hw *hw,
			      struct ieee80211_txq *txq);
void ath10k_htt_tx_txq_recalc(struct ieee80211_hw *hw,
			      struct ieee80211_txq *txq);
void ath10k_htt_tx_txq_sync(struct ath10k *ar);
void ath10k_htt_tx_dec_pending(struct ath10k_htt *htt);
int ath10k_htt_tx_inc_pending(struct ath10k_htt *htt);
void ath10k_htt_tx_mgmt_dec_pending(struct ath10k_htt *htt);
int ath10k_htt_tx_mgmt_inc_pending(struct ath10k_htt *htt, bool is_mgmt,
				   bool is_presp);

/* msdu id accounting and tx entry points */
int ath10k_htt_tx_alloc_msdu_id(struct ath10k_htt *htt, struct sk_buff *skb);
void ath10k_htt_tx_free_msdu_id(struct ath10k_htt *htt, u16 msdu_id);
int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *msdu);
void ath10k_htt_rx_pktlog_completion_handler(struct ath10k *ar,
					     struct sk_buff *skb);
int ath10k_htt_txrx_compl_task(struct ath10k *ar, int budget);
int ath10k_htt_rx_hl_indication(struct ath10k *ar, int budget);
void ath10k_htt_set_tx_ops(struct ath10k_htt *htt);
void ath10k_htt_set_rx_ops(struct ath10k_htt *htt);
#endif
2481
2482