// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
 * Copyright (C) 2018 Felix Fietkau <[email protected]>
 */
#include "mt76.h"
/* Reorder-buffer flush timeout (in jiffies) for a given TID.
 *
 * Voice traffic (AC_VO) currently runs without aggregation, so it needs
 * no special handling here; AC_BE/AC_BK use TIDs 0-3.  TIDs >= 4
 * (AC_VI and up) get the shorter timeout, best effort/background the
 * longer one.
 */
static unsigned long mt76_aggr_tid_to_timeo(u8 tidno)
{
	if (tidno >= 4)
		return HZ / 25;

	return HZ / 10;
}
static void
16
mt76_aggr_release(struct mt76_rx_tid *tid, struct sk_buff_head *frames, int idx)
17
{
18
struct sk_buff *skb;
19
20
tid->head = ieee80211_sn_inc(tid->head);
21
22
skb = tid->reorder_buf[idx];
23
if (!skb)
24
return;
25
26
tid->reorder_buf[idx] = NULL;
27
tid->nframes--;
28
__skb_queue_tail(frames, skb);
29
}
30
31
static void
32
mt76_rx_aggr_release_frames(struct mt76_rx_tid *tid,
33
struct sk_buff_head *frames,
34
u16 head)
35
{
36
int idx;
37
38
while (ieee80211_sn_less(tid->head, head)) {
39
idx = tid->head % tid->size;
40
mt76_aggr_release(tid, frames, idx);
41
}
42
}
43
44
static void
45
mt76_rx_aggr_release_head(struct mt76_rx_tid *tid, struct sk_buff_head *frames)
46
{
47
int idx = tid->head % tid->size;
48
49
while (tid->reorder_buf[idx]) {
50
mt76_aggr_release(tid, frames, idx);
51
idx = tid->head % tid->size;
52
}
53
}
54
55
/* Periodic reorder-timeout check: release in-order frames, then force
 * the window past any buffered frame whose reorder timeout has expired.
 *
 * Caller must hold tid->lock.
 */
static void
mt76_rx_aggr_check_release(struct mt76_rx_tid *tid, struct sk_buff_head *frames)
{
	struct mt76_rx_status *status;
	struct sk_buff *skb;
	int start, idx, nframes;

	/* Nothing buffered, nothing to do */
	if (!tid->nframes)
		return;

	/* First flush whatever is already in order at the head */
	mt76_rx_aggr_release_head(tid, frames);

	start = tid->head % tid->size;
	nframes = tid->nframes;

	/* Walk the rest of the ring (wrapping, stopping once we either come
	 * back around to the head slot or have visited every buffered frame)
	 * looking for frames that have sat past their reorder timeout.
	 */
	for (idx = (tid->head + 1) % tid->size;
	     idx != start && nframes;
	     idx = (idx + 1) % tid->size) {
		skb = tid->reorder_buf[idx];
		if (!skb)
			continue;

		nframes--;
		/* reorder_time was stamped when the frame was buffered */
		status = (struct mt76_rx_status *)skb->cb;
		if (!time_after32(jiffies,
				  status->reorder_time +
				  mt76_aggr_tid_to_timeo(tid->num)))
			continue;

		/* Timed out: give up on the missing frames before it and
		 * advance the window head up to this frame's seqno.
		 */
		mt76_rx_aggr_release_frames(tid, frames, status->seqno);
	}

	/* The head may have moved; flush the newly in-order run */
	mt76_rx_aggr_release_head(tid, frames);
}
/* Delayed-work handler driving the reorder timeout for one RX TID.
 * Releases timed-out/in-order frames and completes them up the stack,
 * rearming itself while frames remain buffered.
 */
static void
mt76_rx_aggr_reorder_work(struct work_struct *work)
{
	struct mt76_rx_tid *tid = container_of(work, struct mt76_rx_tid,
					       reorder_work.work);
	struct mt76_dev *dev = tid->dev;
	struct sk_buff_head frames;
	int nframes;

	__skb_queue_head_init(&frames);

	/* mt76_rx_complete() runs in the RX path's softirq/RCU context;
	 * match that here since we call it from process context.
	 */
	local_bh_disable();
	rcu_read_lock();

	spin_lock(&tid->lock);
	mt76_rx_aggr_check_release(tid, &frames);
	/* Sample under the lock so the reschedule decision is consistent */
	nframes = tid->nframes;
	spin_unlock(&tid->lock);

	/* Still holes in the window: check again after another timeout */
	if (nframes)
		ieee80211_queue_delayed_work(tid->dev->hw, &tid->reorder_work,
					     mt76_aggr_tid_to_timeo(tid->num));
	mt76_rx_complete(dev, &frames, NULL);

	rcu_read_unlock();
	local_bh_enable();
}
/* Handle a BlockAckReq control frame: move the reorder window of the
 * addressed TID up to the BAR's starting sequence number and release
 * the frames this frees up.
 *
 * Called from the RX path for non-aggregated frames; RCU read lock is
 * assumed held by the caller (needed for the wcid->aggr dereference).
 */
static void
mt76_rx_aggr_check_ctl(struct sk_buff *skb, struct sk_buff_head *frames)
{
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
	struct ieee80211_bar *bar = mt76_skb_get_hdr(skb);
	struct mt76_wcid *wcid = status->wcid;
	struct mt76_rx_tid *tid;
	u8 tidno;
	u16 seqno;

	/* Only control frames can be a BAR */
	if (!ieee80211_is_ctl(bar->frame_control))
		return;

	if (!ieee80211_is_back_req(bar->frame_control))
		return;

	/* TID lives in the top 4 bits of the BAR control field; mirror it
	 * into status->qos_ctl for the rest of the RX path.
	 */
	status->qos_ctl = tidno = le16_to_cpu(bar->control) >> 12;
	seqno = IEEE80211_SEQ_TO_SN(le16_to_cpu(bar->start_seq_num));
	tid = rcu_dereference(wcid->aggr[tidno]);
	if (!tid)
		return;

	spin_lock_bh(&tid->lock);
	/* Skip if the session is being torn down */
	if (!tid->stopped) {
		mt76_rx_aggr_release_frames(tid, frames, seqno);
		mt76_rx_aggr_release_head(tid, frames);
	}
	spin_unlock_bh(&tid->lock);
}
/* Core RX reorder entry point.  @skb has already been appended to
 * @frames by the caller's convention; this function either leaves it
 * there (deliver now), moves it into the reorder buffer (deliver
 * later), or frees it (duplicate/out-of-window).
 *
 * RCU read lock is assumed held by the caller (wcid->aggr dereference).
 */
void mt76_rx_aggr_reorder(struct sk_buff *skb, struct sk_buff_head *frames)
{
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
	struct mt76_wcid *wcid = status->wcid;
	struct ieee80211_sta *sta;
	struct mt76_rx_tid *tid;
	bool sn_less;
	u16 seqno, head, size, idx;
	u8 tidno = status->qos_ctl & IEEE80211_QOS_CTL_TID_MASK;
	u8 ackp;

	/* Default disposition: deliver immediately */
	__skb_queue_tail(frames, skb);

	sta = wcid_to_sta(wcid);
	if (!sta)
		return;

	/* Not flagged as part of an A-MPDU: may still be a BAR that moves
	 * the window (only parseable when not in 802.3 decap mode).
	 */
	if (!status->aggr) {
		if (!(status->flag & RX_FLAG_8023))
			mt76_rx_aggr_check_ctl(skb, frames);
		return;
	}

	/* not part of a BA session */
	ackp = status->qos_ctl & IEEE80211_QOS_CTL_ACK_POLICY_MASK;
	if (ackp == IEEE80211_QOS_CTL_ACK_POLICY_NOACK)
		return;

	/* Link-specific wcids share the BA state of their default wcid */
	if (wcid->def_wcid)
		wcid = wcid->def_wcid;
	tid = rcu_dereference(wcid->aggr[tidno]);
	if (!tid)
		return;

	/* Duplicate detection is handled here, skip mac80211's check */
	status->flag |= RX_FLAG_DUP_VALIDATED;
	spin_lock_bh(&tid->lock);

	if (tid->stopped)
		goto out;

	head = tid->head;
	seqno = status->seqno;
	size = tid->size;
	sn_less = ieee80211_sn_less(seqno, head);

	if (!tid->started) {
		/* Ignore pre-session leftovers until the first frame at or
		 * after the starting sequence number arrives.
		 */
		if (sn_less)
			goto out;

		tid->started = true;
	}

	/* Behind the window: duplicate or late retransmission, drop it */
	if (sn_less) {
		__skb_unlink(skb, frames);
		dev_kfree_skb(skb);
		goto out;
	}

	/* Exactly at the head: deliver in place and flush what follows */
	if (seqno == head) {
		tid->head = ieee80211_sn_inc(head);
		if (tid->nframes)
			mt76_rx_aggr_release_head(tid, frames);
		goto out;
	}

	/* Out of order: take it back off the delivery list for buffering */
	__skb_unlink(skb, frames);

	/*
	 * Frame sequence number exceeds buffering window, free up some space
	 * by releasing previous frames
	 */
	if (!ieee80211_sn_less(seqno, head + size)) {
		head = ieee80211_sn_inc(ieee80211_sn_sub(seqno, size));
		mt76_rx_aggr_release_frames(tid, frames, head);
	}

	idx = seqno % size;

	/* Discard if the current slot is already in use */
	if (tid->reorder_buf[idx]) {
		dev_kfree_skb(skb);
		goto out;
	}

	/* Buffer the frame; reorder_time drives the timeout in the worker */
	status->reorder_time = jiffies;
	tid->reorder_buf[idx] = skb;
	tid->nframes++;
	mt76_rx_aggr_release_head(tid, frames);

	ieee80211_queue_delayed_work(tid->dev->hw, &tid->reorder_work,
				     mt76_aggr_tid_to_timeo(tid->num));

out:
	spin_unlock_bh(&tid->lock);
}
/* Start an RX BA session for @tidno on @wcid.
 *
 * @ssn:  starting sequence number of the session
 * @size: reorder window size (number of reorder_buf slots)
 *
 * Any previous session on the same TID is torn down first.
 * Returns 0 on success or -ENOMEM if the TID state cannot be allocated.
 */
int mt76_rx_aggr_start(struct mt76_dev *dev, struct mt76_wcid *wcid, u8 tidno,
		       u16 ssn, u16 size)
{
	struct mt76_rx_tid *tid;

	mt76_rx_aggr_stop(dev, wcid, tidno);

	/* Zeroed allocation: started/stopped/nframes all begin at 0 */
	tid = kzalloc(struct_size(tid, reorder_buf, size), GFP_KERNEL);
	if (!tid)
		return -ENOMEM;

	spin_lock_init(&tid->lock);
	INIT_DELAYED_WORK(&tid->reorder_work, mt76_rx_aggr_reorder_work);
	tid->num = tidno;
	tid->size = size;
	tid->head = ssn;
	tid->dev = dev;

	/* Publish only after the struct is fully initialized */
	rcu_assign_pointer(wcid->aggr[tidno], tid);

	return 0;
}
EXPORT_SYMBOL_GPL(mt76_rx_aggr_start);
static void mt76_rx_aggr_shutdown(struct mt76_dev *dev, struct mt76_rx_tid *tid)
269
{
270
u16 size = tid->size;
271
int i;
272
273
spin_lock_bh(&tid->lock);
274
275
tid->stopped = true;
276
for (i = 0; tid->nframes && i < size; i++) {
277
struct sk_buff *skb = tid->reorder_buf[i];
278
279
if (!skb)
280
continue;
281
282
tid->reorder_buf[i] = NULL;
283
tid->nframes--;
284
dev_kfree_skb(skb);
285
}
286
287
spin_unlock_bh(&tid->lock);
288
289
cancel_delayed_work_sync(&tid->reorder_work);
290
}
291
292
/* Tear down the RX BA session for @tidno on @wcid, if one exists.
 * Unpublishes the TID pointer first so new RX frames stop using it,
 * then drains it and frees it after an RCU grace period.
 */
void mt76_rx_aggr_stop(struct mt76_dev *dev, struct mt76_wcid *wcid, u8 tidno)
{
	struct mt76_rx_tid *tid;

	tid = rcu_replace_pointer(wcid->aggr[tidno], NULL,
				  lockdep_is_held(&dev->mutex));
	if (!tid)
		return;

	mt76_rx_aggr_shutdown(dev, tid);
	/* Readers may still hold references; defer the free */
	kfree_rcu(tid, rcu_head);
}
EXPORT_SYMBOL_GPL(mt76_rx_aggr_stop);