Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
nu11secur1ty
GitHub Repository: nu11secur1ty/Kali-Linux
Path: blob/master/ALFA-W1F1/RTL8814AU/os_dep/osdep_service.c
1307 views
1
/******************************************************************************
2
*
3
* Copyright(c) 2007 - 2017 Realtek Corporation.
4
*
5
* This program is free software; you can redistribute it and/or modify it
6
* under the terms of version 2 of the GNU General Public License as
7
* published by the Free Software Foundation.
8
*
9
* This program is distributed in the hope that it will be useful, but WITHOUT
10
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12
* more details.
13
*
14
*****************************************************************************/
15
16
17
#define _OSDEP_SERVICE_C_
18
19
#include <drv_types.h>
20
21
#define RT_TAG '1178'
22
23
#ifdef DBG_MEMORY_LEAK
24
#ifdef PLATFORM_LINUX
25
atomic_t _malloc_cnt = ATOMIC_INIT(0);
26
atomic_t _malloc_size = ATOMIC_INIT(0);
27
#endif
28
#endif /* DBG_MEMORY_LEAK */
29
30
31
#if defined(PLATFORM_LINUX)
/*
 * Translate the OS dependent @param error_code to OS independent RTW_STATUS_CODE
 * @return: one of RTW_STATUS_CODE
 */
inline int RTW_STATUS_CODE(int error_code)
{
	if (error_code >= 0)
		return _SUCCESS;

	switch (error_code) {
	/* case -ETIMEDOUT: */
	/* return RTW_STATUS_TIMEDOUT; */
	default:
		/* every negative errno collapses to the generic failure code */
		return _FAIL;
	}
}
#else
/* Non-Linux platforms already use driver status codes; pass through. */
inline int RTW_STATUS_CODE(int error_code)
{
	return error_code;
}
#endif
54
55
/*
 * Parse a decimal integer (optional leading '-') from the NUL-terminated
 * string @s. Parsing stops at the first non-digit, so "-12ab" yields -12.
 * Returns the value through u32; callers expecting a negative result must
 * cast back to a signed type.
 */
u32 rtw_atoi(u8 *s)
{
	int num = 0;
	int negative = 0;
	int i;
	/* fix: strlen() was re-evaluated in the loop condition every
	 * iteration (O(n^2)); hoist it once. Cast silences the u8*/char*
	 * signedness mismatch. */
	int len = (int)strlen((const char *)s);

	for (i = 0; i < len; i++) {
		if (s[i] >= '0' && s[i] <= '9')
			num = num * 10 + (s[i] - '0');
		else if (i == 0 && s[0] == '-')
			negative = 1;
		else
			break;
	}

	if (negative)
		num = -num;

	return num;
}
75
76
/*
 * Allocate @sz bytes of virtually contiguous memory.
 * Returns NULL on failure. When DBG_MEMORY_LEAK is enabled on Linux the
 * global allocation counters are updated on success.
 */
inline void *_rtw_vmalloc(u32 sz)
{
	void *pbuf = NULL; /* fix: was uninitialized if no platform macro is defined */

#ifdef PLATFORM_LINUX
	pbuf = vmalloc(sz);
#endif
#ifdef PLATFORM_FREEBSD
	pbuf = malloc(sz, M_DEVBUF, M_NOWAIT);
#endif
#ifdef PLATFORM_WINDOWS
	NdisAllocateMemoryWithTag(&pbuf, sz, RT_TAG);
#endif

#ifdef DBG_MEMORY_LEAK
#ifdef PLATFORM_LINUX
	if (pbuf != NULL) {
		atomic_inc(&_malloc_cnt);
		atomic_add(sz, &_malloc_size);
	}
#endif
#endif /* DBG_MEMORY_LEAK */

	return pbuf;
}
101
102
/*
 * Allocate @sz bytes of zero-initialized, virtually contiguous memory.
 * Returns NULL on failure.
 */
inline void *_rtw_zvmalloc(u32 sz)
{
	void *pbuf = NULL; /* fix: was uninitialized if no platform macro is defined */

#ifdef PLATFORM_LINUX
	pbuf = _rtw_vmalloc(sz);
	if (pbuf != NULL)
		memset(pbuf, 0, sz);
#endif
#ifdef PLATFORM_FREEBSD
	pbuf = malloc(sz, M_DEVBUF, M_ZERO | M_NOWAIT);
#endif
#ifdef PLATFORM_WINDOWS
	NdisAllocateMemoryWithTag(&pbuf, sz, RT_TAG);
	if (pbuf != NULL)
		NdisFillMemory(pbuf, sz, 0);
#endif

	return pbuf;
}
121
122
/*
 * Release memory obtained from _rtw_vmalloc()/_rtw_zvmalloc().
 * @sz must match the original allocation size (used for leak accounting
 * and by the Windows free routine).
 */
inline void _rtw_vmfree(void *pbuf, u32 sz)
{
#ifdef PLATFORM_LINUX
	vfree(pbuf);
#endif
#ifdef PLATFORM_FREEBSD
	free(pbuf, M_DEVBUF);
#endif
#ifdef PLATFORM_WINDOWS
	NdisFreeMemory(pbuf, sz, 0);
#endif

#ifdef DBG_MEMORY_LEAK
#ifdef PLATFORM_LINUX
	atomic_dec(&_malloc_cnt);
	atomic_sub(sz, &_malloc_size);
#endif
#endif /* DBG_MEMORY_LEAK */
}
141
142
/*
 * Allocate @sz bytes of physically contiguous memory.
 * On Linux the GFP context is chosen from the interrupt state; on the
 * RTK DMP platform large (>16 KiB) requests are redirected to dvr_malloc().
 * Returns NULL on failure.
 */
void *_rtw_malloc(u32 sz)
{
	void *pbuf = NULL;

#ifdef PLATFORM_LINUX
#ifdef RTK_DMP_PLATFORM
	if (sz > 0x4000)
		pbuf = dvr_malloc(sz);
	else
#endif
		pbuf = kmalloc(sz, in_interrupt() ? GFP_ATOMIC : GFP_KERNEL);
#endif
#ifdef PLATFORM_FREEBSD
	pbuf = malloc(sz, M_DEVBUF, M_NOWAIT);
#endif
#ifdef PLATFORM_WINDOWS
	NdisAllocateMemoryWithTag(&pbuf, sz, RT_TAG);
#endif

#ifdef DBG_MEMORY_LEAK
#ifdef PLATFORM_LINUX
	if (pbuf != NULL) {
		atomic_inc(&_malloc_cnt);
		atomic_add(sz, &_malloc_size);
	}
#endif
#endif /* DBG_MEMORY_LEAK */

	return pbuf;
}
176
177
178
/*
 * Allocate @sz bytes of zero-initialized, physically contiguous memory.
 * Returns NULL on failure.
 */
void *_rtw_zmalloc(u32 sz)
{
#ifdef PLATFORM_FREEBSD
	/* FreeBSD can zero during allocation */
	return malloc(sz, M_DEVBUF, M_ZERO | M_NOWAIT);
#else /* PLATFORM_FREEBSD */
	void *pbuf = _rtw_malloc(sz);

	if (pbuf != NULL) {
#ifdef PLATFORM_LINUX
		memset(pbuf, 0, sz);
#endif
#ifdef PLATFORM_WINDOWS
		NdisFillMemory(pbuf, sz, 0);
#endif
	}

	return pbuf;
#endif /* PLATFORM_FREEBSD */
}
200
201
/*
 * Release memory obtained from _rtw_malloc()/_rtw_zmalloc().
 * @sz must match the original allocation size; it selects the RTK DMP
 * free path on Linux and is required by NdisFreeMemory() on Windows.
 */
void _rtw_mfree(void *pbuf, u32 sz)
{
#ifdef PLATFORM_LINUX
#ifdef RTK_DMP_PLATFORM
	if (sz > 0x4000)
		dvr_free(pbuf);
	else
#endif
		kfree(pbuf);
#endif
#ifdef PLATFORM_FREEBSD
	free(pbuf, M_DEVBUF);
#endif
#ifdef PLATFORM_WINDOWS
	NdisFreeMemory(pbuf, sz, 0);
#endif

#ifdef DBG_MEMORY_LEAK
#ifdef PLATFORM_LINUX
	atomic_dec(&_malloc_cnt);
	atomic_sub(sz, &_malloc_size);
#endif
#endif /* DBG_MEMORY_LEAK */
}
230
231
#ifdef PLATFORM_FREEBSD
232
/* review again */
233
struct sk_buff *dev_alloc_skb(unsigned int size)
234
{
235
struct sk_buff *skb = NULL;
236
u8 *data = NULL;
237
238
/* skb = _rtw_zmalloc(sizeof(struct sk_buff)); */ /* for skb->len, etc. */
239
skb = _rtw_malloc(sizeof(struct sk_buff));
240
if (!skb)
241
goto out;
242
data = _rtw_malloc(size);
243
if (!data)
244
goto nodata;
245
246
skb->head = (unsigned char *)data;
247
skb->data = (unsigned char *)data;
248
skb->tail = (unsigned char *)data;
249
skb->end = (unsigned char *)data + size;
250
skb->len = 0;
251
/* printf("%s()-%d: skb=%p, skb->head = %p\n", __FUNCTION__, __LINE__, skb, skb->head); */
252
253
out:
254
return skb;
255
nodata:
256
_rtw_mfree(skb, sizeof(struct sk_buff));
257
skb = NULL;
258
goto out;
259
260
}
261
262
void dev_kfree_skb_any(struct sk_buff *skb)
263
{
264
/* printf("%s()-%d: skb->head = %p\n", __FUNCTION__, __LINE__, skb->head); */
265
if (skb->head)
266
_rtw_mfree(skb->head, 0);
267
/* printf("%s()-%d: skb = %p\n", __FUNCTION__, __LINE__, skb); */
268
if (skb)
269
_rtw_mfree(skb, 0);
270
}
271
struct sk_buff *skb_clone(const struct sk_buff *skb)
272
{
273
return NULL;
274
}
275
276
#endif /* PLATFORM_FREEBSD */
277
278
/* Allocate an skb of @sz bytes; the GFP context follows the interrupt state. */
inline struct sk_buff *_rtw_skb_alloc(u32 sz)
{
#ifdef PLATFORM_LINUX
	return __dev_alloc_skb(sz, in_interrupt() ? GFP_ATOMIC : GFP_KERNEL);
#endif /* PLATFORM_LINUX */

#ifdef PLATFORM_FREEBSD
	return dev_alloc_skb(sz);
#endif /* PLATFORM_FREEBSD */
}

/* Release an skb; usable from any context. */
inline void _rtw_skb_free(struct sk_buff *skb)
{
	dev_kfree_skb_any(skb);
}

/* Deep-copy an skb (header and data). Always NULL on FreeBSD. */
inline struct sk_buff *_rtw_skb_copy(const struct sk_buff *skb)
{
#ifdef PLATFORM_LINUX
	return skb_copy(skb, in_interrupt() ? GFP_ATOMIC : GFP_KERNEL);
#endif /* PLATFORM_LINUX */

#ifdef PLATFORM_FREEBSD
	return NULL;
#endif /* PLATFORM_FREEBSD */
}

/* Clone an skb (shared data, new header). */
inline struct sk_buff *_rtw_skb_clone(struct sk_buff *skb)
{
#ifdef PLATFORM_LINUX
	return skb_clone(skb, in_interrupt() ? GFP_ATOMIC : GFP_KERNEL);
#endif /* PLATFORM_LINUX */

#ifdef PLATFORM_FREEBSD
	return skb_clone(skb);
#endif /* PLATFORM_FREEBSD */
}

/* Copy an skb with a private data area; falls back to clone on old kernels. */
inline struct sk_buff *_rtw_pskb_copy(struct sk_buff *skb)
{
#ifdef PLATFORM_LINUX
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 36))
	return pskb_copy(skb, in_interrupt() ? GFP_ATOMIC : GFP_KERNEL);
#else
	return skb_clone(skb, in_interrupt() ? GFP_ATOMIC : GFP_KERNEL);
#endif
#endif /* PLATFORM_LINUX */

#ifdef PLATFORM_FREEBSD
	return NULL;
#endif /* PLATFORM_FREEBSD */
}
329
330
/* Hand a received skb to the network stack; ownership of @skb transfers. */
inline int _rtw_netif_rx(_nic_hdl ndev, struct sk_buff *skb)
{
#if defined(PLATFORM_LINUX)
	skb->dev = ndev;
	return netif_rx(skb);
#elif defined(PLATFORM_FREEBSD)
	return (*ndev->if_input)(ndev, skb);
#else
	rtw_warn_on(1);
	return -1;
#endif
}
342
343
#ifdef CONFIG_RTW_NAPI
344
/* NAPI receive path: deliver @skb to the stack; ownership transfers. */
inline int _rtw_netif_receive_skb(_nic_hdl ndev, struct sk_buff *skb)
{
#if defined(PLATFORM_LINUX)
	skb->dev = ndev;
	return netif_receive_skb(skb);
#else
	rtw_warn_on(1);
	return -1;
#endif
}

#ifdef CONFIG_RTW_GRO
/* GRO receive path: merge @skb into @napi's GRO lists when possible. */
inline gro_result_t _rtw_napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
{
#if defined(PLATFORM_LINUX)
	return napi_gro_receive(napi, skb);
#else
	rtw_warn_on(1);
	return -1;
#endif
}
365
#endif /* CONFIG_RTW_GRO */
366
#endif /* CONFIG_RTW_NAPI */
367
368
void _rtw_skb_queue_purge(struct sk_buff_head *list)
369
{
370
struct sk_buff *skb;
371
372
while ((skb = skb_dequeue(list)) != NULL)
373
_rtw_skb_free(skb);
374
}
375
376
#ifdef CONFIG_USB_HCI
377
/*
 * Allocate a DMA-consistent USB transfer buffer of @size bytes; the bus
 * address is returned through @dma. Returns NULL on failure.
 */
inline void *_rtw_usb_buffer_alloc(struct usb_device *dev, size_t size, dma_addr_t *dma)
{
#ifdef PLATFORM_LINUX
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35))
	return usb_alloc_coherent(dev, size, (in_interrupt() ? GFP_ATOMIC : GFP_KERNEL), dma);
#else
	/* pre-2.6.35 spelling of the same API */
	return usb_buffer_alloc(dev, size, (in_interrupt() ? GFP_ATOMIC : GFP_KERNEL), dma);
#endif
#endif /* PLATFORM_LINUX */

#ifdef PLATFORM_FREEBSD
	return malloc(size, M_USBDEV, M_NOWAIT | M_ZERO);
#endif /* PLATFORM_FREEBSD */
}

/* Release a buffer obtained from _rtw_usb_buffer_alloc(). */
inline void _rtw_usb_buffer_free(struct usb_device *dev, size_t size, void *addr, dma_addr_t dma)
{
#ifdef PLATFORM_LINUX
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35))
	usb_free_coherent(dev, size, addr, dma);
#else
	usb_buffer_free(dev, size, addr, dma);
#endif
#endif /* PLATFORM_LINUX */

#ifdef PLATFORM_FREEBSD
	free(addr, M_USBDEV);
#endif /* PLATFORM_FREEBSD */
}
405
#endif /* CONFIG_USB_HCI */
406
407
#if defined(DBG_MEM_ALLOC)
408
409
struct rtw_mem_stat {
410
ATOMIC_T alloc; /* the memory bytes we allocate currently */
411
ATOMIC_T peak; /* the peak memory bytes we allocate */
412
ATOMIC_T alloc_cnt; /* the alloc count for alloc currently */
413
ATOMIC_T alloc_err_cnt; /* the error times we fail to allocate memory */
414
};
415
416
struct rtw_mem_stat rtw_mem_type_stat[mstat_tf_idx(MSTAT_TYPE_MAX)];
417
#ifdef RTW_MEM_FUNC_STAT
418
struct rtw_mem_stat rtw_mem_func_stat[mstat_ff_idx(MSTAT_FUNC_MAX)];
419
#endif
420
421
char *MSTAT_TYPE_str[] = {
422
"VIR",
423
"PHY",
424
"SKB",
425
"USB",
426
};
427
428
#ifdef RTW_MEM_FUNC_STAT
429
char *MSTAT_FUNC_str[] = {
430
"UNSP",
431
"IO",
432
"TXIO",
433
"RXIO",
434
"TX",
435
"RX",
436
};
437
#endif
438
439
void rtw_mstat_dump(void *sel)
440
{
441
int i;
442
int value_t[4][mstat_tf_idx(MSTAT_TYPE_MAX)];
443
#ifdef RTW_MEM_FUNC_STAT
444
int value_f[4][mstat_ff_idx(MSTAT_FUNC_MAX)];
445
#endif
446
447
for (i = 0; i < mstat_tf_idx(MSTAT_TYPE_MAX); i++) {
448
value_t[0][i] = ATOMIC_READ(&(rtw_mem_type_stat[i].alloc));
449
value_t[1][i] = ATOMIC_READ(&(rtw_mem_type_stat[i].peak));
450
value_t[2][i] = ATOMIC_READ(&(rtw_mem_type_stat[i].alloc_cnt));
451
value_t[3][i] = ATOMIC_READ(&(rtw_mem_type_stat[i].alloc_err_cnt));
452
}
453
454
#ifdef RTW_MEM_FUNC_STAT
455
for (i = 0; i < mstat_ff_idx(MSTAT_FUNC_MAX); i++) {
456
value_f[0][i] = ATOMIC_READ(&(rtw_mem_func_stat[i].alloc));
457
value_f[1][i] = ATOMIC_READ(&(rtw_mem_func_stat[i].peak));
458
value_f[2][i] = ATOMIC_READ(&(rtw_mem_func_stat[i].alloc_cnt));
459
value_f[3][i] = ATOMIC_READ(&(rtw_mem_func_stat[i].alloc_err_cnt));
460
}
461
#endif
462
463
RTW_PRINT_SEL(sel, "===================== MSTAT =====================\n");
464
RTW_PRINT_SEL(sel, "%4s %10s %10s %10s %10s\n", "TAG", "alloc", "peak", "aloc_cnt", "err_cnt");
465
RTW_PRINT_SEL(sel, "-------------------------------------------------\n");
466
for (i = 0; i < mstat_tf_idx(MSTAT_TYPE_MAX); i++)
467
RTW_PRINT_SEL(sel, "%4s %10d %10d %10d %10d\n", MSTAT_TYPE_str[i], value_t[0][i], value_t[1][i], value_t[2][i], value_t[3][i]);
468
#ifdef RTW_MEM_FUNC_STAT
469
RTW_PRINT_SEL(sel, "-------------------------------------------------\n");
470
for (i = 0; i < mstat_ff_idx(MSTAT_FUNC_MAX); i++)
471
RTW_PRINT_SEL(sel, "%4s %10d %10d %10d %10d\n", MSTAT_FUNC_str[i], value_f[0][i], value_f[1][i], value_f[2][i], value_f[3][i]);
472
#endif
473
}
474
475
/*
 * Record one allocation event in the global statistics.
 * @flags selects the type bucket (and, with RTW_MEM_FUNC_STAT, the
 * function bucket), @status distinguishes success/failure/free, and
 * @sz is the byte count involved.
 * The first call lazily zeroes all counters, keyed off the
 * update_time == 0 sentinel.
 */
void rtw_mstat_update(const enum mstat_f flags, const MSTAT_STATUS status, u32 sz)
{
	static systime update_time = 0;
	int peak, alloc;
	int i;

	/* one-time counter initialization */
	if (!update_time) {
		for (i = 0; i < mstat_tf_idx(MSTAT_TYPE_MAX); i++) {
			ATOMIC_SET(&(rtw_mem_type_stat[i].alloc), 0);
			ATOMIC_SET(&(rtw_mem_type_stat[i].peak), 0);
			ATOMIC_SET(&(rtw_mem_type_stat[i].alloc_cnt), 0);
			ATOMIC_SET(&(rtw_mem_type_stat[i].alloc_err_cnt), 0);
		}
#ifdef RTW_MEM_FUNC_STAT
		for (i = 0; i < mstat_ff_idx(MSTAT_FUNC_MAX); i++) {
			ATOMIC_SET(&(rtw_mem_func_stat[i].alloc), 0);
			ATOMIC_SET(&(rtw_mem_func_stat[i].peak), 0);
			ATOMIC_SET(&(rtw_mem_func_stat[i].alloc_cnt), 0);
			ATOMIC_SET(&(rtw_mem_func_stat[i].alloc_err_cnt), 0);
		}
#endif
	}

	switch (status) {
	case MSTAT_ALLOC_SUCCESS:
		ATOMIC_INC(&(rtw_mem_type_stat[mstat_tf_idx(flags)].alloc_cnt));
		alloc = ATOMIC_ADD_RETURN(&(rtw_mem_type_stat[mstat_tf_idx(flags)].alloc), sz);
		/* track the high-water mark (read-then-set; racy but debug-only) */
		peak = ATOMIC_READ(&(rtw_mem_type_stat[mstat_tf_idx(flags)].peak));
		if (peak < alloc)
			ATOMIC_SET(&(rtw_mem_type_stat[mstat_tf_idx(flags)].peak), alloc);

#ifdef RTW_MEM_FUNC_STAT
		ATOMIC_INC(&(rtw_mem_func_stat[mstat_ff_idx(flags)].alloc_cnt));
		alloc = ATOMIC_ADD_RETURN(&(rtw_mem_func_stat[mstat_ff_idx(flags)].alloc), sz);
		peak = ATOMIC_READ(&(rtw_mem_func_stat[mstat_ff_idx(flags)].peak));
		if (peak < alloc)
			ATOMIC_SET(&(rtw_mem_func_stat[mstat_ff_idx(flags)].peak), alloc);
#endif
		break;

	case MSTAT_ALLOC_FAIL:
		ATOMIC_INC(&(rtw_mem_type_stat[mstat_tf_idx(flags)].alloc_err_cnt));
#ifdef RTW_MEM_FUNC_STAT
		ATOMIC_INC(&(rtw_mem_func_stat[mstat_ff_idx(flags)].alloc_err_cnt));
#endif
		break;

	case MSTAT_FREE:
		ATOMIC_DEC(&(rtw_mem_type_stat[mstat_tf_idx(flags)].alloc_cnt));
		ATOMIC_SUB(&(rtw_mem_type_stat[mstat_tf_idx(flags)].alloc), sz);
#ifdef RTW_MEM_FUNC_STAT
		ATOMIC_DEC(&(rtw_mem_func_stat[mstat_ff_idx(flags)].alloc_cnt));
		ATOMIC_SUB(&(rtw_mem_func_stat[mstat_ff_idx(flags)].alloc), sz);
#endif
		break;
	}

	/* if (rtw_get_passing_time_ms(update_time) > 5000) { */
	/* rtw_mstat_dump(RTW_DBGDUMP); */
	update_time = rtw_get_current_time();
	/* } */
}
538
539
#ifndef SIZE_MAX
540
#define SIZE_MAX (~(size_t)0)
541
#endif
542
543
struct mstat_sniff_rule {
544
enum mstat_f flags;
545
size_t lb;
546
size_t hb;
547
};
548
549
struct mstat_sniff_rule mstat_sniff_rules[] = {
550
{MSTAT_TYPE_PHY, 4097, SIZE_MAX},
551
};
552
553
int mstat_sniff_rule_num = sizeof(mstat_sniff_rules) / sizeof(struct mstat_sniff_rule);
554
555
bool match_mstat_sniff_rules(const enum mstat_f flags, const size_t size)
556
{
557
int i;
558
for (i = 0; i < mstat_sniff_rule_num; i++) {
559
if (mstat_sniff_rules[i].flags == flags
560
&& mstat_sniff_rules[i].lb <= size
561
&& mstat_sniff_rules[i].hb >= size)
562
return _TRUE;
563
}
564
565
return _FALSE;
566
}
567
568
/* Instrumented _rtw_vmalloc(): logs sniffed sizes and updates statistics. */
inline void *dbg_rtw_vmalloc(u32 sz, const enum mstat_f flags, const char *func, const int line)
{
	void *p;

	if (match_mstat_sniff_rules(flags, sz))
		RTW_INFO("DBG_MEM_ALLOC %s:%d %s(%d)\n", func, line, __FUNCTION__, (sz));

	p = _rtw_vmalloc(sz);

	rtw_mstat_update(flags, p ? MSTAT_ALLOC_SUCCESS : MSTAT_ALLOC_FAIL, sz);

	return p;
}

/* Instrumented _rtw_zvmalloc(). */
inline void *dbg_rtw_zvmalloc(u32 sz, const enum mstat_f flags, const char *func, const int line)
{
	void *p;

	if (match_mstat_sniff_rules(flags, sz))
		RTW_INFO("DBG_MEM_ALLOC %s:%d %s(%d)\n", func, line, __FUNCTION__, (sz));

	p = _rtw_zvmalloc(sz);

	rtw_mstat_update(flags, p ? MSTAT_ALLOC_SUCCESS : MSTAT_ALLOC_FAIL, sz);

	return p;
}

/* Instrumented _rtw_vmfree(). */
inline void dbg_rtw_vmfree(void *pbuf, u32 sz, const enum mstat_f flags, const char *func, const int line)
{
	if (match_mstat_sniff_rules(flags, sz))
		RTW_INFO("DBG_MEM_ALLOC %s:%d %s(%d)\n", func, line, __FUNCTION__, (sz));

	_rtw_vmfree(pbuf, sz);

	rtw_mstat_update(flags, MSTAT_FREE, sz);
}

/* Instrumented _rtw_malloc(). */
inline void *dbg_rtw_malloc(u32 sz, const enum mstat_f flags, const char *func, const int line)
{
	void *p;

	if (match_mstat_sniff_rules(flags, sz))
		RTW_INFO("DBG_MEM_ALLOC %s:%d %s(%d)\n", func, line, __FUNCTION__, (sz));

	p = _rtw_malloc(sz);

	rtw_mstat_update(flags, p ? MSTAT_ALLOC_SUCCESS : MSTAT_ALLOC_FAIL, sz);

	return p;
}

/* Instrumented _rtw_zmalloc(). */
inline void *dbg_rtw_zmalloc(u32 sz, const enum mstat_f flags, const char *func, const int line)
{
	void *p;

	if (match_mstat_sniff_rules(flags, sz))
		RTW_INFO("DBG_MEM_ALLOC %s:%d %s(%d)\n", func, line, __FUNCTION__, (sz));

	p = _rtw_zmalloc(sz);

	rtw_mstat_update(flags, p ? MSTAT_ALLOC_SUCCESS : MSTAT_ALLOC_FAIL, sz);

	return p;
}

/* Instrumented _rtw_mfree(). */
inline void dbg_rtw_mfree(void *pbuf, u32 sz, const enum mstat_f flags, const char *func, const int line)
{
	if (match_mstat_sniff_rules(flags, sz))
		RTW_INFO("DBG_MEM_ALLOC %s:%d %s(%d)\n", func, line, __FUNCTION__, (sz));

	_rtw_mfree(pbuf, sz);

	rtw_mstat_update(flags, MSTAT_FREE, sz);
}
668
669
inline struct sk_buff *dbg_rtw_skb_alloc(unsigned int size, const enum mstat_f flags, const char *func, int line)
670
{
671
struct sk_buff *skb;
672
unsigned int truesize = 0;
673
674
skb = _rtw_skb_alloc(size);
675
676
if (skb)
677
truesize = skb->truesize;
678
679
if (!skb || truesize < size || match_mstat_sniff_rules(flags, truesize))
680
RTW_INFO("DBG_MEM_ALLOC %s:%d %s(%d), skb:%p, truesize=%u\n", func, line, __FUNCTION__, size, skb, truesize);
681
682
rtw_mstat_update(
683
flags
684
, skb ? MSTAT_ALLOC_SUCCESS : MSTAT_ALLOC_FAIL
685
, truesize
686
);
687
688
return skb;
689
}
690
691
inline void dbg_rtw_skb_free(struct sk_buff *skb, const enum mstat_f flags, const char *func, int line)
692
{
693
unsigned int truesize = skb->truesize;
694
695
if (match_mstat_sniff_rules(flags, truesize))
696
RTW_INFO("DBG_MEM_ALLOC %s:%d %s, truesize=%u\n", func, line, __FUNCTION__, truesize);
697
698
_rtw_skb_free(skb);
699
700
rtw_mstat_update(
701
flags
702
, MSTAT_FREE
703
, truesize
704
);
705
}
706
707
inline struct sk_buff *dbg_rtw_skb_copy(const struct sk_buff *skb, const enum mstat_f flags, const char *func, const int line)
708
{
709
struct sk_buff *skb_cp;
710
unsigned int truesize = skb->truesize;
711
unsigned int cp_truesize = 0;
712
713
skb_cp = _rtw_skb_copy(skb);
714
if (skb_cp)
715
cp_truesize = skb_cp->truesize;
716
717
if (!skb_cp || cp_truesize < truesize || match_mstat_sniff_rules(flags, cp_truesize))
718
RTW_INFO("DBG_MEM_ALLOC %s:%d %s(%u), skb_cp:%p, cp_truesize=%u\n", func, line, __FUNCTION__, truesize, skb_cp, cp_truesize);
719
720
rtw_mstat_update(
721
flags
722
, skb_cp ? MSTAT_ALLOC_SUCCESS : MSTAT_ALLOC_FAIL
723
, cp_truesize
724
);
725
726
return skb_cp;
727
}
728
729
inline struct sk_buff *dbg_rtw_skb_clone(struct sk_buff *skb, const enum mstat_f flags, const char *func, const int line)
730
{
731
struct sk_buff *skb_cl;
732
unsigned int truesize = skb->truesize;
733
unsigned int cl_truesize = 0;
734
735
skb_cl = _rtw_skb_clone(skb);
736
if (skb_cl)
737
cl_truesize = skb_cl->truesize;
738
739
if (!skb_cl || cl_truesize < truesize || match_mstat_sniff_rules(flags, cl_truesize))
740
RTW_INFO("DBG_MEM_ALLOC %s:%d %s(%u), skb_cl:%p, cl_truesize=%u\n", func, line, __FUNCTION__, truesize, skb_cl, cl_truesize);
741
742
rtw_mstat_update(
743
flags
744
, skb_cl ? MSTAT_ALLOC_SUCCESS : MSTAT_ALLOC_FAIL
745
, cl_truesize
746
);
747
748
return skb_cl;
749
}
750
751
/* Instrumented _rtw_netif_rx(): the skb's memory is handed to the stack,
 * so it is accounted as freed here. */
inline int dbg_rtw_netif_rx(_nic_hdl ndev, struct sk_buff *skb, const enum mstat_f flags, const char *func, int line)
{
	int ret;
	unsigned int truesize = skb->truesize;

	if (match_mstat_sniff_rules(flags, truesize))
		RTW_INFO("DBG_MEM_ALLOC %s:%d %s, truesize=%u\n", func, line, __FUNCTION__, truesize);

	ret = _rtw_netif_rx(ndev, skb);

	rtw_mstat_update(flags, MSTAT_FREE, truesize);

	return ret;
}
769
770
#ifdef CONFIG_RTW_NAPI
771
/* Instrumented _rtw_netif_receive_skb(): skb ownership passes to the stack,
 * so it is accounted as freed here. */
inline int dbg_rtw_netif_receive_skb(_nic_hdl ndev, struct sk_buff *skb, const enum mstat_f flags, const char *func, int line)
{
	int ret;
	unsigned int truesize = skb->truesize;

	if (match_mstat_sniff_rules(flags, truesize))
		RTW_INFO("DBG_MEM_ALLOC %s:%d %s, truesize=%u\n", func, line, __FUNCTION__, truesize);

	ret = _rtw_netif_receive_skb(ndev, skb);

	rtw_mstat_update(flags, MSTAT_FREE, truesize);

	return ret;
}

#ifdef CONFIG_RTW_GRO
/* Instrumented _rtw_napi_gro_receive(); same ownership rule as above. */
inline gro_result_t dbg_rtw_napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb, const enum mstat_f flags, const char *func, int line)
{
	/* fix: was declared 'int', silently converting the gro_result_t
	 * return value through a mismatched type */
	gro_result_t ret;
	unsigned int truesize = skb->truesize;

	if (match_mstat_sniff_rules(flags, truesize))
		RTW_INFO("DBG_MEM_ALLOC %s:%d %s, truesize=%u\n", func, line, __FUNCTION__, truesize);

	ret = _rtw_napi_gro_receive(napi, skb);

	rtw_mstat_update(flags, MSTAT_FREE, truesize);

	return ret;
}
809
#endif /* CONFIG_RTW_GRO */
810
#endif /* CONFIG_RTW_NAPI */
811
812
inline void dbg_rtw_skb_queue_purge(struct sk_buff_head *list, enum mstat_f flags, const char *func, int line)
813
{
814
struct sk_buff *skb;
815
816
while ((skb = skb_dequeue(list)) != NULL)
817
dbg_rtw_skb_free(skb, flags, func, line);
818
}
819
820
#ifdef CONFIG_USB_HCI
821
/* Instrumented _rtw_usb_buffer_alloc(). */
inline void *dbg_rtw_usb_buffer_alloc(struct usb_device *dev, size_t size, dma_addr_t *dma, const enum mstat_f flags, const char *func, int line)
{
	void *p;

	if (match_mstat_sniff_rules(flags, size))
		RTW_INFO("DBG_MEM_ALLOC %s:%d %s(%zu)\n", func, line, __FUNCTION__, size);

	p = _rtw_usb_buffer_alloc(dev, size, dma);

	rtw_mstat_update(flags, p ? MSTAT_ALLOC_SUCCESS : MSTAT_ALLOC_FAIL, size);

	return p;
}

/* Instrumented _rtw_usb_buffer_free(). */
inline void dbg_rtw_usb_buffer_free(struct usb_device *dev, size_t size, void *addr, dma_addr_t dma, const enum mstat_f flags, const char *func, int line)
{
	if (match_mstat_sniff_rules(flags, size))
		RTW_INFO("DBG_MEM_ALLOC %s:%d %s(%zu)\n", func, line, __FUNCTION__, size);

	_rtw_usb_buffer_free(dev, size, addr, dma);

	rtw_mstat_update(flags, MSTAT_FREE, size);
}
853
#endif /* CONFIG_USB_HCI */
854
855
#endif /* defined(DBG_MEM_ALLOC) */
856
857
void *rtw_malloc2d(int h, int w, size_t size)
858
{
859
int j;
860
861
void **a = (void **) rtw_zmalloc(h * sizeof(void *) + h * w * size);
862
if (a == NULL) {
863
RTW_INFO("%s: alloc memory fail!\n", __FUNCTION__);
864
return NULL;
865
}
866
867
for (j = 0; j < h; j++)
868
a[j] = ((char *)(a + h)) + j * w * size;
869
870
return a;
871
}
872
873
void rtw_mfree2d(void *pbuf, int h, int w, int size)
874
{
875
rtw_mfree((u8 *)pbuf, h * sizeof(void *) + w * h * size);
876
}
877
878
inline void rtw_os_pkt_free(_pkt *pkt)
879
{
880
#if defined(PLATFORM_LINUX)
881
rtw_skb_free(pkt);
882
#elif defined(PLATFORM_FREEBSD)
883
m_freem(pkt);
884
#else
885
#error "TBD\n"
886
#endif
887
}
888
889
inline _pkt *rtw_os_pkt_copy(_pkt *pkt)
890
{
891
#if defined(PLATFORM_LINUX)
892
return rtw_skb_copy(pkt);
893
#elif defined(PLATFORM_FREEBSD)
894
return m_dup(pkt, M_NOWAIT);
895
#else
896
#error "TBD\n"
897
#endif
898
}
899
900
inline void *rtw_os_pkt_data(_pkt *pkt)
901
{
902
#if defined(PLATFORM_LINUX)
903
return pkt->data;
904
#elif defined(PLATFORM_FREEBSD)
905
return pkt->m_data;
906
#else
907
#error "TBD\n"
908
#endif
909
}
910
911
inline u32 rtw_os_pkt_len(_pkt *pkt)
912
{
913
#if defined(PLATFORM_LINUX)
914
return pkt->len;
915
#elif defined(PLATFORM_FREEBSD)
916
return pkt->m_pkthdr.len;
917
#else
918
#error "TBD\n"
919
#endif
920
}
921
922
/* Platform-independent memcpy; regions must not overlap. */
void _rtw_memcpy(void *dst, const void *src, u32 sz)
{
#if defined(PLATFORM_LINUX) || defined(PLATFORM_FREEBSD)
	memcpy(dst, src, sz);
#endif
#ifdef PLATFORM_WINDOWS
	NdisMoveMemory(dst, src, sz);
#endif
}

/* Platform-independent memmove; regions may overlap. */
inline void _rtw_memmove(void *dst, const void *src, u32 sz)
{
#if defined(PLATFORM_LINUX)
	memmove(dst, src, sz);
#else
	#error "TBD\n"
#endif
}

/* Compare two buffers; returns _TRUE when equal, _FALSE otherwise
 * (note: NOT memcmp() semantics). */
int _rtw_memcmp(const void *dst, const void *src, u32 sz)
{
#if defined(PLATFORM_LINUX) || defined(PLATFORM_FREEBSD)
	/* under Linux/GNU/GLibc, the return value of memcmp for two same mem. chunk is 0 */
	return memcmp(dst, src, sz) ? _FALSE : _TRUE;
#endif

#ifdef PLATFORM_WINDOWS
	/* under Windows, the return value of NdisEqualMemory for two same mem. chunk is 1 */
	return NdisEqualMemory(dst, src, sz) ? _TRUE : _FALSE;
#endif
}

/* Fill @sz bytes of @pbuf with byte value @c. */
void _rtw_memset(void *pbuf, int c, u32 sz)
{
#if defined(PLATFORM_LINUX) || defined(PLATFORM_FREEBSD)
	memset(pbuf, c, sz);
#endif
#ifdef PLATFORM_WINDOWS
	NdisFillMemory(pbuf, sz, c);
#endif
}
995
996
#ifdef PLATFORM_FREEBSD
997
/* Splice @pnew between @pprev and @pnext in a doubly-linked list. */
static inline void __list_add(_list *pnew, _list *pprev, _list *pnext)
{
	pnext->prev = pnew;
	pnew->next = pnext;
	pnew->prev = pprev;
	pprev->next = pnew;
}
1004
#endif /* PLATFORM_FREEBSD */
1005
1006
1007
void _rtw_init_listhead(_list *list)
1008
{
1009
1010
#ifdef PLATFORM_LINUX
1011
1012
INIT_LIST_HEAD(list);
1013
1014
#endif
1015
1016
#ifdef PLATFORM_FREEBSD
1017
list->next = list;
1018
list->prev = list;
1019
#endif
1020
#ifdef PLATFORM_WINDOWS
1021
1022
NdisInitializeListHead(list);
1023
1024
#endif
1025
1026
}
1027
1028
1029
/*
1030
For the following list_xxx operations,
1031
caller must guarantee the atomic context.
1032
Otherwise, there will be racing condition.
1033
*/
1034
u32 rtw_is_list_empty(_list *phead)
1035
{
1036
1037
#ifdef PLATFORM_LINUX
1038
1039
if (list_empty(phead))
1040
return _TRUE;
1041
else
1042
return _FALSE;
1043
1044
#endif
1045
#ifdef PLATFORM_FREEBSD
1046
1047
if (phead->next == phead)
1048
return _TRUE;
1049
else
1050
return _FALSE;
1051
1052
#endif
1053
1054
1055
#ifdef PLATFORM_WINDOWS
1056
1057
if (IsListEmpty(phead))
1058
return _TRUE;
1059
else
1060
return _FALSE;
1061
1062
#endif
1063
1064
1065
}
1066
1067
/* Insert @plist immediately after head @phead. */
void rtw_list_insert_head(_list *plist, _list *phead)
{
#ifdef PLATFORM_LINUX
	list_add(plist, phead);
#endif
#ifdef PLATFORM_FREEBSD
	__list_add(plist, phead, phead->next);
#endif
#ifdef PLATFORM_WINDOWS
	InsertHeadList(phead, plist);
#endif
}

/* Insert @plist immediately before head @phead (i.e. at the tail). */
void rtw_list_insert_tail(_list *plist, _list *phead)
{
#ifdef PLATFORM_LINUX
	list_add_tail(plist, phead);
#endif
#ifdef PLATFORM_FREEBSD
	__list_add(plist, phead->prev, phead);
#endif
#ifdef PLATFORM_WINDOWS
	InsertTailList(phead, plist);
#endif
}

/* Join @list onto the front of @head; @list is left in an undefined state. */
inline void rtw_list_splice(_list *list, _list *head)
{
#ifdef PLATFORM_LINUX
	list_splice(list, head);
#else
	#error "TBD\n"
#endif
}

/* Join @list onto the front of @head and reinitialize @list as empty. */
inline void rtw_list_splice_init(_list *list, _list *head)
{
#ifdef PLATFORM_LINUX
	list_splice_init(list, head);
#else
	#error "TBD\n"
#endif
}

/* Join @list onto the tail of @head. */
inline void rtw_list_splice_tail(_list *list, _list *head)
{
#ifdef PLATFORM_LINUX
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 27))
	/* list_splice_tail() does not exist before 2.6.27 */
	if (!list_empty(list))
		__list_splice(list, head);
#else
	list_splice_tail(list, head);
#endif
#else
	#error "TBD\n"
#endif
}
1135
1136
inline void rtw_hlist_head_init(rtw_hlist_head *h)
1137
{
1138
#ifdef PLATFORM_LINUX
1139
INIT_HLIST_HEAD(h);
1140
#else
1141
#error "TBD\n"
1142
#endif
1143
}
1144
1145
inline void rtw_hlist_add_head(rtw_hlist_node *n, rtw_hlist_head *h)
1146
{
1147
#ifdef PLATFORM_LINUX
1148
hlist_add_head(n, h);
1149
#else
1150
#error "TBD\n"
1151
#endif
1152
}
1153
1154
inline void rtw_hlist_del(rtw_hlist_node *n)
1155
{
1156
#ifdef PLATFORM_LINUX
1157
hlist_del(n);
1158
#else
1159
#error "TBD\n"
1160
#endif
1161
}
1162
1163
inline void rtw_hlist_add_head_rcu(rtw_hlist_node *n, rtw_hlist_head *h)
1164
{
1165
#ifdef PLATFORM_LINUX
1166
hlist_add_head_rcu(n, h);
1167
#else
1168
#error "TBD\n"
1169
#endif
1170
}
1171
1172
inline void rtw_hlist_del_rcu(rtw_hlist_node *n)
1173
{
1174
#ifdef PLATFORM_LINUX
1175
hlist_del_rcu(n);
1176
#else
1177
#error "TBD\n"
1178
#endif
1179
}
1180
1181
/* Bind @ptimer to @padapter's platform handle with callback @pfunc(@ctx). */
void rtw_init_timer(_timer *ptimer, void *padapter, void *pfunc, void *ctx)
{
	_adapter *adapter = (_adapter *)padapter;

#ifdef PLATFORM_LINUX
	_init_timer(ptimer, adapter->pnetdev, pfunc, ctx);
#endif
#ifdef PLATFORM_FREEBSD
	_init_timer(ptimer, adapter->pifp, pfunc, ctx);
#endif
#ifdef PLATFORM_WINDOWS
	_init_timer(ptimer, adapter->hndis_adapter, pfunc, ctx);
#endif
}
1195
1196
/*
1197
1198
Caller must check if the list is empty before calling rtw_list_delete
1199
1200
*/
1201
1202
1203
void _rtw_init_sema(_sema *sema, int init_val)
1204
{
1205
1206
#ifdef PLATFORM_LINUX
1207
1208
sema_init(sema, init_val);
1209
1210
#endif
1211
#ifdef PLATFORM_FREEBSD
1212
sema_init(sema, init_val, "rtw_drv");
1213
#endif
1214
#ifdef PLATFORM_OS_XP
1215
1216
KeInitializeSemaphore(sema, init_val, SEMA_UPBND); /* count=0; */
1217
1218
#endif
1219
1220
#ifdef PLATFORM_OS_CE
1221
if (*sema == NULL)
1222
*sema = CreateSemaphore(NULL, init_val, SEMA_UPBND, NULL);
1223
#endif
1224
1225
}
1226
1227
void _rtw_free_sema(_sema *sema)
1228
{
1229
#ifdef PLATFORM_FREEBSD
1230
sema_destroy(sema);
1231
#endif
1232
#ifdef PLATFORM_OS_CE
1233
CloseHandle(*sema);
1234
#endif
1235
1236
}
1237
1238
void _rtw_up_sema(_sema *sema)
1239
{
1240
1241
#ifdef PLATFORM_LINUX
1242
1243
up(sema);
1244
1245
#endif
1246
#ifdef PLATFORM_FREEBSD
1247
sema_post(sema);
1248
#endif
1249
#ifdef PLATFORM_OS_XP
1250
1251
KeReleaseSemaphore(sema, IO_NETWORK_INCREMENT, 1, FALSE);
1252
1253
#endif
1254
1255
#ifdef PLATFORM_OS_CE
1256
ReleaseSemaphore(*sema, 1, NULL);
1257
#endif
1258
}
1259
1260
u32 _rtw_down_sema(_sema *sema)
1261
{
1262
1263
#ifdef PLATFORM_LINUX
1264
1265
if (down_interruptible(sema))
1266
return _FAIL;
1267
else
1268
return _SUCCESS;
1269
1270
#endif
1271
#ifdef PLATFORM_FREEBSD
1272
sema_wait(sema);
1273
return _SUCCESS;
1274
#endif
1275
#ifdef PLATFORM_OS_XP
1276
1277
if (STATUS_SUCCESS == KeWaitForSingleObject(sema, Executive, KernelMode, TRUE, NULL))
1278
return _SUCCESS;
1279
else
1280
return _FAIL;
1281
#endif
1282
1283
#ifdef PLATFORM_OS_CE
1284
if (WAIT_OBJECT_0 == WaitForSingleObject(*sema, INFINITE))
1285
return _SUCCESS;
1286
else
1287
return _FAIL;
1288
#endif
1289
}
1290
1291
/* Terminate the calling kernel thread, signalling @comp where supported. */
inline void thread_exit(_completion *comp)
{
#ifdef PLATFORM_LINUX
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 17, 0))
	/* complete_and_exit() was renamed in 5.17 */
	kthread_complete_and_exit(comp, 0);
#else
	complete_and_exit(comp, 0);
#endif
#endif

#ifdef PLATFORM_FREEBSD
	printf("%s", "RTKTHREAD_exit");
#endif

#ifdef PLATFORM_OS_CE
	ExitThread(STATUS_SUCCESS);
#endif

#ifdef PLATFORM_OS_XP
	PsTerminateSystemThread(STATUS_SUCCESS);
#endif
}
1313
1314
/* Initialize completion @comp (Linux only; no-op elsewhere). */
inline void _rtw_init_completion(_completion *comp)
{
#ifdef PLATFORM_LINUX
	init_completion(comp);
#endif
}

/* Wait for @comp with a fixed 3-second timeout. */
inline void _rtw_wait_for_comp_timeout(_completion *comp)
{
#ifdef PLATFORM_LINUX
	wait_for_completion_timeout(comp, msecs_to_jiffies(3000));
#endif
}

/* Block until @comp is signalled. */
inline void _rtw_wait_for_comp(_completion *comp)
{
#ifdef PLATFORM_LINUX
	wait_for_completion(comp);
#endif
}
1332
1333
/* Initialize a cross-platform mutex; pair with _rtw_mutex_free(). */
void _rtw_mutex_init(_mutex *pmutex)
{
#ifdef PLATFORM_LINUX
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 37))
	mutex_init(pmutex);
#else
	/* pre-2.6.37 kernels implemented mutexes as semaphores */
	init_MUTEX(pmutex);
#endif
#endif
#ifdef PLATFORM_FREEBSD
	mtx_init(pmutex, "", NULL, MTX_DEF | MTX_RECURSE);
#endif
#ifdef PLATFORM_OS_XP
	KeInitializeMutex(pmutex, 0);
#endif
#ifdef PLATFORM_OS_CE
	*pmutex = CreateMutex(NULL, _FALSE, NULL);
#endif
}
1357
1358
void _rtw_mutex_free(_mutex *pmutex);
/*
 * Destroy a mutex initialized by _rtw_mutex_init().
 *
 * Fix: in the original code the #ifdef PLATFORM_FREEBSD branch was
 * nested INSIDE the #ifdef PLATFORM_LINUX block, so the FreeBSD
 * teardown was dead code.  The branch is un-nested here and uses
 * mtx_destroy() to match the mtx_init() done in _rtw_mutex_init().
 */
void _rtw_mutex_free(_mutex *pmutex)
{
#ifdef PLATFORM_LINUX
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 37))
	mutex_destroy(pmutex);
#endif
#endif

#ifdef PLATFORM_FREEBSD
	mtx_destroy(pmutex);
#endif

#ifdef PLATFORM_OS_XP
	/* KMUTEX needs no explicit destruction */
#endif

#ifdef PLATFORM_OS_CE
	/* handle close is left to the owner on CE */
#endif
}
1382
1383
/* Initialize a cross-platform spinlock; pair with _rtw_spinlock_free(). */
void _rtw_spinlock_init(_lock *plock)
{
#ifdef PLATFORM_LINUX
	spin_lock_init(plock);
#endif
#ifdef PLATFORM_FREEBSD
	mtx_init(plock, "", NULL, MTX_DEF | MTX_RECURSE);
#endif
#ifdef PLATFORM_WINDOWS
	NdisAllocateSpinLock(plock);
#endif
}
1401
1402
/*
 * Release spinlock resources.  Linux spinlocks need no teardown, so
 * there is intentionally no PLATFORM_LINUX branch here.
 */
void _rtw_spinlock_free(_lock *plock)
{
#ifdef PLATFORM_FREEBSD
	mtx_destroy(plock);
#endif

#ifdef PLATFORM_WINDOWS
	NdisFreeSpinLock(plock);
#endif
}
1415
#ifdef PLATFORM_FREEBSD
1416
extern PADAPTER prtw_lock;
1417
1418
void rtw_mtx_lock(_lock *plock)
1419
{
1420
if (prtw_lock)
1421
mtx_lock(&prtw_lock->glock);
1422
else
1423
printf("%s prtw_lock==NULL", __FUNCTION__);
1424
}
1425
void rtw_mtx_unlock(_lock *plock)
1426
{
1427
if (prtw_lock)
1428
mtx_unlock(&prtw_lock->glock);
1429
else
1430
printf("%s prtw_lock==NULL", __FUNCTION__);
1431
1432
}
1433
#endif /* PLATFORM_FREEBSD */
1434
1435
1436
/* Acquire a spinlock (process/base context variant). */
void _rtw_spinlock(_lock *plock)
{
#ifdef PLATFORM_LINUX
	spin_lock(plock);
#endif
#ifdef PLATFORM_FREEBSD
	mtx_lock(plock);
#endif
#ifdef PLATFORM_WINDOWS
	NdisAcquireSpinLock(plock);
#endif
}
1454
1455
/* Release a spinlock acquired with _rtw_spinlock(). */
void _rtw_spinunlock(_lock *plock)
{
#ifdef PLATFORM_LINUX
	spin_unlock(plock);
#endif
#ifdef PLATFORM_FREEBSD
	mtx_unlock(plock);
#endif
#ifdef PLATFORM_WINDOWS
	NdisReleaseSpinLock(plock);
#endif
}
1472
1473
1474
/*
 * Acquire a spinlock; on Windows uses the DISPATCH-level (Dpr) variant.
 * On Linux/FreeBSD identical to _rtw_spinlock().
 */
void _rtw_spinlock_ex(_lock *plock)
{
#ifdef PLATFORM_LINUX
	spin_lock(plock);
#endif
#ifdef PLATFORM_FREEBSD
	mtx_lock(plock);
#endif
#ifdef PLATFORM_WINDOWS
	NdisDprAcquireSpinLock(plock);
#endif
}
1492
1493
/* Release counterpart of _rtw_spinlock_ex(). */
void _rtw_spinunlock_ex(_lock *plock)
{
#ifdef PLATFORM_LINUX
	spin_unlock(plock);
#endif
#ifdef PLATFORM_FREEBSD
	mtx_unlock(plock);
#endif
#ifdef PLATFORM_WINDOWS
	NdisDprReleaseSpinLock(plock);
#endif
}
1510
1511
1512
1513
/* Initialize a queue: empty list head plus its guarding spinlock. */
void _rtw_init_queue(_queue *pqueue)
{
	_rtw_init_listhead(&(pqueue->queue));
	_rtw_spinlock_init(&(pqueue->lock));
}
1518
1519
/* Tear down a queue's lock; entries, if any, are the caller's problem. */
void _rtw_deinit_queue(_queue *pqueue)
{
	_rtw_spinlock_free(&(pqueue->lock));
}
1523
1524
/* Return whether the queue's list is empty (unlocked check). */
u32 _rtw_queue_empty(_queue *pqueue)
{
	return rtw_is_list_empty(&(pqueue->queue));
}
1528
1529
1530
/*
 * Return _TRUE when iteration has wrapped back to the list head,
 * i.e. @plist is the sentinel @head itself.
 */
u32 rtw_end_of_queue_search(_list *head, _list *plist)
{
	return (head == plist) ? _TRUE : _FALSE;
}
1537
1538
1539
/*
 * Return the platform's monotonic-ish time stamp.  Units differ by
 * platform: jiffies (Linux), seconds (FreeBSD), 100 ns ticks (Windows);
 * convert with _rtw_systime_to_ms() and friends.
 */
systime _rtw_get_current_time(void)
{
#ifdef PLATFORM_LINUX
	return jiffies;
#endif
#ifdef PLATFORM_FREEBSD
	struct timeval tvp;
	getmicrotime(&tvp);
	return tvp.tv_sec;
#endif
#ifdef PLATFORM_WINDOWS
	LARGE_INTEGER SystemTime;
	NdisGetCurrentSystemTime(&SystemTime);
	return SystemTime.LowPart;/* count of 100-nanosecond intervals */
#endif
}
1556
1557
/* Convert a platform time stamp (see _rtw_get_current_time) to milliseconds. */
inline u32 _rtw_systime_to_ms(systime stime)
{
#ifdef PLATFORM_LINUX
	return jiffies_to_msecs(stime);
#endif
#ifdef PLATFORM_FREEBSD
	/* FreeBSD systime is in seconds */
	return stime * 1000;
#endif
#ifdef PLATFORM_WINDOWS
	/* Windows systime is in 100 ns units */
	return stime / 10000 ;
#endif
}
1569
1570
/* Convert milliseconds to the platform time-stamp unit (inverse of above). */
inline systime _rtw_ms_to_systime(u32 ms)
{
#ifdef PLATFORM_LINUX
	return msecs_to_jiffies(ms);
#endif
#ifdef PLATFORM_FREEBSD
	/* seconds granularity: sub-second values truncate to 0 */
	return ms / 1000;
#endif
#ifdef PLATFORM_WINDOWS
	return ms * 10000 ;
#endif
}
1582
1583
/* Convert microseconds to platform time-stamp units (Linux only so far). */
inline systime _rtw_us_to_systime(u32 us)
{
#ifdef PLATFORM_LINUX
	return usecs_to_jiffies(us);
#else
#error "TBD\n"
#endif
}
1591
1592
/* the input parameter start use the same unit as returned by rtw_get_current_time */
1593
/* Milliseconds elapsed since @start (same unit as _rtw_get_current_time). */
inline s32 _rtw_get_passing_time_ms(systime start)
{
	systime elapsed = _rtw_get_current_time() - start;

	return _rtw_systime_to_ms(elapsed);
}
1597
1598
/* Milliseconds remaining until the @end time stamp. */
inline s32 _rtw_get_remaining_time_ms(systime end)
{
	systime remaining = end - _rtw_get_current_time();

	return _rtw_systime_to_ms(remaining);
}
1602
1603
/* Milliseconds between two time stamps (@end - @start). */
inline s32 _rtw_get_time_interval_ms(systime start, systime end)
{
	systime delta = end - start;

	return _rtw_systime_to_ms(delta);
}
1607
1608
/* Wrap-safe "is time stamp @a later than @b" (Linux time_after). */
inline bool _rtw_time_after(systime a, systime b)
{
#ifdef PLATFORM_LINUX
	return time_after(a, b);
#else
#error "TBD\n"
#endif
}
1616
1617
/*
 * Sleep for @ms milliseconds, yielding the CPU on Linux via the
 * scheduler (interruptible).  Must not be called from atomic context.
 */
void rtw_sleep_schedulable(int ms)
{
#ifdef PLATFORM_LINUX
	u32 delta;

	delta = (ms * HZ) / 1000; /* convert ms to jiffies */
	if (delta == 0) {
		delta = 1;/* sleep at least one jiffy */
	}
	set_current_state(TASK_INTERRUPTIBLE);
	schedule_timeout(delta);
	return;
#endif
#ifdef PLATFORM_FREEBSD
	/* busy-wait fallback */
	DELAY(ms * 1000);
	return ;
#endif

#ifdef PLATFORM_WINDOWS
	NdisMSleep(ms * 1000); /* (us)*1000=(ms) */
#endif
}
1645
1646
1647
/*
 * Sleep for @ms milliseconds (may sleep; not for atomic context).
 * Short Linux sleeps (<20 ms) use usleep_range() for better timer
 * precision than msleep().
 */
void rtw_msleep_os(int ms)
{
#ifdef PLATFORM_LINUX
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 36))
	if (ms < 20) {
		unsigned long us = ms * 1000UL;
		usleep_range(us, us + 1000UL);
	} else
#endif
		msleep((unsigned int)ms);
#endif
#ifdef PLATFORM_FREEBSD
	/* Delay for delay microseconds */
	DELAY(ms * 1000);
	return ;
#endif
#ifdef PLATFORM_WINDOWS
	NdisMSleep(ms * 1000); /* (us)*1000=(ms) */
#endif
}
1673
/*
 * Sleep for @us microseconds (may sleep; not for atomic context).
 */
void rtw_usleep_os(int us)
{
#ifdef PLATFORM_LINUX
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 36))
	usleep_range(us, us + 1);
#else
	/* Fix: the original branch was inverted — it slept only 1 ms for
	 * any request longer than ~2 ms.  Sleep the requested duration,
	 * rounded up to at least 1 ms (msleep granularity). */
	if (1 < (us / 1000))
		msleep(us / 1000);
	else
		msleep((us / 1000) + 1);
#endif
#endif

#ifdef PLATFORM_FREEBSD
	/* Busy-wait for @us microseconds */
	DELAY(us);

	return ;
#endif
#ifdef PLATFORM_WINDOWS
	NdisMSleep(us); /* (us) */
#endif
}
1702
1703
1704
#ifdef DBG_DELAY_OS
1705
/*
 * Debug variant of rtw_mdelay_os(): logs caller @func/@line before a
 * busy-wait of @ms milliseconds.  Compiled only under DBG_DELAY_OS.
 */
void _rtw_mdelay_os(int ms, const char *func, const int line)
{
#if 0
	/* disabled: route long delays through the sleeping path instead */
	if (ms > 10)
		RTW_INFO("%s:%d %s(%d)\n", func, line, __FUNCTION__, ms);
	rtw_msleep_os(ms);
	return;
#endif

	RTW_INFO("%s:%d %s(%d)\n", func, line, __FUNCTION__, ms);

#if defined(PLATFORM_LINUX)
	mdelay((unsigned long)ms);
#elif defined(PLATFORM_WINDOWS)
	NdisStallExecution(ms * 1000); /* (us)*1000=(ms) */
#endif
}
1729
/*
 * Debug variant of rtw_udelay_os(): logs caller @func/@line before a
 * busy-wait of @us microseconds.  Compiled only under DBG_DELAY_OS.
 */
void _rtw_udelay_os(int us, const char *func, const int line)
{
#if 0
	/* disabled: route long delays through the sleeping path instead */
	if (us > 1000) {
		RTW_INFO("%s:%d %s(%d)\n", func, line, __FUNCTION__, us);
		rtw_usleep_os(us);
		return;
	}
#endif

	RTW_INFO("%s:%d %s(%d)\n", func, line, __FUNCTION__, us);

#if defined(PLATFORM_LINUX)
	udelay((unsigned long)us);
#elif defined(PLATFORM_WINDOWS)
	NdisStallExecution(us); /* (us) */
#endif
}
1755
#else
1756
/* Busy-wait for @ms milliseconds (safe in atomic context; burns CPU). */
void rtw_mdelay_os(int ms)
{
#ifdef PLATFORM_LINUX
	mdelay((unsigned long)ms);
#endif
#ifdef PLATFORM_FREEBSD
	DELAY(ms * 1000);
	return ;
#endif
#ifdef PLATFORM_WINDOWS
	NdisStallExecution(ms * 1000); /* (us)*1000=(ms) */
#endif
}
1776
/* Busy-wait for @us microseconds (safe in atomic context; burns CPU). */
void rtw_udelay_os(int us)
{
#ifdef PLATFORM_LINUX
	udelay((unsigned long)us);
#endif
#ifdef PLATFORM_FREEBSD
	/* Delay for delay microseconds */
	DELAY(us);
	return ;
#endif
#ifdef PLATFORM_WINDOWS
	NdisStallExecution(us); /* (us) */
#endif
}
1796
#endif
1797
1798
/* Voluntarily give up the CPU to other runnable threads. */
void rtw_yield_os(void)
{
#ifdef PLATFORM_LINUX
	yield();
#endif
#ifdef PLATFORM_FREEBSD
	yield();
#endif
#ifdef PLATFORM_WINDOWS
	SwitchToThread();
#endif
}
1810
1811
bool rtw_macaddr_is_larger(const u8 *a, const u8 *b)
1812
{
1813
u32 va, vb;
1814
1815
va = be32_to_cpu(*((u32 *)a));
1816
vb = be32_to_cpu(*((u32 *)b));
1817
if (va > vb)
1818
return 1;
1819
else if (va < vb)
1820
return 0;
1821
1822
return be16_to_cpu(*((u16 *)(a + 4))) > be16_to_cpu(*((u16 *)(b + 4)));
1823
}
1824
1825
#define RTW_SUSPEND_LOCK_NAME "rtw_wifi"
1826
#define RTW_SUSPEND_TRAFFIC_LOCK_NAME "rtw_wifi_traffic"
1827
#define RTW_SUSPEND_RESUME_LOCK_NAME "rtw_wifi_resume"
1828
#ifdef CONFIG_WAKELOCK
1829
static struct wake_lock rtw_suspend_lock;
1830
static struct wake_lock rtw_suspend_traffic_lock;
1831
static struct wake_lock rtw_suspend_resume_lock;
1832
#elif defined(CONFIG_ANDROID_POWER)
1833
static android_suspend_lock_t rtw_suspend_lock = {
1834
.name = RTW_SUSPEND_LOCK_NAME
1835
};
1836
static android_suspend_lock_t rtw_suspend_traffic_lock = {
1837
.name = RTW_SUSPEND_TRAFFIC_LOCK_NAME
1838
};
1839
static android_suspend_lock_t rtw_suspend_resume_lock = {
1840
.name = RTW_SUSPEND_RESUME_LOCK_NAME
1841
};
1842
#endif
1843
1844
/* Create the three driver wake locks (general / traffic / resume). */
inline void rtw_suspend_lock_init(void)
{
#ifdef CONFIG_WAKELOCK
	wake_lock_init(&rtw_suspend_lock, WAKE_LOCK_SUSPEND, RTW_SUSPEND_LOCK_NAME);
	wake_lock_init(&rtw_suspend_traffic_lock, WAKE_LOCK_SUSPEND, RTW_SUSPEND_TRAFFIC_LOCK_NAME);
	wake_lock_init(&rtw_suspend_resume_lock, WAKE_LOCK_SUSPEND, RTW_SUSPEND_RESUME_LOCK_NAME);
#elif defined(CONFIG_ANDROID_POWER)
	android_init_suspend_lock(&rtw_suspend_lock);
	android_init_suspend_lock(&rtw_suspend_traffic_lock);
	android_init_suspend_lock(&rtw_suspend_resume_lock);
#endif
}
1856
1857
/* Destroy the wake locks created by rtw_suspend_lock_init(). */
inline void rtw_suspend_lock_uninit(void)
{
#ifdef CONFIG_WAKELOCK
	wake_lock_destroy(&rtw_suspend_lock);
	wake_lock_destroy(&rtw_suspend_traffic_lock);
	wake_lock_destroy(&rtw_suspend_resume_lock);
#elif defined(CONFIG_ANDROID_POWER)
	android_uninit_suspend_lock(&rtw_suspend_lock);
	android_uninit_suspend_lock(&rtw_suspend_traffic_lock);
	android_uninit_suspend_lock(&rtw_suspend_resume_lock);
#endif
}
1869
1870
/* Hold the general wake lock: prevents system suspend until released. */
inline void rtw_lock_suspend(void)
{
#ifdef CONFIG_WAKELOCK
	wake_lock(&rtw_suspend_lock);
#elif defined(CONFIG_ANDROID_POWER)
	android_lock_suspend(&rtw_suspend_lock);
#endif

#if defined(CONFIG_WAKELOCK) || defined(CONFIG_ANDROID_POWER)
	/* RTW_INFO("####%s: suspend_lock_count:%d####\n", __FUNCTION__, rtw_suspend_lock.stat.count); */
#endif
}
1882
1883
/* Release the general wake lock taken by rtw_lock_suspend(). */
inline void rtw_unlock_suspend(void)
{
#ifdef CONFIG_WAKELOCK
	wake_unlock(&rtw_suspend_lock);
#elif defined(CONFIG_ANDROID_POWER)
	android_unlock_suspend(&rtw_suspend_lock);
#endif

#if defined(CONFIG_WAKELOCK) || defined(CONFIG_ANDROID_POWER)
	/* RTW_INFO("####%s: suspend_lock_count:%d####\n", __FUNCTION__, rtw_suspend_lock.stat.count); */
#endif
}
1895
1896
/* Hold the resume wake lock (keeps system awake during driver resume). */
inline void rtw_resume_lock_suspend(void)
{
#ifdef CONFIG_WAKELOCK
	wake_lock(&rtw_suspend_resume_lock);
#elif defined(CONFIG_ANDROID_POWER)
	android_lock_suspend(&rtw_suspend_resume_lock);
#endif

#if defined(CONFIG_WAKELOCK) || defined(CONFIG_ANDROID_POWER)
	/* RTW_INFO("####%s: suspend_lock_count:%d####\n", __FUNCTION__, rtw_suspend_lock.stat.count); */
#endif
}
1908
1909
/* Release the resume wake lock taken by rtw_resume_lock_suspend(). */
inline void rtw_resume_unlock_suspend(void)
{
#ifdef CONFIG_WAKELOCK
	wake_unlock(&rtw_suspend_resume_lock);
#elif defined(CONFIG_ANDROID_POWER)
	android_unlock_suspend(&rtw_suspend_resume_lock);
#endif

#if defined(CONFIG_WAKELOCK) || defined(CONFIG_ANDROID_POWER)
	/* RTW_INFO("####%s: suspend_lock_count:%d####\n", __FUNCTION__, rtw_suspend_lock.stat.count); */
#endif
}
1921
1922
/* Hold the general wake lock with auto-expiry after @timeout_ms. */
inline void rtw_lock_suspend_timeout(u32 timeout_ms)
{
#ifdef CONFIG_WAKELOCK
	wake_lock_timeout(&rtw_suspend_lock, rtw_ms_to_systime(timeout_ms));
#elif defined(CONFIG_ANDROID_POWER)
	android_lock_suspend_auto_expire(&rtw_suspend_lock, rtw_ms_to_systime(timeout_ms));
#endif
}
1930
1931
1932
/* Hold the traffic wake lock with auto-expiry after @timeout_ms. */
inline void rtw_lock_traffic_suspend_timeout(u32 timeout_ms)
{
#ifdef CONFIG_WAKELOCK
	wake_lock_timeout(&rtw_suspend_traffic_lock, rtw_ms_to_systime(timeout_ms));
#elif defined(CONFIG_ANDROID_POWER)
	android_lock_suspend_auto_expire(&rtw_suspend_traffic_lock, rtw_ms_to_systime(timeout_ms));
#endif
	/* RTW_INFO("traffic lock timeout:%d\n", timeout_ms); */
}
1941
1942
/* Atomically set bit @nr in the bitmap at @addr (Linux set_bit). */
inline void rtw_set_bit(int nr, unsigned long *addr)
{
#ifdef PLATFORM_LINUX
	set_bit(nr, addr);
#else
#error "TBD\n";
#endif
}
1950
1951
/* Atomically clear bit @nr in the bitmap at @addr (Linux clear_bit). */
inline void rtw_clear_bit(int nr, unsigned long *addr)
{
#ifdef PLATFORM_LINUX
	clear_bit(nr, addr);
#else
#error "TBD\n";
#endif
}
1959
1960
/* Atomically test-and-clear bit @nr; returns the previous bit value. */
inline int rtw_test_and_clear_bit(int nr, unsigned long *addr)
{
#ifdef PLATFORM_LINUX
	return test_and_clear_bit(nr, addr);
#else
#error "TBD\n";
#endif
}
1968
1969
/* Store @i into the atomic counter @v. */
inline void ATOMIC_SET(ATOMIC_T *v, int i)
{
#ifdef PLATFORM_LINUX
	atomic_set(v, i);
#elif defined(PLATFORM_WINDOWS)
	*v = i; /* NOTE(review): plain store, not an interlocked op — confirm */
#elif defined(PLATFORM_FREEBSD)
	atomic_set_int(v, i);
#endif
}
1979
1980
/* Load the current value of the atomic counter @v. */
inline int ATOMIC_READ(ATOMIC_T *v)
{
#ifdef PLATFORM_LINUX
	return atomic_read(v);
#elif defined(PLATFORM_WINDOWS)
	return *v; /* NOTE(review): plain load, not an interlocked op — confirm */
#elif defined(PLATFORM_FREEBSD)
	return atomic_load_acq_32(v);
#endif
}
1990
1991
/* Atomically add @i to @v. */
inline void ATOMIC_ADD(ATOMIC_T *v, int i)
{
#ifdef PLATFORM_LINUX
	atomic_add(i, v);
#elif defined(PLATFORM_WINDOWS)
	InterlockedAdd(v, i);
#elif defined(PLATFORM_FREEBSD)
	atomic_add_int(v, i);
#endif
}
2001
/* Atomically subtract @i from @v. */
inline void ATOMIC_SUB(ATOMIC_T *v, int i)
{
#ifdef PLATFORM_LINUX
	atomic_sub(i, v);
#elif defined(PLATFORM_WINDOWS)
	InterlockedAdd(v, -i);
#elif defined(PLATFORM_FREEBSD)
	atomic_subtract_int(v, i);
#endif
}
2011
2012
/* Atomically increment @v by one. */
inline void ATOMIC_INC(ATOMIC_T *v)
{
#ifdef PLATFORM_LINUX
	atomic_inc(v);
#elif defined(PLATFORM_WINDOWS)
	InterlockedIncrement(v);
#elif defined(PLATFORM_FREEBSD)
	atomic_add_int(v, 1);
#endif
}
2022
2023
/* Atomically decrement @v by one. */
inline void ATOMIC_DEC(ATOMIC_T *v)
{
#ifdef PLATFORM_LINUX
	atomic_dec(v);
#elif defined(PLATFORM_WINDOWS)
	InterlockedDecrement(v);
#elif defined(PLATFORM_FREEBSD)
	atomic_subtract_int(v, 1);
#endif
}
2033
2034
/* Atomically add @i to @v and return the new value. */
inline int ATOMIC_ADD_RETURN(ATOMIC_T *v, int i)
{
#ifdef PLATFORM_LINUX
	return atomic_add_return(i, v);
#elif defined(PLATFORM_WINDOWS)
	return InterlockedAdd(v, i);
#elif defined(PLATFORM_FREEBSD)
	/* NOTE(review): add + separate load is not a single atomic op;
	 * a concurrent update between the two calls can be observed. */
	atomic_add_int(v, i);
	return atomic_load_acq_32(v);
#endif
}
2045
2046
/* Atomically subtract @i from @v and return the new value. */
inline int ATOMIC_SUB_RETURN(ATOMIC_T *v, int i)
{
#ifdef PLATFORM_LINUX
	return atomic_sub_return(i, v);
#elif defined(PLATFORM_WINDOWS)
	return InterlockedAdd(v, -i);
#elif defined(PLATFORM_FREEBSD)
	/* NOTE(review): subtract + separate load is not a single atomic op */
	atomic_subtract_int(v, i);
	return atomic_load_acq_32(v);
#endif
}
2057
2058
/* Atomically increment @v and return the new value. */
inline int ATOMIC_INC_RETURN(ATOMIC_T *v)
{
#ifdef PLATFORM_LINUX
	return atomic_inc_return(v);
#elif defined(PLATFORM_WINDOWS)
	return InterlockedIncrement(v);
#elif defined(PLATFORM_FREEBSD)
	/* NOTE(review): increment + separate load is not a single atomic op */
	atomic_add_int(v, 1);
	return atomic_load_acq_32(v);
#endif
}
2069
2070
/* Atomically decrement @v and return the new value. */
inline int ATOMIC_DEC_RETURN(ATOMIC_T *v)
{
#ifdef PLATFORM_LINUX
	return atomic_dec_return(v);
#elif defined(PLATFORM_WINDOWS)
	return InterlockedDecrement(v);
#elif defined(PLATFORM_FREEBSD)
	/* NOTE(review): decrement + separate load is not a single atomic op */
	atomic_subtract_int(v, 1);
	return atomic_load_acq_32(v);
#endif
}
2081
2082
/*
 * Increment @v unless that would take it past @u.
 * Returns non-zero when the increment happened, 0 otherwise.
 */
inline bool ATOMIC_INC_UNLESS(ATOMIC_T *v, int u)
{
#ifdef PLATFORM_LINUX
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 15))
	return atomic_add_unless(v, 1, u);
#else
	/* only make sure not exceed after this function */
	if (ATOMIC_INC_RETURN(v) > u) {
		ATOMIC_DEC(v);
		return 0;
	}
	return 1;
#endif
#else
#error "TBD\n"
#endif
}
2099
2100
#ifdef PLATFORM_LINUX
2101
/*
2102
* Open a file with the specific @param path, @param flag, @param mode
2103
* @param fpp the pointer of struct file pointer to get struct file pointer while file opening is success
2104
* @param path the path of the file to open
2105
* @param flag file operation flags, please refer to linux document
2106
* @param mode please refer to linux document
2107
* @return Linux specific error code
2108
*/
2109
static int openFile(struct file **fpp, const char *path, int flag, int mode)
2110
{
2111
struct file *fp;
2112
2113
fp = filp_open(path, flag, mode);
2114
if (IS_ERR(fp)) {
2115
*fpp = NULL;
2116
return PTR_ERR(fp);
2117
} else {
2118
*fpp = fp;
2119
return 0;
2120
}
2121
}
2122
2123
/*
2124
* Close the file with the specific @param fp
2125
* @param fp the pointer of struct file to close
2126
* @return always 0
2127
*/
2128
/* Close a file opened by openFile(); always returns 0. */
static int closeFile(struct file *fp)
{
	filp_close(fp, NULL);
	return 0;
}
2133
2134
/*
 * Read up to @len bytes from @fp into @buf, looping over short reads.
 * Returns bytes read (may be < @len at EOF), a negative error code, or
 * -EPERM when the file is not readable.  Advances fp->f_pos.
 */
static int readFile(struct file *fp, char *buf, int len)
{
	int rlen = 0, sum = 0;

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0))
	if (!(fp->f_mode & FMODE_CAN_READ))
#else
	if (!fp->f_op || !fp->f_op->read)
#endif
		return -EPERM;

	while (sum < len) {
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0))
		rlen = kernel_read(fp, buf + sum, len - sum, &fp->f_pos);
#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0))
		rlen = __vfs_read(fp, buf + sum, len - sum, &fp->f_pos);
#else
		rlen = fp->f_op->read(fp, buf + sum, len - sum, &fp->f_pos);
#endif
		if (rlen > 0)
			sum += rlen;
		else if (0 != rlen)
			/* propagate the read error */
			return rlen;
		else
			/* rlen == 0: end of file */
			break;
	}

	return sum;
}
2164
2165
/*
 * Write up to @len bytes from @buf to @fp, looping over short writes.
 * Returns bytes written, a negative error code, or -EPERM when the
 * file is not writable.  Advances fp->f_pos.
 */
static int writeFile(struct file *fp, char *buf, int len)
{
	int wlen = 0, sum = 0;

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0))
	if (!(fp->f_mode & FMODE_CAN_WRITE))
#else
	if (!fp->f_op || !fp->f_op->write)
#endif
		return -EPERM;

	while (sum < len) {
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0))
		wlen = kernel_write(fp, buf + sum, len - sum, &fp->f_pos);
#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0))
		wlen = __vfs_write(fp, buf + sum, len - sum, &fp->f_pos);
#else
		wlen = fp->f_op->write(fp, buf + sum, len - sum, &fp->f_pos);
#endif
		if (wlen > 0)
			sum += wlen;
		else if (0 != wlen)
			/* propagate the write error */
			return wlen;
		else
			break;
	}

	return sum;
}
2195
2196
/*
2197
* Test if the specifi @param pathname is a direct and readable
2198
* If readable, @param sz is not used
2199
* @param pathname the name of the path to test
2200
* @return Linux specific error code
2201
*/
2202
/*
 * Test whether @pathname can be resolved (directory reachability).
 * @sz is unused.  Returns 0 on success, negative error otherwise.
 *
 * Fix: a successful kern_path() takes a reference on the resolved
 * path; the original never released it, leaking dentry/vfsmount
 * references on every call.  Also drops the unused `error` init.
 */
static int isDirReadable(const char *pathname, u32 *sz)
{
	struct path path;
	int error;

	error = kern_path(pathname, LOOKUP_FOLLOW, &path);
	if (error == 0)
		path_put(&path);

	return error;
}
2209
2210
/*
2211
* Test if the specifi @param path is a file and readable
2212
* If readable, @param sz is got
2213
* @param path the path of the file to test
2214
* @return Linux specific error code
2215
*/
2216
/*
 * Test whether @path names a readable file by reading one byte.
 * On success optionally returns the file size through @sz.
 * Returns 0 when readable, otherwise a negative Linux error code.
 *
 * Fix: when the 1-byte probe read failed, the original assigned
 * `ret = PTR_ERR(fp)` — but fp is a VALID pointer at that point, so a
 * garbage (pointer-valued) "error code" was returned.  Use -EINVAL.
 */
static int isFileReadable(const char *path, u32 *sz)
{
	struct file *fp;
	int ret = 0;
#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0))
	mm_segment_t oldfs;
#endif
	char buf;

	fp = filp_open(path, O_RDONLY, 0);
	if (IS_ERR(fp))
		ret = PTR_ERR(fp);
	else {
#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0))
		/* allow kernel-space buffers through the fs read path */
		oldfs = get_fs();
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 1, 0))
		set_fs(KERNEL_DS);
#else
		set_fs(get_ds());
#endif
#endif

		if (1 != readFile(fp, &buf, 1))
			ret = -EINVAL;

		if (ret == 0 && sz) {
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 19, 0))
			*sz = i_size_read(fp->f_path.dentry->d_inode);
#else
			*sz = i_size_read(fp->f_dentry->d_inode);
#endif
		}

#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0))
		set_fs(oldfs);
#endif
		filp_close(fp, NULL);
	}
	return ret;
}
2256
2257
/*
2258
* Open the file with @param path and retrive the file content into memory starting from @param buf for @param sz at most
2259
* @param path the path of the file to open and read
2260
* @param buf the starting address of the buffer to store file content
2261
* @param sz how many bytes to read at most
2262
* @return the byte we've read, or Linux specific error code
2263
*/
2264
/*
 * Read at most @sz bytes of @path into @buf.
 * Returns bytes read, or a negative Linux error code (-EINVAL for
 * NULL @path/@buf).  The set_fs() bracketing around readFile() is
 * order-sensitive on pre-5.10 kernels.
 */
static int retriveFromFile(const char *path, u8 *buf, u32 sz)
{
	int ret = -1;
#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0))
	mm_segment_t oldfs;
#endif
	struct file *fp;

	if (path && buf) {
		ret = openFile(&fp, path, O_RDONLY, 0);
		if (0 == ret) {
			RTW_INFO("%s openFile path:%s fp=%p\n", __FUNCTION__, path , fp);

#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0))
			oldfs = get_fs();
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 1, 0))
			set_fs(KERNEL_DS);
#else
			set_fs(get_ds());
#endif
#endif
			ret = readFile(fp, buf, sz);
#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0))
			set_fs(oldfs);
#endif
			closeFile(fp);

			RTW_INFO("%s readFile, ret:%d\n", __FUNCTION__, ret);

		} else
			RTW_INFO("%s openFile path:%s Fail, ret:%d\n", __FUNCTION__, path, ret);
	} else {
		RTW_INFO("%s NULL pointer\n", __FUNCTION__);
		ret = -EINVAL;
	}
	return ret;
}
2301
2302
/*
2303
* Open the file with @param path and wirte @param sz byte of data starting from @param buf into the file
2304
* @param path the path of the file to open and write
2305
* @param buf the starting address of the data to write into file
2306
* @param sz how many bytes to write at most
2307
* @return the byte we've written, or Linux specific error code
2308
*/
2309
/*
 * Write @sz bytes from @buf to @path, creating the file if needed
 * (mode 0666).  Returns bytes written, or a negative Linux error code
 * (-EINVAL for NULL @path/@buf).
 */
static int storeToFile(const char *path, u8 *buf, u32 sz)
{
	int ret = 0;
#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0))
	mm_segment_t oldfs;
#endif
	struct file *fp;

	if (path && buf) {
		ret = openFile(&fp, path, O_CREAT | O_WRONLY, 0666);
		if (0 == ret) {
			RTW_INFO("%s openFile path:%s fp=%p\n", __FUNCTION__, path , fp);

#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0))
			oldfs = get_fs();
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 1, 0))
			set_fs(KERNEL_DS);
#else
			set_fs(get_ds());
#endif
#endif
			ret = writeFile(fp, buf, sz);
#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0))
			set_fs(oldfs);
#endif
			closeFile(fp);

			RTW_INFO("%s writeFile, ret:%d\n", __FUNCTION__, ret);

		} else
			RTW_INFO("%s openFile path:%s Fail, ret:%d\n", __FUNCTION__, path, ret);
	} else {
		RTW_INFO("%s NULL pointer\n", __FUNCTION__);
		ret = -EINVAL;
	}
	return ret;
}
2346
#endif /* PLATFORM_LINUX */
2347
2348
/*
2349
* Test if the specifi @param path is a direct and readable
2350
* @param path the path of the direct to test
2351
* @return _TRUE or _FALSE
2352
*/
2353
int rtw_is_dir_readable(const char *path)
2354
{
2355
#ifdef PLATFORM_LINUX
2356
if (isDirReadable(path, NULL) == 0)
2357
return _TRUE;
2358
else
2359
return _FALSE;
2360
#else
2361
/* Todo... */
2362
return _FALSE;
2363
#endif
2364
}
2365
2366
/*
2367
* Test if the specifi @param path is a file and readable
2368
* @param path the path of the file to test
2369
* @return _TRUE or _FALSE
2370
*/
2371
int rtw_is_file_readable(const char *path)
2372
{
2373
#ifdef PLATFORM_LINUX
2374
if (isFileReadable(path, NULL) == 0)
2375
return _TRUE;
2376
else
2377
return _FALSE;
2378
#else
2379
/* Todo... */
2380
return _FALSE;
2381
#endif
2382
}
2383
2384
/*
2385
* Test if the specifi @param path is a file and readable.
2386
* If readable, @param sz is got
2387
* @param path the path of the file to test
2388
* @return _TRUE or _FALSE
2389
*/
2390
/*
 * Like rtw_is_file_readable(), but also returns the file size through
 * @sz on success.  Always _FALSE on non-Linux platforms.
 */
int rtw_is_file_readable_with_size(const char *path, u32 *sz)
{
#ifdef PLATFORM_LINUX
	return (isFileReadable(path, sz) == 0) ? _TRUE : _FALSE;
#else
	/* Todo... */
	return _FALSE;
#endif
}
2402
2403
/*
2404
* Test if the specifi @param path is a readable file with valid size.
2405
* If readable, @param sz is got
2406
* @param path the path of the file to test
2407
* @return _TRUE or _FALSE
2408
*/
2409
/*
 * Return _TRUE when @path is a readable file whose size does not
 * exceed @sz, _FALSE otherwise.
 */
int rtw_readable_file_sz_chk(const char *path, u32 sz)
{
	u32 actual_sz;

	if (rtw_is_file_readable_with_size(path, &actual_sz) == _FALSE
	    || actual_sz > sz)
		return _FALSE;

	return _TRUE;
}
2421
2422
/*
2423
* Open the file with @param path and retrive the file content into memory starting from @param buf for @param sz at most
2424
* @param path the path of the file to open and read
2425
* @param buf the starting address of the buffer to store file content
2426
* @param sz how many bytes to read at most
2427
* @return the byte we've read
2428
*/
2429
/*
 * Read at most @sz bytes of @path into @buf.
 * Returns the byte count read; errors are mapped to 0.
 */
int rtw_retrieve_from_file(const char *path, u8 *buf, u32 sz)
{
#ifdef PLATFORM_LINUX
	int nread = retriveFromFile(path, buf, sz);

	return (nread < 0) ? 0 : nread;
#else
	/* Todo... */
	return 0;
#endif
}
2439
2440
/*
2441
* Open the file with @param path and wirte @param sz byte of data starting from @param buf into the file
2442
* @param path the path of the file to open and write
2443
* @param buf the starting address of the data to write into file
2444
* @param sz how many bytes to write at most
2445
* @return the byte we've written
2446
*/
2447
/*
 * Write @sz bytes from @buf into @path.
 * Returns the byte count written; errors are mapped to 0.
 */
int rtw_store_to_file(const char *path, u8 *buf, u32 sz)
{
#ifdef PLATFORM_LINUX
	int nwritten = storeToFile(path, buf, sz);

	return (nwritten < 0) ? 0 : nwritten;
#else
	/* Todo... */
	return 0;
#endif
}
2457
2458
#ifdef PLATFORM_LINUX
2459
/*
 * Allocate an ethernet net_device whose netdev_priv() is a small
 * indicator struct pointing at caller-owned @old_priv (@sizeof_priv
 * recorded alongside).  Returns NULL on allocation failure.
 * The caller retains ownership of @old_priv.
 */
struct net_device *rtw_alloc_etherdev_with_old_priv(int sizeof_priv, void *old_priv)
{
	struct net_device *pnetdev;
	struct rtw_netdev_priv_indicator *pnpi;

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35))
	/* 4 TX queues for QoS (AC) mapping */
	pnetdev = alloc_etherdev_mq(sizeof(struct rtw_netdev_priv_indicator), 4);
#else
	pnetdev = alloc_etherdev(sizeof(struct rtw_netdev_priv_indicator));
#endif
	if (!pnetdev)
		goto RETURN;

	pnpi = netdev_priv(pnetdev);
	pnpi->priv = old_priv;
	pnpi->sizeof_priv = sizeof_priv;

RETURN:
	return pnetdev;
}
2479
2480
/*
 * Allocate an ethernet net_device plus a freshly zero-vmalloc'd private
 * area of @sizeof_priv bytes, linked through the priv indicator.
 * Returns NULL when either allocation fails (no partial state leaks:
 * the netdev is freed if the priv allocation fails).
 */
struct net_device *rtw_alloc_etherdev(int sizeof_priv)
{
	struct net_device *pnetdev;
	struct rtw_netdev_priv_indicator *pnpi;

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35))
	pnetdev = alloc_etherdev_mq(sizeof(struct rtw_netdev_priv_indicator), 4);
#else
	pnetdev = alloc_etherdev(sizeof(struct rtw_netdev_priv_indicator));
#endif
	if (!pnetdev)
		goto RETURN;

	pnpi = netdev_priv(pnetdev);

	pnpi->priv = rtw_zvmalloc(sizeof_priv);
	if (!pnpi->priv) {
		free_netdev(pnetdev);
		pnetdev = NULL;
		goto RETURN;
	}

	pnpi->sizeof_priv = sizeof_priv;
RETURN:
	return pnetdev;
}
2506
2507
/*
 * Free a net_device allocated by the rtw_alloc_etherdev* helpers.
 * NULL is tolerated.
 *
 * NOTE(review): when pnpi->priv is NULL this returns WITHOUT calling
 * free_netdev(), leaking the netdev; and pnpi->priv itself is never
 * vfreed here — presumably the priv buffer is owned/freed elsewhere
 * (see rtw_alloc_etherdev_with_old_priv).  Confirm ownership before
 * changing either behavior.
 */
void rtw_free_netdev(struct net_device *netdev)
{
	struct rtw_netdev_priv_indicator *pnpi;

	if (!netdev)
		goto RETURN;

	pnpi = netdev_priv(netdev);

	if (!pnpi->priv)
		goto RETURN;

	free_netdev(netdev);

RETURN:
	return;
}
2524
2525
/*
 * Rename the adapter's network interface to @ifname by unregistering
 * the current net_device and registering a freshly initialized one.
 * The old net_device is parked in rereg_priv->old_pnetdev (freed on the
 * next call, or by teardown elsewhere).  Returns 0 on success, -1 on
 * failure.  Caller must hold rtnl when rtw_rtnl_lock_needed() is false.
 */
int rtw_change_ifname(_adapter *padapter, const char *ifname)
{
	struct dvobj_priv *dvobj;
	struct net_device *pnetdev;
	struct net_device *cur_pnetdev;
	struct rereg_nd_name_data *rereg_priv;
	int ret;
	u8 rtnl_lock_needed;

	if (!padapter)
		goto error;

	dvobj = adapter_to_dvobj(padapter);
	cur_pnetdev = padapter->pnetdev;
	rereg_priv = &padapter->rereg_nd_name_priv;

	/* free the old_pnetdev left over from a previous rename */
	if (rereg_priv->old_pnetdev) {
		free_netdev(rereg_priv->old_pnetdev);
		rereg_priv->old_pnetdev = NULL;
	}

	rtnl_lock_needed = rtw_rtnl_lock_needed(dvobj);

	/* unregister_netdev() takes rtnl itself; *_netdevice assumes held */
	if (rtnl_lock_needed)
		unregister_netdev(cur_pnetdev);
	else
		unregister_netdevice(cur_pnetdev);

	/* keep the old netdev alive until the next rename or teardown */
	rereg_priv->old_pnetdev = cur_pnetdev;

	pnetdev = rtw_init_netdev(padapter);
	if (!pnetdev) {
		ret = -1;
		goto error;
	}

	SET_NETDEV_DEV(pnetdev, dvobj_to_dev(adapter_to_dvobj(padapter)));

	rtw_init_netdev_name(pnetdev, ifname);

	_rtw_memcpy(pnetdev->dev_addr, adapter_mac_addr(padapter), ETH_ALEN);

	if (rtnl_lock_needed)
		ret = register_netdev(pnetdev);
	else
		ret = register_netdevice(pnetdev);

	if (ret != 0) {
		goto error;
	}

	return 0;

error:

	return -1;

}
2584
#endif
2585
2586
#ifdef PLATFORM_FREEBSD
2587
/*
2588
* Copy a buffer from userspace and write into kernel address
2589
* space.
2590
*
2591
* This emulation just calls the FreeBSD copyin function (to
2592
* copy data from user space buffer into a kernel space buffer)
2593
* and is designed to be used with the above io_write_wrapper.
2594
*
2595
* This function should return the number of bytes not copied.
2596
* I.e. success results in a zero value.
2597
* Negative error values are not returned.
2598
*/
2599
/*
 * Linux-style copy_from_user emulation over FreeBSD copyin().
 * Returns the number of bytes NOT copied: 0 on success, @n on any
 * error (all-or-nothing).
 */
unsigned long
copy_from_user(void *to, const void *from, unsigned long n)
{
	/* Any errors will be treated as a failure
	   to copy any of the requested bytes */
	return (copyin(from, to, n) == 0) ? 0 : n;
}
2610
2611
/*
 * Linux-style copy_to_user emulation over FreeBSD copyout().
 * Returns the number of bytes NOT copied: 0 on success, @n on any
 * error (all-or-nothing).
 */
unsigned long
copy_to_user(void *to, const void *from, unsigned long n)
{
	/* Any errors will be treated as a failure
	   to copy any of the requested bytes */
	return (copyout(from, to, n) == 0) ? 0 : n;
}
2622
2623
2624
/*
2625
* The usb_register and usb_deregister functions are used to register
2626
* usb drivers with the usb subsystem. In this compatibility layer
2627
* emulation a list of drivers (struct usb_driver) is maintained
2628
* and is used for probing/attaching etc.
2629
*
2630
* usb_register and usb_deregister simply call these functions.
2631
*/
2632
/* Register a usb_driver with the FreeBSD compat layer; always returns 0. */
int
usb_register(struct usb_driver *driver)
{
	rtw_usb_linux_register(driver);
	return 0;
}
2638
2639
2640
/* Deregister a usb_driver from the FreeBSD compat layer; always returns 0. */
int
usb_deregister(struct usb_driver *driver)
{
	rtw_usb_linux_deregister(driver);
	return 0;
}
2646
2647
/*
 * Trampoline used to run a module init/exit routine from a context
 * that only passes an opaque pointer: @arg is really a pointer to a
 * function taking no arguments and returning int.  The routine's
 * return value is discarded.
 */
void module_init_exit_wrapper(void *arg)
{
	int (*entry)(void) = arg;

	(void)entry();
}
2653
2654
#endif /* PLATFORM_FREEBSD */
2655
2656
#ifdef CONFIG_PLATFORM_SPRD
2657
#ifdef do_div
2658
#undef do_div
2659
#endif
2660
#include <asm-generic/div64.h>
2661
#endif
2662
2663
/*
 * rtw_modular64 - platform independent 64-bit modulo, returns x % y.
 * On Linux, do_div() divides its first argument in place and returns
 * the remainder (needed because 64-bit division may not be available
 * natively on 32-bit kernels); other platforms use the native operator.
 */
u64 rtw_modular64(u64 x, u64 y)
{
#ifdef PLATFORM_LINUX
	return do_div(x, y);
#elif defined(PLATFORM_WINDOWS)
	return x % y;
#elif defined(PLATFORM_FREEBSD)
	return x % y;
#endif
}
2673
2674
/*
 * rtw_division64 - platform independent 64-bit division, returns x / y.
 * On Linux, do_div() stores the quotient back into x (its return value,
 * the remainder, is ignored here); other platforms use the native
 * operator.
 */
u64 rtw_division64(u64 x, u64 y)
{
#ifdef PLATFORM_LINUX
	do_div(x, y);
	return x;
#elif defined(PLATFORM_WINDOWS)
	return x / y;
#elif defined(PLATFORM_FREEBSD)
	return x / y;
#endif
}
2685
2686
/*
 * rtw_random32 - return a 32-bit pseudo random number using whichever
 * PRNG API the running kernel version provides.  Compile-time error on
 * platforms where this is not implemented.
 */
inline u32 rtw_random32(void)
{
#ifdef PLATFORM_LINUX
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(6, 1, 0))
	/* prandom_u32() was removed in v6.1; get_random_u32() replaces it */
	return get_random_u32();
#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0))
	return prandom_u32();
#elif (LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 18))
	/* very old kernels: pull 4 raw bytes from the entropy pool */
	u32 random_int;
	get_random_bytes(&random_int , 4);
	return random_int;
#else
	return random32();
#endif
#elif defined(PLATFORM_WINDOWS)
#error "to be implemented\n"
#elif defined(PLATFORM_FREEBSD)
#error "to be implemented\n"
#endif
}
2706
2707
/*
 * rtw_buf_free - free a dynamically tracked buffer and reset its tracking
 * @buf: pointer to the buffer pointer; *buf is freed and set to NULL
 * @buf_len: pointer to the tracked length; reset to 0
 *
 * Safe to call when *buf is already NULL.  Does nothing if either
 * pointer argument is NULL.
 *
 * Fix: the original also kept a local `ori_len` that was assigned but
 * never read (dead store / set-but-unused warning); it is removed.
 */
void rtw_buf_free(u8 **buf, u32 *buf_len)
{
	if (!buf || !buf_len)
		return;

	if (*buf) {
		u32 len = *buf_len;

		/* clear the tracking state before releasing the memory */
		*buf_len = 0;
		rtw_mfree(*buf, len);
		*buf = NULL;
	}
}
2723
2724
/*
 * rtw_buf_update - replace *buf/*buf_len with a fresh copy of src/src_len
 * @buf: pointer to the tracked buffer pointer
 * @buf_len: pointer to the tracked length
 * @src: source bytes to duplicate; NULL keeps nothing
 * @src_len: number of source bytes; 0 keeps nothing
 *
 * A NULL/empty @src, or a failed allocation of the copy, still releases
 * the previous buffer and leaves *buf NULL with *buf_len == 0.
 */
void rtw_buf_update(u8 **buf, u32 *buf_len, u8 *src, u32 src_len)
{
	u8 *old_buf, *new_buf = NULL;
	u32 old_len, new_len = 0;

	if (!buf || !buf_len)
		return;

	/* duplicate src when there is something to duplicate */
	if (src && src_len) {
		new_buf = rtw_malloc(src_len);
		if (new_buf) {
			new_len = src_len;
			_rtw_memcpy(new_buf, src, new_len);
		}
	}

	/* swap in the duplicate (possibly NULL) ... */
	old_buf = *buf;
	old_len = *buf_len;
	*buf = new_buf;
	*buf_len = new_len;

	/* ... then release the previous buffer */
	if (old_buf && old_len > 0)
		rtw_mfree(old_buf, old_len);
}
2756
2757
2758
/**
2759
* rtw_cbuf_full - test if cbuf is full
2760
* @cbuf: pointer of struct rtw_cbuf
2761
*
2762
* Returns: _TRUE if cbuf is full
2763
*/
2764
inline bool rtw_cbuf_full(struct rtw_cbuf *cbuf)
2765
{
2766
return (cbuf->write == cbuf->read - 1) ? _TRUE : _FALSE;
2767
}
2768
2769
/**
2770
* rtw_cbuf_empty - test if cbuf is empty
2771
* @cbuf: pointer of struct rtw_cbuf
2772
*
2773
* Returns: _TRUE if cbuf is empty
2774
*/
2775
inline bool rtw_cbuf_empty(struct rtw_cbuf *cbuf)
2776
{
2777
return (cbuf->write == cbuf->read) ? _TRUE : _FALSE;
2778
}
2779
2780
/**
2781
* rtw_cbuf_push - push a pointer into cbuf
2782
* @cbuf: pointer of struct rtw_cbuf
2783
* @buf: pointer to push in
2784
*
2785
* Lock free operation, be careful of the use scheme
2786
* Returns: _TRUE push success
2787
*/
2788
bool rtw_cbuf_push(struct rtw_cbuf *cbuf, void *buf)
2789
{
2790
if (rtw_cbuf_full(cbuf))
2791
return _FAIL;
2792
2793
if (0)
2794
RTW_INFO("%s on %u\n", __func__, cbuf->write);
2795
cbuf->bufs[cbuf->write] = buf;
2796
cbuf->write = (cbuf->write + 1) % cbuf->size;
2797
2798
return _SUCCESS;
2799
}
2800
2801
/**
2802
* rtw_cbuf_pop - pop a pointer from cbuf
2803
* @cbuf: pointer of struct rtw_cbuf
2804
*
2805
* Lock free operation, be careful of the use scheme
2806
* Returns: pointer popped out
2807
*/
2808
void *rtw_cbuf_pop(struct rtw_cbuf *cbuf)
2809
{
2810
void *buf;
2811
if (rtw_cbuf_empty(cbuf))
2812
return NULL;
2813
2814
if (0)
2815
RTW_INFO("%s on %u\n", __func__, cbuf->read);
2816
buf = cbuf->bufs[cbuf->read];
2817
cbuf->read = (cbuf->read + 1) % cbuf->size;
2818
2819
return buf;
2820
}
2821
2822
/**
2823
* rtw_cbuf_alloc - allocte a rtw_cbuf with given size and do initialization
2824
* @size: size of pointer
2825
*
2826
* Returns: pointer of srtuct rtw_cbuf, NULL for allocation failure
2827
*/
2828
struct rtw_cbuf *rtw_cbuf_alloc(u32 size)
2829
{
2830
struct rtw_cbuf *cbuf;
2831
2832
cbuf = (struct rtw_cbuf *)rtw_malloc(sizeof(*cbuf) + sizeof(void *) * size);
2833
2834
if (cbuf) {
2835
cbuf->write = cbuf->read = 0;
2836
cbuf->size = size;
2837
}
2838
2839
return cbuf;
2840
}
2841
2842
/**
2843
* rtw_cbuf_free - free the given rtw_cbuf
2844
* @cbuf: pointer of struct rtw_cbuf to free
2845
*/
2846
void rtw_cbuf_free(struct rtw_cbuf *cbuf)
2847
{
2848
rtw_mfree((u8 *)cbuf, sizeof(*cbuf) + sizeof(void *) * cbuf->size);
2849
}
2850
2851
/**
2852
* map_readN - read a range of map data
2853
* @map: map to read
2854
* @offset: start address to read
2855
* @len: length to read
2856
* @buf: pointer of buffer to store data read
2857
*
2858
* Returns: _SUCCESS or _FAIL
2859
*/
2860
int map_readN(const struct map_t *map, u16 offset, u16 len, u8 *buf)
2861
{
2862
const struct map_seg_t *seg;
2863
int ret = _FAIL;
2864
int i;
2865
2866
if (len == 0) {
2867
rtw_warn_on(1);
2868
goto exit;
2869
}
2870
2871
if (offset + len > map->len) {
2872
rtw_warn_on(1);
2873
goto exit;
2874
}
2875
2876
_rtw_memset(buf, map->init_value, len);
2877
2878
for (i = 0; i < map->seg_num; i++) {
2879
u8 *c_dst, *c_src;
2880
u16 c_len;
2881
2882
seg = map->segs + i;
2883
if (seg->sa + seg->len <= offset || seg->sa >= offset + len)
2884
continue;
2885
2886
if (seg->sa >= offset) {
2887
c_dst = buf + (seg->sa - offset);
2888
c_src = seg->c;
2889
if (seg->sa + seg->len <= offset + len)
2890
c_len = seg->len;
2891
else
2892
c_len = offset + len - seg->sa;
2893
} else {
2894
c_dst = buf;
2895
c_src = seg->c + (offset - seg->sa);
2896
if (seg->sa + seg->len >= offset + len)
2897
c_len = len;
2898
else
2899
c_len = seg->sa + seg->len - offset;
2900
}
2901
2902
_rtw_memcpy(c_dst, c_src, c_len);
2903
}
2904
2905
exit:
2906
return ret;
2907
}
2908
2909
/**
2910
* map_read8 - read 1 byte of map data
2911
* @map: map to read
2912
* @offset: address to read
2913
*
2914
* Returns: value of data of specified offset. map.init_value if offset is out of range
2915
*/
2916
u8 map_read8(const struct map_t *map, u16 offset)
2917
{
2918
const struct map_seg_t *seg;
2919
u8 val = map->init_value;
2920
int i;
2921
2922
if (offset + 1 > map->len) {
2923
rtw_warn_on(1);
2924
goto exit;
2925
}
2926
2927
for (i = 0; i < map->seg_num; i++) {
2928
seg = map->segs + i;
2929
if (seg->sa + seg->len <= offset || seg->sa >= offset + 1)
2930
continue;
2931
2932
val = *(seg->c + offset - seg->sa);
2933
break;
2934
}
2935
2936
exit:
2937
return val;
2938
}
2939
2940
/*
 * rtw_blacklist_add - add (or refresh) @addr in blacklist @blist
 * @blist: blacklist queue, protected by its own lock
 * @addr: ETH_ALEN-byte MAC address to blacklist
 * @timeout_ms: validity period, measured from now
 *
 * If the address is already present its expiry time is refreshed.
 * Expired entries encountered during the scan are freed as a side
 * effect.
 *
 * Returns: RTW_ALREADY if the address was present and not yet expired,
 * _SUCCESS if it was added or refreshed, _FAIL on allocation failure.
 */
int rtw_blacklist_add(_queue *blist, const u8 *addr, u32 timeout_ms)
{
	struct blacklist_ent *ent;
	_list *list, *head;
	u8 exist = _FALSE, timeout = _FALSE;

	enter_critical_bh(&blist->lock);

	head = &blist->queue;
	list = get_next(head);
	while (rtw_end_of_queue_search(head, list) == _FALSE) {
		ent = LIST_CONTAINOR(list, struct blacklist_ent, list);
		list = get_next(list);

		if (_rtw_memcmp(ent->addr, addr, ETH_ALEN) == _TRUE) {
			/* already listed: record whether it had expired,
			 * then refresh its expiry time */
			exist = _TRUE;
			if (rtw_time_after(rtw_get_current_time(), ent->exp_time))
				timeout = _TRUE;
			ent->exp_time = rtw_get_current_time()
				+ rtw_ms_to_systime(timeout_ms);
			break;
		}

		/* reclaim any expired entry seen while scanning */
		if (rtw_time_after(rtw_get_current_time(), ent->exp_time)) {
			rtw_list_delete(&ent->list);
			rtw_mfree(ent, sizeof(struct blacklist_ent));
		}
	}

	if (exist == _FALSE) {
		/* not present: allocate a fresh entry (on an empty queue the
		 * loop above never ran, so ent is always assigned here) */
		ent = rtw_malloc(sizeof(struct blacklist_ent));
		if (ent) {
			_rtw_memcpy(ent->addr, addr, ETH_ALEN);
			ent->exp_time = rtw_get_current_time()
				+ rtw_ms_to_systime(timeout_ms);
			rtw_list_insert_tail(&ent->list, head);
		}
	}

	exit_critical_bh(&blist->lock);

	/* ent is either the matched entry or the freshly allocated one
	 * (NULL only on allocation failure) */
	return (exist == _TRUE && timeout == _FALSE) ? RTW_ALREADY : (ent ? _SUCCESS : _FAIL);
}
2983
2984
/*
 * rtw_blacklist_del - remove @addr from blacklist @blist
 * @blist: blacklist queue, protected by its own lock
 * @addr: ETH_ALEN-byte MAC address to remove
 *
 * Expired entries encountered during the scan are reclaimed as a side
 * effect.
 *
 * Returns: _SUCCESS if the address was found and removed,
 * RTW_ALREADY if it was not present
 */
int rtw_blacklist_del(_queue *blist, const u8 *addr)
{
	struct blacklist_ent *cur = NULL;
	_list *pos, *head;
	u8 found = _FALSE;

	enter_critical_bh(&blist->lock);
	head = &blist->queue;
	pos = get_next(head);
	while (rtw_end_of_queue_search(head, pos) == _FALSE) {
		cur = LIST_CONTAINOR(pos, struct blacklist_ent, list);
		pos = get_next(pos);

		if (_rtw_memcmp(cur->addr, addr, ETH_ALEN) == _TRUE) {
			/* target found: unlink, free and stop */
			rtw_list_delete(&cur->list);
			rtw_mfree(cur, sizeof(struct blacklist_ent));
			found = _TRUE;
			break;
		}

		/* reclaim expired entries seen along the way */
		if (rtw_time_after(rtw_get_current_time(), cur->exp_time)) {
			rtw_list_delete(&cur->list);
			rtw_mfree(cur, sizeof(struct blacklist_ent));
		}
	}

	exit_critical_bh(&blist->lock);

	return (found == _TRUE) ? _SUCCESS : RTW_ALREADY;
}
3014
3015
/*
 * rtw_blacklist_search - test whether @addr is currently blacklisted
 * @blist: blacklist queue, protected by its own lock
 * @addr: ETH_ALEN-byte MAC address to look up
 *
 * An expired matching entry is reclaimed and treated as absent.
 * Expired entries encountered during the scan are also reclaimed.
 *
 * Returns: _TRUE if a live entry for @addr exists, _FALSE otherwise
 */
int rtw_blacklist_search(_queue *blist, const u8 *addr)
{
	struct blacklist_ent *cur = NULL;
	_list *pos, *head;
	u8 found = _FALSE;

	enter_critical_bh(&blist->lock);
	head = &blist->queue;
	pos = get_next(head);
	while (rtw_end_of_queue_search(head, pos) == _FALSE) {
		cur = LIST_CONTAINOR(pos, struct blacklist_ent, list);
		pos = get_next(pos);

		if (_rtw_memcmp(cur->addr, addr, ETH_ALEN) == _TRUE) {
			if (rtw_time_after(rtw_get_current_time(), cur->exp_time)) {
				/* matching entry already expired: reclaim it
				 * and report "not blacklisted" */
				rtw_list_delete(&cur->list);
				rtw_mfree(cur, sizeof(struct blacklist_ent));
			} else
				found = _TRUE;
			break;
		}

		/* reclaim expired entries seen along the way */
		if (rtw_time_after(rtw_get_current_time(), cur->exp_time)) {
			rtw_list_delete(&cur->list);
			rtw_mfree(cur, sizeof(struct blacklist_ent));
		}
	}

	exit_critical_bh(&blist->lock);

	return found;
}
3047
3048
/*
 * rtw_blacklist_flush - discard every entry of blacklist @blist
 * @blist: blacklist queue, protected by its own lock
 *
 * Detaches the whole queue onto a private list head while holding the
 * lock, then frees the entries outside the critical section to keep
 * the locked region short.
 */
void rtw_blacklist_flush(_queue *blist)
{
	struct blacklist_ent *cur;
	_list *pos, *head;
	_list tmp;

	_rtw_init_listhead(&tmp);

	enter_critical_bh(&blist->lock);
	rtw_list_splice_init(&blist->queue, &tmp);
	exit_critical_bh(&blist->lock);

	head = &tmp;
	pos = get_next(head);
	while (rtw_end_of_queue_search(head, pos) == _FALSE) {
		cur = LIST_CONTAINOR(pos, struct blacklist_ent, list);
		pos = get_next(pos);
		rtw_list_delete(&cur->list);
		rtw_mfree(cur, sizeof(struct blacklist_ent));
	}
}
3069
3070
/*
 * dump_blacklist - print every entry of blacklist @blist
 * @sel: output selector passed through to RTW_PRINT_SEL
 * @blist: blacklist queue, protected by its own lock
 * @title: optional heading, printed only when the list is non-empty
 *
 * Each entry is shown either as expired or with its remaining
 * lifetime in milliseconds.
 */
void dump_blacklist(void *sel, _queue *blist, const char *title)
{
	struct blacklist_ent *cur = NULL;
	_list *pos, *head;

	enter_critical_bh(&blist->lock);
	head = &blist->queue;
	pos = get_next(head);

	if (rtw_end_of_queue_search(head, pos) == _FALSE) {
		if (title)
			RTW_PRINT_SEL(sel, "%s:\n", title);

		while (rtw_end_of_queue_search(head, pos) == _FALSE) {
			cur = LIST_CONTAINOR(pos, struct blacklist_ent, list);
			pos = get_next(pos);

			if (rtw_time_after(rtw_get_current_time(), cur->exp_time))
				RTW_PRINT_SEL(sel, MAC_FMT" expired\n", MAC_ARG(cur->addr));
			else
				RTW_PRINT_SEL(sel, MAC_FMT" %u\n", MAC_ARG(cur->addr)
					, rtw_get_remaining_time_ms(cur->exp_time));
		}
	}
	exit_critical_bh(&blist->lock);
}
3097
3098
/**
3099
* is_null -
3100
*
3101
* Return TRUE if c is null character
3102
* FALSE otherwise.
3103
*/
3104
inline BOOLEAN is_null(char c)
3105
{
3106
if (c == '\0')
3107
return _TRUE;
3108
else
3109
return _FALSE;
3110
}
3111
3112
inline BOOLEAN is_all_null(char *c, int len)
3113
{
3114
for (; len > 0; len--)
3115
if (c[len - 1] != '\0')
3116
return _FALSE;
3117
3118
return _TRUE;
3119
}
3120
3121
/**
3122
* is_eol -
3123
*
3124
* Return TRUE if c is represent for EOL (end of line)
3125
* FALSE otherwise.
3126
*/
3127
inline BOOLEAN is_eol(char c)
3128
{
3129
if (c == '\r' || c == '\n')
3130
return _TRUE;
3131
else
3132
return _FALSE;
3133
}
3134
3135
/**
3136
* is_space -
3137
*
3138
* Return TRUE if c is represent for space
3139
* FALSE otherwise.
3140
*/
3141
inline BOOLEAN is_space(char c)
3142
{
3143
if (c == ' ' || c == '\t')
3144
return _TRUE;
3145
else
3146
return _FALSE;
3147
}
3148
3149
/**
3150
* IsHexDigit -
3151
*
3152
* Return TRUE if chTmp is represent for hex digit
3153
* FALSE otherwise.
3154
*/
3155
inline BOOLEAN IsHexDigit(char chTmp)
3156
{
3157
if ((chTmp >= '0' && chTmp <= '9') ||
3158
(chTmp >= 'a' && chTmp <= 'f') ||
3159
(chTmp >= 'A' && chTmp <= 'F'))
3160
return _TRUE;
3161
else
3162
return _FALSE;
3163
}
3164
3165
/**
3166
* is_alpha -
3167
*
3168
* Return TRUE if chTmp is represent for alphabet
3169
* FALSE otherwise.
3170
*/
3171
inline BOOLEAN is_alpha(char chTmp)
3172
{
3173
if ((chTmp >= 'a' && chTmp <= 'z') ||
3174
(chTmp >= 'A' && chTmp <= 'Z'))
3175
return _TRUE;
3176
else
3177
return _FALSE;
3178
}
3179
3180
inline char alpha_to_upper(char c)
3181
{
3182
if ((c >= 'a' && c <= 'z'))
3183
c = 'A' + (c - 'a');
3184
return c;
3185
}
3186
3187
/* Convert a single hex digit character to its numeric value (0..15);
 * return -1 for any non-hex character. */
int hex2num_i(char c)
{
	if (c >= '0' && c <= '9')
		return c - '0';
	if (c >= 'a' && c <= 'f')
		return 10 + (c - 'a');
	if (c >= 'A' && c <= 'F')
		return 10 + (c - 'A');
	/* not a hex digit */
	return -1;
}
3197
3198
/* Convert the two hex digit characters at @hex into a byte value
 * (0..255); return -1 if either character is not a hex digit. */
int hex2byte_i(const char *hex)
{
	int hi, lo;

	hi = hex2num_i(hex[0]);
	lo = hex2num_i(hex[1]);
	if (hi < 0 || lo < 0)
		return -1;

	return (hi << 4) | lo;
}
3209
3210
/* Decode @len bytes from the hex string @hex into @buf (so @hex must
 * hold at least 2 * @len hex digit characters).
 * Return 0 on success, -1 on the first malformed digit pair. */
int hexstr2bin(const char *hex, u8 *buf, size_t len)
{
	size_t i;

	for (i = 0; i < len; i++) {
		int val = hex2byte_i(hex + 2 * i);

		if (val < 0)
			return -1;
		buf[i] = (u8)val;
	}

	return 0;
}
3226
3227
3228