GitHub Repository: freebsd/freebsd-src
Path: blob/main/sys/contrib/dev/iwlwifi/iwl-trans.c
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright (C) 2015 Intel Mobile Communications GmbH
 * Copyright (C) 2016-2017 Intel Deutschland GmbH
 * Copyright (C) 2019-2021, 2023-2025 Intel Corporation
 */
#include <linux/kernel.h>
#include <linux/bsearch.h>
#include <linux/list.h>

#include "fw/api/tx.h"
#include "iwl-trans.h"
#include "iwl-drv.h"
#include "iwl-fh.h"
#include <linux/dmapool.h>
#include "fw/api/commands.h"
#include "pcie/gen1_2/internal.h"
#include "pcie/iwl-context-info-v2.h"

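/*
 * Per-device restart bookkeeping, keyed by device name. Entries live on
 * restart_data_list (protected by restart_data_lock) and track how many
 * firmware restarts happened recently, so that the reset mode can be
 * escalated; see iwl_trans_determine_restart_mode() below.
 */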
struct iwl_trans_dev_restart_data {
	struct list_head list;
	unsigned int restart_count;
	time64_t last_error;
	bool backoff;
	char name[];
};

static LIST_HEAD(restart_data_list);
static DEFINE_SPINLOCK(restart_data_lock);

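/*
 * Look up the restart data for @dev by name, allocating a new entry on
 * first use. The entry is allocated with GFP_ATOMIC so callers need not
 * be able to sleep; a failed allocation is tolerated and simply means no
 * escalation state is kept for this device.
 */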
static struct iwl_trans_dev_restart_data *
iwl_trans_get_restart_data(struct device *dev)
{
	struct iwl_trans_dev_restart_data *tmp, *data = NULL;
	const char *name = dev_name(dev);

	spin_lock(&restart_data_lock);
	list_for_each_entry(tmp, &restart_data_list, list) {
		if (strcmp(tmp->name, name))
			continue;
		data = tmp;
		break;
	}
	spin_unlock(&restart_data_lock);

	if (data)
		return data;

	data = kzalloc(struct_size(data, name, strlen(name) + 1), GFP_ATOMIC);
	if (!data)
		return NULL;

	strcpy(data->name, name);
	spin_lock(&restart_data_lock);
	list_add_tail(&data->list, &restart_data_list);
	spin_unlock(&restart_data_lock);

	return data;
}

static void iwl_trans_inc_restart_count(struct device *dev)
{
	struct iwl_trans_dev_restart_data *data;

	data = iwl_trans_get_restart_data(dev);
	if (data) {
		data->last_error = ktime_get_boottime_seconds();
		data->restart_count++;
	}
}

void iwl_trans_free_restart_list(void)
{
	struct iwl_trans_dev_restart_data *tmp;

	while ((tmp = list_first_entry_or_null(&restart_data_list,
					       typeof(*tmp), list))) {
		list_del(&tmp->list);
		kfree(tmp);
	}
}

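/*
 * Reprobe support: a small helper object carrying the device reference
 * for a delayed work item that detaches and re-attaches the driver via
 * device_reprobe(). The work function drops all references it holds
 * (device, allocation, module) when done.
 */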
struct iwl_trans_reprobe {
	struct device *dev;
	struct delayed_work work;
};

static void iwl_trans_reprobe_wk(struct work_struct *wk)
{
	struct iwl_trans_reprobe *reprobe;

	reprobe = container_of(wk, typeof(*reprobe), work.work);

	if (device_reprobe(reprobe->dev))
		dev_err(reprobe->dev, "reprobe failed!\n");
	put_device(reprobe->dev);
	kfree(reprobe);
	module_put(THIS_MODULE);
}

static void iwl_trans_schedule_reprobe(struct iwl_trans *trans,
				       unsigned int delay_ms)
{
	struct iwl_trans_reprobe *reprobe;

	/*
	 * Take a module reference to avoid running this while the module
	 * is being unloaded, and to avoid scheduling a work item whose
	 * code would be removed along with it.
	 */
	if (!try_module_get(THIS_MODULE)) {
		IWL_ERR(trans, "Module is being unloaded - abort\n");
		return;
	}

	reprobe = kzalloc(sizeof(*reprobe), GFP_KERNEL);
	if (!reprobe) {
		module_put(THIS_MODULE);
		return;
	}
	reprobe->dev = get_device(trans->dev);
	INIT_DELAYED_WORK(&reprobe->work, iwl_trans_reprobe_wk);
	schedule_delayed_work(&reprobe->work, msecs_to_jiffies(delay_ms));
}

#define IWL_TRANS_RESET_OK_TIME	7 /* seconds */

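/*
 * Pick the next reset mode based on how many errors were seen recently.
 * Each consecutive restart escalates along one of the escalation lists
 * defined in the function (the _sc list applies from the SC device
 * family onward and interleaves TOP and product resets at the high end).
 * The counter resets once the last error is at least
 * IWL_TRANS_RESET_OK_TIME seconds old, and when the list is exhausted a
 * one-shot IWL_RESET_MODE_BACKOFF is returned to delay the next attempt.
 */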
static enum iwl_reset_mode
iwl_trans_determine_restart_mode(struct iwl_trans *trans)
{
	struct iwl_trans_dev_restart_data *data;
	enum iwl_reset_mode at_least = 0;
	unsigned int index;
	static const enum iwl_reset_mode escalation_list_old[] = {
		IWL_RESET_MODE_SW_RESET,
		IWL_RESET_MODE_REPROBE,
		IWL_RESET_MODE_REPROBE,
		IWL_RESET_MODE_FUNC_RESET,
		IWL_RESET_MODE_PROD_RESET,
	};
	static const enum iwl_reset_mode escalation_list_sc[] = {
		IWL_RESET_MODE_SW_RESET,
		IWL_RESET_MODE_REPROBE,
		IWL_RESET_MODE_REPROBE,
		IWL_RESET_MODE_FUNC_RESET,
		IWL_RESET_MODE_TOP_RESET,
		IWL_RESET_MODE_PROD_RESET,
		IWL_RESET_MODE_TOP_RESET,
		IWL_RESET_MODE_PROD_RESET,
		IWL_RESET_MODE_TOP_RESET,
		IWL_RESET_MODE_PROD_RESET,
	};
	const enum iwl_reset_mode *escalation_list;
	size_t escalation_list_size;

	/* used by TOP fatal error/TOP reset */
	if (trans->restart.mode.type == IWL_ERR_TYPE_TOP_RESET_FAILED)
		return IWL_RESET_MODE_PROD_RESET;

	if (trans->request_top_reset) {
		trans->request_top_reset = 0;
		if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_SC)
			return IWL_RESET_MODE_TOP_RESET;
		return IWL_RESET_MODE_PROD_RESET;
	}

	if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_SC) {
		escalation_list = escalation_list_sc;
		escalation_list_size = ARRAY_SIZE(escalation_list_sc);
	} else {
		escalation_list = escalation_list_old;
		escalation_list_size = ARRAY_SIZE(escalation_list_old);
	}

	if (trans->restart.during_reset)
		at_least = IWL_RESET_MODE_REPROBE;

	data = iwl_trans_get_restart_data(trans->dev);
	if (!data)
		return at_least;

	if (!data->backoff &&
	    ktime_get_boottime_seconds() - data->last_error >=
			IWL_TRANS_RESET_OK_TIME)
		data->restart_count = 0;

	index = data->restart_count;
	if (index >= escalation_list_size) {
		index = escalation_list_size - 1;
		if (!data->backoff) {
			data->backoff = true;
			return IWL_RESET_MODE_BACKOFF;
		}
		data->backoff = false;
	}

	return max(at_least, escalation_list[index]);
}

#define IWL_TRANS_TOP_FOLLOWER_WAIT	180 /* ms */

#define IWL_TRANS_RESET_DELAY	(HZ * 60)

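/*
 * Restart worker, scheduled when a device error was reported. It dumps
 * error data via the opmode, determines a reset mode and carries it out;
 * BACKOFF just requeues the work IWL_TRANS_RESET_DELAY jiffies later.
 * TOP-reset-by-BT errors only schedule a reprobe after a short follower
 * wait (IWL_TRANS_TOP_FOLLOWER_WAIT).
 */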
static void iwl_trans_restart_wk(struct work_struct *wk)
{
	struct iwl_trans *trans = container_of(wk, typeof(*trans),
					       restart.wk.work);
	enum iwl_reset_mode mode;

	if (trans->restart.mode.type == IWL_ERR_TYPE_TOP_RESET_BY_BT) {
		iwl_trans_schedule_reprobe(trans, IWL_TRANS_TOP_FOLLOWER_WAIT);
		return;
	}

	if (!trans->op_mode)
		return;

	/* might have been scheduled before marked as dead, re-check */
	if (test_bit(STATUS_TRANS_DEAD, &trans->status))
		return;

	iwl_op_mode_dump_error(trans->op_mode, &trans->restart.mode);

	/*
	 * If the opmode stopped the device while we were trying to dump and
	 * reset, then we'll have done the dump already (synchronized by the
	 * opmode lock that it will acquire in iwl_op_mode_dump_error()) and
	 * managed that via trans->restart.mode.
	 * Additionally, make sure that in such a case we won't attempt to do
	 * any resets now, since it's no longer requested.
	 */
	if (!test_and_clear_bit(STATUS_RESET_PENDING, &trans->status))
		return;

	if (!iwlwifi_mod_params.fw_restart)
		return;

	mode = iwl_trans_determine_restart_mode(trans);
	if (mode == IWL_RESET_MODE_BACKOFF) {
		IWL_ERR(trans, "Too many device errors - delay next reset\n");
		queue_delayed_work(system_unbound_wq, &trans->restart.wk,
				   IWL_TRANS_RESET_DELAY);
		return;
	}

	iwl_trans_inc_restart_count(trans->dev);

	switch (mode) {
	case IWL_RESET_MODE_TOP_RESET:
		trans->do_top_reset = 1;
		IWL_ERR(trans, "Device error - TOP reset\n");
		fallthrough;
	case IWL_RESET_MODE_SW_RESET:
		if (mode == IWL_RESET_MODE_SW_RESET)
			IWL_ERR(trans, "Device error - SW reset\n");
		iwl_trans_opmode_sw_reset(trans, trans->restart.mode.type);
		break;
	case IWL_RESET_MODE_REPROBE:
		IWL_ERR(trans, "Device error - reprobe!\n");

		iwl_trans_schedule_reprobe(trans, 0);
		break;
	default:
		iwl_trans_pcie_reset(trans, mode);
		break;
	}
}

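/*
 * Allocate the transport together with @priv_size bytes of private data
 * for the layer above, and create the per-device TX command slab cache.
 * The transport allocation itself is devres-managed; the cache is freed
 * in iwl_trans_free().
 */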
struct iwl_trans *iwl_trans_alloc(unsigned int priv_size,
				  struct device *dev,
				  const struct iwl_mac_cfg *mac_cfg,
				  unsigned int txcmd_size,
				  unsigned int txcmd_align)
{
	struct iwl_trans *trans;
#ifdef CONFIG_LOCKDEP
	static struct lock_class_key __sync_cmd_key;
#endif

	trans = devm_kzalloc(dev, sizeof(*trans) + priv_size, GFP_KERNEL);
	if (!trans)
		return NULL;

	trans->mac_cfg = mac_cfg;

#ifdef CONFIG_LOCKDEP
	lockdep_init_map(&trans->sync_cmd_lockdep_map, "sync_cmd_lockdep_map",
			 &__sync_cmd_key, 0);
#endif

	trans->dev = dev;

	INIT_DELAYED_WORK(&trans->restart.wk, iwl_trans_restart_wk);

	snprintf(trans->dev_cmd_pool_name, sizeof(trans->dev_cmd_pool_name),
		 "iwl_cmd_pool:%s", dev_name(trans->dev));
	trans->dev_cmd_pool =
		kmem_cache_create(trans->dev_cmd_pool_name,
				  txcmd_size, txcmd_align,
				  SLAB_HWCACHE_ALIGN, NULL);
	if (!trans->dev_cmd_pool)
		return NULL;

	return trans;
}

void iwl_trans_free(struct iwl_trans *trans)
{
	cancel_delayed_work_sync(&trans->restart.wk);
	kmem_cache_destroy(trans->dev_cmd_pool);
}

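/*
 * Send a host command to the firmware, synchronously unless CMD_ASYNC
 * is set. Illustrative use from an opmode (a sketch only; ECHO_CMD is
 * just one example of a command id from fw/api/commands.h):
 *
 *	struct iwl_host_cmd cmd = {
 *		.id = ECHO_CMD,
 *		.flags = CMD_WANT_SKB,
 *	};
 *	int ret = iwl_trans_send_cmd(trans, &cmd);
 *	if (!ret)
 *		iwl_free_resp(&cmd);
 */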
int iwl_trans_send_cmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
{
	int ret;

	if (unlikely(!(cmd->flags & CMD_SEND_IN_RFKILL) &&
		     test_bit(STATUS_RFKILL_OPMODE, &trans->status)))
		return -ERFKILL;

	if (unlikely(test_bit(STATUS_SUSPENDED, &trans->status)))
		return -EHOSTDOWN;

	if (unlikely(test_bit(STATUS_FW_ERROR, &trans->status)))
		return -EIO;

	if (WARN_ONCE(trans->state != IWL_TRANS_FW_ALIVE,
		      "bad state = %d\n", trans->state))
		return -EIO;

	if (!(cmd->flags & CMD_ASYNC))
		lock_map_acquire_read(&trans->sync_cmd_lockdep_map);

	if (trans->conf.wide_cmd_header && !iwl_cmd_groupid(cmd->id)) {
		if (cmd->id != REPLY_ERROR)
			cmd->id = DEF_ID(cmd->id);
	}

	ret = iwl_trans_pcie_send_hcmd(trans, cmd);

	if (!(cmd->flags & CMD_ASYNC))
		lock_map_release(&trans->sync_cmd_lockdep_map);

	if (WARN_ON((cmd->flags & CMD_WANT_SKB) && !ret && !cmd->resp_pkt))
		return -EIO;

	return ret;
}
IWL_EXPORT_SYMBOL(iwl_trans_send_cmd);

/* Comparator for struct iwl_hcmd_names.
 * Used in the binary search over a list of host commands.
 *
 * @key: command_id that we're looking for.
 * @elt: struct iwl_hcmd_names candidate for match.
 *
 * @return 0 iff equal.
 */
static int iwl_hcmd_names_cmp(const void *key, const void *elt)
{
	const struct iwl_hcmd_names *name = elt;
	const u8 *cmd1 = key;
	u8 cmd2 = name->cmd_id;

	return (*cmd1 - cmd2);
}

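/*
 * Map a (group, opcode) command id to its name using the command_groups
 * tables in trans->conf; each per-group array must be sorted by opcode
 * since it is searched with bsearch() via iwl_hcmd_names_cmp() above.
 */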
const char *iwl_get_cmd_string(struct iwl_trans *trans, u32 id)
{
	u8 grp, cmd;
	struct iwl_hcmd_names *ret;
	const struct iwl_hcmd_arr *arr;
	size_t size = sizeof(struct iwl_hcmd_names);

	grp = iwl_cmd_groupid(id);
	cmd = iwl_cmd_opcode(id);

	if (!trans->conf.command_groups ||
	    grp >= trans->conf.command_groups_size ||
	    !trans->conf.command_groups[grp].arr)
		return "UNKNOWN";

	arr = &trans->conf.command_groups[grp];
	ret = bsearch(&cmd, arr->arr, arr->size, size, iwl_hcmd_names_cmp);
	if (!ret)
		return "UNKNOWN";
	return ret->cmd_name;
}
IWL_EXPORT_SYMBOL(iwl_get_cmd_string);

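/*
 * Called when the opmode attaches to the transport; trans->conf is
 * expected to have been filled in by then (see the WARN_ONs below).
 */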
void iwl_trans_op_mode_enter(struct iwl_trans *trans,
			     struct iwl_op_mode *op_mode)
{
	trans->op_mode = op_mode;

	if (WARN_ON(trans->conf.n_no_reclaim_cmds > MAX_NO_RECLAIM_CMDS))
		trans->conf.n_no_reclaim_cmds =
			ARRAY_SIZE(trans->conf.no_reclaim_cmds);

	WARN_ON_ONCE(!trans->conf.rx_mpdu_cmd);

	iwl_trans_pcie_op_mode_enter(trans);
}
IWL_EXPORT_SYMBOL(iwl_trans_op_mode_enter);

int iwl_trans_start_hw(struct iwl_trans *trans)
{
	might_sleep();

	clear_bit(STATUS_TRANS_RESET_IN_PROGRESS, &trans->status);
	/* opmode may not resume if it detects errors */
	clear_bit(STATUS_SUSPENDED, &trans->status);

	return iwl_trans_pcie_start_hw(trans);
}
IWL_EXPORT_SYMBOL(iwl_trans_start_hw);

void iwl_trans_op_mode_leave(struct iwl_trans *trans)
{
	might_sleep();

	if (trans->mac_cfg->gen2)
		iwl_trans_pcie_gen2_op_mode_leave(trans);
	else
		iwl_trans_pcie_op_mode_leave(trans);

	cancel_delayed_work_sync(&trans->restart.wk);

	trans->op_mode = NULL;
	memset(&trans->conf, 0, sizeof(trans->conf));

	trans->state = IWL_TRANS_NO_FW;
}
IWL_EXPORT_SYMBOL(iwl_trans_op_mode_leave);

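/*
 * The register, periphery and memory accessors below are thin wrappers
 * that forward to the PCIe implementation, currently the only bus
 * backend in this driver.
 */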
void iwl_trans_write8(struct iwl_trans *trans, u32 ofs, u8 val)
{
	iwl_trans_pcie_write8(trans, ofs, val);
}

void iwl_trans_write32(struct iwl_trans *trans, u32 ofs, u32 val)
{
	iwl_trans_pcie_write32(trans, ofs, val);
}

u32 iwl_trans_read32(struct iwl_trans *trans, u32 ofs)
{
	return iwl_trans_pcie_read32(trans, ofs);
}

u32 iwl_trans_read_prph(struct iwl_trans *trans, u32 ofs)
{
	return iwl_trans_pcie_read_prph(trans, ofs);
}

void iwl_trans_write_prph(struct iwl_trans *trans, u32 ofs, u32 val)
{
	return iwl_trans_pcie_write_prph(trans, ofs, val);
}

int iwl_trans_read_mem(struct iwl_trans *trans, u32 addr,
		       void *buf, int dwords)
{
	return iwl_trans_pcie_read_mem(trans, addr, buf, dwords);
}
IWL_EXPORT_SYMBOL(iwl_trans_read_mem);

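/*
 * Write @dwords 32-bit words to device memory at @addr through the
 * indirect HBUS target-memory registers; the target address appears to
 * auto-increment on each data write, hence the single WADDR setup. A
 * NULL @buf zero-fills the range. Requires NIC access (returns -EBUSY
 * if it cannot be acquired).
 */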
int iwl_trans_write_mem(struct iwl_trans *trans, u32 addr,
			const void *buf, int dwords)
{
	int offs, ret = 0;
	const u32 *vals = buf;

	if (iwl_trans_grab_nic_access(trans)) {
		iwl_write32(trans, HBUS_TARG_MEM_WADDR, addr);
		for (offs = 0; offs < dwords; offs++)
			iwl_write32(trans, HBUS_TARG_MEM_WDAT,
				    vals ? vals[offs] : 0);
		iwl_trans_release_nic_access(trans);
	} else {
		ret = -EBUSY;
	}
	return ret;
}
IWL_EXPORT_SYMBOL(iwl_trans_write_mem);

void iwl_trans_set_pmi(struct iwl_trans *trans, bool state)
{
	if (state)
		set_bit(STATUS_TPOWER_PMI, &trans->status);
	else
		clear_bit(STATUS_TPOWER_PMI, &trans->status);
}
IWL_EXPORT_SYMBOL(iwl_trans_set_pmi);

int iwl_trans_sw_reset(struct iwl_trans *trans)
{
	return iwl_trans_pcie_sw_reset(trans, true);
}

struct iwl_trans_dump_data *
iwl_trans_dump_data(struct iwl_trans *trans, u32 dump_mask,
		    const struct iwl_dump_sanitize_ops *sanitize_ops,
		    void *sanitize_ctx)
{
	return iwl_trans_pcie_dump_data(trans, dump_mask,
					sanitize_ops, sanitize_ctx);
}

int iwl_trans_d3_suspend(struct iwl_trans *trans, bool test, bool reset)
{
	int err;

	might_sleep();

	err = iwl_trans_pcie_d3_suspend(trans, test, reset);

	if (!err)
		set_bit(STATUS_SUSPENDED, &trans->status);

	return err;
}
IWL_EXPORT_SYMBOL(iwl_trans_d3_suspend);

int iwl_trans_d3_resume(struct iwl_trans *trans, enum iwl_d3_status *status,
			bool test, bool reset)
{
	int err;

	might_sleep();

	err = iwl_trans_pcie_d3_resume(trans, status, test, reset);

	clear_bit(STATUS_SUSPENDED, &trans->status);

	return err;
}
IWL_EXPORT_SYMBOL(iwl_trans_d3_resume);

void iwl_trans_interrupts(struct iwl_trans *trans, bool enable)
{
	iwl_trans_pci_interrupts(trans, enable);
}

void iwl_trans_sync_nmi(struct iwl_trans *trans)
{
	iwl_trans_pcie_sync_nmi(trans);
}

int iwl_trans_write_imr_mem(struct iwl_trans *trans, u32 dst_addr,
			    u64 src_addr, u32 byte_cnt)
{
	return iwl_trans_pcie_copy_imr(trans, dst_addr, src_addr, byte_cnt);
}

void iwl_trans_set_bits_mask(struct iwl_trans *trans, u32 reg,
			     u32 mask, u32 value)
{
	iwl_trans_pcie_set_bits_mask(trans, reg, mask, value);
}
IWL_EXPORT_SYMBOL(iwl_trans_set_bits_mask);

int iwl_trans_read_config32(struct iwl_trans *trans, u32 ofs,
			    u32 *val)
{
	return iwl_trans_pcie_read_config32(trans, ofs, val);
}

bool _iwl_trans_grab_nic_access(struct iwl_trans *trans)
{
	return iwl_trans_pcie_grab_nic_access(trans);
}
IWL_EXPORT_SYMBOL(_iwl_trans_grab_nic_access);

void __releases(nic_access)
iwl_trans_release_nic_access(struct iwl_trans *trans)
{
	iwl_trans_pcie_release_nic_access(trans);
}
IWL_EXPORT_SYMBOL(iwl_trans_release_nic_access);

void iwl_trans_fw_alive(struct iwl_trans *trans)
{
	might_sleep();

	trans->state = IWL_TRANS_FW_ALIVE;

	if (trans->mac_cfg->gen2)
		iwl_trans_pcie_gen2_fw_alive(trans);
	else
		iwl_trans_pcie_fw_alive(trans);
}
IWL_EXPORT_SYMBOL(iwl_trans_fw_alive);

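/*
 * Load and start a firmware image of @ucode_type. On success the state
 * becomes IWL_TRANS_FW_STARTED; it only moves to IWL_TRANS_FW_ALIVE once
 * the firmware's alive notification is handled (see iwl_trans_fw_alive()
 * above).
 */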
int iwl_trans_start_fw(struct iwl_trans *trans, const struct iwl_fw *fw,
		       enum iwl_ucode_type ucode_type, bool run_in_rfkill)
{
	const struct fw_img *img;
	int ret;

	might_sleep();

	img = iwl_get_ucode_image(fw, ucode_type);
	if (!img)
		return -EINVAL;

	clear_bit(STATUS_FW_ERROR, &trans->status);

	if (trans->mac_cfg->gen2)
		ret = iwl_trans_pcie_gen2_start_fw(trans, fw, img,
						   run_in_rfkill);
	else
		ret = iwl_trans_pcie_start_fw(trans, fw, img,
					      run_in_rfkill);

	if (ret == 0)
		trans->state = IWL_TRANS_FW_STARTED;

	return ret;
}
IWL_EXPORT_SYMBOL(iwl_trans_start_fw);

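/*
 * Stop the device. If a reset was still pending, dump the error data
 * inline first and tell the restart worker to abort; see the detailed
 * comment in the function body.
 */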
void iwl_trans_stop_device(struct iwl_trans *trans)
{
	might_sleep();

	/*
	 * See also the comment in iwl_trans_restart_wk().
	 *
	 * When the opmode stops the device while a reset is pending, the
	 * worker (iwl_trans_restart_wk) might not have run yet or, more
	 * likely, will be blocked on the opmode lock. Due to the locking,
	 * we can't just flush the worker.
	 *
	 * If this is the case, then the test_and_clear_bit() ensures that
	 * the worker won't attempt to do anything after the stop.
	 *
	 * The trans->restart.mode is a handshake with the opmode; we set
	 * the context there to ABORT so that when the worker can finally
	 * acquire the lock in the opmode, the code there won't attempt to
	 * do any dumps. Since we'd really like to have the dump though,
	 * also do it inline here (with the opmode locks already held),
	 * but use a separate mode struct to avoid races.
	 */
	if (test_and_clear_bit(STATUS_RESET_PENDING, &trans->status)) {
		struct iwl_fw_error_dump_mode mode;

		mode = trans->restart.mode;
		mode.context = IWL_ERR_CONTEXT_FROM_OPMODE;
		trans->restart.mode.context = IWL_ERR_CONTEXT_ABORT;

		iwl_op_mode_dump_error(trans->op_mode, &mode);
	}

	if (trans->mac_cfg->gen2)
		iwl_trans_pcie_gen2_stop_device(trans);
	else
		iwl_trans_pcie_stop_device(trans);

	trans->state = IWL_TRANS_NO_FW;
}
IWL_EXPORT_SYMBOL(iwl_trans_stop_device);

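/*
 * Queue a frame for transmission on TX queue @queue; dispatches to the
 * gen2 or gen1 PCIe TX path depending on the device generation.
 */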
int iwl_trans_tx(struct iwl_trans *trans, struct sk_buff *skb,
		 struct iwl_device_tx_cmd *dev_cmd, int queue)
{
	if (unlikely(test_bit(STATUS_FW_ERROR, &trans->status)))
		return -EIO;

	if (WARN_ONCE(trans->state != IWL_TRANS_FW_ALIVE,
		      "bad state = %d\n", trans->state))
		return -EIO;

	if (trans->mac_cfg->gen2)
		return iwl_txq_gen2_tx(trans, skb, dev_cmd, queue);

	return iwl_trans_pcie_tx(trans, skb, dev_cmd, queue);
}
IWL_EXPORT_SYMBOL(iwl_trans_tx);

void iwl_trans_reclaim(struct iwl_trans *trans, int queue, int ssn,
		       struct sk_buff_head *skbs, bool is_flush)
{
	if (unlikely(test_bit(STATUS_FW_ERROR, &trans->status)))
		return;

	if (WARN_ONCE(trans->state != IWL_TRANS_FW_ALIVE,
		      "bad state = %d\n", trans->state))
		return;

	iwl_pcie_reclaim(trans, queue, ssn, skbs, is_flush);
}
IWL_EXPORT_SYMBOL(iwl_trans_reclaim);

void iwl_trans_txq_disable(struct iwl_trans *trans, int queue,
			   bool configure_scd)
{
	iwl_trans_pcie_txq_disable(trans, queue, configure_scd);
}
IWL_EXPORT_SYMBOL(iwl_trans_txq_disable);

bool iwl_trans_txq_enable_cfg(struct iwl_trans *trans, int queue, u16 ssn,
			      const struct iwl_trans_txq_scd_cfg *cfg,
			      unsigned int queue_wdg_timeout)
{
	might_sleep();

	if (WARN_ONCE(trans->state != IWL_TRANS_FW_ALIVE,
		      "bad state = %d\n", trans->state))
		return false;

	return iwl_trans_pcie_txq_enable(trans, queue, ssn,
					 cfg, queue_wdg_timeout);
}
IWL_EXPORT_SYMBOL(iwl_trans_txq_enable_cfg);

int iwl_trans_wait_txq_empty(struct iwl_trans *trans, int queue)
{
	if (unlikely(test_bit(STATUS_FW_ERROR, &trans->status)))
		return -EIO;

	if (WARN_ONCE(trans->state != IWL_TRANS_FW_ALIVE,
		      "bad state = %d\n", trans->state))
		return -EIO;

	return iwl_trans_pcie_wait_txq_empty(trans, queue);
}
IWL_EXPORT_SYMBOL(iwl_trans_wait_txq_empty);

int iwl_trans_wait_tx_queues_empty(struct iwl_trans *trans, u32 txqs)
{
	if (WARN_ONCE(trans->state != IWL_TRANS_FW_ALIVE,
		      "bad state = %d\n", trans->state))
		return -EIO;

	return iwl_trans_pcie_wait_txqs_empty(trans, txqs);
}
IWL_EXPORT_SYMBOL(iwl_trans_wait_tx_queues_empty);

void iwl_trans_freeze_txq_timer(struct iwl_trans *trans,
				unsigned long txqs, bool freeze)
{
	if (WARN_ONCE(trans->state != IWL_TRANS_FW_ALIVE,
		      "bad state = %d\n", trans->state))
		return;

	iwl_pcie_freeze_txq_timer(trans, txqs, freeze);
}
IWL_EXPORT_SYMBOL(iwl_trans_freeze_txq_timer);

void iwl_trans_txq_set_shared_mode(struct iwl_trans *trans,
				   int txq_id, bool shared_mode)
{
	iwl_trans_pcie_txq_set_shared_mode(trans, txq_id, shared_mode);
}
IWL_EXPORT_SYMBOL(iwl_trans_txq_set_shared_mode);

#ifdef CONFIG_IWLWIFI_DEBUGFS
void iwl_trans_debugfs_cleanup(struct iwl_trans *trans)
{
	iwl_trans_pcie_debugfs_cleanup(trans);
}
#endif

void iwl_trans_set_q_ptrs(struct iwl_trans *trans, int queue, int ptr)
{
	if (WARN_ONCE(trans->state != IWL_TRANS_FW_ALIVE,
		      "bad state = %d\n", trans->state))
		return;

	iwl_pcie_set_q_ptrs(trans, queue, ptr);
}
IWL_EXPORT_SYMBOL(iwl_trans_set_q_ptrs);

int iwl_trans_txq_alloc(struct iwl_trans *trans, u32 flags, u32 sta_mask,
			u8 tid, int size, unsigned int wdg_timeout)
{
	might_sleep();

	if (WARN_ONCE(trans->state != IWL_TRANS_FW_ALIVE,
		      "bad state = %d\n", trans->state))
		return -EIO;

	return iwl_txq_dyn_alloc(trans, flags, sta_mask, tid,
				 size, wdg_timeout);
}
IWL_EXPORT_SYMBOL(iwl_trans_txq_alloc);

void iwl_trans_txq_free(struct iwl_trans *trans, int queue)
{
	iwl_txq_dyn_free(trans, queue);
}
IWL_EXPORT_SYMBOL(iwl_trans_txq_free);

int iwl_trans_get_rxq_dma_data(struct iwl_trans *trans, int queue,
			       struct iwl_trans_rxq_dma_data *data)
{
	return iwl_trans_pcie_rxq_dma_data(trans, queue, data);
}

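/*
 * PNVM and reduce-power table handling: the payloads are DMA-loaded
 * through the version-2 context info, then pointed to via the context
 * info with the corresponding set_* call.
 */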
int iwl_trans_load_pnvm(struct iwl_trans *trans,
			const struct iwl_pnvm_image *pnvm_data,
			const struct iwl_ucode_capabilities *capa)
{
	return iwl_trans_pcie_ctx_info_v2_load_pnvm(trans, pnvm_data, capa);
}
IWL_EXPORT_SYMBOL(iwl_trans_load_pnvm);

void iwl_trans_set_pnvm(struct iwl_trans *trans,
			const struct iwl_ucode_capabilities *capa)
{
	iwl_trans_pcie_ctx_info_v2_set_pnvm(trans, capa);
}

int iwl_trans_load_reduce_power(struct iwl_trans *trans,
				const struct iwl_pnvm_image *payloads,
				const struct iwl_ucode_capabilities *capa)
{
	return iwl_trans_pcie_ctx_info_v2_load_reduce_power(trans, payloads,
							    capa);
}

void iwl_trans_set_reduce_power(struct iwl_trans *trans,
				const struct iwl_ucode_capabilities *capa)
{
	iwl_trans_pcie_ctx_info_v2_set_reduce_power(trans, capa);
}