Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
freebsd
GitHub Repository: freebsd/freebsd-src
Path: blob/main/sys/contrib/dev/iwlwifi/pcie/ctxt-info.c
48372 views
1
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
2
/*
3
* Copyright (C) 2017 Intel Deutschland GmbH
4
* Copyright (C) 2018-2025 Intel Corporation
5
*/
6
#include "iwl-trans.h"
7
#include "iwl-fh.h"
8
#include "iwl-context-info.h"
9
#include "gen1_2/internal.h"
10
#include "iwl-prph.h"
11
12
static void *_iwl_pcie_ctxt_info_dma_alloc_coherent(struct iwl_trans *trans,
13
size_t size,
14
dma_addr_t *phys,
15
int depth)
16
{
17
void *result;
18
19
if (WARN(depth > 2,
20
"failed to allocate DMA memory not crossing 2^32 boundary"))
21
return NULL;
22
23
result = dma_alloc_coherent(trans->dev, size, phys, GFP_KERNEL);
24
25
if (!result)
26
return NULL;
27
28
if (unlikely(iwl_txq_crosses_4g_boundary(*phys, size))) {
29
void *old = result;
30
dma_addr_t oldphys = *phys;
31
32
result = _iwl_pcie_ctxt_info_dma_alloc_coherent(trans, size,
33
phys,
34
depth + 1);
35
dma_free_coherent(trans->dev, size, old, oldphys);
36
}
37
38
return result;
39
}
40
41
void *iwl_pcie_ctxt_info_dma_alloc_coherent(struct iwl_trans *trans,
42
size_t size,
43
dma_addr_t *phys)
44
{
45
return _iwl_pcie_ctxt_info_dma_alloc_coherent(trans, size, phys, 0);
46
}
47
48
/*
 * Allocate a boundary-safe DMA block of @len bytes, copy @data into it
 * and describe the result in @dram (block, physical, size).
 *
 * Returns 0 on success or -ENOMEM.  On success the caller owns the
 * block and must release it with dma_free_coherent().
 */
int iwl_pcie_ctxt_info_alloc_dma(struct iwl_trans *trans,
				 const void *data, u32 len,
				 struct iwl_dram_data *dram)
{
	dram->block = iwl_pcie_ctxt_info_dma_alloc_coherent(trans, len,
							    &dram->physical);
	if (!dram->block)
		return -ENOMEM;

	memcpy(dram->block, data, len);
	dram->size = len;

	return 0;
}
62
63
void iwl_pcie_ctxt_info_free_paging(struct iwl_trans *trans)
64
{
65
struct iwl_self_init_dram *dram = &trans->init_dram;
66
int i;
67
68
if (!dram->paging) {
69
WARN_ON(dram->paging_cnt);
70
return;
71
}
72
73
/* free paging*/
74
for (i = 0; i < dram->paging_cnt; i++)
75
dma_free_coherent(trans->dev, dram->paging[i].size,
76
dram->paging[i].block,
77
dram->paging[i].physical);
78
79
kfree(dram->paging);
80
dram->paging_cnt = 0;
81
dram->paging = NULL;
82
}
83
84
int iwl_pcie_init_fw_sec(struct iwl_trans *trans,
85
const struct fw_img *fw,
86
struct iwl_context_info_dram_nonfseq *ctxt_dram)
87
{
88
struct iwl_self_init_dram *dram = &trans->init_dram;
89
int i, ret, lmac_cnt, umac_cnt, paging_cnt;
90
91
if (WARN(dram->paging,
92
"paging shouldn't already be initialized (%d pages)\n",
93
dram->paging_cnt))
94
iwl_pcie_ctxt_info_free_paging(trans);
95
96
lmac_cnt = iwl_pcie_get_num_sections(fw, 0);
97
/* add 1 due to separator */
98
umac_cnt = iwl_pcie_get_num_sections(fw, lmac_cnt + 1);
99
/* add 2 due to separators */
100
paging_cnt = iwl_pcie_get_num_sections(fw, lmac_cnt + umac_cnt + 2);
101
102
dram->fw = kcalloc(umac_cnt + lmac_cnt, sizeof(*dram->fw), GFP_KERNEL);
103
if (!dram->fw)
104
return -ENOMEM;
105
dram->paging = kcalloc(paging_cnt, sizeof(*dram->paging), GFP_KERNEL);
106
if (!dram->paging)
107
return -ENOMEM;
108
109
/* initialize lmac sections */
110
for (i = 0; i < lmac_cnt; i++) {
111
ret = iwl_pcie_ctxt_info_alloc_dma(trans, fw->sec[i].data,
112
fw->sec[i].len,
113
&dram->fw[dram->fw_cnt]);
114
if (ret)
115
return ret;
116
ctxt_dram->lmac_img[i] =
117
cpu_to_le64(dram->fw[dram->fw_cnt].physical);
118
dram->fw_cnt++;
119
}
120
121
/* initialize umac sections */
122
for (i = 0; i < umac_cnt; i++) {
123
/* access FW with +1 to make up for lmac separator */
124
ret = iwl_pcie_ctxt_info_alloc_dma(trans,
125
fw->sec[dram->fw_cnt + 1].data,
126
fw->sec[dram->fw_cnt + 1].len,
127
&dram->fw[dram->fw_cnt]);
128
if (ret)
129
return ret;
130
ctxt_dram->umac_img[i] =
131
cpu_to_le64(dram->fw[dram->fw_cnt].physical);
132
dram->fw_cnt++;
133
}
134
135
/*
136
* Initialize paging.
137
* Paging memory isn't stored in dram->fw as the umac and lmac - it is
138
* stored separately.
139
* This is since the timing of its release is different -
140
* while fw memory can be released on alive, the paging memory can be
141
* freed only when the device goes down.
142
* Given that, the logic here in accessing the fw image is a bit
143
* different - fw_cnt isn't changing so loop counter is added to it.
144
*/
145
for (i = 0; i < paging_cnt; i++) {
146
/* access FW with +2 to make up for lmac & umac separators */
147
int fw_idx = dram->fw_cnt + i + 2;
148
149
ret = iwl_pcie_ctxt_info_alloc_dma(trans, fw->sec[fw_idx].data,
150
fw->sec[fw_idx].len,
151
&dram->paging[i]);
152
if (ret)
153
return ret;
154
155
ctxt_dram->virtual_img[i] =
156
cpu_to_le64(dram->paging[i].physical);
157
dram->paging_cnt++;
158
}
159
160
return 0;
161
}
162
163
/*
 * Build the context-info structure in DMA memory, load the firmware
 * sections, and kick off the device's firmware self-load by writing the
 * context-info bus address to CSR_CTXT_INFO_BA.
 *
 * Returns 0 on success or a negative errno.  On success the context
 * info is kept in trans_pcie->ctxt_info; per the comment at the end it
 * is released later, upon alive (or failure to get one).
 */
int iwl_pcie_ctxt_info_init(struct iwl_trans *trans,
			    const struct fw_img *img)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_context_info *ctxt_info;
	struct iwl_context_info_rbd_cfg *rx_cfg;
	u32 control_flags = 0, rb_size, cb_size;
	dma_addr_t phys;
	int ret;

	/* must not cross a 2^32 boundary — the device gets a 64-bit base
	 * pointer but (presumably) addresses within it 32-bit relative;
	 * the boundary-safe allocator guarantees this */
	ctxt_info = iwl_pcie_ctxt_info_dma_alloc_coherent(trans,
							  sizeof(*ctxt_info),
							  &phys);
	if (!ctxt_info)
		return -ENOMEM;

	trans_pcie->ctxt_info_dma_addr = phys;

	ctxt_info->version.version = 0;
	ctxt_info->version.mac_id =
		cpu_to_le16((u16)trans->info.hw_rev);
	/* size is in DWs */
	ctxt_info->version.size = cpu_to_le16(sizeof(*ctxt_info) / 4);

	/* translate the configured RX buffer size into the device's
	 * context-info encoding; note 12K maps to the 16K encoding */
	switch (trans->conf.rx_buf_size) {
	case IWL_AMSDU_2K:
		rb_size = IWL_CTXT_INFO_RB_SIZE_2K;
		break;
	case IWL_AMSDU_4K:
		rb_size = IWL_CTXT_INFO_RB_SIZE_4K;
		break;
	case IWL_AMSDU_8K:
		rb_size = IWL_CTXT_INFO_RB_SIZE_8K;
		break;
	case IWL_AMSDU_12K:
		rb_size = IWL_CTXT_INFO_RB_SIZE_16K;
		break;
	default:
		WARN_ON(1);
		rb_size = IWL_CTXT_INFO_RB_SIZE_4K;
	}

	/* encode the RBD circular-buffer size; the field is capped at 12
	 * (an encoded value, not a byte count — see RX_QUEUE_CB_SIZE) */
	cb_size = RX_QUEUE_CB_SIZE(iwl_trans_get_num_rbds(trans));
	if (WARN_ON(cb_size > 12))
		cb_size = 12;

	control_flags = IWL_CTXT_INFO_TFD_FORMAT_LONG;
	control_flags |= u32_encode_bits(cb_size, IWL_CTXT_INFO_RB_CB_SIZE);
	control_flags |= u32_encode_bits(rb_size, IWL_CTXT_INFO_RB_SIZE);
	ctxt_info->control.control_flags = cpu_to_le32(control_flags);

	/* initialize RX default queue */
	rx_cfg = &ctxt_info->rbd_cfg;
	rx_cfg->free_rbd_addr = cpu_to_le64(trans_pcie->rxq->bd_dma);
	rx_cfg->used_rbd_addr = cpu_to_le64(trans_pcie->rxq->used_bd_dma);
	rx_cfg->status_wr_ptr = cpu_to_le64(trans_pcie->rxq->rb_stts_dma);

	/* initialize TX command queue */
	ctxt_info->hcmd_cfg.cmd_queue_addr =
		cpu_to_le64(trans_pcie->txqs.txq[trans->conf.cmd_queue]->dma_addr);
	ctxt_info->hcmd_cfg.cmd_queue_size =
		TFD_QUEUE_CB_SIZE(IWL_CMD_QUEUE_SIZE);

	/* allocate ucode sections in dram and set addresses */
	ret = iwl_pcie_init_fw_sec(trans, img, &ctxt_info->dram);
	if (ret) {
		/* fw-section setup failed: release the context info;
		 * NOTE(review): ctxt_info_dma_addr is left stale here,
		 * but trans_pcie->ctxt_info was never set so the free
		 * path won't touch it */
		dma_free_coherent(trans->dev, sizeof(*trans_pcie->ctxt_info),
				  ctxt_info, trans_pcie->ctxt_info_dma_addr);
		return ret;
	}

	trans_pcie->ctxt_info = ctxt_info;

	iwl_enable_fw_load_int_ctx_info(trans, false);

	/* Configure debug, if exists */
	if (iwl_pcie_dbg_on(trans))
		iwl_pcie_apply_destination(trans);

	/* kick FW self load */
	iwl_write64(trans, CSR_CTXT_INFO_BA, trans_pcie->ctxt_info_dma_addr);

	/* Context info will be released upon alive or failure to get one */

	return 0;
}
249
250
void iwl_pcie_ctxt_info_free(struct iwl_trans *trans)
251
{
252
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
253
254
if (!trans_pcie->ctxt_info)
255
return;
256
257
dma_free_coherent(trans->dev, sizeof(*trans_pcie->ctxt_info),
258
trans_pcie->ctxt_info,
259
trans_pcie->ctxt_info_dma_addr);
260
trans_pcie->ctxt_info_dma_addr = 0;
261
trans_pcie->ctxt_info = NULL;
262
263
iwl_pcie_ctxt_info_free_fw_img(trans);
264
}
265
266