GitHub Repository: torvalds/linux
Path: blob/master/net/core/devmem.h

/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Device memory TCP support
 *
 * Authors:	Mina Almasry <[email protected]>
 *		Willem de Bruijn <[email protected]>
 *		Kaiyuan Zhang <[email protected]>
 *
 */
#ifndef _NET_DEVMEM_H
#define _NET_DEVMEM_H

#include <net/netmem.h>
#include <net/netdev_netlink.h>

struct netlink_ext_ack;

struct net_devmem_dmabuf_binding {
	struct dma_buf *dmabuf;
	struct dma_buf_attachment *attachment;
	struct sg_table *sgt;
	struct net_device *dev;
	struct gen_pool *chunk_pool;
	/* Protect dev */
	struct mutex lock;

	/* The user holds a ref (via the netlink API) for as long as they want
	 * the binding to remain alive. Each page pool using this binding holds
	 * a ref to keep the binding alive. The page_pool does not release the
	 * ref until all the net_iovs allocated from this binding are released
	 * back to the page_pool.
	 *
	 * The binding undoes itself and unmaps the underlying dmabuf once all
	 * those refs are dropped and the binding is no longer desired or in
	 * use.
	 *
	 * net_devmem_get_net_iov() on dmabuf net_iovs will increment this
	 * reference, making sure that the binding remains alive until all the
	 * net_iovs are no longer used. net_iovs allocated from this binding
	 * that are stuck in the TX path for any reason (such as awaiting
	 * retransmits) hold a reference to the binding until the skb holding
	 * them is freed.
	 */
	refcount_t ref;

	/* The list of bindings currently active. Used for netlink to notify us
	 * of the user dropping the bind.
	 */
	struct list_head list;

	/* rxq's this binding is active on. */
	struct xarray bound_rxqs;

	/* ID of this binding. Globally unique to all bindings currently
	 * active.
	 */
	u32 id;

	/* DMA direction, FROM_DEVICE for Rx binding, TO_DEVICE for Tx. */
	enum dma_data_direction direction;

	/* Array of net_iov pointers for this binding, sorted by virtual
	 * address. This array is convenient to map the virtual addresses to
	 * net_iovs in the TX path.
	 */
	struct net_iov **tx_vec;

	struct work_struct unbind_w;
};
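
/*
 * Illustrative sketch only, not part of the upstream header: assuming
 * tx_vec is indexed by page offset within the dmabuf, the TX path could
 * map a dmabuf offset back to its backing net_iov roughly as below. The
 * helper name example_binding_niov_at() is hypothetical.
 */
static inline struct net_iov *
example_binding_niov_at(struct net_devmem_dmabuf_binding *binding,
			unsigned long offset)
{
	/* Each tx_vec slot covers one PAGE_SIZE worth of the dmabuf. */
	return binding->tx_vec[offset >> PAGE_SHIFT];
}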

#if defined(CONFIG_NET_DEVMEM)
/* Owner of the dma-buf chunks inserted into the gen pool. Each scatterlist
 * entry from the dmabuf is inserted into the genpool as a chunk, and needs
 * this owner struct to keep track of some metadata necessary to create
 * allocations from this chunk.
 */
struct dmabuf_genpool_chunk_owner {
	struct net_iov_area area;
	struct net_devmem_dmabuf_binding *binding;

	/* dma_addr of the start of the chunk. */
	dma_addr_t base_dma_addr;
};

void __net_devmem_dmabuf_binding_free(struct work_struct *wq);
struct net_devmem_dmabuf_binding *
net_devmem_bind_dmabuf(struct net_device *dev,
		       struct device *dma_dev,
		       enum dma_data_direction direction,
		       unsigned int dmabuf_fd, struct netdev_nl_sock *priv,
		       struct netlink_ext_ack *extack);
struct net_devmem_dmabuf_binding *net_devmem_lookup_dmabuf(u32 id);
void net_devmem_unbind_dmabuf(struct net_devmem_dmabuf_binding *binding);
int net_devmem_bind_dmabuf_to_queue(struct net_device *dev, u32 rxq_idx,
				    struct net_devmem_dmabuf_binding *binding,
				    struct netlink_ext_ack *extack);

static inline struct dmabuf_genpool_chunk_owner *
net_devmem_iov_to_chunk_owner(const struct net_iov *niov)
{
	struct net_iov_area *owner = net_iov_owner(niov);

	return container_of(owner, struct dmabuf_genpool_chunk_owner, area);
}

static inline struct net_devmem_dmabuf_binding *
net_devmem_iov_binding(const struct net_iov *niov)
{
	return net_devmem_iov_to_chunk_owner(niov)->binding;
}

static inline u32 net_devmem_iov_binding_id(const struct net_iov *niov)
{
	return net_devmem_iov_binding(niov)->id;
}

static inline unsigned long net_iov_virtual_addr(const struct net_iov *niov)
{
	struct net_iov_area *owner = net_iov_owner(niov);

	return owner->base_virtual +
	       ((unsigned long)net_iov_idx(niov) << PAGE_SHIFT);
}
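
/*
 * Illustrative sketch only, not part of the upstream header: by the same
 * arithmetic as net_iov_virtual_addr(), the chunk owner's base_dma_addr
 * plus the net_iov's index within its area would yield the net_iov's DMA
 * address. The helper name example_iov_dma_addr() is hypothetical.
 */
static inline dma_addr_t example_iov_dma_addr(const struct net_iov *niov)
{
	struct dmabuf_genpool_chunk_owner *owner =
		net_devmem_iov_to_chunk_owner(niov);

	return owner->base_dma_addr +
	       ((dma_addr_t)net_iov_idx(niov) << PAGE_SHIFT);
}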

static inline bool
net_devmem_dmabuf_binding_get(struct net_devmem_dmabuf_binding *binding)
{
	return refcount_inc_not_zero(&binding->ref);
}

static inline void
net_devmem_dmabuf_binding_put(struct net_devmem_dmabuf_binding *binding)
{
	if (!refcount_dec_and_test(&binding->ref))
		return;

	INIT_WORK(&binding->unbind_w, __net_devmem_dmabuf_binding_free);
	schedule_work(&binding->unbind_w);
}
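
/*
 * Illustrative usage sketch only, not part of the upstream header: a caller
 * that stashes a binding pointer for deferred use must pin the binding with
 * net_devmem_dmabuf_binding_get() first and pair it with
 * net_devmem_dmabuf_binding_put() when done, per the lifetime rules
 * documented on struct net_devmem_dmabuf_binding. The helper name
 * example_try_pin_binding() is hypothetical.
 */
static inline struct net_devmem_dmabuf_binding *
example_try_pin_binding(struct net_devmem_dmabuf_binding *binding)
{
	/* Returns NULL if the binding is already being torn down. */
	if (!net_devmem_dmabuf_binding_get(binding))
		return NULL;

	/* The caller must later drop this reference with
	 * net_devmem_dmabuf_binding_put().
	 */
	return binding;
}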

void net_devmem_get_net_iov(struct net_iov *niov);
void net_devmem_put_net_iov(struct net_iov *niov);

struct net_iov *
net_devmem_alloc_dmabuf(struct net_devmem_dmabuf_binding *binding);
void net_devmem_free_dmabuf(struct net_iov *ppiov);

bool net_is_devmem_iov(struct net_iov *niov);
struct net_devmem_dmabuf_binding *
net_devmem_get_binding(struct sock *sk, unsigned int dmabuf_id);
struct net_iov *
net_devmem_get_niov_at(struct net_devmem_dmabuf_binding *binding, size_t addr,
		       size_t *off, size_t *size);

#else
struct net_devmem_dmabuf_binding;

static inline void
net_devmem_dmabuf_binding_put(struct net_devmem_dmabuf_binding *binding)
{
}

static inline void net_devmem_get_net_iov(struct net_iov *niov)
{
}

static inline void net_devmem_put_net_iov(struct net_iov *niov)
{
}

static inline struct net_devmem_dmabuf_binding *
net_devmem_bind_dmabuf(struct net_device *dev,
		       struct device *dma_dev,
		       enum dma_data_direction direction,
		       unsigned int dmabuf_fd,
		       struct netdev_nl_sock *priv,
		       struct netlink_ext_ack *extack)
{
	return ERR_PTR(-EOPNOTSUPP);
}

static inline struct net_devmem_dmabuf_binding *net_devmem_lookup_dmabuf(u32 id)
{
	return NULL;
}

static inline void
net_devmem_unbind_dmabuf(struct net_devmem_dmabuf_binding *binding)
{
}

static inline int
net_devmem_bind_dmabuf_to_queue(struct net_device *dev, u32 rxq_idx,
				struct net_devmem_dmabuf_binding *binding,
				struct netlink_ext_ack *extack)
{
	return -EOPNOTSUPP;
}

static inline struct net_iov *
net_devmem_alloc_dmabuf(struct net_devmem_dmabuf_binding *binding)
{
	return NULL;
}

static inline void net_devmem_free_dmabuf(struct net_iov *ppiov)
{
}

static inline unsigned long net_iov_virtual_addr(const struct net_iov *niov)
{
	return 0;
}

static inline u32 net_devmem_iov_binding_id(const struct net_iov *niov)
{
	return 0;
}

static inline bool net_is_devmem_iov(struct net_iov *niov)
{
	return false;
}

static inline struct net_devmem_dmabuf_binding *
net_devmem_get_binding(struct sock *sk, unsigned int dmabuf_id)
{
	return ERR_PTR(-EOPNOTSUPP);
}

static inline struct net_iov *
net_devmem_get_niov_at(struct net_devmem_dmabuf_binding *binding, size_t addr,
		       size_t *off, size_t *size)
{
	return NULL;
}

static inline struct net_devmem_dmabuf_binding *
net_devmem_iov_binding(const struct net_iov *niov)
{
	return NULL;
}
#endif

#endif /* _NET_DEVMEM_H */