/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/*
 * Copyright (c) 2007 Cisco Systems. All rights reserved.
 * Copyright (c) 2020 Intel Corporation. All rights reserved.
 */

#ifndef IB_UMEM_H
#define IB_UMEM_H

#include <linux/list.h>
#include <linux/scatterlist.h>
#include <linux/workqueue.h>
#include <rdma/ib_verbs.h>

struct ib_ucontext;
struct ib_umem_odp;
struct dma_buf_attach_ops;

struct ib_umem {
	struct ib_device *ibdev;	/* device the memory is registered with */
	struct mm_struct *owning_mm;	/* mm the pages were pinned from */
	u64 iova;			/* device-visible virtual address of the region */
	size_t length;			/* length of the user memory, in bytes */
	unsigned long address;		/* user virtual start address */
	u32 writable : 1;		/* pages mapped for write access */
	u32 is_odp : 1;			/* on-demand paging umem */
	u32 is_dmabuf : 1;		/* dma-buf backed umem */
	struct sg_append_table sgt_append; /* scatter/gather table of pinned, DMA-mapped pages */
};

struct ib_umem_dmabuf {
	struct ib_umem umem;			/* embedded base umem */
	struct dma_buf_attachment *attach;	/* dma-buf attachment for this umem */
	struct sg_table *sgt;			/* sg table returned by the dma-buf exporter */
	struct scatterlist *first_sg;		/* first sg entry covering the umem */
	struct scatterlist *last_sg;		/* last sg entry covering the umem */
	unsigned long first_sg_offset;		/* byte offset of the umem into first_sg */
	unsigned long last_sg_trim;		/* bytes trimmed from the end of last_sg */
	void *private;				/* opaque driver data */
	u8 pinned : 1;				/* pages are pinned, no move_notify */
	u8 revoked : 1;				/* access revoked, mapping torn down */
};

static inline struct ib_umem_dmabuf *to_ib_umem_dmabuf(struct ib_umem *umem)
{
	return container_of(umem, struct ib_umem_dmabuf, umem);
}

/* Returns the offset of the umem start relative to the first page. */
static inline int ib_umem_offset(struct ib_umem *umem)
{
	return umem->address & ~PAGE_MASK;
}
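
/*
 * Worked example (illustrative numbers, not from the original header): with
 * 4 KiB pages, PAGE_MASK is ~0xfffUL, so a umem starting at user address
 * 0x7f23a1234010 yields ib_umem_offset() == 0x10, i.e. the mapping begins
 * 16 bytes into its first page.
 */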

static inline dma_addr_t ib_umem_start_dma_addr(struct ib_umem *umem)
{
	return sg_dma_address(umem->sgt_append.sgt.sgl) + ib_umem_offset(umem);
}

static inline unsigned long ib_umem_dma_offset(struct ib_umem *umem,
					       unsigned long pgsz)
{
	return ib_umem_start_dma_addr(umem) & (pgsz - 1);
}

static inline size_t ib_umem_num_dma_blocks(struct ib_umem *umem,
					    unsigned long pgsz)
{
	return (size_t)((ALIGN(umem->iova + umem->length, pgsz) -
			 ALIGN_DOWN(umem->iova, pgsz))) /
	       pgsz;
}
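
/*
 * Worked example (illustrative numbers, not from the original header): with
 * iova = 0x1f000, length = 0x3000 and pgsz = 0x10000 (64 KiB),
 * ALIGN(0x22000, 0x10000) = 0x30000 and ALIGN_DOWN(0x1f000, 0x10000) =
 * 0x10000, so ib_umem_num_dma_blocks() reports
 * (0x30000 - 0x10000) / 0x10000 = 2 DMA blocks.
 */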

static inline size_t ib_umem_num_pages(struct ib_umem *umem)
{
	return ib_umem_num_dma_blocks(umem, PAGE_SIZE);
}

static inline void __rdma_umem_block_iter_start(struct ib_block_iter *biter,
						struct ib_umem *umem,
						unsigned long pgsz)
{
	__rdma_block_iter_start(biter, umem->sgt_append.sgt.sgl,
				umem->sgt_append.sgt.nents, pgsz);
	biter->__sg_advance = ib_umem_offset(umem) & ~(pgsz - 1);
	biter->__sg_numblocks = ib_umem_num_dma_blocks(umem, pgsz);
}

static inline bool __rdma_umem_block_iter_next(struct ib_block_iter *biter)
{
	return __rdma_block_iter_next(biter) && biter->__sg_numblocks--;
}

/**
 * rdma_umem_for_each_dma_block - iterate over contiguous DMA blocks of the umem
 * @umem: umem to iterate over
 * @biter: block iterator holding the current position
 * @pgsz: Page size to split the list into
 *
 * pgsz must be <= PAGE_SIZE or computed by ib_umem_find_best_pgsz(). The
 * returned DMA blocks will be aligned to pgsz and span the range:
 * ALIGN_DOWN(umem->address, pgsz) to ALIGN(umem->address + umem->length, pgsz)
 *
 * Performs exactly ib_umem_num_dma_blocks() iterations.
 */
#define rdma_umem_for_each_dma_block(umem, biter, pgsz)                       \
	for (__rdma_umem_block_iter_start(biter, umem, pgsz);                 \
	     __rdma_umem_block_iter_next(biter);)
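
/*
 * Usage sketch (illustrative only, not part of this header): how a driver
 * might walk a umem in HW-sized DMA blocks to fill a translation table.
 * The names fill_hw_mtt, mtt and hw_pgsz are hypothetical; only
 * rdma_umem_for_each_dma_block() and rdma_block_iter_dma_address() are
 * existing APIs.
 *
 *	static void fill_hw_mtt(u64 *mtt, struct ib_umem *umem,
 *				unsigned long hw_pgsz)
 *	{
 *		struct ib_block_iter biter;
 *		size_t i = 0;
 *
 *		rdma_umem_for_each_dma_block(umem, &biter, hw_pgsz)
 *			mtt[i++] = rdma_block_iter_dma_address(&biter);
 *	}
 */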

#ifdef CONFIG_INFINIBAND_USER_MEM

struct ib_umem *ib_umem_get(struct ib_device *device, unsigned long addr,
			    size_t size, int access);
void ib_umem_release(struct ib_umem *umem);
int ib_umem_copy_from(void *dst, struct ib_umem *umem, size_t offset,
		      size_t length);
unsigned long ib_umem_find_best_pgsz(struct ib_umem *umem,
				     unsigned long pgsz_bitmap,
				     unsigned long virt);

/**
 * ib_umem_find_best_pgoff - Find best HW page size
 *
 * @umem: umem struct
 * @pgsz_bitmap: bitmap of HW supported page sizes
 * @pgoff_bitmask: Mask of bits that can be represented with an offset
 *
 * This is very similar to ib_umem_find_best_pgsz() except instead of accepting
 * an IOVA it accepts a bitmask specifying what address bits can be represented
 * with a page offset.
 *
 * For instance if the HW has multiple page sizes, requires 64 byte alignment,
 * and can support aligned offsets up to 4032 then pgoff_bitmask would be
 * "111111000000".
 *
 * If the pgoff_bitmask requires either alignment in the low bit or an
 * unavailable page size for the high bits, this function returns 0.
 */
static inline unsigned long ib_umem_find_best_pgoff(struct ib_umem *umem,
						    unsigned long pgsz_bitmap,
						    u64 pgoff_bitmask)
{
	dma_addr_t dma_addr;

	dma_addr = ib_umem_start_dma_addr(umem);
	return ib_umem_find_best_pgsz(umem, pgsz_bitmap,
				      dma_addr & pgoff_bitmask);
}
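
/*
 * Usage sketch (illustrative only, not part of this header), following the
 * 64-byte / 4032-byte example above: 0xFC0 is the bitmask "111111000000".
 * hw_pgsz_bitmap is a hypothetical per-device value.
 *
 *	unsigned long pgsz;
 *
 *	pgsz = ib_umem_find_best_pgoff(umem, hw_pgsz_bitmap, 0xFC0);
 *	if (!pgsz)
 *		return -EINVAL;		// no supported page size fits this umem
 */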

static inline bool ib_umem_is_contiguous(struct ib_umem *umem)
{
	dma_addr_t dma_addr;
	unsigned long pgsz;

	/*
	 * Select the smallest aligned page that can contain the whole umem if
	 * it was contiguous.
	 */
	dma_addr = ib_umem_start_dma_addr(umem);
	pgsz = roundup_pow_of_two((dma_addr ^ (umem->length - 1 + dma_addr)) + 1);
	return !!ib_umem_find_best_pgoff(umem, pgsz, U64_MAX);
}
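
/*
 * Worked example (illustrative numbers, not from the original header): for a
 * umem whose DMA mapping starts at 0x201000 and is 0x3000 bytes long, the
 * last byte sits at 0x203fff, so dma_addr ^ (length - 1 + dma_addr) = 0x2fff
 * and pgsz = roundup_pow_of_two(0x3000) = 0x4000. Both ends then fall inside
 * the same aligned 16 KiB block, and the umem is reported contiguous only if
 * ib_umem_find_best_pgoff() confirms the SGL can be expressed with that page
 * size.
 */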

struct ib_umem_dmabuf *ib_umem_dmabuf_get(struct ib_device *device,
					  unsigned long offset, size_t size,
					  int fd, int access,
					  const struct dma_buf_attach_ops *ops);
struct ib_umem_dmabuf *ib_umem_dmabuf_get_pinned(struct ib_device *device,
						 unsigned long offset,
						 size_t size, int fd,
						 int access);
struct ib_umem_dmabuf *
ib_umem_dmabuf_get_pinned_with_dma_device(struct ib_device *device,
					  struct device *dma_device,
					  unsigned long offset, size_t size,
					  int fd, int access);
int ib_umem_dmabuf_map_pages(struct ib_umem_dmabuf *umem_dmabuf);
void ib_umem_dmabuf_unmap_pages(struct ib_umem_dmabuf *umem_dmabuf);
void ib_umem_dmabuf_release(struct ib_umem_dmabuf *umem_dmabuf);
void ib_umem_dmabuf_revoke(struct ib_umem_dmabuf *umem_dmabuf);

#else /* CONFIG_INFINIBAND_USER_MEM */

#include <linux/err.h>

static inline struct ib_umem *ib_umem_get(struct ib_device *device,
					  unsigned long addr, size_t size,
					  int access)
{
	return ERR_PTR(-EOPNOTSUPP);
}
static inline void ib_umem_release(struct ib_umem *umem) { }
static inline int ib_umem_copy_from(void *dst, struct ib_umem *umem,
				    size_t offset, size_t length)
{
	return -EOPNOTSUPP;
}
static inline unsigned long ib_umem_find_best_pgsz(struct ib_umem *umem,
						   unsigned long pgsz_bitmap,
						   unsigned long virt)
{
	return 0;
}
static inline unsigned long ib_umem_find_best_pgoff(struct ib_umem *umem,
						    unsigned long pgsz_bitmap,
						    u64 pgoff_bitmask)
{
	return 0;
}
static inline
struct ib_umem_dmabuf *ib_umem_dmabuf_get(struct ib_device *device,
					  unsigned long offset,
					  size_t size, int fd,
					  int access,
					  const struct dma_buf_attach_ops *ops)
{
	return ERR_PTR(-EOPNOTSUPP);
}
static inline struct ib_umem_dmabuf *
ib_umem_dmabuf_get_pinned(struct ib_device *device, unsigned long offset,
			  size_t size, int fd, int access)
{
	return ERR_PTR(-EOPNOTSUPP);
}

static inline struct ib_umem_dmabuf *
ib_umem_dmabuf_get_pinned_with_dma_device(struct ib_device *device,
					  struct device *dma_device,
					  unsigned long offset, size_t size,
					  int fd, int access)
{
	return ERR_PTR(-EOPNOTSUPP);
}

static inline int ib_umem_dmabuf_map_pages(struct ib_umem_dmabuf *umem_dmabuf)
{
	return -EOPNOTSUPP;
}
static inline void ib_umem_dmabuf_unmap_pages(struct ib_umem_dmabuf *umem_dmabuf) { }
static inline void ib_umem_dmabuf_release(struct ib_umem_dmabuf *umem_dmabuf) { }
static inline void ib_umem_dmabuf_revoke(struct ib_umem_dmabuf *umem_dmabuf) { }

#endif /* CONFIG_INFINIBAND_USER_MEM */
#endif /* IB_UMEM_H */