GitHub Repository: torvalds/linux
Path: blob/master/arch/sparc/kernel/iommu-common.c
// SPDX-License-Identifier: GPL-2.0
/*
 * IOMMU mmap management and range allocation functions.
 * Based almost entirely upon the powerpc iommu allocator.
 */

#include <linux/export.h>
#include <linux/bitmap.h>
#include <linux/bug.h>
#include <linux/iommu-helper.h>
#include <linux/dma-mapping.h>
#include <linux/hash.h>
#include <asm/iommu-common.h>
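
/* Allocations of more than this many pages are served from the large pool. */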
static unsigned long iommu_large_alloc = 15;
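
/* Per-cpu hash that picks each cpu's default allocation pool. */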
static DEFINE_PER_CPU(unsigned int, iommu_hash_common);
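
/* IOMMU_NEED_FLUSH in iommu->flags records that a lazy flush is pending. */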
static inline bool need_flush(struct iommu_map_table *iommu)
{
	return ((iommu->flags & IOMMU_NEED_FLUSH) != 0);
}

static inline void set_flush(struct iommu_map_table *iommu)
{
	iommu->flags |= IOMMU_NEED_FLUSH;
}

static inline void clear_flush(struct iommu_map_table *iommu)
{
	iommu->flags &= ~IOMMU_NEED_FLUSH;
}
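
/* Seed every possible cpu's pool selector; runs only once. */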
static void setup_iommu_pool_hash(void)
{
	unsigned int i;
	static bool do_once;

	if (do_once)
		return;
	do_once = true;
	for_each_possible_cpu(i)
		per_cpu(iommu_hash_common, i) = hash_32(i, IOMMU_POOL_HASHBITS);
}

/*
 * Initialize iommu_pool entries for the iommu_map_table. `num_entries'
 * is the number of table entries. If `large_pool' is set to true,
 * the top 1/4 of the table will be set aside for pool allocations
 * of more than iommu_large_alloc pages.
 */
void iommu_tbl_pool_init(struct iommu_map_table *iommu,
			 unsigned long num_entries,
			 u32 table_shift,
			 void (*lazy_flush)(struct iommu_map_table *),
			 bool large_pool, u32 npools,
			 bool skip_span_boundary_check)
{
	unsigned int start, i;
	struct iommu_pool *p = &(iommu->large_pool);

	setup_iommu_pool_hash();
	if (npools == 0)
		iommu->nr_pools = IOMMU_NR_POOLS;
	else
		iommu->nr_pools = npools;
	BUG_ON(npools > IOMMU_NR_POOLS);

	iommu->table_shift = table_shift;
	iommu->lazy_flush = lazy_flush;
	start = 0;
	if (skip_span_boundary_check)
		iommu->flags |= IOMMU_NO_SPAN_BOUND;
	if (large_pool)
		iommu->flags |= IOMMU_HAS_LARGE_POOL;

	if (!large_pool)
		iommu->poolsize = num_entries/iommu->nr_pools;
	else
		iommu->poolsize = (num_entries * 3 / 4)/iommu->nr_pools;
	for (i = 0; i < iommu->nr_pools; i++) {
		spin_lock_init(&(iommu->pools[i].lock));
		iommu->pools[i].start = start;
		iommu->pools[i].hint = start;
		start += iommu->poolsize; /* start for next pool */
		iommu->pools[i].end = start - 1;
	}
	if (!large_pool)
		return;
	/* initialize large_pool */
	spin_lock_init(&(p->lock));
	p->start = start;
	p->hint = p->start;
	p->end = num_entries;
}
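
/*
 * Illustrative use (a sketch, not part of this file): a sparc IOMMU
 * driver with `num_tsb_entries' table entries of IO_PAGE_SHIFT-sized
 * pages, one pool and a flush-all callback might do
 *
 *	iommu_tbl_pool_init(&iommu->tbl, num_tsb_entries, IO_PAGE_SHIFT,
 *			    iommu_flushall, false, 1, false);
 *
 * where `iommu', `num_tsb_entries' and `iommu_flushall' are the
 * caller's own names; only the signature comes from this file.
 */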
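
/*
 * Allocate a contiguous run of `npages' entries from one of the pools.
 * Returns the starting entry index, or IOMMU_ERROR_CODE on failure.
 * `mask' caps the highest usable entry (in dma-address terms) and
 * `handle', when non-NULL, carries a start hint across scatterlist
 * allocations.
 */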
unsigned long iommu_tbl_range_alloc(struct device *dev,
				    struct iommu_map_table *iommu,
				    unsigned long npages,
				    unsigned long *handle,
				    unsigned long mask,
				    unsigned int align_order)
{
	unsigned int pool_hash = __this_cpu_read(iommu_hash_common);
	unsigned long n, end, start, limit, boundary_size;
	struct iommu_pool *pool;
	int pass = 0;
	unsigned int pool_nr;
	unsigned int npools = iommu->nr_pools;
	unsigned long flags;
	bool large_pool = ((iommu->flags & IOMMU_HAS_LARGE_POOL) != 0);
	bool largealloc = (large_pool && npages > iommu_large_alloc);
	unsigned long shift;
	unsigned long align_mask = 0;
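
	/* A non-zero align_order requests entries aligned to 2^align_order. */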
	if (align_order > 0)
		align_mask = ~0ul >> (BITS_PER_LONG - align_order);

	/* Sanity check */
	if (unlikely(npages == 0)) {
		WARN_ON_ONCE(1);
		return IOMMU_ERROR_CODE;
	}
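
	/* Large allocations are served from the dedicated large pool. */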
	if (largealloc) {
		pool = &(iommu->large_pool);
		pool_nr = 0; /* to keep compiler happy */
	} else {
		/* pick out pool_nr */
		pool_nr = pool_hash & (npools - 1);
		pool = &(iommu->pools[pool_nr]);
	}
	spin_lock_irqsave(&pool->lock, flags);
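
	/*
	 * Retry loop: pass 0 starts from the cached hint (or *handle),
	 * pass 1 rescans this pool from its start, and later passes for
	 * small allocations step through the remaining pools.
	 */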
again:
	if (pass == 0 && handle && *handle &&
	    (*handle >= pool->start) && (*handle < pool->end))
		start = *handle;
	else
		start = pool->hint;

	limit = pool->end;

	/* The case below can happen if we have a small segment appended
	 * to a large one, or when the previous alloc was at the very end
	 * of the available space. If so, go back to the beginning. If a
	 * flush is needed, it will get done based on the return value
	 * from iommu_area_alloc() below.
	 */
	if (start >= limit)
		start = pool->start;
	shift = iommu->table_map_base >> iommu->table_shift;
	if (limit + shift > mask) {
		limit = mask - shift + 1;
		/* If we're constrained on address range, first try
		 * at the masked hint to avoid O(n) search complexity,
		 * but on second pass, start at 0 in pool 0.
		 */
		if ((start & mask) >= limit || pass > 0) {
			spin_unlock(&(pool->lock));
			pool = &(iommu->pools[0]);
			spin_lock(&(pool->lock));
			start = pool->start;
		} else {
			start &= mask;
		}
	}

	/*
	 * If skip_span_boundary_check was set during init, we set things
	 * up so that iommu_is_span_boundary() merely checks if
	 * (index + npages) < num_tsb_entries.
	 */
	if ((iommu->flags & IOMMU_NO_SPAN_BOUND) != 0) {
		shift = 0;
		boundary_size = iommu->poolsize * iommu->nr_pools;
	} else {
		boundary_size = dma_get_seg_boundary_nr_pages(dev,
					iommu->table_shift);
	}
	n = iommu_area_alloc(iommu->map, limit, start, npages, shift,
			     boundary_size, align_mask);
	if (n == -1) {
		if (likely(pass == 0)) {
			/* First failure, rescan from the beginning. */
			pool->hint = pool->start;
			set_flush(iommu);
			pass++;
			goto again;
		} else if (!largealloc && pass <= iommu->nr_pools) {
			spin_unlock(&(pool->lock));
			pool_nr = (pool_nr + 1) & (iommu->nr_pools - 1);
			pool = &(iommu->pools[pool_nr]);
			spin_lock(&(pool->lock));
			pool->hint = pool->start;
			set_flush(iommu);
			pass++;
			goto again;
		} else {
			/* give up */
			n = IOMMU_ERROR_CODE;
			goto bail;
		}
	}
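
	/*
	 * If the new range begins below the hint, the search wrapped and
	 * previously freed entries may be reused, so honour any pending
	 * lazy flush before handing the range out.
	 */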
	if (iommu->lazy_flush &&
	    (n < pool->hint || need_flush(iommu))) {
		clear_flush(iommu);
		iommu->lazy_flush(iommu);
	}

	end = n + npages;
	pool->hint = end;

	/* Update handle for SG allocations */
	if (handle)
		*handle = end;
bail:
	spin_unlock_irqrestore(&(pool->lock), flags);

	return n;
}
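
/* Map an entry index back to the pool that owns it. */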
static struct iommu_pool *get_pool(struct iommu_map_table *tbl,
				   unsigned long entry)
{
	struct iommu_pool *p;
	unsigned long largepool_start = tbl->large_pool.start;
	bool large_pool = ((tbl->flags & IOMMU_HAS_LARGE_POOL) != 0);

	/* The large pool is the last pool at the top of the table */
	if (large_pool && entry >= largepool_start) {
		p = &tbl->large_pool;
	} else {
		unsigned int pool_nr = entry / tbl->poolsize;

		BUG_ON(pool_nr >= tbl->nr_pools);
		p = &tbl->pools[pool_nr];
	}
	return p;
}

/* Caller supplies the index of the entry into the iommu map table
 * itself when the mapping from dma_addr to the entry is not the
 * default addr->entry mapping below.
 */
void iommu_tbl_range_free(struct iommu_map_table *iommu, u64 dma_addr,
			  unsigned long npages, unsigned long entry)
{
	struct iommu_pool *pool;
	unsigned long flags;
	unsigned long shift = iommu->table_shift;

	if (entry == IOMMU_ERROR_CODE) /* use default addr->entry mapping */
		entry = (dma_addr - iommu->table_map_base) >> shift;
	pool = get_pool(iommu, entry);

	spin_lock_irqsave(&(pool->lock), flags);
	bitmap_clear(iommu->map, entry, npages);
	spin_unlock_irqrestore(&(pool->lock), flags);
}
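
/*
 * Illustrative alloc/free pairing (a sketch; `tbl', `dev' and `npages'
 * are assumed caller state, not defined in this file):
 *
 *	entry = iommu_tbl_range_alloc(dev, &tbl, npages, NULL, ~0UL, 0);
 *	if (entry != IOMMU_ERROR_CODE) {
 *		dma_addr = tbl.table_map_base + (entry << tbl.table_shift);
 *		...
 *		iommu_tbl_range_free(&tbl, dma_addr, npages, IOMMU_ERROR_CODE);
 *	}
 */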