Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
torvalds
GitHub Repository: torvalds/linux
Path: blob/master/tools/testing/nvdimm/test/iomap.c
26285 views
1
// SPDX-License-Identifier: GPL-2.0-only
2
/*
3
* Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
4
*/
5
#include <linux/memremap.h>
6
#include <linux/rculist.h>
7
#include <linux/export.h>
8
#include <linux/ioport.h>
9
#include <linux/module.h>
10
#include <linux/types.h>
11
#include <linux/acpi.h>
12
#include <linux/io.h>
13
#include <linux/mm.h>
14
#include "nfit_test.h"
15
16
/* Registry the __wrap_* functions consult; the test module publishes one entry. */
static LIST_HEAD(iomap_head);

/*
 * Hook table installed by nfit_test_setup() and published on iomap_head,
 * letting the wrappers below divert address lookups and ACPI _DSM
 * evaluation to the test harness.
 */
static struct iomap_ops {
	nfit_test_lookup_fn nfit_test_lookup;	/* phys address -> test resource */
	nfit_test_evaluate_dsm_fn evaluate_dsm;	/* intercepts _DSM evaluation */
	struct list_head list;			/* linkage onto iomap_head */
} iomap_ops = {
	.list = LIST_HEAD_INIT(iomap_ops.list),
};
25
26
/*
 * Install the test harness callbacks and publish them on iomap_head.
 * The callback fields are written before list_add_rcu() so RCU readers
 * that observe the list entry also observe valid function pointers.
 */
void nfit_test_setup(nfit_test_lookup_fn lookup,
		nfit_test_evaluate_dsm_fn evaluate)
{
	iomap_ops.nfit_test_lookup = lookup;
	iomap_ops.evaluate_dsm = evaluate;
	list_add_rcu(&iomap_ops.list, &iomap_head);
}
EXPORT_SYMBOL(nfit_test_setup);
34
35
/*
 * Unpublish the hook table and wait for in-flight RCU readers
 * (get_nfit_res(), __wrap_acpi_evaluate_dsm()) to drain before the
 * caller tears down whatever backs the callbacks.
 */
void nfit_test_teardown(void)
{
	list_del_rcu(&iomap_ops.list);
	synchronize_rcu();
}
EXPORT_SYMBOL(nfit_test_teardown);
41
42
/*
 * Consult the registered lookup hook for @resource.
 * Caller must hold rcu_read_lock(); returns NULL when no hook is installed.
 */
static struct nfit_test_resource *__get_nfit_res(resource_size_t resource)
{
	struct iomap_ops *ops =
		list_first_or_null_rcu(&iomap_head, typeof(*ops), list);

	if (!ops)
		return NULL;

	return ops->nfit_test_lookup(resource);
}
51
52
/*
 * Map a physical address to a test resource, or NULL when the address is
 * not owned by the test harness.  The RCU read section guards the lookup
 * against a concurrent nfit_test_teardown().
 */
struct nfit_test_resource *get_nfit_res(resource_size_t resource)
{
	struct nfit_test_resource *res;

	rcu_read_lock();
	res = __get_nfit_res(resource);
	rcu_read_unlock();

	return res;
}
EXPORT_SYMBOL(get_nfit_res);
63
64
/*
 * Common body for the ioremap-family wrappers: if @offset falls inside a
 * test resource, return the matching offset into its buffer; otherwise
 * defer to the real mapping function @fallback_fn.
 */
#define __nfit_test_ioremap(offset, size, fallback_fn) ({		\
	struct nfit_test_resource *nfit_res = get_nfit_res(offset);	\
	nfit_res ?							\
		(void __iomem *) nfit_res->buf + (offset)		\
			- nfit_res->res.start				\
	:								\
		fallback_fn((offset), (size)) ;				\
})
72
73
/*
 * Wrapper for devm_ioremap(): test-owned offsets resolve directly into
 * the test resource's buffer; everything else goes to the real call.
 */
void __iomem *__wrap_devm_ioremap(struct device *dev,
		resource_size_t offset, unsigned long size)
{
	struct nfit_test_resource *res = get_nfit_res(offset);

	if (!res)
		return devm_ioremap(dev, offset, size);

	return (void __iomem *) res->buf + offset - res->res.start;
}
EXPORT_SYMBOL(__wrap_devm_ioremap);
84
85
void *__wrap_devm_memremap(struct device *dev, resource_size_t offset,
86
size_t size, unsigned long flags)
87
{
88
struct nfit_test_resource *nfit_res = get_nfit_res(offset);
89
90
if (nfit_res)
91
return nfit_res->buf + offset - nfit_res->res.start;
92
return devm_memremap(dev, offset, size, flags);
93
}
94
EXPORT_SYMBOL(__wrap_devm_memremap);
95
96
/*
 * devm action: tear down a test-owned dev_pagemap.  Kills the percpu
 * ref, waits for dev_pagemap_percpu_release() to signal pgmap->done,
 * then frees the ref's percpu state — the order is mandatory.
 */
static void nfit_test_kill(void *_pgmap)
{
	struct dev_pagemap *pgmap = _pgmap;

	WARN_ON(!pgmap);

	percpu_ref_kill(&pgmap->ref);

	wait_for_completion(&pgmap->done);
	percpu_ref_exit(&pgmap->ref);
}
107
108
/* percpu_ref release callback: tell nfit_test_kill() the ref hit zero. */
static void dev_pagemap_percpu_release(struct percpu_ref *ref)
{
	struct dev_pagemap *pgmap = container_of(ref, struct dev_pagemap, ref);

	complete(&pgmap->done);
}
114
115
/*
 * Wrapper for devm_memremap_pages().  For test-owned ranges, skip the
 * real page-map setup and return the test buffer, but still initialize
 * pgmap->done / pgmap->ref and register nfit_test_kill() so the pagemap
 * is torn down on device release the way consumers expect.
 */
void *__wrap_devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap)
{
	int error;
	resource_size_t offset = pgmap->range.start;
	struct nfit_test_resource *nfit_res = get_nfit_res(offset);

	/* Not a test address: defer to the real implementation. */
	if (!nfit_res)
		return devm_memremap_pages(dev, pgmap);

	init_completion(&pgmap->done);
	error = percpu_ref_init(&pgmap->ref, dev_pagemap_percpu_release, 0,
			GFP_KERNEL);
	if (error)
		return ERR_PTR(error);

	/* Tear the ref back down when the device goes away. */
	error = devm_add_action_or_reset(dev, nfit_test_kill, pgmap);
	if (error)
		return ERR_PTR(error);
	return nfit_res->buf + offset - nfit_res->res.start;
}
EXPORT_SYMBOL_GPL(__wrap_devm_memremap_pages);
136
137
/*
 * Wrapper for memremap(): test-owned offsets resolve directly into the
 * test resource's buffer instead of being remapped.
 */
void *__wrap_memremap(resource_size_t offset, size_t size,
		unsigned long flags)
{
	struct nfit_test_resource *res = get_nfit_res(offset);

	if (!res)
		return memremap(offset, size, flags);

	return res->buf + offset - res->res.start;
}
EXPORT_SYMBOL(__wrap_memremap);
147
148
/*
 * Wrapper for devm_memunmap(): addresses inside a test buffer were never
 * really mapped, so releasing them is a no-op.
 */
void __wrap_devm_memunmap(struct device *dev, void *addr)
{
	if (get_nfit_res((long) addr))
		return;

	devm_memunmap(dev, addr);
}
EXPORT_SYMBOL(__wrap_devm_memunmap);
157
158
/* Wrapper for ioremap(): divert test-owned ranges to the test buffer. */
void __iomem *__wrap_ioremap(resource_size_t offset, unsigned long size)
{
	return __nfit_test_ioremap(offset, size, ioremap);
}
EXPORT_SYMBOL(__wrap_ioremap);
163
164
/* Wrapper for ioremap_wc(): divert test-owned ranges to the test buffer. */
void __iomem *__wrap_ioremap_wc(resource_size_t offset, unsigned long size)
{
	return __nfit_test_ioremap(offset, size, ioremap_wc);
}
EXPORT_SYMBOL(__wrap_ioremap_wc);
169
170
/*
 * Wrapper for iounmap(): addresses inside a test buffer were never
 * really mapped, so unmapping them is a no-op.
 */
void __wrap_iounmap(volatile void __iomem *addr)
{
	if (get_nfit_res((long) addr))
		return;

	iounmap(addr);
}
EXPORT_SYMBOL(__wrap_iounmap);
178
179
/*
 * Wrapper for memunmap(): addresses inside a test buffer were never
 * really mapped, so unmapping them is a no-op.
 */
void __wrap_memunmap(void *addr)
{
	if (get_nfit_res((long) addr))
		return;

	memunmap(addr);
}
EXPORT_SYMBOL(__wrap_memunmap);
188
189
static bool nfit_test_release_region(struct device *dev,
		struct resource *parent, resource_size_t start,
		resource_size_t n);

/*
 * devres release callback for regions handed out by
 * nfit_test_request_region().  Passing dev == NULL makes the release
 * path take the direct (non-devres) branch, avoiding re-entry.
 */
static void nfit_devres_release(struct device *dev, void *data)
{
	struct resource *res = *((struct resource **) data);

	WARN_ON(!nfit_test_release_region(NULL, &iomem_resource, res->start,
			resource_size(res)));
}
200
201
static int match(struct device *dev, void *__res, void *match_data)
202
{
203
struct resource *res = *((struct resource **) __res);
204
resource_size_t start = *((resource_size_t *) match_data);
205
206
return res->start == start;
207
}
208
209
/*
 * Release a region previously handed out by nfit_test_request_region().
 * Returns true when the range belongs to the test harness (and was, or
 * will be, released here); false tells the caller to fall back to the
 * real __release_region()/__devm_release_region().
 */
static bool nfit_test_release_region(struct device *dev,
		struct resource *parent, resource_size_t start,
		resource_size_t n)
{
	if (parent == &iomem_resource) {
		struct nfit_test_resource *nfit_res = get_nfit_res(start);

		if (nfit_res) {
			struct nfit_test_request *req;
			struct resource *res = NULL;

			if (dev) {
				/*
				 * devres-managed: drop the devres node; its
				 * release callback (nfit_devres_release)
				 * re-enters here with dev == NULL.
				 */
				devres_release(dev, nfit_devres_release, match,
						&start);
				return true;
			}

			/* Find and unlink the matching request record. */
			spin_lock(&nfit_res->lock);
			list_for_each_entry(req, &nfit_res->requests, list)
				if (req->res.start == start) {
					res = &req->res;
					list_del(&req->list);
					break;
				}
			spin_unlock(&nfit_res->lock);

			/* Releasing an untracked or wrongly-sized range is a bug. */
			WARN(!res || resource_size(res) != n,
					"%s: start: %llx n: %llx mismatch: %pr\n",
					__func__, start, n, res);
			if (res)
				kfree(req);
			return true;
		}
	}
	return false;
}
245
246
/*
 * Emulate {__devm_,__}request_region() for test-owned ranges.  A
 * successful request is tracked as an nfit_test_request on the test
 * resource's list; non-test ranges fall through to the real
 * request-region implementations.
 *
 * Fix over the original: if devres_alloc() failed, the freshly inserted
 * request was leaked AND left on nfit_res->requests, permanently marking
 * the range busy.  The failure path now unlinks and frees it.
 */
static struct resource *nfit_test_request_region(struct device *dev,
		struct resource *parent, resource_size_t start,
		resource_size_t n, const char *name, int flags)
{
	struct nfit_test_resource *nfit_res;

	if (parent == &iomem_resource) {
		nfit_res = get_nfit_res(start);
		if (nfit_res) {
			struct nfit_test_request *req;
			struct resource *res = NULL;

			/* Reject requests that run past the test resource. */
			if (start + n > nfit_res->res.start
					+ resource_size(&nfit_res->res)) {
				pr_debug("%s: start: %llx n: %llx overflow: %pr\n",
						__func__, start, n,
						&nfit_res->res);
				return NULL;
			}

			/* Refuse ranges that are already busy. */
			spin_lock(&nfit_res->lock);
			list_for_each_entry(req, &nfit_res->requests, list)
				if (start == req->res.start) {
					res = &req->res;
					break;
				}
			spin_unlock(&nfit_res->lock);

			if (res) {
				WARN(1, "%pr already busy\n", res);
				return NULL;
			}

			req = kzalloc(sizeof(*req), GFP_KERNEL);
			if (!req)
				return NULL;
			INIT_LIST_HEAD(&req->list);
			res = &req->res;

			res->start = start;
			res->end = start + n - 1;
			res->name = name;
			res->flags = resource_type(parent);
			res->flags |= IORESOURCE_BUSY | flags;
			spin_lock(&nfit_res->lock);
			list_add(&req->list, &nfit_res->requests);
			spin_unlock(&nfit_res->lock);

			if (dev) {
				struct resource **d;

				d = devres_alloc(nfit_devres_release,
						sizeof(struct resource *),
						GFP_KERNEL);
				if (!d) {
					/* Undo the insertion; don't leak req. */
					spin_lock(&nfit_res->lock);
					list_del(&req->list);
					spin_unlock(&nfit_res->lock);
					kfree(req);
					return NULL;
				}
				*d = res;
				devres_add(dev, d);
			}

			pr_debug("%s: %pr\n", __func__, res);
			return res;
		}
	}
	if (dev)
		return __devm_request_region(dev, parent, start, n, name);
	return __request_region(parent, start, n, name, flags);
}
314
315
/* Wrapper for __request_region(): no device, so no devres tracking. */
struct resource *__wrap___request_region(struct resource *parent,
		resource_size_t start, resource_size_t n, const char *name,
		int flags)
{
	return nfit_test_request_region(NULL, parent, start, n, name, flags);
}
EXPORT_SYMBOL(__wrap___request_region);
322
323
int __wrap_insert_resource(struct resource *parent, struct resource *res)
324
{
325
if (get_nfit_res(res->start))
326
return 0;
327
return insert_resource(parent, res);
328
}
329
EXPORT_SYMBOL(__wrap_insert_resource);
330
331
int __wrap_remove_resource(struct resource *res)
332
{
333
if (get_nfit_res(res->start))
334
return 0;
335
return remove_resource(res);
336
}
337
EXPORT_SYMBOL(__wrap_remove_resource);
338
339
/*
 * Wrapper for __devm_request_region(): a valid device is mandatory,
 * mirroring the real function's contract.
 */
struct resource *__wrap___devm_request_region(struct device *dev,
		struct resource *parent, resource_size_t start,
		resource_size_t n, const char *name)
{
	return dev ? nfit_test_request_region(dev, parent, start, n, name, 0)
		   : NULL;
}
EXPORT_SYMBOL(__wrap___devm_request_region);
348
349
/* Wrapper for __release_region(): only fall through for non-test ranges. */
void __wrap___release_region(struct resource *parent, resource_size_t start,
		resource_size_t n)
{
	if (!nfit_test_release_region(NULL, parent, start, n))
		__release_region(parent, start, n);
}
EXPORT_SYMBOL(__wrap___release_region);
356
357
/* Wrapper for __devm_release_region(): only fall through for non-test ranges. */
void __wrap___devm_release_region(struct device *dev, struct resource *parent,
		resource_size_t start, resource_size_t n)
{
	if (!nfit_test_release_region(dev, parent, start, n))
		__devm_release_region(dev, parent, start, n);
}
EXPORT_SYMBOL(__wrap___devm_release_region);
364
365
/*
 * Wrapper for acpi_evaluate_object(): a _FIT evaluation against a
 * test-owned handle is answered from the test resource's buffer, which
 * holds a pointer to a prebuilt acpi_object; anything else goes to real
 * ACPI.
 */
acpi_status __wrap_acpi_evaluate_object(acpi_handle handle, acpi_string path,
		struct acpi_object_list *p, struct acpi_buffer *buf)
{
	struct nfit_test_resource *nfit_res = get_nfit_res((long) handle);
	union acpi_object **obj;

	if (nfit_res && strcmp(path, "_FIT") == 0 && buf) {
		obj = nfit_res->buf;
		buf->length = sizeof(union acpi_object);
		buf->pointer = *obj;
		return AE_OK;
	}

	return acpi_evaluate_object(handle, path, p, buf);
}
EXPORT_SYMBOL(__wrap_acpi_evaluate_object);
380
381
/*
 * Wrapper for acpi_evaluate_dsm(): give the registered test hook first
 * crack at the _DSM; fall back to real ACPI when no hook is installed
 * or the hook declines (returns an ERR_PTR).  RCU guards the hook
 * lookup against concurrent teardown.
 */
union acpi_object * __wrap_acpi_evaluate_dsm(acpi_handle handle, const guid_t *guid,
		u64 rev, u64 func, union acpi_object *argv4)
{
	union acpi_object *obj = ERR_PTR(-ENXIO);
	struct iomap_ops *ops;

	rcu_read_lock();
	ops = list_first_or_null_rcu(&iomap_head, typeof(*ops), list);
	if (ops)
		obj = ops->evaluate_dsm(handle, guid, rev, func, argv4);
	rcu_read_unlock();

	if (IS_ERR(obj))
		return acpi_evaluate_dsm(handle, guid, rev, func, argv4);
	return obj;
}
EXPORT_SYMBOL(__wrap_acpi_evaluate_dsm);
398
399
MODULE_DESCRIPTION("NVDIMM unit test");
400
MODULE_LICENSE("GPL v2");
401
402