GitHub Repository: torvalds/linux
Path: blob/master/drivers/firmware/qcom/qcom_tzmem.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Memory allocator for buffers shared with the TrustZone.
 *
 * Copyright (C) 2023-2024 Linaro Ltd.
 */

#include <linux/bug.h>
#include <linux/cleanup.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/firmware/qcom/qcom_tzmem.h>
#include <linux/genalloc.h>
#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/radix-tree.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>

#include "qcom_scm.h"
#include "qcom_tzmem.h"

struct qcom_tzmem_area {
	struct list_head list;
	void *vaddr;
	dma_addr_t paddr;
	size_t size;
	void *priv;
};

struct qcom_tzmem_pool {
	struct gen_pool *genpool;
	struct list_head areas;
	enum qcom_tzmem_policy policy;
	size_t increment;
	size_t max_size;
	spinlock_t lock;
};

struct qcom_tzmem_chunk {
	size_t size;
	struct qcom_tzmem_pool *owner;
};

static struct device *qcom_tzmem_dev;
static RADIX_TREE(qcom_tzmem_chunks, GFP_ATOMIC);
static DEFINE_SPINLOCK(qcom_tzmem_chunks_lock);

#if IS_ENABLED(CONFIG_QCOM_TZMEM_MODE_GENERIC)

static int qcom_tzmem_init(void)
{
	return 0;
}

static int qcom_tzmem_init_area(struct qcom_tzmem_area *area)
{
	return 0;
}

static void qcom_tzmem_cleanup_area(struct qcom_tzmem_area *area)
{

}

#elif IS_ENABLED(CONFIG_QCOM_TZMEM_MODE_SHMBRIDGE)

#include <linux/firmware/qcom/qcom_scm.h>
#include <linux/of.h>

#define QCOM_SHM_BRIDGE_NUM_VM_SHIFT 9

static bool qcom_tzmem_using_shm_bridge;

/* List of machines that are known to not support SHM bridge correctly. */
static const char *const qcom_tzmem_blacklist[] = {
	"qcom,sc8180x",
	"qcom,sdm670", /* failure in GPU firmware loading */
	"qcom,sdm845", /* reset in rmtfs memory assignment */
	"qcom,sm7150", /* reset in rmtfs memory assignment */
	"qcom,sm8150", /* reset in rmtfs memory assignment */
	NULL
};

static int qcom_tzmem_init(void)
{
	const char *const *platform;
	int ret;

	for (platform = qcom_tzmem_blacklist; *platform; platform++) {
		if (of_machine_is_compatible(*platform))
			goto notsupp;
	}

	ret = qcom_scm_shm_bridge_enable(qcom_tzmem_dev);
	if (ret == -EOPNOTSUPP)
		goto notsupp;

	if (!ret)
		qcom_tzmem_using_shm_bridge = true;

	return ret;

notsupp:
	dev_info(qcom_tzmem_dev, "SHM Bridge not supported\n");
	return 0;
}

static int qcom_tzmem_init_area(struct qcom_tzmem_area *area)
{
	u64 pfn_and_ns_perm, ipfn_and_s_perm, size_and_flags;
	int ret;

	if (!qcom_tzmem_using_shm_bridge)
		return 0;

	pfn_and_ns_perm = (u64)area->paddr | QCOM_SCM_PERM_RW;
	ipfn_and_s_perm = (u64)area->paddr | QCOM_SCM_PERM_RW;
	size_and_flags = area->size | (1 << QCOM_SHM_BRIDGE_NUM_VM_SHIFT);

	u64 *handle __free(kfree) = kzalloc(sizeof(*handle), GFP_KERNEL);
	if (!handle)
		return -ENOMEM;

	ret = qcom_scm_shm_bridge_create(pfn_and_ns_perm, ipfn_and_s_perm,
					 size_and_flags, QCOM_SCM_VMID_HLOS,
					 handle);
	if (ret)
		return ret;

	area->priv = no_free_ptr(handle);

	return 0;
}

static void qcom_tzmem_cleanup_area(struct qcom_tzmem_area *area)
{
	u64 *handle = area->priv;

	if (!qcom_tzmem_using_shm_bridge)
		return;

	qcom_scm_shm_bridge_delete(*handle);
	kfree(handle);
}

#endif /* CONFIG_QCOM_TZMEM_MODE_SHMBRIDGE */
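
/*
 * Back the pool with a new DMA-coherent area: allocate it, register it
 * with the secure world where required, and hand it to the genpool.
 */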
static int qcom_tzmem_pool_add_memory(struct qcom_tzmem_pool *pool,
				      size_t size, gfp_t gfp)
{
	int ret;

	struct qcom_tzmem_area *area __free(kfree) = kzalloc(sizeof(*area),
							     gfp);
	if (!area)
		return -ENOMEM;

	area->size = PAGE_ALIGN(size);

	area->vaddr = dma_alloc_coherent(qcom_tzmem_dev, area->size,
					 &area->paddr, gfp);
	if (!area->vaddr)
		return -ENOMEM;

	ret = qcom_tzmem_init_area(area);
	if (ret) {
		dma_free_coherent(qcom_tzmem_dev, area->size,
				  area->vaddr, area->paddr);
		return ret;
	}

	ret = gen_pool_add_virt(pool->genpool, (unsigned long)area->vaddr,
				(phys_addr_t)area->paddr, size, -1);
	if (ret) {
		dma_free_coherent(qcom_tzmem_dev, area->size,
				  area->vaddr, area->paddr);
		return ret;
	}

	scoped_guard(spinlock_irqsave, &pool->lock)
		list_add_tail(&area->list, &pool->areas);

	area = NULL;
	return 0;
}

/**
 * qcom_tzmem_pool_new() - Create a new TZ memory pool.
 * @config: Pool configuration.
 *
 * Create a new pool of memory suitable for sharing with the TrustZone.
 *
 * Must not be used in atomic context.
 *
 * Return: New memory pool address or ERR_PTR() on error.
 */
struct qcom_tzmem_pool *
qcom_tzmem_pool_new(const struct qcom_tzmem_pool_config *config)
{
	int ret = -ENOMEM;

	might_sleep();

	switch (config->policy) {
	case QCOM_TZMEM_POLICY_STATIC:
		if (!config->initial_size)
			return ERR_PTR(-EINVAL);
		break;
	case QCOM_TZMEM_POLICY_MULTIPLIER:
		if (!config->increment)
			return ERR_PTR(-EINVAL);
		break;
	case QCOM_TZMEM_POLICY_ON_DEMAND:
		break;
	default:
		return ERR_PTR(-EINVAL);
	}

	struct qcom_tzmem_pool *pool __free(kfree) = kzalloc(sizeof(*pool),
							     GFP_KERNEL);
	if (!pool)
		return ERR_PTR(-ENOMEM);

	pool->genpool = gen_pool_create(PAGE_SHIFT, -1);
	if (!pool->genpool)
		return ERR_PTR(-ENOMEM);

	gen_pool_set_algo(pool->genpool, gen_pool_best_fit, NULL);

	pool->policy = config->policy;
	pool->increment = config->increment;
	pool->max_size = config->max_size;
	INIT_LIST_HEAD(&pool->areas);
	spin_lock_init(&pool->lock);

	if (config->initial_size) {
		ret = qcom_tzmem_pool_add_memory(pool, config->initial_size,
						 GFP_KERNEL);
		if (ret) {
			gen_pool_destroy(pool->genpool);
			return ERR_PTR(ret);
		}
	}

	return_ptr(pool);
}
EXPORT_SYMBOL_GPL(qcom_tzmem_pool_new);
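
/*
 * Usage sketch: one plausible way for a client driver to create a pool
 * that starts small and may grow on demand up to a fixed cap. The
 * config fields match struct qcom_tzmem_pool_config from
 * <linux/firmware/qcom/qcom_tzmem.h>; the calling context and the
 * sizes chosen here are hypothetical.
 *
 *	struct qcom_tzmem_pool_config config = {
 *		.policy = QCOM_TZMEM_POLICY_ON_DEMAND,
 *		.initial_size = SZ_256K,
 *		.max_size = SZ_4M,
 *	};
 *
 *	struct qcom_tzmem_pool *pool = qcom_tzmem_pool_new(&config);
 *	if (IS_ERR(pool))
 *		return PTR_ERR(pool);
 */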

/**
 * qcom_tzmem_pool_free() - Destroy a TZ memory pool and free all resources.
 * @pool: Memory pool to free.
 *
 * Must not be called if any of the allocated chunks has not been freed.
 * Must not be used in atomic context.
 */
void qcom_tzmem_pool_free(struct qcom_tzmem_pool *pool)
{
	struct qcom_tzmem_area *area, *next;
	struct qcom_tzmem_chunk *chunk;
	struct radix_tree_iter iter;
	bool non_empty = false;
	void __rcu **slot;

	might_sleep();

	if (!pool)
		return;

	scoped_guard(spinlock_irqsave, &qcom_tzmem_chunks_lock) {
		radix_tree_for_each_slot(slot, &qcom_tzmem_chunks, &iter, 0) {
			chunk = radix_tree_deref_slot_protected(slot,
						&qcom_tzmem_chunks_lock);

			if (chunk->owner == pool)
				non_empty = true;
		}
	}

	WARN(non_empty, "Freeing TZ memory pool with memory still allocated");

	list_for_each_entry_safe(area, next, &pool->areas, list) {
		list_del(&area->list);
		qcom_tzmem_cleanup_area(area);
		dma_free_coherent(qcom_tzmem_dev, area->size,
				  area->vaddr, area->paddr);
		kfree(area);
	}

	gen_pool_destroy(pool->genpool);
	kfree(pool);
}
EXPORT_SYMBOL_GPL(qcom_tzmem_pool_free);

static void devm_qcom_tzmem_pool_free(void *data)
{
	struct qcom_tzmem_pool *pool = data;

	qcom_tzmem_pool_free(pool);
}

/**
 * devm_qcom_tzmem_pool_new() - Managed variant of qcom_tzmem_pool_new().
 * @dev: Device managing this resource.
 * @config: Pool configuration.
 *
 * Must not be used in atomic context.
 *
 * Return: Address of the managed pool or ERR_PTR() on failure.
 */
struct qcom_tzmem_pool *
devm_qcom_tzmem_pool_new(struct device *dev,
			 const struct qcom_tzmem_pool_config *config)
{
	struct qcom_tzmem_pool *pool;
	int ret;

	pool = qcom_tzmem_pool_new(config);
	if (IS_ERR(pool))
		return pool;

	ret = devm_add_action_or_reset(dev, devm_qcom_tzmem_pool_free, pool);
	if (ret)
		return ERR_PTR(ret);

	return pool;
}
EXPORT_SYMBOL_GPL(devm_qcom_tzmem_pool_new);
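
/*
 * Usage sketch: the managed variant ties the pool's lifetime to a
 * device, so no explicit qcom_tzmem_pool_free() call is needed. The
 * probe function and the sizes below are hypothetical.
 *
 *	static int example_probe(struct platform_device *pdev)
 *	{
 *		struct qcom_tzmem_pool_config config = {
 *			.policy = QCOM_TZMEM_POLICY_STATIC,
 *			.initial_size = SZ_64K,
 *		};
 *		struct qcom_tzmem_pool *pool;
 *
 *		pool = devm_qcom_tzmem_pool_new(&pdev->dev, &config);
 *		if (IS_ERR(pool))
 *			return PTR_ERR(pool);
 *
 *		return 0;
 *	}
 */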
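
/*
 * Pool growth policy: static pools never grow, multiplier pools grow
 * by the current size times the configured increment, and on-demand
 * pools grow by exactly the requested amount, all subject to the
 * optional max_size cap.
 */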
static bool qcom_tzmem_try_grow_pool(struct qcom_tzmem_pool *pool,
				     size_t requested, gfp_t gfp)
{
	size_t current_size = gen_pool_size(pool->genpool);

	if (pool->max_size && (current_size + requested) > pool->max_size)
		return false;

	switch (pool->policy) {
	case QCOM_TZMEM_POLICY_STATIC:
		return false;
	case QCOM_TZMEM_POLICY_MULTIPLIER:
		requested = current_size * pool->increment;
		break;
	case QCOM_TZMEM_POLICY_ON_DEMAND:
		break;
	}

	return !qcom_tzmem_pool_add_memory(pool, requested, gfp);
}

/**
 * qcom_tzmem_alloc() - Allocate a memory chunk suitable for sharing with TZ.
 * @pool: TZ memory pool from which to allocate memory.
 * @size: Number of bytes to allocate.
 * @gfp: GFP flags.
 *
 * Can be used in any context.
 *
 * Return:
 * Address of the allocated buffer or NULL if no more memory can be allocated.
 * The buffer must be released using qcom_tzmem_free().
 */
void *qcom_tzmem_alloc(struct qcom_tzmem_pool *pool, size_t size, gfp_t gfp)
{
	unsigned long vaddr;
	int ret;

	if (!size)
		return NULL;

	size = PAGE_ALIGN(size);

	struct qcom_tzmem_chunk *chunk __free(kfree) = kzalloc(sizeof(*chunk),
							       gfp);
	if (!chunk)
		return NULL;

again:
	vaddr = gen_pool_alloc(pool->genpool, size);
	if (!vaddr) {
		if (qcom_tzmem_try_grow_pool(pool, size, gfp))
			goto again;

		return NULL;
	}

	chunk->size = size;
	chunk->owner = pool;

	scoped_guard(spinlock_irqsave, &qcom_tzmem_chunks_lock) {
		ret = radix_tree_insert(&qcom_tzmem_chunks, vaddr, chunk);
		if (ret) {
			gen_pool_free(pool->genpool, vaddr, size);
			return NULL;
		}

		chunk = NULL;
	}

	return (void *)vaddr;
}
EXPORT_SYMBOL_GPL(qcom_tzmem_alloc);
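
/*
 * Usage sketch: a typical allocate/release cycle against an existing
 * pool. The buffer comes back page-aligned, backed by DMA-coherent
 * memory that is already visible to the secure world where SHM bridge
 * is in use; 'pool' and the size are hypothetical.
 *
 *	void *buf = qcom_tzmem_alloc(pool, SZ_4K, GFP_KERNEL);
 *	if (!buf)
 *		return -ENOMEM;
 *
 *	... fill the buffer and hand it to the TZ interface ...
 *
 *	qcom_tzmem_free(buf);
 */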

/**
 * qcom_tzmem_free() - Release a buffer allocated from a TZ memory pool.
 * @vaddr: Virtual address of the buffer.
 *
 * Can be used in any context.
 */
void qcom_tzmem_free(void *vaddr)
{
	struct qcom_tzmem_chunk *chunk;

	scoped_guard(spinlock_irqsave, &qcom_tzmem_chunks_lock)
		chunk = radix_tree_delete_item(&qcom_tzmem_chunks,
					       (unsigned long)vaddr, NULL);

	if (!chunk) {
		WARN(1, "Virtual address %p not owned by TZ memory allocator",
		     vaddr);
		return;
	}

	scoped_guard(spinlock_irqsave, &chunk->owner->lock)
		gen_pool_free(chunk->owner->genpool, (unsigned long)vaddr,
			      chunk->size);
	kfree(chunk);
}
EXPORT_SYMBOL_GPL(qcom_tzmem_free);

/**
 * qcom_tzmem_to_phys() - Map the virtual address of TZ memory to physical.
 * @vaddr: Virtual address of memory allocated from a TZ memory pool.
 *
 * Can be used in any context. The address must point to memory allocated
 * using qcom_tzmem_alloc().
 *
 * Returns:
 * Physical address mapped from the virtual or 0 if the mapping failed.
 */
phys_addr_t qcom_tzmem_to_phys(void *vaddr)
{
	struct qcom_tzmem_chunk *chunk;
	struct radix_tree_iter iter;
	void __rcu **slot;
	phys_addr_t ret;

	guard(spinlock_irqsave)(&qcom_tzmem_chunks_lock);

	radix_tree_for_each_slot(slot, &qcom_tzmem_chunks, &iter, 0) {
		chunk = radix_tree_deref_slot_protected(slot,
						&qcom_tzmem_chunks_lock);

		ret = gen_pool_virt_to_phys(chunk->owner->genpool,
					    (unsigned long)vaddr);
		if (ret == -1)
			continue;

		return ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(qcom_tzmem_to_phys);
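
/*
 * Usage sketch: TrustZone interfaces take physical addresses, so the
 * translated address is what typically ends up in an SCM call
 * argument. 'buf' is assumed to come from qcom_tzmem_alloc(); a zero
 * return means the address is not tracked by this allocator.
 *
 *	phys_addr_t phys = qcom_tzmem_to_phys(buf);
 *	if (!phys)
 *		return -EINVAL;
 *
 *	... pass 'phys' to the firmware call ...
 */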
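
/*
 * Register @dev as the device backing all DMA allocations for TZ pools
 * and run the one-time, mode-specific setup: a no-op in generic mode,
 * SHM bridge enablement in SHM bridge mode. Only one backing device is
 * supported; subsequent calls return -EBUSY.
 */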
int qcom_tzmem_enable(struct device *dev)
{
	if (qcom_tzmem_dev)
		return -EBUSY;

	qcom_tzmem_dev = dev;

	return qcom_tzmem_init();
}
EXPORT_SYMBOL_GPL(qcom_tzmem_enable);

MODULE_DESCRIPTION("TrustZone memory allocator for Qualcomm firmware drivers");
MODULE_AUTHOR("Bartosz Golaszewski <[email protected]>");
MODULE_LICENSE("GPL");