// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2010,2015,2019 The Linux Foundation. All rights reserved.
 * Copyright (C) 2015 Linaro Ltd.
 */

#include <linux/arm-smccc.h>
#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/cleanup.h>
#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/cpumask.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/firmware/qcom/qcom_scm.h>
#include <linux/firmware/qcom/qcom_tzmem.h>
#include <linux/init.h>
#include <linux/interconnect.h>
#include <linux/interrupt.h>
#include <linux/kstrtox.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/of_reserved_mem.h>
#include <linux/platform_device.h>
#include <linux/reset-controller.h>
#include <linux/sizes.h>
#include <linux/types.h>

#include "qcom_scm.h"
#include "qcom_tzmem.h"

static u32 download_mode;

struct qcom_scm {
	struct device *dev;
	struct clk *core_clk;
	struct clk *iface_clk;
	struct clk *bus_clk;
	struct icc_path *path;
	struct completion waitq_comp;
	struct reset_controller_dev reset;

	/* control access to the interconnect path */
	struct mutex scm_bw_lock;
	int scm_vote_count;

	u64 dload_mode_addr;

	struct qcom_tzmem_pool *mempool;
};

struct qcom_scm_current_perm_info {
	__le32 vmid;
	__le32 perm;
	__le64 ctx;
	__le32 ctx_size;
	__le32 unused;
};

struct qcom_scm_mem_map_info {
	__le64 mem_addr;
	__le64 mem_size;
};

/**
 * struct qcom_scm_qseecom_resp - QSEECOM SCM call response.
 * @result:    Result or status of the SCM call. See &enum qcom_scm_qseecom_result.
 * @resp_type: Type of the response. See &enum qcom_scm_qseecom_resp_type.
 * @data:      Response data. The type of this data is given in @resp_type.
 */
struct qcom_scm_qseecom_resp {
	u64 result;
	u64 resp_type;
	u64 data;
};

enum qcom_scm_qseecom_result {
	QSEECOM_RESULT_SUCCESS = 0,
	QSEECOM_RESULT_INCOMPLETE = 1,
	QSEECOM_RESULT_BLOCKED_ON_LISTENER = 2,
	QSEECOM_RESULT_FAILURE = 0xFFFFFFFF,
};

enum qcom_scm_qseecom_resp_type {
	QSEECOM_SCM_RES_APP_ID = 0xEE01,
	QSEECOM_SCM_RES_QSEOS_LISTENER_ID = 0xEE02,
};

enum qcom_scm_qseecom_tz_owner {
	QSEECOM_TZ_OWNER_SIP = 2,
	QSEECOM_TZ_OWNER_TZ_APPS = 48,
	QSEECOM_TZ_OWNER_QSEE_OS = 50
};

enum qcom_scm_qseecom_tz_svc {
	QSEECOM_TZ_SVC_APP_ID_PLACEHOLDER = 0,
	QSEECOM_TZ_SVC_APP_MGR = 1,
	QSEECOM_TZ_SVC_INFO = 6,
};

enum qcom_scm_qseecom_tz_cmd_app {
	QSEECOM_TZ_CMD_APP_SEND = 1,
	QSEECOM_TZ_CMD_APP_LOOKUP = 3,
};

enum qcom_scm_qseecom_tz_cmd_info {
	QSEECOM_TZ_CMD_INFO_VERSION = 3,
};

#define QSEECOM_MAX_APP_NAME_SIZE	64
#define SHMBRIDGE_RESULT_NOTSUPP	4

/* Each bit configures cold/warm boot address for one of the 4 CPUs */
static const u8 qcom_scm_cpu_cold_bits[QCOM_SCM_BOOT_MAX_CPUS] = {
	0, BIT(0), BIT(3), BIT(5)
};
static const u8 qcom_scm_cpu_warm_bits[QCOM_SCM_BOOT_MAX_CPUS] = {
	BIT(2), BIT(1), BIT(4), BIT(6)
};

#define QCOM_SMC_WAITQ_FLAG_WAKE_ONE	BIT(0)

#define QCOM_DLOAD_MASK		GENMASK(5, 4)
#define QCOM_DLOAD_NODUMP	0
#define QCOM_DLOAD_FULLDUMP	1
#define QCOM_DLOAD_MINIDUMP	2
#define QCOM_DLOAD_BOTHDUMP	3

static const char * const qcom_scm_convention_names[] = {
	[SMC_CONVENTION_UNKNOWN] = "unknown",
	[SMC_CONVENTION_ARM_32] = "smc arm 32",
	[SMC_CONVENTION_ARM_64] = "smc arm 64",
	[SMC_CONVENTION_LEGACY] = "smc legacy",
};

static const char * const download_mode_name[] = {
	[QCOM_DLOAD_NODUMP] = "off",
	[QCOM_DLOAD_FULLDUMP] = "full",
	[QCOM_DLOAD_MINIDUMP] = "mini",
	[QCOM_DLOAD_BOTHDUMP] = "full,mini",
};

static struct qcom_scm *__scm;

static int qcom_scm_clk_enable(void)
{
	int ret;

	ret = clk_prepare_enable(__scm->core_clk);
	if (ret)
		goto bail;

	ret = clk_prepare_enable(__scm->iface_clk);
	if (ret)
		goto disable_core;

	ret = clk_prepare_enable(__scm->bus_clk);
	if (ret)
		goto disable_iface;

	return 0;

disable_iface:
	clk_disable_unprepare(__scm->iface_clk);
disable_core:
	clk_disable_unprepare(__scm->core_clk);
bail:
	return ret;
}

static void qcom_scm_clk_disable(void)
{
	clk_disable_unprepare(__scm->core_clk);
	clk_disable_unprepare(__scm->iface_clk);
	clk_disable_unprepare(__scm->bus_clk);
}

static int qcom_scm_bw_enable(void)
{
	int ret = 0;

	if (!__scm->path)
		return 0;

	mutex_lock(&__scm->scm_bw_lock);
	if (!__scm->scm_vote_count) {
		ret = icc_set_bw(__scm->path, 0, UINT_MAX);
		if (ret < 0) {
			dev_err(__scm->dev, "failed to set bandwidth request\n");
			goto err_bw;
		}
	}
	__scm->scm_vote_count++;
err_bw:
	mutex_unlock(&__scm->scm_bw_lock);

	return ret;
}

static void qcom_scm_bw_disable(void)
{
	if (!__scm->path)
		return;

	mutex_lock(&__scm->scm_bw_lock);
	if (__scm->scm_vote_count-- == 1)
		icc_set_bw(__scm->path, 0, 0);
	mutex_unlock(&__scm->scm_bw_lock);
}

enum qcom_scm_convention qcom_scm_convention = SMC_CONVENTION_UNKNOWN;
static DEFINE_SPINLOCK(scm_query_lock);

struct qcom_tzmem_pool *qcom_scm_get_tzmem_pool(void)
{
	if (!qcom_scm_is_available())
		return NULL;

	return __scm->mempool;
}

static enum qcom_scm_convention __get_convention(void)
{
	unsigned long flags;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_INFO,
		.cmd = QCOM_SCM_INFO_IS_CALL_AVAIL,
		.args[0] = SCM_SMC_FNID(QCOM_SCM_SVC_INFO,
					QCOM_SCM_INFO_IS_CALL_AVAIL) |
			   (ARM_SMCCC_OWNER_SIP << ARM_SMCCC_OWNER_SHIFT),
		.arginfo = QCOM_SCM_ARGS(1),
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;
	enum qcom_scm_convention probed_convention;
	int ret;
	bool forced = false;

	if (likely(qcom_scm_convention != SMC_CONVENTION_UNKNOWN))
		return qcom_scm_convention;

	/*
	 * Per the "SMC calling convention specification", the 64-bit calling
	 * convention can only be used when the client is 64-bit; otherwise
	 * the system will encounter undefined behaviour.
	 */
#if IS_ENABLED(CONFIG_ARM64)
	/*
	 * Device isn't required as there is only one argument - no device
	 * needed to dma_map_single to secure world
	 */
	probed_convention = SMC_CONVENTION_ARM_64;
	ret = __scm_smc_call(NULL, &desc, probed_convention, &res, true);
	if (!ret && res.result[0] == 1)
		goto found;

	/*
	 * Some SC7180 firmwares didn't implement the
	 * QCOM_SCM_INFO_IS_CALL_AVAIL call, so we fall back to forcing ARM_64
	 * calling conventions on these firmwares. Luckily we don't make any
	 * early calls into the firmware on these SoCs so the device pointer
	 * will be valid here to check if the compatible matches.
	 */
	if (of_device_is_compatible(__scm ? __scm->dev->of_node : NULL, "qcom,scm-sc7180")) {
		forced = true;
		goto found;
	}
#endif

	probed_convention = SMC_CONVENTION_ARM_32;
	ret = __scm_smc_call(NULL, &desc, probed_convention, &res, true);
	if (!ret && res.result[0] == 1)
		goto found;

	probed_convention = SMC_CONVENTION_LEGACY;
found:
	spin_lock_irqsave(&scm_query_lock, flags);
	if (probed_convention != qcom_scm_convention) {
		qcom_scm_convention = probed_convention;
		pr_info("qcom_scm: convention: %s%s\n",
			qcom_scm_convention_names[qcom_scm_convention],
			forced ? " (forced)" : "");
	}
	spin_unlock_irqrestore(&scm_query_lock, flags);

	return qcom_scm_convention;
}

/**
 * qcom_scm_call() - Invoke a syscall in the secure world
 * @dev:	device
 * @desc:	Descriptor structure containing arguments and return values
 * @res:	Structure containing results from SMC/HVC call
 *
 * Sends a command to the SCM and waits for the command to finish processing.
 * This should *only* be called in pre-emptible context.
 */
static int qcom_scm_call(struct device *dev, const struct qcom_scm_desc *desc,
			 struct qcom_scm_res *res)
{
	might_sleep();
	switch (__get_convention()) {
	case SMC_CONVENTION_ARM_32:
	case SMC_CONVENTION_ARM_64:
		return scm_smc_call(dev, desc, res, false);
	case SMC_CONVENTION_LEGACY:
		return scm_legacy_call(dev, desc, res);
	default:
		pr_err("Unknown current SCM calling convention.\n");
		return -EINVAL;
	}
}

/**
 * qcom_scm_call_atomic() - atomic variation of qcom_scm_call()
 * @dev:	device
 * @desc:	Descriptor structure containing arguments and return values
 * @res:	Structure containing results from SMC/HVC call
 *
 * Sends a command to the SCM and waits for the command to finish processing.
 * This can be called in atomic context.
 */
static int qcom_scm_call_atomic(struct device *dev,
				const struct qcom_scm_desc *desc,
				struct qcom_scm_res *res)
{
	switch (__get_convention()) {
	case SMC_CONVENTION_ARM_32:
	case SMC_CONVENTION_ARM_64:
		return scm_smc_call(dev, desc, res, true);
	case SMC_CONVENTION_LEGACY:
		return scm_legacy_call_atomic(dev, desc, res);
	default:
		pr_err("Unknown current SCM calling convention.\n");
		return -EINVAL;
	}
}

static bool __qcom_scm_is_call_available(struct device *dev, u32 svc_id,
					 u32 cmd_id)
{
	int ret;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_INFO,
		.cmd = QCOM_SCM_INFO_IS_CALL_AVAIL,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;

	desc.arginfo = QCOM_SCM_ARGS(1);
	switch (__get_convention()) {
	case SMC_CONVENTION_ARM_32:
	case SMC_CONVENTION_ARM_64:
		desc.args[0] = SCM_SMC_FNID(svc_id, cmd_id) |
				(ARM_SMCCC_OWNER_SIP << ARM_SMCCC_OWNER_SHIFT);
		break;
	case SMC_CONVENTION_LEGACY:
		desc.args[0] = SCM_LEGACY_FNID(svc_id, cmd_id);
		break;
	default:
		pr_err("Unknown SMC convention being used\n");
		return false;
	}

	ret = qcom_scm_call(dev, &desc, &res);

	return ret ? false : !!res.result[0];
}

static int qcom_scm_set_boot_addr(void *entry, const u8 *cpu_bits)
{
	int cpu;
	unsigned int flags = 0;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_BOOT,
		.cmd = QCOM_SCM_BOOT_SET_ADDR,
		.arginfo = QCOM_SCM_ARGS(2),
		.owner = ARM_SMCCC_OWNER_SIP,
	};

	for_each_present_cpu(cpu) {
		if (cpu >= QCOM_SCM_BOOT_MAX_CPUS)
			return -EINVAL;
		flags |= cpu_bits[cpu];
	}

	desc.args[0] = flags;
	desc.args[1] = virt_to_phys(entry);

	return qcom_scm_call_atomic(__scm ? __scm->dev : NULL, &desc, NULL);
}

static int qcom_scm_set_boot_addr_mc(void *entry, unsigned int flags)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_BOOT,
		.cmd = QCOM_SCM_BOOT_SET_ADDR_MC,
		.owner = ARM_SMCCC_OWNER_SIP,
		.arginfo = QCOM_SCM_ARGS(6),
		.args = {
			virt_to_phys(entry),
			/* Apply to all CPUs in all affinity levels */
			~0ULL, ~0ULL, ~0ULL, ~0ULL,
			flags,
		},
	};

	/* Need a device for DMA of the additional arguments */
	if (!__scm || __get_convention() == SMC_CONVENTION_LEGACY)
		return -EOPNOTSUPP;

	return qcom_scm_call(__scm->dev, &desc, NULL);
}

/**
 * qcom_scm_set_warm_boot_addr() - Set the warm boot address for all cpus
 * @entry: Entry point function for the cpus
 *
 * Set the Linux entry point for the SCM to transfer control to when coming
 * out of a power down. CPU power down may be executed on cpuidle or hotplug.
 */
int qcom_scm_set_warm_boot_addr(void *entry)
{
	if (qcom_scm_set_boot_addr_mc(entry, QCOM_SCM_BOOT_MC_FLAG_WARMBOOT))
		/* Fall back to the old SCM call */
		return qcom_scm_set_boot_addr(entry, qcom_scm_cpu_warm_bits);
	return 0;
}
EXPORT_SYMBOL_GPL(qcom_scm_set_warm_boot_addr);
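
/*
 * Illustrative sketch (not part of the original driver): how a platform
 * suspend or hotplug path might register its resume entry point. The use of
 * cpu_resume (the usual ARM resume trampoline from <asm/suspend.h>) is an
 * assumption for illustration; callers pass whatever entry point they own.
 */
static int __maybe_unused example_register_warm_boot(void *resume_entry)
{
	/* Tell the secure monitor where CPUs should jump on warm reset. */
	return qcom_scm_set_warm_boot_addr(resume_entry);
}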

/**
 * qcom_scm_set_cold_boot_addr() - Set the cold boot address for all cpus
 * @entry: Entry point function for the cpus
 */
int qcom_scm_set_cold_boot_addr(void *entry)
{
	if (qcom_scm_set_boot_addr_mc(entry, QCOM_SCM_BOOT_MC_FLAG_COLDBOOT))
		/* Fall back to the old SCM call */
		return qcom_scm_set_boot_addr(entry, qcom_scm_cpu_cold_bits);
	return 0;
}
EXPORT_SYMBOL_GPL(qcom_scm_set_cold_boot_addr);

/**
 * qcom_scm_cpu_power_down() - Power down the cpu
 * @flags:	Flags to flush cache
 *
 * This is an end point to power down cpu. If there was a pending interrupt,
 * the control would return from this function, otherwise, the cpu jumps to the
 * warm boot entry point set for this cpu upon reset.
 */
void qcom_scm_cpu_power_down(u32 flags)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_BOOT,
		.cmd = QCOM_SCM_BOOT_TERMINATE_PC,
		.args[0] = flags & QCOM_SCM_FLUSH_FLAG_MASK,
		.arginfo = QCOM_SCM_ARGS(1),
		.owner = ARM_SMCCC_OWNER_SIP,
	};

	qcom_scm_call_atomic(__scm ? __scm->dev : NULL, &desc, NULL);
}
EXPORT_SYMBOL_GPL(qcom_scm_cpu_power_down);
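
/*
 * Illustrative sketch (not part of the original driver): a cpuidle-style
 * caller would typically set the warm boot address once and then terminate
 * the power collapse with a cache flag. QCOM_SCM_CPU_PWR_DOWN_L2_ON is
 * assumed to come from <linux/firmware/qcom/qcom_scm.h>.
 */
static void __maybe_unused example_enter_power_collapse(void)
{
	/* Does not return unless an interrupt was already pending. */
	qcom_scm_cpu_power_down(QCOM_SCM_CPU_PWR_DOWN_L2_ON);
}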

int qcom_scm_set_remote_state(u32 state, u32 id)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_BOOT,
		.cmd = QCOM_SCM_BOOT_SET_REMOTE_STATE,
		.arginfo = QCOM_SCM_ARGS(2),
		.args[0] = state,
		.args[1] = id,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;
	int ret;

	ret = qcom_scm_call(__scm->dev, &desc, &res);

	return ret ? : res.result[0];
}
EXPORT_SYMBOL_GPL(qcom_scm_set_remote_state);

static int qcom_scm_disable_sdi(void)
{
	int ret;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_BOOT,
		.cmd = QCOM_SCM_BOOT_SDI_CONFIG,
		.args[0] = 1, /* Disable watchdog debug */
		.args[1] = 0, /* Disable SDI */
		.arginfo = QCOM_SCM_ARGS(2),
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;

	ret = qcom_scm_clk_enable();
	if (ret)
		return ret;
	ret = qcom_scm_call(__scm->dev, &desc, &res);

	qcom_scm_clk_disable();

	return ret ? : res.result[0];
}

static int __qcom_scm_set_dload_mode(struct device *dev, bool enable)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_BOOT,
		.cmd = QCOM_SCM_BOOT_SET_DLOAD_MODE,
		.arginfo = QCOM_SCM_ARGS(2),
		.args[0] = QCOM_SCM_BOOT_SET_DLOAD_MODE,
		.owner = ARM_SMCCC_OWNER_SIP,
	};

	desc.args[1] = enable ? QCOM_SCM_BOOT_SET_DLOAD_MODE : 0;

	return qcom_scm_call_atomic(__scm->dev, &desc, NULL);
}

static int qcom_scm_io_rmw(phys_addr_t addr, unsigned int mask, unsigned int val)
{
	unsigned int old;
	unsigned int new;
	int ret;

	ret = qcom_scm_io_readl(addr, &old);
	if (ret)
		return ret;

	new = (old & ~mask) | (val & mask);

	return qcom_scm_io_writel(addr, new);
}

static void qcom_scm_set_download_mode(u32 dload_mode)
{
	int ret = 0;

	if (__scm->dload_mode_addr) {
		ret = qcom_scm_io_rmw(__scm->dload_mode_addr, QCOM_DLOAD_MASK,
				      FIELD_PREP(QCOM_DLOAD_MASK, dload_mode));
	} else if (__qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_BOOT,
						QCOM_SCM_BOOT_SET_DLOAD_MODE)) {
		ret = __qcom_scm_set_dload_mode(__scm->dev, !!dload_mode);
	} else if (dload_mode) {
		dev_err(__scm->dev,
			"No available mechanism for setting download mode\n");
	}

	if (ret)
		dev_err(__scm->dev, "failed to set download mode: %d\n", ret);
}

/**
 * qcom_scm_pas_init_image() - Initialize peripheral authentication service
 *			       state machine for a given peripheral, using the
 *			       metadata
 * @peripheral: peripheral id
 * @metadata:	pointer to memory containing ELF header, program header table
 *		and optional blob of data used for authenticating the metadata
 *		and the rest of the firmware
 * @size:	size of the metadata
 * @ctx:	optional metadata context
 *
 * Return: 0 on success.
 *
 * Upon successful return, the PAS metadata context (@ctx) will be used to
 * track the metadata allocation, this needs to be released by invoking
 * qcom_scm_pas_metadata_release() by the caller.
 */
int qcom_scm_pas_init_image(u32 peripheral, const void *metadata, size_t size,
			    struct qcom_scm_pas_metadata *ctx)
{
	dma_addr_t mdata_phys;
	void *mdata_buf;
	int ret;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_PIL,
		.cmd = QCOM_SCM_PIL_PAS_INIT_IMAGE,
		.arginfo = QCOM_SCM_ARGS(2, QCOM_SCM_VAL, QCOM_SCM_RW),
		.args[0] = peripheral,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;

	/*
	 * During the SCM call memory protection will be enabled for the
	 * metadata blob, so make sure it's physically contiguous, 4K aligned
	 * and non-cacheable to avoid XPU violations.
	 *
	 * For PIL calls the hypervisor creates SHM Bridges for the blob
	 * buffers on behalf of Linux so we must not do it ourselves hence
	 * not using the TZMem allocator here.
	 *
	 * If we pass a buffer that is already part of an SHM Bridge to this
	 * call, it will fail.
	 */
	mdata_buf = dma_alloc_coherent(__scm->dev, size, &mdata_phys,
				       GFP_KERNEL);
	if (!mdata_buf)
		return -ENOMEM;

	memcpy(mdata_buf, metadata, size);

	ret = qcom_scm_clk_enable();
	if (ret)
		goto out;

	ret = qcom_scm_bw_enable();
	if (ret)
		goto disable_clk;

	desc.args[1] = mdata_phys;

	ret = qcom_scm_call(__scm->dev, &desc, &res);
	qcom_scm_bw_disable();

disable_clk:
	qcom_scm_clk_disable();

out:
	if (ret < 0 || !ctx) {
		dma_free_coherent(__scm->dev, size, mdata_buf, mdata_phys);
	} else if (ctx) {
		ctx->ptr = mdata_buf;
		ctx->phys = mdata_phys;
		ctx->size = size;
	}

	return ret ? : res.result[0];
}
EXPORT_SYMBOL_GPL(qcom_scm_pas_init_image);

/**
 * qcom_scm_pas_metadata_release() - release metadata context
 * @ctx:	metadata context
 */
void qcom_scm_pas_metadata_release(struct qcom_scm_pas_metadata *ctx)
{
	if (!ctx->ptr)
		return;

	dma_free_coherent(__scm->dev, ctx->size, ctx->ptr, ctx->phys);

	ctx->ptr = NULL;
	ctx->phys = 0;
	ctx->size = 0;
}
EXPORT_SYMBOL_GPL(qcom_scm_pas_metadata_release);

/**
 * qcom_scm_pas_mem_setup() - Prepare the memory related to a given peripheral
 *			      for firmware loading
 * @peripheral:	peripheral id
 * @addr:	start address of memory area to prepare
 * @size:	size of the memory area to prepare
 *
 * Returns 0 on success.
 */
int qcom_scm_pas_mem_setup(u32 peripheral, phys_addr_t addr, phys_addr_t size)
{
	int ret;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_PIL,
		.cmd = QCOM_SCM_PIL_PAS_MEM_SETUP,
		.arginfo = QCOM_SCM_ARGS(3),
		.args[0] = peripheral,
		.args[1] = addr,
		.args[2] = size,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;

	ret = qcom_scm_clk_enable();
	if (ret)
		return ret;

	ret = qcom_scm_bw_enable();
	if (ret)
		goto disable_clk;

	ret = qcom_scm_call(__scm->dev, &desc, &res);
	qcom_scm_bw_disable();

disable_clk:
	qcom_scm_clk_disable();

	return ret ? : res.result[0];
}
EXPORT_SYMBOL_GPL(qcom_scm_pas_mem_setup);

/**
 * qcom_scm_pas_auth_and_reset() - Authenticate the given peripheral firmware
 *				   and reset the remote processor
 * @peripheral:	peripheral id
 *
 * Return 0 on success.
 */
int qcom_scm_pas_auth_and_reset(u32 peripheral)
{
	int ret;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_PIL,
		.cmd = QCOM_SCM_PIL_PAS_AUTH_AND_RESET,
		.arginfo = QCOM_SCM_ARGS(1),
		.args[0] = peripheral,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;

	ret = qcom_scm_clk_enable();
	if (ret)
		return ret;

	ret = qcom_scm_bw_enable();
	if (ret)
		goto disable_clk;

	ret = qcom_scm_call(__scm->dev, &desc, &res);
	qcom_scm_bw_disable();

disable_clk:
	qcom_scm_clk_disable();

	return ret ? : res.result[0];
}
EXPORT_SYMBOL_GPL(qcom_scm_pas_auth_and_reset);

/**
 * qcom_scm_pas_shutdown() - Shut down the remote processor
 * @peripheral: peripheral id
 *
 * Returns 0 on success.
 */
int qcom_scm_pas_shutdown(u32 peripheral)
{
	int ret;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_PIL,
		.cmd = QCOM_SCM_PIL_PAS_SHUTDOWN,
		.arginfo = QCOM_SCM_ARGS(1),
		.args[0] = peripheral,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;

	ret = qcom_scm_clk_enable();
	if (ret)
		return ret;

	ret = qcom_scm_bw_enable();
	if (ret)
		goto disable_clk;

	ret = qcom_scm_call(__scm->dev, &desc, &res);
	qcom_scm_bw_disable();

disable_clk:
	qcom_scm_clk_disable();

	return ret ? : res.result[0];
}
EXPORT_SYMBOL_GPL(qcom_scm_pas_shutdown);
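
/*
 * Illustrative sketch (not part of the original driver): the usual PAS
 * sequence a remoteproc driver follows when booting a DSP or modem. The
 * peripheral ID, firmware layout, and metadata come from the caller; error
 * unwinding is abbreviated.
 */
static int __maybe_unused example_pas_boot(u32 pas_id, const void *mdata,
					   size_t mdata_size,
					   phys_addr_t fw_addr, size_t fw_size)
{
	struct qcom_scm_pas_metadata ctx = {};
	int ret;

	/* 1. Hand the signed metadata to TZ to start authentication. */
	ret = qcom_scm_pas_init_image(pas_id, mdata, mdata_size, &ctx);
	if (ret)
		return ret;

	/* 2. Donate the firmware memory region to the peripheral. */
	ret = qcom_scm_pas_mem_setup(pas_id, fw_addr, fw_size);
	if (ret)
		goto release;

	/* 3. Authenticate the loaded segments and release the processor. */
	ret = qcom_scm_pas_auth_and_reset(pas_id);

release:
	qcom_scm_pas_metadata_release(&ctx);
	return ret;
}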

/**
 * qcom_scm_pas_supported() - Check if the peripheral authentication service is
 *			      available for the given peripheral
 * @peripheral:	peripheral id
 *
 * Returns true if PAS is supported for this peripheral, otherwise false.
 */
bool qcom_scm_pas_supported(u32 peripheral)
{
	int ret;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_PIL,
		.cmd = QCOM_SCM_PIL_PAS_IS_SUPPORTED,
		.arginfo = QCOM_SCM_ARGS(1),
		.args[0] = peripheral,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;

	if (!__qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_PIL,
					  QCOM_SCM_PIL_PAS_IS_SUPPORTED))
		return false;

	ret = qcom_scm_call(__scm->dev, &desc, &res);

	return ret ? false : !!res.result[0];
}
EXPORT_SYMBOL_GPL(qcom_scm_pas_supported);

static int __qcom_scm_pas_mss_reset(struct device *dev, bool reset)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_PIL,
		.cmd = QCOM_SCM_PIL_PAS_MSS_RESET,
		.arginfo = QCOM_SCM_ARGS(2),
		.args[0] = reset,
		.args[1] = 0,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;
	int ret;

	ret = qcom_scm_call(__scm->dev, &desc, &res);

	return ret ? : res.result[0];
}

static int qcom_scm_pas_reset_assert(struct reset_controller_dev *rcdev,
				     unsigned long idx)
{
	if (idx != 0)
		return -EINVAL;

	return __qcom_scm_pas_mss_reset(__scm->dev, 1);
}

static int qcom_scm_pas_reset_deassert(struct reset_controller_dev *rcdev,
				       unsigned long idx)
{
	if (idx != 0)
		return -EINVAL;

	return __qcom_scm_pas_mss_reset(__scm->dev, 0);
}

static const struct reset_control_ops qcom_scm_pas_reset_ops = {
	.assert = qcom_scm_pas_reset_assert,
	.deassert = qcom_scm_pas_reset_deassert,
};

int qcom_scm_io_readl(phys_addr_t addr, unsigned int *val)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_IO,
		.cmd = QCOM_SCM_IO_READ,
		.arginfo = QCOM_SCM_ARGS(1),
		.args[0] = addr,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;
	int ret;

	ret = qcom_scm_call_atomic(__scm->dev, &desc, &res);
	if (ret >= 0)
		*val = res.result[0];

	return ret < 0 ? ret : 0;
}
EXPORT_SYMBOL_GPL(qcom_scm_io_readl);

int qcom_scm_io_writel(phys_addr_t addr, unsigned int val)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_IO,
		.cmd = QCOM_SCM_IO_WRITE,
		.arginfo = QCOM_SCM_ARGS(2),
		.args[0] = addr,
		.args[1] = val,
		.owner = ARM_SMCCC_OWNER_SIP,
	};

	return qcom_scm_call_atomic(__scm->dev, &desc, NULL);
}
EXPORT_SYMBOL_GPL(qcom_scm_io_writel);
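
/*
 * Illustrative sketch (not part of the original driver): a read-modify-write
 * of a secure register via the IO service, mirroring what the internal
 * qcom_scm_io_rmw() helper above does. The register address, mask, and bits
 * are hypothetical and come from the caller.
 */
static int __maybe_unused example_secure_reg_update(phys_addr_t reg,
						    unsigned int mask,
						    unsigned int bits)
{
	unsigned int val;
	int ret;

	ret = qcom_scm_io_readl(reg, &val);	/* read through TZ */
	if (ret)
		return ret;

	val = (val & ~mask) | (bits & mask);

	return qcom_scm_io_writel(reg, val);	/* write through TZ */
}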

/**
 * qcom_scm_restore_sec_cfg_available() - Check if secure environment
 * supports restore security config interface.
 *
 * Return true if restore-cfg interface is supported, false if not.
 */
bool qcom_scm_restore_sec_cfg_available(void)
{
	return __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_MP,
					    QCOM_SCM_MP_RESTORE_SEC_CFG);
}
EXPORT_SYMBOL_GPL(qcom_scm_restore_sec_cfg_available);

int qcom_scm_restore_sec_cfg(u32 device_id, u32 spare)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_MP,
		.cmd = QCOM_SCM_MP_RESTORE_SEC_CFG,
		.arginfo = QCOM_SCM_ARGS(2),
		.args[0] = device_id,
		.args[1] = spare,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;
	int ret;

	ret = qcom_scm_call(__scm->dev, &desc, &res);

	return ret ? : res.result[0];
}
EXPORT_SYMBOL_GPL(qcom_scm_restore_sec_cfg);

#define QCOM_SCM_CP_APERTURE_CONTEXT_MASK	GENMASK(7, 0)

bool qcom_scm_set_gpu_smmu_aperture_is_available(void)
{
	return __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_MP,
					    QCOM_SCM_MP_CP_SMMU_APERTURE_ID);
}
EXPORT_SYMBOL_GPL(qcom_scm_set_gpu_smmu_aperture_is_available);

int qcom_scm_set_gpu_smmu_aperture(unsigned int context_bank)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_MP,
		.cmd = QCOM_SCM_MP_CP_SMMU_APERTURE_ID,
		.arginfo = QCOM_SCM_ARGS(4),
		.args[0] = 0xffff0000 | FIELD_PREP(QCOM_SCM_CP_APERTURE_CONTEXT_MASK, context_bank),
		.args[1] = 0xffffffff,
		.args[2] = 0xffffffff,
		.args[3] = 0xffffffff,
		.owner = ARM_SMCCC_OWNER_SIP
	};

	return qcom_scm_call(__scm->dev, &desc, NULL);
}
EXPORT_SYMBOL_GPL(qcom_scm_set_gpu_smmu_aperture);

int qcom_scm_iommu_secure_ptbl_size(u32 spare, size_t *size)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_MP,
		.cmd = QCOM_SCM_MP_IOMMU_SECURE_PTBL_SIZE,
		.arginfo = QCOM_SCM_ARGS(1),
		.args[0] = spare,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;
	int ret;

	ret = qcom_scm_call(__scm->dev, &desc, &res);

	if (size)
		*size = res.result[0];

	return ret ? : res.result[1];
}
EXPORT_SYMBOL_GPL(qcom_scm_iommu_secure_ptbl_size);

int qcom_scm_iommu_secure_ptbl_init(u64 addr, u32 size, u32 spare)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_MP,
		.cmd = QCOM_SCM_MP_IOMMU_SECURE_PTBL_INIT,
		.arginfo = QCOM_SCM_ARGS(3, QCOM_SCM_RW, QCOM_SCM_VAL,
					 QCOM_SCM_VAL),
		.args[0] = addr,
		.args[1] = size,
		.args[2] = spare,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	int ret;

	ret = qcom_scm_call(__scm->dev, &desc, NULL);

	/* the pg table has been initialized already, ignore the error */
	if (ret == -EPERM)
		ret = 0;

	return ret;
}
EXPORT_SYMBOL_GPL(qcom_scm_iommu_secure_ptbl_init);

int qcom_scm_iommu_set_cp_pool_size(u32 spare, u32 size)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_MP,
		.cmd = QCOM_SCM_MP_IOMMU_SET_CP_POOL_SIZE,
		.arginfo = QCOM_SCM_ARGS(2),
		.args[0] = size,
		.args[1] = spare,
		.owner = ARM_SMCCC_OWNER_SIP,
	};

	return qcom_scm_call(__scm->dev, &desc, NULL);
}
EXPORT_SYMBOL_GPL(qcom_scm_iommu_set_cp_pool_size);

int qcom_scm_mem_protect_video_var(u32 cp_start, u32 cp_size,
				   u32 cp_nonpixel_start,
				   u32 cp_nonpixel_size)
{
	int ret;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_MP,
		.cmd = QCOM_SCM_MP_VIDEO_VAR,
		.arginfo = QCOM_SCM_ARGS(4, QCOM_SCM_VAL, QCOM_SCM_VAL,
					 QCOM_SCM_VAL, QCOM_SCM_VAL),
		.args[0] = cp_start,
		.args[1] = cp_size,
		.args[2] = cp_nonpixel_start,
		.args[3] = cp_nonpixel_size,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;

	ret = qcom_scm_call(__scm->dev, &desc, &res);

	return ret ? : res.result[0];
}
EXPORT_SYMBOL_GPL(qcom_scm_mem_protect_video_var);

static int __qcom_scm_assign_mem(struct device *dev, phys_addr_t mem_region,
				 size_t mem_sz, phys_addr_t src, size_t src_sz,
				 phys_addr_t dest, size_t dest_sz)
{
	int ret;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_MP,
		.cmd = QCOM_SCM_MP_ASSIGN,
		.arginfo = QCOM_SCM_ARGS(7, QCOM_SCM_RO, QCOM_SCM_VAL,
					 QCOM_SCM_RO, QCOM_SCM_VAL, QCOM_SCM_RO,
					 QCOM_SCM_VAL, QCOM_SCM_VAL),
		.args[0] = mem_region,
		.args[1] = mem_sz,
		.args[2] = src,
		.args[3] = src_sz,
		.args[4] = dest,
		.args[5] = dest_sz,
		.args[6] = 0,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;

	ret = qcom_scm_call(dev, &desc, &res);

	return ret ? : res.result[0];
}

/**
 * qcom_scm_assign_mem() - Make a secure call to reassign memory ownership
 * @mem_addr: mem region whose ownership needs to be reassigned
 * @mem_sz:   size of the region.
 * @srcvm:    vmid for current set of owners, each set bit in
 *	      flag indicates a unique owner
1053
* @newvm: array having new owners and corresponding permission
1054
* flags
1055
* @dest_cnt: number of owners in next set.
1056
*
1057
* Return negative errno on failure or 0 on success with @srcvm updated.
1058
*/
1059
int qcom_scm_assign_mem(phys_addr_t mem_addr, size_t mem_sz,
1060
u64 *srcvm,
1061
const struct qcom_scm_vmperm *newvm,
1062
unsigned int dest_cnt)
1063
{
1064
struct qcom_scm_current_perm_info *destvm;
1065
struct qcom_scm_mem_map_info *mem_to_map;
1066
phys_addr_t mem_to_map_phys;
1067
phys_addr_t dest_phys;
1068
phys_addr_t ptr_phys;
1069
size_t mem_to_map_sz;
1070
size_t dest_sz;
1071
size_t src_sz;
1072
size_t ptr_sz;
1073
int next_vm;
1074
__le32 *src;
1075
int ret, i, b;
1076
u64 srcvm_bits = *srcvm;
1077
1078
src_sz = hweight64(srcvm_bits) * sizeof(*src);
1079
mem_to_map_sz = sizeof(*mem_to_map);
1080
dest_sz = dest_cnt * sizeof(*destvm);
1081
ptr_sz = ALIGN(src_sz, SZ_64) + ALIGN(mem_to_map_sz, SZ_64) +
1082
ALIGN(dest_sz, SZ_64);
1083
1084
void *ptr __free(qcom_tzmem) = qcom_tzmem_alloc(__scm->mempool,
1085
ptr_sz, GFP_KERNEL);
1086
if (!ptr)
1087
return -ENOMEM;
1088
1089
ptr_phys = qcom_tzmem_to_phys(ptr);
1090
1091
/* Fill source vmid detail */
1092
src = ptr;
1093
i = 0;
1094
for (b = 0; b < BITS_PER_TYPE(u64); b++) {
1095
if (srcvm_bits & BIT(b))
1096
src[i++] = cpu_to_le32(b);
1097
}
1098
1099
/* Fill details of mem buff to map */
1100
mem_to_map = ptr + ALIGN(src_sz, SZ_64);
1101
mem_to_map_phys = ptr_phys + ALIGN(src_sz, SZ_64);
1102
mem_to_map->mem_addr = cpu_to_le64(mem_addr);
1103
mem_to_map->mem_size = cpu_to_le64(mem_sz);
1104
1105
next_vm = 0;
1106
/* Fill details of next vmid detail */
1107
destvm = ptr + ALIGN(mem_to_map_sz, SZ_64) + ALIGN(src_sz, SZ_64);
1108
dest_phys = ptr_phys + ALIGN(mem_to_map_sz, SZ_64) + ALIGN(src_sz, SZ_64);
1109
for (i = 0; i < dest_cnt; i++, destvm++, newvm++) {
1110
destvm->vmid = cpu_to_le32(newvm->vmid);
1111
destvm->perm = cpu_to_le32(newvm->perm);
1112
destvm->ctx = 0;
1113
destvm->ctx_size = 0;
1114
next_vm |= BIT(newvm->vmid);
1115
}
1116
1117
ret = __qcom_scm_assign_mem(__scm->dev, mem_to_map_phys, mem_to_map_sz,
1118
ptr_phys, src_sz, dest_phys, dest_sz);
1119
if (ret) {
1120
dev_err(__scm->dev,
1121
"Assign memory protection call failed %d\n", ret);
1122
return -EINVAL;
1123
}
1124
1125
*srcvm = next_vm;
1126
return 0;
1127
}
1128
EXPORT_SYMBOL_GPL(qcom_scm_assign_mem);
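
/*
 * Illustrative sketch (not part of the original driver): reassigning a
 * carved-out region from Linux (HLOS) to a remote VM, as remoteproc drivers
 * do for modem metadata. QCOM_SCM_VMID_MSS_MSA and QCOM_SCM_PERM_RW are
 * assumed to come from <linux/firmware/qcom/qcom_scm.h>; the caller would
 * seed *perms with BIT(QCOM_SCM_VMID_HLOS) before the first call.
 */
static int __maybe_unused example_share_with_modem(phys_addr_t addr,
						   size_t size, u64 *perms)
{
	struct qcom_scm_vmperm next = {
		.vmid = QCOM_SCM_VMID_MSS_MSA,
		.perm = QCOM_SCM_PERM_RW,
	};

	/* On success, *perms is updated to the new owner bitmap. */
	return qcom_scm_assign_mem(addr, size, perms, &next, 1);
}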

/**
 * qcom_scm_ocmem_lock_available() - is OCMEM lock/unlock interface available
 */
bool qcom_scm_ocmem_lock_available(void)
{
	return __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_OCMEM,
					    QCOM_SCM_OCMEM_LOCK_CMD);
}
EXPORT_SYMBOL_GPL(qcom_scm_ocmem_lock_available);

/**
 * qcom_scm_ocmem_lock() - call OCMEM lock interface to assign an OCMEM
 * region to the specified initiator
 *
 * @id:     tz initiator id
 * @offset: OCMEM offset
 * @size:   OCMEM size
 * @mode:   access mode (WIDE/NARROW)
 */
int qcom_scm_ocmem_lock(enum qcom_scm_ocmem_client id, u32 offset, u32 size,
			u32 mode)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_OCMEM,
		.cmd = QCOM_SCM_OCMEM_LOCK_CMD,
		.args[0] = id,
		.args[1] = offset,
		.args[2] = size,
		.args[3] = mode,
		.arginfo = QCOM_SCM_ARGS(4),
	};

	return qcom_scm_call(__scm->dev, &desc, NULL);
}
EXPORT_SYMBOL_GPL(qcom_scm_ocmem_lock);

/**
 * qcom_scm_ocmem_unlock() - call OCMEM unlock interface to release an OCMEM
 * region from the specified initiator
 *
 * @id:     tz initiator id
 * @offset: OCMEM offset
 * @size:   OCMEM size
 */
int qcom_scm_ocmem_unlock(enum qcom_scm_ocmem_client id, u32 offset, u32 size)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_OCMEM,
		.cmd = QCOM_SCM_OCMEM_UNLOCK_CMD,
		.args[0] = id,
		.args[1] = offset,
		.args[2] = size,
		.arginfo = QCOM_SCM_ARGS(3),
	};

	return qcom_scm_call(__scm->dev, &desc, NULL);
}
EXPORT_SYMBOL_GPL(qcom_scm_ocmem_unlock);
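
/*
 * Illustrative sketch (not part of the original driver): locking an OCMEM
 * range for an initiator and releasing it again. QCOM_SCM_OCMEM_GRAPHICS_ID
 * is an assumed value of enum qcom_scm_ocmem_client from the public header.
 */
static int __maybe_unused example_ocmem_cycle(u32 offset, u32 size, u32 mode)
{
	int ret;

	ret = qcom_scm_ocmem_lock(QCOM_SCM_OCMEM_GRAPHICS_ID, offset, size,
				  mode);
	if (ret)
		return ret;

	/* ... the initiator uses the locked region here ... */

	return qcom_scm_ocmem_unlock(QCOM_SCM_OCMEM_GRAPHICS_ID, offset, size);
}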

/**
 * qcom_scm_ice_available() - Is the ICE key programming interface available?
 *
 * Return: true iff the SCM calls wrapped by qcom_scm_ice_invalidate_key() and
 *	   qcom_scm_ice_set_key() are available.
 */
bool qcom_scm_ice_available(void)
{
	return __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_ES,
					    QCOM_SCM_ES_INVALIDATE_ICE_KEY) &&
		__qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_ES,
					     QCOM_SCM_ES_CONFIG_SET_ICE_KEY);
}
EXPORT_SYMBOL_GPL(qcom_scm_ice_available);

/**
 * qcom_scm_ice_invalidate_key() - Invalidate an inline encryption key
 * @index: the keyslot to invalidate
 *
 * The UFSHCI and eMMC standards define a standard way to do this, but it
 * doesn't work on these SoCs; only this SCM call does.
 *
 * It is assumed that the SoC has only one ICE instance being used, as this SCM
 * call doesn't specify which ICE instance the keyslot belongs to.
 *
 * Return: 0 on success; -errno on failure.
 */
int qcom_scm_ice_invalidate_key(u32 index)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_ES,
		.cmd = QCOM_SCM_ES_INVALIDATE_ICE_KEY,
		.arginfo = QCOM_SCM_ARGS(1),
		.args[0] = index,
		.owner = ARM_SMCCC_OWNER_SIP,
	};

	return qcom_scm_call(__scm->dev, &desc, NULL);
}
EXPORT_SYMBOL_GPL(qcom_scm_ice_invalidate_key);

/**
 * qcom_scm_ice_set_key() - Set an inline encryption key
 * @index: the keyslot into which to set the key
 * @key: the key to program
 * @key_size: the size of the key in bytes
 * @cipher: the encryption algorithm the key is for
 * @data_unit_size: the encryption data unit size, i.e. the size of each
 *		    individual plaintext and ciphertext. Given in 512-byte
 *		    units, e.g. 1 = 512 bytes, 8 = 4096 bytes, etc.
 *
 * Program a key into a keyslot of Qualcomm ICE (Inline Crypto Engine), where it
 * can then be used to encrypt/decrypt UFS or eMMC I/O requests inline.
 *
 * The UFSHCI and eMMC standards define a standard way to do this, but it
 * doesn't work on these SoCs; only this SCM call does.
 *
 * It is assumed that the SoC has only one ICE instance being used, as this SCM
 * call doesn't specify which ICE instance the keyslot belongs to.
 *
 * Return: 0 on success; -errno on failure.
 */
int qcom_scm_ice_set_key(u32 index, const u8 *key, u32 key_size,
			 enum qcom_scm_ice_cipher cipher, u32 data_unit_size)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_ES,
		.cmd = QCOM_SCM_ES_CONFIG_SET_ICE_KEY,
		.arginfo = QCOM_SCM_ARGS(5, QCOM_SCM_VAL, QCOM_SCM_RW,
					 QCOM_SCM_VAL, QCOM_SCM_VAL,
					 QCOM_SCM_VAL),
		.args[0] = index,
		.args[2] = key_size,
		.args[3] = cipher,
		.args[4] = data_unit_size,
		.owner = ARM_SMCCC_OWNER_SIP,
	};

	int ret;

	void *keybuf __free(qcom_tzmem) = qcom_tzmem_alloc(__scm->mempool,
							   key_size,
							   GFP_KERNEL);
	if (!keybuf)
		return -ENOMEM;
	memcpy(keybuf, key, key_size);
	desc.args[1] = qcom_tzmem_to_phys(keybuf);

	ret = qcom_scm_call(__scm->dev, &desc, NULL);

	memzero_explicit(keybuf, key_size);

	return ret;
}
EXPORT_SYMBOL_GPL(qcom_scm_ice_set_key);
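
/*
 * Illustrative sketch (not part of the original driver): programming a raw
 * AES-256-XTS key for 4K crypto data units, as the UFS/eMMC ICE glue does.
 * QCOM_SCM_ICE_CIPHER_AES_256_XTS is assumed from the public header; the
 * slot and key come from the storage driver's keyslot manager.
 */
static int __maybe_unused example_program_ice_slot(u32 slot, const u8 *key,
						   u32 key_size)
{
	/* data_unit_size is in 512-byte units: 8 * 512 = 4096 bytes. */
	return qcom_scm_ice_set_key(slot, key, key_size,
				    QCOM_SCM_ICE_CIPHER_AES_256_XTS, 8);
}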

bool qcom_scm_has_wrapped_key_support(void)
{
	return __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_ES,
					    QCOM_SCM_ES_DERIVE_SW_SECRET) &&
	       __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_ES,
					    QCOM_SCM_ES_GENERATE_ICE_KEY) &&
	       __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_ES,
					    QCOM_SCM_ES_PREPARE_ICE_KEY) &&
	       __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_ES,
					    QCOM_SCM_ES_IMPORT_ICE_KEY);
}
EXPORT_SYMBOL_GPL(qcom_scm_has_wrapped_key_support);

/**
 * qcom_scm_derive_sw_secret() - Derive software secret from wrapped key
 * @eph_key: an ephemerally-wrapped key
 * @eph_key_size: size of @eph_key in bytes
 * @sw_secret: output buffer for the software secret
 * @sw_secret_size: size of the software secret to derive in bytes
 *
 * Derive a software secret from an ephemerally-wrapped key for software crypto
 * operations. This is done by calling into the secure execution environment,
 * which then calls into the hardware to unwrap and derive the secret.
 *
 * For more information on sw_secret, see the "Hardware-wrapped keys" section of
 * Documentation/block/inline-encryption.rst.
 *
 * Return: 0 on success; -errno on failure.
 */
int qcom_scm_derive_sw_secret(const u8 *eph_key, size_t eph_key_size,
			      u8 *sw_secret, size_t sw_secret_size)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_ES,
		.cmd = QCOM_SCM_ES_DERIVE_SW_SECRET,
		.arginfo = QCOM_SCM_ARGS(4, QCOM_SCM_RW, QCOM_SCM_VAL,
					 QCOM_SCM_RW, QCOM_SCM_VAL),
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	int ret;

	void *eph_key_buf __free(qcom_tzmem) = qcom_tzmem_alloc(__scm->mempool,
								eph_key_size,
								GFP_KERNEL);
	if (!eph_key_buf)
		return -ENOMEM;

	void *sw_secret_buf __free(qcom_tzmem) = qcom_tzmem_alloc(__scm->mempool,
								  sw_secret_size,
								  GFP_KERNEL);
	if (!sw_secret_buf)
		return -ENOMEM;

	memcpy(eph_key_buf, eph_key, eph_key_size);
	desc.args[0] = qcom_tzmem_to_phys(eph_key_buf);
	desc.args[1] = eph_key_size;
	desc.args[2] = qcom_tzmem_to_phys(sw_secret_buf);
	desc.args[3] = sw_secret_size;

	ret = qcom_scm_call(__scm->dev, &desc, NULL);
	if (!ret)
		memcpy(sw_secret, sw_secret_buf, sw_secret_size);

	memzero_explicit(eph_key_buf, eph_key_size);
	memzero_explicit(sw_secret_buf, sw_secret_size);
	return ret;
}
EXPORT_SYMBOL_GPL(qcom_scm_derive_sw_secret);

/**
 * qcom_scm_generate_ice_key() - Generate a wrapped key for storage encryption
 * @lt_key: output buffer for the long-term wrapped key
 * @lt_key_size: size of @lt_key in bytes. Must be the exact wrapped key size
 *		 used by the SoC.
 *
 * Generate a key using the built-in HW module in the SoC. The resulting key is
 * returned wrapped with the platform-specific Key Encryption Key.
 *
 * Return: 0 on success; -errno on failure.
 */
int qcom_scm_generate_ice_key(u8 *lt_key, size_t lt_key_size)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_ES,
		.cmd = QCOM_SCM_ES_GENERATE_ICE_KEY,
		.arginfo = QCOM_SCM_ARGS(2, QCOM_SCM_RW, QCOM_SCM_VAL),
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	int ret;

	void *lt_key_buf __free(qcom_tzmem) = qcom_tzmem_alloc(__scm->mempool,
							       lt_key_size,
							       GFP_KERNEL);
	if (!lt_key_buf)
		return -ENOMEM;

	desc.args[0] = qcom_tzmem_to_phys(lt_key_buf);
	desc.args[1] = lt_key_size;

	ret = qcom_scm_call(__scm->dev, &desc, NULL);
	if (!ret)
		memcpy(lt_key, lt_key_buf, lt_key_size);

	memzero_explicit(lt_key_buf, lt_key_size);
	return ret;
}
EXPORT_SYMBOL_GPL(qcom_scm_generate_ice_key);

/**
 * qcom_scm_prepare_ice_key() - Re-wrap a key with the per-boot ephemeral key
 * @lt_key: a long-term wrapped key
 * @lt_key_size: size of @lt_key in bytes
 * @eph_key: output buffer for the ephemerally-wrapped key
 * @eph_key_size: size of @eph_key in bytes. Must be the exact wrapped key size
 *		  used by the SoC.
 *
 * Given a long-term wrapped key, re-wrap it with the per-boot ephemeral key for
 * added protection. The resulting key will only be valid for the current boot.
 *
 * Return: 0 on success; -errno on failure.
 */
int qcom_scm_prepare_ice_key(const u8 *lt_key, size_t lt_key_size,
			     u8 *eph_key, size_t eph_key_size)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_ES,
		.cmd = QCOM_SCM_ES_PREPARE_ICE_KEY,
		.arginfo = QCOM_SCM_ARGS(4, QCOM_SCM_RO, QCOM_SCM_VAL,
					 QCOM_SCM_RW, QCOM_SCM_VAL),
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	int ret;

	void *lt_key_buf __free(qcom_tzmem) = qcom_tzmem_alloc(__scm->mempool,
							       lt_key_size,
							       GFP_KERNEL);
	if (!lt_key_buf)
		return -ENOMEM;

	void *eph_key_buf __free(qcom_tzmem) = qcom_tzmem_alloc(__scm->mempool,
								eph_key_size,
								GFP_KERNEL);
	if (!eph_key_buf)
		return -ENOMEM;

	memcpy(lt_key_buf, lt_key, lt_key_size);
	desc.args[0] = qcom_tzmem_to_phys(lt_key_buf);
	desc.args[1] = lt_key_size;
	desc.args[2] = qcom_tzmem_to_phys(eph_key_buf);
	desc.args[3] = eph_key_size;

	ret = qcom_scm_call(__scm->dev, &desc, NULL);
	if (!ret)
		memcpy(eph_key, eph_key_buf, eph_key_size);

	memzero_explicit(lt_key_buf, lt_key_size);
	memzero_explicit(eph_key_buf, eph_key_size);
	return ret;
}
EXPORT_SYMBOL_GPL(qcom_scm_prepare_ice_key);

/**
 * qcom_scm_import_ice_key() - Import key for storage encryption
 * @raw_key: the raw key to import
 * @raw_key_size: size of @raw_key in bytes
 * @lt_key: output buffer for the long-term wrapped key
 * @lt_key_size: size of @lt_key in bytes. Must be the exact wrapped key size
 *		 used by the SoC.
 *
 * Import a raw key and return a long-term wrapped key. Uses the SoC's HWKM to
 * wrap the raw key using the platform-specific Key Encryption Key.
 *
 * Return: 0 on success; -errno on failure.
 */
int qcom_scm_import_ice_key(const u8 *raw_key, size_t raw_key_size,
			    u8 *lt_key, size_t lt_key_size)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_ES,
		.cmd = QCOM_SCM_ES_IMPORT_ICE_KEY,
		.arginfo = QCOM_SCM_ARGS(4, QCOM_SCM_RO, QCOM_SCM_VAL,
					 QCOM_SCM_RW, QCOM_SCM_VAL),
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	int ret;

	void *raw_key_buf __free(qcom_tzmem) = qcom_tzmem_alloc(__scm->mempool,
								raw_key_size,
								GFP_KERNEL);
	if (!raw_key_buf)
		return -ENOMEM;

	void *lt_key_buf __free(qcom_tzmem) = qcom_tzmem_alloc(__scm->mempool,
							       lt_key_size,
							       GFP_KERNEL);
	if (!lt_key_buf)
		return -ENOMEM;

	memcpy(raw_key_buf, raw_key, raw_key_size);
	desc.args[0] = qcom_tzmem_to_phys(raw_key_buf);
	desc.args[1] = raw_key_size;
	desc.args[2] = qcom_tzmem_to_phys(lt_key_buf);
	desc.args[3] = lt_key_size;

	ret = qcom_scm_call(__scm->dev, &desc, NULL);
	if (!ret)
		memcpy(lt_key, lt_key_buf, lt_key_size);

	memzero_explicit(raw_key_buf, raw_key_size);
	memzero_explicit(lt_key_buf, lt_key_size);
	return ret;
}
EXPORT_SYMBOL_GPL(qcom_scm_import_ice_key);
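
/*
 * Illustrative sketch (not part of the original driver): the typical
 * hardware-wrapped key lifecycle. A long-term key is generated (or imported)
 * once, then re-wrapped with the per-boot ephemeral key before each use.
 * Both buffer sizes must match the SoC's exact wrapped key size.
 */
static int __maybe_unused example_wrapped_key_cycle(u8 *lt_key, size_t lt_size,
						    u8 *eph_key,
						    size_t eph_size)
{
	int ret;

	/* Create a long-term wrapped key inside the hardware key manager. */
	ret = qcom_scm_generate_ice_key(lt_key, lt_size);
	if (ret)
		return ret;

	/* Re-wrap it with the per-boot ephemeral key for actual ICE use. */
	return qcom_scm_prepare_ice_key(lt_key, lt_size, eph_key, eph_size);
}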

/**
 * qcom_scm_hdcp_available() - Check if secure environment supports HDCP.
 *
 * Return true if HDCP is supported, false if not.
 */
bool qcom_scm_hdcp_available(void)
{
	bool avail;
	int ret = qcom_scm_clk_enable();

	if (ret)
		return false;

	avail = __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_HDCP,
					     QCOM_SCM_HDCP_INVOKE);

	qcom_scm_clk_disable();

	return avail;
}
EXPORT_SYMBOL_GPL(qcom_scm_hdcp_available);

/**
 * qcom_scm_hdcp_req() - Send HDCP request.
 * @req: HDCP request array
 * @req_cnt: HDCP request array count
 * @resp: response buffer passed to SCM
 *
 * Write HDCP register(s) through SCM.
 */
int qcom_scm_hdcp_req(struct qcom_scm_hdcp_req *req, u32 req_cnt, u32 *resp)
{
	int ret;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_HDCP,
		.cmd = QCOM_SCM_HDCP_INVOKE,
		.arginfo = QCOM_SCM_ARGS(10),
		.args = {
			req[0].addr,
			req[0].val,
			req[1].addr,
			req[1].val,
			req[2].addr,
			req[2].val,
			req[3].addr,
			req[3].val,
			req[4].addr,
			req[4].val
		},
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;

	if (req_cnt > QCOM_SCM_HDCP_MAX_REQ_CNT)
		return -ERANGE;

	ret = qcom_scm_clk_enable();
	if (ret)
		return ret;

	ret = qcom_scm_call(__scm->dev, &desc, &res);
	*resp = res.result[0];

	qcom_scm_clk_disable();

	return ret;
}
EXPORT_SYMBOL_GPL(qcom_scm_hdcp_req);

int qcom_scm_iommu_set_pt_format(u32 sec_id, u32 ctx_num, u32 pt_fmt)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_SMMU_PROGRAM,
		.cmd = QCOM_SCM_SMMU_PT_FORMAT,
		.arginfo = QCOM_SCM_ARGS(3),
		.args[0] = sec_id,
		.args[1] = ctx_num,
		.args[2] = pt_fmt, /* 0: LPAE AArch32 - 1: AArch64 */
		.owner = ARM_SMCCC_OWNER_SIP,
	};

	return qcom_scm_call(__scm->dev, &desc, NULL);
}
EXPORT_SYMBOL_GPL(qcom_scm_iommu_set_pt_format);

int qcom_scm_qsmmu500_wait_safe_toggle(bool en)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_SMMU_PROGRAM,
		.cmd = QCOM_SCM_SMMU_CONFIG_ERRATA1,
		.arginfo = QCOM_SCM_ARGS(2),
		.args[0] = QCOM_SCM_SMMU_CONFIG_ERRATA1_CLIENT_ALL,
		.args[1] = en,
		.owner = ARM_SMCCC_OWNER_SIP,
	};

	return qcom_scm_call_atomic(__scm->dev, &desc, NULL);
}
EXPORT_SYMBOL_GPL(qcom_scm_qsmmu500_wait_safe_toggle);

bool qcom_scm_lmh_dcvsh_available(void)
{
	return __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_LMH, QCOM_SCM_LMH_LIMIT_DCVSH);
}
EXPORT_SYMBOL_GPL(qcom_scm_lmh_dcvsh_available);

/*
 * This is only supposed to be called once by the TZMem module. It takes the
 * SCM struct device as argument and uses it to pass the call as at the time
 * the SHM Bridge is enabled, the SCM is not yet fully set up and doesn't
 * accept global user calls. Don't try to use the __scm pointer here.
 */
int qcom_scm_shm_bridge_enable(struct device *scm_dev)
{
	int ret;

	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_MP,
		.cmd = QCOM_SCM_MP_SHM_BRIDGE_ENABLE,
		.owner = ARM_SMCCC_OWNER_SIP
	};

	struct qcom_scm_res res;

	if (!__qcom_scm_is_call_available(scm_dev, QCOM_SCM_SVC_MP,
					  QCOM_SCM_MP_SHM_BRIDGE_ENABLE))
		return -EOPNOTSUPP;

	ret = qcom_scm_call(scm_dev, &desc, &res);

	if (ret)
		return ret;

	if (res.result[0] == SHMBRIDGE_RESULT_NOTSUPP)
		return -EOPNOTSUPP;

	return res.result[0];
}
EXPORT_SYMBOL_GPL(qcom_scm_shm_bridge_enable);

int qcom_scm_shm_bridge_create(u64 pfn_and_ns_perm_flags,
			       u64 ipfn_and_s_perm_flags, u64 size_and_flags,
			       u64 ns_vmids, u64 *handle)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_MP,
		.cmd = QCOM_SCM_MP_SHM_BRIDGE_CREATE,
		.owner = ARM_SMCCC_OWNER_SIP,
		.args[0] = pfn_and_ns_perm_flags,
		.args[1] = ipfn_and_s_perm_flags,
		.args[2] = size_and_flags,
		.args[3] = ns_vmids,
		.arginfo = QCOM_SCM_ARGS(4, QCOM_SCM_VAL, QCOM_SCM_VAL,
					 QCOM_SCM_VAL, QCOM_SCM_VAL),
	};

	struct qcom_scm_res res;
	int ret;

	ret = qcom_scm_call(__scm->dev, &desc, &res);

	if (handle && !ret)
		*handle = res.result[1];

	return ret ?: res.result[0];
}
EXPORT_SYMBOL_GPL(qcom_scm_shm_bridge_create);

int qcom_scm_shm_bridge_delete(u64 handle)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_MP,
		.cmd = QCOM_SCM_MP_SHM_BRIDGE_DELETE,
		.owner = ARM_SMCCC_OWNER_SIP,
		.args[0] = handle,
		.arginfo = QCOM_SCM_ARGS(1, QCOM_SCM_VAL),
	};

	return qcom_scm_call(__scm->dev, &desc, NULL);
}
EXPORT_SYMBOL_GPL(qcom_scm_shm_bridge_delete);

int qcom_scm_lmh_profile_change(u32 profile_id)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_LMH,
		.cmd = QCOM_SCM_LMH_LIMIT_PROFILE_CHANGE,
		.arginfo = QCOM_SCM_ARGS(1, QCOM_SCM_VAL),
		.args[0] = profile_id,
		.owner = ARM_SMCCC_OWNER_SIP,
	};

	return qcom_scm_call(__scm->dev, &desc, NULL);
}
EXPORT_SYMBOL_GPL(qcom_scm_lmh_profile_change);

int qcom_scm_lmh_dcvsh(u32 payload_fn, u32 payload_reg, u32 payload_val,
		       u64 limit_node, u32 node_id, u64 version)
{
	int ret, payload_size = 5 * sizeof(u32);

	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_LMH,
		.cmd = QCOM_SCM_LMH_LIMIT_DCVSH,
		.arginfo = QCOM_SCM_ARGS(5, QCOM_SCM_RO, QCOM_SCM_VAL, QCOM_SCM_VAL,
					 QCOM_SCM_VAL, QCOM_SCM_VAL),
		.args[1] = payload_size,
		.args[2] = limit_node,
		.args[3] = node_id,
		.args[4] = version,
		.owner = ARM_SMCCC_OWNER_SIP,
	};

	u32 *payload_buf __free(qcom_tzmem) = qcom_tzmem_alloc(__scm->mempool,
							       payload_size,
							       GFP_KERNEL);
	if (!payload_buf)
		return -ENOMEM;

	payload_buf[0] = payload_fn;
	payload_buf[1] = 0;
	payload_buf[2] = payload_reg;
	payload_buf[3] = 1;
	payload_buf[4] = payload_val;

	desc.args[0] = qcom_tzmem_to_phys(payload_buf);

	ret = qcom_scm_call(__scm->dev, &desc, NULL);

	return ret;
}
EXPORT_SYMBOL_GPL(qcom_scm_lmh_dcvsh);

int qcom_scm_gpu_init_regs(u32 gpu_req)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_GPU,
		.cmd = QCOM_SCM_SVC_GPU_INIT_REGS,
		.arginfo = QCOM_SCM_ARGS(1),
		.args[0] = gpu_req,
		.owner = ARM_SMCCC_OWNER_SIP,
	};

	return qcom_scm_call(__scm->dev, &desc, NULL);
}
EXPORT_SYMBOL_GPL(qcom_scm_gpu_init_regs);

static int qcom_scm_find_dload_address(struct device *dev, u64 *addr)
{
	struct device_node *tcsr;
	struct device_node *np = dev->of_node;
	struct resource res;
	u32 offset;
	int ret;

	tcsr = of_parse_phandle(np, "qcom,dload-mode", 0);
	if (!tcsr)
		return 0;

	ret = of_address_to_resource(tcsr, 0, &res);
	of_node_put(tcsr);
	if (ret)
		return ret;

	ret = of_property_read_u32_index(np, "qcom,dload-mode", 1, &offset);
	if (ret < 0)
		return ret;

	*addr = res.start + offset;

	return 0;
}

#ifdef CONFIG_QCOM_QSEECOM

/* Lock for QSEECOM SCM call executions */
static DEFINE_MUTEX(qcom_scm_qseecom_call_lock);

static int __qcom_scm_qseecom_call(const struct qcom_scm_desc *desc,
				   struct qcom_scm_qseecom_resp *res)
{
	struct qcom_scm_res scm_res = {};
	int status;

	/*
	 * QSEECOM SCM calls should not be executed concurrently. Therefore, we
	 * require the respective call lock to be held.
	 */
	lockdep_assert_held(&qcom_scm_qseecom_call_lock);

	status = qcom_scm_call(__scm->dev, desc, &scm_res);

	res->result = scm_res.result[0];
	res->resp_type = scm_res.result[1];
	res->data = scm_res.result[2];

	if (status)
		return status;

	return 0;
}

/**
 * qcom_scm_qseecom_call() - Perform a QSEECOM SCM call.
 * @desc: SCM call descriptor.
 * @res:  SCM call response (output).
 *
 * Performs the QSEECOM SCM call described by @desc, returning the response in
 * @res.
 *
 * Return: Zero on success, nonzero on failure.
 */
static int qcom_scm_qseecom_call(const struct qcom_scm_desc *desc,
				 struct qcom_scm_qseecom_resp *res)
{
	int status;

	/*
	 * Note: Multiple QSEECOM SCM calls should not be executed at the same
	 * time, so lock things here. This needs to be extended to
	 * callback/listener handling when support for that is implemented.
1821
*/
1822
1823
mutex_lock(&qcom_scm_qseecom_call_lock);
1824
status = __qcom_scm_qseecom_call(desc, res);
1825
mutex_unlock(&qcom_scm_qseecom_call_lock);
1826
1827
dev_dbg(__scm->dev, "%s: owner=%x, svc=%x, cmd=%x, result=%lld, type=%llx, data=%llx\n",
1828
__func__, desc->owner, desc->svc, desc->cmd, res->result,
1829
res->resp_type, res->data);
1830
1831
if (status) {
1832
dev_err(__scm->dev, "qseecom: scm call failed with error %d\n", status);
1833
return status;
1834
}
1835
1836
/*
1837
* TODO: Handle incomplete and blocked calls:
1838
*
1839
* Incomplete and blocked calls are not supported yet. Some devices
1840
* and/or commands require those, some don't. Let's warn about them
1841
* prominently in case someone attempts to try these commands with a
1842
* device/command combination that isn't supported yet.
1843
*/
1844
WARN_ON(res->result == QSEECOM_RESULT_INCOMPLETE);
1845
WARN_ON(res->result == QSEECOM_RESULT_BLOCKED_ON_LISTENER);
1846
1847
return 0;
1848
}
1849
1850
/**
1851
* qcom_scm_qseecom_get_version() - Query the QSEECOM version.
1852
* @version: Pointer where the QSEECOM version will be stored.
1853
*
1854
 * Performs the QSEECOM SCM call to query the QSEECOM version currently
 * running in the TrustZone.
 *
 * Return: Zero on success, nonzero on failure.
 */
static int qcom_scm_qseecom_get_version(u32 *version)
{
	struct qcom_scm_desc desc = {};
	struct qcom_scm_qseecom_resp res = {};
	u32 feature = 10;
	int ret;

	desc.owner = QSEECOM_TZ_OWNER_SIP;
	desc.svc = QSEECOM_TZ_SVC_INFO;
	desc.cmd = QSEECOM_TZ_CMD_INFO_VERSION;
	desc.arginfo = QCOM_SCM_ARGS(1, QCOM_SCM_VAL);
	desc.args[0] = feature;

	ret = qcom_scm_qseecom_call(&desc, &res);
	if (ret)
		return ret;

	*version = res.result;
	return 0;
}

/**
 * qcom_scm_qseecom_app_get_id() - Query the app ID for a given QSEE app name.
 * @app_name: The name of the app.
 * @app_id:   The returned app ID.
 *
 * Query and return the application ID of the QSEE app identified by the given
1886
* name. This returned ID is the unique identifier of the app required for
1887
* subsequent communication.
1888
*
1889
* Return: Zero on success, nonzero on failure, -ENOENT if the app has not been
1890
* loaded or could not be found.
1891
*/
1892
int qcom_scm_qseecom_app_get_id(const char *app_name, u32 *app_id)
1893
{
1894
unsigned long name_buf_size = QSEECOM_MAX_APP_NAME_SIZE;
1895
unsigned long app_name_len = strlen(app_name);
1896
struct qcom_scm_desc desc = {};
1897
struct qcom_scm_qseecom_resp res = {};
1898
int status;
1899
1900
if (app_name_len >= name_buf_size)
1901
return -EINVAL;
1902
1903
char *name_buf __free(qcom_tzmem) = qcom_tzmem_alloc(__scm->mempool,
1904
name_buf_size,
1905
GFP_KERNEL);
1906
if (!name_buf)
1907
return -ENOMEM;
1908
1909
memcpy(name_buf, app_name, app_name_len);
1910
1911
desc.owner = QSEECOM_TZ_OWNER_QSEE_OS;
1912
desc.svc = QSEECOM_TZ_SVC_APP_MGR;
1913
desc.cmd = QSEECOM_TZ_CMD_APP_LOOKUP;
1914
desc.arginfo = QCOM_SCM_ARGS(2, QCOM_SCM_RW, QCOM_SCM_VAL);
1915
desc.args[0] = qcom_tzmem_to_phys(name_buf);
1916
desc.args[1] = app_name_len;
1917
1918
status = qcom_scm_qseecom_call(&desc, &res);
1919
1920
if (status)
1921
return status;
1922
1923
if (res.result == QSEECOM_RESULT_FAILURE)
1924
return -ENOENT;
1925
1926
if (res.result != QSEECOM_RESULT_SUCCESS)
1927
return -EINVAL;
1928
1929
if (res.resp_type != QSEECOM_SCM_RES_APP_ID)
1930
return -EINVAL;
1931
1932
*app_id = res.data;
1933
return 0;
1934
}
1935
EXPORT_SYMBOL_GPL(qcom_scm_qseecom_app_get_id);
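
/*
 * Illustrative sketch (not part of the original driver): resolving the app ID
 * of a trusted application before talking to it. The app name string
 * "example.tz.app" is hypothetical; real clients pass the name of a loaded
 * QSEE app.
 */
static int __maybe_unused example_lookup_qsee_app(u32 *app_id)
{
	int ret;

	ret = qcom_scm_qseecom_app_get_id("example.tz.app", app_id);
	if (ret == -ENOENT)
		pr_debug("qseecom: example app not loaded\n");

	return ret;
}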

/**
 * qcom_scm_qseecom_app_send() - Send to and receive data from a given QSEE app.
 * @app_id:   The ID of the target app.
 * @req:      Request buffer sent to the app (must be TZ memory)
 * @req_size: Size of the request buffer.
 * @rsp:      Response buffer, written to by the app (must be TZ memory)
 * @rsp_size: Size of the response buffer.
 *
* Sends a request to the QSEE app associated with the given ID and read back
1946
* its response. The caller must provide two DMA memory regions, one for the
1947
* request and one for the response, and fill out the @req region with the
1948
* respective (app-specific) request data. The QSEE app reads this and returns
1949
* its response in the @rsp region.
1950
*
1951
* Return: Zero on success, nonzero on failure.
1952
*/
1953
int qcom_scm_qseecom_app_send(u32 app_id, void *req, size_t req_size,
			      void *rsp, size_t rsp_size)
{
	struct qcom_scm_qseecom_resp res = {};
	struct qcom_scm_desc desc = {};
	phys_addr_t req_phys;
	phys_addr_t rsp_phys;
	int status;

	req_phys = qcom_tzmem_to_phys(req);
	rsp_phys = qcom_tzmem_to_phys(rsp);

	desc.owner = QSEECOM_TZ_OWNER_TZ_APPS;
	desc.svc = QSEECOM_TZ_SVC_APP_ID_PLACEHOLDER;
	desc.cmd = QSEECOM_TZ_CMD_APP_SEND;
	desc.arginfo = QCOM_SCM_ARGS(5, QCOM_SCM_VAL,
				     QCOM_SCM_RW, QCOM_SCM_VAL,
				     QCOM_SCM_RW, QCOM_SCM_VAL);
	desc.args[0] = app_id;
	desc.args[1] = req_phys;
	desc.args[2] = req_size;
	desc.args[3] = rsp_phys;
	desc.args[4] = rsp_size;

	status = qcom_scm_qseecom_call(&desc, &res);

	if (status)
		return status;

	if (res.result != QSEECOM_RESULT_SUCCESS)
		return -EIO;

	return 0;
}
EXPORT_SYMBOL_GPL(qcom_scm_qseecom_app_send);
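
/*
 * Illustrative sketch (not part of the upstream file): sending a request to a
 * QSEE app. Both buffers are allocated from the SCM TZ memory pool, matching
 * the "must be TZ memory" requirement documented above. The request layout is
 * app-specific; the sizes here are arbitrary placeholders.
 */
static int __maybe_unused qcom_scm_qseecom_app_send_example(u32 app_id)
{
	size_t req_size = SZ_4K, rsp_size = SZ_4K;
	int ret;

	void *req __free(qcom_tzmem) = qcom_tzmem_alloc(__scm->mempool,
							req_size, GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	void *rsp __free(qcom_tzmem) = qcom_tzmem_alloc(__scm->mempool,
							rsp_size, GFP_KERNEL);
	if (!rsp)
		return -ENOMEM;

	/* Fill out the (app-specific) request data before sending. */
	memset(req, 0, req_size);

	ret = qcom_scm_qseecom_app_send(app_id, req, req_size, rsp, rsp_size);
	if (ret)
		return ret;

	/* On success, the app's response can now be read from rsp. */
	return 0;
}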

/*
 * We do not yet support re-entrant calls via the qseecom interface. To prevent
 * any potential issues with this, only allow validated machines for now.
 */
static const struct of_device_id qcom_scm_qseecom_allowlist[] __maybe_unused = {
	{ .compatible = "asus,vivobook-s15" },
	{ .compatible = "asus,zenbook-a14-ux3407qa" },
	{ .compatible = "asus,zenbook-a14-ux3407ra" },
	{ .compatible = "dell,xps13-9345" },
	{ .compatible = "hp,elitebook-ultra-g1q" },
	{ .compatible = "hp,omnibook-x14" },
	{ .compatible = "huawei,gaokun3" },
	{ .compatible = "lenovo,flex-5g" },
	{ .compatible = "lenovo,thinkpad-t14s" },
	{ .compatible = "lenovo,thinkpad-x13s" },
	{ .compatible = "lenovo,yoga-slim7x" },
	{ .compatible = "microsoft,arcata" },
	{ .compatible = "microsoft,blackrock" },
	{ .compatible = "microsoft,romulus13" },
	{ .compatible = "microsoft,romulus15" },
	{ .compatible = "qcom,sc8180x-primus" },
	{ .compatible = "qcom,x1e001de-devkit" },
	{ .compatible = "qcom,x1e80100-crd" },
	{ .compatible = "qcom,x1e80100-qcp" },
	{ .compatible = "qcom,x1p42100-crd" },
	{ }
};

static bool qcom_scm_qseecom_machine_is_allowed(void)
{
	struct device_node *np;
	bool match;

	np = of_find_node_by_path("/");
	if (!np)
		return false;

	match = of_match_node(qcom_scm_qseecom_allowlist, np);
	of_node_put(np);

	return match;
}

static void qcom_scm_qseecom_free(void *data)
{
	struct platform_device *qseecom_dev = data;

	platform_device_del(qseecom_dev);
	platform_device_put(qseecom_dev);
}

static int qcom_scm_qseecom_init(struct qcom_scm *scm)
{
	struct platform_device *qseecom_dev;
	u32 version;
	int ret;

	/*
	 * Note: We do two steps of validation here: First, we try to query the
	 * QSEECOM version as a check to see if the interface exists on this
	 * device. Second, we check against known good devices due to current
	 * driver limitations (see comment in qcom_scm_qseecom_allowlist).
	 *
	 * Note that we deliberately do the machine check after the version
	 * check so that we can log potentially supported devices. This should
	 * be safe as downstream sources indicate that the version query is
	 * neither blocking nor reentrant.
	 */
	ret = qcom_scm_qseecom_get_version(&version);
	if (ret)
		return 0;

	dev_info(scm->dev, "qseecom: found qseecom with version 0x%x\n", version);

	if (!qcom_scm_qseecom_machine_is_allowed()) {
		dev_info(scm->dev, "qseecom: untested machine, skipping\n");
		return 0;
	}

	/*
	 * Set up the QSEECOM interface device. All application clients will
	 * be set up and managed by its driver.
	 */
	qseecom_dev = platform_device_alloc("qcom_qseecom", -1);
	if (!qseecom_dev)
		return -ENOMEM;

	qseecom_dev->dev.parent = scm->dev;

	ret = platform_device_add(qseecom_dev);
	if (ret) {
		platform_device_put(qseecom_dev);
		return ret;
	}

	return devm_add_action_or_reset(scm->dev, qcom_scm_qseecom_free, qseecom_dev);
}

#else /* CONFIG_QCOM_QSEECOM */

static int qcom_scm_qseecom_init(struct qcom_scm *scm)
{
	return 0;
}

#endif /* CONFIG_QCOM_QSEECOM */

/**
 * qcom_scm_is_available() - Checks if SCM is available
 *
 * Return: true if the SCM driver has probed and SCM calls can be made.
 */
bool qcom_scm_is_available(void)
{
	/* Paired with smp_store_release() in qcom_scm_probe */
	return !!smp_load_acquire(&__scm);
}
EXPORT_SYMBOL_GPL(qcom_scm_is_available);
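
/*
 * Illustrative sketch (not part of the upstream file): a consumer driver
 * would typically gate its probe on SCM availability and defer until this
 * driver has probed and published __scm.
 */
static int __maybe_unused qcom_scm_consumer_probe_example(struct platform_device *pdev)
{
	if (!qcom_scm_is_available())
		return -EPROBE_DEFER;

	/* Safe to issue SCM calls from here on. */
	return 0;
}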

static int qcom_scm_assert_valid_wq_ctx(u32 wq_ctx)
{
	/*
	 * FW currently only supports a single wq_ctx (zero).
	 * TODO: Update this logic to include dynamic allocation and lookup of
	 * completion structs when FW supports more wq_ctx values.
	 */
	if (wq_ctx != 0) {
		dev_err(__scm->dev, "Firmware unexpectedly passed non-zero wq_ctx\n");
		return -EINVAL;
	}

	return 0;
}

int qcom_scm_wait_for_wq_completion(u32 wq_ctx)
{
	int ret;

	ret = qcom_scm_assert_valid_wq_ctx(wq_ctx);
	if (ret)
		return ret;

	wait_for_completion(&__scm->waitq_comp);

	return 0;
}

static int qcom_scm_waitq_wakeup(unsigned int wq_ctx)
{
	int ret;

	ret = qcom_scm_assert_valid_wq_ctx(wq_ctx);
	if (ret)
		return ret;

	complete(&__scm->waitq_comp);

	return 0;
}

static irqreturn_t qcom_scm_irq_handler(int irq, void *data)
{
	int ret;
	struct qcom_scm *scm = data;
	u32 wq_ctx, flags, more_pending = 0;

	do {
		ret = scm_get_wq_ctx(&wq_ctx, &flags, &more_pending);
		if (ret) {
			dev_err(scm->dev, "GET_WQ_CTX SMC call failed: %d\n", ret);
			goto out;
		}

		if (flags != QCOM_SMC_WAITQ_FLAG_WAKE_ONE) {
			dev_err(scm->dev, "Invalid flags received for wq_ctx: %u\n", flags);
			goto out;
		}

		ret = qcom_scm_waitq_wakeup(wq_ctx);
		if (ret)
			goto out;
	} while (more_pending);

out:
	return IRQ_HANDLED;
}

static int get_download_mode(char *buffer, const struct kernel_param *kp)
{
	if (download_mode >= ARRAY_SIZE(download_mode_name))
		return sysfs_emit(buffer, "unknown mode\n");

	return sysfs_emit(buffer, "%s\n", download_mode_name[download_mode]);
}

static int set_download_mode(const char *val, const struct kernel_param *kp)
{
	bool tmp;
	int ret;

	ret = sysfs_match_string(download_mode_name, val);
	if (ret < 0) {
		ret = kstrtobool(val, &tmp);
		if (ret < 0) {
			pr_err("qcom_scm: err: %d\n", ret);
			return ret;
		}

		ret = tmp ? 1 : 0;
	}

	download_mode = ret;
	if (__scm)
		qcom_scm_set_download_mode(download_mode);

	return 0;
}

static const struct kernel_param_ops download_mode_param_ops = {
	.get = get_download_mode,
	.set = set_download_mode,
};

module_param_cb(download_mode, &download_mode_param_ops, NULL, 0644);
MODULE_PARM_DESC(download_mode, "download mode: off/0/N for no dump mode, full/on/1/Y for full dump mode, mini for minidump mode, and full,mini for both full and minidump mode together");
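
/*
 * Illustrative note (not part of the upstream file): with this driver built
 * in, the mode can be selected on the kernel command line, e.g.
 * "qcom_scm.download_mode=full", or changed at runtime through
 * /sys/module/qcom_scm/parameters/download_mode.
 */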

static int qcom_scm_probe(struct platform_device *pdev)
{
	struct qcom_tzmem_pool_config pool_config;
	struct qcom_scm *scm;
	int irq, ret;

	scm = devm_kzalloc(&pdev->dev, sizeof(*scm), GFP_KERNEL);
	if (!scm)
		return -ENOMEM;

	scm->dev = &pdev->dev;
	ret = qcom_scm_find_dload_address(&pdev->dev, &scm->dload_mode_addr);
	if (ret < 0)
		return ret;

	init_completion(&scm->waitq_comp);
	mutex_init(&scm->scm_bw_lock);

	scm->path = devm_of_icc_get(&pdev->dev, NULL);
	if (IS_ERR(scm->path))
		return dev_err_probe(&pdev->dev, PTR_ERR(scm->path),
				     "failed to acquire interconnect path\n");

	scm->core_clk = devm_clk_get_optional(&pdev->dev, "core");
	if (IS_ERR(scm->core_clk))
		return PTR_ERR(scm->core_clk);

	scm->iface_clk = devm_clk_get_optional(&pdev->dev, "iface");
	if (IS_ERR(scm->iface_clk))
		return PTR_ERR(scm->iface_clk);

	scm->bus_clk = devm_clk_get_optional(&pdev->dev, "bus");
	if (IS_ERR(scm->bus_clk))
		return PTR_ERR(scm->bus_clk);

	scm->reset.ops = &qcom_scm_pas_reset_ops;
	scm->reset.nr_resets = 1;
	scm->reset.of_node = pdev->dev.of_node;
	ret = devm_reset_controller_register(&pdev->dev, &scm->reset);
	if (ret)
		return ret;

	/* vote for max clk rate for highest performance */
	ret = clk_set_rate(scm->core_clk, INT_MAX);
	if (ret)
		return ret;

	ret = of_reserved_mem_device_init(scm->dev);
	if (ret && ret != -ENODEV)
		return dev_err_probe(scm->dev, ret,
				     "Failed to setup the reserved memory region for TZ mem\n");

	ret = qcom_tzmem_enable(scm->dev);
	if (ret)
		return dev_err_probe(scm->dev, ret,
				     "Failed to enable the TrustZone memory allocator\n");

	memset(&pool_config, 0, sizeof(pool_config));
	pool_config.initial_size = 0;
	pool_config.policy = QCOM_TZMEM_POLICY_ON_DEMAND;
	pool_config.max_size = SZ_256K;

	scm->mempool = devm_qcom_tzmem_pool_new(scm->dev, &pool_config);
	if (IS_ERR(scm->mempool))
		return dev_err_probe(scm->dev, PTR_ERR(scm->mempool),
				     "Failed to create the SCM memory pool\n");

	irq = platform_get_irq_optional(pdev, 0);
	if (irq < 0) {
		if (irq != -ENXIO)
			return irq;
	} else {
		ret = devm_request_threaded_irq(scm->dev, irq, NULL, qcom_scm_irq_handler,
						IRQF_ONESHOT, "qcom-scm", scm);
		if (ret < 0)
			return dev_err_probe(scm->dev, ret,
					     "Failed to request qcom-scm irq\n");
	}

	/*
	 * Paired with smp_load_acquire() in qcom_scm_is_available().
	 *
	 * This marks the SCM API as ready to accept user calls and can only
	 * be called after the TrustZone memory pool is initialized and the
	 * waitqueue interrupt requested.
	 */
	smp_store_release(&__scm, scm);

	__get_convention();

	/*
	 * If "download mode" is requested, from this point on warmboot
	 * will cause the boot stages to enter download mode, unless
	 * disabled below by a clean shutdown/reboot.
	 */
	qcom_scm_set_download_mode(download_mode);

	/*
	 * Disable SDI if the DT indicates it is enabled by default, or if
	 * download mode is off.
	 */
	if (of_property_read_bool(pdev->dev.of_node, "qcom,sdi-enabled") || !download_mode)
		qcom_scm_disable_sdi();

	/*
	 * Initialize the QSEECOM interface.
	 *
	 * Note: QSEECOM is fairly self-contained and this only adds the
	 * interface device (the driver of which does most of the heavy
	 * lifting). So any errors returned here should be either -ENOMEM or
	 * -EINVAL (with the latter only in case there's a bug in our code).
	 * This means that there is no need to bring down the whole SCM driver.
	 * Just log the error instead and let SCM live.
	 */
	ret = qcom_scm_qseecom_init(scm);
	WARN(ret < 0, "failed to initialize qseecom: %d\n", ret);

	return 0;
}

static void qcom_scm_shutdown(struct platform_device *pdev)
{
	/* Clean shutdown, disable download mode to allow normal restart */
	qcom_scm_set_download_mode(QCOM_DLOAD_NODUMP);
}

static const struct of_device_id qcom_scm_dt_match[] = {
	{ .compatible = "qcom,scm" },

	/* Legacy entries kept for backwards compatibility */
	{ .compatible = "qcom,scm-apq8064" },
	{ .compatible = "qcom,scm-apq8084" },
	{ .compatible = "qcom,scm-ipq4019" },
	{ .compatible = "qcom,scm-msm8953" },
	{ .compatible = "qcom,scm-msm8974" },
	{ .compatible = "qcom,scm-msm8996" },
	{}
};
MODULE_DEVICE_TABLE(of, qcom_scm_dt_match);

static struct platform_driver qcom_scm_driver = {
	.driver = {
		.name = "qcom_scm",
		.of_match_table = qcom_scm_dt_match,
		.suppress_bind_attrs = true,
	},
	.probe = qcom_scm_probe,
	.shutdown = qcom_scm_shutdown,
};

static int __init qcom_scm_init(void)
{
	return platform_driver_register(&qcom_scm_driver);
}
subsys_initcall(qcom_scm_init);

MODULE_DESCRIPTION("Qualcomm Technologies, Inc. SCM driver");
MODULE_LICENSE("GPL v2");