GitHub Repository: torvalds/linux
Path: blob/master/arch/powerpc/platforms/powernv/opal.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * PowerNV OPAL high level interfaces
 *
 * Copyright 2011 IBM Corp.
 */

#define pr_fmt(fmt)	"opal: " fmt

#include <linux/printk.h>
#include <linux/types.h>
#include <linux/of.h>
#include <linux/of_fdt.h>
#include <linux/of_platform.h>
#include <linux/of_address.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/kobject.h>
#include <linux/delay.h>
#include <linux/memblock.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/kmsg_dump.h>
#include <linux/console.h>
#include <linux/sched/debug.h>

#include <asm/machdep.h>
#include <asm/opal.h>
#include <asm/firmware.h>
#include <asm/mce.h>
#include <asm/imc-pmu.h>
#include <asm/bug.h>

#include "powernv.h"

#define OPAL_MSG_QUEUE_MAX	16

struct opal_msg_node {
	struct list_head	list;
	struct opal_msg		msg;
};

static DEFINE_SPINLOCK(msg_list_lock);
static LIST_HEAD(msg_list);

/* /sys/firmware/opal */
struct kobject *opal_kobj;

struct opal {
	u64 base;
	u64 entry;
	u64 size;
} opal;

struct mcheck_recoverable_range {
	u64 start_addr;
	u64 end_addr;
	u64 recover_addr;
};

static int msg_list_size;

static struct mcheck_recoverable_range *mc_recoverable_range;
static int mc_recoverable_range_len;

struct device_node *opal_node;
static DEFINE_SPINLOCK(opal_write_lock);
static struct atomic_notifier_head opal_msg_notifier_head[OPAL_MSG_TYPE_MAX];
static uint32_t opal_heartbeat;
static struct task_struct *kopald_tsk;
static struct opal_msg *opal_msg;
static u32 opal_msg_size __ro_after_init;

void __init opal_configure_cores(void)
{
	u64 reinit_flags = 0;

	/* Do the actual re-init. This will clobber all FPRs, VRs, etc...
	 *
	 * It will preserve non-volatile GPRs and HSPRG0/1. It will
	 * also restore HIDs and other SPRs to their original values,
	 * but it might clobber a bunch.
	 */
#ifdef __BIG_ENDIAN__
	reinit_flags |= OPAL_REINIT_CPUS_HILE_BE;
#else
	reinit_flags |= OPAL_REINIT_CPUS_HILE_LE;
#endif

	/*
	 * POWER9 always supports running hash:
	 * i.e. a host in hash mode supports hash guests, and a host in
	 * radix mode supports both hash and radix guests.
	 */
	if (early_cpu_has_feature(CPU_FTR_ARCH_300)) {
		reinit_flags |= OPAL_REINIT_CPUS_MMU_HASH;
		if (early_radix_enabled())
			reinit_flags |= OPAL_REINIT_CPUS_MMU_RADIX;
	}

	opal_reinit_cpus(reinit_flags);

	/* Restore some bits */
	if (cur_cpu_spec->cpu_restore)
		cur_cpu_spec->cpu_restore();
}
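
/*
 * Worked example (illustrative, not from the original source): on a
 * little-endian POWER9 host booted in radix mode, the flags passed to
 * opal_reinit_cpus() above come out as
 *
 *	OPAL_REINIT_CPUS_HILE_LE |
 *	OPAL_REINIT_CPUS_MMU_HASH |
 *	OPAL_REINIT_CPUS_MMU_RADIX
 *
 * i.e. little-endian interrupts plus support for both hash and radix
 * guests.
 */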

int __init early_init_dt_scan_opal(unsigned long node,
				   const char *uname, int depth, void *data)
{
	const void *basep, *entryp, *sizep;
	int basesz, entrysz, runtimesz;

	if (depth != 1 || strcmp(uname, "ibm,opal") != 0)
		return 0;

	basep = of_get_flat_dt_prop(node, "opal-base-address", &basesz);
	entryp = of_get_flat_dt_prop(node, "opal-entry-address", &entrysz);
	sizep = of_get_flat_dt_prop(node, "opal-runtime-size", &runtimesz);

	if (!basep || !entryp || !sizep)
		return 1;

	opal.base = of_read_number(basep, basesz/4);
	opal.entry = of_read_number(entryp, entrysz/4);
	opal.size = of_read_number(sizep, runtimesz/4);

	pr_debug("OPAL Base  = 0x%llx (basep=%p basesz=%d)\n",
		 opal.base, basep, basesz);
	pr_debug("OPAL Entry = 0x%llx (entryp=%p entrysz=%d)\n",
		 opal.entry, entryp, entrysz);
	pr_debug("OPAL Size  = 0x%llx (sizep=%p runtimesz=%d)\n",
		 opal.size, sizep, runtimesz);

	if (of_flat_dt_is_compatible(node, "ibm,opal-v3")) {
		powerpc_firmware_features |= FW_FEATURE_OPAL;
		pr_debug("OPAL detected!\n");
	} else {
		panic("OPAL != V3 detected, no longer supported.\n");
	}

	return 1;
}
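
/*
 * Illustrative sketch (not from the original source): the flattened device
 * tree exposing OPAL typically carries 64-bit (two-cell) properties along
 * the lines of
 *
 *	ibm,opal {
 *		compatible = "ibm,opal-v3";
 *		opal-base-address = <0x0 0x30000000>;
 *		opal-entry-address = <0x0 0x30002000>;
 *		opal-runtime-size = <0x0 0x02000000>;
 *	};
 *
 * The addresses above are made-up values; the scanner derives the cell
 * counts from the property sizes (basesz/4 and friends) rather than
 * hard-coding two cells.
 */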

int __init early_init_dt_scan_recoverable_ranges(unsigned long node,
				const char *uname, int depth, void *data)
{
	int i, psize, size;
	const __be32 *prop;

	if (depth != 1 || strcmp(uname, "ibm,opal") != 0)
		return 0;

	prop = of_get_flat_dt_prop(node, "mcheck-recoverable-ranges", &psize);

	if (!prop)
		return 1;

	pr_debug("Found machine check recoverable ranges.\n");

	/*
	 * Calculate number of available entries.
	 *
	 * Each recoverable address range entry is (start address, len,
	 * recovery address), 2 cells each for start and recovery address,
	 * 1 cell for len, totalling 5 cells per entry.
	 */
	mc_recoverable_range_len = psize / (sizeof(*prop) * 5);

	/* Sanity check */
	if (!mc_recoverable_range_len)
		return 1;

	/* Size required to hold all the entries. */
	size = mc_recoverable_range_len *
			sizeof(struct mcheck_recoverable_range);

	/*
	 * Allocate a buffer to hold the MC recoverable ranges.
	 */
	mc_recoverable_range = memblock_alloc_or_panic(size, __alignof__(u64));

	for (i = 0; i < mc_recoverable_range_len; i++) {
		mc_recoverable_range[i].start_addr =
					of_read_number(prop + (i * 5) + 0, 2);
		mc_recoverable_range[i].end_addr =
					mc_recoverable_range[i].start_addr +
					of_read_number(prop + (i * 5) + 2, 1);
		mc_recoverable_range[i].recover_addr =
					of_read_number(prop + (i * 5) + 3, 2);

		pr_debug("Machine check recoverable range: %llx..%llx: %llx\n",
			 mc_recoverable_range[i].start_addr,
			 mc_recoverable_range[i].end_addr,
			 mc_recoverable_range[i].recover_addr);
	}
	return 1;
}
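
/*
 * Worked example (illustrative, not from the original source): with the
 * 5-cell encoding above, a property containing a single entry
 *
 *	mcheck-recoverable-ranges = <0x0 0x3000 0x100 0x0 0x4000>;
 *
 * decodes as start_addr = 0x3000 (cells 0-1), len = 0x100 (cell 2), hence
 * end_addr = 0x3100, and recover_addr = 0x4000 (cells 3-4).
 */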

static int __init opal_register_exception_handlers(void)
{
#ifdef __BIG_ENDIAN__
	u64 glue;

	if (!(powerpc_firmware_features & FW_FEATURE_OPAL))
		return -ENODEV;

	/* Hook up some exception handlers except machine check. We use the
	 * fwnmi area at 0x7000 to provide the glue space to OPAL.
	 */
	glue = 0x7000;

	/*
	 * Only ancient OPAL firmware requires this.
	 * Specifically, firmware from FW810.00 (released June 2014)
	 * through FW810.20 (released October 2014).
	 *
	 * Check if we are running on newer (post Oct 2014) firmware that
	 * exports the OPAL_HANDLE_HMI token. If yes, then don't ask OPAL to
	 * patch the HMI interrupt; we catch and handle it directly in Linux.
	 *
	 * For older firmware (i.e. < FW810.20), we fall back to the old
	 * behavior and let OPAL patch the HMI vector and handle it inside
	 * OPAL firmware.
	 */
	if (!opal_check_token(OPAL_HANDLE_HMI)) {
		pr_info("Old firmware detected, OPAL handles HMIs.\n");
		opal_register_exception_handler(
				OPAL_HYPERVISOR_MAINTENANCE_HANDLER,
				0, glue);
		glue += 128;
	}

	/*
	 * Only applicable to ancient firmware; all modern
	 * (post March 2015/skiboot 5.0) firmware will just return
	 * OPAL_UNSUPPORTED.
	 */
	opal_register_exception_handler(OPAL_SOFTPATCH_HANDLER, 0, glue);
#endif

	return 0;
}
machine_early_initcall(powernv, opal_register_exception_handlers);

static void queue_replay_msg(void *msg)
{
	struct opal_msg_node *msg_node;

	if (msg_list_size < OPAL_MSG_QUEUE_MAX) {
		msg_node = kzalloc(sizeof(*msg_node), GFP_ATOMIC);
		if (msg_node) {
			INIT_LIST_HEAD(&msg_node->list);
			memcpy(&msg_node->msg, msg, sizeof(struct opal_msg));
			list_add_tail(&msg_node->list, &msg_list);
			msg_list_size++;
		} else
			pr_warn_once("message queue no memory\n");

		if (msg_list_size >= OPAL_MSG_QUEUE_MAX)
			pr_warn_once("message queue full\n");
	}
}

static void dequeue_replay_msg(enum opal_msg_type msg_type)
{
	struct opal_msg_node *msg_node, *tmp;

	list_for_each_entry_safe(msg_node, tmp, &msg_list, list) {
		if (be32_to_cpu(msg_node->msg.msg_type) != msg_type)
			continue;

		atomic_notifier_call_chain(&opal_msg_notifier_head[msg_type],
					msg_type,
					&msg_node->msg);

		list_del(&msg_node->list);
		kfree(msg_node);
		msg_list_size--;
	}
}

/*
 * Opal message notifier based on message type. Allows subscribers to get
 * notified for a specific message type.
 */
int opal_message_notifier_register(enum opal_msg_type msg_type,
					struct notifier_block *nb)
{
	int ret;
	unsigned long flags;

	if (!nb || msg_type >= OPAL_MSG_TYPE_MAX) {
		pr_warn("%s: Invalid arguments, msg_type:%d\n",
			__func__, msg_type);
		return -EINVAL;
	}

	spin_lock_irqsave(&msg_list_lock, flags);
	ret = atomic_notifier_chain_register(
		&opal_msg_notifier_head[msg_type], nb);

	/*
	 * If the registration succeeded, replay any queued messages that came
	 * in prior to the notifier chain registration. msg_list_lock is held
	 * here to ensure they're delivered prior to any subsequent messages.
	 */
	if (ret == 0)
		dequeue_replay_msg(msg_type);

	spin_unlock_irqrestore(&msg_list_lock, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(opal_message_notifier_register);

int opal_message_notifier_unregister(enum opal_msg_type msg_type,
				     struct notifier_block *nb)
{
	return atomic_notifier_chain_unregister(
			&opal_msg_notifier_head[msg_type], nb);
}
EXPORT_SYMBOL_GPL(opal_message_notifier_unregister);
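
/*
 * Example usage (illustrative sketch, not part of the original source): a
 * driver interested in, say, OPAL_MSG_SHUTDOWN messages subscribes with a
 * standard notifier block. The handler and variable names below are
 * assumptions for illustration only.
 *
 *	static int my_opal_msg_handler(struct notifier_block *nb,
 *				       unsigned long msg_type, void *msg)
 *	{
 *		struct opal_msg *m = msg;
 *
 *		pr_info("OPAL message type %u\n", be32_to_cpu(m->msg_type));
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block my_opal_nb = {
 *		.notifier_call = my_opal_msg_handler,
 *	};
 *
 *	rc = opal_message_notifier_register(OPAL_MSG_SHUTDOWN, &my_opal_nb);
 *
 * Messages of that type which arrived before registration (up to
 * OPAL_MSG_QUEUE_MAX of them) are replayed to the new subscriber.
 */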

static void opal_message_do_notify(uint32_t msg_type, void *msg)
{
	unsigned long flags;
	bool queued = false;

	spin_lock_irqsave(&msg_list_lock, flags);
	if (opal_msg_notifier_head[msg_type].head == NULL) {
		/*
		 * Queue up the msg since no notifiers have registered
		 * yet for this msg_type.
		 */
		queue_replay_msg(msg);
		queued = true;
	}
	spin_unlock_irqrestore(&msg_list_lock, flags);

	if (queued)
		return;

	/* notify subscribers */
	atomic_notifier_call_chain(&opal_msg_notifier_head[msg_type],
				msg_type, msg);
}

static void opal_handle_message(void)
{
	s64 ret;
	u32 type;

	ret = opal_get_msg(__pa(opal_msg), opal_msg_size);
	/* No opal message pending. */
	if (ret == OPAL_RESOURCE)
		return;

	/* Check for errors. */
	if (ret) {
		pr_warn("%s: Failed to retrieve opal message, err=%lld\n",
			__func__, ret);
		return;
	}

	type = be32_to_cpu(opal_msg->msg_type);

	/* Sanity check */
	if (type >= OPAL_MSG_TYPE_MAX) {
		pr_warn_once("%s: Unknown message type: %u\n", __func__, type);
		return;
	}
	opal_message_do_notify(type, (void *)opal_msg);
}

static irqreturn_t opal_message_notify(int irq, void *data)
{
	opal_handle_message();
	return IRQ_HANDLED;
}

static int __init opal_message_init(struct device_node *opal_node)
{
	int ret, i, irq;

	ret = of_property_read_u32(opal_node, "opal-msg-size", &opal_msg_size);
	if (ret) {
		pr_notice("Failed to read opal-msg-size property\n");
		opal_msg_size = sizeof(struct opal_msg);
	}

	opal_msg = kmalloc(opal_msg_size, GFP_KERNEL);
	if (!opal_msg) {
		opal_msg_size = sizeof(struct opal_msg);
		/* Try to allocate fixed message size */
		opal_msg = kmalloc(opal_msg_size, GFP_KERNEL);
		BUG_ON(opal_msg == NULL);
	}

	for (i = 0; i < OPAL_MSG_TYPE_MAX; i++)
		ATOMIC_INIT_NOTIFIER_HEAD(&opal_msg_notifier_head[i]);

	irq = opal_event_request(ilog2(OPAL_EVENT_MSG_PENDING));
	if (!irq) {
		pr_err("%s: Can't register OPAL event irq (%d)\n",
		       __func__, irq);
		return irq;
	}

	ret = request_irq(irq, opal_message_notify,
			IRQ_TYPE_LEVEL_HIGH, "opal-msg", NULL);
	if (ret) {
		pr_err("%s: Can't request OPAL event irq (%d)\n",
		       __func__, ret);
		return ret;
	}

	return 0;
}

ssize_t opal_get_chars(uint32_t vtermno, u8 *buf, size_t count)
{
	s64 rc;
	__be64 evt, len;

	if (!opal.entry)
		return -ENODEV;
	opal_poll_events(&evt);
	if ((be64_to_cpu(evt) & OPAL_EVENT_CONSOLE_INPUT) == 0)
		return 0;
	len = cpu_to_be64(count);
	rc = opal_console_read(vtermno, &len, buf);
	if (rc == OPAL_SUCCESS)
		return be64_to_cpu(len);
	return 0;
}

static ssize_t __opal_put_chars(uint32_t vtermno, const u8 *data,
				size_t total_len, bool atomic)
{
	unsigned long flags = 0 /* shut up gcc */;
	ssize_t written;
	__be64 olen;
	s64 rc;

	if (!opal.entry)
		return -ENODEV;

	if (atomic)
		spin_lock_irqsave(&opal_write_lock, flags);
	rc = opal_console_write_buffer_space(vtermno, &olen);
	if (rc || be64_to_cpu(olen) < total_len) {
		/* Closed -> drop characters */
		if (rc)
			written = total_len;
		else
			written = -EAGAIN;
		goto out;
	}

	/* Should not get a partial write here because space is available. */
	olen = cpu_to_be64(total_len);
	rc = opal_console_write(vtermno, &olen, data);
	if (rc == OPAL_BUSY || rc == OPAL_BUSY_EVENT) {
		if (rc == OPAL_BUSY_EVENT)
			opal_poll_events(NULL);
		written = -EAGAIN;
		goto out;
	}

	/* Closed or other error -> drop */
	if (rc != OPAL_SUCCESS) {
		written = opal_error_code(rc);
		goto out;
	}

	written = be64_to_cpu(olen);
	if (written < total_len) {
		if (atomic) {
			/* Should not happen */
			pr_warn("atomic console write returned partial len=%zu written=%zd\n",
				total_len, written);
		}
		if (!written)
			written = -EAGAIN;
	}

out:
	if (atomic)
		spin_unlock_irqrestore(&opal_write_lock, flags);

	return written;
}

ssize_t opal_put_chars(uint32_t vtermno, const u8 *data, size_t total_len)
{
	return __opal_put_chars(vtermno, data, total_len, false);
}

/*
 * opal_put_chars_atomic will not perform partial writes. Data will be
 * atomically written to the terminal or not at all. This is not strictly
 * true at the moment because console space can race with OPAL's console
 * writes.
 */
ssize_t opal_put_chars_atomic(uint32_t vtermno, const u8 *data,
			      size_t total_len)
{
	return __opal_put_chars(vtermno, data, total_len, true);
}
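
/*
 * Example usage (illustrative sketch, not part of the original source):
 * an hvc-style caller retries short or busy writes and polls for input.
 * The vterm number and buffer below are assumptions for illustration.
 *
 *	u8 buf[64];
 *	ssize_t n;
 *
 *	n = opal_put_chars(0, "hello\r\n", 7);
 *	(n == -EAGAIN means no buffer space yet; retry from a poll loop)
 *
 *	n = opal_get_chars(0, buf, sizeof(buf));
 *	(n > 0 is the number of bytes read; 0 means no pending input)
 */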

static s64 __opal_flush_console(uint32_t vtermno)
{
	s64 rc;

	if (!opal_check_token(OPAL_CONSOLE_FLUSH)) {
		__be64 evt;

		/*
		 * If OPAL_CONSOLE_FLUSH is not implemented in the firmware,
		 * the console can still be flushed by calling the polling
		 * function while it has OPAL_EVENT_CONSOLE_OUTPUT events.
		 */
		WARN_ONCE(1, "opal: OPAL_CONSOLE_FLUSH missing.\n");

		opal_poll_events(&evt);
		if (!(be64_to_cpu(evt) & OPAL_EVENT_CONSOLE_OUTPUT))
			return OPAL_SUCCESS;
		return OPAL_BUSY;

	} else {
		rc = opal_console_flush(vtermno);
		if (rc == OPAL_BUSY_EVENT) {
			opal_poll_events(NULL);
			rc = OPAL_BUSY;
		}
		return rc;
	}

}

/*
 * opal_flush_console spins until the console is flushed
 */
int opal_flush_console(uint32_t vtermno)
{
	for (;;) {
		s64 rc = __opal_flush_console(vtermno);

		if (rc == OPAL_BUSY || rc == OPAL_PARTIAL) {
			mdelay(1);
			continue;
		}

		return opal_error_code(rc);
	}
}

/*
 * opal_flush_chars is an hvc interface. If wait is set, it sleeps until
 * the console is flushed; otherwise it returns -EBUSY if the console has
 * pending data, or -EAGAIN if some but not all of that data was flushed.
 */
int opal_flush_chars(uint32_t vtermno, bool wait)
{
	for (;;) {
		s64 rc = __opal_flush_console(vtermno);

		if (rc == OPAL_BUSY || rc == OPAL_PARTIAL) {
			if (wait) {
				msleep(OPAL_BUSY_DELAY_MS);
				continue;
			}
			if (rc == OPAL_PARTIAL)
				return -EAGAIN;
		}

		return opal_error_code(rc);
	}
}
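
/*
 * Example usage (illustrative sketch, not part of the original source):
 * a caller that can block drains the console in one call, while an
 * atomic path checks and retries; vterm number 0 is an assumption.
 *
 *	opal_flush_chars(0, true);	(blocks until the console drains)
 *
 *	if (opal_flush_chars(0, false) == -EBUSY)
 *		(data is still queued; the caller retries later)
 */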

static int opal_recover_mce(struct pt_regs *regs,
					struct machine_check_event *evt)
{
	int recovered = 0;

	if (regs_is_unrecoverable(regs)) {
		/* If MSR_RI isn't set, we cannot recover */
		pr_err("Machine check interrupt unrecoverable: MSR(RI=0)\n");
		recovered = 0;
	} else if (evt->disposition == MCE_DISPOSITION_RECOVERED) {
		/* Platform corrected itself */
		recovered = 1;
	} else if (evt->severity == MCE_SEV_FATAL) {
		/* Fatal machine check */
		pr_err("Machine check interrupt is fatal\n");
		recovered = 0;
	}

	if (!recovered && evt->sync_error) {
		/*
		 * Try to kill processes if we get a synchronous machine check
		 * (e.g., one caused by execution of this instruction). This
		 * will devolve into a panic if we try to kill init or are in
		 * an interrupt etc.
		 *
		 * TODO: Queue up this address for hwpoisoning later.
		 * TODO: This is not quite right for d-side machine
		 *       checks; ->nip is not necessarily the important
		 *       address.
		 */
		if ((user_mode(regs))) {
			_exception(SIGBUS, regs, BUS_MCEERR_AR, regs->nip);
			recovered = 1;
		} else if (die_will_crash()) {
			/*
			 * die() would kill the kernel, so better to go via
			 * the platform reboot code that will log the
			 * machine check.
			 */
			recovered = 0;
		} else {
			die_mce("Machine check", regs, SIGBUS);
			recovered = 1;
		}
	}

	return recovered;
}

void __noreturn pnv_platform_error_reboot(struct pt_regs *regs, const char *msg)
{
	panic_flush_kmsg_start();

	pr_emerg("Hardware platform error: %s\n", msg);
	if (regs)
		show_regs(regs);
	smp_send_stop();

	panic_flush_kmsg_end();

	/*
	 * Don't bother to shut things down because this will
	 * xstop the system.
	 */
	if (opal_cec_reboot2(OPAL_REBOOT_PLATFORM_ERROR, msg)
						== OPAL_UNSUPPORTED) {
		pr_emerg("Reboot type %d not supported for %s\n",
				OPAL_REBOOT_PLATFORM_ERROR, msg);
	}

	/*
	 * We reached here. There are four possibilities:
	 * 1. We are running on a firmware level that does not support
	 *    opal_cec_reboot2().
	 * 2. We are running on a firmware level that does not support
	 *    the OPAL_REBOOT_PLATFORM_ERROR reboot type.
	 * 3. We are running on an FSP based system that does not need
	 *    opal to trigger checkstop explicitly for error analysis.
	 *    The FSP PRD component would have already been notified
	 *    about this error through other channels.
	 * 4. We are running on a newer skiboot that by default does
	 *    not cause a checkstop, and drops us back to the kernel to
	 *    extract context and state at the time of the error.
	 */

	panic(msg);
}

int opal_machine_check(struct pt_regs *regs)
{
	struct machine_check_event evt;

	if (!get_mce_event(&evt, MCE_EVENT_RELEASE))
		return 0;

	/* Print things out */
	if (evt.version != MCE_V1) {
		pr_err("Machine Check Exception, Unknown event version %d!\n",
		       evt.version);
		return 0;
	}
	machine_check_print_event_info(&evt, user_mode(regs), false);

	if (opal_recover_mce(regs, &evt))
		return 1;

	pnv_platform_error_reboot(regs, "Unrecoverable Machine Check exception");
}

/* Early hmi handler called in real mode. */
int opal_hmi_exception_early(struct pt_regs *regs)
{
	s64 rc;

	/*
	 * Call the opal hmi handler. Pass the paca address as the token.
	 * A return value of OPAL_SUCCESS indicates that an HMI event was
	 * generated and is waiting to be pulled by Linux.
	 */
	rc = opal_handle_hmi();
	if (rc == OPAL_SUCCESS) {
		local_paca->hmi_event_available = 1;
		return 1;
	}
	return 0;
}

int opal_hmi_exception_early2(struct pt_regs *regs)
{
	s64 rc;
	__be64 out_flags;

	/*
	 * Call the opal hmi handler.
	 * Check the 64-bit flag mask to find out if an event was generated,
	 * whether TB is still valid, etc.
	 */
	rc = opal_handle_hmi2(&out_flags);
	if (rc != OPAL_SUCCESS)
		return 0;

	if (be64_to_cpu(out_flags) & OPAL_HMI_FLAGS_NEW_EVENT)
		local_paca->hmi_event_available = 1;
	if (be64_to_cpu(out_flags) & OPAL_HMI_FLAGS_TOD_TB_FAIL)
		tb_invalid = true;
	return 1;
}

/* HMI exception handler called in virtual mode when irqs are next enabled. */
int opal_handle_hmi_exception(struct pt_regs *regs)
{
	/*
	 * Check if an HMI event is available.
	 * If yes, wake kopald to process it.
	 */
	if (!local_paca->hmi_event_available)
		return 0;

	local_paca->hmi_event_available = 0;
	opal_wake_poller();

	return 1;
}

static uint64_t find_recovery_address(uint64_t nip)
{
	int i;

	for (i = 0; i < mc_recoverable_range_len; i++)
		if ((nip >= mc_recoverable_range[i].start_addr) &&
		    (nip < mc_recoverable_range[i].end_addr))
			return mc_recoverable_range[i].recover_addr;
	return 0;
}

bool opal_mce_check_early_recovery(struct pt_regs *regs)
{
	uint64_t recover_addr = 0;

	if (!opal.base || !opal.size)
		goto out;

	if ((regs->nip >= opal.base) &&
	    (regs->nip < (opal.base + opal.size)))
		recover_addr = find_recovery_address(regs->nip);

	/*
	 * Setup regs->nip to rfi into fixup address.
	 */
	if (recover_addr)
		regs_set_return_ip(regs, recover_addr);

out:
	return !!recover_addr;
}

static int __init opal_sysfs_init(void)
{
	opal_kobj = kobject_create_and_add("opal", firmware_kobj);
	if (!opal_kobj) {
		pr_warn("kobject_create_and_add opal failed\n");
		return -ENOMEM;
	}

	return 0;
}

static int opal_add_one_export(struct kobject *parent, const char *export_name,
			       struct device_node *np, const char *prop_name)
{
	struct bin_attribute *attr = NULL;
	const char *name = NULL;
	u64 vals[2];
	int rc;

	rc = of_property_read_u64_array(np, prop_name, &vals[0], 2);
	if (rc)
		goto out;

	attr = kzalloc(sizeof(*attr), GFP_KERNEL);
	if (!attr) {
		rc = -ENOMEM;
		goto out;
	}
	name = kstrdup(export_name, GFP_KERNEL);
	if (!name) {
		rc = -ENOMEM;
		goto out;
	}

	sysfs_bin_attr_init(attr);
	attr->attr.name = name;
	attr->attr.mode = 0400;
	attr->read = sysfs_bin_attr_simple_read;
	attr->private = __va(vals[0]);
	attr->size = vals[1];

	rc = sysfs_create_bin_file(parent, attr);
out:
	if (rc) {
		kfree(name);
		kfree(attr);
	}

	return rc;
}

static void opal_add_exported_attrs(struct device_node *np,
				    struct kobject *kobj)
{
	struct device_node *child;
	struct property *prop;

	for_each_property_of_node(np, prop) {
		int rc;

		if (!strcmp(prop->name, "name") ||
		    !strcmp(prop->name, "phandle"))
			continue;

		rc = opal_add_one_export(kobj, prop->name, np, prop->name);
		if (rc) {
			pr_warn("Unable to add export %pOF/%s, rc = %d!\n",
				np, prop->name, rc);
		}
	}

	for_each_child_of_node(np, child) {
		struct kobject *child_kobj;

		child_kobj = kobject_create_and_add(child->name, kobj);
		if (!child_kobj) {
			pr_err("Unable to create export dir for %pOF\n", child);
			continue;
		}

		opal_add_exported_attrs(child, child_kobj);
	}
}

/*
 * opal_export_attrs: creates a sysfs node for each property listed in
 * the device tree under /ibm,opal/firmware/exports/.
 * All new sysfs nodes are created under /sys/firmware/opal/exports/.
 * This allows for reserved memory regions (e.g. HDAT) to be read.
 * The new sysfs nodes are only readable by root.
 */
static void opal_export_attrs(void)
{
	struct device_node *np;
	struct kobject *kobj;
	int rc;

	np = of_find_node_by_path("/ibm,opal/firmware/exports");
	if (!np)
		return;

	/* Create new 'exports' directory - /sys/firmware/opal/exports */
	kobj = kobject_create_and_add("exports", opal_kobj);
	if (!kobj) {
		pr_warn("kobject_create_and_add() of exports failed\n");
		of_node_put(np);
		return;
	}

	opal_add_exported_attrs(np, kobj);

	/*
	 * NB: symbol_map existed before the generic export interface, so it
	 * lives under the top level opal_kobj.
	 */
	rc = opal_add_one_export(opal_kobj, "symbol_map",
				 np->parent, "symbol-map");
	if (rc)
		pr_warn("Error %d creating OPAL symbols file\n", rc);

	of_node_put(np);
}
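
/*
 * Illustrative sketch (not from the original source): an exports node such
 * as
 *
 *	ibm,opal {
 *		firmware {
 *			exports {
 *				hdat-map = <0x0 0x31000000 0x0 0x100000>;
 *			};
 *		};
 *	};
 *
 * would surface as a root-only binary file at
 * /sys/firmware/opal/exports/hdat-map covering that (address, size) pair.
 * The property name and values here are assumptions for illustration.
 */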

static void __init opal_dump_region_init(void)
{
	void *addr;
	uint64_t size;
	int rc;

	if (!opal_check_token(OPAL_REGISTER_DUMP_REGION))
		return;

	/* Register kernel log buffer */
	addr = log_buf_addr_get();
	if (addr == NULL)
		return;

	size = log_buf_len_get();
	if (size == 0)
		return;

	rc = opal_register_dump_region(OPAL_DUMP_REGION_LOG_BUF,
				       __pa(addr), size);
	/* Don't warn if this is just an older OPAL that doesn't
	 * know about that call.
	 */
	if (rc && rc != OPAL_UNSUPPORTED)
		pr_warn("DUMP: Failed to register kernel log buffer. rc = %d\n",
			rc);
}

static void __init opal_pdev_init(const char *compatible)
{
	struct device_node *np;

	for_each_compatible_node(np, NULL, compatible)
		of_platform_device_create(np, NULL, NULL);
}

static void __init opal_imc_init_dev(void)
{
	struct device_node *np;

	np = of_find_compatible_node(NULL, NULL, IMC_DTB_COMPAT);
	if (np)
		of_platform_device_create(np, NULL, NULL);

	of_node_put(np);
}

static int kopald(void *unused)
{
	unsigned long timeout = msecs_to_jiffies(opal_heartbeat) + 1;

	set_freezable();
	do {
		try_to_freeze();

		opal_handle_events();

		set_current_state(TASK_INTERRUPTIBLE);
		if (opal_have_pending_events())
			__set_current_state(TASK_RUNNING);
		else
			schedule_timeout(timeout);

	} while (!kthread_should_stop());

	return 0;
}

void opal_wake_poller(void)
{
	if (kopald_tsk)
		wake_up_process(kopald_tsk);
}

static void __init opal_init_heartbeat(void)
{
	/* Old firmware: we assume the HVC heartbeat is sufficient */
	if (of_property_read_u32(opal_node, "ibm,heartbeat-ms",
				 &opal_heartbeat) != 0)
		opal_heartbeat = 0;

	if (opal_heartbeat)
		kopald_tsk = kthread_run(kopald, NULL, "kopald");
}
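
/*
 * Illustrative sketch (not from the original source): firmware requesting
 * a two-second poll cadence would expose, under /ibm,opal:
 *
 *	ibm,heartbeat-ms = <2000>;
 *
 * If the property is absent, opal_heartbeat stays 0 and no kopald thread
 * is started.
 */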

static int __init opal_init(void)
{
	struct device_node *np, *consoles, *leds;
	int rc;

	opal_node = of_find_node_by_path("/ibm,opal");
	if (!opal_node) {
		pr_warn("Device node not found\n");
		return -ENODEV;
	}

	/* Register OPAL consoles if any ports */
	consoles = of_find_node_by_path("/ibm,opal/consoles");
	if (consoles) {
		for_each_child_of_node(consoles, np) {
			if (!of_node_name_eq(np, "serial"))
				continue;
			of_platform_device_create(np, NULL, NULL);
		}
		of_node_put(consoles);
	}

	/* Initialise OPAL messaging system */
	opal_message_init(opal_node);

	/* Initialise OPAL asynchronous completion interface */
	opal_async_comp_init();

	/* Initialise OPAL sensor interface */
	opal_sensor_init();

	/* Initialise OPAL hypervisor maintenance interrupt handling */
	opal_hmi_handler_init();

	/* Create i2c platform devices */
	opal_pdev_init("ibm,opal-i2c");

	/* Handle non-volatile memory devices */
	opal_pdev_init("pmem-region");

	/* Set up a heartbeat thread if requested by OPAL */
	opal_init_heartbeat();

	/* Detect In-Memory Collection counters and create devices */
	opal_imc_init_dev();

	/* Create leds platform devices */
	leds = of_find_node_by_path("/ibm,opal/leds");
	if (leds) {
		of_platform_device_create(leds, "opal_leds", NULL);
		of_node_put(leds);
	}

	/* Initialise OPAL message log interface */
	opal_msglog_init();

	/* Create "opal" kobject under /sys/firmware */
	rc = opal_sysfs_init();
	if (rc == 0) {
		/* Setup dump region interface */
		opal_dump_region_init();
		/* Setup error log interface */
		rc = opal_elog_init();
		/* Setup code update interface */
		opal_flash_update_init();
		/* Setup platform dump extract interface */
		opal_platform_dump_init();
		/* Setup system parameters interface */
		opal_sys_param_init();
		/* Setup message log sysfs interface. */
		opal_msglog_sysfs_init();
		/* Add all export properties */
		opal_export_attrs();
	}

	/* Initialize platform devices: IPMI backend, PRD & flash interface */
	opal_pdev_init("ibm,opal-ipmi");
	opal_pdev_init("ibm,opal-flash");
	opal_pdev_init("ibm,opal-prd");

	/* Initialise platform device: oppanel interface */
	opal_pdev_init("ibm,opal-oppanel");

	/* Initialise OPAL kmsg dumper for flushing console on panic */
	opal_kmsg_init();

	/* Initialise OPAL powercap interface */
	opal_powercap_init();

	/* Initialise OPAL Power-Shifting-Ratio interface */
	opal_psr_init();

	/* Initialise OPAL sensor groups */
	opal_sensor_groups_init();

	/* Initialise OPAL Power control interface */
	opal_power_control_init();

	/* Initialize OPAL secure variables */
	opal_pdev_init("ibm,secvar-backend");

	return 0;
}
machine_subsys_initcall(powernv, opal_init);

void opal_shutdown(void)
{
	long rc = OPAL_BUSY;

	opal_event_shutdown();

	/*
	 * Then sync with OPAL, which ensures that anything that can
	 * potentially write to our memory, such as an ongoing dump
	 * retrieval, has completed.
	 */
	while (rc == OPAL_BUSY || rc == OPAL_BUSY_EVENT) {
		rc = opal_sync_host_reboot();
		if (rc == OPAL_BUSY)
			opal_poll_events(NULL);
		else
			mdelay(10);
	}

	/* Unregister memory dump region */
	if (opal_check_token(OPAL_UNREGISTER_DUMP_REGION))
		opal_unregister_dump_region(OPAL_DUMP_REGION_LOG_BUF);
}

/* Export these so that test modules can use them */
EXPORT_SYMBOL_GPL(opal_invalid_call);
EXPORT_SYMBOL_GPL(opal_xscom_read);
EXPORT_SYMBOL_GPL(opal_xscom_write);
EXPORT_SYMBOL_GPL(opal_ipmi_send);
EXPORT_SYMBOL_GPL(opal_ipmi_recv);
EXPORT_SYMBOL_GPL(opal_flash_read);
EXPORT_SYMBOL_GPL(opal_flash_write);
EXPORT_SYMBOL_GPL(opal_flash_erase);
EXPORT_SYMBOL_GPL(opal_prd_msg);
EXPORT_SYMBOL_GPL(opal_check_token);
/* Convert a region of vmalloc memory to an opal sg list */
struct opal_sg_list *opal_vmalloc_to_sg_list(void *vmalloc_addr,
					     unsigned long vmalloc_size)
{
	struct opal_sg_list *sg, *first = NULL;
	unsigned long i = 0;

	sg = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!sg)
		goto nomem;

	first = sg;

	while (vmalloc_size > 0) {
		uint64_t data = vmalloc_to_pfn(vmalloc_addr) << PAGE_SHIFT;
		uint64_t length = min(vmalloc_size, PAGE_SIZE);

		sg->entry[i].data = cpu_to_be64(data);
		sg->entry[i].length = cpu_to_be64(length);
		i++;

		if (i >= SG_ENTRIES_PER_NODE) {
			struct opal_sg_list *next;

			next = kzalloc(PAGE_SIZE, GFP_KERNEL);
			if (!next)
				goto nomem;

			sg->length = cpu_to_be64(
					i * sizeof(struct opal_sg_entry) + 16);
			i = 0;
			sg->next = cpu_to_be64(__pa(next));
			sg = next;
		}

		vmalloc_addr += length;
		vmalloc_size -= length;
	}

	sg->length = cpu_to_be64(i * sizeof(struct opal_sg_entry) + 16);

	return first;

nomem:
	pr_err("%s : Failed to allocate memory\n", __func__);
	opal_free_sg_list(first);
	return NULL;
}

void opal_free_sg_list(struct opal_sg_list *sg)
{
	while (sg) {
		uint64_t next = be64_to_cpu(sg->next);

		kfree(sg);

		if (next)
			sg = __va(next);
		else
			sg = NULL;
	}
}
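
/*
 * Example usage (illustrative sketch, not part of the original source):
 * firmware update code hands OPAL a scatter-gather list describing a
 * vmalloc'd image. The buffer and length below are assumptions for
 * illustration.
 *
 *	void *image = vmalloc(len);
 *	struct opal_sg_list *list;
 *
 *	list = opal_vmalloc_to_sg_list(image, len);
 *	if (list) {
 *		(pass the list's physical address to the OPAL call here)
 *		opal_free_sg_list(list);
 *	}
 */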

int opal_error_code(int rc)
{
	switch (rc) {
	case OPAL_SUCCESS:		return 0;

	case OPAL_PARAMETER:		return -EINVAL;
	case OPAL_ASYNC_COMPLETION:	return -EINPROGRESS;
	case OPAL_BUSY:
	case OPAL_BUSY_EVENT:		return -EBUSY;
	case OPAL_NO_MEM:		return -ENOMEM;
	case OPAL_PERMISSION:		return -EPERM;

	case OPAL_UNSUPPORTED:		return -EIO;
	case OPAL_HARDWARE:		return -EIO;
	case OPAL_INTERNAL_ERROR:	return -EIO;
	case OPAL_TIMEOUT:		return -ETIMEDOUT;
	default:
		pr_err("%s: unexpected OPAL error %d\n", __func__, rc);
		return -EIO;
	}
}
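
/*
 * Example usage (illustrative sketch, not part of the original source):
 * callers making raw OPAL calls translate the firmware return code into
 * a Linux errno before handing it to generic code. The opal_rtc_read()
 * call below is only an assumed example caller.
 *
 *	s64 rc = opal_rtc_read(&ymd, &hmsm);
 *
 *	if (rc != OPAL_SUCCESS)
 *		return opal_error_code(rc);	(e.g. OPAL_HARDWARE -> -EIO)
 */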

void powernv_set_nmmu_ptcr(unsigned long ptcr)
{
	int rc;

	if (firmware_has_feature(FW_FEATURE_OPAL)) {
		rc = opal_nmmu_set_ptcr(-1UL, ptcr);
		if (rc != OPAL_SUCCESS && rc != OPAL_UNSUPPORTED)
			pr_warn("%s: Unable to set nest mmu ptcr\n", __func__);
	}
}

EXPORT_SYMBOL_GPL(opal_poll_events);
EXPORT_SYMBOL_GPL(opal_rtc_read);
EXPORT_SYMBOL_GPL(opal_rtc_write);
EXPORT_SYMBOL_GPL(opal_tpo_read);
EXPORT_SYMBOL_GPL(opal_tpo_write);
EXPORT_SYMBOL_GPL(opal_i2c_request);
/* Export these symbols for PowerNV LED class driver */
EXPORT_SYMBOL_GPL(opal_leds_get_ind);
EXPORT_SYMBOL_GPL(opal_leds_set_ind);
/* Export this symbol for PowerNV Operator Panel class driver */
EXPORT_SYMBOL_GPL(opal_write_oppanel_async);
/* Export this for KVM */
EXPORT_SYMBOL_GPL(opal_int_set_mfrr);
EXPORT_SYMBOL_GPL(opal_int_eoi);
EXPORT_SYMBOL_GPL(opal_error_code);
/* Export the below symbol for NX compression */
EXPORT_SYMBOL(opal_nx_coproc_init);