Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
awilliam
GitHub Repository: awilliam/linux-vfio
Path: blob/master/arch/x86/oprofile/nmi_int.c
10818 views
1
/**
2
* @file nmi_int.c
3
*
4
* @remark Copyright 2002-2009 OProfile authors
5
* @remark Read the file COPYING
6
*
7
* @author John Levon <[email protected]>
8
* @author Robert Richter <[email protected]>
9
* @author Barry Kasindorf <[email protected]>
10
* @author Jason Yeh <[email protected]>
11
* @author Suravee Suthikulpanit <[email protected]>
12
*/
13
14
#include <linux/init.h>
15
#include <linux/notifier.h>
16
#include <linux/smp.h>
17
#include <linux/oprofile.h>
18
#include <linux/syscore_ops.h>
19
#include <linux/slab.h>
20
#include <linux/moduleparam.h>
21
#include <linux/kdebug.h>
22
#include <linux/cpu.h>
23
#include <asm/nmi.h>
24
#include <asm/msr.h>
25
#include <asm/apic.h>
26
27
#include "op_counter.h"
28
#include "op_x86_model.h"
29
30
/* CPU model descriptor, chosen at init from vendor/family (op_nmi_init). */
static struct op_x86_model_spec *model;
/* Per-CPU counter/control MSR bookkeeping for the active model. */
static DEFINE_PER_CPU(struct op_msrs, cpu_msrs);
/* Per-CPU APIC LVTPC value saved at setup, restored at shutdown. */
static DEFINE_PER_CPU(unsigned long, saved_lvtpc);

/* must be protected with get_online_cpus()/put_online_cpus(): */
static int nmi_enabled;		/* profiling set up (MSRs programmed) */
static int ctr_running;	/* counters actively counting */

/* User-visible counter configuration, filled in via oprofilefs. */
struct op_counter_config counter_config[OP_MAX_COUNTER];
39
40
/* common functions */
41
42
u64 op_x86_get_ctrl(struct op_x86_model_spec const *model,
43
struct op_counter_config *counter_config)
44
{
45
u64 val = 0;
46
u16 event = (u16)counter_config->event;
47
48
val |= ARCH_PERFMON_EVENTSEL_INT;
49
val |= counter_config->user ? ARCH_PERFMON_EVENTSEL_USR : 0;
50
val |= counter_config->kernel ? ARCH_PERFMON_EVENTSEL_OS : 0;
51
val |= (counter_config->unit_mask & 0xFF) << 8;
52
counter_config->extra &= (ARCH_PERFMON_EVENTSEL_INV |
53
ARCH_PERFMON_EVENTSEL_EDGE |
54
ARCH_PERFMON_EVENTSEL_CMASK);
55
val |= counter_config->extra;
56
event &= model->event_mask ? model->event_mask : 0xFF;
57
val |= event & 0xFF;
58
val |= (event & 0x0F00) << 24;
59
60
return val;
61
}
62
63
64
/*
 * Die-chain callback, runs in NMI context.  For DIE_NMI: if counters
 * are running, let the model inspect them and collect the sample; if
 * profiling is torn down (!nmi_enabled) the NMI is not ours and the
 * chain continues (NOTIFY_DONE); otherwise profiling is set up but
 * stopped, so re-stop the counters (a late overflow NMI can still
 * arrive) and swallow the NMI.  Returns NOTIFY_STOP when consumed.
 */
static int profile_exceptions_notify(struct notifier_block *self,
				     unsigned long val, void *data)
{
	struct die_args *args = (struct die_args *)data;
	int ret = NOTIFY_DONE;

	switch (val) {
	case DIE_NMI:
		if (ctr_running)
			model->check_ctrs(args->regs, &__get_cpu_var(cpu_msrs));
		else if (!nmi_enabled)
			break;
		else
			model->stop(&__get_cpu_var(cpu_msrs));
		ret = NOTIFY_STOP;
		break;
	default:
		break;
	}
	return ret;
}
85
86
static void nmi_cpu_save_registers(struct op_msrs *msrs)
87
{
88
struct op_msr *counters = msrs->counters;
89
struct op_msr *controls = msrs->controls;
90
unsigned int i;
91
92
for (i = 0; i < model->num_counters; ++i) {
93
if (counters[i].addr)
94
rdmsrl(counters[i].addr, counters[i].saved);
95
}
96
97
for (i = 0; i < model->num_controls; ++i) {
98
if (controls[i].addr)
99
rdmsrl(controls[i].addr, controls[i].saved);
100
}
101
}
102
103
static void nmi_cpu_start(void *dummy)
104
{
105
struct op_msrs const *msrs = &__get_cpu_var(cpu_msrs);
106
if (!msrs->controls)
107
WARN_ON_ONCE(1);
108
else
109
model->start(msrs);
110
}
111
112
/*
 * Start profiling on every online CPU.  ctr_running is set and
 * published with smp_mb() *before* the IPIs so a racing NMI handler
 * observes the flag; CPU hotplug is excluded for the duration.
 * Always returns 0.
 */
static int nmi_start(void)
{
	get_online_cpus();
	ctr_running = 1;
	/* make ctr_running visible to the nmi handler: */
	smp_mb();
	on_each_cpu(nmi_cpu_start, NULL, 1);
	put_online_cpus();
	return 0;
}
122
123
static void nmi_cpu_stop(void *dummy)
124
{
125
struct op_msrs const *msrs = &__get_cpu_var(cpu_msrs);
126
if (!msrs->controls)
127
WARN_ON_ONCE(1);
128
else
129
model->stop(msrs);
130
}
131
132
/*
 * Stop profiling on every online CPU.  The counters are halted via
 * IPI before ctr_running is cleared; hotplug is excluded so no CPU
 * comes or goes mid-update.
 */
static void nmi_stop(void)
{
	get_online_cpus();
	on_each_cpu(nmi_cpu_stop, NULL, 1);
	ctr_running = 0;
	put_online_cpus();
}
139
140
#ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX
141
142
static DEFINE_PER_CPU(int, switch_index);
143
144
static inline int has_mux(void)
145
{
146
return !!model->switch_ctrl;
147
}
148
149
inline int op_x86_phys_to_virt(int phys)
150
{
151
return __this_cpu_read(switch_index) + phys;
152
}
153
154
inline int op_x86_virt_to_phys(int virt)
155
{
156
return virt % model->num_counters;
157
}
158
159
static void nmi_shutdown_mux(void)
160
{
161
int i;
162
163
if (!has_mux())
164
return;
165
166
for_each_possible_cpu(i) {
167
kfree(per_cpu(cpu_msrs, i).multiplex);
168
per_cpu(cpu_msrs, i).multiplex = NULL;
169
per_cpu(switch_index, i) = 0;
170
}
171
}
172
173
static int nmi_setup_mux(void)
174
{
175
size_t multiplex_size =
176
sizeof(struct op_msr) * model->num_virt_counters;
177
int i;
178
179
if (!has_mux())
180
return 1;
181
182
for_each_possible_cpu(i) {
183
per_cpu(cpu_msrs, i).multiplex =
184
kzalloc(multiplex_size, GFP_KERNEL);
185
if (!per_cpu(cpu_msrs, i).multiplex)
186
return 0;
187
}
188
189
return 1;
190
}
191
192
static void nmi_cpu_setup_mux(int cpu, struct op_msrs const * const msrs)
193
{
194
int i;
195
struct op_msr *multiplex = msrs->multiplex;
196
197
if (!has_mux())
198
return;
199
200
for (i = 0; i < model->num_virt_counters; ++i) {
201
if (counter_config[i].enabled) {
202
multiplex[i].saved = -(u64)counter_config[i].count;
203
} else {
204
multiplex[i].saved = 0;
205
}
206
}
207
208
per_cpu(switch_index, cpu) = 0;
209
}
210
211
static void nmi_cpu_save_mpx_registers(struct op_msrs *msrs)
212
{
213
struct op_msr *counters = msrs->counters;
214
struct op_msr *multiplex = msrs->multiplex;
215
int i;
216
217
for (i = 0; i < model->num_counters; ++i) {
218
int virt = op_x86_phys_to_virt(i);
219
if (counters[i].addr)
220
rdmsrl(counters[i].addr, multiplex[virt].saved);
221
}
222
}
223
224
static void nmi_cpu_restore_mpx_registers(struct op_msrs *msrs)
225
{
226
struct op_msr *counters = msrs->counters;
227
struct op_msr *multiplex = msrs->multiplex;
228
int i;
229
230
for (i = 0; i < model->num_counters; ++i) {
231
int virt = op_x86_phys_to_virt(i);
232
if (counters[i].addr)
233
wrmsrl(counters[i].addr, multiplex[virt].saved);
234
}
235
}
236
237
/*
 * IPI callback: rotate this CPU to the next set of virtual counters.
 * Counting is stopped and the live counter values saved to the shadow
 * array *before* switch_index is advanced; the index wraps back to 0
 * past the last virtual counter or at the first unconfigured slot.
 * The model then reprograms its controls and the new set's shadow
 * values are loaded before counting resumes.
 */
static void nmi_cpu_switch(void *dummy)
{
	int cpu = smp_processor_id();
	int si = per_cpu(switch_index, cpu);
	struct op_msrs *msrs = &per_cpu(cpu_msrs, cpu);

	nmi_cpu_stop(NULL);
	nmi_cpu_save_mpx_registers(msrs);

	/* move to next set */
	si += model->num_counters;
	if ((si >= model->num_virt_counters) || (counter_config[si].count == 0))
		per_cpu(switch_index, cpu) = 0;
	else
		per_cpu(switch_index, cpu) = si;

	model->switch_ctrl(model, msrs);
	nmi_cpu_restore_mpx_registers(msrs);

	nmi_cpu_start(NULL);
}
258
259
260
/*
 * Quick check to see if multiplexing is necessary: returns 0 when a
 * virtual counter beyond the physical ones is configured.  The check
 * should be sufficient since counters are used in order.
 */
static int nmi_multiplex_on(void)
{
	return counter_config[model->num_counters].count ? 0 : -EINVAL;
}
269
270
static int nmi_switch_event(void)
271
{
272
if (!has_mux())
273
return -ENOSYS; /* not implemented */
274
if (nmi_multiplex_on() < 0)
275
return -EINVAL; /* not necessary */
276
277
get_online_cpus();
278
if (ctr_running)
279
on_each_cpu(nmi_cpu_switch, NULL, 1);
280
put_online_cpus();
281
282
return 0;
283
}
284
285
static inline void mux_init(struct oprofile_operations *ops)
286
{
287
if (has_mux())
288
ops->switch_events = nmi_switch_event;
289
}
290
291
static void mux_clone(int cpu)
292
{
293
if (!has_mux())
294
return;
295
296
memcpy(per_cpu(cpu_msrs, cpu).multiplex,
297
per_cpu(cpu_msrs, 0).multiplex,
298
sizeof(struct op_msr) * model->num_virt_counters);
299
}
300
301
#else

/* Multiplexing compiled out: identity index mappings and no-op stubs
 * so callers need no #ifdefs.  nmi_setup_mux() reports success. */
inline int op_x86_phys_to_virt(int phys) { return phys; }
inline int op_x86_virt_to_phys(int virt) { return virt; }
static inline void nmi_shutdown_mux(void) { }
static inline int nmi_setup_mux(void) { return 1; }
static inline void
nmi_cpu_setup_mux(int cpu, struct op_msrs const * const msrs) { }
static inline void mux_init(struct oprofile_operations *ops) { }
static void mux_clone(int cpu) { }

#endif
313
314
static void free_msrs(void)
315
{
316
int i;
317
for_each_possible_cpu(i) {
318
kfree(per_cpu(cpu_msrs, i).counters);
319
per_cpu(cpu_msrs, i).counters = NULL;
320
kfree(per_cpu(cpu_msrs, i).controls);
321
per_cpu(cpu_msrs, i).controls = NULL;
322
}
323
nmi_shutdown_mux();
324
}
325
326
static int allocate_msrs(void)
327
{
328
size_t controls_size = sizeof(struct op_msr) * model->num_controls;
329
size_t counters_size = sizeof(struct op_msr) * model->num_counters;
330
331
int i;
332
for_each_possible_cpu(i) {
333
per_cpu(cpu_msrs, i).counters = kzalloc(counters_size,
334
GFP_KERNEL);
335
if (!per_cpu(cpu_msrs, i).counters)
336
goto fail;
337
per_cpu(cpu_msrs, i).controls = kzalloc(controls_size,
338
GFP_KERNEL);
339
if (!per_cpu(cpu_msrs, i).controls)
340
goto fail;
341
}
342
343
if (!nmi_setup_mux())
344
goto fail;
345
346
return 1;
347
348
fail:
349
free_msrs();
350
return 0;
351
}
352
353
/*
 * IPI callback: per-CPU profiling setup.  Saves the current MSR
 * values for later restore, programs the model's controls under
 * oprofilefs_lock (serializing against configuration writes), and
 * finally redirects the local APIC performance-counter LVT entry to
 * NMI delivery, stashing the previous LVTPC value.
 */
static void nmi_cpu_setup(void *dummy)
{
	int cpu = smp_processor_id();
	struct op_msrs *msrs = &per_cpu(cpu_msrs, cpu);
	nmi_cpu_save_registers(msrs);
	spin_lock(&oprofilefs_lock);
	model->setup_ctrs(model, msrs);
	nmi_cpu_setup_mux(cpu, msrs);
	spin_unlock(&oprofilefs_lock);
	per_cpu(saved_lvtpc, cpu) = apic_read(APIC_LVTPC);
	apic_write(APIC_LVTPC, APIC_DM_NMI);
}
365
366
/* Die-chain notifier entry routing NMIs to profile_exceptions_notify(). */
static struct notifier_block profile_exceptions_nb = {
	.notifier_call = profile_exceptions_notify,
	.next = NULL,
	.priority = NMI_LOCAL_LOW_PRIOR,
};
371
372
static void nmi_cpu_restore_registers(struct op_msrs *msrs)
373
{
374
struct op_msr *counters = msrs->counters;
375
struct op_msr *controls = msrs->controls;
376
unsigned int i;
377
378
for (i = 0; i < model->num_controls; ++i) {
379
if (controls[i].addr)
380
wrmsrl(controls[i].addr, controls[i].saved);
381
}
382
383
for (i = 0; i < model->num_counters; ++i) {
384
if (counters[i].addr)
385
wrmsrl(counters[i].addr, counters[i].saved);
386
}
387
}
388
389
/*
 * IPI callback: per-CPU teardown.  Restores the APIC LVTPC saved in
 * nmi_cpu_setup() (with LVTERR temporarily masked, see below), writes
 * back the saved MSRs, and lets the model clean up if it has a
 * cpu_down hook.
 */
static void nmi_cpu_shutdown(void *dummy)
{
	unsigned int v;
	int cpu = smp_processor_id();
	struct op_msrs *msrs = &per_cpu(cpu_msrs, cpu);

	/* restoring APIC_LVTPC can trigger an apic error because the delivery
	 * mode and vector nr combination can be illegal. That's by design: on
	 * power on apic lvt contain a zero vector nr which are legal only for
	 * NMI delivery mode. So inhibit apic err before restoring lvtpc
	 */
	v = apic_read(APIC_LVTERR);
	apic_write(APIC_LVTERR, v | APIC_LVT_MASKED);
	apic_write(APIC_LVTPC, per_cpu(saved_lvtpc, cpu));
	apic_write(APIC_LVTERR, v);
	nmi_cpu_restore_registers(msrs);
	if (model->cpu_down)
		model->cpu_down();
}
408
409
/*
 * Bring profiling up on a (hotplugged) CPU: program the MSRs/APIC if
 * profiling is set up, then start counting if counters are running.
 * Setup must precede start.
 */
static void nmi_cpu_up(void *dummy)
{
	if (nmi_enabled)
		nmi_cpu_setup(dummy);
	if (ctr_running)
		nmi_cpu_start(dummy);
}
416
417
/*
 * Take profiling down on a CPU about to go offline: stop counting
 * first (if running), then undo the per-CPU setup.  Mirror image of
 * nmi_cpu_up().
 */
static void nmi_cpu_down(void *dummy)
{
	if (ctr_running)
		nmi_cpu_stop(dummy);
	if (nmi_enabled)
		nmi_cpu_shutdown(dummy);
}
424
425
static int nmi_create_files(struct super_block *sb, struct dentry *root)
426
{
427
unsigned int i;
428
429
for (i = 0; i < model->num_virt_counters; ++i) {
430
struct dentry *dir;
431
char buf[4];
432
433
/* quick little hack to _not_ expose a counter if it is not
434
* available for use. This should protect userspace app.
435
* NOTE: assumes 1:1 mapping here (that counters are organized
436
* sequentially in their struct assignment).
437
*/
438
if (!avail_to_resrv_perfctr_nmi_bit(op_x86_virt_to_phys(i)))
439
continue;
440
441
snprintf(buf, sizeof(buf), "%d", i);
442
dir = oprofilefs_mkdir(sb, root, buf);
443
oprofilefs_create_ulong(sb, dir, "enabled", &counter_config[i].enabled);
444
oprofilefs_create_ulong(sb, dir, "event", &counter_config[i].event);
445
oprofilefs_create_ulong(sb, dir, "count", &counter_config[i].count);
446
oprofilefs_create_ulong(sb, dir, "unit_mask", &counter_config[i].unit_mask);
447
oprofilefs_create_ulong(sb, dir, "kernel", &counter_config[i].kernel);
448
oprofilefs_create_ulong(sb, dir, "user", &counter_config[i].user);
449
oprofilefs_create_ulong(sb, dir, "extra", &counter_config[i].extra);
450
}
451
452
return 0;
453
}
454
455
/*
 * CPU hotplug callback.  A CPU coming online (or a failed offline)
 * gets profiling re-armed via a non-waiting cross-call; a CPU about
 * to go offline is torn down with a waiting cross-call (wait=1) so
 * the work completes before the CPU disappears.
 */
static int oprofile_cpu_notifier(struct notifier_block *b, unsigned long action,
				 void *data)
{
	int cpu = (unsigned long)data;
	switch (action) {
	case CPU_DOWN_FAILED:
	case CPU_ONLINE:
		smp_call_function_single(cpu, nmi_cpu_up, NULL, 0);
		break;
	case CPU_DOWN_PREPARE:
		smp_call_function_single(cpu, nmi_cpu_down, NULL, 1);
		break;
	}
	return NOTIFY_DONE;
}
470
471
/* Hotplug notifier entry; registered/unregistered in nmi_setup()/nmi_shutdown(). */
static struct notifier_block oprofile_cpu_nb = {
	.notifier_call = oprofile_cpu_notifier
};
474
475
/*
 * oprofile setup hook: allocate per-CPU MSR state, fill in the MSR
 * addresses once on CPU 0 and replicate them to all other CPUs, then
 * register the NMI die notifier and the hotplug notifier, publish
 * nmi_enabled (smp_mb() before the IPIs so the NMI handler sees it),
 * and program every online CPU.  Returns 0 or a -errno; all state is
 * freed on failure.
 */
static int nmi_setup(void)
{
	int err = 0;
	int cpu;

	if (!allocate_msrs())
		return -ENOMEM;

	/* We need to serialize save and setup for HT because the subset
	 * of msrs are distinct for save and setup operations
	 */

	/* Assume saved/restored counters are the same on all CPUs */
	err = model->fill_in_addresses(&per_cpu(cpu_msrs, 0));
	if (err)
		goto fail;

	for_each_possible_cpu(cpu) {
		if (!cpu)
			continue;

		memcpy(per_cpu(cpu_msrs, cpu).counters,
		       per_cpu(cpu_msrs, 0).counters,
		       sizeof(struct op_msr) * model->num_counters);

		memcpy(per_cpu(cpu_msrs, cpu).controls,
		       per_cpu(cpu_msrs, 0).controls,
		       sizeof(struct op_msr) * model->num_controls);

		mux_clone(cpu);
	}

	nmi_enabled = 0;
	ctr_running = 0;
	/* make variables visible to the nmi handler: */
	smp_mb();
	err = register_die_notifier(&profile_exceptions_nb);
	if (err)
		goto fail;

	get_online_cpus();
	register_cpu_notifier(&oprofile_cpu_nb);
	nmi_enabled = 1;
	/* make nmi_enabled visible to the nmi handler: */
	smp_mb();
	on_each_cpu(nmi_cpu_setup, NULL, 1);
	put_online_cpus();

	return 0;
fail:
	free_msrs();
	return err;
}
528
529
/*
 * oprofile shutdown hook, inverse of nmi_setup(): unregister the
 * hotplug notifier and tear down every online CPU under the hotplug
 * lock, clear and publish the flags, drop the die notifier, then let
 * the model shut down (with preemption disabled via get_cpu_var so
 * the per-CPU reference stays valid) and free all MSR state.
 */
static void nmi_shutdown(void)
{
	struct op_msrs *msrs;

	get_online_cpus();
	unregister_cpu_notifier(&oprofile_cpu_nb);
	on_each_cpu(nmi_cpu_shutdown, NULL, 1);
	nmi_enabled = 0;
	ctr_running = 0;
	put_online_cpus();
	/* make variables visible to the nmi handler: */
	smp_mb();
	unregister_die_notifier(&profile_exceptions_nb);
	msrs = &get_cpu_var(cpu_msrs);
	model->shutdown(msrs);
	free_msrs();
	put_cpu_var(cpu_msrs);
}
547
548
#ifdef CONFIG_PM
549
550
/*
 * Syscore suspend hook: runs late in suspend when only the boot CPU
 * remains, so stopping the local counters is sufficient.  Always
 * returns 0 (suspend proceeds).
 */
static int nmi_suspend(void)
{
	/* Only one CPU left, just stop that one */
	if (nmi_enabled == 1)
		nmi_cpu_stop(NULL);
	return 0;
}
557
558
/* Syscore resume hook: restart the boot CPU's counters stopped by
 * nmi_suspend(). */
static void nmi_resume(void)
{
	if (nmi_enabled == 1)
		nmi_cpu_start(NULL);
}
563
564
/* Suspend/resume callbacks registered via register_syscore_ops(). */
static struct syscore_ops oprofile_syscore_ops = {
	.resume = nmi_resume,
	.suspend = nmi_suspend,
};
568
569
/* Register the suspend/resume hooks (CONFIG_PM builds only). */
static void __init init_suspend_resume(void)
{
	register_syscore_ops(&oprofile_syscore_ops);
}
573
574
/* Unregister the suspend/resume hooks registered by init_suspend_resume(). */
static void exit_suspend_resume(void)
{
	unregister_syscore_ops(&oprofile_syscore_ops);
}
578
579
#else
580
581
static inline void init_suspend_resume(void) { }
582
static inline void exit_suspend_resume(void) { }
583
584
#endif /* CONFIG_PM */
585
586
/*
 * Probe for a supported Pentium 4 class CPU.  Sets *cpu_type and the
 * global model and returns 1 on success, 0 when this P4 model is not
 * handled.  On SMP the HyperThreading sibling count selects between
 * the plain and HT-aware model specs; more than two siblings is
 * unsupported and falls back to timer mode.
 */
static int __init p4_init(char **cpu_type)
{
	__u8 cpu_model = boot_cpu_data.x86_model;

	/* models > 6 (except 5, also unsupported) are unknown P4s */
	if (cpu_model > 6 || cpu_model == 5)
		return 0;

#ifndef CONFIG_SMP
	*cpu_type = "i386/p4";
	model = &op_p4_spec;
	return 1;
#else
	switch (smp_num_siblings) {
	case 1:
		*cpu_type = "i386/p4";
		model = &op_p4_spec;
		return 1;

	case 2:
		*cpu_type = "i386/p4-ht";
		model = &op_p4_ht2_spec;
		return 1;
	}
#endif

	printk(KERN_INFO "oprofile: P4 HyperThreading detected with > 2 threads\n");
	printk(KERN_INFO "oprofile: Reverting to timer mode.\n");
	return 0;
}
615
616
/* Set when the user passes cpu_type=arch_perfmon on the module command
 * line; makes ppro_init() bail so the generic arch perfmon model is used. */
static int force_arch_perfmon;

/* Module-parameter setter for "cpu_type"; only "arch_perfmon" is
 * recognized, anything else is silently ignored.  Always returns 0. */
static int force_cpu_type(const char *str, struct kernel_param *kp)
{
	if (!strcmp(str, "arch_perfmon")) {
		force_arch_perfmon = 1;
		printk(KERN_INFO "oprofile: forcing architectural perfmon\n");
	}

	return 0;
}
module_param_call(cpu_type, force_cpu_type, NULL, NULL, 0);
627
628
/*
 * Probe for a supported P6-family (family 6) Intel CPU.  Maps the CPU
 * model number to an oprofile cpu_type string, sets the global model
 * (op_ppro_spec by default, op_arch_perfmon_spec for Core i7 class)
 * and returns 1; returns 0 for unknown models or when the user forced
 * arch perfmon and the CPU supports it (caller falls back to the
 * generic arch perfmon path).
 */
static int __init ppro_init(char **cpu_type)
{
	__u8 cpu_model = boot_cpu_data.x86_model;
	struct op_x86_model_spec *spec = &op_ppro_spec;	/* default */

	if (force_arch_perfmon && cpu_has_arch_perfmon)
		return 0;

	/*
	 * Documentation on identifying Intel processors by CPU family
	 * and model can be found in the Intel Software Developer's
	 * Manuals (SDM):
	 *
	 *  http://www.intel.com/products/processor/manuals/
	 *
	 * As of May 2010 the documentation for this was in the:
	 * "Intel 64 and IA-32 Architectures Software Developer's
	 * Manual Volume 3B: System Programming Guide", "Table B-1
	 * CPUID Signature Values of DisplayFamily_DisplayModel".
	 */
	switch (cpu_model) {
	case 0 ... 2:
		*cpu_type = "i386/ppro";
		break;
	case 3 ... 5:
		*cpu_type = "i386/pii";
		break;
	case 6 ... 8:
	case 10 ... 11:
		*cpu_type = "i386/piii";
		break;
	case 9:
	case 13:
		*cpu_type = "i386/p6_mobile";
		break;
	case 14:
		*cpu_type = "i386/core";
		break;
	case 0x0f:
	case 0x16:
	case 0x17:
	case 0x1d:
		*cpu_type = "i386/core_2";
		break;
	case 0x1a:
	case 0x1e:
	case 0x2e:
		spec = &op_arch_perfmon_spec;
		*cpu_type = "i386/core_i7";
		break;
	case 0x1c:
		*cpu_type = "i386/atom";
		break;
	default:
		/* Unknown */
		return 0;
	}

	model = spec;
	return 1;
}
689
690
/*
 * Entry point for NMI-based profiling.  Selects the model spec and
 * cpu_type string from the boot CPU's vendor/family (AMD families,
 * Intel P4 and P6, with generic arch perfmon as the Intel fallback),
 * fills in the oprofile_operations callbacks, runs the model's init
 * hook, enables multiplexing if supported, and registers the
 * suspend/resume hooks.  Returns 0 on success or a -errno (e.g.
 * -ENODEV when no local APIC or no supported CPU is found).
 */
int __init op_nmi_init(struct oprofile_operations *ops)
{
	__u8 vendor = boot_cpu_data.x86_vendor;
	__u8 family = boot_cpu_data.x86;
	char *cpu_type = NULL;
	int ret = 0;

	if (!cpu_has_apic)
		return -ENODEV;

	switch (vendor) {
	case X86_VENDOR_AMD:
		/* Needs to be at least an Athlon (or hammer in 32bit mode) */

		switch (family) {
		case 6:
			cpu_type = "i386/athlon";
			break;
		case 0xf:
			/*
			 * Actually it could be i386/hammer too, but
			 * give user space a consistent name.
			 */
			cpu_type = "x86-64/hammer";
			break;
		case 0x10:
			cpu_type = "x86-64/family10";
			break;
		case 0x11:
			cpu_type = "x86-64/family11h";
			break;
		case 0x12:
			cpu_type = "x86-64/family12h";
			break;
		case 0x14:
			cpu_type = "x86-64/family14h";
			break;
		case 0x15:
			cpu_type = "x86-64/family15h";
			break;
		default:
			return -ENODEV;
		}
		model = &op_amd_spec;
		break;

	case X86_VENDOR_INTEL:
		switch (family) {
			/* Pentium IV */
		case 0xf:
			p4_init(&cpu_type);
			break;

			/* A P6-class processor */
		case 6:
			ppro_init(&cpu_type);
			break;

		default:
			break;
		}

		if (cpu_type)
			break;

		if (!cpu_has_arch_perfmon)
			return -ENODEV;

		/* use arch perfmon as fallback */
		cpu_type = "i386/arch_perfmon";
		model = &op_arch_perfmon_spec;
		break;

	default:
		return -ENODEV;
	}

	/* default values, can be overwritten by model */
	ops->create_files = nmi_create_files;
	ops->setup = nmi_setup;
	ops->shutdown = nmi_shutdown;
	ops->start = nmi_start;
	ops->stop = nmi_stop;
	ops->cpu_type = cpu_type;

	if (model->init)
		ret = model->init(ops);
	if (ret)
		return ret;

	if (!model->num_virt_counters)
		model->num_virt_counters = model->num_counters;

	mux_init(ops);

	init_suspend_resume();

	printk(KERN_INFO "oprofile: using NMI interrupt.\n");
	return 0;
}
790
791
/* Module exit counterpart of op_nmi_init(): drop the syscore hooks. */
void op_nmi_exit(void)
{
	exit_suspend_resume();
}
795
796