GitHub Repository: torvalds/linux
Path: blob/master/arch/powerpc/lib/sstep.c

// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Single-step support.
 *
 * Copyright (C) 2004 Paul Mackerras <[email protected]>, IBM
 */
#include <linux/kernel.h>
#include <linux/kprobes.h>
#include <linux/ptrace.h>
#include <linux/prefetch.h>
#include <asm/sstep.h>
#include <asm/processor.h>
#include <linux/uaccess.h>
#include <asm/cpu_has_feature.h>
#include <asm/cputable.h>
#include <asm/disassemble.h>

#ifdef CONFIG_PPC64
/* Bits in SRR1 that are copied from MSR */
#define MSR_MASK	0xffffffff87c0ffffUL
#else
#define MSR_MASK	0x87c0ffff
#endif

/* Bits in XER */
#define XER_SO		0x80000000U
#define XER_OV		0x40000000U
#define XER_CA		0x20000000U
#define XER_OV32	0x00080000U
#define XER_CA32	0x00040000U

#ifdef CONFIG_VSX
#define VSX_REGISTER_XTP(rd)	((((rd) & 1) << 5) | ((rd) & 0xfe))
#endif

#ifdef CONFIG_PPC_FPU
/*
 * Functions in ldstfp.S
 */
extern void get_fpr(int rn, double *p);
extern void put_fpr(int rn, const double *p);
extern void get_vr(int rn, __vector128 *p);
extern void put_vr(int rn, __vector128 *p);
extern void load_vsrn(int vsr, const void *p);
extern void store_vsrn(int vsr, void *p);
extern void conv_sp_to_dp(const float *sp, double *dp);
extern void conv_dp_to_sp(const double *dp, float *sp);
#endif

#ifdef __powerpc64__
/*
 * Functions in quad.S
 */
extern int do_lq(unsigned long ea, unsigned long *regs);
extern int do_stq(unsigned long ea, unsigned long val0, unsigned long val1);
extern int do_lqarx(unsigned long ea, unsigned long *regs);
extern int do_stqcx(unsigned long ea, unsigned long val0, unsigned long val1,
		    unsigned int *crp);
#endif

#ifdef __LITTLE_ENDIAN__
#define IS_LE	1
#define IS_BE	0
#else
#define IS_LE	0
#define IS_BE	1
#endif

/*
 * Emulate the truncation of 64 bit values in 32-bit mode.
 */
static nokprobe_inline unsigned long truncate_if_32bit(unsigned long msr,
						       unsigned long val)
{
	if ((msr & MSR_64BIT) == 0)
		val &= 0xffffffffUL;
	return val;
}

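/*
 * For example, with MSR_64BIT clear, truncate_if_32bit() turns
 * 0x123456789abcdef0UL into 0x9abcdef0UL; with MSR_64BIT set the value
 * is returned unchanged.
 */
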
/*
 * Determine whether a conditional branch instruction would branch.
 */
static nokprobe_inline int branch_taken(unsigned int instr,
					const struct pt_regs *regs,
					struct instruction_op *op)
{
	unsigned int bo = (instr >> 21) & 0x1f;
	unsigned int bi;

	if ((bo & 4) == 0) {
		/* decrement counter */
		op->type |= DECCTR;
		if (((bo >> 1) & 1) ^ (regs->ctr == 1))
			return 0;
	}
	if ((bo & 0x10) == 0) {
		/* check bit from CR */
		bi = (instr >> 16) & 0x1f;
		if (((regs->ccr >> (31 - bi)) & 1) != ((bo >> 3) & 1))
			return 0;
	}
	return 1;
}

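/*
 * Worked example: bdnz has BO = 0b10000, so bit 0x4 is clear and CTR is
 * decremented; since (bo >> 1) & 1 is 0, the branch falls through
 * exactly when the pre-decrement CTR is 1 (i.e. CTR would reach 0).
 * Bit 0x10 is set, so no CR bit is tested.  For blt, BO = 0b01100:
 * CTR is left alone and the branch is taken only when CR bit BI is 1,
 * because (bo >> 3) & 1 is 1.
 */
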
static nokprobe_inline long address_ok(struct pt_regs *regs,
				       unsigned long ea, int nb)
{
	if (!user_mode(regs))
		return 1;
	if (access_ok((void __user *)ea, nb))
		return 1;
	if (access_ok((void __user *)ea, 1))
		/* Access overlaps the end of the user region */
		regs->dar = TASK_SIZE_MAX - 1;
	else
		regs->dar = ea;
	return 0;
}

/*
 * Calculate effective address for a D-form instruction
 */
static nokprobe_inline unsigned long dform_ea(unsigned int instr,
					      const struct pt_regs *regs)
{
	int ra;
	unsigned long ea;

	ra = (instr >> 16) & 0x1f;
	ea = (signed short) instr;		/* sign-extend */
	if (ra)
		ea += regs->gpr[ra];

	return ea;
}

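/*
 * For example, lwz r3,8(r1) carries RA = 1 and displacement 8, giving
 * ea = regs->gpr[1] + 8.  When RA is 0 the base is the literal value 0
 * rather than gpr[0], which is what the "if (ra)" test implements.
 */
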
#ifdef __powerpc64__
/*
 * Calculate effective address for a DS-form instruction
 */
static nokprobe_inline unsigned long dsform_ea(unsigned int instr,
					       const struct pt_regs *regs)
{
	int ra;
	unsigned long ea;

	ra = (instr >> 16) & 0x1f;
	ea = (signed short) (instr & ~3);	/* sign-extend */
	if (ra)
		ea += regs->gpr[ra];

	return ea;
}

/*
 * Calculate effective address for a DQ-form instruction
 */
static nokprobe_inline unsigned long dqform_ea(unsigned int instr,
					       const struct pt_regs *regs)
{
	int ra;
	unsigned long ea;

	ra = (instr >> 16) & 0x1f;
	ea = (signed short) (instr & ~0xf);	/* sign-extend */
	if (ra)
		ea += regs->gpr[ra];

	return ea;
}
#endif /* __powerpc64 */

/*
 * Calculate effective address for an X-form instruction
 */
static nokprobe_inline unsigned long xform_ea(unsigned int instr,
					      const struct pt_regs *regs)
{
	int ra, rb;
	unsigned long ea;

	ra = (instr >> 16) & 0x1f;
	rb = (instr >> 11) & 0x1f;
	ea = regs->gpr[rb];
	if (ra)
		ea += regs->gpr[ra];

	return ea;
}

/*
 * Calculate effective address for a MLS:D-form / 8LS:D-form
 * prefixed instruction
 */
static nokprobe_inline unsigned long mlsd_8lsd_ea(unsigned int instr,
						  unsigned int suffix,
						  const struct pt_regs *regs)
{
	int ra, prefix_r;
	unsigned int dd;
	unsigned long ea, d0, d1, d;

	prefix_r = GET_PREFIX_R(instr);
	ra = GET_PREFIX_RA(suffix);

	d0 = instr & 0x3ffff;
	d1 = suffix & 0xffff;
	d = (d0 << 16) | d1;

	/*
	 * sign extend a 34 bit number
	 */
	dd = (unsigned int)(d >> 2);
	ea = (signed int)dd;
	ea = (ea << 2) | (d & 0x3);

	if (!prefix_r && ra)
		ea += regs->gpr[ra];
	else if (!prefix_r && !ra)
		;		/* Leave ea as is */
	else if (prefix_r)
		ea += regs->nip;

	/*
	 * (prefix_r && ra) is an invalid form. Should already be
	 * checked for by caller!
	 */

	return ea;
}

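/*
 * The 34-bit displacement is sign-extended by letting the cast to
 * signed int extend the top 32 bits, then pasting the low 2 bits back.
 * For example, d = 0x3ffffffff (all 34 bits set) gives dd = 0xffffffff,
 * ea = (signed int)dd = -1, and (ea << 2) | 3 is still -1.
 */
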
/*
 * Return the largest power of 2, not greater than sizeof(unsigned long),
 * such that x is a multiple of it.
 */
static nokprobe_inline unsigned long max_align(unsigned long x)
{
	x |= sizeof(unsigned long);
	return x & -x;		/* isolates rightmost bit */
}

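/*
 * For example, on a 64-bit kernel max_align(0x1001) == 1,
 * max_align(0x1002) == 2, max_align(0x1004) == 4 and
 * max_align(0x1000) == 8: OR-ing in sizeof(unsigned long) caps the
 * result, and x & -x isolates the lowest set bit.
 */
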
static nokprobe_inline unsigned long byterev_2(unsigned long x)
{
	return ((x >> 8) & 0xff) | ((x & 0xff) << 8);
}

static nokprobe_inline unsigned long byterev_4(unsigned long x)
{
	return ((x >> 24) & 0xff) | ((x >> 8) & 0xff00) |
		((x & 0xff00) << 8) | ((x & 0xff) << 24);
}

#ifdef __powerpc64__
static nokprobe_inline unsigned long byterev_8(unsigned long x)
{
	return (byterev_4(x) << 32) | byterev_4(x >> 32);
}
#endif

static nokprobe_inline void do_byte_reverse(void *ptr, int nb)
{
	switch (nb) {
	case 2:
		*(u16 *)ptr = byterev_2(*(u16 *)ptr);
		break;
	case 4:
		*(u32 *)ptr = byterev_4(*(u32 *)ptr);
		break;
#ifdef __powerpc64__
	case 8:
		*(unsigned long *)ptr = byterev_8(*(unsigned long *)ptr);
		break;
	case 16: {
		unsigned long *up = (unsigned long *)ptr;
		unsigned long tmp;
		tmp = byterev_8(up[0]);
		up[0] = byterev_8(up[1]);
		up[1] = tmp;
		break;
	}
	case 32: {
		unsigned long *up = (unsigned long *)ptr;
		unsigned long tmp;

		tmp = byterev_8(up[0]);
		up[0] = byterev_8(up[3]);
		up[3] = tmp;
		tmp = byterev_8(up[2]);
		up[2] = byterev_8(up[1]);
		up[1] = tmp;
		break;
	}

#endif
	default:
		WARN_ON_ONCE(1);
	}
}

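/*
 * For example, byterev_4(0x12345678) == 0x78563412.  For nb == 16,
 * do_byte_reverse() swaps the two 8-byte halves while byte-reversing
 * each of them, so the full 16-byte value is reversed end to end.
 */
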
static __always_inline int
__read_mem_aligned(unsigned long *dest, unsigned long ea, int nb, struct pt_regs *regs)
{
	unsigned long x = 0;

	switch (nb) {
	case 1:
		unsafe_get_user(x, (unsigned char __user *)ea, Efault);
		break;
	case 2:
		unsafe_get_user(x, (unsigned short __user *)ea, Efault);
		break;
	case 4:
		unsafe_get_user(x, (unsigned int __user *)ea, Efault);
		break;
#ifdef __powerpc64__
	case 8:
		unsafe_get_user(x, (unsigned long __user *)ea, Efault);
		break;
#endif
	}
	*dest = x;
	return 0;

Efault:
	regs->dar = ea;
	return -EFAULT;
}

static nokprobe_inline int
read_mem_aligned(unsigned long *dest, unsigned long ea, int nb, struct pt_regs *regs)
{
	int err;

	if (is_kernel_addr(ea))
		return __read_mem_aligned(dest, ea, nb, regs);

	if (user_read_access_begin((void __user *)ea, nb)) {
		err = __read_mem_aligned(dest, ea, nb, regs);
		user_read_access_end();
	} else {
		err = -EFAULT;
		regs->dar = ea;
	}

	return err;
}

/*
 * Copy from userspace to a buffer, using the largest possible
 * aligned accesses, up to sizeof(long).
 */
static __always_inline int __copy_mem_in(u8 *dest, unsigned long ea, int nb, struct pt_regs *regs)
{
	int c;

	for (; nb > 0; nb -= c) {
		c = max_align(ea);
		if (c > nb)
			c = max_align(nb);
		switch (c) {
		case 1:
			unsafe_get_user(*dest, (u8 __user *)ea, Efault);
			break;
		case 2:
			unsafe_get_user(*(u16 *)dest, (u16 __user *)ea, Efault);
			break;
		case 4:
			unsafe_get_user(*(u32 *)dest, (u32 __user *)ea, Efault);
			break;
#ifdef __powerpc64__
		case 8:
			unsafe_get_user(*(u64 *)dest, (u64 __user *)ea, Efault);
			break;
#endif
		}
		dest += c;
		ea += c;
	}
	return 0;

Efault:
	regs->dar = ea;
	return -EFAULT;
}

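/*
 * For example, copying 7 bytes from ea == 0x1003 proceeds as one byte
 * at 0x1003 (max_align == 1), a 4-byte word at 0x1004, then a halfword
 * at 0x1008 for the remaining 2 bytes, so every access is naturally
 * aligned.
 */
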
static nokprobe_inline int copy_mem_in(u8 *dest, unsigned long ea, int nb, struct pt_regs *regs)
{
	int err;

	if (is_kernel_addr(ea))
		return __copy_mem_in(dest, ea, nb, regs);

	if (user_read_access_begin((void __user *)ea, nb)) {
		err = __copy_mem_in(dest, ea, nb, regs);
		user_read_access_end();
	} else {
		err = -EFAULT;
		regs->dar = ea;
	}

	return err;
}

static nokprobe_inline int read_mem_unaligned(unsigned long *dest,
					      unsigned long ea, int nb,
					      struct pt_regs *regs)
{
	union {
		unsigned long ul;
		u8 b[sizeof(unsigned long)];
	} u;
	int i;
	int err;

	u.ul = 0;
	i = IS_BE ? sizeof(unsigned long) - nb : 0;
	err = copy_mem_in(&u.b[i], ea, nb, regs);
	if (!err)
		*dest = u.ul;
	return err;
}

/*
 * Read memory at address ea for nb bytes, return 0 for success
 * or -EFAULT if an error occurred. N.B. nb must be 1, 2, 4 or 8.
 * If nb < sizeof(long), the result is right-justified on BE systems.
 */
static int read_mem(unsigned long *dest, unsigned long ea, int nb,
		    struct pt_regs *regs)
{
	if (!address_ok(regs, ea, nb))
		return -EFAULT;
	if ((ea & (nb - 1)) == 0)
		return read_mem_aligned(dest, ea, nb, regs);
	return read_mem_unaligned(dest, ea, nb, regs);
}
NOKPROBE_SYMBOL(read_mem);

static __always_inline int
__write_mem_aligned(unsigned long val, unsigned long ea, int nb, struct pt_regs *regs)
{
	switch (nb) {
	case 1:
		unsafe_put_user(val, (unsigned char __user *)ea, Efault);
		break;
	case 2:
		unsafe_put_user(val, (unsigned short __user *)ea, Efault);
		break;
	case 4:
		unsafe_put_user(val, (unsigned int __user *)ea, Efault);
		break;
#ifdef __powerpc64__
	case 8:
		unsafe_put_user(val, (unsigned long __user *)ea, Efault);
		break;
#endif
	}
	return 0;

Efault:
	regs->dar = ea;
	return -EFAULT;
}

static nokprobe_inline int
write_mem_aligned(unsigned long val, unsigned long ea, int nb, struct pt_regs *regs)
{
	int err;

	if (is_kernel_addr(ea))
		return __write_mem_aligned(val, ea, nb, regs);

	if (user_write_access_begin((void __user *)ea, nb)) {
		err = __write_mem_aligned(val, ea, nb, regs);
		user_write_access_end();
	} else {
		err = -EFAULT;
		regs->dar = ea;
	}

	return err;
}

/*
 * Copy from a buffer to userspace, using the largest possible
 * aligned accesses, up to sizeof(long).
 */
static __always_inline int __copy_mem_out(u8 *dest, unsigned long ea, int nb, struct pt_regs *regs)
{
	int c;

	for (; nb > 0; nb -= c) {
		c = max_align(ea);
		if (c > nb)
			c = max_align(nb);
		switch (c) {
		case 1:
			unsafe_put_user(*dest, (u8 __user *)ea, Efault);
			break;
		case 2:
			unsafe_put_user(*(u16 *)dest, (u16 __user *)ea, Efault);
			break;
		case 4:
			unsafe_put_user(*(u32 *)dest, (u32 __user *)ea, Efault);
			break;
#ifdef __powerpc64__
		case 8:
			unsafe_put_user(*(u64 *)dest, (u64 __user *)ea, Efault);
			break;
#endif
		}
		dest += c;
		ea += c;
	}
	return 0;

Efault:
	regs->dar = ea;
	return -EFAULT;
}

static nokprobe_inline int copy_mem_out(u8 *dest, unsigned long ea, int nb, struct pt_regs *regs)
{
	int err;

	if (is_kernel_addr(ea))
		return __copy_mem_out(dest, ea, nb, regs);

	if (user_write_access_begin((void __user *)ea, nb)) {
		err = __copy_mem_out(dest, ea, nb, regs);
		user_write_access_end();
	} else {
		err = -EFAULT;
		regs->dar = ea;
	}

	return err;
}

static nokprobe_inline int write_mem_unaligned(unsigned long val,
					       unsigned long ea, int nb,
					       struct pt_regs *regs)
{
	union {
		unsigned long ul;
		u8 b[sizeof(unsigned long)];
	} u;
	int i;

	u.ul = val;
	i = IS_BE ? sizeof(unsigned long) - nb : 0;
	return copy_mem_out(&u.b[i], ea, nb, regs);
}

/*
 * Write memory at address ea for nb bytes, return 0 for success
 * or -EFAULT if an error occurred. N.B. nb must be 1, 2, 4 or 8.
 */
static int write_mem(unsigned long val, unsigned long ea, int nb,
		     struct pt_regs *regs)
{
	if (!address_ok(regs, ea, nb))
		return -EFAULT;
	if ((ea & (nb - 1)) == 0)
		return write_mem_aligned(val, ea, nb, regs);
	return write_mem_unaligned(val, ea, nb, regs);
}
NOKPROBE_SYMBOL(write_mem);

#ifdef CONFIG_PPC_FPU
/*
 * These access either the real FP register or the image in the
 * thread_struct, depending on regs->msr & MSR_FP.
 */
static int do_fp_load(struct instruction_op *op, unsigned long ea,
		      struct pt_regs *regs, bool cross_endian)
{
	int err, rn, nb;
	union {
		int i;
		unsigned int u;
		float f;
		double d[2];
		unsigned long l[2];
		u8 b[2 * sizeof(double)];
	} u;

	nb = GETSIZE(op->type);
	if (nb > sizeof(u))
		return -EINVAL;
	if (!address_ok(regs, ea, nb))
		return -EFAULT;
	rn = op->reg;
	err = copy_mem_in(u.b, ea, nb, regs);
	if (err)
		return err;
	if (unlikely(cross_endian)) {
		do_byte_reverse(u.b, min(nb, 8));
		if (nb == 16)
			do_byte_reverse(&u.b[8], 8);
	}
	preempt_disable();
	if (nb == 4) {
		if (op->type & FPCONV)
			conv_sp_to_dp(&u.f, &u.d[0]);
		else if (op->type & SIGNEXT)
			u.l[0] = u.i;
		else
			u.l[0] = u.u;
	}
	if (regs->msr & MSR_FP)
		put_fpr(rn, &u.d[0]);
	else
		current->thread.TS_FPR(rn) = u.l[0];
	if (nb == 16) {
		/* lfdp */
		rn |= 1;
		if (regs->msr & MSR_FP)
			put_fpr(rn, &u.d[1]);
		else
			current->thread.TS_FPR(rn) = u.l[1];
	}
	preempt_enable();
	return 0;
}
NOKPROBE_SYMBOL(do_fp_load);

static int do_fp_store(struct instruction_op *op, unsigned long ea,
		       struct pt_regs *regs, bool cross_endian)
{
	int rn, nb;
	union {
		unsigned int u;
		float f;
		double d[2];
		unsigned long l[2];
		u8 b[2 * sizeof(double)];
	} u;

	nb = GETSIZE(op->type);
	if (nb > sizeof(u))
		return -EINVAL;
	if (!address_ok(regs, ea, nb))
		return -EFAULT;
	rn = op->reg;
	preempt_disable();
	if (regs->msr & MSR_FP)
		get_fpr(rn, &u.d[0]);
	else
		u.l[0] = current->thread.TS_FPR(rn);
	if (nb == 4) {
		if (op->type & FPCONV)
			conv_dp_to_sp(&u.d[0], &u.f);
		else
			u.u = u.l[0];
	}
	if (nb == 16) {
		rn |= 1;
		if (regs->msr & MSR_FP)
			get_fpr(rn, &u.d[1]);
		else
			u.l[1] = current->thread.TS_FPR(rn);
	}
	preempt_enable();
	if (unlikely(cross_endian)) {
		do_byte_reverse(u.b, min(nb, 8));
		if (nb == 16)
			do_byte_reverse(&u.b[8], 8);
	}
	return copy_mem_out(u.b, ea, nb, regs);
}
NOKPROBE_SYMBOL(do_fp_store);
#endif

#ifdef CONFIG_ALTIVEC
/* For Altivec/VMX, no need to worry about alignment */
static nokprobe_inline int do_vec_load(int rn, unsigned long ea,
				       int size, struct pt_regs *regs,
				       bool cross_endian)
{
	int err;
	union {
		__vector128 v;
		u8 b[sizeof(__vector128)];
	} u = {};

	if (size > sizeof(u))
		return -EINVAL;

	if (!address_ok(regs, ea & ~0xfUL, 16))
		return -EFAULT;
	/* align to multiple of size */
	ea &= ~(size - 1);
	err = copy_mem_in(&u.b[ea & 0xf], ea, size, regs);
	if (err)
		return err;
	if (unlikely(cross_endian))
		do_byte_reverse(&u.b[ea & 0xf], min_t(size_t, size, sizeof(u)));
	preempt_disable();
	if (regs->msr & MSR_VEC)
		put_vr(rn, &u.v);
	else
		current->thread.vr_state.vr[rn] = u.v;
	preempt_enable();
	return 0;
}

static nokprobe_inline int do_vec_store(int rn, unsigned long ea,
					int size, struct pt_regs *regs,
					bool cross_endian)
{
	union {
		__vector128 v;
		u8 b[sizeof(__vector128)];
	} u;

	if (size > sizeof(u))
		return -EINVAL;

	if (!address_ok(regs, ea & ~0xfUL, 16))
		return -EFAULT;
	/* align to multiple of size */
	ea &= ~(size - 1);

	preempt_disable();
	if (regs->msr & MSR_VEC)
		get_vr(rn, &u.v);
	else
		u.v = current->thread.vr_state.vr[rn];
	preempt_enable();
	if (unlikely(cross_endian))
		do_byte_reverse(&u.b[ea & 0xf], min_t(size_t, size, sizeof(u)));
	return copy_mem_out(&u.b[ea & 0xf], ea, size, regs);
}
#endif /* CONFIG_ALTIVEC */

#ifdef __powerpc64__
static nokprobe_inline int emulate_lq(struct pt_regs *regs, unsigned long ea,
				      int reg, bool cross_endian)
{
	int err;

	if (!address_ok(regs, ea, 16))
		return -EFAULT;
	/* if aligned, should be atomic */
	if ((ea & 0xf) == 0) {
		err = do_lq(ea, &regs->gpr[reg]);
	} else {
		err = read_mem(&regs->gpr[reg + IS_LE], ea, 8, regs);
		if (!err)
			err = read_mem(&regs->gpr[reg + IS_BE], ea + 8, 8, regs);
	}
	if (!err && unlikely(cross_endian))
		do_byte_reverse(&regs->gpr[reg], 16);
	return err;
}

static nokprobe_inline int emulate_stq(struct pt_regs *regs, unsigned long ea,
				       int reg, bool cross_endian)
{
	int err;
	unsigned long vals[2];

	if (!address_ok(regs, ea, 16))
		return -EFAULT;
	vals[0] = regs->gpr[reg];
	vals[1] = regs->gpr[reg + 1];
	if (unlikely(cross_endian))
		do_byte_reverse(vals, 16);

	/* if aligned, should be atomic */
	if ((ea & 0xf) == 0)
		return do_stq(ea, vals[0], vals[1]);

	err = write_mem(vals[IS_LE], ea, 8, regs);
	if (!err)
		err = write_mem(vals[IS_BE], ea + 8, 8, regs);
	return err;
}
#endif /* __powerpc64 */

#ifdef CONFIG_VSX
static nokprobe_inline void emulate_vsx_load(struct instruction_op *op, union vsx_reg *reg,
					     const void *mem, bool rev)
{
	int size, read_size;
	int i, j;
	const unsigned int *wp;
	const unsigned short *hp;
	const unsigned char *bp;

	size = GETSIZE(op->type);
	reg->d[0] = reg->d[1] = 0;

	switch (op->element_size) {
	case 32:
		/* [p]lxvp[x] */
	case 16:
		/* whole vector; lxv[x] or lxvl[l] */
		if (size == 0)
			break;
		memcpy(reg, mem, size);
		if (IS_LE && (op->vsx_flags & VSX_LDLEFT))
			rev = !rev;
		if (rev)
			do_byte_reverse(reg, size);
		break;
	case 8:
		/* scalar loads, lxvd2x, lxvdsx */
		read_size = (size >= 8) ? 8 : size;
		i = IS_LE ? 8 : 8 - read_size;
		memcpy(&reg->b[i], mem, read_size);
		if (rev)
			do_byte_reverse(&reg->b[i], 8);
		if (size < 8) {
			if (op->type & SIGNEXT) {
				/* size == 4 is the only case here */
				reg->d[IS_LE] = (signed int) reg->d[IS_LE];
			} else if (op->vsx_flags & VSX_FPCONV) {
				preempt_disable();
				conv_sp_to_dp(&reg->fp[1 + IS_LE],
					      &reg->dp[IS_LE]);
				preempt_enable();
			}
		} else {
			if (size == 16) {
				unsigned long v = *(unsigned long *)(mem + 8);
				reg->d[IS_BE] = !rev ? v : byterev_8(v);
			} else if (op->vsx_flags & VSX_SPLAT)
				reg->d[IS_BE] = reg->d[IS_LE];
		}
		break;
	case 4:
		/* lxvw4x, lxvwsx */
		wp = mem;
		for (j = 0; j < size / 4; ++j) {
			i = IS_LE ? 3 - j : j;
			reg->w[i] = !rev ? *wp++ : byterev_4(*wp++);
		}
		if (op->vsx_flags & VSX_SPLAT) {
			u32 val = reg->w[IS_LE ? 3 : 0];
			for (; j < 4; ++j) {
				i = IS_LE ? 3 - j : j;
				reg->w[i] = val;
			}
		}
		break;
	case 2:
		/* lxvh8x */
		hp = mem;
		for (j = 0; j < size / 2; ++j) {
			i = IS_LE ? 7 - j : j;
			reg->h[i] = !rev ? *hp++ : byterev_2(*hp++);
		}
		break;
	case 1:
		/* lxvb16x */
		bp = mem;
		for (j = 0; j < size; ++j) {
			i = IS_LE ? 15 - j : j;
			reg->b[i] = *bp++;
		}
		break;
	}
}

static nokprobe_inline void emulate_vsx_store(struct instruction_op *op, const union vsx_reg *reg,
					      void *mem, bool rev)
{
	int size, write_size;
	int i, j;
	union vsx_reg buf;
	unsigned int *wp;
	unsigned short *hp;
	unsigned char *bp;

	size = GETSIZE(op->type);

	switch (op->element_size) {
	case 32:
		/* [p]stxvp[x] */
		if (size == 0)
			break;
		if (rev) {
			/* reverse 32 bytes */
			union vsx_reg buf32[2];
			buf32[0].d[0] = byterev_8(reg[1].d[1]);
			buf32[0].d[1] = byterev_8(reg[1].d[0]);
			buf32[1].d[0] = byterev_8(reg[0].d[1]);
			buf32[1].d[1] = byterev_8(reg[0].d[0]);
			memcpy(mem, buf32, size);
		} else {
			memcpy(mem, reg, size);
		}
		break;
	case 16:
		/* stxv, stxvx, stxvl, stxvll */
		if (size == 0)
			break;
		if (IS_LE && (op->vsx_flags & VSX_LDLEFT))
			rev = !rev;
		if (rev) {
			/* reverse 16 bytes */
			buf.d[0] = byterev_8(reg->d[1]);
			buf.d[1] = byterev_8(reg->d[0]);
			reg = &buf;
		}
		memcpy(mem, reg, size);
		break;
	case 8:
		/* scalar stores, stxvd2x */
		write_size = (size >= 8) ? 8 : size;
		i = IS_LE ? 8 : 8 - write_size;
		if (size < 8 && op->vsx_flags & VSX_FPCONV) {
			buf.d[0] = buf.d[1] = 0;
			preempt_disable();
			conv_dp_to_sp(&reg->dp[IS_LE], &buf.fp[1 + IS_LE]);
			preempt_enable();
			reg = &buf;
		}
		memcpy(mem, &reg->b[i], write_size);
		if (size == 16)
			memcpy(mem + 8, &reg->d[IS_BE], 8);
		if (unlikely(rev)) {
			do_byte_reverse(mem, write_size);
			if (size == 16)
				do_byte_reverse(mem + 8, 8);
		}
		break;
	case 4:
		/* stxvw4x */
		wp = mem;
		for (j = 0; j < size / 4; ++j) {
			i = IS_LE ? 3 - j : j;
			*wp++ = !rev ? reg->w[i] : byterev_4(reg->w[i]);
		}
		break;
	case 2:
		/* stxvh8x */
		hp = mem;
		for (j = 0; j < size / 2; ++j) {
			i = IS_LE ? 7 - j : j;
			*hp++ = !rev ? reg->h[i] : byterev_2(reg->h[i]);
		}
		break;
	case 1:
		/* stxvb16x */
		bp = mem;
		for (j = 0; j < size; ++j) {
			i = IS_LE ? 15 - j : j;
			*bp++ = reg->b[i];
		}
		break;
	}
}

static nokprobe_inline int do_vsx_load(struct instruction_op *op,
				       unsigned long ea, struct pt_regs *regs,
				       bool cross_endian)
{
	int reg = op->reg;
	int i, j, nr_vsx_regs;
	u8 mem[32];
	union vsx_reg buf[2];
	int size = GETSIZE(op->type);

	if (!address_ok(regs, ea, size) || copy_mem_in(mem, ea, size, regs))
		return -EFAULT;

	nr_vsx_regs = max(1ul, size / sizeof(__vector128));
	emulate_vsx_load(op, buf, mem, cross_endian);
	preempt_disable();
	if (reg < 32) {
		/* FP regs + extensions */
		if (regs->msr & MSR_FP) {
			for (i = 0; i < nr_vsx_regs; i++) {
				j = IS_LE ? nr_vsx_regs - i - 1 : i;
				load_vsrn(reg + i, &buf[j].v);
			}
		} else {
			for (i = 0; i < nr_vsx_regs; i++) {
				j = IS_LE ? nr_vsx_regs - i - 1 : i;
				current->thread.fp_state.fpr[reg + i][0] = buf[j].d[0];
				current->thread.fp_state.fpr[reg + i][1] = buf[j].d[1];
			}
		}
	} else {
		if (regs->msr & MSR_VEC) {
			for (i = 0; i < nr_vsx_regs; i++) {
				j = IS_LE ? nr_vsx_regs - i - 1 : i;
				load_vsrn(reg + i, &buf[j].v);
			}
		} else {
			for (i = 0; i < nr_vsx_regs; i++) {
				j = IS_LE ? nr_vsx_regs - i - 1 : i;
				current->thread.vr_state.vr[reg - 32 + i] = buf[j].v;
			}
		}
	}
	preempt_enable();
	return 0;
}

static nokprobe_inline int do_vsx_store(struct instruction_op *op,
					unsigned long ea, struct pt_regs *regs,
					bool cross_endian)
{
	int reg = op->reg;
	int i, j, nr_vsx_regs;
	u8 mem[32];
	union vsx_reg buf[2];
	int size = GETSIZE(op->type);

	if (!address_ok(regs, ea, size))
		return -EFAULT;

	nr_vsx_regs = max(1ul, size / sizeof(__vector128));
	preempt_disable();
	if (reg < 32) {
		/* FP regs + extensions */
		if (regs->msr & MSR_FP) {
			for (i = 0; i < nr_vsx_regs; i++) {
				j = IS_LE ? nr_vsx_regs - i - 1 : i;
				store_vsrn(reg + i, &buf[j].v);
			}
		} else {
			for (i = 0; i < nr_vsx_regs; i++) {
				j = IS_LE ? nr_vsx_regs - i - 1 : i;
				buf[j].d[0] = current->thread.fp_state.fpr[reg + i][0];
				buf[j].d[1] = current->thread.fp_state.fpr[reg + i][1];
			}
		}
	} else {
		if (regs->msr & MSR_VEC) {
			for (i = 0; i < nr_vsx_regs; i++) {
				j = IS_LE ? nr_vsx_regs - i - 1 : i;
				store_vsrn(reg + i, &buf[j].v);
			}
		} else {
			for (i = 0; i < nr_vsx_regs; i++) {
				j = IS_LE ? nr_vsx_regs - i - 1 : i;
				buf[j].v = current->thread.vr_state.vr[reg - 32 + i];
			}
		}
	}
	preempt_enable();
	emulate_vsx_store(op, buf, mem, cross_endian);
	return copy_mem_out(mem, ea, size, regs);
}
#endif /* CONFIG_VSX */

static __always_inline int __emulate_dcbz(unsigned long ea)
{
	unsigned long i;
	unsigned long size = l1_dcache_bytes();

	for (i = 0; i < size; i += sizeof(long))
		unsafe_put_user(0, (unsigned long __user *)(ea + i), Efault);

	return 0;

Efault:
	return -EFAULT;
}

int emulate_dcbz(unsigned long ea, struct pt_regs *regs)
{
	int err;
	unsigned long size = l1_dcache_bytes();

	ea = truncate_if_32bit(regs->msr, ea);
	ea &= ~(size - 1);
	if (!address_ok(regs, ea, size))
		return -EFAULT;

	if (is_kernel_addr(ea)) {
		err = __emulate_dcbz(ea);
	} else if (user_write_access_begin((void __user *)ea, size)) {
		err = __emulate_dcbz(ea);
		user_write_access_end();
	} else {
		err = -EFAULT;
	}

	if (err)
		regs->dar = ea;

	return err;
}
NOKPROBE_SYMBOL(emulate_dcbz);

#define __put_user_asmx(x, addr, err, op, cr)		\
	__asm__ __volatile__(				\
		".machine push\n"			\
		".machine power8\n"			\
		"1: " op " %2,0,%3\n"			\
		".machine pop\n"			\
		" mfcr %1\n"				\
		"2:\n"					\
		".section .fixup,\"ax\"\n"		\
		"3: li %0,%4\n"				\
		" b 2b\n"				\
		".previous\n"				\
		EX_TABLE(1b, 3b)			\
		: "=r" (err), "=r" (cr)			\
		: "r" (x), "r" (addr), "i" (-EFAULT), "0" (err))

#define __get_user_asmx(x, addr, err, op)		\
	__asm__ __volatile__(				\
		".machine push\n"			\
		".machine power8\n"			\
		"1: "op" %1,0,%2\n"			\
		".machine pop\n"			\
		"2:\n"					\
		".section .fixup,\"ax\"\n"		\
		"3: li %0,%3\n"				\
		" b 2b\n"				\
		".previous\n"				\
		EX_TABLE(1b, 3b)			\
		: "=r" (err), "=r" (x)			\
		: "r" (addr), "i" (-EFAULT), "0" (err))

#define __cacheop_user_asmx(addr, err, op)		\
	__asm__ __volatile__(				\
		"1: "op" 0,%1\n"			\
		"2:\n"					\
		".section .fixup,\"ax\"\n"		\
		"3: li %0,%3\n"				\
		" b 2b\n"				\
		".previous\n"				\
		EX_TABLE(1b, 3b)			\
		: "=r" (err)				\
		: "r" (addr), "i" (-EFAULT), "0" (err))

static nokprobe_inline void set_cr0(const struct pt_regs *regs,
				    struct instruction_op *op)
{
	long val = op->val;

	op->type |= SETCC;
	op->ccval = (regs->ccr & 0x0fffffff) | ((regs->xer >> 3) & 0x10000000);
	if (!(regs->msr & MSR_64BIT))
		val = (int) val;
	if (val < 0)
		op->ccval |= 0x80000000;
	else if (val > 0)
		op->ccval |= 0x40000000;
	else
		op->ccval |= 0x20000000;
}

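/*
 * CR0 ends up as exactly one of LT (0x80000000), GT (0x40000000) or
 * EQ (0x20000000), plus SO (0x10000000) copied from XER[SO] by the
 * (regs->xer >> 3) shift above.  For example, val == -5 with XER[SO]
 * set yields LT | SO in the top nibble.
 */
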
static nokprobe_inline void set_ca32(struct instruction_op *op, bool val)
{
	if (cpu_has_feature(CPU_FTR_ARCH_300)) {
		if (val)
			op->xerval |= XER_CA32;
		else
			op->xerval &= ~XER_CA32;
	}
}

static nokprobe_inline void add_with_carry(const struct pt_regs *regs,
					   struct instruction_op *op, int rd,
					   unsigned long val1, unsigned long val2,
					   unsigned long carry_in)
{
	unsigned long val = val1 + val2;

	if (carry_in)
		++val;
	op->type = COMPUTE | SETREG | SETXER;
	op->reg = rd;
	op->val = val;
	val = truncate_if_32bit(regs->msr, val);
	val1 = truncate_if_32bit(regs->msr, val1);
	op->xerval = regs->xer;
	if (val < val1 || (carry_in && val == val1))
		op->xerval |= XER_CA;
	else
		op->xerval &= ~XER_CA;

	set_ca32(op, (unsigned int)val < (unsigned int)val1 ||
			(carry_in && (unsigned int)val == (unsigned int)val1));
}

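/*
 * An unsigned addition carries out exactly when it wraps, i.e. when
 * the truncated result is smaller than an operand (or equal to it when
 * a carry came in).  For example, 0xffffffffffffffff + 1 gives 0,
 * which is < 0xffffffffffffffff, so XER[CA] is set; the same test on
 * the low 32 bits drives CA32.
 */
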
static nokprobe_inline void do_cmp_signed(const struct pt_regs *regs,
					  struct instruction_op *op,
					  long v1, long v2, int crfld)
{
	unsigned int crval, shift;

	op->type = COMPUTE | SETCC;
	crval = (regs->xer >> 31) & 1;		/* get SO bit */
	if (v1 < v2)
		crval |= 8;
	else if (v1 > v2)
		crval |= 4;
	else
		crval |= 2;
	shift = (7 - crfld) * 4;
	op->ccval = (regs->ccr & ~(0xf << shift)) | (crval << shift);
}

static nokprobe_inline void do_cmp_unsigned(const struct pt_regs *regs,
					    struct instruction_op *op,
					    unsigned long v1,
					    unsigned long v2, int crfld)
{
	unsigned int crval, shift;

	op->type = COMPUTE | SETCC;
	crval = (regs->xer >> 31) & 1;		/* get SO bit */
	if (v1 < v2)
		crval |= 8;
	else if (v1 > v2)
		crval |= 4;
	else
		crval |= 2;
	shift = (7 - crfld) * 4;
	op->ccval = (regs->ccr & ~(0xf << shift)) | (crval << shift);
}

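/*
 * crval encodes LT = 8, GT = 4, EQ = 2 and SO = 1, matching a 4-bit CR
 * field.  CR0 is the most significant nibble of the CR image, so
 * crfld == 0 gives shift == 28 and crfld == 7 gives shift == 0.
 */
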
static nokprobe_inline void do_cmpb(const struct pt_regs *regs,
				    struct instruction_op *op,
				    unsigned long v1, unsigned long v2)
{
	unsigned long long out_val, mask;
	int i;

	out_val = 0;
	for (i = 0; i < 8; i++) {
		mask = 0xffUL << (i * 8);
		if ((v1 & mask) == (v2 & mask))
			out_val |= mask;
	}
	op->val = out_val;
}

/*
 * The size parameter is used to adjust the equivalent popcnt instruction.
 * popcntb = 8, popcntw = 32, popcntd = 64
 */
static nokprobe_inline void do_popcnt(const struct pt_regs *regs,
				      struct instruction_op *op,
				      unsigned long v1, int size)
{
	unsigned long long out = v1;

	out -= (out >> 1) & 0x5555555555555555ULL;
	out = (0x3333333333333333ULL & out) +
	      (0x3333333333333333ULL & (out >> 2));
	out = (out + (out >> 4)) & 0x0f0f0f0f0f0f0f0fULL;

	if (size == 8) {	/* popcntb */
		op->val = out;
		return;
	}
	out += out >> 8;
	out += out >> 16;
	if (size == 32) {	/* popcntw */
		op->val = out & 0x0000003f0000003fULL;
		return;
	}

	out = (out + (out >> 32)) & 0x7f;
	op->val = out;	/* popcntd */
}

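/*
 * This is the classic SWAR popcount: after the three masking steps
 * each byte of "out" holds the population count of that byte.  For
 * example, v1 == 0x0303030303030303 leaves out == 0x0202020202020202,
 * which is already the popcntb result; the subsequent shift-and-add
 * steps fold those byte counts into word or doubleword totals.
 */
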
#ifdef CONFIG_PPC64
static nokprobe_inline void do_bpermd(const struct pt_regs *regs,
				      struct instruction_op *op,
				      unsigned long v1, unsigned long v2)
{
	unsigned char perm, idx;
	unsigned int i;

	perm = 0;
	for (i = 0; i < 8; i++) {
		idx = (v1 >> (i * 8)) & 0xff;
		if (idx < 64)
			if (v2 & PPC_BIT(idx))
				perm |= 1 << i;
	}
	op->val = perm;
}
#endif /* CONFIG_PPC64 */
/*
 * The size parameter adjusts the equivalent prty instruction.
 * prtyw = 32, prtyd = 64
 */
static nokprobe_inline void do_prty(const struct pt_regs *regs,
				    struct instruction_op *op,
				    unsigned long v, int size)
{
	unsigned long long res = v ^ (v >> 8);

	res ^= res >> 16;
	if (size == 32) {	/* prtyw */
		op->val = res & 0x0000000100000001ULL;
		return;
	}

	res ^= res >> 32;
	op->val = res & 1;	/* prtyd */
}

static nokprobe_inline int trap_compare(long v1, long v2)
{
	int ret = 0;

	if (v1 < v2)
		ret |= 0x10;
	else if (v1 > v2)
		ret |= 0x08;
	else
		ret |= 0x04;
	if ((unsigned long)v1 < (unsigned long)v2)
		ret |= 0x02;
	else if ((unsigned long)v1 > (unsigned long)v2)
		ret |= 0x01;
	return ret;
}

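/*
 * The return value mirrors the TO field of tw/td: 0x10 = signed less
 * than, 0x08 = signed greater than, 0x04 = equal, 0x02 = unsigned less
 * than, 0x01 = unsigned greater than.  A trap is taken when TO ANDed
 * with this value is non-zero, so TO == 31 traps unconditionally.
 */
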
/*
 * Elements of 32-bit rotate and mask instructions.
 */
#define MASK32(mb, me)	((0xffffffffUL >> (mb)) + \
			 ((signed long)-0x80000000L >> (me)) + ((me) >= (mb)))
#ifdef __powerpc64__
#define MASK64_L(mb)	(~0UL >> (mb))
#define MASK64_R(me)	((signed long)-0x8000000000000000L >> (me))
#define MASK64(mb, me)	(MASK64_L(mb) + MASK64_R(me) + ((me) >= (mb)))
#define DATA32(x)	(((x) & 0xffffffffUL) | (((x) & 0xffffffffUL) << 32))
#else
#define DATA32(x)	(x)
#endif
#define ROTATE(x, n)	((n) ? (((x) << (n)) | ((x) >> (8 * sizeof(long) - (n)))) : (x))

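/*
 * MASK32(mb, me) builds the rlwinm-style mask with ones from IBM bit
 * mb through bit me (bit 0 being the MSB): e.g. MASK32(0, 15) ==
 * 0xffff0000 and MASK32(24, 31) == 0x000000ff, with me < mb producing
 * a wrapped mask.  ROTATE() special-cases n == 0 because shifting by
 * the full word width is undefined behaviour in C.
 */
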
/*
 * Decode an instruction, and return information about it in *op
 * without changing *regs.
 * Integer arithmetic and logical instructions, branches, and barrier
 * instructions can be emulated just using the information in *op.
 *
 * Return value is 1 if the instruction can be emulated just by
 * updating *regs with the information in *op, -1 if we need the
 * GPRs but *regs doesn't contain the full register set, or 0
 * otherwise.
 */
int analyse_instr(struct instruction_op *op, const struct pt_regs *regs,
		  ppc_inst_t instr)
{
#ifdef CONFIG_PPC64
	unsigned int suffixopcode, prefixtype, prefix_r;
#endif
	unsigned int opcode, ra, rb, rc, rd, spr, u;
	unsigned long int imm;
	unsigned long int val, val2;
	unsigned int mb, me, sh;
	unsigned int word, suffix;
	long ival;

	word = ppc_inst_val(instr);
	suffix = ppc_inst_suffix(instr);

	op->type = COMPUTE;

	opcode = ppc_inst_primary_opcode(instr);
	switch (opcode) {
	case 16:	/* bc */
		op->type = BRANCH;
		imm = (signed short)(word & 0xfffc);
		if ((word & 2) == 0)
			imm += regs->nip;
		op->val = truncate_if_32bit(regs->msr, imm);
		if (word & 1)
			op->type |= SETLK;
		if (branch_taken(word, regs, op))
			op->type |= BRTAKEN;
		return 1;
	case 17:	/* sc */
		if ((word & 0xfe2) == 2)
			op->type = SYSCALL;
		else if (IS_ENABLED(CONFIG_PPC_BOOK3S_64) &&
				(word & 0xfe3) == 1) {	/* scv */
			op->type = SYSCALL_VECTORED_0;
			if (!cpu_has_feature(CPU_FTR_ARCH_300))
				goto unknown_opcode;
		} else
			op->type = UNKNOWN;
		return 0;
	case 18:	/* b */
		op->type = BRANCH | BRTAKEN;
		imm = word & 0x03fffffc;
		if (imm & 0x02000000)
			imm -= 0x04000000;
		if ((word & 2) == 0)
			imm += regs->nip;
		op->val = truncate_if_32bit(regs->msr, imm);
		if (word & 1)
			op->type |= SETLK;
		return 1;
	case 19:
		switch ((word >> 1) & 0x3ff) {
		case 0:		/* mcrf */
			op->type = COMPUTE + SETCC;
			rd = 7 - ((word >> 23) & 0x7);
			ra = 7 - ((word >> 18) & 0x7);
			rd *= 4;
			ra *= 4;
			val = (regs->ccr >> ra) & 0xf;
			op->ccval = (regs->ccr & ~(0xfUL << rd)) | (val << rd);
			return 1;

		case 16:	/* bclr */
		case 528:	/* bcctr */
			op->type = BRANCH;
			imm = (word & 0x400) ? regs->ctr : regs->link;
			op->val = truncate_if_32bit(regs->msr, imm);
			if (word & 1)
				op->type |= SETLK;
			if (branch_taken(word, regs, op))
				op->type |= BRTAKEN;
			return 1;

		case 18:	/* rfid, scary */
			if (user_mode(regs))
				goto priv;
			op->type = RFI;
			return 0;

		case 150:	/* isync */
			op->type = BARRIER | BARRIER_ISYNC;
			return 1;

		case 33:	/* crnor */
		case 129:	/* crandc */
		case 193:	/* crxor */
		case 225:	/* crnand */
		case 257:	/* crand */
		case 289:	/* creqv */
		case 417:	/* crorc */
		case 449:	/* cror */
			op->type = COMPUTE + SETCC;
			ra = (word >> 16) & 0x1f;
			rb = (word >> 11) & 0x1f;
			rd = (word >> 21) & 0x1f;
			ra = (regs->ccr >> (31 - ra)) & 1;
			rb = (regs->ccr >> (31 - rb)) & 1;
			val = (word >> (6 + ra * 2 + rb)) & 1;
			op->ccval = (regs->ccr & ~(1UL << (31 - rd))) |
				(val << (31 - rd));
			return 1;
		}
		break;
	case 31:
		switch ((word >> 1) & 0x3ff) {
		case 598:	/* sync */
			op->type = BARRIER + BARRIER_SYNC;
#ifdef __powerpc64__
			switch ((word >> 21) & 3) {
			case 1:		/* lwsync */
				op->type = BARRIER + BARRIER_LWSYNC;
				break;
			case 2:		/* ptesync */
				op->type = BARRIER + BARRIER_PTESYNC;
				break;
			}
#endif
			return 1;

		case 854:	/* eieio */
			op->type = BARRIER + BARRIER_EIEIO;
			return 1;
		}
		break;
	}

	rd = (word >> 21) & 0x1f;
	ra = (word >> 16) & 0x1f;
	rb = (word >> 11) & 0x1f;
	rc = (word >> 6) & 0x1f;

	switch (opcode) {
#ifdef __powerpc64__
	case 1:
		if (!cpu_has_feature(CPU_FTR_ARCH_31))
			goto unknown_opcode;

		prefix_r = GET_PREFIX_R(word);
		ra = GET_PREFIX_RA(suffix);
		rd = (suffix >> 21) & 0x1f;
		op->reg = rd;
		op->val = regs->gpr[rd];
		suffixopcode = get_op(suffix);
		prefixtype = (word >> 24) & 0x3;
		switch (prefixtype) {
		case 2:
			if (prefix_r && ra)
				return 0;
			switch (suffixopcode) {
			case 14:	/* paddi */
				op->type = COMPUTE | PREFIXED;
				op->val = mlsd_8lsd_ea(word, suffix, regs);
				goto compute_done;
			}
		}
		break;
	case 2:		/* tdi */
		if (rd & trap_compare(regs->gpr[ra], (short) word))
			goto trap;
		return 1;
#endif
	case 3:		/* twi */
		if (rd & trap_compare((int)regs->gpr[ra], (short) word))
			goto trap;
		return 1;

#ifdef __powerpc64__
	case 4:
		/*
		 * There are very many instructions with this primary opcode
		 * introduced in the ISA as early as v2.03. However, the ones
		 * we currently emulate were all introduced with ISA 3.0
		 */
		if (!cpu_has_feature(CPU_FTR_ARCH_300))
			goto unknown_opcode;

		switch (word & 0x3f) {
		case 48:	/* maddhd */
			asm volatile(PPC_MADDHD(%0, %1, %2, %3) :
				     "=r" (op->val) : "r" (regs->gpr[ra]),
				     "r" (regs->gpr[rb]), "r" (regs->gpr[rc]));
			goto compute_done;

		case 49:	/* maddhdu */
			asm volatile(PPC_MADDHDU(%0, %1, %2, %3) :
				     "=r" (op->val) : "r" (regs->gpr[ra]),
				     "r" (regs->gpr[rb]), "r" (regs->gpr[rc]));
			goto compute_done;

		case 51:	/* maddld */
			asm volatile(PPC_MADDLD(%0, %1, %2, %3) :
				     "=r" (op->val) : "r" (regs->gpr[ra]),
				     "r" (regs->gpr[rb]), "r" (regs->gpr[rc]));
			goto compute_done;
		}

		/*
		 * There are other instructions from ISA 3.0 with the same
		 * primary opcode which do not have emulation support yet.
		 */
		goto unknown_opcode;
#endif

	case 7:		/* mulli */
		op->val = regs->gpr[ra] * (short) word;
		goto compute_done;

	case 8:		/* subfic */
		imm = (short) word;
		add_with_carry(regs, op, rd, ~regs->gpr[ra], imm, 1);
		return 1;

	case 10:	/* cmpli */
		imm = (unsigned short) word;
		val = regs->gpr[ra];
#ifdef __powerpc64__
		if ((rd & 1) == 0)
			val = (unsigned int) val;
#endif
		do_cmp_unsigned(regs, op, val, imm, rd >> 2);
		return 1;

	case 11:	/* cmpi */
		imm = (short) word;
		val = regs->gpr[ra];
#ifdef __powerpc64__
		if ((rd & 1) == 0)
			val = (int) val;
#endif
		do_cmp_signed(regs, op, val, imm, rd >> 2);
		return 1;

	case 12:	/* addic */
		imm = (short) word;
		add_with_carry(regs, op, rd, regs->gpr[ra], imm, 0);
		return 1;

	case 13:	/* addic. */
		imm = (short) word;
		add_with_carry(regs, op, rd, regs->gpr[ra], imm, 0);
		set_cr0(regs, op);
		return 1;

	case 14:	/* addi */
		imm = (short) word;
		if (ra)
			imm += regs->gpr[ra];
		op->val = imm;
		goto compute_done;

	case 15:	/* addis */
		imm = ((short) word) << 16;
		if (ra)
			imm += regs->gpr[ra];
		op->val = imm;
		goto compute_done;

	case 19:
		if (((word >> 1) & 0x1f) == 2) {
			/* addpcis */
			if (!cpu_has_feature(CPU_FTR_ARCH_300))
				goto unknown_opcode;
			imm = (short) (word & 0xffc1);	/* d0 + d2 fields */
			imm |= (word >> 15) & 0x3e;	/* d1 field */
			op->val = regs->nip + (imm << 16) + 4;
			goto compute_done;
		}
		op->type = UNKNOWN;
		return 0;

	case 20:	/* rlwimi */
		mb = (word >> 6) & 0x1f;
		me = (word >> 1) & 0x1f;
		val = DATA32(regs->gpr[rd]);
		imm = MASK32(mb, me);
		op->val = (regs->gpr[ra] & ~imm) | (ROTATE(val, rb) & imm);
		goto logical_done;

	case 21:	/* rlwinm */
		mb = (word >> 6) & 0x1f;
		me = (word >> 1) & 0x1f;
		val = DATA32(regs->gpr[rd]);
		op->val = ROTATE(val, rb) & MASK32(mb, me);
		goto logical_done;

	case 23:	/* rlwnm */
		mb = (word >> 6) & 0x1f;
		me = (word >> 1) & 0x1f;
		rb = regs->gpr[rb] & 0x1f;
		val = DATA32(regs->gpr[rd]);
		op->val = ROTATE(val, rb) & MASK32(mb, me);
		goto logical_done;

	case 24:	/* ori */
		op->val = regs->gpr[rd] | (unsigned short) word;
		goto logical_done_nocc;

	case 25:	/* oris */
		imm = (unsigned short) word;
		op->val = regs->gpr[rd] | (imm << 16);
		goto logical_done_nocc;

	case 26:	/* xori */
		op->val = regs->gpr[rd] ^ (unsigned short) word;
		goto logical_done_nocc;

	case 27:	/* xoris */
		imm = (unsigned short) word;
		op->val = regs->gpr[rd] ^ (imm << 16);
		goto logical_done_nocc;

	case 28:	/* andi. */
		op->val = regs->gpr[rd] & (unsigned short) word;
		set_cr0(regs, op);
		goto logical_done_nocc;

	case 29:	/* andis. */
		imm = (unsigned short) word;
		op->val = regs->gpr[rd] & (imm << 16);
		set_cr0(regs, op);
		goto logical_done_nocc;

#ifdef __powerpc64__
	case 30:	/* rld* */
		mb = ((word >> 6) & 0x1f) | (word & 0x20);
		val = regs->gpr[rd];
		if ((word & 0x10) == 0) {
			sh = rb | ((word & 2) << 4);
			val = ROTATE(val, sh);
			switch ((word >> 2) & 3) {
			case 0:		/* rldicl */
				val &= MASK64_L(mb);
				break;
			case 1:		/* rldicr */
				val &= MASK64_R(mb);
				break;
			case 2:		/* rldic */
				val &= MASK64(mb, 63 - sh);
				break;
			case 3:		/* rldimi */
				imm = MASK64(mb, 63 - sh);
				val = (regs->gpr[ra] & ~imm) |
					(val & imm);
			}
			op->val = val;
			goto logical_done;
		} else {
			sh = regs->gpr[rb] & 0x3f;
			val = ROTATE(val, sh);
			switch ((word >> 1) & 7) {
			case 0:		/* rldcl */
				op->val = val & MASK64_L(mb);
				goto logical_done;
			case 1:		/* rldcr */
				op->val = val & MASK64_R(mb);
				goto logical_done;
			}
		}
#endif
		op->type = UNKNOWN;	/* illegal instruction */
		return 0;

	case 31:
		/* isel occupies 32 minor opcodes */
		if (((word >> 1) & 0x1f) == 15) {
			mb = (word >> 6) & 0x1f;	/* bc field */
			val = (regs->ccr >> (31 - mb)) & 1;
			val2 = (ra) ? regs->gpr[ra] : 0;

			op->val = (val) ? val2 : regs->gpr[rb];
			goto compute_done;
		}

		switch ((word >> 1) & 0x3ff) {
		case 4:		/* tw */
			if (rd == 0x1f ||
			    (rd & trap_compare((int)regs->gpr[ra],
					       (int)regs->gpr[rb])))
				goto trap;
			return 1;
#ifdef __powerpc64__
		case 68:	/* td */
			if (rd & trap_compare(regs->gpr[ra], regs->gpr[rb]))
				goto trap;
			return 1;
#endif
		case 83:	/* mfmsr */
			if (user_mode(regs))
				goto priv;
			op->type = MFMSR;
			op->reg = rd;
			return 0;
		case 146:	/* mtmsr */
			if (user_mode(regs))
				goto priv;
			op->type = MTMSR;
			op->reg = rd;
			op->val = 0xffffffff & ~(MSR_ME | MSR_LE);
			return 0;
#ifdef CONFIG_PPC64
		case 178:	/* mtmsrd */
			if (user_mode(regs))
				goto priv;
			op->type = MTMSR;
			op->reg = rd;
			/* only MSR_EE and MSR_RI get changed if bit 15 set */
			/* mtmsrd doesn't change MSR_HV, MSR_ME or MSR_LE */
			imm = (word & 0x10000) ? 0x8002 : 0xefffffffffffeffeUL;
			op->val = imm;
			return 0;
#endif

		case 19:	/* mfcr */
			imm = 0xffffffffUL;
			if ((word >> 20) & 1) {
				imm = 0xf0000000UL;
				for (sh = 0; sh < 8; ++sh) {
					if (word & (0x80000 >> sh))
						break;
					imm >>= 4;
				}
			}
			op->val = regs->ccr & imm;
			goto compute_done;

		case 128:	/* setb */
			if (!cpu_has_feature(CPU_FTR_ARCH_300))
				goto unknown_opcode;
			/*
			 * 'ra' encodes the CR field number (bfa) in the top 3 bits.
			 * Since each CR field is 4 bits,
			 * we can simply mask off the bottom two bits (bfa * 4)
			 * to yield the first bit in the CR field.
			 */
			ra = ra & ~0x3;
			/* 'val' stores bits of the CR field (bfa) */
			val = regs->ccr >> (CR0_SHIFT - ra);
			/* checks if the LT bit of CR field (bfa) is set */
			if (val & 8)
				op->val = -1;
			/* checks if the GT bit of CR field (bfa) is set */
			else if (val & 4)
				op->val = 1;
			else
				op->val = 0;
			goto compute_done;

		case 144:	/* mtcrf */
			op->type = COMPUTE + SETCC;
			imm = 0xf0000000UL;
			val = regs->gpr[rd];
			op->ccval = regs->ccr;
			for (sh = 0; sh < 8; ++sh) {
				if (word & (0x80000 >> sh))
					op->ccval = (op->ccval & ~imm) |
						(val & imm);
				imm >>= 4;
			}
			return 1;

		case 339:	/* mfspr */
			spr = ((word >> 16) & 0x1f) | ((word >> 6) & 0x3e0);
			op->type = MFSPR;
			op->reg = rd;
			op->spr = spr;
			if (spr == SPRN_XER || spr == SPRN_LR ||
			    spr == SPRN_CTR)
				return 1;
			return 0;

		case 467:	/* mtspr */
			spr = ((word >> 16) & 0x1f) | ((word >> 6) & 0x3e0);
			op->type = MTSPR;
			op->val = regs->gpr[rd];
			op->spr = spr;
			if (spr == SPRN_XER || spr == SPRN_LR ||
			    spr == SPRN_CTR)
				return 1;
			return 0;

/*
 * Compare instructions
 */
		case 0:		/* cmp */
			val = regs->gpr[ra];
			val2 = regs->gpr[rb];
#ifdef __powerpc64__
			if ((rd & 1) == 0) {
				/* word (32-bit) compare */
				val = (int) val;
				val2 = (int) val2;
			}
#endif
			do_cmp_signed(regs, op, val, val2, rd >> 2);
			return 1;

		case 32:	/* cmpl */
			val = regs->gpr[ra];
			val2 = regs->gpr[rb];
#ifdef __powerpc64__
			if ((rd & 1) == 0) {
				/* word (32-bit) compare */
				val = (unsigned int) val;
				val2 = (unsigned int) val2;
			}
#endif
			do_cmp_unsigned(regs, op, val, val2, rd >> 2);
			return 1;

		case 508:	/* cmpb */
			do_cmpb(regs, op, regs->gpr[rd], regs->gpr[rb]);
			goto logical_done_nocc;

/*
 * Arithmetic instructions
 */
		case 8:		/* subfc */
			add_with_carry(regs, op, rd, ~regs->gpr[ra],
				       regs->gpr[rb], 1);
			goto arith_done;
#ifdef __powerpc64__
		case 9:		/* mulhdu */
			asm("mulhdu %0,%1,%2" : "=r" (op->val) :
			    "r" (regs->gpr[ra]), "r" (regs->gpr[rb]));
			goto arith_done;
#endif
		case 10:	/* addc */
			add_with_carry(regs, op, rd, regs->gpr[ra],
				       regs->gpr[rb], 0);
			goto arith_done;

		case 11:	/* mulhwu */
			asm("mulhwu %0,%1,%2" : "=r" (op->val) :
			    "r" (regs->gpr[ra]), "r" (regs->gpr[rb]));
			goto arith_done;

		case 40:	/* subf */
			op->val = regs->gpr[rb] - regs->gpr[ra];
			goto arith_done;
#ifdef __powerpc64__
		case 73:	/* mulhd */
			asm("mulhd %0,%1,%2" : "=r" (op->val) :
			    "r" (regs->gpr[ra]), "r" (regs->gpr[rb]));
			goto arith_done;
#endif
		case 75:	/* mulhw */
			asm("mulhw %0,%1,%2" : "=r" (op->val) :
			    "r" (regs->gpr[ra]), "r" (regs->gpr[rb]));
			goto arith_done;

		case 104:	/* neg */
			op->val = -regs->gpr[ra];
			goto arith_done;

		case 136:	/* subfe */
			add_with_carry(regs, op, rd, ~regs->gpr[ra],
				       regs->gpr[rb], regs->xer & XER_CA);
			goto arith_done;

		case 138:	/* adde */
			add_with_carry(regs, op, rd, regs->gpr[ra],
				       regs->gpr[rb], regs->xer & XER_CA);
			goto arith_done;

		case 200:	/* subfze */
			add_with_carry(regs, op, rd, ~regs->gpr[ra], 0L,
				       regs->xer & XER_CA);
			goto arith_done;

		case 202:	/* addze */
			add_with_carry(regs, op, rd, regs->gpr[ra], 0L,
				       regs->xer & XER_CA);
			goto arith_done;

		case 232:	/* subfme */
			add_with_carry(regs, op, rd, ~regs->gpr[ra], -1L,
				       regs->xer & XER_CA);
			goto arith_done;
#ifdef __powerpc64__
		case 233:	/* mulld */
			op->val = regs->gpr[ra] * regs->gpr[rb];
			goto arith_done;
#endif
		case 234:	/* addme */
			add_with_carry(regs, op, rd, regs->gpr[ra], -1L,
				       regs->xer & XER_CA);
			goto arith_done;

		case 235:	/* mullw */
			op->val = (long)(int) regs->gpr[ra] *
				(int) regs->gpr[rb];

			goto arith_done;
#ifdef __powerpc64__
		case 265:	/* modud */
			if (!cpu_has_feature(CPU_FTR_ARCH_300))
				goto unknown_opcode;
			op->val = regs->gpr[ra] % regs->gpr[rb];
			goto compute_done;
#endif
		case 266:	/* add */
			op->val = regs->gpr[ra] + regs->gpr[rb];
			goto arith_done;

		case 267:	/* moduw */
			if (!cpu_has_feature(CPU_FTR_ARCH_300))
				goto unknown_opcode;
			op->val = (unsigned int) regs->gpr[ra] %
				(unsigned int) regs->gpr[rb];
			goto compute_done;
#ifdef __powerpc64__
		case 457:	/* divdu */
			op->val = regs->gpr[ra] / regs->gpr[rb];
			goto arith_done;
#endif
		case 459:	/* divwu */
			op->val = (unsigned int) regs->gpr[ra] /
				(unsigned int) regs->gpr[rb];
			goto arith_done;
#ifdef __powerpc64__
		case 489:	/* divd */
			op->val = (long int) regs->gpr[ra] /
				(long int) regs->gpr[rb];
			goto arith_done;
#endif
		case 491:	/* divw */
			op->val = (int) regs->gpr[ra] /
				(int) regs->gpr[rb];
			goto arith_done;
#ifdef __powerpc64__
		case 425:	/* divde[.] */
			asm volatile(PPC_DIVDE(%0, %1, %2) :
				     "=r" (op->val) : "r" (regs->gpr[ra]),
				     "r" (regs->gpr[rb]));
			goto arith_done;
		case 393:	/* divdeu[.] */
			asm volatile(PPC_DIVDEU(%0, %1, %2) :
				     "=r" (op->val) : "r" (regs->gpr[ra]),
				     "r" (regs->gpr[rb]));
			goto arith_done;
#endif
		case 755:	/* darn */
			if (!cpu_has_feature(CPU_FTR_ARCH_300))
				goto unknown_opcode;
			switch (ra & 0x3) {
			case 0:
				/* 32-bit conditioned */
				asm volatile(PPC_DARN(%0, 0) : "=r" (op->val));
				goto compute_done;

			case 1:
				/* 64-bit conditioned */
				asm volatile(PPC_DARN(%0, 1) : "=r" (op->val));
				goto compute_done;

			case 2:
				/* 64-bit raw */
				asm volatile(PPC_DARN(%0, 2) : "=r" (op->val));
				goto compute_done;
			}

			goto unknown_opcode;
#ifdef __powerpc64__
		case 777:	/* modsd */
			if (!cpu_has_feature(CPU_FTR_ARCH_300))
				goto unknown_opcode;
			op->val = (long int) regs->gpr[ra] %
				(long int) regs->gpr[rb];
			goto compute_done;
#endif
		case 779:	/* modsw */
			if (!cpu_has_feature(CPU_FTR_ARCH_300))
				goto unknown_opcode;
			op->val = (int) regs->gpr[ra] %
				(int) regs->gpr[rb];
			goto compute_done;

/*
 * Logical instructions
 */
		case 26:	/* cntlzw */
			val = (unsigned int) regs->gpr[rd];
			op->val = (val ? __builtin_clz(val) : 32);
			goto logical_done;
#ifdef __powerpc64__
		case 58:	/* cntlzd */
			val = regs->gpr[rd];
			op->val = (val ? __builtin_clzl(val) : 64);
			goto logical_done;
#endif
		case 28:	/* and */
			op->val = regs->gpr[rd] & regs->gpr[rb];
			goto logical_done;

		case 60:	/* andc */
			op->val = regs->gpr[rd] & ~regs->gpr[rb];
			goto logical_done;

		case 122:	/* popcntb */
			do_popcnt(regs, op, regs->gpr[rd], 8);
			goto logical_done_nocc;

		case 124:	/* nor */
			op->val = ~(regs->gpr[rd] | regs->gpr[rb]);
			goto logical_done;

		case 154:	/* prtyw */
			do_prty(regs, op, regs->gpr[rd], 32);
			goto logical_done_nocc;

		case 186:	/* prtyd */
			do_prty(regs, op, regs->gpr[rd], 64);
			goto logical_done_nocc;
#ifdef CONFIG_PPC64
		case 252:	/* bpermd */
			do_bpermd(regs, op, regs->gpr[rd], regs->gpr[rb]);
			goto logical_done_nocc;
#endif
		case 284:	/* eqv */
			op->val = ~(regs->gpr[rd] ^ regs->gpr[rb]);
			goto logical_done;
2076
2077
case 316: /* xor */
2078
op->val = regs->gpr[rd] ^ regs->gpr[rb];
2079
goto logical_done;
2080
2081
case 378: /* popcntw */
2082
do_popcnt(regs, op, regs->gpr[rd], 32);
2083
goto logical_done_nocc;
2084
2085
case 412: /* orc */
2086
op->val = regs->gpr[rd] | ~regs->gpr[rb];
2087
goto logical_done;
2088
2089
case 444: /* or */
2090
op->val = regs->gpr[rd] | regs->gpr[rb];
2091
goto logical_done;
2092
2093
case 476: /* nand */
2094
op->val = ~(regs->gpr[rd] & regs->gpr[rb]);
2095
goto logical_done;
2096
#ifdef CONFIG_PPC64
2097
case 506: /* popcntd */
2098
do_popcnt(regs, op, regs->gpr[rd], 64);
2099
goto logical_done_nocc;
2100
#endif
2101
case 538: /* cnttzw */
2102
if (!cpu_has_feature(CPU_FTR_ARCH_300))
2103
goto unknown_opcode;
2104
val = (unsigned int) regs->gpr[rd];
2105
op->val = (val ? __builtin_ctz(val) : 32);
2106
goto logical_done;
2107
#ifdef __powerpc64__
2108
case 570: /* cnttzd */
2109
if (!cpu_has_feature(CPU_FTR_ARCH_300))
2110
goto unknown_opcode;
2111
val = regs->gpr[rd];
2112
op->val = (val ? __builtin_ctzl(val) : 64);
2113
goto logical_done;
2114
#endif
2115
case 922: /* extsh */
2116
op->val = (signed short) regs->gpr[rd];
2117
goto logical_done;
2118
2119
case 954: /* extsb */
2120
op->val = (signed char) regs->gpr[rd];
2121
goto logical_done;
2122
#ifdef __powerpc64__
2123
case 986: /* extsw */
2124
op->val = (signed int) regs->gpr[rd];
2125
goto logical_done;
2126
#endif
2127
2128
/*
2129
* Shift instructions
2130
*/
2131
case 24: /* slw */
2132
sh = regs->gpr[rb] & 0x3f;
2133
if (sh < 32)
2134
op->val = (regs->gpr[rd] << sh) & 0xffffffffUL;
2135
else
2136
op->val = 0;
2137
goto logical_done;
2138
2139
case 536: /* srw */
2140
sh = regs->gpr[rb] & 0x3f;
2141
if (sh < 32)
2142
op->val = (regs->gpr[rd] & 0xffffffffUL) >> sh;
2143
else
2144
op->val = 0;
2145
goto logical_done;
2146
2147
case 792: /* sraw */
2148
op->type = COMPUTE + SETREG + SETXER;
2149
sh = regs->gpr[rb] & 0x3f;
2150
ival = (signed int) regs->gpr[rd];
2151
op->val = ival >> (sh < 32 ? sh : 31);
2152
op->xerval = regs->xer;
2153
if (ival < 0 && (sh >= 32 || (ival & ((1ul << sh) - 1)) != 0))
2154
op->xerval |= XER_CA;
2155
else
2156
op->xerval &= ~XER_CA;
2157
set_ca32(op, op->xerval & XER_CA);
2158
goto logical_done;
2159
2160
case 824: /* srawi */
2161
op->type = COMPUTE + SETREG + SETXER;
2162
sh = rb;
2163
ival = (signed int) regs->gpr[rd];
2164
op->val = ival >> sh;
2165
op->xerval = regs->xer;
2166
if (ival < 0 && (ival & ((1ul << sh) - 1)) != 0)
2167
op->xerval |= XER_CA;
2168
else
2169
op->xerval &= ~XER_CA;
2170
set_ca32(op, op->xerval & XER_CA);
2171
goto logical_done;
2172
2173
#ifdef __powerpc64__
2174
case 27: /* sld */
2175
sh = regs->gpr[rb] & 0x7f;
2176
if (sh < 64)
2177
op->val = regs->gpr[rd] << sh;
2178
else
2179
op->val = 0;
2180
goto logical_done;
2181
2182
case 539: /* srd */
2183
sh = regs->gpr[rb] & 0x7f;
2184
if (sh < 64)
2185
op->val = regs->gpr[rd] >> sh;
2186
else
2187
op->val = 0;
2188
goto logical_done;
2189
2190
case 794: /* srad */
2191
op->type = COMPUTE + SETREG + SETXER;
2192
sh = regs->gpr[rb] & 0x7f;
2193
ival = (signed long int) regs->gpr[rd];
2194
op->val = ival >> (sh < 64 ? sh : 63);
2195
op->xerval = regs->xer;
2196
if (ival < 0 && (sh >= 64 || (ival & ((1ul << sh) - 1)) != 0))
2197
op->xerval |= XER_CA;
2198
else
2199
op->xerval &= ~XER_CA;
2200
set_ca32(op, op->xerval & XER_CA);
2201
goto logical_done;
2202
2203
		case 826:	/* sradi with sh_5 = 0 */
		case 827:	/* sradi with sh_5 = 1 */
			op->type = COMPUTE + SETREG + SETXER;
			sh = rb | ((word & 2) << 4);
			ival = (signed long int) regs->gpr[rd];
			op->val = ival >> sh;
			op->xerval = regs->xer;
			if (ival < 0 && (ival & ((1ul << sh) - 1)) != 0)
				op->xerval |= XER_CA;
			else
				op->xerval &= ~XER_CA;
			set_ca32(op, op->xerval & XER_CA);
			goto logical_done;

		case 890:	/* extswsli with sh_5 = 0 */
		case 891:	/* extswsli with sh_5 = 1 */
			if (!cpu_has_feature(CPU_FTR_ARCH_300))
				goto unknown_opcode;
			op->type = COMPUTE + SETREG;
			sh = rb | ((word & 2) << 4);
			val = (signed int) regs->gpr[rd];
			if (sh)
				op->val = ROTATE(val, sh) & MASK64(0, 63 - sh);
			else
				op->val = val;
			goto logical_done;

#endif /* __powerpc64__ */

/*
 * Cache instructions
 */
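		/*
		 * For the cache ops we only record the type and effective
		 * address here and return 0; the operation itself is
		 * carried out later, in emulate_step(), after the address
		 * has been checked.
		 */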
		case 54:	/* dcbst */
			op->type = MKOP(CACHEOP, DCBST, 0);
			op->ea = xform_ea(word, regs);
			return 0;

		case 86:	/* dcbf */
			op->type = MKOP(CACHEOP, DCBF, 0);
			op->ea = xform_ea(word, regs);
			return 0;

		case 246:	/* dcbtst */
			op->type = MKOP(CACHEOP, DCBTST, 0);
			op->ea = xform_ea(word, regs);
			op->reg = rd;
			return 0;

		case 278:	/* dcbt */
			op->type = MKOP(CACHEOP, DCBT, 0);
			op->ea = xform_ea(word, regs);
			op->reg = rd;
			return 0;

		case 982:	/* icbi */
			op->type = MKOP(CACHEOP, ICBI, 0);
			op->ea = xform_ea(word, regs);
			return 0;

		case 1014:	/* dcbz */
			op->type = MKOP(CACHEOP, DCBZ, 0);
			op->ea = xform_ea(word, regs);
			return 0;
		}
		break;
	}

	/*
	 * Loads and stores.
	 */
	op->type = UNKNOWN;
	op->update_reg = ra;
	op->reg = rd;
	op->val = regs->gpr[rd];
	u = (word >> 20) & UPDATE;
	op->vsx_flags = 0;

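	/*
	 * The update (with-writeback) variants are detected from the
	 * encoding itself: the D-form pairs differ only in the low bit of
	 * the major opcode (e.g. lwz = 32, lwzu = 33), which is bit 26 of
	 * the word, so (word >> 20) & UPDATE picks it out directly.  For
	 * the X-form pairs under opcode 31 the corresponding bit is
	 * word & UPDATE (e.g. lwzx = 23, lwzux = 55), set up below.
	 */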
	switch (opcode) {
	case 31:
		u = word & UPDATE;
		op->ea = xform_ea(word, regs);
		switch ((word >> 1) & 0x3ff) {
		case 20:	/* lwarx */
			op->type = MKOP(LARX, 0, 4);
			break;

		case 150:	/* stwcx. */
			op->type = MKOP(STCX, 0, 4);
			break;

#ifdef CONFIG_PPC_HAS_LBARX_LHARX
		case 52:	/* lbarx */
			op->type = MKOP(LARX, 0, 1);
			break;

		case 694:	/* stbcx. */
			op->type = MKOP(STCX, 0, 1);
			break;

		case 116:	/* lharx */
			op->type = MKOP(LARX, 0, 2);
			break;

		case 726:	/* sthcx. */
			op->type = MKOP(STCX, 0, 2);
			break;
#endif
#ifdef __powerpc64__
		case 84:	/* ldarx */
			op->type = MKOP(LARX, 0, 8);
			break;

		case 214:	/* stdcx. */
			op->type = MKOP(STCX, 0, 8);
			break;

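		/*
		 * lqarx loads a quadword into an even/odd GPR pair, so an
		 * odd rd, or rd overlapping ra or rb, is an invalid form
		 * and is deliberately left as UNKNOWN here.
		 */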
		case 276:	/* lqarx */
			if (!((rd & 1) || rd == ra || rd == rb))
				op->type = MKOP(LARX, 0, 16);
			break;

		case 182:	/* stqcx. */
			if (!(rd & 1))
				op->type = MKOP(STCX, 0, 16);
			break;
#endif

		case 23:	/* lwzx */
		case 55:	/* lwzux */
			op->type = MKOP(LOAD, u, 4);
			break;

		case 87:	/* lbzx */
		case 119:	/* lbzux */
			op->type = MKOP(LOAD, u, 1);
			break;

#ifdef CONFIG_ALTIVEC
		/*
		 * Note: for the load/store vector element instructions,
		 * bits of the EA say which field of the VMX register to use.
		 */
		case 7:		/* lvebx */
			op->type = MKOP(LOAD_VMX, 0, 1);
			op->element_size = 1;
			break;

		case 39:	/* lvehx */
			op->type = MKOP(LOAD_VMX, 0, 2);
			op->element_size = 2;
			break;

		case 71:	/* lvewx */
			op->type = MKOP(LOAD_VMX, 0, 4);
			op->element_size = 4;
			break;

		case 103:	/* lvx */
		case 359:	/* lvxl */
			op->type = MKOP(LOAD_VMX, 0, 16);
			op->element_size = 16;
			break;

		case 135:	/* stvebx */
			op->type = MKOP(STORE_VMX, 0, 1);
			op->element_size = 1;
			break;

		case 167:	/* stvehx */
			op->type = MKOP(STORE_VMX, 0, 2);
			op->element_size = 2;
			break;

		case 199:	/* stvewx */
			op->type = MKOP(STORE_VMX, 0, 4);
			op->element_size = 4;
			break;

		case 231:	/* stvx */
		case 487:	/* stvxl */
			op->type = MKOP(STORE_VMX, 0, 16);
			break;
#endif /* CONFIG_ALTIVEC */

#ifdef __powerpc64__
		case 21:	/* ldx */
		case 53:	/* ldux */
			op->type = MKOP(LOAD, u, 8);
			break;

		case 149:	/* stdx */
		case 181:	/* stdux */
			op->type = MKOP(STORE, u, 8);
			break;
#endif

		case 151:	/* stwx */
		case 183:	/* stwux */
			op->type = MKOP(STORE, u, 4);
			break;

		case 215:	/* stbx */
		case 247:	/* stbux */
			op->type = MKOP(STORE, u, 1);
			break;

		case 279:	/* lhzx */
		case 311:	/* lhzux */
			op->type = MKOP(LOAD, u, 2);
			break;

#ifdef __powerpc64__
		case 341:	/* lwax */
		case 373:	/* lwaux */
			op->type = MKOP(LOAD, SIGNEXT | u, 4);
			break;
#endif

		case 343:	/* lhax */
		case 375:	/* lhaux */
			op->type = MKOP(LOAD, SIGNEXT | u, 2);
			break;

		case 407:	/* sthx */
		case 439:	/* sthux */
			op->type = MKOP(STORE, u, 2);
			break;

#ifdef __powerpc64__
		case 532:	/* ldbrx */
			op->type = MKOP(LOAD, BYTEREV, 8);
			break;

#endif
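		/*
		 * String loads/stores: lswx/stswx move the byte count held
		 * in XER[25:31] (the low 7 bits) through consecutive GPRs
		 * starting at rd, four bytes per register, wrapping from
		 * r31 to r0; lswi/stswi take an immediate count in the rb
		 * field, with 0 meaning 32 bytes.
		 */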
		case 533:	/* lswx */
			op->type = MKOP(LOAD_MULTI, 0, regs->xer & 0x7f);
			break;

		case 534:	/* lwbrx */
			op->type = MKOP(LOAD, BYTEREV, 4);
			break;

		case 597:	/* lswi */
			if (rb == 0)
				rb = 32;	/* # bytes to load */
			op->type = MKOP(LOAD_MULTI, 0, rb);
			op->ea = ra ? regs->gpr[ra] : 0;
			break;

#ifdef CONFIG_PPC_FPU
		case 535:	/* lfsx */
		case 567:	/* lfsux */
			op->type = MKOP(LOAD_FP, u | FPCONV, 4);
			break;

		case 599:	/* lfdx */
		case 631:	/* lfdux */
			op->type = MKOP(LOAD_FP, u, 8);
			break;

		case 663:	/* stfsx */
		case 695:	/* stfsux */
			op->type = MKOP(STORE_FP, u | FPCONV, 4);
			break;

		case 727:	/* stfdx */
		case 759:	/* stfdux */
			op->type = MKOP(STORE_FP, u, 8);
			break;

#ifdef __powerpc64__
		case 791:	/* lfdpx */
			op->type = MKOP(LOAD_FP, 0, 16);
			break;

		case 855:	/* lfiwax */
			op->type = MKOP(LOAD_FP, SIGNEXT, 4);
			break;

		case 887:	/* lfiwzx */
			op->type = MKOP(LOAD_FP, 0, 4);
			break;

		case 919:	/* stfdpx */
			op->type = MKOP(STORE_FP, 0, 16);
			break;

		case 983:	/* stfiwx */
			op->type = MKOP(STORE_FP, 0, 4);
			break;
#endif /* __powerpc64__ */
#endif /* CONFIG_PPC_FPU */

#ifdef __powerpc64__
		case 660:	/* stdbrx */
			op->type = MKOP(STORE, BYTEREV, 8);
			op->val = byterev_8(regs->gpr[rd]);
			break;

#endif
		case 661:	/* stswx */
			op->type = MKOP(STORE_MULTI, 0, regs->xer & 0x7f);
			break;

		case 662:	/* stwbrx */
			op->type = MKOP(STORE, BYTEREV, 4);
			op->val = byterev_4(regs->gpr[rd]);
			break;

		case 725:	/* stswi */
			if (rb == 0)
				rb = 32;	/* # bytes to store */
			op->type = MKOP(STORE_MULTI, 0, rb);
			op->ea = ra ? regs->gpr[ra] : 0;
			break;

		case 790:	/* lhbrx */
			op->type = MKOP(LOAD, BYTEREV, 2);
			break;

		case 918:	/* sthbrx */
			op->type = MKOP(STORE, BYTEREV, 2);
			op->val = byterev_2(regs->gpr[rd]);
			break;

#ifdef CONFIG_VSX
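		/*
		 * VSX has 64 registers, so the 6-bit XT/XS register number
		 * keeps its high bit in bit 0 of the word; hence the
		 * recurring "rd | ((word & 1) << 5)" below.
		 */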
		case 12:	/* lxsiwzx */
			op->reg = rd | ((word & 1) << 5);
			op->type = MKOP(LOAD_VSX, 0, 4);
			op->element_size = 8;
			break;

		case 76:	/* lxsiwax */
			op->reg = rd | ((word & 1) << 5);
			op->type = MKOP(LOAD_VSX, SIGNEXT, 4);
			op->element_size = 8;
			break;

		case 140:	/* stxsiwx */
			op->reg = rd | ((word & 1) << 5);
			op->type = MKOP(STORE_VSX, 0, 4);
			op->element_size = 8;
			break;

		case 268:	/* lxvx */
			if (!cpu_has_feature(CPU_FTR_ARCH_300))
				goto unknown_opcode;
			op->reg = rd | ((word & 1) << 5);
			op->type = MKOP(LOAD_VSX, 0, 16);
			op->element_size = 16;
			op->vsx_flags = VSX_CHECK_VEC;
			break;

		case 269:	/* lxvl */
		case 301: {	/* lxvll */
			int nb;
			if (!cpu_has_feature(CPU_FTR_ARCH_300))
				goto unknown_opcode;
			op->reg = rd | ((word & 1) << 5);
			op->ea = ra ? regs->gpr[ra] : 0;
			nb = regs->gpr[rb] & 0xff;
			if (nb > 16)
				nb = 16;
			op->type = MKOP(LOAD_VSX, 0, nb);
			op->element_size = 16;
			op->vsx_flags = ((word & 0x20) ? VSX_LDLEFT : 0) |
				VSX_CHECK_VEC;
			break;
		}
		case 332:	/* lxvdsx */
			op->reg = rd | ((word & 1) << 5);
			op->type = MKOP(LOAD_VSX, 0, 8);
			op->element_size = 8;
			op->vsx_flags = VSX_SPLAT;
			break;

		case 333:	/* lxvpx */
			if (!cpu_has_feature(CPU_FTR_ARCH_31))
				goto unknown_opcode;
			op->reg = VSX_REGISTER_XTP(rd);
			op->type = MKOP(LOAD_VSX, 0, 32);
			op->element_size = 32;
			break;

		case 364:	/* lxvwsx */
			if (!cpu_has_feature(CPU_FTR_ARCH_300))
				goto unknown_opcode;
			op->reg = rd | ((word & 1) << 5);
			op->type = MKOP(LOAD_VSX, 0, 4);
			op->element_size = 4;
			op->vsx_flags = VSX_SPLAT | VSX_CHECK_VEC;
			break;

		case 396:	/* stxvx */
			if (!cpu_has_feature(CPU_FTR_ARCH_300))
				goto unknown_opcode;
			op->reg = rd | ((word & 1) << 5);
			op->type = MKOP(STORE_VSX, 0, 16);
			op->element_size = 16;
			op->vsx_flags = VSX_CHECK_VEC;
			break;

		case 397:	/* stxvl */
		case 429: {	/* stxvll */
			int nb;
			if (!cpu_has_feature(CPU_FTR_ARCH_300))
				goto unknown_opcode;
			op->reg = rd | ((word & 1) << 5);
			op->ea = ra ? regs->gpr[ra] : 0;
			nb = regs->gpr[rb] & 0xff;
			if (nb > 16)
				nb = 16;
			op->type = MKOP(STORE_VSX, 0, nb);
			op->element_size = 16;
			op->vsx_flags = ((word & 0x20) ? VSX_LDLEFT : 0) |
				VSX_CHECK_VEC;
			break;
		}
		case 461:	/* stxvpx */
			if (!cpu_has_feature(CPU_FTR_ARCH_31))
				goto unknown_opcode;
			op->reg = VSX_REGISTER_XTP(rd);
			op->type = MKOP(STORE_VSX, 0, 32);
			op->element_size = 32;
			break;
		case 524:	/* lxsspx */
			op->reg = rd | ((word & 1) << 5);
			op->type = MKOP(LOAD_VSX, 0, 4);
			op->element_size = 8;
			op->vsx_flags = VSX_FPCONV;
			break;

		case 588:	/* lxsdx */
			op->reg = rd | ((word & 1) << 5);
			op->type = MKOP(LOAD_VSX, 0, 8);
			op->element_size = 8;
			break;

		case 652:	/* stxsspx */
			op->reg = rd | ((word & 1) << 5);
			op->type = MKOP(STORE_VSX, 0, 4);
			op->element_size = 8;
			op->vsx_flags = VSX_FPCONV;
			break;

		case 716:	/* stxsdx */
			op->reg = rd | ((word & 1) << 5);
			op->type = MKOP(STORE_VSX, 0, 8);
			op->element_size = 8;
			break;

		case 780:	/* lxvw4x */
			op->reg = rd | ((word & 1) << 5);
			op->type = MKOP(LOAD_VSX, 0, 16);
			op->element_size = 4;
			break;

		case 781:	/* lxsibzx */
			if (!cpu_has_feature(CPU_FTR_ARCH_300))
				goto unknown_opcode;
			op->reg = rd | ((word & 1) << 5);
			op->type = MKOP(LOAD_VSX, 0, 1);
			op->element_size = 8;
			op->vsx_flags = VSX_CHECK_VEC;
			break;

		case 812:	/* lxvh8x */
			if (!cpu_has_feature(CPU_FTR_ARCH_300))
				goto unknown_opcode;
			op->reg = rd | ((word & 1) << 5);
			op->type = MKOP(LOAD_VSX, 0, 16);
			op->element_size = 2;
			op->vsx_flags = VSX_CHECK_VEC;
			break;

		case 813:	/* lxsihzx */
			if (!cpu_has_feature(CPU_FTR_ARCH_300))
				goto unknown_opcode;
			op->reg = rd | ((word & 1) << 5);
			op->type = MKOP(LOAD_VSX, 0, 2);
			op->element_size = 8;
			op->vsx_flags = VSX_CHECK_VEC;
			break;

		case 844:	/* lxvd2x */
			op->reg = rd | ((word & 1) << 5);
			op->type = MKOP(LOAD_VSX, 0, 16);
			op->element_size = 8;
			break;

		case 876:	/* lxvb16x */
			if (!cpu_has_feature(CPU_FTR_ARCH_300))
				goto unknown_opcode;
			op->reg = rd | ((word & 1) << 5);
			op->type = MKOP(LOAD_VSX, 0, 16);
			op->element_size = 1;
			op->vsx_flags = VSX_CHECK_VEC;
			break;

		case 908:	/* stxvw4x */
			op->reg = rd | ((word & 1) << 5);
			op->type = MKOP(STORE_VSX, 0, 16);
			op->element_size = 4;
			break;

		case 909:	/* stxsibx */
			if (!cpu_has_feature(CPU_FTR_ARCH_300))
				goto unknown_opcode;
			op->reg = rd | ((word & 1) << 5);
			op->type = MKOP(STORE_VSX, 0, 1);
			op->element_size = 8;
			op->vsx_flags = VSX_CHECK_VEC;
			break;

		case 940:	/* stxvh8x */
			if (!cpu_has_feature(CPU_FTR_ARCH_300))
				goto unknown_opcode;
			op->reg = rd | ((word & 1) << 5);
			op->type = MKOP(STORE_VSX, 0, 16);
			op->element_size = 2;
			op->vsx_flags = VSX_CHECK_VEC;
			break;

		case 941:	/* stxsihx */
			if (!cpu_has_feature(CPU_FTR_ARCH_300))
				goto unknown_opcode;
			op->reg = rd | ((word & 1) << 5);
			op->type = MKOP(STORE_VSX, 0, 2);
			op->element_size = 8;
			op->vsx_flags = VSX_CHECK_VEC;
			break;

		case 972:	/* stxvd2x */
			op->reg = rd | ((word & 1) << 5);
			op->type = MKOP(STORE_VSX, 0, 16);
			op->element_size = 8;
			break;

		case 1004:	/* stxvb16x */
			if (!cpu_has_feature(CPU_FTR_ARCH_300))
				goto unknown_opcode;
			op->reg = rd | ((word & 1) << 5);
			op->type = MKOP(STORE_VSX, 0, 16);
			op->element_size = 1;
			op->vsx_flags = VSX_CHECK_VEC;
			break;

#endif /* CONFIG_VSX */
		}
		break;

	case 32:	/* lwz */
	case 33:	/* lwzu */
		op->type = MKOP(LOAD, u, 4);
		op->ea = dform_ea(word, regs);
		break;

	case 34:	/* lbz */
	case 35:	/* lbzu */
		op->type = MKOP(LOAD, u, 1);
		op->ea = dform_ea(word, regs);
		break;

	case 36:	/* stw */
	case 37:	/* stwu */
		op->type = MKOP(STORE, u, 4);
		op->ea = dform_ea(word, regs);
		break;

	case 38:	/* stb */
	case 39:	/* stbu */
		op->type = MKOP(STORE, u, 1);
		op->ea = dform_ea(word, regs);
		break;

	case 40:	/* lhz */
	case 41:	/* lhzu */
		op->type = MKOP(LOAD, u, 2);
		op->ea = dform_ea(word, regs);
		break;

	case 42:	/* lha */
	case 43:	/* lhau */
		op->type = MKOP(LOAD, SIGNEXT | u, 2);
		op->ea = dform_ea(word, regs);
		break;

	case 44:	/* sth */
	case 45:	/* sthu */
		op->type = MKOP(STORE, u, 2);
		op->ea = dform_ea(word, regs);
		break;

	case 46:	/* lmw */
		if (ra >= rd)
			break;		/* invalid form, ra in range to load */
		op->type = MKOP(LOAD_MULTI, 0, 4 * (32 - rd));
		op->ea = dform_ea(word, regs);
		break;

	case 47:	/* stmw */
		op->type = MKOP(STORE_MULTI, 0, 4 * (32 - rd));
		op->ea = dform_ea(word, regs);
		break;

#ifdef CONFIG_PPC_FPU
	case 48:	/* lfs */
	case 49:	/* lfsu */
		op->type = MKOP(LOAD_FP, u | FPCONV, 4);
		op->ea = dform_ea(word, regs);
		break;

	case 50:	/* lfd */
	case 51:	/* lfdu */
		op->type = MKOP(LOAD_FP, u, 8);
		op->ea = dform_ea(word, regs);
		break;

	case 52:	/* stfs */
	case 53:	/* stfsu */
		op->type = MKOP(STORE_FP, u | FPCONV, 4);
		op->ea = dform_ea(word, regs);
		break;

	case 54:	/* stfd */
	case 55:	/* stfdu */
		op->type = MKOP(STORE_FP, u, 8);
		op->ea = dform_ea(word, regs);
		break;
#endif

#ifdef __powerpc64__
	case 56:	/* lq */
		if (!((rd & 1) || (rd == ra)))
			op->type = MKOP(LOAD, 0, 16);
		op->ea = dqform_ea(word, regs);
		break;
#endif

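	/*
	 * Several of the remaining major opcodes are shared between
	 * instructions: the low bits of what would be the displacement
	 * field select the actual operation (e.g. word & 3 below), and
	 * are not part of the displacement, so the DS/DQ-form EA helpers
	 * mask them off.
	 */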
#ifdef CONFIG_VSX
	case 57:	/* lfdp, lxsd, lxssp */
		op->ea = dsform_ea(word, regs);
		switch (word & 3) {
		case 0:		/* lfdp */
			if (rd & 1)
				break;		/* reg must be even */
			op->type = MKOP(LOAD_FP, 0, 16);
			break;
		case 2:		/* lxsd */
			if (!cpu_has_feature(CPU_FTR_ARCH_300))
				goto unknown_opcode;
			op->reg = rd + 32;
			op->type = MKOP(LOAD_VSX, 0, 8);
			op->element_size = 8;
			op->vsx_flags = VSX_CHECK_VEC;
			break;
		case 3:		/* lxssp */
			if (!cpu_has_feature(CPU_FTR_ARCH_300))
				goto unknown_opcode;
			op->reg = rd + 32;
			op->type = MKOP(LOAD_VSX, 0, 4);
			op->element_size = 8;
			op->vsx_flags = VSX_FPCONV | VSX_CHECK_VEC;
			break;
		}
		break;
#endif /* CONFIG_VSX */

#ifdef __powerpc64__
	case 58:	/* ld[u], lwa */
		op->ea = dsform_ea(word, regs);
		switch (word & 3) {
		case 0:		/* ld */
			op->type = MKOP(LOAD, 0, 8);
			break;
		case 1:		/* ldu */
			op->type = MKOP(LOAD, UPDATE, 8);
			break;
		case 2:		/* lwa */
			op->type = MKOP(LOAD, SIGNEXT, 4);
			break;
		}
		break;
#endif

#ifdef CONFIG_VSX
	case 6:
		if (!cpu_has_feature(CPU_FTR_ARCH_31))
			goto unknown_opcode;
		op->ea = dqform_ea(word, regs);
		op->reg = VSX_REGISTER_XTP(rd);
		op->element_size = 32;
		switch (word & 0xf) {
		case 0:		/* lxvp */
			op->type = MKOP(LOAD_VSX, 0, 32);
			break;
		case 1:		/* stxvp */
			op->type = MKOP(STORE_VSX, 0, 32);
			break;
		}
		break;

	case 61:	/* stfdp, lxv, stxsd, stxssp, stxv */
		switch (word & 7) {
		case 0:		/* stfdp with LSB of DS field = 0 */
		case 4:		/* stfdp with LSB of DS field = 1 */
			op->ea = dsform_ea(word, regs);
			op->type = MKOP(STORE_FP, 0, 16);
			break;

		case 1:		/* lxv */
			if (!cpu_has_feature(CPU_FTR_ARCH_300))
				goto unknown_opcode;
			op->ea = dqform_ea(word, regs);
			if (word & 8)
				op->reg = rd + 32;
			op->type = MKOP(LOAD_VSX, 0, 16);
			op->element_size = 16;
			op->vsx_flags = VSX_CHECK_VEC;
			break;

		case 2:		/* stxsd with LSB of DS field = 0 */
		case 6:		/* stxsd with LSB of DS field = 1 */
			if (!cpu_has_feature(CPU_FTR_ARCH_300))
				goto unknown_opcode;
			op->ea = dsform_ea(word, regs);
			op->reg = rd + 32;
			op->type = MKOP(STORE_VSX, 0, 8);
			op->element_size = 8;
			op->vsx_flags = VSX_CHECK_VEC;
			break;

		case 3:		/* stxssp with LSB of DS field = 0 */
		case 7:		/* stxssp with LSB of DS field = 1 */
			if (!cpu_has_feature(CPU_FTR_ARCH_300))
				goto unknown_opcode;
			op->ea = dsform_ea(word, regs);
			op->reg = rd + 32;
			op->type = MKOP(STORE_VSX, 0, 4);
			op->element_size = 8;
			op->vsx_flags = VSX_FPCONV | VSX_CHECK_VEC;
			break;

		case 5:		/* stxv */
			if (!cpu_has_feature(CPU_FTR_ARCH_300))
				goto unknown_opcode;
			op->ea = dqform_ea(word, regs);
			if (word & 8)
				op->reg = rd + 32;
			op->type = MKOP(STORE_VSX, 0, 16);
			op->element_size = 16;
			op->vsx_flags = VSX_CHECK_VEC;
			break;
		}
		break;
#endif /* CONFIG_VSX */

#ifdef __powerpc64__
	case 62:	/* std[u], stq */
		op->ea = dsform_ea(word, regs);
		switch (word & 3) {
		case 0:		/* std */
			op->type = MKOP(STORE, 0, 8);
			break;
		case 1:		/* stdu */
			op->type = MKOP(STORE, UPDATE, 8);
			break;
		case 2:		/* stq */
			if (!(rd & 1))
				op->type = MKOP(STORE, 0, 16);
			break;
		}
		break;
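	/*
	 * Prefixed instructions: "word" here is the 4-byte prefix and
	 * "suffix" the following 4 bytes.  Bits 24-25 of the prefix give
	 * the prefix type; R = 1 selects PC-relative addressing, which is
	 * an invalid form when combined with a nonzero RA base register,
	 * hence the "if (prefix_r && ra) break;" checks below.
	 */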
	case 1: /* Prefixed instructions */
		if (!cpu_has_feature(CPU_FTR_ARCH_31))
			goto unknown_opcode;

		prefix_r = GET_PREFIX_R(word);
		ra = GET_PREFIX_RA(suffix);
		op->update_reg = ra;
		rd = (suffix >> 21) & 0x1f;
		op->reg = rd;
		op->val = regs->gpr[rd];

		suffixopcode = get_op(suffix);
		prefixtype = (word >> 24) & 0x3;
		switch (prefixtype) {
		case 0: /* Type 00 Eight-Byte Load/Store */
			if (prefix_r && ra)
				break;
			op->ea = mlsd_8lsd_ea(word, suffix, regs);
			switch (suffixopcode) {
			case 41:	/* plwa */
				op->type = MKOP(LOAD, PREFIXED | SIGNEXT, 4);
				break;
#ifdef CONFIG_VSX
			case 42:	/* plxsd */
				op->reg = rd + 32;
				op->type = MKOP(LOAD_VSX, PREFIXED, 8);
				op->element_size = 8;
				op->vsx_flags = VSX_CHECK_VEC;
				break;
			case 43:	/* plxssp */
				op->reg = rd + 32;
				op->type = MKOP(LOAD_VSX, PREFIXED, 4);
				op->element_size = 8;
				op->vsx_flags = VSX_FPCONV | VSX_CHECK_VEC;
				break;
			case 46:	/* pstxsd */
				op->reg = rd + 32;
				op->type = MKOP(STORE_VSX, PREFIXED, 8);
				op->element_size = 8;
				op->vsx_flags = VSX_CHECK_VEC;
				break;
			case 47:	/* pstxssp */
				op->reg = rd + 32;
				op->type = MKOP(STORE_VSX, PREFIXED, 4);
				op->element_size = 8;
				op->vsx_flags = VSX_FPCONV | VSX_CHECK_VEC;
				break;
			case 51:	/* plxv1 */
				op->reg += 32;
				fallthrough;
			case 50:	/* plxv0 */
				op->type = MKOP(LOAD_VSX, PREFIXED, 16);
				op->element_size = 16;
				op->vsx_flags = VSX_CHECK_VEC;
				break;
			case 55:	/* pstxv1 */
				op->reg = rd + 32;
				fallthrough;
			case 54:	/* pstxv0 */
				op->type = MKOP(STORE_VSX, PREFIXED, 16);
				op->element_size = 16;
				op->vsx_flags = VSX_CHECK_VEC;
				break;
#endif /* CONFIG_VSX */
			case 56:	/* plq */
				op->type = MKOP(LOAD, PREFIXED, 16);
				break;
			case 57:	/* pld */
				op->type = MKOP(LOAD, PREFIXED, 8);
				break;
#ifdef CONFIG_VSX
			case 58:	/* plxvp */
				op->reg = VSX_REGISTER_XTP(rd);
				op->type = MKOP(LOAD_VSX, PREFIXED, 32);
				op->element_size = 32;
				break;
#endif /* CONFIG_VSX */
			case 60:	/* pstq */
				op->type = MKOP(STORE, PREFIXED, 16);
				break;
			case 61:	/* pstd */
				op->type = MKOP(STORE, PREFIXED, 8);
				break;
#ifdef CONFIG_VSX
			case 62:	/* pstxvp */
				op->reg = VSX_REGISTER_XTP(rd);
				op->type = MKOP(STORE_VSX, PREFIXED, 32);
				op->element_size = 32;
				break;
#endif /* CONFIG_VSX */
			}
			break;
		case 1: /* Type 01 Eight-Byte Register-to-Register */
			break;
		case 2: /* Type 10 Modified Load/Store */
			if (prefix_r && ra)
				break;
			op->ea = mlsd_8lsd_ea(word, suffix, regs);
			switch (suffixopcode) {
			case 32:	/* plwz */
				op->type = MKOP(LOAD, PREFIXED, 4);
				break;
			case 34:	/* plbz */
				op->type = MKOP(LOAD, PREFIXED, 1);
				break;
			case 36:	/* pstw */
				op->type = MKOP(STORE, PREFIXED, 4);
				break;
			case 38:	/* pstb */
				op->type = MKOP(STORE, PREFIXED, 1);
				break;
			case 40:	/* plhz */
				op->type = MKOP(LOAD, PREFIXED, 2);
				break;
			case 42:	/* plha */
				op->type = MKOP(LOAD, PREFIXED | SIGNEXT, 2);
				break;
			case 44:	/* psth */
				op->type = MKOP(STORE, PREFIXED, 2);
				break;
			case 48:	/* plfs */
				op->type = MKOP(LOAD_FP, PREFIXED | FPCONV, 4);
				break;
			case 50:	/* plfd */
				op->type = MKOP(LOAD_FP, PREFIXED, 8);
				break;
			case 52:	/* pstfs */
				op->type = MKOP(STORE_FP, PREFIXED | FPCONV, 4);
				break;
			case 54:	/* pstfd */
				op->type = MKOP(STORE_FP, PREFIXED, 8);
				break;
			}
			break;
		case 3: /* Type 11 Modified Register-to-Register */
			break;
		}
#endif /* __powerpc64__ */

	}

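	/*
	 * Reject the invalid update forms: a load with update that
	 * targets its own base register is undefined in the ISA, and
	 * any update form with ra == 0 is invalid because RA = 0 denotes
	 * a literal zero base rather than r0.
	 */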
	if (OP_IS_LOAD_STORE(op->type) && (op->type & UPDATE)) {
		switch (GETTYPE(op->type)) {
		case LOAD:
			if (ra == rd)
				goto unknown_opcode;
			fallthrough;
		case STORE:
		case LOAD_FP:
		case STORE_FP:
			if (ra == 0)
				goto unknown_opcode;
		}
	}

#ifdef CONFIG_VSX
	if ((GETTYPE(op->type) == LOAD_VSX ||
	     GETTYPE(op->type) == STORE_VSX) &&
	    !cpu_has_feature(CPU_FTR_VSX)) {
		return -1;
	}
#endif /* CONFIG_VSX */

	return 0;

 unknown_opcode:
	op->type = UNKNOWN;
	return 0;

 logical_done:
	if (word & 1)
		set_cr0(regs, op);
 logical_done_nocc:
	op->reg = ra;
	op->type |= SETREG;
	return 1;

 arith_done:
	if (word & 1)
		set_cr0(regs, op);
 compute_done:
	op->reg = rd;
	op->type |= SETREG;
	return 1;

 priv:
	op->type = INTERRUPT | 0x700;
	op->val = SRR1_PROGPRIV;
	return 0;

 trap:
	op->type = INTERRUPT | 0x700;
	op->val = SRR1_PROGTRAP;
	return 0;
}
EXPORT_SYMBOL_GPL(analyse_instr);
NOKPROBE_SYMBOL(analyse_instr);

/*
 * For PPC32 we always use stwu with r1 to change the stack pointer.
 * So this emulated store may corrupt the exception frame; we have to
 * provide a trampoline for the exception frame, pushed below the
 * kprobed function's stack.  Hence we only update gpr[1] here and do
 * not emulate the store itself; the real store is done safely in the
 * exception return code, which checks this flag.
 */
static nokprobe_inline int handle_stack_update(unsigned long ea, struct pt_regs *regs)
{
	/*
	 * Check if the flag is already set, since that means we'd
	 * lose the previous value.
	 */
	WARN_ON(test_thread_flag(TIF_EMULATE_STACK_STORE));
	set_thread_flag(TIF_EMULATE_STACK_STORE);
	return 0;
}

static nokprobe_inline void do_signext(unsigned long *valp, int size)
{
	switch (size) {
	case 2:
		*valp = (signed short) *valp;
		break;
	case 4:
		*valp = (signed int) *valp;
		break;
	}
}

static nokprobe_inline void do_byterev(unsigned long *valp, int size)
{
	switch (size) {
	case 2:
		*valp = byterev_2(*valp);
		break;
	case 4:
		*valp = byterev_4(*valp);
		break;
#ifdef __powerpc64__
	case 8:
		*valp = byterev_8(*valp);
		break;
#endif
	}
}

/*
 * Emulate an instruction that can be executed just by updating
 * fields in *regs.
 */
void emulate_update_regs(struct pt_regs *regs, struct instruction_op *op)
{
	unsigned long next_pc;

	next_pc = truncate_if_32bit(regs->msr, regs->nip + GETLENGTH(op->type));
	switch (GETTYPE(op->type)) {
	case COMPUTE:
		if (op->type & SETREG)
			regs->gpr[op->reg] = op->val;
		if (op->type & SETCC)
			regs->ccr = op->ccval;
		if (op->type & SETXER)
			regs->xer = op->xerval;
		break;

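	/*
	 * Note the ordering below: LR is set from next_pc before next_pc
	 * is redirected to the branch target, matching bl/bcl semantics
	 * where LR receives the address of the instruction following the
	 * branch even when the branch is taken.
	 */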
	case BRANCH:
		if (op->type & SETLK)
			regs->link = next_pc;
		if (op->type & BRTAKEN)
			next_pc = op->val;
		if (op->type & DECCTR)
			--regs->ctr;
		break;

	case BARRIER:
		switch (op->type & BARRIER_MASK) {
		case BARRIER_SYNC:
			mb();
			break;
		case BARRIER_ISYNC:
			isync();
			break;
		case BARRIER_EIEIO:
			eieio();
			break;
#ifdef CONFIG_PPC64
		case BARRIER_LWSYNC:
			asm volatile("lwsync" : : : "memory");
			break;
		case BARRIER_PTESYNC:
			asm volatile("ptesync" : : : "memory");
			break;
#endif
		}
		break;

	case MFSPR:
		switch (op->spr) {
		case SPRN_XER:
			regs->gpr[op->reg] = regs->xer & 0xffffffffUL;
			break;
		case SPRN_LR:
			regs->gpr[op->reg] = regs->link;
			break;
		case SPRN_CTR:
			regs->gpr[op->reg] = regs->ctr;
			break;
		default:
			WARN_ON_ONCE(1);
		}
		break;

	case MTSPR:
		switch (op->spr) {
		case SPRN_XER:
			regs->xer = op->val & 0xffffffffUL;
			break;
		case SPRN_LR:
			regs->link = op->val;
			break;
		case SPRN_CTR:
			regs->ctr = op->val;
			break;
		default:
			WARN_ON_ONCE(1);
		}
		break;

	default:
		WARN_ON_ONCE(1);
	}
	regs_set_return_ip(regs, next_pc);
}
NOKPROBE_SYMBOL(emulate_update_regs);

/*
 * Emulate a previously-analysed load or store instruction.
 * Return values are:
 * 0 = instruction emulated successfully
 * -EFAULT = address out of range or access faulted (regs->dar
 *	     contains the faulting address)
 * -EACCES = misaligned access, instruction requires alignment
 * -EINVAL = unknown operation in *op
 */
int emulate_loadstore(struct pt_regs *regs, struct instruction_op *op)
{
	int err, size, type;
	int i, rd, nb;
	unsigned int cr;
	unsigned long val;
	unsigned long ea;
	bool cross_endian;

	err = 0;
	size = GETSIZE(op->type);
	type = GETTYPE(op->type);
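	/*
	 * cross_endian is true when the probed context runs with the
	 * opposite byte order to the kernel (its MSR_LE differs from the
	 * kernel's), in which case every value moved by the emulation
	 * must be byte-reversed to match what the hardware would have
	 * done.
	 */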
	cross_endian = (regs->msr & MSR_LE) != (MSR_KERNEL & MSR_LE);
	ea = truncate_if_32bit(regs->msr, op->ea);

	switch (type) {
	case LARX:
		if (ea & (size - 1))
			return -EACCES;	/* can't handle misaligned */
		if (!address_ok(regs, ea, size))
			return -EFAULT;
		err = 0;
		val = 0;
		switch (size) {
#ifdef CONFIG_PPC_HAS_LBARX_LHARX
		case 1:
			__get_user_asmx(val, ea, err, "lbarx");
			break;
		case 2:
			__get_user_asmx(val, ea, err, "lharx");
			break;
#endif
		case 4:
			__get_user_asmx(val, ea, err, "lwarx");
			break;
#ifdef __powerpc64__
		case 8:
			__get_user_asmx(val, ea, err, "ldarx");
			break;
		case 16:
			err = do_lqarx(ea, &regs->gpr[op->reg]);
			break;
#endif
		default:
			return -EINVAL;
		}
		if (err) {
			regs->dar = ea;
			break;
		}
		if (size < 16)
			regs->gpr[op->reg] = val;
		break;

	case STCX:
		if (ea & (size - 1))
			return -EACCES;	/* can't handle misaligned */
		if (!address_ok(regs, ea, size))
			return -EFAULT;
		err = 0;
		switch (size) {
#ifdef __powerpc64__
		case 1:
			__put_user_asmx(op->val, ea, err, "stbcx.", cr);
			break;
		case 2:
			__put_user_asmx(op->val, ea, err, "sthcx.", cr);
			break;
#endif
		case 4:
			__put_user_asmx(op->val, ea, err, "stwcx.", cr);
			break;
#ifdef __powerpc64__
		case 8:
			__put_user_asmx(op->val, ea, err, "stdcx.", cr);
			break;
		case 16:
			err = do_stqcx(ea, regs->gpr[op->reg],
				       regs->gpr[op->reg + 1], &cr);
			break;
#endif
		default:
			return -EINVAL;
		}
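		/*
		 * Build CR0 for the probed context: keep LT/GT/EQ as set
		 * by the real st[wd]cx. above (EQ reports success), and
		 * copy the SO bit from the context's XER into CR0[SO],
		 * since the conditional store ran with the kernel's XER.
		 */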
		if (!err)
			regs->ccr = (regs->ccr & 0x0fffffff) |
				(cr & 0xe0000000) |
				((regs->xer >> 3) & 0x10000000);
		else
			regs->dar = ea;
		break;

	case LOAD:
#ifdef __powerpc64__
		if (size == 16) {
			err = emulate_lq(regs, ea, op->reg, cross_endian);
			break;
		}
#endif
		err = read_mem(&regs->gpr[op->reg], ea, size, regs);
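		/*
		 * Post-process the value: sign-extend if requested, and
		 * byte-reverse iff exactly one of "byte-reversed load" and
		 * "cross-endian context" holds; lwbrx in a cross-endian
		 * context cancels out to a plain load.
		 */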
		if (!err) {
			if (op->type & SIGNEXT)
				do_signext(&regs->gpr[op->reg], size);
			if ((op->type & BYTEREV) == (cross_endian ? 0 : BYTEREV))
				do_byterev(&regs->gpr[op->reg], size);
		}
		break;

#ifdef CONFIG_PPC_FPU
	case LOAD_FP:
		/*
		 * If the instruction is in userspace, we can emulate it even
		 * if the VMX state is not live, because we have the state
		 * stored in the thread_struct. If the instruction is in
		 * the kernel, we must not touch the state in the thread_struct.
		 */
		if (!user_mode(regs) && !(regs->msr & MSR_FP))
			return 0;
		err = do_fp_load(op, ea, regs, cross_endian);
		break;
#endif
#ifdef CONFIG_ALTIVEC
	case LOAD_VMX:
		if (!user_mode(regs) && !(regs->msr & MSR_VEC))
			return 0;
		err = do_vec_load(op->reg, ea, size, regs, cross_endian);
		break;
#endif
#ifdef CONFIG_VSX
	case LOAD_VSX: {
		unsigned long msrbit = MSR_VSX;

		/*
		 * Some VSX instructions check the MSR_VEC bit rather than MSR_VSX
		 * when the target of the instruction is a vector register.
		 */
		if (op->reg >= 32 && (op->vsx_flags & VSX_CHECK_VEC))
			msrbit = MSR_VEC;
		if (!user_mode(regs) && !(regs->msr & msrbit))
			return 0;
		err = do_vsx_load(op, ea, regs, cross_endian);
		break;
	}
#endif
	case LOAD_MULTI:
		if (!address_ok(regs, ea, size))
			return -EFAULT;
		rd = op->reg;
		for (i = 0; i < size; i += 4) {
			unsigned int v32 = 0;

			nb = size - i;
			if (nb > 4)
				nb = 4;
			err = copy_mem_in((u8 *) &v32, ea, nb, regs);
			if (err)
				break;
			if (unlikely(cross_endian))
				v32 = byterev_4(v32);
			regs->gpr[rd] = v32;
			ea += 4;
			/* reg number wraps from 31 to 0 for lsw[ix] */
			rd = (rd + 1) & 0x1f;
		}
		break;

	case STORE:
#ifdef __powerpc64__
		if (size == 16) {
			err = emulate_stq(regs, ea, op->reg, cross_endian);
			break;
		}
#endif
		if ((op->type & UPDATE) && size == sizeof(long) &&
		    op->reg == 1 && op->update_reg == 1 && !user_mode(regs) &&
		    ea >= regs->gpr[1] - STACK_INT_FRAME_SIZE) {
			err = handle_stack_update(ea, regs);
			break;
		}
		if (unlikely(cross_endian))
			do_byterev(&op->val, size);
		err = write_mem(op->val, ea, size, regs);
		break;

3504
#ifdef CONFIG_PPC_FPU
3505
case STORE_FP:
3506
if (!user_mode(regs) && !(regs->msr & MSR_FP))
3507
return 0;
3508
err = do_fp_store(op, ea, regs, cross_endian);
3509
break;
3510
#endif
3511
#ifdef CONFIG_ALTIVEC
3512
case STORE_VMX:
3513
if (!user_mode(regs) && !(regs->msr & MSR_VEC))
3514
return 0;
3515
err = do_vec_store(op->reg, ea, size, regs, cross_endian);
3516
break;
3517
#endif
3518
#ifdef CONFIG_VSX
3519
case STORE_VSX: {
3520
unsigned long msrbit = MSR_VSX;
3521
3522
/*
3523
* Some VSX instructions check the MSR_VEC bit rather than MSR_VSX
3524
* when the target of the instruction is a vector register.
3525
*/
3526
if (op->reg >= 32 && (op->vsx_flags & VSX_CHECK_VEC))
3527
msrbit = MSR_VEC;
3528
if (!user_mode(regs) && !(regs->msr & msrbit))
3529
return 0;
3530
err = do_vsx_store(op, ea, regs, cross_endian);
3531
break;
3532
}
3533
#endif
3534
case STORE_MULTI:
3535
if (!address_ok(regs, ea, size))
3536
return -EFAULT;
3537
rd = op->reg;
3538
for (i = 0; i < size; i += 4) {
3539
unsigned int v32 = regs->gpr[rd];
3540
3541
nb = size - i;
3542
if (nb > 4)
3543
nb = 4;
3544
if (unlikely(cross_endian))
3545
v32 = byterev_4(v32);
3546
err = copy_mem_out((u8 *) &v32, ea, nb, regs);
3547
if (err)
3548
break;
3549
ea += 4;
3550
/* reg number wraps from 31 to 0 for stsw[ix] */
3551
rd = (rd + 1) & 0x1f;
3552
}
3553
break;
3554
3555
	default:
		return -EINVAL;
	}

	if (err)
		return err;

	if (op->type & UPDATE)
		regs->gpr[op->update_reg] = op->ea;

	return 0;
}
NOKPROBE_SYMBOL(emulate_loadstore);

/*
 * Emulate instructions that cause a transfer of control,
 * loads and stores, and a few other instructions.
 * Returns 1 if the step was emulated, 0 if not,
 * or -1 if the instruction is one that should not be stepped,
 * such as an rfid, or a mtmsrd that would clear MSR_RI.
 */
int emulate_step(struct pt_regs *regs, ppc_inst_t instr)
{
	struct instruction_op op;
	int r, err, type;
	unsigned long val;
	unsigned long ea;

	r = analyse_instr(&op, regs, instr);
	if (r < 0)
		return r;
	if (r > 0) {
		emulate_update_regs(regs, &op);
		return 1;
	}

	err = 0;
	type = GETTYPE(op.type);

	if (OP_IS_LOAD_STORE(type)) {
		err = emulate_loadstore(regs, &op);
		if (err)
			return 0;
		goto instr_done;
	}

	switch (type) {
	case CACHEOP:
		ea = truncate_if_32bit(regs->msr, op.ea);
		if (!address_ok(regs, ea, 8))
			return 0;
		switch (op.type & CACHEOP_MASK) {
		case DCBST:
			__cacheop_user_asmx(ea, err, "dcbst");
			break;
		case DCBF:
			__cacheop_user_asmx(ea, err, "dcbf");
			break;
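		/*
		 * For dcbt/dcbtst, op.reg holds the TH hint field; only
		 * the plain TH = 0 forms are emulated, as ordinary
		 * prefetches.  The other hint encodings are treated as
		 * no-ops here.
		 */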
		case DCBTST:
			if (op.reg == 0)
				prefetchw((void *) ea);
			break;
		case DCBT:
			if (op.reg == 0)
				prefetch((void *) ea);
			break;
		case ICBI:
			__cacheop_user_asmx(ea, err, "icbi");
			break;
		case DCBZ:
			err = emulate_dcbz(ea, regs);
			break;
		}
		if (err) {
			regs->dar = ea;
			return 0;
		}
		goto instr_done;

	case MFMSR:
		regs->gpr[op.reg] = regs->msr & MSR_MASK;
		goto instr_done;

	case MTMSR:
		val = regs->gpr[op.reg];
		if ((val & MSR_RI) == 0)
			/* can't step mtmsr[d] that would clear MSR_RI */
			return -1;
		/* here op.val is the mask of bits to change */
		regs_set_return_msr(regs, (regs->msr & ~op.val) | (val & op.val));
		goto instr_done;

	case SYSCALL:	/* sc */
		/*
		 * Per ISA v3.1, section 7.5.15 'Trace Interrupt', we can't
		 * single step a system call instruction:
		 *
		 * Successful completion for an instruction means that the
		 * instruction caused no other interrupt. Thus a Trace
		 * interrupt never occurs for a System Call or System Call
		 * Vectored instruction, or for a Trap instruction that
		 * traps.
		 */
		return -1;
	case SYSCALL_VECTORED_0:	/* scv 0 */
		return -1;
	case RFI:
		return -1;
	}
	return 0;

 instr_done:
	regs_set_return_ip(regs,
		truncate_if_32bit(regs->msr, regs->nip + GETLENGTH(op.type)));
	return 1;
}
NOKPROBE_SYMBOL(emulate_step);