GitHub Repository: awilliam/linux-vfio
Path: blob/master/arch/m68k/kernel/signal_mm.c

/*
 * linux/arch/m68k/kernel/signal.c
 *
 * Copyright (C) 1991, 1992 Linus Torvalds
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file COPYING in the main directory of this archive
 * for more details.
 */

/*
 * Linux/m68k support by Hamish Macdonald
 *
 * 68060 fixes by Jesper Skov
 *
 * 1997-12-01 Modified for POSIX.1b signals by Andreas Schwab
 *
 * mathemu support by Roman Zippel
 *  (Note: fpstate in the signal context is completely ignored for the emulator
 *         and the internal floating point format is put on stack)
 */

/*
 * ++roman (07/09/96): implemented signal stacks (specially for tosemu on
 * Atari :-) Current limitation: Only one sigstack can be active at one time.
 * If a second signal with SA_ONSTACK set arrives while working on a sigstack,
 * SA_ONSTACK is ignored. This behaviour avoids lots of trouble with nested
 * signal handlers!
 */

#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/syscalls.h>
#include <linux/errno.h>
#include <linux/wait.h>
#include <linux/ptrace.h>
#include <linux/unistd.h>
#include <linux/stddef.h>
#include <linux/highuid.h>
#include <linux/personality.h>
#include <linux/tty.h>
#include <linux/binfmts.h>
#include <linux/module.h>

#include <asm/setup.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/traps.h>
#include <asm/ucontext.h>

#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))

static const int frame_extra_sizes[16] = {
	[1]	= -1, /* sizeof(((struct frame *)0)->un.fmt1), */
	[2]	= sizeof(((struct frame *)0)->un.fmt2),
	[3]	= sizeof(((struct frame *)0)->un.fmt3),
	[4]	= sizeof(((struct frame *)0)->un.fmt4),
	[5]	= -1, /* sizeof(((struct frame *)0)->un.fmt5), */
	[6]	= -1, /* sizeof(((struct frame *)0)->un.fmt6), */
	[7]	= sizeof(((struct frame *)0)->un.fmt7),
	[8]	= -1, /* sizeof(((struct frame *)0)->un.fmt8), */
	[9]	= sizeof(((struct frame *)0)->un.fmt9),
	[10]	= sizeof(((struct frame *)0)->un.fmta),
	[11]	= sizeof(((struct frame *)0)->un.fmtb),
	[12]	= -1, /* sizeof(((struct frame *)0)->un.fmtc), */
	[13]	= -1, /* sizeof(((struct frame *)0)->un.fmtd), */
	[14]	= -1, /* sizeof(((struct frame *)0)->un.fmte), */
	[15]	= -1, /* sizeof(((struct frame *)0)->un.fmtf), */
};

int handle_kernel_fault(struct pt_regs *regs)
{
	const struct exception_table_entry *fixup;
	struct pt_regs *tregs;

	/* Are we prepared to handle this kernel fault? */
	fixup = search_exception_tables(regs->pc);
	if (!fixup)
		return 0;

	/* Create a new four word stack frame, discarding the old one. */
	regs->stkadj = frame_extra_sizes[regs->format];
	tregs = (struct pt_regs *)((long)regs + regs->stkadj);
	tregs->vector = regs->vector;
	tregs->format = 0;
	tregs->pc = fixup->fixup;
	tregs->sr = regs->sr;

	return 1;
}

/*
 * Atomically swap in the new signal mask, and wait for a signal.
 */
asmlinkage int
sys_sigsuspend(int unused0, int unused1, old_sigset_t mask)
{
	mask &= _BLOCKABLE;
	spin_lock_irq(&current->sighand->siglock);
	current->saved_sigmask = current->blocked;
	siginitset(&current->blocked, mask);
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);

	current->state = TASK_INTERRUPTIBLE;
	schedule();
	set_restore_sigmask();

	return -ERESTARTNOHAND;
}

asmlinkage int
sys_sigaction(int sig, const struct old_sigaction __user *act,
	      struct old_sigaction __user *oact)
{
	struct k_sigaction new_ka, old_ka;
	int ret;

	if (act) {
		old_sigset_t mask;
		if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
		    __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
		    __get_user(new_ka.sa.sa_restorer, &act->sa_restorer) ||
		    __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
		    __get_user(mask, &act->sa_mask))
			return -EFAULT;
		siginitset(&new_ka.sa.sa_mask, mask);
	}

	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);

	if (!ret && oact) {
		if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
		    __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
		    __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer) ||
		    __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
		    __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
			return -EFAULT;
	}

	return ret;
}

asmlinkage int
sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss)
{
	return do_sigaltstack(uss, uoss, rdusp());
}


/*
 * Do a signal return; undo the signal stack.
 *
 * Keep the return code on the stack quadword aligned!
 * That makes the cache flush below easier.
 */

struct sigframe
{
	char __user *pretcode;
	int sig;
	int code;
	struct sigcontext __user *psc;
	char retcode[8];
	unsigned long extramask[_NSIG_WORDS-1];
	struct sigcontext sc;
};

struct rt_sigframe
{
	char __user *pretcode;
	int sig;
	struct siginfo __user *pinfo;
	void __user *puc;
	char retcode[8];
	struct siginfo info;
	struct ucontext uc;
};


static unsigned char fpu_version;	/* version number of fpu, set by setup_frame */

static inline int restore_fpu_state(struct sigcontext *sc)
{
	int err = 1;

	if (FPU_IS_EMU) {
		/* restore registers */
		memcpy(current->thread.fpcntl, sc->sc_fpcntl, 12);
		memcpy(current->thread.fp, sc->sc_fpregs, 24);
		return 0;
	}

	if (CPU_IS_060 ? sc->sc_fpstate[2] : sc->sc_fpstate[0]) {
		/* Verify the frame format. */
		if (!CPU_IS_060 && (sc->sc_fpstate[0] != fpu_version))
			goto out;
		if (CPU_IS_020_OR_030) {
			if (m68k_fputype & FPU_68881 &&
			    !(sc->sc_fpstate[1] == 0x18 || sc->sc_fpstate[1] == 0xb4))
				goto out;
			if (m68k_fputype & FPU_68882 &&
			    !(sc->sc_fpstate[1] == 0x38 || sc->sc_fpstate[1] == 0xd4))
				goto out;
		} else if (CPU_IS_040) {
			if (!(sc->sc_fpstate[1] == 0x00 ||
			      sc->sc_fpstate[1] == 0x28 ||
			      sc->sc_fpstate[1] == 0x60))
				goto out;
		} else if (CPU_IS_060) {
			if (!(sc->sc_fpstate[3] == 0x00 ||
			      sc->sc_fpstate[3] == 0x60 ||
			      sc->sc_fpstate[3] == 0xe0))
				goto out;
		} else
			goto out;

		__asm__ volatile (".chip 68k/68881\n\t"
				  "fmovemx %0,%%fp0-%%fp1\n\t"
				  "fmoveml %1,%%fpcr/%%fpsr/%%fpiar\n\t"
				  ".chip 68k"
				  : /* no outputs */
				  : "m" (*sc->sc_fpregs), "m" (*sc->sc_fpcntl));
	}
	__asm__ volatile (".chip 68k/68881\n\t"
			  "frestore %0\n\t"
			  ".chip 68k" : : "m" (*sc->sc_fpstate));
	err = 0;

out:
	return err;
}

#define FPCONTEXT_SIZE	216
#define uc_fpstate	uc_filler[0]
#define uc_formatvec	uc_filler[FPCONTEXT_SIZE/4]
#define uc_extra	uc_filler[FPCONTEXT_SIZE/4+1]

static inline int rt_restore_fpu_state(struct ucontext __user *uc)
{
	unsigned char fpstate[FPCONTEXT_SIZE];
	int context_size = CPU_IS_060 ? 8 : 0;
	fpregset_t fpregs;
	int err = 1;

	if (FPU_IS_EMU) {
		/* restore fpu control register */
		if (__copy_from_user(current->thread.fpcntl,
				     uc->uc_mcontext.fpregs.f_fpcntl, 12))
			goto out;
		/* restore all other fpu registers */
		if (__copy_from_user(current->thread.fp,
				     uc->uc_mcontext.fpregs.f_fpregs, 96))
			goto out;
		return 0;
	}

	if (__get_user(*(long *)fpstate, (long __user *)&uc->uc_fpstate))
		goto out;
	if (CPU_IS_060 ? fpstate[2] : fpstate[0]) {
		if (!CPU_IS_060)
			context_size = fpstate[1];
		/* Verify the frame format. */
		if (!CPU_IS_060 && (fpstate[0] != fpu_version))
			goto out;
		if (CPU_IS_020_OR_030) {
			if (m68k_fputype & FPU_68881 &&
			    !(context_size == 0x18 || context_size == 0xb4))
				goto out;
			if (m68k_fputype & FPU_68882 &&
			    !(context_size == 0x38 || context_size == 0xd4))
				goto out;
		} else if (CPU_IS_040) {
			if (!(context_size == 0x00 ||
			      context_size == 0x28 ||
			      context_size == 0x60))
				goto out;
		} else if (CPU_IS_060) {
			if (!(fpstate[3] == 0x00 ||
			      fpstate[3] == 0x60 ||
			      fpstate[3] == 0xe0))
				goto out;
		} else
			goto out;
		if (__copy_from_user(&fpregs, &uc->uc_mcontext.fpregs,
				     sizeof(fpregs)))
			goto out;
		__asm__ volatile (".chip 68k/68881\n\t"
				  "fmovemx %0,%%fp0-%%fp7\n\t"
				  "fmoveml %1,%%fpcr/%%fpsr/%%fpiar\n\t"
				  ".chip 68k"
				  : /* no outputs */
				  : "m" (*fpregs.f_fpregs),
				    "m" (*fpregs.f_fpcntl));
	}
	if (context_size &&
	    __copy_from_user(fpstate + 4, (long __user *)&uc->uc_fpstate + 1,
			     context_size))
		goto out;
	__asm__ volatile (".chip 68k/68881\n\t"
			  "frestore %0\n\t"
			  ".chip 68k" : : "m" (*fpstate));
	err = 0;

out:
	return err;
}

static int mangle_kernel_stack(struct pt_regs *regs, int formatvec,
			       void __user *fp)
{
	int fsize = frame_extra_sizes[formatvec >> 12];
	if (fsize < 0) {
		/*
		 * user process trying to return with weird frame format
		 */
#ifdef DEBUG
		printk("user process returning with weird frame format\n");
#endif
		return 1;
	}
	if (!fsize) {
		regs->format = formatvec >> 12;
		regs->vector = formatvec & 0xfff;
	} else {
		struct switch_stack *sw = (struct switch_stack *)regs - 1;
		unsigned long buf[fsize / 2]; /* yes, twice as much */

		/* that'll make sure that expansion won't crap over data */
		if (copy_from_user(buf + fsize / 4, fp, fsize))
			return 1;

		/* point of no return */
		regs->format = formatvec >> 12;
		regs->vector = formatvec & 0xfff;
#define frame_offset (sizeof(struct pt_regs)+sizeof(struct switch_stack))
		__asm__ __volatile__
			(" movel %0,%/a0\n\t"
			 " subl %1,%/a0\n\t"		/* make room on stack */
			 " movel %/a0,%/sp\n\t"		/* set stack pointer */
			 /* move switch_stack and pt_regs */
			 "1: movel %0@+,%/a0@+\n\t"
			 " dbra %2,1b\n\t"
			 " lea %/sp@(%c3),%/a0\n\t"	/* add offset of fmt */
			 " lsrl #2,%1\n\t"
			 " subql #1,%1\n\t"
			 /* copy to the gap we'd made */
			 "2: movel %4@+,%/a0@+\n\t"
			 " dbra %1,2b\n\t"
			 " bral ret_from_signal\n"
			 : /* no outputs, it doesn't ever return */
			 : "a" (sw), "d" (fsize), "d" (frame_offset/4-1),
			   "n" (frame_offset), "a" (buf + fsize/4)
			 : "a0");
#undef frame_offset
	}
	return 0;
}

static inline int
restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *usc, void __user *fp)
{
	int formatvec;
	struct sigcontext context;
	int err;

	/* Always make any pending restarted system calls return -EINTR */
	current_thread_info()->restart_block.fn = do_no_restart_syscall;

	/* get previous context */
	if (copy_from_user(&context, usc, sizeof(context)))
		goto badframe;

	/* restore passed registers */
	regs->d0 = context.sc_d0;
	regs->d1 = context.sc_d1;
	regs->a0 = context.sc_a0;
	regs->a1 = context.sc_a1;
	regs->sr = (regs->sr & 0xff00) | (context.sc_sr & 0xff);
	regs->pc = context.sc_pc;
	regs->orig_d0 = -1;		/* disable syscall checks */
	wrusp(context.sc_usp);
	formatvec = context.sc_formatvec;

	err = restore_fpu_state(&context);

	if (err || mangle_kernel_stack(regs, formatvec, fp))
		goto badframe;

	return 0;

badframe:
	return 1;
}

static inline int
rt_restore_ucontext(struct pt_regs *regs, struct switch_stack *sw,
		    struct ucontext __user *uc)
{
	int temp;
	greg_t __user *gregs = uc->uc_mcontext.gregs;
	unsigned long usp;
	int err;

	/* Always make any pending restarted system calls return -EINTR */
	current_thread_info()->restart_block.fn = do_no_restart_syscall;

	err = __get_user(temp, &uc->uc_mcontext.version);
	if (temp != MCONTEXT_VERSION)
		goto badframe;
	/* restore passed registers */
	err |= __get_user(regs->d0, &gregs[0]);
	err |= __get_user(regs->d1, &gregs[1]);
	err |= __get_user(regs->d2, &gregs[2]);
	err |= __get_user(regs->d3, &gregs[3]);
	err |= __get_user(regs->d4, &gregs[4]);
	err |= __get_user(regs->d5, &gregs[5]);
	err |= __get_user(sw->d6, &gregs[6]);
	err |= __get_user(sw->d7, &gregs[7]);
	err |= __get_user(regs->a0, &gregs[8]);
	err |= __get_user(regs->a1, &gregs[9]);
	err |= __get_user(regs->a2, &gregs[10]);
	err |= __get_user(sw->a3, &gregs[11]);
	err |= __get_user(sw->a4, &gregs[12]);
	err |= __get_user(sw->a5, &gregs[13]);
	err |= __get_user(sw->a6, &gregs[14]);
	err |= __get_user(usp, &gregs[15]);
	wrusp(usp);
	err |= __get_user(regs->pc, &gregs[16]);
	err |= __get_user(temp, &gregs[17]);
	regs->sr = (regs->sr & 0xff00) | (temp & 0xff);
	regs->orig_d0 = -1;		/* disable syscall checks */
	err |= __get_user(temp, &uc->uc_formatvec);

	err |= rt_restore_fpu_state(uc);

	if (err || do_sigaltstack(&uc->uc_stack, NULL, usp) == -EFAULT)
		goto badframe;

	if (mangle_kernel_stack(regs, temp, &uc->uc_extra))
		goto badframe;

	return 0;

badframe:
	return 1;
}

asmlinkage int do_sigreturn(unsigned long __unused)
{
	struct switch_stack *sw = (struct switch_stack *) &__unused;
	struct pt_regs *regs = (struct pt_regs *) (sw + 1);
	unsigned long usp = rdusp();
	struct sigframe __user *frame = (struct sigframe __user *)(usp - 4);
	sigset_t set;

	if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
		goto badframe;
	if (__get_user(set.sig[0], &frame->sc.sc_mask) ||
	    (_NSIG_WORDS > 1 &&
	     __copy_from_user(&set.sig[1], &frame->extramask,
			      sizeof(frame->extramask))))
		goto badframe;

	sigdelsetmask(&set, ~_BLOCKABLE);
	current->blocked = set;
	recalc_sigpending();

	if (restore_sigcontext(regs, &frame->sc, frame + 1))
		goto badframe;
	return regs->d0;

badframe:
	force_sig(SIGSEGV, current);
	return 0;
}

asmlinkage int do_rt_sigreturn(unsigned long __unused)
{
	struct switch_stack *sw = (struct switch_stack *) &__unused;
	struct pt_regs *regs = (struct pt_regs *) (sw + 1);
	unsigned long usp = rdusp();
	struct rt_sigframe __user *frame = (struct rt_sigframe __user *)(usp - 4);
	sigset_t set;

	if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
		goto badframe;
	if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
		goto badframe;

	sigdelsetmask(&set, ~_BLOCKABLE);
	current->blocked = set;
	recalc_sigpending();

	if (rt_restore_ucontext(regs, sw, &frame->uc))
		goto badframe;
	return regs->d0;

badframe:
	force_sig(SIGSEGV, current);
	return 0;
}

/*
 * Set up a signal frame.
 */

static inline void save_fpu_state(struct sigcontext *sc, struct pt_regs *regs)
{
	if (FPU_IS_EMU) {
		/* save registers */
		memcpy(sc->sc_fpcntl, current->thread.fpcntl, 12);
		memcpy(sc->sc_fpregs, current->thread.fp, 24);
		return;
	}

	__asm__ volatile (".chip 68k/68881\n\t"
			  "fsave %0\n\t"
			  ".chip 68k"
			  : : "m" (*sc->sc_fpstate) : "memory");

	if (CPU_IS_060 ? sc->sc_fpstate[2] : sc->sc_fpstate[0]) {
		fpu_version = sc->sc_fpstate[0];
		if (CPU_IS_020_OR_030 &&
		    regs->vector >= (VEC_FPBRUC * 4) &&
		    regs->vector <= (VEC_FPNAN * 4)) {
			/* Clear pending exception in 68882 idle frame */
			if (*(unsigned short *) sc->sc_fpstate == 0x1f38)
				sc->sc_fpstate[0x38] |= 1 << 3;
		}
		__asm__ volatile (".chip 68k/68881\n\t"
				  "fmovemx %%fp0-%%fp1,%0\n\t"
				  "fmoveml %%fpcr/%%fpsr/%%fpiar,%1\n\t"
				  ".chip 68k"
				  : "=m" (*sc->sc_fpregs),
				    "=m" (*sc->sc_fpcntl)
				  : /* no inputs */
				  : "memory");
	}
}

static inline int rt_save_fpu_state(struct ucontext __user *uc, struct pt_regs *regs)
{
	unsigned char fpstate[FPCONTEXT_SIZE];
	int context_size = CPU_IS_060 ? 8 : 0;
	int err = 0;

	if (FPU_IS_EMU) {
		/* save fpu control register */
		err |= copy_to_user(uc->uc_mcontext.fpregs.f_fpcntl,
				    current->thread.fpcntl, 12);
		/* save all other fpu registers */
		err |= copy_to_user(uc->uc_mcontext.fpregs.f_fpregs,
				    current->thread.fp, 96);
		return err;
	}

	__asm__ volatile (".chip 68k/68881\n\t"
			  "fsave %0\n\t"
			  ".chip 68k"
			  : : "m" (*fpstate) : "memory");

	err |= __put_user(*(long *)fpstate, (long __user *)&uc->uc_fpstate);
	if (CPU_IS_060 ? fpstate[2] : fpstate[0]) {
		fpregset_t fpregs;
		if (!CPU_IS_060)
			context_size = fpstate[1];
		fpu_version = fpstate[0];
		if (CPU_IS_020_OR_030 &&
		    regs->vector >= (VEC_FPBRUC * 4) &&
		    regs->vector <= (VEC_FPNAN * 4)) {
			/* Clear pending exception in 68882 idle frame */
			if (*(unsigned short *) fpstate == 0x1f38)
				fpstate[0x38] |= 1 << 3;
		}
		__asm__ volatile (".chip 68k/68881\n\t"
				  "fmovemx %%fp0-%%fp7,%0\n\t"
				  "fmoveml %%fpcr/%%fpsr/%%fpiar,%1\n\t"
				  ".chip 68k"
				  : "=m" (*fpregs.f_fpregs),
				    "=m" (*fpregs.f_fpcntl)
				  : /* no inputs */
				  : "memory");
		err |= copy_to_user(&uc->uc_mcontext.fpregs, &fpregs,
				    sizeof(fpregs));
	}
	if (context_size)
		err |= copy_to_user((long __user *)&uc->uc_fpstate + 1, fpstate + 4,
				    context_size);
	return err;
}

static void setup_sigcontext(struct sigcontext *sc, struct pt_regs *regs,
			     unsigned long mask)
{
	sc->sc_mask = mask;
	sc->sc_usp = rdusp();
	sc->sc_d0 = regs->d0;
	sc->sc_d1 = regs->d1;
	sc->sc_a0 = regs->a0;
	sc->sc_a1 = regs->a1;
	sc->sc_sr = regs->sr;
	sc->sc_pc = regs->pc;
	sc->sc_formatvec = regs->format << 12 | regs->vector;
	save_fpu_state(sc, regs);
}

static inline int rt_setup_ucontext(struct ucontext __user *uc, struct pt_regs *regs)
{
	struct switch_stack *sw = (struct switch_stack *)regs - 1;
	greg_t __user *gregs = uc->uc_mcontext.gregs;
	int err = 0;

	err |= __put_user(MCONTEXT_VERSION, &uc->uc_mcontext.version);
	err |= __put_user(regs->d0, &gregs[0]);
	err |= __put_user(regs->d1, &gregs[1]);
	err |= __put_user(regs->d2, &gregs[2]);
	err |= __put_user(regs->d3, &gregs[3]);
	err |= __put_user(regs->d4, &gregs[4]);
	err |= __put_user(regs->d5, &gregs[5]);
	err |= __put_user(sw->d6, &gregs[6]);
	err |= __put_user(sw->d7, &gregs[7]);
	err |= __put_user(regs->a0, &gregs[8]);
	err |= __put_user(regs->a1, &gregs[9]);
	err |= __put_user(regs->a2, &gregs[10]);
	err |= __put_user(sw->a3, &gregs[11]);
	err |= __put_user(sw->a4, &gregs[12]);
	err |= __put_user(sw->a5, &gregs[13]);
	err |= __put_user(sw->a6, &gregs[14]);
	err |= __put_user(rdusp(), &gregs[15]);
	err |= __put_user(regs->pc, &gregs[16]);
	err |= __put_user(regs->sr, &gregs[17]);
	err |= __put_user((regs->format << 12) | regs->vector, &uc->uc_formatvec);
	err |= rt_save_fpu_state(uc, regs);
	return err;
}

static inline void push_cache (unsigned long vaddr)
{
	/*
	 * Using the old cache_push_v() was really a big waste.
	 *
	 * What we are trying to do is to flush 8 bytes to ram.
	 * Flushing 2 cache lines of 16 bytes is much cheaper than
	 * flushing 1 or 2 pages, as previously done in
	 * cache_push_v().
	 *   Jes
	 */
	if (CPU_IS_040) {
		unsigned long temp;

		__asm__ __volatile__ (".chip 68040\n\t"
				      "nop\n\t"
				      "ptestr (%1)\n\t"
				      "movec %%mmusr,%0\n\t"
				      ".chip 68k"
				      : "=r" (temp)
				      : "a" (vaddr));

		temp &= PAGE_MASK;
		temp |= vaddr & ~PAGE_MASK;

		__asm__ __volatile__ (".chip 68040\n\t"
				      "nop\n\t"
				      "cpushl %%bc,(%0)\n\t"
				      ".chip 68k"
				      : : "a" (temp));
	}
	else if (CPU_IS_060) {
		unsigned long temp;
		__asm__ __volatile__ (".chip 68060\n\t"
				      "plpar (%0)\n\t"
				      ".chip 68k"
				      : "=a" (temp)
				      : "0" (vaddr));
		__asm__ __volatile__ (".chip 68060\n\t"
				      "cpushl %%bc,(%0)\n\t"
				      ".chip 68k"
				      : : "a" (temp));
	}
	else {
		/*
		 * 68030/68020 have no writeback cache;
		 * still need to clear icache.
		 * Note that vaddr is guaranteed to be long word aligned.
		 */
		unsigned long temp;
		asm volatile ("movec %%cacr,%0" : "=r" (temp));
		temp += 4;
		asm volatile ("movec %0,%%caar\n\t"
			      "movec %1,%%cacr"
			      : : "r" (vaddr), "r" (temp));
		asm volatile ("movec %0,%%caar\n\t"
			      "movec %1,%%cacr"
			      : : "r" (vaddr + 4), "r" (temp));
	}
}

static inline void __user *
get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size)
{
	unsigned long usp;

	/* Default to using normal stack. */
	usp = rdusp();

	/* This is the X/Open sanctioned signal stack switching. */
	if (ka->sa.sa_flags & SA_ONSTACK) {
		if (!sas_ss_flags(usp))
			usp = current->sas_ss_sp + current->sas_ss_size;
	}
	return (void __user *)((usp - frame_size) & -8UL);
}

static int setup_frame (int sig, struct k_sigaction *ka,
			sigset_t *set, struct pt_regs *regs)
{
	struct sigframe __user *frame;
	int fsize = frame_extra_sizes[regs->format];
	struct sigcontext context;
	int err = 0;

	if (fsize < 0) {
#ifdef DEBUG
		printk ("setup_frame: Unknown frame format %#x\n",
			regs->format);
#endif
		goto give_sigsegv;
	}

	frame = get_sigframe(ka, regs, sizeof(*frame) + fsize);

	if (fsize)
		err |= copy_to_user (frame + 1, regs + 1, fsize);

	err |= __put_user((current_thread_info()->exec_domain
			   && current_thread_info()->exec_domain->signal_invmap
			   && sig < 32
			   ? current_thread_info()->exec_domain->signal_invmap[sig]
			   : sig),
			  &frame->sig);

	err |= __put_user(regs->vector, &frame->code);
	err |= __put_user(&frame->sc, &frame->psc);

	if (_NSIG_WORDS > 1)
		err |= copy_to_user(frame->extramask, &set->sig[1],
				    sizeof(frame->extramask));

	setup_sigcontext(&context, regs, set->sig[0]);
	err |= copy_to_user (&frame->sc, &context, sizeof(context));

	/* Set up to return from userspace. */
	err |= __put_user(frame->retcode, &frame->pretcode);
	/* moveq #,d0; trap #0 */
	err |= __put_user(0x70004e40 + (__NR_sigreturn << 16),
			  (long __user *)(frame->retcode));

	if (err)
		goto give_sigsegv;

	push_cache ((unsigned long) &frame->retcode);

	/*
	 * Set up registers for signal handler.  All the state we are about
	 * to destroy is successfully copied to sigframe.
	 */
	wrusp ((unsigned long) frame);
	regs->pc = (unsigned long) ka->sa.sa_handler;

	/*
	 * This is subtle; if we build more than one sigframe, all but the
	 * first one will see frame format 0 and have fsize == 0, so we won't
	 * screw stkadj.
	 */
	if (fsize)
		regs->stkadj = fsize;

	/* Prepare to skip over the extra stuff in the exception frame. */
	if (regs->stkadj) {
		struct pt_regs *tregs =
			(struct pt_regs *)((ulong)regs + regs->stkadj);
#ifdef DEBUG
		printk("Performing stackadjust=%04x\n", regs->stkadj);
#endif
		/* This must be copied with decreasing addresses to
		   handle overlaps. */
		tregs->vector = 0;
		tregs->format = 0;
		tregs->pc = regs->pc;
		tregs->sr = regs->sr;
	}
	return 0;

give_sigsegv:
	force_sigsegv(sig, current);
	return err;
}

static int setup_rt_frame (int sig, struct k_sigaction *ka, siginfo_t *info,
			   sigset_t *set, struct pt_regs *regs)
{
	struct rt_sigframe __user *frame;
	int fsize = frame_extra_sizes[regs->format];
	int err = 0;

	if (fsize < 0) {
#ifdef DEBUG
		printk ("setup_frame: Unknown frame format %#x\n",
			regs->format);
#endif
		goto give_sigsegv;
	}

	frame = get_sigframe(ka, regs, sizeof(*frame));

	if (fsize)
		err |= copy_to_user (&frame->uc.uc_extra, regs + 1, fsize);

	err |= __put_user((current_thread_info()->exec_domain
			   && current_thread_info()->exec_domain->signal_invmap
			   && sig < 32
			   ? current_thread_info()->exec_domain->signal_invmap[sig]
			   : sig),
			  &frame->sig);
	err |= __put_user(&frame->info, &frame->pinfo);
	err |= __put_user(&frame->uc, &frame->puc);
	err |= copy_siginfo_to_user(&frame->info, info);

	/* Create the ucontext. */
	err |= __put_user(0, &frame->uc.uc_flags);
	err |= __put_user(NULL, &frame->uc.uc_link);
	err |= __put_user((void __user *)current->sas_ss_sp,
			  &frame->uc.uc_stack.ss_sp);
	err |= __put_user(sas_ss_flags(rdusp()),
			  &frame->uc.uc_stack.ss_flags);
	err |= __put_user(current->sas_ss_size, &frame->uc.uc_stack.ss_size);
	err |= rt_setup_ucontext(&frame->uc, regs);
	err |= copy_to_user (&frame->uc.uc_sigmask, set, sizeof(*set));

	/* Set up to return from userspace. */
	err |= __put_user(frame->retcode, &frame->pretcode);
#ifdef __mcoldfire__
	/* movel #__NR_rt_sigreturn,d0; trap #0 */
	err |= __put_user(0x203c0000, (long __user *)(frame->retcode + 0));
	err |= __put_user(0x00004e40 + (__NR_rt_sigreturn << 16),
			  (long __user *)(frame->retcode + 4));
#else
	/* moveq #,d0; notb d0; trap #0 */
	err |= __put_user(0x70004600 + ((__NR_rt_sigreturn ^ 0xff) << 16),
			  (long __user *)(frame->retcode + 0));
	err |= __put_user(0x4e40, (short __user *)(frame->retcode + 4));
#endif

	if (err)
		goto give_sigsegv;

	push_cache ((unsigned long) &frame->retcode);

	/*
	 * Set up registers for signal handler.  All the state we are about
	 * to destroy is successfully copied to sigframe.
	 */
	wrusp ((unsigned long) frame);
	regs->pc = (unsigned long) ka->sa.sa_handler;

	/*
	 * This is subtle; if we build more than one sigframe, all but the
	 * first one will see frame format 0 and have fsize == 0, so we won't
	 * screw stkadj.
	 */
	if (fsize)
		regs->stkadj = fsize;

	/* Prepare to skip over the extra stuff in the exception frame. */
	if (regs->stkadj) {
		struct pt_regs *tregs =
			(struct pt_regs *)((ulong)regs + regs->stkadj);
#ifdef DEBUG
		printk("Performing stackadjust=%04x\n", regs->stkadj);
#endif
		/* This must be copied with decreasing addresses to
		   handle overlaps. */
		tregs->vector = 0;
		tregs->format = 0;
		tregs->pc = regs->pc;
		tregs->sr = regs->sr;
	}
	return 0;

give_sigsegv:
	force_sigsegv(sig, current);
	return err;
}

static inline void
handle_restart(struct pt_regs *regs, struct k_sigaction *ka, int has_handler)
{
	switch (regs->d0) {
	case -ERESTARTNOHAND:
		if (!has_handler)
			goto do_restart;
		regs->d0 = -EINTR;
		break;

	case -ERESTART_RESTARTBLOCK:
		if (!has_handler) {
			regs->d0 = __NR_restart_syscall;
			regs->pc -= 2;
			break;
		}
		regs->d0 = -EINTR;
		break;

	case -ERESTARTSYS:
		if (has_handler && !(ka->sa.sa_flags & SA_RESTART)) {
			regs->d0 = -EINTR;
			break;
		}
		/* fallthrough */
	case -ERESTARTNOINTR:
	do_restart:
		regs->d0 = regs->orig_d0;
		regs->pc -= 2;
		break;
	}
}

void ptrace_signal_deliver(struct pt_regs *regs, void *cookie)
{
	if (regs->orig_d0 < 0)
		return;
	switch (regs->d0) {
	case -ERESTARTNOHAND:
	case -ERESTARTSYS:
	case -ERESTARTNOINTR:
		regs->d0 = regs->orig_d0;
		regs->orig_d0 = -1;
		regs->pc -= 2;
		break;
	}
}

/*
 * OK, we're invoking a handler
 */
static void
handle_signal(int sig, struct k_sigaction *ka, siginfo_t *info,
	      sigset_t *oldset, struct pt_regs *regs)
{
	int err;
	/* are we from a system call? */
	if (regs->orig_d0 >= 0)
		/* If so, check system call restarting.. */
		handle_restart(regs, ka, 1);

	/* set up the stack frame */
	if (ka->sa.sa_flags & SA_SIGINFO)
		err = setup_rt_frame(sig, ka, info, oldset, regs);
	else
		err = setup_frame(sig, ka, oldset, regs);

	if (err)
		return;

	sigorsets(&current->blocked, &current->blocked, &ka->sa.sa_mask);
	if (!(ka->sa.sa_flags & SA_NODEFER))
		sigaddset(&current->blocked, sig);
	recalc_sigpending();

	if (test_thread_flag(TIF_DELAYED_TRACE)) {
		regs->sr &= ~0x8000;
		send_sig(SIGTRAP, current, 1);
	}

	clear_thread_flag(TIF_RESTORE_SIGMASK);
}

/*
 * Note that 'init' is a special process: it doesn't get signals it doesn't
 * want to handle. Thus you cannot kill init even with a SIGKILL even by
 * mistake.
 */
asmlinkage void do_signal(struct pt_regs *regs)
{
	siginfo_t info;
	struct k_sigaction ka;
	int signr;
	sigset_t *oldset;

	current->thread.esp0 = (unsigned long) regs;

	if (test_thread_flag(TIF_RESTORE_SIGMASK))
		oldset = &current->saved_sigmask;
	else
		oldset = &current->blocked;

	signr = get_signal_to_deliver(&info, &ka, regs, NULL);
	if (signr > 0) {
		/* Whee!  Actually deliver the signal.  */
		handle_signal(signr, &ka, &info, oldset, regs);
		return;
	}

	/* Did we come from a system call? */
	if (regs->orig_d0 >= 0)
		/* Restart the system call - no handlers present */
		handle_restart(regs, NULL, 0);

	/* If there's no signal to deliver, we just restore the saved mask. */
	if (test_thread_flag(TIF_RESTORE_SIGMASK)) {
		clear_thread_flag(TIF_RESTORE_SIGMASK);
		sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
	}
}