Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
torvalds
GitHub Repository: torvalds/linux
Path: blob/master/tools/testing/selftests/arm64/fp/fp-ptrace.c
26295 views
1
// SPDX-License-Identifier: GPL-2.0-only
2
/*
3
* Copyright (C) 2023 ARM Limited.
4
* Original author: Mark Brown <[email protected]>
5
*/
6
7
#define _GNU_SOURCE
8
9
#include <errno.h>
10
#include <stdbool.h>
11
#include <stddef.h>
12
#include <stdio.h>
13
#include <stdlib.h>
14
#include <string.h>
15
#include <unistd.h>
16
17
#include <sys/auxv.h>
18
#include <sys/prctl.h>
19
#include <sys/ptrace.h>
20
#include <sys/types.h>
21
#include <sys/uio.h>
22
#include <sys/wait.h>
23
24
#include <linux/kernel.h>
25
26
#include <asm/sigcontext.h>
27
#include <asm/sve_context.h>
28
#include <asm/ptrace.h>
29
30
#include "../../kselftest.h"
31
32
#include "fp-ptrace.h"
33
34
#include <linux/bits.h>
35
36
#define FPMR_LSCALE2_MASK GENMASK(37, 32)
37
#define FPMR_NSCALE_MASK GENMASK(31, 24)
38
#define FPMR_LSCALE_MASK GENMASK(22, 16)
39
#define FPMR_OSC_MASK GENMASK(15, 15)
40
#define FPMR_OSM_MASK GENMASK(14, 14)
41
42
/* <linux/elf.h> and <sys/auxv.h> don't like each other, so: */
43
#ifndef NT_ARM_SVE
44
#define NT_ARM_SVE 0x405
45
#endif
46
47
#ifndef NT_ARM_SSVE
48
#define NT_ARM_SSVE 0x40b
49
#endif
50
51
#ifndef NT_ARM_ZA
52
#define NT_ARM_ZA 0x40c
53
#endif
54
55
#ifndef NT_ARM_ZT
56
#define NT_ARM_ZT 0x40d
57
#endif
58
59
#ifndef NT_ARM_FPMR
60
#define NT_ARM_FPMR 0x40e
61
#endif
62
63
#define ARCH_VQ_MAX 256
64
65
/* VL 128..2048 in powers of 2 */
66
#define MAX_NUM_VLS 5
67
68
/*
69
* FPMR bits we can set without doing feature checks to see if values
70
* are valid.
71
*/
72
#define FPMR_SAFE_BITS (FPMR_LSCALE2_MASK | FPMR_NSCALE_MASK | \
73
FPMR_LSCALE_MASK | FPMR_OSC_MASK | FPMR_OSM_MASK)
74
75
#define NUM_FPR 32
76
__uint128_t v_in[NUM_FPR];
77
__uint128_t v_expected[NUM_FPR];
78
__uint128_t v_out[NUM_FPR];
79
80
char z_in[__SVE_ZREGS_SIZE(ARCH_VQ_MAX)];
81
char z_expected[__SVE_ZREGS_SIZE(ARCH_VQ_MAX)];
82
char z_out[__SVE_ZREGS_SIZE(ARCH_VQ_MAX)];
83
84
char p_in[__SVE_PREGS_SIZE(ARCH_VQ_MAX)];
85
char p_expected[__SVE_PREGS_SIZE(ARCH_VQ_MAX)];
86
char p_out[__SVE_PREGS_SIZE(ARCH_VQ_MAX)];
87
88
char ffr_in[__SVE_PREG_SIZE(ARCH_VQ_MAX)];
89
char ffr_expected[__SVE_PREG_SIZE(ARCH_VQ_MAX)];
90
char ffr_out[__SVE_PREG_SIZE(ARCH_VQ_MAX)];
91
92
char za_in[ZA_SIG_REGS_SIZE(ARCH_VQ_MAX)];
93
char za_expected[ZA_SIG_REGS_SIZE(ARCH_VQ_MAX)];
94
char za_out[ZA_SIG_REGS_SIZE(ARCH_VQ_MAX)];
95
96
char zt_in[ZT_SIG_REG_BYTES];
97
char zt_expected[ZT_SIG_REG_BYTES];
98
char zt_out[ZT_SIG_REG_BYTES];
99
100
uint64_t fpmr_in, fpmr_expected, fpmr_out;
101
102
uint64_t sve_vl_out;
103
uint64_t sme_vl_out;
104
uint64_t svcr_in, svcr_expected, svcr_out;
105
106
void load_and_save(int flags);
107
108
static bool got_alarm;
109
110
/*
 * SIGALRM handler: record that the timeout fired.  run_parent() polls
 * got_alarm while waiting for the child to exit.
 */
static void handle_alarm(int sig, siginfo_t *info, void *context)
{
	got_alarm = true;
}
114
115
#ifdef CONFIG_CPU_BIG_ENDIAN
/*
 * Convert a native-endian 128 bit value to little endian by
 * byte-swapping each 64 bit half and exchanging the halves.
 */
static __uint128_t arm64_cpu_to_le128(__uint128_t x)
{
	u64 a = swab64(x);
	u64 b = swab64(x >> 64);

	return ((__uint128_t)a << 64) | b;
}
#else
/* Little-endian build: the value is already in LE layout. */
static __uint128_t arm64_cpu_to_le128(__uint128_t x)
{
	return x;
}
#endif
129
130
#define arm64_le128_to_cpu(x) arm64_cpu_to_le128(x)
131
132
static bool sve_supported(void)
133
{
134
return getauxval(AT_HWCAP) & HWCAP_SVE;
135
}
136
137
static bool sme_supported(void)
138
{
139
return getauxval(AT_HWCAP2) & HWCAP2_SME;
140
}
141
142
static bool sme2_supported(void)
143
{
144
return getauxval(AT_HWCAP2) & HWCAP2_SME2;
145
}
146
147
static bool fa64_supported(void)
148
{
149
return getauxval(AT_HWCAP2) & HWCAP2_SME_FA64;
150
}
151
152
static bool fpmr_supported(void)
153
{
154
return getauxval(AT_HWCAP2) & HWCAP2_FPMR;
155
}
156
157
/*
 * Compare @out against @expected, logging a diagnostic on mismatch.
 * As a debugging aid also reports when the mismatching data is all
 * zeros (a common failure mode where state was never written back).
 *
 * Returns true if the buffers match.
 */
static bool compare_buffer(const char *name, void *out,
			   void *expected, size_t size)
{
	unsigned char *p = out;
	size_t i;

	if (memcmp(out, expected, size) == 0)
		return true;

	ksft_print_msg("Mismatch in %s\n", name);

	/*
	 * Did we just get zeros back?  Scan in place rather than
	 * allocating and comparing against a zeroed scratch buffer so
	 * this diagnostic path cannot itself fail on OOM.
	 */
	for (i = 0; i < size; i++)
		if (p[i] != 0)
			break;
	if (i == size)
		ksft_print_msg("%s is zero\n", name);

	return false;
}
183
184
/*
 * Per-test configuration: the vector lengths and SVCR value the child
 * initially sets, and the values we expect to observe after the
 * test's ptrace writes have been applied.
 */
struct test_config {
	int sve_vl_in;		/* SVE VL the child sets before the test */
	int sve_vl_expected;	/* SVE VL expected after ptrace writes */
	int sme_vl_in;		/* SME VL the child sets before the test */
	int sme_vl_expected;	/* SME VL expected after ptrace writes */
	int svcr_in;		/* Initial SVCR (SM/ZA bits) */
	int svcr_expected;	/* SVCR expected after ptrace writes */
};
192
193
/*
 * A single ptrace test case.
 */
struct test_definition {
	const char *name;	/* Human readable test name */
	bool sve_vl_change;	/* Test changes the SVE VL */
	/* Can this test run with the given configuration? */
	bool (*supported)(struct test_config *config);
	/* Update the *_expected globals for this test's writes */
	void (*set_expected_values)(struct test_config *config);
	/* Perform the ptrace writes against the stopped child */
	void (*modify_values)(pid_t child, struct test_config *test_config);
};
200
201
static int vl_in(struct test_config *config)
202
{
203
int vl;
204
205
if (config->svcr_in & SVCR_SM)
206
vl = config->sme_vl_in;
207
else
208
vl = config->sve_vl_in;
209
210
return vl;
211
}
212
213
static int vl_expected(struct test_config *config)
214
{
215
int vl;
216
217
if (config->svcr_expected & SVCR_SM)
218
vl = config->sme_vl_expected;
219
else
220
vl = config->sve_vl_expected;
221
222
return vl;
223
}
224
225
/*
 * Child side of a test: become traceable, set the configured vector
 * lengths, then hand off to the assembly helper load_and_save() which
 * loads the *_in register values, hits a breakpoint for the parent,
 * and saves the state back to the *_out buffers.  Never returns.
 */
static void run_child(struct test_config *config)
{
	int ret, flags;

	/* Let the parent attach to us */
	ret = ptrace(PTRACE_TRACEME, 0, 0, 0);
	if (ret < 0)
		ksft_exit_fail_msg("PTRACE_TRACEME failed: %s (%d)\n",
				   strerror(errno), errno);

	/* VL setup; on success prctl() returns the VL that was set */
	if (sve_supported()) {
		ret = prctl(PR_SVE_SET_VL, config->sve_vl_in);
		if (ret != config->sve_vl_in) {
			ksft_print_msg("Failed to set SVE VL %d: %d\n",
				       config->sve_vl_in, ret);
		}
	}

	if (sme_supported()) {
		ret = prctl(PR_SME_SET_VL, config->sme_vl_in);
		if (ret != config->sme_vl_in) {
			ksft_print_msg("Failed to set SME VL %d: %d\n",
				       config->sme_vl_in, ret);
		}
	}

	/* Load values and wait for the parent */
	flags = 0;
	if (sve_supported())
		flags |= HAVE_SVE;
	if (sme_supported())
		flags |= HAVE_SME;
	if (sme2_supported())
		flags |= HAVE_SME2;
	if (fa64_supported())
		flags |= HAVE_FA64;
	if (fpmr_supported())
		flags |= HAVE_FPMR;

	load_and_save(flags);

	exit(0);
}
269
270
/*
 * Copy one register buffer out of the stopped child's memory with
 * process_vm_readv(), logging (but not failing) on error or short
 * read.  @name is only used for diagnostics.
 */
static void read_one_child_regs(pid_t child, char *name,
				struct iovec *iov_parent,
				struct iovec *iov_child)
{
	int len = iov_parent->iov_len;
	int ret;

	ret = process_vm_readv(child, iov_parent, 1, iov_child, 1, 0);
	if (ret == -1)
		ksft_print_msg("%s read failed: %s (%d)\n",
			       name, strerror(errno), errno);
	else if (ret != len)
		ksft_print_msg("Short read of %s: %d\n", name, ret);
}
284
285
/*
 * Pull all the *_out buffers the child saved via load_and_save() back
 * into this process, reading only the state the hardware supports.
 */
static void read_child_regs(pid_t child)
{
	struct iovec iov_parent, iov_child;

	/*
	 * Since the child fork()ed from us the buffer addresses are
	 * the same in parent and child.
	 */
	iov_parent.iov_base = &v_out;
	iov_parent.iov_len = sizeof(v_out);
	iov_child.iov_base = &v_out;
	iov_child.iov_len = sizeof(v_out);
	read_one_child_regs(child, "FPSIMD", &iov_parent, &iov_child);

	if (sve_supported() || sme_supported()) {
		iov_parent.iov_base = &sve_vl_out;
		iov_parent.iov_len = sizeof(sve_vl_out);
		iov_child.iov_base = &sve_vl_out;
		iov_child.iov_len = sizeof(sve_vl_out);
		read_one_child_regs(child, "SVE VL", &iov_parent, &iov_child);

		iov_parent.iov_base = &z_out;
		iov_parent.iov_len = sizeof(z_out);
		iov_child.iov_base = &z_out;
		iov_child.iov_len = sizeof(z_out);
		read_one_child_regs(child, "Z", &iov_parent, &iov_child);

		iov_parent.iov_base = &p_out;
		iov_parent.iov_len = sizeof(p_out);
		iov_child.iov_base = &p_out;
		iov_child.iov_len = sizeof(p_out);
		read_one_child_regs(child, "P", &iov_parent, &iov_child);

		iov_parent.iov_base = &ffr_out;
		iov_parent.iov_len = sizeof(ffr_out);
		iov_child.iov_base = &ffr_out;
		iov_child.iov_len = sizeof(ffr_out);
		read_one_child_regs(child, "FFR", &iov_parent, &iov_child);
	}

	if (sme_supported()) {
		iov_parent.iov_base = &sme_vl_out;
		iov_parent.iov_len = sizeof(sme_vl_out);
		iov_child.iov_base = &sme_vl_out;
		iov_child.iov_len = sizeof(sme_vl_out);
		read_one_child_regs(child, "SME VL", &iov_parent, &iov_child);

		iov_parent.iov_base = &svcr_out;
		iov_parent.iov_len = sizeof(svcr_out);
		iov_child.iov_base = &svcr_out;
		iov_child.iov_len = sizeof(svcr_out);
		read_one_child_regs(child, "SVCR", &iov_parent, &iov_child);

		iov_parent.iov_base = &za_out;
		iov_parent.iov_len = sizeof(za_out);
		iov_child.iov_base = &za_out;
		iov_child.iov_len = sizeof(za_out);
		read_one_child_regs(child, "ZA", &iov_parent, &iov_child);
	}

	if (sme2_supported()) {
		iov_parent.iov_base = &zt_out;
		iov_parent.iov_len = sizeof(zt_out);
		iov_child.iov_base = &zt_out;
		iov_child.iov_len = sizeof(zt_out);
		read_one_child_regs(child, "ZT", &iov_parent, &iov_child);
	}

	if (fpmr_supported()) {
		iov_parent.iov_base = &fpmr_out;
		iov_parent.iov_len = sizeof(fpmr_out);
		iov_child.iov_base = &fpmr_out;
		iov_child.iov_len = sizeof(fpmr_out);
		read_one_child_regs(child, "FPMR", &iov_parent, &iov_child);
	}
}
361
362
/*
 * The child stops on a BRK instruction; advance its PC past the BRK
 * and resume it with @restart_type (PTRACE_CONT or PTRACE_DETACH).
 *
 * Returns true on success, false if any ptrace operation failed.
 */
static bool continue_breakpoint(pid_t child,
				enum __ptrace_request restart_type)
{
	struct user_pt_regs pt_regs;
	struct iovec iov;
	int ret;

	/* Get PC */
	iov.iov_base = &pt_regs;
	iov.iov_len = sizeof(pt_regs);
	ret = ptrace(PTRACE_GETREGSET, child, NT_PRSTATUS, &iov);
	if (ret < 0) {
		ksft_print_msg("Failed to get PC: %s (%d)\n",
			       strerror(errno), errno);
		return false;
	}

	/* Skip over the BRK (all AArch64 instructions are 4 bytes) */
	pt_regs.pc += 4;
	ret = ptrace(PTRACE_SETREGSET, child, NT_PRSTATUS, &iov);
	if (ret < 0) {
		ksft_print_msg("Failed to skip BRK: %s (%d)\n",
			       strerror(errno), errno);
		return false;
	}

	/* Restart */
	ret = ptrace(restart_type, child, 0, 0);
	if (ret < 0) {
		ksft_print_msg("Failed to restart child: %s (%d)\n",
			       strerror(errno), errno);
		return false;
	}

	return true;
}
398
399
static bool check_ptrace_values_sve(pid_t child, struct test_config *config)
400
{
401
struct user_sve_header *sve;
402
struct user_fpsimd_state *fpsimd;
403
struct iovec iov;
404
int ret, vq;
405
bool pass = true;
406
407
if (!sve_supported())
408
return true;
409
410
vq = __sve_vq_from_vl(config->sve_vl_in);
411
412
iov.iov_len = SVE_PT_SVE_OFFSET + SVE_PT_SVE_SIZE(vq, SVE_PT_REGS_SVE);
413
iov.iov_base = malloc(iov.iov_len);
414
if (!iov.iov_base) {
415
ksft_print_msg("OOM allocating %lu byte SVE buffer\n",
416
iov.iov_len);
417
return false;
418
}
419
420
ret = ptrace(PTRACE_GETREGSET, child, NT_ARM_SVE, &iov);
421
if (ret != 0) {
422
ksft_print_msg("Failed to read initial SVE: %s (%d)\n",
423
strerror(errno), errno);
424
pass = false;
425
goto out;
426
}
427
428
sve = iov.iov_base;
429
430
if (sve->vl != config->sve_vl_in) {
431
ksft_print_msg("Mismatch in initial SVE VL: %d != %d\n",
432
sve->vl, config->sve_vl_in);
433
pass = false;
434
}
435
436
/* If we are in streaming mode we should just read FPSIMD */
437
if ((config->svcr_in & SVCR_SM) && (sve->flags & SVE_PT_REGS_SVE)) {
438
ksft_print_msg("NT_ARM_SVE reports SVE with PSTATE.SM\n");
439
pass = false;
440
}
441
442
if (svcr_in & SVCR_SM) {
443
if (sve->size != sizeof(sve)) {
444
ksft_print_msg("NT_ARM_SVE reports data with PSTATE.SM\n");
445
pass = false;
446
}
447
} else {
448
if (sve->size != SVE_PT_SIZE(vq, sve->flags)) {
449
ksft_print_msg("Mismatch in SVE header size: %d != %lu\n",
450
sve->size, SVE_PT_SIZE(vq, sve->flags));
451
pass = false;
452
}
453
}
454
455
/* The registers might be in completely different formats! */
456
if (sve->flags & SVE_PT_REGS_SVE) {
457
if (!compare_buffer("initial SVE Z",
458
iov.iov_base + SVE_PT_SVE_ZREG_OFFSET(vq, 0),
459
z_in, SVE_PT_SVE_ZREGS_SIZE(vq)))
460
pass = false;
461
462
if (!compare_buffer("initial SVE P",
463
iov.iov_base + SVE_PT_SVE_PREG_OFFSET(vq, 0),
464
p_in, SVE_PT_SVE_PREGS_SIZE(vq)))
465
pass = false;
466
467
if (!compare_buffer("initial SVE FFR",
468
iov.iov_base + SVE_PT_SVE_FFR_OFFSET(vq),
469
ffr_in, SVE_PT_SVE_PREG_SIZE(vq)))
470
pass = false;
471
} else {
472
fpsimd = iov.iov_base + SVE_PT_FPSIMD_OFFSET;
473
if (!compare_buffer("initial V via SVE", &fpsimd->vregs[0],
474
v_in, sizeof(v_in)))
475
pass = false;
476
}
477
478
out:
479
free(iov.iov_base);
480
return pass;
481
}
482
483
static bool check_ptrace_values_ssve(pid_t child, struct test_config *config)
484
{
485
struct user_sve_header *sve;
486
struct user_fpsimd_state *fpsimd;
487
struct iovec iov;
488
int ret, vq;
489
bool pass = true;
490
491
if (!sme_supported())
492
return true;
493
494
vq = __sve_vq_from_vl(config->sme_vl_in);
495
496
iov.iov_len = SVE_PT_SVE_OFFSET + SVE_PT_SVE_SIZE(vq, SVE_PT_REGS_SVE);
497
iov.iov_base = malloc(iov.iov_len);
498
if (!iov.iov_base) {
499
ksft_print_msg("OOM allocating %lu byte SSVE buffer\n",
500
iov.iov_len);
501
return false;
502
}
503
504
ret = ptrace(PTRACE_GETREGSET, child, NT_ARM_SSVE, &iov);
505
if (ret != 0) {
506
ksft_print_msg("Failed to read initial SSVE: %s (%d)\n",
507
strerror(errno), errno);
508
pass = false;
509
goto out;
510
}
511
512
sve = iov.iov_base;
513
514
if (sve->vl != config->sme_vl_in) {
515
ksft_print_msg("Mismatch in initial SSVE VL: %d != %d\n",
516
sve->vl, config->sme_vl_in);
517
pass = false;
518
}
519
520
if ((config->svcr_in & SVCR_SM) && !(sve->flags & SVE_PT_REGS_SVE)) {
521
ksft_print_msg("NT_ARM_SSVE reports FPSIMD with PSTATE.SM\n");
522
pass = false;
523
}
524
525
if (!(svcr_in & SVCR_SM)) {
526
if (sve->size != sizeof(sve)) {
527
ksft_print_msg("NT_ARM_SSVE reports data without PSTATE.SM\n");
528
pass = false;
529
}
530
} else {
531
if (sve->size != SVE_PT_SIZE(vq, sve->flags)) {
532
ksft_print_msg("Mismatch in SSVE header size: %d != %lu\n",
533
sve->size, SVE_PT_SIZE(vq, sve->flags));
534
pass = false;
535
}
536
}
537
538
/* The registers might be in completely different formats! */
539
if (sve->flags & SVE_PT_REGS_SVE) {
540
if (!compare_buffer("initial SSVE Z",
541
iov.iov_base + SVE_PT_SVE_ZREG_OFFSET(vq, 0),
542
z_in, SVE_PT_SVE_ZREGS_SIZE(vq)))
543
pass = false;
544
545
if (!compare_buffer("initial SSVE P",
546
iov.iov_base + SVE_PT_SVE_PREG_OFFSET(vq, 0),
547
p_in, SVE_PT_SVE_PREGS_SIZE(vq)))
548
pass = false;
549
550
if (!compare_buffer("initial SSVE FFR",
551
iov.iov_base + SVE_PT_SVE_FFR_OFFSET(vq),
552
ffr_in, SVE_PT_SVE_PREG_SIZE(vq)))
553
pass = false;
554
} else {
555
fpsimd = iov.iov_base + SVE_PT_FPSIMD_OFFSET;
556
if (!compare_buffer("initial V via SSVE",
557
&fpsimd->vregs[0], v_in, sizeof(v_in)))
558
pass = false;
559
}
560
561
out:
562
free(iov.iov_base);
563
return pass;
564
}
565
566
/*
 * Validate the initial ZA state read from the stopped child via
 * NT_ARM_ZA against the values the child loaded.
 *
 * Returns true if everything matched.
 */
static bool check_ptrace_values_za(pid_t child, struct test_config *config)
{
	struct user_za_header *za;
	struct iovec iov;
	int ret, vq;
	bool pass = true;

	if (!sme_supported())
		return true;

	vq = __sve_vq_from_vl(config->sme_vl_in);

	iov.iov_len = ZA_SIG_CONTEXT_SIZE(vq);
	iov.iov_base = malloc(iov.iov_len);
	if (!iov.iov_base) {
		ksft_print_msg("OOM allocating %lu byte ZA buffer\n",
			       iov.iov_len);
		return false;
	}

	ret = ptrace(PTRACE_GETREGSET, child, NT_ARM_ZA, &iov);
	if (ret != 0) {
		ksft_print_msg("Failed to read initial ZA: %s (%d)\n",
			       strerror(errno), errno);
		pass = false;
		goto out;
	}

	za = iov.iov_base;

	if (za->vl != config->sme_vl_in) {
		ksft_print_msg("Mismatch in initial SME VL: %d != %d\n",
			       za->vl, config->sme_vl_in);
		pass = false;
	}

	/* If PSTATE.ZA is not set we should just read the header */
	if (config->svcr_in & SVCR_ZA) {
		/* PSTATE.ZA set: header plus the full ZA payload */
		if (za->size != ZA_PT_SIZE(vq)) {
			ksft_print_msg("Unexpected ZA ptrace read size: %d != %lu\n",
				       za->size, ZA_PT_SIZE(vq));
			pass = false;
		}

		if (!compare_buffer("initial ZA",
				    iov.iov_base + ZA_PT_ZA_OFFSET,
				    za_in, ZA_PT_ZA_SIZE(vq)))
			pass = false;
	} else {
		if (za->size != sizeof(*za)) {
			ksft_print_msg("Unexpected ZA ptrace read size: %d != %lu\n",
				       za->size, sizeof(*za));
			pass = false;
		}
	}

out:
	free(iov.iov_base);
	return pass;
}
626
627
/*
 * Validate the initial ZT state read from the stopped child via
 * NT_ARM_ZT against the values the child loaded.
 *
 * Returns true if everything matched.
 */
static bool check_ptrace_values_zt(pid_t child, struct test_config *config)
{
	uint8_t buf[512];
	struct iovec iov;
	int ret;

	if (!sme2_supported())
		return true;

	iov.iov_base = &buf;
	iov.iov_len = ZT_SIG_REG_BYTES;
	ret = ptrace(PTRACE_GETREGSET, child, NT_ARM_ZT, &iov);
	if (ret != 0) {
		ksft_print_msg("Failed to read initial ZT: %s (%d)\n",
			       strerror(errno), errno);
		return false;
	}

	return compare_buffer("initial ZT", buf, zt_in, ZT_SIG_REG_BYTES);
}
647
648
/*
 * Validate the initial FPMR value read from the stopped child via
 * NT_ARM_FPMR against the value the child loaded.
 *
 * Returns true if everything matched.
 */
static bool check_ptrace_values_fpmr(pid_t child, struct test_config *config)
{
	uint64_t val;
	struct iovec iov;
	int ret;

	if (!fpmr_supported())
		return true;

	iov.iov_base = &val;
	iov.iov_len = sizeof(val);
	ret = ptrace(PTRACE_GETREGSET, child, NT_ARM_FPMR, &iov);
	if (ret != 0) {
		ksft_print_msg("Failed to read initial FPMR: %s (%d)\n",
			       strerror(errno), errno);
		return false;
	}

	return compare_buffer("initial FPMR", &val, &fpmr_in, sizeof(val));
}
668
669
/*
 * Read back every supported regset from the stopped child and check
 * that it matches the values the child just loaded.
 *
 * Returns true only if every check passed.
 */
static bool check_ptrace_values(pid_t child, struct test_config *config)
{
	bool pass = true;
	struct user_fpsimd_state fpsimd;
	struct iovec iov;
	int ret;

	iov.iov_base = &fpsimd;
	iov.iov_len = sizeof(fpsimd);
	ret = ptrace(PTRACE_GETREGSET, child, NT_PRFPREG, &iov);
	if (ret == 0) {
		if (!compare_buffer("initial V", &fpsimd.vregs, v_in,
				    sizeof(v_in))) {
			pass = false;
		}
	} else {
		ksft_print_msg("Failed to read initial V: %s (%d)\n",
			       strerror(errno), errno);
		pass = false;
	}

	if (!check_ptrace_values_sve(child, config))
		pass = false;

	if (!check_ptrace_values_ssve(child, config))
		pass = false;

	if (!check_ptrace_values_za(child, config))
		pass = false;

	if (!check_ptrace_values_zt(child, config))
		pass = false;

	if (!check_ptrace_values_fpmr(child, config))
		pass = false;

	return pass;
}
707
708
/*
 * Parent side of a test.  The child is expected to stop twice on BRK
 * instructions inside load_and_save(): once after loading the input
 * values (when we verify the ptrace reads and apply the test's
 * writes) and once after saving the state back (when we pull the
 * saved values out of its memory).  Finally the child is detached and
 * expected to exit cleanly within an alarm()-enforced timeout.
 *
 * Returns true if all checks passed and the child exited cleanly.
 */
static bool run_parent(pid_t child, struct test_definition *test,
		       struct test_config *config)
{
	int wait_status, ret;
	pid_t pid;
	bool pass;

	/* Initial attach */
	while (1) {
		pid = waitpid(child, &wait_status, 0);
		if (pid < 0) {
			if (errno == EINTR)
				continue;
			ksft_exit_fail_msg("waitpid() failed: %s (%d)\n",
					   strerror(errno), errno);
		}

		if (pid == child)
			break;
	}

	if (WIFEXITED(wait_status)) {
		ksft_print_msg("Child exited loading values with status %d\n",
			       WEXITSTATUS(wait_status));
		pass = false;
		goto out;
	}

	if (WIFSIGNALED(wait_status)) {
		ksft_print_msg("Child died from signal %d loading values\n",
			       WTERMSIG(wait_status));
		pass = false;
		goto out;
	}

	/* Read initial values via ptrace */
	pass = check_ptrace_values(child, config);

	/* Do whatever writes we want to do */
	if (test->modify_values)
		test->modify_values(child, config);

	if (!continue_breakpoint(child, PTRACE_CONT))
		goto cleanup;

	/* Wait for the second stop, after the child saved its state */
	while (1) {
		pid = waitpid(child, &wait_status, 0);
		if (pid < 0) {
			if (errno == EINTR)
				continue;
			ksft_exit_fail_msg("waitpid() failed: %s (%d)\n",
					   strerror(errno), errno);
		}

		if (pid == child)
			break;
	}

	if (WIFEXITED(wait_status)) {
		ksft_print_msg("Child exited saving values with status %d\n",
			       WEXITSTATUS(wait_status));
		pass = false;
		goto out;
	}

	if (WIFSIGNALED(wait_status)) {
		ksft_print_msg("Child died from signal %d saving values\n",
			       WTERMSIG(wait_status));
		pass = false;
		goto out;
	}

	/* See what happened as a result */
	read_child_regs(child);

	if (!continue_breakpoint(child, PTRACE_DETACH))
		goto cleanup;

	/* The child should exit cleanly */
	got_alarm = false;
	alarm(1);
	while (1) {
		if (got_alarm) {
			ksft_print_msg("Wait for child timed out\n");
			goto cleanup;
		}

		pid = waitpid(child, &wait_status, 0);
		if (pid < 0) {
			if (errno == EINTR)
				continue;
			ksft_exit_fail_msg("waitpid() failed: %s (%d)\n",
					   strerror(errno), errno);
		}

		if (pid == child)
			break;
	}
	alarm(0);

	if (got_alarm) {
		ksft_print_msg("Timed out waiting for child\n");
		pass = false;
		goto cleanup;
	}

	if (pid == child && WIFSIGNALED(wait_status)) {
		ksft_print_msg("Child died from signal %d cleaning up\n",
			       WTERMSIG(wait_status));
		pass = false;
		goto out;
	}

	if (pid == child && WIFEXITED(wait_status)) {
		if (WEXITSTATUS(wait_status) != 0) {
			ksft_print_msg("Child exited with error %d\n",
				       WEXITSTATUS(wait_status));
			pass = false;
		}
	} else {
		ksft_print_msg("Child did not exit cleanly\n");
		pass = false;
		goto cleanup;
	}

	goto out;

cleanup:
	/* Something went wrong: kill the child and reap it */
	ret = kill(child, SIGKILL);
	if (ret != 0) {
		ksft_print_msg("kill() failed: %s (%d)\n",
			       strerror(errno), errno);
		return false;
	}

	while (1) {
		pid = waitpid(child, &wait_status, 0);
		if (pid < 0) {
			if (errno == EINTR)
				continue;
			ksft_exit_fail_msg("waitpid() failed: %s (%d)\n",
					   strerror(errno), errno);
		}

		if (pid == child)
			break;
	}

out:
	return pass;
}
859
860
/*
 * Fill @buf with pseudo-random data, one 32 bit word at a time.
 *
 * random() returns a 32 bit number regardless of the size of long, so
 * generate in uint32_t units.  Any trailing bytes beyond a multiple
 * of 4 are left untouched.
 */
static void fill_random(void *buf, size_t size)
{
	uint32_t *words = buf;
	size_t n = size / sizeof(uint32_t);
	size_t i;

	for (i = 0; i < n; i++)
		words[i] = random();
}
869
870
/*
 * Generate a random valid FFR value for vector quadword count @vq.
 *
 * Only values with a continuous run of bits set starting from bit 0
 * are valid for FFR: pick a random bit count, zero the whole
 * register, then set that many low bits (whole bytes first, then the
 * partial top byte).
 */
static void fill_random_ffr(void *buf, size_t vq)
{
	uint8_t *lbuf = buf;
	int bits, i;

	memset(buf, 0, __SVE_FFR_SIZE(vq));

	bits = random() % (__SVE_FFR_SIZE(vq) * 8);
	for (i = 0; i < bits / 8; i++)
		lbuf[i] = 0xff;
	/* Remaining bits in the first partial byte, if any */
	if (bits / 8 != __SVE_FFR_SIZE(vq))
		lbuf[i] = (1 << (bits % 8)) - 1;
}
888
889
/*
 * Copy the V registers in @v into the low 128 bits of the Z registers
 * in @z for vector length @vl, converting to the little-endian memory
 * layout SVE state uses.  Does nothing for a zero VL.
 */
static void fpsimd_to_sve(__uint128_t *v, char *z, int vl)
{
	int vq = __sve_vq_from_vl(vl);
	int i;
	__uint128_t *p;

	if (!vl)
		return;

	for (i = 0; i < __SVE_NUM_ZREGS; i++) {
		p = (__uint128_t *)&z[__SVE_ZREG_OFFSET(vq, i)];
		*p = arm64_cpu_to_le128(v[i]);
	}
}
903
904
/*
 * Initialise the global *_in buffers with (mostly random) values for
 * the child to load, set the *_expected buffers to match, and zero
 * the *_out buffers ready for the child to save into.  Called before
 * forking, so the child inherits these values.
 */
static void set_initial_values(struct test_config *config)
{
	int vq = __sve_vq_from_vl(vl_in(config));
	int sme_vq = __sve_vq_from_vl(config->sme_vl_in);

	svcr_in = config->svcr_in;
	svcr_expected = config->svcr_expected;
	svcr_out = 0;

	fill_random(&v_in, sizeof(v_in));
	memcpy(v_expected, v_in, sizeof(v_in));
	memset(v_out, 0, sizeof(v_out));

	/* Changes will be handled in the test case */
	if (sve_supported() || (config->svcr_in & SVCR_SM)) {
		/* The low 128 bits of Z are shared with the V registers */
		fill_random(&z_in, __SVE_ZREGS_SIZE(vq));
		fpsimd_to_sve(v_in, z_in, vl_in(config));
		memcpy(z_expected, z_in, __SVE_ZREGS_SIZE(vq));
		memset(z_out, 0, sizeof(z_out));

		fill_random(&p_in, __SVE_PREGS_SIZE(vq));
		memcpy(p_expected, p_in, __SVE_PREGS_SIZE(vq));
		memset(p_out, 0, sizeof(p_out));

		/* In streaming mode without FA64 FFR is expected to be zero */
		if ((config->svcr_in & SVCR_SM) && !fa64_supported())
			memset(ffr_in, 0, __SVE_PREG_SIZE(vq));
		else
			fill_random_ffr(&ffr_in, vq);
		memcpy(ffr_expected, ffr_in, __SVE_PREG_SIZE(vq));
		memset(ffr_out, 0, __SVE_PREG_SIZE(vq));
	}

	/* ZA only holds data while PSTATE.ZA is set */
	if (config->svcr_in & SVCR_ZA)
		fill_random(za_in, ZA_SIG_REGS_SIZE(sme_vq));
	else
		memset(za_in, 0, ZA_SIG_REGS_SIZE(sme_vq));
	if (config->svcr_expected & SVCR_ZA)
		memcpy(za_expected, za_in, ZA_SIG_REGS_SIZE(sme_vq));
	else
		memset(za_expected, 0, ZA_SIG_REGS_SIZE(sme_vq));
	if (sme_supported())
		memset(za_out, 0, sizeof(za_out));

	/* ZT follows the same PSTATE.ZA rules as ZA */
	if (sme2_supported()) {
		if (config->svcr_in & SVCR_ZA)
			fill_random(zt_in, ZT_SIG_REG_BYTES);
		else
			memset(zt_in, 0, ZT_SIG_REG_BYTES);
		if (config->svcr_expected & SVCR_ZA)
			memcpy(zt_expected, zt_in, ZT_SIG_REG_BYTES);
		else
			memset(zt_expected, 0, ZT_SIG_REG_BYTES);
		memset(zt_out, 0, sizeof(zt_out));
	}

	if (fpmr_supported()) {
		/* Restrict to bits valid on any FPMR implementation */
		fill_random(&fpmr_in, sizeof(fpmr_in));
		fpmr_in &= FPMR_SAFE_BITS;
		fpmr_expected = fpmr_in;
	} else {
		fpmr_in = 0;
		fpmr_expected = 0;
		fpmr_out = 0;
	}
}
970
971
/*
 * After the child has run, compare all the *_out buffers it saved
 * against the *_expected values for this test.
 *
 * Returns true only if everything matched.
 */
static bool check_memory_values(struct test_config *config)
{
	bool pass = true;
	int vq, sme_vq;

	if (!compare_buffer("saved V", v_out, v_expected, sizeof(v_out)))
		pass = false;

	vq = __sve_vq_from_vl(vl_expected(config));
	sme_vq = __sve_vq_from_vl(config->sme_vl_expected);

	if (svcr_out != svcr_expected) {
		ksft_print_msg("Mismatch in saved SVCR %lx != %lx\n",
			       svcr_out, svcr_expected);
		pass = false;
	}

	if (sve_vl_out != config->sve_vl_expected) {
		ksft_print_msg("Mismatch in SVE VL: %ld != %d\n",
			       sve_vl_out, config->sve_vl_expected);
		pass = false;
	}

	if (sme_vl_out != config->sme_vl_expected) {
		ksft_print_msg("Mismatch in SME VL: %ld != %d\n",
			       sme_vl_out, config->sme_vl_expected);
		pass = false;
	}

	if (!compare_buffer("saved Z", z_out, z_expected,
			    __SVE_ZREGS_SIZE(vq)))
		pass = false;

	if (!compare_buffer("saved P", p_out, p_expected,
			    __SVE_PREGS_SIZE(vq)))
		pass = false;

	if (!compare_buffer("saved FFR", ffr_out, ffr_expected,
			    __SVE_PREG_SIZE(vq)))
		pass = false;

	if (!compare_buffer("saved ZA", za_out, za_expected,
			    ZA_PT_ZA_SIZE(sme_vq)))
		pass = false;

	if (!compare_buffer("saved ZT", zt_out, zt_expected, ZT_SIG_REG_BYTES))
		pass = false;

	if (fpmr_out != fpmr_expected) {
		ksft_print_msg("Mismatch in saved FPMR: %lx != %lx\n",
			       fpmr_out, fpmr_expected);
		pass = false;
	}

	return pass;
}
1027
1028
static bool sve_sme_same(struct test_config *config)
1029
{
1030
if (config->sve_vl_in != config->sve_vl_expected)
1031
return false;
1032
1033
if (config->sme_vl_in != config->sme_vl_expected)
1034
return false;
1035
1036
if (config->svcr_in != config->svcr_expected)
1037
return false;
1038
1039
return true;
1040
}
1041
1042
/*
 * Can an SVE format register write be done with this configuration?
 * SVE writes can't change PSTATE.ZA, and the VL being written must
 * match what we expect to observe afterwards.
 */
static bool sve_write_supported(struct test_config *config)
{
	if (!sve_supported() && !sme_supported())
		return false;

	/* An SVE write can't change PSTATE.ZA */
	if ((config->svcr_in & SVCR_ZA) != (config->svcr_expected & SVCR_ZA))
		return false;

	if (config->svcr_expected & SVCR_SM) {
		if (config->sve_vl_in != config->sve_vl_expected) {
			return false;
		}

		/* Changing the SME VL disables ZA */
		if ((config->svcr_expected & SVCR_ZA) &&
		    (config->sme_vl_in != config->sme_vl_expected)) {
			return false;
		}
	} else {
		if (config->sme_vl_in != config->sme_vl_expected) {
			return false;
		}

		/* Non-streaming writes need SVE itself */
		if (!sve_supported())
			return false;
	}

	return true;
}
1071
1072
static bool sve_write_fpsimd_supported(struct test_config *config)
1073
{
1074
if (!sve_supported())
1075
return false;
1076
1077
if ((config->svcr_in & SVCR_ZA) != (config->svcr_expected & SVCR_ZA))
1078
return false;
1079
1080
if (config->svcr_expected & SVCR_SM)
1081
return false;
1082
1083
if (config->sme_vl_in != config->sme_vl_expected)
1084
return false;
1085
1086
return true;
1087
}
1088
1089
/*
 * Set the expected values for a FPSIMD write test: new random V
 * values, with the SVE state cleared apart from the shared low bits.
 */
static void fpsimd_write_expected(struct test_config *config)
{
	int vl;

	fill_random(&v_expected, sizeof(v_expected));

	/* The SVE registers are flushed by a FPSIMD write */
	vl = vl_expected(config);

	memset(z_expected, 0, __SVE_ZREGS_SIZE(__sve_vq_from_vl(vl)));
	memset(p_expected, 0, __SVE_PREGS_SIZE(__sve_vq_from_vl(vl)));
	memset(ffr_expected, 0, __SVE_PREG_SIZE(__sve_vq_from_vl(vl)));

	/* The low 128 bits of Z mirror the written V values */
	fpsimd_to_sve(v_expected, z_expected, vl);
}
1104
1105
static void fpsimd_write(pid_t child, struct test_config *test_config)
1106
{
1107
struct user_fpsimd_state fpsimd;
1108
struct iovec iov;
1109
int ret;
1110
1111
memset(&fpsimd, 0, sizeof(fpsimd));
1112
memcpy(&fpsimd.vregs, v_expected, sizeof(v_expected));
1113
1114
iov.iov_base = &fpsimd;
1115
iov.iov_len = sizeof(fpsimd);
1116
ret = ptrace(PTRACE_SETREGSET, child, NT_PRFPREG, &iov);
1117
if (ret == -1)
1118
ksft_print_msg("FPSIMD set failed: (%s) %d\n",
1119
strerror(errno), errno);
1120
}
1121
1122
/*
 * FPMR writes are supported when FPMR exists and nothing else in the
 * configuration changes.
 */
static bool fpmr_write_supported(struct test_config *config)
{
	return fpmr_supported() && sve_sme_same(config);
}
1132
1133
/*
 * Set the expected FPMR for a write test: a random value restricted
 * to the bits that are safe to set on any implementation.
 */
static void fpmr_write_expected(struct test_config *config)
{
	fill_random(&fpmr_expected, sizeof(fpmr_expected));
	fpmr_expected &= FPMR_SAFE_BITS;
}
1138
1139
/*
 * Write the expected FPMR value to the stopped child via NT_ARM_FPMR.
 */
static void fpmr_write(pid_t child, struct test_config *config)
{
	struct iovec iov;
	int ret;

	iov.iov_len = sizeof(fpmr_expected);
	iov.iov_base = &fpmr_expected;
	ret = ptrace(PTRACE_SETREGSET, child, NT_ARM_FPMR, &iov);
	if (ret != 0)
		ksft_print_msg("Failed to write FPMR: %s (%d)\n",
			       strerror(errno), errno);
}
1151
1152
/*
 * Set the expected values for an SVE write test: random Z/P/FFR
 * contents (FFR zeroed when it is not writable in streaming mode
 * without FA64), with the V registers mirroring the low Z bits.
 */
static void sve_write_expected(struct test_config *config)
{
	int vl = vl_expected(config);
	int sme_vq = __sve_vq_from_vl(config->sme_vl_expected);

	if (!vl)
		return;

	fill_random(z_expected, __SVE_ZREGS_SIZE(__sve_vq_from_vl(vl)));
	fill_random(p_expected, __SVE_PREGS_SIZE(__sve_vq_from_vl(vl)));

	if ((svcr_expected & SVCR_SM) && !fa64_supported())
		memset(ffr_expected, 0, __SVE_PREG_SIZE(sme_vq));
	else
		fill_random_ffr(ffr_expected, __sve_vq_from_vl(vl));

	/* Share the low bits of Z with V */
	fill_random(&v_expected, sizeof(v_expected));
	fpsimd_to_sve(v_expected, z_expected, vl);

	/* Changing the SME VL clears ZA/ZT */
	if (config->sme_vl_in != config->sme_vl_expected) {
		memset(za_expected, 0, ZA_PT_ZA_SIZE(sme_vq));
		memset(zt_expected, 0, sizeof(zt_expected));
	}
}
1177
1178
/*
 * Write the expected Z/P/FFR values to the stopped child in SVE
 * format, via NT_ARM_SSVE when we expect to be in streaming mode and
 * NT_ARM_SVE otherwise.
 */
static void sve_write_sve(pid_t child, struct test_config *config)
{
	struct user_sve_header *sve;
	struct iovec iov;
	int ret, vl, vq, regset;

	vl = vl_expected(config);
	vq = __sve_vq_from_vl(vl);

	if (!vl)
		return;

	iov.iov_len = SVE_PT_SIZE(vq, SVE_PT_REGS_SVE);
	iov.iov_base = malloc(iov.iov_len);
	if (!iov.iov_base) {
		ksft_print_msg("Failed allocating %lu byte SVE write buffer\n",
			       iov.iov_len);
		return;
	}
	memset(iov.iov_base, 0, iov.iov_len);

	sve = iov.iov_base;
	sve->size = iov.iov_len;
	sve->flags = SVE_PT_REGS_SVE;
	sve->vl = vl;

	memcpy(iov.iov_base + SVE_PT_SVE_ZREG_OFFSET(vq, 0),
	       z_expected, SVE_PT_SVE_ZREGS_SIZE(vq));
	memcpy(iov.iov_base + SVE_PT_SVE_PREG_OFFSET(vq, 0),
	       p_expected, SVE_PT_SVE_PREGS_SIZE(vq));
	memcpy(iov.iov_base + SVE_PT_SVE_FFR_OFFSET(vq),
	       ffr_expected, SVE_PT_SVE_PREG_SIZE(vq));

	/* Streaming mode state is written via the separate SSVE regset */
	if (svcr_expected & SVCR_SM)
		regset = NT_ARM_SSVE;
	else
		regset = NT_ARM_SVE;

	ret = ptrace(PTRACE_SETREGSET, child, regset, &iov);
	if (ret != 0)
		ksft_print_msg("Failed to write SVE: %s (%d)\n",
			       strerror(errno), errno);

	free(iov.iov_base);
}
1223
1224
/*
 * Write the expected V values to the stopped child via NT_ARM_SVE
 * using the FPSIMD data format.
 */
static void sve_write_fpsimd(pid_t child, struct test_config *config)
{
	struct user_sve_header *sve;
	struct user_fpsimd_state *fpsimd;
	struct iovec iov;
	int ret, vl, vq;

	vl = vl_expected(config);
	vq = __sve_vq_from_vl(vl);

	if (!vl)
		return;

	iov.iov_len = SVE_PT_SIZE(vq, SVE_PT_REGS_FPSIMD);
	iov.iov_base = malloc(iov.iov_len);
	if (!iov.iov_base) {
		ksft_print_msg("Failed allocating %lu byte SVE write buffer\n",
			       iov.iov_len);
		return;
	}
	memset(iov.iov_base, 0, iov.iov_len);

	sve = iov.iov_base;
	sve->size = iov.iov_len;
	sve->flags = SVE_PT_REGS_FPSIMD;
	sve->vl = vl;

	/* The FPSIMD payload sits directly after the header */
	fpsimd = iov.iov_base + SVE_PT_REGS_OFFSET;
	memcpy(&fpsimd->vregs, v_expected, sizeof(v_expected));

	ret = ptrace(PTRACE_SETREGSET, child, NT_ARM_SVE, &iov);
	if (ret != 0)
		ksft_print_msg("Failed to write SVE: %s (%d)\n",
			       strerror(errno), errno);

	free(iov.iov_base);
}
1261
1262
static bool za_write_supported(struct test_config *config)
1263
{
1264
if ((config->svcr_in & SVCR_SM) != (config->svcr_expected & SVCR_SM))
1265
return false;
1266
1267
return true;
1268
}
1269
1270
static void za_write_expected(struct test_config *config)
1271
{
1272
int sme_vq, sve_vq;
1273
1274
sme_vq = __sve_vq_from_vl(config->sme_vl_expected);
1275
1276
if (config->svcr_expected & SVCR_ZA) {
1277
fill_random(za_expected, ZA_PT_ZA_SIZE(sme_vq));
1278
} else {
1279
memset(za_expected, 0, ZA_PT_ZA_SIZE(sme_vq));
1280
memset(zt_expected, 0, sizeof(zt_expected));
1281
}
1282
1283
/* Changing the SME VL flushes ZT, SVE state */
1284
if (config->sme_vl_in != config->sme_vl_expected) {
1285
sve_vq = __sve_vq_from_vl(vl_expected(config));
1286
memset(z_expected, 0, __SVE_ZREGS_SIZE(sve_vq));
1287
memset(p_expected, 0, __SVE_PREGS_SIZE(sve_vq));
1288
memset(ffr_expected, 0, __SVE_PREG_SIZE(sve_vq));
1289
memset(zt_expected, 0, sizeof(zt_expected));
1290
1291
fpsimd_to_sve(v_expected, z_expected, vl_expected(config));
1292
}
1293
}
1294
1295
static void za_write(pid_t child, struct test_config *config)
1296
{
1297
struct user_za_header *za;
1298
struct iovec iov;
1299
int ret, vq;
1300
1301
vq = __sve_vq_from_vl(config->sme_vl_expected);
1302
1303
if (config->svcr_expected & SVCR_ZA)
1304
iov.iov_len = ZA_PT_SIZE(vq);
1305
else
1306
iov.iov_len = sizeof(*za);
1307
iov.iov_base = malloc(iov.iov_len);
1308
if (!iov.iov_base) {
1309
ksft_print_msg("Failed allocating %lu byte ZA write buffer\n",
1310
iov.iov_len);
1311
return;
1312
}
1313
memset(iov.iov_base, 0, iov.iov_len);
1314
1315
za = iov.iov_base;
1316
za->size = iov.iov_len;
1317
za->vl = config->sme_vl_expected;
1318
if (config->svcr_expected & SVCR_ZA)
1319
memcpy(iov.iov_base + ZA_PT_ZA_OFFSET, za_expected,
1320
ZA_PT_ZA_SIZE(vq));
1321
1322
ret = ptrace(PTRACE_SETREGSET, child, NT_ARM_ZA, &iov);
1323
if (ret != 0)
1324
ksft_print_msg("Failed to write ZA: %s (%d)\n",
1325
strerror(errno), errno);
1326
1327
free(iov.iov_base);
1328
}
1329
1330
static bool zt_write_supported(struct test_config *config)
1331
{
1332
if (!sme2_supported())
1333
return false;
1334
if (config->sme_vl_in != config->sme_vl_expected)
1335
return false;
1336
if (!(config->svcr_expected & SVCR_ZA))
1337
return false;
1338
if ((config->svcr_in & SVCR_SM) != (config->svcr_expected & SVCR_SM))
1339
return false;
1340
1341
return true;
1342
}
1343
1344
static void zt_write_expected(struct test_config *config)
1345
{
1346
int sme_vq;
1347
1348
sme_vq = __sve_vq_from_vl(config->sme_vl_expected);
1349
1350
if (config->svcr_expected & SVCR_ZA) {
1351
fill_random(zt_expected, sizeof(zt_expected));
1352
} else {
1353
memset(za_expected, 0, ZA_PT_ZA_SIZE(sme_vq));
1354
memset(zt_expected, 0, sizeof(zt_expected));
1355
}
1356
}
1357
1358
static void zt_write(pid_t child, struct test_config *config)
1359
{
1360
struct iovec iov;
1361
int ret;
1362
1363
iov.iov_len = ZT_SIG_REG_BYTES;
1364
iov.iov_base = zt_expected;
1365
ret = ptrace(PTRACE_SETREGSET, child, NT_ARM_ZT, &iov);
1366
if (ret != 0)
1367
ksft_print_msg("Failed to write ZT: %s (%d)\n",
1368
strerror(errno), errno);
1369
}
1370
1371
/* Actually run a test */
static void run_test(struct test_definition *test, struct test_config *config)
{
	pid_t child;
	char name[1024];
	bool pass;

	/* Include whichever VL/SVCR transitions apply in the test name */
	if (sve_supported() && sme_supported())
		snprintf(name, sizeof(name), "%s, SVE %d->%d, SME %d/%x->%d/%x",
			 test->name,
			 config->sve_vl_in, config->sve_vl_expected,
			 config->sme_vl_in, config->svcr_in,
			 config->sme_vl_expected, config->svcr_expected);
	else if (sve_supported())
		snprintf(name, sizeof(name), "%s, SVE %d->%d", test->name,
			 config->sve_vl_in, config->sve_vl_expected);
	else if (sme_supported())
		snprintf(name, sizeof(name), "%s, SME %d/%x->%d/%x",
			 test->name,
			 config->sme_vl_in, config->svcr_in,
			 config->sme_vl_expected, config->svcr_expected);
	else
		snprintf(name, sizeof(name), "%s", test->name);

	/* Skip combinations the test says it can't express */
	if (test->supported && !test->supported(config)) {
		ksft_test_result_skip("%s\n", name);
		return;
	}

	/* Pick the initial and expected register values before forking */
	set_initial_values(config);

	if (test->set_expected_values)
		test->set_expected_values(config);

	child = fork();
	if (child < 0)
		ksft_exit_fail_msg("fork() failed: %s (%d)\n",
				   strerror(errno), errno);
	/* run_child() never returns */
	if (child == 0)
		run_child(config);

	/* Trace the child, then validate the values it saved to memory */
	pass = run_parent(child, test, config);
	if (!check_memory_values(config))
		pass = false;

	ksft_test_result(pass, "%s\n", name);
}
static void run_tests(struct test_definition defs[], int count,
1421
struct test_config *config)
1422
{
1423
int i;
1424
1425
for (i = 0; i < count; i++)
1426
run_test(&defs[i], config);
1427
}
1428
1429
/* Tests needing only FPSIMD/FPMR state, run for every configuration */
static struct test_definition base_test_defs[] = {
	{
		.name = "No writes",
		.supported = sve_sme_same,
	},
	{
		.name = "FPSIMD write",
		.supported = sve_sme_same,
		.set_expected_values = fpsimd_write_expected,
		.modify_values = fpsimd_write,
	},
	{
		.name = "FPMR write",
		.supported = fpmr_write_supported,
		.set_expected_values = fpmr_write_expected,
		.modify_values = fpmr_write,
	},
};
/* Tests exercising the SVE regsets, run when SVE is supported */
static struct test_definition sve_test_defs[] = {
	{
		.name = "SVE write",
		.supported = sve_write_supported,
		.set_expected_values = sve_write_expected,
		.modify_values = sve_write_sve,
	},
	{
		.name = "SVE write FPSIMD format",
		.supported = sve_write_fpsimd_supported,
		.set_expected_values = fpsimd_write_expected,
		.modify_values = sve_write_fpsimd,
	},
};
/* Tests exercising the ZA regset, run when SME is supported */
static struct test_definition za_test_defs[] = {
	{
		.name = "ZA write",
		.supported = za_write_supported,
		.set_expected_values = za_write_expected,
		.modify_values = za_write,
	},
};
/* Tests exercising the ZT regset, run when SME2 is supported */
static struct test_definition zt_test_defs[] = {
	{
		.name = "ZT write",
		.supported = zt_write_supported,
		.set_expected_values = zt_write_expected,
		.modify_values = zt_write,
	},
};
/* Vector lengths found by probe_vls(), at most MAX_NUM_VLS of each */
static int sve_vls[MAX_NUM_VLS], sme_vls[MAX_NUM_VLS];
static int sve_vl_count, sme_vl_count;
/*
 * Discover which vector lengths the system supports for one vector
 * extension by requesting each power of two VL from the architectural
 * maximum downwards and recording what we get back.  set_vl is the
 * prctl() to use (PR_SVE_SET_VL or PR_SME_SET_VL).  If more than two
 * VLs are found only the minimum and maximum are kept, bounding the
 * number of test combinations.
 */
static void probe_vls(const char *name, int vls[], int *vl_count, int set_vl)
{
	unsigned int vq;
	int vl;

	*vl_count = 0;

	for (vq = ARCH_VQ_MAX; vq > 0; vq /= 2) {
		vl = prctl(set_vl, vq * 16);
		if (vl == -1)
			ksft_exit_fail_msg("SET_VL failed: %s (%d)\n",
					   strerror(errno), errno);

		vl &= PR_SVE_VL_LEN_MASK;

		/* Stop once the configured VL stops changing */
		if (*vl_count && (vl == vls[*vl_count - 1]))
			break;

		/* Continue probing down from the VL we actually got */
		vq = sve_vq_from_vl(vl);

		vls[*vl_count] = vl;
		*vl_count += 1;
	}

	if (*vl_count > 2) {
		/* Just use the minimum and maximum */
		vls[1] = vls[*vl_count - 1];
		ksft_print_msg("%d %s VLs, using %d and %d\n",
			       *vl_count, name, vls[0], vls[1]);
		*vl_count = 2;
	} else {
		ksft_print_msg("%d %s VLs\n", *vl_count, name);
	}
}
/*
 * All SVCR SM/ZA transitions exercised by the SME tests.  Transitions
 * from both-clear to both-set are omitted since both bits can't be
 * enabled with a single ptrace write.
 */
static struct {
	int svcr_in, svcr_expected;
} svcr_combinations[] = {
	{ .svcr_in = 0, .svcr_expected = 0, },
	{ .svcr_in = 0, .svcr_expected = SVCR_SM, },
	{ .svcr_in = 0, .svcr_expected = SVCR_ZA, },
	/* Can't enable both SM and ZA with a single ptrace write */

	{ .svcr_in = SVCR_SM, .svcr_expected = 0, },
	{ .svcr_in = SVCR_SM, .svcr_expected = SVCR_SM, },
	{ .svcr_in = SVCR_SM, .svcr_expected = SVCR_ZA, },
	{ .svcr_in = SVCR_SM, .svcr_expected = SVCR_SM | SVCR_ZA, },

	{ .svcr_in = SVCR_ZA, .svcr_expected = 0, },
	{ .svcr_in = SVCR_ZA, .svcr_expected = SVCR_SM, },
	{ .svcr_in = SVCR_ZA, .svcr_expected = SVCR_ZA, },
	{ .svcr_in = SVCR_ZA, .svcr_expected = SVCR_SM | SVCR_ZA, },

	{ .svcr_in = SVCR_SM | SVCR_ZA, .svcr_expected = 0, },
	{ .svcr_in = SVCR_SM | SVCR_ZA, .svcr_expected = SVCR_SM, },
	{ .svcr_in = SVCR_SM | SVCR_ZA, .svcr_expected = SVCR_ZA, },
	{ .svcr_in = SVCR_SM | SVCR_ZA, .svcr_expected = SVCR_SM | SVCR_ZA, },
};
static void run_sve_tests(void)
1544
{
1545
struct test_config test_config;
1546
int i, j;
1547
1548
if (!sve_supported())
1549
return;
1550
1551
test_config.sme_vl_in = sme_vls[0];
1552
test_config.sme_vl_expected = sme_vls[0];
1553
test_config.svcr_in = 0;
1554
test_config.svcr_expected = 0;
1555
1556
for (i = 0; i < sve_vl_count; i++) {
1557
test_config.sve_vl_in = sve_vls[i];
1558
1559
for (j = 0; j < sve_vl_count; j++) {
1560
test_config.sve_vl_expected = sve_vls[j];
1561
1562
run_tests(base_test_defs,
1563
ARRAY_SIZE(base_test_defs),
1564
&test_config);
1565
if (sve_supported())
1566
run_tests(sve_test_defs,
1567
ARRAY_SIZE(sve_test_defs),
1568
&test_config);
1569
}
1570
}
1571
1572
}
1573
1574
static void run_sme_tests(void)
1575
{
1576
struct test_config test_config;
1577
int i, j, k;
1578
1579
if (!sme_supported())
1580
return;
1581
1582
test_config.sve_vl_in = sve_vls[0];
1583
test_config.sve_vl_expected = sve_vls[0];
1584
1585
/*
1586
* Every SME VL/SVCR combination
1587
*/
1588
for (i = 0; i < sme_vl_count; i++) {
1589
test_config.sme_vl_in = sme_vls[i];
1590
1591
for (j = 0; j < sme_vl_count; j++) {
1592
test_config.sme_vl_expected = sme_vls[j];
1593
1594
for (k = 0; k < ARRAY_SIZE(svcr_combinations); k++) {
1595
test_config.svcr_in = svcr_combinations[k].svcr_in;
1596
test_config.svcr_expected = svcr_combinations[k].svcr_expected;
1597
1598
run_tests(base_test_defs,
1599
ARRAY_SIZE(base_test_defs),
1600
&test_config);
1601
run_tests(sve_test_defs,
1602
ARRAY_SIZE(sve_test_defs),
1603
&test_config);
1604
run_tests(za_test_defs,
1605
ARRAY_SIZE(za_test_defs),
1606
&test_config);
1607
1608
if (sme2_supported())
1609
run_tests(zt_test_defs,
1610
ARRAY_SIZE(zt_test_defs),
1611
&test_config);
1612
}
1613
}
1614
}
1615
}
1616
1617
int main(void)
{
	struct test_config test_config;
	struct sigaction sa;
	int tests, ret, tmp;

	srandom(getpid());

	ksft_print_header();

	/*
	 * Count the planned tests as we discover what the system
	 * supports: (base + SVE tests) for every SVE VL pair.
	 */
	if (sve_supported()) {
		probe_vls("SVE", sve_vls, &sve_vl_count, PR_SVE_SET_VL);

		tests = ARRAY_SIZE(base_test_defs) +
			ARRAY_SIZE(sve_test_defs);
		tests *= sve_vl_count * sve_vl_count;
	} else {
		/* Only run the FPSIMD tests */
		sve_vl_count = 1;
		tests = ARRAY_SIZE(base_test_defs);
	}

	/*
	 * With SME each applicable test set runs for every SME VL pair
	 * and every SVCR combination on top of the SVE count.
	 */
	if (sme_supported()) {
		probe_vls("SME", sme_vls, &sme_vl_count, PR_SME_SET_VL);

		tmp = ARRAY_SIZE(base_test_defs) + ARRAY_SIZE(sve_test_defs)
			+ ARRAY_SIZE(za_test_defs);

		if (sme2_supported())
			tmp += ARRAY_SIZE(zt_test_defs);

		tmp *= sme_vl_count * sme_vl_count;
		tmp *= ARRAY_SIZE(svcr_combinations);
		tests += tmp;
	} else {
		sme_vl_count = 1;
	}

	if (sme2_supported())
		ksft_print_msg("SME2 supported\n");

	if (fa64_supported())
		ksft_print_msg("FA64 supported\n");

	if (fpmr_supported())
		ksft_print_msg("FPMR supported\n");

	ksft_set_plan(tests);

	/* Get signal handlers ready before we start any children */
	memset(&sa, 0, sizeof(sa));
	sa.sa_sigaction = handle_alarm;
	sa.sa_flags = SA_RESTART | SA_SIGINFO;
	sigemptyset(&sa.sa_mask);
	ret = sigaction(SIGALRM, &sa, NULL);
	if (ret < 0)
		ksft_print_msg("Failed to install SIGALRM handler: %s (%d)\n",
			       strerror(errno), errno);

	/*
	 * Run the test set if there is no SVE or SME, with those we
	 * have to pick a VL for each run.
	 */
	if (!sve_supported() && !sme_supported()) {
		test_config.sve_vl_in = 0;
		test_config.sve_vl_expected = 0;
		test_config.sme_vl_in = 0;
		test_config.sme_vl_expected = 0;
		test_config.svcr_in = 0;
		test_config.svcr_expected = 0;

		run_tests(base_test_defs, ARRAY_SIZE(base_test_defs),
			  &test_config);
	}

	run_sve_tests();
	run_sme_tests();

	ksft_finished();
}