Path: blob/master/tools/testing/selftests/kvm/s390/memop.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Test for s390x KVM_S390_MEM_OP
 *
 * Copyright (C) 2019, Red Hat, Inc.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <pthread.h>

#include <linux/bits.h>

#include "test_util.h"
#include "kvm_util.h"
#include "kselftest.h"
#include "ucall_common.h"
#include "processor.h"

enum mop_target {
	LOGICAL,
	SIDA,
	ABSOLUTE,
	INVALID,
};

enum mop_access_mode {
	READ,
	WRITE,
	CMPXCHG,
};

struct mop_desc {
	uintptr_t gaddr;
	uintptr_t gaddr_v;
	uint64_t set_flags;
	unsigned int f_check : 1;
	unsigned int f_inject : 1;
	unsigned int f_key : 1;
	unsigned int _gaddr_v : 1;
	unsigned int _set_flags : 1;
	unsigned int _sida_offset : 1;
	unsigned int _ar : 1;
	uint32_t size;
	enum mop_target target;
	enum mop_access_mode mode;
	void *buf;
	uint32_t sida_offset;
	void *old;
	uint8_t old_value[16];
	bool *cmpxchg_success;
	uint8_t ar;
	uint8_t key;
};

const uint8_t NO_KEY = 0xff;

static struct kvm_s390_mem_op ksmo_from_desc(struct mop_desc *desc)
{
	struct kvm_s390_mem_op ksmo = {
		.gaddr = (uintptr_t)desc->gaddr,
		.size = desc->size,
		.buf = ((uintptr_t)desc->buf),
		.reserved = "ignored_ignored_ignored_ignored"
	};

	switch (desc->target) {
	case LOGICAL:
		if (desc->mode == READ)
			ksmo.op = KVM_S390_MEMOP_LOGICAL_READ;
		if (desc->mode == WRITE)
			ksmo.op = KVM_S390_MEMOP_LOGICAL_WRITE;
		break;
	case SIDA:
		if (desc->mode == READ)
			ksmo.op = KVM_S390_MEMOP_SIDA_READ;
		if (desc->mode == WRITE)
			ksmo.op = KVM_S390_MEMOP_SIDA_WRITE;
		break;
	case ABSOLUTE:
		if (desc->mode == READ)
			ksmo.op = KVM_S390_MEMOP_ABSOLUTE_READ;
		if (desc->mode == WRITE)
			ksmo.op = KVM_S390_MEMOP_ABSOLUTE_WRITE;
		if (desc->mode == CMPXCHG) {
			ksmo.op = KVM_S390_MEMOP_ABSOLUTE_CMPXCHG;
			ksmo.old_addr = (uint64_t)desc->old;
			memcpy(desc->old_value, desc->old, desc->size);
		}
		break;
	case INVALID:
		ksmo.op = -1;
	}
	if (desc->f_check)
		ksmo.flags |= KVM_S390_MEMOP_F_CHECK_ONLY;
	if (desc->f_inject)
		ksmo.flags |= KVM_S390_MEMOP_F_INJECT_EXCEPTION;
	if (desc->_set_flags)
		ksmo.flags = desc->set_flags;
	if (desc->f_key && desc->key != NO_KEY) {
		ksmo.flags |= KVM_S390_MEMOP_F_SKEY_PROTECTION;
		ksmo.key = desc->key;
	}
	if (desc->_ar)
		ksmo.ar = desc->ar;
	else
		ksmo.ar = 0;
	if (desc->_sida_offset)
		ksmo.sida_offset = desc->sida_offset;

	return ksmo;
}

struct test_info {
	struct kvm_vm *vm;
	struct kvm_vcpu *vcpu;
};

#define PRINT_MEMOP false
static void print_memop(struct kvm_vcpu *vcpu, const struct kvm_s390_mem_op *ksmo)
{
	if (!PRINT_MEMOP)
		return;

	if (!vcpu)
		printf("vm memop(");
	else
		printf("vcpu memop(");
	switch (ksmo->op) {
	case KVM_S390_MEMOP_LOGICAL_READ:
		printf("LOGICAL, READ, ");
		break;
	case KVM_S390_MEMOP_LOGICAL_WRITE:
		printf("LOGICAL, WRITE, ");
		break;
	case KVM_S390_MEMOP_SIDA_READ:
		printf("SIDA, READ, ");
		break;
	case KVM_S390_MEMOP_SIDA_WRITE:
		printf("SIDA, WRITE, ");
		break;
	case KVM_S390_MEMOP_ABSOLUTE_READ:
		printf("ABSOLUTE, READ, ");
		break;
	case KVM_S390_MEMOP_ABSOLUTE_WRITE:
		printf("ABSOLUTE, WRITE, ");
		break;
	case KVM_S390_MEMOP_ABSOLUTE_CMPXCHG:
		printf("ABSOLUTE, CMPXCHG, ");
		break;
	}
	printf("gaddr=%llu, size=%u, buf=%llu, ar=%u, key=%u, old_addr=%llx",
	       ksmo->gaddr, ksmo->size, ksmo->buf, ksmo->ar, ksmo->key,
	       ksmo->old_addr);
	if (ksmo->flags & KVM_S390_MEMOP_F_CHECK_ONLY)
		printf(", CHECK_ONLY");
	if (ksmo->flags & KVM_S390_MEMOP_F_INJECT_EXCEPTION)
		printf(", INJECT_EXCEPTION");
	if (ksmo->flags & KVM_S390_MEMOP_F_SKEY_PROTECTION)
		printf(", SKEY_PROTECTION");
	puts(")");
}
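
/*
 * err_memop_ioctl() hands the raw KVM_S390_MEM_OP ioctl result back to the
 * caller so negative tests can inspect it, while memop_ioctl() asserts
 * success; for CMPXCHG it also derives *cmpxchg_success by comparing the
 * old-value snapshot taken in ksmo_from_desc() with the buffer the kernel
 * updates when the compare fails.
 */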

static int err_memop_ioctl(struct test_info info, struct kvm_s390_mem_op *ksmo,
			   struct mop_desc *desc)
{
	struct kvm_vcpu *vcpu = info.vcpu;

	if (!vcpu)
		return __vm_ioctl(info.vm, KVM_S390_MEM_OP, ksmo);
	else
		return __vcpu_ioctl(vcpu, KVM_S390_MEM_OP, ksmo);
}

static void memop_ioctl(struct test_info info, struct kvm_s390_mem_op *ksmo,
			struct mop_desc *desc)
{
	int r;

	r = err_memop_ioctl(info, ksmo, desc);
	if (ksmo->op == KVM_S390_MEMOP_ABSOLUTE_CMPXCHG) {
		if (desc->cmpxchg_success) {
			int diff = memcmp(desc->old_value, desc->old, desc->size);
			*desc->cmpxchg_success = !diff;
		}
	}
	TEST_ASSERT(!r, __KVM_IOCTL_ERROR("KVM_S390_MEM_OP", r));
}

#define MEMOP(err, info_p, mop_target_p, access_mode_p, buf_p, size_p, ...)	\
({										\
	struct test_info __info = (info_p);					\
	struct mop_desc __desc = {						\
		.target = (mop_target_p),					\
		.mode = (access_mode_p),					\
		.buf = (buf_p),							\
		.size = (size_p),						\
		__VA_ARGS__							\
	};									\
	struct kvm_s390_mem_op __ksmo;						\
										\
	if (__desc._gaddr_v) {							\
		if (__desc.target == ABSOLUTE)					\
			__desc.gaddr = addr_gva2gpa(__info.vm, __desc.gaddr_v);	\
		else								\
			__desc.gaddr = __desc.gaddr_v;				\
	}									\
	__ksmo = ksmo_from_desc(&__desc);					\
	print_memop(__info.vcpu, &__ksmo);					\
	err##memop_ioctl(__info, &__ksmo, &__desc);				\
})

#define MOP(...) MEMOP(, __VA_ARGS__)
#define ERR_MOP(...) MEMOP(err_, __VA_ARGS__)

#define GADDR(a) .gaddr = ((uintptr_t)a)
#define GADDR_V(v) ._gaddr_v = 1, .gaddr_v = ((uintptr_t)v)
#define CHECK_ONLY .f_check = 1
#define SET_FLAGS(f) ._set_flags = 1, .set_flags = (f)
#define SIDA_OFFSET(o) ._sida_offset = 1, .sida_offset = (o)
#define AR(a) ._ar = 1, .ar = (a)
#define KEY(a) .f_key = 1, .key = (a)
#define INJECT .f_inject = 1
#define CMPXCHG_OLD(o) .old = (o)
#define CMPXCHG_SUCCESS(s) .cmpxchg_success = (s)

#define CHECK_N_DO(f, ...) ({ f(__VA_ARGS__, CHECK_ONLY); f(__VA_ARGS__); })
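
/*
 * Illustrative example: a call such as
 *
 *	MOP(t.vcpu, LOGICAL, WRITE, mem1, t.size, GADDR_V(mem1), KEY(2));
 *
 * builds a struct mop_desc from the designated initializers, converts it via
 * ksmo_from_desc() and issues KVM_S390_MEM_OP on the vcpu fd (or on the vm fd
 * when info.vcpu is NULL). CHECK_N_DO() issues the operation twice, first
 * with KVM_S390_MEMOP_F_CHECK_ONLY and then for real.
 */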

#define CR0_FETCH_PROTECTION_OVERRIDE	(1UL << (63 - 38))
#define CR0_STORAGE_PROTECTION_OVERRIDE	(1UL << (63 - 39))

static uint8_t __aligned(PAGE_SIZE) mem1[65536];
static uint8_t __aligned(PAGE_SIZE) mem2[65536];

struct test_default {
	struct kvm_vm *kvm_vm;
	struct test_info vm;
	struct test_info vcpu;
	struct kvm_run *run;
	int size;
};

static struct test_default test_default_init(void *guest_code)
{
	struct kvm_vcpu *vcpu;
	struct test_default t;

	t.size = min((size_t)kvm_check_cap(KVM_CAP_S390_MEM_OP), sizeof(mem1));
	t.kvm_vm = vm_create_with_one_vcpu(&vcpu, guest_code);
	t.vm = (struct test_info) { t.kvm_vm, NULL };
	t.vcpu = (struct test_info) { t.kvm_vm, vcpu };
	t.run = vcpu->run;
	return t;
}

enum stage {
	/* Synced state set by host, e.g. DAT */
	STAGE_INITED,
	/* Guest did nothing */
	STAGE_IDLED,
	/* Guest set storage keys (specifics up to test case) */
	STAGE_SKEYS_SET,
	/* Guest copied memory (locations up to test case) */
	STAGE_COPIED,
	/* End of guest code reached */
	STAGE_DONE,
};

#define HOST_SYNC(info_p, stage)					\
({									\
	struct test_info __info = (info_p);				\
	struct kvm_vcpu *__vcpu = __info.vcpu;				\
	struct ucall uc;						\
	int __stage = (stage);						\
									\
	vcpu_run(__vcpu);						\
	get_ucall(__vcpu, &uc);						\
	if (uc.cmd == UCALL_ABORT) {					\
		REPORT_GUEST_ASSERT(uc);				\
	}								\
	TEST_ASSERT_EQ(uc.cmd, UCALL_SYNC);				\
	TEST_ASSERT_EQ(uc.args[1], __stage);				\
})									\

static void prepare_mem12(void)
{
	int i;

	for (i = 0; i < sizeof(mem1); i++)
		mem1[i] = rand();
	memset(mem2, 0xaa, sizeof(mem2));
}

#define ASSERT_MEM_EQ(p1, p2, size) \
	TEST_ASSERT(!memcmp(p1, p2, size), "Memory contents do not match!")

static void default_write_read(struct test_info copy_cpu, struct test_info mop_cpu,
			       enum mop_target mop_target, uint32_t size, uint8_t key)
{
	prepare_mem12();
	CHECK_N_DO(MOP, mop_cpu, mop_target, WRITE, mem1, size,
		   GADDR_V(mem1), KEY(key));
	HOST_SYNC(copy_cpu, STAGE_COPIED);
	CHECK_N_DO(MOP, mop_cpu, mop_target, READ, mem2, size,
		   GADDR_V(mem2), KEY(key));
	ASSERT_MEM_EQ(mem1, mem2, size);
}

static void default_read(struct test_info copy_cpu, struct test_info mop_cpu,
			 enum mop_target mop_target, uint32_t size, uint8_t key)
{
	prepare_mem12();
	CHECK_N_DO(MOP, mop_cpu, mop_target, WRITE, mem1, size, GADDR_V(mem1));
	HOST_SYNC(copy_cpu, STAGE_COPIED);
	CHECK_N_DO(MOP, mop_cpu, mop_target, READ, mem2, size,
		   GADDR_V(mem2), KEY(key));
	ASSERT_MEM_EQ(mem1, mem2, size);
}

static void default_cmpxchg(struct test_default *test, uint8_t key)
{
	for (int size = 1; size <= 16; size *= 2) {
		for (int offset = 0; offset < 16; offset += size) {
			uint8_t __aligned(16) new[16] = {};
			uint8_t __aligned(16) old[16];
			bool succ;

			prepare_mem12();
			default_write_read(test->vcpu, test->vcpu, LOGICAL, 16, NO_KEY);

			memcpy(&old, mem1, 16);
			MOP(test->vm, ABSOLUTE, CMPXCHG, new + offset,
			    size, GADDR_V(mem1 + offset),
			    CMPXCHG_OLD(old + offset),
			    CMPXCHG_SUCCESS(&succ), KEY(key));
			HOST_SYNC(test->vcpu, STAGE_COPIED);
			MOP(test->vm, ABSOLUTE, READ, mem2, 16, GADDR_V(mem2));
			TEST_ASSERT(succ, "exchange of values should succeed");
			memcpy(mem1 + offset, new + offset, size);
			ASSERT_MEM_EQ(mem1, mem2, 16);

			memcpy(&old, mem1, 16);
			new[offset]++;
			old[offset]++;
			MOP(test->vm, ABSOLUTE, CMPXCHG, new + offset,
			    size, GADDR_V(mem1 + offset),
			    CMPXCHG_OLD(old + offset),
			    CMPXCHG_SUCCESS(&succ), KEY(key));
			HOST_SYNC(test->vcpu, STAGE_COPIED);
			MOP(test->vm, ABSOLUTE, READ, mem2, 16, GADDR_V(mem2));
			TEST_ASSERT(!succ, "exchange of values should not succeed");
			ASSERT_MEM_EQ(mem1, mem2, 16);
			ASSERT_MEM_EQ(&old, mem1, 16);
		}
	}
}

static void guest_copy(void)
{
	GUEST_SYNC(STAGE_INITED);
	memcpy(&mem2, &mem1, sizeof(mem2));
	GUEST_SYNC(STAGE_COPIED);
}

static void test_copy(void)
{
	struct test_default t = test_default_init(guest_copy);

	HOST_SYNC(t.vcpu, STAGE_INITED);

	default_write_read(t.vcpu, t.vcpu, LOGICAL, t.size, NO_KEY);

	kvm_vm_free(t.kvm_vm);
}
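
/*
 * PSW bits 16-17 are the address-space control; the psw_mask manipulation in
 * test_copy_access_register() (and again in test_errors()) sets them to 01,
 * i.e. access-register mode, before issuing memops with AR(1).
 */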

static void test_copy_access_register(void)
{
	struct test_default t = test_default_init(guest_copy);

	HOST_SYNC(t.vcpu, STAGE_INITED);

	prepare_mem12();
	t.run->psw_mask &= ~(3UL << (63 - 17));
	t.run->psw_mask |= 1UL << (63 - 17); /* Enable AR mode */

	/*
	 * Primary address space gets used if an access register
	 * contains zero. The host makes use of AR[1] so is a good
	 * candidate to ensure the guest AR (of zero) is used.
	 */
	CHECK_N_DO(MOP, t.vcpu, LOGICAL, WRITE, mem1, t.size,
		   GADDR_V(mem1), AR(1));
	HOST_SYNC(t.vcpu, STAGE_COPIED);

	CHECK_N_DO(MOP, t.vcpu, LOGICAL, READ, mem2, t.size,
		   GADDR_V(mem2), AR(1));
	ASSERT_MEM_EQ(mem1, mem2, t.size);

	kvm_vm_free(t.kvm_vm);
}

static void set_storage_key_range(void *addr, size_t len, uint8_t key)
{
	uintptr_t _addr, abs, i;
	int not_mapped = 0;

	_addr = (uintptr_t)addr;
	for (i = _addr & PAGE_MASK; i < _addr + len; i += PAGE_SIZE) {
		abs = i;
		asm volatile (
			"lra	%[abs], 0(0,%[abs])\n"
			"	jz	0f\n"
			"	llill	%[not_mapped],1\n"
			"	j	1f\n"
			"0:	sske	%[key], %[abs]\n"
			"1:"
			: [abs] "+&a" (abs), [not_mapped] "+r" (not_mapped)
			: [key] "r" (key)
			: "cc"
		);
		GUEST_ASSERT_EQ(not_mapped, 0);
	}
}

static void guest_copy_key(void)
{
	set_storage_key_range(mem1, sizeof(mem1), 0x90);
	set_storage_key_range(mem2, sizeof(mem2), 0x90);
	GUEST_SYNC(STAGE_SKEYS_SET);

	for (;;) {
		memcpy(&mem2, &mem1, sizeof(mem2));
		GUEST_SYNC(STAGE_COPIED);
	}
}

static void test_copy_key(void)
{
	struct test_default t = test_default_init(guest_copy_key);

	HOST_SYNC(t.vcpu, STAGE_SKEYS_SET);

	/* vm, no key */
	default_write_read(t.vcpu, t.vm, ABSOLUTE, t.size, NO_KEY);

	/* vm/vcpu, matching key or key 0 */
	default_write_read(t.vcpu, t.vcpu, LOGICAL, t.size, 0);
	default_write_read(t.vcpu, t.vcpu, LOGICAL, t.size, 9);
	default_write_read(t.vcpu, t.vm, ABSOLUTE, t.size, 0);
	default_write_read(t.vcpu, t.vm, ABSOLUTE, t.size, 9);
	/*
	 * There used to be different code paths for key handling depending on
	 * if the region crossed a page boundary.
	 * There currently are not, but the more tests the merrier.
	 */
	default_write_read(t.vcpu, t.vcpu, LOGICAL, 1, 0);
	default_write_read(t.vcpu, t.vcpu, LOGICAL, 1, 9);
	default_write_read(t.vcpu, t.vm, ABSOLUTE, 1, 0);
	default_write_read(t.vcpu, t.vm, ABSOLUTE, 1, 9);

	/* vm/vcpu, mismatching keys on read, but no fetch protection */
	default_read(t.vcpu, t.vcpu, LOGICAL, t.size, 2);
	default_read(t.vcpu, t.vm, ABSOLUTE, t.size, 2);

	kvm_vm_free(t.kvm_vm);
}

static void test_cmpxchg_key(void)
{
	struct test_default t = test_default_init(guest_copy_key);

	HOST_SYNC(t.vcpu, STAGE_SKEYS_SET);

	default_cmpxchg(&t, NO_KEY);
	default_cmpxchg(&t, 0);
	default_cmpxchg(&t, 9);

	kvm_vm_free(t.kvm_vm);
}

static __uint128_t cut_to_size(int size, __uint128_t val)
{
	switch (size) {
	case 1:
		return (uint8_t)val;
	case 2:
		return (uint16_t)val;
	case 4:
		return (uint32_t)val;
	case 8:
		return (uint64_t)val;
	case 16:
		return val;
	}
	GUEST_FAIL("Invalid size = %u", size);
	return 0;
}

static bool popcount_eq(__uint128_t a, __uint128_t b)
{
	unsigned int count_a, count_b;

	count_a = __builtin_popcountl((uint64_t)(a >> 64)) +
		  __builtin_popcountl((uint64_t)a);
	count_b = __builtin_popcountl((uint64_t)(b >> 64)) +
		  __builtin_popcountl((uint64_t)b);
	return count_a == count_b;
}

static __uint128_t rotate(int size, __uint128_t val, int amount)
{
	unsigned int bits = size * 8;

	amount = (amount + bits) % bits;
	val = cut_to_size(size, val);
	if (!amount)
		return val;
	return (val << (bits - amount)) | (val >> amount);
}

const unsigned int max_block = 16;
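
/*
 * choose_block() and permutate_bits() derive a block size, offset and bit
 * permutation deterministically from the iteration index, using different
 * constants for guest and host. A permutation is either a swap of two bytes
 * or a rotation of the chosen block, so the number of set bits in the
 * 16-byte block is preserved; popcount_eq() relies on that invariant.
 */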

static void choose_block(bool guest, int i, int *size, int *offset)
{
	unsigned int rand;

	rand = i;
	if (guest) {
		rand = rand * 19 + 11;
		*size = 1 << ((rand % 3) + 2);
		rand = rand * 19 + 11;
		*offset = (rand % max_block) & ~(*size - 1);
	} else {
		rand = rand * 17 + 5;
		*size = 1 << (rand % 5);
		rand = rand * 17 + 5;
		*offset = (rand % max_block) & ~(*size - 1);
	}
}

static __uint128_t permutate_bits(bool guest, int i, int size, __uint128_t old)
{
	unsigned int rand;
	int amount;
	bool swap;

	rand = i;
	rand = rand * 3 + 1;
	if (guest)
		rand = rand * 3 + 1;
	swap = rand % 2 == 0;
	if (swap) {
		int i, j;
		__uint128_t new;
		uint8_t byte0, byte1;

		rand = rand * 3 + 1;
		i = rand % size;
		rand = rand * 3 + 1;
		j = rand % size;
		if (i == j)
			return old;
		new = rotate(16, old, i * 8);
		byte0 = new & 0xff;
		new &= ~0xff;
		new = rotate(16, new, -i * 8);
		new = rotate(16, new, j * 8);
		byte1 = new & 0xff;
		new = (new & ~0xff) | byte0;
		new = rotate(16, new, -j * 8);
		new = rotate(16, new, i * 8);
		new = new | byte1;
		new = rotate(16, new, -i * 8);
		return new;
	}
	rand = rand * 3 + 1;
	amount = rand % (size * 8);
	return rotate(size, old, amount);
}

static bool _cmpxchg(int size, void *target, __uint128_t *old_addr, __uint128_t new)
{
	bool ret;

	switch (size) {
	case 4: {
		uint32_t old = *old_addr;

		asm volatile ("cs %[old],%[new],%[address]"
			      : [old] "+d" (old),
				[address] "+Q" (*(uint32_t *)(target))
			      : [new] "d" ((uint32_t)new)
			      : "cc"
		);
		ret = old == (uint32_t)*old_addr;
		*old_addr = old;
		return ret;
	}
	case 8: {
		uint64_t old = *old_addr;

		asm volatile ("csg %[old],%[new],%[address]"
			      : [old] "+d" (old),
				[address] "+Q" (*(uint64_t *)(target))
			      : [new] "d" ((uint64_t)new)
			      : "cc"
		);
		ret = old == (uint64_t)*old_addr;
		*old_addr = old;
		return ret;
	}
	case 16: {
		__uint128_t old = *old_addr;

		asm volatile ("cdsg %[old],%[new],%[address]"
			      : [old] "+d" (old),
				[address] "+Q" (*(__uint128_t *)(target))
			      : [new] "d" (new)
			      : "cc"
		);
		ret = old == *old_addr;
		*old_addr = old;
		return ret;
	}
	}
	GUEST_FAIL("Invalid size = %u", size);
	return 0;
}

const unsigned int cmpxchg_iter_outer = 100, cmpxchg_iter_inner = 10000;

static void guest_cmpxchg_key(void)
{
	int size, offset;
	__uint128_t old, new;

	set_storage_key_range(mem1, max_block, 0x10);
	set_storage_key_range(mem2, max_block, 0x10);
	GUEST_SYNC(STAGE_SKEYS_SET);

	for (int i = 0; i < cmpxchg_iter_outer; i++) {
		do {
			old = 1;
		} while (!_cmpxchg(16, mem1, &old, 0));
		for (int j = 0; j < cmpxchg_iter_inner; j++) {
			choose_block(true, i + j, &size, &offset);
			do {
				new = permutate_bits(true, i + j, size, old);
			} while (!_cmpxchg(size, mem2 + offset, &old, new));
		}
	}

	GUEST_SYNC(STAGE_DONE);
}

static void *run_guest(void *data)
{
	struct test_info *info = data;

	HOST_SYNC(*info, STAGE_DONE);
	return NULL;
}

static char *quad_to_char(__uint128_t *quad, int size)
{
	return ((char *)quad) + (sizeof(*quad) - size);
}
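
/*
 * In the concurrent test the guest waits for the 16-byte value at mem1 to
 * become 1 and swaps in 0, while the host does the reverse through the
 * CMPXCHG memop; in between, both sides apply their popcount-preserving
 * permutations to blocks of guest mem2. The final read-back checks that the
 * racing exchanges did not corrupt the data.
 */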

static void test_cmpxchg_key_concurrent(void)
{
	struct test_default t = test_default_init(guest_cmpxchg_key);
	int size, offset;
	__uint128_t old, new;
	bool success;
	pthread_t thread;

	HOST_SYNC(t.vcpu, STAGE_SKEYS_SET);
	prepare_mem12();
	MOP(t.vcpu, LOGICAL, WRITE, mem1, max_block, GADDR_V(mem2));
	pthread_create(&thread, NULL, run_guest, &t.vcpu);

	for (int i = 0; i < cmpxchg_iter_outer; i++) {
		do {
			old = 0;
			new = 1;
			MOP(t.vm, ABSOLUTE, CMPXCHG, &new,
			    sizeof(new), GADDR_V(mem1),
			    CMPXCHG_OLD(&old),
			    CMPXCHG_SUCCESS(&success), KEY(1));
		} while (!success);
		for (int j = 0; j < cmpxchg_iter_inner; j++) {
			choose_block(false, i + j, &size, &offset);
			do {
				new = permutate_bits(false, i + j, size, old);
				MOP(t.vm, ABSOLUTE, CMPXCHG, quad_to_char(&new, size),
				    size, GADDR_V(mem2 + offset),
				    CMPXCHG_OLD(quad_to_char(&old, size)),
				    CMPXCHG_SUCCESS(&success), KEY(1));
			} while (!success);
		}
	}

	pthread_join(thread, NULL);

	MOP(t.vcpu, LOGICAL, READ, mem2, max_block, GADDR_V(mem2));
	TEST_ASSERT(popcount_eq(*(__uint128_t *)mem1, *(__uint128_t *)mem2),
		    "Must retain number of set bits");

	kvm_vm_free(t.kvm_vm);
}

static void guest_copy_key_fetch_prot(void)
{
	/*
	 * For some reason combining the first sync with override enablement
	 * results in an exception when calling HOST_SYNC.
	 */
	GUEST_SYNC(STAGE_INITED);
	/* Storage protection override applies to both store and fetch. */
	set_storage_key_range(mem1, sizeof(mem1), 0x98);
	set_storage_key_range(mem2, sizeof(mem2), 0x98);
	GUEST_SYNC(STAGE_SKEYS_SET);

	for (;;) {
		memcpy(&mem2, &mem1, sizeof(mem2));
		GUEST_SYNC(STAGE_COPIED);
	}
}

static void test_copy_key_storage_prot_override(void)
{
	struct test_default t = test_default_init(guest_copy_key_fetch_prot);

	HOST_SYNC(t.vcpu, STAGE_INITED);
	t.run->s.regs.crs[0] |= CR0_STORAGE_PROTECTION_OVERRIDE;
	t.run->kvm_dirty_regs = KVM_SYNC_CRS;
	HOST_SYNC(t.vcpu, STAGE_SKEYS_SET);

	/* vcpu, mismatching keys, storage protection override in effect */
	default_write_read(t.vcpu, t.vcpu, LOGICAL, t.size, 2);

	kvm_vm_free(t.kvm_vm);
}

static void test_copy_key_fetch_prot(void)
{
	struct test_default t = test_default_init(guest_copy_key_fetch_prot);

	HOST_SYNC(t.vcpu, STAGE_INITED);
	HOST_SYNC(t.vcpu, STAGE_SKEYS_SET);

	/* vm/vcpu, matching key, fetch protection in effect */
	default_read(t.vcpu, t.vcpu, LOGICAL, t.size, 9);
	default_read(t.vcpu, t.vm, ABSOLUTE, t.size, 9);

	kvm_vm_free(t.kvm_vm);
}
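
/*
 * The storage keys set by the guests carry the access-control value in the
 * top four bits and fetch protection in the next bit: 0x90/0x98 is
 * access-control key 9 (0x98 with fetch protection), 0x18 is key 1 with
 * fetch protection. When a keyed memop is rejected, the ioctl returns the
 * program interruption code rather than -1; ERR_PROT_MOP expects 4, the
 * code for a protection exception.
 */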

#define ERR_PROT_MOP(...)							\
({										\
	int rv;									\
										\
	rv = ERR_MOP(__VA_ARGS__);						\
	TEST_ASSERT(rv == 4, "Should result in protection exception");		\
})

static void guest_error_key(void)
{
	GUEST_SYNC(STAGE_INITED);
	set_storage_key_range(mem1, PAGE_SIZE, 0x18);
	set_storage_key_range(mem1 + PAGE_SIZE, sizeof(mem1) - PAGE_SIZE, 0x98);
	GUEST_SYNC(STAGE_SKEYS_SET);
	GUEST_SYNC(STAGE_IDLED);
}

static void test_errors_key(void)
{
	struct test_default t = test_default_init(guest_error_key);

	HOST_SYNC(t.vcpu, STAGE_INITED);
	HOST_SYNC(t.vcpu, STAGE_SKEYS_SET);

	/* vm/vcpu, mismatching keys, fetch protection in effect */
	CHECK_N_DO(ERR_PROT_MOP, t.vcpu, LOGICAL, WRITE, mem1, t.size, GADDR_V(mem1), KEY(2));
	CHECK_N_DO(ERR_PROT_MOP, t.vcpu, LOGICAL, READ, mem2, t.size, GADDR_V(mem1), KEY(2));
	CHECK_N_DO(ERR_PROT_MOP, t.vm, ABSOLUTE, WRITE, mem1, t.size, GADDR_V(mem1), KEY(2));
	CHECK_N_DO(ERR_PROT_MOP, t.vm, ABSOLUTE, READ, mem2, t.size, GADDR_V(mem1), KEY(2));

	kvm_vm_free(t.kvm_vm);
}

static void test_errors_cmpxchg_key(void)
{
	struct test_default t = test_default_init(guest_copy_key_fetch_prot);
	int i;

	HOST_SYNC(t.vcpu, STAGE_INITED);
	HOST_SYNC(t.vcpu, STAGE_SKEYS_SET);

	for (i = 1; i <= 16; i *= 2) {
		__uint128_t old = 0;

		ERR_PROT_MOP(t.vm, ABSOLUTE, CMPXCHG, mem2, i, GADDR_V(mem2),
			     CMPXCHG_OLD(&old), KEY(2));
	}

	kvm_vm_free(t.kvm_vm);
}
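
/*
 * test_termination() reads the guest lowcore directly: relative to the
 * prefix, offset 464 (0x1d0) is the program new PSW and offset 168 (0xa8)
 * the translation-exception identification.
 */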

static void test_termination(void)
{
	struct test_default t = test_default_init(guest_error_key);
	uint64_t prefix;
	uint64_t teid;
	uint64_t teid_mask = BIT(63 - 56) | BIT(63 - 60) | BIT(63 - 61);
	uint64_t psw[2];

	HOST_SYNC(t.vcpu, STAGE_INITED);
	HOST_SYNC(t.vcpu, STAGE_SKEYS_SET);

	/* vcpu, mismatching keys after first page */
	ERR_PROT_MOP(t.vcpu, LOGICAL, WRITE, mem1, t.size, GADDR_V(mem1), KEY(1), INJECT);
	/*
	 * The memop injected a program exception and the test needs to check the
	 * Translation-Exception Identification (TEID). It is necessary to run
	 * the guest in order to be able to read the TEID from guest memory.
	 * Set the guest program new PSW, so the guest state is not clobbered.
	 */
	prefix = t.run->s.regs.prefix;
	psw[0] = t.run->psw_mask;
	psw[1] = t.run->psw_addr;
	MOP(t.vm, ABSOLUTE, WRITE, psw, sizeof(psw), GADDR(prefix + 464));
	HOST_SYNC(t.vcpu, STAGE_IDLED);
	MOP(t.vm, ABSOLUTE, READ, &teid, sizeof(teid), GADDR(prefix + 168));
	/* Bits 56, 60, 61 form a code, 0 being the only one allowing for termination */
	TEST_ASSERT_EQ(teid & teid_mask, 0);

	kvm_vm_free(t.kvm_vm);
}

static void test_errors_key_storage_prot_override(void)
{
	struct test_default t = test_default_init(guest_copy_key_fetch_prot);

	HOST_SYNC(t.vcpu, STAGE_INITED);
	t.run->s.regs.crs[0] |= CR0_STORAGE_PROTECTION_OVERRIDE;
	t.run->kvm_dirty_regs = KVM_SYNC_CRS;
	HOST_SYNC(t.vcpu, STAGE_SKEYS_SET);

	/* vm, mismatching keys, storage protection override not applicable to vm */
	CHECK_N_DO(ERR_PROT_MOP, t.vm, ABSOLUTE, WRITE, mem1, t.size, GADDR_V(mem1), KEY(2));
	CHECK_N_DO(ERR_PROT_MOP, t.vm, ABSOLUTE, READ, mem2, t.size, GADDR_V(mem2), KEY(2));

	kvm_vm_free(t.kvm_vm);
}

const uint64_t last_page_addr = -PAGE_SIZE;

static void guest_copy_key_fetch_prot_override(void)
{
	int i;
	char *page_0 = 0;

	GUEST_SYNC(STAGE_INITED);
	set_storage_key_range(0, PAGE_SIZE, 0x18);
	set_storage_key_range((void *)last_page_addr, PAGE_SIZE, 0x0);
	asm volatile ("sske %[key],%[addr]\n" :: [addr] "r"(0L), [key] "r"(0x18) : "cc");
	GUEST_SYNC(STAGE_SKEYS_SET);

	for (;;) {
		for (i = 0; i < PAGE_SIZE; i++)
			page_0[i] = mem1[i];
		GUEST_SYNC(STAGE_COPIED);
	}
}

static void test_copy_key_fetch_prot_override(void)
{
	struct test_default t = test_default_init(guest_copy_key_fetch_prot_override);
	vm_vaddr_t guest_0_page, guest_last_page;

	guest_0_page = vm_vaddr_alloc(t.kvm_vm, PAGE_SIZE, 0);
	guest_last_page = vm_vaddr_alloc(t.kvm_vm, PAGE_SIZE, last_page_addr);
	if (guest_0_page != 0 || guest_last_page != last_page_addr) {
		print_skip("did not allocate guest pages at required positions");
		goto out;
	}

	HOST_SYNC(t.vcpu, STAGE_INITED);
	t.run->s.regs.crs[0] |= CR0_FETCH_PROTECTION_OVERRIDE;
	t.run->kvm_dirty_regs = KVM_SYNC_CRS;
	HOST_SYNC(t.vcpu, STAGE_SKEYS_SET);

	/* vcpu, mismatching keys on fetch, fetch protection override applies */
	prepare_mem12();
	MOP(t.vcpu, LOGICAL, WRITE, mem1, PAGE_SIZE, GADDR_V(mem1));
	HOST_SYNC(t.vcpu, STAGE_COPIED);
	CHECK_N_DO(MOP, t.vcpu, LOGICAL, READ, mem2, 2048, GADDR_V(guest_0_page), KEY(2));
	ASSERT_MEM_EQ(mem1, mem2, 2048);

	/*
	 * vcpu, mismatching keys on fetch, fetch protection override applies,
	 * wraparound
	 */
	prepare_mem12();
	MOP(t.vcpu, LOGICAL, WRITE, mem1, 2 * PAGE_SIZE, GADDR_V(guest_last_page));
	HOST_SYNC(t.vcpu, STAGE_COPIED);
	CHECK_N_DO(MOP, t.vcpu, LOGICAL, READ, mem2, PAGE_SIZE + 2048,
		   GADDR_V(guest_last_page), KEY(2));
	ASSERT_MEM_EQ(mem1, mem2, 2048);

out:
	kvm_vm_free(t.kvm_vm);
}
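
/*
 * Fetch protection override only covers the first 2048 bytes at logical
 * address 0, which is why the positive test above reads exactly 2048 bytes
 * and why the error checks below expect a protection exception once the
 * range grows by a single byte or the override is not enabled at all.
 */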

static void test_errors_key_fetch_prot_override_not_enabled(void)
{
	struct test_default t = test_default_init(guest_copy_key_fetch_prot_override);
	vm_vaddr_t guest_0_page, guest_last_page;

	guest_0_page = vm_vaddr_alloc(t.kvm_vm, PAGE_SIZE, 0);
	guest_last_page = vm_vaddr_alloc(t.kvm_vm, PAGE_SIZE, last_page_addr);
	if (guest_0_page != 0 || guest_last_page != last_page_addr) {
		print_skip("did not allocate guest pages at required positions");
		goto out;
	}
	HOST_SYNC(t.vcpu, STAGE_INITED);
	HOST_SYNC(t.vcpu, STAGE_SKEYS_SET);

	/* vcpu, mismatching keys on fetch, fetch protection override not enabled */
	CHECK_N_DO(ERR_PROT_MOP, t.vcpu, LOGICAL, READ, mem2, 2048, GADDR_V(0), KEY(2));

out:
	kvm_vm_free(t.kvm_vm);
}

static void test_errors_key_fetch_prot_override_enabled(void)
{
	struct test_default t = test_default_init(guest_copy_key_fetch_prot_override);
	vm_vaddr_t guest_0_page, guest_last_page;

	guest_0_page = vm_vaddr_alloc(t.kvm_vm, PAGE_SIZE, 0);
	guest_last_page = vm_vaddr_alloc(t.kvm_vm, PAGE_SIZE, last_page_addr);
	if (guest_0_page != 0 || guest_last_page != last_page_addr) {
		print_skip("did not allocate guest pages at required positions");
		goto out;
	}
	HOST_SYNC(t.vcpu, STAGE_INITED);
	t.run->s.regs.crs[0] |= CR0_FETCH_PROTECTION_OVERRIDE;
	t.run->kvm_dirty_regs = KVM_SYNC_CRS;
	HOST_SYNC(t.vcpu, STAGE_SKEYS_SET);

	/*
	 * vcpu, mismatching keys on fetch,
	 * fetch protection override does not apply because memory range exceeded
	 */
	CHECK_N_DO(ERR_PROT_MOP, t.vcpu, LOGICAL, READ, mem2, 2048 + 1, GADDR_V(0), KEY(2));
	CHECK_N_DO(ERR_PROT_MOP, t.vcpu, LOGICAL, READ, mem2, PAGE_SIZE + 2048 + 1,
		   GADDR_V(guest_last_page), KEY(2));
	/* vm, fetch protection override does not apply */
	CHECK_N_DO(ERR_PROT_MOP, t.vm, ABSOLUTE, READ, mem2, 2048, GADDR(0), KEY(2));
	CHECK_N_DO(ERR_PROT_MOP, t.vm, ABSOLUTE, READ, mem2, 2048, GADDR_V(guest_0_page), KEY(2));

out:
	kvm_vm_free(t.kvm_vm);
}

static void guest_idle(void)
{
	GUEST_SYNC(STAGE_INITED); /* for consistency's sake */
	for (;;)
		GUEST_SYNC(STAGE_IDLED);
}

static void _test_errors_common(struct test_info info, enum mop_target target, int size)
{
	int rv;

	/* Bad size: */
	rv = ERR_MOP(info, target, WRITE, mem1, -1, GADDR_V(mem1));
	TEST_ASSERT(rv == -1 && errno == E2BIG, "ioctl allows insane sizes");

	/* Zero size: */
	rv = ERR_MOP(info, target, WRITE, mem1, 0, GADDR_V(mem1));
	TEST_ASSERT(rv == -1 && (errno == EINVAL || errno == ENOMEM),
		    "ioctl allows 0 as size");

	/* Bad flags: */
	rv = ERR_MOP(info, target, WRITE, mem1, size, GADDR_V(mem1), SET_FLAGS(-1));
	TEST_ASSERT(rv == -1 && errno == EINVAL, "ioctl allows all flags");

	/* Bad guest address: */
	rv = ERR_MOP(info, target, WRITE, mem1, size, GADDR((void *)~0xfffUL), CHECK_ONLY);
	TEST_ASSERT(rv > 0, "ioctl does not report bad guest memory address with CHECK_ONLY");
	rv = ERR_MOP(info, target, WRITE, mem1, size, GADDR((void *)~0xfffUL));
	TEST_ASSERT(rv > 0, "ioctl does not report bad guest memory address on write");

	/* Bad host address: */
	rv = ERR_MOP(info, target, WRITE, 0, size, GADDR_V(mem1));
	TEST_ASSERT(rv == -1 && errno == EFAULT,
		    "ioctl does not report bad host memory address");

	/* Bad key: */
	rv = ERR_MOP(info, target, WRITE, mem1, size, GADDR_V(mem1), KEY(17));
	TEST_ASSERT(rv == -1 && errno == EINVAL, "ioctl allows invalid key");
}
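
/*
 * Two failure conventions show up in the error tests: invalid arguments make
 * the ioctl fail with -1 and an errno (E2BIG, EINVAL, EFAULT), whereas faults
 * on the guest side (nonexistent guest address, key-protected access) are
 * reported as a positive program interruption code.
 */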

static void test_errors(void)
{
	struct test_default t = test_default_init(guest_idle);
	int rv;

	HOST_SYNC(t.vcpu, STAGE_INITED);

	_test_errors_common(t.vcpu, LOGICAL, t.size);
	_test_errors_common(t.vm, ABSOLUTE, t.size);

	/* Bad operation: */
	rv = ERR_MOP(t.vcpu, INVALID, WRITE, mem1, t.size, GADDR_V(mem1));
	TEST_ASSERT(rv == -1 && errno == EINVAL, "ioctl allows bad operations");
	/* virtual addresses are not translated when passing INVALID */
	rv = ERR_MOP(t.vm, INVALID, WRITE, mem1, PAGE_SIZE, GADDR(0));
	TEST_ASSERT(rv == -1 && errno == EINVAL, "ioctl allows bad operations");

	/* Bad access register: */
	t.run->psw_mask &= ~(3UL << (63 - 17));
	t.run->psw_mask |= 1UL << (63 - 17); /* Enable AR mode */
	HOST_SYNC(t.vcpu, STAGE_IDLED); /* To sync new state to SIE block */
	rv = ERR_MOP(t.vcpu, LOGICAL, WRITE, mem1, t.size, GADDR_V(mem1), AR(17));
	TEST_ASSERT(rv == -1 && errno == EINVAL, "ioctl allows ARs > 15");
	t.run->psw_mask &= ~(3UL << (63 - 17)); /* Disable AR mode */
	HOST_SYNC(t.vcpu, STAGE_IDLED); /* Run to sync new state */

	/* Check that the SIDA calls are rejected for non-protected guests */
	rv = ERR_MOP(t.vcpu, SIDA, READ, mem1, 8, GADDR(0), SIDA_OFFSET(0x1c0));
	TEST_ASSERT(rv == -1 && errno == EINVAL,
		    "ioctl does not reject SIDA_READ in non-protected mode");
	rv = ERR_MOP(t.vcpu, SIDA, WRITE, mem1, 8, GADDR(0), SIDA_OFFSET(0x1c0));
	TEST_ASSERT(rv == -1 && errno == EINVAL,
		    "ioctl does not reject SIDA_WRITE in non-protected mode");

	kvm_vm_free(t.kvm_vm);
}

static void test_errors_cmpxchg(void)
{
	struct test_default t = test_default_init(guest_idle);
	__uint128_t old;
	int rv, i, power = 1;

	HOST_SYNC(t.vcpu, STAGE_INITED);

	for (i = 0; i < 32; i++) {
		if (i == power) {
			power *= 2;
			continue;
		}
		rv = ERR_MOP(t.vm, ABSOLUTE, CMPXCHG, mem1, i, GADDR_V(mem1),
			     CMPXCHG_OLD(&old));
		TEST_ASSERT(rv == -1 && errno == EINVAL,
			    "ioctl allows bad size for cmpxchg");
	}
	for (i = 1; i <= 16; i *= 2) {
		rv = ERR_MOP(t.vm, ABSOLUTE, CMPXCHG, mem1, i, GADDR((void *)~0xfffUL),
			     CMPXCHG_OLD(&old));
		TEST_ASSERT(rv > 0, "ioctl allows bad guest address for cmpxchg");
	}
	for (i = 2; i <= 16; i *= 2) {
		rv = ERR_MOP(t.vm, ABSOLUTE, CMPXCHG, mem1, i, GADDR_V(mem1 + 1),
			     CMPXCHG_OLD(&old));
		TEST_ASSERT(rv == -1 && errno == EINVAL,
			    "ioctl allows bad alignment for cmpxchg");
	}

	kvm_vm_free(t.kvm_vm);
}
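
/*
 * main() gates the test list on KVM_CAP_S390_MEM_OP_EXTENSION: any non-zero
 * value is treated as storage-key support, and bit 0x2 as support for the
 * absolute cmpxchg memop.
 */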
"error checks for cmpxchg with key",1142.test = test_errors_cmpxchg_key,1143.requirements_met = extension_cap & 0x2,1144},1145{1146.name = "error checks for cmpxchg",1147.test = test_errors_cmpxchg,1148.requirements_met = extension_cap & 0x2,1149},1150{1151.name = "termination",1152.test = test_termination,1153.requirements_met = extension_cap > 0,1154},1155{1156.name = "error checks with key storage protection override",1157.test = test_errors_key_storage_prot_override,1158.requirements_met = extension_cap > 0,1159},1160{1161.name = "error checks without key fetch prot override",1162.test = test_errors_key_fetch_prot_override_not_enabled,1163.requirements_met = extension_cap > 0,1164},1165{1166.name = "error checks with key fetch prot override",1167.test = test_errors_key_fetch_prot_override_enabled,1168.requirements_met = extension_cap > 0,1169},1170};11711172ksft_print_header();1173ksft_set_plan(ARRAY_SIZE(testlist));11741175for (idx = 0; idx < ARRAY_SIZE(testlist); idx++) {1176if (testlist[idx].requirements_met) {1177testlist[idx].test();1178ksft_test_result_pass("%s\n", testlist[idx].name);1179} else {1180ksft_test_result_skip("%s - requirements not met (kernel has extension cap %#x)\n",1181testlist[idx].name, extension_cap);1182}1183}11841185ksft_finished(); /* Print results and exit() accordingly */1186}118711881189