// SPDX-License-Identifier: GPL-2.0
/*
 * handling diagnose instructions
 *
 * Copyright IBM Corp. 2008, 2020
 *
 * Author(s): Carsten Otte <[email protected]>
 *            Christian Borntraeger <[email protected]>
 */

#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <asm/gmap.h>
#include <asm/gmap_helpers.h>
#include <asm/virtio-ccw.h>
#include "kvm-s390.h"
#include "trace.h"
#include "trace-s390.h"
#include "gaccess.h"
/*
 * Discard the host backing of a guest frame range [gfn_start, gfn_end).
 *
 * Walks every memslot overlapping the range, clamps the request to the part
 * covered by each slot, and drops the corresponding userspace address range.
 * Empty or fully-out-of-slot ranges simply produce no iterations.  Callers
 * hold the mmap_lock of vcpu->kvm->mm for reading (see diag_release_pages()).
 */
static void do_discard_gfn_range(struct kvm_vcpu *vcpu, gfn_t gfn_start, gfn_t gfn_end)
{
	struct kvm_memslots *memslots = kvm_vcpu_memslots(vcpu);
	struct kvm_memslot_iter it;

	kvm_for_each_memslot_in_gfn_range(&it, memslots, gfn_start, gfn_end) {
		struct kvm_memory_slot *ms = it.slot;
		unsigned long hva_from, hva_to;

		/* Clamp the requested range to this memslot. */
		hva_from = __gfn_to_hva_memslot(ms, max(gfn_start, ms->base_gfn));
		hva_to = __gfn_to_hva_memslot(ms, min(gfn_end, ms->base_gfn + ms->npages));
		gmap_helper_discard(vcpu->kvm->mm, hva_from, hva_to);
	}
}
static int diag_release_pages(struct kvm_vcpu *vcpu)
39
{
40
unsigned long start, end;
41
unsigned long prefix = kvm_s390_get_prefix(vcpu);
42
43
start = vcpu->run->s.regs.gprs[(vcpu->arch.sie_block->ipa & 0xf0) >> 4];
44
end = vcpu->run->s.regs.gprs[vcpu->arch.sie_block->ipa & 0xf] + PAGE_SIZE;
45
vcpu->stat.instruction_diagnose_10++;
46
47
if (start & ~PAGE_MASK || end & ~PAGE_MASK || start >= end
48
|| start < 2 * PAGE_SIZE)
49
return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
50
51
VCPU_EVENT(vcpu, 5, "diag release pages %lX %lX", start, end);
52
53
mmap_read_lock(vcpu->kvm->mm);
54
/*
55
* We checked for start >= end above, so lets check for the
56
* fast path (no prefix swap page involved)
57
*/
58
if (end <= prefix || start >= prefix + 2 * PAGE_SIZE) {
59
do_discard_gfn_range(vcpu, gpa_to_gfn(start), gpa_to_gfn(end));
60
} else {
61
/*
62
* This is slow path. gmap_discard will check for start
63
* so lets split this into before prefix, prefix, after
64
* prefix and let gmap_discard make some of these calls
65
* NOPs.
66
*/
67
do_discard_gfn_range(vcpu, gpa_to_gfn(start), gpa_to_gfn(prefix));
68
if (start <= prefix)
69
do_discard_gfn_range(vcpu, 0, 1);
70
if (end > prefix + PAGE_SIZE)
71
do_discard_gfn_range(vcpu, 1, 2);
72
do_discard_gfn_range(vcpu, gpa_to_gfn(prefix) + 2, gpa_to_gfn(end));
73
}
74
mmap_read_unlock(vcpu->kvm->mm);
75
return 0;
76
}
77
78
/*
 * Diagnose 0x258: page-reference services (pfault handshake).
 *
 * Reads a parameter block from guest real memory (address in the register
 * named by the ipa) and either establishes (subcode 0, TOKEN) or cancels
 * (subcode 1, CANCEL) the asynchronous page fault token.  Status returned
 * to the guest in gpr ry follows SC24-6084; unsupported subcodes yield
 * -EOPNOTSUPP so the intercept can be forwarded to userspace.
 */
static int __diag_page_ref_service(struct kvm_vcpu *vcpu)
{
	/* Layout of the diag 0x258 parameter block in guest memory. */
	struct prs_parm {
		u16 code;
		u16 subcode;
		u16 parm_len;
		u16 parm_version;
		u64 token_addr;
		u64 select_mask;
		u64 compare_mask;
		u64 zarch;
	};
	struct prs_parm parm;
	int rc;
	u16 rx = (vcpu->arch.sie_block->ipa & 0xf0) >> 4;
	u16 ry = (vcpu->arch.sie_block->ipa & 0x0f);

	VCPU_EVENT(vcpu, 3, "diag page reference parameter block at 0x%llx",
		   vcpu->run->s.regs.gprs[rx]);
	vcpu->stat.instruction_diagnose_258++;
	/* The parameter block address must be doubleword aligned. */
	if (vcpu->run->s.regs.gprs[rx] & 7)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	rc = read_guest_real(vcpu, vcpu->run->s.regs.gprs[rx], &parm, sizeof(parm));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);
	/* Only version 2 blocks of sufficient length with code 0x258 are valid. */
	if (parm.parm_version != 2 || parm.parm_len < 5 || parm.code != 0x258)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	switch (parm.subcode) {
	case 0: /* TOKEN */
		VCPU_EVENT(vcpu, 3, "pageref token addr 0x%llx "
			   "select mask 0x%llx compare mask 0x%llx",
			   parm.token_addr, parm.select_mask, parm.compare_mask);
		if (vcpu->arch.pfault_token != KVM_S390_PFAULT_TOKEN_INVALID) {
			/*
			 * If the pagefault handshake is already activated,
			 * the token must not be changed.  We have to return
			 * decimal 8 instead, as mandated in SC24-6084.
			 */
			vcpu->run->s.regs.gprs[ry] = 8;
			return 0;
		}

		/*
		 * The compare mask must be a subset of the select mask, the
		 * token address doubleword aligned, and the zarch field the
		 * architected constant.
		 */
		if ((parm.compare_mask & parm.select_mask) != parm.compare_mask ||
		    parm.token_addr & 7 || parm.zarch != 0x8000000000000000ULL)
			return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

		/* The token must live in guest memory backed by a memslot. */
		if (!kvm_is_gpa_in_memslot(vcpu->kvm, parm.token_addr))
			return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

		vcpu->arch.pfault_token = parm.token_addr;
		vcpu->arch.pfault_select = parm.select_mask;
		vcpu->arch.pfault_compare = parm.compare_mask;
		vcpu->run->s.regs.gprs[ry] = 0;
		rc = 0;
		break;
	case 1: /*
		 * CANCEL
		 * Specification allows to let already pending tokens survive
		 * the cancel, therefore to reduce code complexity, we assume
		 * all outstanding tokens are already pending.
		 */
		VCPU_EVENT(vcpu, 3, "pageref cancel addr 0x%llx", parm.token_addr);
		/* All parameter fields must be zero for CANCEL. */
		if (parm.token_addr || parm.select_mask ||
		    parm.compare_mask || parm.zarch)
			return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

		vcpu->run->s.regs.gprs[ry] = 0;
		/*
		 * If the pfault handling was not established or is already
		 * canceled SC24-6084 requests to return decimal 4.
		 */
		if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
			vcpu->run->s.regs.gprs[ry] = 4;
		else
			vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;

		rc = 0;
		break;
	default:
		/* Unknown subcode: let userspace handle the diagnose. */
		rc = -EOPNOTSUPP;
		break;
	}

	return rc;
}
static int __diag_time_slice_end(struct kvm_vcpu *vcpu)
166
{
167
VCPU_EVENT(vcpu, 5, "%s", "diag time slice end");
168
vcpu->stat.instruction_diagnose_44++;
169
kvm_vcpu_on_spin(vcpu, true);
170
return 0;
171
}
172
173
/* Budget of diag 0x9c yield forwards left in the current jiffy. */
static int forward_cnt;
/* Jiffy value the current budget belongs to. */
static unsigned long cur_slice;

/*
 * Rate-limit diag 0x9c yield forwarding to diag9c_forwarding_hz per second.
 *
 * Refills the per-jiffy budget (diag9c_forwarding_hz / HZ) whenever a new
 * jiffy starts and consumes one unit per call.  Returns 1 when the budget
 * is exhausted (the caller should not forward), 0 otherwise.
 *
 * NOTE(review): forward_cnt/cur_slice are plain shared globals updated
 * without locking or atomics, so concurrent vCPUs can race and make the
 * count slightly imprecise — presumably acceptable for a rate limiter,
 * but confirm this is intentional.
 */
static int diag9c_forwarding_overrun(void)
{
	/* Reset the count on a new slice */
	if (time_after(jiffies, cur_slice)) {
		cur_slice = jiffies;
		forward_cnt = diag9c_forwarding_hz / HZ;
	}
	return forward_cnt-- <= 0 ? 1 : 0;
}
static int __diag_time_slice_end_directed(struct kvm_vcpu *vcpu)
187
{
188
struct kvm_vcpu *tcpu;
189
int tcpu_cpu;
190
int tid;
191
192
tid = vcpu->run->s.regs.gprs[(vcpu->arch.sie_block->ipa & 0xf0) >> 4];
193
vcpu->stat.instruction_diagnose_9c++;
194
195
/* yield to self */
196
if (tid == vcpu->vcpu_id)
197
goto no_yield;
198
199
/* yield to invalid */
200
tcpu = kvm_get_vcpu_by_id(vcpu->kvm, tid);
201
if (!tcpu)
202
goto no_yield;
203
204
/* target guest VCPU already running */
205
tcpu_cpu = READ_ONCE(tcpu->cpu);
206
if (tcpu_cpu >= 0) {
207
if (!diag9c_forwarding_hz || diag9c_forwarding_overrun())
208
goto no_yield;
209
210
/* target host CPU already running */
211
if (!vcpu_is_preempted(tcpu_cpu))
212
goto no_yield;
213
smp_yield_cpu(tcpu_cpu);
214
VCPU_EVENT(vcpu, 5,
215
"diag time slice end directed to %d: yield forwarded",
216
tid);
217
vcpu->stat.diag_9c_forward++;
218
return 0;
219
}
220
221
if (kvm_vcpu_yield_to(tcpu) <= 0)
222
goto no_yield;
223
224
VCPU_EVENT(vcpu, 5, "diag time slice end directed to %d: done", tid);
225
return 0;
226
no_yield:
227
VCPU_EVENT(vcpu, 5, "diag time slice end directed to %d: ignored", tid);
228
vcpu->stat.diag_9c_ignored++;
229
return 0;
230
}
231
232
static int __diag_ipl_functions(struct kvm_vcpu *vcpu)
233
{
234
unsigned int reg = vcpu->arch.sie_block->ipa & 0xf;
235
unsigned long subcode = vcpu->run->s.regs.gprs[reg] & 0xffff;
236
237
VCPU_EVENT(vcpu, 3, "diag ipl functions, subcode %lx", subcode);
238
vcpu->stat.instruction_diagnose_308++;
239
switch (subcode) {
240
case 3:
241
vcpu->run->s390_reset_flags = KVM_S390_RESET_CLEAR;
242
break;
243
case 4:
244
vcpu->run->s390_reset_flags = 0;
245
break;
246
default:
247
return -EOPNOTSUPP;
248
}
249
250
/*
251
* no need to check the return value of vcpu_stop as it can only have
252
* an error for protvirt, but protvirt means user cpu state
253
*/
254
if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm))
255
kvm_s390_vcpu_stop(vcpu);
256
vcpu->run->s390_reset_flags |= KVM_S390_RESET_SUBSYSTEM;
257
vcpu->run->s390_reset_flags |= KVM_S390_RESET_IPL;
258
vcpu->run->s390_reset_flags |= KVM_S390_RESET_CPU_INIT;
259
vcpu->run->exit_reason = KVM_EXIT_S390_RESET;
260
VCPU_EVENT(vcpu, 3, "requesting userspace resets %llx",
261
vcpu->run->s390_reset_flags);
262
trace_kvm_s390_request_resets(vcpu->run->s390_reset_flags);
263
return -EREMOTE;
264
}
265
266
static int __diag_virtio_hypercall(struct kvm_vcpu *vcpu)
267
{
268
int ret;
269
270
vcpu->stat.instruction_diagnose_500++;
271
/* No virtio-ccw notification? Get out quickly. */
272
if (!vcpu->kvm->arch.css_support ||
273
(vcpu->run->s.regs.gprs[1] != KVM_S390_VIRTIO_CCW_NOTIFY))
274
return -EOPNOTSUPP;
275
276
VCPU_EVENT(vcpu, 4, "diag 0x500 schid 0x%8.8x queue 0x%x cookie 0x%llx",
277
(u32) vcpu->run->s.regs.gprs[2],
278
(u32) vcpu->run->s.regs.gprs[3],
279
vcpu->run->s.regs.gprs[4]);
280
281
/*
282
* The layout is as follows:
283
* - gpr 2 contains the subchannel id (passed as addr)
284
* - gpr 3 contains the virtqueue index (passed as datamatch)
285
* - gpr 4 contains the index on the bus (optionally)
286
*/
287
ret = kvm_io_bus_write_cookie(vcpu, KVM_VIRTIO_CCW_NOTIFY_BUS,
288
vcpu->run->s.regs.gprs[2] & 0xffffffff,
289
8, &vcpu->run->s.regs.gprs[3],
290
vcpu->run->s.regs.gprs[4]);
291
292
/*
293
* Return cookie in gpr 2, but don't overwrite the register if the
294
* diagnose will be handled by userspace.
295
*/
296
if (ret != -EOPNOTSUPP)
297
vcpu->run->s.regs.gprs[2] = ret;
298
/* kvm_io_bus_write_cookie returns -EOPNOTSUPP if it found no match. */
299
return ret < 0 ? ret : 0;
300
}
301
302
int kvm_s390_handle_diag(struct kvm_vcpu *vcpu)
303
{
304
int code = kvm_s390_get_base_disp_rs(vcpu, NULL) & 0xffff;
305
306
if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
307
return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
308
309
trace_kvm_s390_handle_diag(vcpu, code);
310
switch (code) {
311
case 0x10:
312
return diag_release_pages(vcpu);
313
case 0x44:
314
return __diag_time_slice_end(vcpu);
315
case 0x9c:
316
return __diag_time_slice_end_directed(vcpu);
317
case 0x258:
318
return __diag_page_ref_service(vcpu);
319
case 0x308:
320
return __diag_ipl_functions(vcpu);
321
case 0x500:
322
return __diag_virtio_hypercall(vcpu);
323
default:
324
vcpu->stat.instruction_diagnose_other++;
325
return -EOPNOTSUPP;
326
}
327
}
328
329