GitHub Repository: torvalds/linux
Path: arch/s390/kvm/gaccess.h
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * access guest memory
 *
 * Copyright IBM Corp. 2008, 2014
 *
 * Author(s): Carsten Otte <[email protected]>
 */

#ifndef __KVM_S390_GACCESS_H
#define __KVM_S390_GACCESS_H

#include <linux/compiler.h>
#include <linux/kvm_host.h>
#include <linux/uaccess.h>
#include <linux/ptrace.h>
#include "kvm-s390.h"

/**
 * _kvm_s390_real_to_abs - convert guest real address to guest absolute address
 * @prefix: guest prefix
 * @gra: guest real address
 *
 * Returns the guest absolute address that corresponds to the passed guest real
 * address @gra by applying the given prefix.
 */
static inline unsigned long _kvm_s390_real_to_abs(u32 prefix, unsigned long gra)
{
	if (gra < 2 * PAGE_SIZE)
		gra += prefix;
	else if (gra >= prefix && gra < prefix + 2 * PAGE_SIZE)
		gra -= prefix;
	return gra;
}
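
/*
 * Worked example (an illustrative sketch, assuming PAGE_SIZE == 0x1000;
 * not part of the original header): with a guest prefix of 0x20000 the
 * prefix swap works in both directions:
 *
 *	_kvm_s390_real_to_abs(0x20000, 0x0000)  == 0x20000
 *	_kvm_s390_real_to_abs(0x20000, 0x21000) == 0x01000
 *	_kvm_s390_real_to_abs(0x20000, 0x50000) == 0x50000
 *
 * Real addresses in the first two pages map to the prefix area, the
 * prefix area maps back to the first two pages, and all other addresses
 * pass through unchanged.
 */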

/**
 * kvm_s390_real_to_abs - convert guest real address to guest absolute address
 * @vcpu: guest virtual cpu
 * @gra: guest real address
 *
 * Returns the guest absolute address that corresponds to the passed guest real
 * address @gra of a virtual guest cpu by applying its prefix.
 */
static inline unsigned long kvm_s390_real_to_abs(struct kvm_vcpu *vcpu,
						 unsigned long gra)
{
	return _kvm_s390_real_to_abs(kvm_s390_get_prefix(vcpu), gra);
}

/**
 * _kvm_s390_logical_to_effective - convert guest logical to effective address
 * @psw: psw of the guest
 * @ga: guest logical address
 *
 * Convert a guest logical address to an effective address by applying the
 * rules of the addressing mode defined by bits 31 and 32 of the given PSW
 * (extended/basic addressing mode).
 *
 * Depending on the addressing mode, the upper 40 bits (24 bit addressing
 * mode), 33 bits (31 bit addressing mode) or no bits (64 bit addressing
 * mode) of @ga will be zeroed and the remaining bits will be returned.
 */
static inline unsigned long _kvm_s390_logical_to_effective(psw_t *psw,
							   unsigned long ga)
{
	if (psw_bits(*psw).eaba == PSW_BITS_AMODE_64BIT)
		return ga;
	if (psw_bits(*psw).eaba == PSW_BITS_AMODE_31BIT)
		return ga & ((1UL << 31) - 1);
	return ga & ((1UL << 24) - 1);
}
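
/*
 * Example (an illustrative sketch, not part of the original header):
 * in the 24-bit addressing mode only the low 24 bits survive, so
 *
 *	_kvm_s390_logical_to_effective(&psw, 0xdeadbeefUL) == 0x00adbeef
 *
 * while in the 31-bit mode the same address becomes 0x5eadbeef, and in
 * the 64-bit mode it is returned unchanged.
 */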

/**
 * kvm_s390_logical_to_effective - convert guest logical to effective address
 * @vcpu: guest virtual cpu
 * @ga: guest logical address
 *
 * Convert a guest vcpu logical address to a guest vcpu effective address by
 * applying the rules of the vcpu's addressing mode defined by PSW bits 31
 * and 32 (extended/basic addressing mode).
 *
 * Depending on the vcpu's addressing mode the upper 40 bits (24 bit addressing
 * mode), 33 bits (31 bit addressing mode) or no bits (64 bit addressing mode)
 * of @ga will be zeroed and the remaining bits will be returned.
 */
static inline unsigned long kvm_s390_logical_to_effective(struct kvm_vcpu *vcpu,
							  unsigned long ga)
{
	return _kvm_s390_logical_to_effective(&vcpu->arch.sie_block->gpsw, ga);
}

/*
 * put_guest_lc, read_guest_lc and write_guest_lc are guest access functions
 * which shall only be used to access the lowcore of a vcpu.
 * These functions should be used for e.g. interrupt handlers where no
 * guest memory access protection facilities, like key or low address
 * protection, are applicable.
 * At a later point guest vcpu lowcore access should happen via pinned
 * prefix pages, so that these pages can be accessed directly via the
 * kernel mapping. All of these *_lc functions can be removed then.
 */

/**
 * put_guest_lc - write a simple variable to a guest vcpu's lowcore
 * @vcpu: virtual cpu
 * @x: value to copy to guest
 * @gra: vcpu's destination guest real address
 *
 * Copies a simple value from kernel space to a guest vcpu's lowcore.
 * The size of the variable may be 1, 2, 4 or 8 bytes. The destination
 * must be located in the vcpu's lowcore. Otherwise the result is undefined.
 *
 * Returns zero on success or -EFAULT on error.
 *
 * Note: an error indicates that either the kernel is out of memory or
 * the guest memory mapping is broken. In any case the best solution
 * would be to terminate the guest.
 * It is wrong to inject a guest exception.
 */
#define put_guest_lc(vcpu, x, gra)				\
({								\
	struct kvm_vcpu *__vcpu = (vcpu);			\
	__typeof__(*(gra)) __x = (x);				\
	unsigned long __gpa;					\
								\
	__gpa = (unsigned long)(gra);				\
	__gpa += kvm_s390_get_prefix(__vcpu);			\
	kvm_write_guest(__vcpu->kvm, __gpa, &__x, sizeof(__x));	\
})
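
/*
 * Usage sketch (illustrative, not from the original header; assumes the
 * __LC_EXT_INT_CODE lowcore offset from asm/lowcore.h): store a 16-bit
 * interruption code into the vcpu's lowcore and bail out if the mapping
 * is broken. The pointer type of the third argument determines the
 * access size.
 *
 *	u16 ext_code = 0x1004;
 *	int rc = put_guest_lc(vcpu, ext_code, (u16 *)__LC_EXT_INT_CODE);
 *	if (rc)
 *		return rc;
 */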

/**
 * write_guest_lc - copy data from kernel space to guest vcpu's lowcore
 * @vcpu: virtual cpu
 * @gra: vcpu's destination guest real address
 * @data: source address in kernel space
 * @len: number of bytes to copy
 *
 * Copy data from kernel space to guest vcpu's lowcore. The entire range must
 * be located within the vcpu's lowcore, otherwise the result is undefined.
 *
 * Returns zero on success or -EFAULT on error.
 *
 * Note: an error indicates that either the kernel is out of memory or
 * the guest memory mapping is broken. In any case the best solution
 * would be to terminate the guest.
 * It is wrong to inject a guest exception.
 */
static inline __must_check
int write_guest_lc(struct kvm_vcpu *vcpu, unsigned long gra, void *data,
		   unsigned long len)
{
	unsigned long gpa = gra + kvm_s390_get_prefix(vcpu);

	return kvm_write_guest(vcpu->kvm, gpa, data, len);
}

/**
 * read_guest_lc - copy data from guest vcpu's lowcore to kernel space
 * @vcpu: virtual cpu
 * @gra: vcpu's source guest real address
 * @data: destination address in kernel space
 * @len: number of bytes to copy
 *
 * Copy data from guest vcpu's lowcore to kernel space. The entire range must
 * be located within the vcpu's lowcore, otherwise the result is undefined.
 *
 * Returns zero on success or -EFAULT on error.
 *
 * Note: an error indicates that either the kernel is out of memory or
 * the guest memory mapping is broken. In any case the best solution
 * would be to terminate the guest.
 * It is wrong to inject a guest exception.
 */
static inline __must_check
int read_guest_lc(struct kvm_vcpu *vcpu, unsigned long gra, void *data,
		  unsigned long len)
{
	unsigned long gpa = gra + kvm_s390_get_prefix(vcpu);

	return kvm_read_guest(vcpu->kvm, gpa, data, len);
}

enum gacc_mode {
	GACC_FETCH,	/* data read (fetch) access */
	GACC_STORE,	/* data write (store) access */
	GACC_IFETCH,	/* instruction fetch access */
};

int guest_translate_address_with_key(struct kvm_vcpu *vcpu, unsigned long gva, u8 ar,
				     unsigned long *gpa, enum gacc_mode mode,
				     u8 access_key);

int check_gva_range(struct kvm_vcpu *vcpu, unsigned long gva, u8 ar,
		    unsigned long length, enum gacc_mode mode, u8 access_key);

int check_gpa_range(struct kvm *kvm, unsigned long gpa, unsigned long length,
		    enum gacc_mode mode, u8 access_key);

int access_guest_abs_with_key(struct kvm *kvm, gpa_t gpa, void *data,
			      unsigned long len, enum gacc_mode mode, u8 access_key);

int access_guest_with_key(struct kvm_vcpu *vcpu, unsigned long ga, u8 ar,
			  void *data, unsigned long len, enum gacc_mode mode,
			  u8 access_key);

int access_guest_real(struct kvm_vcpu *vcpu, unsigned long gra,
		      void *data, unsigned long len, enum gacc_mode mode);

int cmpxchg_guest_abs_with_key(struct kvm *kvm, gpa_t gpa, int len, __uint128_t *old,
			       __uint128_t new, u8 access_key, bool *success);
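
/*
 * Sketch (illustrative, not from the original header) of a
 * cmpxchg_guest_abs_with_key() calling pattern: @success reports whether
 * the compare matched, while a nonzero return signals an access problem.
 * should_retry() stands in for whatever retry policy the caller applies,
 * and this sketch assumes *old is refreshed on a failed compare.
 *
 *	__uint128_t old = expected;
 *	bool success;
 *	int rc;
 *
 *	do {
 *		rc = cmpxchg_guest_abs_with_key(kvm, gpa, 16, &old,
 *						new_val, access_key,
 *						&success);
 *	} while (!rc && !success && should_retry());
 */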

/**
 * write_guest_with_key - copy data from kernel space to guest space
 * @vcpu: virtual cpu
 * @ga: guest address
 * @ar: access register
 * @data: source address in kernel space
 * @len: number of bytes to copy
 * @access_key: access key the storage key needs to match
 *
 * Copy @len bytes from @data (kernel space) to @ga (guest address).
 * In order to copy data to guest space the PSW of the vcpu is inspected:
 * If DAT is off data will be copied to guest real or absolute memory.
 * If DAT is on data will be copied to the address space as specified by
 * the address space bits of the PSW:
 * Primary, secondary, home space or access register mode.
 * The addressing mode of the PSW is also inspected, so that address wrap
 * around is taken into account for 24-, 31- and 64-bit addressing mode,
 * if the data to be copied crosses page boundaries in guest address space.
 * In addition low address, DAT and key protection checks are performed before
 * copying any data.
 *
 * This function modifies the 'struct kvm_s390_pgm_info pgm' member of @vcpu.
 * In case of an access exception (e.g. protection exception) pgm will contain
 * all data necessary so that a subsequent call to 'kvm_s390_inject_prog_vcpu()'
 * will inject a correct exception into the guest.
 * If no access exception happened, the contents of pgm are undefined when
 * this function returns.
 *
 * Returns: - zero on success
 *	    - a negative value if e.g. the guest mapping is broken or in
 *	      case of out-of-memory. In this case the contents of pgm are
 *	      undefined. Also parts of @data may have been copied to guest
 *	      space.
 *	    - a positive value if an access exception happened. In this case
 *	      the returned value is the program interruption code and the
 *	      contents of pgm may be used to inject an exception into the
 *	      guest. No data has been copied to guest space.
 *
 * Note: in case an access exception is recognized no data has been copied to
 *	 guest space (this is also true, if the data to be copied would cross
 *	 one or more page boundaries in guest space).
 *	 Therefore this function may be used for nullifying and suppressing
 *	 instruction emulation.
 *	 It may also be used for terminating instructions, if it is undefined
 *	 whether data has been changed in guest space in case of an exception.
 */
static inline __must_check
int write_guest_with_key(struct kvm_vcpu *vcpu, unsigned long ga, u8 ar,
			 void *data, unsigned long len, u8 access_key)
{
	return access_guest_with_key(vcpu, ga, ar, data, len, GACC_STORE,
				     access_key);
}
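
/*
 * Sketch of the typical error-handling pattern for these accessors
 * (illustrative; ga, ar and val stand in for an emulated instruction's
 * operands): kvm_s390_inject_prog_cond() from "kvm-s390.h" passes
 * negative return values through and injects a program exception for
 * positive ones, matching the return convention documented above.
 *
 *	u64 val = 0;
 *	int rc = write_guest_with_key(vcpu, ga, ar, &val, sizeof(val),
 *				      access_key);
 *	if (rc)
 *		return kvm_s390_inject_prog_cond(vcpu, rc);
 *
 * This keeps instruction emulation nullifying: nothing reaches guest
 * memory when an access exception is recognized.
 */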

/**
 * write_guest - copy data from kernel space to guest space
 * @vcpu: virtual cpu
 * @ga: guest address
 * @ar: access register
 * @data: source address in kernel space
 * @len: number of bytes to copy
 *
 * The behaviour of write_guest is identical to write_guest_with_key, except
 * that the PSW access key is used instead of an explicit argument.
 */
static inline __must_check
int write_guest(struct kvm_vcpu *vcpu, unsigned long ga, u8 ar, void *data,
		unsigned long len)
{
	u8 access_key = psw_bits(vcpu->arch.sie_block->gpsw).key;

	return write_guest_with_key(vcpu, ga, ar, data, len, access_key);
}

/**
 * read_guest_with_key - copy data from guest space to kernel space
 * @vcpu: virtual cpu
 * @ga: guest address
 * @ar: access register
 * @data: destination address in kernel space
 * @len: number of bytes to copy
 * @access_key: access key the storage key needs to match
 *
 * Copy @len bytes from @ga (guest address) to @data (kernel space).
 *
 * The behaviour of read_guest_with_key is identical to write_guest_with_key,
 * except that data will be copied from guest space to kernel space.
 */
static inline __must_check
int read_guest_with_key(struct kvm_vcpu *vcpu, unsigned long ga, u8 ar,
			void *data, unsigned long len, u8 access_key)
{
	return access_guest_with_key(vcpu, ga, ar, data, len, GACC_FETCH,
				     access_key);
}

/**
 * read_guest - copy data from guest space to kernel space
 * @vcpu: virtual cpu
 * @ga: guest address
 * @ar: access register
 * @data: destination address in kernel space
 * @len: number of bytes to copy
 *
 * Copy @len bytes from @ga (guest address) to @data (kernel space).
 *
 * The behaviour of read_guest is identical to read_guest_with_key, except
 * that the PSW access key is used instead of an explicit argument.
 */
static inline __must_check
int read_guest(struct kvm_vcpu *vcpu, unsigned long ga, u8 ar, void *data,
	       unsigned long len)
{
	u8 access_key = psw_bits(vcpu->arch.sie_block->gpsw).key;

	return read_guest_with_key(vcpu, ga, ar, data, len, access_key);
}

/**
 * read_guest_instr - copy instruction data from guest space to kernel space
 * @vcpu: virtual cpu
 * @ga: guest address
 * @data: destination address in kernel space
 * @len: number of bytes to copy
 *
 * Copy @len bytes from the given address (guest space) to @data (kernel
 * space).
 *
 * The behaviour of read_guest_instr is identical to read_guest, except that
 * instruction data will be read from primary space when in home-space or
 * address-space mode.
 */
static inline __must_check
int read_guest_instr(struct kvm_vcpu *vcpu, unsigned long ga, void *data,
		     unsigned long len)
{
	u8 access_key = psw_bits(vcpu->arch.sie_block->gpsw).key;

	return access_guest_with_key(vcpu, ga, 0, data, len, GACC_IFETCH,
				     access_key);
}
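
/*
 * Illustrative sketch (not from the original header): fetch the opcode
 * at the current PSW address during instruction emulation. Two bytes
 * are enough to decode the opcode; the field names mirror the sie_block
 * usage above.
 *
 *	u16 opcode;
 *	int rc = read_guest_instr(vcpu, vcpu->arch.sie_block->gpsw.addr,
 *				  &opcode, sizeof(opcode));
 *	if (rc)
 *		return kvm_s390_inject_prog_cond(vcpu, rc);
 */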

/**
 * write_guest_abs - copy data from kernel space to guest space absolute
 * @vcpu: virtual cpu
 * @gpa: guest physical (absolute) address
 * @data: source address in kernel space
 * @len: number of bytes to copy
 *
 * Copy @len bytes from @data (kernel space) to @gpa (guest absolute address).
 * It is up to the caller to ensure that the entire guest memory range is
 * valid memory before calling this function.
 * Guest low address and key protection are not checked.
 *
 * Returns zero on success or -EFAULT on error.
 *
 * If an error occurs data may have been copied partially to guest memory.
 */
static inline __must_check
int write_guest_abs(struct kvm_vcpu *vcpu, unsigned long gpa, void *data,
		    unsigned long len)
{
	return kvm_write_guest(vcpu->kvm, gpa, data, len);
}

/**
 * read_guest_abs - copy data from guest space absolute to kernel space
 * @vcpu: virtual cpu
 * @gpa: guest physical (absolute) address
 * @data: destination address in kernel space
 * @len: number of bytes to copy
 *
 * Copy @len bytes from @gpa (guest absolute address) to @data (kernel space).
 * It is up to the caller to ensure that the entire guest memory range is
 * valid memory before calling this function.
 * Guest key protection is not checked.
 *
 * Returns zero on success or -EFAULT on error.
 *
 * If an error occurs data may have been copied partially to kernel space.
 */
static inline __must_check
int read_guest_abs(struct kvm_vcpu *vcpu, unsigned long gpa, void *data,
		   unsigned long len)
{
	return kvm_read_guest(vcpu->kvm, gpa, data, len);
}

/**
 * write_guest_real - copy data from kernel space to guest space real
 * @vcpu: virtual cpu
 * @gra: guest real address
 * @data: source address in kernel space
 * @len: number of bytes to copy
 *
 * Copy @len bytes from @data (kernel space) to @gra (guest real address).
 * Guest low address and key protection are not checked.
 *
 * Returns zero on success, -EFAULT when copying from @data failed, or
 * PGM_ADDRESSING in case @gra is outside a memslot. In this case, pgm check
 * info is also stored to allow injecting into the guest (if applicable) using
 * kvm_s390_inject_prog_cond().
 *
 * If an error occurs data may have been copied partially to guest memory.
 */
static inline __must_check
int write_guest_real(struct kvm_vcpu *vcpu, unsigned long gra, void *data,
		     unsigned long len)
{
	return access_guest_real(vcpu, gra, data, len, GACC_STORE);
}

/**
 * read_guest_real - copy data from guest space real to kernel space
 * @vcpu: virtual cpu
 * @gra: guest real address
 * @data: destination address in kernel space
 * @len: number of bytes to copy
 *
 * Copy @len bytes from @gra (guest real address) to @data (kernel space).
 * Guest key protection is not checked.
 *
 * Returns zero on success, -EFAULT when copying to @data failed, or
 * PGM_ADDRESSING in case @gra is outside a memslot. In this case, pgm check
 * info is also stored to allow injecting into the guest (if applicable) using
 * kvm_s390_inject_prog_cond().
 *
 * If an error occurs data may have been copied partially to kernel space.
 */
static inline __must_check
int read_guest_real(struct kvm_vcpu *vcpu, unsigned long gra, void *data,
		    unsigned long len)
{
	return access_guest_real(vcpu, gra, data, len, GACC_FETCH);
}
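
/*
 * Illustrative use (hypothetical names: some_parm_block and parm_gra
 * are stand-ins, not kernel types): fetch a parameter block that
 * architecturally resides at a guest real address, letting
 * kvm_s390_inject_prog_cond() handle a PGM_ADDRESSING return.
 *
 *	struct some_parm_block parm;
 *	int rc = read_guest_real(vcpu, parm_gra, &parm, sizeof(parm));
 *	if (rc)
 *		return kvm_s390_inject_prog_cond(vcpu, rc);
 */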

void ipte_lock(struct kvm *kvm);
void ipte_unlock(struct kvm *kvm);
int ipte_lock_held(struct kvm *kvm);
int kvm_s390_check_low_addr_prot_real(struct kvm_vcpu *vcpu, unsigned long gra);

/* MVPG PEI indication bits */
#define PEI_DAT_PROT 2
#define PEI_NOT_PTE 4

int kvm_s390_shadow_fault(struct kvm_vcpu *vcpu, struct gmap *shadow,
			  unsigned long saddr, unsigned long *datptr);

#endif /* __KVM_S390_GACCESS_H */