GitHub Repository: torvalds/linux
Path: blob/master/arch/loongarch/kvm/intc/ipi.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2024 Loongson Technology Corporation Limited
 */

#include <linux/kvm_host.h>
#include <asm/kvm_ipi.h>
#include <asm/kvm_vcpu.h>

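/*
 * Set bits in the vcpu's IPI status word; if the status was previously
 * empty, inject a LARCH_INT_IPI interrupt so the vcpu notices the newly
 * pending IPI.
 */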
static void ipi_set(struct kvm_vcpu *vcpu, uint32_t data)
{
	uint32_t status;
	struct kvm_interrupt irq;

	spin_lock(&vcpu->arch.ipi_state.lock);
	status = vcpu->arch.ipi_state.status;
	vcpu->arch.ipi_state.status |= data;
	spin_unlock(&vcpu->arch.ipi_state.lock);
	if ((status == 0) && data) {
		irq.irq = LARCH_INT_IPI;
		kvm_vcpu_ioctl_interrupt(vcpu, &irq);
	}
}

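/*
 * Handle a write to the IOCSR_IPI_SEND register. In the written value,
 * bits [25:16] select the target cpu and bits [4:0] the IPI vector, so
 * e.g. data = (1 << 16) | 2 sets status bit 2 on vcpu 1.
 */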
static void ipi_send(struct kvm *kvm, uint64_t data)
{
	int cpu;
	struct kvm_vcpu *vcpu;

	cpu = ((data & 0xffffffff) >> 16) & 0x3ff;
	vcpu = kvm_get_vcpu_by_cpuid(kvm, cpu);
	if (unlikely(vcpu == NULL)) {
		kvm_err("%s: invalid target cpu: %d\n", __func__, cpu);
		return;
	}

	ipi_set(vcpu, BIT(data & 0x1f));
}

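/*
 * Clear bits in the vcpu's IPI status word; once no bits remain set,
 * lower the IPI again by injecting the negated interrupt number.
 */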
static void ipi_clear(struct kvm_vcpu *vcpu, uint64_t data)
{
	uint32_t status;
	struct kvm_interrupt irq;

	spin_lock(&vcpu->arch.ipi_state.lock);
	vcpu->arch.ipi_state.status &= ~data;
	status = vcpu->arch.ipi_state.status;
	spin_unlock(&vcpu->arch.ipi_state.lock);
	if (status == 0) {
		irq.irq = -LARCH_INT_IPI;
		kvm_vcpu_ioctl_interrupt(vcpu, &irq);
	}
}

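/*
 * Read 'len' bytes of a mailbox word. 'offset' is an IOCSR offset, so
 * the index into the mailbox buffer is offset - 0x20 (the buffers start
 * at IOCSR_IPI_BUF_20).
 */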
static uint64_t read_mailbox(struct kvm_vcpu *vcpu, int offset, int len)
{
	uint64_t data = 0;

	spin_lock(&vcpu->arch.ipi_state.lock);
	data = *(ulong *)((void *)vcpu->arch.ipi_state.buf + (offset - 0x20));
	spin_unlock(&vcpu->arch.ipi_state.lock);

	switch (len) {
	case 1:
		return data & 0xff;
	case 2:
		return data & 0xffff;
	case 4:
		return data & 0xffffffff;
	case 8:
		return data;
	default:
		kvm_err("%s: unknown data len: %d\n", __func__, len);
		return 0;
	}
}

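/*
 * Store 'len' bytes of 'data' into the mailbox buffer at the position
 * corresponding to IOCSR offset 'offset', under the ipi_state lock.
 */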
static void write_mailbox(struct kvm_vcpu *vcpu, int offset, uint64_t data, int len)
{
	void *pbuf;

	spin_lock(&vcpu->arch.ipi_state.lock);
	pbuf = (void *)vcpu->arch.ipi_state.buf + (offset - 0x20);

	switch (len) {
	case 1:
		*(unsigned char *)pbuf = (unsigned char)data;
		break;
	case 2:
		*(unsigned short *)pbuf = (unsigned short)data;
		break;
	case 4:
		*(unsigned int *)pbuf = (unsigned int)data;
		break;
	case 8:
		*(unsigned long *)pbuf = (unsigned long)data;
		break;
	default:
		kvm_err("%s: unknown data len: %d\n", __func__, len);
	}
	spin_unlock(&vcpu->arch.ipi_state.lock);
}

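/*
 * Handle a write to IOCSR_MAIL_SEND. In the written value, bits [25:16]
 * select the target cpu, bits [4:2] select the 32-bit mailbox word,
 * bits [30:27] form a per-byte mask of bytes to preserve, and bits
 * [63:32] carry the new data.
 */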
static int mail_send(struct kvm *kvm, uint64_t data)
{
	int i, cpu, mailbox, offset;
	uint32_t val = 0, mask = 0;
	struct kvm_vcpu *vcpu;

	cpu = ((data & 0xffffffff) >> 16) & 0x3ff;
	vcpu = kvm_get_vcpu_by_cpuid(kvm, cpu);
	if (unlikely(vcpu == NULL)) {
		kvm_err("%s: invalid target cpu: %d\n", __func__, cpu);
		return 0;
	}
	mailbox = ((data & 0xffffffff) >> 2) & 0x7;
	offset = IOCSR_IPI_BUF_20 + mailbox * 4;
	if ((data >> 27) & 0xf) {
		val = read_mailbox(vcpu, offset, 4);
		for (i = 0; i < 4; i++)
			if (data & (BIT(27 + i)))
				mask |= (0xff << (i * 8));
		val &= mask;
	}

	val |= ((uint32_t)(data >> 32) & ~mask);
	write_mailbox(vcpu, offset, val, 4);

	return 0;
}

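/*
 * Read-modify-write one 32-bit word at IOCSR offset 'addr' on behalf of
 * IOCSR_ANY_SEND: bits [30:27] of 'data' select bytes of the old value
 * to preserve and bits [63:32] supply the new data.
 */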
static int send_ipi_data(struct kvm_vcpu *vcpu, gpa_t addr, uint64_t data)
{
	int i, idx, ret;
	uint64_t val = 0, mask = 0;

	/*
	 * Bits 27-30 form the mask for byte writing.
	 * If the mask is 0, there is nothing to preserve from the old value.
	 */
	if ((data >> 27) & 0xf) {
		/* Read the old val */
		idx = srcu_read_lock(&vcpu->kvm->srcu);
		ret = kvm_io_bus_read(vcpu, KVM_IOCSR_BUS, addr, 4, &val);
		srcu_read_unlock(&vcpu->kvm->srcu, idx);
		if (unlikely(ret)) {
			kvm_err("%s: read data from addr %llx failed\n", __func__, addr);
			return 0;
		}
		/* Construct the mask by scanning bits 27-30 */
		for (i = 0; i < 4; i++) {
			if (data & (BIT(27 + i)))
				mask |= (0xff << (i * 8));
		}
		/* Keep the bytes of the old value selected by the mask */
		val &= mask;
	}
	val |= ((uint32_t)(data >> 32) & ~mask);
	idx = srcu_read_lock(&vcpu->kvm->srcu);
	ret = kvm_io_bus_write(vcpu, KVM_IOCSR_BUS, addr, 4, &val);
	srcu_read_unlock(&vcpu->kvm->srcu, idx);
	if (unlikely(ret))
		kvm_err("%s: write data to addr %llx failed\n", __func__, addr);

	return 0;
}

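/*
 * Handle a write to IOCSR_ANY_SEND, which writes into another vcpu's
 * IOCSR space: bits [25:16] select the target cpu and bits [15:0] the
 * IOCSR offset handed to send_ipi_data().
 */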
static int any_send(struct kvm *kvm, uint64_t data)
{
	int cpu, offset;
	struct kvm_vcpu *vcpu;

	cpu = ((data & 0xffffffff) >> 16) & 0x3ff;
	vcpu = kvm_get_vcpu_by_cpuid(kvm, cpu);
	if (unlikely(vcpu == NULL)) {
		kvm_err("%s: invalid target cpu: %d\n", __func__, cpu);
		return 0;
	}
	offset = data & 0xffff;

	return send_ipi_data(vcpu, offset, data);
}

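/*
 * Emulate a guest IOCSR read of the per-vcpu IPI registers: status,
 * enable and the mailbox buffers. IOCSR_IPI_SET and IOCSR_IPI_CLEAR are
 * write-only and read back as 0.
 */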
static int loongarch_ipi_readl(struct kvm_vcpu *vcpu, gpa_t addr, int len, void *val)
{
	uint32_t offset;
	uint64_t res = 0;

	offset = (uint32_t)(addr & 0x1ff);
	WARN_ON_ONCE(offset & (len - 1));

	switch (offset) {
	case IOCSR_IPI_STATUS:
		spin_lock(&vcpu->arch.ipi_state.lock);
		res = vcpu->arch.ipi_state.status;
		spin_unlock(&vcpu->arch.ipi_state.lock);
		break;
	case IOCSR_IPI_EN:
		spin_lock(&vcpu->arch.ipi_state.lock);
		res = vcpu->arch.ipi_state.en;
		spin_unlock(&vcpu->arch.ipi_state.lock);
		break;
	case IOCSR_IPI_SET:
	case IOCSR_IPI_CLEAR:
		break;
	case IOCSR_IPI_BUF_20 ... IOCSR_IPI_BUF_38 + 7:
		if (offset + len > IOCSR_IPI_BUF_38 + 8) {
			kvm_err("%s: invalid offset or len: offset = %d, len = %d\n",
				__func__, offset, len);
			break;
		}
		res = read_mailbox(vcpu, offset, len);
		break;
	default:
		kvm_err("%s: unknown addr: %llx\n", __func__, addr);
		break;
	}
	*(uint64_t *)val = res;

	return 0;
}

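/*
 * Emulate a guest IOCSR write to the IPI registers. Writes to
 * IOCSR_IPI_SET/CLEAR update the local vcpu's status, while
 * IOCSR_IPI_SEND, IOCSR_MAIL_SEND and IOCSR_ANY_SEND target another
 * vcpu selected by the written value. IOCSR_IPI_STATUS is read-only.
 */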
static int loongarch_ipi_writel(struct kvm_vcpu *vcpu, gpa_t addr, int len, const void *val)
{
	uint64_t data;
	uint32_t offset;

	data = *(uint64_t *)val;

	offset = (uint32_t)(addr & 0x1ff);
	WARN_ON_ONCE(offset & (len - 1));

	switch (offset) {
	case IOCSR_IPI_STATUS:
		break;
	case IOCSR_IPI_EN:
		spin_lock(&vcpu->arch.ipi_state.lock);
		vcpu->arch.ipi_state.en = data;
		spin_unlock(&vcpu->arch.ipi_state.lock);
		break;
	case IOCSR_IPI_SET:
		ipi_set(vcpu, data);
		break;
	case IOCSR_IPI_CLEAR:
		/* Just clear the status of the current vcpu */
		ipi_clear(vcpu, data);
		break;
	case IOCSR_IPI_BUF_20 ... IOCSR_IPI_BUF_38 + 7:
		if (offset + len > IOCSR_IPI_BUF_38 + 8) {
			kvm_err("%s: invalid offset or len: offset = %d, len = %d\n",
				__func__, offset, len);
			break;
		}
		write_mailbox(vcpu, offset, data, len);
		break;
	case IOCSR_IPI_SEND:
		ipi_send(vcpu->kvm, data);
		break;
	case IOCSR_MAIL_SEND:
		mail_send(vcpu->kvm, data);
		break;
	case IOCSR_ANY_SEND:
		any_send(vcpu->kvm, data);
		break;
	default:
		kvm_err("%s: unknown addr: %llx\n", __func__, addr);
		break;
	}

	return 0;
}

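/*
 * kvm_io_device callbacks: count the exit in the vcpu statistics, then
 * forward to the IOCSR emulation above.
 */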
static int kvm_ipi_read(struct kvm_vcpu *vcpu,
			struct kvm_io_device *dev,
			gpa_t addr, int len, void *val)
{
	vcpu->stat.ipi_read_exits++;
	return loongarch_ipi_readl(vcpu, addr, len, val);
}

static int kvm_ipi_write(struct kvm_vcpu *vcpu,
			 struct kvm_io_device *dev,
			 gpa_t addr, int len, const void *val)
{
	vcpu->stat.ipi_write_exits++;
	return loongarch_ipi_writel(vcpu, addr, len, val);
}

static const struct kvm_io_device_ops kvm_ipi_ops = {
	.read	= kvm_ipi_read,
	.write	= kvm_ipi_write,
};

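/*
 * Back KVM_GET/SET_DEVICE_ATTR for the KVM_DEV_LOONGARCH_IPI_GRP_REGS
 * group. attr->attr encodes the target register: bits [25:16] are the
 * vcpu id and bits [7:0] the IPI register offset, e.g.
 * (1 << 16) | IOCSR_IPI_STATUS addresses vcpu 1's status word. The
 * mailbox buffers are transferred as 64-bit values, all other registers
 * as 32-bit.
 */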
static int kvm_ipi_regs_access(struct kvm_device *dev,
			       struct kvm_device_attr *attr,
			       bool is_write)
{
	int len = 4;
	int cpu, addr;
	uint64_t val;
	void *p = NULL;
	struct kvm_vcpu *vcpu;

	cpu = (attr->attr >> 16) & 0x3ff;
	addr = attr->attr & 0xff;

	vcpu = kvm_get_vcpu_by_id(dev->kvm, cpu);
	if (unlikely(vcpu == NULL)) {
		kvm_err("%s: invalid target cpu: %d\n", __func__, cpu);
		return -EINVAL;
	}

	switch (addr) {
	case IOCSR_IPI_STATUS:
		p = &vcpu->arch.ipi_state.status;
		break;
	case IOCSR_IPI_EN:
		p = &vcpu->arch.ipi_state.en;
		break;
	case IOCSR_IPI_SET:
		p = &vcpu->arch.ipi_state.set;
		break;
	case IOCSR_IPI_CLEAR:
		p = &vcpu->arch.ipi_state.clear;
		break;
	case IOCSR_IPI_BUF_20:
		p = &vcpu->arch.ipi_state.buf[0];
		len = 8;
		break;
	case IOCSR_IPI_BUF_28:
		p = &vcpu->arch.ipi_state.buf[1];
		len = 8;
		break;
	case IOCSR_IPI_BUF_30:
		p = &vcpu->arch.ipi_state.buf[2];
		len = 8;
		break;
	case IOCSR_IPI_BUF_38:
		p = &vcpu->arch.ipi_state.buf[3];
		len = 8;
		break;
	default:
		kvm_err("%s: unknown ipi register, addr = %d\n", __func__, addr);
		return -EINVAL;
	}

	if (is_write) {
		if (len == 4) {
			if (get_user(val, (uint32_t __user *)attr->addr))
				return -EFAULT;
			*(uint32_t *)p = (uint32_t)val;
		} else if (len == 8) {
			if (get_user(val, (uint64_t __user *)attr->addr))
				return -EFAULT;
			*(uint64_t *)p = val;
		}
	} else {
		if (len == 4) {
			val = *(uint32_t *)p;
			return put_user(val, (uint32_t __user *)attr->addr);
		} else if (len == 8) {
			val = *(uint64_t *)p;
			return put_user(val, (uint64_t __user *)attr->addr);
		}
	}

	return 0;
}

static int kvm_ipi_get_attr(struct kvm_device *dev,
			    struct kvm_device_attr *attr)
{
	switch (attr->group) {
	case KVM_DEV_LOONGARCH_IPI_GRP_REGS:
		return kvm_ipi_regs_access(dev, attr, false);
	default:
		kvm_err("%s: unknown group (%d)\n", __func__, attr->group);
		return -EINVAL;
	}
}

static int kvm_ipi_set_attr(struct kvm_device *dev,
			    struct kvm_device_attr *attr)
{
	switch (attr->group) {
	case KVM_DEV_LOONGARCH_IPI_GRP_REGS:
		return kvm_ipi_regs_access(dev, attr, true);
	default:
		kvm_err("%s: unknown group (%d)\n", __func__, attr->group);
		return -EINVAL;
	}
}

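/*
 * Create the per-VM IPI device: allocate its state, register it on the
 * IOCSR bus at IOCSR_IPI_BASE and publish it via kvm->arch.ipi. Only
 * one instance per VM is allowed.
 */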
static int kvm_ipi_create(struct kvm_device *dev, u32 type)
{
	int ret;
	struct kvm *kvm;
	struct kvm_io_device *device;
	struct loongarch_ipi *s;

	if (!dev) {
		kvm_err("%s: kvm_device ptr is invalid!\n", __func__);
		return -EINVAL;
	}

	kvm = dev->kvm;
	if (kvm->arch.ipi) {
		kvm_err("%s: LoongArch IPI has already been created!\n", __func__);
		return -EINVAL;
	}

	s = kzalloc(sizeof(struct loongarch_ipi), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	spin_lock_init(&s->lock);
	s->kvm = kvm;

	/*
	 * Initialize IOCSR device
	 */
	device = &s->device;
	kvm_iodevice_init(device, &kvm_ipi_ops);
	mutex_lock(&kvm->slots_lock);
	ret = kvm_io_bus_register_dev(kvm, KVM_IOCSR_BUS, IOCSR_IPI_BASE, IOCSR_IPI_SIZE, device);
	mutex_unlock(&kvm->slots_lock);
	if (ret < 0) {
		kvm_err("%s: Initialize IOCSR dev failed, ret = %d\n", __func__, ret);
		goto err;
	}

	kvm->arch.ipi = s;
	return 0;

err:
	kfree(s);
	return -EFAULT;
}

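/*
 * Tear the IPI device down again: unregister it from the IOCSR bus and
 * free both the IPI state and the kvm_device wrapper.
 */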
static void kvm_ipi_destroy(struct kvm_device *dev)
{
	struct kvm *kvm;
	struct loongarch_ipi *ipi;

	if (!dev || !dev->kvm || !dev->kvm->arch.ipi)
		return;

	kvm = dev->kvm;
	ipi = kvm->arch.ipi;
	kvm_io_bus_unregister_dev(kvm, KVM_IOCSR_BUS, &ipi->device);
	kfree(ipi);
	kfree(dev);
}

static struct kvm_device_ops kvm_ipi_dev_ops = {
	.name = "kvm-loongarch-ipi",
	.create = kvm_ipi_create,
	.destroy = kvm_ipi_destroy,
	.set_attr = kvm_ipi_set_attr,
	.get_attr = kvm_ipi_get_attr,
};

int kvm_loongarch_register_ipi_device(void)
{
	return kvm_register_device_ops(&kvm_ipi_dev_ops, KVM_DEV_TYPE_LOONGARCH_IPI);
}