GitHub Repository: torvalds/linux
Path: blob/master/arch/powerpc/platforms/book3s/vas-api.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * VAS user space API for its accelerators (only NX-GZIP is supported now)
 * Copyright (C) 2019 Haren Myneni, IBM Corp
 */

#define pr_fmt(fmt) "vas-api: " fmt

#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/cdev.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/kthread.h>
#include <linux/sched/signal.h>
#include <linux/mmu_context.h>
#include <linux/io.h>
#include <asm/vas.h>
#include <uapi/asm/vas-api.h>

/*
 * The driver creates the device node that can be used as follows:
 * For NX-GZIP
 *
 * fd = open("/dev/crypto/nx-gzip", O_RDWR);
 * rc = ioctl(fd, VAS_TX_WIN_OPEN, &attr);
 * paste_addr = mmap(NULL, PAGE_SIZE, prot, MAP_SHARED, fd, 0ULL);
 * vas_copy(&crb, 0, 1);
 * vas_paste(paste_addr, 0, 1);
 * close(fd) or exit the process to close the window.
 *
 * where "vas_copy" and "vas_paste" are defined in copy-paste.h.
 * copy/paste returns to user space directly, so refer to the NX
 * hardware documentation for exact copy/paste usage and
 * completion/error conditions.
 */
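/*
 * A slightly fuller user space sketch of the flow above (illustrative
 * only, not part of this driver; error handling trimmed). It relies on
 * the uapi definitions (struct vas_tx_win_open_attr, VAS_TX_WIN_OPEN)
 * included below; the vas_id value of -1 is an assumption meaning
 * "let the kernel pick a VAS instance".
 */
#if 0
#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>
#include <asm/vas-api.h>

static void *open_nx_gzip_window(int *fdp)
{
	struct vas_tx_win_open_attr attr;
	void *paste_addr;
	int fd;

	fd = open("/dev/crypto/nx-gzip", O_RDWR);
	if (fd < 0)
		return NULL;

	memset(&attr, 0, sizeof(attr));
	attr.version = 1;	/* coproc_ioc_tx_win_open() accepts only 1 */
	attr.vas_id = -1;	/* assumed: default VAS instance */

	/* One window per fd: a second ioctl() would fail with EEXIST. */
	if (ioctl(fd, VAS_TX_WIN_OPEN, &attr) < 0)
		goto out;

	/* Exactly one page at offset 0; coproc_mmap() rejects anything else. */
	paste_addr = mmap(NULL, sysconf(_SC_PAGESIZE),
			  PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (paste_addr == MAP_FAILED)
		goto out;

	*fdp = fd;
	return paste_addr;
out:
	close(fd);
	return NULL;
}
#endif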

/*
 * Wrapper object for the nx-gzip device - there is just one instance of
 * this node for the whole system.
 */
static struct coproc_dev {
	struct cdev cdev;
	struct device *device;
	char *name;
	dev_t devt;
	struct class *class;
	enum vas_cop_type cop_type;
	const struct vas_user_win_ops *vops;
} coproc_device;

struct coproc_instance {
	struct coproc_dev *coproc;
	struct vas_window *txwin;
};

static char *coproc_devnode(const struct device *dev, umode_t *mode)
{
	return kasprintf(GFP_KERNEL, "crypto/%s", dev_name(dev));
}

/*
 * Take reference to pid and mm
 */
int get_vas_user_win_ref(struct vas_user_win_ref *task_ref)
{
	/*
	 * A window opened by a child thread may not be closed when
	 * the thread exits. So take a reference to its pid and
	 * release it when the window is freed by the parent thread.
	 * Acquiring a reference to the task's pid makes sure the pid
	 * will not be reused - needed only for multithreaded
	 * applications.
	 */
	task_ref->pid = get_task_pid(current, PIDTYPE_PID);
	/*
	 * Acquire a reference to the task's mm.
	 */
	task_ref->mm = get_task_mm(current);
	if (!task_ref->mm) {
		put_pid(task_ref->pid);
		pr_err("pid(%d): mm_struct is not found\n",
				current->pid);
		return -EPERM;
	}

	mmgrab(task_ref->mm);
	mmput(task_ref->mm);
	/*
	 * The process closes the window during exit. In a
	 * multithreaded application, a child thread can open a window
	 * and exit without closing it. So take a tgid reference until
	 * the window is closed, to make sure the tgid is not reused.
	 */
	task_ref->tgid = find_get_pid(task_tgid_vnr(current));

	return 0;
}

/*
 * Successful return must release the task reference with
 * put_task_struct
 */
static bool ref_get_pid_and_task(struct vas_user_win_ref *task_ref,
				 struct task_struct **tskp, struct pid **pidp)
{
	struct task_struct *tsk;
	struct pid *pid;

	pid = task_ref->pid;
	tsk = get_pid_task(pid, PIDTYPE_PID);
	if (!tsk) {
		pid = task_ref->tgid;
		tsk = get_pid_task(pid, PIDTYPE_PID);
		/*
		 * Parent thread (tgid) will be closing window when it
		 * exits. So should not get here.
		 */
		if (WARN_ON_ONCE(!tsk))
			return false;
	}

	/* Return if the task is exiting. */
	if (tsk->flags & PF_EXITING) {
		put_task_struct(tsk);
		return false;
	}

	*tskp = tsk;
	*pidp = pid;

	return true;
}

/*
 * Update the CSB to indicate a translation error.
 *
 * User space will be polling on the CSB after the request is issued.
 * If NX can handle the request without any issues, it updates the CSB.
 * Whereas if NX encounters a page fault, the kernel handles the fault
 * and updates the CSB with a translation error.
 *
 * If we are unable to update the CSB (meaning copy_to_user() failed
 * due to an invalid csb_addr), send a signal to the process.
 */
void vas_update_csb(struct coprocessor_request_block *crb,
		    struct vas_user_win_ref *task_ref)
{
	struct coprocessor_status_block csb;
	struct kernel_siginfo info;
	struct task_struct *tsk;
	void __user *csb_addr;
	struct pid *pid;
	int rc;

	/*
	 * NX user space windows can not be opened for task->mm=NULL
	 * and faults will not be generated for kernel requests.
	 */
	if (WARN_ON_ONCE(!task_ref->mm))
		return;

	csb_addr = (void __user *)be64_to_cpu(crb->csb_addr);

	memset(&csb, 0, sizeof(csb));
	csb.cc = CSB_CC_FAULT_ADDRESS;
	csb.ce = CSB_CE_TERMINATION;
	csb.cs = 0;
	csb.count = 0;

	/*
	 * NX operates and returns in BE format, as defined by the CRB
	 * struct. So save fault_storage_addr in BE, as NX pastes it in
	 * the FIFO, and expect user space to convert it to CPU format.
	 */
	csb.address = crb->stamp.nx.fault_storage_addr;
	csb.flags = 0;

	/*
	 * The process closes the send window after all pending NX
	 * requests are completed. In multithreaded applications, a
	 * child thread can open a window and exit without closing it;
	 * some requests may still be pending, or the window may be
	 * used by other threads later. We should handle faults if NX
	 * encounters page faults on these requests: update the CSB
	 * with a translation error and the fault address. If the
	 * csb_addr passed by user space is invalid, send a SEGV signal
	 * to the pid saved in the window. If the child thread is not
	 * running, send the signal to the tgid. The parent thread
	 * (tgid) will close this window upon its exit.
	 *
	 * pid and mm references are taken when the window is opened by
	 * the process (pid). So the tgid is used only when a child
	 * thread opens a window and exits without closing it.
	 */

	if (!ref_get_pid_and_task(task_ref, &tsk, &pid))
		return;

	kthread_use_mm(task_ref->mm);
	rc = copy_to_user(csb_addr, &csb, sizeof(csb));
	/*
	 * User space polls on csb.flags (the first byte). So add a
	 * barrier, then copy the first byte with the csb flags update.
	 */
	if (!rc) {
		csb.flags = CSB_V;
		/* Make sure the update to csb.flags is visible now */
		smp_mb();
		rc = copy_to_user(csb_addr, &csb, sizeof(u8));
	}
	kthread_unuse_mm(task_ref->mm);
	put_task_struct(tsk);

	/* Success */
	if (!rc)
		return;

	pr_debug("Invalid CSB address 0x%p signalling pid(%d)\n",
			csb_addr, pid_vnr(pid));

	clear_siginfo(&info);
	info.si_signo = SIGSEGV;
	info.si_errno = EFAULT;
	info.si_code = SEGV_MAPERR;
	info.si_addr = csb_addr;
	/*
	 * The process will be polling on csb.flags after the request
	 * is sent to NX. So the CSB update should generally not fail,
	 * except when an application passes an invalid csb_addr. So an
	 * error message is displayed, and it is left to user space
	 * whether to ignore or handle this signal.
	 */
	rcu_read_lock();
	rc = kill_pid_info(SIGSEGV, &info, pid);
	rcu_read_unlock();

	pr_devel("pid %d kill_proc_info() rc %d\n", pid_vnr(pid), rc);
}
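/*
 * What the user space poll loop on the other end of vas_update_csb()
 * might look like (illustrative only, not part of this driver). The
 * CSB layout here mirrors the kernel's struct coprocessor_status_block
 * and the CSB_V bit from asm/icswx.h; treating those as given in user
 * space is an assumption.
 */
#if 0
#include <stdint.h>

struct csb {
	uint8_t  flags;		/* CSB_V (0x80) is set last, after a barrier */
	uint8_t  cs;
	uint8_t  cc;		/* completion code; 0 on success */
	uint8_t  ce;
	uint32_t count;		/* big-endian */
	uint64_t address;	/* big-endian fault address on translation error */
};

static int wait_for_csb(volatile struct csb *csb)
{
	while (!(csb->flags & 0x80))	/* poll for CSB_V */
		;			/* could yield/sleep here instead */

	if (csb->cc == 0)
		return 0;
	/*
	 * On the fault path above, cc is CSB_CC_FAULT_ADDRESS and
	 * csb->address holds the faulting address (BE): touch that
	 * page to fault it in, then resubmit the request.
	 */
	return -1;
}
#endif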

void vas_dump_crb(struct coprocessor_request_block *crb)
{
	struct data_descriptor_entry *dde;
	struct nx_fault_stamp *nx;

	dde = &crb->source;
	pr_devel("SrcDDE: addr 0x%llx, len %d, count %d, idx %d, flags %d\n",
			be64_to_cpu(dde->address), be32_to_cpu(dde->length),
			dde->count, dde->index, dde->flags);

	dde = &crb->target;
	pr_devel("TgtDDE: addr 0x%llx, len %d, count %d, idx %d, flags %d\n",
			be64_to_cpu(dde->address), be32_to_cpu(dde->length),
			dde->count, dde->index, dde->flags);

	nx = &crb->stamp.nx;
	pr_devel("NX Stamp: PSWID 0x%x, FSA 0x%llx, flags 0x%x, FS 0x%x\n",
			be32_to_cpu(nx->pswid),
			be64_to_cpu(crb->stamp.nx.fault_storage_addr),
			nx->flags, nx->fault_status);
}

static int coproc_open(struct inode *inode, struct file *fp)
{
	struct coproc_instance *cp_inst;

	cp_inst = kzalloc(sizeof(*cp_inst), GFP_KERNEL);
	if (!cp_inst)
		return -ENOMEM;

	cp_inst->coproc = container_of(inode->i_cdev, struct coproc_dev,
					cdev);
	fp->private_data = cp_inst;

	return 0;
}

static int coproc_ioc_tx_win_open(struct file *fp, unsigned long arg)
{
	void __user *uptr = (void __user *)arg;
	struct vas_tx_win_open_attr uattr;
	struct coproc_instance *cp_inst;
	struct vas_window *txwin;
	int rc;

	cp_inst = fp->private_data;

	/*
	 * One window per file descriptor
	 */
	if (cp_inst->txwin)
		return -EEXIST;

	rc = copy_from_user(&uattr, uptr, sizeof(uattr));
	if (rc) {
		pr_err("copy_from_user() returns %d\n", rc);
		return -EFAULT;
	}

	if (uattr.version != 1) {
		pr_err("Invalid window open API version\n");
		return -EINVAL;
	}

	if (!cp_inst->coproc->vops || !cp_inst->coproc->vops->open_win) {
		pr_err("VAS API is not registered\n");
		return -EACCES;
	}

	txwin = cp_inst->coproc->vops->open_win(uattr.vas_id, uattr.flags,
						cp_inst->coproc->cop_type);
	if (IS_ERR(txwin)) {
		pr_err_ratelimited("VAS window open failed rc=%ld\n",
				PTR_ERR(txwin));
		return PTR_ERR(txwin);
	}

	mutex_init(&txwin->task_ref.mmap_mutex);
	cp_inst->txwin = txwin;

	return 0;
}

static int coproc_release(struct inode *inode, struct file *fp)
{
	struct coproc_instance *cp_inst = fp->private_data;
	int rc;

	if (cp_inst->txwin) {
		if (cp_inst->coproc->vops &&
			cp_inst->coproc->vops->close_win) {
			rc = cp_inst->coproc->vops->close_win(cp_inst->txwin);
			if (rc)
				return rc;
		}
		cp_inst->txwin = NULL;
	}

	kfree(cp_inst);
	fp->private_data = NULL;

	/*
	 * We don't know here whether the user has other receive
	 * windows open, so we can't really call clear_thread_tidr().
	 * So, once the process calls set_thread_tidr(), the TIDR
	 * value sticks around until the process exits, resulting in
	 * an extra copy in restore_sprs().
	 */

	return 0;
}

/*
 * If the instruction that caused the fault was a paste, then clear
 * CR0[EQ] in regs, advance the NIP, and return 0. Else return an
 * error code.
 */
static int do_fail_paste(void)
{
	struct pt_regs *regs = current->thread.regs;
	u32 instword;

	if (WARN_ON_ONCE(!regs))
		return -EINVAL;

	if (WARN_ON_ONCE(!user_mode(regs)))
		return -EINVAL;

	/*
	 * If we couldn't translate the instruction, the driver should
	 * return success without handling the fault; it will be
	 * retried or the instruction fetch will fault.
	 */
	if (get_user(instword, (u32 __user *)(regs->nip)))
		return -EAGAIN;

	/*
	 * Not a paste instruction; the driver may fail the fault.
	 */
	if ((instword & PPC_INST_PASTE_MASK) != PPC_INST_PASTE)
		return -ENOENT;

	regs->ccr &= ~0xe0000000;	/* Clear CR0[0-2] to fail paste */
	regs_add_return_ip(regs, 4);	/* Emulate the paste */

	return 0;
}
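/*
 * The user space consequence of do_fail_paste() (illustrative only):
 * an emulated/failed paste leaves CR0[EQ] clear, so a submit loop just
 * sees a rejected paste and retries or falls back. vas_copy()/
 * vas_paste() are the copy-paste.h helpers named in the header
 * comment; paste_accepted() is a hypothetical CR0[EQ] check, since
 * the exact return encoding of vas_paste() is not defined in this
 * file.
 */
#if 0
static int nx_submit(void *paste_addr, struct coprocessor_request_block *crb,
		     int max_retries)
{
	int i, rc;

	for (i = 0; i < max_retries; i++) {
		vas_copy(crb, 0, 1);
		rc = vas_paste(paste_addr, 0, 1);
		if (paste_accepted(rc))	/* hypothetical: CR0[EQ] set */
			return 0;
		/* Closed or busy window: back off, then retry */
	}
	return -1;	/* e.g. fall back to software compression */
}
#endif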

/*
 * This fault handler is invoked when the core generates a page fault
 * on the paste address. This happens if the kernel closes the window
 * in the hypervisor (on pseries) due to a lost credit, or if the
 * paste address is not mapped.
 */
static vm_fault_t vas_mmap_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct file *fp = vma->vm_file;
	struct coproc_instance *cp_inst = fp->private_data;
	struct vas_window *txwin;
	vm_fault_t fault;
	u64 paste_addr;
	int ret;

	/*
	 * The window is not open. Should not expect this error.
	 */
	if (!cp_inst || !cp_inst->txwin) {
		pr_err("Unexpected fault on paste address with TX window closed\n");
		return VM_FAULT_SIGBUS;
	}

	txwin = cp_inst->txwin;
	/*
	 * When the LPAR loses credits due to core removal or during
	 * migration, the kernel invalidates the existing mapping for
	 * the current paste addresses and sets the windows inactive
	 * (zap_vma_pages() in reconfig_close_windows()). A new
	 * mapping is done later, after migration or when new credits
	 * are available. So we continue to receive faults if user
	 * space issues NX requests.
	 */
	if (txwin->task_ref.vma != vmf->vma) {
		pr_err("No previous mapping with paste address\n");
		return VM_FAULT_SIGBUS;
	}

	/*
	 * The window may be inactive due to a lost credit (e.g. core
	 * removal with DLPAR). If the window is active again when the
	 * credit is available, map the new paste address at the
	 * window virtual address.
	 */
	scoped_guard(mutex, &txwin->task_ref.mmap_mutex) {
		if (txwin->status == VAS_WIN_ACTIVE) {
			paste_addr = cp_inst->coproc->vops->paste_addr(txwin);
			if (paste_addr) {
				fault = vmf_insert_pfn(vma, vma->vm_start,
						(paste_addr >> PAGE_SHIFT));
				return fault;
			}
		}
	}

	/*
	 * Received this fault due to closing the actual window.
	 * This can happen during migration or with lost credits.
	 * Since there is no mapping, return the paste instruction
	 * failure to user space.
	 */
	ret = do_fail_paste();
	/*
	 * User space can retry several times until success (needed
	 * for migration), or should fall back to SW compression, or
	 * manage with the existing open windows if available. Looking
	 * at the sysfs interface, it can determine whether these
	 * failures are coming from migration or core removal:
	 * nr_used_credits > nr_total_credits when credits are lost.
	 */
	if (!ret || (ret == -EAGAIN))
		return VM_FAULT_NOPAGE;

	return VM_FAULT_SIGBUS;
}

/*
 * When mmap() maps the paste address, the mapping VMA is saved in the
 * VAS window struct, which is used to unmap during migration if the
 * window is still open. But user space can remove this mapping with
 * munmap() before closing the window, and the VMA address would then
 * be invalid. So set the VAS window VMA to NULL in this function,
 * which is called before the VMA is freed.
 */
static void vas_mmap_close(struct vm_area_struct *vma)
{
	struct file *fp = vma->vm_file;
	struct coproc_instance *cp_inst = fp->private_data;
	struct vas_window *txwin;

	/* Should not happen */
	if (!cp_inst || !cp_inst->txwin) {
		pr_err("No attached VAS window for the paste address mmap\n");
		return;
	}

	txwin = cp_inst->txwin;
	/*
	 * task_ref.vma is set in coproc_mmap() during mmap of the
	 * paste address. So it has to be the same VMA that is getting
	 * freed.
	 */
	if (WARN_ON(txwin->task_ref.vma != vma)) {
		pr_err("Invalid paste address mmapping\n");
		return;
	}

	scoped_guard(mutex, &txwin->task_ref.mmap_mutex)
		txwin->task_ref.vma = NULL;
}

static const struct vm_operations_struct vas_vm_ops = {
	.close = vas_mmap_close,
	.fault = vas_mmap_fault,
};

static int coproc_mmap(struct file *fp, struct vm_area_struct *vma)
{
	struct coproc_instance *cp_inst = fp->private_data;
	struct vas_window *txwin;
	unsigned long pfn;
	u64 paste_addr;
	pgprot_t prot;
	int rc;

	txwin = cp_inst->txwin;

	if ((vma->vm_end - vma->vm_start) > PAGE_SIZE) {
		pr_debug("size 0x%zx, PAGE_SIZE 0x%zx\n",
				(vma->vm_end - vma->vm_start), PAGE_SIZE);
		return -EINVAL;
	}

	/*
	 * Map the complete page to the paste address. So user space
	 * should pass 0ULL as the offset parameter.
	 */
	if (vma->vm_pgoff) {
		pr_debug("Page offset unsupported to map paste address\n");
		return -EINVAL;
	}

	/* Ensure instance has an open send window */
	if (!txwin) {
		pr_err("No send window open?\n");
		return -EINVAL;
	}

	if (!cp_inst->coproc->vops || !cp_inst->coproc->vops->paste_addr) {
		pr_err("VAS API is not registered\n");
		return -EACCES;
	}

	/*
	 * The initial mmap is done after the window is opened with the
	 * ioctl. But before mmap(), this window can be closed in the
	 * hypervisor due to a lost credit (core removal on pseries).
	 * So if the window is not active, fail the mmap() with -EACCES
	 * and expect user space to reissue mmap() when the window is
	 * active again, or to open a new window when a credit is
	 * available. mmap_mutex protects the paste address mmap()
	 * against DLPAR close/open events and allows mmap() only when
	 * the window is active.
	 */
	guard(mutex)(&txwin->task_ref.mmap_mutex);
	if (txwin->status != VAS_WIN_ACTIVE) {
		pr_err("Window is not active\n");
		return -EACCES;
	}

	paste_addr = cp_inst->coproc->vops->paste_addr(txwin);
	if (!paste_addr) {
		pr_err("Window paste address failed\n");
		return -EINVAL;
	}

	pfn = paste_addr >> PAGE_SHIFT;

	/* flags, page_prot from cxl_mmap(), except we want cachable */
	vm_flags_set(vma, VM_IO | VM_PFNMAP);
	vma->vm_page_prot = pgprot_cached(vma->vm_page_prot);

	prot = __pgprot(pgprot_val(vma->vm_page_prot) | _PAGE_DIRTY);

	rc = remap_pfn_range(vma, vma->vm_start, pfn + vma->vm_pgoff,
			vma->vm_end - vma->vm_start, prot);

	pr_devel("paste addr %llx at %lx, rc %d\n", paste_addr,
			vma->vm_start, rc);

	txwin->task_ref.vma = vma;
	vma->vm_ops = &vas_vm_ops;

	return rc;
}

static long coproc_ioctl(struct file *fp, unsigned int cmd, unsigned long arg)
{
	switch (cmd) {
	case VAS_TX_WIN_OPEN:
		return coproc_ioc_tx_win_open(fp, arg);
	default:
		return -EINVAL;
	}
}

static struct file_operations coproc_fops = {
	.open = coproc_open,
	.release = coproc_release,
	.mmap = coproc_mmap,
	.unlocked_ioctl = coproc_ioctl,
};

/*
 * Only the nx-gzip coprocessor type is supported now, but this API
 * code can be extended to other coprocessor types later.
 */
int vas_register_coproc_api(struct module *mod, enum vas_cop_type cop_type,
			    const char *name,
			    const struct vas_user_win_ops *vops)
{
	int rc = -EINVAL;
	dev_t devno;

	rc = alloc_chrdev_region(&coproc_device.devt, 1, 1, name);
	if (rc) {
		pr_err("Unable to allocate coproc major number: %i\n", rc);
		return rc;
	}

	pr_devel("%s device allocated, dev [%i,%i]\n", name,
			MAJOR(coproc_device.devt), MINOR(coproc_device.devt));

	coproc_device.class = class_create(name);
	if (IS_ERR(coproc_device.class)) {
		rc = PTR_ERR(coproc_device.class);
		pr_err("Unable to create %s class %d\n", name, rc);
		goto err_class;
	}
	coproc_device.class->devnode = coproc_devnode;
	coproc_device.cop_type = cop_type;
	coproc_device.vops = vops;

	coproc_fops.owner = mod;
	cdev_init(&coproc_device.cdev, &coproc_fops);

	devno = MKDEV(MAJOR(coproc_device.devt), 0);
	rc = cdev_add(&coproc_device.cdev, devno, 1);
	if (rc) {
		pr_err("cdev_add() failed %d\n", rc);
		goto err_cdev;
	}

	coproc_device.device = device_create(coproc_device.class, NULL,
			devno, NULL, name, MINOR(devno));
	if (IS_ERR(coproc_device.device)) {
		rc = PTR_ERR(coproc_device.device);
		pr_err("Unable to create coproc-%d %d\n", MINOR(devno), rc);
		goto err;
	}

	pr_devel("Added dev [%d,%d]\n", MAJOR(devno), MINOR(devno));

	return 0;

err:
	cdev_del(&coproc_device.cdev);
err_cdev:
	class_destroy(coproc_device.class);
err_class:
	unregister_chrdev_region(coproc_device.devt, 1);
	return rc;
}
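/*
 * Roughly how platform code hooks into this API (illustrative only;
 * the real callers are the powernv/pseries VAS setup paths, and the
 * op implementations below are hypothetical placeholders).
 */
#if 0
static const struct vas_user_win_ops nx_gzip_vops = {
	.open_win	= my_open_win,		/* hypothetical */
	.paste_addr	= my_paste_addr,	/* hypothetical */
	.close_win	= my_close_win,		/* hypothetical */
};

static int __init nx_gzip_api_init(void)
{
	/* Creates /dev/crypto/nx-gzip via coproc_devnode() above */
	return vas_register_coproc_api(THIS_MODULE, VAS_COP_TYPE_GZIP,
				       "nx-gzip", &nx_gzip_vops);
}
#endif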

void vas_unregister_coproc_api(void)
{
	dev_t devno;

	cdev_del(&coproc_device.cdev);
	devno = MKDEV(MAJOR(coproc_device.devt), 0);
	device_destroy(coproc_device.class, devno);

	class_destroy(coproc_device.class);
	unregister_chrdev_region(coproc_device.devt, 1);
}