// SPDX-License-Identifier: GPL-2.0-or-later

/*
 * SPU file system
 *
 * (C) Copyright IBM Deutschland Entwicklung GmbH 2005
 *
 * Author: Arnd Bergmann <[email protected]>
 */

#include <linux/file.h>
#include <linux/fs.h>
#include <linux/fs_context.h>
#include <linux/fs_parser.h>
#include <linux/fsnotify.h>
#include <linux/backing-dev.h>
#include <linux/init.h>
#include <linux/ioctl.h>
#include <linux/module.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/pagemap.h>
#include <linux/poll.h>
#include <linux/of.h>
#include <linux/seq_file.h>
#include <linux/slab.h>

#include <asm/spu.h>
#include <asm/spu_priv1.h>
#include <linux/uaccess.h>

#include "spufs.h"

struct spufs_sb_info {
        bool debug;
};

static struct kmem_cache *spufs_inode_cache;
char *isolated_loader;
static int isolated_loader_size;

static struct spufs_sb_info *spufs_get_sb_info(struct super_block *sb)
{
        return sb->s_fs_info;
}

static struct inode *
spufs_alloc_inode(struct super_block *sb)
{
        struct spufs_inode_info *ei;

        ei = kmem_cache_alloc(spufs_inode_cache, GFP_KERNEL);
        if (!ei)
                return NULL;

        ei->i_gang = NULL;
        ei->i_ctx = NULL;
        ei->i_openers = 0;

        return &ei->vfs_inode;
}

static void spufs_free_inode(struct inode *inode)
{
        kmem_cache_free(spufs_inode_cache, SPUFS_I(inode));
}

static void
spufs_init_once(void *p)
{
        struct spufs_inode_info *ei = p;

        inode_init_once(&ei->vfs_inode);
}

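/*
 * Allocate a fresh inode on the spufs superblock and give it an inode
 * number, the requested mode, the caller's fsuid/fsgid and current
 * timestamps.  Callers fill in the type-specific fields themselves.
 */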
static struct inode *
spufs_new_inode(struct super_block *sb, umode_t mode)
{
        struct inode *inode;

        inode = new_inode(sb);
        if (!inode)
                goto out;

        inode->i_ino = get_next_ino();
        inode->i_mode = mode;
        inode->i_uid = current_fsuid();
        inode->i_gid = current_fsgid();
        simple_inode_init_ts(inode);
out:
        return inode;
}

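/*
 * spufs files have fixed sizes, so reject any attempt to change
 * i_size; all other attribute updates are applied unchanged.
 */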
static int
spufs_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
              struct iattr *attr)
{
        struct inode *inode = d_inode(dentry);

        if ((attr->ia_valid & ATTR_SIZE) &&
            (attr->ia_size != inode->i_size))
                return -EINVAL;
        setattr_copy(&nop_mnt_idmap, inode, attr);
        mark_inode_dirty(inode);
        return 0;
}


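/*
 * Create one regular file inside a context directory, wiring it up to
 * the given file_operations and taking a reference on the context so
 * the file keeps it alive.
 */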
static int
spufs_new_file(struct super_block *sb, struct dentry *dentry,
               const struct file_operations *fops, umode_t mode,
               size_t size, struct spu_context *ctx)
{
        static const struct inode_operations spufs_file_iops = {
                .setattr = spufs_setattr,
        };
        struct inode *inode;
        int ret;

        ret = -ENOSPC;
        inode = spufs_new_inode(sb, S_IFREG | mode);
        if (!inode)
                goto out;

        ret = 0;
        inode->i_op = &spufs_file_iops;
        inode->i_fop = fops;
        inode->i_size = size;
        inode->i_private = SPUFS_I(inode)->i_ctx = get_spu_context(ctx);
        d_make_persistent(dentry, inode);
out:
        return ret;
}

static void
spufs_evict_inode(struct inode *inode)
{
        struct spufs_inode_info *ei = SPUFS_I(inode);
        clear_inode(inode);
        if (ei->i_ctx)
                put_spu_context(ei->i_ctx);
        if (ei->i_gang)
                put_spu_gang(ei->i_gang);
}

/* Caller must hold parent->i_mutex */
static void spufs_rmdir(struct inode *parent, struct dentry *dir)
{
        struct spu_context *ctx = SPUFS_I(d_inode(dir))->i_ctx;

        locked_recursive_removal(dir, NULL);
        spu_forget(ctx);
}

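/*
 * Populate a context directory from a table of spufs_tree_descr
 * entries, masking each file's mode with the directory creation mode.
 */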
static int spufs_fill_dir(struct dentry *dir,
                const const struct spufs_tree_descr *files, umode_t mode,
                struct spu_context *ctx)
{
        while (files->name && files->name[0]) {
                int ret;
                struct dentry *dentry = d_alloc_name(dir, files->name);
                if (!dentry)
                        return -ENOMEM;
                ret = spufs_new_file(dir->d_sb, dentry, files->ops,
                                     files->mode & mode, files->size, ctx);
                dput(dentry);
                if (ret)
                        return ret;
                files++;
        }
        return 0;
}

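/*
 * Drop one user of a gang directory.  Once the gang's "alive" count
 * reaches zero, the whole directory tree is removed.
 */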
static void unuse_gang(struct dentry *dir)
{
        struct inode *inode = dir->d_inode;
        struct spu_gang *gang = SPUFS_I(inode)->i_gang;

        if (gang) {
                bool dead;

                inode_lock(inode); // exclusion with spufs_create_context()
                dead = !--gang->alive;
                inode_unlock(inode);

                if (dead)
                        simple_recursive_removal(dir, NULL);
        }
}

static int spufs_dir_close(struct inode *inode, struct file *file)
{
        struct inode *parent;
        struct dentry *dir;

        dir = file->f_path.dentry;
        parent = d_inode(dir->d_parent);

        inode_lock_nested(parent, I_MUTEX_PARENT);
        spufs_rmdir(parent, dir);
        inode_unlock(parent);

        unuse_gang(dir->d_parent);
        return dcache_dir_close(inode, file);
}

const struct file_operations spufs_context_fops = {
        .open           = dcache_dir_open,
        .release        = spufs_dir_close,
        .llseek         = dcache_dir_lseek,
        .read           = generic_read_dir,
        .iterate_shared = dcache_readdir,
        .fsync          = noop_fsync,
};
EXPORT_SYMBOL_GPL(spufs_context_fops);

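/*
 * Create a context directory: allocate the inode and the spu_context,
 * then populate the directory from the static file tables (plus the
 * debug files when the filesystem is mounted with the "debug" option).
 */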
static int
spufs_mkdir(struct inode *dir, struct dentry *dentry, unsigned int flags,
                umode_t mode)
{
        int ret;
        struct inode *inode;
        struct spu_context *ctx;

        inode = spufs_new_inode(dir->i_sb, mode | S_IFDIR);
        if (!inode)
                return -ENOSPC;

        inode_init_owner(&nop_mnt_idmap, inode, dir, mode | S_IFDIR);
        ctx = alloc_spu_context(SPUFS_I(dir)->i_gang); /* XXX gang */
        SPUFS_I(inode)->i_ctx = ctx;
        if (!ctx) {
                iput(inode);
                return -ENOSPC;
        }

        ctx->flags = flags;
        inode->i_op = &simple_dir_inode_operations;
        inode->i_fop = &simple_dir_operations;

        inode_lock(inode);

        inc_nlink(dir);
        inc_nlink(inode);

        d_make_persistent(dentry, inode);

        if (flags & SPU_CREATE_NOSCHED)
                ret = spufs_fill_dir(dentry, spufs_dir_nosched_contents,
                                     mode, ctx);
        else
                ret = spufs_fill_dir(dentry, spufs_dir_contents, mode, ctx);

        if (!ret && spufs_get_sb_info(dir->i_sb)->debug)
                ret = spufs_fill_dir(dentry, spufs_dir_debug_contents,
                                     mode, ctx);

        inode_unlock(inode);

        if (ret)
                spufs_rmdir(dir, dentry);

        return ret;
}

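/*
 * Hand the caller an open file descriptor for the freshly created
 * context directory, using the spufs context directory operations.
 */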
static int spufs_context_open(const struct path *path)
{
        FD_PREPARE(fdf, 0, dentry_open(path, O_RDONLY, current_cred()));
        if (fdf.err)
                return fdf.err;
        fd_prepare_file(fdf)->f_op = &spufs_context_fops;
        return fd_publish(fdf);
}

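/*
 * Validate the affinity flags for a new context: affinity must be
 * supported by the hardware, memory affinity may only be claimed once
 * per gang, and for SPU affinity the neighbor passed in @filp must be
 * a context of the same gang with enough SPUs free on a single node.
 * Returns the referenced neighbor context (or NULL) on success, or an
 * ERR_PTR on failure.
 */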
static struct spu_context *
spufs_assert_affinity(unsigned int flags, struct spu_gang *gang,
                                                struct file *filp)
{
        struct spu_context *tmp, *neighbor, *err;
        int count, node;
        int aff_supp;

        aff_supp = !list_empty(&(list_entry(cbe_spu_info[0].spus.next,
                                        struct spu, cbe_list))->aff_list);

        if (!aff_supp)
                return ERR_PTR(-EINVAL);

        if (flags & SPU_CREATE_GANG)
                return ERR_PTR(-EINVAL);

        if (flags & SPU_CREATE_AFFINITY_MEM &&
            gang->aff_ref_ctx &&
            gang->aff_ref_ctx->flags & SPU_CREATE_AFFINITY_MEM)
                return ERR_PTR(-EEXIST);

        if (gang->aff_flags & AFF_MERGED)
                return ERR_PTR(-EBUSY);

        neighbor = NULL;
        if (flags & SPU_CREATE_AFFINITY_SPU) {
                if (!filp || filp->f_op != &spufs_context_fops)
                        return ERR_PTR(-EINVAL);

                neighbor = get_spu_context(
                                SPUFS_I(file_inode(filp))->i_ctx);

                if (!list_empty(&neighbor->aff_list) && !(neighbor->aff_head) &&
                    !list_is_last(&neighbor->aff_list, &gang->aff_list_head) &&
                    !list_entry(neighbor->aff_list.next, struct spu_context,
                    aff_list)->aff_head) {
                        err = ERR_PTR(-EEXIST);
                        goto out_put_neighbor;
                }

                if (gang != neighbor->gang) {
                        err = ERR_PTR(-EINVAL);
                        goto out_put_neighbor;
                }

                count = 1;
                list_for_each_entry(tmp, &gang->aff_list_head, aff_list)
                        count++;
                if (list_empty(&neighbor->aff_list))
                        count++;

                for (node = 0; node < MAX_NUMNODES; node++) {
                        if ((cbe_spu_info[node].n_spus - atomic_read(
                                &cbe_spu_info[node].reserved_spus)) >= count)
                                break;
                }

                if (node == MAX_NUMNODES) {
                        err = ERR_PTR(-EEXIST);
                        goto out_put_neighbor;
                }
        }

        return neighbor;

out_put_neighbor:
        put_spu_context(neighbor);
        return err;
}

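/*
 * Link the new context into its gang's affinity list next to the
 * given neighbor and, if requested, make it the gang's memory
 * affinity reference context.
 */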
static void
spufs_set_affinity(unsigned int flags, struct spu_context *ctx,
                                        struct spu_context *neighbor)
{
        if (flags & SPU_CREATE_AFFINITY_MEM)
                ctx->gang->aff_ref_ctx = ctx;

        if (flags & SPU_CREATE_AFFINITY_SPU) {
                if (list_empty(&neighbor->aff_list)) {
                        list_add_tail(&neighbor->aff_list,
                                      &ctx->gang->aff_list_head);
                        neighbor->aff_head = 1;
                }

                if (list_is_last(&neighbor->aff_list, &ctx->gang->aff_list_head)
                    || list_entry(neighbor->aff_list.next, struct spu_context,
                                  aff_list)->aff_head) {
                        list_add(&ctx->aff_list, &neighbor->aff_list);
                } else {
                        list_add_tail(&ctx->aff_list, &neighbor->aff_list);
                        if (neighbor->aff_head) {
                                neighbor->aff_head = 0;
                                ctx->aff_head = 1;
                        }
                }

                if (!ctx->gang->aff_ref_ctx)
                        ctx->gang->aff_ref_ctx = ctx;
        }
}

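/*
 * Create a plain SPU context: check the create flags and capabilities,
 * account the context against its gang's "alive" count, resolve any
 * affinity neighbor, build the context directory and hand the caller
 * an open descriptor for it.
 */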
static int
spufs_create_context(struct inode *inode, struct dentry *dentry,
                        struct vfsmount *mnt, int flags, umode_t mode,
                        struct file *aff_filp)
{
        int ret;
        int affinity;
        struct spu_gang *gang = SPUFS_I(inode)->i_gang;
        struct spu_context *neighbor;
        struct path path = {.mnt = mnt, .dentry = dentry};

        if ((flags & SPU_CREATE_NOSCHED) &&
            !capable(CAP_SYS_NICE))
                return -EPERM;

        if ((flags & (SPU_CREATE_NOSCHED | SPU_CREATE_ISOLATE))
            == SPU_CREATE_ISOLATE)
                return -EINVAL;

        if ((flags & SPU_CREATE_ISOLATE) && !isolated_loader)
                return -ENODEV;

        if (gang) {
                if (!gang->alive)
                        return -ENOENT;
                gang->alive++;
        }

        neighbor = NULL;
        affinity = flags & (SPU_CREATE_AFFINITY_MEM | SPU_CREATE_AFFINITY_SPU);
        if (affinity) {
                if (!gang)
                        return -EINVAL;
                mutex_lock(&gang->aff_mutex);
                neighbor = spufs_assert_affinity(flags, gang, aff_filp);
                if (IS_ERR(neighbor)) {
                        ret = PTR_ERR(neighbor);
                        goto out_aff_unlock;
                }
        }

        ret = spufs_mkdir(inode, dentry, flags, mode & 0777);
        if (ret) {
                if (neighbor)
                        put_spu_context(neighbor);
                goto out_aff_unlock;
        }

        if (affinity) {
                spufs_set_affinity(flags, SPUFS_I(d_inode(dentry))->i_ctx,
                                   neighbor);
                if (neighbor)
                        put_spu_context(neighbor);
        }

        ret = spufs_context_open(&path);
        if (ret < 0)
                spufs_rmdir(inode, dentry);

out_aff_unlock:
        if (affinity)
                mutex_unlock(&gang->aff_mutex);
        if (ret && gang)
                gang->alive--; // can't reach 0
        return ret;
}

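/*
 * Create a gang directory.  Unlike a context directory, its inode
 * carries a struct spu_gang rather than a struct spu_context.
 */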
static int
spufs_mkgang(struct inode *dir, struct dentry *dentry, umode_t mode)
{
        int ret;
        struct inode *inode;
        struct spu_gang *gang;

        ret = -ENOSPC;
        inode = spufs_new_inode(dir->i_sb, mode | S_IFDIR);
        if (!inode)
                goto out;

        ret = 0;
        inode_init_owner(&nop_mnt_idmap, inode, dir, mode | S_IFDIR);
        gang = alloc_spu_gang();
        SPUFS_I(inode)->i_ctx = NULL;
        SPUFS_I(inode)->i_gang = gang;
        if (!gang) {
                ret = -ENOMEM;
                goto out_iput;
        }

        inode->i_op = &simple_dir_inode_operations;
        inode->i_fop = &simple_dir_operations;

        inc_nlink(dir);
        inc_nlink(inode);
        d_make_persistent(dentry, inode);
        return ret;

out_iput:
        iput(inode);
out:
        return ret;
}

static int spufs_gang_close(struct inode *inode, struct file *file)
{
        unuse_gang(file->f_path.dentry);
        return dcache_dir_close(inode, file);
}

static const struct file_operations spufs_gang_fops = {
        .open           = dcache_dir_open,
        .release        = spufs_gang_close,
        .llseek         = dcache_dir_lseek,
        .read           = generic_read_dir,
        .iterate_shared = dcache_readdir,
        .fsync          = noop_fsync,
};

static int spufs_gang_open(const struct path *path)
{
        /*
         * get references for dget and mntget, will be released
         * in error path of *_open().
         */
        FD_PREPARE(fdf, 0, dentry_open(path, O_RDONLY, current_cred()));
        if (fdf.err)
                return fdf.err;
        fd_prepare_file(fdf)->f_op = &spufs_gang_fops;
        return fd_publish(fdf);
}

static int spufs_create_gang(struct inode *inode,
                        struct dentry *dentry,
                        struct vfsmount *mnt, umode_t mode)
{
        struct path path = {.mnt = mnt, .dentry = dentry};
        int ret;

        ret = spufs_mkgang(inode, dentry, mode & 0777);
        if (!ret) {
                ret = spufs_gang_open(&path);
                if (ret < 0)
                        unuse_gang(dentry);
        }
        return ret;
}


static struct file_system_type spufs_type;

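/*
 * Backend of the spu_create system call: sanity-check the target
 * dentry and flags, then create either a gang or a context directory
 * at the given location.
 */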
long spufs_create(const struct path *path, struct dentry *dentry,
                unsigned int flags, umode_t mode, struct file *filp)
{
        struct inode *dir = d_inode(path->dentry);
        int ret;

        /* check if we are on spufs */
        if (path->dentry->d_sb->s_type != &spufs_type)
                return -EINVAL;

        /* don't accept undefined flags */
        if (flags & (~SPU_CREATE_FLAG_ALL))
                return -EINVAL;

        /* only threads can be underneath a gang */
        if (path->dentry != path->dentry->d_sb->s_root)
                if ((flags & SPU_CREATE_GANG) || !SPUFS_I(dir)->i_gang)
                        return -EINVAL;

        mode &= ~current_umask();

        if (flags & SPU_CREATE_GANG)
                ret = spufs_create_gang(dir, dentry, path->mnt, mode);
        else
                ret = spufs_create_context(dir, dentry, path->mnt, flags, mode,
                                           filp);
        if (ret >= 0)
                fsnotify_mkdir(dir, dentry);

        return ret;
}

/* File system initialization */
struct spufs_fs_context {
        kuid_t  uid;
        kgid_t  gid;
        umode_t mode;
};

enum {
        Opt_uid, Opt_gid, Opt_mode, Opt_debug,
};

static const struct fs_parameter_spec spufs_fs_parameters[] = {
        fsparam_u32     ("gid",         Opt_gid),
        fsparam_u32oct  ("mode",        Opt_mode),
        fsparam_u32     ("uid",         Opt_uid),
        fsparam_flag    ("debug",       Opt_debug),
        {}
};

static int spufs_show_options(struct seq_file *m, struct dentry *root)
{
        struct spufs_sb_info *sbi = spufs_get_sb_info(root->d_sb);
        struct inode *inode = root->d_inode;

        if (!uid_eq(inode->i_uid, GLOBAL_ROOT_UID))
                seq_printf(m, ",uid=%u",
                           from_kuid_munged(&init_user_ns, inode->i_uid));
        if (!gid_eq(inode->i_gid, GLOBAL_ROOT_GID))
                seq_printf(m, ",gid=%u",
                           from_kgid_munged(&init_user_ns, inode->i_gid));
        if ((inode->i_mode & S_IALLUGO) != 0775)
                seq_printf(m, ",mode=%o", inode->i_mode);
        if (sbi->debug)
                seq_puts(m, ",debug");
        return 0;
}

static int spufs_parse_param(struct fs_context *fc, struct fs_parameter *param)
{
        struct spufs_fs_context *ctx = fc->fs_private;
        struct spufs_sb_info *sbi = fc->s_fs_info;
        struct fs_parse_result result;
        kuid_t uid;
        kgid_t gid;
        int opt;

        opt = fs_parse(fc, spufs_fs_parameters, param, &result);
        if (opt < 0)
                return opt;

        switch (opt) {
        case Opt_uid:
                uid = make_kuid(current_user_ns(), result.uint_32);
                if (!uid_valid(uid))
                        return invalf(fc, "Unknown uid");
                ctx->uid = uid;
                break;
        case Opt_gid:
                gid = make_kgid(current_user_ns(), result.uint_32);
                if (!gid_valid(gid))
                        return invalf(fc, "Unknown gid");
                ctx->gid = gid;
                break;
        case Opt_mode:
                ctx->mode = result.uint_32 & S_IALLUGO;
                break;
        case Opt_debug:
                sbi->debug = true;
                break;
        }

        return 0;
}

static void spufs_exit_isolated_loader(void)
{
        free_pages((unsigned long) isolated_loader,
                        get_order(isolated_loader_size));
}

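/*
 * Copy the isolated-mode loader from the "/spu-isolation" device tree
 * node into page-aligned memory so that SPU_CREATE_ISOLATE contexts
 * can use it.
 */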
static void __init
spufs_init_isolated_loader(void)
{
        struct device_node *dn;
        const char *loader;
        int size;

        dn = of_find_node_by_path("/spu-isolation");
        if (!dn)
                return;

        loader = of_get_property(dn, "loader", &size);
        of_node_put(dn);
        if (!loader)
                return;

        /* the loader must be aligned on a 16 byte boundary */
        isolated_loader = (char *)__get_free_pages(GFP_KERNEL, get_order(size));
        if (!isolated_loader)
                return;

        isolated_loader_size = size;
        memcpy(isolated_loader, loader, size);
        printk(KERN_INFO "spufs: SPU isolation mode enabled\n");
}

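/*
 * Set up the root directory of a new spufs mount, applying the uid,
 * gid and mode collected from the mount options.
 */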
static int spufs_create_root(struct super_block *sb, struct fs_context *fc)
{
        struct spufs_fs_context *ctx = fc->fs_private;
        struct inode *inode;

        if (!spu_management_ops)
                return -ENODEV;

        inode = spufs_new_inode(sb, S_IFDIR | ctx->mode);
        if (!inode)
                return -ENOMEM;

        inode->i_uid = ctx->uid;
        inode->i_gid = ctx->gid;
        inode->i_op = &simple_dir_inode_operations;
        inode->i_fop = &simple_dir_operations;
        SPUFS_I(inode)->i_ctx = NULL;
        inc_nlink(inode);

        sb->s_root = d_make_root(inode);
        if (!sb->s_root)
                return -ENOMEM;
        return 0;
}

static const struct super_operations spufs_ops = {
        .alloc_inode    = spufs_alloc_inode,
        .free_inode     = spufs_free_inode,
        .statfs         = simple_statfs,
        .evict_inode    = spufs_evict_inode,
        .show_options   = spufs_show_options,
};

static int spufs_fill_super(struct super_block *sb, struct fs_context *fc)
{
        sb->s_maxbytes = MAX_LFS_FILESIZE;
        sb->s_blocksize = PAGE_SIZE;
        sb->s_blocksize_bits = PAGE_SHIFT;
        sb->s_magic = SPUFS_MAGIC;
        sb->s_op = &spufs_ops;

        return spufs_create_root(sb, fc);
}

static int spufs_get_tree(struct fs_context *fc)
{
        return get_tree_single(fc, spufs_fill_super);
}

static void spufs_free_fc(struct fs_context *fc)
{
        kfree(fc->s_fs_info);
}

static const struct fs_context_operations spufs_context_ops = {
        .free           = spufs_free_fc,
        .parse_param    = spufs_parse_param,
        .get_tree       = spufs_get_tree,
};

static int spufs_init_fs_context(struct fs_context *fc)
{
        struct spufs_fs_context *ctx;
        struct spufs_sb_info *sbi;

        ctx = kzalloc(sizeof(struct spufs_fs_context), GFP_KERNEL);
        if (!ctx)
                goto nomem;

        sbi = kzalloc(sizeof(struct spufs_sb_info), GFP_KERNEL);
        if (!sbi)
                goto nomem_ctx;

        ctx->uid = current_uid();
        ctx->gid = current_gid();
        ctx->mode = 0755;

        fc->fs_private = ctx;
        fc->s_fs_info = sbi;
        fc->ops = &spufs_context_ops;
        return 0;

nomem_ctx:
        kfree(ctx);
nomem:
        return -ENOMEM;
}

static struct file_system_type spufs_type = {
        .owner = THIS_MODULE,
        .name = "spufs",
        .init_fs_context = spufs_init_fs_context,
        .parameters     = spufs_fs_parameters,
        .kill_sb = kill_anon_super,
};
MODULE_ALIAS_FS("spufs");

static int __init spufs_init(void)
{
        int ret;

        ret = -ENODEV;
        if (!spu_management_ops)
                goto out;

        ret = -ENOMEM;
        spufs_inode_cache = kmem_cache_create("spufs_inode_cache",
                        sizeof(struct spufs_inode_info), 0,
                        SLAB_HWCACHE_ALIGN|SLAB_ACCOUNT, spufs_init_once);

        if (!spufs_inode_cache)
                goto out;
        ret = spu_sched_init();
        if (ret)
                goto out_cache;
        ret = register_spu_syscalls(&spufs_calls);
        if (ret)
                goto out_sched;
        ret = register_filesystem(&spufs_type);
        if (ret)
                goto out_syscalls;

        spufs_init_isolated_loader();

        return 0;

out_syscalls:
        unregister_spu_syscalls(&spufs_calls);
out_sched:
        spu_sched_exit();
out_cache:
        kmem_cache_destroy(spufs_inode_cache);
out:
        return ret;
}
module_init(spufs_init);

static void __exit spufs_exit(void)
{
        spu_sched_exit();
        spufs_exit_isolated_loader();
        unregister_spu_syscalls(&spufs_calls);
        unregister_filesystem(&spufs_type);
        kmem_cache_destroy(spufs_inode_cache);
}
module_exit(spufs_exit);

MODULE_DESCRIPTION("SPU file system");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Arnd Bergmann <[email protected]>");