GitHub Repository: torvalds/linux
Path: blob/master/arch/powerpc/platforms/cell/spufs/context.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * SPU file system -- SPU context management
 *
 * (C) Copyright IBM Deutschland Entwicklung GmbH 2005
 *
 * Author: Arnd Bergmann <[email protected]>
 */

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/atomic.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>

#include <asm/spu.h>
#include <asm/spu_csa.h>
#include "spufs.h"
#include "sputrace.h"
atomic_t nr_spu_contexts = ATOMIC_INIT(0);
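
/*
 * alloc_spu_context - allocate and initialise a new SPU context
 * @gang: gang to add the new context to, or NULL
 *
 * The context starts out in the saved state and holds a reference on the
 * current task's mm; binding to a physical SPU is deferred until
 * spu_activate().  Returns the new context, or NULL if allocation or
 * CSA setup fails.
 */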
struct spu_context *alloc_spu_context(struct spu_gang *gang)
{
	struct spu_context *ctx;

	ctx = kzalloc(sizeof *ctx, GFP_KERNEL);
	if (!ctx)
		goto out;
	/* Binding to physical processor deferred
	 * until spu_activate().
	 */
	if (spu_init_csa(&ctx->csa))
		goto out_free;
	spin_lock_init(&ctx->mmio_lock);
	mutex_init(&ctx->mapping_lock);
	kref_init(&ctx->kref);
	mutex_init(&ctx->state_mutex);
	mutex_init(&ctx->run_mutex);
	init_waitqueue_head(&ctx->ibox_wq);
	init_waitqueue_head(&ctx->wbox_wq);
	init_waitqueue_head(&ctx->stop_wq);
	init_waitqueue_head(&ctx->mfc_wq);
	init_waitqueue_head(&ctx->run_wq);
	ctx->state = SPU_STATE_SAVED;
	ctx->ops = &spu_backing_ops;
	ctx->owner = get_task_mm(current);
	INIT_LIST_HEAD(&ctx->rq);
	INIT_LIST_HEAD(&ctx->aff_list);
	if (gang)
		spu_gang_add_ctx(gang, ctx);

	__spu_update_sched_info(ctx);
	spu_set_timeslice(ctx);
	ctx->stats.util_state = SPU_UTIL_IDLE_LOADED;
	ctx->stats.tstamp = ktime_get_ns();

	atomic_inc(&nr_spu_contexts);
	goto out;

out_free:
	kfree(ctx);
	ctx = NULL;
out:
	return ctx;
}
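
/*
 * destroy_spu_context - kref release callback for an SPU context
 * @kref: embedded reference count of the context being freed
 *
 * Reached through put_spu_context() when the last reference is dropped:
 * deactivates the context, tears down its context save area and releases
 * the remaining resources.
 */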
void destroy_spu_context(struct kref *kref)
{
	struct spu_context *ctx;
	ctx = container_of(kref, struct spu_context, kref);
	spu_context_nospu_trace(destroy_spu_context__enter, ctx);
	mutex_lock(&ctx->state_mutex);
	spu_deactivate(ctx);
	mutex_unlock(&ctx->state_mutex);
	spu_fini_csa(&ctx->csa);
	if (ctx->gang)
		spu_gang_remove_ctx(ctx->gang, ctx);
	if (ctx->prof_priv_kref)
		kref_put(ctx->prof_priv_kref, ctx->prof_priv_release);
	BUG_ON(!list_empty(&ctx->rq));
	atomic_dec(&nr_spu_contexts);
	kfree(ctx->switch_log);
	kfree(ctx);
}
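
/*
 * get_spu_context - take an extra reference on a context and return it
 */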
struct spu_context *get_spu_context(struct spu_context *ctx)
{
	kref_get(&ctx->kref);
	return ctx;
}
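
/*
 * put_spu_context - drop a reference; the context is destroyed via
 * destroy_spu_context() once the count reaches zero.
 */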
int put_spu_context(struct spu_context *ctx)
{
	return kref_put(&ctx->kref, &destroy_spu_context);
}

/* give up the mm reference when the context is about to be destroyed */
void spu_forget(struct spu_context *ctx)
{
	struct mm_struct *mm;

	/*
	 * This is basically an open-coded spu_acquire_saved, except that
	 * we don't acquire the state mutex interruptible, and we don't
	 * want this context to be rescheduled on release.
	 */
	mutex_lock(&ctx->state_mutex);
	if (ctx->state != SPU_STATE_SAVED)
		spu_deactivate(ctx);

	mm = ctx->owner;
	ctx->owner = NULL;
	mmput(mm);
	spu_release(ctx);
}
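
/*
 * spu_unmap_mappings - zap all user-space mappings of this context's
 * local store and problem-state areas, so that any later access has to
 * fault in again.  Serialised against mapping updates by mapping_lock.
 */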
void spu_unmap_mappings(struct spu_context *ctx)
{
	mutex_lock(&ctx->mapping_lock);
	if (ctx->local_store)
		unmap_mapping_range(ctx->local_store, 0, LS_SIZE, 1);
	if (ctx->mfc)
		unmap_mapping_range(ctx->mfc, 0, SPUFS_MFC_MAP_SIZE, 1);
	if (ctx->cntl)
		unmap_mapping_range(ctx->cntl, 0, SPUFS_CNTL_MAP_SIZE, 1);
	if (ctx->signal1)
		unmap_mapping_range(ctx->signal1, 0, SPUFS_SIGNAL_MAP_SIZE, 1);
	if (ctx->signal2)
		unmap_mapping_range(ctx->signal2, 0, SPUFS_SIGNAL_MAP_SIZE, 1);
	if (ctx->mss)
		unmap_mapping_range(ctx->mss, 0, SPUFS_MSS_MAP_SIZE, 1);
	if (ctx->psmap)
		unmap_mapping_range(ctx->psmap, 0, SPUFS_PS_MAP_SIZE, 1);
	mutex_unlock(&ctx->mapping_lock);
}

/**
 * spu_acquire_saved - lock spu context and make sure it is in saved state
 * @ctx: spu context to lock
 */
int spu_acquire_saved(struct spu_context *ctx)
{
	int ret;

	spu_context_nospu_trace(spu_acquire_saved__enter, ctx);

	ret = spu_acquire(ctx);
	if (ret)
		return ret;

	if (ctx->state != SPU_STATE_SAVED) {
		set_bit(SPU_SCHED_WAS_ACTIVE, &ctx->sched_flags);
		spu_deactivate(ctx);
	}

	return 0;
}

/**
 * spu_release_saved - unlock spu context and return it to the runqueue
 * @ctx: context to unlock
 */
void spu_release_saved(struct spu_context *ctx)
{
	BUG_ON(ctx->state != SPU_STATE_SAVED);

	if (test_and_clear_bit(SPU_SCHED_WAS_ACTIVE, &ctx->sched_flags) &&
	    test_bit(SPU_SCHED_SPU_RUN, &ctx->sched_flags))
		spu_activate(ctx, 0);

	spu_release(ctx);
}
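
/*
 * A minimal usage sketch (not part of this file): callers elsewhere in
 * spufs that need to look at the saved register image are expected to
 * bracket that work with the helpers above, for example:
 *
 *	ret = spu_acquire_saved(ctx);
 *	if (ret)
 *		return ret;
 *	... access ctx->csa ...
 *	spu_release_saved(ctx);
 */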