GitHub Repository: torvalds/linux
Path: blob/master/arch/powerpc/platforms/cell/spufs/spufs.h
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * SPU file system
 *
 * (C) Copyright IBM Deutschland Entwicklung GmbH 2005
 *
 * Author: Arnd Bergmann <[email protected]>
 */
#ifndef SPUFS_H
#define SPUFS_H

#include <linux/kref.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/fs.h>
#include <linux/cpumask.h>
#include <linux/sched/signal.h>

#include <asm/spu.h>
#include <asm/spu_csa.h>
#include <asm/spu_info.h>

#define SPUFS_PS_MAP_SIZE 0x20000
#define SPUFS_MFC_MAP_SIZE 0x1000
#define SPUFS_CNTL_MAP_SIZE 0x1000
#define SPUFS_SIGNAL_MAP_SIZE PAGE_SIZE
#define SPUFS_MSS_MAP_SIZE 0x1000

/* The magic number for our file system */
enum {
	SPUFS_MAGIC = 0x23c9b64e,
};

struct spu_context_ops;
struct spu_gang;

/* ctx->sched_flags */
enum {
	SPU_SCHED_NOTIFY_ACTIVE,
	SPU_SCHED_WAS_ACTIVE, /* was active upon spu_acquire_saved() */
	SPU_SCHED_SPU_RUN, /* context is within spu_run */
};

enum {
	SWITCH_LOG_BUFSIZE = 4096,
};

enum {
	SWITCH_LOG_START,
	SWITCH_LOG_STOP,
	SWITCH_LOG_EXIT,
};

struct switch_log {
	wait_queue_head_t wait;
	unsigned long head;
	unsigned long tail;
	struct switch_log_entry {
		struct timespec64 tstamp;
		s32 spu_id;
		u32 type;
		u32 val;
		u64 timebase;
	} log[];
};

struct spu_context {
	struct spu *spu; /* pointer to a physical SPU */
	struct spu_state csa; /* SPU context save area. */
	spinlock_t mmio_lock; /* protects mmio access */
	struct address_space *local_store; /* local store mapping. */
	struct address_space *mfc; /* 'mfc' area mappings. */
	struct address_space *cntl; /* 'control' area mappings. */
	struct address_space *signal1; /* 'signal1' area mappings. */
	struct address_space *signal2; /* 'signal2' area mappings. */
	struct address_space *mss; /* 'mss' area mappings. */
	struct address_space *psmap; /* 'psmap' area mappings. */
	struct mutex mapping_lock;
	u64 object_id; /* user space pointer for GNU Debugger */

	enum { SPU_STATE_RUNNABLE, SPU_STATE_SAVED } state;
	struct mutex state_mutex;
	struct mutex run_mutex;

	struct mm_struct *owner;

	struct kref kref;
	wait_queue_head_t ibox_wq;
	wait_queue_head_t wbox_wq;
	wait_queue_head_t stop_wq;
	wait_queue_head_t mfc_wq;
	wait_queue_head_t run_wq;
	u32 tagwait;
	struct spu_context_ops *ops;
	struct work_struct reap_work;
	unsigned long flags;
	unsigned long event_return;

	struct list_head gang_list;
	struct spu_gang *gang;
	struct kref *prof_priv_kref;
	void ( * prof_priv_release) (struct kref *kref);

	/* owner thread */
	pid_t tid;

	/* scheduler fields */
	struct list_head rq;
	unsigned int time_slice;
	unsigned long sched_flags;
	cpumask_t cpus_allowed;
	int policy;
	int prio;
	int last_ran;

	/* statistics */
	struct {
		/* updates protected by ctx->state_mutex */
		enum spu_utilization_state util_state;
		unsigned long long tstamp; /* time of last state switch */
		unsigned long long times[SPU_UTIL_MAX];
		unsigned long long vol_ctx_switch;
		unsigned long long invol_ctx_switch;
		unsigned long long min_flt;
		unsigned long long maj_flt;
		unsigned long long hash_flt;
		unsigned long long slb_flt;
		unsigned long long slb_flt_base; /* # at last ctx switch */
		unsigned long long class2_intr;
		unsigned long long class2_intr_base; /* # at last ctx switch */
		unsigned long long libassist;
	} stats;

	/* context switch log */
	struct switch_log *switch_log;

	struct list_head aff_list;
	int aff_head;
	int aff_offset;
};

struct spu_gang {
	struct list_head list;
	struct mutex mutex;
	struct kref kref;
	int contexts;

	struct spu_context *aff_ref_ctx;
	struct list_head aff_list_head;
	struct mutex aff_mutex;
	int aff_flags;
	struct spu *aff_ref_spu;
	atomic_t aff_sched_count;

	int alive;
};

/* Flag bits for spu_gang aff_flags */
#define AFF_OFFSETS_SET 1
#define AFF_MERGED 2

struct mfc_dma_command {
	int32_t pad; /* reserved */
	uint32_t lsa; /* local storage address */
	uint64_t ea; /* effective address */
	uint16_t size; /* transfer size */
	uint16_t tag; /* command tag */
	uint16_t class; /* class ID */
	uint16_t cmd; /* command opcode */
};

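/*
 * Usage sketch (illustrative only): user space queues a DMA transfer by
 * writing one of these records to a context's 'mfc' file.  A 16 KiB PUT
 * from local store to an effective address could be filled in roughly as
 * below; the MFC_PUT_CMD opcode is assumed to come from <asm/spu.h>, and
 * the variable names are hypothetical.
 *
 *	struct mfc_dma_command cmd = {
 *		.lsa  = ls_offset,
 *		.ea   = (uint64_t)(unsigned long)buffer,
 *		.size = 16384,
 *		.tag  = 1,
 *		.cmd  = MFC_PUT_CMD,
 *	};
 */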

/* SPU context query/set operations. */
struct spu_context_ops {
	int (*mbox_read) (struct spu_context * ctx, u32 * data);
	u32(*mbox_stat_read) (struct spu_context * ctx);
	__poll_t (*mbox_stat_poll)(struct spu_context *ctx, __poll_t events);
	int (*ibox_read) (struct spu_context * ctx, u32 * data);
	int (*wbox_write) (struct spu_context * ctx, u32 data);
	u32(*signal1_read) (struct spu_context * ctx);
	void (*signal1_write) (struct spu_context * ctx, u32 data);
	u32(*signal2_read) (struct spu_context * ctx);
	void (*signal2_write) (struct spu_context * ctx, u32 data);
	void (*signal1_type_set) (struct spu_context * ctx, u64 val);
	u64(*signal1_type_get) (struct spu_context * ctx);
	void (*signal2_type_set) (struct spu_context * ctx, u64 val);
	u64(*signal2_type_get) (struct spu_context * ctx);
	u32(*npc_read) (struct spu_context * ctx);
	void (*npc_write) (struct spu_context * ctx, u32 data);
	u32(*status_read) (struct spu_context * ctx);
	char*(*get_ls) (struct spu_context * ctx);
	void (*privcntl_write) (struct spu_context *ctx, u64 data);
	u32 (*runcntl_read) (struct spu_context * ctx);
	void (*runcntl_write) (struct spu_context * ctx, u32 data);
	void (*runcntl_stop) (struct spu_context * ctx);
	void (*master_start) (struct spu_context * ctx);
	void (*master_stop) (struct spu_context * ctx);
	int (*set_mfc_query)(struct spu_context * ctx, u32 mask, u32 mode);
	u32 (*read_mfc_tagstatus)(struct spu_context * ctx);
	u32 (*get_mfc_free_elements)(struct spu_context *ctx);
	int (*send_mfc_command)(struct spu_context * ctx,
				struct mfc_dma_command * cmd);
	void (*dma_info_read) (struct spu_context * ctx,
			       struct spu_dma_info * info);
	void (*proxydma_info_read) (struct spu_context * ctx,
				    struct spu_proxydma_info * info);
	void (*restart_dma)(struct spu_context *ctx);
};

extern struct spu_context_ops spu_hw_ops;
extern struct spu_context_ops spu_backing_ops;
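/*
 * Usage sketch (illustrative only): spufs reaches the SPU through this ops
 * table, so the same call works whether the context currently owns a
 * physical SPU (spu_hw_ops) or only exists as a saved image
 * (spu_backing_ops), e.g.:
 *
 *	ctx->ops->npc_write(ctx, *npc);
 *	ctx->ops->runcntl_write(ctx, SPU_RUNCNTL_RUNNABLE);
 *
 * SPU_RUNCNTL_RUNNABLE is assumed to come from <asm/spu.h>.
 */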

struct spufs_inode_info {
	struct spu_context *i_ctx;
	struct spu_gang *i_gang;
	struct inode vfs_inode;
	int i_openers;
};
#define SPUFS_I(inode) \
	container_of(inode, struct spufs_inode_info, vfs_inode)

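/*
 * Usage sketch (illustrative only): file operations recover the context
 * (or gang) from the VFS inode via SPUFS_I().  A typical open handler,
 * with hypothetical names, looks roughly like:
 *
 *	static int example_open(struct inode *inode, struct file *file)
 *	{
 *		struct spufs_inode_info *i = SPUFS_I(inode);
 *
 *		file->private_data = i->i_ctx;
 *		return 0;
 *	}
 */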

struct spufs_tree_descr {
	const char *name;
	const struct file_operations *ops;
	umode_t mode;
	size_t size;
};

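/*
 * Usage sketch (illustrative only): each spufs directory is populated from
 * a table of these descriptors, one entry per file, terminated by an empty
 * entry.  A hypothetical table might read:
 *
 *	static const struct spufs_tree_descr example_contents[] = {
 *		{ "mem",  &example_mem_fops,  0666, },
 *		{ "regs", &example_regs_fops, 0666, },
 *		{},
 *	};
 */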

extern const struct spufs_tree_descr spufs_dir_contents[];
extern const struct spufs_tree_descr spufs_dir_nosched_contents[];
extern const struct spufs_tree_descr spufs_dir_debug_contents[];

/* system call implementation */
extern struct spufs_calls spufs_calls;
struct coredump_params;
long spufs_run_spu(struct spu_context *ctx, u32 *npc, u32 *status);
long spufs_create(const struct path *nd, struct dentry *dentry, unsigned int flags,
		  umode_t mode, struct file *filp);
/* ELF coredump callbacks for writing SPU ELF notes */
extern int spufs_coredump_extra_notes_size(void);
extern int spufs_coredump_extra_notes_write(struct coredump_params *cprm);

extern const struct file_operations spufs_context_fops;

/* gang management */
struct spu_gang *alloc_spu_gang(void);
struct spu_gang *get_spu_gang(struct spu_gang *gang);
int put_spu_gang(struct spu_gang *gang);
void spu_gang_remove_ctx(struct spu_gang *gang, struct spu_context *ctx);
void spu_gang_add_ctx(struct spu_gang *gang, struct spu_context *ctx);

/* fault handling */
int spufs_handle_class1(struct spu_context *ctx);
int spufs_handle_class0(struct spu_context *ctx);

/* affinity */
struct spu *affinity_check(struct spu_context *ctx);

/* context management */
extern atomic_t nr_spu_contexts;
static inline int __must_check spu_acquire(struct spu_context *ctx)
{
	return mutex_lock_interruptible(&ctx->state_mutex);
}

static inline void spu_release(struct spu_context *ctx)
{
	mutex_unlock(&ctx->state_mutex);
}

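/*
 * Usage sketch (illustrative only): spu_acquire() is __must_check because
 * mutex_lock_interruptible() fails when a signal arrives, so callers
 * follow this pattern and only touch the context while state_mutex is
 * held:
 *
 *	ret = spu_acquire(ctx);
 *	if (ret)
 *		return ret;
 *	... operate on ctx under state_mutex ...
 *	spu_release(ctx);
 */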
struct spu_context * alloc_spu_context(struct spu_gang *gang);
void destroy_spu_context(struct kref *kref);
struct spu_context * get_spu_context(struct spu_context *ctx);
int put_spu_context(struct spu_context *ctx);
void spu_unmap_mappings(struct spu_context *ctx);

void spu_forget(struct spu_context *ctx);
int __must_check spu_acquire_saved(struct spu_context *ctx);
void spu_release_saved(struct spu_context *ctx);

int spu_stopped(struct spu_context *ctx, u32 * stat);
void spu_del_from_rq(struct spu_context *ctx);
int spu_activate(struct spu_context *ctx, unsigned long flags);
void spu_deactivate(struct spu_context *ctx);
void spu_yield(struct spu_context *ctx);
void spu_switch_log_notify(struct spu *spu, struct spu_context *ctx,
		u32 type, u32 val);
void spu_set_timeslice(struct spu_context *ctx);
void spu_update_sched_info(struct spu_context *ctx);
void __spu_update_sched_info(struct spu_context *ctx);
int __init spu_sched_init(void);
void spu_sched_exit(void);

extern char *isolated_loader;

/*
 * spufs_wait
 *	Same as wait_event_interruptible(), except that here
 *	we need to call spu_release(ctx) before sleeping, and
 *	then spu_acquire(ctx) when awoken.
 *
 *	Returns with state_mutex re-acquired when successful or
 *	with -ERESTARTSYS and the state_mutex dropped when interrupted.
 */

#define spufs_wait(wq, condition) \
({ \
	int __ret = 0; \
	DEFINE_WAIT(__wait); \
	for (;;) { \
		prepare_to_wait(&(wq), &__wait, TASK_INTERRUPTIBLE); \
		if (condition) \
			break; \
		spu_release(ctx); \
		if (signal_pending(current)) { \
			__ret = -ERESTARTSYS; \
			break; \
		} \
		schedule(); \
		__ret = spu_acquire(ctx); \
		if (__ret) \
			break; \
	} \
	finish_wait(&(wq), &__wait); \
	__ret; \
})

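/*
 * Usage sketch (illustrative only): spufs_wait() relies on a local
 * variable named ctx being in scope and on state_mutex being held on
 * entry.  Waiting for interrupt-mailbox data, for instance, looks roughly
 * like this; on success the mutex is held again and must be released by
 * the caller, while on -ERESTARTSYS it has already been dropped:
 *
 *	ret = spu_acquire(ctx);
 *	if (ret)
 *		return ret;
 *	ret = spufs_wait(ctx->ibox_wq, spu_ibox_read(ctx, &data));
 *	if (!ret)
 *		spu_release(ctx);
 */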
size_t spu_wbox_write(struct spu_context *ctx, u32 data);
size_t spu_ibox_read(struct spu_context *ctx, u32 *data);

/* irq callback funcs. */
void spufs_ibox_callback(struct spu *spu);
void spufs_wbox_callback(struct spu *spu);
void spufs_stop_callback(struct spu *spu, int irq);
void spufs_mfc_callback(struct spu *spu);
void spufs_dma_callback(struct spu *spu, int type);

struct spufs_coredump_reader {
	char *name;
	ssize_t (*dump)(struct spu_context *ctx, struct coredump_params *cprm);
	u64 (*get)(struct spu_context *ctx);
	size_t size;
};
extern const struct spufs_coredump_reader spufs_coredump_read[];

extern int spu_init_csa(struct spu_state *csa);
extern void spu_fini_csa(struct spu_state *csa);
extern int spu_save(struct spu_state *prev, struct spu *spu);
extern int spu_restore(struct spu_state *new, struct spu *spu);
extern int spu_switch(struct spu_state *prev, struct spu_state *new,
		      struct spu *spu);
extern int spu_alloc_lscsa(struct spu_state *csa);
extern void spu_free_lscsa(struct spu_state *csa);

extern void spuctx_switch_state(struct spu_context *ctx,
		enum spu_utilization_state new_state);

#endif