/* arch/powerpc/platforms/cell/spufs/file.c */
// SPDX-License-Identifier: GPL-2.0-or-later1/*2* SPU file system -- file contents3*4* (C) Copyright IBM Deutschland Entwicklung GmbH 20055*6* Author: Arnd Bergmann <[email protected]>7*/89#undef DEBUG1011#include <linux/coredump.h>12#include <linux/fs.h>13#include <linux/ioctl.h>14#include <linux/export.h>15#include <linux/pagemap.h>16#include <linux/poll.h>17#include <linux/ptrace.h>18#include <linux/seq_file.h>19#include <linux/slab.h>2021#include <asm/io.h>22#include <asm/time.h>23#include <asm/spu.h>24#include <asm/spu_info.h>25#include <linux/uaccess.h>2627#include "spufs.h"28#include "sputrace.h"2930#define SPUFS_MMAP_4K (PAGE_SIZE == 0x1000)3132/* Simple attribute files */33struct spufs_attr {34int (*get)(void *, u64 *);35int (*set)(void *, u64);36char get_buf[24]; /* enough to store a u64 and "\n\0" */37char set_buf[24];38void *data;39const char *fmt; /* format for read operation */40struct mutex mutex; /* protects access to these buffers */41};4243static int spufs_attr_open(struct inode *inode, struct file *file,44int (*get)(void *, u64 *), int (*set)(void *, u64),45const char *fmt)46{47struct spufs_attr *attr;4849attr = kmalloc(sizeof(*attr), GFP_KERNEL);50if (!attr)51return -ENOMEM;5253attr->get = get;54attr->set = set;55attr->data = inode->i_private;56attr->fmt = fmt;57mutex_init(&attr->mutex);58file->private_data = attr;5960return nonseekable_open(inode, file);61}6263static int spufs_attr_release(struct inode *inode, struct file *file)64{65kfree(file->private_data);66return 0;67}6869static ssize_t spufs_attr_read(struct file *file, char __user *buf,70size_t len, loff_t *ppos)71{72struct spufs_attr *attr;73size_t size;74ssize_t ret;7576attr = file->private_data;77if (!attr->get)78return -EACCES;7980ret = mutex_lock_interruptible(&attr->mutex);81if (ret)82return ret;8384if (*ppos) { /* continued read */85size = strlen(attr->get_buf);86} else { /* first read */87u64 val;88ret = attr->get(attr->data, &val);89if (ret)90goto out;9192size = 
scnprintf(attr->get_buf, sizeof(attr->get_buf),93attr->fmt, (unsigned long long)val);94}9596ret = simple_read_from_buffer(buf, len, ppos, attr->get_buf, size);97out:98mutex_unlock(&attr->mutex);99return ret;100}101102static ssize_t spufs_attr_write(struct file *file, const char __user *buf,103size_t len, loff_t *ppos)104{105struct spufs_attr *attr;106u64 val;107size_t size;108ssize_t ret;109110attr = file->private_data;111if (!attr->set)112return -EACCES;113114ret = mutex_lock_interruptible(&attr->mutex);115if (ret)116return ret;117118ret = -EFAULT;119size = min(sizeof(attr->set_buf) - 1, len);120if (copy_from_user(attr->set_buf, buf, size))121goto out;122123ret = len; /* claim we got the whole input */124attr->set_buf[size] = '\0';125val = simple_strtol(attr->set_buf, NULL, 0);126attr->set(attr->data, val);127out:128mutex_unlock(&attr->mutex);129return ret;130}131132static ssize_t spufs_dump_emit(struct coredump_params *cprm, void *buf,133size_t size)134{135if (!dump_emit(cprm, buf, size))136return -EIO;137return size;138}139140#define DEFINE_SPUFS_SIMPLE_ATTRIBUTE(__fops, __get, __set, __fmt) \141static int __fops ## _open(struct inode *inode, struct file *file) \142{ \143__simple_attr_check_format(__fmt, 0ull); \144return spufs_attr_open(inode, file, __get, __set, __fmt); \145} \146static const struct file_operations __fops = { \147.open = __fops ## _open, \148.release = spufs_attr_release, \149.read = spufs_attr_read, \150.write = spufs_attr_write, \151.llseek = generic_file_llseek, \152};153154155static int156spufs_mem_open(struct inode *inode, struct file *file)157{158struct spufs_inode_info *i = SPUFS_I(inode);159struct spu_context *ctx = i->i_ctx;160161mutex_lock(&ctx->mapping_lock);162file->private_data = ctx;163if (!i->i_openers++)164ctx->local_store = inode->i_mapping;165mutex_unlock(&ctx->mapping_lock);166return 0;167}168169static int170spufs_mem_release(struct inode *inode, struct file *file)171{172struct spufs_inode_info *i = SPUFS_I(inode);173struct 
spu_context *ctx = i->i_ctx;174175mutex_lock(&ctx->mapping_lock);176if (!--i->i_openers)177ctx->local_store = NULL;178mutex_unlock(&ctx->mapping_lock);179return 0;180}181182static ssize_t183spufs_mem_dump(struct spu_context *ctx, struct coredump_params *cprm)184{185return spufs_dump_emit(cprm, ctx->ops->get_ls(ctx), LS_SIZE);186}187188static ssize_t189spufs_mem_read(struct file *file, char __user *buffer,190size_t size, loff_t *pos)191{192struct spu_context *ctx = file->private_data;193ssize_t ret;194195ret = spu_acquire(ctx);196if (ret)197return ret;198ret = simple_read_from_buffer(buffer, size, pos, ctx->ops->get_ls(ctx),199LS_SIZE);200spu_release(ctx);201202return ret;203}204205static ssize_t206spufs_mem_write(struct file *file, const char __user *buffer,207size_t size, loff_t *ppos)208{209struct spu_context *ctx = file->private_data;210char *local_store;211loff_t pos = *ppos;212int ret;213214if (pos > LS_SIZE)215return -EFBIG;216217ret = spu_acquire(ctx);218if (ret)219return ret;220221local_store = ctx->ops->get_ls(ctx);222size = simple_write_to_buffer(local_store, LS_SIZE, ppos, buffer, size);223spu_release(ctx);224225return size;226}227228static vm_fault_t229spufs_mem_mmap_fault(struct vm_fault *vmf)230{231struct vm_area_struct *vma = vmf->vma;232struct spu_context *ctx = vma->vm_file->private_data;233unsigned long pfn, offset;234vm_fault_t ret;235236offset = vmf->pgoff << PAGE_SHIFT;237if (offset >= LS_SIZE)238return VM_FAULT_SIGBUS;239240pr_debug("spufs_mem_mmap_fault address=0x%lx, offset=0x%lx\n",241vmf->address, offset);242243if (spu_acquire(ctx))244return VM_FAULT_NOPAGE;245246if (ctx->state == SPU_STATE_SAVED) {247vma->vm_page_prot = pgprot_cached(vma->vm_page_prot);248pfn = vmalloc_to_pfn(ctx->csa.lscsa->ls + offset);249} else {250vma->vm_page_prot = pgprot_noncached_wc(vma->vm_page_prot);251pfn = (ctx->spu->local_store_phys + offset) >> PAGE_SHIFT;252}253ret = vmf_insert_pfn(vma, vmf->address, pfn);254255spu_release(ctx);256257return 
ret;258}259260static int spufs_mem_mmap_access(struct vm_area_struct *vma,261unsigned long address,262void *buf, int len, int write)263{264struct spu_context *ctx = vma->vm_file->private_data;265unsigned long offset = address - vma->vm_start;266char *local_store;267268if (write && !(vma->vm_flags & VM_WRITE))269return -EACCES;270if (spu_acquire(ctx))271return -EINTR;272if ((offset + len) > vma->vm_end)273len = vma->vm_end - offset;274local_store = ctx->ops->get_ls(ctx);275if (write)276memcpy_toio(local_store + offset, buf, len);277else278memcpy_fromio(buf, local_store + offset, len);279spu_release(ctx);280return len;281}282283static const struct vm_operations_struct spufs_mem_mmap_vmops = {284.fault = spufs_mem_mmap_fault,285.access = spufs_mem_mmap_access,286};287288static int spufs_mem_mmap(struct file *file, struct vm_area_struct *vma)289{290if (!(vma->vm_flags & VM_SHARED))291return -EINVAL;292293vm_flags_set(vma, VM_IO | VM_PFNMAP);294vma->vm_page_prot = pgprot_noncached_wc(vma->vm_page_prot);295296vma->vm_ops = &spufs_mem_mmap_vmops;297return 0;298}299300static const struct file_operations spufs_mem_fops = {301.open = spufs_mem_open,302.release = spufs_mem_release,303.read = spufs_mem_read,304.write = spufs_mem_write,305.llseek = generic_file_llseek,306.mmap = spufs_mem_mmap,307};308309static vm_fault_t spufs_ps_fault(struct vm_fault *vmf,310unsigned long ps_offs,311unsigned long ps_size)312{313struct spu_context *ctx = vmf->vma->vm_file->private_data;314unsigned long area, offset = vmf->pgoff << PAGE_SHIFT;315int err = 0;316vm_fault_t ret = VM_FAULT_NOPAGE;317318spu_context_nospu_trace(spufs_ps_fault__enter, ctx);319320if (offset >= ps_size)321return VM_FAULT_SIGBUS;322323if (fatal_signal_pending(current))324return VM_FAULT_SIGBUS;325326/*327* Because we release the mmap_lock, the context may be destroyed while328* we're in spu_wait. 
Grab an extra reference so it isn't destroyed329* in the meantime.330*/331get_spu_context(ctx);332333/*334* We have to wait for context to be loaded before we have335* pages to hand out to the user, but we don't want to wait336* with the mmap_lock held.337* It is possible to drop the mmap_lock here, but then we need338* to return VM_FAULT_NOPAGE because the mappings may have339* hanged.340*/341if (spu_acquire(ctx))342goto refault;343344if (ctx->state == SPU_STATE_SAVED) {345mmap_read_unlock(current->mm);346spu_context_nospu_trace(spufs_ps_fault__sleep, ctx);347err = spufs_wait(ctx->run_wq, ctx->state == SPU_STATE_RUNNABLE);348spu_context_trace(spufs_ps_fault__wake, ctx, ctx->spu);349mmap_read_lock(current->mm);350} else {351area = ctx->spu->problem_phys + ps_offs;352ret = vmf_insert_pfn(vmf->vma, vmf->address,353(area + offset) >> PAGE_SHIFT);354spu_context_trace(spufs_ps_fault__insert, ctx, ctx->spu);355}356357if (!err)358spu_release(ctx);359360refault:361put_spu_context(ctx);362return ret;363}364365#if SPUFS_MMAP_4K366static vm_fault_t spufs_cntl_mmap_fault(struct vm_fault *vmf)367{368return spufs_ps_fault(vmf, 0x4000, SPUFS_CNTL_MAP_SIZE);369}370371static const struct vm_operations_struct spufs_cntl_mmap_vmops = {372.fault = spufs_cntl_mmap_fault,373};374375/*376* mmap support for problem state control area [0x4000 - 0x4fff].377*/378static int spufs_cntl_mmap(struct file *file, struct vm_area_struct *vma)379{380if (!(vma->vm_flags & VM_SHARED))381return -EINVAL;382383vm_flags_set(vma, VM_IO | VM_PFNMAP);384vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);385386vma->vm_ops = &spufs_cntl_mmap_vmops;387return 0;388}389#else /* SPUFS_MMAP_4K */390#define spufs_cntl_mmap NULL391#endif /* !SPUFS_MMAP_4K */392393static int spufs_cntl_get(void *data, u64 *val)394{395struct spu_context *ctx = data;396int ret;397398ret = spu_acquire(ctx);399if (ret)400return ret;401*val = ctx->ops->status_read(ctx);402spu_release(ctx);403404return 0;405}406407static int 
spufs_cntl_set(void *data, u64 val)408{409struct spu_context *ctx = data;410int ret;411412ret = spu_acquire(ctx);413if (ret)414return ret;415ctx->ops->runcntl_write(ctx, val);416spu_release(ctx);417418return 0;419}420421static int spufs_cntl_open(struct inode *inode, struct file *file)422{423struct spufs_inode_info *i = SPUFS_I(inode);424struct spu_context *ctx = i->i_ctx;425426mutex_lock(&ctx->mapping_lock);427file->private_data = ctx;428if (!i->i_openers++)429ctx->cntl = inode->i_mapping;430mutex_unlock(&ctx->mapping_lock);431return simple_attr_open(inode, file, spufs_cntl_get,432spufs_cntl_set, "0x%08lx");433}434435static int436spufs_cntl_release(struct inode *inode, struct file *file)437{438struct spufs_inode_info *i = SPUFS_I(inode);439struct spu_context *ctx = i->i_ctx;440441simple_attr_release(inode, file);442443mutex_lock(&ctx->mapping_lock);444if (!--i->i_openers)445ctx->cntl = NULL;446mutex_unlock(&ctx->mapping_lock);447return 0;448}449450static const struct file_operations spufs_cntl_fops = {451.open = spufs_cntl_open,452.release = spufs_cntl_release,453.read = simple_attr_read,454.write = simple_attr_write,455.mmap = spufs_cntl_mmap,456};457458static int459spufs_regs_open(struct inode *inode, struct file *file)460{461struct spufs_inode_info *i = SPUFS_I(inode);462file->private_data = i->i_ctx;463return 0;464}465466static ssize_t467spufs_regs_dump(struct spu_context *ctx, struct coredump_params *cprm)468{469return spufs_dump_emit(cprm, ctx->csa.lscsa->gprs,470sizeof(ctx->csa.lscsa->gprs));471}472473static ssize_t474spufs_regs_read(struct file *file, char __user *buffer,475size_t size, loff_t *pos)476{477int ret;478struct spu_context *ctx = file->private_data;479480/* pre-check for file position: if we'd return EOF, there's no point481* causing a deschedule */482if (*pos >= sizeof(ctx->csa.lscsa->gprs))483return 0;484485ret = spu_acquire_saved(ctx);486if (ret)487return ret;488ret = simple_read_from_buffer(buffer, size, pos, 
ctx->csa.lscsa->gprs,489sizeof(ctx->csa.lscsa->gprs));490spu_release_saved(ctx);491return ret;492}493494static ssize_t495spufs_regs_write(struct file *file, const char __user *buffer,496size_t size, loff_t *pos)497{498struct spu_context *ctx = file->private_data;499struct spu_lscsa *lscsa = ctx->csa.lscsa;500int ret;501502if (*pos >= sizeof(lscsa->gprs))503return -EFBIG;504505ret = spu_acquire_saved(ctx);506if (ret)507return ret;508509size = simple_write_to_buffer(lscsa->gprs, sizeof(lscsa->gprs), pos,510buffer, size);511512spu_release_saved(ctx);513return size;514}515516static const struct file_operations spufs_regs_fops = {517.open = spufs_regs_open,518.read = spufs_regs_read,519.write = spufs_regs_write,520.llseek = generic_file_llseek,521};522523static ssize_t524spufs_fpcr_dump(struct spu_context *ctx, struct coredump_params *cprm)525{526return spufs_dump_emit(cprm, &ctx->csa.lscsa->fpcr,527sizeof(ctx->csa.lscsa->fpcr));528}529530static ssize_t531spufs_fpcr_read(struct file *file, char __user * buffer,532size_t size, loff_t * pos)533{534int ret;535struct spu_context *ctx = file->private_data;536537ret = spu_acquire_saved(ctx);538if (ret)539return ret;540ret = simple_read_from_buffer(buffer, size, pos, &ctx->csa.lscsa->fpcr,541sizeof(ctx->csa.lscsa->fpcr));542spu_release_saved(ctx);543return ret;544}545546static ssize_t547spufs_fpcr_write(struct file *file, const char __user * buffer,548size_t size, loff_t * pos)549{550struct spu_context *ctx = file->private_data;551struct spu_lscsa *lscsa = ctx->csa.lscsa;552int ret;553554if (*pos >= sizeof(lscsa->fpcr))555return -EFBIG;556557ret = spu_acquire_saved(ctx);558if (ret)559return ret;560561size = simple_write_to_buffer(&lscsa->fpcr, sizeof(lscsa->fpcr), pos,562buffer, size);563564spu_release_saved(ctx);565return size;566}567568static const struct file_operations spufs_fpcr_fops = {569.open = spufs_regs_open,570.read = spufs_fpcr_read,571.write = spufs_fpcr_write,572.llseek = generic_file_llseek,573};574575/* generic 
open function for all pipe-like files */576static int spufs_pipe_open(struct inode *inode, struct file *file)577{578struct spufs_inode_info *i = SPUFS_I(inode);579file->private_data = i->i_ctx;580581return stream_open(inode, file);582}583584/*585* Read as many bytes from the mailbox as possible, until586* one of the conditions becomes true:587*588* - no more data available in the mailbox589* - end of the user provided buffer590* - end of the mapped area591*/592static ssize_t spufs_mbox_read(struct file *file, char __user *buf,593size_t len, loff_t *pos)594{595struct spu_context *ctx = file->private_data;596u32 mbox_data, __user *udata = (void __user *)buf;597ssize_t count;598599if (len < 4)600return -EINVAL;601602count = spu_acquire(ctx);603if (count)604return count;605606for (count = 0; (count + 4) <= len; count += 4, udata++) {607int ret;608ret = ctx->ops->mbox_read(ctx, &mbox_data);609if (ret == 0)610break;611612/*613* at the end of the mapped area, we can fault614* but still need to return the data we have615* read successfully so far.616*/617ret = put_user(mbox_data, udata);618if (ret) {619if (!count)620count = -EFAULT;621break;622}623}624spu_release(ctx);625626if (!count)627count = -EAGAIN;628629return count;630}631632static const struct file_operations spufs_mbox_fops = {633.open = spufs_pipe_open,634.read = spufs_mbox_read,635};636637static ssize_t spufs_mbox_stat_read(struct file *file, char __user *buf,638size_t len, loff_t *pos)639{640struct spu_context *ctx = file->private_data;641ssize_t ret;642u32 mbox_stat;643644if (len < 4)645return -EINVAL;646647ret = spu_acquire(ctx);648if (ret)649return ret;650651mbox_stat = ctx->ops->mbox_stat_read(ctx) & 0xff;652653spu_release(ctx);654655if (copy_to_user(buf, &mbox_stat, sizeof mbox_stat))656return -EFAULT;657658return 4;659}660661static const struct file_operations spufs_mbox_stat_fops = {662.open = spufs_pipe_open,663.read = spufs_mbox_stat_read,664};665666/* low-level ibox access function */667size_t 
spu_ibox_read(struct spu_context *ctx, u32 *data)668{669return ctx->ops->ibox_read(ctx, data);670}671672/* interrupt-level ibox callback function. */673void spufs_ibox_callback(struct spu *spu)674{675struct spu_context *ctx = spu->ctx;676677if (ctx)678wake_up_all(&ctx->ibox_wq);679}680681/*682* Read as many bytes from the interrupt mailbox as possible, until683* one of the conditions becomes true:684*685* - no more data available in the mailbox686* - end of the user provided buffer687* - end of the mapped area688*689* If the file is opened without O_NONBLOCK, we wait here until690* any data is available, but return when we have been able to691* read something.692*/693static ssize_t spufs_ibox_read(struct file *file, char __user *buf,694size_t len, loff_t *pos)695{696struct spu_context *ctx = file->private_data;697u32 ibox_data, __user *udata = (void __user *)buf;698ssize_t count;699700if (len < 4)701return -EINVAL;702703count = spu_acquire(ctx);704if (count)705goto out;706707/* wait only for the first element */708count = 0;709if (file->f_flags & O_NONBLOCK) {710if (!spu_ibox_read(ctx, &ibox_data)) {711count = -EAGAIN;712goto out_unlock;713}714} else {715count = spufs_wait(ctx->ibox_wq, spu_ibox_read(ctx, &ibox_data));716if (count)717goto out;718}719720/* if we can't write at all, return -EFAULT */721count = put_user(ibox_data, udata);722if (count)723goto out_unlock;724725for (count = 4, udata++; (count + 4) <= len; count += 4, udata++) {726int ret;727ret = ctx->ops->ibox_read(ctx, &ibox_data);728if (ret == 0)729break;730/*731* at the end of the mapped area, we can fault732* but still need to return the data we have733* read successfully so far.734*/735ret = put_user(ibox_data, udata);736if (ret)737break;738}739740out_unlock:741spu_release(ctx);742out:743return count;744}745746static __poll_t spufs_ibox_poll(struct file *file, poll_table *wait)747{748struct spu_context *ctx = file->private_data;749__poll_t mask;750751poll_wait(file, &ctx->ibox_wq, 
wait);752753/*754* For now keep this uninterruptible and also ignore the rule755* that poll should not sleep. Will be fixed later.756*/757mutex_lock(&ctx->state_mutex);758mask = ctx->ops->mbox_stat_poll(ctx, EPOLLIN | EPOLLRDNORM);759spu_release(ctx);760761return mask;762}763764static const struct file_operations spufs_ibox_fops = {765.open = spufs_pipe_open,766.read = spufs_ibox_read,767.poll = spufs_ibox_poll,768};769770static ssize_t spufs_ibox_stat_read(struct file *file, char __user *buf,771size_t len, loff_t *pos)772{773struct spu_context *ctx = file->private_data;774ssize_t ret;775u32 ibox_stat;776777if (len < 4)778return -EINVAL;779780ret = spu_acquire(ctx);781if (ret)782return ret;783ibox_stat = (ctx->ops->mbox_stat_read(ctx) >> 16) & 0xff;784spu_release(ctx);785786if (copy_to_user(buf, &ibox_stat, sizeof ibox_stat))787return -EFAULT;788789return 4;790}791792static const struct file_operations spufs_ibox_stat_fops = {793.open = spufs_pipe_open,794.read = spufs_ibox_stat_read,795};796797/* low-level mailbox write */798size_t spu_wbox_write(struct spu_context *ctx, u32 data)799{800return ctx->ops->wbox_write(ctx, data);801}802803/* interrupt-level wbox callback function. 
*/804void spufs_wbox_callback(struct spu *spu)805{806struct spu_context *ctx = spu->ctx;807808if (ctx)809wake_up_all(&ctx->wbox_wq);810}811812/*813* Write as many bytes to the interrupt mailbox as possible, until814* one of the conditions becomes true:815*816* - the mailbox is full817* - end of the user provided buffer818* - end of the mapped area819*820* If the file is opened without O_NONBLOCK, we wait here until821* space is available, but return when we have been able to822* write something.823*/824static ssize_t spufs_wbox_write(struct file *file, const char __user *buf,825size_t len, loff_t *pos)826{827struct spu_context *ctx = file->private_data;828u32 wbox_data, __user *udata = (void __user *)buf;829ssize_t count;830831if (len < 4)832return -EINVAL;833834if (get_user(wbox_data, udata))835return -EFAULT;836837count = spu_acquire(ctx);838if (count)839goto out;840841/*842* make sure we can at least write one element, by waiting843* in case of !O_NONBLOCK844*/845count = 0;846if (file->f_flags & O_NONBLOCK) {847if (!spu_wbox_write(ctx, wbox_data)) {848count = -EAGAIN;849goto out_unlock;850}851} else {852count = spufs_wait(ctx->wbox_wq, spu_wbox_write(ctx, wbox_data));853if (count)854goto out;855}856857858/* write as much as possible */859for (count = 4, udata++; (count + 4) <= len; count += 4, udata++) {860int ret;861ret = get_user(wbox_data, udata);862if (ret)863break;864865ret = spu_wbox_write(ctx, wbox_data);866if (ret == 0)867break;868}869870out_unlock:871spu_release(ctx);872out:873return count;874}875876static __poll_t spufs_wbox_poll(struct file *file, poll_table *wait)877{878struct spu_context *ctx = file->private_data;879__poll_t mask;880881poll_wait(file, &ctx->wbox_wq, wait);882883/*884* For now keep this uninterruptible and also ignore the rule885* that poll should not sleep. 
Will be fixed later.886*/887mutex_lock(&ctx->state_mutex);888mask = ctx->ops->mbox_stat_poll(ctx, EPOLLOUT | EPOLLWRNORM);889spu_release(ctx);890891return mask;892}893894static const struct file_operations spufs_wbox_fops = {895.open = spufs_pipe_open,896.write = spufs_wbox_write,897.poll = spufs_wbox_poll,898};899900static ssize_t spufs_wbox_stat_read(struct file *file, char __user *buf,901size_t len, loff_t *pos)902{903struct spu_context *ctx = file->private_data;904ssize_t ret;905u32 wbox_stat;906907if (len < 4)908return -EINVAL;909910ret = spu_acquire(ctx);911if (ret)912return ret;913wbox_stat = (ctx->ops->mbox_stat_read(ctx) >> 8) & 0xff;914spu_release(ctx);915916if (copy_to_user(buf, &wbox_stat, sizeof wbox_stat))917return -EFAULT;918919return 4;920}921922static const struct file_operations spufs_wbox_stat_fops = {923.open = spufs_pipe_open,924.read = spufs_wbox_stat_read,925};926927static int spufs_signal1_open(struct inode *inode, struct file *file)928{929struct spufs_inode_info *i = SPUFS_I(inode);930struct spu_context *ctx = i->i_ctx;931932mutex_lock(&ctx->mapping_lock);933file->private_data = ctx;934if (!i->i_openers++)935ctx->signal1 = inode->i_mapping;936mutex_unlock(&ctx->mapping_lock);937return nonseekable_open(inode, file);938}939940static int941spufs_signal1_release(struct inode *inode, struct file *file)942{943struct spufs_inode_info *i = SPUFS_I(inode);944struct spu_context *ctx = i->i_ctx;945946mutex_lock(&ctx->mapping_lock);947if (!--i->i_openers)948ctx->signal1 = NULL;949mutex_unlock(&ctx->mapping_lock);950return 0;951}952953static ssize_t spufs_signal1_dump(struct spu_context *ctx,954struct coredump_params *cprm)955{956if (!ctx->csa.spu_chnlcnt_RW[3])957return 0;958return spufs_dump_emit(cprm, &ctx->csa.spu_chnldata_RW[3],959sizeof(ctx->csa.spu_chnldata_RW[3]));960}961962static ssize_t __spufs_signal1_read(struct spu_context *ctx, char __user *buf,963size_t len)964{965if (len < sizeof(ctx->csa.spu_chnldata_RW[3]))966return -EINVAL;967if 
(!ctx->csa.spu_chnlcnt_RW[3])968return 0;969if (copy_to_user(buf, &ctx->csa.spu_chnldata_RW[3],970sizeof(ctx->csa.spu_chnldata_RW[3])))971return -EFAULT;972return sizeof(ctx->csa.spu_chnldata_RW[3]);973}974975static ssize_t spufs_signal1_read(struct file *file, char __user *buf,976size_t len, loff_t *pos)977{978int ret;979struct spu_context *ctx = file->private_data;980981ret = spu_acquire_saved(ctx);982if (ret)983return ret;984ret = __spufs_signal1_read(ctx, buf, len);985spu_release_saved(ctx);986987return ret;988}989990static ssize_t spufs_signal1_write(struct file *file, const char __user *buf,991size_t len, loff_t *pos)992{993struct spu_context *ctx;994ssize_t ret;995u32 data;996997ctx = file->private_data;998999if (len < 4)1000return -EINVAL;10011002if (copy_from_user(&data, buf, 4))1003return -EFAULT;10041005ret = spu_acquire(ctx);1006if (ret)1007return ret;1008ctx->ops->signal1_write(ctx, data);1009spu_release(ctx);10101011return 4;1012}10131014static vm_fault_t1015spufs_signal1_mmap_fault(struct vm_fault *vmf)1016{1017#if SPUFS_SIGNAL_MAP_SIZE == 0x10001018return spufs_ps_fault(vmf, 0x14000, SPUFS_SIGNAL_MAP_SIZE);1019#elif SPUFS_SIGNAL_MAP_SIZE == 0x100001020/* For 64k pages, both signal1 and signal2 can be used to mmap the whole1021* signal 1 and 2 area1022*/1023return spufs_ps_fault(vmf, 0x10000, SPUFS_SIGNAL_MAP_SIZE);1024#else1025#error unsupported page size1026#endif1027}10281029static const struct vm_operations_struct spufs_signal1_mmap_vmops = {1030.fault = spufs_signal1_mmap_fault,1031};10321033static int spufs_signal1_mmap(struct file *file, struct vm_area_struct *vma)1034{1035if (!(vma->vm_flags & VM_SHARED))1036return -EINVAL;10371038vm_flags_set(vma, VM_IO | VM_PFNMAP);1039vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);10401041vma->vm_ops = &spufs_signal1_mmap_vmops;1042return 0;1043}10441045static const struct file_operations spufs_signal1_fops = {1046.open = spufs_signal1_open,1047.release = spufs_signal1_release,1048.read = 
spufs_signal1_read,1049.write = spufs_signal1_write,1050.mmap = spufs_signal1_mmap,1051};10521053static const struct file_operations spufs_signal1_nosched_fops = {1054.open = spufs_signal1_open,1055.release = spufs_signal1_release,1056.write = spufs_signal1_write,1057.mmap = spufs_signal1_mmap,1058};10591060static int spufs_signal2_open(struct inode *inode, struct file *file)1061{1062struct spufs_inode_info *i = SPUFS_I(inode);1063struct spu_context *ctx = i->i_ctx;10641065mutex_lock(&ctx->mapping_lock);1066file->private_data = ctx;1067if (!i->i_openers++)1068ctx->signal2 = inode->i_mapping;1069mutex_unlock(&ctx->mapping_lock);1070return nonseekable_open(inode, file);1071}10721073static int1074spufs_signal2_release(struct inode *inode, struct file *file)1075{1076struct spufs_inode_info *i = SPUFS_I(inode);1077struct spu_context *ctx = i->i_ctx;10781079mutex_lock(&ctx->mapping_lock);1080if (!--i->i_openers)1081ctx->signal2 = NULL;1082mutex_unlock(&ctx->mapping_lock);1083return 0;1084}10851086static ssize_t spufs_signal2_dump(struct spu_context *ctx,1087struct coredump_params *cprm)1088{1089if (!ctx->csa.spu_chnlcnt_RW[4])1090return 0;1091return spufs_dump_emit(cprm, &ctx->csa.spu_chnldata_RW[4],1092sizeof(ctx->csa.spu_chnldata_RW[4]));1093}10941095static ssize_t __spufs_signal2_read(struct spu_context *ctx, char __user *buf,1096size_t len)1097{1098if (len < sizeof(ctx->csa.spu_chnldata_RW[4]))1099return -EINVAL;1100if (!ctx->csa.spu_chnlcnt_RW[4])1101return 0;1102if (copy_to_user(buf, &ctx->csa.spu_chnldata_RW[4],1103sizeof(ctx->csa.spu_chnldata_RW[4])))1104return -EFAULT;1105return sizeof(ctx->csa.spu_chnldata_RW[4]);1106}11071108static ssize_t spufs_signal2_read(struct file *file, char __user *buf,1109size_t len, loff_t *pos)1110{1111struct spu_context *ctx = file->private_data;1112int ret;11131114ret = spu_acquire_saved(ctx);1115if (ret)1116return ret;1117ret = __spufs_signal2_read(ctx, buf, len);1118spu_release_saved(ctx);11191120return ret;1121}11221123static 
ssize_t spufs_signal2_write(struct file *file, const char __user *buf,1124size_t len, loff_t *pos)1125{1126struct spu_context *ctx;1127ssize_t ret;1128u32 data;11291130ctx = file->private_data;11311132if (len < 4)1133return -EINVAL;11341135if (copy_from_user(&data, buf, 4))1136return -EFAULT;11371138ret = spu_acquire(ctx);1139if (ret)1140return ret;1141ctx->ops->signal2_write(ctx, data);1142spu_release(ctx);11431144return 4;1145}11461147#if SPUFS_MMAP_4K1148static vm_fault_t1149spufs_signal2_mmap_fault(struct vm_fault *vmf)1150{1151#if SPUFS_SIGNAL_MAP_SIZE == 0x10001152return spufs_ps_fault(vmf, 0x1c000, SPUFS_SIGNAL_MAP_SIZE);1153#elif SPUFS_SIGNAL_MAP_SIZE == 0x100001154/* For 64k pages, both signal1 and signal2 can be used to mmap the whole1155* signal 1 and 2 area1156*/1157return spufs_ps_fault(vmf, 0x10000, SPUFS_SIGNAL_MAP_SIZE);1158#else1159#error unsupported page size1160#endif1161}11621163static const struct vm_operations_struct spufs_signal2_mmap_vmops = {1164.fault = spufs_signal2_mmap_fault,1165};11661167static int spufs_signal2_mmap(struct file *file, struct vm_area_struct *vma)1168{1169if (!(vma->vm_flags & VM_SHARED))1170return -EINVAL;11711172vm_flags_set(vma, VM_IO | VM_PFNMAP);1173vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);11741175vma->vm_ops = &spufs_signal2_mmap_vmops;1176return 0;1177}1178#else /* SPUFS_MMAP_4K */1179#define spufs_signal2_mmap NULL1180#endif /* !SPUFS_MMAP_4K */11811182static const struct file_operations spufs_signal2_fops = {1183.open = spufs_signal2_open,1184.release = spufs_signal2_release,1185.read = spufs_signal2_read,1186.write = spufs_signal2_write,1187.mmap = spufs_signal2_mmap,1188};11891190static const struct file_operations spufs_signal2_nosched_fops = {1191.open = spufs_signal2_open,1192.release = spufs_signal2_release,1193.write = spufs_signal2_write,1194.mmap = spufs_signal2_mmap,1195};11961197/*1198* This is a wrapper around DEFINE_SIMPLE_ATTRIBUTE which does the1199* work of acquiring (or not) the 
SPU context before calling through1200* to the actual get routine. The set routine is called directly.1201*/1202#define SPU_ATTR_NOACQUIRE 01203#define SPU_ATTR_ACQUIRE 11204#define SPU_ATTR_ACQUIRE_SAVED 212051206#define DEFINE_SPUFS_ATTRIBUTE(__name, __get, __set, __fmt, __acquire) \1207static int __##__get(void *data, u64 *val) \1208{ \1209struct spu_context *ctx = data; \1210int ret = 0; \1211\1212if (__acquire == SPU_ATTR_ACQUIRE) { \1213ret = spu_acquire(ctx); \1214if (ret) \1215return ret; \1216*val = __get(ctx); \1217spu_release(ctx); \1218} else if (__acquire == SPU_ATTR_ACQUIRE_SAVED) { \1219ret = spu_acquire_saved(ctx); \1220if (ret) \1221return ret; \1222*val = __get(ctx); \1223spu_release_saved(ctx); \1224} else \1225*val = __get(ctx); \1226\1227return 0; \1228} \1229DEFINE_SPUFS_SIMPLE_ATTRIBUTE(__name, __##__get, __set, __fmt);12301231static int spufs_signal1_type_set(void *data, u64 val)1232{1233struct spu_context *ctx = data;1234int ret;12351236ret = spu_acquire(ctx);1237if (ret)1238return ret;1239ctx->ops->signal1_type_set(ctx, val);1240spu_release(ctx);12411242return 0;1243}12441245static u64 spufs_signal1_type_get(struct spu_context *ctx)1246{1247return ctx->ops->signal1_type_get(ctx);1248}1249DEFINE_SPUFS_ATTRIBUTE(spufs_signal1_type, spufs_signal1_type_get,1250spufs_signal1_type_set, "%llu\n", SPU_ATTR_ACQUIRE);125112521253static int spufs_signal2_type_set(void *data, u64 val)1254{1255struct spu_context *ctx = data;1256int ret;12571258ret = spu_acquire(ctx);1259if (ret)1260return ret;1261ctx->ops->signal2_type_set(ctx, val);1262spu_release(ctx);12631264return 0;1265}12661267static u64 spufs_signal2_type_get(struct spu_context *ctx)1268{1269return ctx->ops->signal2_type_get(ctx);1270}1271DEFINE_SPUFS_ATTRIBUTE(spufs_signal2_type, spufs_signal2_type_get,1272spufs_signal2_type_set, "%llu\n", SPU_ATTR_ACQUIRE);12731274#if SPUFS_MMAP_4K1275static vm_fault_t1276spufs_mss_mmap_fault(struct vm_fault *vmf)1277{1278return spufs_ps_fault(vmf, 0x0000, 
SPUFS_MSS_MAP_SIZE);1279}12801281static const struct vm_operations_struct spufs_mss_mmap_vmops = {1282.fault = spufs_mss_mmap_fault,1283};12841285/*1286* mmap support for problem state MFC DMA area [0x0000 - 0x0fff].1287*/1288static int spufs_mss_mmap(struct file *file, struct vm_area_struct *vma)1289{1290if (!(vma->vm_flags & VM_SHARED))1291return -EINVAL;12921293vm_flags_set(vma, VM_IO | VM_PFNMAP);1294vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);12951296vma->vm_ops = &spufs_mss_mmap_vmops;1297return 0;1298}1299#else /* SPUFS_MMAP_4K */1300#define spufs_mss_mmap NULL1301#endif /* !SPUFS_MMAP_4K */13021303static int spufs_mss_open(struct inode *inode, struct file *file)1304{1305struct spufs_inode_info *i = SPUFS_I(inode);1306struct spu_context *ctx = i->i_ctx;13071308file->private_data = i->i_ctx;13091310mutex_lock(&ctx->mapping_lock);1311if (!i->i_openers++)1312ctx->mss = inode->i_mapping;1313mutex_unlock(&ctx->mapping_lock);1314return nonseekable_open(inode, file);1315}13161317static int1318spufs_mss_release(struct inode *inode, struct file *file)1319{1320struct spufs_inode_info *i = SPUFS_I(inode);1321struct spu_context *ctx = i->i_ctx;13221323mutex_lock(&ctx->mapping_lock);1324if (!--i->i_openers)1325ctx->mss = NULL;1326mutex_unlock(&ctx->mapping_lock);1327return 0;1328}13291330static const struct file_operations spufs_mss_fops = {1331.open = spufs_mss_open,1332.release = spufs_mss_release,1333.mmap = spufs_mss_mmap,1334};13351336static vm_fault_t1337spufs_psmap_mmap_fault(struct vm_fault *vmf)1338{1339return spufs_ps_fault(vmf, 0x0000, SPUFS_PS_MAP_SIZE);1340}13411342static const struct vm_operations_struct spufs_psmap_mmap_vmops = {1343.fault = spufs_psmap_mmap_fault,1344};13451346/*1347* mmap support for full problem state area [0x00000 - 0x1ffff].1348*/1349static int spufs_psmap_mmap(struct file *file, struct vm_area_struct *vma)1350{1351if (!(vma->vm_flags & VM_SHARED))1352return -EINVAL;13531354vm_flags_set(vma, VM_IO | 
VM_PFNMAP);1355vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);13561357vma->vm_ops = &spufs_psmap_mmap_vmops;1358return 0;1359}13601361static int spufs_psmap_open(struct inode *inode, struct file *file)1362{1363struct spufs_inode_info *i = SPUFS_I(inode);1364struct spu_context *ctx = i->i_ctx;13651366mutex_lock(&ctx->mapping_lock);1367file->private_data = i->i_ctx;1368if (!i->i_openers++)1369ctx->psmap = inode->i_mapping;1370mutex_unlock(&ctx->mapping_lock);1371return nonseekable_open(inode, file);1372}13731374static int1375spufs_psmap_release(struct inode *inode, struct file *file)1376{1377struct spufs_inode_info *i = SPUFS_I(inode);1378struct spu_context *ctx = i->i_ctx;13791380mutex_lock(&ctx->mapping_lock);1381if (!--i->i_openers)1382ctx->psmap = NULL;1383mutex_unlock(&ctx->mapping_lock);1384return 0;1385}13861387static const struct file_operations spufs_psmap_fops = {1388.open = spufs_psmap_open,1389.release = spufs_psmap_release,1390.mmap = spufs_psmap_mmap,1391};139213931394#if SPUFS_MMAP_4K1395static vm_fault_t1396spufs_mfc_mmap_fault(struct vm_fault *vmf)1397{1398return spufs_ps_fault(vmf, 0x3000, SPUFS_MFC_MAP_SIZE);1399}14001401static const struct vm_operations_struct spufs_mfc_mmap_vmops = {1402.fault = spufs_mfc_mmap_fault,1403};14041405/*1406* mmap support for problem state MFC DMA area [0x0000 - 0x0fff].1407*/1408static int spufs_mfc_mmap(struct file *file, struct vm_area_struct *vma)1409{1410if (!(vma->vm_flags & VM_SHARED))1411return -EINVAL;14121413vm_flags_set(vma, VM_IO | VM_PFNMAP);1414vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);14151416vma->vm_ops = &spufs_mfc_mmap_vmops;1417return 0;1418}1419#else /* SPUFS_MMAP_4K */1420#define spufs_mfc_mmap NULL1421#endif /* !SPUFS_MMAP_4K */14221423static int spufs_mfc_open(struct inode *inode, struct file *file)1424{1425struct spufs_inode_info *i = SPUFS_I(inode);1426struct spu_context *ctx = i->i_ctx;14271428/* we don't want to deal with DMA into other processes */1429if (ctx->owner 
!= current->mm)1430return -EINVAL;14311432if (atomic_read(&inode->i_count) != 1)1433return -EBUSY;14341435mutex_lock(&ctx->mapping_lock);1436file->private_data = ctx;1437if (!i->i_openers++)1438ctx->mfc = inode->i_mapping;1439mutex_unlock(&ctx->mapping_lock);1440return nonseekable_open(inode, file);1441}14421443static int1444spufs_mfc_release(struct inode *inode, struct file *file)1445{1446struct spufs_inode_info *i = SPUFS_I(inode);1447struct spu_context *ctx = i->i_ctx;14481449mutex_lock(&ctx->mapping_lock);1450if (!--i->i_openers)1451ctx->mfc = NULL;1452mutex_unlock(&ctx->mapping_lock);1453return 0;1454}14551456/* interrupt-level mfc callback function. */1457void spufs_mfc_callback(struct spu *spu)1458{1459struct spu_context *ctx = spu->ctx;14601461if (ctx)1462wake_up_all(&ctx->mfc_wq);1463}14641465static int spufs_read_mfc_tagstatus(struct spu_context *ctx, u32 *status)1466{1467/* See if there is one tag group is complete */1468/* FIXME we need locking around tagwait */1469*status = ctx->ops->read_mfc_tagstatus(ctx) & ctx->tagwait;1470ctx->tagwait &= ~*status;1471if (*status)1472return 1;14731474/* enable interrupt waiting for any tag group,1475may silently fail if interrupts are already enabled */1476ctx->ops->set_mfc_query(ctx, ctx->tagwait, 1);1477return 0;1478}14791480static ssize_t spufs_mfc_read(struct file *file, char __user *buffer,1481size_t size, loff_t *pos)1482{1483struct spu_context *ctx = file->private_data;1484int ret = -EINVAL;1485u32 status;14861487if (size != 4)1488goto out;14891490ret = spu_acquire(ctx);1491if (ret)1492return ret;14931494ret = -EINVAL;1495if (file->f_flags & O_NONBLOCK) {1496status = ctx->ops->read_mfc_tagstatus(ctx);1497if (!(status & ctx->tagwait))1498ret = -EAGAIN;1499else1500/* XXX(hch): shouldn't we clear ret here? 
*/1501ctx->tagwait &= ~status;1502} else {1503ret = spufs_wait(ctx->mfc_wq,1504spufs_read_mfc_tagstatus(ctx, &status));1505if (ret)1506goto out;1507}1508spu_release(ctx);15091510ret = 4;1511if (copy_to_user(buffer, &status, 4))1512ret = -EFAULT;15131514out:1515return ret;1516}15171518static int spufs_check_valid_dma(struct mfc_dma_command *cmd)1519{1520pr_debug("queueing DMA %x %llx %x %x %x\n", cmd->lsa,1521cmd->ea, cmd->size, cmd->tag, cmd->cmd);15221523switch (cmd->cmd) {1524case MFC_PUT_CMD:1525case MFC_PUTF_CMD:1526case MFC_PUTB_CMD:1527case MFC_GET_CMD:1528case MFC_GETF_CMD:1529case MFC_GETB_CMD:1530break;1531default:1532pr_debug("invalid DMA opcode %x\n", cmd->cmd);1533return -EIO;1534}15351536if ((cmd->lsa & 0xf) != (cmd->ea &0xf)) {1537pr_debug("invalid DMA alignment, ea %llx lsa %x\n",1538cmd->ea, cmd->lsa);1539return -EIO;1540}15411542switch (cmd->size & 0xf) {1543case 1:1544break;1545case 2:1546if (cmd->lsa & 1)1547goto error;1548break;1549case 4:1550if (cmd->lsa & 3)1551goto error;1552break;1553case 8:1554if (cmd->lsa & 7)1555goto error;1556break;1557case 0:1558if (cmd->lsa & 15)1559goto error;1560break;1561error:1562default:1563pr_debug("invalid DMA alignment %x for size %x\n",1564cmd->lsa & 0xf, cmd->size);1565return -EIO;1566}15671568if (cmd->size > 16 * 1024) {1569pr_debug("invalid DMA size %x\n", cmd->size);1570return -EIO;1571}15721573if (cmd->tag & 0xfff0) {1574/* we reserve the higher tag numbers for kernel use */1575pr_debug("invalid DMA tag\n");1576return -EIO;1577}15781579if (cmd->class) {1580/* not supported in this version */1581pr_debug("invalid DMA class\n");1582return -EIO;1583}15841585return 0;1586}15871588static int spu_send_mfc_command(struct spu_context *ctx,1589struct mfc_dma_command cmd,1590int *error)1591{1592*error = ctx->ops->send_mfc_command(ctx, &cmd);1593if (*error == -EAGAIN) {1594/* wait for any tag group to complete1595so we have space for the new command */1596ctx->ops->set_mfc_query(ctx, ctx->tagwait, 1);1597/* try 
again, because the queue might be1598empty again */1599*error = ctx->ops->send_mfc_command(ctx, &cmd);1600if (*error == -EAGAIN)1601return 0;1602}1603return 1;1604}16051606static ssize_t spufs_mfc_write(struct file *file, const char __user *buffer,1607size_t size, loff_t *pos)1608{1609struct spu_context *ctx = file->private_data;1610struct mfc_dma_command cmd;1611int ret = -EINVAL;16121613if (size != sizeof cmd)1614goto out;16151616ret = -EFAULT;1617if (copy_from_user(&cmd, buffer, sizeof cmd))1618goto out;16191620ret = spufs_check_valid_dma(&cmd);1621if (ret)1622goto out;16231624ret = spu_acquire(ctx);1625if (ret)1626goto out;16271628ret = spufs_wait(ctx->run_wq, ctx->state == SPU_STATE_RUNNABLE);1629if (ret)1630goto out;16311632if (file->f_flags & O_NONBLOCK) {1633ret = ctx->ops->send_mfc_command(ctx, &cmd);1634} else {1635int status;1636ret = spufs_wait(ctx->mfc_wq,1637spu_send_mfc_command(ctx, cmd, &status));1638if (ret)1639goto out;1640if (status)1641ret = status;1642}16431644if (ret)1645goto out_unlock;16461647ctx->tagwait |= 1 << cmd.tag;1648ret = size;16491650out_unlock:1651spu_release(ctx);1652out:1653return ret;1654}16551656static __poll_t spufs_mfc_poll(struct file *file,poll_table *wait)1657{1658struct spu_context *ctx = file->private_data;1659u32 free_elements, tagstatus;1660__poll_t mask;16611662poll_wait(file, &ctx->mfc_wq, wait);16631664/*1665* For now keep this uninterruptible and also ignore the rule1666* that poll should not sleep. 
Will be fixed later.1667*/1668mutex_lock(&ctx->state_mutex);1669ctx->ops->set_mfc_query(ctx, ctx->tagwait, 2);1670free_elements = ctx->ops->get_mfc_free_elements(ctx);1671tagstatus = ctx->ops->read_mfc_tagstatus(ctx);1672spu_release(ctx);16731674mask = 0;1675if (free_elements & 0xffff)1676mask |= EPOLLOUT | EPOLLWRNORM;1677if (tagstatus & ctx->tagwait)1678mask |= EPOLLIN | EPOLLRDNORM;16791680pr_debug("%s: free %d tagstatus %d tagwait %d\n", __func__,1681free_elements, tagstatus, ctx->tagwait);16821683return mask;1684}16851686static int spufs_mfc_flush(struct file *file, fl_owner_t id)1687{1688struct spu_context *ctx = file->private_data;1689int ret;16901691ret = spu_acquire(ctx);1692if (ret)1693return ret;16941695spu_release(ctx);16961697return 0;1698}16991700static int spufs_mfc_fsync(struct file *file, loff_t start, loff_t end, int datasync)1701{1702struct inode *inode = file_inode(file);1703int err = file_write_and_wait_range(file, start, end);1704if (!err) {1705inode_lock(inode);1706err = spufs_mfc_flush(file, NULL);1707inode_unlock(inode);1708}1709return err;1710}17111712static const struct file_operations spufs_mfc_fops = {1713.open = spufs_mfc_open,1714.release = spufs_mfc_release,1715.read = spufs_mfc_read,1716.write = spufs_mfc_write,1717.poll = spufs_mfc_poll,1718.flush = spufs_mfc_flush,1719.fsync = spufs_mfc_fsync,1720.mmap = spufs_mfc_mmap,1721};17221723static int spufs_npc_set(void *data, u64 val)1724{1725struct spu_context *ctx = data;1726int ret;17271728ret = spu_acquire(ctx);1729if (ret)1730return ret;1731ctx->ops->npc_write(ctx, val);1732spu_release(ctx);17331734return 0;1735}17361737static u64 spufs_npc_get(struct spu_context *ctx)1738{1739return ctx->ops->npc_read(ctx);1740}1741DEFINE_SPUFS_ATTRIBUTE(spufs_npc_ops, spufs_npc_get, spufs_npc_set,1742"0x%llx\n", SPU_ATTR_ACQUIRE);17431744static int spufs_decr_set(void *data, u64 val)1745{1746struct spu_context *ctx = data;1747struct spu_lscsa *lscsa = ctx->csa.lscsa;1748int ret;17491750ret = 
spu_acquire_saved(ctx);1751if (ret)1752return ret;1753lscsa->decr.slot[0] = (u32) val;1754spu_release_saved(ctx);17551756return 0;1757}17581759static u64 spufs_decr_get(struct spu_context *ctx)1760{1761struct spu_lscsa *lscsa = ctx->csa.lscsa;1762return lscsa->decr.slot[0];1763}1764DEFINE_SPUFS_ATTRIBUTE(spufs_decr_ops, spufs_decr_get, spufs_decr_set,1765"0x%llx\n", SPU_ATTR_ACQUIRE_SAVED);17661767static int spufs_decr_status_set(void *data, u64 val)1768{1769struct spu_context *ctx = data;1770int ret;17711772ret = spu_acquire_saved(ctx);1773if (ret)1774return ret;1775if (val)1776ctx->csa.priv2.mfc_control_RW |= MFC_CNTL_DECREMENTER_RUNNING;1777else1778ctx->csa.priv2.mfc_control_RW &= ~MFC_CNTL_DECREMENTER_RUNNING;1779spu_release_saved(ctx);17801781return 0;1782}17831784static u64 spufs_decr_status_get(struct spu_context *ctx)1785{1786if (ctx->csa.priv2.mfc_control_RW & MFC_CNTL_DECREMENTER_RUNNING)1787return SPU_DECR_STATUS_RUNNING;1788else1789return 0;1790}1791DEFINE_SPUFS_ATTRIBUTE(spufs_decr_status_ops, spufs_decr_status_get,1792spufs_decr_status_set, "0x%llx\n",1793SPU_ATTR_ACQUIRE_SAVED);17941795static int spufs_event_mask_set(void *data, u64 val)1796{1797struct spu_context *ctx = data;1798struct spu_lscsa *lscsa = ctx->csa.lscsa;1799int ret;18001801ret = spu_acquire_saved(ctx);1802if (ret)1803return ret;1804lscsa->event_mask.slot[0] = (u32) val;1805spu_release_saved(ctx);18061807return 0;1808}18091810static u64 spufs_event_mask_get(struct spu_context *ctx)1811{1812struct spu_lscsa *lscsa = ctx->csa.lscsa;1813return lscsa->event_mask.slot[0];1814}18151816DEFINE_SPUFS_ATTRIBUTE(spufs_event_mask_ops, spufs_event_mask_get,1817spufs_event_mask_set, "0x%llx\n",1818SPU_ATTR_ACQUIRE_SAVED);18191820static u64 spufs_event_status_get(struct spu_context *ctx)1821{1822struct spu_state *state = &ctx->csa;1823u64 stat;1824stat = state->spu_chnlcnt_RW[0];1825if (stat)1826return state->spu_chnldata_RW[0];1827return 0;1828}1829DEFINE_SPUFS_ATTRIBUTE(spufs_event_status_ops, 
spufs_event_status_get,1830NULL, "0x%llx\n", SPU_ATTR_ACQUIRE_SAVED)18311832static int spufs_srr0_set(void *data, u64 val)1833{1834struct spu_context *ctx = data;1835struct spu_lscsa *lscsa = ctx->csa.lscsa;1836int ret;18371838ret = spu_acquire_saved(ctx);1839if (ret)1840return ret;1841lscsa->srr0.slot[0] = (u32) val;1842spu_release_saved(ctx);18431844return 0;1845}18461847static u64 spufs_srr0_get(struct spu_context *ctx)1848{1849struct spu_lscsa *lscsa = ctx->csa.lscsa;1850return lscsa->srr0.slot[0];1851}1852DEFINE_SPUFS_ATTRIBUTE(spufs_srr0_ops, spufs_srr0_get, spufs_srr0_set,1853"0x%llx\n", SPU_ATTR_ACQUIRE_SAVED)18541855static u64 spufs_id_get(struct spu_context *ctx)1856{1857u64 num;18581859if (ctx->state == SPU_STATE_RUNNABLE)1860num = ctx->spu->number;1861else1862num = (unsigned int)-1;18631864return num;1865}1866DEFINE_SPUFS_ATTRIBUTE(spufs_id_ops, spufs_id_get, NULL, "0x%llx\n",1867SPU_ATTR_ACQUIRE)18681869static u64 spufs_object_id_get(struct spu_context *ctx)1870{1871/* FIXME: Should there really be no locking here? 
*/1872return ctx->object_id;1873}18741875static int spufs_object_id_set(void *data, u64 id)1876{1877struct spu_context *ctx = data;1878ctx->object_id = id;18791880return 0;1881}18821883DEFINE_SPUFS_ATTRIBUTE(spufs_object_id_ops, spufs_object_id_get,1884spufs_object_id_set, "0x%llx\n", SPU_ATTR_NOACQUIRE);18851886static u64 spufs_lslr_get(struct spu_context *ctx)1887{1888return ctx->csa.priv2.spu_lslr_RW;1889}1890DEFINE_SPUFS_ATTRIBUTE(spufs_lslr_ops, spufs_lslr_get, NULL, "0x%llx\n",1891SPU_ATTR_ACQUIRE_SAVED);18921893static int spufs_info_open(struct inode *inode, struct file *file)1894{1895struct spufs_inode_info *i = SPUFS_I(inode);1896struct spu_context *ctx = i->i_ctx;1897file->private_data = ctx;1898return 0;1899}19001901static int spufs_caps_show(struct seq_file *s, void *private)1902{1903struct spu_context *ctx = s->private;19041905if (!(ctx->flags & SPU_CREATE_NOSCHED))1906seq_puts(s, "sched\n");1907if (!(ctx->flags & SPU_CREATE_ISOLATE))1908seq_puts(s, "step\n");1909return 0;1910}19111912static int spufs_caps_open(struct inode *inode, struct file *file)1913{1914return single_open(file, spufs_caps_show, SPUFS_I(inode)->i_ctx);1915}19161917static const struct file_operations spufs_caps_fops = {1918.open = spufs_caps_open,1919.read = seq_read,1920.llseek = seq_lseek,1921.release = single_release,1922};19231924static ssize_t spufs_mbox_info_dump(struct spu_context *ctx,1925struct coredump_params *cprm)1926{1927if (!(ctx->csa.prob.mb_stat_R & 0x0000ff))1928return 0;1929return spufs_dump_emit(cprm, &ctx->csa.prob.pu_mb_R,1930sizeof(ctx->csa.prob.pu_mb_R));1931}19321933static ssize_t spufs_mbox_info_read(struct file *file, char __user *buf,1934size_t len, loff_t *pos)1935{1936struct spu_context *ctx = file->private_data;1937u32 stat, data;1938int ret;19391940ret = spu_acquire_saved(ctx);1941if (ret)1942return ret;1943spin_lock(&ctx->csa.register_lock);1944stat = ctx->csa.prob.mb_stat_R;1945data = 
ctx->csa.prob.pu_mb_R;1946spin_unlock(&ctx->csa.register_lock);1947spu_release_saved(ctx);19481949/* EOF if there's no entry in the mbox */1950if (!(stat & 0x0000ff))1951return 0;19521953return simple_read_from_buffer(buf, len, pos, &data, sizeof(data));1954}19551956static const struct file_operations spufs_mbox_info_fops = {1957.open = spufs_info_open,1958.read = spufs_mbox_info_read,1959.llseek = generic_file_llseek,1960};19611962static ssize_t spufs_ibox_info_dump(struct spu_context *ctx,1963struct coredump_params *cprm)1964{1965if (!(ctx->csa.prob.mb_stat_R & 0xff0000))1966return 0;1967return spufs_dump_emit(cprm, &ctx->csa.priv2.puint_mb_R,1968sizeof(ctx->csa.priv2.puint_mb_R));1969}19701971static ssize_t spufs_ibox_info_read(struct file *file, char __user *buf,1972size_t len, loff_t *pos)1973{1974struct spu_context *ctx = file->private_data;1975u32 stat, data;1976int ret;19771978ret = spu_acquire_saved(ctx);1979if (ret)1980return ret;1981spin_lock(&ctx->csa.register_lock);1982stat = ctx->csa.prob.mb_stat_R;1983data = ctx->csa.priv2.puint_mb_R;1984spin_unlock(&ctx->csa.register_lock);1985spu_release_saved(ctx);19861987/* EOF if there's no entry in the ibox */1988if (!(stat & 0xff0000))1989return 0;19901991return simple_read_from_buffer(buf, len, pos, &data, sizeof(data));1992}19931994static const struct file_operations spufs_ibox_info_fops = {1995.open = spufs_info_open,1996.read = spufs_ibox_info_read,1997.llseek = generic_file_llseek,1998};19992000static size_t spufs_wbox_info_cnt(struct spu_context *ctx)2001{2002return (4 - ((ctx->csa.prob.mb_stat_R & 0x00ff00) >> 8)) * sizeof(u32);2003}20042005static ssize_t spufs_wbox_info_dump(struct spu_context *ctx,2006struct coredump_params *cprm)2007{2008return spufs_dump_emit(cprm, &ctx->csa.spu_mailbox_data,2009spufs_wbox_info_cnt(ctx));2010}20112012static ssize_t spufs_wbox_info_read(struct file *file, char __user *buf,2013size_t len, loff_t *pos)2014{2015struct spu_context *ctx = file->private_data;2016u32 
data[ARRAY_SIZE(ctx->csa.spu_mailbox_data)];2017int ret, count;20182019ret = spu_acquire_saved(ctx);2020if (ret)2021return ret;2022spin_lock(&ctx->csa.register_lock);2023count = spufs_wbox_info_cnt(ctx);2024memcpy(&data, &ctx->csa.spu_mailbox_data, sizeof(data));2025spin_unlock(&ctx->csa.register_lock);2026spu_release_saved(ctx);20272028return simple_read_from_buffer(buf, len, pos, &data,2029count * sizeof(u32));2030}20312032static const struct file_operations spufs_wbox_info_fops = {2033.open = spufs_info_open,2034.read = spufs_wbox_info_read,2035.llseek = generic_file_llseek,2036};20372038static void spufs_get_dma_info(struct spu_context *ctx,2039struct spu_dma_info *info)2040{2041int i;20422043info->dma_info_type = ctx->csa.priv2.spu_tag_status_query_RW;2044info->dma_info_mask = ctx->csa.lscsa->tag_mask.slot[0];2045info->dma_info_status = ctx->csa.spu_chnldata_RW[24];2046info->dma_info_stall_and_notify = ctx->csa.spu_chnldata_RW[25];2047info->dma_info_atomic_command_status = ctx->csa.spu_chnldata_RW[27];2048for (i = 0; i < 16; i++) {2049struct mfc_cq_sr *qp = &info->dma_info_command_data[i];2050struct mfc_cq_sr *spuqp = &ctx->csa.priv2.spuq[i];20512052qp->mfc_cq_data0_RW = spuqp->mfc_cq_data0_RW;2053qp->mfc_cq_data1_RW = spuqp->mfc_cq_data1_RW;2054qp->mfc_cq_data2_RW = spuqp->mfc_cq_data2_RW;2055qp->mfc_cq_data3_RW = spuqp->mfc_cq_data3_RW;2056}2057}20582059static ssize_t spufs_dma_info_dump(struct spu_context *ctx,2060struct coredump_params *cprm)2061{2062struct spu_dma_info info;20632064spufs_get_dma_info(ctx, &info);2065return spufs_dump_emit(cprm, &info, sizeof(info));2066}20672068static ssize_t spufs_dma_info_read(struct file *file, char __user *buf,2069size_t len, loff_t *pos)2070{2071struct spu_context *ctx = file->private_data;2072struct spu_dma_info info;2073int ret;20742075ret = spu_acquire_saved(ctx);2076if (ret)2077return ret;2078spin_lock(&ctx->csa.register_lock);2079spufs_get_dma_info(ctx, 
&info);2080spin_unlock(&ctx->csa.register_lock);2081spu_release_saved(ctx);20822083return simple_read_from_buffer(buf, len, pos, &info,2084sizeof(info));2085}20862087static const struct file_operations spufs_dma_info_fops = {2088.open = spufs_info_open,2089.read = spufs_dma_info_read,2090};20912092static void spufs_get_proxydma_info(struct spu_context *ctx,2093struct spu_proxydma_info *info)2094{2095int i;20962097info->proxydma_info_type = ctx->csa.prob.dma_querytype_RW;2098info->proxydma_info_mask = ctx->csa.prob.dma_querymask_RW;2099info->proxydma_info_status = ctx->csa.prob.dma_tagstatus_R;21002101for (i = 0; i < 8; i++) {2102struct mfc_cq_sr *qp = &info->proxydma_info_command_data[i];2103struct mfc_cq_sr *puqp = &ctx->csa.priv2.puq[i];21042105qp->mfc_cq_data0_RW = puqp->mfc_cq_data0_RW;2106qp->mfc_cq_data1_RW = puqp->mfc_cq_data1_RW;2107qp->mfc_cq_data2_RW = puqp->mfc_cq_data2_RW;2108qp->mfc_cq_data3_RW = puqp->mfc_cq_data3_RW;2109}2110}21112112static ssize_t spufs_proxydma_info_dump(struct spu_context *ctx,2113struct coredump_params *cprm)2114{2115struct spu_proxydma_info info;21162117spufs_get_proxydma_info(ctx, &info);2118return spufs_dump_emit(cprm, &info, sizeof(info));2119}21202121static ssize_t spufs_proxydma_info_read(struct file *file, char __user *buf,2122size_t len, loff_t *pos)2123{2124struct spu_context *ctx = file->private_data;2125struct spu_proxydma_info info;2126int ret;21272128if (len < sizeof(info))2129return -EINVAL;21302131ret = spu_acquire_saved(ctx);2132if (ret)2133return ret;2134spin_lock(&ctx->csa.register_lock);2135spufs_get_proxydma_info(ctx, &info);2136spin_unlock(&ctx->csa.register_lock);2137spu_release_saved(ctx);21382139return simple_read_from_buffer(buf, len, pos, &info,2140sizeof(info));2141}21422143static const struct file_operations spufs_proxydma_info_fops = {2144.open = spufs_info_open,2145.read = spufs_proxydma_info_read,2146};21472148static int spufs_show_tid(struct seq_file *s, void *private)2149{2150struct spu_context 
*ctx = s->private;21512152seq_printf(s, "%d\n", ctx->tid);2153return 0;2154}21552156static int spufs_tid_open(struct inode *inode, struct file *file)2157{2158return single_open(file, spufs_show_tid, SPUFS_I(inode)->i_ctx);2159}21602161static const struct file_operations spufs_tid_fops = {2162.open = spufs_tid_open,2163.read = seq_read,2164.llseek = seq_lseek,2165.release = single_release,2166};21672168static const char *ctx_state_names[] = {2169"user", "system", "iowait", "loaded"2170};21712172static unsigned long long spufs_acct_time(struct spu_context *ctx,2173enum spu_utilization_state state)2174{2175unsigned long long time = ctx->stats.times[state];21762177/*2178* In general, utilization statistics are updated by the controlling2179* thread as the spu context moves through various well defined2180* state transitions, but if the context is lazily loaded its2181* utilization statistics are not updated as the controlling thread2182* is not tightly coupled with the execution of the spu context. 
We2183* calculate and apply the time delta from the last recorded state2184* of the spu context.2185*/2186if (ctx->spu && ctx->stats.util_state == state) {2187time += ktime_get_ns() - ctx->stats.tstamp;2188}21892190return time / NSEC_PER_MSEC;2191}21922193static unsigned long long spufs_slb_flts(struct spu_context *ctx)2194{2195unsigned long long slb_flts = ctx->stats.slb_flt;21962197if (ctx->state == SPU_STATE_RUNNABLE) {2198slb_flts += (ctx->spu->stats.slb_flt -2199ctx->stats.slb_flt_base);2200}22012202return slb_flts;2203}22042205static unsigned long long spufs_class2_intrs(struct spu_context *ctx)2206{2207unsigned long long class2_intrs = ctx->stats.class2_intr;22082209if (ctx->state == SPU_STATE_RUNNABLE) {2210class2_intrs += (ctx->spu->stats.class2_intr -2211ctx->stats.class2_intr_base);2212}22132214return class2_intrs;2215}221622172218static int spufs_show_stat(struct seq_file *s, void *private)2219{2220struct spu_context *ctx = s->private;2221int ret;22222223ret = spu_acquire(ctx);2224if (ret)2225return ret;22262227seq_printf(s, "%s %llu %llu %llu %llu "2228"%llu %llu %llu %llu %llu %llu %llu %llu\n",2229ctx_state_names[ctx->stats.util_state],2230spufs_acct_time(ctx, SPU_UTIL_USER),2231spufs_acct_time(ctx, SPU_UTIL_SYSTEM),2232spufs_acct_time(ctx, SPU_UTIL_IOWAIT),2233spufs_acct_time(ctx, SPU_UTIL_IDLE_LOADED),2234ctx->stats.vol_ctx_switch,2235ctx->stats.invol_ctx_switch,2236spufs_slb_flts(ctx),2237ctx->stats.hash_flt,2238ctx->stats.min_flt,2239ctx->stats.maj_flt,2240spufs_class2_intrs(ctx),2241ctx->stats.libassist);2242spu_release(ctx);2243return 0;2244}22452246static int spufs_stat_open(struct inode *inode, struct file *file)2247{2248return single_open(file, spufs_show_stat, SPUFS_I(inode)->i_ctx);2249}22502251static const struct file_operations spufs_stat_fops = {2252.open = spufs_stat_open,2253.read = seq_read,2254.llseek = seq_lseek,2255.release = single_release,2256};22572258static inline int spufs_switch_log_used(struct spu_context 
*ctx)2259{2260return (ctx->switch_log->head - ctx->switch_log->tail) %2261SWITCH_LOG_BUFSIZE;2262}22632264static inline int spufs_switch_log_avail(struct spu_context *ctx)2265{2266return SWITCH_LOG_BUFSIZE - spufs_switch_log_used(ctx);2267}22682269static int spufs_switch_log_open(struct inode *inode, struct file *file)2270{2271struct spu_context *ctx = SPUFS_I(inode)->i_ctx;2272int rc;22732274rc = spu_acquire(ctx);2275if (rc)2276return rc;22772278if (ctx->switch_log) {2279rc = -EBUSY;2280goto out;2281}22822283ctx->switch_log = kmalloc(struct_size(ctx->switch_log, log,2284SWITCH_LOG_BUFSIZE), GFP_KERNEL);22852286if (!ctx->switch_log) {2287rc = -ENOMEM;2288goto out;2289}22902291ctx->switch_log->head = ctx->switch_log->tail = 0;2292init_waitqueue_head(&ctx->switch_log->wait);2293rc = 0;22942295out:2296spu_release(ctx);2297return rc;2298}22992300static int spufs_switch_log_release(struct inode *inode, struct file *file)2301{2302struct spu_context *ctx = SPUFS_I(inode)->i_ctx;2303int rc;23042305rc = spu_acquire(ctx);2306if (rc)2307return rc;23082309kfree(ctx->switch_log);2310ctx->switch_log = NULL;2311spu_release(ctx);23122313return 0;2314}23152316static int switch_log_sprint(struct spu_context *ctx, char *tbuf, int n)2317{2318struct switch_log_entry *p;23192320p = ctx->switch_log->log + ctx->switch_log->tail % SWITCH_LOG_BUFSIZE;23212322return snprintf(tbuf, n, "%llu.%09u %d %u %u %llu\n",2323(unsigned long long) p->tstamp.tv_sec,2324(unsigned int) p->tstamp.tv_nsec,2325p->spu_id,2326(unsigned int) p->type,2327(unsigned int) p->val,2328(unsigned long long) p->timebase);2329}23302331static ssize_t spufs_switch_log_read(struct file *file, char __user *buf,2332size_t len, loff_t *ppos)2333{2334struct inode *inode = file_inode(file);2335struct spu_context *ctx = SPUFS_I(inode)->i_ctx;2336int error = 0, cnt = 0;23372338if (!buf)2339return -EINVAL;23402341error = spu_acquire(ctx);2342if (error)2343return error;23442345while (cnt < len) {2346char tbuf[128];2347int 
width;23482349if (spufs_switch_log_used(ctx) == 0) {2350if (cnt > 0) {2351/* If there's data ready to go, we can2352* just return straight away */2353break;23542355} else if (file->f_flags & O_NONBLOCK) {2356error = -EAGAIN;2357break;23582359} else {2360/* spufs_wait will drop the mutex and2361* re-acquire, but since we're in read(), the2362* file cannot be _released (and so2363* ctx->switch_log is stable).2364*/2365error = spufs_wait(ctx->switch_log->wait,2366spufs_switch_log_used(ctx) > 0);23672368/* On error, spufs_wait returns without the2369* state mutex held */2370if (error)2371return error;23722373/* We may have had entries read from underneath2374* us while we dropped the mutex in spufs_wait,2375* so re-check */2376if (spufs_switch_log_used(ctx) == 0)2377continue;2378}2379}23802381width = switch_log_sprint(ctx, tbuf, sizeof(tbuf));2382if (width < len)2383ctx->switch_log->tail =2384(ctx->switch_log->tail + 1) %2385SWITCH_LOG_BUFSIZE;2386else2387/* If the record is greater than space available return2388* partial buffer (so far) */2389break;23902391error = copy_to_user(buf + cnt, tbuf, width);2392if (error)2393break;2394cnt += width;2395}23962397spu_release(ctx);23982399return cnt == 0 ? 
error : cnt;2400}24012402static __poll_t spufs_switch_log_poll(struct file *file, poll_table *wait)2403{2404struct inode *inode = file_inode(file);2405struct spu_context *ctx = SPUFS_I(inode)->i_ctx;2406__poll_t mask = 0;2407int rc;24082409poll_wait(file, &ctx->switch_log->wait, wait);24102411rc = spu_acquire(ctx);2412if (rc)2413return rc;24142415if (spufs_switch_log_used(ctx) > 0)2416mask |= EPOLLIN;24172418spu_release(ctx);24192420return mask;2421}24222423static const struct file_operations spufs_switch_log_fops = {2424.open = spufs_switch_log_open,2425.read = spufs_switch_log_read,2426.poll = spufs_switch_log_poll,2427.release = spufs_switch_log_release,2428};24292430/**2431* Log a context switch event to a switch log reader.2432*2433* Must be called with ctx->state_mutex held.2434*/2435void spu_switch_log_notify(struct spu *spu, struct spu_context *ctx,2436u32 type, u32 val)2437{2438if (!ctx->switch_log)2439return;24402441if (spufs_switch_log_avail(ctx) > 1) {2442struct switch_log_entry *p;24432444p = ctx->switch_log->log + ctx->switch_log->head;2445ktime_get_ts64(&p->tstamp);2446p->timebase = get_tb();2447p->spu_id = spu ? spu->number : -1;2448p->type = type;2449p->val = val;24502451ctx->switch_log->head =2452(ctx->switch_log->head + 1) % SWITCH_LOG_BUFSIZE;2453}24542455wake_up(&ctx->switch_log->wait);2456}24572458static int spufs_show_ctx(struct seq_file *s, void *private)2459{2460struct spu_context *ctx = s->private;2461u64 mfc_control_RW;24622463mutex_lock(&ctx->state_mutex);2464if (ctx->spu) {2465struct spu *spu = ctx->spu;2466struct spu_priv2 __iomem *priv2 = spu->priv2;24672468spin_lock_irq(&spu->register_lock);2469mfc_control_RW = in_be64(&priv2->mfc_control_RW);2470spin_unlock_irq(&spu->register_lock);2471} else {2472struct spu_state *csa = &ctx->csa;24732474mfc_control_RW = csa->priv2.mfc_control_RW;2475}24762477seq_printf(s, "%c flgs(%lx) sflgs(%lx) pri(%d) ts(%d) spu(%02d)"2478" %c %llx %llx %llx %llx %x %x\n",2479ctx->state == SPU_STATE_SAVED ? 
'S' : 'R',2480ctx->flags,2481ctx->sched_flags,2482ctx->prio,2483ctx->time_slice,2484ctx->spu ? ctx->spu->number : -1,2485!list_empty(&ctx->rq) ? 'q' : ' ',2486ctx->csa.class_0_pending,2487ctx->csa.class_0_dar,2488ctx->csa.class_1_dsisr,2489mfc_control_RW,2490ctx->ops->runcntl_read(ctx),2491ctx->ops->status_read(ctx));24922493mutex_unlock(&ctx->state_mutex);24942495return 0;2496}24972498static int spufs_ctx_open(struct inode *inode, struct file *file)2499{2500return single_open(file, spufs_show_ctx, SPUFS_I(inode)->i_ctx);2501}25022503static const struct file_operations spufs_ctx_fops = {2504.open = spufs_ctx_open,2505.read = seq_read,2506.llseek = seq_lseek,2507.release = single_release,2508};25092510const struct spufs_tree_descr spufs_dir_contents[] = {2511{ "capabilities", &spufs_caps_fops, 0444, },2512{ "mem", &spufs_mem_fops, 0666, LS_SIZE, },2513{ "regs", &spufs_regs_fops, 0666, sizeof(struct spu_reg128[128]), },2514{ "mbox", &spufs_mbox_fops, 0444, },2515{ "ibox", &spufs_ibox_fops, 0444, },2516{ "wbox", &spufs_wbox_fops, 0222, },2517{ "mbox_stat", &spufs_mbox_stat_fops, 0444, sizeof(u32), },2518{ "ibox_stat", &spufs_ibox_stat_fops, 0444, sizeof(u32), },2519{ "wbox_stat", &spufs_wbox_stat_fops, 0444, sizeof(u32), },2520{ "signal1", &spufs_signal1_fops, 0666, },2521{ "signal2", &spufs_signal2_fops, 0666, },2522{ "signal1_type", &spufs_signal1_type, 0666, },2523{ "signal2_type", &spufs_signal2_type, 0666, },2524{ "cntl", &spufs_cntl_fops, 0666, },2525{ "fpcr", &spufs_fpcr_fops, 0666, sizeof(struct spu_reg128), },2526{ "lslr", &spufs_lslr_ops, 0444, },2527{ "mfc", &spufs_mfc_fops, 0666, },2528{ "mss", &spufs_mss_fops, 0666, },2529{ "npc", &spufs_npc_ops, 0666, },2530{ "srr0", &spufs_srr0_ops, 0666, },2531{ "decr", &spufs_decr_ops, 0666, },2532{ "decr_status", &spufs_decr_status_ops, 0666, },2533{ "event_mask", &spufs_event_mask_ops, 0666, },2534{ "event_status", &spufs_event_status_ops, 0444, },2535{ "psmap", &spufs_psmap_fops, 0666, SPUFS_PS_MAP_SIZE, },2536{ 
"phys-id", &spufs_id_ops, 0666, },2537{ "object-id", &spufs_object_id_ops, 0666, },2538{ "mbox_info", &spufs_mbox_info_fops, 0444, sizeof(u32), },2539{ "ibox_info", &spufs_ibox_info_fops, 0444, sizeof(u32), },2540{ "wbox_info", &spufs_wbox_info_fops, 0444, sizeof(u32), },2541{ "dma_info", &spufs_dma_info_fops, 0444,2542sizeof(struct spu_dma_info), },2543{ "proxydma_info", &spufs_proxydma_info_fops, 0444,2544sizeof(struct spu_proxydma_info)},2545{ "tid", &spufs_tid_fops, 0444, },2546{ "stat", &spufs_stat_fops, 0444, },2547{ "switch_log", &spufs_switch_log_fops, 0444 },2548{},2549};25502551const struct spufs_tree_descr spufs_dir_nosched_contents[] = {2552{ "capabilities", &spufs_caps_fops, 0444, },2553{ "mem", &spufs_mem_fops, 0666, LS_SIZE, },2554{ "mbox", &spufs_mbox_fops, 0444, },2555{ "ibox", &spufs_ibox_fops, 0444, },2556{ "wbox", &spufs_wbox_fops, 0222, },2557{ "mbox_stat", &spufs_mbox_stat_fops, 0444, sizeof(u32), },2558{ "ibox_stat", &spufs_ibox_stat_fops, 0444, sizeof(u32), },2559{ "wbox_stat", &spufs_wbox_stat_fops, 0444, sizeof(u32), },2560{ "signal1", &spufs_signal1_nosched_fops, 0222, },2561{ "signal2", &spufs_signal2_nosched_fops, 0222, },2562{ "signal1_type", &spufs_signal1_type, 0666, },2563{ "signal2_type", &spufs_signal2_type, 0666, },2564{ "mss", &spufs_mss_fops, 0666, },2565{ "mfc", &spufs_mfc_fops, 0666, },2566{ "cntl", &spufs_cntl_fops, 0666, },2567{ "npc", &spufs_npc_ops, 0666, },2568{ "psmap", &spufs_psmap_fops, 0666, SPUFS_PS_MAP_SIZE, },2569{ "phys-id", &spufs_id_ops, 0666, },2570{ "object-id", &spufs_object_id_ops, 0666, },2571{ "tid", &spufs_tid_fops, 0444, },2572{ "stat", &spufs_stat_fops, 0444, },2573{},2574};25752576const struct spufs_tree_descr spufs_dir_debug_contents[] = {2577{ ".ctx", &spufs_ctx_fops, 0444, },2578{},2579};25802581const struct spufs_coredump_reader spufs_coredump_read[] = {2582{ "regs", spufs_regs_dump, NULL, sizeof(struct spu_reg128[128])},2583{ "fpcr", spufs_fpcr_dump, NULL, sizeof(struct spu_reg128) },2584{ 
"lslr", NULL, spufs_lslr_get, 19 },2585{ "decr", NULL, spufs_decr_get, 19 },2586{ "decr_status", NULL, spufs_decr_status_get, 19 },2587{ "mem", spufs_mem_dump, NULL, LS_SIZE, },2588{ "signal1", spufs_signal1_dump, NULL, sizeof(u32) },2589{ "signal1_type", NULL, spufs_signal1_type_get, 19 },2590{ "signal2", spufs_signal2_dump, NULL, sizeof(u32) },2591{ "signal2_type", NULL, spufs_signal2_type_get, 19 },2592{ "event_mask", NULL, spufs_event_mask_get, 19 },2593{ "event_status", NULL, spufs_event_status_get, 19 },2594{ "mbox_info", spufs_mbox_info_dump, NULL, sizeof(u32) },2595{ "ibox_info", spufs_ibox_info_dump, NULL, sizeof(u32) },2596{ "wbox_info", spufs_wbox_info_dump, NULL, 4 * sizeof(u32)},2597{ "dma_info", spufs_dma_info_dump, NULL, sizeof(struct spu_dma_info)},2598{ "proxydma_info", spufs_proxydma_info_dump,2599NULL, sizeof(struct spu_proxydma_info)},2600{ "object-id", NULL, spufs_object_id_get, 19 },2601{ "npc", NULL, spufs_npc_get, 19 },2602{ NULL },2603};260426052606