/* Path: arch/powerpc/platforms/ps3/spu.c */
/*1* PS3 Platform spu routines.2*3* Copyright (C) 2006 Sony Computer Entertainment Inc.4* Copyright 2006 Sony Corp.5*6* This program is free software; you can redistribute it and/or modify7* it under the terms of the GNU General Public License as published by8* the Free Software Foundation; version 2 of the License.9*10* This program is distributed in the hope that it will be useful,11* but WITHOUT ANY WARRANTY; without even the implied warranty of12* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the13* GNU General Public License for more details.14*15* You should have received a copy of the GNU General Public License16* along with this program; if not, write to the Free Software17* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA18*/1920#include <linux/kernel.h>21#include <linux/init.h>22#include <linux/slab.h>23#include <linux/mmzone.h>24#include <linux/io.h>25#include <linux/mm.h>2627#include <asm/spu.h>28#include <asm/spu_priv1.h>29#include <asm/lv1call.h>30#include <asm/ps3.h>3132#include "../cell/spufs/spufs.h"33#include "platform.h"3435/* spu_management_ops */3637/**38* enum spe_type - Type of spe to create.39* @spe_type_logical: Standard logical spe.40*41* For use with lv1_construct_logical_spe(). 
The current HV does not support42* any types other than those listed.43*/4445enum spe_type {46SPE_TYPE_LOGICAL = 0,47};4849/**50* struct spe_shadow - logical spe shadow register area.51*52* Read-only shadow of spe registers.53*/5455struct spe_shadow {56u8 padding_0140[0x0140];57u64 int_status_class0_RW; /* 0x0140 */58u64 int_status_class1_RW; /* 0x0148 */59u64 int_status_class2_RW; /* 0x0150 */60u8 padding_0158[0x0610-0x0158];61u64 mfc_dsisr_RW; /* 0x0610 */62u8 padding_0618[0x0620-0x0618];63u64 mfc_dar_RW; /* 0x0620 */64u8 padding_0628[0x0800-0x0628];65u64 mfc_dsipr_R; /* 0x0800 */66u8 padding_0808[0x0810-0x0808];67u64 mfc_lscrr_R; /* 0x0810 */68u8 padding_0818[0x0c00-0x0818];69u64 mfc_cer_R; /* 0x0c00 */70u8 padding_0c08[0x0f00-0x0c08];71u64 spe_execution_status; /* 0x0f00 */72u8 padding_0f08[0x1000-0x0f08];73};7475/**76* enum spe_ex_state - Logical spe execution state.77* @spe_ex_state_unexecutable: Uninitialized.78* @spe_ex_state_executable: Enabled, not ready.79* @spe_ex_state_executed: Ready for use.80*81* The execution state (status) of the logical spe as reported in82* struct spe_shadow:spe_execution_status.83*/8485enum spe_ex_state {86SPE_EX_STATE_UNEXECUTABLE = 0,87SPE_EX_STATE_EXECUTABLE = 2,88SPE_EX_STATE_EXECUTED = 3,89};9091/**92* struct priv1_cache - Cached values of priv1 registers.93* @masks[]: Array of cached spe interrupt masks, indexed by class.94* @sr1: Cached mfc_sr1 register.95* @tclass_id: Cached mfc_tclass_id register.96*/9798struct priv1_cache {99u64 masks[3];100u64 sr1;101u64 tclass_id;102};103104/**105* struct spu_pdata - Platform state variables.106* @spe_id: HV spe id returned by lv1_construct_logical_spe().107* @resource_id: HV spe resource id returned by108* ps3_repository_read_spe_resource_id().109* @priv2_addr: lpar address of spe priv2 area returned by110* lv1_construct_logical_spe().111* @shadow_addr: lpar address of spe register shadow area returned by112* lv1_construct_logical_spe().113* @shadow: Virtual (ioremap) address of 
spe register shadow area.114* @cache: Cached values of priv1 registers.115*/116117struct spu_pdata {118u64 spe_id;119u64 resource_id;120u64 priv2_addr;121u64 shadow_addr;122struct spe_shadow __iomem *shadow;123struct priv1_cache cache;124};125126static struct spu_pdata *spu_pdata(struct spu *spu)127{128return spu->pdata;129}130131#define dump_areas(_a, _b, _c, _d, _e) \132_dump_areas(_a, _b, _c, _d, _e, __func__, __LINE__)133static void _dump_areas(unsigned int spe_id, unsigned long priv2,134unsigned long problem, unsigned long ls, unsigned long shadow,135const char* func, int line)136{137pr_debug("%s:%d: spe_id: %xh (%u)\n", func, line, spe_id, spe_id);138pr_debug("%s:%d: priv2: %lxh\n", func, line, priv2);139pr_debug("%s:%d: problem: %lxh\n", func, line, problem);140pr_debug("%s:%d: ls: %lxh\n", func, line, ls);141pr_debug("%s:%d: shadow: %lxh\n", func, line, shadow);142}143144inline u64 ps3_get_spe_id(void *arg)145{146return spu_pdata(arg)->spe_id;147}148EXPORT_SYMBOL_GPL(ps3_get_spe_id);149150static unsigned long get_vas_id(void)151{152u64 id;153154lv1_get_logical_ppe_id(&id);155lv1_get_virtual_address_space_id_of_ppe(id, &id);156157return id;158}159160static int __init construct_spu(struct spu *spu)161{162int result;163u64 unused;164u64 problem_phys;165u64 local_store_phys;166167result = lv1_construct_logical_spe(PAGE_SHIFT, PAGE_SHIFT, PAGE_SHIFT,168PAGE_SHIFT, PAGE_SHIFT, get_vas_id(), SPE_TYPE_LOGICAL,169&spu_pdata(spu)->priv2_addr, &problem_phys,170&local_store_phys, &unused,171&spu_pdata(spu)->shadow_addr,172&spu_pdata(spu)->spe_id);173spu->problem_phys = problem_phys;174spu->local_store_phys = local_store_phys;175176if (result) {177pr_debug("%s:%d: lv1_construct_logical_spe failed: %s\n",178__func__, __LINE__, ps3_result(result));179return result;180}181182return result;183}184185static void spu_unmap(struct spu *spu)186{187iounmap(spu->priv2);188iounmap(spu->problem);189iounmap((__force u8 __iomem 
*)spu->local_store);190iounmap(spu_pdata(spu)->shadow);191}192193/**194* setup_areas - Map the spu regions into the address space.195*196* The current HV requires the spu shadow regs to be mapped with the197* PTE page protection bits set as read-only (PP=3). This implementation198* uses the low level __ioremap() to bypass the page protection settings199* inforced by ioremap_prot() to get the needed PTE bits set for the200* shadow regs.201*/202203static int __init setup_areas(struct spu *spu)204{205struct table {char* name; unsigned long addr; unsigned long size;};206static const unsigned long shadow_flags = _PAGE_NO_CACHE | 3;207208spu_pdata(spu)->shadow = __ioremap(spu_pdata(spu)->shadow_addr,209sizeof(struct spe_shadow),210shadow_flags);211if (!spu_pdata(spu)->shadow) {212pr_debug("%s:%d: ioremap shadow failed\n", __func__, __LINE__);213goto fail_ioremap;214}215216spu->local_store = (__force void *)ioremap_prot(spu->local_store_phys,217LS_SIZE, _PAGE_NO_CACHE);218219if (!spu->local_store) {220pr_debug("%s:%d: ioremap local_store failed\n",221__func__, __LINE__);222goto fail_ioremap;223}224225spu->problem = ioremap(spu->problem_phys,226sizeof(struct spu_problem));227228if (!spu->problem) {229pr_debug("%s:%d: ioremap problem failed\n", __func__, __LINE__);230goto fail_ioremap;231}232233spu->priv2 = ioremap(spu_pdata(spu)->priv2_addr,234sizeof(struct spu_priv2));235236if (!spu->priv2) {237pr_debug("%s:%d: ioremap priv2 failed\n", __func__, __LINE__);238goto fail_ioremap;239}240241dump_areas(spu_pdata(spu)->spe_id, spu_pdata(spu)->priv2_addr,242spu->problem_phys, spu->local_store_phys,243spu_pdata(spu)->shadow_addr);244dump_areas(spu_pdata(spu)->spe_id, (unsigned long)spu->priv2,245(unsigned long)spu->problem, (unsigned long)spu->local_store,246(unsigned long)spu_pdata(spu)->shadow);247248return 0;249250fail_ioremap:251spu_unmap(spu);252253return -ENOMEM;254}255256static int __init setup_interrupts(struct spu *spu)257{258int result;259260result = 
ps3_spe_irq_setup(PS3_BINDING_CPU_ANY, spu_pdata(spu)->spe_id,2610, &spu->irqs[0]);262263if (result)264goto fail_alloc_0;265266result = ps3_spe_irq_setup(PS3_BINDING_CPU_ANY, spu_pdata(spu)->spe_id,2671, &spu->irqs[1]);268269if (result)270goto fail_alloc_1;271272result = ps3_spe_irq_setup(PS3_BINDING_CPU_ANY, spu_pdata(spu)->spe_id,2732, &spu->irqs[2]);274275if (result)276goto fail_alloc_2;277278return result;279280fail_alloc_2:281ps3_spe_irq_destroy(spu->irqs[1]);282fail_alloc_1:283ps3_spe_irq_destroy(spu->irqs[0]);284fail_alloc_0:285spu->irqs[0] = spu->irqs[1] = spu->irqs[2] = NO_IRQ;286return result;287}288289static int __init enable_spu(struct spu *spu)290{291int result;292293result = lv1_enable_logical_spe(spu_pdata(spu)->spe_id,294spu_pdata(spu)->resource_id);295296if (result) {297pr_debug("%s:%d: lv1_enable_logical_spe failed: %s\n",298__func__, __LINE__, ps3_result(result));299goto fail_enable;300}301302result = setup_areas(spu);303304if (result)305goto fail_areas;306307result = setup_interrupts(spu);308309if (result)310goto fail_interrupts;311312return 0;313314fail_interrupts:315spu_unmap(spu);316fail_areas:317lv1_disable_logical_spe(spu_pdata(spu)->spe_id, 0);318fail_enable:319return result;320}321322static int ps3_destroy_spu(struct spu *spu)323{324int result;325326pr_debug("%s:%d spu_%d\n", __func__, __LINE__, spu->number);327328result = lv1_disable_logical_spe(spu_pdata(spu)->spe_id, 0);329BUG_ON(result);330331ps3_spe_irq_destroy(spu->irqs[2]);332ps3_spe_irq_destroy(spu->irqs[1]);333ps3_spe_irq_destroy(spu->irqs[0]);334335spu->irqs[0] = spu->irqs[1] = spu->irqs[2] = NO_IRQ;336337spu_unmap(spu);338339result = lv1_destruct_logical_spe(spu_pdata(spu)->spe_id);340BUG_ON(result);341342kfree(spu->pdata);343spu->pdata = NULL;344345return 0;346}347348static int __init ps3_create_spu(struct spu *spu, void *data)349{350int result;351352pr_debug("%s:%d spu_%d\n", __func__, __LINE__, spu->number);353354spu->pdata = kzalloc(sizeof(struct 
spu_pdata),355GFP_KERNEL);356357if (!spu->pdata) {358result = -ENOMEM;359goto fail_malloc;360}361362spu_pdata(spu)->resource_id = (unsigned long)data;363364/* Init cached reg values to HV defaults. */365366spu_pdata(spu)->cache.sr1 = 0x33;367368result = construct_spu(spu);369370if (result)371goto fail_construct;372373/* For now, just go ahead and enable it. */374375result = enable_spu(spu);376377if (result)378goto fail_enable;379380/* Make sure the spu is in SPE_EX_STATE_EXECUTED. */381382/* need something better here!!! */383while (in_be64(&spu_pdata(spu)->shadow->spe_execution_status)384!= SPE_EX_STATE_EXECUTED)385(void)0;386387return result;388389fail_enable:390fail_construct:391ps3_destroy_spu(spu);392fail_malloc:393return result;394}395396static int __init ps3_enumerate_spus(int (*fn)(void *data))397{398int result;399unsigned int num_resource_id;400unsigned int i;401402result = ps3_repository_read_num_spu_resource_id(&num_resource_id);403404pr_debug("%s:%d: num_resource_id %u\n", __func__, __LINE__,405num_resource_id);406407/*408* For now, just create logical spus equal to the number409* of physical spus reserved for the partition.410*/411412for (i = 0; i < num_resource_id; i++) {413enum ps3_spu_resource_type resource_type;414unsigned int resource_id;415416result = ps3_repository_read_spu_resource_id(i,417&resource_type, &resource_id);418419if (result)420break;421422if (resource_type == PS3_SPU_RESOURCE_TYPE_EXCLUSIVE) {423result = fn((void*)(unsigned long)resource_id);424425if (result)426break;427}428}429430if (result) {431printk(KERN_WARNING "%s:%d: Error initializing spus\n",432__func__, __LINE__);433return result;434}435436return num_resource_id;437}438439static int ps3_init_affinity(void)440{441return 0;442}443444/**445* ps3_enable_spu - Enable SPU run control.446*447* An outstanding enhancement for the PS3 would be to add a guard to check448* for incorrect access to the spu problem state when the spu context is449* disabled. 
This check could be implemented with a flag added to the spu450* context that would inhibit mapping problem state pages, and a routine451* to unmap spu problem state pages. When the spu is enabled with452* ps3_enable_spu() the flag would be set allowing pages to be mapped,453* and when the spu is disabled with ps3_disable_spu() the flag would be454* cleared and the mapped problem state pages would be unmapped.455*/456457static void ps3_enable_spu(struct spu_context *ctx)458{459}460461static void ps3_disable_spu(struct spu_context *ctx)462{463ctx->ops->runcntl_stop(ctx);464}465466const struct spu_management_ops spu_management_ps3_ops = {467.enumerate_spus = ps3_enumerate_spus,468.create_spu = ps3_create_spu,469.destroy_spu = ps3_destroy_spu,470.enable_spu = ps3_enable_spu,471.disable_spu = ps3_disable_spu,472.init_affinity = ps3_init_affinity,473};474475/* spu_priv1_ops */476477static void int_mask_and(struct spu *spu, int class, u64 mask)478{479u64 old_mask;480481/* are these serialized by caller??? */482old_mask = spu_int_mask_get(spu, class);483spu_int_mask_set(spu, class, old_mask & mask);484}485486static void int_mask_or(struct spu *spu, int class, u64 mask)487{488u64 old_mask;489490old_mask = spu_int_mask_get(spu, class);491spu_int_mask_set(spu, class, old_mask | mask);492}493494static void int_mask_set(struct spu *spu, int class, u64 mask)495{496spu_pdata(spu)->cache.masks[class] = mask;497lv1_set_spe_interrupt_mask(spu_pdata(spu)->spe_id, class,498spu_pdata(spu)->cache.masks[class]);499}500501static u64 int_mask_get(struct spu *spu, int class)502{503return spu_pdata(spu)->cache.masks[class];504}505506static void int_stat_clear(struct spu *spu, int class, u64 stat)507{508/* Note that MFC_DSISR will be cleared when class1[MF] is set. 
*/509510lv1_clear_spe_interrupt_status(spu_pdata(spu)->spe_id, class,511stat, 0);512}513514static u64 int_stat_get(struct spu *spu, int class)515{516u64 stat;517518lv1_get_spe_interrupt_status(spu_pdata(spu)->spe_id, class, &stat);519return stat;520}521522static void cpu_affinity_set(struct spu *spu, int cpu)523{524/* No support. */525}526527static u64 mfc_dar_get(struct spu *spu)528{529return in_be64(&spu_pdata(spu)->shadow->mfc_dar_RW);530}531532static void mfc_dsisr_set(struct spu *spu, u64 dsisr)533{534/* Nothing to do, cleared in int_stat_clear(). */535}536537static u64 mfc_dsisr_get(struct spu *spu)538{539return in_be64(&spu_pdata(spu)->shadow->mfc_dsisr_RW);540}541542static void mfc_sdr_setup(struct spu *spu)543{544/* Nothing to do. */545}546547static void mfc_sr1_set(struct spu *spu, u64 sr1)548{549/* Check bits allowed by HV. */550551static const u64 allowed = ~(MFC_STATE1_LOCAL_STORAGE_DECODE_MASK552| MFC_STATE1_PROBLEM_STATE_MASK);553554BUG_ON((sr1 & allowed) != (spu_pdata(spu)->cache.sr1 & allowed));555556spu_pdata(spu)->cache.sr1 = sr1;557lv1_set_spe_privilege_state_area_1_register(558spu_pdata(spu)->spe_id,559offsetof(struct spu_priv1, mfc_sr1_RW),560spu_pdata(spu)->cache.sr1);561}562563static u64 mfc_sr1_get(struct spu *spu)564{565return spu_pdata(spu)->cache.sr1;566}567568static void mfc_tclass_id_set(struct spu *spu, u64 tclass_id)569{570spu_pdata(spu)->cache.tclass_id = tclass_id;571lv1_set_spe_privilege_state_area_1_register(572spu_pdata(spu)->spe_id,573offsetof(struct spu_priv1, mfc_tclass_id_RW),574spu_pdata(spu)->cache.tclass_id);575}576577static u64 mfc_tclass_id_get(struct spu *spu)578{579return spu_pdata(spu)->cache.tclass_id;580}581582static void tlb_invalidate(struct spu *spu)583{584/* Nothing to do. */585}586587static void resource_allocation_groupID_set(struct spu *spu, u64 id)588{589/* No support. */590}591592static u64 resource_allocation_groupID_get(struct spu *spu)593{594return 0; /* No support. 
*/595}596597static void resource_allocation_enable_set(struct spu *spu, u64 enable)598{599/* No support. */600}601602static u64 resource_allocation_enable_get(struct spu *spu)603{604return 0; /* No support. */605}606607const struct spu_priv1_ops spu_priv1_ps3_ops = {608.int_mask_and = int_mask_and,609.int_mask_or = int_mask_or,610.int_mask_set = int_mask_set,611.int_mask_get = int_mask_get,612.int_stat_clear = int_stat_clear,613.int_stat_get = int_stat_get,614.cpu_affinity_set = cpu_affinity_set,615.mfc_dar_get = mfc_dar_get,616.mfc_dsisr_set = mfc_dsisr_set,617.mfc_dsisr_get = mfc_dsisr_get,618.mfc_sdr_setup = mfc_sdr_setup,619.mfc_sr1_set = mfc_sr1_set,620.mfc_sr1_get = mfc_sr1_get,621.mfc_tclass_id_set = mfc_tclass_id_set,622.mfc_tclass_id_get = mfc_tclass_id_get,623.tlb_invalidate = tlb_invalidate,624.resource_allocation_groupID_set = resource_allocation_groupID_set,625.resource_allocation_groupID_get = resource_allocation_groupID_get,626.resource_allocation_enable_set = resource_allocation_enable_set,627.resource_allocation_enable_get = resource_allocation_enable_get,628};629630void ps3_spu_set_platform(void)631{632spu_priv1_ops = &spu_priv1_ps3_ops;633spu_management_ops = &spu_management_ps3_ops;634}635636637