arch/tile/include/asm/irqflags.h
/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for
 * more details.
 */

#ifndef _ASM_TILE_IRQFLAGS_H
#define _ASM_TILE_IRQFLAGS_H

#include <arch/interrupts.h>
#include <arch/chip.h>

#if !defined(__tilegx__) && defined(__ASSEMBLY__)

/*
 * The set of interrupts we want to allow when interrupts are nominally
 * disabled.  The remainder are effectively "NMI" interrupts from
 * the point of view of the generic Linux code.  Note that synchronous
 * interrupts (aka "non-queued") are not blocked by the mask in any case.
 */
#if CHIP_HAS_AUX_PERF_COUNTERS()
#define LINUX_MASKABLE_INTERRUPTS_HI \
	(~(INT_MASK_HI(INT_PERF_COUNT) | INT_MASK_HI(INT_AUX_PERF_COUNT)))
#else
#define LINUX_MASKABLE_INTERRUPTS_HI \
	(~(INT_MASK_HI(INT_PERF_COUNT)))
#endif

#else

#if CHIP_HAS_AUX_PERF_COUNTERS()
#define LINUX_MASKABLE_INTERRUPTS \
	(~(INT_MASK(INT_PERF_COUNT) | INT_MASK(INT_AUX_PERF_COUNT)))
#else
#define LINUX_MASKABLE_INTERRUPTS \
	(~(INT_MASK(INT_PERF_COUNT)))
#endif

#endif

#ifndef __ASSEMBLY__

/* NOTE: we can't include <linux/percpu.h> due to #include dependencies. */
#include <asm/percpu.h>
#include <arch/spr_def.h>

/* Set and clear kernel interrupt masks. */
#if CHIP_HAS_SPLIT_INTR_MASK()
#if INT_PERF_COUNT < 32 || INT_AUX_PERF_COUNT < 32 || INT_MEM_ERROR >= 32
# error Fix assumptions about which word various interrupts are in
#endif
#define interrupt_mask_set(n) do { \
	int __n = (n); \
	int __mask = 1 << (__n & 0x1f); \
	if (__n < 32) \
		__insn_mtspr(SPR_INTERRUPT_MASK_SET_K_0, __mask); \
	else \
		__insn_mtspr(SPR_INTERRUPT_MASK_SET_K_1, __mask); \
} while (0)
#define interrupt_mask_reset(n) do { \
	int __n = (n); \
	int __mask = 1 << (__n & 0x1f); \
	if (__n < 32) \
		__insn_mtspr(SPR_INTERRUPT_MASK_RESET_K_0, __mask); \
	else \
		__insn_mtspr(SPR_INTERRUPT_MASK_RESET_K_1, __mask); \
} while (0)
#define interrupt_mask_check(n) ({ \
	int __n = (n); \
	(((__n < 32) ? \
		__insn_mfspr(SPR_INTERRUPT_MASK_K_0) : \
		__insn_mfspr(SPR_INTERRUPT_MASK_K_1)) \
	  >> (__n & 0x1f)) & 1; \
})
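/*
 * Illustrative sketch, not part of the original header: with split
 * mask words, an interrupt number selects the SPR word by whether it
 * is >= 32 and the bit within that word by its low five bits.  For a
 * hypothetical interrupt number 33 (word 1, bit 1):
 *
 *	interrupt_mask_set(33);    // write bit 1 of SPR_INTERRUPT_MASK_SET_K_1
 *	interrupt_mask_check(33);  // read bit 1 of SPR_INTERRUPT_MASK_K_1
 *	interrupt_mask_reset(33);  // write bit 1 of SPR_INTERRUPT_MASK_RESET_K_1
 */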
#define interrupt_mask_set_mask(mask) do { \
	unsigned long long __m = (mask); \
	__insn_mtspr(SPR_INTERRUPT_MASK_SET_K_0, (unsigned long)(__m)); \
	__insn_mtspr(SPR_INTERRUPT_MASK_SET_K_1, (unsigned long)(__m >> 32)); \
} while (0)
#define interrupt_mask_reset_mask(mask) do { \
	unsigned long long __m = (mask); \
	__insn_mtspr(SPR_INTERRUPT_MASK_RESET_K_0, (unsigned long)(__m)); \
	__insn_mtspr(SPR_INTERRUPT_MASK_RESET_K_1, (unsigned long)(__m >> 32)); \
} while (0)
#else
#define interrupt_mask_set(n) \
	__insn_mtspr(SPR_INTERRUPT_MASK_SET_K, (1UL << (n)))
#define interrupt_mask_reset(n) \
	__insn_mtspr(SPR_INTERRUPT_MASK_RESET_K, (1UL << (n)))
#define interrupt_mask_check(n) \
	((__insn_mfspr(SPR_INTERRUPT_MASK_K) >> (n)) & 1)
#define interrupt_mask_set_mask(mask) \
	__insn_mtspr(SPR_INTERRUPT_MASK_SET_K, (mask))
#define interrupt_mask_reset_mask(mask) \
	__insn_mtspr(SPR_INTERRUPT_MASK_RESET_K, (mask))
#endif

/*
 * The set of interrupts we want active if irqs are enabled.
 * Note that in particular, the tile timer interrupt comes and goes
 * from this set, since we have no other way to turn off the timer.
 * Likewise, INTCTRL_K is removed and re-added during device
 * interrupts, as is the hardwall UDN_FIREWALL interrupt.
 * We use a low bit (MEM_ERROR) as our sentinel value and make sure it
 * is always claimed as an "active interrupt" so we can query that bit
 * to know our current state.
 */
DECLARE_PER_CPU(unsigned long long, interrupts_enabled_mask);
#define INITIAL_INTERRUPTS_ENABLED INT_MASK(INT_MEM_ERROR)

/* Disable interrupts. */
#define arch_local_irq_disable() \
	interrupt_mask_set_mask(LINUX_MASKABLE_INTERRUPTS)

/* Disable all interrupts, including NMIs. */
#define arch_local_irq_disable_all() \
	interrupt_mask_set_mask(-1UL)

/* Re-enable all maskable interrupts. */
#define arch_local_irq_enable() \
	interrupt_mask_reset_mask(__get_cpu_var(interrupts_enabled_mask))

/* Disable or enable interrupts based on flag argument. */
#define arch_local_irq_restore(disabled) do { \
	if (disabled) \
		arch_local_irq_disable(); \
	else \
		arch_local_irq_enable(); \
} while (0)

/* Return true if "flags" argument means interrupts are disabled. */
#define arch_irqs_disabled_flags(flags) ((flags) != 0)

/* Return true if interrupts are currently disabled. */
#define arch_irqs_disabled() interrupt_mask_check(INT_MEM_ERROR)

/* Save whether interrupts are currently disabled. */
#define arch_local_save_flags() arch_irqs_disabled()

/* Save whether interrupts are currently disabled, then disable them. */
#define arch_local_irq_save() ({ \
	unsigned long __flags = arch_local_save_flags(); \
	arch_local_irq_disable(); \
	__flags; })
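/*
 * Illustrative sketch, not part of the original header: the usual
 * critical-section pattern these macros support.  Note that "flags"
 * here is just the boolean disabled state returned by
 * arch_local_save_flags(), not a copy of the hardware mask word:
 *
 *	unsigned long flags = arch_local_irq_save();
 *	... code that must not take device interrupts ...
 *	arch_local_irq_restore(flags);
 */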
/* Prevent the given interrupt from being enabled next time we enable irqs. */
#define arch_local_irq_mask(interrupt) \
	(__get_cpu_var(interrupts_enabled_mask) &= ~INT_MASK(interrupt))

/* Prevent the given interrupt from being enabled immediately. */
#define arch_local_irq_mask_now(interrupt) do { \
	arch_local_irq_mask(interrupt); \
	interrupt_mask_set(interrupt); \
} while (0)

/* Allow the given interrupt to be enabled next time we enable irqs. */
#define arch_local_irq_unmask(interrupt) \
	(__get_cpu_var(interrupts_enabled_mask) |= INT_MASK(interrupt))

/* Allow the given interrupt to be enabled immediately, if !irqs_disabled. */
#define arch_local_irq_unmask_now(interrupt) do { \
	arch_local_irq_unmask(interrupt); \
	if (!irqs_disabled()) \
		interrupt_mask_reset(interrupt); \
} while (0)

#else /* __ASSEMBLY__ */

/* We provide a somewhat more restricted set for assembly. */

#ifdef __tilegx__

#if INT_MEM_ERROR != 0
# error Fix IRQS_DISABLED() macro
#endif

/* Return 0 or 1 to indicate whether interrupts are currently disabled. */
#define IRQS_DISABLED(tmp) \
	mfspr   tmp, SPR_INTERRUPT_MASK_K; \
	andi    tmp, tmp, 1

/* Load up a pointer to &interrupts_enabled_mask. */
#define GET_INTERRUPTS_ENABLED_MASK_PTR(reg) \
	moveli reg, hw2_last(interrupts_enabled_mask); \
	shl16insli reg, reg, hw1(interrupts_enabled_mask); \
	shl16insli reg, reg, hw0(interrupts_enabled_mask); \
	add reg, reg, tp

/* Disable interrupts. */
#define IRQ_DISABLE(tmp0, tmp1) \
	moveli  tmp0, hw2_last(LINUX_MASKABLE_INTERRUPTS); \
	shl16insli tmp0, tmp0, hw1(LINUX_MASKABLE_INTERRUPTS); \
	shl16insli tmp0, tmp0, hw0(LINUX_MASKABLE_INTERRUPTS); \
	mtspr   SPR_INTERRUPT_MASK_SET_K, tmp0

/* Disable ALL maskable interrupts (used by NMI entry). */
#define IRQ_DISABLE_ALL(tmp) \
	movei   tmp, -1; \
	mtspr   SPR_INTERRUPT_MASK_SET_K, tmp

/* Enable interrupts. */
#define IRQ_ENABLE(tmp0, tmp1) \
	GET_INTERRUPTS_ENABLED_MASK_PTR(tmp0); \
	ld      tmp0, tmp0; \
	mtspr   SPR_INTERRUPT_MASK_RESET_K, tmp0

#else /* !__tilegx__ */

/*
 * Return 0 or 1 to indicate whether interrupts are currently disabled.
 * Note that it's important that we use a bit from the "low" mask word,
 * since when we are enabling, that is the word we write first, so if we
 * are interrupted after only writing half of the mask, the interrupt
 * handler will correctly observe that we have interrupts enabled, and
 * will enable interrupts itself on return from the interrupt handler
 * (making the original code's write of the "high" mask word idempotent).
 */
#define IRQS_DISABLED(tmp) \
	mfspr   tmp, SPR_INTERRUPT_MASK_K_0; \
	shri    tmp, tmp, INT_MEM_ERROR; \
	andi    tmp, tmp, 1

/* Load up a pointer to &interrupts_enabled_mask. */
#define GET_INTERRUPTS_ENABLED_MASK_PTR(reg) \
	moveli  reg, lo16(interrupts_enabled_mask); \
	auli    reg, reg, ha16(interrupts_enabled_mask); \
	add     reg, reg, tp

/* Disable interrupts. */
#define IRQ_DISABLE(tmp0, tmp1) \
	{ \
	 movei  tmp0, -1; \
	 moveli tmp1, lo16(LINUX_MASKABLE_INTERRUPTS_HI) \
	}; \
	{ \
	 mtspr  SPR_INTERRUPT_MASK_SET_K_0, tmp0; \
	 auli   tmp1, tmp1, ha16(LINUX_MASKABLE_INTERRUPTS_HI) \
	}; \
	mtspr   SPR_INTERRUPT_MASK_SET_K_1, tmp1

/* Disable ALL maskable interrupts (used by NMI entry). */
#define IRQ_DISABLE_ALL(tmp) \
	movei   tmp, -1; \
	mtspr   SPR_INTERRUPT_MASK_SET_K_0, tmp; \
	mtspr   SPR_INTERRUPT_MASK_SET_K_1, tmp

/* Enable interrupts. */
#define IRQ_ENABLE(tmp0, tmp1) \
	GET_INTERRUPTS_ENABLED_MASK_PTR(tmp0); \
	{ \
	 lw     tmp0, tmp0; \
	 addi   tmp1, tmp0, 4 \
	}; \
	lw      tmp1, tmp1; \
	mtspr   SPR_INTERRUPT_MASK_RESET_K_0, tmp0; \
	mtspr   SPR_INTERRUPT_MASK_RESET_K_1, tmp1
#endif
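/*
 * Illustrative sketch, not part of the original header: a hypothetical
 * assembly sequence bracketing a critical region with these macros,
 * using two caller-chosen scratch registers (r20/r21 here):
 *
 *	IRQ_DISABLE(r20, r21)
 *	... critical region; device interrupts stay masked ...
 *	IRQ_ENABLE(r20, r21)
 */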
We call a268* C function, but almost everywhere we do, we don't mind clobbering269* all the caller-saved registers.270*/271#ifdef CONFIG_TRACE_IRQFLAGS272# define TRACE_IRQS_ON jal trace_hardirqs_on273# define TRACE_IRQS_OFF jal trace_hardirqs_off274#else275# define TRACE_IRQS_ON276# define TRACE_IRQS_OFF277#endif278279#endif /* __ASSEMBLY__ */280281#endif /* _ASM_TILE_IRQFLAGS_H */282283284