Path: blob/master/arch/ia64/include/asm/asmmacro.h
#ifndef _ASM_IA64_ASMMACRO_H
#define _ASM_IA64_ASMMACRO_H

/*
 * Copyright (C) 2000-2001, 2003-2004 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 */


#define ENTRY(name)				\
	.align 32;				\
	.proc name;				\
name:

#define ENTRY_MIN_ALIGN(name)			\
	.align 16;				\
	.proc name;				\
name:

#define GLOBAL_ENTRY(name)			\
	.global name;				\
	ENTRY(name)

#define END(name)				\
	.endp name

/*
 * Helper macros to make unwind directives more readable:
 */

/* prologue_gr: */
#define ASM_UNW_PRLG_RP			0x8
#define ASM_UNW_PRLG_PFS		0x4
#define ASM_UNW_PRLG_PSP		0x2
#define ASM_UNW_PRLG_PR			0x1
#define ASM_UNW_PRLG_GRSAVE(ninputs)	(32+(ninputs))

/*
 * Helper macros for accessing user memory.
 *
 * When adding any new .section/.previous entries here, make sure to
 * also add it to the DISCARD section in arch/ia64/kernel/gate.lds.S or
 * unpleasant things will happen.
 */

	.section "__ex_table", "a"	// declare section & section attributes
	.previous

# define EX(y,x...)				\
	.xdata4 "__ex_table", 99f-., y-.;	\
  [99:]	x
# define EXCLR(y,x...)				\
	.xdata4 "__ex_table", 99f-., y-.+4;	\
  [99:]	x

/*
 * Tag MCA recoverable instruction ranges.
 */

	.section "__mca_table", "a"	// declare section & section attributes
	.previous

# define MCA_RECOVER_RANGE(y)			\
	.xdata4 "__mca_table", y-., 99f-.;	\
  [99:]

/*
 * Mark instructions that need a load of a virtual address patched to be
 * a load of a physical address.  We use this either in critical performance
 * path (ivt.S - TLB miss processing) or in places where it might not be
 * safe to use a "tpa" instruction (mca_asm.S - error recovery).
 */
	.section ".data..patch.vtop", "a"	// declare section & section attributes
	.previous

#define	LOAD_PHYSICAL(pr, reg, obj)		\
[1:](pr)movl reg = obj;				\
	.xdata4 ".data..patch.vtop", 1b-.

/*
 * For now, we always put in the McKinley E9 workaround.  On CPUs that don't need it,
 * we'll patch out the work-around bundles with NOPs, so their impact is minimal.
 */
#define DO_MCKINLEY_E9_WORKAROUND

#ifdef DO_MCKINLEY_E9_WORKAROUND
	.section ".data..patch.mckinley_e9", "a"
	.previous
/* workaround for Itanium 2 Errata 9: */
# define FSYS_RETURN					\
	.xdata4 ".data..patch.mckinley_e9", 1f-.;	\
1:{ .mib;						\
	nop.m 0;					\
	mov r16=ar.pfs;					\
	br.call.sptk.many b7=2f;;			\
};							\
2:{ .mib;						\
	nop.m 0;					\
	mov ar.pfs=r16;					\
	br.ret.sptk.many b6;;				\
}
#else
# define FSYS_RETURN	br.ret.sptk.many b6
#endif

/*
 * If physical stack register size is different from DEF_NUM_STACK_REG,
 * dynamically patch the kernel for correct size.
 */
	.section ".data..patch.phys_stack_reg", "a"
	.previous
#define LOAD_PHYS_STACK_REG_SIZE(reg)			\
[1:]	adds reg=IA64_NUM_PHYS_STACK_REG*8+8,r0;	\
	.xdata4 ".data..patch.phys_stack_reg", 1b-.

/*
 * Up until early 2004, use of .align within a function caused bad unwind info.
 * TEXT_ALIGN(n) expands into ".align n" if a fixed GAS is available, and into
 * nothing otherwise.
 */
#ifdef HAVE_WORKING_TEXT_ALIGN
# define TEXT_ALIGN(n)	.align n
#else
# define TEXT_ALIGN(n)
#endif

#ifdef HAVE_SERIALIZE_DIRECTIVE
# define dv_serialize_data		.serialize.data
# define dv_serialize_instruction	.serialize.instruction
#else
# define dv_serialize_data
# define dv_serialize_instruction
#endif

#endif /* _ASM_IA64_ASMMACRO_H */
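
Usage note (not part of the header): a minimal sketch of how ENTRY_MIN_ALIGN, GLOBAL_ENTRY, END and the ASM_UNW_PRLG_* prologue masks combine in an ia64 assembly source. The function names are hypothetical; the save/restore pattern mirrors call wrappers such as ia64_execve in arch/ia64/kernel/entry.S.

#include <asm/asmmacro.h>

ENTRY_MIN_ALIGN(demo_helper)		// hypothetical leaf callee: r8 = r32 + r33
	add r8=r32,r33
	br.ret.sptk.many rp
END(demo_helper)

GLOBAL_ENTRY(demo_call_wrapper)		// hypothetical exported wrapper
	// With two inputs, saves start at r34 (= 32 + 2 = loc0), which is
	// exactly what ASM_UNW_PRLG_GRSAVE(2) tells the unwinder:
	.prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(2)
	alloc loc1=ar.pfs,2,2,2,0	// 2 in, 2 loc, 2 out
	mov loc0=rp			// save rp/ar.pfs, as the mask promises
	.body
	mov out0=in0
	mov out1=in1
	br.call.sptk.many rp=demo_helper
	mov ar.pfs=loc1			// restore frame marker and return pointer
	mov rp=loc0
	;;
	br.ret.sptk.many rp
END(demo_call_wrapper)			// .endp closes the unwind region

The callee leaves its result in r8, so the wrapper propagates it unchanged.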
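
In the same hypothetical spirit, a sketch of EX() guarding a user-space load: the .xdata4 entry it plants in "__ex_table" makes a fault on the tagged load resume at the fixup label instead of oopsing the kernel (EXCLR's y-.+4 variant additionally flags that the destination register should be cleared). The helper name and the -1 error convention are invented for this sketch; real helpers return -EFAULT.

GLOBAL_ENTRY(demo_get_user_byte)	// hypothetical: r8 = byte at user address r32, or -1
	.prologue
	.body
EX(.Lefault, ld1 r8=[r32])		// a fault here continues at .Lefault
	br.ret.sptk.many rp
.Lefault:
	mov r8=-1			// sketch-only error convention
	br.ret.sptk.many rp
END(demo_get_user_byte)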
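
Finally, a one-line sketch of LOAD_PHYSICAL under the same assumptions; demo_root is an invented symbol. The ".data..patch.vtop" entry the macro emits is consumed at boot by the patching code in arch/ia64/kernel/patch.c, which rewrites the movl immediate to the symbol's physical address.

	// In a handler that runs in physical mode (e.g. the TLB-miss paths
	// in ivt.S), load the physical rather than virtual address:
	LOAD_PHYSICAL(p0, r17, demo_root)	// p0: predicate is always true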