/*
 * arch/sh/lib/mcount.S
 *
 * Copyright (C) 2008, 2009 Paul Mundt
 * Copyright (C) 2008, 2009 Matt Fleming
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <asm/ftrace.h>
#include <asm/thread_info.h>
#include <asm/asm-offsets.h>

#define MCOUNT_ENTER()		\
	mov.l	r4, @-r15;	\
	mov.l	r5, @-r15;	\
	mov.l	r6, @-r15;	\
	mov.l	r7, @-r15;	\
	sts.l	pr, @-r15;	\
				\
	mov.l	@(20,r15),r4;	\
	sts	pr, r5

#define MCOUNT_LEAVE()		\
	lds.l	@r15+, pr;	\
	mov.l	@r15+, r7;	\
	mov.l	@r15+, r6;	\
	mov.l	@r15+, r5;	\
	rts;			\
	 mov.l	@r15+, r4

#ifdef CONFIG_STACK_DEBUG
/*
 * Perform diagnostic checks on the state of the kernel stack.
 *
 * Check for stack overflow. If there is less than 1KB free
 * then it has overflowed.
 *
 * Make sure the stack pointer contains a valid address. Valid
 * addresses for kernel stacks are anywhere after the bss
 * (after _ebss) and anywhere in init_thread_union (init_stack).
 */
#define STACK_CHECK()					\
	mov	#(THREAD_SIZE >> 10), r0;		\
	shll8	r0;					\
	shll2	r0;					\
							\
	/* r1 = sp & (THREAD_SIZE - 1) */		\
	mov	#-1, r1;				\
	add	r0, r1;					\
	and	r15, r1;				\
							\
	mov	#TI_SIZE, r3;				\
	mov	#(STACK_WARN >> 8), r2;			\
	shll8	r2;					\
	add	r3, r2;					\
							\
	/* Is the stack overflowing? */			\
	cmp/hi	r2, r1;					\
	bf	stack_panic;				\
							\
	/* If sp > _ebss then we're OK. */		\
	mov.l	.L_ebss, r1;				\
	cmp/hi	r1, r15;				\
	bt	1f;					\
							\
	/* If sp < init_stack, we're not OK. */		\
	mov.l	.L_init_thread_union, r1;		\
	cmp/hs	r1, r15;				\
	bf	stack_panic;				\
							\
	/* If sp > init_stack && sp < _ebss, not OK. */	\
	add	r0, r1;					\
	cmp/hs	r1, r15;				\
	bt	stack_panic;				\
1:
#else
#define STACK_CHECK()
#endif /* CONFIG_STACK_DEBUG */

	.align 2
	.globl	_mcount
	.type	_mcount,@function
	.globl	mcount
	.type	mcount,@function
_mcount:
mcount:
	STACK_CHECK()

#ifndef CONFIG_FUNCTION_TRACER
	rts
	 nop
#else
#ifndef CONFIG_DYNAMIC_FTRACE
	mov.l	.Lfunction_trace_stop, r0
	mov.l	@r0, r0
	tst	r0, r0
	bf	ftrace_stub
#endif

	MCOUNT_ENTER()

#ifdef CONFIG_DYNAMIC_FTRACE
	.globl	mcount_call
mcount_call:
	mov.l	.Lftrace_stub, r6
#else
	mov.l	.Lftrace_trace_function, r6
	mov.l	ftrace_stub, r7
	cmp/eq	r6, r7
	bt	skip_trace
	mov.l	@r6, r6
#endif

	jsr	@r6
	 nop

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	mov.l	.Lftrace_graph_return, r6
	mov.l	.Lftrace_stub, r7
	cmp/eq	r6, r7
	bt	1f

	mov.l	.Lftrace_graph_caller, r0
	jmp	@r0
	 nop

1:
	mov.l	.Lftrace_graph_entry, r6
	mov.l	.Lftrace_graph_entry_stub, r7
	cmp/eq	r6, r7
	bt	skip_trace

	mov.l	.Lftrace_graph_caller, r0
	jmp	@r0
	 nop

	.align 2
.Lftrace_graph_return:
	.long	ftrace_graph_return
.Lftrace_graph_entry:
	.long	ftrace_graph_entry
.Lftrace_graph_entry_stub:
	.long	ftrace_graph_entry_stub
.Lftrace_graph_caller:
	.long	ftrace_graph_caller
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

	.globl	skip_trace
skip_trace:
	MCOUNT_LEAVE()

	.align 2
.Lftrace_trace_function:
	.long	ftrace_trace_function
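
/*
 * For reference, the static (!CONFIG_DYNAMIC_FTRACE) entry path above
 * behaves roughly like the C sketch below. This is an illustrative
 * summary, not kernel code: "parent_ip" and "self_ip" are made-up
 * names for the two values MCOUNT_ENTER() prepares in r4 (the
 * return-address word fetched from @(20,r15)) and r5 (pr, the return
 * address back into the traced function).
 *
 *	void mcount(void)
 *	{
 *		if (function_trace_stop)
 *			return;
 *
 *		if (ftrace_trace_function != ftrace_stub)
 *			ftrace_trace_function(parent_ip, self_ip);
 *	}
 */
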
#ifdef CONFIG_DYNAMIC_FTRACE
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/*
 * NOTE: Do not move either ftrace_graph_call or ftrace_caller
 * as this will affect the calculation of GRAPH_INSN_OFFSET.
 */
	.globl	ftrace_graph_call
ftrace_graph_call:
	mov.l	.Lskip_trace, r0
	jmp	@r0
	 nop

	.align 2
.Lskip_trace:
	.long	skip_trace
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

	.globl	ftrace_caller
ftrace_caller:
	mov.l	.Lfunction_trace_stop, r0
	mov.l	@r0, r0
	tst	r0, r0
	bf	ftrace_stub

	MCOUNT_ENTER()

	.globl	ftrace_call
ftrace_call:
	mov.l	.Lftrace_stub, r6
	jsr	@r6
	 nop

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	bra	ftrace_graph_call
	 nop
#else
	MCOUNT_LEAVE()
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
#endif /* CONFIG_DYNAMIC_FTRACE */

	.align 2
.Lfunction_trace_stop:
	.long	function_trace_stop

/*
 * NOTE: From here on the locations of the .Lftrace_stub label and
 * ftrace_stub itself are fixed. Adding additional data here will skew
 * the displacement for the memory table and break the block replacement.
 * Place new labels either after the ftrace_stub body, or before
 * ftrace_caller. You have been warned.
 */
.Lftrace_stub:
	.long	ftrace_stub

	.globl	ftrace_stub
ftrace_stub:
	rts
	 nop

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	.globl	ftrace_graph_caller
ftrace_graph_caller:
	mov.l	2f, r0
	mov.l	@r0, r0
	tst	r0, r0
	bt	1f

	mov.l	3f, r1
	jmp	@r1
	 nop
1:
	/*
	 * MCOUNT_ENTER() pushed 5 registers onto the stack, so
	 * the stack address containing our return address is
	 * r15 + 20.
	 */
	mov	#20, r0
	add	r15, r0
	mov	r0, r4

	mov.l	.Lprepare_ftrace_return, r0
	jsr	@r0
	 nop

	MCOUNT_LEAVE()

	.align 2
2:	.long	function_trace_stop
3:	.long	skip_trace
.Lprepare_ftrace_return:
	.long	prepare_ftrace_return

	.globl	return_to_handler
return_to_handler:
	/*
	 * Save the return values.
	 */
	mov.l	r0, @-r15
	mov.l	r1, @-r15

	mov	#0, r4

	mov.l	.Lftrace_return_to_handler, r0
	jsr	@r0
	 nop

	/*
	 * The return value from ftrace_return_to_handler() has the real
	 * address that we should return to.
	 */
	lds	r0, pr
	mov.l	@r15+, r1
	rts
	 mov.l	@r15+, r0

	.align 2
.Lftrace_return_to_handler:
	.long	ftrace_return_to_handler
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
#endif /* CONFIG_FUNCTION_TRACER */

#ifdef CONFIG_STACK_DEBUG
	.globl	stack_panic
stack_panic:
	mov.l	.Ldump_stack, r0
	jsr	@r0
	 nop

	mov.l	.Lpanic, r0
	jsr	@r0
	 mov.l	.Lpanic_s, r4

	rts
	 nop

	.align 2
.L_ebss:
	.long	_ebss
.L_init_thread_union:
	.long	init_thread_union
.Lpanic:
	.long	panic
.Lpanic_s:
	.long	.Lpanic_str
.Ldump_stack:
	.long	dump_stack

	.section	.rodata
	.align 2
.Lpanic_str:
	.string	"Stack error"
#endif /* CONFIG_STACK_DEBUG */
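
/*
 * A worked example of the STACK_CHECK() size computation above,
 * assuming THREAD_SIZE is 8KiB (the exact value depends on the
 * kernel configuration):
 *
 *	mov	#(THREAD_SIZE >> 10), r0	! r0 = 8, which fits the
 *						! signed 8-bit immediate
 *	shll8	r0				! r0 = 8 << 8 = 2048
 *	shll2	r0				! r0 = 2048 << 2 = 8192
 *
 * shll8 followed by shll2 shifts left by 10 bits in total, undoing the
 * ">> 10" and rebuilding THREAD_SIZE in a register without spending a
 * literal pool entry on the constant.
 */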
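
/*
 * For reference, the frame built by MCOUNT_ENTER() (offsets relative
 * to r15 after the five pushes):
 *
 *	@(16,r15):	saved r4
 *	@(12,r15):	saved r5
 *	@( 8,r15):	saved r6
 *	@( 4,r15):	saved r7
 *	@( 0,r15):	saved pr (return address into the traced function)
 *
 * The word at @(20,r15) sat on top of the stack when mcount was
 * entered: the traced function's own return address. MCOUNT_ENTER()
 * fetches its value into r4, while ftrace_graph_caller computes the
 * same "r15 + 20" so that prepare_ftrace_return() receives the address
 * of that slot and can rewrite it to point at return_to_handler.
 */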
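
/*
 * Likewise, return_to_handler in rough C terms. This is an
 * illustrative sketch only; ftrace_return_to_handler() is the real
 * kernel helper, the zero in r4 is its frame-pointer argument, and
 * the rest is pseudocode:
 *
 *	save r0 and r1 (the traced function's return values);
 *	real_ret = ftrace_return_to_handler(0);
 *	restore r1 and r0;
 *	jump to real_ret;
 */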