/* Source file: arch/blackfin/kernel/ftrace-entry.S (Linux kernel) */
/*
 * mcount and friends -- ftrace stuff
 *
 * Copyright (C) 2009-2010 Analog Devices Inc.
 * Licensed under the GPL-2 or later.
 */

#include <linux/linkage.h>
#include <asm/ftrace.h>

.text

#ifdef CONFIG_DYNAMIC_FTRACE

/* Simple stub so we can boot the kernel until runtime patching has
 * disabled all calls to this.  Then it'll be unused.
 *
 * With ANOMALY_05000371 (possible RETS register corruption when subroutine
 * return and interrupt coincide), pad with nops so the rts is far enough
 * from the call site.
 */
ENTRY(__mcount)
# if ANOMALY_05000371
	nop; nop; nop; nop;
# endif
	rts;
ENDPROC(__mcount)

/* GCC will have called us before setting up the function prologue, so we
 * can clobber the normal scratch registers, but we need to make sure to
 * save/restore the registers used for argument passing (R0-R2) in case
 * the profiled function is using them.  With data registers, R3 is the
 * only one we can blow away.  With pointer registers, we have P0-P2.
 *
 * Upon entry, the RETS will point to the top of the current profiled
 * function.  And since GCC pushed the previous RETS for us, the previous
 * function will be waiting there.  mmmm pie.
 */
ENTRY(_ftrace_caller)
# ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
	/* optional micro optimization: return if stopped */
	p1.l = _function_trace_stop;
	p1.h = _function_trace_stop;
	r3 = [p1];
	cc = r3 == 0;
	if ! cc jump _ftrace_stub (bp);
# endif

	/* save first/second/third function arg and the return register.
	 * Stack layout after these pushes (sp grows down):
	 *   [sp+12] r2, [sp+8] r0, [sp+4] r1, [sp+0] rets,
	 * and [sp+16] is the previous RETS that GCC pushed for us.
	 */
	[--sp] = r2;
	[--sp] = r0;
	[--sp] = r1;
	[--sp] = rets;

	/* function_trace_call(unsigned long ip, unsigned long parent_ip):
	 *  ip: this point was called by ...
	 *  parent_ip: ... this function
	 * the ip itself will need adjusting for the mcount call
	 */
	r0 = rets;
	r1 = [sp + 16];			/* skip the 4 local regs on stack */
	r0 += -MCOUNT_INSN_SIZE;

	/* runtime-patched call site: ftrace rewrites this to the tracer */
.globl _ftrace_call
_ftrace_call:
	call _ftrace_stub

# ifdef CONFIG_FUNCTION_GRAPH_TRACER
	/* runtime-patched site: becomes a jump into the graph caller */
.globl _ftrace_graph_call
_ftrace_graph_call:
	nop;	/* jump _ftrace_graph_caller; */
# endif

	/* restore state and get out of dodge */
.Lfinish_trace:
	rets = [sp++];
	r1 = [sp++];
	r0 = [sp++];
	r2 = [sp++];

.globl _ftrace_stub
_ftrace_stub:
	rts;
ENDPROC(_ftrace_caller)

#else

/* See documentation for _ftrace_caller */
ENTRY(__mcount)
# ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
	/* optional micro optimization: return if stopped */
	p1.l = _function_trace_stop;
	p1.h = _function_trace_stop;
	r3 = [p1];
	cc = r3 == 0;
	if ! cc jump _ftrace_stub (bp);
# endif

	/* save third function arg early so we can do testing below */
	[--sp] = r2;

	/* load the function pointer to the tracer */
	p0.l = _ftrace_trace_function;
	p0.h = _ftrace_trace_function;
	r3 = [p0];

	/* optional micro optimization: don't call the stub tracer */
	r2.l = _ftrace_stub;
	r2.h = _ftrace_stub;
	cc = r2 == r3;
	if ! cc jump .Ldo_trace;

# ifdef CONFIG_FUNCTION_GRAPH_TRACER
	/* if the ftrace_graph_return function pointer is not set to
	 * the ftrace_stub entry, call prepare_ftrace_return().
	 * (r2 still holds the _ftrace_stub address from the test above.)
	 */
	p0.l = _ftrace_graph_return;
	p0.h = _ftrace_graph_return;
	r3 = [p0];
	cc = r2 == r3;
	if ! cc jump _ftrace_graph_caller;

	/* similarly, if the ftrace_graph_entry function pointer is not
	 * set to the ftrace_graph_entry_stub entry, ...
	 */
	p0.l = _ftrace_graph_entry;
	p0.h = _ftrace_graph_entry;
	r2.l = _ftrace_graph_entry_stub;
	r2.h = _ftrace_graph_entry_stub;
	r3 = [p0];
	cc = r2 == r3;
	if ! cc jump _ftrace_graph_caller;
# endif

	/* nothing to trace: undo the early r2 push and return */
	r2 = [sp++];
	rts;

.Ldo_trace:

	/* save first/second function arg and the return register
	 * (r2 was already pushed above, so 4 regs total are on the stack)
	 */
	[--sp] = r0;
	[--sp] = r1;
	[--sp] = rets;

	/* setup the tracer function */
	p0 = r3;

	/* function_trace_call(unsigned long ip, unsigned long parent_ip):
	 *  ip: this point was called by ...
	 *  parent_ip: ... this function
	 * the ip itself will need adjusting for the mcount call
	 */
	r0 = rets;
	r1 = [sp + 16];			/* skip the 4 local regs on stack */
	r0 += -MCOUNT_INSN_SIZE;

	/* call the tracer */
	call (p0);

	/* restore state and get out of dodge */
.Lfinish_trace:
	rets = [sp++];
	r1 = [sp++];
	r0 = [sp++];
	r2 = [sp++];

.globl _ftrace_stub
_ftrace_stub:
	rts;
ENDPROC(__mcount)

#endif

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/* The prepare_ftrace_return() function is similar to the trace function
 * except it takes a pointer to the location of the frompc.  This is so
 * the prepare_ftrace_return() can hijack it temporarily for probing
 * purposes.
 *
 * Entered with 4 saved regs already on the stack in the dynamic case;
 * in the non-dynamic case only r2 has been pushed (by __mcount), so the
 * remaining three are pushed here.  Either way, sp+16 ends up pointing
 * at the GCC-pushed previous RETS slot that gets hijacked.
 */
ENTRY(_ftrace_graph_caller)
# ifndef CONFIG_DYNAMIC_FTRACE
	/* save first/second function arg and the return register */
	[--sp] = r0;
	[--sp] = r1;
	[--sp] = rets;

	/* prepare_ftrace_return(parent, self_addr, frame_pointer) */
	r0 = sp;	/* unsigned long *parent */
	r1 = rets;	/* unsigned long self_addr */
# else
	r0 = sp;	/* unsigned long *parent */
	r1 = [sp];	/* unsigned long self_addr (saved rets) */
# endif
# ifdef CONFIG_HAVE_FUNCTION_GRAPH_FP_TEST
	r2 = fp;	/* unsigned long frame_pointer */
# endif
	r0 += 16;	/* skip the 4 local regs on stack */
	r1 += -MCOUNT_INSN_SIZE;
	call _prepare_ftrace_return;

	jump .Lfinish_trace;
ENDPROC(_ftrace_graph_caller)

/* Undo the rewrite caused by ftrace_graph_caller().  The common function
 * ftrace_return_to_handler() will return the original rets so we can
 * restore it and be on our way.
 */
ENTRY(_return_to_handler)
	/* make sure original return values are saved */
	[--sp] = p0;
	[--sp] = r0;
	[--sp] = r1;

	/* get original return address */
# ifdef CONFIG_HAVE_FUNCTION_GRAPH_FP_TEST
	r0 = fp;	/* Blackfin is sane, so omit this */
# endif
	call _ftrace_return_to_handler;
	rets = r0;

	/* anomaly 05000371 - make sure we have at least three instructions
	 * between rets setting and the return
	 * (the three pops below provide exactly that spacing)
	 */
	r1 = [sp++];
	r0 = [sp++];
	p0 = [sp++];
	rts;
ENDPROC(_return_to_handler)
#endif