GitHub Repository: awilliam/linux-vfio
Path: blob/master/arch/blackfin/kernel/ftrace-entry.S
/*
 * mcount and friends -- ftrace stuff
 *
 * Copyright (C) 2009-2010 Analog Devices Inc.
 * Licensed under the GPL-2 or later.
 */

#include <linux/linkage.h>
#include <asm/ftrace.h>

.text

#ifdef CONFIG_DYNAMIC_FTRACE

/* Simple stub so we can boot the kernel until runtime patching has
 * disabled all calls to this. Then it'll be unused.
 */
ENTRY(__mcount)
# if ANOMALY_05000371
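	/* anomaly 05000371 workaround: keep a few instructions between
	 * the call that loaded RETS and the RTS below (see the note near
	 * _return_to_handler at the end of this file)
	 */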
	nop; nop; nop; nop;
# endif
	rts;
ENDPROC(__mcount)

/* GCC will have called us before setting up the function prologue, so we
 * can clobber the normal scratch registers, but we need to make sure to
 * save/restore the registers used for argument passing (R0-R2) in case
 * the profiled function is using them. With data registers, R3 is the
 * only one we can blow away. With pointer registers, we have P0-P2.
 *
 * Upon entry, the RETS will point to the top of the current profiled
 * function. And since GCC pushed the previous RETS for us, the previous
 * function will be waiting there. mmmm pie.
 */
ENTRY(_ftrace_caller)
# ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
	/* optional micro optimization: return if stopped */
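	/* ("(bp)" below is Blackfin's static branch-prediction hint:
	 * predict the conditional jump as taken)
	 */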
	p1.l = _function_trace_stop;
	p1.h = _function_trace_stop;
	r3 = [p1];
	cc = r3 == 0;
	if ! cc jump _ftrace_stub (bp);
# endif

	/* save first/second/third function arg and the return register */
	[--sp] = r2;
	[--sp] = r0;
	[--sp] = r1;
	[--sp] = rets;
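	/* stack layout at this point (each slot is 4 bytes):
	 *   [sp + 16] = previous RETS, pushed by GCC's mcount sequence
	 *   [sp + 12] = r2
	 *   [sp +  8] = r0
	 *   [sp +  4] = r1
	 *   [sp +  0] = rets (return address into the profiled function)
	 */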

	/* function_trace_call(unsigned long ip, unsigned long parent_ip):
	 * ip: this point was called by ...
	 * parent_ip: ... this function
	 * the ip itself will need adjusting for the mcount call
	 */
	r0 = rets;
	r1 = [sp + 16]; /* skip the 4 local regs on stack */
	r0 += -MCOUNT_INSN_SIZE;
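	/* MCOUNT_INSN_SIZE (from asm/ftrace.h) is the size of the call
	 * sequence GCC emits, so r0 now points back at the call site itself
	 */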

.globl _ftrace_call
_ftrace_call:
	call _ftrace_stub

# ifdef CONFIG_FUNCTION_GRAPH_TRACER
.globl _ftrace_graph_call
_ftrace_graph_call:
	nop; /* jump _ftrace_graph_caller; */
# endif
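	/* the two global labels above are runtime patch sites: dynamic
	 * ftrace rewrites the "call _ftrace_stub" at _ftrace_call to call
	 * the active tracer, and turns the nop at _ftrace_graph_call into
	 * the commented-out jump when the graph tracer is enabled
	 */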

	/* restore state and get out of dodge */
.Lfinish_trace:
	rets = [sp++];
	r1 = [sp++];
	r0 = [sp++];
	r2 = [sp++];

.globl _ftrace_stub
_ftrace_stub:
	rts;
ENDPROC(_ftrace_caller)

#else

/* See documentation for _ftrace_caller */
ENTRY(__mcount)
# ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
	/* optional micro optimization: return if stopped */
	p1.l = _function_trace_stop;
	p1.h = _function_trace_stop;
	r3 = [p1];
	cc = r3 == 0;
	if ! cc jump _ftrace_stub (bp);
# endif

	/* save third function arg early so we can do testing below */
	[--sp] = r2;

	/* load the function pointer to the tracer */
	p0.l = _ftrace_trace_function;
	p0.h = _ftrace_trace_function;
	r3 = [p0];

	/* optional micro optimization: don't call the stub tracer */
	r2.l = _ftrace_stub;
	r2.h = _ftrace_stub;
	cc = r2 == r3;
	if ! cc jump .Ldo_trace;

# ifdef CONFIG_FUNCTION_GRAPH_TRACER
	/* if the ftrace_graph_return function pointer is not set to
	 * the ftrace_stub entry, call prepare_ftrace_return().
	 */
	p0.l = _ftrace_graph_return;
	p0.h = _ftrace_graph_return;
	r3 = [p0];
	cc = r2 == r3;
	if ! cc jump _ftrace_graph_caller;

	/* similarly, if the ftrace_graph_entry function pointer is not
	 * set to the ftrace_graph_entry_stub entry, ...
	 */
	p0.l = _ftrace_graph_entry;
	p0.h = _ftrace_graph_entry;
	r2.l = _ftrace_graph_entry_stub;
	r2.h = _ftrace_graph_entry_stub;
	r3 = [p0];
	cc = r2 == r3;
	if ! cc jump _ftrace_graph_caller;
# endif

	r2 = [sp++];
	rts;

.Ldo_trace:

	/* save first/second function arg and the return register */
	[--sp] = r0;
	[--sp] = r1;
	[--sp] = rets;
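	/* together with the earlier r2 push, four words are now saved,
	 * so the previous RETS (pushed by GCC) again sits at [sp + 16]
	 */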

	/* setup the tracer function */
	p0 = r3;

	/* function_trace_call(unsigned long ip, unsigned long parent_ip):
	 * ip: this point was called by ...
	 * parent_ip: ... this function
	 * the ip itself will need adjusting for the mcount call
	 */
	r0 = rets;
	r1 = [sp + 16]; /* skip the 4 local regs on stack */
	r0 += -MCOUNT_INSN_SIZE;

	/* call the tracer */
	call (p0);

	/* restore state and get out of dodge */
.Lfinish_trace:
	rets = [sp++];
	r1 = [sp++];
	r0 = [sp++];
	r2 = [sp++];

.globl _ftrace_stub
_ftrace_stub:
	rts;
ENDPROC(__mcount)

#endif

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/* The prepare_ftrace_return() function is similar to the trace function
 * except it takes a pointer to the location of the frompc. This is so
 * the prepare_ftrace_return() can hijack it temporarily for probing
 * purposes.
 */
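/* C-side prototype, for reference (a sketch of the arch callback):
 *   void prepare_ftrace_return(unsigned long *parent,
 *                              unsigned long self_addr,
 *                              unsigned long frame_pointer);
 * It temporarily replaces *parent with the address of return_to_handler
 * so the traced function's exit can be recorded as well.
 */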
ENTRY(_ftrace_graph_caller)
# ifndef CONFIG_DYNAMIC_FTRACE
	/* save first/second function arg and the return register */
	[--sp] = r0;
	[--sp] = r1;
	[--sp] = rets;

	/* prepare_ftrace_return(parent, self_addr, frame_pointer) */
	r0 = sp; /* unsigned long *parent */
	r1 = rets; /* unsigned long self_addr */
# else
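	/* in the dynamic case we arrive via the patch site in
	 * _ftrace_caller, which already saved r2/r0/r1/rets, so
	 * self_addr is simply the saved rets at the top of the stack
	 */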
	r0 = sp; /* unsigned long *parent */
	r1 = [sp]; /* unsigned long self_addr */
# endif
# ifdef CONFIG_HAVE_FUNCTION_GRAPH_FP_TEST
	r2 = fp; /* unsigned long frame_pointer */
# endif
	r0 += 16; /* skip the 4 local regs on stack */
	r1 += -MCOUNT_INSN_SIZE;
	call _prepare_ftrace_return;

	jump .Lfinish_trace;
ENDPROC(_ftrace_graph_caller)

/* Undo the rewrite caused by ftrace_graph_caller(). The common function
 * ftrace_return_to_handler() will return the original rets so we can
 * restore it and be on our way.
 */
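/* C-side counterpart, for reference (a sketch of the common ftrace API):
 *   unsigned long ftrace_return_to_handler(unsigned long frame_pointer);
 * It returns the original return address that prepare_ftrace_return()
 * stashed away.
 */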
ENTRY(_return_to_handler)
	/* make sure original return values are saved */
	[--sp] = p0;
	[--sp] = r0;
	[--sp] = r1;

	/* get original return address */
# ifdef CONFIG_HAVE_FUNCTION_GRAPH_FP_TEST
	r0 = fp; /* Blackfin is sane, so omit this */
# endif
	call _ftrace_return_to_handler;
	rets = r0;

	/* anomaly 05000371 - make sure we have at least three instructions
	 * between rets setting and the return
	 */
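	/* (the three pops below provide exactly that spacing) */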
	r1 = [sp++];
	r0 = [sp++];
	p0 = [sp++];
	rts;
ENDPROC(_return_to_handler)
#endif