GitHub Repository: torvalds/linux
Path: blob/master/arch/arc/kernel/stacktrace.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * stacktrace.c : stacktracing APIs needed by rest of kernel
 * (wrappers over ARC dwarf based unwinder)
 *
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * vineetg: aug 2009
 * -Implemented CONFIG_STACKTRACE APIs, primarily save_stack_trace_tsk()
 *  for displaying task's kernel mode call stack in /proc/<pid>/stack
 * -Iterator based approach to have single copy of unwinding core and APIs
 *  needing unwinding, implement the logic in iterator regarding:
 *  = which frame onwards to start capture
 *  = which frame to stop capturing (wchan)
 *  = specifics of data structs where trace is saved (CONFIG_STACKTRACE etc.)
 *
 * vineetg: March 2009
 * -Implemented correct versions of thread_saved_pc() and __get_wchan()
 *
 * rajeshwarr: 2008
 * -Initial implementation
 */

#include <linux/ptrace.h>
#include <linux/export.h>
#include <linux/stacktrace.h>
#include <linux/kallsyms.h>
#include <linux/sched/debug.h>

#include <asm/arcregs.h>
#include <asm/unwind.h>
#include <asm/stacktrace.h>
#include <asm/switch_to.h>

/*-------------------------------------------------------------------------
 * Unwinder Iterator
 *-------------------------------------------------------------------------
 */

#ifdef CONFIG_ARC_DW2_UNWIND

static int
seed_unwind_frame_info(struct task_struct *tsk, struct pt_regs *regs,
                       struct unwind_frame_info *frame_info)
{
        if (regs) {
                /*
                 * Asynchronous unwinding of intr/exception
                 * - Just uses the pt_regs passed
                 */
                frame_info->task = tsk;

                frame_info->regs.r27 = regs->fp;
                frame_info->regs.r28 = regs->sp;
                frame_info->regs.r31 = regs->blink;
                frame_info->regs.r63 = regs->ret;
                frame_info->call_frame = 0;
        } else if (tsk == NULL || tsk == current) {
                /*
                 * synchronous unwinding (e.g. dump_stack)
                 * - uses current values of SP and friends
                 */
                unsigned long fp, sp, blink, ret;
                frame_info->task = current;

                __asm__ __volatile__(
                        "mov %0,r27\n\t"
                        "mov %1,r28\n\t"
                        "mov %2,r31\n\t"
                        "mov %3,r63\n\t"
                        : "=r"(fp), "=r"(sp), "=r"(blink), "=r"(ret)
                );

                frame_info->regs.r27 = fp;
                frame_info->regs.r28 = sp;
                frame_info->regs.r31 = blink;
                frame_info->regs.r63 = ret;
                frame_info->call_frame = 0;
        } else {
                /*
                 * Asynchronous unwinding of a likely sleeping task
                 * - first ensure it is actually sleeping
                 * - if so, it will be in __switch_to, kernel mode SP of task
                 *   is safe-kept and BLINK at a well known location in there
                 */

                if (task_is_running(tsk))
                        return -1;

                frame_info->task = tsk;

                frame_info->regs.r27 = TSK_K_FP(tsk);
                frame_info->regs.r28 = TSK_K_ESP(tsk);
                frame_info->regs.r31 = TSK_K_BLINK(tsk);
                frame_info->regs.r63 = (unsigned int)__switch_to;

                /* In the prologue of __switch_to, FP is first saved on the
                 * stack and only then is SP copied into FP. Dwarf assumes the
                 * CFA is FP based, but FP has not been saved yet, so the value
                 * retrieved above is FP's state in the previous frame.
                 * As a workaround, we unwind from the start of __switch_to
                 * and adjust SP accordingly. The other limitation is that
                 * dwarf rules are not generated for the inline assembly code
                 * in the __switch_to macro.
                 */
                frame_info->regs.r27 = 0;
                frame_info->regs.r28 += 60;
                frame_info->call_frame = 0;

        }
        return 0;
}

#endif

notrace noinline unsigned int
arc_unwind_core(struct task_struct *tsk, struct pt_regs *regs,
                int (*consumer_fn) (unsigned int, void *), void *arg)
{
#ifdef CONFIG_ARC_DW2_UNWIND
        int ret = 0, cnt = 0;
        unsigned int address;
        struct unwind_frame_info frame_info;

        if (seed_unwind_frame_info(tsk, regs, &frame_info))
                return 0;

        while (1) {
                address = UNW_PC(&frame_info);

                if (!address || !__kernel_text_address(address))
                        break;

                if (consumer_fn(address, arg) == -1)
                        break;

                ret = arc_unwind(&frame_info);
                if (ret)
                        break;

                frame_info.regs.r63 = frame_info.regs.r31;

                if (cnt++ > 128) {
                        printk("unwinder looping too long, aborting !\n");
                        return 0;
                }
        }

        return address;         /* return the last address it saw */
#else
        /* On ARC, only the Dwarf based unwinder works. fp based backtracing
         * is not possible (even with -fno-omit-frame-pointer) because of the
         * way the function prologue is set up (callee regs are saved first
         * and only then is fp set, not the other way around).
         */
        pr_warn_once("CONFIG_ARC_DW2_UNWIND needs to be enabled\n");
        return 0;

#endif
}
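
/*
 * Illustrative sketch (not part of the original file): how a caller could
 * drive arc_unwind_core() with its own consumer. Each unwound frame's text
 * address is passed to the callback, which may return -1 to stop the walk
 * early, exactly as the callbacks below do. The names __count_frames and
 * frame_budget are hypothetical and exist only for this example.
 */
#if 0   /* example only, never compiled */
static int __count_frames(unsigned int address, void *arg)
{
        unsigned int *remaining = arg;

        if (--(*remaining) == 0)
                return -1;      /* stop the iterator once the budget is used up */

        return 0;
}

static void example_count_current_stack(void)
{
        unsigned int frame_budget = 16;

        /* tsk == NULL and regs == NULL => synchronous unwind of the current stack */
        arc_unwind_core(NULL, NULL, __count_frames, &frame_budget);
}
#endif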

/*-------------------------------------------------------------------------
 * callbacks called by unwinder iterator to implement kernel APIs
 *
 * The callback can return -1 to force the iterator to stop, which by default
 * keeps going till the bottom-most frame.
 *-------------------------------------------------------------------------
 */

/* Call-back which plugs into unwinding core to dump the stack in
 * case of panic/OOPs/BUG etc
 */
static int __print_sym(unsigned int address, void *arg)
{
        const char *loglvl = arg;

        printk("%s %pS\n", loglvl, (void *)address);
        return 0;
}

#ifdef CONFIG_STACKTRACE

/* Call-back which plugs into unwinding core to capture the
 * traces needed by kernel on /proc/<pid>/stack
 */
static int __collect_all(unsigned int address, void *arg)
{
        struct stack_trace *trace = arg;

        if (trace->skip > 0)
                trace->skip--;
        else
                trace->entries[trace->nr_entries++] = address;

        if (trace->nr_entries >= trace->max_entries)
                return -1;

        return 0;
}

static int __collect_all_but_sched(unsigned int address, void *arg)
{
        struct stack_trace *trace = arg;

        if (in_sched_functions(address))
                return 0;

        if (trace->skip > 0)
                trace->skip--;
        else
                trace->entries[trace->nr_entries++] = address;

        if (trace->nr_entries >= trace->max_entries)
                return -1;

        return 0;
}

#endif

static int __get_first_nonsched(unsigned int address, void *unused)
{
        if (in_sched_functions(address))
                return 0;

        return -1;
}

/*-------------------------------------------------------------------------
 * APIs expected by various kernel sub-systems
 *-------------------------------------------------------------------------
 */

noinline void show_stacktrace(struct task_struct *tsk, struct pt_regs *regs,
                              const char *loglvl)
{
        printk("%s\nStack Trace:\n", loglvl);
        arc_unwind_core(tsk, regs, __print_sym, (void *)loglvl);
}
EXPORT_SYMBOL(show_stacktrace);

/* Expected by sched code */
void show_stack(struct task_struct *tsk, unsigned long *sp, const char *loglvl)
{
        show_stacktrace(tsk, NULL, loglvl);
}

/* Another API expected by the scheduler, shows up in "ps" as Wait Channel
 * Of course just returning schedule() would be pointless so unwind until
 * the function is not in scheduler code
 */
unsigned int __get_wchan(struct task_struct *tsk)
{
        return arc_unwind_core(tsk, NULL, __get_first_nonsched, NULL);
}

#ifdef CONFIG_STACKTRACE

/*
 * API required by CONFIG_STACKTRACE, CONFIG_LATENCYTOP.
 * A typical use is when /proc/<pid>/stack is queried by userland
 */
void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
{
        /* Assumes @tsk is sleeping so unwinds from __switch_to */
        arc_unwind_core(tsk, NULL, __collect_all_but_sched, trace);
}

void save_stack_trace(struct stack_trace *trace)
{
        /* Pass NULL for task so it unwinds the current call frame */
        arc_unwind_core(NULL, NULL, __collect_all, trace);
}
EXPORT_SYMBOL_GPL(save_stack_trace);
#endif
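
/*
 * Illustrative sketch (not part of the original file): the struct stack_trace
 * contract that __collect_all() above fills in. The caller supplies the
 * entries[] buffer and max_entries; skip drops that many innermost frames.
 * The buffer size and names used here are hypothetical.
 */
#if 0   /* example only, never compiled */
static void example_capture_current_trace(void)
{
        unsigned long buf[32];
        struct stack_trace trace = {
                .entries     = buf,
                .max_entries = ARRAY_SIZE(buf),
                .skip        = 0,
        };

        save_stack_trace(&trace);
        /* trace.nr_entries now holds how many return addresses were captured */
}
#endif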