GitHub Repository: torvalds/linux
Path: blob/master/arch/arm/kernel/perf_callchain.c
// SPDX-License-Identifier: GPL-2.0
/*
 * ARM callchain support
 *
 * Copyright (C) 2009 picoChip Designs, Ltd., Jamie Iles
 * Copyright (C) 2010 ARM Ltd., Will Deacon <[email protected]>
 *
 * This code is based on the ARM OProfile backtrace code.
 */
#include <linux/perf_event.h>
#include <linux/uaccess.h>

#include <asm/stacktrace.h>

/*
 * The registers we're interested in are at the end of the variable
 * length saved register structure. The fp points at the end of this
 * structure so the address of this struct is:
 * (struct frame_tail *)(xxx->fp)-1
 *
 * This code has been adapted from the ARM OProfile support.
 */
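/*
 * For illustration (assuming the APCS frame layout described above, with
 * higher addresses towards the top), the tail sits immediately below the
 * address held in fp:
 *
 *	          ...
 *	+------------------+  <---- xxx->fp
 *	| lr               |
 *	| sp               |
 *	| fp               |  <---- (struct frame_tail *)(xxx->fp) - 1
 *	+------------------+
 *	          ...
 */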
struct frame_tail {
	struct frame_tail __user *fp;
	unsigned long sp;
	unsigned long lr;
} __attribute__((packed));

/*
 * Get the return address for a single stackframe and return a pointer to the
 * next frame tail.
 */
static struct frame_tail __user *
user_backtrace(struct frame_tail __user *tail,
	       struct perf_callchain_entry_ctx *entry)
{
	struct frame_tail buftail;
	unsigned long err;

	if (!access_ok(tail, sizeof(buftail)))
		return NULL;

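	/*
	 * Copy the frame tail with page faults disabled: this is called
	 * while a sample is being recorded, so a faulting user access must
	 * fail fast instead of being handled (which could sleep).
	 */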
	pagefault_disable();
	err = __copy_from_user_inatomic(&buftail, tail, sizeof(buftail));
	pagefault_enable();

	if (err)
		return NULL;

	perf_callchain_store(entry, buftail.lr);

	/*
	 * Frame pointers should strictly progress back up the stack
	 * (towards higher addresses).
	 */
	if (tail + 1 >= buftail.fp)
		return NULL;

	return buftail.fp - 1;
}

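/*
 * Record the user-space callchain: store the sampled pc, then follow the
 * frame-pointer chain via user_backtrace() until it ends, the entry limit
 * is reached or the frame pointer is not word-aligned.
 */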
void
perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs)
{
	struct frame_tail __user *tail;

	perf_callchain_store(entry, regs->ARM_pc);

	if (!current->mm)
		return;

	tail = (struct frame_tail __user *)regs->ARM_fp - 1;

	while ((entry->nr < entry->max_stack) &&
	       tail && !((unsigned long)tail & 0x3))
		tail = user_backtrace(tail, entry);
}

/*
 * Gets called by walk_stackframe() for every stackframe. This will be called
 * whilst unwinding the stackframe and is like a subroutine return so we use
 * the PC.
 */
static bool
callchain_trace(void *data, unsigned long pc)
{
	struct perf_callchain_entry_ctx *entry = data;
	return perf_callchain_store(entry, pc) == 0;
}

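/*
 * Record the kernel callchain by walking the kernel stack frames reached
 * from the sampled registers, storing one pc per frame via callchain_trace().
 */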
void
perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs)
{
	struct stackframe fr;

	arm_get_current_stackframe(regs, &fr);
	walk_stackframe(&fr, callchain_trace, entry);
}