Source: include/trace/perf.h — torvalds/linux, branch master (perf tracepoint probe generation macros).
1
/* SPDX-License-Identifier: GPL-2.0 */

/*
 * Perf-probe generation stage of the trace event macro machinery.
 * Included (via the tracepoint header expansion) once per trace header
 * when CONFIG_PERF_EVENTS is enabled; expands each DECLARE_EVENT_CLASS
 * into a perf_trace_##call() callback.
 */
#undef TRACE_SYSTEM_VAR

#ifdef CONFIG_PERF_EVENTS

#include "stages/stage6_event_callback.h"

/*
 * __perf_count()/__perf_task() are side-effecting helpers usable inside
 * an event's assignment block: they store a copy of their argument into
 * the local __count / __task variables declared by __DECLARE_EVENT_CLASS
 * below, so the values reach perf_trace_run_bpf_submit().
 */
#undef __perf_count
#define __perf_count(c)	(__count = (c))

#undef __perf_task
#define __perf_task(t)	(__task = (t))
/*
 * Core expansion: emit do_perf_trace_##call(), the worker that collects
 * the event payload and hands it to perf.
 *
 * Flow (all lines are part of one macro body, hence the continuations):
 *  - compute the dynamic-data size for this event instance;
 *  - bail out early when nobody is listening (no BPF program attached,
 *    no per-CPU perf event, and no explicit __perf_task() target —
 *    __builtin_constant_p(!__task) lets the compiler drop the check
 *    when __task is provably NULL at compile time);
 *  - size the record: u64-aligned, with room for a u32 carved out and
 *    given back so the trailing padding stays within the u64 alignment;
 *  - allocate a per-CPU perf trace buffer (also returns saved regs and
 *    a recursion context cookie in rctx);
 *  - capture the caller's registers, run the event's tstruct/assign
 *    blocks to fill the entry, then submit through the BPF-aware path.
 */
#undef __DECLARE_EVENT_CLASS
#define __DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
static notrace void							\
do_perf_trace_##call(void *__data, proto)				\
{									\
	struct trace_event_call *event_call = __data;			\
	struct trace_event_data_offsets_##call __maybe_unused __data_offsets;\
	struct trace_event_raw_##call *entry;				\
	struct pt_regs *__regs;						\
	u64 __count = 1;						\
	struct task_struct *__task = NULL;				\
	struct hlist_head *head;					\
	int __entry_size;						\
	int __data_size;						\
	int rctx;							\
									\
	__data_size = trace_event_get_offsets_##call(&__data_offsets, args); \
									\
	head = this_cpu_ptr(event_call->perf_events);			\
	if (!bpf_prog_array_valid(event_call) &&			\
	    __builtin_constant_p(!__task) && !__task &&			\
	    hlist_empty(head))						\
		return;							\
									\
	__entry_size = ALIGN(__data_size + sizeof(*entry) + sizeof(u32),\
			     sizeof(u64));				\
	__entry_size -= sizeof(u32);					\
									\
	entry = perf_trace_buf_alloc(__entry_size, &__regs, &rctx);	\
	if (!entry)							\
		return;							\
									\
	perf_fetch_caller_regs(__regs);					\
									\
	tstruct								\
									\
	{ assign; }							\
									\
	perf_trace_run_bpf_submit(entry, __entry_size, rctx,		\
				  event_call, __count, __regs,		\
				  head, __task);			\
}
58
/*
 * Define unused __count and __task variables to use @args to pass
 * arguments to do_perf_trace_##call. This is needed because the
 * macros __perf_count and __perf_task introduce the side-effect to
 * store copies into those local variables.
 */
#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print)	\
__DECLARE_EVENT_CLASS(call, PARAMS(proto), PARAMS(args), PARAMS(tstruct), \
		      PARAMS(assign), PARAMS(print))			\
static notrace void							\
perf_trace_##call(void *__data, proto)					\
{									\
	u64 __count __attribute__((unused));				\
	struct task_struct *__task __attribute__((unused));		\
									\
	do_perf_trace_##call(__data, args);				\
}
77
/*
 * Syscall-entry/exit variant of DECLARE_EVENT_CLASS. Syscall tracepoints
 * are called from faultable context, so this wrapper documents that with
 * might_fault() and then disables preemption around the worker, which
 * expects to run non-preemptibly (it uses this_cpu_ptr and a per-CPU
 * perf buffer).
 */
#undef DECLARE_EVENT_SYSCALL_CLASS
#define DECLARE_EVENT_SYSCALL_CLASS(call, proto, args, tstruct, assign, print) \
__DECLARE_EVENT_CLASS(call, PARAMS(proto), PARAMS(args), PARAMS(tstruct), \
		      PARAMS(assign), PARAMS(print))			\
static notrace void							\
perf_trace_##call(void *__data, proto)					\
{									\
	u64 __count __attribute__((unused));				\
	struct task_struct *__task __attribute__((unused));		\
									\
	might_fault();							\
	preempt_disable_notrace();					\
	do_perf_trace_##call(__data, args);				\
	preempt_enable_notrace();					\
}
92
93
/*
 * This part is compiled out, it is only here as a build time check
 * to make sure that if the tracepoint handling changes, the
 * perf probe will fail to compile unless it too is updated.
 */
#undef DEFINE_EVENT
#define DEFINE_EVENT(template, call, proto, args)			\
static inline void perf_test_probe_##call(void)				\
{									\
	check_trace_callback_type_##call(perf_trace_##template);	\
}
104
105
106
/* A custom print format does not change the perf probe; reuse DEFINE_EVENT. */
#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, name, proto, args, print)	\
	DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))
109
110
/*
 * Re-read the user's trace header with the macros defined above in
 * force, generating the perf probes for every event it declares.
 */
#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

/* Internal helper macro; do not leak it into later generation stages. */
#undef __DECLARE_EVENT_CLASS

#endif /* CONFIG_PERF_EVENTS */
115
116