#include <linux/init.h>
#include <linux/ftrace.h>
#include <linux/uaccess.h>
#include <linux/kprobes.h>
#include <linux/ptrace.h>
#include <linux/jump_label.h>

#include <asm/assembly.h>
#include <asm/sections.h>
#include <asm/ftrace.h>
#include <asm/text-patching.h>

#define __hot __section(".text.hot")

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
static DEFINE_STATIC_KEY_FALSE(ftrace_graph_enable);
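
/*
 * Hook the return address of the traced function: if the function graph
 * tracer accepts this call, *parent is redirected to the assembly stub
 * parisc_return_to_handler so the tracer also sees the function's return.
 */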
static void __hot prepare_ftrace_return(unsigned long *parent,
					unsigned long self_addr)
{
	unsigned long old;
	extern int parisc_return_to_handler;

	if (unlikely(ftrace_graph_is_dead()))
		return;

	if (unlikely(atomic_read(&current->tracing_graph_pause)))
		return;

	old = *parent;

	if (!function_graph_enter(old, self_addr, 0, NULL))
		/* divert the return to parisc_return_to_handler() */
		*parent = (unsigned long) &parisc_return_to_handler;
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

static ftrace_func_t ftrace_func;
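
/*
 * Common C entry point reached from the architecture's ftrace assembly
 * stubs: forwards the event to the currently installed ftrace_func and,
 * when the graph tracer is enabled, hands the caller's stacked return
 * pointer to prepare_ftrace_return().
 */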
asmlinkage void notrace __hot ftrace_function_trampoline(unsigned long parent,
				unsigned long self_addr,
				unsigned long org_sp_gr3,
				struct ftrace_regs *fregs)
{
	extern struct ftrace_ops *function_trace_op;

	ftrace_func(self_addr, parent, function_trace_op, fregs);

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	if (static_branch_unlikely(&ftrace_graph_enable)) {
		unsigned long *parent_rp;

		/* calculate pointer to the %rp slot in the caller's frame */
		parent_rp = (unsigned long *) (org_sp_gr3 - RP_OFFSET);

		/* sanity check: the stacked %rp should match parent */
		if (*parent_rp != parent)
			return;

		prepare_ftrace_return(parent_rp, self_addr);
		return;
	}
#endif
}

#if defined(CONFIG_DYNAMIC_FTRACE) && defined(CONFIG_FUNCTION_GRAPH_TRACER)
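/*
 * The ftrace core calls these to switch the function graph tracer on and
 * off; on parisc this flips the static key tested in
 * ftrace_function_trampoline() instead of live-patching a graph caller.
 */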
int ftrace_enable_ftrace_graph_caller(void)
{
	static_key_enable(&ftrace_graph_enable.key);
	return 0;
}

int ftrace_disable_ftrace_graph_caller(void)
{
	static_key_disable(&ftrace_graph_enable.key);
	return 0;
}
#endif

#ifdef CONFIG_DYNAMIC_FTRACE
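/* Install the callback invoked from ftrace_function_trampoline(). */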
int ftrace_update_ftrace_func(ftrace_func_t func)
{
	ftrace_func = func;
	return 0;
}

int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
		       unsigned long addr)
{
	return 0;
}

unsigned long ftrace_call_adjust(unsigned long addr)
{
	/* rec->ip is moved to the last NOP of the patchable entry area */
	return addr + (FTRACE_PATCHABLE_FUNCTION_SIZE - 1) * 4;
}
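
/*
 * Enable tracing of a call site: overwrite the patchable NOP area ending
 * at rec->ip with a small trampoline that loads addr and branches to it.
 * The area must still consist of NOPs, otherwise -EINVAL is returned.
 */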
int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	u32 insn[FTRACE_PATCHABLE_FUNCTION_SIZE];
	u32 *tramp;
	int size, ret, i;
	void *ip;

#ifdef CONFIG_64BIT
	unsigned long addr2 =
		(unsigned long)dereference_function_descriptor((void *)addr);

	/* used when rec->ip is doubleword aligned */
	u32 ftrace_trampoline[] = {
		0x73c10208,
		0x0c2110c1,
		0xe820d002,
		addr2 >> 32,
		addr2 & 0xffffffff,
		0xe83f1fd7,
	};

	/*
	 * Used when rec->ip is only word aligned; the 64-bit target is
	 * placed first so it stays doubleword aligned.
	 */
	u32 ftrace_trampoline_unaligned[] = {
		addr2 >> 32,
		addr2 & 0xffffffff,
		0x37de0200,
		0x73c13e01,
		0x34213ff9,
		0x50213fc1,
		0xe820d002,
		0xe83f1fcf,
	};

	BUILD_BUG_ON(ARRAY_SIZE(ftrace_trampoline_unaligned) >
				FTRACE_PATCHABLE_FUNCTION_SIZE);
#else
	/*
	 * 32-bit trampoline: target address first, followed by the code
	 * that loads it and branches to it.
	 */
	u32 ftrace_trampoline[] = {
		(u32)addr,
		0x6fc10080,
		0x48213fd1,
		0xe820c002,
		0xe83f1fdf,
	};
#endif

	BUILD_BUG_ON(ARRAY_SIZE(ftrace_trampoline) >
				FTRACE_PATCHABLE_FUNCTION_SIZE);

	size = sizeof(ftrace_trampoline);
	tramp = ftrace_trampoline;

#ifdef CONFIG_64BIT
	/* keep the embedded 64-bit target address doubleword aligned */
	if (rec->ip & 0x4) {
		size = sizeof(ftrace_trampoline_unaligned);
		tramp = ftrace_trampoline_unaligned;
	}
#endif

	ip = (void *)(rec->ip + 4 - size);

	/* sanity check: the patch area must still contain only NOPs */
	ret = copy_from_kernel_nofault(insn, ip, size);
	if (ret)
		return ret;

	for (i = 0; i < size / 4; i++) {
		if (insn[i] != INSN_NOP)
			return -EINVAL;
	}

	__patch_text_multiple(ip, tramp, size);
	return 0;
}
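
/*
 * Disable tracing of a call site again: fill the patched area with NOPs,
 * starting with the live instruction at rec->ip.
 */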
int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
		    unsigned long addr)
{
	u32 insn[FTRACE_PATCHABLE_FUNCTION_SIZE];
	int i;

	for (i = 0; i < ARRAY_SIZE(insn); i++)
		insn[i] = INSN_NOP;

	/*
	 * Patch the instruction at rec->ip first so nothing branches into
	 * the trampoline while the remaining words are rewritten.
	 */
	__patch_text((void *)rec->ip, INSN_NOP);
	__patch_text_multiple((void *)rec->ip + 4 - sizeof(insn),
			      insn, sizeof(insn)-4);
	return 0;
}
#endif /* CONFIG_DYNAMIC_FTRACE */

#ifdef CONFIG_KPROBES_ON_FTRACE
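/*
 * ftrace callback used when a kprobe sits on an ftrace patch site
 * (KPROBES_ON_FTRACE): the probe is serviced here, without a trap, by
 * adjusting the instruction address queue (iaoq) around the handlers.
 */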
void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
			   struct ftrace_ops *ops, struct ftrace_regs *fregs)
{
	struct kprobe_ctlblk *kcb;
	struct pt_regs *regs;
	struct kprobe *p;
	int bit;

	if (unlikely(kprobe_ftrace_disabled))
		return;

	bit = ftrace_test_recursion_trylock(ip, parent_ip);
	if (bit < 0)
		return;

	regs = ftrace_get_regs(fregs);
	p = get_kprobe((kprobe_opcode_t *)ip);
	if (unlikely(!p) || kprobe_disabled(p))
		goto out;

	if (kprobe_running()) {
		kprobes_inc_nmissed_count(p);
		goto out;
	}

	__this_cpu_write(current_kprobe, p);

	kcb = get_kprobe_ctlblk();
	kcb->kprobe_status = KPROBE_HIT_ACTIVE;

	/* the pre_handler sees the probed address as the current insn */
	regs->iaoq[0] = ip;
	regs->iaoq[1] = ip + 4;

	if (!p->pre_handler || !p->pre_handler(p, regs)) {
		/* continue execution past the probed instruction */
		regs->iaoq[0] = ip + 4;
		regs->iaoq[1] = ip + 8;

		if (unlikely(p->post_handler)) {
			kcb->kprobe_status = KPROBE_HIT_SSDONE;
			p->post_handler(p, regs, 0);
		}
	}
	__this_cpu_write(current_kprobe, NULL);
out:
	ftrace_test_recursion_unlock(bit);
}
NOKPROBE_SYMBOL(kprobe_ftrace_handler);
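
/*
 * No instruction slot for single-stepping is needed here:
 * kprobe_ftrace_handler() resumes execution directly after the probed
 * address.
 */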
int arch_prepare_kprobe_ftrace(struct kprobe *p)
{
	p->ainsn.insn = NULL;
	return 0;
}
#endif