Path: blob/master/arch/hexagon/include/uapi/asm/registers.h
49147 views
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
/*
 * Register definitions for the Hexagon architecture
 */

#ifndef _ASM_REGISTERS_H
#define _ASM_REGISTERS_H

#ifndef __ASSEMBLY__

/* See kernel/entry.S for further documentation. */

/*
 * Entry code copies the event record out of guest registers into
 * this structure (which is on the stack).
 */
struct hvm_event_record {
	unsigned long vmel;	/* Event Linkage (return address) */
	unsigned long vmest;	/* Event context - pre-event SSR values */
	unsigned long vmpsp;	/* Previous stack pointer */
	unsigned long vmbadva;	/* Bad virtual address for addressing events */
};

/*
 * Saved register state.  Each pair of 32-bit registers is overlaid
 * with a 64-bit alias via an anonymous union; presumably this lets
 * the entry code save/restore both halves with one double-word
 * access -- see kernel/entry.S.
 */
struct pt_regs {
	long restart_r0;	/* R0 checkpoint for syscall restart */
	long syscall_nr;	/* Only used in system calls */
	union {
		struct {
			unsigned long usr;
			unsigned long preds;
		};
		long long int predsusr;
	};
	union {
		struct {
			unsigned long m0;
			unsigned long m1;
		};
		long long int m1m0;
	};
	union {
		struct {
			unsigned long sa1;
			unsigned long lc1;
		};
		long long int lc1sa1;
	};
	union {
		struct {
			unsigned long sa0;
			unsigned long lc0;
		};
		long long int lc0sa0;
	};
	union {
		struct {
			unsigned long ugp;
			unsigned long gp;
		};
		long long int gpugp;
	};
	union {
		struct {
			unsigned long cs0;
			unsigned long cs1;
		};
		long long int cs1cs0;
	};
	/*
	 * Be extremely careful with rearranging these, if at all.  Some code
	 * assumes the 32 registers exist exactly like this in memory;
	 * e.g. kernel/ptrace.c
	 * e.g. kernel/signal.c (restore_sigcontext)
	 */
	union {
		struct {
			unsigned long r00;
			unsigned long r01;
		};
		long long int r0100;
	};
	union {
		struct {
			unsigned long r02;
			unsigned long r03;
		};
		long long int r0302;
	};
	union {
		struct {
			unsigned long r04;
			unsigned long r05;
		};
		long long int r0504;
	};
	union {
		struct {
			unsigned long r06;
			unsigned long r07;
		};
		long long int r0706;
	};
	union {
		struct {
			unsigned long r08;
			unsigned long r09;
		};
		long long int r0908;
	};
	union {
		struct {
			unsigned long r10;
			unsigned long r11;
		};
		long long int r1110;
	};
	union {
		struct {
			unsigned long r12;
			unsigned long r13;
		};
		long long int r1312;
	};
	union {
		struct {
			unsigned long r14;
			unsigned long r15;
		};
		long long int r1514;
	};
	union {
		struct {
			unsigned long r16;
			unsigned long r17;
		};
		long long int r1716;
	};
	union {
		struct {
			unsigned long r18;
			unsigned long r19;
		};
		long long int r1918;
	};
	union {
		struct {
			unsigned long r20;
			unsigned long r21;
		};
		long long int r2120;
	};
	union {
		struct {
			unsigned long r22;
			unsigned long r23;
		};
		long long int r2322;
	};
	union {
		struct {
			unsigned long r24;
			unsigned long r25;
		};
		long long int r2524;
	};
	union {
		struct {
			unsigned long r26;
			unsigned long r27;
		};
		long long int r2726;
	};
	union {
		struct {
			unsigned long r28;
			unsigned long r29;
		};
		long long int r2928;
	};
	union {
		struct {
			unsigned long r30;
			unsigned long r31;
		};
		long long int r3130;
	};
	/* VM dispatch pushes event record onto stack - we can build on it */
	struct hvm_event_record hvmer;
};

/* Defines to conveniently access the values */

/*
 * As of the VM spec 0.5, these registers are now set/retrieved via a
 * VM call.  On the in-bound side, we just fetch the values
 * at the entry points and stuff them into the old record in pt_regs.
 * However, on the outbound side, probably at VM rte, we set the
 * registers back.
 */

#define pt_elr(regs) ((regs)->hvmer.vmel)
#define pt_set_elr(regs, val) ((regs)->hvmer.vmel = (val))
#define pt_cause(regs) ((regs)->hvmer.vmest & (HVM_VMEST_CAUSE_MSK))
#define user_mode(regs) \
	(((regs)->hvmer.vmest & (HVM_VMEST_UM_MSK << HVM_VMEST_UM_SFT)) != 0)
#define ints_enabled(regs) \
	(((regs)->hvmer.vmest & (HVM_VMEST_IE_MSK << HVM_VMEST_IE_SFT)) != 0)
#define pt_psp(regs) ((regs)->hvmer.vmpsp)
#define pt_badva(regs) ((regs)->hvmer.vmbadva)

#define pt_set_singlestep(regs) ((regs)->hvmer.vmest |= (1<<HVM_VMEST_SS_SFT))
#define pt_clr_singlestep(regs) ((regs)->hvmer.vmest &= ~(1<<HVM_VMEST_SS_SFT))

/* Point both the VM's return stack pointer and saved R29 at sp. */
#define pt_set_rte_sp(regs, sp) do {\
	pt_psp(regs) = (regs)->r29 = (sp);\
	} while (0)

#define pt_set_kmode(regs) \
	(regs)->hvmer.vmest = (HVM_VMEST_IE_MSK << HVM_VMEST_IE_SFT)

#define pt_set_usermode(regs) \
	(regs)->hvmer.vmest = (HVM_VMEST_UM_MSK << HVM_VMEST_UM_SFT) \
			    | (HVM_VMEST_IE_MSK << HVM_VMEST_IE_SFT)

#endif  /* ifndef __ASSEMBLY__ */

#endif