Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
torvalds
GitHub Repository: torvalds/linux
Path: blob/master/drivers/accel/amdxdna/amdxdna_ctx.h
51353 views
1
/* SPDX-License-Identifier: GPL-2.0 */
2
/*
3
* Copyright (C) 2022-2024, Advanced Micro Devices, Inc.
4
*/
5
6
#ifndef _AMDXDNA_CTX_H_
7
#define _AMDXDNA_CTX_H_
8
9
#include <linux/bitfield.h>
10
11
#include "amdxdna_gem.h"
12
13
struct amdxdna_hwctx_priv;
14
15
/*
 * Opcodes of ERT (embedded runtime) commands placed in the header of an
 * amdxdna_cmd. The explicit, non-contiguous values suggest a fixed
 * firmware/runtime ABI — do not renumber.
 */
enum ert_cmd_opcode {
	ERT_START_CU = 0,			/* start a compute unit */
	ERT_CMD_CHAIN = 19,			/* payload is struct amdxdna_cmd_chain */
	ERT_START_NPU = 20,			/* payload is struct amdxdna_cmd_start_npu */
	ERT_START_NPU_PREEMPT = 21,		/* payload is struct amdxdna_cmd_preempt_data */
	ERT_START_NPU_PREEMPT_ELF = 22,		/* preemptible start, ELF flavor — TODO confirm payload */
	ERT_INVALID_CMD = ~0U,			/* sentinel: no/invalid opcode */
};
23
24
/*
 * Lifecycle states stored in the AMDXDNA_CMD_STATE field of the command
 * header (see amdxdna_cmd_set_state()/amdxdna_cmd_get_state()).
 * AMDXDNA_CMD_STATE is a 4-bit field, so values must stay below 16.
 */
enum ert_cmd_state {
	ERT_CMD_STATE_INVALID,		/* not a valid command */
	ERT_CMD_STATE_NEW,		/* created, not yet queued */
	ERT_CMD_STATE_QUEUED,		/* queued, waiting for execution */
	ERT_CMD_STATE_RUNNING,		/* being executed by hardware */
	ERT_CMD_STATE_COMPLETED,	/* finished successfully */
	ERT_CMD_STATE_ERROR,		/* finished with an error */
	ERT_CMD_STATE_ABORT,		/* aborted before completion */
	ERT_CMD_STATE_SUBMITTED,	/* handed off to firmware — TODO confirm vs QUEUED */
	ERT_CMD_STATE_TIMEOUT,		/* hardware did not finish in time */
	ERT_CMD_STATE_NORESPONSE,	/* no response from hardware/firmware */
};
36
37
/*
 * Interpretation of the beginning of data payload for ERT_START_NPU in
 * amdxdna_cmd. The rest of the payload in amdxdna_cmd is regular kernel args.
 */
struct amdxdna_cmd_start_npu {
	u64 buffer;       /* instruction buffer address */
	u32 buffer_size;  /* size of buffer in bytes */
	u32 prop_count;	  /* properties count */
	u32 prop_args[];  /* properties and regular kernel arguments */
};
47
48
/*
 * Interpretation of the beginning of data payload for ERT_CMD_CHAIN in
 * amdxdna_cmd. The rest of the payload in amdxdna_cmd is cmd BO handles.
 */
struct amdxdna_cmd_chain {
	u32 command_count;	/* number of entries in data[] */
	u32 submit_index;	/* index of next command to submit — TODO confirm semantics */
	u32 error_index;	/* index of failing command on error — TODO confirm semantics */
	u32 reserved[3];	/* padding, keeps data[] 8-byte aligned */
	u64 data[] __counted_by(command_count);	/* chained cmd BO handles */
};
59
60
/*
 * Interpretation of the beginning of data payload for ERT_START_NPU_PREEMPT in
 * amdxdna_cmd. The rest of the payload in amdxdna_cmd is regular kernel args.
 * Save/restore buffers presumably hold preempted-context state — TODO confirm.
 */
struct amdxdna_cmd_preempt_data {
	u64 inst_buf;	  /* instruction buffer address */
	u64 save_buf;	  /* save buffer address */
	u64 restore_buf;  /* restore buffer address */
	u32 inst_size;	  /* size of instruction buffer in bytes */
	u32 save_size;	  /* size of save buffer in bytes */
	u32 restore_size; /* size of restore buffer in bytes */
	u32 inst_prop_cnt; /* properties count */
	u32 prop_args[];  /* properties and regular kernel arguments */
};
74
75
/*
 * Exec buffer command header format. Bit layout of amdxdna_cmd.header;
 * fields are extracted/inserted with FIELD_GET()/FIELD_PREP().
 * Bits 4-9 and 28-31 are not defined here — TODO confirm they are reserved.
 */
#define AMDXDNA_CMD_STATE		GENMASK(3, 0)	/* enum ert_cmd_state */
#define AMDXDNA_CMD_EXTRA_CU_MASK	GENMASK(11, 10)	/* count of extra CU masks — TODO confirm */
#define AMDXDNA_CMD_COUNT		GENMASK(22, 12)	/* payload length — TODO confirm units */
#define AMDXDNA_CMD_OPCODE		GENMASK(27, 23)	/* enum ert_cmd_opcode */

/* An exec buffer command: one header word followed by opcode-specific payload */
struct amdxdna_cmd {
	u32 header;	/* packed per AMDXDNA_CMD_* fields above */
	u32 data[];	/* payload, interpreted per AMDXDNA_CMD_OPCODE */
};
84
85
/* Sentinel returned when no valid compute-unit index exists */
#define INVALID_CU_IDX	(~0U)

/*
 * A hardware context: the unit of execution-state ownership for one client.
 * Created/destroyed via the hwctx ioctls declared at the bottom of this file.
 */
struct amdxdna_hwctx {
	struct amdxdna_client *client;		/* owning client */
	struct amdxdna_hwctx_priv *priv;	/* device-specific private state */
	char *name;				/* human-readable context name */

	u32 id;			/* driver-side context handle — TODO confirm vs fw_ctx_id */
	u32 max_opc;		/* maximum opcode? — TODO confirm meaning */
	u32 num_tiles;		/* number of NPU tiles requested/assigned */
	u32 mem_size;		/* context memory size — TODO confirm units */
	u32 fw_ctx_id;		/* context id as known by firmware */
	u32 col_list_len;	/* number of entries in col_list */
	u32 *col_list;		/* candidate column indices */
	u32 start_col;		/* first column actually assigned */
	u32 num_col;		/* number of columns assigned */
	u32 num_unused_col;	/* assigned but unused columns — TODO confirm */

	struct amdxdna_qos_info qos;			   /* quality-of-service parameters */
	struct amdxdna_hwctx_param_config_cu *cus;	   /* configured compute units */
	u32 syncobj_hdl;				   /* DRM syncobj handle for completion */

	/* Submit/free counters on separate cache lines to avoid false sharing */
	atomic64_t job_submit_cnt;
	atomic64_t job_free_cnt ____cacheline_aligned_in_smp;
};
110
111
/* Convert an embedded drm_sched_job back to its containing amdxdna_sched_job */
#define drm_job_to_xdna_job(j) \
	container_of(j, struct amdxdna_sched_job, base)
113
114
/* Driver-internal job opcodes carried by struct amdxdna_drv_cmd */
enum amdxdna_job_opcode {
	SYNC_DEBUG_BO,		/* sync a debug BO (see amdxdna_hwctx_sync_debug_bo()) */
	ATTACH_DEBUG_BO,	/* attach a debug BO to a context */
	DETACH_DEBUG_BO,	/* detach a debug BO from a context */
};
119
120
/* A driver-generated command (no user exec buffer), passed to amdxdna_cmd_submit() */
struct amdxdna_drv_cmd {
	enum amdxdna_job_opcode opcode;	/* which driver operation to perform */
	u32 result;			/* completion status written back — TODO confirm */
};
124
125
/*
 * One scheduled job: wraps a drm_sched_job plus the command/argument BOs
 * it references. Reference-counted via refcnt; freed through
 * amdxdna_sched_job_cleanup().
 */
struct amdxdna_sched_job {
	struct drm_sched_job base;	/* must stay first for drm_job_to_xdna_job() — TODO confirm */
	struct kref refcnt;		/* job lifetime refcount */
	struct amdxdna_hwctx *hwctx;	/* context this job runs in */
	struct mm_struct *mm;		/* submitting process's mm — TODO confirm why held */
	/* The fence to notice DRM scheduler that job is done by hardware */
	struct dma_fence *fence;
	/* user can wait on this fence */
	struct dma_fence *out_fence;
	bool job_done;			/* hardware reported completion */
	bool job_timeout;		/* job hit its timeout */
	u64 seq;			/* submission sequence number (see amdxdna_cmd_wait()) */
	struct amdxdna_drv_cmd *drv_cmd; /* non-NULL for driver-internal jobs */
	struct amdxdna_gem_obj *cmd_bo;	 /* command buffer object */
	size_t bo_cnt;			 /* number of entries in bos[] */
	struct drm_gem_object *bos[] __counted_by(bo_cnt); /* argument BOs */
};
142
143
static inline u32
144
amdxdna_cmd_get_op(struct amdxdna_gem_obj *abo)
145
{
146
struct amdxdna_cmd *cmd = abo->mem.kva;
147
148
return FIELD_GET(AMDXDNA_CMD_OPCODE, cmd->header);
149
}
150
151
static inline void
152
amdxdna_cmd_set_state(struct amdxdna_gem_obj *abo, enum ert_cmd_state s)
153
{
154
struct amdxdna_cmd *cmd = abo->mem.kva;
155
156
cmd->header &= ~AMDXDNA_CMD_STATE;
157
cmd->header |= FIELD_PREP(AMDXDNA_CMD_STATE, s);
158
}
159
160
static inline enum ert_cmd_state
161
amdxdna_cmd_get_state(struct amdxdna_gem_obj *abo)
162
{
163
struct amdxdna_cmd *cmd = abo->mem.kva;
164
165
return FIELD_GET(AMDXDNA_CMD_STATE, cmd->header);
166
}
167
168
/* Command-BO accessors (implemented in amdxdna_ctx.c) */
void *amdxdna_cmd_get_payload(struct amdxdna_gem_obj *abo, u32 *size);
u32 amdxdna_cmd_get_cu_idx(struct amdxdna_gem_obj *abo);

/* Job and hardware-context lifetime management */
void amdxdna_sched_job_cleanup(struct amdxdna_sched_job *job);
void amdxdna_hwctx_remove_all(struct amdxdna_client *client);
/* Invoke @walk for each hwctx of @client; stops early on nonzero — TODO confirm */
int amdxdna_hwctx_walk(struct amdxdna_client *client, void *arg,
		       int (*walk)(struct amdxdna_hwctx *hwctx, void *arg));
int amdxdna_hwctx_sync_debug_bo(struct amdxdna_client *client, u32 debug_bo_hdl);

/*
 * Submit a command; on success *seq receives the sequence number that
 * amdxdna_cmd_wait() accepts. @drv_cmd is NULL for user submissions —
 * TODO confirm.
 */
int amdxdna_cmd_submit(struct amdxdna_client *client,
		       struct amdxdna_drv_cmd *drv_cmd, u32 cmd_bo_hdls,
		       u32 *arg_bo_hdls, u32 arg_bo_cnt,
		       u32 hwctx_hdl, u64 *seq);

/* Wait for the job identified by @seq on @hwctx_hdl; @timeout presumably in ms — TODO confirm */
int amdxdna_cmd_wait(struct amdxdna_client *client, u32 hwctx_hdl,
		     u64 seq, u32 timeout);

/* DRM ioctl entry points */
int amdxdna_drm_create_hwctx_ioctl(struct drm_device *dev, void *data, struct drm_file *filp);
int amdxdna_drm_config_hwctx_ioctl(struct drm_device *dev, void *data, struct drm_file *filp);
int amdxdna_drm_destroy_hwctx_ioctl(struct drm_device *dev, void *data, struct drm_file *filp);
int amdxdna_drm_submit_cmd_ioctl(struct drm_device *dev, void *data, struct drm_file *filp);
189
190
#endif /* _AMDXDNA_CTX_H_ */
191
192