GitHub Repository: torvalds/linux
Path: blob/master/drivers/accel/qaic/qaic.h

/* SPDX-License-Identifier: GPL-2.0-only
 *
 * Copyright (c) 2019-2021, The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
 */

#ifndef _QAIC_H_
#define _QAIC_H_

#include <linux/interrupt.h>
#include <linux/kref.h>
#include <linux/mhi.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/srcu.h>
#include <linux/wait.h>
#include <linux/workqueue.h>
#include <drm/drm_device.h>
#include <drm/drm_gem.h>

#define QAIC_DBC_BASE SZ_128K
#define QAIC_DBC_SIZE SZ_4K

#define QAIC_NO_PARTITION -1

#define QAIC_DBC_OFF(i) ((i) * QAIC_DBC_SIZE + QAIC_DBC_BASE)
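
/*
 * Worked example of the layout this macro encodes: SZ_128K is 0x20000 and
 * SZ_4K is 0x1000, so QAIC_DBC_OFF(0) == 0x20000 and QAIC_DBC_OFF(3) ==
 * 0x23000. Each DBC thus owns a 4K register window, starting 128K into
 * the DBC BAR.
 */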

#define to_qaic_bo(obj) container_of(obj, struct qaic_bo, base)
#define to_qaic_drm_device(dev) container_of(dev, struct qaic_drm_device, drm)
#define to_drm(qddev) (&(qddev)->drm)
#define to_accel_kdev(qddev) (to_drm(qddev)->accel->kdev) /* Return Linux device of accel node */
#define to_qaic_device(dev) (to_qaic_drm_device((dev))->qdev)
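
/*
 * These accessors follow the standard container_of() pattern: given a
 * pointer to an embedded member (drm_gem_object, drm_device), they
 * recover the enclosing driver struct. A minimal sketch of typical use;
 * the callback name is hypothetical, not part of this driver:
 *
 *	static void example_gem_free(struct drm_gem_object *obj)
 *	{
 *		struct qaic_bo *bo = to_qaic_bo(obj);
 *
 *		... use bo, which embeds obj as bo->base ...
 *	}
 */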

enum aic_families {
	FAMILY_AIC100,
	FAMILY_AIC200,
	FAMILY_MAX,
};

enum __packed dev_states {
	/* Device is offline or will be very soon */
	QAIC_OFFLINE,
	/* Device is booting, not clear if it's in a usable state */
	QAIC_BOOT,
	/* Device is fully operational */
	QAIC_ONLINE,
};

extern bool datapath_polling;

struct qaic_user {
	/* Uniquely identifies this user for the device */
	int handle;
	struct kref ref_count;
	/* Char device opened by this user */
	struct qaic_drm_device *qddev;
	/* Node in list of users that opened this drm device */
	struct list_head node;
	/* SRCU used to synchronize this user during cleanup */
	struct srcu_struct qddev_lock;
	atomic_t chunk_id;
};

struct dma_bridge_chan {
	/* Pointer to the device struct maintained by the driver */
	struct qaic_device *qdev;
	/* ID of this DMA bridge channel (DBC) */
	unsigned int id;
	/* Synchronizes access to xfer_list */
	spinlock_t xfer_lock;
	/* Base address of the request queue */
	void *req_q_base;
	/* Base address of the response queue */
	void *rsp_q_base;
	/*
	 * Base bus address of the request queue. The response queue's bus
	 * address can be calculated by adding the request queue's size to
	 * this variable (see the sketch after this struct)
	 */
	dma_addr_t dma_addr;
	/* Total size of the request and response queues, in bytes */
	u32 total_size;
	/* Capacity of the request/response queue */
	u32 nelem;
	/* The user that opened this DBC */
	struct qaic_user *usr;
	/*
	 * Request ID of the next memory handle that goes in the request
	 * queue. One memory handle can enqueue more than one request
	 * element; all requests that belong to the same memory handle
	 * share the same request ID
	 */
	u16 next_req_id;
	/* true: DBC is in use; false: DBC not in use */
	bool in_use;
	/*
	 * Base address of device registers. Used to read/write the head
	 * and tail pointers of this DBC's request and response queues.
	 */
	void __iomem *dbc_base;
	/* Head of a list where each node is a memory handle queued in the request queue */
	struct list_head xfer_list;
	/* Synchronizes DBC readers during cleanup */
	struct srcu_struct ch_lock;
	/*
	 * When this DBC is released, any thread waiting on this wait queue
	 * is woken up
	 */
	wait_queue_head_t dbc_release;
	/* Head of a list where each node is a BO associated with this DBC */
	struct list_head bo_lists;
	/* The IRQ line for this DBC. Used for polling */
	unsigned int irq;
	/* Polling work item to simulate interrupts */
	struct work_struct poll_work;
};
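
/*
 * Minimal sketch of the dma_addr relationship noted above, assuming the
 * request queue occupies nelem elements of get_dbc_req_elem_size() bytes
 * each; the wrapper function is hypothetical, the names it uses come
 * from this header:
 *
 *	static dma_addr_t example_rsp_q_bus_addr(struct dma_bridge_chan *dbc)
 *	{
 *		return dbc->dma_addr + dbc->nelem * get_dbc_req_elem_size();
 *	}
 */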

struct qaic_device {
	/* Pointer to the base PCI device struct of our physical device */
	struct pci_dev *pdev;
	/* Req. ID of the request that will be queued next in the MHI control device */
	u32 next_seq_num;
	/* Base address of the MHI BAR */
	void __iomem *bar_mhi;
	/* Base address of the DBCs BAR */
	void __iomem *bar_dbc;
	/* Controller structure for MHI devices */
	struct mhi_controller *mhi_cntrl;
	/* MHI control channel device */
	struct mhi_device *cntl_ch;
	/* List of requests queued in the MHI control device */
	struct list_head cntl_xfer_list;
	/* Synchronizes MHI control device transactions and its xfer list */
	struct mutex cntl_mutex;
	/* Array of DBC structs of this device */
	struct dma_bridge_chan *dbc;
	/* Work queue for tasks related to the MHI control device */
	struct workqueue_struct *cntl_wq;
	/* Synchronizes all the users of the device during cleanup */
	struct srcu_struct dev_lock;
	/* Tracks the state of the device during resets */
	enum dev_states dev_state;
	/* true: a single MSI is used to operate the device */
	bool single_msi;
	/*
	 * true: a tx MHI transaction has failed and an rx buffer is still
	 * queued in the control device. Such a buffer is considered a lost
	 * rx buffer
	 * false: no rx buffer is lost in the control device
	 */
	bool cntl_lost_buf;
	/* Maximum number of DBCs supported by this device */
	u32 num_dbc;
	/* Reference to the drm_device for this device, once it is created */
	struct qaic_drm_device *qddev;
	/* Generates the CRC of a control message (see the sketch after this struct) */
	u32 (*gen_crc)(void *msg);
	/* Validates the CRC of a control message */
	bool (*valid_crc)(void *msg);
	/* MHI "QAIC_TIMESYNC" channel device */
	struct mhi_device *qts_ch;
	/* Work queue for tasks related to the MHI "QAIC_TIMESYNC" channel */
	struct workqueue_struct *qts_wq;
	/* Head of a list of pages allocated by the MHI bootlog device */
	struct list_head bootlog;
	/* MHI bootlog channel device */
	struct mhi_device *bootlog_ch;
	/* Work queue for tasks related to the MHI bootlog device */
	struct workqueue_struct *bootlog_wq;
	/* Synchronizes access to pages in the MHI bootlog device */
	struct mutex bootlog_mutex;
	/* MHI RAS channel device */
	struct mhi_device *ras_ch;
	/* Correctable error count */
	unsigned int ce_count;
	/* Uncorrectable error count */
	unsigned int ue_count;
	/* Uncorrectable non-fatal error count */
	unsigned int ue_nf_count;
};
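
/*
 * Sketch of how the gen_crc/valid_crc callbacks above pair up around a
 * control-channel message; the msg layout and its crc field are
 * hypothetical, only the callbacks come from this header:
 *
 *	msg->crc = qdev->gen_crc(msg);   // on the send path
 *	...
 *	if (!qdev->valid_crc(msg))       // on the receive path
 *		return -EINVAL;
 */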

struct qaic_drm_device {
	/* The drm_device struct embedded in this device */
	struct drm_device drm;
	/* Pointer to the root device struct driven by this driver */
	struct qaic_device *qdev;
	/*
	 * The physical device can be partitioned into a number of logical
	 * devices, and each logical device is given a partition ID. This
	 * member stores that ID. QAIC_NO_PARTITION is a sentinel used to
	 * mark that this drm device is the actual physical device (see the
	 * example after this struct)
	 */
	s32 partition_id;
	/* Head of the list of users who have opened this drm device */
	struct list_head users;
	/* Synchronizes access to the users list */
	struct mutex users_mutex;
};
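
/*
 * Example of the QAIC_NO_PARTITION sentinel described above
 * (hypothetical check, real names from this header):
 *
 *	bool whole_device = qddev->partition_id == QAIC_NO_PARTITION;
 */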

struct qaic_bo {
	struct drm_gem_object base;
	/* Scatter/gather table for an allocated/imported BO */
	struct sg_table *sgt;
	/* Head of the list of slices of this BO */
	struct list_head slices;
	/* Total nents, for all slices of this BO */
	int total_slice_nents;
	/*
	 * Direction of transfer. It can assume only two values,
	 * DMA_TO_DEVICE and DMA_FROM_DEVICE.
	 */
	int dir;
	/* Pointer to the DBC that operates on this BO */
	struct dma_bridge_chan *dbc;
	/* Number of slices that belong to this buffer */
	u32 nr_slice;
	/* Number of slices that have been transferred by the DMA engine */
	u32 nr_slice_xfer_done;
	/*
	 * If true, the user has attached slicing information to this BO by
	 * calling the DRM_IOCTL_QAIC_ATTACH_SLICE_BO ioctl.
	 */
	bool sliced;
	/* Request ID of this BO if it is queued for execution */
	u16 req_id;
	/* Wait on this for completion of a DMA transfer of this BO (see the sketch after this struct) */
	struct completion xfer_done;
	/*
	 * Node in the linked list whose head is dbc->xfer_list.
	 * This list contains BOs that are queued for DMA transfer.
	 */
	struct list_head xfer_list;
	/*
	 * Node in the linked list whose head is dbc->bo_lists.
	 * This list contains BOs associated with the DBC they are linked to.
	 */
	struct list_head bo_list;
	struct {
		/*
		 * Latest timestamp (ns) at which the kernel received a
		 * request to execute this BO
		 */
		u64 req_received_ts;
		/*
		 * Latest timestamp (ns) at which the kernel enqueued this
		 * BO's requests for execution in the DMA queue
		 */
		u64 req_submit_ts;
		/*
		 * Latest timestamp (ns) at which the kernel received a
		 * completion interrupt for this BO's requests
		 */
		u64 req_processed_ts;
		/*
		 * Number of elements already enqueued in the DMA queue
		 * before enqueuing this BO's requests
		 */
		u32 queue_level_before;
	} perf_stats;
	/* Synchronizes BO operations */
	struct mutex lock;
};
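
/*
 * Minimal sketch of the xfer_done handshake described above; the wrapper
 * is hypothetical, wait_for_completion_interruptible() is the standard
 * kernel completion primitive:
 *
 *	static int example_wait_bo(struct qaic_bo *bo)
 *	{
 *		return wait_for_completion_interruptible(&bo->xfer_done);
 *	}
 */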

struct bo_slice {
	/* Mapped pages */
	struct sg_table *sgt;
	/* Number of requests that must be queued in the DMA queue */
	int nents;
	/* See enum dma_data_direction */
	int dir;
	/* Actual requests that will be copied into the DMA queue */
	struct dbc_req *reqs;
	struct kref ref_count;
	/* true: no DMA transfer required */
	bool no_xfer;
	/* Pointer to the parent BO handle */
	struct qaic_bo *bo;
	/* Node in the list of slices maintained by the parent BO */
	struct list_head slice;
	/* Size of this slice in bytes */
	u64 size;
	/* Offset of this slice in the buffer */
	u64 offset;
};

int get_dbc_req_elem_size(void);
int get_dbc_rsp_elem_size(void);
int get_cntl_version(struct qaic_device *qdev, struct qaic_user *usr, u16 *major, u16 *minor);
int qaic_manage_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
void qaic_mhi_ul_xfer_cb(struct mhi_device *mhi_dev, struct mhi_result *mhi_result);

void qaic_mhi_dl_xfer_cb(struct mhi_device *mhi_dev, struct mhi_result *mhi_result);

int qaic_control_open(struct qaic_device *qdev);
void qaic_control_close(struct qaic_device *qdev);
void qaic_release_usr(struct qaic_device *qdev, struct qaic_user *usr);

irqreturn_t dbc_irq_threaded_fn(int irq, void *data);
irqreturn_t dbc_irq_handler(int irq, void *data);
int disable_dbc(struct qaic_device *qdev, u32 dbc_id, struct qaic_user *usr);
void enable_dbc(struct qaic_device *qdev, u32 dbc_id, struct qaic_user *usr);
void wakeup_dbc(struct qaic_device *qdev, u32 dbc_id);
void release_dbc(struct qaic_device *qdev, u32 dbc_id);
void qaic_data_get_fifo_info(struct dma_bridge_chan *dbc, u32 *head, u32 *tail);

void wake_all_cntl(struct qaic_device *qdev);
void qaic_dev_reset_clean_local_state(struct qaic_device *qdev);

struct drm_gem_object *qaic_gem_prime_import(struct drm_device *dev, struct dma_buf *dma_buf);

int qaic_create_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
int qaic_mmap_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
int qaic_attach_slice_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
int qaic_execute_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
int qaic_partial_execute_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
int qaic_wait_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
int qaic_perf_stats_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
int qaic_detach_slice_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
void irq_polling_work(struct work_struct *work);

#endif /* _QAIC_H_ */