Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
torvalds
GitHub Repository: torvalds/linux
Path: blob/master/drivers/accel/ivpu/ivpu_ms.c
26428 views
1
// SPDX-License-Identifier: GPL-2.0-only OR MIT
2
/*
3
* Copyright (C) 2020-2024 Intel Corporation
4
*/
5
6
#include <drm/drm_file.h>
7
#include <linux/pm_runtime.h>
8
9
#include "ivpu_drv.h"
10
#include "ivpu_gem.h"
11
#include "ivpu_jsm_msg.h"
12
#include "ivpu_ms.h"
13
#include "ivpu_pm.h"
14
15
#define MS_INFO_BUFFER_SIZE SZ_64K
16
#define MS_NUM_BUFFERS 2
17
#define MS_READ_PERIOD_MULTIPLIER 2
18
#define MS_MIN_SAMPLE_PERIOD_NS 1000000
19
20
static struct ivpu_ms_instance *
21
get_instance_by_mask(struct ivpu_file_priv *file_priv, u64 metric_mask)
22
{
23
struct ivpu_ms_instance *ms;
24
25
lockdep_assert_held(&file_priv->ms_lock);
26
27
list_for_each_entry(ms, &file_priv->ms_instance_list, ms_instance_node)
28
if (ms->mask == metric_mask)
29
return ms;
30
31
return NULL;
32
}
33
34
/*
 * DRM_IOCTL_IVPU_METRIC_STREAMER_START handler.
 *
 * Creates a metric streamer instance for the requested metric group mask:
 * queries the firmware for the per-sample size, allocates a double buffer
 * sized for read_period_samples (times MS_READ_PERIOD_MULTIPLIER so samples
 * accumulated between reads fit), and tells the firmware to start streaming
 * into the first (active) half.
 *
 * On success fills args->sample_size and args->max_data_size for user space.
 * Returns 0 or a negative errno (-EALREADY if an instance with the same mask
 * already exists for this file context).
 */
int ivpu_ms_start_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
{
	struct ivpu_file_priv *file_priv = file->driver_priv;
	struct drm_ivpu_metric_streamer_start *args = data;
	struct ivpu_device *vdev = file_priv->vdev;
	struct ivpu_ms_instance *ms;
	u64 single_buff_size;
	u32 sample_size;
	int ret;

	/* Reject empty masks, zero sample counts, and too-fast sampling periods. */
	if (!args->metric_group_mask || !args->read_period_samples ||
	    args->sampling_period_ns < MS_MIN_SAMPLE_PERIOD_NS)
		return -EINVAL;

	/* Wake the device; firmware must be running to receive JSM messages. */
	ret = ivpu_rpm_get(vdev);
	if (ret < 0)
		return ret;

	mutex_lock(&file_priv->ms_lock);

	/* Only one instance per metric group mask per file context. */
	if (get_instance_by_mask(file_priv, args->metric_group_mask)) {
		ivpu_err(vdev, "Instance already exists (mask %#llx)\n", args->metric_group_mask);
		ret = -EALREADY;
		goto unlock;
	}

	ms = kzalloc(sizeof(*ms), GFP_KERNEL);
	if (!ms) {
		ret = -ENOMEM;
		goto unlock;
	}

	ms->mask = args->metric_group_mask;

	/* Ask firmware how large one sample of this metric group is. */
	ret = ivpu_jsm_metric_streamer_info(vdev, ms->mask, 0, 0, &sample_size, NULL);
	if (ret)
		goto err_free_ms;

	/*
	 * Each half of the double buffer holds read_period_samples worth of
	 * data with headroom (MS_READ_PERIOD_MULTIPLIER) for late reads.
	 */
	single_buff_size = sample_size *
			   ((u64)args->read_period_samples * MS_READ_PERIOD_MULTIPLIER);
	ms->bo = ivpu_bo_create_global(vdev, PAGE_ALIGN(single_buff_size * MS_NUM_BUFFERS),
				       DRM_IVPU_BO_CACHED | DRM_IVPU_BO_MAPPABLE);
	if (!ms->bo) {
		ivpu_err(vdev, "Failed to allocate MS buffer (size %llu)\n", single_buff_size);
		ret = -ENOMEM;
		goto err_free_ms;
	}

	/* Split the BO into active/inactive halves (VPU and CPU views). */
	ms->buff_size = ivpu_bo_size(ms->bo) / MS_NUM_BUFFERS;
	ms->active_buff_vpu_addr = ms->bo->vpu_addr;
	ms->inactive_buff_vpu_addr = ms->bo->vpu_addr + ms->buff_size;
	ms->active_buff_ptr = ivpu_bo_vaddr(ms->bo);
	ms->inactive_buff_ptr = ivpu_bo_vaddr(ms->bo) + ms->buff_size;

	/* Start firmware-side streaming into the active half. */
	ret = ivpu_jsm_metric_streamer_start(vdev, ms->mask, args->sampling_period_ns,
					     ms->active_buff_vpu_addr, ms->buff_size);
	if (ret)
		goto err_free_bo;

	args->sample_size = sample_size;
	args->max_data_size = ivpu_bo_size(ms->bo);
	list_add_tail(&ms->ms_instance_node, &file_priv->ms_instance_list);
	goto unlock;

err_free_bo:
	ivpu_bo_free(ms->bo);
err_free_ms:
	kfree(ms);
unlock:
	mutex_unlock(&file_priv->ms_lock);

	ivpu_rpm_put(vdev);
	return ret;
}
108
109
static int
110
copy_leftover_bytes(struct ivpu_ms_instance *ms,
111
void __user *user_ptr, u64 user_size, u64 *user_bytes_copied)
112
{
113
u64 copy_bytes;
114
115
if (ms->leftover_bytes) {
116
copy_bytes = min(user_size - *user_bytes_copied, ms->leftover_bytes);
117
if (copy_to_user(user_ptr + *user_bytes_copied, ms->leftover_addr, copy_bytes))
118
return -EFAULT;
119
120
ms->leftover_bytes -= copy_bytes;
121
ms->leftover_addr += copy_bytes;
122
*user_bytes_copied += copy_bytes;
123
}
124
125
return 0;
126
}
127
128
/*
 * Copy collected metric samples into the user buffer.
 *
 * First drains any bytes left over from a previous read. If the user buffer
 * still has room, asks the firmware to redirect streaming into the inactive
 * half and report how many bytes it wrote to the previously active half,
 * swaps the halves, then drains the fresh data. Any bytes that do not fit
 * remain recorded in ms->leftover_* for the next call.
 *
 * Caller must hold file_priv->ms_lock. Returns 0 or a negative errno;
 * *user_bytes_copied is set to the number of bytes copied to user space.
 */
static int
copy_samples_to_user(struct ivpu_device *vdev, struct ivpu_ms_instance *ms,
		     void __user *user_ptr, u64 user_size, u64 *user_bytes_copied)
{
	u64 bytes_written;
	int ret;

	*user_bytes_copied = 0;

	/* Leftovers from the previous buffer swap go out first. */
	ret = copy_leftover_bytes(ms, user_ptr, user_size, user_bytes_copied);
	if (ret)
		return ret;

	if (*user_bytes_copied == user_size)
		return 0;

	/* Point firmware at the inactive half; learn how much it wrote so far. */
	ret = ivpu_jsm_metric_streamer_update(vdev, ms->mask, ms->inactive_buff_vpu_addr,
					      ms->buff_size, &bytes_written);
	if (ret)
		return ret;

	/* The halves trade roles: firmware now fills the other buffer. */
	swap(ms->active_buff_vpu_addr, ms->inactive_buff_vpu_addr);
	swap(ms->active_buff_ptr, ms->inactive_buff_ptr);

	/* Freshly written data now sits in the (post-swap) inactive half. */
	ms->leftover_bytes = bytes_written;
	ms->leftover_addr = ms->inactive_buff_ptr;

	return copy_leftover_bytes(ms, user_ptr, user_size, user_bytes_copied);
}
157
158
/*
 * DRM_IOCTL_IVPU_METRIC_STREAMER_GET_DATA handler.
 *
 * With buffer_size == 0 this is a size query: args->data_size is set to the
 * number of bytes currently available (firmware-reported plus leftovers)
 * without copying anything. Otherwise samples are copied to the user buffer
 * at args->buffer_ptr and args->data_size reports how many bytes were copied.
 *
 * Returns 0 or a negative errno (-EINVAL for an empty mask, a missing buffer
 * pointer, or an unknown instance).
 */
int ivpu_ms_get_data_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
{
	struct drm_ivpu_metric_streamer_get_data *args = data;
	struct ivpu_file_priv *file_priv = file->driver_priv;
	struct ivpu_device *vdev = file_priv->vdev;
	struct ivpu_ms_instance *ms;
	u64 bytes_written;
	int ret;

	if (!args->metric_group_mask)
		return -EINVAL;

	/* Device must be awake to exchange JSM messages with firmware. */
	ret = ivpu_rpm_get(vdev);
	if (ret < 0)
		return ret;

	mutex_lock(&file_priv->ms_lock);

	ms = get_instance_by_mask(file_priv, args->metric_group_mask);
	if (!ms) {
		ivpu_err(vdev, "Instance doesn't exist for mask: %#llx\n", args->metric_group_mask);
		ret = -EINVAL;
		goto unlock;
	}

	/* Size query: report available bytes without consuming them. */
	if (!args->buffer_size) {
		ret = ivpu_jsm_metric_streamer_update(vdev, ms->mask, 0, 0, &bytes_written);
		if (ret)
			goto unlock;
		args->data_size = bytes_written + ms->leftover_bytes;
		goto unlock;
	}

	if (!args->buffer_ptr) {
		ret = -EINVAL;
		goto unlock;
	}

	ret = copy_samples_to_user(vdev, ms, u64_to_user_ptr(args->buffer_ptr),
				   args->buffer_size, &args->data_size);
unlock:
	mutex_unlock(&file_priv->ms_lock);

	ivpu_rpm_put(vdev);
	return ret;
}
204
205
/*
 * Tear down one metric streamer instance: unlink it from the file context,
 * tell firmware to stop streaming for its mask, then release the buffer and
 * the instance itself. The stop message precedes ivpu_bo_free() so firmware
 * is no longer writing into the buffer when it is freed.
 * Caller must hold file_priv->ms_lock.
 */
static void free_instance(struct ivpu_file_priv *file_priv, struct ivpu_ms_instance *ms)
{
	lockdep_assert_held(&file_priv->ms_lock);

	list_del(&ms->ms_instance_node);
	ivpu_jsm_metric_streamer_stop(file_priv->vdev, ms->mask);
	ivpu_bo_free(ms->bo);
	kfree(ms);
}
214
215
int ivpu_ms_stop_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
216
{
217
struct ivpu_file_priv *file_priv = file->driver_priv;
218
struct drm_ivpu_metric_streamer_stop *args = data;
219
struct ivpu_device *vdev = file_priv->vdev;
220
struct ivpu_ms_instance *ms;
221
int ret;
222
223
if (!args->metric_group_mask)
224
return -EINVAL;
225
226
ret = ivpu_rpm_get(vdev);
227
if (ret < 0)
228
return ret;
229
230
mutex_lock(&file_priv->ms_lock);
231
232
ms = get_instance_by_mask(file_priv, args->metric_group_mask);
233
if (ms)
234
free_instance(file_priv, ms);
235
236
mutex_unlock(&file_priv->ms_lock);
237
238
ivpu_rpm_put(vdev);
239
return ms ? 0 : -EINVAL;
240
}
241
242
static inline struct ivpu_bo *get_ms_info_bo(struct ivpu_file_priv *file_priv)
243
{
244
lockdep_assert_held(&file_priv->ms_lock);
245
246
if (file_priv->ms_info_bo)
247
return file_priv->ms_info_bo;
248
249
file_priv->ms_info_bo = ivpu_bo_create_global(file_priv->vdev, MS_INFO_BUFFER_SIZE,
250
DRM_IVPU_BO_CACHED | DRM_IVPU_BO_MAPPABLE);
251
return file_priv->ms_info_bo;
252
}
253
254
/*
 * DRM_IOCTL_IVPU_METRIC_STREAMER_GET_INFO handler.
 *
 * Retrieves the metric group description from firmware. With buffer_size == 0
 * this is a size query and only args->data_size is filled in. Otherwise
 * firmware writes the description into a driver-side info buffer, which is
 * then copied to user space at args->buffer_ptr.
 *
 * Returns 0 or a negative errno (-ENOSPC when the user buffer is smaller
 * than the firmware-reported info size).
 */
int ivpu_ms_get_info_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
{
	struct drm_ivpu_metric_streamer_get_data *args = data;
	struct ivpu_file_priv *file_priv = file->driver_priv;
	struct ivpu_device *vdev = file_priv->vdev;
	struct ivpu_bo *bo;
	u64 info_size;
	int ret;

	if (!args->metric_group_mask)
		return -EINVAL;

	/* Size query: firmware reports the size without writing any data. */
	if (!args->buffer_size)
		return ivpu_jsm_metric_streamer_info(vdev, args->metric_group_mask,
						     0, 0, NULL, &args->data_size);
	if (!args->buffer_ptr)
		return -EINVAL;

	mutex_lock(&file_priv->ms_lock);

	/* Lazily allocated per-file bounce buffer for the firmware response. */
	bo = get_ms_info_bo(file_priv);
	if (!bo) {
		ret = -ENOMEM;
		goto unlock;
	}

	ret = ivpu_jsm_metric_streamer_info(vdev, args->metric_group_mask, bo->vpu_addr,
					    ivpu_bo_size(bo), NULL, &info_size);
	if (ret)
		goto unlock;

	if (args->buffer_size < info_size) {
		ret = -ENOSPC;
		goto unlock;
	}

	if (copy_to_user(u64_to_user_ptr(args->buffer_ptr), ivpu_bo_vaddr(bo), info_size))
		ret = -EFAULT;

	args->data_size = info_size;
unlock:
	mutex_unlock(&file_priv->ms_lock);

	return ret;
}
299
300
/*
 * Release all metric streamer state owned by a file context: the info
 * buffer and every active streamer instance. Called on file close (and from
 * ivpu_ms_cleanup_all() on device-wide teardown).
 *
 * pm_runtime_get_sync() keeps the device resumed for the duration so the
 * per-instance firmware stop messages issued by free_instance() can be
 * delivered.
 */
void ivpu_ms_cleanup(struct ivpu_file_priv *file_priv)
{
	struct ivpu_ms_instance *ms, *tmp;
	struct ivpu_device *vdev = file_priv->vdev;

	pm_runtime_get_sync(vdev->drm.dev);

	mutex_lock(&file_priv->ms_lock);

	if (file_priv->ms_info_bo) {
		ivpu_bo_free(file_priv->ms_info_bo);
		file_priv->ms_info_bo = NULL;
	}

	/* _safe variant: free_instance() removes entries while we iterate. */
	list_for_each_entry_safe(ms, tmp, &file_priv->ms_instance_list, ms_instance_node)
		free_instance(file_priv, ms);

	mutex_unlock(&file_priv->ms_lock);

	pm_runtime_put_autosuspend(vdev->drm.dev);
}
321
322
void ivpu_ms_cleanup_all(struct ivpu_device *vdev)
323
{
324
struct ivpu_file_priv *file_priv;
325
unsigned long ctx_id;
326
327
mutex_lock(&vdev->context_list_lock);
328
329
xa_for_each(&vdev->context_xa, ctx_id, file_priv)
330
ivpu_ms_cleanup(file_priv);
331
332
mutex_unlock(&vdev->context_list_lock);
333
}
334
335