GitHub Repository: torvalds/linux
Path: blob/master/drivers/accel/ivpu/ivpu_job.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020-2025 Intel Corporation
 */

#include <drm/drm_file.h>

#include <linux/bitfield.h>
#include <linux/highmem.h>
#include <linux/pci.h>
#include <linux/pm_runtime.h>
#include <linux/module.h>
#include <uapi/drm/ivpu_accel.h>

#include "ivpu_drv.h"
#include "ivpu_fw.h"
#include "ivpu_hw.h"
#include "ivpu_ipc.h"
#include "ivpu_job.h"
#include "ivpu_jsm_msg.h"
#include "ivpu_mmu.h"
#include "ivpu_pm.h"
#include "ivpu_trace.h"
#include "vpu_boot_api.h"

#define CMD_BUF_IDX 0
#define JOB_MAX_BUFFER_COUNT 65535

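/* Notify the NPU that new entries are available in @cmdq by ringing its doorbell */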
static void ivpu_cmdq_ring_db(struct ivpu_device *vdev, struct ivpu_cmdq *cmdq)
{
	ivpu_hw_db_set(vdev, cmdq->db_id);
}

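/*
 * Allocate the primary and secondary preemption buffers used by the firmware
 * in HW scheduling mode. Skipped when the OS scheduler is used or when
 * preemption buffers are disabled via the IVPU_TEST_MODE_MIP_DISABLE flag.
 */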
static int ivpu_preemption_buffers_create(struct ivpu_device *vdev,
					  struct ivpu_file_priv *file_priv, struct ivpu_cmdq *cmdq)
{
	u64 primary_size = ALIGN(vdev->fw->primary_preempt_buf_size, PAGE_SIZE);
	u64 secondary_size = ALIGN(vdev->fw->secondary_preempt_buf_size, PAGE_SIZE);

	if (vdev->fw->sched_mode != VPU_SCHEDULING_MODE_HW ||
	    ivpu_test_mode & IVPU_TEST_MODE_MIP_DISABLE)
		return 0;

	cmdq->primary_preempt_buf = ivpu_bo_create(vdev, &file_priv->ctx, &vdev->hw->ranges.user,
						   primary_size, DRM_IVPU_BO_WC);
	if (!cmdq->primary_preempt_buf) {
		ivpu_err(vdev, "Failed to create primary preemption buffer\n");
		return -ENOMEM;
	}

	cmdq->secondary_preempt_buf = ivpu_bo_create(vdev, &file_priv->ctx, &vdev->hw->ranges.dma,
						     secondary_size, DRM_IVPU_BO_WC);
	if (!cmdq->secondary_preempt_buf) {
		ivpu_err(vdev, "Failed to create secondary preemption buffer\n");
		goto err_free_primary;
	}

	return 0;

err_free_primary:
	ivpu_bo_free(cmdq->primary_preempt_buf);
	cmdq->primary_preempt_buf = NULL;
	return -ENOMEM;
}

static void ivpu_preemption_buffers_free(struct ivpu_device *vdev,
					 struct ivpu_file_priv *file_priv, struct ivpu_cmdq *cmdq)
{
	if (vdev->fw->sched_mode != VPU_SCHEDULING_MODE_HW)
		return;

	if (cmdq->primary_preempt_buf)
		ivpu_bo_free(cmdq->primary_preempt_buf);
	if (cmdq->secondary_preempt_buf)
		ivpu_bo_free(cmdq->secondary_preempt_buf);
}

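/*
 * Allocate a command queue together with its backing buffer and, if possible,
 * its preemption buffers. Preemption buffer allocation failures are not fatal.
 */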
static struct ivpu_cmdq *ivpu_cmdq_alloc(struct ivpu_file_priv *file_priv)
{
	struct ivpu_device *vdev = file_priv->vdev;
	struct ivpu_cmdq *cmdq;
	int ret;

	cmdq = kzalloc(sizeof(*cmdq), GFP_KERNEL);
	if (!cmdq)
		return NULL;

	cmdq->mem = ivpu_bo_create_global(vdev, SZ_4K, DRM_IVPU_BO_WC | DRM_IVPU_BO_MAPPABLE);
	if (!cmdq->mem)
		goto err_free_cmdq;

	ret = ivpu_preemption_buffers_create(vdev, file_priv, cmdq);
	if (ret)
		ivpu_warn(vdev, "Failed to allocate preemption buffers, preemption limited\n");

	return cmdq;

err_free_cmdq:
	kfree(cmdq);
	return NULL;
}

/**
 * ivpu_cmdq_get_entry_count - Calculate the number of entries in the command queue.
 * @cmdq: Pointer to the command queue structure.
 *
 * Returns the number of entries that can fit in the command queue memory.
 */
static inline u32 ivpu_cmdq_get_entry_count(struct ivpu_cmdq *cmdq)
{
	size_t size = ivpu_bo_size(cmdq->mem) - sizeof(struct vpu_job_queue_header);

	return size / sizeof(struct vpu_job_queue_entry);
}

/**
 * ivpu_cmdq_get_flags - Get command queue flags based on input flags and test mode.
 * @vdev: Pointer to the ivpu device structure.
 * @flags: Input flags to determine the command queue flags.
 *
 * Returns the calculated command queue flags, considering both the input flags
 * and the current test mode settings.
 */
static u32 ivpu_cmdq_get_flags(struct ivpu_device *vdev, u32 flags)
{
	u32 cmdq_flags = 0;

	if ((flags & DRM_IVPU_CMDQ_FLAG_TURBO) && (ivpu_hw_ip_gen(vdev) >= IVPU_HW_IP_40XX))
		cmdq_flags |= VPU_JOB_QUEUE_FLAGS_TURBO_MODE;

	/* Test mode can override the TURBO flag coming from the application */
	if (ivpu_test_mode & IVPU_TEST_MODE_TURBO_ENABLE)
		cmdq_flags |= VPU_JOB_QUEUE_FLAGS_TURBO_MODE;
	if (ivpu_test_mode & IVPU_TEST_MODE_TURBO_DISABLE)
		cmdq_flags &= ~VPU_JOB_QUEUE_FLAGS_TURBO_MODE;

	return cmdq_flags;
}

static void ivpu_cmdq_free(struct ivpu_file_priv *file_priv, struct ivpu_cmdq *cmdq)
{
	ivpu_preemption_buffers_free(file_priv->vdev, file_priv, cmdq);
	ivpu_bo_free(cmdq->mem);
	kfree(cmdq);
}

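/*
 * Create a command queue, assign it an ID from the per-file XArray and
 * initialize the job queue header. Must be called with file_priv->lock held.
 */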
static struct ivpu_cmdq *ivpu_cmdq_create(struct ivpu_file_priv *file_priv, u8 priority, u32 flags)
{
	struct ivpu_device *vdev = file_priv->vdev;
	struct ivpu_cmdq *cmdq = NULL;
	int ret;

	lockdep_assert_held(&file_priv->lock);

	cmdq = ivpu_cmdq_alloc(file_priv);
	if (!cmdq) {
		ivpu_err(vdev, "Failed to allocate command queue\n");
		return NULL;
	}
	ret = xa_alloc_cyclic(&file_priv->cmdq_xa, &cmdq->id, cmdq, file_priv->cmdq_limit,
			      &file_priv->cmdq_id_next, GFP_KERNEL);
	if (ret < 0) {
		ivpu_err(vdev, "Failed to allocate command queue ID: %d\n", ret);
		goto err_free_cmdq;
	}

	cmdq->entry_count = ivpu_cmdq_get_entry_count(cmdq);
	cmdq->priority = priority;

	cmdq->jobq = (struct vpu_job_queue *)ivpu_bo_vaddr(cmdq->mem);
	cmdq->jobq->header.engine_idx = VPU_ENGINE_COMPUTE;
	cmdq->jobq->header.flags = ivpu_cmdq_get_flags(vdev, flags);

	ivpu_dbg(vdev, JOB, "Command queue %d created, ctx %d, flags 0x%08x\n",
		 cmdq->id, file_priv->ctx.id, cmdq->jobq->header.flags);
	return cmdq;

err_free_cmdq:
	ivpu_cmdq_free(file_priv, cmdq);
	return NULL;
}

static int ivpu_hws_cmdq_init(struct ivpu_file_priv *file_priv, struct ivpu_cmdq *cmdq, u16 engine,
			      u8 priority)
{
	struct ivpu_device *vdev = file_priv->vdev;
	int ret;

	ret = ivpu_jsm_hws_create_cmdq(vdev, file_priv->ctx.id, file_priv->ctx.id, cmdq->id,
				       task_pid_nr(current), engine,
				       cmdq->mem->vpu_addr, ivpu_bo_size(cmdq->mem));
	if (ret)
		return ret;

	ret = ivpu_jsm_hws_set_context_sched_properties(vdev, file_priv->ctx.id, cmdq->id,
							priority);
	if (ret)
		return ret;

	return 0;
}

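/*
 * Allocate a doorbell ID and register it with the firmware, using the
 * HW scheduler or OS scheduler variant of the JSM message as appropriate.
 */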
static int ivpu_register_db(struct ivpu_file_priv *file_priv, struct ivpu_cmdq *cmdq)
{
	struct ivpu_device *vdev = file_priv->vdev;
	int ret;

	ret = xa_alloc_cyclic(&vdev->db_xa, &cmdq->db_id, NULL, vdev->db_limit, &vdev->db_next,
			      GFP_KERNEL);
	if (ret < 0) {
		ivpu_err(vdev, "Failed to allocate doorbell ID: %d\n", ret);
		return ret;
	}

	if (vdev->fw->sched_mode == VPU_SCHEDULING_MODE_HW)
		ret = ivpu_jsm_hws_register_db(vdev, file_priv->ctx.id, cmdq->id, cmdq->db_id,
					       cmdq->mem->vpu_addr, ivpu_bo_size(cmdq->mem));
	else
		ret = ivpu_jsm_register_db(vdev, file_priv->ctx.id, cmdq->db_id,
					   cmdq->mem->vpu_addr, ivpu_bo_size(cmdq->mem));

	if (!ret)
		ivpu_dbg(vdev, JOB, "DB %d registered to cmdq %d ctx %d priority %d\n",
			 cmdq->db_id, cmdq->id, file_priv->ctx.id, cmdq->priority);
	else
		xa_erase(&vdev->db_xa, cmdq->db_id);

	return ret;
}

static void ivpu_cmdq_jobq_reset(struct ivpu_device *vdev, struct vpu_job_queue *jobq)
{
	jobq->header.head = 0;
	jobq->header.tail = 0;

	wmb(); /* Flush WC buffer for jobq->header */
}

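/*
 * Register @cmdq with the firmware: reset the job queue, set up HW scheduling
 * properties if needed and register a doorbell. No-op if a doorbell is
 * already assigned.
 */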
static int ivpu_cmdq_register(struct ivpu_file_priv *file_priv, struct ivpu_cmdq *cmdq)
{
	struct ivpu_device *vdev = file_priv->vdev;
	int ret;

	lockdep_assert_held(&file_priv->lock);

	if (cmdq->db_id)
		return 0;

	ivpu_cmdq_jobq_reset(vdev, cmdq->jobq);

	if (vdev->fw->sched_mode == VPU_SCHEDULING_MODE_HW) {
		ret = ivpu_hws_cmdq_init(file_priv, cmdq, VPU_ENGINE_COMPUTE, cmdq->priority);
		if (ret)
			return ret;
	}

	ret = ivpu_register_db(file_priv, cmdq);
	if (ret)
		return ret;

	return 0;
}

static int ivpu_cmdq_unregister(struct ivpu_file_priv *file_priv, struct ivpu_cmdq *cmdq)
{
	struct ivpu_device *vdev = file_priv->vdev;
	int ret;

	lockdep_assert_held(&file_priv->lock);

	if (!cmdq->db_id)
		return 0;

	ret = ivpu_jsm_unregister_db(vdev, cmdq->db_id);
	if (!ret)
		ivpu_dbg(vdev, JOB, "DB %d unregistered\n", cmdq->db_id);

	if (vdev->fw->sched_mode == VPU_SCHEDULING_MODE_HW) {
		ret = ivpu_jsm_hws_destroy_cmdq(vdev, file_priv->ctx.id, cmdq->id);
		if (!ret)
			ivpu_dbg(vdev, JOB, "Command queue %d destroyed, ctx %d\n",
				 cmdq->id, file_priv->ctx.id);
	}

	xa_erase(&file_priv->vdev->db_xa, cmdq->db_id);
	cmdq->db_id = 0;

	return 0;
}

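/* Map a DRM UAPI job priority onto a VPU JSM scheduling priority band */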
static inline u8 ivpu_job_to_jsm_priority(u8 priority)
{
	if (priority == DRM_IVPU_JOB_PRIORITY_DEFAULT)
		return VPU_JOB_SCHEDULING_PRIORITY_BAND_NORMAL;

	return priority - 1;
}

static void ivpu_cmdq_destroy(struct ivpu_file_priv *file_priv, struct ivpu_cmdq *cmdq)
{
	ivpu_cmdq_unregister(file_priv, cmdq);
	xa_erase(&file_priv->cmdq_xa, cmdq->id);
	ivpu_cmdq_free(file_priv, cmdq);
}

static struct ivpu_cmdq *ivpu_cmdq_acquire_legacy(struct ivpu_file_priv *file_priv, u8 priority)
{
	struct ivpu_cmdq *cmdq;
	unsigned long id;

	lockdep_assert_held(&file_priv->lock);

	xa_for_each(&file_priv->cmdq_xa, id, cmdq)
		if (cmdq->is_legacy && cmdq->priority == priority)
			break;

	if (!cmdq) {
		cmdq = ivpu_cmdq_create(file_priv, priority, 0);
		if (!cmdq)
			return NULL;
		cmdq->is_legacy = true;
	}

	return cmdq;
}

static struct ivpu_cmdq *ivpu_cmdq_acquire(struct ivpu_file_priv *file_priv, u32 cmdq_id)
{
	struct ivpu_device *vdev = file_priv->vdev;
	struct ivpu_cmdq *cmdq;

	lockdep_assert_held(&file_priv->lock);

	cmdq = xa_load(&file_priv->cmdq_xa, cmdq_id);
	if (!cmdq) {
		ivpu_warn_ratelimited(vdev, "Failed to find command queue with ID: %u\n", cmdq_id);
		return NULL;
	}

	return cmdq;
}

void ivpu_cmdq_release_all_locked(struct ivpu_file_priv *file_priv)
{
	struct ivpu_cmdq *cmdq;
	unsigned long cmdq_id;

	lockdep_assert_held(&file_priv->lock);

	xa_for_each(&file_priv->cmdq_xa, cmdq_id, cmdq)
		ivpu_cmdq_destroy(file_priv, cmdq);
}

/*
 * Mark the doorbell as unregistered
 * This function needs to be called when the VPU hardware is restarted
 * and FW loses job queue state. The next time job queue is used it
 * will be registered again.
 */
static void ivpu_cmdq_reset(struct ivpu_file_priv *file_priv)
{
	struct ivpu_cmdq *cmdq;
	unsigned long cmdq_id;

	mutex_lock(&file_priv->lock);

	xa_for_each(&file_priv->cmdq_xa, cmdq_id, cmdq) {
		xa_erase(&file_priv->vdev->db_xa, cmdq->db_id);
		cmdq->db_id = 0;
	}

	mutex_unlock(&file_priv->lock);
}

void ivpu_cmdq_reset_all_contexts(struct ivpu_device *vdev)
{
	struct ivpu_file_priv *file_priv;
	unsigned long ctx_id;

	mutex_lock(&vdev->context_list_lock);

	xa_for_each(&vdev->context_xa, ctx_id, file_priv)
		ivpu_cmdq_reset(file_priv);

	mutex_unlock(&vdev->context_list_lock);
}

void ivpu_context_abort_locked(struct ivpu_file_priv *file_priv)
{
	struct ivpu_device *vdev = file_priv->vdev;
	struct ivpu_cmdq *cmdq;
	unsigned long cmdq_id;

	lockdep_assert_held(&file_priv->lock);
	ivpu_dbg(vdev, JOB, "Context ID: %u abort\n", file_priv->ctx.id);

	xa_for_each(&file_priv->cmdq_xa, cmdq_id, cmdq)
		ivpu_cmdq_unregister(file_priv, cmdq);

	if (vdev->fw->sched_mode == VPU_SCHEDULING_MODE_OS)
		ivpu_jsm_context_release(vdev, file_priv->ctx.id);

	ivpu_mmu_disable_ssid_events(vdev, file_priv->ctx.id);

	file_priv->aborted = true;
}

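/*
 * Add a job to the tail of the job queue shared with the firmware.
 * Returns -EBUSY if there is no free slot left in the queue.
 */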
static int ivpu_cmdq_push_job(struct ivpu_cmdq *cmdq, struct ivpu_job *job)
{
	struct ivpu_device *vdev = job->vdev;
	struct vpu_job_queue_header *header = &cmdq->jobq->header;
	struct vpu_job_queue_entry *entry;
	u32 tail = READ_ONCE(header->tail);
	u32 next_entry = (tail + 1) % cmdq->entry_count;

	/* Check if there is space left in job queue */
	if (next_entry == header->head) {
		ivpu_dbg(vdev, JOB, "Job queue full: ctx %d cmdq %d db %d head %d tail %d\n",
			 job->file_priv->ctx.id, cmdq->id, cmdq->db_id, header->head, tail);
		return -EBUSY;
	}

	entry = &cmdq->jobq->slot[tail].job;
	entry->batch_buf_addr = job->cmd_buf_vpu_addr;
	entry->job_id = job->job_id;
	entry->flags = 0;
	if (unlikely(ivpu_test_mode & IVPU_TEST_MODE_NULL_SUBMISSION))
		entry->flags = VPU_JOB_FLAGS_NULL_SUBMISSION_MASK;

	if (vdev->fw->sched_mode == VPU_SCHEDULING_MODE_HW) {
		if (cmdq->primary_preempt_buf) {
			entry->primary_preempt_buf_addr = cmdq->primary_preempt_buf->vpu_addr;
			entry->primary_preempt_buf_size = ivpu_bo_size(cmdq->primary_preempt_buf);
		}

		if (cmdq->secondary_preempt_buf) {
			entry->secondary_preempt_buf_addr = cmdq->secondary_preempt_buf->vpu_addr;
			entry->secondary_preempt_buf_size =
				ivpu_bo_size(cmdq->secondary_preempt_buf);
		}
	}

	wmb(); /* Ensure that tail is updated after filling entry */
	header->tail = next_entry;
	wmb(); /* Flush WC buffer for jobq header */

	return 0;
}

struct ivpu_fence {
	struct dma_fence base;
	spinlock_t lock; /* protects base */
	struct ivpu_device *vdev;
};

static inline struct ivpu_fence *to_vpu_fence(struct dma_fence *fence)
{
	return container_of(fence, struct ivpu_fence, base);
}

static const char *ivpu_fence_get_driver_name(struct dma_fence *fence)
{
	return DRIVER_NAME;
}

static const char *ivpu_fence_get_timeline_name(struct dma_fence *fence)
{
	struct ivpu_fence *ivpu_fence = to_vpu_fence(fence);

	return dev_name(ivpu_fence->vdev->drm.dev);
}

static const struct dma_fence_ops ivpu_fence_ops = {
	.get_driver_name = ivpu_fence_get_driver_name,
	.get_timeline_name = ivpu_fence_get_timeline_name,
};

static struct dma_fence *ivpu_fence_create(struct ivpu_device *vdev)
{
	struct ivpu_fence *fence;

	fence = kzalloc(sizeof(*fence), GFP_KERNEL);
	if (!fence)
		return NULL;

	fence->vdev = vdev;
	spin_lock_init(&fence->lock);
	dma_fence_init(&fence->base, &ivpu_fence_ops, &fence->lock, dma_fence_context_alloc(1), 1);

	return &fence->base;
}

static void ivpu_job_destroy(struct ivpu_job *job)
{
	struct ivpu_device *vdev = job->vdev;
	u32 i;

	ivpu_dbg(vdev, JOB, "Job destroyed: id %3u ctx %2d cmdq_id %u engine %d",
		 job->job_id, job->file_priv->ctx.id, job->cmdq_id, job->engine_idx);

	for (i = 0; i < job->bo_count; i++)
		if (job->bos[i])
			drm_gem_object_put(&job->bos[i]->base.base);

	dma_fence_put(job->done_fence);
	ivpu_file_priv_put(&job->file_priv);
	kfree(job);
}

static struct ivpu_job *
ivpu_job_create(struct ivpu_file_priv *file_priv, u32 engine_idx, u32 bo_count)
{
	struct ivpu_device *vdev = file_priv->vdev;
	struct ivpu_job *job;

	job = kzalloc(struct_size(job, bos, bo_count), GFP_KERNEL);
	if (!job)
		return NULL;

	job->vdev = vdev;
	job->engine_idx = engine_idx;
	job->bo_count = bo_count;
	job->done_fence = ivpu_fence_create(vdev);
	if (!job->done_fence) {
		ivpu_warn_ratelimited(vdev, "Failed to create a fence\n");
		goto err_free_job;
	}

	job->file_priv = ivpu_file_priv_get(file_priv);

	trace_job("create", job);
	ivpu_dbg(vdev, JOB, "Job created: ctx %2d engine %d", file_priv->ctx.id, job->engine_idx);
	return job;

err_free_job:
	kfree(job);
	return NULL;
}

static struct ivpu_job *ivpu_job_remove_from_submitted_jobs(struct ivpu_device *vdev, u32 job_id)
{
	struct ivpu_job *job;

	lockdep_assert_held(&vdev->submitted_jobs_lock);

	job = xa_erase(&vdev->submitted_jobs_xa, job_id);
	if (xa_empty(&vdev->submitted_jobs_xa) && job) {
		vdev->busy_time = ktime_add(ktime_sub(ktime_get(), vdev->busy_start_ts),
					    vdev->busy_time);
	}

	return job;
}

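/*
 * Complete a submitted job: signal its done fence and free its resources.
 * Jobs that returned a context violation status are only marked here and
 * their destruction is deferred to the context abort worker.
 */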
static int ivpu_job_signal_and_destroy(struct ivpu_device *vdev, u32 job_id, u32 job_status)
{
	struct ivpu_job *job;

	lockdep_assert_held(&vdev->submitted_jobs_lock);

	job = xa_load(&vdev->submitted_jobs_xa, job_id);
	if (!job)
		return -ENOENT;

	if (job_status == VPU_JSM_STATUS_MVNCI_CONTEXT_VIOLATION_HW) {
		guard(mutex)(&job->file_priv->lock);

		if (job->file_priv->has_mmu_faults)
			return 0;

		/*
		 * Mark context as faulty and defer destruction of the job to jobs abort thread
		 * handler to synchronize between both faults and jobs returning context violation
		 * status and ensure both are handled in the same way
		 */
		job->file_priv->has_mmu_faults = true;
		queue_work(system_wq, &vdev->context_abort_work);
		return 0;
	}

	job = ivpu_job_remove_from_submitted_jobs(vdev, job_id);
	if (!job)
		return -ENOENT;

	if (job->file_priv->has_mmu_faults)
		job_status = DRM_IVPU_JOB_STATUS_ABORTED;

	job->bos[CMD_BUF_IDX]->job_status = job_status;
	dma_fence_signal(job->done_fence);

	trace_job("done", job);
	ivpu_dbg(vdev, JOB, "Job complete: id %3u ctx %2d cmdq_id %u engine %d status 0x%x\n",
		 job->job_id, job->file_priv->ctx.id, job->cmdq_id, job->engine_idx, job_status);

	ivpu_job_destroy(job);
	ivpu_stop_job_timeout_detection(vdev);

	ivpu_rpm_put(vdev);

	if (!xa_empty(&vdev->submitted_jobs_xa))
		ivpu_start_job_timeout_detection(vdev);

	return 0;
}

void ivpu_jobs_abort_all(struct ivpu_device *vdev)
{
	struct ivpu_job *job;
	unsigned long id;

	mutex_lock(&vdev->submitted_jobs_lock);

	xa_for_each(&vdev->submitted_jobs_xa, id, job)
		ivpu_job_signal_and_destroy(vdev, id, DRM_IVPU_JOB_STATUS_ABORTED);

	mutex_unlock(&vdev->submitted_jobs_lock);
}

void ivpu_cmdq_abort_all_jobs(struct ivpu_device *vdev, u32 ctx_id, u32 cmdq_id)
{
	struct ivpu_job *job;
	unsigned long id;

	mutex_lock(&vdev->submitted_jobs_lock);

	xa_for_each(&vdev->submitted_jobs_xa, id, job)
		if (job->file_priv->ctx.id == ctx_id && job->cmdq_id == cmdq_id)
			ivpu_job_signal_and_destroy(vdev, id, DRM_IVPU_JOB_STATUS_ABORTED);

	mutex_unlock(&vdev->submitted_jobs_lock);
}

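/*
 * Push @job to the selected command queue, registering the queue with the
 * firmware if needed, and ring the doorbell to start processing.
 */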
static int ivpu_job_submit(struct ivpu_job *job, u8 priority, u32 cmdq_id)
{
	struct ivpu_file_priv *file_priv = job->file_priv;
	struct ivpu_device *vdev = job->vdev;
	struct ivpu_cmdq *cmdq;
	bool is_first_job;
	int ret;

	ret = ivpu_rpm_get(vdev);
	if (ret < 0)
		return ret;

	mutex_lock(&vdev->submitted_jobs_lock);
	mutex_lock(&file_priv->lock);

	if (cmdq_id == 0)
		cmdq = ivpu_cmdq_acquire_legacy(file_priv, priority);
	else
		cmdq = ivpu_cmdq_acquire(file_priv, cmdq_id);
	if (!cmdq) {
		ivpu_warn_ratelimited(vdev, "Failed to get job queue, ctx %d\n", file_priv->ctx.id);
		ret = -EINVAL;
		goto err_unlock;
	}

	ret = ivpu_cmdq_register(file_priv, cmdq);
	if (ret) {
		ivpu_err(vdev, "Failed to register command queue: %d\n", ret);
		goto err_unlock;
	}

	job->cmdq_id = cmdq->id;

	is_first_job = xa_empty(&vdev->submitted_jobs_xa);
	ret = xa_alloc_cyclic(&vdev->submitted_jobs_xa, &job->job_id, job, file_priv->job_limit,
			      &file_priv->job_id_next, GFP_KERNEL);
	if (ret < 0) {
		ivpu_dbg(vdev, JOB, "Too many active jobs in ctx %d\n",
			 file_priv->ctx.id);
		ret = -EBUSY;
		goto err_unlock;
	}

	ret = ivpu_cmdq_push_job(cmdq, job);
	if (ret)
		goto err_erase_xa;

	ivpu_start_job_timeout_detection(vdev);

	if (unlikely(ivpu_test_mode & IVPU_TEST_MODE_NULL_HW)) {
		cmdq->jobq->header.head = cmdq->jobq->header.tail;
		wmb(); /* Flush WC buffer for jobq header */
	} else {
		ivpu_cmdq_ring_db(vdev, cmdq);
		if (is_first_job)
			vdev->busy_start_ts = ktime_get();
	}

	trace_job("submit", job);
	ivpu_dbg(vdev, JOB, "Job submitted: id %3u ctx %2d cmdq_id %u engine %d prio %d addr 0x%llx next %d\n",
		 job->job_id, file_priv->ctx.id, cmdq->id, job->engine_idx, cmdq->priority,
		 job->cmd_buf_vpu_addr, cmdq->jobq->header.tail);

	mutex_unlock(&file_priv->lock);

	if (unlikely(ivpu_test_mode & IVPU_TEST_MODE_NULL_HW)) {
		ivpu_job_signal_and_destroy(vdev, job->job_id, VPU_JSM_STATUS_SUCCESS);
	}

	mutex_unlock(&vdev->submitted_jobs_lock);

	return 0;

err_erase_xa:
	xa_erase(&vdev->submitted_jobs_xa, job->job_id);
err_unlock:
	mutex_unlock(&file_priv->lock);
	mutex_unlock(&vdev->submitted_jobs_lock);
	ivpu_rpm_put(vdev);
	return ret;
}

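/*
 * Look up, pin and validate the buffer objects referenced by the submit call,
 * then attach the job done fence to their reservation objects.
 */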
static int
ivpu_job_prepare_bos_for_submit(struct drm_file *file, struct ivpu_job *job, u32 *buf_handles,
				u32 buf_count, u32 commands_offset)
{
	struct ivpu_file_priv *file_priv = job->file_priv;
	struct ivpu_device *vdev = file_priv->vdev;
	struct ww_acquire_ctx acquire_ctx;
	enum dma_resv_usage usage;
	struct ivpu_bo *bo;
	int ret;
	u32 i;

	for (i = 0; i < buf_count; i++) {
		struct drm_gem_object *obj = drm_gem_object_lookup(file, buf_handles[i]);

		if (!obj)
			return -ENOENT;

		job->bos[i] = to_ivpu_bo(obj);

		ret = ivpu_bo_pin(job->bos[i]);
		if (ret)
			return ret;
	}

	bo = job->bos[CMD_BUF_IDX];
	if (!dma_resv_test_signaled(bo->base.base.resv, DMA_RESV_USAGE_READ)) {
		ivpu_warn(vdev, "Buffer is already in use\n");
		return -EBUSY;
	}

	if (commands_offset >= ivpu_bo_size(bo)) {
		ivpu_warn(vdev, "Invalid command buffer offset %u\n", commands_offset);
		return -EINVAL;
	}

	job->cmd_buf_vpu_addr = bo->vpu_addr + commands_offset;

	ret = drm_gem_lock_reservations((struct drm_gem_object **)job->bos, buf_count,
					&acquire_ctx);
	if (ret) {
		ivpu_warn(vdev, "Failed to lock reservations: %d\n", ret);
		return ret;
	}

	for (i = 0; i < buf_count; i++) {
		ret = dma_resv_reserve_fences(job->bos[i]->base.base.resv, 1);
		if (ret) {
			ivpu_warn(vdev, "Failed to reserve fences: %d\n", ret);
			goto unlock_reservations;
		}
	}

	for (i = 0; i < buf_count; i++) {
		usage = (i == CMD_BUF_IDX) ? DMA_RESV_USAGE_WRITE : DMA_RESV_USAGE_BOOKKEEP;
		dma_resv_add_fence(job->bos[i]->base.base.resv, job->done_fence, usage);
	}

unlock_reservations:
	drm_gem_unlock_reservations((struct drm_gem_object **)job->bos, buf_count, &acquire_ctx);

	wmb(); /* Flush write combining buffers */

	return ret;
}

static int ivpu_submit(struct drm_file *file, struct ivpu_file_priv *file_priv, u32 cmdq_id,
		       u32 buffer_count, u32 engine, void __user *buffers_ptr, u32 cmds_offset,
		       u8 priority)
{
	struct ivpu_device *vdev = file_priv->vdev;
	struct ivpu_job *job;
	u32 *buf_handles;
	int idx, ret;

	buf_handles = kcalloc(buffer_count, sizeof(u32), GFP_KERNEL);
	if (!buf_handles)
		return -ENOMEM;

	ret = copy_from_user(buf_handles, buffers_ptr, buffer_count * sizeof(u32));
	if (ret) {
		ret = -EFAULT;
		goto err_free_handles;
	}

	if (!drm_dev_enter(&vdev->drm, &idx)) {
		ret = -ENODEV;
		goto err_free_handles;
	}

	ivpu_dbg(vdev, JOB, "Submit ioctl: ctx %u cmdq_id %u buf_count %u\n",
		 file_priv->ctx.id, cmdq_id, buffer_count);

	job = ivpu_job_create(file_priv, engine, buffer_count);
	if (!job) {
		ivpu_err(vdev, "Failed to create job\n");
		ret = -ENOMEM;
		goto err_exit_dev;
	}

	ret = ivpu_job_prepare_bos_for_submit(file, job, buf_handles, buffer_count, cmds_offset);
	if (ret) {
		ivpu_err(vdev, "Failed to prepare job: %d\n", ret);
		goto err_destroy_job;
	}

	down_read(&vdev->pm->reset_lock);
	ret = ivpu_job_submit(job, priority, cmdq_id);
	up_read(&vdev->pm->reset_lock);
	if (ret)
		goto err_signal_fence;

	drm_dev_exit(idx);
	kfree(buf_handles);
	return ret;

err_signal_fence:
	dma_fence_signal(job->done_fence);
err_destroy_job:
	ivpu_job_destroy(job);
err_exit_dev:
	drm_dev_exit(idx);
err_free_handles:
	kfree(buf_handles);
	return ret;
}

int ivpu_submit_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
{
	struct ivpu_file_priv *file_priv = file->driver_priv;
	struct drm_ivpu_submit *args = data;
	u8 priority;

	if (args->engine != DRM_IVPU_ENGINE_COMPUTE)
		return -EINVAL;

	if (args->priority > DRM_IVPU_JOB_PRIORITY_REALTIME)
		return -EINVAL;

	if (args->buffer_count == 0 || args->buffer_count > JOB_MAX_BUFFER_COUNT)
		return -EINVAL;

	if (!IS_ALIGNED(args->commands_offset, 8))
		return -EINVAL;

	if (!file_priv->ctx.id)
		return -EINVAL;

	if (file_priv->has_mmu_faults)
		return -EBADFD;

	priority = ivpu_job_to_jsm_priority(args->priority);

	return ivpu_submit(file, file_priv, 0, args->buffer_count, args->engine,
			   (void __user *)args->buffers_ptr, args->commands_offset, priority);
}

int ivpu_cmdq_submit_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
{
	struct ivpu_file_priv *file_priv = file->driver_priv;
	struct drm_ivpu_cmdq_submit *args = data;

	if (!ivpu_is_capable(file_priv->vdev, DRM_IVPU_CAP_MANAGE_CMDQ))
		return -ENODEV;

	if (args->cmdq_id < IVPU_CMDQ_MIN_ID || args->cmdq_id > IVPU_CMDQ_MAX_ID)
		return -EINVAL;

	if (args->buffer_count == 0 || args->buffer_count > JOB_MAX_BUFFER_COUNT)
		return -EINVAL;

	if (!IS_ALIGNED(args->commands_offset, 8))
		return -EINVAL;

	if (!file_priv->ctx.id)
		return -EINVAL;

	if (file_priv->has_mmu_faults)
		return -EBADFD;

	return ivpu_submit(file, file_priv, args->cmdq_id, args->buffer_count, VPU_ENGINE_COMPUTE,
			   (void __user *)args->buffers_ptr, args->commands_offset, 0);
}

int ivpu_cmdq_create_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
{
	struct ivpu_file_priv *file_priv = file->driver_priv;
	struct ivpu_device *vdev = file_priv->vdev;
	struct drm_ivpu_cmdq_create *args = data;
	struct ivpu_cmdq *cmdq;
	int ret;

	if (!ivpu_is_capable(vdev, DRM_IVPU_CAP_MANAGE_CMDQ))
		return -ENODEV;

	if (args->priority > DRM_IVPU_JOB_PRIORITY_REALTIME)
		return -EINVAL;

	ret = ivpu_rpm_get(vdev);
	if (ret < 0)
		return ret;

	mutex_lock(&file_priv->lock);

	cmdq = ivpu_cmdq_create(file_priv, ivpu_job_to_jsm_priority(args->priority), args->flags);
	if (cmdq)
		args->cmdq_id = cmdq->id;

	mutex_unlock(&file_priv->lock);

	ivpu_rpm_put(vdev);

	return cmdq ? 0 : -ENOMEM;
}

int ivpu_cmdq_destroy_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
{
	struct ivpu_file_priv *file_priv = file->driver_priv;
	struct ivpu_device *vdev = file_priv->vdev;
	struct drm_ivpu_cmdq_destroy *args = data;
	struct ivpu_cmdq *cmdq;
	u32 cmdq_id = 0;
	int ret;

	if (!ivpu_is_capable(vdev, DRM_IVPU_CAP_MANAGE_CMDQ))
		return -ENODEV;

	ret = ivpu_rpm_get(vdev);
	if (ret < 0)
		return ret;

	mutex_lock(&file_priv->lock);

	cmdq = xa_load(&file_priv->cmdq_xa, args->cmdq_id);
	if (!cmdq || cmdq->is_legacy) {
		ret = -ENOENT;
	} else {
		cmdq_id = cmdq->id;
		ivpu_cmdq_destroy(file_priv, cmdq);
		ret = 0;
	}

	mutex_unlock(&file_priv->lock);

	/* Abort any pending jobs only if cmdq was destroyed */
	if (!ret)
		ivpu_cmdq_abort_all_jobs(vdev, file_priv->ctx.id, cmdq_id);

	ivpu_rpm_put(vdev);

	return ret;
}

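/* IPC callback handling job completion notifications sent by the firmware */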
static void
ivpu_job_done_callback(struct ivpu_device *vdev, struct ivpu_ipc_hdr *ipc_hdr,
		       struct vpu_jsm_msg *jsm_msg)
{
	struct vpu_ipc_msg_payload_job_done *payload;

	if (!jsm_msg) {
		ivpu_err(vdev, "IPC message has no JSM payload\n");
		return;
	}

	if (jsm_msg->result != VPU_JSM_STATUS_SUCCESS) {
		ivpu_err(vdev, "Invalid JSM message result: %d\n", jsm_msg->result);
		return;
	}

	payload = (struct vpu_ipc_msg_payload_job_done *)&jsm_msg->payload;

	mutex_lock(&vdev->submitted_jobs_lock);
	ivpu_job_signal_and_destroy(vdev, payload->job_id, payload->job_status);
	mutex_unlock(&vdev->submitted_jobs_lock);
}

void ivpu_job_done_consumer_init(struct ivpu_device *vdev)
{
	ivpu_ipc_consumer_add(vdev, &vdev->job_done_consumer,
			      VPU_IPC_CHAN_JOB_RET, ivpu_job_done_callback);
}

void ivpu_job_done_consumer_fini(struct ivpu_device *vdev)
{
	ivpu_ipc_consumer_del(vdev, &vdev->job_done_consumer);
}

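/*
 * Worker that aborts all contexts which caused MMU faults: unregisters their
 * command queues, discards pending MMU events and, in HW scheduling mode,
 * completes the aborted jobs on behalf of the firmware.
 */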
void ivpu_context_abort_work_fn(struct work_struct *work)
{
	struct ivpu_device *vdev = container_of(work, struct ivpu_device, context_abort_work);
	struct ivpu_file_priv *file_priv;
	struct ivpu_job *job;
	unsigned long ctx_id;
	unsigned long id;

	if (drm_WARN_ON(&vdev->drm, pm_runtime_get_if_active(vdev->drm.dev) <= 0))
		return;

	if (vdev->fw->sched_mode == VPU_SCHEDULING_MODE_HW)
		if (ivpu_jsm_reset_engine(vdev, 0))
			return;

	mutex_lock(&vdev->context_list_lock);
	xa_for_each(&vdev->context_xa, ctx_id, file_priv) {
		if (!file_priv->has_mmu_faults || file_priv->aborted)
			continue;

		mutex_lock(&file_priv->lock);
		ivpu_context_abort_locked(file_priv);
		mutex_unlock(&file_priv->lock);
	}
	mutex_unlock(&vdev->context_list_lock);

	/*
	 * We will not receive new MMU event interrupts until existing events are discarded
	 * however, we want to discard these events only after aborting the faulty context
	 * to avoid generating new faults from that context
	 */
	ivpu_mmu_discard_events(vdev);

	if (vdev->fw->sched_mode != VPU_SCHEDULING_MODE_HW)
		goto runtime_put;

	if (ivpu_jsm_hws_resume_engine(vdev, 0))
		return;
	/*
	 * In hardware scheduling mode NPU already has stopped processing jobs
	 * and won't send us any further notifications, thus we have to free job related resources
	 * and notify userspace
	 */
	mutex_lock(&vdev->submitted_jobs_lock);
	xa_for_each(&vdev->submitted_jobs_xa, id, job)
		if (job->file_priv->aborted)
			ivpu_job_signal_and_destroy(vdev, job->job_id, DRM_IVPU_JOB_STATUS_ABORTED);
	mutex_unlock(&vdev->submitted_jobs_lock);

runtime_put:
	pm_runtime_mark_last_busy(vdev->drm.dev);
	pm_runtime_put_autosuspend(vdev->drm.dev);
}