GitHub Repository: torvalds/linux
Path: blob/master/drivers/gpu/host1x/fence.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Syncpoint dma_fence implementation
 *
 * Copyright (c) 2020, NVIDIA Corporation.
 */

#include <linux/dma-fence.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/sync_file.h>

#include "fence.h"
#include "intr.h"
#include "syncpt.h"

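/*
 * Driver and timeline names reported to the dma_fence core; they show up
 * in debug output such as debugfs and sync_file status queries.
 */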
static const char *host1x_syncpt_fence_get_driver_name(struct dma_fence *f)
{
	return "host1x";
}

static const char *host1x_syncpt_fence_get_timeline_name(struct dma_fence *f)
{
	return "syncpoint";
}

static struct host1x_syncpt_fence *to_host1x_fence(struct dma_fence *f)
{
	return container_of(f, struct host1x_syncpt_fence, base);
}

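/*
 * .enable_signaling callback, called by the dma_fence core with the fence
 * lock held. Returns false if the syncpoint has already passed the
 * threshold (the core then signals the fence immediately); otherwise the
 * fence is registered with the host1x interrupt path and, if a timeout was
 * requested, a 30 second delayed work is armed as a safety net.
 */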
static bool host1x_syncpt_fence_enable_signaling(struct dma_fence *f)
{
	struct host1x_syncpt_fence *sf = to_host1x_fence(f);

	if (host1x_syncpt_is_expired(sf->sp, sf->threshold))
		return false;

	/* Reference for interrupt path. */
	dma_fence_get(f);

	/*
	 * The dma_fence framework requires the fence driver to keep a
	 * reference to any fences for which 'enable_signaling' has been
	 * called (and that have not been signalled).
	 *
	 * We cannot currently always guarantee that all fences get signalled
	 * or cancelled. As such, for such situations, set up a timeout, so
	 * that long-lasting fences will get reaped eventually.
	 */
	if (sf->timeout) {
		/* Reference for timeout path. */
		dma_fence_get(f);
		schedule_delayed_work(&sf->timeout_work, msecs_to_jiffies(30000));
	}

	host1x_intr_add_fence_locked(sf->sp->host, sf);

	/*
	 * The fence may get signalled at any time after the above call,
	 * so we need to initialize all state used by signalling
	 * before it.
	 */

	return true;
}

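/*
 * Only the two mandatory name callbacks plus .enable_signaling are needed;
 * the interrupt and timeout paths call dma_fence_signal*() directly.
 */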
static const struct dma_fence_ops host1x_syncpt_fence_ops = {
	.get_driver_name = host1x_syncpt_fence_get_driver_name,
	.get_timeline_name = host1x_syncpt_fence_get_timeline_name,
	.enable_signaling = host1x_syncpt_fence_enable_signaling,
};

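/*
 * Called from the host1x interrupt path once the syncpoint has reached the
 * fence's threshold. The atomic 'signaling' flag resolves the race with
 * do_fence_timeout(): whichever path sets it first completes the fence,
 * the other only drops its reference.
 */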
void host1x_fence_signal(struct host1x_syncpt_fence *f)
{
	if (atomic_xchg(&f->signaling, 1)) {
		/*
		 * Already on timeout path, but we removed the fence before
		 * timeout path could, so drop interrupt path reference.
		 */
		dma_fence_put(&f->base);
		return;
	}

	if (f->timeout && cancel_delayed_work(&f->timeout_work)) {
		/*
		 * We know that the timeout path will not be entered.
		 * Safe to drop the timeout path's reference now.
		 */
		dma_fence_put(&f->base);
	}

	dma_fence_signal_locked(&f->base);
	dma_fence_put(&f->base);
}

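/*
 * Delayed-work handler armed in enable_signaling: if the fence is still
 * pending when the work fires, remove it from the interrupt queue and
 * complete it with -ETIMEDOUT. host1x_fence_cancel() reuses this path by
 * scheduling the work with zero delay.
 */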
static void do_fence_timeout(struct work_struct *work)
{
	struct delayed_work *dwork = (struct delayed_work *)work;
	struct host1x_syncpt_fence *f =
		container_of(dwork, struct host1x_syncpt_fence, timeout_work);

	if (atomic_xchg(&f->signaling, 1)) {
		/* Already on interrupt path, drop timeout path reference if any. */
		if (f->timeout)
			dma_fence_put(&f->base);
		return;
	}

	if (host1x_intr_remove_fence(f->sp->host, f)) {
		/*
		 * Managed to remove fence from queue, so it's safe to drop
		 * the interrupt path's reference.
		 */
		dma_fence_put(&f->base);
	}

	dma_fence_set_error(&f->base, -ETIMEDOUT);
	dma_fence_signal(&f->base);
	if (f->timeout)
		dma_fence_put(&f->base);
}

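/*
 * Allocate and initialize a dma_fence that signals once syncpoint @sp
 * reaches @threshold. If @timeout is true, the fence is force-completed
 * with -ETIMEDOUT should it still be pending 30 seconds after signaling
 * is enabled. Returns the new fence or ERR_PTR(-ENOMEM).
 */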
struct dma_fence *host1x_fence_create(struct host1x_syncpt *sp, u32 threshold,
				       bool timeout)
{
	struct host1x_syncpt_fence *fence;

	fence = kzalloc(sizeof(*fence), GFP_KERNEL);
	if (!fence)
		return ERR_PTR(-ENOMEM);

	fence->sp = sp;
	fence->threshold = threshold;
	fence->timeout = timeout;

	dma_fence_init(&fence->base, &host1x_syncpt_fence_ops, &sp->fences.lock,
		       dma_fence_context_alloc(1), 0);

	INIT_DELAYED_WORK(&fence->timeout_work, do_fence_timeout);

	return &fence->base;
}
EXPORT_SYMBOL(host1x_fence_create);

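/*
 * Force a pending fence down the timeout path right away and wait for the
 * work to finish, so the fence is signalled (with -ETIMEDOUT, if it had
 * not completed already) and its references are dropped before returning.
 */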
void host1x_fence_cancel(struct dma_fence *f)
{
	struct host1x_syncpt_fence *sf = to_host1x_fence(f);

	schedule_delayed_work(&sf->timeout_work, 0);
	flush_delayed_work(&sf->timeout_work);
}
EXPORT_SYMBOL(host1x_fence_cancel);
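A minimal usage sketch, not part of fence.c: it assumes a caller (for example a client driver) that already owns a struct host1x_syncpt and that host1x_fence_create() is visible through the public <linux/host1x.h> header. The function name and the one-second wait are illustrative only; the code uses just the exported host1x_fence_create() from this file together with the generic dma_fence wait helpers.

#include <linux/dma-fence.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/host1x.h>
#include <linux/jiffies.h>

/* Hypothetical helper: wait up to one second for 'sp' to reach 'threshold'. */
static int example_wait_for_syncpt(struct host1x_syncpt *sp, u32 threshold)
{
	struct dma_fence *fence;
	long ret;

	/* Request a timeout so an unsignalled fence is reaped eventually. */
	fence = host1x_fence_create(sp, threshold, true);
	if (IS_ERR(fence))
		return PTR_ERR(fence);

	/* Interruptible wait; returns 0 on timeout, negative on error. */
	ret = dma_fence_wait_timeout(fence, true, msecs_to_jiffies(1000));
	dma_fence_put(fence);

	if (ret == 0)
		return -ETIMEDOUT;
	if (ret < 0)
		return ret;

	return 0;
}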