GitHub Repository: torvalds/linux
Path: blob/master/drivers/gpu/host1x/context.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2021, NVIDIA Corporation.
 */

#include <linux/device.h>
#include <linux/kref.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/pid.h>
#include <linux/slab.h>

#include "context.h"
#include "dev.h"

static void host1x_memory_context_release(struct device *dev)
{
        /* context device is freed in host1x_memory_context_list_free() */
}
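
/*
 * Set up the context device list for @host1x: one context device is created
 * per "iommu-map" entry in the host1x device tree node, each attached to the
 * IOMMU with its own stream ID. Returns 0 (with an empty list) when no
 * "iommu-map" property is present, or a negative errno on failure.
 */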
int host1x_memory_context_list_init(struct host1x *host1x)
{
        struct host1x_memory_context_list *cdl = &host1x->context_list;
        struct device_node *node = host1x->dev->of_node;
        struct host1x_memory_context *ctx;
        unsigned int i;
        int err;

        cdl->devs = NULL;
        cdl->len = 0;
        mutex_init(&cdl->lock);
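
        /*
         * Each "iommu-map" entry consists of four u32 cells, so the number
         * of context devices is the element count divided by four. A missing
         * property simply means context isolation is unsupported.
         */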
        err = of_property_count_u32_elems(node, "iommu-map");
        if (err < 0)
                return 0;

        cdl->len = err / 4;
        cdl->devs = kcalloc(cdl->len, sizeof(*cdl->devs), GFP_KERNEL);
        if (!cdl->devs)
                return -ENOMEM;

        for (i = 0; i < cdl->len; i++) {
                ctx = &cdl->devs[i];

                ctx->host = host1x;

                device_initialize(&ctx->dev);

                /*
                 * Due to an issue with T194 NVENC, only 38 bits can be used.
                 * Anyway, 256GiB of IOVA ought to be enough for anyone.
                 */
                ctx->dma_mask = DMA_BIT_MASK(38);
                ctx->dev.dma_mask = &ctx->dma_mask;
                ctx->dev.coherent_dma_mask = ctx->dma_mask;
                dev_set_name(&ctx->dev, "host1x-ctx.%d", i);
                ctx->dev.bus = &host1x_context_device_bus_type;
                ctx->dev.parent = host1x->dev;
                ctx->dev.release = host1x_memory_context_release;

                ctx->dev.dma_parms = &ctx->dma_parms;
                dma_set_max_seg_size(&ctx->dev, UINT_MAX);

                err = device_add(&ctx->dev);
                if (err) {
                        dev_err(host1x->dev, "could not add context device %d: %d\n", i, err);
                        put_device(&ctx->dev);
                        goto unreg_devices;
                }

                err = of_dma_configure_id(&ctx->dev, node, true, &i);
                if (err) {
                        dev_err(host1x->dev, "IOMMU configuration failed for context device %d: %d\n",
                                i, err);
                        device_unregister(&ctx->dev);
                        goto unreg_devices;
                }

                if (!tegra_dev_iommu_get_stream_id(&ctx->dev, &ctx->stream_id) ||
                    !device_iommu_mapped(&ctx->dev)) {
                        dev_err(host1x->dev, "Context device %d has no IOMMU!\n", i);
                        device_unregister(&ctx->dev);

                        /*
                         * This means that if IOMMU is disabled but context devices
                         * are defined in the device tree, Host1x will fail to probe.
                         * That's probably OK in this time and age.
                         */
                        err = -EINVAL;

                        goto unreg_devices;
                }
        }

        return 0;
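
/*
 * Unwind: context devices 0..i-1 were fully registered; the device that
 * failed has already been cleaned up in its error path above.
 */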
unreg_devices:
        while (i--)
                device_unregister(&cdl->devs[i].dev);

        kfree(cdl->devs);
        cdl->devs = NULL;
        cdl->len = 0;

        return err;
}
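
/*
 * Unregister all context devices in @cdl and free the backing array.
 */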
void host1x_memory_context_list_free(struct host1x_memory_context_list *cdl)
{
        unsigned int i;

        for (i = 0; i < cdl->len; i++)
                device_unregister(&cdl->devs[i].dev);

        kfree(cdl->devs);
        cdl->len = 0;
}
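
/*
 * Allocate a memory context for @pid on the same IOMMU instance as @dev.
 * If @pid already owns a matching context, an additional reference to it is
 * returned; otherwise the first unowned matching context is claimed. Returns
 * ERR_PTR(-EOPNOTSUPP) when context devices are not available and
 * ERR_PTR(-EBUSY) when all matching contexts are in use.
 */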
struct host1x_memory_context *host1x_memory_context_alloc(struct host1x *host1x,
                                                           struct device *dev,
                                                           struct pid *pid)
{
        struct host1x_memory_context_list *cdl = &host1x->context_list;
        struct host1x_memory_context *free = NULL;
        int i;

        if (!cdl->len)
                return ERR_PTR(-EOPNOTSUPP);

        mutex_lock(&cdl->lock);
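
        /*
         * Reuse a context already owned by @pid behind the same IOMMU as
         * @dev if there is one; otherwise remember the first free match.
         */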
        for (i = 0; i < cdl->len; i++) {
                struct host1x_memory_context *cd = &cdl->devs[i];

                if (cd->dev.iommu->iommu_dev != dev->iommu->iommu_dev)
                        continue;

                if (cd->owner == pid) {
                        refcount_inc(&cd->ref);
                        mutex_unlock(&cdl->lock);
                        return cd;
                } else if (!cd->owner && !free) {
                        free = cd;
                }
        }

        if (!free) {
                mutex_unlock(&cdl->lock);
                return ERR_PTR(-EBUSY);
        }

        refcount_set(&free->ref, 1);
        free->owner = get_pid(pid);

        mutex_unlock(&cdl->lock);

        return free;
}
EXPORT_SYMBOL_GPL(host1x_memory_context_alloc);
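
/*
 * Take an additional reference on a previously allocated memory context.
 */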
void host1x_memory_context_get(struct host1x_memory_context *cd)
{
        refcount_inc(&cd->ref);
}
EXPORT_SYMBOL_GPL(host1x_memory_context_get);
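
/*
 * Drop a reference on @cd. When the last reference is dropped, the owning
 * PID reference is released and the context becomes available for reuse.
 */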
void host1x_memory_context_put(struct host1x_memory_context *cd)
{
        struct host1x_memory_context_list *cdl = &cd->host->context_list;

        if (refcount_dec_and_mutex_lock(&cd->ref, &cdl->lock)) {
                put_pid(cd->owner);
                cd->owner = NULL;
                mutex_unlock(&cdl->lock);
        }
}
EXPORT_SYMBOL_GPL(host1x_memory_context_put);
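
/*
 * Illustrative sketch only (not part of the upstream file): a client driver
 * might use this API roughly as follows, programming the returned stream ID
 * into its hardware before submitting work and dropping the context when it
 * is no longer needed. The "client" variable below is hypothetical.
 *
 *      struct host1x_memory_context *ctx;
 *
 *      ctx = host1x_memory_context_alloc(host1x, client->dev,
 *                                        get_task_pid(current, PIDTYPE_TGID));
 *      if (IS_ERR(ctx))
 *              return PTR_ERR(ctx);
 *
 *      ... program ctx->stream_id into the client hardware ...
 *
 *      host1x_memory_context_put(ctx);
 */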