Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
torvalds
GitHub Repository: torvalds/linux
Path: blob/master/drivers/accel/amdxdna/amdxdna_iommu.c
170891 views
1
// SPDX-License-Identifier: GPL-2.0
2
/*
3
* Copyright (C) 2025, Advanced Micro Devices, Inc.
4
*/
5
6
#include <drm/amdxdna_accel.h>
7
#include <linux/iommu.h>
8
#include <linux/iova.h>
9
10
#include "amdxdna_gem.h"
11
#include "amdxdna_pci_drv.h"
12
13
static bool force_iova;
14
module_param(force_iova, bool, 0600);
15
MODULE_PARM_DESC(force_iova, "Force use IOVA (Default false)");
16
17
static struct iova *amdxdna_iommu_alloc_iova(struct amdxdna_dev *xdna,
18
size_t size,
19
dma_addr_t *dma_addr,
20
bool size_aligned)
21
{
22
unsigned long shift, end;
23
struct iova *iova;
24
25
end = xdna->domain->geometry.aperture_end;
26
shift = iova_shift(&xdna->iovad);
27
size = iova_align(&xdna->iovad, size);
28
29
iova = alloc_iova(&xdna->iovad, size >> shift, end >> shift, size_aligned);
30
if (!iova)
31
return ERR_PTR(-ENOMEM);
32
33
*dma_addr = iova_dma_addr(&xdna->iovad, iova);
34
35
return iova;
36
}
37
38
int amdxdna_iommu_map_bo(struct amdxdna_dev *xdna, struct amdxdna_gem_obj *abo)
39
{
40
struct sg_table *sgt;
41
dma_addr_t dma_addr;
42
struct iova *iova;
43
size_t size;
44
45
if (abo->type != AMDXDNA_BO_DEV_HEAP && abo->type != AMDXDNA_BO_SHMEM)
46
return 0;
47
48
sgt = drm_gem_shmem_get_pages_sgt(&abo->base);
49
if (IS_ERR(sgt)) {
50
XDNA_ERR(xdna, "Get sgt failed, ret %ld", PTR_ERR(sgt));
51
return PTR_ERR(sgt);
52
}
53
54
if (!sgt->orig_nents || !sg_page(sgt->sgl)) {
55
XDNA_ERR(xdna, "sgl is zero length or not page backed");
56
return -EOPNOTSUPP;
57
}
58
59
iova = amdxdna_iommu_alloc_iova(xdna, abo->mem.size, &dma_addr,
60
(abo->type == AMDXDNA_BO_DEV_HEAP));
61
if (IS_ERR(iova)) {
62
XDNA_ERR(xdna, "Alloc iova failed, ret %ld", PTR_ERR(iova));
63
return PTR_ERR(iova);
64
}
65
66
size = iommu_map_sgtable(xdna->domain, dma_addr, sgt,
67
IOMMU_READ | IOMMU_WRITE);
68
if (size < abo->mem.size) {
69
__free_iova(&xdna->iovad, iova);
70
return -ENXIO;
71
}
72
73
abo->mem.dma_addr = dma_addr;
74
75
return 0;
76
}
77
78
void amdxdna_iommu_unmap_bo(struct amdxdna_dev *xdna, struct amdxdna_gem_obj *abo)
79
{
80
size_t size;
81
82
if (abo->mem.dma_addr == AMDXDNA_INVALID_ADDR)
83
return;
84
85
size = iova_align(&xdna->iovad, abo->mem.size);
86
iommu_unmap(xdna->domain, abo->mem.dma_addr, size);
87
free_iova(&xdna->iovad, iova_pfn(&xdna->iovad, abo->mem.dma_addr));
88
abo->mem.dma_addr = AMDXDNA_INVALID_ADDR;
89
}
90
91
/*
 * amdxdna_iommu_alloc() - Allocate kernel pages and map them at an IOVA.
 * @xdna: xdna device owning the iommu and iova domains
 * @size: requested size in bytes (rounded up to whole pages)
 * @dma_addr: out parameter receiving the device-visible address
 *
 * Returns the CPU virtual address on success or an ERR_PTR(). Pair with
 * amdxdna_iommu_free().
 */
void *amdxdna_iommu_alloc(struct amdxdna_dev *xdna, size_t size, dma_addr_t *dma_addr)
{
	struct iova *iova;
	void *cpu_addr;
	int ret;

	iova = amdxdna_iommu_alloc_iova(xdna, size, dma_addr, true);
	if (IS_ERR(iova)) {
		XDNA_ERR(xdna, "Alloc iova failed, ret %ld", PTR_ERR(iova));
		return iova;
	}

	cpu_addr = (void *)__get_free_pages(GFP_KERNEL, get_order(size));
	if (!cpu_addr) {
		ret = -ENOMEM;
		goto free_iova;
	}

	ret = iommu_map(xdna->domain, *dma_addr, virt_to_phys(cpu_addr),
			iova_align(&xdna->iovad, size),
			IOMMU_READ | IOMMU_WRITE, GFP_KERNEL);
	if (ret)
		goto free_pages;

	return cpu_addr;

free_pages:
	/* Previously this path jumped straight to free_iova and leaked
	 * the pages allocated above.
	 */
	free_pages((unsigned long)cpu_addr, get_order(size));
free_iova:
	__free_iova(&xdna->iovad, iova);
	return ERR_PTR(ret);
}
/*
 * Undo amdxdna_iommu_alloc(): unmap the IOVA range, return it to the
 * allocator, and release the backing pages. @size must match the size
 * passed at allocation time.
 */
void amdxdna_iommu_free(struct amdxdna_dev *xdna, size_t size,
			void *cpu_addr, dma_addr_t dma_addr)
{
	size_t map_len = iova_align(&xdna->iovad, size);

	iommu_unmap(xdna->domain, dma_addr, map_len);
	free_iova(&xdna->iovad, iova_pfn(&xdna->iovad, dma_addr));
	free_pages((unsigned long)cpu_addr, get_order(size));
}
int amdxdna_iommu_init(struct amdxdna_dev *xdna)
131
{
132
unsigned long order;
133
int ret;
134
135
xdna->group = iommu_group_get(xdna->ddev.dev);
136
if (!xdna->group || !force_iova)
137
return 0;
138
139
XDNA_WARN(xdna, "Enabled force_iova mode.");
140
xdna->domain = iommu_paging_domain_alloc_flags(xdna->ddev.dev,
141
IOMMU_HWPT_ALLOC_PASID);
142
if (IS_ERR(xdna->domain)) {
143
XDNA_ERR(xdna, "Failed to alloc iommu domain");
144
ret = PTR_ERR(xdna->domain);
145
goto put_group;
146
}
147
148
ret = iova_cache_get();
149
if (ret)
150
goto free_domain;
151
152
order = __ffs(xdna->domain->pgsize_bitmap);
153
init_iova_domain(&xdna->iovad, 1UL << order, 0);
154
155
ret = iommu_attach_group(xdna->domain, xdna->group);
156
if (ret)
157
goto put_iova;
158
159
return 0;
160
161
put_iova:
162
put_iova_domain(&xdna->iovad);
163
iova_cache_put();
164
free_domain:
165
iommu_domain_free(xdna->domain);
166
put_group:
167
iommu_group_put(xdna->group);
168
xdna->domain = NULL;
169
170
return ret;
171
}
172
173
void amdxdna_iommu_fini(struct amdxdna_dev *xdna)
174
{
175
if (xdna->domain) {
176
iommu_detach_group(xdna->domain, xdna->group);
177
put_iova_domain(&xdna->iovad);
178
iova_cache_put();
179
iommu_domain_free(xdna->domain);
180
}
181
182
if (xdna->group)
183
iommu_group_put(xdna->group);
184
}
185
186