GitHub Repository: freebsd/freebsd-src
Path: blob/main/sys/arm64/acpica/acpi_iort.c
/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (C) 2018 Marvell International Ltd.
 *
 * Author: Jayachandran C Nair <[email protected]>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "opt_acpi.h"

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/malloc.h>

#include <machine/intr.h>

#include <contrib/dev/acpica/include/acpi.h>
#include <contrib/dev/acpica/include/accommon.h>
#include <contrib/dev/acpica/include/actables.h>

#include <dev/acpica/acpivar.h>

/*
 * Track next XREF available for ITS groups.
 */
static u_int acpi_its_xref = ACPI_MSI_XREF;

/*
 * Some types of IORT nodes have a set of mappings. Each of them maps
 * a range of device IDs [base..end] from the current node to another
 * node. The corresponding device IDs on the destination node start at
 * outbase.
 */
struct iort_map_entry {
        u_int                   base;
        u_int                   end;
        u_int                   outbase;
        u_int                   flags;
        u_int                   out_node_offset;
        struct iort_node        *out_node;
};

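/*
 * Worked example (editor's note, not part of the upstream file): an
 * entry with base = 0x100, end = 0x1ff and outbase = 0x2000 translates
 * input ID 0x123 on this node to ID 0x2023 on out_node, i.e.
 * outid = outbase + (id - base).  If ACPI_IORT_ID_SINGLE_MAPPING is set
 * in flags, the whole input range maps to the single output ID outbase.
 */
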
/*
 * The ITS group node does not have any outgoing mappings. It has a
 * list of GIC ITS blocks which can handle the device ID. We will
 * store the PIC XREF used by the block and the block's proximity
 * data here, so that it can be retrieved together.
 */
struct iort_its_entry {
        u_int                   its_id;
        u_int                   xref;
        int                     pxm;
};

struct iort_named_component
{
        UINT32                  NodeFlags;
        UINT64                  MemoryProperties;
        UINT8                   MemoryAddressLimit;
        char                    DeviceName[32]; /* Path of namespace object */
};

/*
 * IORT node. Each node has some device-specific data depending on the
 * type of the node. The node can also have a set of mappings, or, in
 * the case of ITS group nodes, a set of ITS entries.
 * The nodes are kept in a TAILQ by type.
 */
struct iort_node {
        TAILQ_ENTRY(iort_node)  next;           /* next entry with same type */
        enum AcpiIortNodeType   type;           /* ACPI type */
        u_int                   node_offset;    /* offset in IORT - node ID */
        u_int                   nentries;       /* items in array below */
        u_int                   usecount;       /* for bookkeeping */
        u_int                   revision;       /* node revision */
        union {
                struct iort_map_entry   *mappings;      /* node mappings */
                struct iort_its_entry   *its;           /* ITS IDs array */
        } entries;
        union {
                ACPI_IORT_ROOT_COMPLEX          pci_rc; /* PCI root complex */
                ACPI_IORT_SMMU                  smmu;
                ACPI_IORT_SMMU_V3               smmu_v3;
                struct iort_named_component     named_comp;
        } data;
};

/* Lists for each of the types. */
static TAILQ_HEAD(, iort_node) pci_nodes = TAILQ_HEAD_INITIALIZER(pci_nodes);
static TAILQ_HEAD(, iort_node) smmu_nodes = TAILQ_HEAD_INITIALIZER(smmu_nodes);
static TAILQ_HEAD(, iort_node) its_groups = TAILQ_HEAD_INITIALIZER(its_groups);
static TAILQ_HEAD(, iort_node) named_nodes = TAILQ_HEAD_INITIALIZER(named_nodes);

static int
iort_entry_get_id_mapping_index(struct iort_node *node)
{

        switch(node->type) {
        case ACPI_IORT_NODE_SMMU_V3:
                /* The ID mapping field was added in version 1 */
                if (node->revision < 1)
                        return (-1);

                /*
                 * If all the control interrupts are GSIV based the ID
                 * mapping field is ignored.
                 */
                if (node->data.smmu_v3.EventGsiv != 0 &&
                    node->data.smmu_v3.PriGsiv != 0 &&
                    node->data.smmu_v3.GerrGsiv != 0 &&
                    node->data.smmu_v3.SyncGsiv != 0)
                        return (-1);

                if (node->data.smmu_v3.IdMappingIndex >= node->nentries)
                        return (-1);

                return (node->data.smmu_v3.IdMappingIndex);
        case ACPI_IORT_NODE_PMCG:
                return (0);
        default:
                break;
        }

        return (-1);
}

/*
 * Lookup an ID in the mappings array. If successful, map the input ID
 * to the output ID and return the output node found.
 */
static struct iort_node *
iort_entry_lookup(struct iort_node *node, u_int id, u_int *outid)
{
        struct iort_map_entry *entry;
        int i, id_map;

        id_map = iort_entry_get_id_mapping_index(node);
        entry = node->entries.mappings;
        for (i = 0; i < node->nentries; i++, entry++) {
                if (i == id_map)
                        continue;
                if (entry->base <= id && id <= entry->end)
                        break;
        }
        if (i == node->nentries)
                return (NULL);
        if ((entry->flags & ACPI_IORT_ID_SINGLE_MAPPING) == 0)
                *outid = entry->outbase + (id - entry->base);
        else
                *outid = entry->outbase;
        return (entry->out_node);
}

/*
 * Perform an additional lookup in case of SMMU node and ITS outtype.
 */
static struct iort_node *
iort_smmu_trymap(struct iort_node *node, u_int outtype, u_int *outid)
{
        /* The original node may not have been found. */
        if (!node)
                return (NULL);

        /* Node can be SMMU or ITS. If SMMU, we need another lookup. */
        if (outtype == ACPI_IORT_NODE_ITS_GROUP &&
            (node->type == ACPI_IORT_NODE_SMMU_V3 ||
             node->type == ACPI_IORT_NODE_SMMU)) {
                node = iort_entry_lookup(node, *outid, outid);
                if (node == NULL)
                        return (NULL);
        }

        KASSERT(node->type == outtype, ("mapping fail"));
        return (node);
}

/*
 * Map a PCI RID to an SMMU node or an ITS node, based on outtype.
 */
static struct iort_node *
iort_pci_rc_map(u_int seg, u_int rid, u_int outtype, u_int *outid)
{
        struct iort_node *node, *out_node;
        u_int nxtid;

        out_node = NULL;
        TAILQ_FOREACH(node, &pci_nodes, next) {
                if (node->data.pci_rc.PciSegmentNumber != seg)
                        continue;
                out_node = iort_entry_lookup(node, rid, &nxtid);
                if (out_node != NULL)
                        break;
        }

        out_node = iort_smmu_trymap(out_node, outtype, &nxtid);
        if (out_node)
                *outid = nxtid;

        return (out_node);
}

/*
 * Map a named component node to an SMMU node or an ITS node, based on outtype.
 */
static struct iort_node *
iort_named_comp_map(const char *devname, u_int rid, u_int outtype, u_int *outid)
{
        struct iort_node *node, *out_node;
        u_int nxtid;

        out_node = NULL;
        TAILQ_FOREACH(node, &named_nodes, next) {
                if (strstr(node->data.named_comp.DeviceName, devname) == NULL)
                        continue;
                out_node = iort_entry_lookup(node, rid, &nxtid);
                if (out_node != NULL)
                        break;
        }

        out_node = iort_smmu_trymap(out_node, outtype, &nxtid);
        if (out_node)
                *outid = nxtid;

        return (out_node);
}

#ifdef notyet
/*
 * Not implemented, map a PCIe device to the SMMU it is associated with.
 */
int
acpi_iort_map_smmu(u_int seg, u_int devid, void **smmu, u_int *sid)
{
        /* XXX: convert oref to SMMU device */
        return (ENXIO);
}
#endif

/*
 * Allocate memory for a node's mapping array and copy the ID mappings
 * from the ACPI table entry into it.
 */
static void
iort_copy_data(struct iort_node *node, ACPI_IORT_NODE *node_entry)
{
        ACPI_IORT_ID_MAPPING *map_entry;
        struct iort_map_entry *mapping;
        int i;

        map_entry = ACPI_ADD_PTR(ACPI_IORT_ID_MAPPING, node_entry,
            node_entry->MappingOffset);
        node->nentries = node_entry->MappingCount;
        node->usecount = 0;
        mapping = malloc(sizeof(*mapping) * node->nentries, M_DEVBUF,
            M_WAITOK | M_ZERO);
        node->entries.mappings = mapping;
        for (i = 0; i < node->nentries; i++, mapping++, map_entry++) {
                mapping->base = map_entry->InputBase;
                /*
                 * IdCount means "The number of IDs in the range minus one"
                 * (ARM DEN 0049D). We use <= for comparison against this
                 * field, so don't add one here.
                 */
                mapping->end = map_entry->InputBase + map_entry->IdCount;
                mapping->outbase = map_entry->OutputBase;
                mapping->out_node_offset = map_entry->OutputReference;
                mapping->flags = map_entry->Flags;
                mapping->out_node = NULL;
        }
}

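/*
 * Worked example (editor's illustration, not part of the upstream file):
 * an ACPI_IORT_ID_MAPPING with InputBase = 0 and IdCount = 0xffff covers
 * the 0x10000 input IDs 0x0000..0xffff, so mapping->end becomes 0xffff
 * and the inclusive check "entry->base <= id && id <= entry->end" in
 * iort_entry_lookup() matches exactly that range.
 */
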
/*
 * Allocate and copy an ITS group.
 */
static void
iort_copy_its(struct iort_node *node, ACPI_IORT_NODE *node_entry)
{
        struct iort_its_entry *its;
        ACPI_IORT_ITS_GROUP *itsg_entry;
        UINT32 *id;
        int i;

        itsg_entry = (ACPI_IORT_ITS_GROUP *)node_entry->NodeData;
        node->nentries = itsg_entry->ItsCount;
        node->usecount = 0;
        its = malloc(sizeof(*its) * node->nentries, M_DEVBUF, M_WAITOK | M_ZERO);
        node->entries.its = its;
        id = &itsg_entry->Identifiers[0];
        for (i = 0; i < node->nentries; i++, its++, id++) {
                its->its_id = *id;
                its->pxm = -1;
                its->xref = 0;
        }
}

/*
 * Walk the IORT table and add nodes to the corresponding list.
 */
static void
iort_add_nodes(ACPI_IORT_NODE *node_entry, u_int node_offset)
{
        ACPI_IORT_ROOT_COMPLEX *pci_rc;
        ACPI_IORT_SMMU *smmu;
        ACPI_IORT_SMMU_V3 *smmu_v3;
        ACPI_IORT_NAMED_COMPONENT *named_comp;
        struct iort_node *node;

        node = malloc(sizeof(*node), M_DEVBUF, M_WAITOK | M_ZERO);
        node->type = node_entry->Type;
        node->node_offset = node_offset;
        node->revision = node_entry->Revision;

        /* Copy nodes depending on type. */
        switch(node_entry->Type) {
        case ACPI_IORT_NODE_PCI_ROOT_COMPLEX:
                pci_rc = (ACPI_IORT_ROOT_COMPLEX *)node_entry->NodeData;
                memcpy(&node->data.pci_rc, pci_rc, sizeof(*pci_rc));
                iort_copy_data(node, node_entry);
                TAILQ_INSERT_TAIL(&pci_nodes, node, next);
                break;
        case ACPI_IORT_NODE_SMMU:
                smmu = (ACPI_IORT_SMMU *)node_entry->NodeData;
                memcpy(&node->data.smmu, smmu, sizeof(*smmu));
                iort_copy_data(node, node_entry);
                TAILQ_INSERT_TAIL(&smmu_nodes, node, next);
                break;
        case ACPI_IORT_NODE_SMMU_V3:
                smmu_v3 = (ACPI_IORT_SMMU_V3 *)node_entry->NodeData;
                memcpy(&node->data.smmu_v3, smmu_v3, sizeof(*smmu_v3));
                iort_copy_data(node, node_entry);
                TAILQ_INSERT_TAIL(&smmu_nodes, node, next);
                break;
        case ACPI_IORT_NODE_ITS_GROUP:
                iort_copy_its(node, node_entry);
                TAILQ_INSERT_TAIL(&its_groups, node, next);
                break;
        case ACPI_IORT_NODE_NAMED_COMPONENT:
                named_comp = (ACPI_IORT_NAMED_COMPONENT *)node_entry->NodeData;
                memcpy(&node->data.named_comp, named_comp, sizeof(*named_comp));

                /* Copy the name of the node separately. */
                strncpy(node->data.named_comp.DeviceName,
                    named_comp->DeviceName,
                    sizeof(node->data.named_comp.DeviceName));
                node->data.named_comp.DeviceName[31] = 0;

                iort_copy_data(node, node_entry);
                TAILQ_INSERT_TAIL(&named_nodes, node, next);
                break;
        default:
                printf("ACPI: IORT: Dropping unhandled type %u\n",
                    node_entry->Type);
                free(node, M_DEVBUF);
                break;
        }
}

/*
 * For the given mapping entry, walk through all the possible destination
 * nodes and resolve the output reference.
 */
static void
iort_resolve_node(struct iort_map_entry *entry, int check_smmu)
{
        struct iort_node *node, *np;

        node = NULL;
        if (check_smmu) {
                TAILQ_FOREACH(np, &smmu_nodes, next) {
                        if (entry->out_node_offset == np->node_offset) {
                                node = np;
                                break;
                        }
                }
        }
        if (node == NULL) {
                TAILQ_FOREACH(np, &its_groups, next) {
                        if (entry->out_node_offset == np->node_offset) {
                                node = np;
                                break;
                        }
                }
        }
        if (node != NULL) {
                node->usecount++;
                entry->out_node = node;
        } else {
                printf("ACPI: IORT: Firmware Bug: no mapping for node %u\n",
                    entry->out_node_offset);
        }
}

/*
 * Resolve all output node references to node pointers.
 */
static void
iort_post_process_mappings(void)
{
        struct iort_node *node;
        int i;

        TAILQ_FOREACH(node, &pci_nodes, next)
                for (i = 0; i < node->nentries; i++)
                        iort_resolve_node(&node->entries.mappings[i], TRUE);
        TAILQ_FOREACH(node, &smmu_nodes, next)
                for (i = 0; i < node->nentries; i++)
                        iort_resolve_node(&node->entries.mappings[i], FALSE);
        TAILQ_FOREACH(node, &named_nodes, next)
                for (i = 0; i < node->nentries; i++)
                        iort_resolve_node(&node->entries.mappings[i], TRUE);
}

/*
 * Walk the MADT, assigning PIC xrefs to all ITS entries.
 */
static void
madt_resolve_its_xref(ACPI_SUBTABLE_HEADER *entry, void *arg)
{
        ACPI_MADT_GENERIC_TRANSLATOR *gict;
        struct iort_node *its_node;
        struct iort_its_entry *its_entry;
        u_int xref;
        int i, matches;

        if (entry->Type != ACPI_MADT_TYPE_GENERIC_TRANSLATOR)
                return;

        gict = (ACPI_MADT_GENERIC_TRANSLATOR *)entry;
        matches = 0;
        xref = acpi_its_xref++;
        TAILQ_FOREACH(its_node, &its_groups, next) {
                its_entry = its_node->entries.its;
                for (i = 0; i < its_node->nentries; i++, its_entry++) {
                        if (its_entry->its_id == gict->TranslationId) {
                                its_entry->xref = xref;
                                matches++;
                        }
                }
        }
        if (matches == 0)
                printf("ACPI: IORT: Unused ITS block, ID %u\n",
                    gict->TranslationId);
}

/*
 * Walk the SRAT, assigning a proximity domain to all ITS entries.
 */
static void
srat_resolve_its_pxm(ACPI_SUBTABLE_HEADER *entry, void *arg)
{
        ACPI_SRAT_GIC_ITS_AFFINITY *gicits;
        struct iort_node *its_node;
        struct iort_its_entry *its_entry;
        int *map_counts;
        int i, matches, dom;

        if (entry->Type != ACPI_SRAT_TYPE_GIC_ITS_AFFINITY)
                return;

        matches = 0;
        map_counts = arg;
        gicits = (ACPI_SRAT_GIC_ITS_AFFINITY *)entry;
        dom = acpi_map_pxm_to_vm_domainid(gicits->ProximityDomain);

        /*
         * Catch firmware and config errors. map_counts keeps a
         * count of ProximityDomain values mapping to a domain ID.
         */
#if MAXMEMDOM > 1
        if (dom == -1)
                printf("Firmware Error: Proximity Domain %d could not be"
                    " mapped for GIC ITS ID %d!\n",
                    gicits->ProximityDomain, gicits->ItsId);
#endif
        /* Use dom + 1 as the index to handle the case where dom == -1. */
        i = ++map_counts[dom + 1];
        if (i > 1) {
#ifdef NUMA
                if (dom != -1)
                        printf("ERROR: Multiple Proximity Domains map to the"
                            " same NUMA domain %d!\n", dom);
#else
                printf("WARNING: multiple Proximity Domains in SRAT but NUMA"
                    " NOT enabled!\n");
#endif
        }
        TAILQ_FOREACH(its_node, &its_groups, next) {
                its_entry = its_node->entries.its;
                for (i = 0; i < its_node->nentries; i++, its_entry++) {
                        if (its_entry->its_id == gicits->ItsId) {
                                its_entry->pxm = dom;
                                matches++;
                        }
                }
        }
        if (matches == 0)
                printf("ACPI: IORT: ITS block %u in SRAT not found in IORT!\n",
                    gicits->ItsId);
}

/*
 * Cross-check the ITS ID with the MADT and (if available) the SRAT.
 */
static int
iort_post_process_its(void)
{
        ACPI_TABLE_MADT *madt;
        ACPI_TABLE_SRAT *srat;
        vm_paddr_t madt_pa, srat_pa;
        int map_counts[MAXMEMDOM + 1] = { 0 };

        /* Check ITS blocks in the MADT. */
        madt_pa = acpi_find_table(ACPI_SIG_MADT);
        KASSERT(madt_pa != 0, ("no MADT!"));
        madt = acpi_map_table(madt_pa, ACPI_SIG_MADT);
        KASSERT(madt != NULL, ("can't map MADT!"));
        acpi_walk_subtables(madt + 1, (char *)madt + madt->Header.Length,
            madt_resolve_its_xref, NULL);
        acpi_unmap_table(madt);

        /* Get proximity information if available. */
        srat_pa = acpi_find_table(ACPI_SIG_SRAT);
        if (srat_pa != 0) {
                srat = acpi_map_table(srat_pa, ACPI_SIG_SRAT);
                KASSERT(srat != NULL, ("can't map SRAT!"));
                acpi_walk_subtables(srat + 1, (char *)srat + srat->Header.Length,
                    srat_resolve_its_pxm, map_counts);
                acpi_unmap_table(srat);
        }
        return (0);
}

/*
 * Find, parse, and save IO Remapping Table ("IORT").
 */
static int
acpi_parse_iort(void *dummy __unused)
{
        ACPI_TABLE_IORT *iort;
        ACPI_IORT_NODE *node_entry;
        vm_paddr_t iort_pa;
        u_int node_offset;

        iort_pa = acpi_find_table(ACPI_SIG_IORT);
        if (iort_pa == 0)
                return (ENXIO);

        iort = acpi_map_table(iort_pa, ACPI_SIG_IORT);
        if (iort == NULL) {
                printf("ACPI: Unable to map the IORT table!\n");
                return (ENXIO);
        }
        for (node_offset = iort->NodeOffset;
            node_offset < iort->Header.Length;
            node_offset += node_entry->Length) {
                node_entry = ACPI_ADD_PTR(ACPI_IORT_NODE, iort, node_offset);
                iort_add_nodes(node_entry, node_offset);
        }
        acpi_unmap_table(iort);
        iort_post_process_mappings();
        iort_post_process_its();
        return (0);
}
SYSINIT(acpi_parse_iort, SI_SUB_DRIVERS, SI_ORDER_FIRST, acpi_parse_iort, NULL);

/*
 * Provide ITS ID to PIC xref mapping.
 */
int
acpi_iort_its_lookup(u_int its_id, u_int *xref, int *pxm)
{
        struct iort_node *its_node;
        struct iort_its_entry *its_entry;
        int i;

        TAILQ_FOREACH(its_node, &its_groups, next) {
                its_entry = its_node->entries.its;
                for (i = 0; i < its_node->nentries; i++, its_entry++) {
                        if (its_entry->its_id == its_id) {
                                *xref = its_entry->xref;
                                *pxm = its_entry->pxm;
                                return (0);
                        }
                }
        }
        return (ENOENT);
}

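/*
 * Usage sketch (editor's illustration only, using just the signature
 * above): a caller that knows an ITS ID from the MADT could recover the
 * registered PIC xref and NUMA proximity like this; the variable names
 * are hypothetical.
 *
 *      u_int xref;
 *      int pxm;
 *
 *      if (acpi_iort_its_lookup(its_id, &xref, &pxm) == 0) {
 *              // xref identifies the interrupt controller (PIC),
 *              // pxm is the proximity domain or -1 if unknown.
 *      }
 */
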
/*
 * Find the mapping for a PCIe device given its segment and device ID
 * (RID). Returns the XREF for MSI interrupt setup and the device ID to
 * use for the interrupt setup.
 */
int
acpi_iort_map_pci_msi(u_int seg, u_int rid, u_int *xref, u_int *devid)
{
        struct iort_node *node;

        node = iort_pci_rc_map(seg, rid, ACPI_IORT_NODE_ITS_GROUP, devid);
        if (node == NULL)
                return (ENOENT);

        /* This should be an ITS node. */
        KASSERT(node->type == ACPI_IORT_NODE_ITS_GROUP, ("bad group"));

        /* Return the first node; we don't handle more than that now. */
        *xref = node->entries.its[0].xref;
        return (0);
}

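/*
 * Usage sketch (editor's illustration, not part of the upstream file):
 * a hypothetical caller on PCI segment 0 resolving which ITS serves
 * RID 0x0210 (bus 2, slot 2, function 0) and which device ID to use:
 *
 *      u_int xref, devid;
 *
 *      if (acpi_iort_map_pci_msi(0, 0x0210, &xref, &devid) == 0)
 *              printf("RID 0x210 -> ITS xref %u, ITS device ID 0x%x\n",
 *                  xref, devid);
 */
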
int
acpi_iort_map_pci_smmuv3(u_int seg, u_int rid, uint64_t *xref, u_int *sid)
{
        ACPI_IORT_SMMU_V3 *smmu;
        struct iort_node *node;

        node = iort_pci_rc_map(seg, rid, ACPI_IORT_NODE_SMMU_V3, sid);
        if (node == NULL)
                return (ENOENT);

        /* This should be an SMMU node. */
        KASSERT(node->type == ACPI_IORT_NODE_SMMU_V3, ("bad node"));

        smmu = (ACPI_IORT_SMMU_V3 *)&node->data.smmu_v3;
        *xref = smmu->BaseAddress;

        return (0);
}

/*
 * Find the mapping for a named node given its name and resource ID, and
 * return the XREF for MSI interrupt setup and the device ID to use for
 * the interrupt setup.
 */
int
acpi_iort_map_named_msi(const char *devname, u_int rid, u_int *xref,
    u_int *devid)
{
        struct iort_node *node;

        node = iort_named_comp_map(devname, rid, ACPI_IORT_NODE_ITS_GROUP,
            devid);
        if (node == NULL)
                return (ENOENT);

        /* This should be an ITS node. */
        KASSERT(node->type == ACPI_IORT_NODE_ITS_GROUP, ("bad group"));

        /* Return the first node; we don't handle more than that now. */
        *xref = node->entries.its[0].xref;
        return (0);
}

int
acpi_iort_map_named_smmuv3(const char *devname, u_int rid, uint64_t *xref,
    u_int *devid)
{
        ACPI_IORT_SMMU_V3 *smmu;
        struct iort_node *node;

        node = iort_named_comp_map(devname, rid, ACPI_IORT_NODE_SMMU_V3, devid);
        if (node == NULL)
                return (ENOENT);

        /* This should be an SMMU node. */
        KASSERT(node->type == ACPI_IORT_NODE_SMMU_V3, ("bad node"));

        smmu = (ACPI_IORT_SMMU_V3 *)&node->data.smmu_v3;
        *xref = smmu->BaseAddress;

        return (0);
}
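
/*
 * Usage sketch (editor's illustration, not part of the upstream file):
 * a platform device whose ACPI namespace path is, say, "\_SB.MMC0" (a
 * hypothetical named component) could resolve its MSI routing with:
 *
 *      u_int xref, devid;
 *
 *      if (acpi_iort_map_named_msi("\\_SB.MMC0", 0, &xref, &devid) == 0)
 *              printf("named component -> ITS xref %u, device ID 0x%x\n",
 *                  xref, devid);
 *
 * Note that iort_named_comp_map() matches with strstr(), so the name
 * passed in only needs to appear as a substring of the namespace path
 * recorded in the IORT.
 */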