Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
torvalds
GitHub Repository: torvalds/linux
Path: blob/master/drivers/cxl/core/regs.c
26282 views
1
// SPDX-License-Identifier: GPL-2.0-only
2
/* Copyright(c) 2020 Intel Corporation. */
3
#include <linux/io-64-nonatomic-lo-hi.h>
4
#include <linux/device.h>
5
#include <linux/slab.h>
6
#include <linux/pci.h>
7
#include <cxlmem.h>
8
#include <cxlpci.h>
9
#include <pmu.h>
10
11
#include "core.h"
12
13
/**
14
* DOC: cxl registers
15
*
16
* CXL device capabilities are enumerated by PCI DVSEC (Designated
17
* Vendor-specific) and / or descriptors provided by platform firmware.
18
* They can be defined as a set like the device and component registers
19
* mandated by CXL Section 8.1.12.2 Memory Device PCIe Capabilities and
20
* Extended Capabilities, or they can be individual capabilities
21
* appended to bridged and endpoint devices.
22
*
23
* Provide common infrastructure for enumerating and mapping these
24
* discrete capabilities.
25
*/
26
27
/**
28
* cxl_probe_component_regs() - Detect CXL Component register blocks
29
* @dev: Host device of the @base mapping
30
* @base: Mapping containing the HDM Decoder Capability Header
31
* @map: Map object describing the register block information found
32
*
33
* See CXL 2.0 8.2.4 Component Register Layout and Definition
34
* See CXL 2.0 8.2.5.5 CXL Device Register Interface
35
*
36
* Probe for component register information and return it in map object.
37
*/
38
void cxl_probe_component_regs(struct device *dev, void __iomem *base,
39
struct cxl_component_reg_map *map)
40
{
41
int cap, cap_count;
42
u32 cap_array;
43
44
*map = (struct cxl_component_reg_map) { 0 };
45
46
/*
47
* CXL.cache and CXL.mem registers are at offset 0x1000 as defined in
48
* CXL 2.0 8.2.4 Table 141.
49
*/
50
base += CXL_CM_OFFSET;
51
52
cap_array = readl(base + CXL_CM_CAP_HDR_OFFSET);
53
54
if (FIELD_GET(CXL_CM_CAP_HDR_ID_MASK, cap_array) != CM_CAP_HDR_CAP_ID) {
55
dev_dbg(dev,
56
"Couldn't locate the CXL.cache and CXL.mem capability array header.\n");
57
return;
58
}
59
60
/* It's assumed that future versions will be backward compatible */
61
cap_count = FIELD_GET(CXL_CM_CAP_HDR_ARRAY_SIZE_MASK, cap_array);
62
63
for (cap = 1; cap <= cap_count; cap++) {
64
void __iomem *register_block;
65
struct cxl_reg_map *rmap;
66
u16 cap_id, offset;
67
u32 length, hdr;
68
69
hdr = readl(base + cap * 0x4);
70
71
cap_id = FIELD_GET(CXL_CM_CAP_HDR_ID_MASK, hdr);
72
offset = FIELD_GET(CXL_CM_CAP_PTR_MASK, hdr);
73
register_block = base + offset;
74
hdr = readl(register_block);
75
76
rmap = NULL;
77
switch (cap_id) {
78
case CXL_CM_CAP_CAP_ID_HDM: {
79
int decoder_cnt;
80
81
dev_dbg(dev, "found HDM decoder capability (0x%x)\n",
82
offset);
83
84
decoder_cnt = cxl_hdm_decoder_count(hdr);
85
length = 0x20 * decoder_cnt + 0x10;
86
rmap = &map->hdm_decoder;
87
break;
88
}
89
case CXL_CM_CAP_CAP_ID_RAS:
90
dev_dbg(dev, "found RAS capability (0x%x)\n",
91
offset);
92
length = CXL_RAS_CAPABILITY_LENGTH;
93
rmap = &map->ras;
94
break;
95
default:
96
dev_dbg(dev, "Unknown CM cap ID: %d (0x%x)\n", cap_id,
97
offset);
98
break;
99
}
100
101
if (!rmap)
102
continue;
103
rmap->valid = true;
104
rmap->id = cap_id;
105
rmap->offset = CXL_CM_OFFSET + offset;
106
rmap->size = length;
107
}
108
}
109
EXPORT_SYMBOL_NS_GPL(cxl_probe_component_regs, "CXL");
110
111
/**
112
* cxl_probe_device_regs() - Detect CXL Device register blocks
113
* @dev: Host device of the @base mapping
114
* @base: Mapping of CXL 2.0 8.2.8 CXL Device Register Interface
115
* @map: Map object describing the register block information found
116
*
117
* Probe for device register information and return it in map object.
118
*/
119
void cxl_probe_device_regs(struct device *dev, void __iomem *base,
120
struct cxl_device_reg_map *map)
121
{
122
int cap, cap_count;
123
u64 cap_array;
124
125
*map = (struct cxl_device_reg_map){ 0 };
126
127
cap_array = readq(base + CXLDEV_CAP_ARRAY_OFFSET);
128
if (FIELD_GET(CXLDEV_CAP_ARRAY_ID_MASK, cap_array) !=
129
CXLDEV_CAP_ARRAY_CAP_ID)
130
return;
131
132
cap_count = FIELD_GET(CXLDEV_CAP_ARRAY_COUNT_MASK, cap_array);
133
134
for (cap = 1; cap <= cap_count; cap++) {
135
struct cxl_reg_map *rmap;
136
u32 offset, length;
137
u16 cap_id;
138
139
cap_id = FIELD_GET(CXLDEV_CAP_HDR_CAP_ID_MASK,
140
readl(base + cap * 0x10));
141
offset = readl(base + cap * 0x10 + 0x4);
142
length = readl(base + cap * 0x10 + 0x8);
143
144
rmap = NULL;
145
switch (cap_id) {
146
case CXLDEV_CAP_CAP_ID_DEVICE_STATUS:
147
dev_dbg(dev, "found Status capability (0x%x)\n", offset);
148
rmap = &map->status;
149
break;
150
case CXLDEV_CAP_CAP_ID_PRIMARY_MAILBOX:
151
dev_dbg(dev, "found Mailbox capability (0x%x)\n", offset);
152
rmap = &map->mbox;
153
break;
154
case CXLDEV_CAP_CAP_ID_SECONDARY_MAILBOX:
155
dev_dbg(dev, "found Secondary Mailbox capability (0x%x)\n", offset);
156
break;
157
case CXLDEV_CAP_CAP_ID_MEMDEV:
158
dev_dbg(dev, "found Memory Device capability (0x%x)\n", offset);
159
rmap = &map->memdev;
160
break;
161
default:
162
if (cap_id >= 0x8000)
163
dev_dbg(dev, "Vendor cap ID: %#x offset: %#x\n", cap_id, offset);
164
else
165
dev_dbg(dev, "Unknown cap ID: %#x offset: %#x\n", cap_id, offset);
166
break;
167
}
168
169
if (!rmap)
170
continue;
171
rmap->valid = true;
172
rmap->id = cap_id;
173
rmap->offset = offset;
174
rmap->size = length;
175
}
176
}
177
EXPORT_SYMBOL_NS_GPL(cxl_probe_device_regs, "CXL");
178
179
/*
 * Request and devm-ioremap a register block, emitting diagnostics on
 * failure. Returns the mapping, or NULL if the region could not be
 * claimed or mapped. Lifetime of both is tied to @dev via devm.
 */
void __iomem *devm_cxl_iomap_block(struct device *dev, resource_size_t addr,
				   resource_size_t length)
{
	struct resource *res;
	void __iomem *mapping;

	if (WARN_ON_ONCE(addr == CXL_RESOURCE_NONE))
		return NULL;

	/* Claim the range first so conflicting users are surfaced */
	res = devm_request_mem_region(dev, addr, length, dev_name(dev));
	if (!res) {
		resource_size_t end = addr + length - 1;

		dev_err(dev, "Failed to request region %pa-%pa\n", &addr, &end);
		return NULL;
	}

	mapping = devm_ioremap(dev, addr, length);
	if (!mapping)
		dev_err(dev, "Failed to map region %pr\n", res);

	return mapping;
}
int cxl_map_component_regs(const struct cxl_register_map *map,
204
struct cxl_component_regs *regs,
205
unsigned long map_mask)
206
{
207
struct device *host = map->host;
208
struct mapinfo {
209
const struct cxl_reg_map *rmap;
210
void __iomem **addr;
211
} mapinfo[] = {
212
{ &map->component_map.hdm_decoder, &regs->hdm_decoder },
213
{ &map->component_map.ras, &regs->ras },
214
};
215
int i;
216
217
for (i = 0; i < ARRAY_SIZE(mapinfo); i++) {
218
struct mapinfo *mi = &mapinfo[i];
219
resource_size_t addr;
220
resource_size_t length;
221
222
if (!mi->rmap->valid)
223
continue;
224
if (!test_bit(mi->rmap->id, &map_mask))
225
continue;
226
addr = map->resource + mi->rmap->offset;
227
length = mi->rmap->size;
228
*(mi->addr) = devm_cxl_iomap_block(host, addr, length);
229
if (!*(mi->addr))
230
return -ENOMEM;
231
}
232
233
return 0;
234
}
235
EXPORT_SYMBOL_NS_GPL(cxl_map_component_regs, "CXL");
236
237
int cxl_map_device_regs(const struct cxl_register_map *map,
238
struct cxl_device_regs *regs)
239
{
240
struct device *host = map->host;
241
resource_size_t phys_addr = map->resource;
242
struct mapinfo {
243
const struct cxl_reg_map *rmap;
244
void __iomem **addr;
245
} mapinfo[] = {
246
{ &map->device_map.status, &regs->status, },
247
{ &map->device_map.mbox, &regs->mbox, },
248
{ &map->device_map.memdev, &regs->memdev, },
249
};
250
int i;
251
252
for (i = 0; i < ARRAY_SIZE(mapinfo); i++) {
253
struct mapinfo *mi = &mapinfo[i];
254
resource_size_t length;
255
resource_size_t addr;
256
257
if (!mi->rmap->valid)
258
continue;
259
260
addr = phys_addr + mi->rmap->offset;
261
length = mi->rmap->size;
262
*(mi->addr) = devm_cxl_iomap_block(host, addr, length);
263
if (!*(mi->addr))
264
return -ENOMEM;
265
}
266
267
return 0;
268
}
269
EXPORT_SYMBOL_NS_GPL(cxl_map_device_regs, "CXL");
270
271
/*
 * Decode one Register Locator DVSEC entry (two config dwords) into
 * @map. Returns false when the encoded offset lies beyond the BAR the
 * entry points at, true otherwise.
 */
static bool cxl_decode_regblock(struct pci_dev *pdev, u32 reg_lo, u32 reg_hi,
				struct cxl_register_map *map)
{
	u64 offset = ((u64)reg_hi << 32) |
		     (reg_lo & CXL_DVSEC_REG_LOCATOR_BLOCK_OFF_LOW_MASK);
	int bar = FIELD_GET(CXL_DVSEC_REG_LOCATOR_BIR_MASK, reg_lo);
	u8 reg_type = FIELD_GET(CXL_DVSEC_REG_LOCATOR_BLOCK_ID_MASK, reg_lo);

	/* Reject entries whose offset does not fit inside the BAR */
	if (offset > pci_resource_len(pdev, bar)) {
		dev_warn(&pdev->dev,
			 "BAR%d: %pr: too small (offset: %pa, type: %d)\n", bar,
			 &pdev->resource[bar], &offset, reg_type);
		return false;
	}

	map->reg_type = reg_type;
	map->resource = pci_resource_start(pdev, bar) + offset;
	map->max_size = pci_resource_len(pdev, bar) - offset;
	return true;
}
/*
293
* __cxl_find_regblock_instance() - Locate a register block or count instances by type / index
294
* Use CXL_INSTANCES_COUNT for @index if counting instances.
295
*
296
* __cxl_find_regblock_instance() may return:
297
* 0 - if register block enumerated.
298
* >= 0 - if counting instances.
299
* < 0 - error code otherwise.
300
*/
301
static int __cxl_find_regblock_instance(struct pci_dev *pdev, enum cxl_regloc_type type,
302
struct cxl_register_map *map, int index)
303
{
304
u32 regloc_size, regblocks;
305
int instance = 0;
306
int regloc, i;
307
308
*map = (struct cxl_register_map) {
309
.host = &pdev->dev,
310
.resource = CXL_RESOURCE_NONE,
311
};
312
313
regloc = pci_find_dvsec_capability(pdev, PCI_VENDOR_ID_CXL,
314
CXL_DVSEC_REG_LOCATOR);
315
if (!regloc)
316
return -ENXIO;
317
318
pci_read_config_dword(pdev, regloc + PCI_DVSEC_HEADER1, &regloc_size);
319
regloc_size = FIELD_GET(PCI_DVSEC_HEADER1_LENGTH_MASK, regloc_size);
320
321
regloc += CXL_DVSEC_REG_LOCATOR_BLOCK1_OFFSET;
322
regblocks = (regloc_size - CXL_DVSEC_REG_LOCATOR_BLOCK1_OFFSET) / 8;
323
324
for (i = 0; i < regblocks; i++, regloc += 8) {
325
u32 reg_lo, reg_hi;
326
327
pci_read_config_dword(pdev, regloc, &reg_lo);
328
pci_read_config_dword(pdev, regloc + 4, &reg_hi);
329
330
if (!cxl_decode_regblock(pdev, reg_lo, reg_hi, map))
331
continue;
332
333
if (map->reg_type == type) {
334
if (index == instance)
335
return 0;
336
instance++;
337
}
338
}
339
340
map->resource = CXL_RESOURCE_NONE;
341
if (index == CXL_INSTANCES_COUNT)
342
return instance;
343
344
return -ENODEV;
345
}
346
347
/**
348
* cxl_find_regblock_instance() - Locate a register block by type / index
349
* @pdev: The CXL PCI device to enumerate.
350
* @type: Register Block Indicator id
351
* @map: Enumeration output, clobbered on error
352
* @index: Index into which particular instance of a regblock wanted in the
353
* order found in register locator DVSEC.
354
*
355
* Return: 0 if register block enumerated, negative error code otherwise
356
*
357
* A CXL DVSEC may point to one or more register blocks, search for them
358
* by @type and @index.
359
*/
360
int cxl_find_regblock_instance(struct pci_dev *pdev, enum cxl_regloc_type type,
361
struct cxl_register_map *map, unsigned int index)
362
{
363
return __cxl_find_regblock_instance(pdev, type, map, index);
364
}
365
EXPORT_SYMBOL_NS_GPL(cxl_find_regblock_instance, "CXL");
366
367
/**
368
* cxl_find_regblock() - Locate register blocks by type
369
* @pdev: The CXL PCI device to enumerate.
370
* @type: Register Block Indicator id
371
* @map: Enumeration output, clobbered on error
372
*
373
* Return: 0 if register block enumerated, negative error code otherwise
374
*
375
* A CXL DVSEC may point to one or more register blocks, search for them
376
* by @type.
377
*/
378
int cxl_find_regblock(struct pci_dev *pdev, enum cxl_regloc_type type,
379
struct cxl_register_map *map)
380
{
381
return __cxl_find_regblock_instance(pdev, type, map, 0);
382
}
383
EXPORT_SYMBOL_NS_GPL(cxl_find_regblock, "CXL");
384
385
/**
386
* cxl_count_regblock() - Count instances of a given regblock type.
387
* @pdev: The CXL PCI device to enumerate.
388
* @type: Register Block Indicator id
389
*
390
* Some regblocks may be repeated. Count how many instances.
391
*
392
* Return: non-negative count of matching regblocks, negative error code otherwise.
393
*/
394
int cxl_count_regblock(struct pci_dev *pdev, enum cxl_regloc_type type)
395
{
396
struct cxl_register_map map;
397
398
return __cxl_find_regblock_instance(pdev, type, &map, CXL_INSTANCES_COUNT);
399
}
400
EXPORT_SYMBOL_NS_GPL(cxl_count_regblock, "CXL");
401
402
int cxl_map_pmu_regs(struct cxl_register_map *map, struct cxl_pmu_regs *regs)
403
{
404
struct device *dev = map->host;
405
resource_size_t phys_addr;
406
407
phys_addr = map->resource;
408
regs->pmu = devm_cxl_iomap_block(dev, phys_addr, CXL_PMU_REGMAP_SIZE);
409
if (!regs->pmu)
410
return -ENOMEM;
411
412
return 0;
413
}
414
EXPORT_SYMBOL_NS_GPL(cxl_map_pmu_regs, "CXL");
415
416
static int cxl_map_regblock(struct cxl_register_map *map)
417
{
418
struct device *host = map->host;
419
420
map->base = ioremap(map->resource, map->max_size);
421
if (!map->base) {
422
dev_err(host, "failed to map registers\n");
423
return -ENOMEM;
424
}
425
426
dev_dbg(host, "Mapped CXL Memory Device resource %pa\n", &map->resource);
427
return 0;
428
}
429
430
static void cxl_unmap_regblock(struct cxl_register_map *map)
431
{
432
iounmap(map->base);
433
map->base = NULL;
434
}
435
436
static int cxl_probe_regs(struct cxl_register_map *map)
437
{
438
struct cxl_component_reg_map *comp_map;
439
struct cxl_device_reg_map *dev_map;
440
struct device *host = map->host;
441
void __iomem *base = map->base;
442
443
switch (map->reg_type) {
444
case CXL_REGLOC_RBI_COMPONENT:
445
comp_map = &map->component_map;
446
cxl_probe_component_regs(host, base, comp_map);
447
dev_dbg(host, "Set up component registers\n");
448
break;
449
case CXL_REGLOC_RBI_MEMDEV:
450
dev_map = &map->device_map;
451
cxl_probe_device_regs(host, base, dev_map);
452
if (!dev_map->status.valid || !dev_map->mbox.valid ||
453
!dev_map->memdev.valid) {
454
dev_err(host, "registers not found: %s%s%s\n",
455
!dev_map->status.valid ? "status " : "",
456
!dev_map->mbox.valid ? "mbox " : "",
457
!dev_map->memdev.valid ? "memdev " : "");
458
return -ENXIO;
459
}
460
461
dev_dbg(host, "Probing device registers...\n");
462
break;
463
default:
464
break;
465
}
466
467
return 0;
468
}
469
470
int cxl_setup_regs(struct cxl_register_map *map)
471
{
472
int rc;
473
474
rc = cxl_map_regblock(map);
475
if (rc)
476
return rc;
477
478
rc = cxl_probe_regs(map);
479
cxl_unmap_regblock(map);
480
481
return rc;
482
}
483
EXPORT_SYMBOL_NS_GPL(cxl_setup_regs, "CXL");
484
485
/*
 * Walk the extended capability list in an RCRB looking for the AER
 * capability. Returns the capability offset, or 0 when the RCRB is
 * invalid, cannot be claimed/mapped, or AER is absent.
 */
u16 cxl_rcrb_to_aer(struct device *dev, resource_size_t rcrb)
{
	void __iomem *addr;
	u32 cap_hdr;
	u16 offset = 0;

	if (WARN_ON_ONCE(rcrb == CXL_RESOURCE_NONE))
		return 0;

	if (!request_mem_region(rcrb, SZ_4K, dev_name(dev)))
		return 0;

	addr = ioremap(rcrb, SZ_4K);
	if (!addr)
		goto out;

	/* Follow the chain until AER is found or the list terminates */
	for (cap_hdr = readl(addr + offset);
	     PCI_EXT_CAP_ID(cap_hdr) != PCI_EXT_CAP_ID_ERR;
	     cap_hdr = readl(addr + offset)) {
		offset = PCI_EXT_CAP_NEXT(cap_hdr);

		/* Offset 0 terminates capability list. */
		if (!offset)
			break;
	}

	if (offset)
		dev_dbg(dev, "found AER extended capability (0x%x)\n", offset);

	iounmap(addr);
out:
	release_mem_region(rcrb, SZ_4K);

	return offset;
}
static resource_size_t cxl_rcrb_to_linkcap(struct device *dev, struct cxl_dport *dport)
522
{
523
resource_size_t rcrb = dport->rcrb.base;
524
void __iomem *addr;
525
u32 cap_hdr;
526
u16 offset;
527
528
if (!request_mem_region(rcrb, SZ_4K, "CXL RCRB"))
529
return CXL_RESOURCE_NONE;
530
531
addr = ioremap(rcrb, SZ_4K);
532
if (!addr) {
533
dev_err(dev, "Failed to map region %pr\n", addr);
534
release_mem_region(rcrb, SZ_4K);
535
return CXL_RESOURCE_NONE;
536
}
537
538
offset = FIELD_GET(PCI_RCRB_CAP_LIST_ID_MASK, readw(addr + PCI_CAPABILITY_LIST));
539
cap_hdr = readl(addr + offset);
540
while ((FIELD_GET(PCI_RCRB_CAP_HDR_ID_MASK, cap_hdr)) != PCI_CAP_ID_EXP) {
541
offset = FIELD_GET(PCI_RCRB_CAP_HDR_NEXT_MASK, cap_hdr);
542
if (offset == 0 || offset > SZ_4K) {
543
offset = 0;
544
break;
545
}
546
cap_hdr = readl(addr + offset);
547
}
548
549
iounmap(addr);
550
release_mem_region(rcrb, SZ_4K);
551
if (!offset)
552
return CXL_RESOURCE_NONE;
553
554
return offset;
555
}
556
557
int cxl_dport_map_rcd_linkcap(struct pci_dev *pdev, struct cxl_dport *dport)
558
{
559
void __iomem *dport_pcie_cap = NULL;
560
resource_size_t pos;
561
struct cxl_rcrb_info *ri;
562
563
ri = &dport->rcrb;
564
pos = cxl_rcrb_to_linkcap(&pdev->dev, dport);
565
if (pos == CXL_RESOURCE_NONE)
566
return -ENXIO;
567
568
dport_pcie_cap = devm_cxl_iomap_block(&pdev->dev,
569
ri->base + pos,
570
PCI_CAP_EXP_SIZEOF);
571
dport->regs.rcd_pcie_cap = dport_pcie_cap;
572
573
return 0;
574
}
575
EXPORT_SYMBOL_NS_GPL(cxl_dport_map_rcd_linkcap, "CXL");
576
577
resource_size_t __rcrb_to_component(struct device *dev, struct cxl_rcrb_info *ri,
578
enum cxl_rcrb which)
579
{
580
resource_size_t component_reg_phys;
581
resource_size_t rcrb = ri->base;
582
void __iomem *addr;
583
u32 bar0, bar1;
584
u32 id;
585
586
if (which == CXL_RCRB_UPSTREAM)
587
rcrb += SZ_4K;
588
589
/*
590
* RCRB's BAR[0..1] point to component block containing CXL
591
* subsystem component registers. MEMBAR extraction follows
592
* the PCI Base spec here, esp. 64 bit extraction and memory
593
* ranges alignment (6.0, 7.5.1.2.1).
594
*/
595
if (!request_mem_region(rcrb, SZ_4K, "CXL RCRB"))
596
return CXL_RESOURCE_NONE;
597
addr = ioremap(rcrb, SZ_4K);
598
if (!addr) {
599
dev_err(dev, "Failed to map region %pr\n", addr);
600
release_mem_region(rcrb, SZ_4K);
601
return CXL_RESOURCE_NONE;
602
}
603
604
id = readl(addr + PCI_VENDOR_ID);
605
bar0 = readl(addr + PCI_BASE_ADDRESS_0);
606
bar1 = readl(addr + PCI_BASE_ADDRESS_1);
607
iounmap(addr);
608
release_mem_region(rcrb, SZ_4K);
609
610
/*
611
* Sanity check, see CXL 3.0 Figure 9-8 CXL Device that Does Not
612
* Remap Upstream Port and Component Registers
613
*/
614
if (id == U32_MAX) {
615
if (which == CXL_RCRB_DOWNSTREAM)
616
dev_err(dev, "Failed to access Downstream Port RCRB\n");
617
return CXL_RESOURCE_NONE;
618
}
619
/* The RCRB is a Memory Window, and the MEM_TYPE_1M bit is obsolete */
620
if (bar0 & (PCI_BASE_ADDRESS_MEM_TYPE_1M | PCI_BASE_ADDRESS_SPACE_IO))
621
return CXL_RESOURCE_NONE;
622
623
component_reg_phys = bar0 & PCI_BASE_ADDRESS_MEM_MASK;
624
if (bar0 & PCI_BASE_ADDRESS_MEM_TYPE_64)
625
component_reg_phys |= ((u64)bar1) << 32;
626
627
if (!component_reg_phys)
628
return CXL_RESOURCE_NONE;
629
630
/* MEMBAR is block size (64k) aligned. */
631
if (!IS_ALIGNED(component_reg_phys, CXL_COMPONENT_REG_BLOCK_SIZE))
632
return CXL_RESOURCE_NONE;
633
634
return component_reg_phys;
635
}
636
637
resource_size_t cxl_rcd_component_reg_phys(struct device *dev,
638
struct cxl_dport *dport)
639
{
640
if (!dport->rch)
641
return CXL_RESOURCE_NONE;
642
return __rcrb_to_component(dev, &dport->rcrb, CXL_RCRB_UPSTREAM);
643
}
644
EXPORT_SYMBOL_NS_GPL(cxl_rcd_component_reg_phys, "CXL");
645
646