// SPDX-License-Identifier: GPL-2.0-only
/* Copyright(c) 2023 Intel Corporation. All rights reserved. */
#include <linux/acpi.h>
#include <linux/xarray.h>
#include <linux/fw_table.h>
#include <linux/node.h>
#include <linux/overflow.h>
#include "cxlpci.h"
#include "cxlmem.h"
#include "core.h"
#include "cxl.h"

struct dsmas_entry {
	struct range dpa_range;
	u8 handle;
	struct access_coordinate coord[ACCESS_COORDINATE_MAX];
	struct access_coordinate cdat_coord[ACCESS_COORDINATE_MAX];
	int entries;
	int qos_class;
};

static u32 cdat_normalize(u16 entry, u64 base, u8 type)
{
	u32 value;

	/*
	 * Check for invalid and overflow values
	 */
	if (entry == 0xffff || !entry)
		return 0;
	if (base > (UINT_MAX / (entry)))
		return 0;

	/*
	 * CDAT fields follow the format of HMAT fields. See table 5 Device
	 * Scoped Latency and Bandwidth Information Structure in Coherent Device
	 * Attribute Table (CDAT) Specification v1.01.
	 */
	value = entry * base;
	switch (type) {
	case ACPI_HMAT_ACCESS_LATENCY:
	case ACPI_HMAT_READ_LATENCY:
	case ACPI_HMAT_WRITE_LATENCY:
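		/*
		 * Latency values computed here are in picoseconds; scale down
		 * to the nanosecond granularity used by struct
		 * access_coordinate, rounding up.
		 */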
		value = DIV_ROUND_UP(value, 1000);
		break;
	default:
		break;
	}
	return value;
}

static int cdat_dsmas_handler(union acpi_subtable_headers *header, void *arg,
			      const unsigned long end)
{
	struct acpi_cdat_header *hdr = &header->cdat;
	struct acpi_cdat_dsmas *dsmas;
	int size = sizeof(*hdr) + sizeof(*dsmas);
	struct xarray *dsmas_xa = arg;
	struct dsmas_entry *dent;
	u16 len;
	int rc;

	len = le16_to_cpu((__force __le16)hdr->length);
	if (len != size || (unsigned long)hdr + len > end) {
		pr_warn("Malformed DSMAS table length: (%u:%u)\n", size, len);
		return -EINVAL;
	}

	/* Skip common header */
	dsmas = (struct acpi_cdat_dsmas *)(hdr + 1);

	dent = kzalloc(sizeof(*dent), GFP_KERNEL);
	if (!dent)
		return -ENOMEM;

	dent->handle = dsmas->dsmad_handle;
	dent->dpa_range.start = le64_to_cpu((__force __le64)dsmas->dpa_base_address);
	dent->dpa_range.end = le64_to_cpu((__force __le64)dsmas->dpa_base_address) +
			      le64_to_cpu((__force __le64)dsmas->dpa_length) - 1;

	rc = xa_insert(dsmas_xa, dent->handle, dent, GFP_KERNEL);
	if (rc) {
		kfree(dent);
		return rc;
	}

	return 0;
}

static void __cxl_access_coordinate_set(struct access_coordinate *coord,
					int access, unsigned int val)
{
	switch (access) {
	case ACPI_HMAT_ACCESS_LATENCY:
		coord->read_latency = val;
		coord->write_latency = val;
		break;
	case ACPI_HMAT_READ_LATENCY:
		coord->read_latency = val;
		break;
	case ACPI_HMAT_WRITE_LATENCY:
		coord->write_latency = val;
		break;
	case ACPI_HMAT_ACCESS_BANDWIDTH:
		coord->read_bandwidth = val;
		coord->write_bandwidth = val;
		break;
	case ACPI_HMAT_READ_BANDWIDTH:
		coord->read_bandwidth = val;
		break;
	case ACPI_HMAT_WRITE_BANDWIDTH:
		coord->write_bandwidth = val;
		break;
	}
}

static void cxl_access_coordinate_set(struct access_coordinate *coord,
				      int access, unsigned int val)
{
	for (int i = 0; i < ACCESS_COORDINATE_MAX; i++)
		__cxl_access_coordinate_set(&coord[i], access, val);
}

static int cdat_dslbis_handler(union acpi_subtable_headers *header, void *arg,
			       const unsigned long end)
{
	struct acpi_cdat_header *hdr = &header->cdat;
	struct acpi_cdat_dslbis *dslbis;
	int size = sizeof(*hdr) + sizeof(*dslbis);
	struct xarray *dsmas_xa = arg;
	struct dsmas_entry *dent;
	__le64 le_base;
	__le16 le_val;
	u64 val;
	u16 len;

	len = le16_to_cpu((__force __le16)hdr->length);
	if (len != size || (unsigned long)hdr + len > end) {
		pr_warn("Malformed DSLBIS table length: (%u:%u)\n", size, len);
		return -EINVAL;
	}

	/* Skip common header */
	dslbis = (struct acpi_cdat_dslbis *)(hdr + 1);

	/* Skip unrecognized data type */
	if (dslbis->data_type > ACPI_HMAT_WRITE_BANDWIDTH)
		return 0;

	/* Not a memory type, skip */
	if ((dslbis->flags & ACPI_HMAT_MEMORY_HIERARCHY) != ACPI_HMAT_MEMORY)
		return 0;

	dent = xa_load(dsmas_xa, dslbis->handle);
	if (!dent) {
		pr_warn("No matching DSMAS entry for DSLBIS entry.\n");
		return 0;
	}

	le_base = (__force __le64)dslbis->entry_base_unit;
	le_val = (__force __le16)dslbis->entry[0];
	val = cdat_normalize(le16_to_cpu(le_val), le64_to_cpu(le_base),
			     dslbis->data_type);

	cxl_access_coordinate_set(dent->cdat_coord, dslbis->data_type, val);

	return 0;
}

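/*
 * cdat_table_parse() returns the number of entries it handled, or a negative
 * errno. Normalize that to 0 on success, and treat "no entries found" as
 * -ENOENT.
 */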
static int cdat_table_parse_output(int rc)
{
	if (rc < 0)
		return rc;
	if (rc == 0)
		return -ENOENT;

	return 0;
}

static int cxl_cdat_endpoint_process(struct cxl_port *port,
				     struct xarray *dsmas_xa)
{
	int rc;

	rc = cdat_table_parse(ACPI_CDAT_TYPE_DSMAS, cdat_dsmas_handler,
			      dsmas_xa, port->cdat.table, port->cdat.length);
	rc = cdat_table_parse_output(rc);
	if (rc)
		return rc;

	rc = cdat_table_parse(ACPI_CDAT_TYPE_DSLBIS, cdat_dslbis_handler,
			      dsmas_xa, port->cdat.table, port->cdat.length);
	return cdat_table_parse_output(rc);
}

static int cxl_port_perf_data_calculate(struct cxl_port *port,
					struct xarray *dsmas_xa)
{
	struct access_coordinate ep_c[ACCESS_COORDINATE_MAX];
	struct dsmas_entry *dent;
	int valid_entries = 0;
	unsigned long index;
	int rc;

	rc = cxl_endpoint_get_perf_coordinates(port, ep_c);
	if (rc) {
		dev_dbg(&port->dev, "Failed to retrieve ep perf coordinates.\n");
		return rc;
	}

	struct cxl_root *cxl_root __free(put_cxl_root) = find_cxl_root(port);

	if (!cxl_root)
		return -ENODEV;

	if (!cxl_root->ops || !cxl_root->ops->qos_class)
		return -EOPNOTSUPP;

	xa_for_each(dsmas_xa, index, dent) {
		int qos_class;

		cxl_coordinates_combine(dent->coord, dent->cdat_coord, ep_c);
		dent->entries = 1;
		rc = cxl_root->ops->qos_class(cxl_root,
					      &dent->coord[ACCESS_COORDINATE_CPU],
					      1, &qos_class);
		if (rc != 1)
			continue;

		valid_entries++;
		dent->qos_class = qos_class;
	}

	if (!valid_entries)
		return -ENOENT;

	return 0;
}

static void update_perf_entry(struct device *dev, struct dsmas_entry *dent,
			      struct cxl_dpa_perf *dpa_perf)
{
	for (int i = 0; i < ACCESS_COORDINATE_MAX; i++) {
		dpa_perf->coord[i] = dent->coord[i];
		dpa_perf->cdat_coord[i] = dent->cdat_coord[i];
	}
	dpa_perf->dpa_range = dent->dpa_range;
	dpa_perf->qos_class = dent->qos_class;
	dev_dbg(dev,
		"DSMAS: dpa: %pra qos: %d read_bw: %d write_bw %d read_lat: %d write_lat: %d\n",
		&dent->dpa_range, dpa_perf->qos_class,
		dent->coord[ACCESS_COORDINATE_CPU].read_bandwidth,
		dent->coord[ACCESS_COORDINATE_CPU].write_bandwidth,
		dent->coord[ACCESS_COORDINATE_CPU].read_latency,
		dent->coord[ACCESS_COORDINATE_CPU].write_latency);
}

static void cxl_memdev_set_qos_class(struct cxl_dev_state *cxlds,
				     struct xarray *dsmas_xa)
{
	struct device *dev = cxlds->dev;
	struct dsmas_entry *dent;
	unsigned long index;

	xa_for_each(dsmas_xa, index, dent) {
		bool found = false;

		for (int i = 0; i < cxlds->nr_partitions; i++) {
			struct resource *res = &cxlds->part[i].res;
			struct range range = {
				.start = res->start,
				.end = res->end,
			};

			if (range_contains(&range, &dent->dpa_range)) {
				update_perf_entry(dev, dent,
						  &cxlds->part[i].perf);
				found = true;
				break;
			}
		}

		if (!found)
			dev_dbg(dev, "no partition for dsmas dpa: %pra\n",
				&dent->dpa_range);
	}
}

static int match_cxlrd_qos_class(struct device *dev, void *data)
{
	int dev_qos_class = *(int *)data;
	struct cxl_root_decoder *cxlrd;

	if (!is_root_decoder(dev))
		return 0;

	cxlrd = to_cxl_root_decoder(dev);
	if (cxlrd->qos_class == CXL_QOS_CLASS_INVALID)
		return 0;

	if (cxlrd->qos_class == dev_qos_class)
		return 1;

	return 0;
}

static void reset_dpa_perf(struct cxl_dpa_perf *dpa_perf)
{
	*dpa_perf = (struct cxl_dpa_perf) {
		.qos_class = CXL_QOS_CLASS_INVALID,
	};
}

static bool cxl_qos_match(struct cxl_port *root_port,
			  struct cxl_dpa_perf *dpa_perf)
{
	if (dpa_perf->qos_class == CXL_QOS_CLASS_INVALID)
		return false;

	if (!device_for_each_child(&root_port->dev, &dpa_perf->qos_class,
				   match_cxlrd_qos_class))
		return false;

	return true;
}

static int match_cxlrd_hb(struct device *dev, void *data)
{
	struct device *host_bridge = data;
	struct cxl_switch_decoder *cxlsd;
	struct cxl_root_decoder *cxlrd;

	if (!is_root_decoder(dev))
		return 0;

	cxlrd = to_cxl_root_decoder(dev);
	cxlsd = &cxlrd->cxlsd;

	guard(rwsem_read)(&cxl_rwsem.region);
	for (int i = 0; i < cxlsd->nr_targets; i++) {
		if (host_bridge == cxlsd->target[i]->dport_dev)
			return 1;
	}

	return 0;
}

static void cxl_qos_class_verify(struct cxl_memdev *cxlmd)
{
	struct cxl_dev_state *cxlds = cxlmd->cxlds;
	struct cxl_port *root_port;

	struct cxl_root *cxl_root __free(put_cxl_root) =
		find_cxl_root(cxlmd->endpoint);

	/*
	 * No need to reset_dpa_perf() here as find_cxl_root() is guaranteed to
	 * succeed when called in the cxl_endpoint_port_probe() path.
	 */
	if (!cxl_root)
		return;

	root_port = &cxl_root->port;

	/*
	 * Save userspace from needing to check if a qos class has any matches
	 * by hiding qos class info if the memdev is not mapped by a root
	 * decoder, or the partition class does not match any root decoder
	 * class.
	 */
	if (!device_for_each_child(&root_port->dev,
				   cxlmd->endpoint->host_bridge,
				   match_cxlrd_hb)) {
		for (int i = 0; i < cxlds->nr_partitions; i++) {
			struct cxl_dpa_perf *perf = &cxlds->part[i].perf;

			reset_dpa_perf(perf);
		}
		return;
	}

	for (int i = 0; i < cxlds->nr_partitions; i++) {
		struct cxl_dpa_perf *perf = &cxlds->part[i].perf;

		if (!cxl_qos_match(root_port, perf))
			reset_dpa_perf(perf);
	}
}

static void discard_dsmas(struct xarray *xa)
{
	unsigned long index;
	void *ent;

	xa_for_each(xa, index, ent) {
		xa_erase(xa, index);
		kfree(ent);
	}
	xa_destroy(xa);
}
DEFINE_FREE(dsmas, struct xarray *, if (_T) discard_dsmas(_T))

void cxl_endpoint_parse_cdat(struct cxl_port *port)
{
	struct cxl_memdev *cxlmd = to_cxl_memdev(port->uport_dev);
	struct cxl_dev_state *cxlds = cxlmd->cxlds;
	struct xarray __dsmas_xa;
	struct xarray *dsmas_xa __free(dsmas) = &__dsmas_xa;
	int rc;

	xa_init(&__dsmas_xa);
	if (!port->cdat.table)
		return;

	rc = cxl_cdat_endpoint_process(port, dsmas_xa);
	if (rc < 0) {
		dev_dbg(&port->dev, "Failed to parse CDAT: %d\n", rc);
		return;
	}

	rc = cxl_port_perf_data_calculate(port, dsmas_xa);
	if (rc) {
		dev_dbg(&port->dev, "Failed to do perf coord calculations.\n");
		return;
	}

	cxl_memdev_set_qos_class(cxlds, dsmas_xa);
	cxl_qos_class_verify(cxlmd);
	cxl_memdev_update_perf(cxlmd);
}
EXPORT_SYMBOL_NS_GPL(cxl_endpoint_parse_cdat, "CXL");

static int cdat_sslbis_handler(union acpi_subtable_headers *header, void *arg,
			       const unsigned long end)
{
	struct acpi_cdat_sslbis_table {
		struct acpi_cdat_header header;
		struct acpi_cdat_sslbis sslbis_header;
		struct acpi_cdat_sslbe entries[];
	} *tbl = (struct acpi_cdat_sslbis_table *)header;
	int size = sizeof(header->cdat) + sizeof(tbl->sslbis_header);
	struct acpi_cdat_sslbis *sslbis;
	struct cxl_port *port = arg;
	struct device *dev = &port->dev;
	int remain, entries, i;
	u16 len;

	len = le16_to_cpu((__force __le16)header->cdat.length);
	remain = len - size;
	if (!remain || remain % sizeof(tbl->entries[0]) ||
	    (unsigned long)header + len > end) {
		dev_warn(dev, "Malformed SSLBIS table length: (%u)\n", len);
		return -EINVAL;
	}

	sslbis = &tbl->sslbis_header;
	/* Unrecognized data type, we can skip */
	if (sslbis->data_type > ACPI_HMAT_WRITE_BANDWIDTH)
		return 0;

	entries = remain / sizeof(tbl->entries[0]);
	if (struct_size(tbl, entries, entries) != len)
		return -EINVAL;

	for (i = 0; i < entries; i++) {
		u16 x = le16_to_cpu((__force __le16)tbl->entries[i].portx_id);
		u16 y = le16_to_cpu((__force __le16)tbl->entries[i].porty_id);
		__le64 le_base;
		__le16 le_val;
		struct cxl_dport *dport;
		unsigned long index;
		u16 dsp_id;
		u64 val;

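		/*
		 * Resolve which downstream port(s) this entry describes: one
		 * of the two port ids names the switch upstream port, the
		 * other identifies a downstream port, or ANY_PORT applies the
		 * value to all downstream ports.
		 */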
		switch (x) {
		case ACPI_CDAT_SSLBIS_US_PORT:
			dsp_id = y;
			break;
		case ACPI_CDAT_SSLBIS_ANY_PORT:
			switch (y) {
			case ACPI_CDAT_SSLBIS_US_PORT:
				dsp_id = x;
				break;
			case ACPI_CDAT_SSLBIS_ANY_PORT:
				dsp_id = ACPI_CDAT_SSLBIS_ANY_PORT;
				break;
			default:
				dsp_id = y;
				break;
			}
			break;
		default:
			dsp_id = x;
			break;
		}

		le_base = (__force __le64)tbl->sslbis_header.entry_base_unit;
		le_val = (__force __le16)tbl->entries[i].latency_or_bandwidth;
		val = cdat_normalize(le16_to_cpu(le_val), le64_to_cpu(le_base),
				     sslbis->data_type);

		xa_for_each(&port->dports, index, dport) {
			if (dsp_id == ACPI_CDAT_SSLBIS_ANY_PORT ||
			    dsp_id == dport->port_id) {
				cxl_access_coordinate_set(dport->coord,
							  sslbis->data_type,
							  val);
			}
		}
	}

	return 0;
}

void cxl_switch_parse_cdat(struct cxl_port *port)
{
	int rc;

	if (!port->cdat.table)
		return;

	rc = cdat_table_parse(ACPI_CDAT_TYPE_SSLBIS, cdat_sslbis_handler,
			      port, port->cdat.table, port->cdat.length);
	rc = cdat_table_parse_output(rc);
	if (rc)
		dev_dbg(&port->dev, "Failed to parse SSLBIS: %d\n", rc);
}
EXPORT_SYMBOL_NS_GPL(cxl_switch_parse_cdat, "CXL");

static void __cxl_coordinates_combine(struct access_coordinate *out,
				      struct access_coordinate *c1,
				      struct access_coordinate *c2)
{
	if (c1->write_bandwidth && c2->write_bandwidth)
		out->write_bandwidth = min(c1->write_bandwidth,
					   c2->write_bandwidth);
	out->write_latency = c1->write_latency + c2->write_latency;

	if (c1->read_bandwidth && c2->read_bandwidth)
		out->read_bandwidth = min(c1->read_bandwidth,
					  c2->read_bandwidth);
	out->read_latency = c1->read_latency + c2->read_latency;
}

/**
 * cxl_coordinates_combine - Combine the two input coordinates
 *
 * @out: Output coordinate of c1 and c2 combined
 * @c1: input coordinates
 * @c2: input coordinates
 */
void cxl_coordinates_combine(struct access_coordinate *out,
			     struct access_coordinate *c1,
			     struct access_coordinate *c2)
{
	for (int i = 0; i < ACCESS_COORDINATE_MAX; i++)
		__cxl_coordinates_combine(&out[i], &c1[i], &c2[i]);
}

MODULE_IMPORT_NS("CXL");

static void cxl_bandwidth_add(struct access_coordinate *coord,
			      struct access_coordinate *c1,
			      struct access_coordinate *c2)
{
	for (int i = 0; i < ACCESS_COORDINATE_MAX; i++) {
		coord[i].read_bandwidth = c1[i].read_bandwidth +
					  c2[i].read_bandwidth;
		coord[i].write_bandwidth = c1[i].write_bandwidth +
					   c2[i].write_bandwidth;
	}
}

static bool dpa_perf_contains(struct cxl_dpa_perf *perf,
			      struct resource *dpa_res)
{
	struct range dpa = {
		.start = dpa_res->start,
		.end = dpa_res->end,
	};

	return range_contains(&perf->dpa_range, &dpa);
}

static struct cxl_dpa_perf *cxled_get_dpa_perf(struct cxl_endpoint_decoder *cxled)
{
	struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
	struct cxl_dev_state *cxlds = cxlmd->cxlds;
	struct cxl_dpa_perf *perf;

	if (cxled->part < 0)
		return ERR_PTR(-EINVAL);
	perf = &cxlds->part[cxled->part].perf;

	if (!perf)
		return ERR_PTR(-EINVAL);

	if (!dpa_perf_contains(perf, cxled->dpa_res))
		return ERR_PTR(-EINVAL);

	return perf;
}

/*
 * Transient context holding the running bandwidth calculation while walking
 * the port hierarchy to account for shared upstream links.
 */
struct cxl_perf_ctx {
	struct access_coordinate coord[ACCESS_COORDINATE_MAX];
	struct cxl_port *port;
};

/**
 * cxl_endpoint_gather_bandwidth - collect all the endpoint bandwidth in an xarray
 * @cxlr: CXL region for the bandwidth calculation
 * @cxled: endpoint decoder to start on
 * @usp_xa: (output) the xarray that collects all the bandwidth coordinates
 *          indexed by the upstream device with data of 'struct cxl_perf_ctx'.
 * @gp_is_root: (output) bool of whether the grandparent is cxl root.
 *
 * Return: 0 for success or -errno
 *
 * Collects the aggregated endpoint bandwidth and stores it in an xarray
 * indexed by the upstream device of the switch or the RP device. Each
 * endpoint's contribution is the minimum of the bandwidth from DSLBIS in the
 * endpoint CDAT, the endpoint upstream link bandwidth, and the bandwidth from
 * the SSLBIS of the switch CDAT for the switch upstream port to the
 * downstream port that's associated with the endpoint. If the device is
 * directly connected to an RP, then no SSLBIS is involved.
 */
static int cxl_endpoint_gather_bandwidth(struct cxl_region *cxlr,
					 struct cxl_endpoint_decoder *cxled,
					 struct xarray *usp_xa,
					 bool *gp_is_root)
{
	struct cxl_port *endpoint = to_cxl_port(cxled->cxld.dev.parent);
	struct cxl_port *parent_port = to_cxl_port(endpoint->dev.parent);
	struct cxl_port *gp_port = to_cxl_port(parent_port->dev.parent);
	struct access_coordinate pci_coord[ACCESS_COORDINATE_MAX];
	struct access_coordinate sw_coord[ACCESS_COORDINATE_MAX];
	struct access_coordinate ep_coord[ACCESS_COORDINATE_MAX];
	struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
	struct cxl_dev_state *cxlds = cxlmd->cxlds;
	struct pci_dev *pdev = to_pci_dev(cxlds->dev);
	struct cxl_perf_ctx *perf_ctx;
	struct cxl_dpa_perf *perf;
	unsigned long index;
	void *ptr;
	int rc;

	if (!dev_is_pci(cxlds->dev))
		return -ENODEV;

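	/* No bandwidth walk for a device operating in RCD (restricted CXL device) mode */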
	if (cxlds->rcd)
		return -ENODEV;

	perf = cxled_get_dpa_perf(cxled);
	if (IS_ERR(perf))
		return PTR_ERR(perf);

	*gp_is_root = is_cxl_root(gp_port);

	/*
	 * If the grandparent is cxl root, then index is the root port,
	 * otherwise it's the parent switch upstream device.
	 */
	if (*gp_is_root)
		index = (unsigned long)endpoint->parent_dport->dport_dev;
	else
		index = (unsigned long)parent_port->uport_dev;

	perf_ctx = xa_load(usp_xa, index);
	if (!perf_ctx) {
		struct cxl_perf_ctx *c __free(kfree) =
			kzalloc(sizeof(*perf_ctx), GFP_KERNEL);

		if (!c)
			return -ENOMEM;
		ptr = xa_store(usp_xa, index, c, GFP_KERNEL);
		if (xa_is_err(ptr))
			return xa_err(ptr);
		perf_ctx = no_free_ptr(c);
		perf_ctx->port = parent_port;
	}

	/* Direct upstream link from EP bandwidth */
	rc = cxl_pci_get_bandwidth(pdev, pci_coord);
	if (rc < 0)
		return rc;

	/*
	 * Min of upstream link bandwidth and Endpoint CDAT bandwidth from
	 * DSLBIS.
	 */
	cxl_coordinates_combine(ep_coord, pci_coord, perf->cdat_coord);

	/*
	 * If grandparent port is root, then there's no switch involved and
	 * the endpoint is connected to a root port.
	 */
	if (!*gp_is_root) {
		/*
		 * Retrieve the switch SSLBIS for switch downstream port
		 * associated with the endpoint bandwidth.
		 */
		rc = cxl_port_get_switch_dport_bandwidth(endpoint, sw_coord);
		if (rc)
			return rc;

		/*
		 * Min of the earlier coordinates with the switch SSLBIS
		 * bandwidth
		 */
		cxl_coordinates_combine(ep_coord, ep_coord, sw_coord);
	}

	/*
	 * Aggregate the computed bandwidth with the current aggregated bandwidth
	 * of the endpoints with the same switch upstream device or RP.
	 */
	cxl_bandwidth_add(perf_ctx->coord, perf_ctx->coord, ep_coord);

	return 0;
}

static void free_perf_xa(struct xarray *xa)
{
	struct cxl_perf_ctx *ctx;
	unsigned long index;

	if (!xa)
		return;

	xa_for_each(xa, index, ctx)
		kfree(ctx);
	xa_destroy(xa);
	kfree(xa);
}
DEFINE_FREE(free_perf_xa, struct xarray *, if (_T) free_perf_xa(_T))

/**
 * cxl_switch_gather_bandwidth - collect all the bandwidth at switch level in an xarray
 * @cxlr: The region being operated on
 * @input_xa: xarray indexed by upstream device of a switch with data of 'struct
 *            cxl_perf_ctx'
 * @gp_is_root: (output) bool of whether the grandparent is cxl root.
 *
 * Return: an xarray of resulting cxl_perf_ctx per parent switch or root port,
 *         or ERR_PTR(-errno)
 *
 * Iterate through the xarray. Take the minimum of the downstream calculated
 * bandwidth, the upstream link bandwidth, and the SSLBIS of the upstream
 * switch if one exists. Sum the resulting bandwidth under the switch upstream
 * device or an RP device. The function can be called repeatedly, once per
 * switch level, when multiple levels of switches are present.
 */
static struct xarray *cxl_switch_gather_bandwidth(struct cxl_region *cxlr,
						  struct xarray *input_xa,
						  bool *gp_is_root)
{
	struct xarray *res_xa __free(free_perf_xa) =
		kzalloc(sizeof(*res_xa), GFP_KERNEL);
	struct access_coordinate coords[ACCESS_COORDINATE_MAX];
	struct cxl_perf_ctx *ctx, *us_ctx;
	unsigned long index, us_index;
	int dev_count = 0;
	int gp_count = 0;
	void *ptr;
	int rc;

	if (!res_xa)
		return ERR_PTR(-ENOMEM);
	xa_init(res_xa);

	xa_for_each(input_xa, index, ctx) {
		struct device *dev = (struct device *)index;
		struct cxl_port *port = ctx->port;
		struct cxl_port *parent_port = to_cxl_port(port->dev.parent);
		struct cxl_port *gp_port = to_cxl_port(parent_port->dev.parent);
		struct cxl_dport *dport = port->parent_dport;
		bool is_root = false;

		dev_count++;
		if (is_cxl_root(gp_port)) {
			is_root = true;
			gp_count++;
		}

		/*
		 * If the grandparent is cxl root, then index is the root port,
		 * otherwise it's the parent switch upstream device.
		 */
		if (is_root)
			us_index = (unsigned long)port->parent_dport->dport_dev;
		else
			us_index = (unsigned long)parent_port->uport_dev;

		us_ctx = xa_load(res_xa, us_index);
		if (!us_ctx) {
			struct cxl_perf_ctx *n __free(kfree) =
				kzalloc(sizeof(*n), GFP_KERNEL);

			if (!n)
				return ERR_PTR(-ENOMEM);

			ptr = xa_store(res_xa, us_index, n, GFP_KERNEL);
			if (xa_is_err(ptr))
				return ERR_PTR(xa_err(ptr));
			us_ctx = no_free_ptr(n);
			us_ctx->port = parent_port;
		}

		/*
		 * If the device isn't an upstream PCIe port, there's something
		 * wrong with the topology.
		 */
		if (!dev_is_pci(dev))
			return ERR_PTR(-EINVAL);

		/* Retrieve the upstream link bandwidth */
		rc = cxl_pci_get_bandwidth(to_pci_dev(dev), coords);
		if (rc)
			return ERR_PTR(-ENXIO);

		/*
		 * Take the min of downstream bandwidth and the upstream link
		 * bandwidth.
		 */
		cxl_coordinates_combine(coords, coords, ctx->coord);

		/*
		 * Take the min of the calculated bandwidth and the upstream
		 * switch SSLBIS bandwidth if there's a parent switch
		 */
		if (!is_root)
			cxl_coordinates_combine(coords, coords, dport->coord);

		/*
		 * Aggregate the calculated bandwidth common to an upstream
		 * switch.
		 */
		cxl_bandwidth_add(us_ctx->coord, us_ctx->coord, coords);
	}

	/* Asymmetric topology detected. */
	if (gp_count) {
		if (gp_count != dev_count) {
			dev_dbg(&cxlr->dev,
				"Asymmetric hierarchy detected, bandwidth not updated\n");
			return ERR_PTR(-EOPNOTSUPP);
		}
		*gp_is_root = true;
	}

	return no_free_ptr(res_xa);
}

/**
 * cxl_rp_gather_bandwidth - handle the root port level bandwidth collection
 * @xa: the xarray that holds the cxl_perf_ctx that has the bandwidth calculated
 *      below each root port device.
 *
 * Return: xarray that holds cxl_perf_ctx per host bridge or ERR_PTR(-errno)
 */
static struct xarray *cxl_rp_gather_bandwidth(struct xarray *xa)
{
	struct xarray *hb_xa __free(free_perf_xa) =
		kzalloc(sizeof(*hb_xa), GFP_KERNEL);
	struct cxl_perf_ctx *ctx;
	unsigned long index;

	if (!hb_xa)
		return ERR_PTR(-ENOMEM);
	xa_init(hb_xa);

	xa_for_each(xa, index, ctx) {
		struct cxl_port *port = ctx->port;
		unsigned long hb_index = (unsigned long)port->uport_dev;
		struct cxl_perf_ctx *hb_ctx;
		void *ptr;

		hb_ctx = xa_load(hb_xa, hb_index);
		if (!hb_ctx) {
			struct cxl_perf_ctx *n __free(kfree) =
				kzalloc(sizeof(*n), GFP_KERNEL);

			if (!n)
				return ERR_PTR(-ENOMEM);
			ptr = xa_store(hb_xa, hb_index, n, GFP_KERNEL);
			if (xa_is_err(ptr))
				return ERR_PTR(xa_err(ptr));
			hb_ctx = no_free_ptr(n);
			hb_ctx->port = port;
		}

		cxl_bandwidth_add(hb_ctx->coord, hb_ctx->coord, ctx->coord);
	}

	return no_free_ptr(hb_xa);
}

/**
 * cxl_hb_gather_bandwidth - handle the host bridge level bandwidth collection
 * @xa: the xarray that holds the cxl_perf_ctx that has the bandwidth calculated
 *      below each host bridge.
 *
 * Return: xarray that holds cxl_perf_ctx per ACPI0017 device or ERR_PTR(-errno)
 */
static struct xarray *cxl_hb_gather_bandwidth(struct xarray *xa)
{
	struct xarray *mw_xa __free(free_perf_xa) =
		kzalloc(sizeof(*mw_xa), GFP_KERNEL);
	struct cxl_perf_ctx *ctx;
	unsigned long index;

	if (!mw_xa)
		return ERR_PTR(-ENOMEM);
	xa_init(mw_xa);

	xa_for_each(xa, index, ctx) {
		struct cxl_port *port = ctx->port;
		struct cxl_port *parent_port;
		struct cxl_perf_ctx *mw_ctx;
		struct cxl_dport *dport;
		unsigned long mw_index;
		void *ptr;

		parent_port = to_cxl_port(port->dev.parent);
		mw_index = (unsigned long)parent_port->uport_dev;

		mw_ctx = xa_load(mw_xa, mw_index);
		if (!mw_ctx) {
			struct cxl_perf_ctx *n __free(kfree) =
				kzalloc(sizeof(*n), GFP_KERNEL);

			if (!n)
				return ERR_PTR(-ENOMEM);
			ptr = xa_store(mw_xa, mw_index, n, GFP_KERNEL);
			if (xa_is_err(ptr))
				return ERR_PTR(xa_err(ptr));
			mw_ctx = no_free_ptr(n);
		}

		dport = port->parent_dport;
		cxl_coordinates_combine(ctx->coord, ctx->coord, dport->coord);
		cxl_bandwidth_add(mw_ctx->coord, mw_ctx->coord, ctx->coord);
	}

	return no_free_ptr(mw_xa);
}

/**
 * cxl_region_update_bandwidth - Update the bandwidth access coordinates of a region
 * @cxlr: The region being operated on
 * @input_xa: xarray holding cxl_perf_ctx with calculated bandwidth per ACPI0017 instance
 */
static void cxl_region_update_bandwidth(struct cxl_region *cxlr,
					struct xarray *input_xa)
{
	struct access_coordinate coord[ACCESS_COORDINATE_MAX];
	struct cxl_perf_ctx *ctx;
	unsigned long index;

	memset(coord, 0, sizeof(coord));
	xa_for_each(input_xa, index, ctx)
		cxl_bandwidth_add(coord, coord, ctx->coord);

	for (int i = 0; i < ACCESS_COORDINATE_MAX; i++) {
		cxlr->coord[i].read_bandwidth = coord[i].read_bandwidth;
		cxlr->coord[i].write_bandwidth = coord[i].write_bandwidth;
	}
}

/**
 * cxl_region_shared_upstream_bandwidth_update - Recalculate the bandwidth for
 *						 the region
 * @cxlr: the cxl region to recalculate
 *
 * The function walks the topology from the bottom up and calculates the
 * bandwidth. It starts at the endpoints, processes the switch levels if any,
 * then the root port level, then the host bridge level, and finally
 * aggregates at the region.
 */
void cxl_region_shared_upstream_bandwidth_update(struct cxl_region *cxlr)
{
	struct xarray *working_xa;
	int root_count = 0;
	bool is_root;
	int rc;

	lockdep_assert_held(&cxl_rwsem.dpa);

	struct xarray *usp_xa __free(free_perf_xa) =
		kzalloc(sizeof(*usp_xa), GFP_KERNEL);

	if (!usp_xa)
		return;

	xa_init(usp_xa);

	/* Collect bandwidth data from all the endpoints. */
	for (int i = 0; i < cxlr->params.nr_targets; i++) {
		struct cxl_endpoint_decoder *cxled = cxlr->params.targets[i];

		is_root = false;
		rc = cxl_endpoint_gather_bandwidth(cxlr, cxled, usp_xa, &is_root);
		if (rc)
			return;
		root_count += is_root;
	}

	/* Detect asymmetric hierarchy with some direct attached endpoints. */
	if (root_count && root_count != cxlr->params.nr_targets) {
		dev_dbg(&cxlr->dev,
			"Asymmetric hierarchy detected, bandwidth not updated\n");
		return;
	}

	/*
	 * Walk up one or more switches to deal with the bandwidth of the
	 * switches if they exist. Endpoints directly attached to RPs skip
	 * over this part.
	 */
	if (!root_count) {
		do {
			working_xa = cxl_switch_gather_bandwidth(cxlr, usp_xa,
								 &is_root);
			if (IS_ERR(working_xa))
				return;
			free_perf_xa(usp_xa);
			usp_xa = working_xa;
		} while (!is_root);
	}

	/* Handle the bandwidth at the root port of the hierarchy */
	working_xa = cxl_rp_gather_bandwidth(usp_xa);
	if (IS_ERR(working_xa))
		return;
	free_perf_xa(usp_xa);
	usp_xa = working_xa;

	/* Handle the bandwidth at the host bridge of the hierarchy */
	working_xa = cxl_hb_gather_bandwidth(usp_xa);
	if (IS_ERR(working_xa))
		return;
	free_perf_xa(usp_xa);
	usp_xa = working_xa;

	/*
	 * Aggregate all the bandwidth collected per CFMWS (ACPI0017) and
	 * update the region bandwidth with the final calculated values.
	 */
	cxl_region_update_bandwidth(cxlr, usp_xa);
}

void cxl_region_perf_data_calculate(struct cxl_region *cxlr,
				    struct cxl_endpoint_decoder *cxled)
{
	struct cxl_dpa_perf *perf;

	lockdep_assert_held(&cxl_rwsem.dpa);

	perf = cxled_get_dpa_perf(cxled);
	if (IS_ERR(perf))
		return;

	for (int i = 0; i < ACCESS_COORDINATE_MAX; i++) {
		/* Get total bandwidth and the worst latency for the cxl region */
		cxlr->coord[i].read_latency = max_t(unsigned int,
						    cxlr->coord[i].read_latency,
						    perf->coord[i].read_latency);
		cxlr->coord[i].write_latency = max_t(unsigned int,
						     cxlr->coord[i].write_latency,
						     perf->coord[i].write_latency);
		cxlr->coord[i].read_bandwidth += perf->coord[i].read_bandwidth;
		cxlr->coord[i].write_bandwidth += perf->coord[i].write_bandwidth;
	}
}

int cxl_update_hmat_access_coordinates(int nid, struct cxl_region *cxlr,
				       enum access_coordinate_class access)
{
	return hmat_update_target_coordinates(nid, &cxlr->coord[access], access);
}

bool cxl_need_node_perf_attrs_update(int nid)
{
	return !acpi_node_backed_by_real_pxm(nid);
}