Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
torvalds
GitHub Repository: torvalds/linux
Path: blob/master/tools/testing/cxl/test/cxl.c
50607 views
1
// SPDX-License-Identifier: GPL-2.0-only
2
// Copyright(c) 2021 Intel Corporation. All rights reserved.
3
4
#include <linux/platform_device.h>
5
#include <linux/memory_hotplug.h>
6
#include <linux/genalloc.h>
7
#include <linux/module.h>
8
#include <linux/mutex.h>
9
#include <linux/acpi.h>
10
#include <linux/pci.h>
11
#include <linux/mm.h>
12
#include <cxlmem.h>
13
14
#include "../watermark.h"
15
#include "mock.h"
16
17
static int interleave_arithmetic;
18
static bool extended_linear_cache;
19
20
#define FAKE_QTG_ID 42
21
22
#define NR_CXL_HOST_BRIDGES 2
23
#define NR_CXL_SINGLE_HOST 1
24
#define NR_CXL_RCH 1
25
#define NR_CXL_ROOT_PORTS 2
26
#define NR_CXL_SWITCH_PORTS 2
27
#define NR_CXL_PORT_DECODERS 8
28
#define NR_BRIDGES (NR_CXL_HOST_BRIDGES + NR_CXL_SINGLE_HOST + NR_CXL_RCH)
29
30
#define MOCK_AUTO_REGION_SIZE_DEFAULT SZ_512M
31
static int mock_auto_region_size = MOCK_AUTO_REGION_SIZE_DEFAULT;
32
33
static struct platform_device *cxl_acpi;
34
static struct platform_device *cxl_host_bridge[NR_CXL_HOST_BRIDGES];
35
#define NR_MULTI_ROOT (NR_CXL_HOST_BRIDGES * NR_CXL_ROOT_PORTS)
36
static struct platform_device *cxl_root_port[NR_MULTI_ROOT];
37
static struct platform_device *cxl_switch_uport[NR_MULTI_ROOT];
38
#define NR_MEM_MULTI \
39
(NR_CXL_HOST_BRIDGES * NR_CXL_ROOT_PORTS * NR_CXL_SWITCH_PORTS)
40
static struct platform_device *cxl_switch_dport[NR_MEM_MULTI];
41
42
static struct platform_device *cxl_hb_single[NR_CXL_SINGLE_HOST];
43
static struct platform_device *cxl_root_single[NR_CXL_SINGLE_HOST];
44
static struct platform_device *cxl_swu_single[NR_CXL_SINGLE_HOST];
45
#define NR_MEM_SINGLE (NR_CXL_SINGLE_HOST * NR_CXL_SWITCH_PORTS)
46
static struct platform_device *cxl_swd_single[NR_MEM_SINGLE];
47
48
struct platform_device *cxl_mem[NR_MEM_MULTI];
49
struct platform_device *cxl_mem_single[NR_MEM_SINGLE];
50
51
static struct platform_device *cxl_rch[NR_CXL_RCH];
52
static struct platform_device *cxl_rcd[NR_CXL_RCH];
53
54
static inline bool is_multi_bridge(struct device *dev)
55
{
56
int i;
57
58
for (i = 0; i < ARRAY_SIZE(cxl_host_bridge); i++)
59
if (&cxl_host_bridge[i]->dev == dev)
60
return true;
61
return false;
62
}
63
64
static inline bool is_single_bridge(struct device *dev)
65
{
66
int i;
67
68
for (i = 0; i < ARRAY_SIZE(cxl_hb_single); i++)
69
if (&cxl_hb_single[i]->dev == dev)
70
return true;
71
return false;
72
}
73
74
/* Mock ACPI0017 (CXL root) device that anchors the test topology. */
static struct acpi_device acpi0017_mock;
/*
 * Mock ACPI host-bridge devices, one per NR_BRIDGES. The .handle of each
 * entry points back at the entry itself: it only needs to be a unique
 * token that find_host_bridge() can match against; it is never
 * dereferenced as a real ACPI handle by the mock code.
 */
static struct acpi_device host_bridge[NR_BRIDGES] = {
	[0] = {
		.handle = &host_bridge[0],
		.pnp.unique_id = "0",
	},
	[1] = {
		.handle = &host_bridge[1],
		.pnp.unique_id = "1",
	},
	[2] = {
		.handle = &host_bridge[2],
		.pnp.unique_id = "2",
	},
	[3] = {
		.handle = &host_bridge[3],
		.pnp.unique_id = "3",
	},
};
93
94
/*
 * Return true if @dev is one of the mock memdev platform devices (multi,
 * single, or RCD variants) or the mock ACPI0017 platform device.
 */
static bool is_mock_dev(struct device *dev)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(cxl_mem); i++)
		if (dev == &cxl_mem[i]->dev)
			return true;
	for (i = 0; i < ARRAY_SIZE(cxl_mem_single); i++)
		if (dev == &cxl_mem_single[i]->dev)
			return true;
	for (i = 0; i < ARRAY_SIZE(cxl_rcd); i++)
		if (dev == &cxl_rcd[i]->dev)
			return true;
	if (dev == &cxl_acpi->dev)
		return true;
	return false;
}
111
112
static bool is_mock_adev(struct acpi_device *adev)
113
{
114
int i;
115
116
if (adev == &acpi0017_mock)
117
return true;
118
119
for (i = 0; i < ARRAY_SIZE(host_bridge); i++)
120
if (adev == &host_bridge[i])
121
return true;
122
123
return false;
124
}
125
126
/*
 * Mock CEDT (CXL Early Discovery Table) describing the test topology:
 * one CHBS per mock bridge (uids 0-2 are CXL 2.0 hosts, uid 3 is a
 * CXL 1.1 RCH), nine CFMWS windows and one CXIMS. Windows 0-5 use the
 * default modulo interleave arithmetic; windows 6-8 use XOR arithmetic
 * and reference the xormap_list in cxims0. The base addresses
 * (chbs.base, cfmws.base_hpa) are left zero here and are filled in at
 * runtime by populate_cedt().
 */
static struct {
	struct acpi_table_cedt cedt;
	struct acpi_cedt_chbs chbs[NR_BRIDGES];
	struct {
		struct acpi_cedt_cfmws cfmws;
		u32 target[1];
	} cfmws0;
	struct {
		struct acpi_cedt_cfmws cfmws;
		u32 target[2];
	} cfmws1;
	struct {
		struct acpi_cedt_cfmws cfmws;
		u32 target[1];
	} cfmws2;
	struct {
		struct acpi_cedt_cfmws cfmws;
		u32 target[2];
	} cfmws3;
	struct {
		struct acpi_cedt_cfmws cfmws;
		u32 target[1];
	} cfmws4;
	struct {
		struct acpi_cedt_cfmws cfmws;
		u32 target[1];
	} cfmws5;
	struct {
		struct acpi_cedt_cfmws cfmws;
		u32 target[1];
	} cfmws6;
	struct {
		struct acpi_cedt_cfmws cfmws;
		u32 target[2];
	} cfmws7;
	struct {
		struct acpi_cedt_cfmws cfmws;
		u32 target[3];
	} cfmws8;
	struct {
		struct acpi_cedt_cxims cxims;
		u64 xormap_list[2];
	} cxims0;
} __packed mock_cedt = {
	.cedt = {
		.header = {
			.signature = "CEDT",
			.length = sizeof(mock_cedt),
			.revision = 1,
		},
	},
	.chbs[0] = {
		.header = {
			.type = ACPI_CEDT_TYPE_CHBS,
			.length = sizeof(mock_cedt.chbs[0]),
		},
		.uid = 0,
		.cxl_version = ACPI_CEDT_CHBS_VERSION_CXL20,
	},
	.chbs[1] = {
		.header = {
			.type = ACPI_CEDT_TYPE_CHBS,
			.length = sizeof(mock_cedt.chbs[0]),
		},
		.uid = 1,
		.cxl_version = ACPI_CEDT_CHBS_VERSION_CXL20,
	},
	.chbs[2] = {
		.header = {
			.type = ACPI_CEDT_TYPE_CHBS,
			.length = sizeof(mock_cedt.chbs[0]),
		},
		.uid = 2,
		.cxl_version = ACPI_CEDT_CHBS_VERSION_CXL20,
	},
	.chbs[3] = {
		.header = {
			.type = ACPI_CEDT_TYPE_CHBS,
			.length = sizeof(mock_cedt.chbs[0]),
		},
		.uid = 3,
		/* uid 3 is the restricted CXL host (RCH) bridge */
		.cxl_version = ACPI_CEDT_CHBS_VERSION_CXL11,
	},
	/* cfmws0: 1-way volatile window on host bridge 0 */
	.cfmws0 = {
		.cfmws = {
			.header = {
				.type = ACPI_CEDT_TYPE_CFMWS,
				.length = sizeof(mock_cedt.cfmws0),
			},
			.interleave_ways = 0,
			.granularity = 4,
			.restrictions = ACPI_CEDT_CFMWS_RESTRICT_HOSTONLYMEM |
					ACPI_CEDT_CFMWS_RESTRICT_VOLATILE,
			.qtg_id = FAKE_QTG_ID,
			.window_size = SZ_256M * 4UL,
		},
		.target = { 0 },
	},
	/* cfmws1: 2-way volatile window across host bridges 0 and 1 */
	.cfmws1 = {
		.cfmws = {
			.header = {
				.type = ACPI_CEDT_TYPE_CFMWS,
				.length = sizeof(mock_cedt.cfmws1),
			},
			.interleave_ways = 1,
			.granularity = 4,
			.restrictions = ACPI_CEDT_CFMWS_RESTRICT_HOSTONLYMEM |
					ACPI_CEDT_CFMWS_RESTRICT_VOLATILE,
			.qtg_id = FAKE_QTG_ID,
			.window_size = SZ_256M * 8UL,
		},
		.target = { 0, 1, },
	},
	/* cfmws2: 1-way pmem window on host bridge 0 */
	.cfmws2 = {
		.cfmws = {
			.header = {
				.type = ACPI_CEDT_TYPE_CFMWS,
				.length = sizeof(mock_cedt.cfmws2),
			},
			.interleave_ways = 0,
			.granularity = 4,
			.restrictions = ACPI_CEDT_CFMWS_RESTRICT_HOSTONLYMEM |
					ACPI_CEDT_CFMWS_RESTRICT_PMEM,
			.qtg_id = FAKE_QTG_ID,
			.window_size = SZ_256M * 4UL,
		},
		.target = { 0 },
	},
	/* cfmws3: 2-way pmem window across host bridges 0 and 1 */
	.cfmws3 = {
		.cfmws = {
			.header = {
				.type = ACPI_CEDT_TYPE_CFMWS,
				.length = sizeof(mock_cedt.cfmws3),
			},
			.interleave_ways = 1,
			.granularity = 4,
			.restrictions = ACPI_CEDT_CFMWS_RESTRICT_HOSTONLYMEM |
					ACPI_CEDT_CFMWS_RESTRICT_PMEM,
			.qtg_id = FAKE_QTG_ID,
			.window_size = SZ_256M * 8UL,
		},
		.target = { 0, 1, },
	},
	/* cfmws4: 1-way pmem window on the single host bridge (uid 2) */
	.cfmws4 = {
		.cfmws = {
			.header = {
				.type = ACPI_CEDT_TYPE_CFMWS,
				.length = sizeof(mock_cedt.cfmws4),
			},
			.interleave_ways = 0,
			.granularity = 4,
			.restrictions = ACPI_CEDT_CFMWS_RESTRICT_HOSTONLYMEM |
					ACPI_CEDT_CFMWS_RESTRICT_PMEM,
			.qtg_id = FAKE_QTG_ID,
			.window_size = SZ_256M * 4UL,
		},
		.target = { 2 },
	},
	/* cfmws5: 1-way volatile window on the RCH bridge (uid 3) */
	.cfmws5 = {
		.cfmws = {
			.header = {
				.type = ACPI_CEDT_TYPE_CFMWS,
				.length = sizeof(mock_cedt.cfmws5),
			},
			.interleave_ways = 0,
			.granularity = 4,
			.restrictions = ACPI_CEDT_CFMWS_RESTRICT_HOSTONLYMEM |
					ACPI_CEDT_CFMWS_RESTRICT_VOLATILE,
			.qtg_id = FAKE_QTG_ID,
			.window_size = SZ_256M,
		},
		.target = { 3 },
	},
	/* .cfmws6,7,8 use ACPI_CEDT_CFMWS_ARITHMETIC_XOR */
	.cfmws6 = {
		.cfmws = {
			.header = {
				.type = ACPI_CEDT_TYPE_CFMWS,
				.length = sizeof(mock_cedt.cfmws6),
			},
			.interleave_arithmetic = ACPI_CEDT_CFMWS_ARITHMETIC_XOR,
			.interleave_ways = 0,
			.granularity = 4,
			.restrictions = ACPI_CEDT_CFMWS_RESTRICT_HOSTONLYMEM |
					ACPI_CEDT_CFMWS_RESTRICT_PMEM,
			.qtg_id = FAKE_QTG_ID,
			.window_size = SZ_256M * 8UL,
		},
		.target = { 0, },
	},
	.cfmws7 = {
		.cfmws = {
			.header = {
				.type = ACPI_CEDT_TYPE_CFMWS,
				.length = sizeof(mock_cedt.cfmws7),
			},
			.interleave_arithmetic = ACPI_CEDT_CFMWS_ARITHMETIC_XOR,
			.interleave_ways = 1,
			.granularity = 0,
			.restrictions = ACPI_CEDT_CFMWS_RESTRICT_HOSTONLYMEM |
					ACPI_CEDT_CFMWS_RESTRICT_PMEM,
			.qtg_id = FAKE_QTG_ID,
			.window_size = SZ_256M * 8UL,
		},
		.target = { 0, 1, },
	},
	/* cfmws8: 3-way XOR interleave, an intentionally odd ways count */
	.cfmws8 = {
		.cfmws = {
			.header = {
				.type = ACPI_CEDT_TYPE_CFMWS,
				.length = sizeof(mock_cedt.cfmws8),
			},
			.interleave_arithmetic = ACPI_CEDT_CFMWS_ARITHMETIC_XOR,
			.interleave_ways = 8,
			.granularity = 1,
			.restrictions = ACPI_CEDT_CFMWS_RESTRICT_HOSTONLYMEM |
					ACPI_CEDT_CFMWS_RESTRICT_PMEM,
			.qtg_id = FAKE_QTG_ID,
			.window_size = SZ_512M * 6UL,
		},
		.target = { 0, 1, 2, },
	},
	/* XOR maps consumed by the XOR-arithmetic windows above */
	.cxims0 = {
		.cxims = {
			.header = {
				.type = ACPI_CEDT_TYPE_CXIMS,
				.length = sizeof(mock_cedt.cxims0),
			},
			.hbig = 0,
			.nr_xormaps = 2,
		},
		.xormap_list = { 0x404100, 0x808200, },
	},
};
360
361
/*
 * Flat index over the CFMWS entries in mock_cedt so callers can iterate
 * a [cfmws_start, cfmws_end] sub-range depending on the selected
 * interleave arithmetic.
 */
struct acpi_cedt_cfmws *mock_cfmws[] = {
	[0] = &mock_cedt.cfmws0.cfmws,
	[1] = &mock_cedt.cfmws1.cfmws,
	[2] = &mock_cedt.cfmws2.cfmws,
	[3] = &mock_cedt.cfmws3.cfmws,
	[4] = &mock_cedt.cfmws4.cfmws,
	[5] = &mock_cedt.cfmws5.cfmws,
	/* Modulo Math above, XOR Math below */
	[6] = &mock_cedt.cfmws6.cfmws,
	[7] = &mock_cedt.cfmws7.cfmws,
	[8] = &mock_cedt.cfmws8.cfmws,
};
373
374
static int cfmws_start;
375
static int cfmws_end;
376
#define CFMWS_MOD_ARRAY_START 0
377
#define CFMWS_MOD_ARRAY_END 5
378
#define CFMWS_XOR_ARRAY_START 6
379
#define CFMWS_XOR_ARRAY_END 8
380
381
/* CXIMS entries (XOR interleave maps) exposed to the mocked CEDT parser. */
struct acpi_cedt_cxims *mock_cxims[1] = {
	[0] = &mock_cedt.cxims0.cxims,
};
384
385
struct cxl_mock_res {
386
struct list_head list;
387
struct range range;
388
};
389
390
static LIST_HEAD(mock_res);
391
static DEFINE_MUTEX(mock_res_lock);
392
static struct gen_pool *cxl_mock_pool;
393
394
/*
 * Release every range previously handed out by alloc_mock_res() back to
 * cxl_mock_pool and free the tracking structures.
 */
static void depopulate_all_mock_resources(void)
{
	struct cxl_mock_res *res, *_res;

	mutex_lock(&mock_res_lock);
	list_for_each_entry_safe(res, _res, &mock_res, list) {
		gen_pool_free(cxl_mock_pool, res->range.start,
			      range_len(&res->range));
		list_del(&res->list);
		kfree(res);
	}
	mutex_unlock(&mock_res_lock);
}
407
408
/*
 * Carve an @align-aligned range of @size bytes out of cxl_mock_pool and
 * track it on the mock_res list for later teardown by
 * depopulate_all_mock_resources().
 *
 * Returns the tracking structure, or NULL on allocation failure.
 *
 * Fixes vs. the original: the kzalloc() result was dereferenced without
 * a NULL check, and @res leaked when gen_pool_alloc_algo() failed.
 */
static struct cxl_mock_res *alloc_mock_res(resource_size_t size, int align)
{
	struct cxl_mock_res *res = kzalloc(sizeof(*res), GFP_KERNEL);
	struct genpool_data_align data = {
		.align = align,
	};
	unsigned long phys;

	if (!res)
		return NULL;

	INIT_LIST_HEAD(&res->list);
	phys = gen_pool_alloc_algo(cxl_mock_pool, size,
				   gen_pool_first_fit_align, &data);
	if (!phys) {
		kfree(res);
		return NULL;
	}

	res->range = (struct range) {
		.start = phys,
		.end = phys + size - 1,
	};
	mutex_lock(&mock_res_lock);
	list_add(&res->list, &mock_res);
	mutex_unlock(&mock_res_lock);

	return res;
}
432
433
/* Only update CFMWS0 as this is used by the auto region. */
434
static void cfmws_elc_update(struct acpi_cedt_cfmws *window, int index)
435
{
436
if (!extended_linear_cache)
437
return;
438
439
if (index != 0)
440
return;
441
442
/*
443
* The window size should be 2x of the CXL region size where half is
444
* DRAM and half is CXL
445
*/
446
window->window_size = mock_auto_region_size * 2;
447
}
448
449
/*
 * Assign runtime base addresses to the mock CEDT: carve a register range
 * for each CHBS and an HPA range for each active CFMWS window out of the
 * mock address pool.
 *
 * Returns 0 on success, -ENOMEM if the pool is exhausted. Partially
 * assigned resources are reclaimed by depopulate_all_mock_resources()
 * on the caller's error path.
 */
static int populate_cedt(void)
{
	struct cxl_mock_res *res;
	int i;

	for (i = 0; i < ARRAY_SIZE(mock_cedt.chbs); i++) {
		struct acpi_cedt_chbs *chbs = &mock_cedt.chbs[i];
		resource_size_t size;

		if (chbs->cxl_version == ACPI_CEDT_CHBS_VERSION_CXL20)
			size = ACPI_CEDT_CHBS_LENGTH_CXL20;
		else
			size = ACPI_CEDT_CHBS_LENGTH_CXL11;

		/* CHBS register blocks are naturally aligned to their size */
		res = alloc_mock_res(size, size);
		if (!res)
			return -ENOMEM;
		chbs->base = res->range.start;
		chbs->length = size;
	}

	/* only the windows selected by the interleave_arithmetic module
	 * parameter (cfmws_start..cfmws_end) get an HPA range */
	for (i = cfmws_start; i <= cfmws_end; i++) {
		struct acpi_cedt_cfmws *window = mock_cfmws[i];

		cfmws_elc_update(window, i);
		res = alloc_mock_res(window->window_size, SZ_256M);
		if (!res)
			return -ENOMEM;
		window->base_hpa = res->range.start;
	}

	return 0;
}
482
483
static bool is_mock_port(struct device *dev);
484
485
/*
486
* WARNING, this hack assumes the format of 'struct cxl_cfmws_context'
487
* and 'struct cxl_chbs_context' share the property that the first
488
* struct member is a cxl_test device being probed by the cxl_acpi
489
* driver.
490
*/
491
struct cxl_cedt_context {
492
struct device *dev;
493
};
494
495
/*
 * Mock replacement for acpi_table_parse_cedt(): feed the handler the
 * in-memory mock_cedt subtables instead of firmware tables, but only for
 * devices owned by cxl_test; real devices fall through to the native
 * parser. @arg is assumed to start with a 'struct device *' (see the
 * struct cxl_cedt_context WARNING above).
 */
static int mock_acpi_table_parse_cedt(enum acpi_cedt_type id,
				      acpi_tbl_entry_handler_arg handler_arg,
				      void *arg)
{
	struct cxl_cedt_context *ctx = arg;
	struct device *dev = ctx->dev;
	union acpi_subtable_headers *h;
	unsigned long end;
	int i;

	if (!is_mock_port(dev) && !is_mock_dev(dev))
		return acpi_table_parse_cedt(id, handler_arg, arg);

	if (id == ACPI_CEDT_TYPE_CHBS)
		for (i = 0; i < ARRAY_SIZE(mock_cedt.chbs); i++) {
			h = (union acpi_subtable_headers *)&mock_cedt.chbs[i];
			/* entries are contiguous, so the next one bounds this one */
			end = (unsigned long)&mock_cedt.chbs[i + 1];
			handler_arg(h, arg, end);
		}

	if (id == ACPI_CEDT_TYPE_CFMWS)
		for (i = cfmws_start; i <= cfmws_end; i++) {
			h = (union acpi_subtable_headers *) mock_cfmws[i];
			end = (unsigned long) h + mock_cfmws[i]->header.length;
			handler_arg(h, arg, end);
		}

	if (id == ACPI_CEDT_TYPE_CXIMS)
		for (i = 0; i < ARRAY_SIZE(mock_cxims); i++) {
			h = (union acpi_subtable_headers *)mock_cxims[i];
			end = (unsigned long)h + mock_cxims[i]->header.length;
			handler_arg(h, arg, end);
		}

	return 0;
}
531
532
/*
 * Return true if @dev is any flavor of mock host-bridge platform device
 * (multi-port, single-port, or restricted CXL host).
 */
static bool is_mock_bridge(struct device *dev)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(cxl_host_bridge); i++)
		if (dev == &cxl_host_bridge[i]->dev)
			return true;
	for (i = 0; i < ARRAY_SIZE(cxl_hb_single); i++)
		if (dev == &cxl_hb_single[i]->dev)
			return true;
	for (i = 0; i < ARRAY_SIZE(cxl_rch); i++)
		if (dev == &cxl_rch[i]->dev)
			return true;

	return false;
}
548
549
/*
 * Return true if @dev is any mock CXL topology component that acts as a
 * port: a host bridge, root port, switch uport/dport (multi or single
 * variants), or an endpoint whose memdev parent is a mock device.
 */
static bool is_mock_port(struct device *dev)
{
	int i;

	if (is_mock_bridge(dev))
		return true;

	for (i = 0; i < ARRAY_SIZE(cxl_root_port); i++)
		if (dev == &cxl_root_port[i]->dev)
			return true;

	for (i = 0; i < ARRAY_SIZE(cxl_switch_uport); i++)
		if (dev == &cxl_switch_uport[i]->dev)
			return true;

	for (i = 0; i < ARRAY_SIZE(cxl_switch_dport); i++)
		if (dev == &cxl_switch_dport[i]->dev)
			return true;

	for (i = 0; i < ARRAY_SIZE(cxl_root_single); i++)
		if (dev == &cxl_root_single[i]->dev)
			return true;

	for (i = 0; i < ARRAY_SIZE(cxl_swu_single); i++)
		if (dev == &cxl_swu_single[i]->dev)
			return true;

	for (i = 0; i < ARRAY_SIZE(cxl_swd_single); i++)
		if (dev == &cxl_swd_single[i]->dev)
			return true;

	/* endpoint ports: mock iff the backing memdev is mock */
	if (is_cxl_memdev(dev))
		return is_mock_dev(dev->parent);

	return false;
}
585
586
/* Index of @adev within the host_bridge[] array (doubles as its uid). */
static int host_bridge_index(struct acpi_device *adev)
{
	return adev - host_bridge;
}
590
591
/* Map an ACPI @handle to its mock host-bridge device, or NULL if none. */
static struct acpi_device *find_host_bridge(acpi_handle handle)
{
	for (int i = 0; i < ARRAY_SIZE(host_bridge); i++) {
		struct acpi_device *adev = &host_bridge[i];

		if (adev->handle == handle)
			return adev;
	}

	return NULL;
}
600
601
/*
 * Mock acpi_evaluate_integer(): answer _UID queries for mock host
 * bridges with their array index; forward everything else (other
 * handles or other method names) to the real implementation.
 */
static acpi_status
mock_acpi_evaluate_integer(acpi_handle handle, acpi_string pathname,
			   struct acpi_object_list *arguments,
			   unsigned long long *data)
{
	struct acpi_device *adev = find_host_bridge(handle);

	if (!adev || strcmp(pathname, METHOD_NAME__UID) != 0)
		return acpi_evaluate_integer(handle, pathname, arguments, data);

	*data = host_bridge_index(adev);
	return AE_OK;
}
614
615
/*
 * Mock hmat_get_extended_linear_cache_size(): when extended-linear-cache
 * testing is enabled and @backing_res falls inside CFMWS0's window,
 * report a cache of mock_auto_region_size bytes; otherwise defer to the
 * real HMAT lookup.
 */
static int
mock_hmat_get_extended_linear_cache_size(struct resource *backing_res,
					 int nid, resource_size_t *cache_size)
{
	struct acpi_cedt_cfmws *window = mock_cfmws[0];
	struct resource cfmws0_res =
		DEFINE_RES_MEM(window->base_hpa, window->window_size);

	if (!extended_linear_cache ||
	    !resource_contains(&cfmws0_res, backing_res)) {
		return hmat_get_extended_linear_cache_size(backing_res,
							   nid, cache_size);
	}

	*cache_size = mock_auto_region_size;

	return 0;
}
633
634
/* One mock PCI bus / ACPI PCI root pair per mock host bridge. */
static struct pci_bus mock_pci_bus[NR_BRIDGES];
static struct acpi_pci_root mock_pci_root[ARRAY_SIZE(mock_pci_bus)] = {
	[0] = {
		.bus = &mock_pci_bus[0],
	},
	[1] = {
		.bus = &mock_pci_bus[1],
	},
	[2] = {
		.bus = &mock_pci_bus[2],
	},
	[3] = {
		.bus = &mock_pci_bus[3],
	},

};
650
651
static bool is_mock_bus(struct pci_bus *bus)
652
{
653
int i;
654
655
for (i = 0; i < ARRAY_SIZE(mock_pci_bus); i++)
656
if (bus == &mock_pci_bus[i])
657
return true;
658
return false;
659
}
660
661
/*
 * Mock acpi_pci_find_root(): return the mock PCI root for a mock host
 * bridge handle, or fall through to the real lookup otherwise.
 */
static struct acpi_pci_root *mock_acpi_pci_find_root(acpi_handle handle)
{
	struct acpi_device *adev = find_host_bridge(handle);

	if (!adev)
		return acpi_pci_find_root(handle);
	return &mock_pci_root[host_bridge_index(adev)];
}
669
670
/*
 * Mock devm_cxl_setup_hdm(): fabricate a cxl_hdm with fully-permissive
 * interleave capabilities instead of mapping real HDM decoder registers.
 * @info is unused. Returns the new cxl_hdm or ERR_PTR(-ENOMEM).
 */
static struct cxl_hdm *mock_cxl_setup_hdm(struct cxl_port *port,
					  struct cxl_endpoint_dvsec_info *info)
{
	struct cxl_hdm *cxlhdm = devm_kzalloc(&port->dev, sizeof(*cxlhdm), GFP_KERNEL);
	struct device *dev = &port->dev;

	if (!cxlhdm)
		return ERR_PTR(-ENOMEM);

	cxlhdm->port = port;
	/* all interleave granularities and ways are "supported" */
	cxlhdm->interleave_mask = ~0U;
	cxlhdm->iw_cap_mask = ~0UL;
	dev_set_drvdata(dev, cxlhdm);
	return cxlhdm;
}
685
686
struct target_map_ctx {
687
u32 *target_map;
688
int index;
689
int target_count;
690
};
691
692
static int map_targets(struct device *dev, void *data)
693
{
694
struct platform_device *pdev = to_platform_device(dev);
695
struct target_map_ctx *ctx = data;
696
697
ctx->target_map[ctx->index++] = pdev->id;
698
699
if (ctx->index > ctx->target_count) {
700
dev_WARN_ONCE(dev, 1, "too many targets found?\n");
701
return -ENXIO;
702
}
703
704
return 0;
705
}
706
707
/*
 * Mock decoder commit: enforce the hardware rule that decoders must be
 * committed in ascending id order, then mark the decoder enabled and
 * advance the port's commit high-water mark. Idempotent for decoders
 * that are already enabled. Returns 0 or -EBUSY on out-of-order commit.
 */
static int mock_decoder_commit(struct cxl_decoder *cxld)
{
	struct cxl_port *port = to_cxl_port(cxld->dev.parent);
	int id = cxld->id;

	if (cxld->flags & CXL_DECODER_F_ENABLE)
		return 0;

	dev_dbg(&port->dev, "%s commit\n", dev_name(&cxld->dev));
	if (cxl_num_decoders_committed(port) != id) {
		dev_dbg(&port->dev,
			"%s: out of order commit, expected decoder%d.%d\n",
			dev_name(&cxld->dev), port->id,
			cxl_num_decoders_committed(port));
		return -EBUSY;
	}

	port->commit_end++;
	cxld->flags |= CXL_DECODER_F_ENABLE;

	return 0;
}
729
730
/*
 * Mock decoder reset: the inverse of mock_decoder_commit(). Only the
 * highest-committed decoder may be reaped in order; an out-of-order
 * reset is logged but the enable flag is cleared regardless.
 */
static void mock_decoder_reset(struct cxl_decoder *cxld)
{
	struct cxl_port *port = to_cxl_port(cxld->dev.parent);
	int id = cxld->id;

	if ((cxld->flags & CXL_DECODER_F_ENABLE) == 0)
		return;

	dev_dbg(&port->dev, "%s reset\n", dev_name(&cxld->dev));
	if (port->commit_end == id)
		cxl_port_commit_reap(cxld);
	else
		dev_dbg(&port->dev,
			"%s: out of order reset, expected decoder%d.%d\n",
			dev_name(&cxld->dev), port->id, port->commit_end);
	cxld->flags &= ~CXL_DECODER_F_ENABLE;
}
747
748
/*
 * Initialize a decoder to the disabled default: an empty (inverted)
 * HPA range, 1-way interleave, and the mock commit/reset ops.
 */
static void default_mock_decoder(struct cxl_decoder *cxld)
{
	cxld->hpa_range = (struct range){
		.start = 0,
		.end = -1,
	};

	cxld->interleave_ways = 1;
	cxld->interleave_granularity = 256;
	cxld->target_type = CXL_DECODER_HOSTONLYMEM;
	cxld->commit = mock_decoder_commit;
	cxld->reset = mock_decoder_reset;
}
761
762
static int first_decoder(struct device *dev, const void *data)
763
{
764
struct cxl_decoder *cxld;
765
766
if (!is_switch_decoder(dev))
767
return 0;
768
cxld = to_cxl_decoder(dev);
769
if (cxld->id == 0)
770
return 1;
771
return 0;
772
}
773
774
/*
 * Initialize @cxld as if BIOS had programmed it. Most decoders get the
 * disabled default; the one exception is the first decoder (id 0) of
 * endpoints cxl_mem.0 and cxl_mem.4 under host-bridge0, which mock a
 * pre-committed / auto-assembled RAM region backed by CFMWS0, including
 * the matching switch and root-port decoder programming up the
 * hierarchy.
 */
static void mock_init_hdm_decoder(struct cxl_decoder *cxld)
{
	struct acpi_cedt_cfmws *window = mock_cfmws[0];
	struct platform_device *pdev = NULL;
	struct cxl_endpoint_decoder *cxled;
	struct cxl_switch_decoder *cxlsd;
	struct cxl_port *port, *iter;
	struct cxl_memdev *cxlmd;
	struct cxl_dport *dport;
	struct device *dev;
	bool hb0 = false;
	u64 base;
	int i;

	if (is_endpoint_decoder(&cxld->dev)) {
		cxled = to_cxl_endpoint_decoder(&cxld->dev);
		cxlmd = cxled_to_memdev(cxled);
		WARN_ON(!dev_is_platform(cxlmd->dev.parent));
		pdev = to_platform_device(cxlmd->dev.parent);

		/* check whether the endpoint is attached to host-bridge0 */
		port = cxled_to_port(cxled);
		do {
			if (port->uport_dev == &cxl_host_bridge[0]->dev) {
				hb0 = true;
				break;
			}
			if (is_cxl_port(port->dev.parent))
				port = to_cxl_port(port->dev.parent);
			else
				port = NULL;
		} while (port);
		/* restore @port to the endpoint's own port after the walk */
		port = cxled_to_port(cxled);
	}

	/*
	 * The first decoder on the first 2 devices on the first switch
	 * attached to host-bridge0 mock a fake / static RAM region. All
	 * other decoders are default disabled. Given the round robin
	 * assignment those devices are named cxl_mem.0, and cxl_mem.4.
	 *
	 * See 'cxl list -BMPu -m cxl_mem.0,cxl_mem.4'
	 */
	if (!hb0 || pdev->id % 4 || pdev->id > 4 || cxld->id > 0) {
		default_mock_decoder(cxld);
		return;
	}

	base = window->base_hpa;
	/* with an extended linear cache the CXL half is the upper half */
	if (extended_linear_cache)
		base += mock_auto_region_size;
	cxld->hpa_range = (struct range) {
		.start = base,
		.end = base + mock_auto_region_size - 1,
	};

	cxld->interleave_ways = 2;
	eig_to_granularity(window->granularity, &cxld->interleave_granularity);
	cxld->target_type = CXL_DECODER_HOSTONLYMEM;
	cxld->flags = CXL_DECODER_F_ENABLE;
	cxled->state = CXL_DECODER_STATE_AUTO;
	port->commit_end = cxld->id;
	/* each of the 2 endpoints contributes half of the region's DPA */
	devm_cxl_dpa_reserve(cxled, 0,
			     mock_auto_region_size / cxld->interleave_ways, 0);
	cxld->commit = mock_decoder_commit;
	cxld->reset = mock_decoder_reset;

	/*
	 * Now that endpoint decoder is set up, walk up the hierarchy
	 * and setup the switch and root port decoders targeting @cxlmd.
	 */
	iter = port;
	for (i = 0; i < 2; i++) {
		dport = iter->parent_dport;
		iter = dport->port;
		dev = device_find_child(&iter->dev, NULL, first_decoder);
		/*
		 * Ancestor ports are guaranteed to be enumerated before
		 * @port, and all ports have at least one decoder.
		 */
		if (WARN_ON(!dev))
			continue;

		cxlsd = to_cxl_switch_decoder(dev);
		if (i == 0) {
			/* put cxl_mem.4 second in the decode order */
			if (pdev->id == 4) {
				cxlsd->target[1] = dport;
				cxld->target_map[1] = dport->port_id;
			} else {
				cxlsd->target[0] = dport;
				cxld->target_map[0] = dport->port_id;
			}
		} else {
			cxlsd->target[0] = dport;
			cxld->target_map[0] = dport->port_id;
		}
		cxld = &cxlsd->cxld;
		cxld->target_type = CXL_DECODER_HOSTONLYMEM;
		cxld->flags = CXL_DECODER_F_ENABLE;
		iter->commit_end = 0;
		/*
		 * Switch targets 2 endpoints, while host bridge targets
		 * one root port
		 */
		if (i == 0)
			cxld->interleave_ways = 2;
		else
			cxld->interleave_ways = 1;
		cxld->interleave_granularity = 4096;
		cxld->hpa_range = (struct range) {
			.start = base,
			.end = base + mock_auto_region_size - 1,
		};
		put_device(dev);
	}
}
891
892
/*
 * Mock devm_cxl_enumerate_decoders(): allocate and register
 * NR_CXL_PORT_DECODERS decoders for @cxlhdm's port. Endpoint ports get
 * endpoint decoders (no targets); switch/root-port ports get switch
 * decoders whose target maps are populated from the port's child
 * platform devices. @info is unused. Returns 0 or a negative errno.
 */
static int mock_cxl_enumerate_decoders(struct cxl_hdm *cxlhdm,
				       struct cxl_endpoint_dvsec_info *info)
{
	struct cxl_port *port = cxlhdm->port;
	struct cxl_port *parent_port = to_cxl_port(port->dev.parent);
	int target_count, i;

	if (is_cxl_endpoint(port))
		target_count = 0;
	else if (is_cxl_root(parent_port))
		target_count = NR_CXL_ROOT_PORTS;
	else
		target_count = NR_CXL_SWITCH_PORTS;

	for (i = 0; i < NR_CXL_PORT_DECODERS; i++) {
		struct target_map_ctx ctx = {
			.target_count = target_count,
		};
		struct cxl_decoder *cxld;
		int rc;

		if (target_count) {
			struct cxl_switch_decoder *cxlsd;

			cxlsd = cxl_switch_decoder_alloc(port, target_count);
			if (IS_ERR(cxlsd)) {
				dev_warn(&port->dev,
					 "Failed to allocate the decoder\n");
				return PTR_ERR(cxlsd);
			}
			cxld = &cxlsd->cxld;
		} else {
			struct cxl_endpoint_decoder *cxled;

			cxled = cxl_endpoint_decoder_alloc(port);

			if (IS_ERR(cxled)) {
				dev_warn(&port->dev,
					 "Failed to allocate the decoder\n");
				return PTR_ERR(cxled);
			}
			cxld = &cxled->cxld;
		}

		ctx.target_map = cxld->target_map;

		/* may pre-enable the decoder to mock a BIOS-created region */
		mock_init_hdm_decoder(cxld);

		if (target_count) {
			rc = device_for_each_child(port->uport_dev, &ctx,
						   map_targets);
			if (rc) {
				put_device(&cxld->dev);
				return rc;
			}
		}

		rc = cxl_decoder_add_locked(cxld);
		if (rc) {
			put_device(&cxld->dev);
			dev_err(&port->dev, "Failed to add decoder\n");
			return rc;
		}

		rc = cxl_decoder_autoremove(&port->dev, cxld);
		if (rc)
			return rc;
		dev_dbg(&cxld->dev, "Added to port %s\n", dev_name(&port->dev));
	}

	return 0;
}
964
965
/*
 * Common helper: fabricate a mock HDM for @port and enumerate its
 * decoders. Returns 0 or a negative errno.
 */
static int __mock_cxl_decoders_setup(struct cxl_port *port)
{
	struct cxl_hdm *cxlhdm;

	cxlhdm = mock_cxl_setup_hdm(port, NULL);
	if (IS_ERR(cxlhdm)) {
		if (PTR_ERR(cxlhdm) != -ENODEV)
			dev_err(&port->dev, "Failed to map HDM decoder capability\n");
		return PTR_ERR(cxlhdm);
	}

	return mock_cxl_enumerate_decoders(cxlhdm, NULL);
}
978
979
/* Mock op: decoder setup for switch ports only (not root, not endpoint). */
static int mock_cxl_switch_port_decoders_setup(struct cxl_port *port)
{
	if (is_cxl_root(port) || is_cxl_endpoint(port))
		return -EOPNOTSUPP;

	return __mock_cxl_decoders_setup(port);
}
986
987
/* Mock op: decoder setup for endpoint ports only. */
static int mock_cxl_endpoint_decoders_setup(struct cxl_port *port)
{
	if (!is_cxl_endpoint(port))
		return -EOPNOTSUPP;

	return __mock_cxl_decoders_setup(port);
}
994
995
/*
 * Select the platform-device array that holds @port's potential dports,
 * based on the port's depth and whether its (grand)parent bridge is the
 * multi- or single-port variant. On success, *@port_array and
 * *@port_array_size are filled in and 0 is returned; -ENXIO otherwise.
 */
static int get_port_array(struct cxl_port *port,
			  struct platform_device ***port_array,
			  int *port_array_size)
{
	struct platform_device **array;
	int array_size;

	if (port->depth == 1) {
		/* depth 1: dports are root ports below the host bridge */
		if (is_multi_bridge(port->uport_dev)) {
			array_size = ARRAY_SIZE(cxl_root_port);
			array = cxl_root_port;
		} else if (is_single_bridge(port->uport_dev)) {
			array_size = ARRAY_SIZE(cxl_root_single);
			array = cxl_root_single;
		} else {
			dev_dbg(&port->dev, "%s: unknown bridge type\n",
				dev_name(port->uport_dev));
			return -ENXIO;
		}
	} else if (port->depth == 2) {
		/* depth 2: dports are switch downstream ports */
		struct cxl_port *parent = to_cxl_port(port->dev.parent);

		if (is_multi_bridge(parent->uport_dev)) {
			array_size = ARRAY_SIZE(cxl_switch_dport);
			array = cxl_switch_dport;
		} else if (is_single_bridge(parent->uport_dev)) {
			array_size = ARRAY_SIZE(cxl_swd_single);
			array = cxl_swd_single;
		} else {
			dev_dbg(&port->dev, "%s: unknown bridge type\n",
				dev_name(port->uport_dev));
			return -ENXIO;
		}
	} else {
		dev_WARN_ONCE(&port->dev, 1, "unexpected depth %d\n",
			      port->depth);
		return -ENXIO;
	}

	*port_array = array;
	*port_array_size = array_size;

	return 0;
}
1039
1040
/*
 * Mock op: register @dport_dev as a dport of @port. Scans the
 * appropriate mock platform-device array for an entry that both matches
 * @dport_dev and is parented by @port's uport. Returns the new
 * cxl_dport, or ERR_PTR(-ENODEV) if no candidate matches.
 */
static struct cxl_dport *mock_cxl_add_dport_by_dev(struct cxl_port *port,
						   struct device *dport_dev)
{
	struct platform_device **array;
	int rc, i, array_size;

	rc = get_port_array(port, &array, &array_size);
	if (rc)
		return ERR_PTR(rc);

	for (i = 0; i < array_size; i++) {
		struct platform_device *pdev = array[i];

		if (pdev->dev.parent != port->uport_dev) {
			dev_dbg(&port->dev, "%s: mismatch parent %s\n",
				dev_name(port->uport_dev),
				dev_name(pdev->dev.parent));
			continue;
		}

		if (&pdev->dev != dport_dev)
			continue;

		/* platform device id doubles as the dport's port id */
		return devm_cxl_add_dport(port, &pdev->dev, pdev->id,
					  CXL_RESOURCE_NONE);
	}

	return ERR_PTR(-ENODEV);
}
1069
1070
/*
1071
* Faking the cxl_dpa_perf for the memdev when appropriate.
1072
*/
1073
/*
 * Populate @dpa_perf with fixed fake performance data for @range.
 * @endpoint is currently unused but kept for signature parity with the
 * CDAT-derived setup path.
 */
static void dpa_perf_setup(struct cxl_port *endpoint, struct range *range,
			   struct cxl_dpa_perf *dpa_perf)
{
	dpa_perf->qos_class = FAKE_QTG_ID;
	dpa_perf->dpa_range = *range;
	for (int i = 0; i < ACCESS_COORDINATE_MAX; i++) {
		dpa_perf->coord[i].read_latency = 500;
		dpa_perf->coord[i].write_latency = 500;
		dpa_perf->coord[i].read_bandwidth = 1000;
		dpa_perf->coord[i].write_bandwidth = 1000;
	}
}
1085
1086
/*
 * Mock cxl_endpoint_parse_cdat(): instead of reading a real CDAT, stamp
 * fake perf data onto every DPA partition of the endpoint's memdev and
 * propagate it. Silently bails if the port has no root (e.g. mid
 * teardown).
 */
static void mock_cxl_endpoint_parse_cdat(struct cxl_port *port)
{
	struct cxl_root *cxl_root __free(put_cxl_root) =
		find_cxl_root(port);
	struct cxl_memdev *cxlmd = to_cxl_memdev(port->uport_dev);
	struct cxl_dev_state *cxlds = cxlmd->cxlds;
	struct access_coordinate ep_c[ACCESS_COORDINATE_MAX];

	if (!cxl_root)
		return;

	for (int i = 0; i < cxlds->nr_partitions; i++) {
		struct resource *res = &cxlds->part[i].res;
		struct cxl_dpa_perf *perf = &cxlds->part[i].perf;
		struct range range = {
			.start = res->start,
			.end = res->end,
		};

		dpa_perf_setup(port, &range, perf);
	}

	cxl_memdev_update_perf(cxlmd);

	/*
	 * This function is here to only test the topology iterator. It serves
	 * no other purpose.
	 */
	cxl_endpoint_get_perf_coordinates(port, ep_c);
}
1116
1117
/*
 * The ops table registered with the mock framework; the cxl core's
 * wrapped entry points dispatch here for mock devices and to the real
 * implementations otherwise.
 */
static struct cxl_mock_ops cxl_mock_ops = {
	.is_mock_adev = is_mock_adev,
	.is_mock_bridge = is_mock_bridge,
	.is_mock_bus = is_mock_bus,
	.is_mock_port = is_mock_port,
	.is_mock_dev = is_mock_dev,
	.acpi_table_parse_cedt = mock_acpi_table_parse_cedt,
	.acpi_evaluate_integer = mock_acpi_evaluate_integer,
	.acpi_pci_find_root = mock_acpi_pci_find_root,
	.devm_cxl_switch_port_decoders_setup = mock_cxl_switch_port_decoders_setup,
	.devm_cxl_endpoint_decoders_setup = mock_cxl_endpoint_decoders_setup,
	.cxl_endpoint_parse_cdat = mock_cxl_endpoint_parse_cdat,
	.devm_cxl_add_dport_by_dev = mock_cxl_add_dport_by_dev,
	.hmat_get_extended_linear_cache_size =
		mock_hmat_get_extended_linear_cache_size,
	.list = LIST_HEAD_INIT(cxl_mock_ops.list),
};
1134
1135
/*
 * Wire a mock ACPI companion to a platform device: initialize the ACPI
 * device and fwnode, then cross-link them so ACPI_COMPANION() style
 * lookups work on @dev.
 */
static void mock_companion(struct acpi_device *adev, struct device *dev)
{
	device_initialize(&adev->dev);
	fwnode_init(&adev->fwnode, NULL);
	dev->fwnode = &adev->fwnode;
	adev->fwnode.dev = dev;
}
1142
1143
#ifndef SZ_64G
1144
#define SZ_64G (SZ_32G * 2)
1145
#endif
1146
1147
static __init int cxl_rch_topo_init(void)
1148
{
1149
int rc, i;
1150
1151
for (i = 0; i < ARRAY_SIZE(cxl_rch); i++) {
1152
int idx = NR_CXL_HOST_BRIDGES + NR_CXL_SINGLE_HOST + i;
1153
struct acpi_device *adev = &host_bridge[idx];
1154
struct platform_device *pdev;
1155
1156
pdev = platform_device_alloc("cxl_host_bridge", idx);
1157
if (!pdev)
1158
goto err_bridge;
1159
1160
mock_companion(adev, &pdev->dev);
1161
rc = platform_device_add(pdev);
1162
if (rc) {
1163
platform_device_put(pdev);
1164
goto err_bridge;
1165
}
1166
1167
cxl_rch[i] = pdev;
1168
mock_pci_bus[idx].bridge = &pdev->dev;
1169
rc = sysfs_create_link(&pdev->dev.kobj, &pdev->dev.kobj,
1170
"firmware_node");
1171
if (rc)
1172
goto err_bridge;
1173
}
1174
1175
return 0;
1176
1177
err_bridge:
1178
for (i = ARRAY_SIZE(cxl_rch) - 1; i >= 0; i--) {
1179
struct platform_device *pdev = cxl_rch[i];
1180
1181
if (!pdev)
1182
continue;
1183
sysfs_remove_link(&pdev->dev.kobj, "firmware_node");
1184
platform_device_unregister(cxl_rch[i]);
1185
}
1186
1187
return rc;
1188
}
1189
1190
/*
 * Tear down the RCH bridges in reverse creation order, removing the
 * sysfs link before unregistering each platform device.
 */
static void cxl_rch_topo_exit(void)
{
	int i;

	for (i = ARRAY_SIZE(cxl_rch) - 1; i >= 0; i--) {
		struct platform_device *pdev = cxl_rch[i];

		if (!pdev)
			continue;
		sysfs_remove_link(&pdev->dev.kobj, "firmware_node");
		platform_device_unregister(cxl_rch[i]);
	}
}
1203
1204
static __init int cxl_single_topo_init(void)
1205
{
1206
int i, rc;
1207
1208
for (i = 0; i < ARRAY_SIZE(cxl_hb_single); i++) {
1209
struct acpi_device *adev =
1210
&host_bridge[NR_CXL_HOST_BRIDGES + i];
1211
struct platform_device *pdev;
1212
1213
pdev = platform_device_alloc("cxl_host_bridge",
1214
NR_CXL_HOST_BRIDGES + i);
1215
if (!pdev)
1216
goto err_bridge;
1217
1218
mock_companion(adev, &pdev->dev);
1219
rc = platform_device_add(pdev);
1220
if (rc) {
1221
platform_device_put(pdev);
1222
goto err_bridge;
1223
}
1224
1225
cxl_hb_single[i] = pdev;
1226
mock_pci_bus[i + NR_CXL_HOST_BRIDGES].bridge = &pdev->dev;
1227
rc = sysfs_create_link(&pdev->dev.kobj, &pdev->dev.kobj,
1228
"physical_node");
1229
if (rc)
1230
goto err_bridge;
1231
}
1232
1233
for (i = 0; i < ARRAY_SIZE(cxl_root_single); i++) {
1234
struct platform_device *bridge =
1235
cxl_hb_single[i % ARRAY_SIZE(cxl_hb_single)];
1236
struct platform_device *pdev;
1237
1238
pdev = platform_device_alloc("cxl_root_port",
1239
NR_MULTI_ROOT + i);
1240
if (!pdev)
1241
goto err_port;
1242
pdev->dev.parent = &bridge->dev;
1243
1244
rc = platform_device_add(pdev);
1245
if (rc) {
1246
platform_device_put(pdev);
1247
goto err_port;
1248
}
1249
cxl_root_single[i] = pdev;
1250
}
1251
1252
for (i = 0; i < ARRAY_SIZE(cxl_swu_single); i++) {
1253
struct platform_device *root_port = cxl_root_single[i];
1254
struct platform_device *pdev;
1255
1256
pdev = platform_device_alloc("cxl_switch_uport",
1257
NR_MULTI_ROOT + i);
1258
if (!pdev)
1259
goto err_uport;
1260
pdev->dev.parent = &root_port->dev;
1261
1262
rc = platform_device_add(pdev);
1263
if (rc) {
1264
platform_device_put(pdev);
1265
goto err_uport;
1266
}
1267
cxl_swu_single[i] = pdev;
1268
}
1269
1270
for (i = 0; i < ARRAY_SIZE(cxl_swd_single); i++) {
1271
struct platform_device *uport =
1272
cxl_swu_single[i % ARRAY_SIZE(cxl_swu_single)];
1273
struct platform_device *pdev;
1274
1275
pdev = platform_device_alloc("cxl_switch_dport",
1276
i + NR_MEM_MULTI);
1277
if (!pdev)
1278
goto err_dport;
1279
pdev->dev.parent = &uport->dev;
1280
1281
rc = platform_device_add(pdev);
1282
if (rc) {
1283
platform_device_put(pdev);
1284
goto err_dport;
1285
}
1286
cxl_swd_single[i] = pdev;
1287
}
1288
1289
return 0;
1290
1291
err_dport:
1292
for (i = ARRAY_SIZE(cxl_swd_single) - 1; i >= 0; i--)
1293
platform_device_unregister(cxl_swd_single[i]);
1294
err_uport:
1295
for (i = ARRAY_SIZE(cxl_swu_single) - 1; i >= 0; i--)
1296
platform_device_unregister(cxl_swu_single[i]);
1297
err_port:
1298
for (i = ARRAY_SIZE(cxl_root_single) - 1; i >= 0; i--)
1299
platform_device_unregister(cxl_root_single[i]);
1300
err_bridge:
1301
for (i = ARRAY_SIZE(cxl_hb_single) - 1; i >= 0; i--) {
1302
struct platform_device *pdev = cxl_hb_single[i];
1303
1304
if (!pdev)
1305
continue;
1306
sysfs_remove_link(&pdev->dev.kobj, "physical_node");
1307
platform_device_unregister(cxl_hb_single[i]);
1308
}
1309
1310
return rc;
1311
}
1312
1313
static void cxl_single_topo_exit(void)
1314
{
1315
int i;
1316
1317
for (i = ARRAY_SIZE(cxl_swd_single) - 1; i >= 0; i--)
1318
platform_device_unregister(cxl_swd_single[i]);
1319
for (i = ARRAY_SIZE(cxl_swu_single) - 1; i >= 0; i--)
1320
platform_device_unregister(cxl_swu_single[i]);
1321
for (i = ARRAY_SIZE(cxl_root_single) - 1; i >= 0; i--)
1322
platform_device_unregister(cxl_root_single[i]);
1323
for (i = ARRAY_SIZE(cxl_hb_single) - 1; i >= 0; i--) {
1324
struct platform_device *pdev = cxl_hb_single[i];
1325
1326
if (!pdev)
1327
continue;
1328
sysfs_remove_link(&pdev->dev.kobj, "physical_node");
1329
platform_device_unregister(cxl_hb_single[i]);
1330
}
1331
}
1332
1333
static void cxl_mem_exit(void)
1334
{
1335
int i;
1336
1337
for (i = ARRAY_SIZE(cxl_rcd) - 1; i >= 0; i--)
1338
platform_device_unregister(cxl_rcd[i]);
1339
for (i = ARRAY_SIZE(cxl_mem_single) - 1; i >= 0; i--)
1340
platform_device_unregister(cxl_mem_single[i]);
1341
for (i = ARRAY_SIZE(cxl_mem) - 1; i >= 0; i--)
1342
platform_device_unregister(cxl_mem[i]);
1343
}
1344
1345
static int cxl_mem_init(void)
1346
{
1347
int i, rc;
1348
1349
for (i = 0; i < ARRAY_SIZE(cxl_mem); i++) {
1350
struct platform_device *dport = cxl_switch_dport[i];
1351
struct platform_device *pdev;
1352
1353
pdev = platform_device_alloc("cxl_mem", i);
1354
if (!pdev)
1355
goto err_mem;
1356
pdev->dev.parent = &dport->dev;
1357
set_dev_node(&pdev->dev, i % 2);
1358
1359
rc = platform_device_add(pdev);
1360
if (rc) {
1361
platform_device_put(pdev);
1362
goto err_mem;
1363
}
1364
cxl_mem[i] = pdev;
1365
}
1366
1367
for (i = 0; i < ARRAY_SIZE(cxl_mem_single); i++) {
1368
struct platform_device *dport = cxl_swd_single[i];
1369
struct platform_device *pdev;
1370
1371
pdev = platform_device_alloc("cxl_mem", NR_MEM_MULTI + i);
1372
if (!pdev)
1373
goto err_single;
1374
pdev->dev.parent = &dport->dev;
1375
set_dev_node(&pdev->dev, i % 2);
1376
1377
rc = platform_device_add(pdev);
1378
if (rc) {
1379
platform_device_put(pdev);
1380
goto err_single;
1381
}
1382
cxl_mem_single[i] = pdev;
1383
}
1384
1385
for (i = 0; i < ARRAY_SIZE(cxl_rcd); i++) {
1386
int idx = NR_MEM_MULTI + NR_MEM_SINGLE + i;
1387
struct platform_device *rch = cxl_rch[i];
1388
struct platform_device *pdev;
1389
1390
pdev = platform_device_alloc("cxl_rcd", idx);
1391
if (!pdev)
1392
goto err_rcd;
1393
pdev->dev.parent = &rch->dev;
1394
set_dev_node(&pdev->dev, i % 2);
1395
1396
rc = platform_device_add(pdev);
1397
if (rc) {
1398
platform_device_put(pdev);
1399
goto err_rcd;
1400
}
1401
cxl_rcd[i] = pdev;
1402
}
1403
1404
return 0;
1405
1406
err_rcd:
1407
for (i = ARRAY_SIZE(cxl_rcd) - 1; i >= 0; i--)
1408
platform_device_unregister(cxl_rcd[i]);
1409
err_single:
1410
for (i = ARRAY_SIZE(cxl_mem_single) - 1; i >= 0; i--)
1411
platform_device_unregister(cxl_mem_single[i]);
1412
err_mem:
1413
for (i = ARRAY_SIZE(cxl_mem) - 1; i >= 0; i--)
1414
platform_device_unregister(cxl_mem[i]);
1415
return rc;
1416
}
1417
1418
static __init int cxl_test_init(void)
1419
{
1420
int rc, i;
1421
struct range mappable;
1422
1423
cxl_acpi_test();
1424
cxl_core_test();
1425
cxl_mem_test();
1426
cxl_pmem_test();
1427
cxl_port_test();
1428
1429
register_cxl_mock_ops(&cxl_mock_ops);
1430
1431
cxl_mock_pool = gen_pool_create(ilog2(SZ_2M), NUMA_NO_NODE);
1432
if (!cxl_mock_pool) {
1433
rc = -ENOMEM;
1434
goto err_gen_pool_create;
1435
}
1436
mappable = mhp_get_pluggable_range(true);
1437
1438
rc = gen_pool_add(cxl_mock_pool,
1439
min(iomem_resource.end + 1 - SZ_64G,
1440
mappable.end + 1 - SZ_64G),
1441
SZ_64G, NUMA_NO_NODE);
1442
if (rc)
1443
goto err_gen_pool_add;
1444
1445
if (interleave_arithmetic == 1) {
1446
cfmws_start = CFMWS_XOR_ARRAY_START;
1447
cfmws_end = CFMWS_XOR_ARRAY_END;
1448
} else {
1449
cfmws_start = CFMWS_MOD_ARRAY_START;
1450
cfmws_end = CFMWS_MOD_ARRAY_END;
1451
}
1452
1453
rc = populate_cedt();
1454
if (rc)
1455
goto err_populate;
1456
1457
for (i = 0; i < ARRAY_SIZE(cxl_host_bridge); i++) {
1458
struct acpi_device *adev = &host_bridge[i];
1459
struct platform_device *pdev;
1460
1461
pdev = platform_device_alloc("cxl_host_bridge", i);
1462
if (!pdev)
1463
goto err_bridge;
1464
1465
mock_companion(adev, &pdev->dev);
1466
rc = platform_device_add(pdev);
1467
if (rc) {
1468
platform_device_put(pdev);
1469
goto err_bridge;
1470
}
1471
1472
cxl_host_bridge[i] = pdev;
1473
mock_pci_bus[i].bridge = &pdev->dev;
1474
rc = sysfs_create_link(&pdev->dev.kobj, &pdev->dev.kobj,
1475
"physical_node");
1476
if (rc)
1477
goto err_bridge;
1478
}
1479
1480
for (i = 0; i < ARRAY_SIZE(cxl_root_port); i++) {
1481
struct platform_device *bridge =
1482
cxl_host_bridge[i % ARRAY_SIZE(cxl_host_bridge)];
1483
struct platform_device *pdev;
1484
1485
pdev = platform_device_alloc("cxl_root_port", i);
1486
if (!pdev)
1487
goto err_port;
1488
pdev->dev.parent = &bridge->dev;
1489
1490
rc = platform_device_add(pdev);
1491
if (rc) {
1492
platform_device_put(pdev);
1493
goto err_port;
1494
}
1495
cxl_root_port[i] = pdev;
1496
}
1497
1498
BUILD_BUG_ON(ARRAY_SIZE(cxl_switch_uport) != ARRAY_SIZE(cxl_root_port));
1499
for (i = 0; i < ARRAY_SIZE(cxl_switch_uport); i++) {
1500
struct platform_device *root_port = cxl_root_port[i];
1501
struct platform_device *pdev;
1502
1503
pdev = platform_device_alloc("cxl_switch_uport", i);
1504
if (!pdev)
1505
goto err_uport;
1506
pdev->dev.parent = &root_port->dev;
1507
1508
rc = platform_device_add(pdev);
1509
if (rc) {
1510
platform_device_put(pdev);
1511
goto err_uport;
1512
}
1513
cxl_switch_uport[i] = pdev;
1514
}
1515
1516
for (i = 0; i < ARRAY_SIZE(cxl_switch_dport); i++) {
1517
struct platform_device *uport =
1518
cxl_switch_uport[i % ARRAY_SIZE(cxl_switch_uport)];
1519
struct platform_device *pdev;
1520
1521
pdev = platform_device_alloc("cxl_switch_dport", i);
1522
if (!pdev)
1523
goto err_dport;
1524
pdev->dev.parent = &uport->dev;
1525
1526
rc = platform_device_add(pdev);
1527
if (rc) {
1528
platform_device_put(pdev);
1529
goto err_dport;
1530
}
1531
cxl_switch_dport[i] = pdev;
1532
}
1533
1534
rc = cxl_single_topo_init();
1535
if (rc)
1536
goto err_dport;
1537
1538
rc = cxl_rch_topo_init();
1539
if (rc)
1540
goto err_single;
1541
1542
cxl_acpi = platform_device_alloc("cxl_acpi", 0);
1543
if (!cxl_acpi)
1544
goto err_rch;
1545
1546
mock_companion(&acpi0017_mock, &cxl_acpi->dev);
1547
acpi0017_mock.dev.bus = &platform_bus_type;
1548
1549
rc = platform_device_add(cxl_acpi);
1550
if (rc)
1551
goto err_root;
1552
1553
rc = cxl_mem_init();
1554
if (rc)
1555
goto err_root;
1556
1557
return 0;
1558
1559
err_root:
1560
platform_device_put(cxl_acpi);
1561
err_rch:
1562
cxl_rch_topo_exit();
1563
err_single:
1564
cxl_single_topo_exit();
1565
err_dport:
1566
for (i = ARRAY_SIZE(cxl_switch_dport) - 1; i >= 0; i--)
1567
platform_device_unregister(cxl_switch_dport[i]);
1568
err_uport:
1569
for (i = ARRAY_SIZE(cxl_switch_uport) - 1; i >= 0; i--)
1570
platform_device_unregister(cxl_switch_uport[i]);
1571
err_port:
1572
for (i = ARRAY_SIZE(cxl_root_port) - 1; i >= 0; i--)
1573
platform_device_unregister(cxl_root_port[i]);
1574
err_bridge:
1575
for (i = ARRAY_SIZE(cxl_host_bridge) - 1; i >= 0; i--) {
1576
struct platform_device *pdev = cxl_host_bridge[i];
1577
1578
if (!pdev)
1579
continue;
1580
sysfs_remove_link(&pdev->dev.kobj, "physical_node");
1581
platform_device_unregister(cxl_host_bridge[i]);
1582
}
1583
err_populate:
1584
depopulate_all_mock_resources();
1585
err_gen_pool_add:
1586
gen_pool_destroy(cxl_mock_pool);
1587
err_gen_pool_create:
1588
unregister_cxl_mock_ops(&cxl_mock_ops);
1589
return rc;
1590
}
1591
1592
static __exit void cxl_test_exit(void)
1593
{
1594
int i;
1595
1596
cxl_mem_exit();
1597
platform_device_unregister(cxl_acpi);
1598
cxl_rch_topo_exit();
1599
cxl_single_topo_exit();
1600
for (i = ARRAY_SIZE(cxl_switch_dport) - 1; i >= 0; i--)
1601
platform_device_unregister(cxl_switch_dport[i]);
1602
for (i = ARRAY_SIZE(cxl_switch_uport) - 1; i >= 0; i--)
1603
platform_device_unregister(cxl_switch_uport[i]);
1604
for (i = ARRAY_SIZE(cxl_root_port) - 1; i >= 0; i--)
1605
platform_device_unregister(cxl_root_port[i]);
1606
for (i = ARRAY_SIZE(cxl_host_bridge) - 1; i >= 0; i--) {
1607
struct platform_device *pdev = cxl_host_bridge[i];
1608
1609
if (!pdev)
1610
continue;
1611
sysfs_remove_link(&pdev->dev.kobj, "physical_node");
1612
platform_device_unregister(cxl_host_bridge[i]);
1613
}
1614
depopulate_all_mock_resources();
1615
gen_pool_destroy(cxl_mock_pool);
1616
unregister_cxl_mock_ops(&cxl_mock_ops);
1617
}
1618
1619
module_param(interleave_arithmetic, int, 0444);
1620
MODULE_PARM_DESC(interleave_arithmetic, "Modulo:0, XOR:1");
1621
module_param(extended_linear_cache, bool, 0444);
1622
MODULE_PARM_DESC(extended_linear_cache, "Enable extended linear cache support");
1623
module_init(cxl_test_init);
1624
module_exit(cxl_test_exit);
1625
MODULE_LICENSE("GPL v2");
1626
MODULE_DESCRIPTION("cxl_test: setup module");
1627
MODULE_IMPORT_NS("ACPI");
1628
MODULE_IMPORT_NS("CXL");
1629
1630