Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
torvalds
GitHub Repository: torvalds/linux
Path: blob/master/tools/testing/cxl/test/cxl.c
29537 views
1
// SPDX-License-Identifier: GPL-2.0-only
2
// Copyright(c) 2021 Intel Corporation. All rights reserved.
3
4
#include <linux/platform_device.h>
5
#include <linux/memory_hotplug.h>
6
#include <linux/genalloc.h>
7
#include <linux/module.h>
8
#include <linux/mutex.h>
9
#include <linux/acpi.h>
10
#include <linux/pci.h>
11
#include <linux/mm.h>
12
#include <cxlmem.h>
13
14
#include "../watermark.h"
15
#include "mock.h"
16
17
/* Module param: 0 == modulo interleave math, non-zero == XOR math (CFMWS 6-8) */
static int interleave_arithmetic;

/* QoS Throttling Group id reported by every mocked CFMWS window */
#define FAKE_QTG_ID	42

/* Mock topology dimensions */
#define NR_CXL_HOST_BRIDGES 2
#define NR_CXL_SINGLE_HOST 1
#define NR_CXL_RCH 1
#define NR_CXL_ROOT_PORTS 2
#define NR_CXL_SWITCH_PORTS 2
#define NR_CXL_PORT_DECODERS 8
#define NR_BRIDGES (NR_CXL_HOST_BRIDGES + NR_CXL_SINGLE_HOST + NR_CXL_RCH)

static struct platform_device *cxl_acpi;
static struct platform_device *cxl_host_bridge[NR_CXL_HOST_BRIDGES];
#define NR_MULTI_ROOT (NR_CXL_HOST_BRIDGES * NR_CXL_ROOT_PORTS)
static struct platform_device *cxl_root_port[NR_MULTI_ROOT];
static struct platform_device *cxl_switch_uport[NR_MULTI_ROOT];
#define NR_MEM_MULTI \
	(NR_CXL_HOST_BRIDGES * NR_CXL_ROOT_PORTS * NR_CXL_SWITCH_PORTS)
static struct platform_device *cxl_switch_dport[NR_MEM_MULTI];

/* Single-host-bridge (x1) sub-topology */
static struct platform_device *cxl_hb_single[NR_CXL_SINGLE_HOST];
static struct platform_device *cxl_root_single[NR_CXL_SINGLE_HOST];
static struct platform_device *cxl_swu_single[NR_CXL_SINGLE_HOST];
#define NR_MEM_SINGLE (NR_CXL_SINGLE_HOST * NR_CXL_SWITCH_PORTS)
static struct platform_device *cxl_swd_single[NR_MEM_SINGLE];

/* non-static: shared with other cxl_test translation units */
struct platform_device *cxl_mem[NR_MEM_MULTI];
struct platform_device *cxl_mem_single[NR_MEM_SINGLE];

/* Restricted CXL Host (CXL 1.1 style) bridge and device */
static struct platform_device *cxl_rch[NR_CXL_RCH];
static struct platform_device *cxl_rcd[NR_CXL_RCH];
49
50
static inline bool is_multi_bridge(struct device *dev)
51
{
52
int i;
53
54
for (i = 0; i < ARRAY_SIZE(cxl_host_bridge); i++)
55
if (&cxl_host_bridge[i]->dev == dev)
56
return true;
57
return false;
58
}
59
60
static inline bool is_single_bridge(struct device *dev)
61
{
62
int i;
63
64
for (i = 0; i < ARRAY_SIZE(cxl_hb_single); i++)
65
if (&cxl_hb_single[i]->dev == dev)
66
return true;
67
return false;
68
}
69
70
/* Mock ACPI0017 (CXL root) companion device */
static struct acpi_device acpi0017_mock;

/*
 * One mock ACPI companion per bridge: [0-1] multi-port hosts, [2] the
 * single-port host, [3] the RCH.  Each entry uses its own address as a
 * unique acpi_handle so find_host_bridge() can map handle -> adev.
 */
static struct acpi_device host_bridge[NR_BRIDGES] = {
	[0] = {
		.handle = &host_bridge[0],
		.pnp.unique_id = "0",
	},
	[1] = {
		.handle = &host_bridge[1],
		.pnp.unique_id = "1",
	},
	[2] = {
		.handle = &host_bridge[2],
		.pnp.unique_id = "2",
	},
	[3] = {
		.handle = &host_bridge[3],
		.pnp.unique_id = "3",
	},
};
89
90
static bool is_mock_dev(struct device *dev)
91
{
92
int i;
93
94
for (i = 0; i < ARRAY_SIZE(cxl_mem); i++)
95
if (dev == &cxl_mem[i]->dev)
96
return true;
97
for (i = 0; i < ARRAY_SIZE(cxl_mem_single); i++)
98
if (dev == &cxl_mem_single[i]->dev)
99
return true;
100
for (i = 0; i < ARRAY_SIZE(cxl_rcd); i++)
101
if (dev == &cxl_rcd[i]->dev)
102
return true;
103
if (dev == &cxl_acpi->dev)
104
return true;
105
return false;
106
}
107
108
static bool is_mock_adev(struct acpi_device *adev)
109
{
110
int i;
111
112
if (adev == &acpi0017_mock)
113
return true;
114
115
for (i = 0; i < ARRAY_SIZE(host_bridge); i++)
116
if (adev == &host_bridge[i])
117
return true;
118
119
return false;
120
}
121
122
/*
 * Mock CEDT: one CHBS per mock bridge, CFMWS windows 0-5 using modulo
 * interleave math and 6-8 using XOR math, plus one CXIMS carrying the
 * XOR maps.  __packed so the layout matches what the CEDT sub-table
 * walker expects (each entry's header.length spans entry + targets).
 */
static struct {
	struct acpi_table_cedt cedt;
	struct acpi_cedt_chbs chbs[NR_BRIDGES];
	struct {
		struct acpi_cedt_cfmws cfmws;
		u32 target[1];
	} cfmws0;
	struct {
		struct acpi_cedt_cfmws cfmws;
		u32 target[2];
	} cfmws1;
	struct {
		struct acpi_cedt_cfmws cfmws;
		u32 target[1];
	} cfmws2;
	struct {
		struct acpi_cedt_cfmws cfmws;
		u32 target[2];
	} cfmws3;
	struct {
		struct acpi_cedt_cfmws cfmws;
		u32 target[1];
	} cfmws4;
	struct {
		struct acpi_cedt_cfmws cfmws;
		u32 target[1];
	} cfmws5;
	struct {
		struct acpi_cedt_cfmws cfmws;
		u32 target[1];
	} cfmws6;
	struct {
		struct acpi_cedt_cfmws cfmws;
		u32 target[2];
	} cfmws7;
	struct {
		struct acpi_cedt_cfmws cfmws;
		u32 target[3];
	} cfmws8;
	struct {
		struct acpi_cedt_cxims cxims;
		u64 xormap_list[2];
	} cxims0;
} __packed mock_cedt = {
	.cedt = {
		.header = {
			.signature = "CEDT",
			.length = sizeof(mock_cedt),
			.revision = 1,
		},
	},
	/* chbs[0-2] are CXL 2.0 hosts, chbs[3] is the CXL 1.1 RCH */
	.chbs[0] = {
		.header = {
			.type = ACPI_CEDT_TYPE_CHBS,
			.length = sizeof(mock_cedt.chbs[0]),
		},
		.uid = 0,
		.cxl_version = ACPI_CEDT_CHBS_VERSION_CXL20,
	},
	.chbs[1] = {
		.header = {
			.type = ACPI_CEDT_TYPE_CHBS,
			.length = sizeof(mock_cedt.chbs[0]),
		},
		.uid = 1,
		.cxl_version = ACPI_CEDT_CHBS_VERSION_CXL20,
	},
	.chbs[2] = {
		.header = {
			.type = ACPI_CEDT_TYPE_CHBS,
			.length = sizeof(mock_cedt.chbs[0]),
		},
		.uid = 2,
		.cxl_version = ACPI_CEDT_CHBS_VERSION_CXL20,
	},
	.chbs[3] = {
		.header = {
			.type = ACPI_CEDT_TYPE_CHBS,
			.length = sizeof(mock_cedt.chbs[0]),
		},
		.uid = 3,
		.cxl_version = ACPI_CEDT_CHBS_VERSION_CXL11,
	},
	/*
	 * interleave_ways / granularity below use the CFMWS encodings
	 * (ways: 0 == x1, 1 == x2; granularity 4 == 4K per the ACPI spec)
	 */
	.cfmws0 = {
		.cfmws = {
			.header = {
				.type = ACPI_CEDT_TYPE_CFMWS,
				.length = sizeof(mock_cedt.cfmws0),
			},
			.interleave_ways = 0,
			.granularity = 4,
			.restrictions = ACPI_CEDT_CFMWS_RESTRICT_HOSTONLYMEM |
					ACPI_CEDT_CFMWS_RESTRICT_VOLATILE,
			.qtg_id = FAKE_QTG_ID,
			.window_size = SZ_256M * 4UL,
		},
		.target = { 0 },
	},
	.cfmws1 = {
		.cfmws = {
			.header = {
				.type = ACPI_CEDT_TYPE_CFMWS,
				.length = sizeof(mock_cedt.cfmws1),
			},
			.interleave_ways = 1,
			.granularity = 4,
			.restrictions = ACPI_CEDT_CFMWS_RESTRICT_HOSTONLYMEM |
					ACPI_CEDT_CFMWS_RESTRICT_VOLATILE,
			.qtg_id = FAKE_QTG_ID,
			.window_size = SZ_256M * 8UL,
		},
		.target = { 0, 1, },
	},
	.cfmws2 = {
		.cfmws = {
			.header = {
				.type = ACPI_CEDT_TYPE_CFMWS,
				.length = sizeof(mock_cedt.cfmws2),
			},
			.interleave_ways = 0,
			.granularity = 4,
			.restrictions = ACPI_CEDT_CFMWS_RESTRICT_HOSTONLYMEM |
					ACPI_CEDT_CFMWS_RESTRICT_PMEM,
			.qtg_id = FAKE_QTG_ID,
			.window_size = SZ_256M * 4UL,
		},
		.target = { 0 },
	},
	.cfmws3 = {
		.cfmws = {
			.header = {
				.type = ACPI_CEDT_TYPE_CFMWS,
				.length = sizeof(mock_cedt.cfmws3),
			},
			.interleave_ways = 1,
			.granularity = 4,
			.restrictions = ACPI_CEDT_CFMWS_RESTRICT_HOSTONLYMEM |
					ACPI_CEDT_CFMWS_RESTRICT_PMEM,
			.qtg_id = FAKE_QTG_ID,
			.window_size = SZ_256M * 8UL,
		},
		.target = { 0, 1, },
	},
	/* cfmws4 targets the single host bridge (uid 2) */
	.cfmws4 = {
		.cfmws = {
			.header = {
				.type = ACPI_CEDT_TYPE_CFMWS,
				.length = sizeof(mock_cedt.cfmws4),
			},
			.interleave_ways = 0,
			.granularity = 4,
			.restrictions = ACPI_CEDT_CFMWS_RESTRICT_HOSTONLYMEM |
					ACPI_CEDT_CFMWS_RESTRICT_PMEM,
			.qtg_id = FAKE_QTG_ID,
			.window_size = SZ_256M * 4UL,
		},
		.target = { 2 },
	},
	/* cfmws5 targets the RCH (uid 3) */
	.cfmws5 = {
		.cfmws = {
			.header = {
				.type = ACPI_CEDT_TYPE_CFMWS,
				.length = sizeof(mock_cedt.cfmws5),
			},
			.interleave_ways = 0,
			.granularity = 4,
			.restrictions = ACPI_CEDT_CFMWS_RESTRICT_HOSTONLYMEM |
					ACPI_CEDT_CFMWS_RESTRICT_VOLATILE,
			.qtg_id = FAKE_QTG_ID,
			.window_size = SZ_256M,
		},
		.target = { 3 },
	},
	/* .cfmws6,7,8 use ACPI_CEDT_CFMWS_ARITHMETIC_XOR */
	.cfmws6 = {
		.cfmws = {
			.header = {
				.type = ACPI_CEDT_TYPE_CFMWS,
				.length = sizeof(mock_cedt.cfmws6),
			},
			.interleave_arithmetic = ACPI_CEDT_CFMWS_ARITHMETIC_XOR,
			.interleave_ways = 0,
			.granularity = 4,
			.restrictions = ACPI_CEDT_CFMWS_RESTRICT_HOSTONLYMEM |
					ACPI_CEDT_CFMWS_RESTRICT_PMEM,
			.qtg_id = FAKE_QTG_ID,
			.window_size = SZ_256M * 8UL,
		},
		.target = { 0, },
	},
	.cfmws7 = {
		.cfmws = {
			.header = {
				.type = ACPI_CEDT_TYPE_CFMWS,
				.length = sizeof(mock_cedt.cfmws7),
			},
			.interleave_arithmetic = ACPI_CEDT_CFMWS_ARITHMETIC_XOR,
			.interleave_ways = 1,
			.granularity = 0,
			.restrictions = ACPI_CEDT_CFMWS_RESTRICT_HOSTONLYMEM |
					ACPI_CEDT_CFMWS_RESTRICT_PMEM,
			.qtg_id = FAKE_QTG_ID,
			.window_size = SZ_256M * 8UL,
		},
		.target = { 0, 1, },
	},
	/* ways == 8 is the CFMWS encoding for 3-way interleave (3 targets) */
	.cfmws8 = {
		.cfmws = {
			.header = {
				.type = ACPI_CEDT_TYPE_CFMWS,
				.length = sizeof(mock_cedt.cfmws8),
			},
			.interleave_arithmetic = ACPI_CEDT_CFMWS_ARITHMETIC_XOR,
			.interleave_ways = 8,
			.granularity = 1,
			.restrictions = ACPI_CEDT_CFMWS_RESTRICT_HOSTONLYMEM |
					ACPI_CEDT_CFMWS_RESTRICT_PMEM,
			.qtg_id = FAKE_QTG_ID,
			.window_size = SZ_512M * 6UL,
		},
		.target = { 0, 1, 2, },
	},
	.cxims0 = {
		.cxims = {
			.header = {
				.type = ACPI_CEDT_TYPE_CXIMS,
				.length = sizeof(mock_cedt.cxims0),
			},
			.hbig = 0,
			.nr_xormaps = 2,
		},
		.xormap_list = { 0x404100, 0x808200, },
	},
};
356
357
/* Flat view of the CFMWS entries; indexed by the cfmws_start/end bounds */
struct acpi_cedt_cfmws *mock_cfmws[] = {
	[0] = &mock_cedt.cfmws0.cfmws,
	[1] = &mock_cedt.cfmws1.cfmws,
	[2] = &mock_cedt.cfmws2.cfmws,
	[3] = &mock_cedt.cfmws3.cfmws,
	[4] = &mock_cedt.cfmws4.cfmws,
	[5] = &mock_cedt.cfmws5.cfmws,
	/* Modulo Math above, XOR Math below */
	[6] = &mock_cedt.cfmws6.cfmws,
	[7] = &mock_cedt.cfmws7.cfmws,
	[8] = &mock_cedt.cfmws8.cfmws,
};
369
370
/* Active subset of mock_cfmws[], selected by the interleave_arithmetic param */
static int cfmws_start;
static int cfmws_end;
#define CFMWS_MOD_ARRAY_START 0
#define CFMWS_MOD_ARRAY_END 5
#define CFMWS_XOR_ARRAY_START 6
#define CFMWS_XOR_ARRAY_END 8

struct acpi_cedt_cxims *mock_cxims[1] = {
	[0] = &mock_cedt.cxims0.cxims,
};

/* One fake host-physical address range handed out from cxl_mock_pool */
struct cxl_mock_res {
	struct list_head list;
	struct range range;
};

static LIST_HEAD(mock_res);
/* mock_res_lock protects mock_res */
static DEFINE_MUTEX(mock_res_lock);
static struct gen_pool *cxl_mock_pool;
389
390
static void depopulate_all_mock_resources(void)
391
{
392
struct cxl_mock_res *res, *_res;
393
394
mutex_lock(&mock_res_lock);
395
list_for_each_entry_safe(res, _res, &mock_res, list) {
396
gen_pool_free(cxl_mock_pool, res->range.start,
397
range_len(&res->range));
398
list_del(&res->list);
399
kfree(res);
400
}
401
mutex_unlock(&mock_res_lock);
402
}
403
404
/*
 * Allocate a @size sized, @align aligned range from cxl_mock_pool and
 * track it on @mock_res.  Returns NULL on allocation failure.
 *
 * Fixes vs. prior version: the kzalloc() result was dereferenced
 * without a NULL check, and @res leaked when gen_pool_alloc_algo()
 * failed.
 */
static struct cxl_mock_res *alloc_mock_res(resource_size_t size, int align)
{
	struct genpool_data_align data = {
		.align = align,
	};
	struct cxl_mock_res *res;
	unsigned long phys;

	res = kzalloc(sizeof(*res), GFP_KERNEL);
	if (!res)
		return NULL;

	INIT_LIST_HEAD(&res->list);
	phys = gen_pool_alloc_algo(cxl_mock_pool, size,
				   gen_pool_first_fit_align, &data);
	if (!phys) {
		kfree(res);
		return NULL;
	}

	res->range = (struct range) {
		.start = phys,
		.end = phys + size - 1,
	};
	mutex_lock(&mock_res_lock);
	list_add(&res->list, &mock_res);
	mutex_unlock(&mock_res_lock);

	return res;
}
428
429
static int populate_cedt(void)
430
{
431
struct cxl_mock_res *res;
432
int i;
433
434
for (i = 0; i < ARRAY_SIZE(mock_cedt.chbs); i++) {
435
struct acpi_cedt_chbs *chbs = &mock_cedt.chbs[i];
436
resource_size_t size;
437
438
if (chbs->cxl_version == ACPI_CEDT_CHBS_VERSION_CXL20)
439
size = ACPI_CEDT_CHBS_LENGTH_CXL20;
440
else
441
size = ACPI_CEDT_CHBS_LENGTH_CXL11;
442
443
res = alloc_mock_res(size, size);
444
if (!res)
445
return -ENOMEM;
446
chbs->base = res->range.start;
447
chbs->length = size;
448
}
449
450
for (i = cfmws_start; i <= cfmws_end; i++) {
451
struct acpi_cedt_cfmws *window = mock_cfmws[i];
452
453
res = alloc_mock_res(window->window_size, SZ_256M);
454
if (!res)
455
return -ENOMEM;
456
window->base_hpa = res->range.start;
457
}
458
459
return 0;
460
}
461
462
static bool is_mock_port(struct device *dev);

/*
 * WARNING, this hack assumes the format of 'struct cxl_cfmws_context'
 * and 'struct cxl_chbs_context' share the property that the first
 * struct member is a cxl_test device being probed by the cxl_acpi
 * driver.
 */
struct cxl_cedt_context {
	struct device *dev;
};
473
474
static int mock_acpi_table_parse_cedt(enum acpi_cedt_type id,
475
acpi_tbl_entry_handler_arg handler_arg,
476
void *arg)
477
{
478
struct cxl_cedt_context *ctx = arg;
479
struct device *dev = ctx->dev;
480
union acpi_subtable_headers *h;
481
unsigned long end;
482
int i;
483
484
if (!is_mock_port(dev) && !is_mock_dev(dev))
485
return acpi_table_parse_cedt(id, handler_arg, arg);
486
487
if (id == ACPI_CEDT_TYPE_CHBS)
488
for (i = 0; i < ARRAY_SIZE(mock_cedt.chbs); i++) {
489
h = (union acpi_subtable_headers *)&mock_cedt.chbs[i];
490
end = (unsigned long)&mock_cedt.chbs[i + 1];
491
handler_arg(h, arg, end);
492
}
493
494
if (id == ACPI_CEDT_TYPE_CFMWS)
495
for (i = cfmws_start; i <= cfmws_end; i++) {
496
h = (union acpi_subtable_headers *) mock_cfmws[i];
497
end = (unsigned long) h + mock_cfmws[i]->header.length;
498
handler_arg(h, arg, end);
499
}
500
501
if (id == ACPI_CEDT_TYPE_CXIMS)
502
for (i = 0; i < ARRAY_SIZE(mock_cxims); i++) {
503
h = (union acpi_subtable_headers *)mock_cxims[i];
504
end = (unsigned long)h + mock_cxims[i]->header.length;
505
handler_arg(h, arg, end);
506
}
507
508
return 0;
509
}
510
511
static bool is_mock_bridge(struct device *dev)
512
{
513
int i;
514
515
for (i = 0; i < ARRAY_SIZE(cxl_host_bridge); i++)
516
if (dev == &cxl_host_bridge[i]->dev)
517
return true;
518
for (i = 0; i < ARRAY_SIZE(cxl_hb_single); i++)
519
if (dev == &cxl_hb_single[i]->dev)
520
return true;
521
for (i = 0; i < ARRAY_SIZE(cxl_rch); i++)
522
if (dev == &cxl_rch[i]->dev)
523
return true;
524
525
return false;
526
}
527
528
static bool is_mock_port(struct device *dev)
529
{
530
int i;
531
532
if (is_mock_bridge(dev))
533
return true;
534
535
for (i = 0; i < ARRAY_SIZE(cxl_root_port); i++)
536
if (dev == &cxl_root_port[i]->dev)
537
return true;
538
539
for (i = 0; i < ARRAY_SIZE(cxl_switch_uport); i++)
540
if (dev == &cxl_switch_uport[i]->dev)
541
return true;
542
543
for (i = 0; i < ARRAY_SIZE(cxl_switch_dport); i++)
544
if (dev == &cxl_switch_dport[i]->dev)
545
return true;
546
547
for (i = 0; i < ARRAY_SIZE(cxl_root_single); i++)
548
if (dev == &cxl_root_single[i]->dev)
549
return true;
550
551
for (i = 0; i < ARRAY_SIZE(cxl_swu_single); i++)
552
if (dev == &cxl_swu_single[i]->dev)
553
return true;
554
555
for (i = 0; i < ARRAY_SIZE(cxl_swd_single); i++)
556
if (dev == &cxl_swd_single[i]->dev)
557
return true;
558
559
if (is_cxl_memdev(dev))
560
return is_mock_dev(dev->parent);
561
562
return false;
563
}
564
565
/* Pointer arithmetic maps a host_bridge[] entry back to its array index */
static int host_bridge_index(struct acpi_device *adev)
{
	return adev - host_bridge;
}
569
570
/* Look up the mock bridge adev whose handle is @handle, or NULL. */
static struct acpi_device *find_host_bridge(acpi_handle handle)
{
	int idx;

	for (idx = 0; idx < ARRAY_SIZE(host_bridge); idx++) {
		if (host_bridge[idx].handle == handle)
			return &host_bridge[idx];
	}

	return NULL;
}
579
580
static acpi_status
581
mock_acpi_evaluate_integer(acpi_handle handle, acpi_string pathname,
582
struct acpi_object_list *arguments,
583
unsigned long long *data)
584
{
585
struct acpi_device *adev = find_host_bridge(handle);
586
587
if (!adev || strcmp(pathname, METHOD_NAME__UID) != 0)
588
return acpi_evaluate_integer(handle, pathname, arguments, data);
589
590
*data = host_bridge_index(adev);
591
return AE_OK;
592
}
593
594
/* One fake PCI bus / root per mock bridge, parallel to host_bridge[] */
static struct pci_bus mock_pci_bus[NR_BRIDGES];
static struct acpi_pci_root mock_pci_root[ARRAY_SIZE(mock_pci_bus)] = {
	[0] = {
		.bus = &mock_pci_bus[0],
	},
	[1] = {
		.bus = &mock_pci_bus[1],
	},
	[2] = {
		.bus = &mock_pci_bus[2],
	},
	[3] = {
		.bus = &mock_pci_bus[3],
	},

};
610
611
static bool is_mock_bus(struct pci_bus *bus)
612
{
613
int i;
614
615
for (i = 0; i < ARRAY_SIZE(mock_pci_bus); i++)
616
if (bus == &mock_pci_bus[i])
617
return true;
618
return false;
619
}
620
621
/* Replacement for acpi_pci_find_root() covering the mock bridges. */
static struct acpi_pci_root *mock_acpi_pci_find_root(acpi_handle handle)
{
	struct acpi_device *adev = find_host_bridge(handle);

	if (adev)
		return &mock_pci_root[host_bridge_index(adev)];

	return acpi_pci_find_root(handle);
}
629
630
static struct cxl_hdm *mock_cxl_setup_hdm(struct cxl_port *port,
631
struct cxl_endpoint_dvsec_info *info)
632
{
633
struct cxl_hdm *cxlhdm = devm_kzalloc(&port->dev, sizeof(*cxlhdm), GFP_KERNEL);
634
struct device *dev = &port->dev;
635
636
if (!cxlhdm)
637
return ERR_PTR(-ENOMEM);
638
639
cxlhdm->port = port;
640
cxlhdm->interleave_mask = ~0U;
641
cxlhdm->iw_cap_mask = ~0UL;
642
dev_set_drvdata(dev, cxlhdm);
643
return cxlhdm;
644
}
645
646
/* Cursor state for filling a decoder's target_map via map_targets() */
struct target_map_ctx {
	u32 *target_map;	/* destination array, target_count entries */
	int index;		/* next slot to fill */
	int target_count;	/* capacity of target_map */
};
651
652
static int map_targets(struct device *dev, void *data)
653
{
654
struct platform_device *pdev = to_platform_device(dev);
655
struct target_map_ctx *ctx = data;
656
657
ctx->target_map[ctx->index++] = pdev->id;
658
659
if (ctx->index > ctx->target_count) {
660
dev_WARN_ONCE(dev, 1, "too many targets found?\n");
661
return -ENXIO;
662
}
663
664
return 0;
665
}
666
667
static int mock_decoder_commit(struct cxl_decoder *cxld)
668
{
669
struct cxl_port *port = to_cxl_port(cxld->dev.parent);
670
int id = cxld->id;
671
672
if (cxld->flags & CXL_DECODER_F_ENABLE)
673
return 0;
674
675
dev_dbg(&port->dev, "%s commit\n", dev_name(&cxld->dev));
676
if (cxl_num_decoders_committed(port) != id) {
677
dev_dbg(&port->dev,
678
"%s: out of order commit, expected decoder%d.%d\n",
679
dev_name(&cxld->dev), port->id,
680
cxl_num_decoders_committed(port));
681
return -EBUSY;
682
}
683
684
port->commit_end++;
685
cxld->flags |= CXL_DECODER_F_ENABLE;
686
687
return 0;
688
}
689
690
static void mock_decoder_reset(struct cxl_decoder *cxld)
691
{
692
struct cxl_port *port = to_cxl_port(cxld->dev.parent);
693
int id = cxld->id;
694
695
if ((cxld->flags & CXL_DECODER_F_ENABLE) == 0)
696
return;
697
698
dev_dbg(&port->dev, "%s reset\n", dev_name(&cxld->dev));
699
if (port->commit_end == id)
700
cxl_port_commit_reap(cxld);
701
else
702
dev_dbg(&port->dev,
703
"%s: out of order reset, expected decoder%d.%d\n",
704
dev_name(&cxld->dev), port->id, port->commit_end);
705
cxld->flags &= ~CXL_DECODER_F_ENABLE;
706
}
707
708
static void default_mock_decoder(struct cxl_decoder *cxld)
709
{
710
cxld->hpa_range = (struct range){
711
.start = 0,
712
.end = -1,
713
};
714
715
cxld->interleave_ways = 1;
716
cxld->interleave_granularity = 256;
717
cxld->target_type = CXL_DECODER_HOSTONLYMEM;
718
cxld->commit = mock_decoder_commit;
719
cxld->reset = mock_decoder_reset;
720
}
721
722
static int first_decoder(struct device *dev, const void *data)
723
{
724
struct cxl_decoder *cxld;
725
726
if (!is_switch_decoder(dev))
727
return 0;
728
cxld = to_cxl_decoder(dev);
729
if (cxld->id == 0)
730
return 1;
731
return 0;
732
}
733
734
/*
 * Pre-program decoder state as if BIOS had committed it.  The first
 * decoder of the endpoints attached to host-bridge0's first switch gets
 * an active x2 RAM region (and the matching switch/root decoders are
 * programmed to target it); everything else is default-disabled.
 */
static void mock_init_hdm_decoder(struct cxl_decoder *cxld)
{
	struct acpi_cedt_cfmws *window = mock_cfmws[0];
	struct platform_device *pdev = NULL;
	struct cxl_endpoint_decoder *cxled;
	struct cxl_switch_decoder *cxlsd;
	struct cxl_port *port, *iter;
	const int size = SZ_512M;
	struct cxl_memdev *cxlmd;
	struct cxl_dport *dport;
	struct device *dev;
	bool hb0 = false;
	u64 base;
	int i;

	if (is_endpoint_decoder(&cxld->dev)) {
		cxled = to_cxl_endpoint_decoder(&cxld->dev);
		cxlmd = cxled_to_memdev(cxled);
		WARN_ON(!dev_is_platform(cxlmd->dev.parent));
		pdev = to_platform_device(cxlmd->dev.parent);

		/* check if the endpoint is attached to host-bridge0 */
		port = cxled_to_port(cxled);
		do {
			if (port->uport_dev == &cxl_host_bridge[0]->dev) {
				hb0 = true;
				break;
			}
			/* walk up until the parent is no longer a cxl_port */
			if (is_cxl_port(port->dev.parent))
				port = to_cxl_port(port->dev.parent);
			else
				port = NULL;
		} while (port);
		/* restore @port to the endpoint's own port */
		port = cxled_to_port(cxled);
	}

	/*
	 * The first decoder on the first 2 devices on the first switch
	 * attached to host-bridge0 mock a fake / static RAM region. All
	 * other decoders are default disabled. Given the round robin
	 * assignment those devices are named cxl_mem.0, and cxl_mem.4.
	 *
	 * See 'cxl list -BMPu -m cxl_mem.0,cxl_mem.4'
	 */
	if (!hb0 || pdev->id % 4 || pdev->id > 4 || cxld->id > 0) {
		default_mock_decoder(cxld);
		return;
	}

	base = window->base_hpa;
	cxld->hpa_range = (struct range) {
		.start = base,
		.end = base + size - 1,
	};

	cxld->interleave_ways = 2;
	eig_to_granularity(window->granularity, &cxld->interleave_granularity);
	cxld->target_type = CXL_DECODER_HOSTONLYMEM;
	cxld->flags = CXL_DECODER_F_ENABLE;
	cxled->state = CXL_DECODER_STATE_AUTO;
	port->commit_end = cxld->id;
	/* each of the 2 interleaved endpoints contributes half the HPA span */
	devm_cxl_dpa_reserve(cxled, 0, size / cxld->interleave_ways, 0);
	cxld->commit = mock_decoder_commit;
	cxld->reset = mock_decoder_reset;

	/*
	 * Now that endpoint decoder is set up, walk up the hierarchy
	 * and setup the switch and root port decoders targeting @cxlmd.
	 */
	iter = port;
	for (i = 0; i < 2; i++) {
		dport = iter->parent_dport;
		iter = dport->port;
		dev = device_find_child(&iter->dev, NULL, first_decoder);
		/*
		 * Ancestor ports are guaranteed to be enumerated before
		 * @port, and all ports have at least one decoder.
		 */
		if (WARN_ON(!dev))
			continue;

		cxlsd = to_cxl_switch_decoder(dev);
		if (i == 0) {
			/* put cxl_mem.4 second in the decode order */
			if (pdev->id == 4) {
				cxlsd->target[1] = dport;
				cxld->target_map[1] = dport->port_id;
			} else {
				cxlsd->target[0] = dport;
				cxld->target_map[0] = dport->port_id;
			}
		} else {
			cxlsd->target[0] = dport;
			cxld->target_map[0] = dport->port_id;
		}
		cxld = &cxlsd->cxld;
		cxld->target_type = CXL_DECODER_HOSTONLYMEM;
		cxld->flags = CXL_DECODER_F_ENABLE;
		iter->commit_end = 0;
		/*
		 * Switch targets 2 endpoints, while host bridge targets
		 * one root port
		 */
		if (i == 0)
			cxld->interleave_ways = 2;
		else
			cxld->interleave_ways = 1;
		cxld->interleave_granularity = 4096;
		cxld->hpa_range = (struct range) {
			.start = base,
			.end = base + size - 1,
		};
		/* drop the reference taken by device_find_child() */
		put_device(dev);
	}
}
849
850
/*
 * Register NR_CXL_PORT_DECODERS decoders on @cxlhdm's port: switch
 * decoders for root/switch ports (sized by downstream fan-out),
 * endpoint decoders for endpoints.  Returns 0 or a negative errno.
 */
static int mock_cxl_enumerate_decoders(struct cxl_hdm *cxlhdm,
				       struct cxl_endpoint_dvsec_info *info)
{
	struct cxl_port *port = cxlhdm->port;
	struct cxl_port *parent_port = to_cxl_port(port->dev.parent);
	int target_count, i;

	/* endpoints have no downstream targets */
	if (is_cxl_endpoint(port))
		target_count = 0;
	else if (is_cxl_root(parent_port))
		target_count = NR_CXL_ROOT_PORTS;
	else
		target_count = NR_CXL_SWITCH_PORTS;

	for (i = 0; i < NR_CXL_PORT_DECODERS; i++) {
		struct target_map_ctx ctx = {
			.target_count = target_count,
		};
		struct cxl_decoder *cxld;
		int rc;

		if (target_count) {
			struct cxl_switch_decoder *cxlsd;

			cxlsd = cxl_switch_decoder_alloc(port, target_count);
			if (IS_ERR(cxlsd)) {
				dev_warn(&port->dev,
					 "Failed to allocate the decoder\n");
				return PTR_ERR(cxlsd);
			}
			cxld = &cxlsd->cxld;
		} else {
			struct cxl_endpoint_decoder *cxled;

			cxled = cxl_endpoint_decoder_alloc(port);

			if (IS_ERR(cxled)) {
				dev_warn(&port->dev,
					 "Failed to allocate the decoder\n");
				return PTR_ERR(cxled);
			}
			cxld = &cxled->cxld;
		}

		ctx.target_map = cxld->target_map;

		mock_init_hdm_decoder(cxld);

		/* populate target_map from the port's downstream children */
		if (target_count) {
			rc = device_for_each_child(port->uport_dev, &ctx,
						   map_targets);
			if (rc) {
				put_device(&cxld->dev);
				return rc;
			}
		}

		rc = cxl_decoder_add_locked(cxld);
		if (rc) {
			put_device(&cxld->dev);
			dev_err(&port->dev, "Failed to add decoder\n");
			return rc;
		}

		/* after this point devm owns the decoder's teardown */
		rc = cxl_decoder_autoremove(&port->dev, cxld);
		if (rc)
			return rc;
		dev_dbg(&cxld->dev, "Added to port %s\n", dev_name(&port->dev));
	}

	return 0;
}
922
923
static int __mock_cxl_decoders_setup(struct cxl_port *port)
924
{
925
struct cxl_hdm *cxlhdm;
926
927
cxlhdm = mock_cxl_setup_hdm(port, NULL);
928
if (IS_ERR(cxlhdm)) {
929
if (PTR_ERR(cxlhdm) != -ENODEV)
930
dev_err(&port->dev, "Failed to map HDM decoder capability\n");
931
return PTR_ERR(cxlhdm);
932
}
933
934
return mock_cxl_enumerate_decoders(cxlhdm, NULL);
935
}
936
937
static int mock_cxl_switch_port_decoders_setup(struct cxl_port *port)
938
{
939
if (is_cxl_root(port) || is_cxl_endpoint(port))
940
return -EOPNOTSUPP;
941
942
return __mock_cxl_decoders_setup(port);
943
}
944
945
static int mock_cxl_endpoint_decoders_setup(struct cxl_port *port)
946
{
947
if (!is_cxl_endpoint(port))
948
return -EOPNOTSUPP;
949
950
return __mock_cxl_decoders_setup(port);
951
}
952
953
static int get_port_array(struct cxl_port *port,
954
struct platform_device ***port_array,
955
int *port_array_size)
956
{
957
struct platform_device **array;
958
int array_size;
959
960
if (port->depth == 1) {
961
if (is_multi_bridge(port->uport_dev)) {
962
array_size = ARRAY_SIZE(cxl_root_port);
963
array = cxl_root_port;
964
} else if (is_single_bridge(port->uport_dev)) {
965
array_size = ARRAY_SIZE(cxl_root_single);
966
array = cxl_root_single;
967
} else {
968
dev_dbg(&port->dev, "%s: unknown bridge type\n",
969
dev_name(port->uport_dev));
970
return -ENXIO;
971
}
972
} else if (port->depth == 2) {
973
struct cxl_port *parent = to_cxl_port(port->dev.parent);
974
975
if (is_multi_bridge(parent->uport_dev)) {
976
array_size = ARRAY_SIZE(cxl_switch_dport);
977
array = cxl_switch_dport;
978
} else if (is_single_bridge(parent->uport_dev)) {
979
array_size = ARRAY_SIZE(cxl_swd_single);
980
array = cxl_swd_single;
981
} else {
982
dev_dbg(&port->dev, "%s: unknown bridge type\n",
983
dev_name(port->uport_dev));
984
return -ENXIO;
985
}
986
} else {
987
dev_WARN_ONCE(&port->dev, 1, "unexpected depth %d\n",
988
port->depth);
989
return -ENXIO;
990
}
991
992
*port_array = array;
993
*port_array_size = array_size;
994
995
return 0;
996
}
997
998
static int mock_cxl_port_enumerate_dports(struct cxl_port *port)
999
{
1000
struct platform_device **array;
1001
int i, array_size;
1002
int rc;
1003
1004
rc = get_port_array(port, &array, &array_size);
1005
if (rc)
1006
return rc;
1007
1008
for (i = 0; i < array_size; i++) {
1009
struct platform_device *pdev = array[i];
1010
struct cxl_dport *dport;
1011
1012
if (pdev->dev.parent != port->uport_dev) {
1013
dev_dbg(&port->dev, "%s: mismatch parent %s\n",
1014
dev_name(port->uport_dev),
1015
dev_name(pdev->dev.parent));
1016
continue;
1017
}
1018
1019
dport = devm_cxl_add_dport(port, &pdev->dev, pdev->id,
1020
CXL_RESOURCE_NONE);
1021
1022
if (IS_ERR(dport))
1023
return PTR_ERR(dport);
1024
}
1025
1026
return 0;
1027
}
1028
1029
static struct cxl_dport *mock_cxl_add_dport_by_dev(struct cxl_port *port,
1030
struct device *dport_dev)
1031
{
1032
struct platform_device **array;
1033
int rc, i, array_size;
1034
1035
rc = get_port_array(port, &array, &array_size);
1036
if (rc)
1037
return ERR_PTR(rc);
1038
1039
for (i = 0; i < array_size; i++) {
1040
struct platform_device *pdev = array[i];
1041
1042
if (pdev->dev.parent != port->uport_dev) {
1043
dev_dbg(&port->dev, "%s: mismatch parent %s\n",
1044
dev_name(port->uport_dev),
1045
dev_name(pdev->dev.parent));
1046
continue;
1047
}
1048
1049
if (&pdev->dev != dport_dev)
1050
continue;
1051
1052
return devm_cxl_add_dport(port, &pdev->dev, pdev->id,
1053
CXL_RESOURCE_NONE);
1054
}
1055
1056
return ERR_PTR(-ENODEV);
1057
}
1058
1059
/*
1060
* Faking the cxl_dpa_perf for the memdev when appropriate.
1061
*/
1062
static void dpa_perf_setup(struct cxl_port *endpoint, struct range *range,
1063
struct cxl_dpa_perf *dpa_perf)
1064
{
1065
dpa_perf->qos_class = FAKE_QTG_ID;
1066
dpa_perf->dpa_range = *range;
1067
for (int i = 0; i < ACCESS_COORDINATE_MAX; i++) {
1068
dpa_perf->coord[i].read_latency = 500;
1069
dpa_perf->coord[i].write_latency = 500;
1070
dpa_perf->coord[i].read_bandwidth = 1000;
1071
dpa_perf->coord[i].write_bandwidth = 1000;
1072
}
1073
}
1074
1075
/*
 * Stand-in for CDAT parsing: stamp fake perf data on every DPA
 * partition of the endpoint's memdev.
 */
static void mock_cxl_endpoint_parse_cdat(struct cxl_port *port)
{
	/* __free(put_cxl_root) drops the reference when cxl_root goes out of scope */
	struct cxl_root *cxl_root __free(put_cxl_root) =
		find_cxl_root(port);
	struct cxl_memdev *cxlmd = to_cxl_memdev(port->uport_dev);
	struct cxl_dev_state *cxlds = cxlmd->cxlds;
	struct access_coordinate ep_c[ACCESS_COORDINATE_MAX];

	if (!cxl_root)
		return;

	for (int i = 0; i < cxlds->nr_partitions; i++) {
		struct resource *res = &cxlds->part[i].res;
		struct cxl_dpa_perf *perf = &cxlds->part[i].perf;
		struct range range = {
			.start = res->start,
			.end = res->end,
		};

		dpa_perf_setup(port, &range, perf);
	}

	cxl_memdev_update_perf(cxlmd);

	/*
	 * This function is here to only test the topology iterator. It serves
	 * no other purpose.
	 */
	cxl_endpoint_get_perf_coordinates(port, ep_c);
}
1105
1106
/* Ops table the cxl_test mock framework swaps in for the real CXL core hooks */
static struct cxl_mock_ops cxl_mock_ops = {
	.is_mock_adev = is_mock_adev,
	.is_mock_bridge = is_mock_bridge,
	.is_mock_bus = is_mock_bus,
	.is_mock_port = is_mock_port,
	.is_mock_dev = is_mock_dev,
	.acpi_table_parse_cedt = mock_acpi_table_parse_cedt,
	.acpi_evaluate_integer = mock_acpi_evaluate_integer,
	.acpi_pci_find_root = mock_acpi_pci_find_root,
	.devm_cxl_switch_port_decoders_setup = mock_cxl_switch_port_decoders_setup,
	.devm_cxl_endpoint_decoders_setup = mock_cxl_endpoint_decoders_setup,
	.devm_cxl_port_enumerate_dports = mock_cxl_port_enumerate_dports,
	.cxl_endpoint_parse_cdat = mock_cxl_endpoint_parse_cdat,
	.devm_cxl_add_dport_by_dev = mock_cxl_add_dport_by_dev,
	.list = LIST_HEAD_INIT(cxl_mock_ops.list),
};
1122
1123
/* Wire @adev up as the ACPI companion fwnode of platform device @dev */
static void mock_companion(struct acpi_device *adev, struct device *dev)
{
	device_initialize(&adev->dev);
	fwnode_init(&adev->fwnode, NULL);
	dev->fwnode = &adev->fwnode;
	adev->fwnode.dev = dev;
}
1130
1131
/* Not all arch headers define SZ_64G */
#ifndef SZ_64G
#define SZ_64G (SZ_32G * 2)
#endif
1134
1135
static __init int cxl_rch_topo_init(void)
1136
{
1137
int rc, i;
1138
1139
for (i = 0; i < ARRAY_SIZE(cxl_rch); i++) {
1140
int idx = NR_CXL_HOST_BRIDGES + NR_CXL_SINGLE_HOST + i;
1141
struct acpi_device *adev = &host_bridge[idx];
1142
struct platform_device *pdev;
1143
1144
pdev = platform_device_alloc("cxl_host_bridge", idx);
1145
if (!pdev)
1146
goto err_bridge;
1147
1148
mock_companion(adev, &pdev->dev);
1149
rc = platform_device_add(pdev);
1150
if (rc) {
1151
platform_device_put(pdev);
1152
goto err_bridge;
1153
}
1154
1155
cxl_rch[i] = pdev;
1156
mock_pci_bus[idx].bridge = &pdev->dev;
1157
rc = sysfs_create_link(&pdev->dev.kobj, &pdev->dev.kobj,
1158
"firmware_node");
1159
if (rc)
1160
goto err_bridge;
1161
}
1162
1163
return 0;
1164
1165
err_bridge:
1166
for (i = ARRAY_SIZE(cxl_rch) - 1; i >= 0; i--) {
1167
struct platform_device *pdev = cxl_rch[i];
1168
1169
if (!pdev)
1170
continue;
1171
sysfs_remove_link(&pdev->dev.kobj, "firmware_node");
1172
platform_device_unregister(cxl_rch[i]);
1173
}
1174
1175
return rc;
1176
}
1177
1178
static void cxl_rch_topo_exit(void)
1179
{
1180
int i;
1181
1182
for (i = ARRAY_SIZE(cxl_rch) - 1; i >= 0; i--) {
1183
struct platform_device *pdev = cxl_rch[i];
1184
1185
if (!pdev)
1186
continue;
1187
sysfs_remove_link(&pdev->dev.kobj, "firmware_node");
1188
platform_device_unregister(cxl_rch[i]);
1189
}
1190
}
1191
1192
static __init int cxl_single_topo_init(void)
1193
{
1194
int i, rc;
1195
1196
for (i = 0; i < ARRAY_SIZE(cxl_hb_single); i++) {
1197
struct acpi_device *adev =
1198
&host_bridge[NR_CXL_HOST_BRIDGES + i];
1199
struct platform_device *pdev;
1200
1201
pdev = platform_device_alloc("cxl_host_bridge",
1202
NR_CXL_HOST_BRIDGES + i);
1203
if (!pdev)
1204
goto err_bridge;
1205
1206
mock_companion(adev, &pdev->dev);
1207
rc = platform_device_add(pdev);
1208
if (rc) {
1209
platform_device_put(pdev);
1210
goto err_bridge;
1211
}
1212
1213
cxl_hb_single[i] = pdev;
1214
mock_pci_bus[i + NR_CXL_HOST_BRIDGES].bridge = &pdev->dev;
1215
rc = sysfs_create_link(&pdev->dev.kobj, &pdev->dev.kobj,
1216
"physical_node");
1217
if (rc)
1218
goto err_bridge;
1219
}
1220
1221
for (i = 0; i < ARRAY_SIZE(cxl_root_single); i++) {
1222
struct platform_device *bridge =
1223
cxl_hb_single[i % ARRAY_SIZE(cxl_hb_single)];
1224
struct platform_device *pdev;
1225
1226
pdev = platform_device_alloc("cxl_root_port",
1227
NR_MULTI_ROOT + i);
1228
if (!pdev)
1229
goto err_port;
1230
pdev->dev.parent = &bridge->dev;
1231
1232
rc = platform_device_add(pdev);
1233
if (rc) {
1234
platform_device_put(pdev);
1235
goto err_port;
1236
}
1237
cxl_root_single[i] = pdev;
1238
}
1239
1240
for (i = 0; i < ARRAY_SIZE(cxl_swu_single); i++) {
1241
struct platform_device *root_port = cxl_root_single[i];
1242
struct platform_device *pdev;
1243
1244
pdev = platform_device_alloc("cxl_switch_uport",
1245
NR_MULTI_ROOT + i);
1246
if (!pdev)
1247
goto err_uport;
1248
pdev->dev.parent = &root_port->dev;
1249
1250
rc = platform_device_add(pdev);
1251
if (rc) {
1252
platform_device_put(pdev);
1253
goto err_uport;
1254
}
1255
cxl_swu_single[i] = pdev;
1256
}
1257
1258
for (i = 0; i < ARRAY_SIZE(cxl_swd_single); i++) {
1259
struct platform_device *uport =
1260
cxl_swu_single[i % ARRAY_SIZE(cxl_swu_single)];
1261
struct platform_device *pdev;
1262
1263
pdev = platform_device_alloc("cxl_switch_dport",
1264
i + NR_MEM_MULTI);
1265
if (!pdev)
1266
goto err_dport;
1267
pdev->dev.parent = &uport->dev;
1268
1269
rc = platform_device_add(pdev);
1270
if (rc) {
1271
platform_device_put(pdev);
1272
goto err_dport;
1273
}
1274
cxl_swd_single[i] = pdev;
1275
}
1276
1277
return 0;
1278
1279
err_dport:
1280
for (i = ARRAY_SIZE(cxl_swd_single) - 1; i >= 0; i--)
1281
platform_device_unregister(cxl_swd_single[i]);
1282
err_uport:
1283
for (i = ARRAY_SIZE(cxl_swu_single) - 1; i >= 0; i--)
1284
platform_device_unregister(cxl_swu_single[i]);
1285
err_port:
1286
for (i = ARRAY_SIZE(cxl_root_single) - 1; i >= 0; i--)
1287
platform_device_unregister(cxl_root_single[i]);
1288
err_bridge:
1289
for (i = ARRAY_SIZE(cxl_hb_single) - 1; i >= 0; i--) {
1290
struct platform_device *pdev = cxl_hb_single[i];
1291
1292
if (!pdev)
1293
continue;
1294
sysfs_remove_link(&pdev->dev.kobj, "physical_node");
1295
platform_device_unregister(cxl_hb_single[i]);
1296
}
1297
1298
return rc;
1299
}
1300
1301
static void cxl_single_topo_exit(void)
1302
{
1303
int i;
1304
1305
for (i = ARRAY_SIZE(cxl_swd_single) - 1; i >= 0; i--)
1306
platform_device_unregister(cxl_swd_single[i]);
1307
for (i = ARRAY_SIZE(cxl_swu_single) - 1; i >= 0; i--)
1308
platform_device_unregister(cxl_swu_single[i]);
1309
for (i = ARRAY_SIZE(cxl_root_single) - 1; i >= 0; i--)
1310
platform_device_unregister(cxl_root_single[i]);
1311
for (i = ARRAY_SIZE(cxl_hb_single) - 1; i >= 0; i--) {
1312
struct platform_device *pdev = cxl_hb_single[i];
1313
1314
if (!pdev)
1315
continue;
1316
sysfs_remove_link(&pdev->dev.kobj, "physical_node");
1317
platform_device_unregister(cxl_hb_single[i]);
1318
}
1319
}
1320
1321
static void cxl_mem_exit(void)
1322
{
1323
int i;
1324
1325
for (i = ARRAY_SIZE(cxl_rcd) - 1; i >= 0; i--)
1326
platform_device_unregister(cxl_rcd[i]);
1327
for (i = ARRAY_SIZE(cxl_mem_single) - 1; i >= 0; i--)
1328
platform_device_unregister(cxl_mem_single[i]);
1329
for (i = ARRAY_SIZE(cxl_mem) - 1; i >= 0; i--)
1330
platform_device_unregister(cxl_mem[i]);
1331
}
1332
1333
static int cxl_mem_init(void)
1334
{
1335
int i, rc;
1336
1337
for (i = 0; i < ARRAY_SIZE(cxl_mem); i++) {
1338
struct platform_device *dport = cxl_switch_dport[i];
1339
struct platform_device *pdev;
1340
1341
pdev = platform_device_alloc("cxl_mem", i);
1342
if (!pdev)
1343
goto err_mem;
1344
pdev->dev.parent = &dport->dev;
1345
set_dev_node(&pdev->dev, i % 2);
1346
1347
rc = platform_device_add(pdev);
1348
if (rc) {
1349
platform_device_put(pdev);
1350
goto err_mem;
1351
}
1352
cxl_mem[i] = pdev;
1353
}
1354
1355
for (i = 0; i < ARRAY_SIZE(cxl_mem_single); i++) {
1356
struct platform_device *dport = cxl_swd_single[i];
1357
struct platform_device *pdev;
1358
1359
pdev = platform_device_alloc("cxl_mem", NR_MEM_MULTI + i);
1360
if (!pdev)
1361
goto err_single;
1362
pdev->dev.parent = &dport->dev;
1363
set_dev_node(&pdev->dev, i % 2);
1364
1365
rc = platform_device_add(pdev);
1366
if (rc) {
1367
platform_device_put(pdev);
1368
goto err_single;
1369
}
1370
cxl_mem_single[i] = pdev;
1371
}
1372
1373
for (i = 0; i < ARRAY_SIZE(cxl_rcd); i++) {
1374
int idx = NR_MEM_MULTI + NR_MEM_SINGLE + i;
1375
struct platform_device *rch = cxl_rch[i];
1376
struct platform_device *pdev;
1377
1378
pdev = platform_device_alloc("cxl_rcd", idx);
1379
if (!pdev)
1380
goto err_rcd;
1381
pdev->dev.parent = &rch->dev;
1382
set_dev_node(&pdev->dev, i % 2);
1383
1384
rc = platform_device_add(pdev);
1385
if (rc) {
1386
platform_device_put(pdev);
1387
goto err_rcd;
1388
}
1389
cxl_rcd[i] = pdev;
1390
}
1391
1392
return 0;
1393
1394
err_rcd:
1395
for (i = ARRAY_SIZE(cxl_rcd) - 1; i >= 0; i--)
1396
platform_device_unregister(cxl_rcd[i]);
1397
err_single:
1398
for (i = ARRAY_SIZE(cxl_mem_single) - 1; i >= 0; i--)
1399
platform_device_unregister(cxl_mem_single[i]);
1400
err_mem:
1401
for (i = ARRAY_SIZE(cxl_mem) - 1; i >= 0; i--)
1402
platform_device_unregister(cxl_mem[i]);
1403
return rc;
1404
}
1405
1406
static __init int cxl_test_init(void)
1407
{
1408
int rc, i;
1409
struct range mappable;
1410
1411
cxl_acpi_test();
1412
cxl_core_test();
1413
cxl_mem_test();
1414
cxl_pmem_test();
1415
cxl_port_test();
1416
1417
register_cxl_mock_ops(&cxl_mock_ops);
1418
1419
cxl_mock_pool = gen_pool_create(ilog2(SZ_2M), NUMA_NO_NODE);
1420
if (!cxl_mock_pool) {
1421
rc = -ENOMEM;
1422
goto err_gen_pool_create;
1423
}
1424
mappable = mhp_get_pluggable_range(true);
1425
1426
rc = gen_pool_add(cxl_mock_pool,
1427
min(iomem_resource.end + 1 - SZ_64G,
1428
mappable.end + 1 - SZ_64G),
1429
SZ_64G, NUMA_NO_NODE);
1430
if (rc)
1431
goto err_gen_pool_add;
1432
1433
if (interleave_arithmetic == 1) {
1434
cfmws_start = CFMWS_XOR_ARRAY_START;
1435
cfmws_end = CFMWS_XOR_ARRAY_END;
1436
} else {
1437
cfmws_start = CFMWS_MOD_ARRAY_START;
1438
cfmws_end = CFMWS_MOD_ARRAY_END;
1439
}
1440
1441
rc = populate_cedt();
1442
if (rc)
1443
goto err_populate;
1444
1445
for (i = 0; i < ARRAY_SIZE(cxl_host_bridge); i++) {
1446
struct acpi_device *adev = &host_bridge[i];
1447
struct platform_device *pdev;
1448
1449
pdev = platform_device_alloc("cxl_host_bridge", i);
1450
if (!pdev)
1451
goto err_bridge;
1452
1453
mock_companion(adev, &pdev->dev);
1454
rc = platform_device_add(pdev);
1455
if (rc) {
1456
platform_device_put(pdev);
1457
goto err_bridge;
1458
}
1459
1460
cxl_host_bridge[i] = pdev;
1461
mock_pci_bus[i].bridge = &pdev->dev;
1462
rc = sysfs_create_link(&pdev->dev.kobj, &pdev->dev.kobj,
1463
"physical_node");
1464
if (rc)
1465
goto err_bridge;
1466
}
1467
1468
for (i = 0; i < ARRAY_SIZE(cxl_root_port); i++) {
1469
struct platform_device *bridge =
1470
cxl_host_bridge[i % ARRAY_SIZE(cxl_host_bridge)];
1471
struct platform_device *pdev;
1472
1473
pdev = platform_device_alloc("cxl_root_port", i);
1474
if (!pdev)
1475
goto err_port;
1476
pdev->dev.parent = &bridge->dev;
1477
1478
rc = platform_device_add(pdev);
1479
if (rc) {
1480
platform_device_put(pdev);
1481
goto err_port;
1482
}
1483
cxl_root_port[i] = pdev;
1484
}
1485
1486
BUILD_BUG_ON(ARRAY_SIZE(cxl_switch_uport) != ARRAY_SIZE(cxl_root_port));
1487
for (i = 0; i < ARRAY_SIZE(cxl_switch_uport); i++) {
1488
struct platform_device *root_port = cxl_root_port[i];
1489
struct platform_device *pdev;
1490
1491
pdev = platform_device_alloc("cxl_switch_uport", i);
1492
if (!pdev)
1493
goto err_uport;
1494
pdev->dev.parent = &root_port->dev;
1495
1496
rc = platform_device_add(pdev);
1497
if (rc) {
1498
platform_device_put(pdev);
1499
goto err_uport;
1500
}
1501
cxl_switch_uport[i] = pdev;
1502
}
1503
1504
for (i = 0; i < ARRAY_SIZE(cxl_switch_dport); i++) {
1505
struct platform_device *uport =
1506
cxl_switch_uport[i % ARRAY_SIZE(cxl_switch_uport)];
1507
struct platform_device *pdev;
1508
1509
pdev = platform_device_alloc("cxl_switch_dport", i);
1510
if (!pdev)
1511
goto err_dport;
1512
pdev->dev.parent = &uport->dev;
1513
1514
rc = platform_device_add(pdev);
1515
if (rc) {
1516
platform_device_put(pdev);
1517
goto err_dport;
1518
}
1519
cxl_switch_dport[i] = pdev;
1520
}
1521
1522
rc = cxl_single_topo_init();
1523
if (rc)
1524
goto err_dport;
1525
1526
rc = cxl_rch_topo_init();
1527
if (rc)
1528
goto err_single;
1529
1530
cxl_acpi = platform_device_alloc("cxl_acpi", 0);
1531
if (!cxl_acpi)
1532
goto err_rch;
1533
1534
mock_companion(&acpi0017_mock, &cxl_acpi->dev);
1535
acpi0017_mock.dev.bus = &platform_bus_type;
1536
1537
rc = platform_device_add(cxl_acpi);
1538
if (rc)
1539
goto err_root;
1540
1541
rc = cxl_mem_init();
1542
if (rc)
1543
goto err_root;
1544
1545
return 0;
1546
1547
err_root:
1548
platform_device_put(cxl_acpi);
1549
err_rch:
1550
cxl_rch_topo_exit();
1551
err_single:
1552
cxl_single_topo_exit();
1553
err_dport:
1554
for (i = ARRAY_SIZE(cxl_switch_dport) - 1; i >= 0; i--)
1555
platform_device_unregister(cxl_switch_dport[i]);
1556
err_uport:
1557
for (i = ARRAY_SIZE(cxl_switch_uport) - 1; i >= 0; i--)
1558
platform_device_unregister(cxl_switch_uport[i]);
1559
err_port:
1560
for (i = ARRAY_SIZE(cxl_root_port) - 1; i >= 0; i--)
1561
platform_device_unregister(cxl_root_port[i]);
1562
err_bridge:
1563
for (i = ARRAY_SIZE(cxl_host_bridge) - 1; i >= 0; i--) {
1564
struct platform_device *pdev = cxl_host_bridge[i];
1565
1566
if (!pdev)
1567
continue;
1568
sysfs_remove_link(&pdev->dev.kobj, "physical_node");
1569
platform_device_unregister(cxl_host_bridge[i]);
1570
}
1571
err_populate:
1572
depopulate_all_mock_resources();
1573
err_gen_pool_add:
1574
gen_pool_destroy(cxl_mock_pool);
1575
err_gen_pool_create:
1576
unregister_cxl_mock_ops(&cxl_mock_ops);
1577
return rc;
1578
}
1579
1580
static __exit void cxl_test_exit(void)
1581
{
1582
int i;
1583
1584
cxl_mem_exit();
1585
platform_device_unregister(cxl_acpi);
1586
cxl_rch_topo_exit();
1587
cxl_single_topo_exit();
1588
for (i = ARRAY_SIZE(cxl_switch_dport) - 1; i >= 0; i--)
1589
platform_device_unregister(cxl_switch_dport[i]);
1590
for (i = ARRAY_SIZE(cxl_switch_uport) - 1; i >= 0; i--)
1591
platform_device_unregister(cxl_switch_uport[i]);
1592
for (i = ARRAY_SIZE(cxl_root_port) - 1; i >= 0; i--)
1593
platform_device_unregister(cxl_root_port[i]);
1594
for (i = ARRAY_SIZE(cxl_host_bridge) - 1; i >= 0; i--) {
1595
struct platform_device *pdev = cxl_host_bridge[i];
1596
1597
if (!pdev)
1598
continue;
1599
sysfs_remove_link(&pdev->dev.kobj, "physical_node");
1600
platform_device_unregister(cxl_host_bridge[i]);
1601
}
1602
depopulate_all_mock_resources();
1603
gen_pool_destroy(cxl_mock_pool);
1604
unregister_cxl_mock_ops(&cxl_mock_ops);
1605
}
1606
1607
/* Read-only parameter selecting CFMWS interleave math in cxl_test_init():
 * 0 = modulo arrays (default), 1 = XOR arrays.
 */
module_param(interleave_arithmetic, int, 0444);
MODULE_PARM_DESC(interleave_arithmetic, "Modulo:0, XOR:1");
module_init(cxl_test_init);
module_exit(cxl_test_exit);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("cxl_test: setup module");
/* Symbol namespaces this test module pulls exports from */
MODULE_IMPORT_NS("ACPI");
MODULE_IMPORT_NS("CXL");
1615
1616