GitHub Repository: torvalds/linux
Path: blob/master/drivers/cxl/core/hdm.c
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright(c) 2022 Intel Corporation. All rights reserved. */
#include <linux/seq_file.h>
#include <linux/device.h>
#include <linux/delay.h>

#include "cxlmem.h"
#include "core.h"

/**
 * DOC: cxl core hdm
 *
 * Compute Express Link Host Managed Device Memory, starting with the
 * CXL 2.0 specification, is managed by an array of HDM Decoder register
 * instances per CXL port and per CXL endpoint. Define common helpers
 * for enumerating these registers and capabilities.
 */

struct cxl_rwsem cxl_rwsem = {
        .region = __RWSEM_INITIALIZER(cxl_rwsem.region),
        .dpa = __RWSEM_INITIALIZER(cxl_rwsem.dpa),
};

static int add_hdm_decoder(struct cxl_port *port, struct cxl_decoder *cxld)
{
        int rc;

        rc = cxl_decoder_add_locked(cxld);
        if (rc) {
                put_device(&cxld->dev);
                dev_err(&port->dev, "Failed to add decoder\n");
                return rc;
        }

        rc = cxl_decoder_autoremove(&port->dev, cxld);
        if (rc)
                return rc;

        dev_dbg(port->uport_dev, "%s added to %s\n",
                dev_name(&cxld->dev), dev_name(&port->dev));

        return 0;
}

/*
 * Per the CXL specification (8.2.5.12 CXL HDM Decoder Capability Structure)
 * single ported host-bridges need not publish a decoder capability when a
 * passthrough decode can be assumed, i.e. all transactions that the uport sees
 * are claimed and passed to the single dport. Disable the range until the first
 * CXL region is enumerated / activated.
 */
static int devm_cxl_add_passthrough_decoder(struct cxl_port *port)
{
        struct cxl_switch_decoder *cxlsd;
        struct cxl_hdm *cxlhdm = dev_get_drvdata(&port->dev);

        /*
         * Capability checks are moot for passthrough decoders, support
         * any and all possibilities.
         */
        cxlhdm->interleave_mask = ~0U;
        cxlhdm->iw_cap_mask = ~0UL;

        cxlsd = cxl_switch_decoder_alloc(port, 1);
        if (IS_ERR(cxlsd))
                return PTR_ERR(cxlsd);

        device_lock_assert(&port->dev);

        return add_hdm_decoder(port, &cxlsd->cxld);
}

static void parse_hdm_decoder_caps(struct cxl_hdm *cxlhdm)
{
        u32 hdm_cap;

        hdm_cap = readl(cxlhdm->regs.hdm_decoder + CXL_HDM_DECODER_CAP_OFFSET);
        cxlhdm->decoder_count = cxl_hdm_decoder_count(hdm_cap);
        cxlhdm->target_count =
                FIELD_GET(CXL_HDM_DECODER_TARGET_COUNT_MASK, hdm_cap);
        if (FIELD_GET(CXL_HDM_DECODER_INTERLEAVE_11_8, hdm_cap))
                cxlhdm->interleave_mask |= GENMASK(11, 8);
        if (FIELD_GET(CXL_HDM_DECODER_INTERLEAVE_14_12, hdm_cap))
                cxlhdm->interleave_mask |= GENMASK(14, 12);
        cxlhdm->iw_cap_mask = BIT(1) | BIT(2) | BIT(4) | BIT(8);
        if (FIELD_GET(CXL_HDM_DECODER_INTERLEAVE_3_6_12_WAY, hdm_cap))
                cxlhdm->iw_cap_mask |= BIT(3) | BIT(6) | BIT(12);
        if (FIELD_GET(CXL_HDM_DECODER_INTERLEAVE_16_WAY, hdm_cap))
                cxlhdm->iw_cap_mask |= BIT(16);
}
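
/*
 * Illustrative example (editor's sketch, not part of the driver): for a
 * port whose HDM Decoder Capability register advertises interleave
 * address bits 11:8 plus 3/6/12-way interleave, the parse above yields:
 *
 *      cxlhdm->interleave_mask = GENMASK(11, 8);
 *      cxlhdm->iw_cap_mask     = BIT(1) | BIT(2) | BIT(3) | BIT(4) |
 *                                BIT(6) | BIT(8) | BIT(12);
 *
 * i.e. each set bit N in iw_cap_mask means "N-way interleave supported".
 */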

static bool should_emulate_decoders(struct cxl_endpoint_dvsec_info *info)
{
        struct cxl_hdm *cxlhdm;
        void __iomem *hdm;
        u32 ctrl;
        int i;

        if (!info)
                return false;

        cxlhdm = dev_get_drvdata(&info->port->dev);
        hdm = cxlhdm->regs.hdm_decoder;

        if (!hdm)
                return true;

        /*
         * If HDM decoders are present and the driver is in control of
         * Mem_Enable, skip DVSEC-based emulation.
         */
        if (!info->mem_enabled)
                return false;

        /*
         * If any decoders are committed already, there should not be any
         * emulated DVSEC decoders.
         */
        for (i = 0; i < cxlhdm->decoder_count; i++) {
                ctrl = readl(hdm + CXL_HDM_DECODER0_CTRL_OFFSET(i));
                dev_dbg(&info->port->dev,
                        "decoder%d.%d: committed: %ld base: %#x_%.8x size: %#x_%.8x\n",
                        info->port->id, i,
                        FIELD_GET(CXL_HDM_DECODER0_CTRL_COMMITTED, ctrl),
                        readl(hdm + CXL_HDM_DECODER0_BASE_HIGH_OFFSET(i)),
                        readl(hdm + CXL_HDM_DECODER0_BASE_LOW_OFFSET(i)),
                        readl(hdm + CXL_HDM_DECODER0_SIZE_HIGH_OFFSET(i)),
                        readl(hdm + CXL_HDM_DECODER0_SIZE_LOW_OFFSET(i)));
                if (FIELD_GET(CXL_HDM_DECODER0_CTRL_COMMITTED, ctrl))
                        return false;
        }

        return true;
}
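
/*
 * Editor's summary of the decision above (derived from the code, for
 * readability): emulate DVSEC range registers as decoders only when the
 * endpoint has no mapped HDM decoder registers, or when Mem_Enable was
 * already set by platform firmware and no HDM decoder has been committed
 * yet. Any committed HDM decoder means the hardware decoders are
 * authoritative and emulation is skipped.
 */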

/**
 * devm_cxl_setup_hdm - map HDM decoder component registers
 * @port: cxl_port to map
 * @info: cached DVSEC range register info
 */
static struct cxl_hdm *devm_cxl_setup_hdm(struct cxl_port *port,
                                          struct cxl_endpoint_dvsec_info *info)
{
        struct cxl_register_map *reg_map = &port->reg_map;
        struct device *dev = &port->dev;
        struct cxl_hdm *cxlhdm;
        int rc;

        cxlhdm = devm_kzalloc(dev, sizeof(*cxlhdm), GFP_KERNEL);
        if (!cxlhdm)
                return ERR_PTR(-ENOMEM);
        cxlhdm->port = port;
        dev_set_drvdata(dev, cxlhdm);

        /* Memory devices can configure device HDM using DVSEC range regs. */
        if (reg_map->resource == CXL_RESOURCE_NONE) {
                if (!info || !info->mem_enabled) {
                        dev_err(dev, "No component registers mapped\n");
                        return ERR_PTR(-ENXIO);
                }

                cxlhdm->decoder_count = info->ranges;
                return cxlhdm;
        }

        if (!reg_map->component_map.hdm_decoder.valid) {
                dev_dbg(&port->dev, "HDM decoder registers not implemented\n");
                /* unique error code to indicate no HDM decoder capability */
                return ERR_PTR(-ENODEV);
        }

        rc = cxl_map_component_regs(reg_map, &cxlhdm->regs,
                                    BIT(CXL_CM_CAP_CAP_ID_HDM));
        if (rc) {
                dev_err(dev, "Failed to map HDM capability.\n");
                return ERR_PTR(rc);
        }

        parse_hdm_decoder_caps(cxlhdm);
        if (cxlhdm->decoder_count == 0) {
                dev_err(dev, "Spec violation. Caps invalid\n");
                return ERR_PTR(-ENXIO);
        }

        /*
         * Now that the hdm capability is parsed, decide if range
         * register emulation is needed and fixup cxlhdm accordingly.
         */
        if (should_emulate_decoders(info)) {
                dev_dbg(dev, "Fallback map %d range register%s\n", info->ranges,
                        str_plural(info->ranges));
                cxlhdm->decoder_count = info->ranges;
        }

        return cxlhdm;
}

static void __cxl_dpa_debug(struct seq_file *file, struct resource *r, int depth)
{
        unsigned long long start = r->start, end = r->end;

        seq_printf(file, "%*s%08llx-%08llx : %s\n", depth * 2, "", start, end,
                   r->name);
}

void cxl_dpa_debug(struct seq_file *file, struct cxl_dev_state *cxlds)
{
        struct resource *p1, *p2;

        guard(rwsem_read)(&cxl_rwsem.dpa);
        for (p1 = cxlds->dpa_res.child; p1; p1 = p1->sibling) {
                __cxl_dpa_debug(file, p1, 0);
                for (p2 = p1->child; p2; p2 = p2->sibling)
                        __cxl_dpa_debug(file, p2, 1);
        }
}
EXPORT_SYMBOL_NS_GPL(cxl_dpa_debug, "CXL");

/* See request_skip() kernel-doc */
static resource_size_t __adjust_skip(struct cxl_dev_state *cxlds,
                                     const resource_size_t skip_base,
                                     const resource_size_t skip_len,
                                     const char *requester)
{
        const resource_size_t skip_end = skip_base + skip_len - 1;

        for (int i = 0; i < cxlds->nr_partitions; i++) {
                const struct resource *part_res = &cxlds->part[i].res;
                resource_size_t adjust_start, adjust_end, size;

                adjust_start = max(skip_base, part_res->start);
                adjust_end = min(skip_end, part_res->end);

                if (adjust_end < adjust_start)
                        continue;

                size = adjust_end - adjust_start + 1;

                if (!requester)
                        __release_region(&cxlds->dpa_res, adjust_start, size);
                else if (!__request_region(&cxlds->dpa_res, adjust_start, size,
                                           requester, 0))
                        return adjust_start - skip_base;
        }

        return skip_len;
}
#define release_skip(c, b, l) __adjust_skip((c), (b), (l), NULL)

/*
 * Must be called in a context that synchronizes against this decoder's
 * port ->remove() callback (like an endpoint decoder sysfs attribute)
 */
static void __cxl_dpa_release(struct cxl_endpoint_decoder *cxled)
{
        struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
        struct cxl_port *port = cxled_to_port(cxled);
        struct cxl_dev_state *cxlds = cxlmd->cxlds;
        struct resource *res = cxled->dpa_res;
        resource_size_t skip_start;

        lockdep_assert_held_write(&cxl_rwsem.dpa);

        /* save @skip_start, before @res is released */
        skip_start = res->start - cxled->skip;
        __release_region(&cxlds->dpa_res, res->start, resource_size(res));
        if (cxled->skip)
                release_skip(cxlds, skip_start, cxled->skip);
        cxled->skip = 0;
        cxled->dpa_res = NULL;
        put_device(&cxled->cxld.dev);
        port->hdm_end--;
}

static void cxl_dpa_release(void *cxled)
{
        guard(rwsem_write)(&cxl_rwsem.dpa);
        __cxl_dpa_release(cxled);
}

/*
 * Must be called from context that will not race port device
 * unregistration, like decoder sysfs attribute methods
 */
static void devm_cxl_dpa_release(struct cxl_endpoint_decoder *cxled)
{
        struct cxl_port *port = cxled_to_port(cxled);

        lockdep_assert_held_write(&cxl_rwsem.dpa);
        devm_remove_action(&port->dev, cxl_dpa_release, cxled);
        __cxl_dpa_release(cxled);
}

/**
 * request_skip() - Track DPA 'skip' in @cxlds->dpa_res resource tree
 * @cxlds: CXL.mem device context that parents @cxled
 * @cxled: Endpoint decoder establishing new allocation that skips lower DPA
 * @skip_base: DPA < start of new DPA allocation (DPAnew)
 * @skip_len: @skip_base + @skip_len == DPAnew
 *
 * DPA 'skip' arises from out-of-sequence DPA allocation events relative
 * to free capacity across multiple partitions. It is a wasteful event
 * as usable DPA gets thrown away, but if a deployment has, for example,
 * a dual RAM+PMEM device, wants to use PMEM, and has unallocated RAM
 * DPA, the free RAM DPA must be sacrificed to start allocating PMEM.
 * See third "Implementation Note" in CXL 3.1 8.2.4.19.13 "Decoder
 * Protection" for more details.
 *
 * A 'skip' always covers the last allocated DPA in a previous partition
 * to the start of the current partition to allocate. Allocations never
 * start in the middle of a partition, and allocations are always
 * de-allocated in reverse order (see cxl_dpa_free(), or natural devm
 * unwind order from forced in-order allocation).
 *
 * If @cxlds->nr_partitions was guaranteed to be <= 2 then the 'skip'
 * would always be contained to a single partition. Given
 * @cxlds->nr_partitions may be > 2 it results in cases where the 'skip'
 * might span "tail capacity of partition[0], all of partition[1], ...,
 * all of partition[N-1]" to support allocating from partition[N]. That
 * in turn interacts with the partition 'struct resource' boundaries
 * within @cxlds->dpa_res whereby 'skip' requests need to be divided by
 * partition. I.e. this is a quirk of using a 'struct resource' tree to
 * detect range conflicts while also tracking partition boundaries in
 * @cxlds->dpa_res.
 */
static int request_skip(struct cxl_dev_state *cxlds,
                        struct cxl_endpoint_decoder *cxled,
                        const resource_size_t skip_base,
                        const resource_size_t skip_len)
{
        resource_size_t skipped = __adjust_skip(cxlds, skip_base, skip_len,
                                                dev_name(&cxled->cxld.dev));

        if (skipped == skip_len)
                return 0;

        dev_dbg(cxlds->dev,
                "%s: failed to reserve skipped space (%pa %pa %pa)\n",
                dev_name(&cxled->cxld.dev), &skip_base, &skip_len, &skipped);

        release_skip(cxlds, skip_base, skipped);

        return -EBUSY;
}
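
/*
 * Editor's worked example (illustrative values only): consider a device
 * with partition[0] = ram  [0x00000000, 0x0fffffff] and
 * partition[1] = pmem [0x10000000, 0x3fffffff], no RAM allocated, and a
 * request to allocate from pmem. The new allocation starts at
 * 0x10000000, so skip_base = 0x0 and skip_len = 0x10000000: the entire
 * unused RAM capacity is reserved as "skip" so that later RAM
 * allocations cannot slip in underneath the committed pmem decoder.
 * With more than two partitions, __adjust_skip() above splits that one
 * logical skip into per-partition reservations inside @cxlds->dpa_res.
 */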

static int __cxl_dpa_reserve(struct cxl_endpoint_decoder *cxled,
                             resource_size_t base, resource_size_t len,
                             resource_size_t skipped)
{
        struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
        struct cxl_port *port = cxled_to_port(cxled);
        struct cxl_dev_state *cxlds = cxlmd->cxlds;
        struct device *dev = &port->dev;
        struct resource *res;
        int rc;

        lockdep_assert_held_write(&cxl_rwsem.dpa);

        if (!len) {
                dev_warn(dev, "decoder%d.%d: empty reservation attempted\n",
                         port->id, cxled->cxld.id);
                return -EINVAL;
        }

        if (cxled->dpa_res) {
                dev_dbg(dev, "decoder%d.%d: existing allocation %pr assigned\n",
                        port->id, cxled->cxld.id, cxled->dpa_res);
                return -EBUSY;
        }

        if (port->hdm_end + 1 != cxled->cxld.id) {
                /*
                 * Assumes alloc and commit order is always in hardware instance
                 * order per expectations from 8.2.5.12.20 Committing Decoder
                 * Programming that enforce decoder[m] committed before
                 * decoder[m+1] commit start.
                 */
                dev_dbg(dev, "decoder%d.%d: expected decoder%d.%d\n", port->id,
                        cxled->cxld.id, port->id, port->hdm_end + 1);
                return -EBUSY;
        }

        if (skipped) {
                rc = request_skip(cxlds, cxled, base - skipped, skipped);
                if (rc)
                        return rc;
        }
        res = __request_region(&cxlds->dpa_res, base, len,
                               dev_name(&cxled->cxld.dev), 0);
        if (!res) {
                dev_dbg(dev, "decoder%d.%d: failed to reserve allocation\n",
                        port->id, cxled->cxld.id);
                if (skipped)
                        release_skip(cxlds, base - skipped, skipped);
                return -EBUSY;
        }
        cxled->dpa_res = res;
        cxled->skip = skipped;

        /*
         * When allocating new capacity, ->part is already set; when
         * discovering decoder settings at initial enumeration, ->part
         * is not set.
         */
        if (cxled->part < 0)
                for (int i = 0; i < cxlds->nr_partitions; i++)
                        if (resource_contains(&cxlds->part[i].res, res)) {
                                cxled->part = i;
                                break;
                        }

        if (cxled->part < 0)
                dev_warn(dev, "decoder%d.%d: %pr does not map any partition\n",
                         port->id, cxled->cxld.id, res);

        port->hdm_end++;
        get_device(&cxled->cxld.dev);
        return 0;
}
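
/*
 * Editor's note (derived from the checks above): DPA reservations are
 * strictly ordered by hardware decoder instance. decoder[m] must be
 * reserved before decoder[m+1] (port->hdm_end tracks the highest
 * reserved id), and cxl_dpa_free() below only accepts the most recently
 * reserved decoder, i.e. the one whose cxld.id equals port->hdm_end.
 */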

static int add_dpa_res(struct device *dev, struct resource *parent,
                       struct resource *res, resource_size_t start,
                       resource_size_t size, const char *type)
{
        int rc;

        *res = (struct resource) {
                .name = type,
                .start = start,
                .end = start + size - 1,
                .flags = IORESOURCE_MEM,
        };
        if (resource_size(res) == 0) {
                dev_dbg(dev, "DPA(%s): no capacity\n", res->name);
                return 0;
        }
        rc = request_resource(parent, res);
        if (rc) {
                dev_err(dev, "DPA(%s): failed to track %pr (%d)\n", res->name,
                        res, rc);
                return rc;
        }

        dev_dbg(dev, "DPA(%s): %pr\n", res->name, res);

        return 0;
}

static const char *cxl_mode_name(enum cxl_partition_mode mode)
{
        switch (mode) {
        case CXL_PARTMODE_RAM:
                return "ram";
        case CXL_PARTMODE_PMEM:
                return "pmem";
        default:
                return "";
        }
}

/* if this fails the caller must destroy @cxlds, there is no recovery */
int cxl_dpa_setup(struct cxl_dev_state *cxlds, const struct cxl_dpa_info *info)
{
        struct device *dev = cxlds->dev;

        guard(rwsem_write)(&cxl_rwsem.dpa);

        if (cxlds->nr_partitions)
                return -EBUSY;

        if (!info->size || !info->nr_partitions) {
                cxlds->dpa_res = DEFINE_RES_MEM(0, 0);
                cxlds->nr_partitions = 0;
                return 0;
        }

        cxlds->dpa_res = DEFINE_RES_MEM(0, info->size);

        for (int i = 0; i < info->nr_partitions; i++) {
                const struct cxl_dpa_part_info *part = &info->part[i];
                int rc;

                cxlds->part[i].perf.qos_class = CXL_QOS_CLASS_INVALID;
                cxlds->part[i].mode = part->mode;

                /* Require ordered + contiguous partitions */
                if (i) {
                        const struct cxl_dpa_part_info *prev = &info->part[i - 1];

                        if (prev->range.end + 1 != part->range.start)
                                return -EINVAL;
                }
                rc = add_dpa_res(dev, &cxlds->dpa_res, &cxlds->part[i].res,
                                 part->range.start, range_len(&part->range),
                                 cxl_mode_name(part->mode));
                if (rc)
                        return rc;
                cxlds->nr_partitions++;
        }

        return 0;
}
EXPORT_SYMBOL_GPL(cxl_dpa_setup);
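
/*
 * Illustrative caller sketch (editor's addition, not part of the driver):
 * a memdev driver that has discovered a 1GB ram + 1GB pmem layout might
 * populate the DPA info roughly like this before handing it to
 * cxl_dpa_setup(). Field names follow the usage above; the exact struct
 * layout and discovery flow are device specific and assumed here.
 *
 *      struct cxl_dpa_info info = {
 *              .size = SZ_2G,
 *              .nr_partitions = 2,
 *              .part = {
 *                      { .mode = CXL_PARTMODE_RAM,
 *                        .range = { .start = 0, .end = SZ_1G - 1 } },
 *                      { .mode = CXL_PARTMODE_PMEM,
 *                        .range = { .start = SZ_1G, .end = SZ_2G - 1 } },
 *              },
 *      };
 *
 *      rc = cxl_dpa_setup(cxlds, &info);
 *
 * cxl_dpa_setup() then roots each partition as a child resource of
 * @cxlds->dpa_res and rejects non-contiguous or out-of-order layouts.
 */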

int devm_cxl_dpa_reserve(struct cxl_endpoint_decoder *cxled,
                         resource_size_t base, resource_size_t len,
                         resource_size_t skipped)
{
        struct cxl_port *port = cxled_to_port(cxled);
        int rc;

        scoped_guard(rwsem_write, &cxl_rwsem.dpa)
                rc = __cxl_dpa_reserve(cxled, base, len, skipped);

        if (rc)
                return rc;

        return devm_add_action_or_reset(&port->dev, cxl_dpa_release, cxled);
}
EXPORT_SYMBOL_NS_GPL(devm_cxl_dpa_reserve, "CXL");

resource_size_t cxl_dpa_size(struct cxl_endpoint_decoder *cxled)
{
        guard(rwsem_read)(&cxl_rwsem.dpa);
        if (cxled->dpa_res)
                return resource_size(cxled->dpa_res);

        return 0;
}

resource_size_t cxl_dpa_resource_start(struct cxl_endpoint_decoder *cxled)
{
        resource_size_t base = -1;

        lockdep_assert_held(&cxl_rwsem.dpa);
        if (cxled->dpa_res)
                base = cxled->dpa_res->start;

        return base;
}

bool cxl_resource_contains_addr(const struct resource *res, const resource_size_t addr)
{
        struct resource _addr = DEFINE_RES_MEM(addr, 1);

        return resource_contains(res, &_addr);
}

int cxl_dpa_free(struct cxl_endpoint_decoder *cxled)
{
        struct cxl_port *port = cxled_to_port(cxled);
        struct device *dev = &cxled->cxld.dev;

        guard(rwsem_write)(&cxl_rwsem.dpa);
        if (!cxled->dpa_res)
                return 0;
        if (cxled->cxld.region) {
                dev_dbg(dev, "decoder assigned to: %s\n",
                        dev_name(&cxled->cxld.region->dev));
                return -EBUSY;
        }
        if (cxled->cxld.flags & CXL_DECODER_F_ENABLE) {
                dev_dbg(dev, "decoder enabled\n");
                return -EBUSY;
        }
        if (cxled->cxld.id != port->hdm_end) {
                dev_dbg(dev, "expected decoder%d.%d\n", port->id,
                        port->hdm_end);
                return -EBUSY;
        }

        devm_cxl_dpa_release(cxled);
        return 0;
}

int cxl_dpa_set_part(struct cxl_endpoint_decoder *cxled,
                     enum cxl_partition_mode mode)
{
        struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
        struct cxl_dev_state *cxlds = cxlmd->cxlds;
        struct device *dev = &cxled->cxld.dev;
        int part;

        guard(rwsem_write)(&cxl_rwsem.dpa);
        if (cxled->cxld.flags & CXL_DECODER_F_ENABLE)
                return -EBUSY;

        for (part = 0; part < cxlds->nr_partitions; part++)
                if (cxlds->part[part].mode == mode)
                        break;

        if (part >= cxlds->nr_partitions) {
                dev_dbg(dev, "unsupported mode: %d\n", mode);
                return -EINVAL;
        }

        if (!resource_size(&cxlds->part[part].res)) {
                dev_dbg(dev, "no available capacity for mode: %d\n", mode);
                return -ENXIO;
        }

        cxled->part = part;
        return 0;
}

static int __cxl_dpa_alloc(struct cxl_endpoint_decoder *cxled, u64 size)
{
        struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
        struct cxl_dev_state *cxlds = cxlmd->cxlds;
        struct device *dev = &cxled->cxld.dev;
        struct resource *res, *prev = NULL;
        resource_size_t start, avail, skip, skip_start;
        struct resource *p, *last;
        int part;

        guard(rwsem_write)(&cxl_rwsem.dpa);
        if (cxled->cxld.region) {
                dev_dbg(dev, "decoder attached to %s\n",
                        dev_name(&cxled->cxld.region->dev));
                return -EBUSY;
        }

        if (cxled->cxld.flags & CXL_DECODER_F_ENABLE) {
                dev_dbg(dev, "decoder enabled\n");
                return -EBUSY;
        }

        part = cxled->part;
        if (part < 0) {
                dev_dbg(dev, "partition not set\n");
                return -EBUSY;
        }

        res = &cxlds->part[part].res;
        for (p = res->child, last = NULL; p; p = p->sibling)
                last = p;
        if (last)
                start = last->end + 1;
        else
                start = res->start;

        /*
         * To allocate at partition N, a skip needs to be calculated for all
         * unallocated space at lower partition indices.
         *
         * If a partition has any allocations, the search can end because a
         * previous cxl_dpa_alloc() invocation is assumed to have accounted for
         * all previous partitions.
         */
        skip_start = CXL_RESOURCE_NONE;
        for (int i = part; i; i--) {
                prev = &cxlds->part[i - 1].res;
                for (p = prev->child, last = NULL; p; p = p->sibling)
                        last = p;
                if (last) {
                        skip_start = last->end + 1;
                        break;
                }
                skip_start = prev->start;
        }

        avail = res->end - start + 1;
        if (skip_start == CXL_RESOURCE_NONE)
                skip = 0;
        else
                skip = res->start - skip_start;

        if (size > avail) {
                dev_dbg(dev, "%llu exceeds available %s capacity: %llu\n", size,
                        res->name, (u64)avail);
                return -ENOSPC;
        }

        return __cxl_dpa_reserve(cxled, start, size, skip);
}
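
/*
 * Editor's example of the skip math above (illustrative values): with
 * partitions ram [0x0, 0x0fffffff], pmem0 [0x10000000, 0x1fffffff] and
 * pmem1 [0x20000000, 0x2fffffff], all empty, an allocation from pmem1
 * computes start = 0x20000000, walks back to find skip_start = 0x0 (the
 * start of the lowest untouched partition), and therefore passes
 * skip = 0x20000000 - 0x0 to __cxl_dpa_reserve(). request_skip() then
 * records that skip as one reservation per crossed partition.
 */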

int cxl_dpa_alloc(struct cxl_endpoint_decoder *cxled, u64 size)
{
        struct cxl_port *port = cxled_to_port(cxled);
        int rc;

        rc = __cxl_dpa_alloc(cxled, size);
        if (rc)
                return rc;

        return devm_add_action_or_reset(&port->dev, cxl_dpa_release, cxled);
}

static void cxld_set_interleave(struct cxl_decoder *cxld, u32 *ctrl)
{
        u16 eig;
        u8 eiw;

        /*
         * Input validation ensures these warns never fire, but otherwise
         * suppress uninitialized variable usage warnings.
         */
        if (WARN_ONCE(ways_to_eiw(cxld->interleave_ways, &eiw),
                      "invalid interleave_ways: %d\n", cxld->interleave_ways))
                return;
        if (WARN_ONCE(granularity_to_eig(cxld->interleave_granularity, &eig),
                      "invalid interleave_granularity: %d\n",
                      cxld->interleave_granularity))
                return;

        u32p_replace_bits(ctrl, eig, CXL_HDM_DECODER0_CTRL_IG_MASK);
        u32p_replace_bits(ctrl, eiw, CXL_HDM_DECODER0_CTRL_IW_MASK);
        *ctrl |= CXL_HDM_DECODER0_CTRL_COMMIT;
}

static void cxld_set_type(struct cxl_decoder *cxld, u32 *ctrl)
{
        u32p_replace_bits(ctrl,
                          !!(cxld->target_type == CXL_DECODER_HOSTONLYMEM),
                          CXL_HDM_DECODER0_CTRL_HOSTONLY);
}

static void cxlsd_set_targets(struct cxl_switch_decoder *cxlsd, u64 *tgt)
{
        struct cxl_dport **t = &cxlsd->target[0];
        int ways = cxlsd->cxld.interleave_ways;

        *tgt = FIELD_PREP(GENMASK(7, 0), t[0]->port_id);
        if (ways > 1)
                *tgt |= FIELD_PREP(GENMASK(15, 8), t[1]->port_id);
        if (ways > 2)
                *tgt |= FIELD_PREP(GENMASK(23, 16), t[2]->port_id);
        if (ways > 3)
                *tgt |= FIELD_PREP(GENMASK(31, 24), t[3]->port_id);
        if (ways > 4)
                *tgt |= FIELD_PREP(GENMASK_ULL(39, 32), t[4]->port_id);
        if (ways > 5)
                *tgt |= FIELD_PREP(GENMASK_ULL(47, 40), t[5]->port_id);
        if (ways > 6)
                *tgt |= FIELD_PREP(GENMASK_ULL(55, 48), t[6]->port_id);
        if (ways > 7)
                *tgt |= FIELD_PREP(GENMASK_ULL(63, 56), t[7]->port_id);
}
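
/*
 * Editor's example of the target list layout built above (illustrative):
 * byte N of the 64-bit target list holds the dport port_id for interleave
 * position N. For a 2-way switch decoder whose targets have port_ids 1
 * and 3, the value written to the target list registers is 0x0301.
 */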

/*
 * Per CXL 2.0 8.2.5.12.20 Committing Decoder Programming, hardware must set
 * committed or error within 10ms, but just be generous with 20ms to account for
 * clock skew and other marginal behavior
 */
#define COMMIT_TIMEOUT_MS 20
static int cxld_await_commit(void __iomem *hdm, int id)
{
        u32 ctrl;
        int i;

        for (i = 0; i < COMMIT_TIMEOUT_MS; i++) {
                ctrl = readl(hdm + CXL_HDM_DECODER0_CTRL_OFFSET(id));
                if (FIELD_GET(CXL_HDM_DECODER0_CTRL_COMMIT_ERROR, ctrl)) {
                        ctrl &= ~CXL_HDM_DECODER0_CTRL_COMMIT;
                        writel(ctrl, hdm + CXL_HDM_DECODER0_CTRL_OFFSET(id));
                        return -EIO;
                }
                if (FIELD_GET(CXL_HDM_DECODER0_CTRL_COMMITTED, ctrl))
                        return 0;
                fsleep(1000);
        }

        return -ETIMEDOUT;
}

static void setup_hw_decoder(struct cxl_decoder *cxld, void __iomem *hdm)
{
        int id = cxld->id;
        u64 base, size;
        u32 ctrl;

        /* common decoder settings */
        ctrl = readl(hdm + CXL_HDM_DECODER0_CTRL_OFFSET(cxld->id));
        cxld_set_interleave(cxld, &ctrl);
        cxld_set_type(cxld, &ctrl);
        base = cxld->hpa_range.start;
        size = range_len(&cxld->hpa_range);

        writel(upper_32_bits(base), hdm + CXL_HDM_DECODER0_BASE_HIGH_OFFSET(id));
        writel(lower_32_bits(base), hdm + CXL_HDM_DECODER0_BASE_LOW_OFFSET(id));
        writel(upper_32_bits(size), hdm + CXL_HDM_DECODER0_SIZE_HIGH_OFFSET(id));
        writel(lower_32_bits(size), hdm + CXL_HDM_DECODER0_SIZE_LOW_OFFSET(id));

        if (is_switch_decoder(&cxld->dev)) {
                struct cxl_switch_decoder *cxlsd =
                        to_cxl_switch_decoder(&cxld->dev);
                void __iomem *tl_hi = hdm + CXL_HDM_DECODER0_TL_HIGH(id);
                void __iomem *tl_lo = hdm + CXL_HDM_DECODER0_TL_LOW(id);
                u64 targets;

                cxlsd_set_targets(cxlsd, &targets);
                writel(upper_32_bits(targets), tl_hi);
                writel(lower_32_bits(targets), tl_lo);
        } else {
                struct cxl_endpoint_decoder *cxled =
                        to_cxl_endpoint_decoder(&cxld->dev);
                void __iomem *sk_hi = hdm + CXL_HDM_DECODER0_SKIP_HIGH(id);
                void __iomem *sk_lo = hdm + CXL_HDM_DECODER0_SKIP_LOW(id);

                writel(upper_32_bits(cxled->skip), sk_hi);
                writel(lower_32_bits(cxled->skip), sk_lo);
        }

        writel(ctrl, hdm + CXL_HDM_DECODER0_CTRL_OFFSET(id));
}

static int cxl_decoder_commit(struct cxl_decoder *cxld)
{
        struct cxl_port *port = to_cxl_port(cxld->dev.parent);
        struct cxl_hdm *cxlhdm = dev_get_drvdata(&port->dev);
        void __iomem *hdm = cxlhdm->regs.hdm_decoder;
        int id = cxld->id, rc;

        if (cxld->flags & CXL_DECODER_F_ENABLE)
                return 0;

        if (cxl_num_decoders_committed(port) != id) {
                dev_dbg(&port->dev,
                        "%s: out of order commit, expected decoder%d.%d\n",
                        dev_name(&cxld->dev), port->id,
                        cxl_num_decoders_committed(port));
                return -EBUSY;
        }

        /*
         * For endpoint decoders hosted on CXL memory devices that
         * support the sanitize operation, make sure sanitize is not in-flight.
         */
        if (is_endpoint_decoder(&cxld->dev)) {
                struct cxl_endpoint_decoder *cxled =
                        to_cxl_endpoint_decoder(&cxld->dev);
                struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
                struct cxl_memdev_state *mds =
                        to_cxl_memdev_state(cxlmd->cxlds);

                if (mds && mds->security.sanitize_active) {
                        dev_dbg(&cxlmd->dev,
                                "attempted to commit %s during sanitize\n",
                                dev_name(&cxld->dev));
                        return -EBUSY;
                }
        }

        scoped_guard(rwsem_read, &cxl_rwsem.dpa)
                setup_hw_decoder(cxld, hdm);

        port->commit_end++;
        rc = cxld_await_commit(hdm, cxld->id);
        if (rc) {
                dev_dbg(&port->dev, "%s: error %d committing decoder\n",
                        dev_name(&cxld->dev), rc);
                cxld->reset(cxld);
                return rc;
        }
        cxld->flags |= CXL_DECODER_F_ENABLE;

        return 0;
}
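
/*
 * Editor's note on the commit flow above (derived from the code): the
 * decoder registers are programmed under the DPA read lock, the COMMIT
 * bit is set as part of the control word, and the driver then polls for
 * COMMITTED (or COMMIT_ERROR) for up to COMMIT_TIMEOUT_MS before either
 * marking the decoder CXL_DECODER_F_ENABLE or resetting it.
 */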

static int commit_reap(struct device *dev, void *data)
{
        struct cxl_port *port = to_cxl_port(dev->parent);
        struct cxl_decoder *cxld;

        if (!is_switch_decoder(dev) && !is_endpoint_decoder(dev))
                return 0;

        cxld = to_cxl_decoder(dev);
        if (port->commit_end == cxld->id &&
            ((cxld->flags & CXL_DECODER_F_ENABLE) == 0)) {
                port->commit_end--;
                dev_dbg(&port->dev, "reap: %s commit_end: %d\n",
                        dev_name(&cxld->dev), port->commit_end);
        }

        return 0;
}

void cxl_port_commit_reap(struct cxl_decoder *cxld)
{
        struct cxl_port *port = to_cxl_port(cxld->dev.parent);

        lockdep_assert_held_write(&cxl_rwsem.region);

        /*
         * Once the highest committed decoder is disabled, free any other
         * decoders that were pinned allocated by out-of-order release.
         */
        port->commit_end--;
        dev_dbg(&port->dev, "reap: %s commit_end: %d\n", dev_name(&cxld->dev),
                port->commit_end);
        device_for_each_child_reverse_from(&port->dev, &cxld->dev, NULL,
                                           commit_reap);
}
EXPORT_SYMBOL_NS_GPL(cxl_port_commit_reap, "CXL");

static void cxl_decoder_reset(struct cxl_decoder *cxld)
{
        struct cxl_port *port = to_cxl_port(cxld->dev.parent);
        struct cxl_hdm *cxlhdm = dev_get_drvdata(&port->dev);
        void __iomem *hdm = cxlhdm->regs.hdm_decoder;
        int id = cxld->id;
        u32 ctrl;

        if ((cxld->flags & CXL_DECODER_F_ENABLE) == 0)
                return;

        if (port->commit_end == id)
                cxl_port_commit_reap(cxld);
        else
                dev_dbg(&port->dev,
                        "%s: out of order reset, expected decoder%d.%d\n",
                        dev_name(&cxld->dev), port->id, port->commit_end);

        ctrl = readl(hdm + CXL_HDM_DECODER0_CTRL_OFFSET(id));
        ctrl &= ~CXL_HDM_DECODER0_CTRL_COMMIT;
        writel(ctrl, hdm + CXL_HDM_DECODER0_CTRL_OFFSET(id));

        writel(0, hdm + CXL_HDM_DECODER0_SIZE_HIGH_OFFSET(id));
        writel(0, hdm + CXL_HDM_DECODER0_SIZE_LOW_OFFSET(id));
        writel(0, hdm + CXL_HDM_DECODER0_BASE_HIGH_OFFSET(id));
        writel(0, hdm + CXL_HDM_DECODER0_BASE_LOW_OFFSET(id));

        cxld->flags &= ~CXL_DECODER_F_ENABLE;

        /* Userspace is now responsible for reconfiguring this decoder */
        if (is_endpoint_decoder(&cxld->dev)) {
                struct cxl_endpoint_decoder *cxled;

                cxled = to_cxl_endpoint_decoder(&cxld->dev);
                cxled->state = CXL_DECODER_STATE_MANUAL;
        }
}

static int cxl_setup_hdm_decoder_from_dvsec(
        struct cxl_port *port, struct cxl_decoder *cxld, u64 *dpa_base,
        int which, struct cxl_endpoint_dvsec_info *info)
{
        struct cxl_endpoint_decoder *cxled;
        u64 len;
        int rc;

        if (!is_cxl_endpoint(port))
                return -EOPNOTSUPP;

        cxled = to_cxl_endpoint_decoder(&cxld->dev);
        len = range_len(&info->dvsec_range[which]);
        if (!len)
                return -ENOENT;

        cxld->target_type = CXL_DECODER_HOSTONLYMEM;
        cxld->commit = NULL;
        cxld->reset = NULL;
        cxld->hpa_range = info->dvsec_range[which];

        /*
         * Set the emulated decoder as locked pending additional support to
         * change the range registers at run time.
         */
        cxld->flags |= CXL_DECODER_F_ENABLE | CXL_DECODER_F_LOCK;
        port->commit_end = cxld->id;

        rc = devm_cxl_dpa_reserve(cxled, *dpa_base, len, 0);
        if (rc) {
                dev_err(&port->dev,
                        "decoder%d.%d: Failed to reserve DPA range %#llx - %#llx (%d)\n",
                        port->id, cxld->id, *dpa_base, *dpa_base + len - 1, rc);
                return rc;
        }
        *dpa_base += len;
        cxled->state = CXL_DECODER_STATE_AUTO;

        return 0;
}

static int init_hdm_decoder(struct cxl_port *port, struct cxl_decoder *cxld,
                            void __iomem *hdm, int which,
                            u64 *dpa_base, struct cxl_endpoint_dvsec_info *info)
{
        struct cxl_endpoint_decoder *cxled = NULL;
        u64 size, base, skip, dpa_size, lo, hi;
        bool committed;
        u32 remainder;
        int i, rc;
        u32 ctrl;
        union {
                u64 value;
                unsigned char target_id[8];
        } target_list;

        if (should_emulate_decoders(info))
                return cxl_setup_hdm_decoder_from_dvsec(port, cxld, dpa_base,
                                                        which, info);

        ctrl = readl(hdm + CXL_HDM_DECODER0_CTRL_OFFSET(which));
        lo = readl(hdm + CXL_HDM_DECODER0_BASE_LOW_OFFSET(which));
        hi = readl(hdm + CXL_HDM_DECODER0_BASE_HIGH_OFFSET(which));
        base = (hi << 32) + lo;
        lo = readl(hdm + CXL_HDM_DECODER0_SIZE_LOW_OFFSET(which));
        hi = readl(hdm + CXL_HDM_DECODER0_SIZE_HIGH_OFFSET(which));
        size = (hi << 32) + lo;
        committed = !!(ctrl & CXL_HDM_DECODER0_CTRL_COMMITTED);
        cxld->commit = cxl_decoder_commit;
        cxld->reset = cxl_decoder_reset;

        if (!committed)
                size = 0;
        if (base == U64_MAX || size == U64_MAX) {
                dev_warn(&port->dev, "decoder%d.%d: Invalid resource range\n",
                         port->id, cxld->id);
                return -ENXIO;
        }

        if (info)
                cxled = to_cxl_endpoint_decoder(&cxld->dev);
        cxld->hpa_range = (struct range) {
                .start = base,
                .end = base + size - 1,
        };

        /* decoders are enabled if committed */
        if (committed) {
                cxld->flags |= CXL_DECODER_F_ENABLE;
                if (ctrl & CXL_HDM_DECODER0_CTRL_LOCK)
                        cxld->flags |= CXL_DECODER_F_LOCK;
                if (FIELD_GET(CXL_HDM_DECODER0_CTRL_HOSTONLY, ctrl))
                        cxld->target_type = CXL_DECODER_HOSTONLYMEM;
                else
                        cxld->target_type = CXL_DECODER_DEVMEM;

                guard(rwsem_write)(&cxl_rwsem.region);
                if (cxld->id != cxl_num_decoders_committed(port)) {
                        dev_warn(&port->dev,
                                 "decoder%d.%d: Committed out of order\n",
                                 port->id, cxld->id);
                        return -ENXIO;
                }

                if (size == 0) {
                        dev_warn(&port->dev,
                                 "decoder%d.%d: Committed with zero size\n",
                                 port->id, cxld->id);
                        return -ENXIO;
                }
                port->commit_end = cxld->id;
        } else {
                if (cxled) {
                        struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
                        struct cxl_dev_state *cxlds = cxlmd->cxlds;

                        /*
                         * Default by devtype until a device arrives that needs
                         * more precision.
                         */
                        if (cxlds->type == CXL_DEVTYPE_CLASSMEM)
                                cxld->target_type = CXL_DECODER_HOSTONLYMEM;
                        else
                                cxld->target_type = CXL_DECODER_DEVMEM;
                } else {
                        /* To be overridden by region type at commit time */
                        cxld->target_type = CXL_DECODER_HOSTONLYMEM;
                }

                if (!FIELD_GET(CXL_HDM_DECODER0_CTRL_HOSTONLY, ctrl) &&
                    cxld->target_type == CXL_DECODER_HOSTONLYMEM) {
                        ctrl |= CXL_HDM_DECODER0_CTRL_HOSTONLY;
                        writel(ctrl, hdm + CXL_HDM_DECODER0_CTRL_OFFSET(which));
                }
        }
        rc = eiw_to_ways(FIELD_GET(CXL_HDM_DECODER0_CTRL_IW_MASK, ctrl),
                         &cxld->interleave_ways);
        if (rc) {
                dev_warn(&port->dev,
                         "decoder%d.%d: Invalid interleave ways (ctrl: %#x)\n",
                         port->id, cxld->id, ctrl);
                return rc;
        }
        rc = eig_to_granularity(FIELD_GET(CXL_HDM_DECODER0_CTRL_IG_MASK, ctrl),
                                &cxld->interleave_granularity);
        if (rc) {
                dev_warn(&port->dev,
                         "decoder%d.%d: Invalid interleave granularity (ctrl: %#x)\n",
                         port->id, cxld->id, ctrl);
                return rc;
        }

        dev_dbg(&port->dev, "decoder%d.%d: range: %#llx-%#llx iw: %d ig: %d\n",
                port->id, cxld->id, cxld->hpa_range.start, cxld->hpa_range.end,
                cxld->interleave_ways, cxld->interleave_granularity);

        if (!cxled) {
                lo = readl(hdm + CXL_HDM_DECODER0_TL_LOW(which));
                hi = readl(hdm + CXL_HDM_DECODER0_TL_HIGH(which));
                target_list.value = (hi << 32) + lo;
                for (i = 0; i < cxld->interleave_ways; i++)
                        cxld->target_map[i] = target_list.target_id[i];

                return 0;
        }

        if (!committed)
                return 0;

        dpa_size = div_u64_rem(size, cxld->interleave_ways, &remainder);
        if (remainder) {
                dev_err(&port->dev,
                        "decoder%d.%d: invalid committed configuration size: %#llx ways: %d\n",
                        port->id, cxld->id, size, cxld->interleave_ways);
                return -ENXIO;
        }
        lo = readl(hdm + CXL_HDM_DECODER0_SKIP_LOW(which));
        hi = readl(hdm + CXL_HDM_DECODER0_SKIP_HIGH(which));
        skip = (hi << 32) + lo;
        rc = devm_cxl_dpa_reserve(cxled, *dpa_base + skip, dpa_size, skip);
        if (rc) {
                dev_err(&port->dev,
                        "decoder%d.%d: Failed to reserve DPA range %#llx - %#llx (%d)\n",
                        port->id, cxld->id, *dpa_base,
                        *dpa_base + dpa_size + skip - 1, rc);
                return rc;
        }
        *dpa_base += dpa_size + skip;

        cxled->state = CXL_DECODER_STATE_AUTO;

        return 0;
}

static void cxl_settle_decoders(struct cxl_hdm *cxlhdm)
{
        void __iomem *hdm = cxlhdm->regs.hdm_decoder;
        int committed, i;
        u32 ctrl;

        if (!hdm)
                return;

        /*
         * Since the register resource was recently claimed via request_region()
         * be careful about trusting the "not-committed" status until the commit
         * timeout has elapsed. The commit timeout is 10ms (CXL 2.0
         * 8.2.5.12.20), but double it to be tolerant of any clock skew between
         * host and target.
         */
        for (i = 0, committed = 0; i < cxlhdm->decoder_count; i++) {
                ctrl = readl(hdm + CXL_HDM_DECODER0_CTRL_OFFSET(i));
                if (ctrl & CXL_HDM_DECODER0_CTRL_COMMITTED)
                        committed++;
        }

        /* ensure that future checks of committed can be trusted */
        if (committed != cxlhdm->decoder_count)
                msleep(20);
}

/**
 * devm_cxl_enumerate_decoders - add decoder objects per HDM register set
 * @cxlhdm: Structure to populate with HDM capabilities
 * @info: cached DVSEC range register info
 */
static int devm_cxl_enumerate_decoders(struct cxl_hdm *cxlhdm,
                                       struct cxl_endpoint_dvsec_info *info)
{
        void __iomem *hdm = cxlhdm->regs.hdm_decoder;
        struct cxl_port *port = cxlhdm->port;
        int i;
        u64 dpa_base = 0;

        cxl_settle_decoders(cxlhdm);

        for (i = 0; i < cxlhdm->decoder_count; i++) {
                int rc, target_count = cxlhdm->target_count;
                struct cxl_decoder *cxld;

                if (is_cxl_endpoint(port)) {
                        struct cxl_endpoint_decoder *cxled;

                        cxled = cxl_endpoint_decoder_alloc(port);
                        if (IS_ERR(cxled)) {
                                dev_warn(&port->dev,
                                         "Failed to allocate decoder%d.%d\n",
                                         port->id, i);
                                return PTR_ERR(cxled);
                        }
                        cxld = &cxled->cxld;
                } else {
                        struct cxl_switch_decoder *cxlsd;

                        cxlsd = cxl_switch_decoder_alloc(port, target_count);
                        if (IS_ERR(cxlsd)) {
                                dev_warn(&port->dev,
                                         "Failed to allocate decoder%d.%d\n",
                                         port->id, i);
                                return PTR_ERR(cxlsd);
                        }
                        cxld = &cxlsd->cxld;
                }

                rc = init_hdm_decoder(port, cxld, hdm, i, &dpa_base, info);
                if (rc) {
                        dev_warn(&port->dev,
                                 "Failed to initialize decoder%d.%d\n",
                                 port->id, i);
                        put_device(&cxld->dev);
                        return rc;
                }
                rc = add_hdm_decoder(port, cxld);
                if (rc) {
                        dev_warn(&port->dev,
                                 "Failed to add decoder%d.%d\n", port->id, i);
                        return rc;
                }
        }

        return 0;
}

/**
 * __devm_cxl_switch_port_decoders_setup - allocate and setup switch decoders
 * @port: CXL port context
 *
 * Return 0 or -errno on error
 */
int __devm_cxl_switch_port_decoders_setup(struct cxl_port *port)
{
        struct cxl_hdm *cxlhdm;

        if (is_cxl_root(port) || is_cxl_endpoint(port))
                return -EOPNOTSUPP;

        cxlhdm = devm_cxl_setup_hdm(port, NULL);
        if (!IS_ERR(cxlhdm))
                return devm_cxl_enumerate_decoders(cxlhdm, NULL);

        if (PTR_ERR(cxlhdm) != -ENODEV) {
                dev_err(&port->dev, "Failed to map HDM decoder capability\n");
                return PTR_ERR(cxlhdm);
        }

        if (cxl_port_get_possible_dports(port) == 1) {
                dev_dbg(&port->dev, "Fallback to passthrough decoder\n");
                return devm_cxl_add_passthrough_decoder(port);
        }

        dev_err(&port->dev, "HDM decoder capability not found\n");
        return -ENXIO;
}
EXPORT_SYMBOL_NS_GPL(__devm_cxl_switch_port_decoders_setup, "CXL");

/**
 * devm_cxl_endpoint_decoders_setup - allocate and setup endpoint decoders
 * @port: CXL port context
 *
 * Return 0 or -errno on error
 */
int devm_cxl_endpoint_decoders_setup(struct cxl_port *port)
{
        struct cxl_memdev *cxlmd = to_cxl_memdev(port->uport_dev);
        struct cxl_endpoint_dvsec_info info = { .port = port };
        struct cxl_dev_state *cxlds = cxlmd->cxlds;
        struct cxl_hdm *cxlhdm;
        int rc;

        if (!is_cxl_endpoint(port))
                return -EOPNOTSUPP;

        rc = cxl_dvsec_rr_decode(cxlds, &info);
        if (rc < 0)
                return rc;

        cxlhdm = devm_cxl_setup_hdm(port, &info);
        if (IS_ERR(cxlhdm)) {
                if (PTR_ERR(cxlhdm) == -ENODEV)
                        dev_err(&port->dev, "HDM decoder registers not found\n");
                return PTR_ERR(cxlhdm);
        }

        rc = cxl_hdm_decode_init(cxlds, cxlhdm, &info);
        if (rc)
                return rc;

        return devm_cxl_enumerate_decoders(cxlhdm, &info);
}
EXPORT_SYMBOL_NS_GPL(devm_cxl_endpoint_decoders_setup, "CXL");
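
/*
 * Editor's summary of the endpoint setup sequence above (derived from the
 * code, roughly): cxl_dvsec_rr_decode() caches the DVSEC range register
 * state, devm_cxl_setup_hdm() maps the HDM decoder capability (or falls
 * back to DVSEC-based emulation), cxl_hdm_decode_init() reconciles the
 * two decode mechanisms, and devm_cxl_enumerate_decoders() then registers
 * one decoder object per HDM decoder instance.
 */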