Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
torvalds
GitHub Repository: torvalds/linux
Path: blob/master/drivers/fwctl/pds/main.c
29524 views
1
// SPDX-License-Identifier: GPL-2.0
2
/* Copyright(c) Advanced Micro Devices, Inc */
3
4
#include <linux/module.h>
5
#include <linux/auxiliary_bus.h>
6
#include <linux/pci.h>
7
#include <linux/vmalloc.h>
8
#include <linux/bitfield.h>
9
#include <linux/string.h>
10
11
#include <uapi/fwctl/fwctl.h>
12
#include <uapi/fwctl/pds.h>
13
#include <linux/fwctl.h>
14
15
#include <linux/pds/pds_common.h>
16
#include <linux/pds/pds_core_if.h>
17
#include <linux/pds/pds_adminq.h>
18
#include <linux/pds/pds_auxbus.h>
19
20
/* Per-open-file user context, embedded in the fwctl core's uctx */
struct pdsfc_uctx {
	struct fwctl_uctx uctx;
	u32 uctx_caps;	/* capability bits copied from pdsfc_dev->caps at open */
};
24
25
/* Per-endpoint cache of the operations list queried from firmware */
struct pdsfc_rpc_endpoint_info {
	u32 endpoint;				/* endpoint id reported by firmware */
	dma_addr_t operations_pa;		/* DMA address of the operations buffer */
	struct pds_fwctl_query_data *operations; /* cached list; NULL until first RPC */
	struct mutex lock; /* lock for endpoint info management */
};
31
32
/* Driver-private device state; fwctl must be first for container_of() use */
struct pdsfc_dev {
	struct fwctl_device fwctl;
	struct pds_auxiliary_dev *padev;	/* handle for adminq commands */
	u32 caps;				/* capabilities advertised to uctx */
	struct pds_fwctl_ident ident;		/* cached firmware identity data */
	dma_addr_t endpoints_pa;		/* DMA address of endpoint list */
	struct pds_fwctl_query_data *endpoints;	/* endpoint list from firmware */
	struct pdsfc_rpc_endpoint_info *endpoint_info; /* one entry per endpoint */
};
41
42
static int pdsfc_open_uctx(struct fwctl_uctx *uctx)
43
{
44
struct pdsfc_dev *pdsfc = container_of(uctx->fwctl, struct pdsfc_dev, fwctl);
45
struct pdsfc_uctx *pdsfc_uctx = container_of(uctx, struct pdsfc_uctx, uctx);
46
47
pdsfc_uctx->uctx_caps = pdsfc->caps;
48
49
return 0;
50
}
51
52
/* No per-context resources to release; required by the fwctl_ops contract */
static void pdsfc_close_uctx(struct fwctl_uctx *uctx)
{
}
55
56
/*
 * Return a kzalloc'd fwctl_info_pds carrying this context's capabilities.
 * The fwctl core owns (and frees) the returned buffer.
 */
static void *pdsfc_info(struct fwctl_uctx *uctx, size_t *length)
{
	struct pdsfc_uctx *pctx = container_of(uctx, struct pdsfc_uctx, uctx);
	struct fwctl_info_pds *info = kzalloc(sizeof(*info), GFP_KERNEL);

	if (!info)
		return ERR_PTR(-ENOMEM);

	info->uctx_caps = pctx->uctx_caps;

	return info;
}
69
70
static int pdsfc_identify(struct pdsfc_dev *pdsfc)
71
{
72
struct device *dev = &pdsfc->fwctl.dev;
73
union pds_core_adminq_comp comp = {0};
74
union pds_core_adminq_cmd cmd;
75
struct pds_fwctl_ident *ident;
76
dma_addr_t ident_pa;
77
int err;
78
79
ident = dma_alloc_coherent(dev->parent, sizeof(*ident), &ident_pa, GFP_KERNEL);
80
if (!ident) {
81
dev_err(dev, "Failed to map ident buffer\n");
82
return -ENOMEM;
83
}
84
85
cmd = (union pds_core_adminq_cmd) {
86
.fwctl_ident = {
87
.opcode = PDS_FWCTL_CMD_IDENT,
88
.version = 0,
89
.len = cpu_to_le32(sizeof(*ident)),
90
.ident_pa = cpu_to_le64(ident_pa),
91
}
92
};
93
94
err = pds_client_adminq_cmd(pdsfc->padev, &cmd, sizeof(cmd), &comp, 0);
95
if (err)
96
dev_err(dev, "Failed to send adminq cmd opcode: %u err: %d\n",
97
cmd.fwctl_ident.opcode, err);
98
else
99
pdsfc->ident = *ident;
100
101
dma_free_coherent(dev->parent, sizeof(*ident), ident, ident_pa);
102
103
return err;
104
}
105
106
static void pdsfc_free_endpoints(struct pdsfc_dev *pdsfc)
107
{
108
struct device *dev = &pdsfc->fwctl.dev;
109
u32 num_endpoints;
110
int i;
111
112
if (!pdsfc->endpoints)
113
return;
114
115
num_endpoints = le32_to_cpu(pdsfc->endpoints->num_entries);
116
for (i = 0; pdsfc->endpoint_info && i < num_endpoints; i++)
117
mutex_destroy(&pdsfc->endpoint_info[i].lock);
118
vfree(pdsfc->endpoint_info);
119
pdsfc->endpoint_info = NULL;
120
dma_free_coherent(dev->parent, PAGE_SIZE,
121
pdsfc->endpoints, pdsfc->endpoints_pa);
122
pdsfc->endpoints = NULL;
123
pdsfc->endpoints_pa = DMA_MAPPING_ERROR;
124
}
125
126
static void pdsfc_free_operations(struct pdsfc_dev *pdsfc)
127
{
128
struct device *dev = &pdsfc->fwctl.dev;
129
u32 num_endpoints;
130
int i;
131
132
num_endpoints = le32_to_cpu(pdsfc->endpoints->num_entries);
133
for (i = 0; i < num_endpoints; i++) {
134
struct pdsfc_rpc_endpoint_info *ei = &pdsfc->endpoint_info[i];
135
136
if (ei->operations) {
137
dma_free_coherent(dev->parent, PAGE_SIZE,
138
ei->operations, ei->operations_pa);
139
ei->operations = NULL;
140
ei->operations_pa = DMA_MAPPING_ERROR;
141
}
142
}
143
}
144
145
static struct pds_fwctl_query_data *pdsfc_get_endpoints(struct pdsfc_dev *pdsfc,
146
dma_addr_t *pa)
147
{
148
struct device *dev = &pdsfc->fwctl.dev;
149
union pds_core_adminq_comp comp = {0};
150
struct pds_fwctl_query_data *data;
151
union pds_core_adminq_cmd cmd;
152
dma_addr_t data_pa;
153
int err;
154
155
data = dma_alloc_coherent(dev->parent, PAGE_SIZE, &data_pa, GFP_KERNEL);
156
if (!data) {
157
dev_err(dev, "Failed to map endpoint list\n");
158
return ERR_PTR(-ENOMEM);
159
}
160
161
cmd = (union pds_core_adminq_cmd) {
162
.fwctl_query = {
163
.opcode = PDS_FWCTL_CMD_QUERY,
164
.entity = PDS_FWCTL_RPC_ROOT,
165
.version = 0,
166
.query_data_buf_len = cpu_to_le32(PAGE_SIZE),
167
.query_data_buf_pa = cpu_to_le64(data_pa),
168
}
169
};
170
171
err = pds_client_adminq_cmd(pdsfc->padev, &cmd, sizeof(cmd), &comp, 0);
172
if (err) {
173
dev_err(dev, "Failed to send adminq cmd opcode: %u entity: %u err: %d\n",
174
cmd.fwctl_query.opcode, cmd.fwctl_query.entity, err);
175
dma_free_coherent(dev->parent, PAGE_SIZE, data, data_pa);
176
return ERR_PTR(err);
177
}
178
179
*pa = data_pa;
180
181
return data;
182
}
183
184
static int pdsfc_init_endpoints(struct pdsfc_dev *pdsfc)
185
{
186
struct pds_fwctl_query_data_endpoint *ep_entry;
187
u32 num_endpoints;
188
int i;
189
190
pdsfc->endpoints = pdsfc_get_endpoints(pdsfc, &pdsfc->endpoints_pa);
191
if (IS_ERR(pdsfc->endpoints))
192
return PTR_ERR(pdsfc->endpoints);
193
194
num_endpoints = le32_to_cpu(pdsfc->endpoints->num_entries);
195
pdsfc->endpoint_info = vcalloc(num_endpoints,
196
sizeof(*pdsfc->endpoint_info));
197
if (!pdsfc->endpoint_info) {
198
pdsfc_free_endpoints(pdsfc);
199
return -ENOMEM;
200
}
201
202
ep_entry = (struct pds_fwctl_query_data_endpoint *)pdsfc->endpoints->entries;
203
for (i = 0; i < num_endpoints; i++) {
204
mutex_init(&pdsfc->endpoint_info[i].lock);
205
pdsfc->endpoint_info[i].endpoint = le32_to_cpu(ep_entry[i].id);
206
}
207
208
return 0;
209
}
210
211
/*
 * Query firmware for the operations supported by endpoint @ep, then
 * translate each entry's firmware attribute into the corresponding
 * fwctl_rpc_scope value, in place in the DMA buffer.
 *
 * On success returns the DMA-coherent buffer (caller owns it) and stores
 * its bus address in *pa; on failure returns an ERR_PTR and nothing is
 * left allocated.
 */
static struct pds_fwctl_query_data *pdsfc_get_operations(struct pdsfc_dev *pdsfc,
							 dma_addr_t *pa, u32 ep)
{
	struct pds_fwctl_query_data_operation *entries;
	struct device *dev = &pdsfc->fwctl.dev;
	union pds_core_adminq_comp comp = {0};
	struct pds_fwctl_query_data *data;
	union pds_core_adminq_cmd cmd;
	dma_addr_t data_pa;
	u32 num_entries;
	int err;
	int i;

	/* Query the operations list for the given endpoint */
	data = dma_alloc_coherent(dev->parent, PAGE_SIZE, &data_pa, GFP_KERNEL);
	if (!data) {
		dev_err(dev, "Failed to map operations list\n");
		return ERR_PTR(-ENOMEM);
	}

	cmd = (union pds_core_adminq_cmd) {
		.fwctl_query = {
			.opcode = PDS_FWCTL_CMD_QUERY,
			.entity = PDS_FWCTL_RPC_ENDPOINT,
			.version = 0,
			.query_data_buf_len = cpu_to_le32(PAGE_SIZE),
			.query_data_buf_pa = cpu_to_le64(data_pa),
			.ep = cpu_to_le32(ep),
		}
	};

	err = pds_client_adminq_cmd(pdsfc->padev, &cmd, sizeof(cmd), &comp, 0);
	if (err) {
		dev_err(dev, "Failed to send adminq cmd opcode: %u entity: %u err: %d\n",
			cmd.fwctl_query.opcode, cmd.fwctl_query.entity, err);
		dma_free_coherent(dev->parent, PAGE_SIZE, data, data_pa);
		return ERR_PTR(err);
	}

	*pa = data_pa;

	entries = (struct pds_fwctl_query_data_operation *)data->entries;
	num_entries = le32_to_cpu(data->num_entries);
	dev_dbg(dev, "num_entries %d\n", num_entries);
	for (i = 0; i < num_entries; i++) {

		/* Translate FW command attribute to fwctl scope */
		switch (entries[i].scope) {
		case PDSFC_FW_CMD_ATTR_READ:
		case PDSFC_FW_CMD_ATTR_WRITE:
		case PDSFC_FW_CMD_ATTR_SYNC:
			entries[i].scope = FWCTL_RPC_CONFIGURATION;
			break;
		case PDSFC_FW_CMD_ATTR_DEBUG_READ:
			entries[i].scope = FWCTL_RPC_DEBUG_READ_ONLY;
			break;
		case PDSFC_FW_CMD_ATTR_DEBUG_WRITE:
			entries[i].scope = FWCTL_RPC_DEBUG_WRITE;
			break;
		default:
			/* unknown attributes require the most privileged scope */
			entries[i].scope = FWCTL_RPC_DEBUG_WRITE_FULL;
			break;
		}
		dev_dbg(dev, "endpoint %d operation: id %x scope %d\n",
			ep, le32_to_cpu(entries[i].id), entries[i].scope);
	}

	return data;
}
280
281
static int pdsfc_validate_rpc(struct pdsfc_dev *pdsfc,
282
struct fwctl_rpc_pds *rpc,
283
enum fwctl_rpc_scope scope)
284
{
285
struct pds_fwctl_query_data_operation *op_entry;
286
struct pdsfc_rpc_endpoint_info *ep_info = NULL;
287
struct device *dev = &pdsfc->fwctl.dev;
288
u32 num_entries;
289
int i;
290
291
/* validate rpc in_len & out_len based
292
* on ident.max_req_sz & max_resp_sz
293
*/
294
if (rpc->in.len > le32_to_cpu(pdsfc->ident.max_req_sz)) {
295
dev_dbg(dev, "Invalid request size %u, max %u\n",
296
rpc->in.len, le32_to_cpu(pdsfc->ident.max_req_sz));
297
return -EINVAL;
298
}
299
300
if (rpc->out.len > le32_to_cpu(pdsfc->ident.max_resp_sz)) {
301
dev_dbg(dev, "Invalid response size %u, max %u\n",
302
rpc->out.len, le32_to_cpu(pdsfc->ident.max_resp_sz));
303
return -EINVAL;
304
}
305
306
num_entries = le32_to_cpu(pdsfc->endpoints->num_entries);
307
for (i = 0; i < num_entries; i++) {
308
if (pdsfc->endpoint_info[i].endpoint == rpc->in.ep) {
309
ep_info = &pdsfc->endpoint_info[i];
310
break;
311
}
312
}
313
if (!ep_info) {
314
dev_dbg(dev, "Invalid endpoint %d\n", rpc->in.ep);
315
return -EINVAL;
316
}
317
318
/* query and cache this endpoint's operations */
319
mutex_lock(&ep_info->lock);
320
if (!ep_info->operations) {
321
struct pds_fwctl_query_data *operations;
322
323
operations = pdsfc_get_operations(pdsfc,
324
&ep_info->operations_pa,
325
rpc->in.ep);
326
if (IS_ERR(operations)) {
327
mutex_unlock(&ep_info->lock);
328
return -ENOMEM;
329
}
330
ep_info->operations = operations;
331
}
332
mutex_unlock(&ep_info->lock);
333
334
/* reject unsupported and/or out of scope commands */
335
op_entry = (struct pds_fwctl_query_data_operation *)ep_info->operations->entries;
336
num_entries = le32_to_cpu(ep_info->operations->num_entries);
337
for (i = 0; i < num_entries; i++) {
338
if (PDS_FWCTL_RPC_OPCODE_CMP(rpc->in.op, le32_to_cpu(op_entry[i].id))) {
339
if (scope < op_entry[i].scope)
340
return -EPERM;
341
return 0;
342
}
343
}
344
345
dev_dbg(dev, "Invalid operation %d for endpoint %d\n", rpc->in.op, rpc->in.ep);
346
347
return -EINVAL;
348
}
349
350
/*
 * Execute a validated firmware RPC on behalf of userspace.
 *
 * The request payload is copied in from userspace and DMA-mapped
 * TO_DEVICE; a kernel bounce buffer is allocated and DMA-mapped
 * FROM_DEVICE for the response, which is copied back to the user's
 * out.payload pointer on success.  The fwctl_rpc_pds struct @in is
 * updated in place (out.retval) and returned as the result, so the
 * fwctl core sees the same buffer with *out_len == in_len.
 *
 * Returns @in on success or an ERR_PTR on failure.
 *
 * NOTE(review): a DMA address of 0 is used as the "not mapped" sentinel
 * for the unmap decisions below — assumes the platform never hands out
 * bus address 0 for these mappings; confirm against the DMA API usage
 * elsewhere in the pds drivers.
 */
static void *pdsfc_fw_rpc(struct fwctl_uctx *uctx, enum fwctl_rpc_scope scope,
			  void *in, size_t in_len, size_t *out_len)
{
	struct pdsfc_dev *pdsfc = container_of(uctx->fwctl, struct pdsfc_dev, fwctl);
	struct device *dev = &uctx->fwctl->dev;
	union pds_core_adminq_comp comp = {0};
	dma_addr_t out_payload_dma_addr = 0;
	dma_addr_t in_payload_dma_addr = 0;
	struct fwctl_rpc_pds *rpc = in;
	union pds_core_adminq_cmd cmd;
	void *out_payload = NULL;
	void *in_payload = NULL;
	void *out = NULL;
	int err;

	/* size/endpoint/operation/scope checks before touching any memory */
	err = pdsfc_validate_rpc(pdsfc, rpc, scope);
	if (err)
		return ERR_PTR(err);

	if (rpc->in.len > 0) {
		in_payload = memdup_user(u64_to_user_ptr(rpc->in.payload), rpc->in.len);
		if (IS_ERR(in_payload)) {
			dev_dbg(dev, "Failed to copy in_payload from user\n");
			return in_payload;
		}

		in_payload_dma_addr = dma_map_single(dev->parent, in_payload,
						     rpc->in.len, DMA_TO_DEVICE);
		err = dma_mapping_error(dev->parent, in_payload_dma_addr);
		if (err) {
			dev_dbg(dev, "Failed to map in_payload\n");
			goto err_in_payload;
		}
	}

	if (rpc->out.len > 0) {
		out_payload = kzalloc(rpc->out.len, GFP_KERNEL);
		if (!out_payload) {
			dev_dbg(dev, "Failed to allocate out_payload\n");
			err = -ENOMEM;
			goto err_out_payload;
		}

		out_payload_dma_addr = dma_map_single(dev->parent, out_payload,
						      rpc->out.len, DMA_FROM_DEVICE);
		err = dma_mapping_error(dev->parent, out_payload_dma_addr);
		if (err) {
			dev_dbg(dev, "Failed to map out_payload\n");
			goto err_out_payload;
		}
	}

	/* both payloads are passed indirectly via their DMA addresses */
	cmd = (union pds_core_adminq_cmd) {
		.fwctl_rpc = {
			.opcode = PDS_FWCTL_CMD_RPC,
			.flags = cpu_to_le16(PDS_FWCTL_RPC_IND_REQ | PDS_FWCTL_RPC_IND_RESP),
			.ep = cpu_to_le32(rpc->in.ep),
			.op = cpu_to_le32(rpc->in.op),
			.req_pa = cpu_to_le64(in_payload_dma_addr),
			.req_sz = cpu_to_le32(rpc->in.len),
			.resp_pa = cpu_to_le64(out_payload_dma_addr),
			.resp_sz = cpu_to_le32(rpc->out.len),
		}
	};

	err = pds_client_adminq_cmd(pdsfc->padev, &cmd, sizeof(cmd), &comp, 0);
	if (err) {
		dev_dbg(dev, "%s: ep %d op %x req_pa %llx req_sz %d req_sg %d resp_pa %llx resp_sz %d resp_sg %d err %d\n",
			__func__, rpc->in.ep, rpc->in.op,
			cmd.fwctl_rpc.req_pa, cmd.fwctl_rpc.req_sz, cmd.fwctl_rpc.req_sg_elems,
			cmd.fwctl_rpc.resp_pa, cmd.fwctl_rpc.resp_sz, cmd.fwctl_rpc.resp_sg_elems,
			err);
		goto done;
	}

	dynamic_hex_dump("out ", DUMP_PREFIX_OFFSET, 16, 1, out_payload, rpc->out.len, true);

	/* copy_to_user() with a zero rpc->out.len is a harmless no-op */
	if (copy_to_user(u64_to_user_ptr(rpc->out.payload), out_payload, rpc->out.len)) {
		dev_dbg(dev, "Failed to copy out_payload to user\n");
		out = ERR_PTR(-EFAULT);
		goto done;
	}

	/* hand the updated rpc struct back to the fwctl core */
	rpc->out.retval = le32_to_cpu(comp.fwctl_rpc.err);
	*out_len = in_len;
	out = in;

done:
	if (out_payload_dma_addr)
		dma_unmap_single(dev->parent, out_payload_dma_addr,
				 rpc->out.len, DMA_FROM_DEVICE);
err_out_payload:
	kfree(out_payload);

	if (in_payload_dma_addr)
		dma_unmap_single(dev->parent, in_payload_dma_addr,
				 rpc->in.len, DMA_TO_DEVICE);
err_in_payload:
	kfree(in_payload);
	if (err)
		return ERR_PTR(err);

	return out;
}
454
455
/* Callbacks the fwctl core invokes on behalf of /dev/fwctl users */
static const struct fwctl_ops pdsfc_ops = {
	.device_type = FWCTL_DEVICE_TYPE_PDS,
	.uctx_size = sizeof(struct pdsfc_uctx),
	.open_uctx = pdsfc_open_uctx,
	.close_uctx = pdsfc_close_uctx,
	.info = pdsfc_info,
	.fw_rpc = pdsfc_fw_rpc,
};
463
464
static int pdsfc_probe(struct auxiliary_device *adev,
465
const struct auxiliary_device_id *id)
466
{
467
struct pds_auxiliary_dev *padev =
468
container_of(adev, struct pds_auxiliary_dev, aux_dev);
469
struct device *dev = &adev->dev;
470
struct pdsfc_dev *pdsfc;
471
int err;
472
473
pdsfc = fwctl_alloc_device(&padev->vf_pdev->dev, &pdsfc_ops,
474
struct pdsfc_dev, fwctl);
475
if (!pdsfc)
476
return -ENOMEM;
477
pdsfc->padev = padev;
478
479
err = pdsfc_identify(pdsfc);
480
if (err) {
481
fwctl_put(&pdsfc->fwctl);
482
return dev_err_probe(dev, err, "Failed to identify device\n");
483
}
484
485
err = pdsfc_init_endpoints(pdsfc);
486
if (err) {
487
fwctl_put(&pdsfc->fwctl);
488
return dev_err_probe(dev, err, "Failed to init endpoints\n");
489
}
490
491
pdsfc->caps = PDS_FWCTL_QUERY_CAP | PDS_FWCTL_SEND_CAP;
492
493
err = fwctl_register(&pdsfc->fwctl);
494
if (err) {
495
pdsfc_free_endpoints(pdsfc);
496
fwctl_put(&pdsfc->fwctl);
497
return dev_err_probe(dev, err, "Failed to register device\n");
498
}
499
500
auxiliary_set_drvdata(adev, pdsfc);
501
502
return 0;
503
}
504
505
/*
 * Tear down in reverse order of probe: unregister first so no new user
 * contexts can arrive, then free the cached firmware data, and finally
 * drop the probe-time reference on the fwctl device.
 */
static void pdsfc_remove(struct auxiliary_device *adev)
{
	struct pdsfc_dev *pdsfc = auxiliary_get_drvdata(adev);

	fwctl_unregister(&pdsfc->fwctl);
	pdsfc_free_operations(pdsfc);
	pdsfc_free_endpoints(pdsfc);

	fwctl_put(&pdsfc->fwctl);
}
515
516
/* Match the "<pds_core>.<fwctl>" auxiliary devices created by pds_core */
static const struct auxiliary_device_id pdsfc_id_table[] = {
	{.name = PDS_CORE_DRV_NAME "." PDS_DEV_TYPE_FWCTL_STR },
	{}
};
MODULE_DEVICE_TABLE(auxiliary, pdsfc_id_table);
521
522
/* Auxiliary-bus driver glue; module init/exit supplied by the macro below */
static struct auxiliary_driver pdsfc_driver = {
	.name = "pds_fwctl",
	.probe = pdsfc_probe,
	.remove = pdsfc_remove,
	.id_table = pdsfc_id_table,
};

module_auxiliary_driver(pdsfc_driver);
530
531
MODULE_IMPORT_NS("FWCTL");
532
MODULE_DESCRIPTION("pds fwctl driver");
533
MODULE_AUTHOR("Shannon Nelson <[email protected]>");
534
MODULE_AUTHOR("Brett Creeley <[email protected]>");
535
MODULE_LICENSE("GPL");
536
537