GitHub Repository: torvalds/linux
Path: blob/master/arch/arm64/kvm/hyp/nvhe/ffa.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * FF-A v1.0 proxy to filter out invalid memory-sharing SMC calls issued by
 * the host. FF-A is a slightly more palatable abbreviation of "Arm Firmware
 * Framework for Arm A-profile", which is specified by Arm in document
 * number DEN0077.
 *
 * Copyright (C) 2022 - Google LLC
 * Author: Andrew Walbran <[email protected]>
 *
 * This driver hooks into the SMC trapping logic for the host and intercepts
 * all calls falling within the FF-A range. Each call is either:
 *
 *	- Forwarded on unmodified to the SPMD at EL3
 *	- Rejected as "unsupported"
 *	- Accompanied by a host stage-2 page-table check/update and reissued
 *
 * Consequently, any attempts by the host to make guest memory pages
 * accessible to the secure world using FF-A will be detected either here
 * (in the case that the memory is already owned by the guest) or during
 * donation to the guest (in the case that the memory was previously shared
 * with the secure world).
 *
 * To allow the rolling-back of page-table updates and FF-A calls in the
 * event of failure, operations involving the RXTX buffers are locked for
 * the duration and are therefore serialised.
 */

#include <linux/arm-smccc.h>
#include <linux/arm_ffa.h>
#include <asm/kvm_pkvm.h>

#include <nvhe/ffa.h>
#include <nvhe/mem_protect.h>
#include <nvhe/memory.h>
#include <nvhe/trap_handler.h>
#include <nvhe/spinlock.h>

/*
 * "ID value 0 must be returned at the Non-secure physical FF-A instance"
 * We share this ID with the host.
 */
#define HOST_FFA_ID	0

/*
 * A buffer to hold the maximum descriptor size we can see from the host,
 * which is required when the SPMD returns a fragmented FFA_MEM_RETRIEVE_RESP
 * when resolving the handle on the reclaim path.
 */
struct kvm_ffa_descriptor_buffer {
        void *buf;
        size_t len;
};

static struct kvm_ffa_descriptor_buffer ffa_desc_buf;

struct kvm_ffa_buffers {
        hyp_spinlock_t lock;
        void *tx;
        void *rx;
};

/*
 * Note that we don't currently lock these buffers explicitly, instead
 * relying on the locking of the host FFA buffers as we only have one
 * client.
 */
static struct kvm_ffa_buffers hyp_buffers;
static struct kvm_ffa_buffers host_buffers;
static u32 hyp_ffa_version;
static bool has_version_negotiated;
static hyp_spinlock_t version_lock;

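/*
 * Helpers for converting an FF-A return code into the SMCCC register layout
 * handed back to the caller: FFA_ERROR/FFA_SUCCESS in a0 and the error code
 * or property value in a2.
 */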
static void ffa_to_smccc_error(struct arm_smccc_1_2_regs *res, u64 ffa_errno)
{
        *res = (struct arm_smccc_1_2_regs) {
                .a0 = FFA_ERROR,
                .a2 = ffa_errno,
        };
}

static void ffa_to_smccc_res_prop(struct arm_smccc_1_2_regs *res, int ret, u64 prop)
{
        if (ret == FFA_RET_SUCCESS) {
                *res = (struct arm_smccc_1_2_regs) { .a0 = FFA_SUCCESS,
                                                     .a2 = prop };
        } else {
                ffa_to_smccc_error(res, ret);
        }
}

static void ffa_to_smccc_res(struct arm_smccc_1_2_regs *res, int ret)
{
        ffa_to_smccc_res_prop(res, ret, 0);
}

static void ffa_set_retval(struct kvm_cpu_context *ctxt,
                           struct arm_smccc_1_2_regs *res)
{
        cpu_reg(ctxt, 0) = res->a0;
        cpu_reg(ctxt, 1) = res->a1;
        cpu_reg(ctxt, 2) = res->a2;
        cpu_reg(ctxt, 3) = res->a3;
        cpu_reg(ctxt, 4) = res->a4;
        cpu_reg(ctxt, 5) = res->a5;
        cpu_reg(ctxt, 6) = res->a6;
        cpu_reg(ctxt, 7) = res->a7;

        /*
         * DEN0028C 2.6: SMC32/HVC32 call from aarch64 must preserve x8-x30.
         *
         * In FF-A 1.2, we cannot rely on the function ID sent by the caller to
         * detect 32-bit calls because the CPU cycle management interfaces (e.g.
         * FFA_MSG_WAIT, FFA_RUN) are 32-bit only but can have 64-bit responses.
         *
         * FF-A 1.3 introduces 64-bit variants of the CPU cycle management
         * interfaces. Moreover, FF-A 1.3 clarifies that SMC32 direct requests
         * complete with SMC32 direct responses, which *should* allow us to use
         * the function ID sent by the caller to determine whether to return
         * x8-x17.
         *
         * Note that we also cannot rely on function IDs in the response.
         *
         * Given the above, assume SMC64 and send back x0-x17 unconditionally
         * as the passthrough code (__kvm_hyp_host_forward_smc) does the same.
         */
        cpu_reg(ctxt, 8) = res->a8;
        cpu_reg(ctxt, 9) = res->a9;
        cpu_reg(ctxt, 10) = res->a10;
        cpu_reg(ctxt, 11) = res->a11;
        cpu_reg(ctxt, 12) = res->a12;
        cpu_reg(ctxt, 13) = res->a13;
        cpu_reg(ctxt, 14) = res->a14;
        cpu_reg(ctxt, 15) = res->a15;
        cpu_reg(ctxt, 16) = res->a16;
        cpu_reg(ctxt, 17) = res->a17;
}

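/*
 * An FF-A call is a fast SMCCC call owned by the standard service range
 * whose function number lies within the FF-A window.
 */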
static bool is_ffa_call(u64 func_id)
{
        return ARM_SMCCC_IS_FAST_CALL(func_id) &&
               ARM_SMCCC_OWNER_NUM(func_id) == ARM_SMCCC_OWNER_STANDARD &&
               ARM_SMCCC_FUNC_NUM(func_id) >= FFA_MIN_FUNC_NUM &&
               ARM_SMCCC_FUNC_NUM(func_id) <= FFA_MAX_FUNC_NUM;
}

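/*
 * Register the hypervisor's own TX/RX mailbox pair with the SPMD at EL3.
 * The buffers are passed by physical address and sized in FF-A pages;
 * ffa_unmap_hyp_buffers() below tears the mapping down again.
 */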
static int ffa_map_hyp_buffers(u64 ffa_page_count)
{
        struct arm_smccc_1_2_regs res;

        arm_smccc_1_2_smc(&(struct arm_smccc_1_2_regs) {
                .a0 = FFA_FN64_RXTX_MAP,
                .a1 = hyp_virt_to_phys(hyp_buffers.tx),
                .a2 = hyp_virt_to_phys(hyp_buffers.rx),
                .a3 = ffa_page_count,
        }, &res);

        return res.a0 == FFA_SUCCESS ? FFA_RET_SUCCESS : res.a2;
}

static int ffa_unmap_hyp_buffers(void)
{
        struct arm_smccc_1_2_regs res;

        arm_smccc_1_2_smc(&(struct arm_smccc_1_2_regs) {
                .a0 = FFA_RXTX_UNMAP,
                .a1 = HOST_FFA_ID,
        }, &res);

        return res.a0 == FFA_SUCCESS ? FFA_RET_SUCCESS : res.a2;
}

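/*
 * Thin wrappers around the FF-A memory management ABI: each marshals its
 * arguments into the SMCCC register layout and issues the call to EL3.
 */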
static void ffa_mem_frag_tx(struct arm_smccc_1_2_regs *res, u32 handle_lo,
                            u32 handle_hi, u32 fraglen, u32 endpoint_id)
{
        arm_smccc_1_2_smc(&(struct arm_smccc_1_2_regs) {
                .a0 = FFA_MEM_FRAG_TX,
                .a1 = handle_lo,
                .a2 = handle_hi,
                .a3 = fraglen,
                .a4 = endpoint_id,
        }, res);
}

static void ffa_mem_frag_rx(struct arm_smccc_1_2_regs *res, u32 handle_lo,
                            u32 handle_hi, u32 fragoff)
{
        arm_smccc_1_2_smc(&(struct arm_smccc_1_2_regs) {
                .a0 = FFA_MEM_FRAG_RX,
                .a1 = handle_lo,
                .a2 = handle_hi,
                .a3 = fragoff,
                .a4 = HOST_FFA_ID,
        }, res);
}

static void ffa_mem_xfer(struct arm_smccc_1_2_regs *res, u64 func_id, u32 len,
                         u32 fraglen)
{
        arm_smccc_1_2_smc(&(struct arm_smccc_1_2_regs) {
                .a0 = func_id,
                .a1 = len,
                .a2 = fraglen,
        }, res);
}

static void ffa_mem_reclaim(struct arm_smccc_1_2_regs *res, u32 handle_lo,
                            u32 handle_hi, u32 flags)
{
        arm_smccc_1_2_smc(&(struct arm_smccc_1_2_regs) {
                .a0 = FFA_MEM_RECLAIM,
                .a1 = handle_lo,
                .a2 = handle_hi,
                .a3 = flags,
        }, res);
}

static void ffa_retrieve_req(struct arm_smccc_1_2_regs *res, u32 len)
{
        arm_smccc_1_2_smc(&(struct arm_smccc_1_2_regs) {
                .a0 = FFA_FN64_MEM_RETRIEVE_REQ,
                .a1 = len,
                .a2 = len,
        }, res);
}

static void ffa_rx_release(struct arm_smccc_1_2_regs *res)
{
        arm_smccc_1_2_smc(&(struct arm_smccc_1_2_regs) {
                .a0 = FFA_RX_RELEASE,
        }, res);
}

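/*
 * FFA_FN64_RXTX_MAP handler: validate the host's mailbox registration, map
 * the hypervisor's own buffers at the SPMD, then share and pin the host's
 * TX/RX pages so that EL2 can safely access them.
 */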
static void do_ffa_rxtx_map(struct arm_smccc_1_2_regs *res,
                            struct kvm_cpu_context *ctxt)
{
        DECLARE_REG(phys_addr_t, tx, ctxt, 1);
        DECLARE_REG(phys_addr_t, rx, ctxt, 2);
        DECLARE_REG(u32, npages, ctxt, 3);
        int ret = 0;
        void *rx_virt, *tx_virt;

        if (npages != (KVM_FFA_MBOX_NR_PAGES * PAGE_SIZE) / FFA_PAGE_SIZE) {
                ret = FFA_RET_INVALID_PARAMETERS;
                goto out;
        }

        if (!PAGE_ALIGNED(tx) || !PAGE_ALIGNED(rx)) {
                ret = FFA_RET_INVALID_PARAMETERS;
                goto out;
        }

        hyp_spin_lock(&host_buffers.lock);
        if (host_buffers.tx) {
                ret = FFA_RET_DENIED;
                goto out_unlock;
        }

        /*
         * Map our hypervisor buffers into the SPMD before mapping and
         * pinning the host buffers in our own address space.
         */
        ret = ffa_map_hyp_buffers(npages);
        if (ret)
                goto out_unlock;

        ret = __pkvm_host_share_hyp(hyp_phys_to_pfn(tx));
        if (ret) {
                ret = FFA_RET_INVALID_PARAMETERS;
                goto err_unmap;
        }

        ret = __pkvm_host_share_hyp(hyp_phys_to_pfn(rx));
        if (ret) {
                ret = FFA_RET_INVALID_PARAMETERS;
                goto err_unshare_tx;
        }

        tx_virt = hyp_phys_to_virt(tx);
        ret = hyp_pin_shared_mem(tx_virt, tx_virt + 1);
        if (ret) {
                ret = FFA_RET_INVALID_PARAMETERS;
                goto err_unshare_rx;
        }

        rx_virt = hyp_phys_to_virt(rx);
        ret = hyp_pin_shared_mem(rx_virt, rx_virt + 1);
        if (ret) {
                ret = FFA_RET_INVALID_PARAMETERS;
                goto err_unpin_tx;
        }

        host_buffers.tx = tx_virt;
        host_buffers.rx = rx_virt;

out_unlock:
        hyp_spin_unlock(&host_buffers.lock);
out:
        ffa_to_smccc_res(res, ret);
        return;

err_unpin_tx:
        hyp_unpin_shared_mem(tx_virt, tx_virt + 1);
err_unshare_rx:
        __pkvm_host_unshare_hyp(hyp_phys_to_pfn(rx));
err_unshare_tx:
        __pkvm_host_unshare_hyp(hyp_phys_to_pfn(tx));
err_unmap:
        ffa_unmap_hyp_buffers();
        goto out_unlock;
}

static void do_ffa_rxtx_unmap(struct arm_smccc_1_2_regs *res,
                              struct kvm_cpu_context *ctxt)
{
        DECLARE_REG(u32, id, ctxt, 1);
        int ret = 0;

        if (id != HOST_FFA_ID) {
                ret = FFA_RET_INVALID_PARAMETERS;
                goto out;
        }

        hyp_spin_lock(&host_buffers.lock);
        if (!host_buffers.tx) {
                ret = FFA_RET_INVALID_PARAMETERS;
                goto out_unlock;
        }

        hyp_unpin_shared_mem(host_buffers.tx, host_buffers.tx + 1);
        WARN_ON(__pkvm_host_unshare_hyp(hyp_virt_to_pfn(host_buffers.tx)));
        host_buffers.tx = NULL;

        hyp_unpin_shared_mem(host_buffers.rx, host_buffers.rx + 1);
        WARN_ON(__pkvm_host_unshare_hyp(hyp_virt_to_pfn(host_buffers.rx)));
        host_buffers.rx = NULL;

        ffa_unmap_hyp_buffers();

out_unlock:
        hyp_spin_unlock(&host_buffers.lock);
out:
        ffa_to_smccc_res(res, ret);
}

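/*
 * Walk a set of constituent ranges and update the host stage-2 accordingly.
 * Both helpers return the number of ranges successfully transitioned so
 * that callers can roll back after a partial failure.
 */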
static u32 __ffa_host_share_ranges(struct ffa_mem_region_addr_range *ranges,
                                   u32 nranges)
{
        u32 i;

        for (i = 0; i < nranges; ++i) {
                struct ffa_mem_region_addr_range *range = &ranges[i];
                u64 sz = (u64)range->pg_cnt * FFA_PAGE_SIZE;
                u64 pfn = hyp_phys_to_pfn(range->address);

                if (!PAGE_ALIGNED(sz))
                        break;

                if (__pkvm_host_share_ffa(pfn, sz / PAGE_SIZE))
                        break;
        }

        return i;
}

static u32 __ffa_host_unshare_ranges(struct ffa_mem_region_addr_range *ranges,
                                     u32 nranges)
{
        u32 i;

        for (i = 0; i < nranges; ++i) {
                struct ffa_mem_region_addr_range *range = &ranges[i];
                u64 sz = (u64)range->pg_cnt * FFA_PAGE_SIZE;
                u64 pfn = hyp_phys_to_pfn(range->address);

                if (!PAGE_ALIGNED(sz))
                        break;

                if (__pkvm_host_unshare_ffa(pfn, sz / PAGE_SIZE))
                        break;
        }

        return i;
}

static int ffa_host_share_ranges(struct ffa_mem_region_addr_range *ranges,
                                 u32 nranges)
{
        u32 nshared = __ffa_host_share_ranges(ranges, nranges);
        int ret = 0;

        if (nshared != nranges) {
                WARN_ON(__ffa_host_unshare_ranges(ranges, nshared) != nshared);
                ret = FFA_RET_DENIED;
        }

        return ret;
}

static int ffa_host_unshare_ranges(struct ffa_mem_region_addr_range *ranges,
                                   u32 nranges)
{
        u32 nunshared = __ffa_host_unshare_ranges(ranges, nranges);
        int ret = 0;

        if (nunshared != nranges) {
                WARN_ON(__ffa_host_share_ranges(ranges, nunshared) != nunshared);
                ret = FFA_RET_DENIED;
        }

        return ret;
}

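/*
 * FFA_MEM_FRAG_TX handler: the host is transmitting a further fragment of a
 * memory descriptor, so share the ranges it describes before forwarding the
 * fragment to EL3, reclaiming the whole transaction if that fails.
 */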
static void do_ffa_mem_frag_tx(struct arm_smccc_1_2_regs *res,
                               struct kvm_cpu_context *ctxt)
{
        DECLARE_REG(u32, handle_lo, ctxt, 1);
        DECLARE_REG(u32, handle_hi, ctxt, 2);
        DECLARE_REG(u32, fraglen, ctxt, 3);
        DECLARE_REG(u32, endpoint_id, ctxt, 4);
        struct ffa_mem_region_addr_range *buf;
        int ret = FFA_RET_INVALID_PARAMETERS;
        u32 nr_ranges;

        if (fraglen > KVM_FFA_MBOX_NR_PAGES * PAGE_SIZE)
                goto out;

        if (fraglen % sizeof(*buf))
                goto out;

        hyp_spin_lock(&host_buffers.lock);
        if (!host_buffers.tx)
                goto out_unlock;

        buf = hyp_buffers.tx;
        memcpy(buf, host_buffers.tx, fraglen);
        nr_ranges = fraglen / sizeof(*buf);

        ret = ffa_host_share_ranges(buf, nr_ranges);
        if (ret) {
                /*
                 * We're effectively aborting the transaction, so we need
                 * to restore the global state to what it was prior to
                 * transmission of the first fragment.
                 */
                ffa_mem_reclaim(res, handle_lo, handle_hi, 0);
                WARN_ON(res->a0 != FFA_SUCCESS);
                goto out_unlock;
        }

        ffa_mem_frag_tx(res, handle_lo, handle_hi, fraglen, endpoint_id);
        if (res->a0 != FFA_SUCCESS && res->a0 != FFA_MEM_FRAG_RX)
                WARN_ON(ffa_host_unshare_ranges(buf, nr_ranges));

out_unlock:
        hyp_spin_unlock(&host_buffers.lock);
out:
        if (ret)
                ffa_to_smccc_res(res, ret);

        /*
         * If for any reason this did not succeed, we're in trouble as we have
         * now lost the content of the previous fragments and we can't roll
         * back the host stage-2 changes. The pages previously marked as
         * shared will remain stuck in that state forever, preventing the host
         * from sharing/donating them again and possibly leading to subsequent
         * failures, but this will not compromise confidentiality.
         */
        return;
}

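/*
 * Common handler for FFA_MEM_SHARE and FFA_MEM_LEND: copy the first fragment
 * of the descriptor into the hypervisor's private TX buffer, sanity-check it,
 * mark the constituent ranges as shared in the host stage-2 and only then
 * forward the call to EL3, undoing the share if the SPMD rejects it.
 */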
static void __do_ffa_mem_xfer(const u64 func_id,
                              struct arm_smccc_1_2_regs *res,
                              struct kvm_cpu_context *ctxt)
{
        DECLARE_REG(u32, len, ctxt, 1);
        DECLARE_REG(u32, fraglen, ctxt, 2);
        DECLARE_REG(u64, addr_mbz, ctxt, 3);
        DECLARE_REG(u32, npages_mbz, ctxt, 4);
        struct ffa_mem_region_attributes *ep_mem_access;
        struct ffa_composite_mem_region *reg;
        struct ffa_mem_region *buf;
        u32 offset, nr_ranges;
        int ret = 0;

        if (addr_mbz || npages_mbz || fraglen > len ||
            fraglen > KVM_FFA_MBOX_NR_PAGES * PAGE_SIZE) {
                ret = FFA_RET_INVALID_PARAMETERS;
                goto out;
        }

        if (fraglen < sizeof(struct ffa_mem_region) +
                      sizeof(struct ffa_mem_region_attributes)) {
                ret = FFA_RET_INVALID_PARAMETERS;
                goto out;
        }

        hyp_spin_lock(&host_buffers.lock);
        if (!host_buffers.tx) {
                ret = FFA_RET_INVALID_PARAMETERS;
                goto out_unlock;
        }

        if (len > ffa_desc_buf.len) {
                ret = FFA_RET_NO_MEMORY;
                goto out_unlock;
        }

        buf = hyp_buffers.tx;
        memcpy(buf, host_buffers.tx, fraglen);

        ep_mem_access = (void *)buf +
                        ffa_mem_desc_offset(buf, 0, hyp_ffa_version);
        offset = ep_mem_access->composite_off;
        if (!offset || buf->ep_count != 1 || buf->sender_id != HOST_FFA_ID) {
                ret = FFA_RET_INVALID_PARAMETERS;
                goto out_unlock;
        }

        if (fraglen < offset + sizeof(struct ffa_composite_mem_region)) {
                ret = FFA_RET_INVALID_PARAMETERS;
                goto out_unlock;
        }

        reg = (void *)buf + offset;
        nr_ranges = ((void *)buf + fraglen) - (void *)reg->constituents;
        if (nr_ranges % sizeof(reg->constituents[0])) {
                ret = FFA_RET_INVALID_PARAMETERS;
                goto out_unlock;
        }

        nr_ranges /= sizeof(reg->constituents[0]);
        ret = ffa_host_share_ranges(reg->constituents, nr_ranges);
        if (ret)
                goto out_unlock;

        ffa_mem_xfer(res, func_id, len, fraglen);
        if (fraglen != len) {
                if (res->a0 != FFA_MEM_FRAG_RX)
                        goto err_unshare;

                if (res->a3 != fraglen)
                        goto err_unshare;
        } else if (res->a0 != FFA_SUCCESS) {
                goto err_unshare;
        }

out_unlock:
        hyp_spin_unlock(&host_buffers.lock);
out:
        if (ret)
                ffa_to_smccc_res(res, ret);
        return;

err_unshare:
        WARN_ON(ffa_host_unshare_ranges(reg->constituents, nr_ranges));
        goto out_unlock;
}

#define do_ffa_mem_xfer(fid, res, ctxt)                                 \
        do {                                                            \
                BUILD_BUG_ON((fid) != FFA_FN64_MEM_SHARE &&             \
                             (fid) != FFA_FN64_MEM_LEND);               \
                __do_ffa_mem_xfer((fid), (res), (ctxt));                \
        } while (0);

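/*
 * FFA_MEM_RECLAIM handler: retrieve the full (possibly fragmented)
 * descriptor for the handle from the SPMD so that, once the reclaim has
 * succeeded at EL3, the constituent ranges can be unshared in the host
 * stage-2.
 */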
static void do_ffa_mem_reclaim(struct arm_smccc_1_2_regs *res,
                               struct kvm_cpu_context *ctxt)
{
        DECLARE_REG(u32, handle_lo, ctxt, 1);
        DECLARE_REG(u32, handle_hi, ctxt, 2);
        DECLARE_REG(u32, flags, ctxt, 3);
        struct ffa_mem_region_attributes *ep_mem_access;
        struct ffa_composite_mem_region *reg;
        u32 offset, len, fraglen, fragoff;
        struct ffa_mem_region *buf;
        int ret = 0;
        u64 handle;

        handle = PACK_HANDLE(handle_lo, handle_hi);

        hyp_spin_lock(&host_buffers.lock);

        buf = hyp_buffers.tx;
        *buf = (struct ffa_mem_region) {
                .sender_id = HOST_FFA_ID,
                .handle = handle,
        };

        ffa_retrieve_req(res, sizeof(*buf));
        buf = hyp_buffers.rx;
        if (res->a0 != FFA_MEM_RETRIEVE_RESP)
                goto out_unlock;

        len = res->a1;
        fraglen = res->a2;

        ep_mem_access = (void *)buf +
                        ffa_mem_desc_offset(buf, 0, hyp_ffa_version);
        offset = ep_mem_access->composite_off;
        /*
         * We can trust the SPMD to get this right, but let's at least
         * check that we end up with something that doesn't look _completely_
         * bogus.
         */
        if (WARN_ON(offset > len ||
                    fraglen > KVM_FFA_MBOX_NR_PAGES * PAGE_SIZE)) {
                ret = FFA_RET_ABORTED;
                ffa_rx_release(res);
                goto out_unlock;
        }

        if (len > ffa_desc_buf.len) {
                ret = FFA_RET_NO_MEMORY;
                ffa_rx_release(res);
                goto out_unlock;
        }

        buf = ffa_desc_buf.buf;
        memcpy(buf, hyp_buffers.rx, fraglen);
        ffa_rx_release(res);

        for (fragoff = fraglen; fragoff < len; fragoff += fraglen) {
                ffa_mem_frag_rx(res, handle_lo, handle_hi, fragoff);
                if (res->a0 != FFA_MEM_FRAG_TX) {
                        ret = FFA_RET_INVALID_PARAMETERS;
                        goto out_unlock;
                }

                fraglen = res->a3;
                memcpy((void *)buf + fragoff, hyp_buffers.rx, fraglen);
                ffa_rx_release(res);
        }

        ffa_mem_reclaim(res, handle_lo, handle_hi, flags);
        if (res->a0 != FFA_SUCCESS)
                goto out_unlock;

        reg = (void *)buf + offset;
        /* If the SPMD was happy, then we should be too. */
        WARN_ON(ffa_host_unshare_ranges(reg->constituents,
                                        reg->addr_range_cnt));
out_unlock:
        hyp_spin_unlock(&host_buffers.lock);

        if (ret)
                ffa_to_smccc_res(res, ret);
}

/*
 * Is a given FFA function supported, either by forwarding on directly
 * or by handling at EL2?
 */
static bool ffa_call_supported(u64 func_id)
{
        switch (func_id) {
        /* Unsupported memory management calls */
        case FFA_FN64_MEM_RETRIEVE_REQ:
        case FFA_MEM_RETRIEVE_RESP:
        case FFA_MEM_RELINQUISH:
        case FFA_MEM_OP_PAUSE:
        case FFA_MEM_OP_RESUME:
        case FFA_MEM_FRAG_RX:
        case FFA_FN64_MEM_DONATE:
        /* Indirect message passing via RX/TX buffers */
        case FFA_MSG_SEND:
        case FFA_MSG_POLL:
        case FFA_MSG_WAIT:
        /* 32-bit variants of 64-bit calls */
        case FFA_MSG_SEND_DIRECT_RESP:
        case FFA_RXTX_MAP:
        case FFA_MEM_DONATE:
        case FFA_MEM_RETRIEVE_REQ:
        /* Optional notification interfaces added in FF-A 1.1 */
        case FFA_NOTIFICATION_BITMAP_CREATE:
        case FFA_NOTIFICATION_BITMAP_DESTROY:
        case FFA_NOTIFICATION_BIND:
        case FFA_NOTIFICATION_UNBIND:
        case FFA_NOTIFICATION_SET:
        case FFA_NOTIFICATION_GET:
        case FFA_NOTIFICATION_INFO_GET:
        /* Optional interfaces added in FF-A 1.2 */
        case FFA_MSG_SEND_DIRECT_REQ2:          /* Optional per 7.5.1 */
        case FFA_MSG_SEND_DIRECT_RESP2:         /* Optional per 7.5.1 */
        case FFA_CONSOLE_LOG:                   /* Optional per 13.1: not in Table 13.1 */
        case FFA_PARTITION_INFO_GET_REGS:       /* Optional for virtual instances per 13.1 */
                return false;
        }

        return true;
}

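/*
 * FFA_FEATURES handler: unsupported calls are reported as such, the memory
 * share/lend interfaces are answered here (no dynamic buffer support) and
 * anything else is left for EL3 to answer.
 */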
static bool do_ffa_features(struct arm_smccc_1_2_regs *res,
                            struct kvm_cpu_context *ctxt)
{
        DECLARE_REG(u32, id, ctxt, 1);
        u64 prop = 0;
        int ret = 0;

        if (!ffa_call_supported(id)) {
                ret = FFA_RET_NOT_SUPPORTED;
                goto out_handled;
        }

        switch (id) {
        case FFA_MEM_SHARE:
        case FFA_FN64_MEM_SHARE:
        case FFA_MEM_LEND:
        case FFA_FN64_MEM_LEND:
                ret = FFA_RET_SUCCESS;
                prop = 0; /* No support for dynamic buffers */
                goto out_handled;
        default:
                return false;
        }

out_handled:
        ffa_to_smccc_res_prop(res, ret, prop);
        return true;
}

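/*
 * Complete the handshake once a version has been negotiated: check that EL3
 * assigned us FF-A ID 0 (shared with the host) and that the minimum RXTX
 * buffer size it requires fits within a hypervisor page.
 */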
static int hyp_ffa_post_init(void)
{
        size_t min_rxtx_sz;
        struct arm_smccc_1_2_regs res;

        arm_smccc_1_2_smc(&(struct arm_smccc_1_2_regs){
                .a0 = FFA_ID_GET,
        }, &res);
        if (res.a0 != FFA_SUCCESS)
                return -EOPNOTSUPP;

        if (res.a2 != HOST_FFA_ID)
                return -EINVAL;

        arm_smccc_1_2_smc(&(struct arm_smccc_1_2_regs){
                .a0 = FFA_FEATURES,
                .a1 = FFA_FN64_RXTX_MAP,
        }, &res);
        if (res.a0 != FFA_SUCCESS)
                return -EOPNOTSUPP;

        switch (res.a2 & FFA_FEAT_RXTX_MIN_SZ_MASK) {
        case FFA_FEAT_RXTX_MIN_SZ_4K:
                min_rxtx_sz = SZ_4K;
                break;
        case FFA_FEAT_RXTX_MIN_SZ_16K:
                min_rxtx_sz = SZ_16K;
                break;
        case FFA_FEAT_RXTX_MIN_SZ_64K:
                min_rxtx_sz = SZ_64K;
                break;
        default:
                return -EINVAL;
        }

        if (min_rxtx_sz > PAGE_SIZE)
                return -EOPNOTSUPP;

        return 0;
}

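/*
 * FFA_VERSION handler: negotiate the version used between the host and the
 * proxy. The first successful call fixes the version; later requests for an
 * older minor version are refused.
 */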
static void do_ffa_version(struct arm_smccc_1_2_regs *res,
                           struct kvm_cpu_context *ctxt)
{
        DECLARE_REG(u32, ffa_req_version, ctxt, 1);

        if (FFA_MAJOR_VERSION(ffa_req_version) != 1) {
                res->a0 = FFA_RET_NOT_SUPPORTED;
                return;
        }

        hyp_spin_lock(&version_lock);
        if (has_version_negotiated) {
                if (FFA_MINOR_VERSION(ffa_req_version) < FFA_MINOR_VERSION(hyp_ffa_version))
                        res->a0 = FFA_RET_NOT_SUPPORTED;
                else
                        res->a0 = hyp_ffa_version;
                goto unlock;
        }

        /*
         * If the client driver tries to downgrade the version, we need to ask
         * first if TEE supports it.
         */
        if (FFA_MINOR_VERSION(ffa_req_version) < FFA_MINOR_VERSION(hyp_ffa_version)) {
                arm_smccc_1_2_smc(&(struct arm_smccc_1_2_regs) {
                        .a0 = FFA_VERSION,
                        .a1 = ffa_req_version,
                }, res);
                if (res->a0 == FFA_RET_NOT_SUPPORTED)
                        goto unlock;

                hyp_ffa_version = ffa_req_version;
        }

        if (hyp_ffa_post_init()) {
                res->a0 = FFA_RET_NOT_SUPPORTED;
        } else {
                smp_store_release(&has_version_negotiated, true);
                res->a0 = hyp_ffa_version;
        }
unlock:
        hyp_spin_unlock(&version_lock);
}

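/*
 * FFA_PARTITION_INFO_GET handler: forward the query to EL3 and copy the
 * returned partition descriptors from the hypervisor RX buffer into the
 * host's, unless the caller only asked for a count (flags bit 0, FF-A 1.1+).
 */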
static void do_ffa_part_get(struct arm_smccc_1_2_regs *res,
                            struct kvm_cpu_context *ctxt)
{
        DECLARE_REG(u32, uuid0, ctxt, 1);
        DECLARE_REG(u32, uuid1, ctxt, 2);
        DECLARE_REG(u32, uuid2, ctxt, 3);
        DECLARE_REG(u32, uuid3, ctxt, 4);
        DECLARE_REG(u32, flags, ctxt, 5);
        u32 count, partition_sz, copy_sz;

        hyp_spin_lock(&host_buffers.lock);
        if (!host_buffers.rx) {
                ffa_to_smccc_res(res, FFA_RET_BUSY);
                goto out_unlock;
        }

        arm_smccc_1_2_smc(&(struct arm_smccc_1_2_regs) {
                .a0 = FFA_PARTITION_INFO_GET,
                .a1 = uuid0,
                .a2 = uuid1,
                .a3 = uuid2,
                .a4 = uuid3,
                .a5 = flags,
        }, res);

        if (res->a0 != FFA_SUCCESS)
                goto out_unlock;

        count = res->a2;
        if (!count)
                goto out_unlock;

        if (hyp_ffa_version > FFA_VERSION_1_0) {
                /* Get the number of partitions deployed in the system */
                if (flags & 0x1)
                        goto out_unlock;

                partition_sz = res->a3;
        } else {
                /* FFA_VERSION_1_0 lacks the size in the response */
                partition_sz = FFA_1_0_PARTITON_INFO_SZ;
        }

        copy_sz = partition_sz * count;
        if (copy_sz > KVM_FFA_MBOX_NR_PAGES * PAGE_SIZE) {
                ffa_to_smccc_res(res, FFA_RET_ABORTED);
                goto out_unlock;
        }

        memcpy(host_buffers.rx, hyp_buffers.rx, copy_sz);
out_unlock:
        hyp_spin_unlock(&host_buffers.lock);
}

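/*
 * Entry point from the host SMC handler: returns true if the call was
 * handled (or rejected) here, false if it should be forwarded to EL3
 * unmodified.
 */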
bool kvm_host_ffa_handler(struct kvm_cpu_context *host_ctxt, u32 func_id)
{
        struct arm_smccc_1_2_regs res;

        /*
         * There's no way we can tell what a non-standard SMC call might
         * be up to. Ideally, we would terminate these here and return
         * an error to the host, but sadly devices make use of custom
         * firmware calls for things like power management, debugging,
         * RNG access and crash reporting.
         *
         * Given that the architecture requires us to trust EL3 anyway,
         * we forward unrecognised calls on under the assumption that
         * the firmware doesn't expose a mechanism to access arbitrary
         * non-secure memory. Short of a per-device table of SMCs, this
         * is the best we can do.
         */
        if (!is_ffa_call(func_id))
                return false;

        if (func_id != FFA_VERSION &&
            !smp_load_acquire(&has_version_negotiated)) {
                ffa_to_smccc_error(&res, FFA_RET_INVALID_PARAMETERS);
                goto out_handled;
        }

        switch (func_id) {
        case FFA_FEATURES:
                if (!do_ffa_features(&res, host_ctxt))
                        return false;
                goto out_handled;
        /* Memory management */
        case FFA_FN64_RXTX_MAP:
                do_ffa_rxtx_map(&res, host_ctxt);
                goto out_handled;
        case FFA_RXTX_UNMAP:
                do_ffa_rxtx_unmap(&res, host_ctxt);
                goto out_handled;
        case FFA_MEM_SHARE:
        case FFA_FN64_MEM_SHARE:
                do_ffa_mem_xfer(FFA_FN64_MEM_SHARE, &res, host_ctxt);
                goto out_handled;
        case FFA_MEM_RECLAIM:
                do_ffa_mem_reclaim(&res, host_ctxt);
                goto out_handled;
        case FFA_MEM_LEND:
        case FFA_FN64_MEM_LEND:
                do_ffa_mem_xfer(FFA_FN64_MEM_LEND, &res, host_ctxt);
                goto out_handled;
        case FFA_MEM_FRAG_TX:
                do_ffa_mem_frag_tx(&res, host_ctxt);
                goto out_handled;
        case FFA_VERSION:
                do_ffa_version(&res, host_ctxt);
                goto out_handled;
        case FFA_PARTITION_INFO_GET:
                do_ffa_part_get(&res, host_ctxt);
                goto out_handled;
        }

        if (ffa_call_supported(func_id))
                return false; /* Pass through */

        ffa_to_smccc_error(&res, FFA_RET_NOT_SUPPORTED);
out_handled:
        ffa_set_retval(host_ctxt, &res);
        return true;
}

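/*
 * Probe FF-A support in the firmware, cap the version at what the proxy
 * supports, and carve the pages donated by the host into the hypervisor
 * TX/RX mailboxes and the descriptor scratch buffer.
 */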
int hyp_ffa_init(void *pages)
{
        struct arm_smccc_1_2_regs res;
        void *tx, *rx;

        if (kvm_host_psci_config.smccc_version < ARM_SMCCC_VERSION_1_2)
                return 0;

        arm_smccc_1_2_smc(&(struct arm_smccc_1_2_regs) {
                .a0 = FFA_VERSION,
                .a1 = FFA_VERSION_1_2,
        }, &res);
        if (res.a0 == FFA_RET_NOT_SUPPORTED)
                return 0;

        /*
         * Firmware returns the maximum supported version of the FF-A
         * implementation. Check that the returned version is
         * backwards-compatible with the hyp according to the rules in DEN0077A
         * v1.1 REL0 13.2.1.
         *
         * Of course, things are never simple when dealing with firmware. v1.1
         * broke ABI with v1.0 on several structures, which is itself
         * incompatible with the aforementioned versioning scheme. The
         * expectation is that v1.x implementations that do not support the v1.0
         * ABI return NOT_SUPPORTED rather than a version number, according to
         * DEN0077A v1.1 REL0 18.6.4.
         */
        if (FFA_MAJOR_VERSION(res.a0) != 1)
                return -EOPNOTSUPP;

        if (FFA_MINOR_VERSION(res.a0) < FFA_MINOR_VERSION(FFA_VERSION_1_2))
                hyp_ffa_version = res.a0;
        else
                hyp_ffa_version = FFA_VERSION_1_2;

        tx = pages;
        pages += KVM_FFA_MBOX_NR_PAGES * PAGE_SIZE;
        rx = pages;
        pages += KVM_FFA_MBOX_NR_PAGES * PAGE_SIZE;

        ffa_desc_buf = (struct kvm_ffa_descriptor_buffer) {
                .buf = pages,
                .len = PAGE_SIZE *
                       (hyp_ffa_proxy_pages() - (2 * KVM_FFA_MBOX_NR_PAGES)),
        };

        hyp_buffers = (struct kvm_ffa_buffers) {
                .lock = __HYP_SPIN_LOCK_UNLOCKED,
                .tx = tx,
                .rx = rx,
        };

        host_buffers = (struct kvm_ffa_buffers) {
                .lock = __HYP_SPIN_LOCK_UNLOCKED,
        };

        version_lock = __HYP_SPIN_LOCK_UNLOCKED;
        return 0;
}