// SPDX-License-Identifier: GPL-2.0-only
/*
 * Arm Firmware Framework for ARMv8-A (FFA) interface driver
 *
 * The Arm FFA specification[1] describes a software architecture that
 * leverages the virtualization extension to isolate software images
 * provided by an ecosystem of vendors from each other and describes
 * interfaces that standardize communication between the various software
 * images, including communication between images in the Secure world and
 * Normal world. Any hypervisor could use the FFA interfaces to enable
 * communication between the VMs it manages.
 *
 * The hypervisor, a.k.a. the partition manager in FFA terminology, can
 * assign system resources (memory regions, devices, CPU cycles) to the
 * partitions and manage isolation amongst them.
 *
 * [1] https://developer.arm.com/docs/den0077/latest
 *
 * Copyright (C) 2021 ARM Ltd.
 */

#define DRIVER_NAME "ARM FF-A"
#define pr_fmt(fmt) DRIVER_NAME ": " fmt

#include <linux/arm_ffa.h>
#include <linux/bitfield.h>
#include <linux/device.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/uuid.h>

#include "common.h"

#define FFA_DRIVER_VERSION      FFA_VERSION_1_0
#define FFA_MIN_VERSION         FFA_VERSION_1_0

#define SENDER_ID_MASK          GENMASK(31, 16)
#define RECEIVER_ID_MASK        GENMASK(15, 0)
#define SENDER_ID(x)            ((u16)(FIELD_GET(SENDER_ID_MASK, (x))))
#define RECEIVER_ID(x)          ((u16)(FIELD_GET(RECEIVER_ID_MASK, (x))))
#define PACK_TARGET_INFO(s, r)          \
        (FIELD_PREP(SENDER_ID_MASK, (s)) | FIELD_PREP(RECEIVER_ID_MASK, (r)))
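
/*
 * Illustrative only (hypothetical partition IDs, not part of the driver):
 * the helpers above pack and unpack the target information of a message,
 * with the sender in the upper 16 bits and the receiver in the lower 16:
 *
 *      u32 info = PACK_TARGET_INFO(0x8001, 0x8002);    // 0x80018002
 *      u16 sender = SENDER_ID(info);                   // 0x8001
 *      u16 receiver = RECEIVER_ID(info);               // 0x8002
 */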

/*
 * Keep the RX/TX buffer size as 4K for now.
 * 64K may be preferred to keep it at least one page in a 64K PAGE_SIZE
 * configuration.
 */
#define RXTX_BUFFER_SIZE        SZ_4K

static ffa_fn *invoke_ffa_fn;

static const int ffa_linux_errmap[] = {
        /* better than switch case as long as return value is contiguous */
        0,              /* FFA_RET_SUCCESS */
        -EOPNOTSUPP,    /* FFA_RET_NOT_SUPPORTED */
        -EINVAL,        /* FFA_RET_INVALID_PARAMETERS */
        -ENOMEM,        /* FFA_RET_NO_MEMORY */
        -EBUSY,         /* FFA_RET_BUSY */
        -EINTR,         /* FFA_RET_INTERRUPTED */
        -EACCES,        /* FFA_RET_DENIED */
        -EAGAIN,        /* FFA_RET_RETRY */
        -ECANCELED,     /* FFA_RET_ABORTED */
};

static inline int ffa_to_linux_errno(int errno)
{
        int err_idx = -errno;

        if (err_idx >= 0 && err_idx < ARRAY_SIZE(ffa_linux_errmap))
                return ffa_linux_errmap[err_idx];
        return -EINVAL;
}
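
/*
 * Illustrative only: FF-A status codes are small negative integers, so the
 * table above is indexed by their magnitude. For example, assuming
 * FFA_RET_BUSY == -4, ffa_to_linux_errno(FFA_RET_BUSY) looks up index 4 and
 * returns -EBUSY; any out-of-range code falls back to -EINVAL.
 */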

struct ffa_drv_info {
        u32 version;
        u16 vm_id;
        struct mutex rx_lock; /* lock to protect Rx buffer */
        struct mutex tx_lock; /* lock to protect Tx buffer */
        void *rx_buffer;
        void *tx_buffer;
        bool mem_ops_native;
};

static struct ffa_drv_info *drv_info;

/*
 * The driver must be able to support all the versions from the earliest
 * supported FFA_MIN_VERSION to the latest supported FFA_DRIVER_VERSION.
 * The specification states that if firmware supports an FFA implementation
 * that is incompatible with and at a greater version number than specified
 * by the caller (FFA_DRIVER_VERSION, passed as a parameter to FFA_VERSION),
 * it must return the NOT_SUPPORTED error code.
 */
static u32 ffa_compatible_version_find(u32 version)
{
        u16 major = FFA_MAJOR_VERSION(version), minor = FFA_MINOR_VERSION(version);
        u16 drv_major = FFA_MAJOR_VERSION(FFA_DRIVER_VERSION);
        u16 drv_minor = FFA_MINOR_VERSION(FFA_DRIVER_VERSION);

        if ((major < drv_major) || (major == drv_major && minor <= drv_minor))
                return version;

        pr_info("Firmware version higher than driver version, downgrading\n");
        return FFA_DRIVER_VERSION;
}
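
/*
 * Illustrative only: with FFA_DRIVER_VERSION = 1.0, firmware reporting v1.1
 * fails both checks above (1 == 1 but 1 > 0), so the driver downgrades and
 * uses v1.0; firmware reporting v1.0 is adopted as-is, and anything lower
 * is rejected by the FFA_MIN_VERSION check in ffa_version_check() below.
 */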

static int ffa_version_check(u32 *version)
{
        ffa_value_t ver;

        invoke_ffa_fn((ffa_value_t){
                      .a0 = FFA_VERSION, .a1 = FFA_DRIVER_VERSION,
                      }, &ver);

        if (ver.a0 == FFA_RET_NOT_SUPPORTED) {
                pr_info("FFA_VERSION returned not supported\n");
                return -EOPNOTSUPP;
        }

        if (ver.a0 < FFA_MIN_VERSION) {
                pr_err("Incompatible v%d.%d! Earliest supported v%d.%d\n",
                       FFA_MAJOR_VERSION(ver.a0), FFA_MINOR_VERSION(ver.a0),
                       FFA_MAJOR_VERSION(FFA_MIN_VERSION),
                       FFA_MINOR_VERSION(FFA_MIN_VERSION));
                return -EINVAL;
        }

        pr_info("Driver version %d.%d\n", FFA_MAJOR_VERSION(FFA_DRIVER_VERSION),
                FFA_MINOR_VERSION(FFA_DRIVER_VERSION));
        pr_info("Firmware version %d.%d found\n", FFA_MAJOR_VERSION(ver.a0),
                FFA_MINOR_VERSION(ver.a0));
        *version = ffa_compatible_version_find(ver.a0);

        return 0;
}

static int ffa_rx_release(void)
{
        ffa_value_t ret;

        invoke_ffa_fn((ffa_value_t){
                      .a0 = FFA_RX_RELEASE,
                      }, &ret);

        if (ret.a0 == FFA_ERROR)
                return ffa_to_linux_errno((int)ret.a2);

        /* check for ret.a0 == FFA_RX_RELEASE ? */

        return 0;
}

static int ffa_rxtx_map(phys_addr_t tx_buf, phys_addr_t rx_buf, u32 pg_cnt)
{
        ffa_value_t ret;

        invoke_ffa_fn((ffa_value_t){
                      .a0 = FFA_FN_NATIVE(RXTX_MAP),
                      .a1 = tx_buf, .a2 = rx_buf, .a3 = pg_cnt,
                      }, &ret);

        if (ret.a0 == FFA_ERROR)
                return ffa_to_linux_errno((int)ret.a2);

        return 0;
}

static int ffa_rxtx_unmap(u16 vm_id)
{
        ffa_value_t ret;

        invoke_ffa_fn((ffa_value_t){
                      .a0 = FFA_RXTX_UNMAP, .a1 = PACK_TARGET_INFO(vm_id, 0),
                      }, &ret);

        if (ret.a0 == FFA_ERROR)
                return ffa_to_linux_errno((int)ret.a2);

        return 0;
}

#define PARTITION_INFO_GET_RETURN_COUNT_ONLY    BIT(0)

/* buffer must be sizeof(struct ffa_partition_info) * num_partitions */
static int
__ffa_partition_info_get(u32 uuid0, u32 uuid1, u32 uuid2, u32 uuid3,
                         struct ffa_partition_info *buffer, int num_partitions)
{
        int idx, count, flags = 0, sz, buf_sz;
        ffa_value_t partition_info;

        if (drv_info->version > FFA_VERSION_1_0 &&
            (!buffer || !num_partitions)) /* Just get the count for now */
                flags = PARTITION_INFO_GET_RETURN_COUNT_ONLY;

        mutex_lock(&drv_info->rx_lock);
        invoke_ffa_fn((ffa_value_t){
                      .a0 = FFA_PARTITION_INFO_GET,
                      .a1 = uuid0, .a2 = uuid1, .a3 = uuid2, .a4 = uuid3,
                      .a5 = flags,
                      }, &partition_info);

        if (partition_info.a0 == FFA_ERROR) {
                mutex_unlock(&drv_info->rx_lock);
                return ffa_to_linux_errno((int)partition_info.a2);
        }

        count = partition_info.a2;

        if (drv_info->version > FFA_VERSION_1_0) {
                buf_sz = sz = partition_info.a3;
                if (sz > sizeof(*buffer))
                        buf_sz = sizeof(*buffer);
        } else {
                /* FFA_VERSION_1_0 lacks size in the response */
                buf_sz = sz = 8;
        }

        if (buffer && count <= num_partitions)
                for (idx = 0; idx < count; idx++)
                        memcpy(buffer + idx, drv_info->rx_buffer + idx * sz,
                               buf_sz);

        ffa_rx_release();

        mutex_unlock(&drv_info->rx_lock);

        return count;
}

/* buffer is allocated here and the caller must free it if the returned count > 0 */
static int
ffa_partition_probe(const uuid_t *uuid, struct ffa_partition_info **buffer)
{
        int count;
        u32 uuid0_4[4];
        struct ffa_partition_info *pbuf;

        export_uuid((u8 *)uuid0_4, uuid);
        count = __ffa_partition_info_get(uuid0_4[0], uuid0_4[1], uuid0_4[2],
                                         uuid0_4[3], NULL, 0);
        if (count <= 0)
                return count;

        pbuf = kcalloc(count, sizeof(*pbuf), GFP_KERNEL);
        if (!pbuf)
                return -ENOMEM;

        count = __ffa_partition_info_get(uuid0_4[0], uuid0_4[1], uuid0_4[2],
                                         uuid0_4[3], pbuf, count);
        if (count <= 0)
                kfree(pbuf);
        else
                *buffer = pbuf;

        return count;
}
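
/*
 * Illustrative only: ffa_partition_probe() implements a count-then-fetch
 * pattern. The first __ffa_partition_info_get() call passes a NULL buffer
 * to learn how many partitions match the UUID, the allocation is sized from
 * that count, and the second call copies the records out of the Rx buffer.
 * A hypothetical caller looks like ffa_partition_info_get() further below:
 *
 *      struct ffa_partition_info *pbuf;
 *      int count = ffa_partition_probe(&uuid, &pbuf);
 *
 *      if (count > 0)
 *              kfree(pbuf);    // caller owns pbuf on success
 */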

#define VM_ID_MASK      GENMASK(15, 0)
static int ffa_id_get(u16 *vm_id)
{
        ffa_value_t id;

        invoke_ffa_fn((ffa_value_t){
                      .a0 = FFA_ID_GET,
                      }, &id);

        if (id.a0 == FFA_ERROR)
                return ffa_to_linux_errno((int)id.a2);

        *vm_id = FIELD_GET(VM_ID_MASK, (id.a2));

        return 0;
}

static int ffa_msg_send_direct_req(u16 src_id, u16 dst_id, bool mode_32bit,
                                   struct ffa_send_direct_data *data)
{
        u32 req_id, resp_id, src_dst_ids = PACK_TARGET_INFO(src_id, dst_id);
        ffa_value_t ret;

        if (mode_32bit) {
                req_id = FFA_MSG_SEND_DIRECT_REQ;
                resp_id = FFA_MSG_SEND_DIRECT_RESP;
        } else {
                req_id = FFA_FN_NATIVE(MSG_SEND_DIRECT_REQ);
                resp_id = FFA_FN_NATIVE(MSG_SEND_DIRECT_RESP);
        }

        invoke_ffa_fn((ffa_value_t){
                      .a0 = req_id, .a1 = src_dst_ids, .a2 = 0,
                      .a3 = data->data0, .a4 = data->data1, .a5 = data->data2,
                      .a6 = data->data3, .a7 = data->data4,
                      }, &ret);

        while (ret.a0 == FFA_INTERRUPT)
                invoke_ffa_fn((ffa_value_t){
                              .a0 = FFA_RUN, .a1 = ret.a1,
                              }, &ret);

        if (ret.a0 == FFA_ERROR)
                return ffa_to_linux_errno((int)ret.a2);

        if (ret.a0 == resp_id) {
                data->data0 = ret.a3;
                data->data1 = ret.a4;
                data->data2 = ret.a5;
                data->data3 = ret.a6;
                data->data4 = ret.a7;
                return 0;
        }

        return -EINVAL;
}

static int ffa_mem_first_frag(u32 func_id, phys_addr_t buf, u32 buf_sz,
                              u32 frag_len, u32 len, u64 *handle)
{
        ffa_value_t ret;

        invoke_ffa_fn((ffa_value_t){
                      .a0 = func_id, .a1 = len, .a2 = frag_len,
                      .a3 = buf, .a4 = buf_sz,
                      }, &ret);

        while (ret.a0 == FFA_MEM_OP_PAUSE)
                invoke_ffa_fn((ffa_value_t){
                              .a0 = FFA_MEM_OP_RESUME,
                              .a1 = ret.a1, .a2 = ret.a2,
                              }, &ret);

        if (ret.a0 == FFA_ERROR)
                return ffa_to_linux_errno((int)ret.a2);

        if (ret.a0 == FFA_SUCCESS) {
                if (handle)
                        *handle = PACK_HANDLE(ret.a2, ret.a3);
        } else if (ret.a0 == FFA_MEM_FRAG_RX) {
                if (handle)
                        *handle = PACK_HANDLE(ret.a1, ret.a2);
        } else {
                return -EOPNOTSUPP;
        }

        return frag_len;
}

static int ffa_mem_next_frag(u64 handle, u32 frag_len)
{
        ffa_value_t ret;

        invoke_ffa_fn((ffa_value_t){
                      .a0 = FFA_MEM_FRAG_TX,
                      .a1 = HANDLE_LOW(handle), .a2 = HANDLE_HIGH(handle),
                      .a3 = frag_len,
                      }, &ret);

        while (ret.a0 == FFA_MEM_OP_PAUSE)
                invoke_ffa_fn((ffa_value_t){
                              .a0 = FFA_MEM_OP_RESUME,
                              .a1 = ret.a1, .a2 = ret.a2,
                              }, &ret);

        if (ret.a0 == FFA_ERROR)
                return ffa_to_linux_errno((int)ret.a2);

        if (ret.a0 == FFA_MEM_FRAG_RX)
                return ret.a3;
        else if (ret.a0 == FFA_SUCCESS)
                return 0;

        return -EOPNOTSUPP;
}
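
/*
 * Illustrative only: when a memory transaction descriptor does not fit in a
 * single buffer, it is transmitted in fragments. The first fragment carries
 * the total length plus this fragment's length; if more data is expected,
 * the firmware answers FFA_MEM_FRAG_RX with the global handle, and each
 * further fragment is pushed with FFA_MEM_FRAG_TX until FFA_SUCCESS is
 * returned. E.g., assuming a hypothetical 10KiB descriptor sent in 4KiB
 * fragments:
 *
 *      ffa_mem_first_frag(func_id, buf, sz, 4K, 10K, &handle); // FRAG_RX
 *      ffa_mem_next_frag(handle, 4K);                          // FRAG_RX
 *      ffa_mem_next_frag(handle, 2K);                          // FFA_SUCCESS
 */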

static int
ffa_transmit_fragment(u32 func_id, phys_addr_t buf, u32 buf_sz, u32 frag_len,
                      u32 len, u64 *handle, bool first)
{
        if (!first)
                return ffa_mem_next_frag(*handle, frag_len);

        return ffa_mem_first_frag(func_id, buf, buf_sz, frag_len, len, handle);
}

static u32 ffa_get_num_pages_sg(struct scatterlist *sg)
{
        u32 num_pages = 0;

        do {
                num_pages += sg->length / FFA_PAGE_SIZE;
        } while ((sg = sg_next(sg)));

        return num_pages;
}

static int
ffa_setup_and_transmit(u32 func_id, void *buffer, u32 max_fragsize,
                       struct ffa_mem_ops_args *args)
{
        int rc = 0;
        bool first = true;
        phys_addr_t addr = 0;
        struct ffa_composite_mem_region *composite;
        struct ffa_mem_region_addr_range *constituents;
        struct ffa_mem_region_attributes *ep_mem_access;
        struct ffa_mem_region *mem_region = buffer;
        u32 idx, frag_len, length, buf_sz = 0, num_entries = sg_nents(args->sg);

        mem_region->tag = args->tag;
        mem_region->flags = args->flags;
        mem_region->sender_id = drv_info->vm_id;
        mem_region->attributes = FFA_MEM_NORMAL | FFA_MEM_WRITE_BACK |
                                 FFA_MEM_INNER_SHAREABLE;
        ep_mem_access = &mem_region->ep_mem_access[0];

        for (idx = 0; idx < args->nattrs; idx++, ep_mem_access++) {
                ep_mem_access->receiver = args->attrs[idx].receiver;
                ep_mem_access->attrs = args->attrs[idx].attrs;
                ep_mem_access->composite_off = COMPOSITE_OFFSET(args->nattrs);
                ep_mem_access->flag = 0;
                ep_mem_access->reserved = 0;
        }
        mem_region->handle = 0;
        mem_region->reserved_0 = 0;
        mem_region->reserved_1 = 0;
        mem_region->ep_count = args->nattrs;

        composite = buffer + COMPOSITE_OFFSET(args->nattrs);
        composite->total_pg_cnt = ffa_get_num_pages_sg(args->sg);
        composite->addr_range_cnt = num_entries;
        composite->reserved = 0;

        length = COMPOSITE_CONSTITUENTS_OFFSET(args->nattrs, num_entries);
        frag_len = COMPOSITE_CONSTITUENTS_OFFSET(args->nattrs, 0);
        if (frag_len > max_fragsize)
                return -ENXIO;

        if (!args->use_txbuf) {
                addr = virt_to_phys(buffer);
                buf_sz = max_fragsize / FFA_PAGE_SIZE;
        }

        constituents = buffer + frag_len;
        idx = 0;
        do {
                if (frag_len == max_fragsize) {
                        rc = ffa_transmit_fragment(func_id, addr, buf_sz,
                                                   frag_len, length,
                                                   &args->g_handle, first);
                        if (rc < 0)
                                return -ENXIO;

                        first = false;
                        idx = 0;
                        frag_len = 0;
                        constituents = buffer;
                }

                if ((void *)constituents - buffer > max_fragsize) {
                        pr_err("Memory Region Fragment > Tx Buffer size\n");
                        return -EFAULT;
                }

                constituents->address = sg_phys(args->sg);
                constituents->pg_cnt = args->sg->length / FFA_PAGE_SIZE;
                constituents->reserved = 0;
                constituents++;
                frag_len += sizeof(struct ffa_mem_region_addr_range);
        } while ((args->sg = sg_next(args->sg)));

        return ffa_transmit_fragment(func_id, addr, buf_sz, frag_len,
                                     length, &args->g_handle, first);
}

static int ffa_memory_ops(u32 func_id, struct ffa_mem_ops_args *args)
{
        int ret;
        void *buffer;

        if (!args->use_txbuf) {
                buffer = alloc_pages_exact(RXTX_BUFFER_SIZE, GFP_KERNEL);
                if (!buffer)
                        return -ENOMEM;
        } else {
                buffer = drv_info->tx_buffer;
                mutex_lock(&drv_info->tx_lock);
        }

        ret = ffa_setup_and_transmit(func_id, buffer, RXTX_BUFFER_SIZE, args);

        if (args->use_txbuf)
                mutex_unlock(&drv_info->tx_lock);
        else
                free_pages_exact(buffer, RXTX_BUFFER_SIZE);

        return ret < 0 ? ret : 0;
}

static int ffa_memory_reclaim(u64 g_handle, u32 flags)
{
        ffa_value_t ret;

        invoke_ffa_fn((ffa_value_t){
                      .a0 = FFA_MEM_RECLAIM,
                      .a1 = HANDLE_LOW(g_handle), .a2 = HANDLE_HIGH(g_handle),
                      .a3 = flags,
                      }, &ret);

        if (ret.a0 == FFA_ERROR)
                return ffa_to_linux_errno((int)ret.a2);

        return 0;
}

static int ffa_features(u32 func_feat_id, u32 input_props,
                        u32 *if_props_1, u32 *if_props_2)
{
        ffa_value_t id;

        if (!ARM_SMCCC_IS_FAST_CALL(func_feat_id) && input_props) {
                pr_err("%s: Invalid Parameters: %x, %x", __func__,
                       func_feat_id, input_props);
                return ffa_to_linux_errno(FFA_RET_INVALID_PARAMETERS);
        }

        invoke_ffa_fn((ffa_value_t){
                .a0 = FFA_FEATURES, .a1 = func_feat_id, .a2 = input_props,
                }, &id);

        if (id.a0 == FFA_ERROR)
                return ffa_to_linux_errno((int)id.a2);

        if (if_props_1)
                *if_props_1 = id.a2;
        if (if_props_2)
                *if_props_2 = id.a3;

        return 0;
}

static void ffa_set_up_mem_ops_native_flag(void)
{
        if (!ffa_features(FFA_FN_NATIVE(MEM_LEND), 0, NULL, NULL) ||
            !ffa_features(FFA_FN_NATIVE(MEM_SHARE), 0, NULL, NULL))
                drv_info->mem_ops_native = true;
}

static u32 ffa_api_version_get(void)
{
        return drv_info->version;
}

static int ffa_partition_info_get(const char *uuid_str,
                                  struct ffa_partition_info *buffer)
{
        int count;
        uuid_t uuid;
        struct ffa_partition_info *pbuf;

        if (uuid_parse(uuid_str, &uuid)) {
                pr_err("invalid uuid (%s)\n", uuid_str);
                return -ENODEV;
        }

        count = ffa_partition_probe(&uuid, &pbuf);
        if (count <= 0)
                return -ENOENT;

        memcpy(buffer, pbuf, sizeof(*pbuf) * count);
        kfree(pbuf);
        return 0;
}

static void _ffa_mode_32bit_set(struct ffa_device *dev)
{
        dev->mode_32bit = true;
}

static void ffa_mode_32bit_set(struct ffa_device *dev)
{
        if (drv_info->version > FFA_VERSION_1_0)
                return;

        _ffa_mode_32bit_set(dev);
}

static int ffa_sync_send_receive(struct ffa_device *dev,
                                 struct ffa_send_direct_data *data)
{
        return ffa_msg_send_direct_req(drv_info->vm_id, dev->vm_id,
                                       dev->mode_32bit, data);
}

static int ffa_memory_share(struct ffa_mem_ops_args *args)
{
        if (drv_info->mem_ops_native)
                return ffa_memory_ops(FFA_FN_NATIVE(MEM_SHARE), args);

        return ffa_memory_ops(FFA_MEM_SHARE, args);
}

static int ffa_memory_lend(struct ffa_mem_ops_args *args)
{
        /* Note that upon a successful MEM_LEND request the caller
         * must ensure that the memory region specified is not accessed
         * until a successful MEM_RECLAIM call has been made.
         * On systems with a hypervisor present this will be enforced,
         * however on systems without a hypervisor the responsibility
         * falls to the calling kernel driver to prevent access.
         */
        if (drv_info->mem_ops_native)
                return ffa_memory_ops(FFA_FN_NATIVE(MEM_LEND), args);

        return ffa_memory_ops(FFA_MEM_LEND, args);
}
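
/*
 * Illustrative only (hypothetical names, not part of the driver): a client
 * shares or lends memory by filling struct ffa_mem_ops_args with a
 * scatterlist and per-receiver attributes, then reclaims it later with the
 * global handle returned in args.g_handle:
 *
 *      struct ffa_mem_region_attributes mem_attr = {
 *              .receiver = ffa_dev->vm_id,
 *              .attrs = FFA_MEM_RW,
 *      };
 *      struct ffa_mem_ops_args args = {
 *              .use_txbuf = true,
 *              .attrs = &mem_attr,
 *              .nattrs = 1,
 *              .sg = &sg,              // previously initialized sg entry
 *      };
 *
 *      ret = ffa_dev->ops->mem_ops->memory_lend(&args);
 *      ...
 *      ret = ffa_dev->ops->mem_ops->memory_reclaim(args.g_handle, 0);
 */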

static const struct ffa_info_ops ffa_drv_info_ops = {
        .api_version_get = ffa_api_version_get,
        .partition_info_get = ffa_partition_info_get,
};

static const struct ffa_msg_ops ffa_drv_msg_ops = {
        .mode_32bit_set = ffa_mode_32bit_set,
        .sync_send_receive = ffa_sync_send_receive,
};

static const struct ffa_mem_ops ffa_drv_mem_ops = {
        .memory_reclaim = ffa_memory_reclaim,
        .memory_share = ffa_memory_share,
        .memory_lend = ffa_memory_lend,
};

static const struct ffa_ops ffa_drv_ops = {
        .info_ops = &ffa_drv_info_ops,
        .msg_ops = &ffa_drv_msg_ops,
        .mem_ops = &ffa_drv_mem_ops,
};
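
/*
 * Illustrative only (hypothetical UUID and names): these ops are handed to
 * each ffa_device registered in ffa_setup_partitions() below, so a client
 * driver binding to a partition through the FF-A bus reaches them via its
 * device, e.g.:
 *
 *      static const struct ffa_device_id my_device_id[] = {
 *              { UUID_INIT(0x12345678, 0x9abc, 0xdef0,
 *                          0x12, 0x34, 0x56, 0x78, 0x9a, 0xbc, 0xde, 0xf0) },
 *              {}
 *      };
 *
 *      static int my_probe(struct ffa_device *ffa_dev)
 *      {
 *              struct ffa_send_direct_data data = { .data0 = 0x1 };
 *
 *              return ffa_dev->ops->msg_ops->sync_send_receive(ffa_dev, &data);
 *      }
 *
 *      static struct ffa_driver my_driver = {
 *              .name = "my-ffa-client",
 *              .probe = my_probe,
 *              .id_table = my_device_id,
 *      };
 *      module_ffa_driver(my_driver);
 */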

void ffa_device_match_uuid(struct ffa_device *ffa_dev, const uuid_t *uuid)
{
        int count, idx;
        struct ffa_partition_info *pbuf, *tpbuf;

        /*
         * FF-A v1.1 provides UUID for each partition as part of the discovery
         * API. The discovered UUID must be populated in the device's UUID,
         * and there is no need to copy the same from the driver table.
         */
        if (drv_info->version > FFA_VERSION_1_0)
                return;

        count = ffa_partition_probe(uuid, &pbuf);
        if (count <= 0)
                return;

        for (idx = 0, tpbuf = pbuf; idx < count; idx++, tpbuf++)
                if (tpbuf->id == ffa_dev->vm_id)
                        uuid_copy(&ffa_dev->uuid, uuid);
        kfree(pbuf);
}

static void ffa_setup_partitions(void)
{
        int count, idx;
        uuid_t uuid;
        struct ffa_device *ffa_dev;
        struct ffa_partition_info *pbuf, *tpbuf;

        count = ffa_partition_probe(&uuid_null, &pbuf);
        if (count <= 0) {
                pr_info("%s: No partitions found, error %d\n", __func__, count);
                return;
        }

        for (idx = 0, tpbuf = pbuf; idx < count; idx++, tpbuf++) {
                import_uuid(&uuid, (u8 *)tpbuf->uuid);

                /* Note that if the UUID is uuid_null, ffa_device_match()
                 * will be required to find the UUID of this partition id
                 * with help of ffa_device_match_uuid(). FF-A v1.1 and above
                 * provides UUID here for each partition as part of the
                 * discovery API and the same is passed.
                 */
                ffa_dev = ffa_device_register(&uuid, tpbuf->id, &ffa_drv_ops);
                if (!ffa_dev) {
                        pr_err("%s: failed to register partition ID 0x%x\n",
                               __func__, tpbuf->id);
                        continue;
                }

                if (drv_info->version > FFA_VERSION_1_0 &&
                    !(tpbuf->properties & FFA_PARTITION_AARCH64_EXEC))
                        _ffa_mode_32bit_set(ffa_dev);
        }
        kfree(pbuf);
}

static int __init ffa_init(void)
{
        int ret;

        ret = ffa_transport_init(&invoke_ffa_fn);
        if (ret)
                return ret;

        ret = arm_ffa_bus_init();
        if (ret)
                return ret;

        drv_info = kzalloc(sizeof(*drv_info), GFP_KERNEL);
        if (!drv_info) {
                ret = -ENOMEM;
                goto ffa_bus_exit;
        }

        ret = ffa_version_check(&drv_info->version);
        if (ret)
                goto free_drv_info;

        if (ffa_id_get(&drv_info->vm_id)) {
                pr_err("failed to obtain VM id for self\n");
                ret = -ENODEV;
                goto free_drv_info;
        }

        drv_info->rx_buffer = alloc_pages_exact(RXTX_BUFFER_SIZE, GFP_KERNEL);
        if (!drv_info->rx_buffer) {
                ret = -ENOMEM;
                goto free_drv_info;
        }

        drv_info->tx_buffer = alloc_pages_exact(RXTX_BUFFER_SIZE, GFP_KERNEL);
        if (!drv_info->tx_buffer) {
                ret = -ENOMEM;
                goto free_pages;
        }

        ret = ffa_rxtx_map(virt_to_phys(drv_info->tx_buffer),
                           virt_to_phys(drv_info->rx_buffer),
                           RXTX_BUFFER_SIZE / FFA_PAGE_SIZE);
        if (ret) {
                pr_err("failed to register FFA RxTx buffers\n");
                goto free_pages;
        }

        mutex_init(&drv_info->rx_lock);
        mutex_init(&drv_info->tx_lock);

        ffa_setup_partitions();

        ffa_set_up_mem_ops_native_flag();

        return 0;
free_pages:
        if (drv_info->tx_buffer)
                free_pages_exact(drv_info->tx_buffer, RXTX_BUFFER_SIZE);
        free_pages_exact(drv_info->rx_buffer, RXTX_BUFFER_SIZE);
free_drv_info:
        kfree(drv_info);
ffa_bus_exit:
        arm_ffa_bus_exit();
        return ret;
}
subsys_initcall(ffa_init);

static void __exit ffa_exit(void)
{
        ffa_rxtx_unmap(drv_info->vm_id);
        free_pages_exact(drv_info->tx_buffer, RXTX_BUFFER_SIZE);
        free_pages_exact(drv_info->rx_buffer, RXTX_BUFFER_SIZE);
        kfree(drv_info);
        arm_ffa_bus_exit();
}
module_exit(ffa_exit);

MODULE_ALIAS("arm-ffa");
MODULE_AUTHOR("Sudeep Holla <sudeep.holla@arm.com>");
MODULE_DESCRIPTION("Arm FF-A interface driver");
MODULE_LICENSE("GPL v2");