[platform/adaptation/broadcom/libtbm-vc4.git] / src / tbm_bufmgr_vc4.c
1 /**************************************************************************
2
3 libtbm_vc4
4
5 Copyright 2017 Samsung Electronics co., Ltd. All Rights Reserved.
6
7 Contact: SooChan Lim <sc1.lim@samsung.com>, Sangjin Lee <lsj119@samsung.com>
8
9 Permission is hereby granted, free of charge, to any person obtaining a
10 copy of this software and associated documentation files (the
11 "Software"), to deal in the Software without restriction, including
12 without limitation the rights to use, copy, modify, merge, publish,
13 distribute, sub license, and/or sell copies of the Software, and to
14 permit persons to whom the Software is furnished to do so, subject to
15 the following conditions:
16
17 The above copyright notice and this permission notice (including the
18 next paragraph) shall be included in all copies or substantial portions
19 of the Software.
20
21 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
22 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
23 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
24 IN NO EVENT SHALL PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR
25 ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
26 TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
27 SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
28
29 **************************************************************************/
30
31 #ifdef HAVE_CONFIG_H
32 #include "config.h"
33 #endif
34
35 #include <libudev.h>
36
37 #include <stdio.h>
38 #include <stdlib.h>
39 #include <stdint.h>
40 #include <string.h>
41 #include <sys/ioctl.h>
42 #include <sys/types.h>
43 #include <unistd.h>
44 #include <sys/mman.h>
45 #include <sys/stat.h>
46 #include <fcntl.h>
47 #include <errno.h>
48 #include <xf86drm.h>
49 #include <vc4_drm.h>
50 #include <pthread.h>
51 #include <tbm_backend.h>
52 #include <tbm_drm_helper.h>
53 #include <tbm_log.h>
54 #include "tbm_bufmgr_tgl.h"
55
56 #define TBM_COLOR_FORMAT_COUNT 4
57
58 #define VC4_DRM_NAME "vc4"
59
60 #define STRERR_BUFSIZE 128
61
62 #define SIZE_ALIGN(value, base) (((value) + ((base) - 1)) & ~((base) - 1))
63 #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
64 #define MAX(a, b) ((a) > (b) ? (a) : (b))
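/* A few worked examples of the helper macros above (base must be a power of
 * two for SIZE_ALIGN):
 *   SIZE_ALIGN(100, 16)    -> 112  (round up to the next multiple of 16)
 *   DIV_ROUND_UP(1080, 16) -> 68   (number of 16-pixel macroblock rows)
 *   MAX(3, 7)              -> 7
 */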
65
66 #ifdef ALIGN_EIGHT
67 #define TBM_SURFACE_ALIGNMENT_PLANE (8)
68 #define TBM_SURFACE_ALIGNMENT_PITCH_RGB (8)
69 #else
70 #define TBM_SURFACE_ALIGNMENT_PLANE (16)
71 #define TBM_SURFACE_ALIGNMENT_PITCH_RGB (16)
72 #endif
73
74 #define TBM_SURFACE_ALIGNMENT_PLANE_NV12 (4096)
75 #define TBM_SURFACE_ALIGNMENT_PITCH_YUV (16)
76
77 #define SZ_1M                                   0x00100000
78 #define S5P_FIMV_MAX_FRAME_SIZE                 (2 * SZ_1M)
79 #define S5P_FIMV_D_ALIGN_PLANE_SIZE             64
80 #define S5P_FIMV_NUM_PIXELS_IN_MB_ROW           16
81 #define S5P_FIMV_NUM_PIXELS_IN_MB_COL           16
82 #define S5P_FIMV_DEC_BUF_ALIGN                  (8 * 1024)
83 #define S5P_FIMV_NV12MT_HALIGN                  128
84 #define S5P_FIMV_NV12MT_VALIGN                  64
85
86 //#define VC4_TILED_FORMAT 1
87
88 struct dma_buf_info {
89         unsigned long   size;
90         unsigned int    fence_supported;
91         unsigned int    padding;
92 };
93
94 #define DMA_BUF_ACCESS_READ             0x1
95 #define DMA_BUF_ACCESS_WRITE            0x2
96 #define DMA_BUF_ACCESS_DMA              0x4
97 #define DMA_BUF_ACCESS_MAX              0x8
98
99 #define DMA_FENCE_LIST_MAX              5
100
101 struct dma_buf_fence {
102         unsigned long           ctx;
103         unsigned int            type;
104 };
105
106 #define DMABUF_IOCTL_BASE       'F'
107 #define DMABUF_IOWR(nr, type)   _IOWR(DMABUF_IOCTL_BASE, nr, type)
108
109 #define DMABUF_IOCTL_GET_INFO   DMABUF_IOWR(0x00, struct dma_buf_info)
110 #define DMABUF_IOCTL_GET_FENCE  DMABUF_IOWR(0x01, struct dma_buf_fence)
111 #define DMABUF_IOCTL_PUT_FENCE  DMABUF_IOWR(0x02, struct dma_buf_fence)
112
113 /* tgl key values */
114 #define GLOBAL_KEY   ((unsigned int)(-1))
115 /* TBM_CACHE */
116 #define TBM_VC4_CACHE_INV       0x01 /**< cache invalidate  */
117 #define TBM_VC4_CACHE_CLN       0x02 /**< cache clean */
118 #define TBM_VC4_CACHE_ALL       0x10 /**< cache all */
119 #define TBM_VC4_CACHE_FLUSH     (TBM_VC4_CACHE_INV|TBM_VC4_CACHE_CLN) /**< cache flush  */
120 #define TBM_VC4_CACHE_FLUSH_ALL (TBM_VC4_CACHE_FLUSH|TBM_VC4_CACHE_ALL) /**< cache flush all */
121
122 enum {
123         DEVICE_NONE = 0,
124         DEVICE_CA,                                      /* cache aware device */
125         DEVICE_CO                                       /* cache oblivious device */
126 };
127
128 typedef union _tbm_bo_cache_state tbm_bo_cache_state;
129
130 union _tbm_bo_cache_state {
131         unsigned int val;
132         struct {
133                 unsigned int cntFlush:16;       /* snapshot of the global flush-all count, used for sync */
134                 unsigned int isCached:1;
135                 unsigned int isDirtied:2;
136         } data;
137 };
138
139 typedef struct _tbm_bufmgr_vc4 *tbm_bufmgr_vc4;
140 typedef struct _tbm_bo_vc4 *tbm_bo_vc4;
141
142 /* tbm buffer object for vc4 */
143 struct _tbm_bo_vc4 {
144         int fd;
145
146         unsigned int name;    /* FLINK ID */
147
148         unsigned int gem;     /* GEM Handle */
149
150         unsigned int dmabuf;  /* fd for dmabuf */
151
152         void *pBase;          /* virtual address */
153
154         unsigned int size;
155
156         unsigned int flags_tbm; /* not used now; there are currently no values for these flags, but they may be used in a future extension */
157
158         pthread_mutex_t mutex;
159         struct dma_buf_fence dma_fence[DMA_FENCE_LIST_MAX];
160         int device;
161         int opt;
162
163         tbm_bo_cache_state cache_state;
164         unsigned int map_cnt;
165         int last_map_device;
166
167         tbm_bufmgr_vc4 bufmgr_vc4;
168 };
169
170 /* tbm bufmgr private for vc4 */
171 struct _tbm_bufmgr_vc4 {
172         int fd;
173         int isLocal;
174         void *hashBos;
175
176         int use_dma_fence;
177
178         int tgl_fd;
179
180         char *device_name;
181         void *bind_display;
182
183         tbm_backend_bufmgr_func *bufmgr_func;
184         tbm_backend_bo_func *bo_func;
185
186         tbm_bufmgr bufmgr;
187 };
188
189 static char *STR_DEVICE[] = {
190         "DEF",
191         "CPU",
192         "2D",
193         "3D",
194         "MM"
195 };
196
197 static char *STR_OPT[] = {
198         "NONE",
199         "RD",
200         "WR",
201         "RDWR"
202 };
203
204
205 static uint32_t tbm_vc4_color_format_list[TBM_COLOR_FORMAT_COUNT] = {
206                                                                                 TBM_FORMAT_ARGB8888,
207                                                                                 TBM_FORMAT_XRGB8888,
208                                                                                 TBM_FORMAT_NV12,
209                                                                                 TBM_FORMAT_YUV420
210                                                                         };
211 #undef  ENABLE_CACHECRTL /* cache control is force-disabled; the helpers below are compiled out */
212 #ifdef ENABLE_CACHECRTL
213 #ifdef TGL_GET_VERSION
214 static inline int
215 _tgl_get_version(int fd)
216 {
217         struct tgl_ver_data data;
218         int err;
219         char buf[STRERR_BUFSIZE];
220
221         err = ioctl(fd, TGL_IOCTL_GET_VERSION, &data);
222         if (err) {
223                 TBM_ERR("error(%s)\n",
224                         strerror_r(errno, buf, STRERR_BUFSIZE));
225                 return 0;
226         }
227
228         TBM_DBG("tgl version is (%u, %u).\n", data.major, data.minor);
229
230         return 1;
231 }
232 #endif
233
234 static inline int
235 _tgl_init(int fd, unsigned int key)
236 {
237         struct tgl_reg_data data;
238         int err;
239         char buf[STRERR_BUFSIZE];
240
241         data.key = key;
242         data.timeout_ms = 1000;
243
244         err = ioctl(fd, TGL_IOCTL_REGISTER, &data);
245         if (err) {
246                 TBM_ERR("error(%s) key:%d\n",
247                         strerror_r(errno, buf, STRERR_BUFSIZE), key);
248                 return 0;
249         }
250
251         return 1;
252 }
253
254 static inline int
255 _tgl_destroy(int fd, unsigned int key)
256 {
257         struct tgl_reg_data data;
258         int err;
259         char buf[STRERR_BUFSIZE];
260
261         data.key = key;
262         err = ioctl(fd, TGL_IOCTL_UNREGISTER, &data);
263         if (err) {
264                 TBM_ERR("error(%s) key:%d\n",
265                         strerror_r(errno, buf, STRERR_BUFSIZE), key);
266                 return 0;
267         }
268
269         return 1;
270 }
271
272 static inline int
273 _tgl_lock(int fd, unsigned int key, int opt)
274 {
275         struct tgl_lock_data data;
276         enum tgl_type_data tgl_type;
277         int err;
278         char buf[STRERR_BUFSIZE];
279
280         switch (opt) {
281         case TBM_OPTION_READ:
282                 tgl_type = TGL_TYPE_READ;
283                 break;
284         case TBM_OPTION_WRITE:
285                 tgl_type = TGL_TYPE_WRITE;
286                 break;
287         default:
288                 tgl_type = TGL_TYPE_NONE;
289                 break;
290         }
291
292         data.key = key;
293         data.type = tgl_type;
294
295         err = ioctl(fd, TGL_IOCTL_LOCK, &data);
296         if (err) {
297                 TBM_ERR("error(%s) key:%d opt:%d\n",
298                         strerror_r(errno, buf, STRERR_BUFSIZE), key, opt);
299                 return 0;
300         }
301
302         return 1;
303 }
304
305 static inline int
306 _tgl_unlock(int fd, unsigned int key)
307 {
308         struct tgl_lock_data data;
309         int err;
310         char buf[STRERR_BUFSIZE];
311
312         data.key = key;
313         data.type = TGL_TYPE_NONE;
314
315         err = ioctl(fd, TGL_IOCTL_UNLOCK, &data);
316         if (err) {
317                 TBM_ERR("error(%s) key:%d\n",
318                         strerror_r(errno, buf, STRERR_BUFSIZE), key);
319                 return 0;
320         }
321
322         return 1;
323 }
324
325 static inline int
326 _tgl_set_data(int fd, unsigned int key, unsigned int val)
327 {
328         struct tgl_usr_data data;
329         int err;
330         char buf[STRERR_BUFSIZE];
331
332         data.key = key;
333         data.data1 = val;
334
335         err = ioctl(fd, TGL_IOCTL_SET_DATA, &data);
336         if (err) {
337                 TBM_ERR("error(%s) key:%d\n",
338                         strerror_r(errno, buf, STRERR_BUFSIZE), key);
339                 return 0;
340         }
341
342         return 1;
343 }
344
345 static inline unsigned int
346 _tgl_get_data(int fd, unsigned int key)
347 {
348         struct tgl_usr_data data = { 0, };
349         int err;
350         char buf[STRERR_BUFSIZE];
351
352         data.key = key;
353
354         err = ioctl(fd, TGL_IOCTL_GET_DATA, &data);
355         if (err) {
356                 TBM_ERR("error(%s) key:%d\n",
357                         strerror_r(errno, buf, STRERR_BUFSIZE), key);
358                 return 0;
359         }
360
361         return data.data1;
362 }
363
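/* Flush/invalidate CPU caches for a bo through DRM_VC4_GEM_CACHE_OP.
 * When bo_vc4 is NULL, everything is flushed (TBM_VC4_CACHE_FLUSH_ALL);
 * otherwise the TBM_VC4_CACHE_* flags are translated into the matching
 * VC4_DRM_CACHE_*_RANGE/_ALL operations for the bo's mapped range.
 * This is a no-op when dma-fence is used, since the kernel then manages
 * cache coherency by itself.
 */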
364 static int
365 _vc4_cache_flush(tbm_bufmgr_vc4 bufmgr_vc4, tbm_bo_vc4 bo_vc4, int flags)
366 {
367         TBM_RETURN_VAL_IF_FAIL(bufmgr_vc4 != NULL, 0);
368
369         /* cache flush is managed by kernel side when using dma-fence. */
370         if (bufmgr_vc4->use_dma_fence)
371                 return 1;
372
373         struct drm_vc4_gem_cache_op cache_op = {0, };
374         int ret;
375
376         /* if bo_vc4 is null, do cache_flush_all */
377         if (bo_vc4) {
378                 cache_op.flags = 0;
379                 cache_op.usr_addr = (uint64_t)((uint32_t)bo_vc4->pBase);
380                 cache_op.size = bo_vc4->size;
381         } else {
382                 flags = TBM_VC4_CACHE_FLUSH_ALL;
383                 cache_op.flags = 0;
384                 cache_op.usr_addr = 0;
385                 cache_op.size = 0;
386         }
387
388         if (flags & TBM_VC4_CACHE_INV) {
389                 if (flags & TBM_VC4_CACHE_ALL)
390                         cache_op.flags |= VC4_DRM_CACHE_INV_ALL;
391                 else
392                         cache_op.flags |= VC4_DRM_CACHE_INV_RANGE;
393         }
394
395         if (flags & TBM_VC4_CACHE_CLN) {
396                 if (flags & TBM_VC4_CACHE_ALL)
397                         cache_op.flags |= VC4_DRM_CACHE_CLN_ALL;
398                 else
399                         cache_op.flags |= VC4_DRM_CACHE_CLN_RANGE;
400         }
401
402         if (flags & TBM_VC4_CACHE_ALL)
403                 cache_op.flags |= VC4_DRM_ALL_CACHES_CORES;
404
405         ret = drmCommandWriteRead(bufmgr_vc4->fd, DRM_VC4_GEM_CACHE_OP, &cache_op,
406                                   sizeof(cache_op));
407         if (ret) {
408                 TBM_ERR("fail to flush the cache.\n");
409                 return 0;
410         }
411
412         return 1;
413 }
414 #endif
415
416 static int
417 _bo_init_cache_state(tbm_bufmgr_vc4 bufmgr_vc4, tbm_bo_vc4 bo_vc4, int import)
418 {
419 #ifdef ENABLE_CACHECRTL
420         TBM_RETURN_VAL_IF_FAIL(bufmgr_vc4 != NULL, 0);
421         TBM_RETURN_VAL_IF_FAIL(bo_vc4 != NULL, 0);
422
423         if (bufmgr_vc4->use_dma_fence)
424                 return 1;
425
426         _tgl_init(bufmgr_vc4->tgl_fd, bo_vc4->name);
427
428         tbm_bo_cache_state cache_state;
429
430         if (import == 0) {
431                 cache_state.data.isDirtied = DEVICE_NONE;
432                 cache_state.data.isCached = 0;
433                 cache_state.data.cntFlush = 0;
434
435                 _tgl_set_data(bufmgr_vc4->tgl_fd, bo_vc4->name, cache_state.val);
436         }
437 #endif
438
439         return 1;
440 }
441
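/* Update the bo's shared cache state (stored through the tgl device and
 * keyed by the bo's flink name) before it is mapped for 'device' with
 * options 'opt'.  A flush is needed when ownership moves between a
 * cache-aware device (DEVICE_CA, i.e. the CPU) and a cache-oblivious one
 * (DEVICE_CO):
 *   - CPU map after a CO write     -> invalidate the bo's cache range
 *   - non-CPU map after a CA write -> clean (write back) all caches and
 *     bump the global flush counter stored under GLOBAL_KEY.
 */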
442 static int
443 _bo_set_cache_state(tbm_bufmgr_vc4 bufmgr_vc4, tbm_bo_vc4 bo_vc4, int device, int opt)
444 {
445 #ifdef ENABLE_CACHECRTL
446         TBM_RETURN_VAL_IF_FAIL(bufmgr_vc4 != NULL, 0);
447         TBM_RETURN_VAL_IF_FAIL(bo_vc4 != NULL, 0);
448
449         if (bufmgr_vc4->use_dma_fence)
450                 return 1;
451
452         char need_flush = 0;
453         unsigned short cntFlush = 0;
454
455         /* get cache state of a bo_vc4 */
456         bo_vc4->cache_state.val = _tgl_get_data(bufmgr_vc4->tgl_fd,
457                                      bo_vc4->name);
458
459         /* get global cache flush count */
460         cntFlush = (unsigned short)_tgl_get_data(bufmgr_vc4->tgl_fd, GLOBAL_KEY);
461
462         if (device == TBM_DEVICE_CPU) {
463                 if (bo_vc4->cache_state.data.isDirtied == DEVICE_CO &&
464                     bo_vc4->cache_state.data.isCached)
465                         need_flush = TBM_VC4_CACHE_INV;
466
467                 bo_vc4->cache_state.data.isCached = 1;
468                 if (opt & TBM_OPTION_WRITE)
469                         bo_vc4->cache_state.data.isDirtied = DEVICE_CA;
470                 else {
471                         if (bo_vc4->cache_state.data.isDirtied != DEVICE_CA)
472                                 bo_vc4->cache_state.data.isDirtied = DEVICE_NONE;
473                 }
474         } else {
475                 if (bo_vc4->cache_state.data.isDirtied == DEVICE_CA &&
476                     bo_vc4->cache_state.data.isCached &&
477                     bo_vc4->cache_state.data.cntFlush == cntFlush)
478                         need_flush = TBM_VC4_CACHE_CLN | TBM_VC4_CACHE_ALL;
479
480                 if (opt & TBM_OPTION_WRITE)
481                         bo_vc4->cache_state.data.isDirtied = DEVICE_CO;
482                 else {
483                         if (bo_vc4->cache_state.data.isDirtied != DEVICE_CO)
484                                 bo_vc4->cache_state.data.isDirtied = DEVICE_NONE;
485                 }
486         }
487
488         if (need_flush) {
489                 if (need_flush & TBM_VC4_CACHE_ALL)
490                         _tgl_set_data(bufmgr_vc4->tgl_fd, GLOBAL_KEY, (unsigned int)(++cntFlush));
491
492                 /* call cache flush */
493                 _vc4_cache_flush(bufmgr_vc4, bo_vc4, need_flush);
494
495                 TBM_DBG(" \tcache(%d,%d)....flush:0x%x, cntFlush(%d)\n",
496                     bo_vc4->cache_state.data.isCached,
497                     bo_vc4->cache_state.data.isDirtied,
498                     need_flush,
499                     cntFlush);
500         }
501 #endif
502
503         return 1;
504 }
505
506 static int
507 _bo_save_cache_state(tbm_bufmgr_vc4 bufmgr_vc4, tbm_bo_vc4 bo_vc4)
508 {
509 #ifdef ENABLE_CACHECRTL
510         TBM_RETURN_VAL_IF_FAIL(bufmgr_vc4 != NULL, 0);
511         TBM_RETURN_VAL_IF_FAIL(bo_vc4 != NULL, 0);
512
513         if (bufmgr_vc4->use_dma_fence)
514                 return 1;
515
516         unsigned short cntFlush = 0;
517
518         /* get global cache flush count */
519         cntFlush = (unsigned short)_tgl_get_data(bufmgr_vc4->tgl_fd, GLOBAL_KEY);
520
521         /* save global cache flush count */
522         bo_vc4->cache_state.data.cntFlush = cntFlush;
523         _tgl_set_data(bufmgr_vc4->tgl_fd, bo_vc4->name,
524                       bo_vc4->cache_state.val);
525 #endif
526
527         return 1;
528 }
529
530 static void
531 _bo_destroy_cache_state(tbm_bufmgr_vc4 bufmgr_vc4, tbm_bo_vc4 bo_vc4)
532 {
533 #ifdef ENABLE_CACHECRTL
534         TBM_RETURN_IF_FAIL(bufmgr_vc4 != NULL);
535         TBM_RETURN_IF_FAIL(bo_vc4 != NULL);
536
537         if (bufmgr_vc4->use_dma_fence)
538                 return ;
539
540         _tgl_destroy(bufmgr_vc4->tgl_fd, bo_vc4->name);
541 #endif
542 }
543
544 static int
545 _bufmgr_init_cache_state(tbm_bufmgr_vc4 bufmgr_vc4)
546 {
547 #ifdef ENABLE_CACHECRTL
548         TBM_RETURN_VAL_IF_FAIL(bufmgr_vc4 != NULL, 0);
549
550         if (bufmgr_vc4->use_dma_fence)
551                 return 1;
552
553         /* open tgl fd for saving cache flush data */
554         bufmgr_vc4->tgl_fd = open(tgl_devfile, O_RDWR);
555
556         if (bufmgr_vc4->tgl_fd < 0) {
557             bufmgr_vc4->tgl_fd = open(tgl_devfile1, O_RDWR);
558             if (bufmgr_vc4->tgl_fd < 0) {
559                     TBM_ERR("fail to open global_lock:%s\n",
560                                         tgl_devfile1);
561                     return 0;
562             }
563         }
564
565 #ifdef TGL_GET_VERSION
566         if (!_tgl_get_version(bufmgr_vc4->tgl_fd)) {
567                 TBM_ERR("fail to get tgl_version. tgl init failed.\n");
568                 close(bufmgr_vc4->tgl_fd);
569                 return 0;
570         }
571 #endif
572
573         if (!_tgl_init(bufmgr_vc4->tgl_fd, GLOBAL_KEY)) {
574                 TBM_ERR("fail to initialize the tgl\n");
575                 close(bufmgr_vc4->tgl_fd);
576                 return 0;
577         }
578 #endif
579
580         return 1;
581 }
582
583 static void
584 _bufmgr_deinit_cache_state(tbm_bufmgr_vc4 bufmgr_vc4)
585 {
586 #ifdef ENABLE_CACHECRTL
587         TBM_RETURN_IF_FAIL(bufmgr_vc4 != NULL);
588
589         if (bufmgr_vc4->use_dma_fence)
590                 return;
591
592         if (bufmgr_vc4->tgl_fd >= 0)
593                 close(bufmgr_vc4->tgl_fd);
594 #endif
595 }
596
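/* Open the vc4 DRM device.  First try drmOpen() by driver name; if that
 * fails, enumerate the "card*" drm devices via udev, pick the one whose
 * parent sysname is "vc4-drm" and open its device node directly.
 */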
597 static int
598 _tbm_vc4_open_drm()
599 {
600         int fd = -1;
601
602         fd = drmOpen(VC4_DRM_NAME, NULL);
603         if (fd < 0) {
604                 TBM_ERR("fail to open drm.(%s)\n", VC4_DRM_NAME);
605         }
606
607         if (fd < 0) {
608                 struct udev *udev = NULL;
609                 struct udev_enumerate *e = NULL;
610                 struct udev_list_entry *entry = NULL;
611                 struct udev_device *device = NULL, *drm_device = NULL, *device_parent = NULL;
612                 const char *filepath;
613                 struct stat s;
614                 int ret;
615
616                 TBM_DBG("search drm-device by udev\n");
617
618                 udev = udev_new();
619                 if (!udev) {
620                         TBM_ERR("udev_new() failed.\n");
621                         return -1;
622                 }
623
624                 e = udev_enumerate_new(udev);
625                 udev_enumerate_add_match_subsystem(e, "drm");
626                 udev_enumerate_add_match_sysname(e, "card[0-9]*");
627                 udev_enumerate_scan_devices(e);
628
629                 udev_list_entry_foreach(entry, udev_enumerate_get_list_entry(e)) {
630                         device = udev_device_new_from_syspath(udev_enumerate_get_udev(e),
631                                                               udev_list_entry_get_name(entry));
632                         device_parent = udev_device_get_parent(device);
633                         /* No need to unref device_parent; device_parent and device share the same refcount. */
634                         if (device_parent) {
635                                 if (strcmp(udev_device_get_sysname(device_parent), "vc4-drm") == 0) {
636                                         drm_device = device;
637                                         TBM_DBG("Found render device: '%s' (%s)\n",
638                                             udev_device_get_syspath(drm_device),
639                                             udev_device_get_sysname(device_parent));
640                                         break;
641                                 }
642                         }
643                         udev_device_unref(device);
644                 }
645
646                 udev_enumerate_unref(e);
647
648                 /* Get device file path. */
649                 filepath = udev_device_get_devnode(drm_device);
650                 if (!filepath) {
651                         TBM_ERR("udev_device_get_devnode() failed.\n");
652                         udev_device_unref(drm_device);
653                         udev_unref(udev);
654                         return -1;
655                 }
656
657                 /* Open DRM device file and check validity. */
658                 fd = open(filepath, O_RDWR | O_CLOEXEC);
659                 if (fd < 0) {
660                         TBM_ERR("open(%s, O_RDWR | O_CLOEXEC) failed.\n", filepath);
661                         udev_device_unref(drm_device);
662                         udev_unref(udev);
663                         return -1;
664                 }
665
666                 ret = fstat(fd, &s);
667                 if (ret) {
668                         TBM_ERR("fstat() failed.\n");
669                         close(fd);
670                         udev_device_unref(drm_device);
671                         udev_unref(udev);
672                         return -1;
673                 }
674
675                 udev_device_unref(drm_device);
676                 udev_unref(udev);
677         }
678
679         return fd;
680 }
681
682 static int
683 _check_render_node(void)
684 {
685 #ifndef USE_RENDER_NODE
686         return 0;
687 #else
688         struct udev *udev = NULL;
689         struct udev_enumerate *e = NULL;
690         struct udev_list_entry *entry = NULL;
691         struct udev_device *device = NULL, *drm_device = NULL, *device_parent = NULL;
692
693         udev = udev_new();
694         if (!udev) {
695                 TBM_ERR("udev_new() failed.\n");
696                 return -1;
697         }
698
699         e = udev_enumerate_new(udev);
700         udev_enumerate_add_match_subsystem(e, "drm");
701         udev_enumerate_add_match_sysname(e, "renderD[0-9]*");
702         udev_enumerate_scan_devices(e);
703
704         udev_list_entry_foreach(entry, udev_enumerate_get_list_entry(e)) {
705                 device = udev_device_new_from_syspath(udev_enumerate_get_udev(e),
706                                                       udev_list_entry_get_name(entry));
707                 device_parent = udev_device_get_parent(device);
708                 /* No need to unref device_parent; device_parent and device share the same refcount. */
709                 if (device_parent) {
710                         if (strcmp(udev_device_get_sysname(device_parent), "vc4-drm") == 0) {
711                                 drm_device = device;
712                                 TBM_DBG("Found render device: '%s' (%s)\n",
713                                     udev_device_get_syspath(drm_device),
714                                     udev_device_get_sysname(device_parent));
715                                 break;
716                         }
717                 }
718                 udev_device_unref(device);
719         }
720
721         udev_enumerate_unref(e);
722         udev_unref(udev);
723
724         if (!drm_device) {
725                 udev_device_unref(drm_device);
726                 return 0;
727         }
728
729         udev_device_unref(drm_device);
730         return 1;
731 #endif
732 }
733
734 static int
735 _get_render_node(void)
736 {
737         struct udev *udev = NULL;
738         struct udev_enumerate *e = NULL;
739         struct udev_list_entry *entry = NULL;
740         struct udev_device *device = NULL, *drm_device = NULL, *device_parent = NULL;
741         const char *filepath;
742         struct stat s;
743         int fd = -1;
744         int ret;
745
746         udev = udev_new();
747         if (!udev) {
748                 TBM_ERR("udev_new() failed.\n");
749                 return -1;
750         }
751
752         e = udev_enumerate_new(udev);
753         udev_enumerate_add_match_subsystem(e, "drm");
754         udev_enumerate_add_match_sysname(e, "renderD[0-9]*");
755         udev_enumerate_scan_devices(e);
756
757         udev_list_entry_foreach(entry, udev_enumerate_get_list_entry(e)) {
758                 device = udev_device_new_from_syspath(udev_enumerate_get_udev(e),
759                                                       udev_list_entry_get_name(entry));
760                 device_parent = udev_device_get_parent(device);
761                 /* No need to unref device_parent; device_parent and device share the same refcount. */
762                 if (device_parent) {
763                         if (strcmp(udev_device_get_sysname(device_parent), "vc4-drm") == 0) {
764                                 drm_device = device;
765                                 TBM_DBG("Found render device: '%s' (%s)\n",
766                                     udev_device_get_syspath(drm_device),
767                                     udev_device_get_sysname(device_parent));
768                                 break;
769                         }
770                 }
771                 udev_device_unref(device);
772         }
773
774         udev_enumerate_unref(e);
775
776         /* Get device file path. */
777         filepath = udev_device_get_devnode(drm_device);
778         if (!filepath) {
779                 TBM_ERR("udev_device_get_devnode() failed.\n");
780                 udev_device_unref(drm_device);
781                 udev_unref(udev);
782                 return -1;
783         }
784
785         /* Open DRM device file and check validity. */
786         fd = open(filepath, O_RDWR | O_CLOEXEC);
787         if (fd < 0) {
788                 TBM_ERR("open(%s, O_RDWR | O_CLOEXEC) failed.\n", filepath);
789                 udev_device_unref(drm_device);
790                 udev_unref(udev);
791                 return -1;
792         }
793
794         ret = fstat(fd, &s);
795         if (ret) {
796                 TBM_ERR("fstat() failed.\n");
797                 udev_device_unref(drm_device);
798                 udev_unref(udev);
799                 close(fd);
800                 return -1;
801         }
802
803         udev_device_unref(drm_device);
804         udev_unref(udev);
805
806         return fd;
807 }
808
809 static unsigned int
810 _get_name(int fd, unsigned int gem)
811 {
812         struct drm_gem_flink arg = {0,};
813
814         arg.handle = gem;
815         if (drmIoctl(fd, DRM_IOCTL_GEM_FLINK, &arg)) {
816                 TBM_ERR("fail to DRM_IOCTL_GEM_FLINK gem:%d\n", gem);
817                 return 0;
818         }
819
820         return (unsigned int)arg.name;
821 }
822
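/* Return the per-device handle of a bo:
 *   DEFAULT/2D -> the GEM handle,
 *   CPU        -> a user-space pointer, mmap()ed on demand via
 *                 DRM_IOCTL_VC4_MMAP_BO,
 *   3D/MM      -> a dma-buf fd, exported on demand via
 *                 DRM_IOCTL_PRIME_HANDLE_TO_FD.
 */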
823 static tbm_bo_handle
824 _vc4_bo_handle(tbm_bo_vc4 bo_vc4, int device)
825 {
826         tbm_bo_handle bo_handle;
827
828         memset(&bo_handle, 0x0, sizeof(bo_handle));
829
830         switch (device) {
831         case TBM_DEVICE_DEFAULT:
832         case TBM_DEVICE_2D:
833                 bo_handle.u32 = (uint32_t)bo_vc4->gem;
834                 break;
835         case TBM_DEVICE_CPU:
836                 if (!bo_vc4->pBase) {
837                         struct drm_vc4_mmap_bo arg = {0, };
838                         void *map = NULL;
839
840                         arg.handle = bo_vc4->gem;
841                         if (drmIoctl(bo_vc4->fd, DRM_IOCTL_VC4_MMAP_BO, &arg)) {
842                                 TBM_ERR("Cannot map_vc4 gem=%d\n", bo_vc4->gem);
843                                 return (tbm_bo_handle) NULL;
844                         }
845
846                         map = mmap(NULL, bo_vc4->size, PROT_READ | PROT_WRITE, MAP_SHARED,
847                                    bo_vc4->fd, arg.offset);
848                         if (map == MAP_FAILED) {
849                                 TBM_ERR("Cannot usrptr gem=%d\n", bo_vc4->gem);
850                                 return (tbm_bo_handle) NULL;
851                         }
852                         bo_vc4->pBase = map;
853                 }
854                 bo_handle.ptr = (void *)bo_vc4->pBase;
855                 break;
856         case TBM_DEVICE_3D:
857         case TBM_DEVICE_MM:
858                 if (!bo_vc4->dmabuf) {
859                         struct drm_prime_handle arg = {0, };
860
861                         arg.handle = bo_vc4->gem;
862                         if (drmIoctl(bo_vc4->fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &arg)) {
863                                 TBM_ERR("Cannot dmabuf=%d\n", bo_vc4->gem);
864                                 return (tbm_bo_handle) NULL;
865                         }
866                         bo_vc4->dmabuf = arg.fd;
867                 }
868
869                 bo_handle.u32 = (uint32_t)bo_vc4->dmabuf;
870                 break;
871         default:
872                 TBM_ERR("Not supported device:%d\n", device);
873                 bo_handle.ptr = (void *) NULL;
874                 break;
875         }
876
877         return bo_handle;
878 }
879
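/* NV12 plane-size helpers following the S5P FIMV (MFC) alignment rules.
 * _new_calc_plane_nv12() rounds the frame up to whole 16x16 macroblocks and,
 * for frames smaller than S5P_FIMV_MAX_FRAME_SIZE, to an even number of
 * macroblock rows.  Example: 1920x1080 -> mbX = 120, mbY = 68, so the plane
 * spans 1920 * 1088 = 2088960 bytes before the extra
 * S5P_FIMV_D_ALIGN_PLANE_SIZE padding and the 4096-byte alignment applied in
 * _new_calc_yplane_nv12().
 */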
880 static int
881 _new_calc_plane_nv12(int width, int height)
882 {
883         int mbX, mbY;
884
885         mbX = DIV_ROUND_UP(width, S5P_FIMV_NUM_PIXELS_IN_MB_ROW);
886         mbY = DIV_ROUND_UP(height, S5P_FIMV_NUM_PIXELS_IN_MB_COL);
887
888         if (width * height < S5P_FIMV_MAX_FRAME_SIZE)
889                 mbY = (mbY + 1) / 2 * 2;
890
891         return ((mbX * S5P_FIMV_NUM_PIXELS_IN_MB_COL) * (mbY *
892                         S5P_FIMV_NUM_PIXELS_IN_MB_ROW));
893 }
894
895 static int
896 _calc_yplane_nv12(int width, int height)
897 {
898         int mbX, mbY;
899
900         mbX = SIZE_ALIGN(width + 24, S5P_FIMV_NV12MT_HALIGN);
901         mbY = SIZE_ALIGN(height + 16, S5P_FIMV_NV12MT_VALIGN);
902
903         return SIZE_ALIGN(mbX * mbY, S5P_FIMV_DEC_BUF_ALIGN);
904 }
905
906 static int
907 _calc_uvplane_nv12(int width, int height)
908 {
909         int mbX, mbY;
910
911         mbX = SIZE_ALIGN(width + 16, S5P_FIMV_NV12MT_HALIGN);
912         mbY = SIZE_ALIGN(height + 4, S5P_FIMV_NV12MT_VALIGN);
913
914         return SIZE_ALIGN((mbX * mbY) >> 1, S5P_FIMV_DEC_BUF_ALIGN);
915 }
916
917 static int
918 _new_calc_yplane_nv12(int width, int height)
919 {
920         return SIZE_ALIGN(_new_calc_plane_nv12(width,
921                                                 height) + S5P_FIMV_D_ALIGN_PLANE_SIZE,
922                            TBM_SURFACE_ALIGNMENT_PLANE_NV12);
923 }
924
925 static int
926 _new_calc_uvplane_nv12(int width, int height)
927 {
928         return SIZE_ALIGN((_new_calc_plane_nv12(width,
929                                                 height) >> 1) + S5P_FIMV_D_ALIGN_PLANE_SIZE,
930                           TBM_SURFACE_ALIGNMENT_PLANE_NV12);
931 }
932
933 static tbm_bufmgr_capability
934 tbm_vc4_bufmgr_get_capabilities(tbm_backend_bufmgr_data *bufmgr_data, tbm_error_e *error)
935 {
936         tbm_bufmgr_capability capabilities = TBM_BUFMGR_CAPABILITY_NONE;
937
938 #ifdef VC4_TILED_FORMAT
939         capabilities = TBM_BUFMGR_CAPABILITY_SHARE_KEY|TBM_BUFMGR_CAPABILITY_SHARE_FD|TBM_BUFMGR_CAPABILITY_TILED_MEMORY;
940 #else
941         capabilities = TBM_BUFMGR_CAPABILITY_SHARE_KEY|TBM_BUFMGR_CAPABILITY_SHARE_FD;
942 #endif
943
944         if (error)
945                 *error = TBM_ERROR_NONE;
946
947         return capabilities;
948 }
949
950 static tbm_error_e
951 tbm_vc4_bufmgr_bind_native_display(tbm_backend_bufmgr_data *bufmgr_data, tbm_native_display *native_display)
952 {
953         tbm_bufmgr_vc4 bufmgr_vc4 = (tbm_bufmgr_vc4)bufmgr_data;
954         TBM_RETURN_VAL_IF_FAIL(bufmgr_vc4 != NULL, TBM_ERROR_INVALID_PARAMETER);
955
956         if (!tbm_drm_helper_wl_auth_server_init(native_display, bufmgr_vc4->fd,
957                                            bufmgr_vc4->device_name, 0)) {
958                 TBM_ERR("fail to tbm_drm_helper_wl_auth_server_init\n");
959                 return TBM_ERROR_INVALID_OPERATION;
960         }
961
962         bufmgr_vc4->bind_display = native_display;
963
964         return TBM_ERROR_NONE;
965 }
966 static tbm_error_e
967 tbm_vc4_bufmgr_get_supported_formats(tbm_backend_bufmgr_data *bufmgr_data,
968                                                         uint32_t **formats, uint32_t *num)
969 {
970         tbm_bufmgr_vc4 bufmgr_vc4 = (tbm_bufmgr_vc4)bufmgr_data;
971         uint32_t *color_formats;
972
973         TBM_RETURN_VAL_IF_FAIL(bufmgr_vc4 != NULL, TBM_ERROR_INVALID_PARAMETER);
974
975         color_formats = (uint32_t *)calloc(1, sizeof(uint32_t) * TBM_COLOR_FORMAT_COUNT);
976         if (color_formats == NULL)
977                 return TBM_ERROR_OUT_OF_MEMORY;
978
979         memcpy(color_formats, tbm_vc4_color_format_list, sizeof(uint32_t)*TBM_COLOR_FORMAT_COUNT);
980
981         *formats = color_formats;
982         *num = TBM_COLOR_FORMAT_COUNT;
983
984         TBM_DBG("supported format count = %d\n", *num);
985
986         return TBM_ERROR_NONE;
987 }
988
989
990 #ifdef VC4_TILED_FORMAT
991 #include <drm_fourcc.h>
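/* Helpers for the VC4 T-tiled layout: a utile is 64 bytes, so its width and
 * height in pixels depend on the bytes per pixel (cpp).  vc4_size_is_lt()
 * selects the LT (linearly laid out tiles) path for small buffers, i.e.
 * anything that is at most 4 utiles wide or 4 utiles tall.
 */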
992 static inline uint32_t
993 vc4_utile_width(int cpp)
994 {
995         switch (cpp) {
996         case 1:
997         case 2:
998                 return 8;
999         case 4:
1000                 return 4;
1001         case 8:
1002                 return 2;
1003         default:
1004                 return 4;
1005         }
1006 }
1007
1008 static inline uint32_t
1009 vc4_utile_height(int cpp)
1010 {
1011         switch (cpp) {
1012         case 1:
1013                 return 8;
1014         case 2:
1015         case 4:
1016         case 8:
1017                 return 4;
1018         default:
1019                 return 4;
1020         }
1021 }
1022
1023 static inline bool
1024 vc4_size_is_lt(uint32_t width, uint32_t height, int cpp)
1025 {
1026         return (width <= 4 * vc4_utile_width(cpp) ||
1027                                 height <= 4 * vc4_utile_height(cpp));
1028 }
1029
1030 static tbm_backend_bo_data *
1031 tbm_vc4_bufmgr_alloc_bo_with_tiled_format(tbm_backend_bufmgr_data *bufmgr_data, int width, int height,
1032                         int cpp, int format, tbm_bo_memory_type flags, int bo_idx, tbm_error_e *err)
1033 {
1034         tbm_bufmgr_vc4 bufmgr_vc4 = (tbm_bufmgr_vc4)bufmgr_data;
1035         tbm_bo_vc4 bo_vc4;
1036         uint32_t utile_w = vc4_utile_width(cpp);
1037         uint32_t utile_h = vc4_utile_height(cpp);
1038         uint32_t level_width, level_height;
1039         int size;
1040         uint32_t stride;
1041
1042
1043         level_width = width;
1044         level_height = height;
1045
1046         if (bufmgr_vc4 == NULL) {
1047                 TBM_ERR("bufmgr_data is null\n");
1048                 return NULL;
1049         }
1050
1051         if (vc4_size_is_lt(level_width, level_height, cpp)) {
1052                 level_width = SIZE_ALIGN(level_width, utile_w);
1053                 level_height = SIZE_ALIGN(level_height, utile_h);
1054         } else {
1055                 level_width = SIZE_ALIGN(level_width,
1056                                         4 * 2 * utile_w);
1057                 level_height = SIZE_ALIGN(level_height,
1058                                         4 * 2 * utile_h);
1059         }
1060
1061         stride = level_width * cpp;
1062
1063         size = level_height * stride;
1064         size = SIZE_ALIGN(size, 4096);
1065
1066
1067         bo_vc4 = calloc(1, sizeof(struct _tbm_bo_vc4));
1068         if (!bo_vc4) {
1069                 TBM_ERR("fail to allocate the bo_vc4 private\n");
1070                 return NULL;
1071         }
1072         bo_vc4->bufmgr_vc4 = bufmgr_vc4;
1073
1074         struct drm_vc4_create_bo arg = {0, };
1075
1076         arg.size = (__u32)size;
1077         arg.flags = flags;/*currently no values for the flags,but it may be used in future extension*/
1078         if (drmIoctl(bufmgr_vc4->fd, DRM_IOCTL_VC4_CREATE_BO, &arg)) {
1079                 TBM_ERR("Cannot create bo_vc4(flag:%x, size:%d)\n", arg.flags,
1080                                         (unsigned int)arg.size);
1081                 free(bo_vc4);
1082                 return NULL;
1083         }
1084
1085         bo_vc4->fd = bufmgr_vc4->fd;
1086         bo_vc4->gem = (unsigned int)arg.handle;
1087         bo_vc4->size = size;
1088         bo_vc4->flags_tbm = flags;
1089         bo_vc4->name = _get_name(bo_vc4->fd, bo_vc4->gem);
1090
1091         if (!_bo_init_cache_state(bufmgr_vc4, bo_vc4, 0)) {
1092                 TBM_ERR("fail init cache state(%d)\n", bo_vc4->name);
1093                 free(bo_vc4);
1094                 return NULL;
1095         }
1096
1097         pthread_mutex_init(&bo_vc4->mutex, NULL);
1098
1099         if (bufmgr_vc4->use_dma_fence && !bo_vc4->dmabuf) {
1100                 struct drm_prime_handle arg = {0, };
1101
1102                 arg.handle = bo_vc4->gem;
1103                 if (drmIoctl(bo_vc4->fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &arg)) {
1104                         TBM_ERR("Cannot dmabuf=%d\n", bo_vc4->gem);
1105                         free(bo_vc4);
1106                         return NULL;
1107                 }
1108                 bo_vc4->dmabuf = arg.fd;
1109         }
1110
1111         //set modifier
1112         uint64_t modifier;
1113         modifier = DRM_FORMAT_MOD_BROADCOM_VC4_T_TILED;
1114         struct drm_vc4_set_tiling set_tiling = {
1115                 .handle = bo_vc4->gem,
1116                 .modifier = modifier,
1117         };
1118         drmIoctl(bo_vc4->fd, DRM_IOCTL_VC4_SET_TILING, &set_tiling);
1119
1120
1121         /* add bo_vc4 to hash */
1122         if (drmHashInsert(bufmgr_vc4->hashBos, bo_vc4->name, (void *)bo_vc4) < 0)
1123           TBM_ERR("Cannot insert bo_vc4 to Hash(%d)\n", bo_vc4->name);
1124
1125         TBM_DBG("     bo_vc4:%p, gem:%d(%d), flags:%d(%d), size:%d\n",
1126                                 bo_vc4,
1127                                 bo_vc4->gem, bo_vc4->name,
1128                                 bo_vc4->flags_tbm,
1129                                 bo_vc4->size);
1130
1131         return (tbm_backend_bo_data *)bo_vc4;
1132 }
1133 #endif
1134
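/* Report the per-plane layout (size, offset, pitch and backing bo index)
 * for a given format and surface size.  RGB formats use a single bo;
 * NV12 packs both planes into bo 0 with MFC-compatible sizes;
 * YUV420/YVU420 place each of the three planes in its own bo (bo_idx
 * 0/1/2).  Pitches are aligned to TBM_SURFACE_ALIGNMENT_PITCH_RGB/_YUV and
 * plane sizes to TBM_SURFACE_ALIGNMENT_PLANE.
 */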
1135 static tbm_error_e
1136 tbm_vc4_bufmgr_get_plane_data(tbm_backend_bufmgr_data *bufmgr_data,
1137                                                         tbm_format format, int plane_idx, int width,
1138                                                         int height, uint32_t *size, uint32_t *offset,
1139                                                         uint32_t *pitch, int *bo_idx)
1140 {
1141         tbm_bufmgr_vc4 bufmgr_vc4 = (tbm_bufmgr_vc4)bufmgr_data;
1142         int bpp;
1143         int _offset = 0;
1144         int _pitch = 0;
1145         int _size = 0;
1146         int _bo_idx = 0;
1147
1148         TBM_RETURN_VAL_IF_FAIL(bufmgr_vc4 != NULL, TBM_ERROR_INVALID_PARAMETER);
1149
1150         switch (format) {
1151                 /* 16 bpp RGB */
1152         case TBM_FORMAT_XRGB4444:
1153         case TBM_FORMAT_XBGR4444:
1154         case TBM_FORMAT_RGBX4444:
1155         case TBM_FORMAT_BGRX4444:
1156         case TBM_FORMAT_ARGB4444:
1157         case TBM_FORMAT_ABGR4444:
1158         case TBM_FORMAT_RGBA4444:
1159         case TBM_FORMAT_BGRA4444:
1160         case TBM_FORMAT_XRGB1555:
1161         case TBM_FORMAT_XBGR1555:
1162         case TBM_FORMAT_RGBX5551:
1163         case TBM_FORMAT_BGRX5551:
1164         case TBM_FORMAT_ARGB1555:
1165         case TBM_FORMAT_ABGR1555:
1166         case TBM_FORMAT_RGBA5551:
1167         case TBM_FORMAT_BGRA5551:
1168         case TBM_FORMAT_RGB565:
1169                 bpp = 16;
1170                 _offset = 0;
1171                 _pitch = SIZE_ALIGN((width * bpp) >> 3, TBM_SURFACE_ALIGNMENT_PITCH_RGB);
1172                 _size = SIZE_ALIGN(_pitch * height, TBM_SURFACE_ALIGNMENT_PLANE);
1173                 _bo_idx = 0;
1174                 break;
1175                 /* 24 bpp RGB */
1176         case TBM_FORMAT_RGB888:
1177         case TBM_FORMAT_BGR888:
1178                 bpp = 24;
1179                 _offset = 0;
1180                 _pitch = SIZE_ALIGN((width * bpp) >> 3, TBM_SURFACE_ALIGNMENT_PITCH_RGB);
1181                 _size = SIZE_ALIGN(_pitch * height, TBM_SURFACE_ALIGNMENT_PLANE);
1182                 _bo_idx = 0;
1183                 break;
1184                 /* 32 bpp RGB */
1185         case TBM_FORMAT_XRGB8888:
1186         case TBM_FORMAT_XBGR8888:
1187         case TBM_FORMAT_RGBX8888:
1188         case TBM_FORMAT_BGRX8888:
1189         case TBM_FORMAT_ARGB8888:
1190         case TBM_FORMAT_ABGR8888:
1191         case TBM_FORMAT_RGBA8888:
1192         case TBM_FORMAT_BGRA8888:
1193                 bpp = 32;
1194                 _offset = 0;
1195 #ifdef VC4_TILED_FORMAT
1196                 if (vc4_size_is_lt(width, height, 4)) {
1197                         width = SIZE_ALIGN(width, vc4_utile_width(4));
1198                         height = SIZE_ALIGN(height, vc4_utile_height(4));
1199
1200                 } else {
1201                         width = SIZE_ALIGN(width, 32);
1202                         uint32_t utile_h = vc4_utile_height(bpp);
1203                         height = SIZE_ALIGN(height, 8*utile_h);
1204                 }
1205 #endif
1206                 _pitch = SIZE_ALIGN((width * bpp) >> 3, TBM_SURFACE_ALIGNMENT_PITCH_RGB);
1207                 _size = SIZE_ALIGN(_pitch * height, TBM_SURFACE_ALIGNMENT_PLANE);
1208                 _bo_idx = 0;
1209                 break;
1210
1211                 /* packed YCbCr */
1212         case TBM_FORMAT_YUYV:
1213         case TBM_FORMAT_YVYU:
1214         case TBM_FORMAT_UYVY:
1215         case TBM_FORMAT_VYUY:
1216         case TBM_FORMAT_AYUV:
1217                 bpp = 32;
1218                 _offset = 0;
1219                 _pitch = SIZE_ALIGN((width * bpp) >> 3, TBM_SURFACE_ALIGNMENT_PITCH_YUV);
1220                 _size = SIZE_ALIGN(_pitch * height, TBM_SURFACE_ALIGNMENT_PLANE);
1221                 _bo_idx = 0;
1222                 break;
1223
1224                 /*
1225                 * 2 plane YCbCr
1226                 * index 0 = Y plane, [7:0] Y
1227                 * index 1 = Cr:Cb plane, [15:0] Cr:Cb little endian
1228                 * or
1229                 * index 1 = Cb:Cr plane, [15:0] Cb:Cr little endian
1230                 */
1231         case TBM_FORMAT_NV12:
1232                 bpp = 12;
1233                 if (plane_idx == 0) {
1234                         _offset = 0;
1235                         _pitch = SIZE_ALIGN(width, TBM_SURFACE_ALIGNMENT_PITCH_YUV);
1236                         _size = MAX(_calc_yplane_nv12(width, height), _new_calc_yplane_nv12(width,
1237                                         height));
1238                         _bo_idx = 0;
1239                 } else if (plane_idx == 1) {
1240                         _offset = MAX(_calc_yplane_nv12(width, height), _new_calc_yplane_nv12(width,
1241                                         height));
1242                         _pitch = SIZE_ALIGN(width, TBM_SURFACE_ALIGNMENT_PITCH_YUV / 2);
1243                         _size = MAX(_calc_uvplane_nv12(width, height), _new_calc_uvplane_nv12(width,
1244                                         height));
1245                         _bo_idx = 0;
1246                 }
1247                 break;
1248         case TBM_FORMAT_NV21:
1249                 bpp = 12;
1250                 if (plane_idx == 0) {
1251                         _offset = 0;
1252                         _pitch = SIZE_ALIGN(width, TBM_SURFACE_ALIGNMENT_PITCH_YUV);
1253                         _size = SIZE_ALIGN(_pitch * height, TBM_SURFACE_ALIGNMENT_PLANE);
1254                         _bo_idx = 0;
1255                 } else if (plane_idx == 1) {
1256                         _offset = width * height;
1257                         _pitch = SIZE_ALIGN(width, TBM_SURFACE_ALIGNMENT_PITCH_YUV / 2);
1258                         _size = SIZE_ALIGN(_pitch * (height / 2), TBM_SURFACE_ALIGNMENT_PLANE);
1259                         _bo_idx = 0;
1260                 }
1261                 break;
1262
1263         case TBM_FORMAT_NV16:
1264         case TBM_FORMAT_NV61:
1265                 bpp = 16;
1266                 /*if(plane_idx == 0)*/
1267                 {
1268                         _offset = 0;
1269                         _pitch = SIZE_ALIGN(width, TBM_SURFACE_ALIGNMENT_PITCH_YUV);
1270                         _size = SIZE_ALIGN(_pitch * height, TBM_SURFACE_ALIGNMENT_PLANE);
1271                         _bo_idx = 0;
1272                         if (plane_idx == 0)
1273                                 break;
1274                 }
1275                 /*else if( plane_idx ==1 )*/
1276                 {
1277                         _offset += _size;
1278                         _pitch = SIZE_ALIGN(width, TBM_SURFACE_ALIGNMENT_PITCH_YUV / 2);
1279                         _size = SIZE_ALIGN(_pitch * height, TBM_SURFACE_ALIGNMENT_PLANE);
1280                         _bo_idx = 0;
1281                 }
1282                 break;
1283
1284                 /*
1285                 * 3 plane YCbCr
1286                 * index 0: Y plane, [7:0] Y
1287                 * index 1: Cb plane, [7:0] Cb
1288                 * index 2: Cr plane, [7:0] Cr
1289                 * or
1290                 * index 1: Cr plane, [7:0] Cr
1291                 * index 2: Cb plane, [7:0] Cb
1292                 */
1293
1294                 /*
1295                 * NATIVE_BUFFER_FORMAT_YV12
1296                 * NATIVE_BUFFER_FORMAT_I420
1297                 */
1298         case TBM_FORMAT_YUV410:
1299         case TBM_FORMAT_YVU410:
1300                 bpp = 9;
1301                 /*if(plane_idx == 0)*/
1302                 {
1303                         _offset = 0;
1304                         _pitch = SIZE_ALIGN(width, TBM_SURFACE_ALIGNMENT_PITCH_YUV);
1305                         _size = SIZE_ALIGN(_pitch * height, TBM_SURFACE_ALIGNMENT_PLANE);
1306                         _bo_idx = 0;
1307                         if (plane_idx == 0)
1308                                 break;
1309                 }
1310                 /*else if(plane_idx == 1)*/
1311                 {
1312                         _offset += _size;
1313                         _pitch = SIZE_ALIGN(width / 4, TBM_SURFACE_ALIGNMENT_PITCH_YUV / 4);
1314                         _size = SIZE_ALIGN(_pitch * (height / 4), TBM_SURFACE_ALIGNMENT_PLANE);
1315                         _bo_idx = 0;
1316                         if (plane_idx == 1)
1317                                 break;
1318                 }
1319                 /*else if (plane_idx == 2)*/
1320                 {
1321                         _offset += _size;
1322                         _pitch = SIZE_ALIGN(width / 4, TBM_SURFACE_ALIGNMENT_PITCH_YUV / 4);
1323                         _size = SIZE_ALIGN(_pitch * (height / 4), TBM_SURFACE_ALIGNMENT_PLANE);
1324                         _bo_idx = 0;
1325                 }
1326                 break;
1327         case TBM_FORMAT_YUV411:
1328         case TBM_FORMAT_YVU411:
1329         case TBM_FORMAT_YUV420:
1330         case TBM_FORMAT_YVU420:
1331                 bpp = 12;
1332                 /*if(plane_idx == 0)*/
1333                 {
1334                         _offset = 0;
1335                         _pitch = SIZE_ALIGN(width, TBM_SURFACE_ALIGNMENT_PITCH_YUV);
1336                         _size = SIZE_ALIGN(_pitch * height, TBM_SURFACE_ALIGNMENT_PLANE);
1337                         _bo_idx = 0;
1338                         if (plane_idx == 0)
1339                                 break;
1340                 }
1341                 /*else if(plane_idx == 1)*/
1342                 {
1343                         _offset = 0;
1344                         _pitch = SIZE_ALIGN(width / 2, TBM_SURFACE_ALIGNMENT_PITCH_YUV / 2);
1345                         _size = SIZE_ALIGN(_pitch * (height / 2), TBM_SURFACE_ALIGNMENT_PLANE);
1346                         _bo_idx = 1;
1347                         if (plane_idx == 1)
1348                                 break;
1349                 }
1350                 /*else if (plane_idx == 2)*/
1351                 {
1352                         _offset = 0;
1353                         _pitch = SIZE_ALIGN(width / 2, TBM_SURFACE_ALIGNMENT_PITCH_YUV / 2);
1354                         _size = SIZE_ALIGN(_pitch * (height / 2), TBM_SURFACE_ALIGNMENT_PLANE);
1355                         _bo_idx = 2;
1356                 }
1357                 break;
1358         case TBM_FORMAT_YUV422:
1359         case TBM_FORMAT_YVU422:
1360                 bpp = 16;
1361                 /*if(plane_idx == 0)*/
1362                 {
1363                         _offset = 0;
1364                         _pitch = SIZE_ALIGN(width, TBM_SURFACE_ALIGNMENT_PITCH_YUV);
1365                         _size = SIZE_ALIGN(_pitch * height, TBM_SURFACE_ALIGNMENT_PLANE);
1366                         _bo_idx = 0;
1367                         if (plane_idx == 0)
1368                                 break;
1369                 }
1370                 /*else if(plane_idx == 1)*/
1371                 {
1372                         _offset += _size;
1373                         _pitch = SIZE_ALIGN(width / 2, TBM_SURFACE_ALIGNMENT_PITCH_YUV / 2);
1374                         _size = SIZE_ALIGN(_pitch * (height), TBM_SURFACE_ALIGNMENT_PLANE);
1375                         _bo_idx = 0;
1376                         if (plane_idx == 1)
1377                                 break;
1378                 }
1379                 /*else if (plane_idx == 2)*/
1380                 {
1381                         _offset += _size;
1382                         _pitch = SIZE_ALIGN(width / 2, TBM_SURFACE_ALIGNMENT_PITCH_YUV / 2);
1383                         _size = SIZE_ALIGN(_pitch * (height), TBM_SURFACE_ALIGNMENT_PLANE);
1384                         _bo_idx = 0;
1385                 }
1386                 break;
1387         case TBM_FORMAT_YUV444:
1388         case TBM_FORMAT_YVU444:
1389                 bpp = 24;
1390                 /*if(plane_idx == 0)*/
1391                 {
1392                         _offset = 0;
1393                         _pitch = SIZE_ALIGN(width, TBM_SURFACE_ALIGNMENT_PITCH_YUV);
1394                         _size = SIZE_ALIGN(_pitch * height, TBM_SURFACE_ALIGNMENT_PLANE);
1395                         _bo_idx = 0;
1396                         if (plane_idx == 0)
1397                                 break;
1398                 }
1399                 /*else if(plane_idx == 1)*/
1400                 {
1401                         _offset += _size;
1402                         _pitch = SIZE_ALIGN(width, TBM_SURFACE_ALIGNMENT_PITCH_YUV);
1403                         _size = SIZE_ALIGN(_pitch * height, TBM_SURFACE_ALIGNMENT_PLANE);
1404                         _bo_idx = 0;
1405                         if (plane_idx == 1)
1406                                 break;
1407                 }
1408                 /*else if (plane_idx == 2)*/
1409                 {
1410                         _offset += _size;
1411                         _pitch = SIZE_ALIGN(width, TBM_SURFACE_ALIGNMENT_PITCH_YUV);
1412                         _size = SIZE_ALIGN(_pitch * height, TBM_SURFACE_ALIGNMENT_PLANE);
1413                         _bo_idx = 0;
1414                 }
1415                 break;
1416         default:
1417                 bpp = 0;
1418                 break;
1419         }
1420
1421         *size = _size;
1422         *offset = _offset;
1423         *pitch = _pitch;
1424         *bo_idx = _bo_idx;
1425
1426         return TBM_ERROR_NONE;
1427 }
1428
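/* Allocate a new bo of 'size' bytes with DRM_IOCTL_VC4_CREATE_BO, look up
 * its flink name, initialize its cache state, export a dma-buf fd up front
 * when dma-fence is in use, and register the bo in the flink-name hash so
 * that later imports of the same buffer resolve to this object.
 */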
1429 static tbm_backend_bo_data *
1430 tbm_vc4_bufmgr_alloc_bo(tbm_backend_bufmgr_data *bufmgr_data, unsigned int size,
1431                                         tbm_bo_memory_type flags, tbm_error_e *error)
1432 {
1433         tbm_bufmgr_vc4 bufmgr_vc4 = (tbm_bufmgr_vc4)bufmgr_data;
1434         tbm_bo_vc4 bo_vc4;
1435
1436         if (bufmgr_vc4 == NULL) {
1437                 TBM_ERR("bufmgr_data is null\n");
1438                 if (error)
1439                         *error = TBM_ERROR_INVALID_PARAMETER;
1440                 return NULL;
1441         }
1442
1443         bo_vc4 = calloc(1, sizeof(struct _tbm_bo_vc4));
1444         if (!bo_vc4) {
1445                 TBM_ERR("fail to allocate the bo_vc4 private\n");
1446                 if (error)
1447                         *error = TBM_ERROR_OUT_OF_MEMORY;
1448                 return NULL;
1449         }
1450         bo_vc4->bufmgr_vc4 = bufmgr_vc4;
1451
1452         struct drm_vc4_create_bo arg = {0, };
1453
1454         arg.size = (__u32)size;
1455         arg.flags = flags;/*currently no values for the flags,but it may be used in future extension*/
1456         if (drmIoctl(bufmgr_vc4->fd, DRM_IOCTL_VC4_CREATE_BO, &arg)) {
1457                 TBM_ERR("Cannot create bo_vc4(flag:%x, size:%d)\n", arg.flags,
1458                                (unsigned int)arg.size);
1459                 free(bo_vc4);
1460                 if (error)
1461                         *error = TBM_ERROR_INVALID_OPERATION;
1462                 return NULL;
1463         }
1464
1465         bo_vc4->fd = bufmgr_vc4->fd;
1466         bo_vc4->gem = (unsigned int)arg.handle;
1467         bo_vc4->size = size;
1468         bo_vc4->flags_tbm = flags;
1469         bo_vc4->name = _get_name(bo_vc4->fd, bo_vc4->gem);
1470
1471         if (!_bo_init_cache_state(bufmgr_vc4, bo_vc4, 0)) {
1472                 TBM_ERR("fail init cache state(%d)\n", bo_vc4->name);
1473                 free(bo_vc4);
1474                 if (error)
1475                         *error = TBM_ERROR_INVALID_OPERATION;
1476                 return NULL;
1477         }
1478
1479         pthread_mutex_init(&bo_vc4->mutex, NULL);
1480
1481         if (bufmgr_vc4->use_dma_fence && !bo_vc4->dmabuf) {
1482                 struct drm_prime_handle arg = {0, };
1483
1484                 arg.handle = bo_vc4->gem;
1485                 if (drmIoctl(bo_vc4->fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &arg)) {
1486                         TBM_ERR("Cannot dmabuf=%d\n", bo_vc4->gem);
1487                         free(bo_vc4);
1488                         if (error)
1489                                 *error = TBM_ERROR_INVALID_OPERATION;
1490                         return NULL;
1491                 }
1492                 bo_vc4->dmabuf = arg.fd;
1493         }
1494
1495         /* add bo_vc4 to hash */
1496         if (drmHashInsert(bufmgr_vc4->hashBos, bo_vc4->name, (void *)bo_vc4) < 0)
1497                 TBM_ERR("Cannot insert bo_vc4 to Hash(%d)\n", bo_vc4->name);
1498
1499         TBM_DBG("     bo_vc4:%p, gem:%d(%d), flags:%d(%d), size:%d\n",
1500                 bo_vc4,
1501                 bo_vc4->gem, bo_vc4->name,
1502             bo_vc4->flags_tbm,
1503                 bo_vc4->size);
1504
1505         if (error)
1506                 *error = TBM_ERROR_NONE;
1507
1508         return (tbm_backend_bo_data *)bo_vc4;
1509 }
1510
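/* Import a bo from a dma-buf fd: convert the fd to a GEM handle with
 * DRM_IOCTL_PRIME_FD_TO_HANDLE, derive its flink name and return the
 * existing bo if that name is already in the hash.  Otherwise determine the
 * size (lseek() on the prime fd, falling back to DRM_IOCTL_GEM_OPEN) and
 * wrap the handle in a new tbm_bo_vc4.
 */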
1511 static tbm_backend_bo_data *
1512 tbm_vc4_bufmgr_import_fd(tbm_backend_bufmgr_data *bufmgr_data, tbm_fd key, tbm_error_e *error)
1513 {
1514         tbm_bufmgr_vc4 bufmgr_vc4 = (tbm_bufmgr_vc4)bufmgr_data;
1515         tbm_bo_vc4 bo_vc4;
1516         unsigned int gem = 0;
1517         unsigned int name;
1518         int ret;
1519         char buf[STRERR_BUFSIZE];
1520
1521         if (bufmgr_vc4 == NULL) {
1522                 TBM_ERR("bufmgr_data is null\n");
1523                 if (error)
1524                         *error = TBM_ERROR_INVALID_PARAMETER;
1525                 return NULL;
1526         }
1527
1528         /*getting handle from fd*/
1529         struct drm_prime_handle arg = {0, };
1530
1531         arg.fd = key;
1532         arg.flags = 0;
1533         if (drmIoctl(bufmgr_vc4->fd, DRM_IOCTL_PRIME_FD_TO_HANDLE, &arg)) {
1534                 TBM_ERR("Cannot get gem handle from fd:%d (%s)\n",
1535                                arg.fd, strerror_r(errno, buf, STRERR_BUFSIZE));
1536                 if (error)
1537                         *error = TBM_ERROR_INVALID_OPERATION;
1538                 return NULL;
1539         }
1540         gem = arg.handle;
1541
1542         name = _get_name(bufmgr_vc4->fd, gem);
1543         if (!name) {
1544                 TBM_ERR("Cannot get name from gem:%d, fd:%d (%s)\n",
1545                                gem, key, strerror_r(errno, buf, STRERR_BUFSIZE));
1546                 if (error)
1547                         *error = TBM_ERROR_INVALID_OPERATION;
1548                 return NULL;
1549         }
1550
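              /* Reuse an already-tracked bo with the same flink name instead of
               * wrapping the same buffer twice.
               */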
1551         ret = drmHashLookup(bufmgr_vc4->hashBos, name, (void **)&bo_vc4);
1552         if (ret == 0) {
1553                 if (gem == bo_vc4->gem) {
1554                         if (error)
1555                                 *error = TBM_ERROR_NONE;
1556                         return (tbm_backend_bo_data *)bo_vc4;
1557                 }
1558         }
1559
1560         /* Determine size of bo_vc4.  The fd-to-handle ioctl really should
1561          * return the size, but it doesn't.  If we have kernel 3.12 or
1562          * later, we can lseek on the prime fd to get the size.  Older
1563          * kernels will just fail, in which case we fall back to the
1564          * size reported by the GEM_OPEN ioctl below.
1565          */
1566         unsigned int real_size = -1;
1567         struct drm_gem_open open_arg = {0, };
1568
1569         real_size = lseek(key, 0, SEEK_END);
1570
1571         open_arg.name = name;
1572         if (drmIoctl(bufmgr_vc4->fd, DRM_IOCTL_GEM_OPEN, &open_arg)) {
1573                 TBM_ERR("Cannot get gem info from gem:%d, fd:%d (%s)\n",
1574                                gem, key, strerror_r(errno, buf, STRERR_BUFSIZE));
1575                 if (error)
1576                         *error = TBM_ERROR_INVALID_OPERATION;
1577                 return NULL;
1578         }
1579
1580         /* Free gem handle to avoid a memory leak*/
1581         struct drm_gem_close close_arg = {0, };
1582         memset(&close_arg, 0, sizeof(close_arg));
1583         close_arg.handle = open_arg.handle;
1584         if (drmIoctl(bufmgr_vc4->fd, DRM_IOCTL_GEM_CLOSE, &close_arg)) {
1585                 TBM_ERR("Cannot close gem_handle. (%s)\n",
1586                               strerror_r(errno, buf, STRERR_BUFSIZE));
1587                 if (error)
1588                         *error = TBM_ERROR_INVALID_OPERATION;
1589                 return NULL;
1590         }
1591
1592         if (real_size == -1)
1593                 real_size = open_arg.size;
1594
1595         bo_vc4 = calloc(1, sizeof(struct _tbm_bo_vc4));
1596         if (!bo_vc4) {
1597                 TBM_ERR("fail to allocate the bo_vc4 private\n");
1598                 if (error)
1599                         *error = TBM_ERROR_OUT_OF_MEMORY;
1600                 return NULL;
1601         }
1602         bo_vc4->bufmgr_vc4 = bufmgr_vc4;
1603
1604         bo_vc4->fd = bufmgr_vc4->fd;
1605         bo_vc4->gem = gem;
1606         bo_vc4->size = real_size;
1607         bo_vc4->name = name;
1608         bo_vc4->flags_tbm = 0;
1609
1610 #ifdef VC4_TILED_FORMAT
1611         struct drm_vc4_get_tiling get_tiling = {
1612                 .handle = bo_vc4->gem,
1613         };
1614         drmIoctl(bo_vc4->fd, DRM_IOCTL_VC4_GET_TILING, &get_tiling);
1615
1616         if (get_tiling.modifier == DRM_FORMAT_MOD_BROADCOM_VC4_T_TILED)
1617                 bo_vc4->flags_tbm |= TBM_BO_TILED;
1618 #endif
1619
1620         if (!_bo_init_cache_state(bufmgr_vc4, bo_vc4, 1)) {
1621                 TBM_ERR("fail init cache state(%d)\n", bo_vc4->name);
1622                 free(bo_vc4);
1623                 if (error)
1624                         *error = TBM_ERROR_INVALID_OPERATION;
1625                 return NULL;
1626         }
1627
1628         /* add bo_vc4 to hash */
1629         if (drmHashInsert(bufmgr_vc4->hashBos, bo_vc4->name, (void *)bo_vc4) < 0)
1630                 TBM_ERR("bo_vc4:%p Cannot insert bo_vc4 to Hash(%d) from gem:%d, fd:%d\n",
1631                                bo_vc4, bo_vc4->name, gem, key);
1632
1633         TBM_DBG(" bo_vc4:%p, gem:%d(%d), fd:%d, key_fd:%d, flags:%d, size:%d\n",
1634             bo_vc4,
1635             bo_vc4->gem, bo_vc4->name,
1636             bo_vc4->dmabuf,
1637             key,
1638             bo_vc4->flags_tbm,
1639             bo_vc4->size);
1640
1641         if (error)
1642                 *error = TBM_ERROR_NONE;
1643
1644         return (tbm_backend_bo_data *)bo_vc4;
1645 }
1646
1647 static tbm_backend_bo_data *
1648 tbm_vc4_bufmgr_import_key(tbm_backend_bufmgr_data *bufmgr_data, tbm_key key, tbm_error_e *error)
1649 {
1650         tbm_bufmgr_vc4 bufmgr_vc4 = (tbm_bufmgr_vc4)bufmgr_data;
1651         tbm_bo_vc4 bo_vc4;
1652         int ret;
1653
1654         if (bufmgr_vc4 == NULL) {
1655                 TBM_ERR("bufmgr_data is null\n");
1656                 if (error)
1657                         *error = TBM_ERROR_INVALID_PARAMETER;
1658                 return NULL;
1659         }
1660
1661         ret = drmHashLookup(bufmgr_vc4->hashBos, key, (void **)&bo_vc4);
1662         if (ret == 0) {
1663                 if (error)
1664                         *error = TBM_ERROR_NONE;
1665                 return (tbm_backend_bo_data *)bo_vc4;
1666         }
1667
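              /* Open the flink name to obtain a local GEM handle; GEM_OPEN also
               * reports the buffer size.
               */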
1668         struct drm_gem_open arg = {0, };
1669
1670         arg.name = key;
1671         if (drmIoctl(bufmgr_vc4->fd, DRM_IOCTL_GEM_OPEN, &arg)) {
1672                 TBM_ERR("Cannot open gem name=%d\n", key);
1673                 if (error)
1674                         *error = TBM_ERROR_INVALID_OPERATION;
1675                 return NULL;
1676         }
1677
1678         bo_vc4 = calloc(1, sizeof(struct _tbm_bo_vc4));
1679         if (!bo_vc4) {
1680                 TBM_ERR("fail to allocate the bo_vc4 private\n");
1681                 if (error)
1682                         *error = TBM_ERROR_OUT_OF_MEMORY;
1683                 return NULL;
1684         }
1685         bo_vc4->bufmgr_vc4 = bufmgr_vc4;
1686
1687         bo_vc4->fd = bufmgr_vc4->fd;
1688         bo_vc4->gem = arg.handle;
1689         bo_vc4->size = arg.size;
1690         bo_vc4->name = key;
1691         bo_vc4->flags_tbm = 0;
1692
1693 #ifdef VC4_TILED_FORMAT
1694         struct drm_vc4_get_tiling get_tiling = {
1695                 .handle = bo_vc4->gem,
1696         };
1697         drmIoctl(bo_vc4->fd, DRM_IOCTL_VC4_GET_TILING, &get_tiling);
1698
1699         if (get_tiling.modifier == DRM_FORMAT_MOD_BROADCOM_VC4_T_TILED)
1700                 bo_vc4->flags_tbm |= TBM_BO_TILED;
1701 #endif
1702
1703         if (!_bo_init_cache_state(bufmgr_vc4, bo_vc4, 1)) {
1704                 TBM_ERR("fail init cache state(%d)\n", bo_vc4->name);
1705                 free(bo_vc4);
1706                 if (error)
1707                         *error = TBM_ERROR_INVALID_OPERATION;
1708                 return NULL;
1709         }
1710
1711         if (!bo_vc4->dmabuf) {
1712                 struct drm_prime_handle arg = {0, };
1713
1714                 arg.handle = bo_vc4->gem;
1715                 if (drmIoctl(bo_vc4->fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &arg)) {
1716                         TBM_ERR("fail to DRM_IOCTL_PRIME_HANDLE_TO_FD gem=%d\n", bo_vc4->gem);
1717                         if (error)
1718                                 *error = TBM_ERROR_INVALID_OPERATION;
1719                         free(bo_vc4);
1720                         return NULL;
1721                 }
1722                 bo_vc4->dmabuf = arg.fd;
1723         }
1724
1725         /* add bo_vc4 to hash */
1726         if (drmHashInsert(bufmgr_vc4->hashBos, bo_vc4->name, (void *)bo_vc4) < 0)
1727                 TBM_ERR("Cannot insert bo_vc4 to Hash(%d)\n", bo_vc4->name);
1728
1729         TBM_DBG("    bo_vc4:%p, gem:%d(%d), fd:%d, flags:%d, size:%d\n",
1730             bo_vc4,
1731             bo_vc4->gem, bo_vc4->name,
1732             bo_vc4->dmabuf,
1733             bo_vc4->flags_tbm,
1734             bo_vc4->size);
1735
1736         if (error)
1737                 *error = TBM_ERROR_NONE;
1738
1739         return (tbm_backend_bo_data *)bo_vc4;
1740 }
1741
1742 static void
1743 tbm_vc4_bo_free(tbm_backend_bo_data *bo_data)
1744 {
1745         tbm_bo_vc4 bo_vc4 = (tbm_bo_vc4)bo_data;
1746         tbm_bo_vc4 temp;
1747         tbm_bufmgr_vc4 bufmgr_vc4;
1748         char buf[STRERR_BUFSIZE];
1749         int ret;
1750
1751         if (!bo_data)
1752                 return;
1753
1754         bufmgr_vc4 = bo_vc4->bufmgr_vc4;
1755         if (!bufmgr_vc4)
1756                 return;
1757
1758         TBM_DBG("      bo_vc4:%p, gem:%d(%d), fd:%d, size:%d\n",
1759             bo_vc4,
1760             bo_vc4->gem, bo_vc4->name,
1761             bo_vc4->dmabuf,
1762             bo_vc4->size);
1763
1764         if (bo_vc4->pBase) {
1765                 if (munmap(bo_vc4->pBase, bo_vc4->size) == -1) {
1766                         TBM_ERR("bo_vc4:%p fail to munmap(%s)\n",
1767                                        bo_vc4, strerror_r(errno, buf, STRERR_BUFSIZE));
1768                 }
1769         }
1770
1771         /* close dmabuf */
1772         if (bo_vc4->dmabuf) {
1773                 close(bo_vc4->dmabuf);
1774                 bo_vc4->dmabuf = 0;
1775         }
1776
1777         /* delete bo_vc4 from hash */
1778         ret = drmHashLookup(bufmgr_vc4->hashBos, bo_vc4->name, (void **)&temp);
1779         if (ret == 0) {
1780                 drmHashDelete(bufmgr_vc4->hashBos, bo_vc4->name);
1781                 if (temp != bo_vc4)
1782                         TBM_ERR("hashBos probably has several BOs with same name!!!\n");
1783         } else {
1784                 TBM_ERR("Cannot find bo_vc4 to Hash(%d), ret=%d\n", bo_vc4->name, ret);
1785         }
1786
1787         _bo_destroy_cache_state(bufmgr_vc4, bo_vc4);
1788
1789         /* Free gem handle */
1790         struct drm_gem_close arg = {0, };
1791
1792         memset(&arg, 0, sizeof(arg));
1793         arg.handle = bo_vc4->gem;
1794         if (drmIoctl(bo_vc4->fd, DRM_IOCTL_GEM_CLOSE, &arg))
1795                 TBM_ERR("bo_vc4:%p fail to gem close.(%s)\n",
1796                                bo_vc4, strerror_r(errno, buf, STRERR_BUFSIZE));
1797
1798         free(bo_vc4);
1799 }
1800
1801 static int
1802 tbm_vc4_bo_get_size(tbm_backend_bo_data *bo_data, tbm_error_e *error)
1803 {
1804         tbm_bo_vc4 bo_vc4 = (tbm_bo_vc4)bo_data;
1805
1806         if (!bo_vc4) {
1807                 if (error)
1808                         *error = TBM_ERROR_INVALID_PARAMETER;
1809                 return 0;
1810         }
1811
1812         if (error)
1813                 *error = TBM_ERROR_NONE;
1814
1815         return bo_vc4->size;
1816 }
1817
1818 static tbm_bo_memory_type
1819 tbm_vc4_bo_get_memory_type(tbm_backend_bo_data *bo_data, tbm_error_e *error)
1820 {
1821         tbm_bo_vc4 bo_vc4 = (tbm_bo_vc4)bo_data;
1822
1823         if (!bo_vc4) {
1824                 if (error)
1825                         *error = TBM_ERROR_INVALID_PARAMETER;
1826                 return TBM_BO_DEFAULT;
1827         }
1828
1829         if (error)
1830                 *error = TBM_ERROR_NONE;
1831
1832         return bo_vc4->flags_tbm;
1833 }
1834
1835 static tbm_bo_handle
1836 tbm_vc4_bo_get_handle(tbm_backend_bo_data *bo_data, tbm_bo_device_type device, tbm_error_e *error)
1837 {
1838         tbm_bo_vc4 bo_vc4 = (tbm_bo_vc4)bo_data;
1839         tbm_bo_handle bo_handle;
1840
1841         if (!bo_vc4) {
1842                 if (error)
1843                         *error = TBM_ERROR_INVALID_PARAMETER;
1844                 return (tbm_bo_handle) NULL;
1845         }
1846
1847         if (!bo_vc4->gem) {
1848                 TBM_ERR("Cannot map gem=%d\n", bo_vc4->gem);
1849                 if (error)
1850                         *error = TBM_ERROR_INVALID_PARAMETER;
1851                 return (tbm_bo_handle) NULL;
1852         }
1853
1854         TBM_DBG("bo_vc4:%p, gem:%d(%d), fd:%d, flags:%d, size:%d, %s\n",
1855             bo_vc4,
1856             bo_vc4->gem, bo_vc4->name,
1857             bo_vc4->dmabuf,
1858             bo_vc4->flags_tbm,
1859             bo_vc4->size,
1860             STR_DEVICE[device]);
1861
1862         /*Get mapped bo_handle*/
1863         bo_handle = _vc4_bo_handle(bo_vc4, device);
1864         if (bo_handle.ptr == NULL) {
1865                 TBM_ERR("Cannot get handle: gem:%d, device:%d\n",
1866                         bo_vc4->gem, device);
1867                 if (error)
1868                         *error = TBM_ERROR_INVALID_OPERATION;
1869                 return (tbm_bo_handle) NULL;
1870         }
1871
1872         if (error)
1873                 *error = TBM_ERROR_NONE;
1874
1875         return bo_handle;
1876 }
1877
1878 static tbm_bo_handle
1879 tbm_vc4_bo_map(tbm_backend_bo_data *bo_data, tbm_bo_device_type device,
1880                                 tbm_bo_access_option opt, tbm_error_e *error)
1881 {
1882         tbm_bo_vc4 bo_vc4 = (tbm_bo_vc4)bo_data;
1883         tbm_bo_handle bo_handle;
1884         tbm_bufmgr_vc4 bufmgr_vc4;
1885
1886         if (!bo_vc4) {
1887                 if (error)
1888                         *error = TBM_ERROR_INVALID_PARAMETER;
1889                 return (tbm_bo_handle) NULL;
1890         }
1891
1892         bufmgr_vc4 = bo_vc4->bufmgr_vc4;
1893         if (!bufmgr_vc4) {
1894                 if (error)
1895                         *error = TBM_ERROR_INVALID_PARAMETER;
1896                 return (tbm_bo_handle) NULL;
1897         }
1898
1899         if (!bo_vc4->gem) {
1900                 TBM_ERR("Cannot map gem=%d\n", bo_vc4->gem);
1901                 if (error)
1902                         *error = TBM_ERROR_INVALID_PARAMETER;
1903                 return (tbm_bo_handle) NULL;
1904         }
1905
1906         TBM_DBG("       bo_vc4:%p, gem:%d(%d), fd:%d, %s, %s\n",
1907             bo_vc4,
1908             bo_vc4->gem, bo_vc4->name,
1909             bo_vc4->dmabuf,
1910             STR_DEVICE[device],
1911             STR_OPT[opt]);
1912
1913         /*Get mapped bo_handle*/
1914         bo_handle = _vc4_bo_handle(bo_vc4, device);
1915         if (bo_handle.ptr == NULL) {
1916                 TBM_ERR("Cannot get handle: gem:%d, device:%d, opt:%d\n",
1917                                bo_vc4->gem, device, opt);
1918                 if (error)
1919                         *error = TBM_ERROR_INVALID_OPERATION;
1920                 return (tbm_bo_handle) NULL;
1921         }
1922
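              /* Only the first map in a nest records the cache state for this
               * device/option combination.
               */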
1923         if (bo_vc4->map_cnt == 0)
1924                 _bo_set_cache_state(bufmgr_vc4, bo_vc4, device, opt);
1925
1926         bo_vc4->last_map_device = device;
1927
1928         bo_vc4->map_cnt++;
1929
1930         if (error)
1931                 *error = TBM_ERROR_NONE;
1932
1933         return bo_handle;
1934 }
1935
1936 static tbm_error_e
1937 tbm_vc4_bo_unmap(tbm_backend_bo_data *bo_data)
1938 {
1939         tbm_bo_vc4 bo_vc4 = (tbm_bo_vc4)bo_data;
1940         tbm_bufmgr_vc4 bufmgr_vc4;
1941
1942         if (!bo_vc4)
1943                 return TBM_ERROR_INVALID_PARAMETER;
1944
1945         bufmgr_vc4 = bo_vc4->bufmgr_vc4;
1946         if (!bufmgr_vc4)
1947                 return TBM_ERROR_INVALID_PARAMETER;
1948
1949         if (!bo_vc4->gem)
1950                 return TBM_ERROR_INVALID_PARAMETER;
1951
1952         bo_vc4->map_cnt--;
1953
1954         if (bo_vc4->map_cnt == 0)
1955                 _bo_save_cache_state(bufmgr_vc4, bo_vc4);
1956
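              /* With cache control enabled, flush the CPU caches after a CPU map so
               * that later device access sees the CPU writes.
               */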
1957 #ifdef ENABLE_CACHECRTL
1958         if (bo_vc4->last_map_device == TBM_DEVICE_CPU)
1959                 _vc4_cache_flush(bufmgr_vc4, bo_vc4, TBM_VC4_CACHE_FLUSH_ALL);
1960 #endif
1961
1962         bo_vc4->last_map_device = -1;
1963
1964         TBM_DBG("     bo_vc4:%p, gem:%d(%d), fd:%d\n",
1965             bo_vc4,
1966             bo_vc4->gem, bo_vc4->name,
1967             bo_vc4->dmabuf);
1968
1969         return TBM_ERROR_NONE;
1970 }
1971
1972 static tbm_error_e
1973 tbm_vc4_bo_lock(tbm_backend_bo_data *bo_data, tbm_bo_device_type device,
1974                                 tbm_bo_access_option opt)
1975 {
1976 #ifndef ALWAYS_BACKEND_CTRL
1977         tbm_bo_vc4 bo_vc4 = (tbm_bo_vc4)bo_data;
1978         tbm_bufmgr_vc4 bufmgr_vc4;
1979         struct dma_buf_fence fence;
1980         struct flock filelock;
1981         int ret = 0;
1982         char buf[STRERR_BUFSIZE];
1983
1984         if (!bo_vc4)
1985                 return TBM_ERROR_INVALID_PARAMETER;
1986
1987         bufmgr_vc4 = bo_vc4->bufmgr_vc4;
1988         if (!bufmgr_vc4)
1989                 return TBM_ERROR_INVALID_PARAMETER;
1990
1991         if (device != TBM_DEVICE_3D && device != TBM_DEVICE_CPU) {
1992                 TBM_DBG("Unsupported device type.\n");
1993                 return TBM_ERROR_INVALID_OPERATION;
1994         }
1995
1996         memset(&fence, 0, sizeof(struct dma_buf_fence));
1997
1998         /* Check if the given type is valid or not. */
1999         if (opt & TBM_OPTION_WRITE) {
2000                 if (device == TBM_DEVICE_3D)
2001                         fence.type = DMA_BUF_ACCESS_WRITE | DMA_BUF_ACCESS_DMA;
2002         } else if (opt & TBM_OPTION_READ) {
2003                 if (device == TBM_DEVICE_3D)
2004                         fence.type = DMA_BUF_ACCESS_READ | DMA_BUF_ACCESS_DMA;
2005         } else {
2006                 TBM_ERR("Invalid argument\n");
2007                 return TBM_ERROR_INVALID_PARAMETER;
2008         }
2009
2010         /* Check if the tbm manager supports dma fence or not. */
2011         if (!bufmgr_vc4->use_dma_fence) {
2012                 TBM_ERR("DMA FENCE is not supported(%s)\n",
2013                                strerror_r(errno, buf, STRERR_BUFSIZE));
2014                 return TBM_ERROR_INVALID_OPERATION;
2015         }
2016
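              /* 3D access is serialized with a dma-buf fence; CPU access falls back
               * to an advisory file lock on the dma-buf fd.
               */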
2017         if (device == TBM_DEVICE_3D) {
2018                 ret = ioctl(bo_vc4->dmabuf, DMABUF_IOCTL_GET_FENCE, &fence);
2019                 if (ret < 0) {
2020                         TBM_ERR("Cannot set GET FENCE(%s)\n", strerror_r(errno, buf, STRERR_BUFSIZE));
2021                         return TBM_ERROR_INVALID_OPERATION;
2022                 }
2023         } else {
2024                 if (opt & TBM_OPTION_WRITE)
2025                         filelock.l_type = F_WRLCK;
2026                 else
2027                         filelock.l_type = F_RDLCK;
2028
2029                 filelock.l_whence = SEEK_CUR;
2030                 filelock.l_start = 0;
2031                 filelock.l_len = 0;
2032
2033                 if (-1 == fcntl(bo_vc4->dmabuf, F_SETLKW, &filelock))
2034                         return TBM_ERROR_INVALID_OPERATION;
2035         }
2036
2037         pthread_mutex_lock(&bo_vc4->mutex);
2038
2039         if (device == TBM_DEVICE_3D) {
2040                 int i;
2041
2042                 for (i = 0; i < DMA_FENCE_LIST_MAX; i++) {
2043                         if (bo_vc4->dma_fence[i].ctx == 0) {
2044                                 bo_vc4->dma_fence[i].type = fence.type;
2045                                 bo_vc4->dma_fence[i].ctx = fence.ctx;
2046                                 break;
2047                         }
2048                 }
2049
2050                 if (i == DMA_FENCE_LIST_MAX) {
2051                         /*TODO: if dma_fence list is full, it needs realloc. I will fix this. by minseok3.kim*/
2052                         TBM_ERR("fence list is full\n");
2053                 }
2054         }
2055
2056         pthread_mutex_unlock(&bo_vc4->mutex);
2057
2058         TBM_DBG("DMABUF_IOCTL_GET_FENCE! bo_vc4:%p, gem:%d(%d), fd:%d\n",
2059             bo_vc4,
2060             bo_vc4->gem, bo_vc4->name,
2061             bo_vc4->dmabuf);
2062 #endif /* ALWAYS_BACKEND_CTRL */
2063
2064         return TBM_ERROR_NONE;
2065 }
2066
2067 static tbm_error_e
2068 tbm_vc4_bo_unlock(tbm_backend_bo_data *bo_data)
2069 {
2070 #ifndef ALWAYS_BACKEND_CTRL
2071         tbm_bo_vc4 bo_vc4 = (tbm_bo_vc4)bo_data;
2072         struct dma_buf_fence fence;
2073         struct flock filelock;
2074         unsigned int dma_type = 0;
2075         int ret = 0;
2076         char buf[STRERR_BUFSIZE];
2077
2078         tbm_bufmgr_vc4 bufmgr_vc4 = bo_vc4->bufmgr_vc4;
2079         if (!bufmgr_vc4)
2080                 return TBM_ERROR_INVALID_PARAMETER;
2081
2082         if (bo_vc4->dma_fence[0].type & DMA_BUF_ACCESS_DMA)
2083                 dma_type = 1;
2084
2085         if (!bo_vc4->dma_fence[0].ctx && dma_type) {
2086                 TBM_DBG("FENCE not supported or ignored.\n");
2087                 return TBM_ERROR_INVALID_OPERATION;
2088         }
2089
2090         if (!bo_vc4->dma_fence[0].type && dma_type) {
2091                 TBM_DBG("device type is not 3D/CPU.\n");
2092                 return TBM_ERROR_INVALID_OPERATION;
2093         }
2094
2095         pthread_mutex_lock(&bo_vc4->mutex);
2096
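              /* Pop the oldest fence from the per-bo list and shift the remaining
               * entries down.
               */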
2097         if (dma_type) {
2098                 fence.type = bo_vc4->dma_fence[0].type;
2099                 fence.ctx = bo_vc4->dma_fence[0].ctx;
2100                 int i;
2101
2102                 for (i = 1; i < DMA_FENCE_LIST_MAX; i++) {
2103                         bo_vc4->dma_fence[i - 1].type = bo_vc4->dma_fence[i].type;
2104                         bo_vc4->dma_fence[i - 1].ctx = bo_vc4->dma_fence[i].ctx;
2105                 }
2106                 bo_vc4->dma_fence[DMA_FENCE_LIST_MAX - 1].type = 0;
2107                 bo_vc4->dma_fence[DMA_FENCE_LIST_MAX - 1].ctx = 0;
2108         }
2109
2110         pthread_mutex_unlock(&bo_vc4->mutex);
2111
2112         if (dma_type) {
2113                 ret = ioctl(bo_vc4->dmabuf, DMABUF_IOCTL_PUT_FENCE, &fence);
2114                 if (ret < 0) {
2115                         TBM_ERR("Cannot set PUT FENCE(%s)\n", strerror_r(errno, buf, STRERR_BUFSIZE));
2116                         return TBM_ERROR_INVALID_OPERATION;
2117                 }
2118         } else {
2119                 filelock.l_type = F_UNLCK;
2120                 filelock.l_whence = SEEK_CUR;
2121                 filelock.l_start = 0;
2122                 filelock.l_len = 0;
2123
2124                 if (-1 == fcntl(bo_vc4->dmabuf, F_SETLKW, &filelock))
2125                         return TBM_ERROR_INVALID_OPERATION;
2126         }
2127
2128         TBM_DBG("DMABUF_IOCTL_PUT_FENCE! bo_vc4:%p, gem:%d(%d), fd:%d\n",
2129             bo_vc4,
2130             bo_vc4->gem, bo_vc4->name,
2131             bo_vc4->dmabuf);
2132 #endif /* ALWAYS_BACKEND_CTRL */
2133
2134         return TBM_ERROR_NONE;
2135 }
2136
2137 static tbm_fd
2138 tbm_vc4_bo_export_fd(tbm_backend_bo_data *bo_data, tbm_error_e *error)
2139 {
2140         tbm_bo_vc4 bo_vc4 = (tbm_bo_vc4)bo_data;
2141         int ret;
2142         char buf[STRERR_BUFSIZE];
2143
2144         if (!bo_vc4) {
2145                 if (error)
2146                         *error = TBM_ERROR_INVALID_PARAMETER;
2147                 return -1;
2148         }
2149
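              /* Export the GEM handle as a new dma-buf fd; the caller owns the
               * returned fd.
               */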
2150         struct drm_prime_handle arg = {0, };
2151
2152         arg.handle = bo_vc4->gem;
2153         ret = drmIoctl(bo_vc4->fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &arg);
2154         if (ret) {
2155                 TBM_ERR("bo_vc4:%p Cannot dmabuf=%d (%s)\n",
2156                                bo_vc4, bo_vc4->gem, strerror_r(errno, buf, STRERR_BUFSIZE));
2157                 if (error)
2158                         *error = TBM_ERROR_INVALID_OPERATION;
2159                 return (tbm_fd) ret;
2160         }
2161
2162         TBM_DBG(" bo_vc4:%p, gem:%d(%d), fd:%d, key_fd:%d, flags:%d, size:%d\n",
2163             bo_vc4,
2164             bo_vc4->gem, bo_vc4->name,
2165             bo_vc4->dmabuf,
2166             arg.fd,
2167             bo_vc4->flags_tbm,
2168             bo_vc4->size);
2169
2170         if (error)
2171                 *error = TBM_ERROR_NONE;
2172
2173         return (tbm_fd)arg.fd;
2174 }
2175
2176 static tbm_key
2177 tbm_vc4_bo_export_key(tbm_backend_bo_data *bo_data, tbm_error_e *error)
2178 {
2179         tbm_bo_vc4 bo_vc4 = (tbm_bo_vc4)bo_data;
2180
2181         if (!bo_vc4) {
2182                 if (error)
2183                         *error = TBM_ERROR_INVALID_PARAMETER;
2184                 return 0;
2185         }
2186
2187         if (!bo_vc4->name) {
2188                 bo_vc4->name = _get_name(bo_vc4->fd, bo_vc4->gem);
2189                 if (!bo_vc4->name) {
2190                         TBM_ERR("error Cannot get name\n");
2191                         if (error)
2192                                 *error = TBM_ERROR_INVALID_PARAMETER;
2193                         return 0;
2194                 }
2195         }
2196
2197         TBM_DBG("    bo_vc4:%p, gem:%d(%d), fd:%d, flags:%d, size:%d\n",
2198             bo_vc4,
2199             bo_vc4->gem, bo_vc4->name,
2200             bo_vc4->dmabuf,
2201             bo_vc4->flags_tbm,
2202             bo_vc4->size);
2203
2204         if (error)
2205                 *error = TBM_ERROR_NONE;
2206
2207         return (tbm_key)bo_vc4->name;
2208 }
2209
2210 static void
2211 tbm_vc4_deinit(tbm_backend_bufmgr_data *bufmgr_data)
2212 {
2213         tbm_bufmgr_vc4 bufmgr_vc4 = (tbm_bufmgr_vc4)bufmgr_data;
2214         tbm_bufmgr bufmgr;
2215         tbm_error_e error;
2216         unsigned long key;
2217         void *value;
2218
2219         TBM_RETURN_IF_FAIL(bufmgr_vc4 != NULL);
2220
2221         bufmgr = bufmgr_vc4->bufmgr;
2222
2223         tbm_backend_bufmgr_free_bufmgr_func(bufmgr, bufmgr_vc4->bufmgr_func);
2224         tbm_backend_bufmgr_free_bo_func(bufmgr, bufmgr_vc4->bo_func);
2225
2226         if (bufmgr_vc4->hashBos) {
2227                 while (drmHashFirst(bufmgr_vc4->hashBos, &key, &value) > 0) {
2228                         free(value);
2229                         drmHashDelete(bufmgr_vc4->hashBos, key);
2230                 }
2231
2232                 drmHashDestroy(bufmgr_vc4->hashBos);
2233                 bufmgr_vc4->hashBos = NULL;
2234         }
2235
2236         _bufmgr_deinit_cache_state(bufmgr_vc4);
2237
2238         if (bufmgr_vc4->bind_display)
2239                 tbm_drm_helper_wl_auth_server_deinit();
2240
2241         if (bufmgr_vc4->device_name)
2242                 free(bufmgr_vc4->device_name);
2243
2244         if (tbm_backend_bufmgr_query_display_server(bufmgr, &error))
2245                 tbm_drm_helper_unset_tbm_master_fd();
2246         else
2247                 tbm_drm_helper_unset_fd();
2248
2249         close(bufmgr_vc4->fd);
2250
2251         free(bufmgr_vc4);
2252 }
2253
2254 static tbm_backend_bufmgr_data *
2255 tbm_vc4_init(tbm_bufmgr bufmgr, tbm_error_e *error)
2256 {
2257         tbm_bufmgr_vc4 bufmgr_vc4 = NULL;
2258         tbm_backend_bufmgr_func *bufmgr_func = NULL;
2259         tbm_backend_bo_func *bo_func = NULL;
2260         int fp;
2261         tbm_error_e err;
2262
2263         if (!bufmgr) {
2264                 TBM_ERR("bufmgr is null.\n");
2265                 if (error)
2266                         *error = TBM_ERROR_INVALID_PARAMETER;
2267                 return NULL;
2268         }
2269
2270         bufmgr_vc4 = calloc(1, sizeof(struct _tbm_bufmgr_vc4));
2271         if (!bufmgr_vc4) {
2272                 TBM_ERR("fail to alloc bufmgr_vc4!\n");
2273                 if (error)
2274                         *error = TBM_ERROR_OUT_OF_MEMORY;
2275                 return NULL;
2276         }
2277
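              /* A display server owns the DRM master fd (or opens the device itself);
               * clients either use a render node or request an authenticated fd from
               * the server via the tbm_drm_helper.
               */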
2278         if (tbm_backend_bufmgr_query_display_server(bufmgr, &err)) {
2279                 bufmgr_vc4->fd = tbm_drm_helper_get_master_fd();
2280                 if (bufmgr_vc4->fd < 0) {
2281                         bufmgr_vc4->fd = _tbm_vc4_open_drm();
2282                         if (bufmgr_vc4->fd < 0) {
2283                                 TBM_ERR("fail to open drm!\n");
2284                                 if (error)
2285                                         *error = TBM_ERROR_INVALID_OPERATION;
2286                                 goto fail_open_drm;
2287                         }
2288                 }
2289
2290                 tbm_drm_helper_set_tbm_master_fd(bufmgr_vc4->fd);
2291
2292                 bufmgr_vc4->device_name = drmGetDeviceNameFromFd(bufmgr_vc4->fd);
2293                 if (!bufmgr_vc4->device_name) {
2294                         TBM_ERR("fail to get device name!\n");
2295                         tbm_drm_helper_unset_tbm_master_fd();
2296                         if (error)
2297                                 *error = TBM_ERROR_INVALID_OPERATION;
2298                         goto fail_get_device_name;
2299                 }
2300                 tbm_drm_helper_set_fd(bufmgr_vc4->fd);
2301         } else {
2302                 if (_check_render_node()) {
2303                         bufmgr_vc4->fd = _get_render_node();//TODO
2304                         if (bufmgr_vc4->fd < 0) {
2305                                 TBM_ERR("fail to get render node\n");
2306                                 if (error)
2307                                         *error = TBM_ERROR_INVALID_OPERATION;
2308                                 goto fail_get_render_node;
2309                         }
2310                         TBM_DBG("Use render node:%d\n", bufmgr_vc4->fd);
2311                 } else {
2312                         if (!tbm_drm_helper_get_auth_info(&(bufmgr_vc4->fd), &(bufmgr_vc4->device_name), NULL)) {
2313                                 TBM_ERR("fail to get auth drm info!\n");
2314                                 if (error)
2315                                         *error = TBM_ERROR_INVALID_OPERATION;
2316                                 goto fail_get_auth_info;
2317                         }
2318                         tbm_drm_helper_set_fd(bufmgr_vc4->fd);
2319                 }
2320         }
2321
2322         //Check if the tbm manager supports dma fence or not.
2323         fp = open("/sys/module/dmabuf_sync/parameters/enabled", O_RDONLY);
2324         if (fp != -1) {
2325                 char buf[1];
2326                 int length = read(fp, buf, 1);
2327
2328                 if (length == 1 && buf[0] == '1')
2329                         bufmgr_vc4->use_dma_fence = 1;
2330
2331                 close(fp);
2332         }
2333
2334         if (!_bufmgr_init_cache_state(bufmgr_vc4)) {
2335                 TBM_ERR("fail to init bufmgr cache state\n");
2336                 goto fail_init_cache_state;
2337         }
2338
2339         /*Create Hash Table*/
2340         bufmgr_vc4->hashBos = drmHashCreate();
2341
2342         /* alloc and register bufmgr_funcs */
2343         bufmgr_func = tbm_backend_bufmgr_alloc_bufmgr_func(bufmgr, &err);
2344         if (!bufmgr_func) {
2345                 TBM_ERR("fail to alloc bufmgr_func! err(%d)\n", err);
2346                 if (error)
2347                         *error = TBM_ERROR_OUT_OF_MEMORY;
2348                 goto fail_alloc_bufmgr_func;
2349         }
2350
2351         bufmgr_func->bufmgr_get_capabilities = tbm_vc4_bufmgr_get_capabilities;
2352         //if (tbm_backend_bufmgr_query_display_server(bufmgr, &err) && !_check_render_node())
2353         bufmgr_func->bufmgr_bind_native_display = tbm_vc4_bufmgr_bind_native_display;
2354         bufmgr_func->bufmgr_get_supported_formats = tbm_vc4_bufmgr_get_supported_formats;
2355         bufmgr_func->bufmgr_get_plane_data = tbm_vc4_bufmgr_get_plane_data;
2356         bufmgr_func->bufmgr_alloc_bo = tbm_vc4_bufmgr_alloc_bo;
2357         bufmgr_func->bufmgr_alloc_bo_with_format = NULL;
2358         bufmgr_func->bufmgr_import_fd = tbm_vc4_bufmgr_import_fd;
2359         bufmgr_func->bufmgr_import_key = tbm_vc4_bufmgr_import_key;
2360 #ifdef VC4_TILED_FORMAT
2361         bufmgr_func->bufmgr_alloc_bo_with_tiled_format =  tbm_vc4_bufmgr_alloc_bo_with_tiled_format;
2362 #else
2363         bufmgr_func->bufmgr_alloc_bo_with_tiled_format = NULL;
2364 #endif
2365         err = tbm_backend_bufmgr_register_bufmgr_func(bufmgr, bufmgr_func);
2366         if (err != TBM_ERROR_NONE) {
2367                 TBM_ERR("fail to register bufmgr_func! err(%d)\n", err);
2368                 if (error)
2369                         *error = TBM_ERROR_INVALID_OPERATION;
2370                 goto fail_register_bufmgr_func;
2371         }
2372         bufmgr_vc4->bufmgr_func = bufmgr_func;
2373
2374         /* alloc and register bo_funcs */
2375         bo_func = tbm_backend_bufmgr_alloc_bo_func(bufmgr, &err);
2376         if (!bo_func) {
2377                 TBM_ERR("fail to alloc bo_func! err(%d)\n", err);
2378                 if (error)
2379                         *error = TBM_ERROR_OUT_OF_MEMORY;
2380                 goto fail_alloc_bo_func;
2381         }
2382
2383         bo_func->bo_free = tbm_vc4_bo_free;
2384         bo_func->bo_get_size = tbm_vc4_bo_get_size;
2385         bo_func->bo_get_memory_types = tbm_vc4_bo_get_memory_type;
2386         bo_func->bo_get_handle = tbm_vc4_bo_get_handle;
2387         bo_func->bo_map = tbm_vc4_bo_map;
2388         bo_func->bo_unmap = tbm_vc4_bo_unmap;
2389         bo_func->bo_lock = tbm_vc4_bo_lock;
2390         bo_func->bo_unlock = tbm_vc4_bo_unlock;
2391         bo_func->bo_export_fd = tbm_vc4_bo_export_fd;
2392         bo_func->bo_export_key = tbm_vc4_bo_export_key;
2393
2394         err = tbm_backend_bufmgr_register_bo_func(bufmgr, bo_func);
2395         if (err != TBM_ERROR_NONE) {
2396                 TBM_ERR("fail to register bo_func! err(%d)\n", err);
2397                 if (error)
2398                         *error = TBM_ERROR_INVALID_OPERATION;
2399                 goto fail_register_bo_func;
2400         }
2401         bufmgr_vc4->bo_func = bo_func;
2402
2403         TBM_DBG("drm_fd:%d\n", bufmgr_vc4->fd);
2404
2405         if (error)
2406                 *error = TBM_ERROR_NONE;
2407
2408         bufmgr_vc4->bufmgr = bufmgr;
2409
2410         return (tbm_backend_bufmgr_data *)bufmgr_vc4;
2411
2412 fail_register_bo_func:
2413         tbm_backend_bufmgr_free_bo_func(bufmgr, bo_func);
2414 fail_alloc_bo_func:
2415 fail_register_bufmgr_func:
2416         tbm_backend_bufmgr_free_bufmgr_func(bufmgr, bufmgr_func);
2417 fail_alloc_bufmgr_func:
2418         _bufmgr_deinit_cache_state(bufmgr_vc4);
2419         if (bufmgr_vc4->hashBos)
2420                 drmHashDestroy(bufmgr_vc4->hashBos);
2421 fail_init_cache_state:
2422         if (tbm_backend_bufmgr_query_display_server(bufmgr, &err))
2423                 tbm_drm_helper_unset_tbm_master_fd();
2424         else
2425                 tbm_drm_helper_unset_fd();
2426 fail_get_device_name:
2427         close(bufmgr_vc4->fd);
2428 fail_get_auth_info:
2429 fail_get_render_node:
2430 fail_open_drm:
2431         free(bufmgr_vc4);
2432         return NULL;
2433 }
2434
2435 tbm_backend_module tbm_backend_module_data = {
2436         "vc4",
2437         "Samsung",
2438         TBM_BACKEND_ABI_VERSION_3_0,
2439         tbm_vc4_init,
2440         tbm_vc4_deinit
2441 };