ddb26cadf86fcdf40257bd80390374614f30c279
[platform/adaptation/broadcom/libtbm-vc4.git] / src / tbm_bufmgr_vc4.c
1 /**************************************************************************
2
3 libtbm_vc4
4
5 Copyright 2017 Samsung Electronics co., Ltd. All Rights Reserved.
6
7 Contact: SooChan Lim <sc1.lim@samsung.com>, Sangjin Lee <lsj119@samsung.com>
8
9 Permission is hereby granted, free of charge, to any person obtaining a
10 copy of this software and associated documentation files (the
11 "Software"), to deal in the Software without restriction, including
12 without limitation the rights to use, copy, modify, merge, publish,
13 distribute, sub license, and/or sell copies of the Software, and to
14 permit persons to whom the Software is furnished to do so, subject to
15 the following conditions:
16
17 The above copyright notice and this permission notice (including the
18 next paragraph) shall be included in all copies or substantial portions
19 of the Software.
20
21 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
22 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
23 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
24 IN NO EVENT SHALL PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR
25 ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
26 TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
27 SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
28
29 **************************************************************************/
30
31 #ifdef HAVE_CONFIG_H
32 #include "config.h"
33 #endif
34
35 #include <stdio.h>
36 #include <stdlib.h>
37 #include <stdint.h>
38 #include <string.h>
39 #include <sys/ioctl.h>
40 #include <sys/types.h>
41 #include <unistd.h>
42 #include <sys/mman.h>
43 #include <sys/stat.h>
44 #include <fcntl.h>
45 #include <errno.h>
46 #include <xf86drm.h>
47 #include <tbm_bufmgr.h>
48 #include <tbm_bufmgr_backend.h>
49 #include <vc4_drm.h>
50 #include <pthread.h>
51 #include <tbm_surface.h>
52 #include <tbm_surface_internal.h>
53 #include <tbm_drm_helper.h>
54
55 #include <libudev.h>
56
57 #include "tbm_bufmgr_tgl.h"
58
59 #define DEBUG
60 #define USE_DMAIMPORT
61 #define TBM_COLOR_FORMAT_COUNT 4
62
63 #define VC4_DRM_NAME "vc42837"
64
65 #ifdef DEBUG
66 #define LOG_TAG "TBM_BACKEND"
67 #include <dlog.h>
68 static int bDebug;
69
/* Return the basename of the current process (read once from
 * /proc/self/cmdline, then cached in a static buffer).
 * On any failure an empty string is returned instead of NULL, so the
 * result is always safe to pass to a "%s" conversion in the log macros
 * (the original returned 0, which is undefined behavior for %s). */
char *target_name()
{
	FILE *f;
	char *slash;
	static int initialized = 0;
	static char app_name[128];

	if (initialized)
		return app_name;

	/* get the application name; /proc/self/cmdline is NUL-separated,
	 * so fgets() stops after argv[0] */
	f = fopen("/proc/self/cmdline", "r");

	if (!f)
		return app_name;	/* static storage: already zeroed -> "" */

	memset(app_name, 0x00, sizeof(app_name));

	/* fixed: bound the read by the real buffer size, not a magic 100 */
	if (fgets(app_name, sizeof(app_name), f) == NULL) {
		fclose(f);
		return app_name;
	}

	fclose(f);

	/* keep only the basename of argv[0]; strlen(slash) bytes covers
	 * the remainder including the terminating NUL */
	slash = strrchr(app_name, '/');
	if (slash != NULL)
		memmove(app_name, slash + 1, strlen(slash));

	initialized = 1;

	return app_name;
}
103
104 #define TBM_VC4_ERROR(fmt, args...)     LOGE("\033[31m"  "[%s] " fmt "\033[0m", target_name(), ##args)
105 #define TBM_VC4_DEBUG(fmt, args...)     {if (bDebug&01) LOGD("[%s] " fmt, target_name(), ##args); }
106 #else
107 #define TBM_VC4_ERROR(...)
108 #define TBM_VC4_DEBUG(...)
109 #endif
110
111 #define SIZE_ALIGN(value, base) (((value) + ((base) - 1)) & ~((base) - 1))
112 #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
113 #define MAX(a, b) ((a) > (b) ? (a) : (b))
114
115 #ifdef ALIGN_EIGHT
116 #define TBM_SURFACE_ALIGNMENT_PLANE (8)
117 #define TBM_SURFACE_ALIGNMENT_PITCH_RGB (8)
118 #else
119 #define TBM_SURFACE_ALIGNMENT_PLANE (16)
120 #define TBM_SURFACE_ALIGNMENT_PITCH_RGB (16)
121 #endif
122
123 #define TBM_SURFACE_ALIGNMENT_PLANE_NV12 (4096)
124 #define TBM_SURFACE_ALIGNMENT_PITCH_YUV (16)
125
126 #define SZ_1M                                   0x00100000
127 #define S5P_FIMV_MAX_FRAME_SIZE                 (2 * SZ_1M)
128 #define S5P_FIMV_D_ALIGN_PLANE_SIZE             64
129 #define S5P_FIMV_NUM_PIXELS_IN_MB_ROW           16
130 #define S5P_FIMV_NUM_PIXELS_IN_MB_COL           16
131 #define S5P_FIMV_DEC_BUF_ALIGN                  (8 * 1024)
132 #define S5P_FIMV_NV12MT_HALIGN                  128
133 #define S5P_FIMV_NV12MT_VALIGN                  64
134
135 /* check condition */
136 #define VC4_RETURN_IF_FAIL(cond) {\
137         if (!(cond)) {\
138                 TBM_VC4_ERROR("[%s] : '%s' failed.\n", __func__, #cond);\
139                 return;\
140         } \
141 }
142
143 #define VC4_RETURN_VAL_IF_FAIL(cond, val) {\
144         if (!(cond)) {\
145                 TBM_VC4_ERROR("[%s] : '%s' failed.\n", __func__, #cond);\
146                 return val;\
147         } \
148 }
149
/* Reply payload for DMABUF_IOCTL_GET_INFO. */
struct dma_buf_info {
	unsigned long	size;		/* buffer size in bytes */
	unsigned int	fence_supported;	/* non-zero if dma-buf fences are available */
	unsigned int	padding;	/* explicit tail padding to keep the layout stable */
};
155
156 #define DMA_BUF_ACCESS_READ             0x1
157 #define DMA_BUF_ACCESS_WRITE            0x2
158 #define DMA_BUF_ACCESS_DMA              0x4
159 #define DMA_BUF_ACCESS_MAX              0x8
160
161 #define DMA_FENCE_LIST_MAX              5
162
/* Argument for DMABUF_IOCTL_GET_FENCE / DMABUF_IOCTL_PUT_FENCE. */
struct dma_buf_fence {
	unsigned long		ctx;	/* fence context handle filled in by the kernel */
	unsigned int		type;	/* DMA_BUF_ACCESS_* flags */
};
167
168 #define DMABUF_IOCTL_BASE       'F'
169 #define DMABUF_IOWR(nr, type)   _IOWR(DMABUF_IOCTL_BASE, nr, type)
170
171 #define DMABUF_IOCTL_GET_INFO   DMABUF_IOWR(0x00, struct dma_buf_info)
172 #define DMABUF_IOCTL_GET_FENCE  DMABUF_IOWR(0x01, struct dma_buf_fence)
173 #define DMABUF_IOCTL_PUT_FENCE  DMABUF_IOWR(0x02, struct dma_buf_fence)
174
175 /* tgl key values */
176 #define GLOBAL_KEY   ((unsigned int)(-1))
177 /* TBM_CACHE */
178 #define TBM_VC4_CACHE_INV       0x01 /**< cache invalidate  */
179 #define TBM_VC4_CACHE_CLN       0x02 /**< cache clean */
180 #define TBM_VC4_CACHE_ALL       0x10 /**< cache all */
181 #define TBM_VC4_CACHE_FLUSH     (TBM_VC4_CACHE_INV|TBM_VC4_CACHE_CLN) /**< cache flush  */
182 #define TBM_VC4_CACHE_FLUSH_ALL (TBM_VC4_CACHE_FLUSH|TBM_VC4_CACHE_ALL) /**< cache flush all */
183
184 enum {
185         DEVICE_NONE = 0,
186         DEVICE_CA,                                      /* cache aware device */
187         DEVICE_CO                                       /* cache oblivious device */
188 };
189
typedef union _tbm_bo_cache_state tbm_bo_cache_state;

/* Per-bo cache bookkeeping packed into one 32-bit word so it can be stored
 * in / fetched from the tgl module with a single set/get. */
union _tbm_bo_cache_state {
	unsigned int val;
	struct {
		unsigned int cntFlush:16;	/*Flush all index for sync */
		unsigned int isCached:1;	/* set once the CPU has accessed the bo */
		unsigned int isDirtied:2;	/* DEVICE_NONE/CA/CO: which side last wrote */
	} data;
};
200
typedef struct _tbm_bufmgr_vc4 *tbm_bufmgr_vc4;
typedef struct _tbm_bo_vc4 *tbm_bo_vc4;

/* Shared per-GEM bookkeeping: reference count for bos that alias one gem. */
typedef struct _vc4_private {
	int ref_count;			/* number of tbm_bo_vc4 sharing this gem */
	struct _tbm_bo_vc4 *bo_priv;	/* canonical bo for this gem — TODO confirm against import path */
} PrivGem;
208
209 /* tbm buffor object for vc4 */
struct _tbm_bo_vc4 {
	int fd;			/* drm device fd — presumably copied from the bufmgr; confirm */

	unsigned int name;    /* FLINK ID */

	unsigned int gem;     /* GEM Handle */

	unsigned int dmabuf;  /* fd for dmabuf */

	void *pBase;          /* virtual address */

	unsigned int size;	/* allocation size in bytes */

	unsigned int flags_tbm; /*not used now*//*currently no values for the flags,but it may be used in future extension*/

	PrivGem *private;	/* shared refcount block for named/imported bos */

	pthread_mutex_t mutex;
	struct dma_buf_fence dma_fence[DMA_FENCE_LIST_MAX];	/* outstanding dma-buf fences */
	int device;	/* NOTE(review): presumably last TBM_DEVICE_* used — confirm against lock path */
	int opt;	/* NOTE(review): presumably last TBM_OPTION_* used — confirm against lock path */

	tbm_bo_cache_state cache_state;	/* local copy of the tgl-stored cache state */
	unsigned int map_cnt;		/* map reference count — TODO confirm semantics */
	int last_map_device;		/* device type of the most recent map */
};
236
237 /* tbm bufmgr private for vc4 */
struct _tbm_bufmgr_vc4 {
	int fd;			/* drm device fd owned by this manager */
	int isLocal;
	void *hashBos;		/* bo lookup table — presumably keyed by flink name; confirm against full file */

	int use_dma_fence;	/* non-zero: kernel dma-fence handles sync; tgl/cache paths become no-ops */

	int tgl_fd;		/* fd of the tgl lock device (cache-state bookkeeping) */

	char *device_name;
	void *bind_display;	/* display bound via tbm_drm_helper — TODO confirm */
};
250
/* Human-readable names for device / option values (logging).
 * NOTE(review): consider `static const char *` — these are currently
 * mutable globals exported from this translation unit. */
char *STR_DEVICE[] = {
	"DEF",
	"CPU",
	"2D",
	"3D",
	"MM"
};

char *STR_OPT[] = {
	"NONE",
	"RD",
	"WR",
	"RDWR"
};
265
266
/* Color formats advertised by this backend; must stay in sync with
 * TBM_COLOR_FORMAT_COUNT above. */
uint32_t tbm_vc4_color_format_list[TBM_COLOR_FORMAT_COUNT] = {
	TBM_FORMAT_ARGB8888,
	TBM_FORMAT_XRGB8888,
	TBM_FORMAT_NV12,
	TBM_FORMAT_YUV420
};
273 #undef  ENABLE_CACHECRTL
274 #ifdef ENABLE_CACHECRTL
275 #ifdef TGL_GET_VERSION
276 static inline int
277 _tgl_get_version(int fd)
278 {
279         struct tgl_ver_data data;
280         int err;
281
282         err = ioctl(fd, TGL_IOCTL_GET_VERSION, &data);
283         if (err) {
284                 TBM_VC4_ERROR("error(%s) %s:%d\n", strerror(errno));
285                 return 0;
286         }
287
288         TBM_VC4_DEBUG("tgl version is (%u, %u).\n", data.major, data.minor);
289
290         return 1;
291 }
292 #endif
293
294 static inline int
295 _tgl_init(int fd, unsigned int key)
296 {
297         struct tgl_reg_data data;
298         int err;
299
300         data.key = key;
301         data.timeout_ms = 1000;
302
303         err = ioctl(fd, TGL_IOCTL_REGISTER, &data);
304         if (err) {
305                 TBM_VC4_ERROR("error(%s) key:%d\n", strerror(errno), key);
306                 return 0;
307         }
308
309         return 1;
310 }
311
312 static inline int
313 _tgl_destroy(int fd, unsigned int key)
314 {
315         struct tgl_reg_data data;
316         int err;
317
318         data.key = key;
319         err = ioctl(fd, TGL_IOCTL_UNREGISTER, &data);
320         if (err) {
321                 TBM_VC4_ERROR("error(%s) key:%d\n", strerror(errno), key);
322                 return 0;
323         }
324
325         return 1;
326 }
327
328 static inline int
329 _tgl_lock(int fd, unsigned int key, int opt)
330 {
331         struct tgl_lock_data data;
332         enum tgl_type_data tgl_type;
333         int err;
334
335         switch (opt) {
336         case TBM_OPTION_READ:
337                 tgl_type = TGL_TYPE_READ;
338                 break;
339         case TBM_OPTION_WRITE:
340                 tgl_type = TGL_TYPE_WRITE;
341                 break;
342         default:
343                 tgl_type = TGL_TYPE_NONE;
344                 break;
345         }
346
347         data.key = key;
348         data.type = tgl_type;
349
350         err = ioctl(fd, TGL_IOCTL_LOCK, &data);
351         if (err) {
352                 TBM_VC4_ERROR("error(%s) key:%d opt:%d\n",
353                         strerror(errno), key, opt);
354                 return 0;
355         }
356
357         return 1;
358 }
359
360 static inline int
361 _tgl_unlock(int fd, unsigned int key)
362 {
363         struct tgl_lock_data data;
364         int err;
365
366         data.key = key;
367         data.type = TGL_TYPE_NONE;
368
369         err = ioctl(fd, TGL_IOCTL_UNLOCK, &data);
370         if (err) {
371                 TBM_VC4_ERROR("error(%s) key:%d\n",
372                         strerror(errno), key);
373                 return 0;
374         }
375
376         return 1;
377 }
378
379 static inline int
380 _tgl_set_data(int fd, unsigned int key, unsigned int val)
381 {
382         struct tgl_usr_data data;
383         int err;
384
385         data.key = key;
386         data.data1 = val;
387
388         err = ioctl(fd, TGL_IOCTL_SET_DATA, &data);
389         if (err) {
390                 TBM_VC4_ERROR("error(%s) key:%d\n",
391                         strerror(errno), key);
392                 return 0;
393         }
394
395         return 1;
396 }
397
398 static inline unsigned int
399 _tgl_get_data(int fd, unsigned int key)
400 {
401         struct tgl_usr_data data = { 0, };
402         int err;
403
404         data.key = key;
405
406         err = ioctl(fd, TGL_IOCTL_GET_DATA, &data);
407         if (err) {
408                 TBM_VC4_ERROR("error(%s) key:%d\n",
409                         strerror(errno), key);
410                 return 0;
411         }
412
413         return data.data1;
414 }
415
416 static int
417 _vc4_cache_flush(tbm_bufmgr_vc4 bufmgr_vc4, tbm_bo_vc4 bo_vc4, int flags)
418 {
419         VC4_RETURN_VAL_IF_FAIL(bufmgr_vc4 != NULL, 0);
420
421         /* cache flush is managed by kernel side when using dma-fence. */
422         if (bufmgr_vc4->use_dma_fence)
423                 return 1;
424
425         struct drm_vc4_gem_cache_op cache_op = {0, };
426         int ret;
427
428         /* if bo_vc4 is null, do cache_flush_all */
429         if (bo_vc4) {
430                 cache_op.flags = 0;
431                 cache_op.usr_addr = (uint64_t)((uint32_t)bo_vc4->pBase);
432                 cache_op.size = bo_vc4->size;
433         } else {
434                 flags = TBM_VC4_CACHE_FLUSH_ALL;
435                 cache_op.flags = 0;
436                 cache_op.usr_addr = 0;
437                 cache_op.size = 0;
438         }
439
440         if (flags & TBM_VC4_CACHE_INV) {
441                 if (flags & TBM_VC4_CACHE_ALL)
442                         cache_op.flags |= VC4_DRM_CACHE_INV_ALL;
443                 else
444                         cache_op.flags |= VC4_DRM_CACHE_INV_RANGE;
445         }
446
447         if (flags & TBM_VC4_CACHE_CLN) {
448                 if (flags & TBM_VC4_CACHE_ALL)
449                         cache_op.flags |= VC4_DRM_CACHE_CLN_ALL;
450                 else
451                         cache_op.flags |= VC4_DRM_CACHE_CLN_RANGE;
452         }
453
454         if (flags & TBM_VC4_CACHE_ALL)
455                 cache_op.flags |= VC4_DRM_ALL_CACHES_CORES;
456
457         ret = drmCommandWriteRead(bufmgr_vc4->fd, DRM_VC4_GEM_CACHE_OP, &cache_op,
458                                   sizeof(cache_op));
459         if (ret) {
460                 TBM_VC4_ERROR("fail to flush the cache.\n");
461                 return 0;
462         }
463
464         return 1;
465 }
466 #endif
467
/* Prepare tgl-backed cache bookkeeping for a bo: register the bo's flink
 * name as a tgl key, and for freshly allocated bos (import == 0) store a
 * clean/uncached initial state. Imported bos keep the exporter's state.
 * Returns 1 on success (0 only on NULL arguments).
 * Note: ENABLE_CACHECRTL is force-undefined above, so this body is
 * currently compiled out and the function always returns 1. */
static int
_bo_init_cache_state(tbm_bufmgr_vc4 bufmgr_vc4, tbm_bo_vc4 bo_vc4, int import)
{
#ifdef ENABLE_CACHECRTL
	VC4_RETURN_VAL_IF_FAIL(bufmgr_vc4 != NULL, 0);
	VC4_RETURN_VAL_IF_FAIL(bo_vc4 != NULL, 0);

	/* kernel manages coherency in dma-fence mode */
	if (bufmgr_vc4->use_dma_fence)
		return 1;

	_tgl_init(bufmgr_vc4->tgl_fd, bo_vc4->name);

	tbm_bo_cache_state cache_state;

	if (import == 0) {
		cache_state.data.isDirtied = DEVICE_NONE;
		cache_state.data.isCached = 0;
		cache_state.data.cntFlush = 0;

		_tgl_set_data(bufmgr_vc4->tgl_fd, bo_vc4->name, cache_state.val);
	}
#endif

	return 1;
}
493
/* Synchronize caches before @bo_vc4 is accessed by @device with @opt.
 * Fetches the bo's saved cache state from tgl, decides whether an
 * invalidate (CPU access after a cache-oblivious device wrote) or a clean
 * (device access after a cached CPU write) is required, performs it via
 * _vc4_cache_flush(), and updates the in-memory state.
 * Note: ENABLE_CACHECRTL is force-undefined above, so this currently
 * reduces to "return 1". */
static int
_bo_set_cache_state(tbm_bufmgr_vc4 bufmgr_vc4, tbm_bo_vc4 bo_vc4, int device, int opt)
{
#ifdef ENABLE_CACHECRTL
	VC4_RETURN_VAL_IF_FAIL(bufmgr_vc4 != NULL, 0);
	VC4_RETURN_VAL_IF_FAIL(bo_vc4 != NULL, 0);

	if (bufmgr_vc4->use_dma_fence)
		return 1;

	char need_flush = 0;
	unsigned short cntFlush = 0;

	/* get cache state of a bo */
	bo_vc4->cache_state.val = _tgl_get_data(bufmgr_vc4->tgl_fd,
				     bo_vc4->name);

	/* get global cache flush count */
	cntFlush = (unsigned short)_tgl_get_data(bufmgr_vc4->tgl_fd, GLOBAL_KEY);

	if (device == TBM_DEVICE_CPU) {
		/* a cache-oblivious device dirtied the bo while CPU lines may
		 * still be cached: invalidate before the CPU touches it */
		if (bo_vc4->cache_state.data.isDirtied == DEVICE_CO &&
		    bo_vc4->cache_state.data.isCached)
			need_flush = TBM_VC4_CACHE_INV;

		bo_vc4->cache_state.data.isCached = 1;
		if (opt & TBM_OPTION_WRITE)
			bo_vc4->cache_state.data.isDirtied = DEVICE_CA;
		else {
			if (bo_vc4->cache_state.data.isDirtied != DEVICE_CA)
				bo_vc4->cache_state.data.isDirtied = DEVICE_NONE;
		}
	} else {
		/* the CPU dirtied the bo and no global flush has happened since
		 * the state was saved (cntFlush matches): clean before the
		 * device reads */
		if (bo_vc4->cache_state.data.isDirtied == DEVICE_CA &&
		    bo_vc4->cache_state.data.isCached &&
		    bo_vc4->cache_state.data.cntFlush == cntFlush)
			need_flush = TBM_VC4_CACHE_CLN | TBM_VC4_CACHE_ALL;

		if (opt & TBM_OPTION_WRITE)
			bo_vc4->cache_state.data.isDirtied = DEVICE_CO;
		else {
			if (bo_vc4->cache_state.data.isDirtied != DEVICE_CO)
				bo_vc4->cache_state.data.isDirtied = DEVICE_NONE;
		}
	}

	if (need_flush) {
		/* a flush-all bumps the global flush counter shared via tgl */
		if (need_flush & TBM_VC4_CACHE_ALL)
			_tgl_set_data(bufmgr_vc4->tgl_fd, GLOBAL_KEY, (unsigned int)(++cntFlush));

		/* call cache flush */
		_vc4_cache_flush(bufmgr_vc4, bo_vc4, need_flush);

		TBM_VC4_DEBUG(" \tcache(%d,%d)....flush:0x%x, cntFlush(%d)\n",
		    bo_vc4->cache_state.data.isCached,
		    bo_vc4->cache_state.data.isDirtied,
		    need_flush,
		    cntFlush);
	}
#endif

	return 1;
}
557
/* Persist the bo's in-memory cache state back to tgl, stamped with the
 * current global flush count so a later _bo_set_cache_state() can tell
 * whether a flush-all already covered this bo.
 * Note: compiled out unless ENABLE_CACHECRTL is defined (it is force-
 * undefined above). */
static int
_bo_save_cache_state(tbm_bufmgr_vc4 bufmgr_vc4, tbm_bo_vc4 bo_vc4)
{
#ifdef ENABLE_CACHECRTL
	VC4_RETURN_VAL_IF_FAIL(bufmgr_vc4 != NULL, 0);
	VC4_RETURN_VAL_IF_FAIL(bo_vc4 != NULL, 0);

	if (bufmgr_vc4->use_dma_fence)
		return 1;

	unsigned short cntFlush = 0;

	/* get global cache flush count */
	cntFlush = (unsigned short)_tgl_get_data(bufmgr_vc4->tgl_fd, GLOBAL_KEY);

	/* save global cache flush count */
	bo_vc4->cache_state.data.cntFlush = cntFlush;
	_tgl_set_data(bufmgr_vc4->tgl_fd, bo_vc4->name,
		      bo_vc4->cache_state.val);
#endif

	return 1;
}
581
/* Unregister the bo's tgl key; counterpart of _bo_init_cache_state().
 * No-op in dma-fence mode and when ENABLE_CACHECRTL is not defined. */
static void
_bo_destroy_cache_state(tbm_bufmgr_vc4 bufmgr_vc4, tbm_bo_vc4 bo_vc4)
{
#ifdef ENABLE_CACHECRTL
	VC4_RETURN_IF_FAIL(bufmgr_vc4 != NULL);
	VC4_RETURN_IF_FAIL(bo_vc4 != NULL);

	if (bufmgr_vc4->use_dma_fence)
		return ;

	_tgl_destroy(bufmgr_vc4->tgl_fd, bo_vc4->name);
#endif
}
595
596 static int
597 _bufmgr_init_cache_state(tbm_bufmgr_vc4 bufmgr_vc4)
598 {
599 #ifdef ENABLE_CACHECRTL
600         VC4_RETURN_VAL_IF_FAIL(bufmgr_vc4 != NULL, 0);
601
602         if (bufmgr_vc4->use_dma_fence)
603                 return 1;
604
605         /* open tgl fd for saving cache flush data */
606         bufmgr_vc4->tgl_fd = open(tgl_devfile, O_RDWR);
607
608         if (bufmgr_vc4->tgl_fd < 0) {
609             bufmgr_vc4->tgl_fd = open(tgl_devfile1, O_RDWR);
610             if (bufmgr_vc4->tgl_fd < 0) {
611                     TBM_VC4_ERROR("fail to open global_lock:%s\n",
612                                         tgl_devfile1);
613                     return 0;
614             }
615         }
616
617 #ifdef TGL_GET_VERSION
618         if (!_tgl_get_version(bufmgr_vc4->tgl_fd)) {
619                 TBM_VC4_ERROR("fail to get tgl_version. tgl init failed.\n");
620                 close(bufmgr_sprd->tgl_fd);
621                 return 0;
622         }
623 #endif
624
625         if (!_tgl_init(bufmgr_vc4->tgl_fd, GLOBAL_KEY)) {
626                 TBM_VC4_ERROR("fail to initialize the tgl\n");
627                 close(bufmgr_vc4->tgl_fd);
628                 return 0;
629         }
630 #endif
631
632         return 1;
633 }
634
/* Close the tgl fd opened by _bufmgr_init_cache_state().
 * NOTE(review): tgl_fd is not reset to -1 after close — confirm the bufmgr
 * is torn down immediately afterwards. */
static void
_bufmgr_deinit_cache_state(tbm_bufmgr_vc4 bufmgr_vc4)
{
#ifdef ENABLE_CACHECRTL
	VC4_RETURN_IF_FAIL(bufmgr_vc4 != NULL);

	if (bufmgr_vc4->use_dma_fence)
		return;

	if (bufmgr_vc4->tgl_fd >= 0)
		close(bufmgr_vc4->tgl_fd);
#endif
}
648
649 static int
650 _tbm_vc4_open_drm()
651 {
652         int fd = -1;
653
654         fd = drmOpen(VC4_DRM_NAME, NULL);
655         if (fd < 0) {
656                 TBM_VC4_ERROR("fail to open drm.(%s)\n", VC4_DRM_NAME);
657         }
658
659         if (fd < 0) {
660                 struct udev *udev = NULL;
661                 struct udev_enumerate *e = NULL;
662                 struct udev_list_entry *entry = NULL;
663                 struct udev_device *device = NULL, *drm_device = NULL, *device_parent = NULL;
664                 const char *filepath;
665                 struct stat s;
666                 int ret;
667
668                 TBM_VC4_DEBUG("search drm-device by udev\n");
669
670                 udev = udev_new();
671                 if (!udev) {
672                         TBM_VC4_ERROR("udev_new() failed.\n");
673                         return -1;
674                 }
675
676                 e = udev_enumerate_new(udev);
677                 udev_enumerate_add_match_subsystem(e, "drm");
678                 udev_enumerate_add_match_sysname(e, "card[0-9]*");
679                 udev_enumerate_scan_devices(e);
680
681                 udev_list_entry_foreach(entry, udev_enumerate_get_list_entry(e)) {
682                         device = udev_device_new_from_syspath(udev_enumerate_get_udev(e),
683                                                               udev_list_entry_get_name(entry));
684                         device_parent = udev_device_get_parent(device);
685                         /* Not need unref device_parent. device_parent and device have same refcnt */
686                         if (device_parent) {
687                                 if (strcmp(udev_device_get_sysname(device_parent), "vc4-drm") == 0) {
688                                         drm_device = device;
689                                         TBM_VC4_DEBUG("[%s] Found render device: '%s' (%s)\n",
690                                             target_name(),
691                                             udev_device_get_syspath(drm_device),
692                                             udev_device_get_sysname(device_parent));
693                                         break;
694                                 }
695                         }
696                         udev_device_unref(device);
697                 }
698
699                 udev_enumerate_unref(e);
700
701                 /* Get device file path. */
702                 filepath = udev_device_get_devnode(drm_device);
703                 if (!filepath) {
704                         TBM_VC4_ERROR("udev_device_get_devnode() failed.\n");
705                         udev_device_unref(drm_device);
706                         udev_unref(udev);
707                         return -1;
708                 }
709
710                 /* Open DRM device file and check validity. */
711                 fd = open(filepath, O_RDWR | O_CLOEXEC);
712                 if (fd < 0) {
713                         TBM_VC4_ERROR("open(%s, O_RDWR | O_CLOEXEC) failed.\n");
714                         udev_device_unref(drm_device);
715                         udev_unref(udev);
716                         return -1;
717                 }
718
719                 ret = fstat(fd, &s);
720                 if (ret) {
721                         TBM_VC4_ERROR("fstat() failed %s.\n");
722                         close(fd);
723                         udev_device_unref(drm_device);
724                         udev_unref(udev);
725                         return -1;
726                 }
727
728                 udev_device_unref(drm_device);
729                 udev_unref(udev);
730         }
731
732         return fd;
733 }
734
/* Probe via udev whether a vc4 render node (renderD*) exists.
 * Returns 1 if found, 0 if not found or render-node support is disabled
 * at build time, -1 on udev failure. */
static int
_check_render_node(void)  //TODO
{
#ifndef USE_RENDER_NODE
	/* fixed: guard the whole body so no unreachable code or unused
	 * variables remain when render nodes are disabled */
	return 0;
#else
	struct udev *udev = NULL;
	struct udev_enumerate *e = NULL;
	struct udev_list_entry *entry = NULL;
	struct udev_device *device = NULL, *drm_device = NULL, *device_parent = NULL;

	udev = udev_new();
	if (!udev) {
		TBM_VC4_ERROR("udev_new() failed.\n");
		return -1;
	}

	e = udev_enumerate_new(udev);
	udev_enumerate_add_match_subsystem(e, "drm");
	udev_enumerate_add_match_sysname(e, "renderD[0-9]*");
	udev_enumerate_scan_devices(e);

	udev_list_entry_foreach(entry, udev_enumerate_get_list_entry(e)) {
		device = udev_device_new_from_syspath(udev_enumerate_get_udev(e),
						      udev_list_entry_get_name(entry));
		device_parent = udev_device_get_parent(device);
		/* Not need unref device_parent. device_parent and device have same refcnt */
		if (device_parent) {
			if (strcmp(udev_device_get_sysname(device_parent), "vc4-drm") == 0) {
				drm_device = device;
				TBM_VC4_DEBUG("Found render device: '%s' (%s)\n",
				    udev_device_get_syspath(drm_device),
				    udev_device_get_sysname(device_parent));
				break;
			}
		}
		udev_device_unref(device);
	}

	udev_enumerate_unref(e);
	udev_unref(udev);

	/* fixed: removed the redundant udev_device_unref(NULL) on the
	 * not-found path */
	if (!drm_device)
		return 0;

	udev_device_unref(drm_device);
	return 1;
#endif
}
786
787 static int
788 _get_render_node(void)//TODO
789 {
790         struct udev *udev = NULL;
791         struct udev_enumerate *e = NULL;
792         struct udev_list_entry *entry = NULL;
793         struct udev_device *device = NULL, *drm_device = NULL, *device_parent = NULL;
794         const char *filepath;
795         struct stat s;
796         int fd = -1;
797         int ret;
798
799         udev = udev_new();
800         if (!udev) {
801                 TBM_VC4_ERROR("udev_new() failed.\n");
802                 return -1;
803         }
804
805         e = udev_enumerate_new(udev);
806         udev_enumerate_add_match_subsystem(e, "drm");
807         udev_enumerate_add_match_sysname(e, "renderD[0-9]*");
808         udev_enumerate_scan_devices(e);
809
810         udev_list_entry_foreach(entry, udev_enumerate_get_list_entry(e)) {
811                 device = udev_device_new_from_syspath(udev_enumerate_get_udev(e),
812                                                       udev_list_entry_get_name(entry));
813                 device_parent = udev_device_get_parent(device);
814                 /* Not need unref device_parent. device_parent and device have same refcnt */
815                 if (device_parent) {
816                         if (strcmp(udev_device_get_sysname(device_parent), "vc4-drm") == 0) {
817                                 drm_device = device;
818                                 TBM_VC4_DEBUG("Found render device: '%s' (%s)\n",
819                                     udev_device_get_syspath(drm_device),
820                                     udev_device_get_sysname(device_parent));
821                                 break;
822                         }
823                 }
824                 udev_device_unref(device);
825         }
826
827         udev_enumerate_unref(e);
828
829         /* Get device file path. */
830         filepath = udev_device_get_devnode(drm_device);
831         if (!filepath) {
832                 TBM_VC4_ERROR("udev_device_get_devnode() failed.\n");
833                 udev_device_unref(drm_device);
834                 udev_unref(udev);
835                 return -1;
836         }
837
838         /* Open DRM device file and check validity. */
839         fd = open(filepath, O_RDWR | O_CLOEXEC);
840         if (fd < 0) {
841                 TBM_VC4_ERROR("open(%s, O_RDWR | O_CLOEXEC) failed.\n");
842                 udev_device_unref(drm_device);
843                 udev_unref(udev);
844                 return -1;
845         }
846
847         ret = fstat(fd, &s);
848         if (ret) {
849                 TBM_VC4_ERROR("fstat() failed %s.\n");
850                 udev_device_unref(drm_device);
851                 udev_unref(udev);
852                 close(fd);
853                 return -1;
854         }
855
856         udev_device_unref(drm_device);
857         udev_unref(udev);
858
859         return fd;
860 }
861
862 static unsigned int
863 _get_name(int fd, unsigned int gem)
864 {
865         struct drm_gem_flink arg = {0,};
866
867         arg.handle = gem;
868         if (drmIoctl(fd, DRM_IOCTL_GEM_FLINK, &arg)) {
869                 TBM_VC4_ERROR("fail to DRM_IOCTL_GEM_FLINK gem:%d", gem);
870                 return 0;
871         }
872
873         return (unsigned int)arg.name;
874 }
875
876 static tbm_bo_handle
877 _vc4_bo_handle(tbm_bo_vc4 bo_vc4, int device)
878 {
879         tbm_bo_handle bo_handle;
880
881         memset(&bo_handle, 0x0, sizeof(uint64_t));
882
883         switch (device) {
884         case TBM_DEVICE_DEFAULT:
885         case TBM_DEVICE_2D:
886                 bo_handle.u32 = (uint32_t)bo_vc4->gem;
887                 break;
888         case TBM_DEVICE_CPU:
889                 if (!bo_vc4->pBase) {
890                         void *map = NULL;
891                         struct drm_vc4_mmap_bo arg = {0, };
892                         arg.handle = bo_vc4->gem;
893                         if (drmIoctl(bo_vc4->fd, DRM_IOCTL_VC4_MMAP_BO, &arg)){
894                                 TBM_VC4_ERROR("Cannot map_dumb gem=%d\n", bo_vc4->gem);
895                                 return (tbm_bo_handle) NULL;
896                         }
897
898                         map = mmap(NULL, bo_vc4->size, PROT_READ | PROT_WRITE, MAP_SHARED,
899                                    bo_vc4->fd, arg.offset);
900                         if (map == MAP_FAILED) {
901                                 TBM_VC4_ERROR("Cannot usrptr gem=%d\n", bo_vc4->gem);
902                                 return (tbm_bo_handle) NULL;
903                         }
904                         bo_vc4->pBase = map;
905                 }
906                 bo_handle.ptr = (void *)bo_vc4->pBase;
907                 break;
908         case TBM_DEVICE_3D:
909 #ifdef USE_DMAIMPORT
910                 if (bo_vc4->dmabuf) {
911                         bo_handle.u32 = (uint32_t)bo_vc4->dmabuf;
912                         break;
913                 }
914
915                 if (!bo_vc4->dmabuf) {
916                         struct drm_prime_handle arg = {0, };
917
918                         arg.handle = bo_vc4->gem;
919                         if (drmIoctl(bo_vc4->fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &arg)) {
920                                 TBM_VC4_ERROR("Cannot dmabuf=%d\n", bo_vc4->gem);
921                                 return (tbm_bo_handle) NULL;
922                         }
923                         bo_vc4->dmabuf = arg.fd;
924                 }
925
926                 bo_handle.u32 = (uint32_t)bo_vc4->dmabuf;
927 #endif
928                 break;
929         case TBM_DEVICE_MM:
930                 if (!bo_vc4->dmabuf) {
931                         struct drm_prime_handle arg = {0, };
932
933                         arg.handle = bo_vc4->gem;
934                         if (drmIoctl(bo_vc4->fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &arg)) {
935                                 TBM_VC4_ERROR("Cannot dmabuf=%d\n", bo_vc4->gem);
936                                 return (tbm_bo_handle) NULL;
937                         }
938                         bo_vc4->dmabuf = arg.fd;
939                 }
940
941                 bo_handle.u32 = (uint32_t)bo_vc4->dmabuf;
942                 break;
943         default:
944                 TBM_VC4_ERROR("Not supported device:%d\n", device);
945                 bo_handle.ptr = (void *) NULL;
946                 break;
947         }
948
949         return bo_handle;
950 }
951
952 static int
953 tbm_vc4_bo_size(tbm_bo bo)
954 {
955         VC4_RETURN_VAL_IF_FAIL(bo != NULL, 0);
956
957         tbm_bo_vc4 bo_vc4;
958
959         bo_vc4 = (tbm_bo_vc4)tbm_backend_get_bo_priv(bo);
960         VC4_RETURN_VAL_IF_FAIL(bo_vc4 != NULL, 0);
961
962         return bo_vc4->size;
963 }
964
965 static void *
966 tbm_vc4_bo_alloc(tbm_bo bo, int size, int flags)
967 {
968         VC4_RETURN_VAL_IF_FAIL(bo != NULL, 0);
969
970         tbm_bo_vc4 bo_vc4;
971         tbm_bufmgr_vc4 bufmgr_vc4;
972
973         bufmgr_vc4 = (tbm_bufmgr_vc4)tbm_backend_get_bufmgr_priv(bo);
974         VC4_RETURN_VAL_IF_FAIL(bufmgr_vc4 != NULL, 0);
975
976         bo_vc4 = calloc(1, sizeof(struct _tbm_bo_vc4));
977         if (!bo_vc4) {
978                 TBM_VC4_ERROR("fail to allocate the bo private\n");
979                 return 0;
980         }
981
982         struct drm_vc4_create_bo arg = {0, };
983         arg.flags = flags;/*currently no values for the flags,but it may be used in future extension*/
984         arg.size = (__u32)size;
985         if (drmIoctl(bufmgr_vc4->fd, DRM_IOCTL_VC4_CREATE_BO, &arg)){
986                 TBM_VC4_ERROR("Cannot create bo(flag:%x, size:%d)\n", arg.flags,
987                                (unsigned int)arg.size);
988                 free(bo_vc4);
989                 return 0;
990         }
991
992         bo_vc4->fd = bufmgr_vc4->fd;
993         bo_vc4->gem = (unsigned int)arg.handle;
994         bo_vc4->size = size;
995         bo_vc4->flags_tbm = flags;
996         bo_vc4->name = _get_name(bo_vc4->fd, bo_vc4->gem);
997
998         if (!_bo_init_cache_state(bufmgr_vc4, bo_vc4, 0)) {
999                 TBM_VC4_ERROR("fail init cache state(%d)\n", bo_vc4->name);
1000                 free(bo_vc4);
1001                 return 0;
1002         }
1003
1004         pthread_mutex_init(&bo_vc4->mutex, NULL);
1005
1006         if (bufmgr_vc4->use_dma_fence
1007             && !bo_vc4->dmabuf) {
1008                 struct drm_prime_handle arg = {0, };
1009
1010                 arg.handle = bo_vc4->gem;
1011                 if (drmIoctl(bo_vc4->fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &arg)) {
1012                         TBM_VC4_ERROR("Cannot dmabuf=%d\n", bo_vc4->gem);
1013                         free(bo_vc4);
1014                         return 0;
1015                 }
1016                 bo_vc4->dmabuf = arg.fd;
1017         }
1018
1019         /* add bo to hash */
1020         PrivGem *privGem = calloc(1, sizeof(PrivGem));
1021
1022         if (!privGem) {
1023                 TBM_VC4_ERROR("fail to calloc privGem\n");
1024                 free(bo_vc4);
1025                 return 0;
1026         }
1027
1028         privGem->ref_count = 1;
1029         privGem->bo_priv = bo_vc4;
1030
1031         if (drmHashInsert(bufmgr_vc4->hashBos, bo_vc4->name,
1032                           (void *)privGem) < 0) {
1033                 TBM_VC4_ERROR("Cannot insert bo to Hash(%d)\n", bo_vc4->name);
1034         }
1035
1036         TBM_VC4_DEBUG("     bo:%p, gem:%d(%d), flags:%d, size:%d\n",
1037             bo,
1038             bo_vc4->gem, bo_vc4->name,
1039             flags,
1040             bo_vc4->size);
1041
1042         return (void *)bo_vc4;
1043 }
1044
1045 static void
1046 tbm_vc4_bo_free(tbm_bo bo)
1047 {
1048         tbm_bo_vc4 bo_vc4;
1049         tbm_bufmgr_vc4 bufmgr_vc4;
1050
1051         if (!bo)
1052                 return;
1053
1054         bufmgr_vc4 = (tbm_bufmgr_vc4)tbm_backend_get_bufmgr_priv(bo);
1055         VC4_RETURN_IF_FAIL(bufmgr_vc4 != NULL);
1056
1057         bo_vc4 = (tbm_bo_vc4)tbm_backend_get_bo_priv(bo);
1058         VC4_RETURN_IF_FAIL(bo_vc4 != NULL);
1059
1060         TBM_VC4_DEBUG("      bo:%p, gem:%d(%d), fd:%d, size:%d\n",
1061             bo,
1062             bo_vc4->gem, bo_vc4->name,
1063             bo_vc4->dmabuf,
1064             bo_vc4->size);
1065
1066         if (bo_vc4->pBase) {
1067                 if (munmap(bo_vc4->pBase, bo_vc4->size) == -1) {
1068                         TBM_VC4_ERROR("bo:%p fail to munmap(%s)\n",
1069                                        bo, strerror(errno));
1070                 }
1071         }
1072
1073         /* close dmabuf */
1074         if (bo_vc4->dmabuf) {
1075                 close(bo_vc4->dmabuf);
1076                 bo_vc4->dmabuf = 0;
1077         }
1078
1079         /* delete bo from hash */
1080         PrivGem *privGem = NULL;
1081         int ret;
1082
1083         ret = drmHashLookup(bufmgr_vc4->hashBos, bo_vc4->name,
1084                              (void **)&privGem);
1085         if (ret == 0) {
1086                 privGem->ref_count--;
1087                 if (privGem->ref_count == 0) {
1088                         drmHashDelete(bufmgr_vc4->hashBos, bo_vc4->name);
1089                         free(privGem);
1090                         privGem = NULL;
1091                 }
1092         } else {
1093                 TBM_VC4_ERROR("Cannot find bo to Hash(%d), ret=%d\n",
1094                         bo_vc4->name, ret);
1095         }
1096
1097         _bo_destroy_cache_state(bufmgr_vc4, bo_vc4);
1098
1099         /* Free gem handle */
1100         struct drm_gem_close arg = {0, };
1101
1102         memset(&arg, 0, sizeof(arg));
1103         arg.handle = bo_vc4->gem;
1104         if (drmIoctl(bo_vc4->fd, DRM_IOCTL_GEM_CLOSE, &arg)) {
1105                 TBM_VC4_ERROR("bo:%p fail to gem close.(%s)\n",
1106                                bo, strerror(errno));
1107         }
1108
1109         free(bo_vc4);
1110 }
1111
1112
1113 static void *
1114 tbm_vc4_bo_import(tbm_bo bo, unsigned int key)
1115 {
1116         VC4_RETURN_VAL_IF_FAIL(bo != NULL, 0);
1117
1118         tbm_bufmgr_vc4 bufmgr_vc4;
1119         tbm_bo_vc4 bo_vc4;
1120         PrivGem *privGem = NULL;
1121         int ret;
1122
1123         bufmgr_vc4 = (tbm_bufmgr_vc4)tbm_backend_get_bufmgr_priv(bo);
1124         VC4_RETURN_VAL_IF_FAIL(bufmgr_vc4 != NULL, 0);
1125
1126         ret = drmHashLookup(bufmgr_vc4->hashBos, key, (void **)&privGem);
1127         if (ret == 0)
1128                 return privGem->bo_priv;
1129
1130         struct drm_gem_open arg = {0, };
1131
1132         arg.name = key;
1133         if (drmIoctl(bufmgr_vc4->fd, DRM_IOCTL_GEM_OPEN, &arg)) {
1134                 TBM_VC4_ERROR("Cannot open gem name=%d\n", key);
1135                 return 0;
1136         }
1137
1138         bo_vc4 = calloc(1, sizeof(struct _tbm_bo_vc4));
1139         if (!bo_vc4) {
1140                 TBM_VC4_ERROR("fail to allocate the bo private\n");
1141                 return 0;
1142         }
1143
1144         bo_vc4->fd = bufmgr_vc4->fd;
1145         bo_vc4->gem = arg.handle;
1146         bo_vc4->size = arg.size;
1147         bo_vc4->name = key;
1148         bo_vc4->flags_tbm = 0;
1149
1150         if (!_bo_init_cache_state(bufmgr_vc4, bo_vc4, 1)) {
1151                 TBM_VC4_ERROR("fail init cache state(%d)\n", bo_vc4->name);
1152                 free(bo_vc4);
1153                 return 0;
1154         }
1155
1156         if (!bo_vc4->dmabuf) {
1157                 struct drm_prime_handle arg = {0, };
1158
1159                 arg.handle = bo_vc4->gem;
1160                 if (drmIoctl(bo_vc4->fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &arg)) {
1161                         TBM_VC4_ERROR("fail to DRM_IOCTL_PRIME_HANDLE_TO_FD gem=%d\n", bo_vc4->gem);
1162                         free(bo_vc4);
1163                         return 0;
1164                 }
1165                 bo_vc4->dmabuf = arg.fd;
1166         }
1167
1168         /* add bo to hash */
1169         privGem = NULL;
1170
1171         privGem = calloc(1, sizeof(PrivGem));
1172         if (!privGem) {
1173                 TBM_VC4_ERROR("fail to calloc privGem\n");
1174                 free(bo_vc4);
1175                 return 0;
1176         }
1177
1178         privGem->ref_count = 1;
1179         privGem->bo_priv = bo_vc4;
1180
1181         if (drmHashInsert(bufmgr_vc4->hashBos, bo_vc4->name,
1182                            (void *)privGem) < 0) {
1183                 TBM_VC4_ERROR("Cannot insert bo to Hash(%d)\n", bo_vc4->name);
1184         }
1185
1186         TBM_VC4_DEBUG("    bo:%p, gem:%d(%d), fd:%d, flags:%d, size:%d\n",
1187             bo,
1188             bo_vc4->gem, bo_vc4->name,
1189             bo_vc4->dmabuf,
1190             bo_vc4->flags_tbm,
1191             bo_vc4->size);
1192
1193         return (void *)bo_vc4;
1194 }
1195
1196 static void *
1197 tbm_vc4_bo_import_fd(tbm_bo bo, tbm_fd key)
1198 {
1199         VC4_RETURN_VAL_IF_FAIL(bo != NULL, 0);
1200
1201         tbm_bufmgr_vc4 bufmgr_vc4;
1202         tbm_bo_vc4 bo_vc4;
1203         PrivGem *privGem = NULL;
1204         unsigned int name;
1205         int ret;
1206
1207         bufmgr_vc4 = (tbm_bufmgr_vc4)tbm_backend_get_bufmgr_priv(bo);
1208         VC4_RETURN_VAL_IF_FAIL(bufmgr_vc4 != NULL, 0);
1209
1210         /*getting handle from fd*/
1211         unsigned int gem = 0;
1212         struct drm_prime_handle arg = {0, };
1213
1214         arg.fd = key;
1215         arg.flags = 0;
1216         if (drmIoctl(bufmgr_vc4->fd, DRM_IOCTL_PRIME_FD_TO_HANDLE, &arg)) {
1217                 TBM_VC4_ERROR("bo:%p Cannot get gem handle from fd:%d (%s)\n",
1218                                bo, arg.fd, strerror(errno));
1219                 return NULL;
1220         }
1221         gem = arg.handle;
1222
1223         name = _get_name(bufmgr_vc4->fd, gem);
1224         if (!name) {
1225                 TBM_VC4_ERROR("bo:%p Cannot get name from gem:%d, fd:%d (%s)\n",
1226                                bo, gem, key, strerror(errno));
1227                 return 0;
1228         }
1229
1230         ret = drmHashLookup(bufmgr_vc4->hashBos, name, (void **)&privGem);
1231         if (ret == 0) {
1232                 if (gem == privGem->bo_priv->gem)
1233                         return privGem->bo_priv;
1234         }
1235
1236         unsigned int real_size = -1;
1237
1238         /* Determine size of bo.  The fd-to-handle ioctl really should
1239          * return the size, but it doesn't.  If we have kernel 3.12 or
1240          * later, we can lseek on the prime fd to get the size.  Older
1241          * kernels will just fail, in which case we fall back to the
1242          * provided (estimated or guess size).
1243          */
1244         real_size = lseek(key, 0, SEEK_END);
1245
1246         struct drm_gem_open open_arg = {0, };
1247
1248         open_arg.name = name;
1249         if (drmIoctl(bufmgr_vc4->fd, DRM_IOCTL_GEM_OPEN, &open_arg)) {
1250                 TBM_VC4_ERROR("Cannot open gem name=%d\n", name);
1251                 return 0;
1252         }
1253
1254         struct drm_gem_close close_arg = {0, };
1255         memset(&arg, 0, sizeof(close_arg));
1256         close_arg.handle = open_arg.handle;
1257         if (drmIoctl(bufmgr_vc4->fd, DRM_IOCTL_GEM_CLOSE, &close_arg)) {
1258                 TBM_VC4_ERROR("Cannot close gem_handle (%d)\n", open_arg.handle,
1259                               strerror(errno));
1260                 return 0;
1261         }
1262
1263         if (real_size == -1)
1264                 real_size = open_arg.size;
1265
1266         bo_vc4 = calloc(1, sizeof(struct _tbm_bo_vc4));
1267         if (!bo_vc4) {
1268                 TBM_VC4_ERROR("bo:%p fail to allocate the bo private\n", bo);
1269                 return 0;
1270         }
1271
1272         bo_vc4->fd = bufmgr_vc4->fd;
1273         bo_vc4->gem = gem;
1274         bo_vc4->size = real_size;
1275         bo_vc4->flags_tbm = 0;
1276         bo_vc4->name = name;
1277
1278         if (!_bo_init_cache_state(bufmgr_vc4, bo_vc4, 1)) {
1279                 TBM_VC4_ERROR("fail init cache state(%d)\n", bo_vc4->name);
1280                 free(bo_vc4);
1281                 return 0;
1282         }
1283
1284         /* add bo to hash */
1285         privGem = NULL;
1286
1287         privGem = calloc(1, sizeof(PrivGem));
1288         if (!privGem) {
1289                 TBM_VC4_ERROR("fail to calloc privGem\n");
1290                 free(bo_vc4);
1291                 return 0;
1292         }
1293
1294         privGem->ref_count = 1;
1295         privGem->bo_priv = bo_vc4;
1296
1297         if (drmHashInsert(bufmgr_vc4->hashBos, bo_vc4->name,
1298                            (void *)privGem) < 0) {
1299                 TBM_VC4_ERROR("bo:%p Cannot insert bo to Hash(%d) from gem:%d, fd:%d\n",
1300                                bo, bo_vc4->name, gem, key);
1301         }
1302
1303         TBM_VC4_DEBUG(" bo:%p, gem:%d(%d), fd:%d, key_fd:%d, flags:%d, size:%d\n",
1304             bo,
1305             bo_vc4->gem, bo_vc4->name,
1306             bo_vc4->dmabuf,
1307             key,
1308             bo_vc4->flags_tbm,
1309             bo_vc4->size);
1310
1311         return (void *)bo_vc4;
1312 }
1313
1314 static unsigned int
1315 tbm_vc4_bo_export(tbm_bo bo)
1316 {
1317         VC4_RETURN_VAL_IF_FAIL(bo != NULL, 0);
1318
1319         tbm_bo_vc4 bo_vc4;
1320
1321         bo_vc4 = (tbm_bo_vc4)tbm_backend_get_bo_priv(bo);
1322         VC4_RETURN_VAL_IF_FAIL(bo_vc4 != NULL, 0);
1323
1324         if (!bo_vc4->name) {
1325                 bo_vc4->name = _get_name(bo_vc4->fd, bo_vc4->gem);
1326                 if (!bo_vc4->name) {
1327                         TBM_VC4_ERROR("Cannot get name\n");
1328                         return 0;
1329                 }
1330         }
1331
1332         TBM_VC4_DEBUG("    bo:%p, gem:%d(%d), fd:%d, flags:%d, size:%d\n",
1333             bo,
1334             bo_vc4->gem, bo_vc4->name,
1335             bo_vc4->dmabuf,
1336             bo_vc4->flags_tbm,
1337             bo_vc4->size);
1338
1339         return (unsigned int)bo_vc4->name;
1340 }
1341
1342 tbm_fd
1343 tbm_vc4_bo_export_fd(tbm_bo bo)
1344 {
1345         VC4_RETURN_VAL_IF_FAIL(bo != NULL, -1);
1346
1347         tbm_bo_vc4 bo_vc4;
1348         int ret;
1349
1350         bo_vc4 = (tbm_bo_vc4)tbm_backend_get_bo_priv(bo);
1351         VC4_RETURN_VAL_IF_FAIL(bo_vc4 != NULL, -1);
1352
1353         struct drm_prime_handle arg = {0, };
1354
1355         arg.handle = bo_vc4->gem;
1356         ret = drmIoctl(bo_vc4->fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &arg);
1357         if (ret) {
1358                 TBM_VC4_ERROR("bo:%p Cannot dmabuf=%d (%s)\n",
1359                                bo, bo_vc4->gem, strerror(errno));
1360                 return (tbm_fd) ret;
1361         }
1362
1363         TBM_VC4_DEBUG(" bo:%p, gem:%d(%d), fd:%d, key_fd:%d, flags:%d, size:%d\n",
1364             bo,
1365             bo_vc4->gem, bo_vc4->name,
1366             bo_vc4->dmabuf,
1367             arg.fd,
1368             bo_vc4->flags_tbm,
1369             bo_vc4->size);
1370
1371         return (tbm_fd)arg.fd;
1372 }
1373
1374 static tbm_bo_handle
1375 tbm_vc4_bo_get_handle(tbm_bo bo, int device)
1376 {
1377         VC4_RETURN_VAL_IF_FAIL(bo != NULL, (tbm_bo_handle) NULL);
1378
1379         tbm_bo_handle bo_handle;
1380         tbm_bo_vc4 bo_vc4;
1381
1382         bo_vc4 = (tbm_bo_vc4)tbm_backend_get_bo_priv(bo);
1383         VC4_RETURN_VAL_IF_FAIL(bo_vc4 != NULL, (tbm_bo_handle) NULL);
1384
1385         if (!bo_vc4->gem) {
1386                 TBM_VC4_ERROR("Cannot map gem=%d\n", bo_vc4->gem);
1387                 return (tbm_bo_handle) NULL;
1388         }
1389
1390         TBM_VC4_DEBUG("bo:%p, gem:%d(%d), fd:%d, flags:%d, size:%d, %s\n",
1391             bo,
1392             bo_vc4->gem, bo_vc4->name,
1393             bo_vc4->dmabuf,
1394             bo_vc4->flags_tbm,
1395             bo_vc4->size,
1396             STR_DEVICE[device]);
1397
1398         /*Get mapped bo_handle*/
1399         bo_handle = _vc4_bo_handle(bo_vc4, device);
1400         if (bo_handle.ptr == NULL) {
1401                 TBM_VC4_ERROR("Cannot get handle: gem:%d, device:%d\n",
1402                         bo_vc4->gem, device);
1403                 return (tbm_bo_handle) NULL;
1404         }
1405
1406         return bo_handle;
1407 }
1408
1409 static tbm_bo_handle
1410 tbm_vc4_bo_map(tbm_bo bo, int device, int opt)
1411 {
1412         VC4_RETURN_VAL_IF_FAIL(bo != NULL, (tbm_bo_handle) NULL);
1413
1414         tbm_bo_handle bo_handle;
1415         tbm_bo_vc4 bo_vc4;
1416         tbm_bufmgr_vc4 bufmgr_vc4;
1417
1418         bufmgr_vc4 = (tbm_bufmgr_vc4)tbm_backend_get_bufmgr_priv(bo);
1419         VC4_RETURN_VAL_IF_FAIL(bufmgr_vc4 != NULL, (tbm_bo_handle)NULL);
1420
1421         bo_vc4 = (tbm_bo_vc4)tbm_backend_get_bo_priv(bo);
1422         VC4_RETURN_VAL_IF_FAIL(bo_vc4 != NULL, (tbm_bo_handle) NULL);
1423
1424         if (!bo_vc4->gem) {
1425                 TBM_VC4_ERROR("Cannot map gem=%d\n", bo_vc4->gem);
1426                 return (tbm_bo_handle) NULL;
1427         }
1428
1429         TBM_VC4_DEBUG("       bo:%p, gem:%d(%d), fd:%d, %s, %s\n",
1430             bo,
1431             bo_vc4->gem, bo_vc4->name,
1432             bo_vc4->dmabuf,
1433             STR_DEVICE[device],
1434             STR_OPT[opt]);
1435
1436         /*Get mapped bo_handle*/
1437         bo_handle = _vc4_bo_handle(bo_vc4, device);
1438         if (bo_handle.ptr == NULL) {
1439                 TBM_VC4_ERROR("Cannot get handle: gem:%d, device:%d, opt:%d\n",
1440                                bo_vc4->gem, device, opt);
1441                 return (tbm_bo_handle) NULL;
1442         }
1443
1444         if (bo_vc4->map_cnt == 0)
1445                 _bo_set_cache_state(bufmgr_vc4, bo_vc4, device, opt);
1446
1447         bo_vc4->last_map_device = device;
1448
1449         bo_vc4->map_cnt++;
1450
1451         return bo_handle;
1452 }
1453
1454 static int
1455 tbm_vc4_bo_unmap(tbm_bo bo)
1456 {
1457         VC4_RETURN_VAL_IF_FAIL(bo != NULL, 0);
1458
1459         tbm_bo_vc4 bo_vc4;
1460         tbm_bufmgr_vc4 bufmgr_vc4;
1461
1462         bufmgr_vc4 = (tbm_bufmgr_vc4)tbm_backend_get_bufmgr_priv(bo);
1463         VC4_RETURN_VAL_IF_FAIL(bufmgr_vc4 != NULL, 0);
1464
1465         bo_vc4 = (tbm_bo_vc4)tbm_backend_get_bo_priv(bo);
1466         VC4_RETURN_VAL_IF_FAIL(bo_vc4 != NULL, 0);
1467
1468
1469         if (!bo_vc4->gem)
1470                 return 0;
1471
1472         bo_vc4->map_cnt--;
1473
1474         if (bo_vc4->map_cnt == 0)
1475                 _bo_save_cache_state(bufmgr_vc4, bo_vc4);
1476
1477 #ifdef ENABLE_CACHECRTL
1478         if (bo_vc4->last_map_device == TBM_DEVICE_CPU)
1479                 _vc4_cache_flush(bufmgr_vc4, bo_vc4, TBM_VC4_CACHE_FLUSH_ALL);
1480 #endif
1481
1482         bo_vc4->last_map_device = -1;
1483
1484         TBM_VC4_DEBUG("     bo:%p, gem:%d(%d), fd:%d\n",
1485             bo,
1486             bo_vc4->gem, bo_vc4->name,
1487             bo_vc4->dmabuf);
1488
1489         return 1;
1490 }
1491
/**
 * Lock the bo against concurrent device access.
 *
 * When ALWAYS_BACKEND_CTRL is defined this is a no-op that returns 1
 * (the frontend handles locking).  Otherwise, for TBM_DEVICE_3D a
 * dma-buf fence is acquired via DMABUF_IOCTL_GET_FENCE and recorded in
 * the bo's fence list; for TBM_DEVICE_CPU an advisory record lock is
 * taken on the dmabuf fd.  Other device types are rejected.
 *
 * @param bo     the buffer object to lock
 * @param device TBM_DEVICE_3D or TBM_DEVICE_CPU
 * @param opt    TBM_OPTION_READ or TBM_OPTION_WRITE
 * @return 1 on success, 0 on failure.
 */
static int
tbm_vc4_bo_lock(tbm_bo bo, int device, int opt)
{
	VC4_RETURN_VAL_IF_FAIL(bo != NULL, 0);

#ifndef ALWAYS_BACKEND_CTRL
	tbm_bufmgr_vc4 bufmgr_vc4;
	tbm_bo_vc4 bo_vc4;
	struct dma_buf_fence fence;
	struct flock filelock;
	int ret = 0;

	/* only 3D and CPU devices participate in backend locking */
	if (device != TBM_DEVICE_3D && device != TBM_DEVICE_CPU) {
		TBM_VC4_DEBUG("Not support device type,\n");
		return 0;
	}

	bo_vc4 = (tbm_bo_vc4)tbm_backend_get_bo_priv(bo);
	VC4_RETURN_VAL_IF_FAIL(bo_vc4 != NULL, 0);

	bufmgr_vc4 = (tbm_bufmgr_vc4)tbm_backend_get_bufmgr_priv(bo);
	VC4_RETURN_VAL_IF_FAIL(bufmgr_vc4 != NULL, 0);

	memset(&fence, 0, sizeof(struct dma_buf_fence));

	/* Check if the given type is valid or not. */
	if (opt & TBM_OPTION_WRITE) {
		if (device == TBM_DEVICE_3D)
			fence.type = DMA_BUF_ACCESS_WRITE | DMA_BUF_ACCESS_DMA;
	} else if (opt & TBM_OPTION_READ) {
		if (device == TBM_DEVICE_3D)
			fence.type = DMA_BUF_ACCESS_READ | DMA_BUF_ACCESS_DMA;
	} else {
		TBM_VC4_ERROR("Invalid argument\n");
		return 0;
	}

	/* Check if the tbm manager supports dma fence or not. */
	if (!bufmgr_vc4->use_dma_fence) {
		TBM_VC4_ERROR("Not support DMA FENCE(%s)\n", strerror(errno));
		return 0;

	}

	if (device == TBM_DEVICE_3D) {
		/* 3D access: acquire a dma-buf fence on the exported fd */
		ret = ioctl(bo_vc4->dmabuf, DMABUF_IOCTL_GET_FENCE, &fence);
		if (ret < 0) {
			TBM_VC4_ERROR("Cannot set GET FENCE(%s)\n", strerror(errno));
			return 0;
		}
	} else {
		/* CPU access: take a blocking advisory lock on the dmabuf fd */
		if (opt & TBM_OPTION_WRITE)
			filelock.l_type = F_WRLCK;
		else
			filelock.l_type = F_RDLCK;

		filelock.l_whence = SEEK_CUR;
		filelock.l_start = 0;
		filelock.l_len = 0;

		if (-1 == fcntl(bo_vc4->dmabuf, F_SETLKW, &filelock))
			return 0;
	}

	pthread_mutex_lock(&bo_vc4->mutex);

	if (device == TBM_DEVICE_3D) {
		int i;

		/* record the acquired fence in the first free list slot */
		for (i = 0; i < DMA_FENCE_LIST_MAX; i++) {
			if (bo_vc4->dma_fence[i].ctx == 0) {
				bo_vc4->dma_fence[i].type = fence.type;
				bo_vc4->dma_fence[i].ctx = fence.ctx;
				break;
			}
		}

		if (i == DMA_FENCE_LIST_MAX) {
			/*TODO: if dma_fence list is full, it needs realloc. I will fix this. by minseok3.kim*/
			TBM_VC4_ERROR("fence list is full\n");
		}
	}

	pthread_mutex_unlock(&bo_vc4->mutex);

	TBM_VC4_DEBUG("DMABUF_IOCTL_GET_FENCE! bo:%p, gem:%d(%d), fd:%ds\n",
	    bo,
	    bo_vc4->gem, bo_vc4->name,
	    bo_vc4->dmabuf);
#endif /* ALWAYS_BACKEND_CTRL */

	return 1;
}
1585
/**
 * Release the lock taken by tbm_vc4_bo_lock().
 *
 * When ALWAYS_BACKEND_CTRL is defined this is a no-op that returns 1.
 * Otherwise: for a DMA (3D) lock, the oldest recorded fence is popped
 * from the bo's fence list and released via DMABUF_IOCTL_PUT_FENCE;
 * for a CPU lock, the advisory record lock on the dmabuf fd is dropped.
 *
 * @param bo the buffer object to unlock
 * @return 1 on success, 0 on failure.
 */
static int
tbm_vc4_bo_unlock(tbm_bo bo)
{
	VC4_RETURN_VAL_IF_FAIL(bo != NULL, 0);

#ifndef ALWAYS_BACKEND_CTRL
	tbm_bo_vc4 bo_vc4;
	struct dma_buf_fence fence;
	struct flock filelock;
	unsigned int dma_type = 0;
	int ret = 0;

	bo_vc4 = (tbm_bo_vc4)tbm_backend_get_bo_priv(bo);
	VC4_RETURN_VAL_IF_FAIL(bo_vc4 != NULL, 0);

	/* a DMA-access fence at the head of the list means this was a 3D lock */
	if (bo_vc4->dma_fence[0].type & DMA_BUF_ACCESS_DMA)
		dma_type = 1;

	if (!bo_vc4->dma_fence[0].ctx && dma_type) {
		TBM_VC4_DEBUG("FENCE not support or ignored,\n");
		return 0;
	}

	/* NOTE(review): this condition duplicates the one above and is
	 * therefore unreachable; the message suggests a different check
	 * (device type) was intended — confirm against upstream history. */
	if (!bo_vc4->dma_fence[0].ctx && dma_type) {
		TBM_VC4_DEBUG("device type is not 3D/CPU,\n");
		return 0;
	}

	pthread_mutex_lock(&bo_vc4->mutex);

	if (dma_type) {
		/* pop the head fence and shift the rest of the list down */
		fence.type = bo_vc4->dma_fence[0].type;
		fence.ctx = bo_vc4->dma_fence[0].ctx;
		int i;

		for (i = 1; i < DMA_FENCE_LIST_MAX; i++) {
			bo_vc4->dma_fence[i - 1].type = bo_vc4->dma_fence[i].type;
			bo_vc4->dma_fence[i - 1].ctx = bo_vc4->dma_fence[i].ctx;
		}
		bo_vc4->dma_fence[DMA_FENCE_LIST_MAX - 1].type = 0;
		bo_vc4->dma_fence[DMA_FENCE_LIST_MAX - 1].ctx = 0;
	}
	pthread_mutex_unlock(&bo_vc4->mutex);

	if (dma_type) {
		/* return the fence to the kernel */
		ret = ioctl(bo_vc4->dmabuf, DMABUF_IOCTL_PUT_FENCE, &fence);
		if (ret < 0) {
			TBM_VC4_ERROR("Can not set PUT FENCE(%s)\n", strerror(errno));
			return 0;
		}
	} else {
		/* CPU lock: drop the advisory file lock */
		filelock.l_type = F_UNLCK;
		filelock.l_whence = SEEK_CUR;
		filelock.l_start = 0;
		filelock.l_len = 0;

		if (-1 == fcntl(bo_vc4->dmabuf, F_SETLKW, &filelock))
			return 0;
	}

	TBM_VC4_DEBUG("DMABUF_IOCTL_PUT_FENCE! bo:%p, gem:%d(%d), fd:%ds\n",
	    bo,
	    bo_vc4->gem, bo_vc4->name,
	    bo_vc4->dmabuf);
#endif /* ALWAYS_BACKEND_CTRL */

	return 1;
}
1654
1655 static void
1656 tbm_vc4_bufmgr_deinit(void *priv)
1657 {
1658         VC4_RETURN_IF_FAIL(priv != NULL);
1659
1660         tbm_bufmgr_vc4 bufmgr_vc4;
1661
1662         bufmgr_vc4 = (tbm_bufmgr_vc4)priv;
1663
1664         if (bufmgr_vc4->hashBos) {
1665                 unsigned long key;
1666                 void *value;
1667
1668                 while (drmHashFirst(bufmgr_vc4->hashBos, &key, &value) > 0) {
1669                         free(value);
1670                         drmHashDelete(bufmgr_vc4->hashBos, key);
1671                 }
1672
1673                 drmHashDestroy(bufmgr_vc4->hashBos);
1674                 bufmgr_vc4->hashBos = NULL;
1675         }
1676
1677         _bufmgr_deinit_cache_state(bufmgr_vc4);
1678
1679         if (bufmgr_vc4->bind_display)
1680                 tbm_drm_helper_wl_auth_server_deinit();
1681
1682         if (bufmgr_vc4->device_name)
1683                 free(bufmgr_vc4->device_name);
1684
1685         if (tbm_backend_is_display_server())
1686                 tbm_drm_helper_unset_tbm_master_fd();
1687         else
1688                 tbm_drm_helper_unset_fd();
1689
1690         close(bufmgr_vc4->fd);
1691
1692         free(bufmgr_vc4);
1693 }
1694
1695 int
1696 tbm_vc4_surface_supported_format(uint32_t **formats, uint32_t *num)
1697 {
1698         uint32_t *color_formats = NULL;
1699
1700         color_formats = (uint32_t *)calloc(1,
1701                                            sizeof(uint32_t) * TBM_COLOR_FORMAT_COUNT);
1702
1703         if (color_formats == NULL)
1704                 return 0;
1705
1706         memcpy(color_formats, tbm_vc4_color_format_list,
1707                sizeof(uint32_t)*TBM_COLOR_FORMAT_COUNT);
1708
1709         *formats = color_formats;
1710         *num = TBM_COLOR_FORMAT_COUNT;
1711
1712         TBM_VC4_DEBUG("tbm_vc4_surface_supported_format  count = %d\n", *num);
1713
1714         return 1;
1715 }
1716
1717 static int
1718 _new_calc_plane_nv12(int width, int height)
1719 {
1720         int mbX, mbY;
1721
1722         mbX = DIV_ROUND_UP(width, S5P_FIMV_NUM_PIXELS_IN_MB_ROW);
1723         mbY = DIV_ROUND_UP(height, S5P_FIMV_NUM_PIXELS_IN_MB_COL);
1724
1725         if (width * height < S5P_FIMV_MAX_FRAME_SIZE)
1726                 mbY = (mbY + 1) / 2 * 2;
1727
1728         return ((mbX * S5P_FIMV_NUM_PIXELS_IN_MB_COL) * (mbY *
1729                         S5P_FIMV_NUM_PIXELS_IN_MB_ROW));
1730 }
1731
1732 static int
1733 _calc_yplane_nv12(int width, int height)
1734 {
1735         int mbX, mbY;
1736
1737         mbX = SIZE_ALIGN(width + 24, S5P_FIMV_NV12MT_HALIGN);
1738         mbY = SIZE_ALIGN(height + 16, S5P_FIMV_NV12MT_VALIGN);
1739
1740         return SIZE_ALIGN(mbX * mbY, S5P_FIMV_DEC_BUF_ALIGN);
1741 }
1742
1743 static int
1744 _calc_uvplane_nv12(int width, int height)
1745 {
1746         int mbX, mbY;
1747
1748         mbX = SIZE_ALIGN(width + 16, S5P_FIMV_NV12MT_HALIGN);
1749         mbY = SIZE_ALIGN(height + 4, S5P_FIMV_NV12MT_VALIGN);
1750
1751         return SIZE_ALIGN((mbX * mbY) >> 1, S5P_FIMV_DEC_BUF_ALIGN);
1752 }
1753
1754 static int
1755 _new_calc_yplane_nv12(int width, int height)
1756 {
1757         return SIZE_ALIGN(_new_calc_plane_nv12(width,
1758                                                 height) + S5P_FIMV_D_ALIGN_PLANE_SIZE,
1759                            TBM_SURFACE_ALIGNMENT_PLANE_NV12);
1760 }
1761
1762 static int
1763 _new_calc_uvplane_nv12(int width, int height)
1764 {
1765         return SIZE_ALIGN((_new_calc_plane_nv12(width,
1766                                                 height) >> 1) + S5P_FIMV_D_ALIGN_PLANE_SIZE,
1767                           TBM_SURFACE_ALIGNMENT_PLANE_NV12);
1768 }
1769
1770 /**
1771  * @brief get the plane data of the surface.
1772  * @param[in] width : the width of the surface
1773  * @param[in] height : the height of the surface
1774  * @param[in] format : the format of the surface
 * @param[in] plane_idx : the index of the plane
1776  * @param[out] size : the size of the plane
1777  * @param[out] offset : the offset of the plane
1778  * @param[out] pitch : the pitch of the plane
1779  * @param[out] padding : the padding of the plane
1780  * @return 1 if this function succeeds, otherwise 0.
1781  */
1782 int
1783 tbm_vc4_surface_get_plane_data(int width, int height,
1784                                   tbm_format format, int plane_idx, uint32_t *size, uint32_t *offset,
1785                                   uint32_t *pitch, int *bo_idx)
1786 {
1787         int ret = 1;
1788         int bpp;
1789         int _offset = 0;
1790         int _pitch = 0;
1791         int _size = 0;
1792         int _bo_idx = 0;
1793
1794         switch (format) {
1795                 /* 16 bpp RGB */
1796         case TBM_FORMAT_XRGB4444:
1797         case TBM_FORMAT_XBGR4444:
1798         case TBM_FORMAT_RGBX4444:
1799         case TBM_FORMAT_BGRX4444:
1800         case TBM_FORMAT_ARGB4444:
1801         case TBM_FORMAT_ABGR4444:
1802         case TBM_FORMAT_RGBA4444:
1803         case TBM_FORMAT_BGRA4444:
1804         case TBM_FORMAT_XRGB1555:
1805         case TBM_FORMAT_XBGR1555:
1806         case TBM_FORMAT_RGBX5551:
1807         case TBM_FORMAT_BGRX5551:
1808         case TBM_FORMAT_ARGB1555:
1809         case TBM_FORMAT_ABGR1555:
1810         case TBM_FORMAT_RGBA5551:
1811         case TBM_FORMAT_BGRA5551:
1812         case TBM_FORMAT_RGB565:
1813                 bpp = 16;
1814                 _offset = 0;
1815                 _pitch = SIZE_ALIGN((width * bpp) >> 3, TBM_SURFACE_ALIGNMENT_PITCH_RGB);
1816                 _size = SIZE_ALIGN(_pitch * height, TBM_SURFACE_ALIGNMENT_PLANE);
1817                 _bo_idx = 0;
1818                 break;
1819                 /* 24 bpp RGB */
1820         case TBM_FORMAT_RGB888:
1821         case TBM_FORMAT_BGR888:
1822                 bpp = 24;
1823                 _offset = 0;
1824                 _pitch = SIZE_ALIGN((width * bpp) >> 3, TBM_SURFACE_ALIGNMENT_PITCH_RGB);
1825                 _size = SIZE_ALIGN(_pitch * height, TBM_SURFACE_ALIGNMENT_PLANE);
1826                 _bo_idx = 0;
1827                 break;
1828                 /* 32 bpp RGB */
1829         case TBM_FORMAT_XRGB8888:
1830         case TBM_FORMAT_XBGR8888:
1831         case TBM_FORMAT_RGBX8888:
1832         case TBM_FORMAT_BGRX8888:
1833         case TBM_FORMAT_ARGB8888:
1834         case TBM_FORMAT_ABGR8888:
1835         case TBM_FORMAT_RGBA8888:
1836         case TBM_FORMAT_BGRA8888:
1837                 bpp = 32;
1838                 _offset = 0;
1839                 _pitch = SIZE_ALIGN((width * bpp) >> 3, TBM_SURFACE_ALIGNMENT_PITCH_RGB);
1840                 _size = SIZE_ALIGN(_pitch * height, TBM_SURFACE_ALIGNMENT_PLANE);
1841                 _bo_idx = 0;
1842                 break;
1843
1844                 /* packed YCbCr */
1845         case TBM_FORMAT_YUYV:
1846         case TBM_FORMAT_YVYU:
1847         case TBM_FORMAT_UYVY:
1848         case TBM_FORMAT_VYUY:
1849         case TBM_FORMAT_AYUV:
1850                 bpp = 32;
1851                 _offset = 0;
1852                 _pitch = SIZE_ALIGN((width * bpp) >> 3, TBM_SURFACE_ALIGNMENT_PITCH_YUV);
1853                 _size = SIZE_ALIGN(_pitch * height, TBM_SURFACE_ALIGNMENT_PLANE);
1854                 _bo_idx = 0;
1855                 break;
1856
1857                 /*
1858                 * 2 plane YCbCr
1859                 * index 0 = Y plane, [7:0] Y
1860                 * index 1 = Cr:Cb plane, [15:0] Cr:Cb little endian
1861                 * or
1862                 * index 1 = Cb:Cr plane, [15:0] Cb:Cr little endian
1863                 */
1864         case TBM_FORMAT_NV12:
1865                 bpp = 12;
1866                 if (plane_idx == 0) {
1867                         _offset = 0;
1868                         _pitch = SIZE_ALIGN(width, TBM_SURFACE_ALIGNMENT_PITCH_YUV);
1869                         _size = MAX(_calc_yplane_nv12(width, height), _new_calc_yplane_nv12(width,
1870                                         height));
1871                         _bo_idx = 0;
1872                 } else if (plane_idx == 1) {
1873                         _offset = 0;
1874                         _pitch = SIZE_ALIGN(width, TBM_SURFACE_ALIGNMENT_PITCH_YUV / 2);
1875                         _size = MAX(_calc_uvplane_nv12(width, height), _new_calc_uvplane_nv12(width,
1876                                         height));
1877                         _bo_idx = 1;
1878                 }
1879                 break;
1880         case TBM_FORMAT_NV21:
1881                 bpp = 12;
1882                 if (plane_idx == 0) {
1883                         _offset = 0;
1884                         _pitch = SIZE_ALIGN(width, TBM_SURFACE_ALIGNMENT_PITCH_YUV);
1885                         _size = SIZE_ALIGN(_pitch * height, TBM_SURFACE_ALIGNMENT_PLANE);
1886                         _bo_idx = 0;
1887                 } else if (plane_idx == 1) {
1888                         _offset = width * height;
1889                         _pitch = SIZE_ALIGN(width, TBM_SURFACE_ALIGNMENT_PITCH_YUV / 2);
1890                         _size = SIZE_ALIGN(_pitch * (height / 2), TBM_SURFACE_ALIGNMENT_PLANE);
1891                         _bo_idx = 0;
1892                 }
1893                 break;
1894
1895         case TBM_FORMAT_NV16:
1896         case TBM_FORMAT_NV61:
1897                 bpp = 16;
1898                 /*if(plane_idx == 0)*/
1899                 {
1900                         _offset = 0;
1901                         _pitch = SIZE_ALIGN(width, TBM_SURFACE_ALIGNMENT_PITCH_YUV);
1902                         _size = SIZE_ALIGN(_pitch * height, TBM_SURFACE_ALIGNMENT_PLANE);
1903                         _bo_idx = 0;
1904                         if (plane_idx == 0)
1905                                 break;
1906                 }
1907                 /*else if( plane_idx ==1 )*/
1908                 {
1909                         _offset += _size;
1910                         _pitch = SIZE_ALIGN(width, TBM_SURFACE_ALIGNMENT_PITCH_YUV / 2);
1911                         _size = SIZE_ALIGN(_pitch * height, TBM_SURFACE_ALIGNMENT_PLANE);
1912                         _bo_idx = 0;
1913                 }
1914                 break;
1915
1916                 /*
1917                 * 3 plane YCbCr
1918                 * index 0: Y plane, [7:0] Y
1919                 * index 1: Cb plane, [7:0] Cb
1920                 * index 2: Cr plane, [7:0] Cr
1921                 * or
1922                 * index 1: Cr plane, [7:0] Cr
1923                 * index 2: Cb plane, [7:0] Cb
1924                 */
1925
1926                 /*
1927                 * NATIVE_BUFFER_FORMAT_YV12
1928                 * NATIVE_BUFFER_FORMAT_I420
1929                 */
1930         case TBM_FORMAT_YUV410:
1931         case TBM_FORMAT_YVU410:
1932                 bpp = 9;
1933                 /*if(plane_idx == 0)*/
1934                 {
1935                         _offset = 0;
1936                         _pitch = SIZE_ALIGN(width, TBM_SURFACE_ALIGNMENT_PITCH_YUV);
1937                         _size = SIZE_ALIGN(_pitch * height, TBM_SURFACE_ALIGNMENT_PLANE);
1938                         _bo_idx = 0;
1939                         if (plane_idx == 0)
1940                                 break;
1941                 }
1942                 /*else if(plane_idx == 1)*/
1943                 {
1944                         _offset += _size;
1945                         _pitch = SIZE_ALIGN(width / 4, TBM_SURFACE_ALIGNMENT_PITCH_YUV / 4);
1946                         _size = SIZE_ALIGN(_pitch * (height / 4), TBM_SURFACE_ALIGNMENT_PLANE);
1947                         _bo_idx = 0;
1948                         if (plane_idx == 1)
1949                                 break;
1950                 }
1951                 /*else if (plane_idx == 2)*/
1952                 {
1953                         _offset += _size;
1954                         _pitch = SIZE_ALIGN(width / 4, TBM_SURFACE_ALIGNMENT_PITCH_YUV / 4);
1955                         _size = SIZE_ALIGN(_pitch * (height / 4), TBM_SURFACE_ALIGNMENT_PLANE);
1956                         _bo_idx = 0;
1957                 }
1958                 break;
1959         case TBM_FORMAT_YUV411:
1960         case TBM_FORMAT_YVU411:
1961         case TBM_FORMAT_YUV420:
1962         case TBM_FORMAT_YVU420:
1963                 bpp = 12;
1964                 /*if(plane_idx == 0)*/
1965                 {
1966                         _offset = 0;
1967                         _pitch = SIZE_ALIGN(width, TBM_SURFACE_ALIGNMENT_PITCH_YUV);
1968                         _size = SIZE_ALIGN(_pitch * height, TBM_SURFACE_ALIGNMENT_PLANE);
1969                         _bo_idx = 0;
1970                         if (plane_idx == 0)
1971                                 break;
1972                 }
1973                 /*else if(plane_idx == 1)*/
1974                 {
1975                         _offset = 0;
1976                         _pitch = SIZE_ALIGN(width / 2, TBM_SURFACE_ALIGNMENT_PITCH_YUV / 2);
1977                         _size = SIZE_ALIGN(_pitch * (height / 2), TBM_SURFACE_ALIGNMENT_PLANE);
1978                         _bo_idx = 1;
1979                         if (plane_idx == 1)
1980                                 break;
1981                 }
1982                 /*else if (plane_idx == 2)*/
1983                 {
1984                         _offset = 0;
1985                         _pitch = SIZE_ALIGN(width / 2, TBM_SURFACE_ALIGNMENT_PITCH_YUV / 2);
1986                         _size = SIZE_ALIGN(_pitch * (height / 2), TBM_SURFACE_ALIGNMENT_PLANE);
1987                         _bo_idx = 2;
1988                 }
1989                 break;
1990         case TBM_FORMAT_YUV422:
1991         case TBM_FORMAT_YVU422:
1992                 bpp = 16;
1993                 /*if(plane_idx == 0)*/
1994                 {
1995                         _offset = 0;
1996                         _pitch = SIZE_ALIGN(width, TBM_SURFACE_ALIGNMENT_PITCH_YUV);
1997                         _size = SIZE_ALIGN(_pitch * height, TBM_SURFACE_ALIGNMENT_PLANE);
1998                         _bo_idx = 0;
1999                         if (plane_idx == 0)
2000                                 break;
2001                 }
2002                 /*else if(plane_idx == 1)*/
2003                 {
2004                         _offset += _size;
2005                         _pitch = SIZE_ALIGN(width / 2, TBM_SURFACE_ALIGNMENT_PITCH_YUV / 2);
2006                         _size = SIZE_ALIGN(_pitch * (height), TBM_SURFACE_ALIGNMENT_PLANE);
2007                         _bo_idx = 0;
2008                         if (plane_idx == 1)
2009                                 break;
2010                 }
2011                 /*else if (plane_idx == 2)*/
2012                 {
2013                         _offset += _size;
2014                         _pitch = SIZE_ALIGN(width / 2, TBM_SURFACE_ALIGNMENT_PITCH_YUV / 2);
2015                         _size = SIZE_ALIGN(_pitch * (height), TBM_SURFACE_ALIGNMENT_PLANE);
2016                         _bo_idx = 0;
2017                 }
2018                 break;
2019         case TBM_FORMAT_YUV444:
2020         case TBM_FORMAT_YVU444:
2021                 bpp = 24;
2022                 /*if(plane_idx == 0)*/
2023                 {
2024                         _offset = 0;
2025                         _pitch = SIZE_ALIGN(width, TBM_SURFACE_ALIGNMENT_PITCH_YUV);
2026                         _size = SIZE_ALIGN(_pitch * height, TBM_SURFACE_ALIGNMENT_PLANE);
2027                         _bo_idx = 0;
2028                         if (plane_idx == 0)
2029                                 break;
2030                 }
2031                 /*else if(plane_idx == 1)*/
2032                 {
2033                         _offset += _size;
2034                         _pitch = SIZE_ALIGN(width, TBM_SURFACE_ALIGNMENT_PITCH_YUV);
2035                         _size = SIZE_ALIGN(_pitch * height, TBM_SURFACE_ALIGNMENT_PLANE);
2036                         _bo_idx = 0;
2037                         if (plane_idx == 1)
2038                                 break;
2039                 }
2040                 /*else if (plane_idx == 2)*/
2041                 {
2042                         _offset += _size;
2043                         _pitch = SIZE_ALIGN(width, TBM_SURFACE_ALIGNMENT_PITCH_YUV);
2044                         _size = SIZE_ALIGN(_pitch * height, TBM_SURFACE_ALIGNMENT_PLANE);
2045                         _bo_idx = 0;
2046                 }
2047                 break;
2048         default:
2049                 bpp = 0;
2050                 break;
2051         }
2052
2053         *size = _size;
2054         *offset = _offset;
2055         *pitch = _pitch;
2056         *bo_idx = _bo_idx;
2057
2058         return ret;
2059 }
2060
2061 int
2062 tbm_vc4_bo_get_flags(tbm_bo bo)
2063 {
2064         VC4_RETURN_VAL_IF_FAIL(bo != NULL, 0);
2065
2066         tbm_bo_vc4 bo_vc4;
2067
2068         bo_vc4 = (tbm_bo_vc4)tbm_backend_get_bo_priv(bo);
2069         VC4_RETURN_VAL_IF_FAIL(bo_vc4 != NULL, 0);
2070
2071         return bo_vc4->flags_tbm;
2072 }
2073
2074 int
2075 tbm_vc4_bufmgr_bind_native_display(tbm_bufmgr bufmgr, void *native_display)
2076 {
2077         tbm_bufmgr_vc4 bufmgr_vc4;
2078
2079         bufmgr_vc4 = tbm_backend_get_priv_from_bufmgr(bufmgr);
2080         VC4_RETURN_VAL_IF_FAIL(bufmgr_vc4 != NULL, 0);
2081
2082         if (!tbm_drm_helper_wl_auth_server_init(native_display, bufmgr_vc4->fd,
2083                                            bufmgr_vc4->device_name, 0)) {
2084                 TBM_VC4_ERROR("fail to tbm_drm_helper_wl_server_init\n");
2085                 return 0;
2086         }
2087
2088         bufmgr_vc4->bind_display = native_display;
2089
2090         return 1;
2091 }
2092
/* Forward declaration of the backend's module init entry point. */
MODULEINITPPROTO(init_tbm_bufmgr_priv);

/* Version/identity record for this backend: module string, vendor
 * name, and the TBM ABI version it was built against. */
static TBMModuleVersionInfo BcmVersRec = {
	"vc42837",
	"Broadcom",
	TBM_ABI_VERSION,
};

/* Exported symbol the TBM core looks up when loading this backend
 * module: version record plus the init function to call. */
TBMModuleData tbmModuleData = { &BcmVersRec, init_tbm_bufmgr_priv};
2102
2103 int
2104 init_tbm_bufmgr_priv(tbm_bufmgr bufmgr, int fd)
2105 {
2106         tbm_bufmgr_backend bufmgr_backend;
2107         tbm_bufmgr_vc4 bufmgr_vc4;
2108         int fp;
2109
2110         if (!bufmgr)
2111                 return 0;
2112
2113         bufmgr_vc4 = calloc(1, sizeof(struct _tbm_bufmgr_vc4));
2114         if (!bufmgr_vc4) {
2115                 TBM_VC4_ERROR("fail to alloc bufmgr_vc4!\n");
2116                 return 0;
2117         }
2118
2119         if (tbm_backend_is_display_server()) {
2120                 bufmgr_vc4->fd = tbm_drm_helper_get_master_fd();
2121                 if (bufmgr_vc4->fd < 0) {
2122                         bufmgr_vc4->fd = _tbm_vc4_open_drm();
2123                         if (bufmgr_vc4->fd < 0) {
2124                                 TBM_VC4_ERROR("fail to open drm!\n", getpid());
2125                                 goto fail_open_drm;
2126                         }
2127                 }
2128
2129                 tbm_drm_helper_set_tbm_master_fd(bufmgr_vc4->fd);
2130
2131                 bufmgr_vc4->device_name = drmGetDeviceNameFromFd(bufmgr_vc4->fd);
2132                 if (!bufmgr_vc4->device_name) {
2133                         TBM_VC4_ERROR("fail to get device name!\n", getpid());
2134
2135                         tbm_drm_helper_unset_tbm_master_fd();
2136                         goto fail_get_device_name;
2137                 }
2138                 tbm_drm_helper_set_fd(bufmgr_vc4->fd);
2139         } else {
2140                 if (_check_render_node()) {
2141                         bufmgr_vc4->fd = _get_render_node();//TODO
2142                         if (bufmgr_vc4->fd < 0) {
2143                                 TBM_VC4_ERROR("fail to get render node\n");
2144                                 goto fail_get_render_node;
2145                         }
2146                         TBM_VC4_DEBUG("Use render node:%d\n", bufmgr_vc4->fd);
2147                 } else {
2148                         if (!tbm_drm_helper_get_auth_info(&(bufmgr_vc4->fd), &(bufmgr_vc4->device_name), NULL)) {
2149                                 TBM_VC4_ERROR("fail to get auth drm info!\n");
2150                                 goto fail_get_auth_info;
2151                         }
2152
2153                         tbm_drm_helper_set_fd(bufmgr_vc4->fd);
2154                 }
2155         }
2156
2157         //Check if the tbm manager supports dma fence or not.
2158         fp = open("/sys/module/dmabuf_sync/parameters/enabled", O_RDONLY);
2159         if (fp != -1) {
2160                 char buf[1];
2161                 int length = read(fp, buf, 1);
2162
2163                 if (length == 1 && buf[0] == '1')
2164                         bufmgr_vc4->use_dma_fence = 1;
2165
2166                 close(fp);
2167         }
2168
2169         if (!_bufmgr_init_cache_state(bufmgr_vc4)) {
2170                 TBM_VC4_ERROR("fail to init bufmgr cache state\n");
2171                 goto fail_init_cache_state;
2172         }
2173
2174         /*Create Hash Table*/
2175         bufmgr_vc4->hashBos = drmHashCreate();
2176
2177         bufmgr_backend = tbm_backend_alloc();
2178         if (!bufmgr_backend) {
2179                 TBM_VC4_ERROR("fail to alloc backend!\n");
2180                 goto fail_alloc_backend;
2181         }
2182
2183         bufmgr_backend->priv = (void *)bufmgr_vc4;
2184         bufmgr_backend->bufmgr_deinit = tbm_vc4_bufmgr_deinit;
2185         bufmgr_backend->bo_size = tbm_vc4_bo_size;
2186         bufmgr_backend->bo_alloc = tbm_vc4_bo_alloc;
2187         bufmgr_backend->bo_free = tbm_vc4_bo_free;
2188         bufmgr_backend->bo_import = tbm_vc4_bo_import;
2189         bufmgr_backend->bo_import_fd = tbm_vc4_bo_import_fd;
2190         bufmgr_backend->bo_export = tbm_vc4_bo_export;
2191         bufmgr_backend->bo_export_fd = tbm_vc4_bo_export_fd;
2192         bufmgr_backend->bo_get_handle = tbm_vc4_bo_get_handle;
2193         bufmgr_backend->bo_map = tbm_vc4_bo_map;
2194         bufmgr_backend->bo_unmap = tbm_vc4_bo_unmap;
2195         bufmgr_backend->surface_get_plane_data = tbm_vc4_surface_get_plane_data;
2196         bufmgr_backend->surface_supported_format = tbm_vc4_surface_supported_format;
2197         bufmgr_backend->bo_get_flags = tbm_vc4_bo_get_flags;
2198         bufmgr_backend->bo_lock = tbm_vc4_bo_lock;
2199         bufmgr_backend->bo_unlock = tbm_vc4_bo_unlock;
2200
2201         if (tbm_backend_is_display_server() && !_check_render_node())
2202                 bufmgr_backend->bufmgr_bind_native_display = tbm_vc4_bufmgr_bind_native_display;
2203
2204         if (!tbm_backend_init(bufmgr, bufmgr_backend)) {
2205                 TBM_VC4_ERROR("fail to init backend!\n");
2206                 goto fail_init_backend;
2207         }
2208
2209 #ifdef DEBUG
2210         {
2211                 char *env;
2212
2213                 env = getenv("TBM_VC4_DEBUG");
2214                 if (env) {
2215                         bDebug = atoi(env);
2216                         TBM_VC4_ERROR("TBM_VC4_DEBUG=%s\n", env);
2217                 } else
2218                         bDebug = 0;
2219         }
2220 #endif
2221
2222         TBM_VC4_DEBUG("drm_fd:%d\n", bufmgr_vc4->fd);
2223
2224         return 1;
2225
2226 fail_init_backend:
2227         tbm_backend_free(bufmgr_backend);
2228 fail_alloc_backend:
2229         if (bufmgr_vc4->hashBos)
2230                 drmHashDestroy(bufmgr_vc4->hashBos);
2231         _bufmgr_deinit_cache_state(bufmgr_vc4);
2232 fail_init_cache_state:
2233         if (tbm_backend_is_display_server())
2234                 tbm_drm_helper_unset_tbm_master_fd();
2235         else
2236                 tbm_drm_helper_unset_fd();
2237 fail_get_device_name:
2238         close(bufmgr_vc4->fd);
2239 fail_get_auth_info:
2240 fail_get_render_node:
2241 fail_open_drm:
2242         free(bufmgr_vc4);
2243         return 0;
2244 }
2245