remove the use of tbm_backend_bufmgr_query_display_server
[platform/adaptation/samsung_exynos/libtbm-exynos.git] / src / tbm_bufmgr_exynos.c
1 /**************************************************************************
2
3 libtbm_exynos
4
5 Copyright 2012 Samsung Electronics co., Ltd. All Rights Reserved.
6
7 Contact: SooChan Lim <sc1.lim@samsung.com>, Sangjin Lee <lsj119@samsung.com>
8
9 Permission is hereby granted, free of charge, to any person obtaining a
10 copy of this software and associated documentation files (the
11 "Software"), to deal in the Software without restriction, including
12 without limitation the rights to use, copy, modify, merge, publish,
13 distribute, sub license, and/or sell copies of the Software, and to
14 permit persons to whom the Software is furnished to do so, subject to
15 the following conditions:
16
17 The above copyright notice and this permission notice (including the
18 next paragraph) shall be included in all copies or substantial portions
19 of the Software.
20
21 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
22 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
23 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
24 IN NO EVENT SHALL PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR
25 ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
26 TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
27 SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
28
29 **************************************************************************/
30
31 #ifdef HAVE_CONFIG_H
32 #include "config.h"
33 #endif
34
35 #include <libudev.h>
36
37 #include <stdio.h>
38 #include <stdlib.h>
39 #include <stdint.h>
40 #include <string.h>
41 #include <sys/ioctl.h>
42 #include <sys/types.h>
43 #include <unistd.h>
44 #include <sys/mman.h>
45 #include <sys/stat.h>
46 #include <fcntl.h>
47 #include <errno.h>
48 #include <xf86drm.h>
49 #include <exynos_drm.h>
50 #include <pthread.h>
51 #include <tbm_backend.h>
52 #include <tbm_drm_helper.h>
53 #include <tbm_log.h>
54 #include <system_info.h>
55 #include "tbm_bufmgr_tgl.h"
56
57 #define TBM_COLOR_FORMAT_COUNT 4
58
59 #define EXYNOS_DRM_NAME "exynos"
60
61 #define STRERR_BUFSIZE 128
62
63 #define SIZE_ALIGN(value, base) (((value) + ((base) - 1)) & ~((base) - 1))
64 #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
65 #define MAX(a, b) ((a) > (b) ? (a) : (b))
66
67 static unsigned int g_tbm_surface_alignment_plane;
68 static unsigned int g_tbm_surface_alignment_pitch_rgb;
69
70 #define TBM_SURFACE_ALIGNMENT_PLANE_NV12 (4096)
71 #define TBM_SURFACE_ALIGNMENT_PITCH_YUV (16)
72
73 #define SZ_1M                                   0x00100000
74 #define S5P_FIMV_MAX_FRAME_SIZE                 (2 * SZ_1M)
75 #define S5P_FIMV_D_ALIGN_PLANE_SIZE             64
76 #define S5P_FIMV_NUM_PIXELS_IN_MB_ROW           16
77 #define S5P_FIMV_NUM_PIXELS_IN_MB_COL           16
78 #define S5P_FIMV_DEC_BUF_ALIGN                  (8 * 1024)
79 #define S5P_FIMV_NV12MT_HALIGN                  128
80 #define S5P_FIMV_NV12MT_VALIGN                  64
81
82 /* cache control at backend */
83 static unsigned int g_enable_cache_ctrl = 0;
84
85 struct dma_buf_info {
86         unsigned long   size;
87         unsigned int    fence_supported;
88         unsigned int    padding;
89 };
90
91 #define DMA_BUF_ACCESS_READ             0x1
92 #define DMA_BUF_ACCESS_WRITE            0x2
93 #define DMA_BUF_ACCESS_DMA              0x4
94 #define DMA_BUF_ACCESS_MAX              0x8
95
96 #define DMA_FENCE_LIST_MAX              5
97
98 struct dma_buf_fence {
99         unsigned long           ctx;
100         unsigned int            type;
101 };
102
103 #define DMABUF_IOCTL_BASE       'F'
104 #define DMABUF_IOWR(nr, type)   _IOWR(DMABUF_IOCTL_BASE, nr, type)
105
106 #define DMABUF_IOCTL_GET_INFO   DMABUF_IOWR(0x00, struct dma_buf_info)
107 #define DMABUF_IOCTL_GET_FENCE  DMABUF_IOWR(0x01, struct dma_buf_fence)
108 #define DMABUF_IOCTL_PUT_FENCE  DMABUF_IOWR(0x02, struct dma_buf_fence)
109
110 /* tgl key values */
111 #define GLOBAL_KEY   ((unsigned int)(-1))
112 /* TBM_CACHE */
113 #define TBM_EXYNOS_CACHE_INV       0x01 /**< cache invalidate  */
114 #define TBM_EXYNOS_CACHE_CLN       0x02 /**< cache clean */
115 #define TBM_EXYNOS_CACHE_ALL       0x10 /**< cache all */
116 #define TBM_EXYNOS_CACHE_FLUSH     (TBM_EXYNOS_CACHE_INV|TBM_EXYNOS_CACHE_CLN) /**< cache flush  */
117 #define TBM_EXYNOS_CACHE_FLUSH_ALL (TBM_EXYNOS_CACHE_FLUSH|TBM_EXYNOS_CACHE_ALL)        /**< cache flush all */
118
119 enum {
120         DEVICE_NONE = 0,
121         DEVICE_CA,                                      /* cache aware device */
122         DEVICE_CO                                       /* cache oblivious device */
123 };
124
125 typedef union _tbm_bo_cache_state tbm_bo_cache_state;
126
127 union _tbm_bo_cache_state {
128         unsigned int val;
129         struct {
130                 unsigned int cntFlush:16;       /* global flush count at last sync */
131                 unsigned int isCached:1;
132                 unsigned int isDirtied:2;
133         } data;
134 };
135
136 typedef struct _tbm_bufmgr_exynos *tbm_bufmgr_exynos;
137 typedef struct _tbm_bo_exynos *tbm_bo_exynos;
138
139 /* tbm buffer object for exynos */
140 struct _tbm_bo_exynos {
141         int fd;
142
143         unsigned int name;    /* FLINK ID */
144
145         unsigned int gem;     /* GEM Handle */
146
147         unsigned int dmabuf;  /* fd for dmabuf */
148
149         void *pBase;          /* virtual address */
150
151         unsigned int size;
152
153         unsigned int flags_exynos;
154         unsigned int flags_tbm;
155
156         pthread_mutex_t mutex;
157         struct dma_buf_fence dma_fence[DMA_FENCE_LIST_MAX];
158         int device;
159         int opt;
160
161         tbm_bo_cache_state cache_state;
162         unsigned int map_cnt;
163         int last_map_device;
164
165         tbm_bufmgr_exynos bufmgr_exynos;
166 };
167
168 /* tbm bufmgr private for exynos */
169 struct _tbm_bufmgr_exynos {
170         int fd;
171         int isLocal;
172         void *hashBos;
173
174         int use_dma_fence;
175
176         int tgl_fd;
177
178         char *device_name;
179         void *bind_display;
180
181         tbm_backend_bufmgr_func *bufmgr_func;
182         tbm_backend_bo_func *bo_func;
183
184         tbm_bufmgr bufmgr;
185 };
186
187 const static char *STR_DEVICE[] = {
188         "DEF",
189         "CPU",
190         "2D",
191         "3D",
192         "MM"
193 };
194
195 const static char *STR_OPT[] = {
196         "NONE",
197         "RD",
198         "WR",
199         "RDWR"
200 };
201
202 static int _get_render_node(int is_master);
203
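/* Helpers for the tgl (global lock) device.  A bo's flink name is used as
 * the tgl key so its lock and cache-state data can be shared across
 * processes; GLOBAL_KEY holds the global cache-flush counter. */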
204 static inline int
205 _tgl_init(int fd, unsigned int key)
206 {
207         struct tgl_reg_data data;
208         int err;
209         char buf[STRERR_BUFSIZE];
210
211         data.key = key;
212         data.timeout_ms = 1000;
213
214         err = ioctl(fd, TGL_IOCTL_REGISTER, &data);
215         if (err) {
216                 TBM_ERR("error(%s) key:%d\n",
217                         strerror_r(errno, buf, STRERR_BUFSIZE), key);
218                 return 0;
219         }
220
221         return 1;
222 }
223
224 static inline int
225 _tgl_destroy(int fd, unsigned int key)
226 {
227         struct tgl_reg_data data;
228         int err;
229         char buf[STRERR_BUFSIZE];
230
231         data.key = key;
232         err = ioctl(fd, TGL_IOCTL_UNREGISTER, &data);
233         if (err) {
234                 TBM_ERR("error(%s) key:%d\n",
235                         strerror_r(errno, buf, STRERR_BUFSIZE), key);
236                 return 0;
237         }
238
239         return 1;
240 }
241
242 static inline int
243 _tgl_lock(int fd, unsigned int key, int opt)
244 {
245         struct tgl_lock_data data;
246         enum tgl_type_data tgl_type;
247         int err;
248         char buf[STRERR_BUFSIZE];
249
250         switch (opt) {
251         case TBM_OPTION_READ:
252                 tgl_type = TGL_TYPE_READ;
253                 break;
254         case TBM_OPTION_WRITE:
255                 tgl_type = TGL_TYPE_WRITE;
256                 break;
257         default:
258                 tgl_type = TGL_TYPE_NONE;
259                 break;
260         }
261
262         data.key = key;
263         data.type = tgl_type;
264
265         err = ioctl(fd, TGL_IOCTL_LOCK, &data);
266         if (err) {
267                 TBM_ERR("error(%s) key:%d opt:%d\n",
268                         strerror_r(errno, buf, STRERR_BUFSIZE), key, opt);
269                 return 0;
270         }
271
272         return 1;
273 }
274
275 static inline int
276 _tgl_unlock(int fd, unsigned int key)
277 {
278         struct tgl_lock_data data;
279         int err;
280         char buf[STRERR_BUFSIZE];
281
282         data.key = key;
283         data.type = TGL_TYPE_NONE;
284
285         err = ioctl(fd, TGL_IOCTL_UNLOCK, &data);
286         if (err) {
287                 TBM_ERR("error(%s) key:%d\n",
288                         strerror_r(errno, buf, STRERR_BUFSIZE), key);
289                 return 0;
290         }
291
292         return 1;
293 }
294
295 static inline int
296 _tgl_set_data(int fd, unsigned int key, unsigned int val)
297 {
298         struct tgl_usr_data data;
299         int err;
300         char buf[STRERR_BUFSIZE];
301
302         data.key = key;
303         data.data1 = val;
304
305         err = ioctl(fd, TGL_IOCTL_SET_DATA, &data);
306         if (err) {
307                 TBM_ERR("error(%s) key:%d\n",
308                         strerror_r(errno, buf, STRERR_BUFSIZE), key);
309                 return 0;
310         }
311
312         return 1;
313 }
314
315 static inline unsigned int
316 _tgl_get_data(int fd, unsigned int key)
317 {
318         struct tgl_usr_data data = { 0, };
319         int err;
320         char buf[STRERR_BUFSIZE];
321
322         data.key = key;
323
324         err = ioctl(fd, TGL_IOCTL_GET_DATA, &data);
325         if (err) {
326                 TBM_ERR("error(%s) key:%d\n",
327                         strerror_r(errno, buf, STRERR_BUFSIZE), key);
328                 return 0;
329         }
330
331         return data.data1;
332 }
333
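/* Flush CPU caches through DRM_EXYNOS_GEM_CACHE_OP: the range of the given
 * bo, or all caches when bo_exynos is NULL.  This is a no-op when dma-fence
 * is used, since the kernel then handles cache maintenance itself. */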
334 static int
335 _exynos_cache_flush(tbm_bufmgr_exynos bufmgr_exynos, tbm_bo_exynos bo_exynos, int flags)
336 {
337         TBM_RETURN_VAL_IF_FAIL(bufmgr_exynos != NULL, 0);
338
339         /* cache flush is managed by kernel side when using dma-fence. */
340         if (bufmgr_exynos->use_dma_fence)
341                 return 1;
342
343         struct drm_exynos_gem_cache_op cache_op = {0, };
344         int ret;
345
346         /* if bo_exynos is null, do cache_flush_all */
347         if (bo_exynos) {
348                 cache_op.flags = 0;
349                 cache_op.usr_addr = (uint64_t)((uintptr_t)bo_exynos->pBase);
350                 cache_op.size = bo_exynos->size;
351         } else {
352                 flags = TBM_EXYNOS_CACHE_FLUSH_ALL;
353                 cache_op.flags = 0;
354                 cache_op.usr_addr = 0;
355                 cache_op.size = 0;
356         }
357
358         if (flags & TBM_EXYNOS_CACHE_INV) {
359                 if (flags & TBM_EXYNOS_CACHE_ALL)
360                         cache_op.flags |= EXYNOS_DRM_CACHE_INV_ALL;
361                 else
362                         cache_op.flags |= EXYNOS_DRM_CACHE_INV_RANGE;
363         }
364
365         if (flags & TBM_EXYNOS_CACHE_CLN) {
366                 if (flags & TBM_EXYNOS_CACHE_ALL)
367                         cache_op.flags |= EXYNOS_DRM_CACHE_CLN_ALL;
368                 else
369                         cache_op.flags |= EXYNOS_DRM_CACHE_CLN_RANGE;
370         }
371
372         if (flags & TBM_EXYNOS_CACHE_ALL)
373                 cache_op.flags |= EXYNOS_DRM_ALL_CACHES_CORES;
374
375         ret = drmCommandWriteRead(bufmgr_exynos->fd, DRM_EXYNOS_GEM_CACHE_OP, &cache_op,
376                                   sizeof(cache_op));
377         if (ret) {
378                 TBM_ERR("fail to flush the cache.\n");
379                 return 0;
380         }
381
382         return 1;
383 }
384
385 static int
386 _bo_init_cache_state(tbm_bufmgr_exynos bufmgr_exynos, tbm_bo_exynos bo_exynos, int import)
387 {
388         /* check whether cache control is enabled */
389         if (!g_enable_cache_ctrl)
390                 return 1;
391
392         TBM_RETURN_VAL_IF_FAIL(bufmgr_exynos != NULL, 0);
393         TBM_RETURN_VAL_IF_FAIL(bo_exynos != NULL, 0);
394
395         if (bufmgr_exynos->use_dma_fence)
396                 return 1;
397
398         _tgl_init(bufmgr_exynos->tgl_fd, bo_exynos->name);
399
400         tbm_bo_cache_state cache_state;
401
402         if (import == 0) {
403                 cache_state.data.isDirtied = DEVICE_NONE;
404                 cache_state.data.isCached = 0;
405                 cache_state.data.cntFlush = 0;
406
407                 _tgl_set_data(bufmgr_exynos->tgl_fd, bo_exynos->name, cache_state.val);
408         }
409
410         return 1;
411 }
412
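/* Update the shared cache state of a cachable bo for an access by 'device':
 * invalidate the CPU cache when the CPU touches data last dirtied by another
 * device, and clean it when another device touches data last dirtied by the
 * CPU.  The state is kept in tgl data keyed by the bo's flink name. */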
413 static int
414 _bo_set_cache_state(tbm_bufmgr_exynos bufmgr_exynos, tbm_bo_exynos bo_exynos, int device, int opt)
415 {
416         /* check whether cache control is enabled */
417         if (!g_enable_cache_ctrl)
418                 return 1;
419
420         TBM_RETURN_VAL_IF_FAIL(bufmgr_exynos != NULL, 0);
421         TBM_RETURN_VAL_IF_FAIL(bo_exynos != NULL, 0);
422
423         if (bufmgr_exynos->use_dma_fence)
424                 return 1;
425
426         char need_flush = 0;
427         unsigned short cntFlush = 0;
428
429         if (!(bo_exynos->flags_exynos & EXYNOS_BO_CACHABLE))
430                 return 1;
431
432         /* get cache state of a bo_exynos */
433         bo_exynos->cache_state.val = _tgl_get_data(bufmgr_exynos->tgl_fd,
434                                      bo_exynos->name);
435
436         /* get global cache flush count */
437         cntFlush = (unsigned short)_tgl_get_data(bufmgr_exynos->tgl_fd, GLOBAL_KEY);
438
439         if (device == TBM_DEVICE_CPU) {
440                 if (bo_exynos->cache_state.data.isDirtied == DEVICE_CO &&
441                     bo_exynos->cache_state.data.isCached)
442                         need_flush = TBM_EXYNOS_CACHE_INV;
443
444                 bo_exynos->cache_state.data.isCached = 1;
445                 if (opt & TBM_OPTION_WRITE)
446                         bo_exynos->cache_state.data.isDirtied = DEVICE_CA;
447                 else {
448                         if (bo_exynos->cache_state.data.isDirtied != DEVICE_CA)
449                                 bo_exynos->cache_state.data.isDirtied = DEVICE_NONE;
450                 }
451         } else {
452                 if (bo_exynos->cache_state.data.isDirtied == DEVICE_CA &&
453                     bo_exynos->cache_state.data.isCached &&
454                     bo_exynos->cache_state.data.cntFlush == cntFlush)
455                         need_flush = TBM_EXYNOS_CACHE_CLN | TBM_EXYNOS_CACHE_ALL;
456
457                 if (opt & TBM_OPTION_WRITE)
458                         bo_exynos->cache_state.data.isDirtied = DEVICE_CO;
459                 else {
460                         if (bo_exynos->cache_state.data.isDirtied != DEVICE_CO)
461                                 bo_exynos->cache_state.data.isDirtied = DEVICE_NONE;
462                 }
463         }
464
465         if (need_flush) {
466                 if (need_flush & TBM_EXYNOS_CACHE_ALL)
467                         _tgl_set_data(bufmgr_exynos->tgl_fd, GLOBAL_KEY, (unsigned int)(++cntFlush));
468
469                 /* call cache flush */
470                 _exynos_cache_flush(bufmgr_exynos, bo_exynos, need_flush);
471
472                 TBM_DBG(" \tcache(%d,%d)....flush:0x%x, cntFlush(%d)\n",
473                     bo_exynos->cache_state.data.isCached,
474                     bo_exynos->cache_state.data.isDirtied,
475                     need_flush,
476                     cntFlush);
477         }
478
479         return 1;
480 }
481
482 static int
483 _bo_save_cache_state(tbm_bufmgr_exynos bufmgr_exynos, tbm_bo_exynos bo_exynos)
484 {
485         /* check whether cache control is enabled */
486         if (!g_enable_cache_ctrl)
487                 return 1;
488
489         TBM_RETURN_VAL_IF_FAIL(bufmgr_exynos != NULL, 0);
490         TBM_RETURN_VAL_IF_FAIL(bo_exynos != NULL, 0);
491
492         if (bufmgr_exynos->use_dma_fence)
493                 return 1;
494
495         unsigned short cntFlush = 0;
496
497         /* get global cache flush count */
498         cntFlush = (unsigned short)_tgl_get_data(bufmgr_exynos->tgl_fd, GLOBAL_KEY);
499
500         /* save global cache flush count */
501         bo_exynos->cache_state.data.cntFlush = cntFlush;
502         _tgl_set_data(bufmgr_exynos->tgl_fd, bo_exynos->name,
503                       bo_exynos->cache_state.val);
504
505         return 1;
506 }
507
508 static void
509 _bo_destroy_cache_state(tbm_bufmgr_exynos bufmgr_exynos, tbm_bo_exynos bo_exynos)
510 {
511         /* check whether cache control is enabled */
512         if (!g_enable_cache_ctrl)
513                 return;
514
515         TBM_RETURN_IF_FAIL(bufmgr_exynos != NULL);
516         TBM_RETURN_IF_FAIL(bo_exynos != NULL);
517
518         if (bufmgr_exynos->use_dma_fence)
519                 return;
520
521         _tgl_destroy(bufmgr_exynos->tgl_fd, bo_exynos->name);
522 }
523
524 static int
525 _bufmgr_init_cache_state(tbm_bufmgr_exynos bufmgr_exynos)
526 {
527         /* check whether cache control is enabled */
528         if (!g_enable_cache_ctrl)
529                 return 1;
530
531         TBM_RETURN_VAL_IF_FAIL(bufmgr_exynos != NULL, 0);
532
533         if (bufmgr_exynos->use_dma_fence)
534                 return 1;
535
536         /* open tgl fd for saving cache flush data */
537         bufmgr_exynos->tgl_fd = open(tgl_devfile, O_RDWR);
538
539         if (bufmgr_exynos->tgl_fd < 0) {
540                 bufmgr_exynos->tgl_fd = open(tgl_devfile1, O_RDWR);
541                 if (bufmgr_exynos->tgl_fd < 0) {
542                     TBM_ERR("fail to open global_lock:%s\n",
543                                         tgl_devfile1);
544                         return 0;
545                 }
546         }
547
548         if (!_tgl_init(bufmgr_exynos->tgl_fd, GLOBAL_KEY)) {
549                 TBM_ERR("fail to initialize the tgl\n");
550                 close(bufmgr_exynos->tgl_fd);
551                 return 0;
552         }
553
554         return 1;
555 }
556
557 static void
558 _bufmgr_deinit_cache_state(tbm_bufmgr_exynos bufmgr_exynos)
559 {
560         /* check whether cache control is enabled */
561         if (!g_enable_cache_ctrl)
562                 return;
563
564         TBM_RETURN_IF_FAIL(bufmgr_exynos != NULL);
565
566         if (bufmgr_exynos->use_dma_fence)
567                 return;
568
569         if (bufmgr_exynos->tgl_fd >= 0)
570                 close(bufmgr_exynos->tgl_fd);
571 }
572
573 static int
574 _tbm_exynos_open_drm(void)
575 {
576         int fd = -1;
577
578         fd = drmOpen(EXYNOS_DRM_NAME, NULL);
579         if (fd < 0) {
580                 TBM_ERR("fail to open drm.(%s)\n", EXYNOS_DRM_NAME);
581         }
582
583         if (fd < 0) {
584                 fd = _get_render_node(1);
585                 if (fd < 0) {
586                         TBM_ERR("cannot find render_node\n");
587                 }
588         }
589
590         return fd;
591 }
592
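/* Scan the drm subsystem with udev for a node whose parent device is
 * "exynos-drm" (card* when is_master is set, renderD* otherwise) and
 * return an open fd to it. */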
593 static int
594 _get_render_node(int is_master)
595 {
596         struct udev *udev = NULL;
597         struct udev_enumerate *e = NULL;
598         struct udev_list_entry *entry = NULL;
599         struct udev_device *device = NULL, *drm_device = NULL, *device_parent = NULL;
600         const char *filepath;
601         struct stat s;
602         int fd = -1;
603         int ret;
604
605         TBM_DBG("search drm-device by udev(is_master:%d)\n", is_master);
606
607         udev = udev_new();
608         if (!udev) {
609                 TBM_ERR("udev_new() failed.\n");
610                 return -1;
611         }
612
613         e = udev_enumerate_new(udev);
614         udev_enumerate_add_match_subsystem(e, "drm");
615         if (is_master)
616                 udev_enumerate_add_match_sysname(e, "card[0-9]*");
617         else
618                 udev_enumerate_add_match_sysname(e, "renderD[0-9]*");
619         udev_enumerate_scan_devices(e);
620
621         udev_list_entry_foreach(entry, udev_enumerate_get_list_entry(e)) {
622                 device = udev_device_new_from_syspath(udev_enumerate_get_udev(e),
623                                                       udev_list_entry_get_name(entry));
624                 device_parent = udev_device_get_parent(device);
625                 /* No need to unref device_parent; it shares its refcount with device. */
626                 if (device_parent) {
627                         if (strcmp(udev_device_get_sysname(device_parent), "exynos-drm") == 0) {
628                                 drm_device = device;
629                                 TBM_DBG("Found render device: '%s' (%s)\n",
630                                     udev_device_get_syspath(drm_device),
631                                     udev_device_get_sysname(device_parent));
632                                 break;
633                         }
634                 }
635                 udev_device_unref(device);
636         }
637
638         udev_enumerate_unref(e);
639
640         if (!drm_device) {
641                 TBM_ERR("failed to find device\n");
642                 udev_unref(udev);
643                 return -1;
644         }
645
646         /* Get device file path. */
647         filepath = udev_device_get_devnode(drm_device);
648         if (!filepath) {
649                 TBM_ERR("udev_device_get_devnode() failed.\n");
650                 udev_device_unref(drm_device);
651                 udev_unref(udev);
652                 return -1;
653         }
654
655         /* Open DRM device file and check validity. */
656         fd = open(filepath, O_RDWR | O_CLOEXEC);
657         if (fd < 0) {
658                 TBM_ERR("open(%s, O_RDWR | O_CLOEXEC) failed.\n", filepath);
659                 udev_device_unref(drm_device);
660                 udev_unref(udev);
661                 return -1;
662         }
663
664         ret = fstat(fd, &s);
665         if (ret) {
666                 TBM_ERR("fstat(%s) failed.\n", filepath);
667                 udev_device_unref(drm_device);
668                 udev_unref(udev);
669                 close(fd);
670                 return -1;
671         }
672
673         udev_device_unref(drm_device);
674         udev_unref(udev);
675
676         return fd;
677 }
678
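/* Translate between TBM memory-type flags and EXYNOS GEM flags:
 * TBM_BO_SCANOUT maps to contiguous memory, TBM_BO_WC / TBM_BO_NONCACHABLE
 * select the matching caching attribute, otherwise cachable is used. */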
679 static unsigned int
680 _get_exynos_flag_from_tbm(unsigned int ftbm)
681 {
682         unsigned int flags = 0;
683
684         if (ftbm & TBM_BO_SCANOUT)
685                 flags |= EXYNOS_BO_CONTIG;
686         else
687                 flags |= EXYNOS_BO_NONCONTIG;
688
689         if (ftbm & TBM_BO_WC)
690                 flags |= EXYNOS_BO_WC;
691         else if (ftbm & TBM_BO_NONCACHABLE)
692                 flags |= EXYNOS_BO_NONCACHABLE;
693         else
694                 flags |= EXYNOS_BO_CACHABLE;
695
696         return flags;
697 }
698
699 static unsigned int
700 _get_tbm_flag_from_exynos(unsigned int fexynos)
701 {
702         unsigned int flags = 0;
703
704         if (fexynos & EXYNOS_BO_NONCONTIG)
705                 flags |= TBM_BO_DEFAULT;
706         else
707                 flags |= TBM_BO_SCANOUT;
708
709         if (fexynos & EXYNOS_BO_WC)
710                 flags |= TBM_BO_WC;
711         else if (fexynos & EXYNOS_BO_CACHABLE)
712                 flags |= TBM_BO_DEFAULT;
713         else
714                 flags |= TBM_BO_NONCACHABLE;
715
716         return flags;
717 }
718
719 static unsigned int
720 _get_name(int fd, unsigned int gem)
721 {
722         struct drm_gem_flink arg = {0,};
723
724         arg.handle = gem;
725         if (drmIoctl(fd, DRM_IOCTL_GEM_FLINK, &arg)) {
726                 TBM_ERR("fail to DRM_IOCTL_GEM_FLINK gem:%d", gem);
727                 return 0;
728         }
729
730         return (unsigned int)arg.name;
731 }
732
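/* Return the bo handle for a device: the GEM handle for DEFAULT/2D, a
 * lazily created CPU mapping (DRM_EXYNOS_GEM_MAP + mmap) for CPU, and a
 * lazily exported dma-buf fd (PRIME) for 3D/MM. */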
733 static tbm_bo_handle
734 _exynos_bo_handle(tbm_bo_exynos bo_exynos, int device)
735 {
736         tbm_bo_handle bo_handle;
737
738         memset(&bo_handle, 0x0, sizeof(bo_handle));
739
740         switch (device) {
741         case TBM_DEVICE_DEFAULT:
742         case TBM_DEVICE_2D:
743                 bo_handle.u32 = (uint32_t)bo_exynos->gem;
744                 break;
745         case TBM_DEVICE_CPU:
746                 if (!bo_exynos->pBase) {
747                         struct drm_exynos_gem_map arg = {0,};
748                         void *map = NULL;
749
750                         arg.handle = bo_exynos->gem;
751                         if (drmCommandWriteRead(bo_exynos->fd, DRM_EXYNOS_GEM_MAP, &arg,
752                                                 sizeof(arg))) {
753                                 TBM_ERR("Cannot map_exynos gem=%d\n", bo_exynos->gem);
754                                 return (tbm_bo_handle) NULL;
755                         }
756
757                         map = mmap(NULL, bo_exynos->size, PROT_READ | PROT_WRITE, MAP_SHARED,
758                                    bo_exynos->fd, arg.offset);
759                         if (map == MAP_FAILED) {
760                                 TBM_ERR("Cannot usrptr gem=%d\n", bo_exynos->gem);
761                                 return (tbm_bo_handle) NULL;
762                         }
763                         bo_exynos->pBase = map;
764                 }
765                 bo_handle.ptr = (void *)bo_exynos->pBase;
766                 break;
767         case TBM_DEVICE_3D:
768         case TBM_DEVICE_MM:
769                 if (!bo_exynos->dmabuf) {
770                         struct drm_prime_handle arg = {0, };
771
772                         arg.handle = bo_exynos->gem;
773                         if (drmIoctl(bo_exynos->fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &arg)) {
774                                 TBM_ERR("Cannot dmabuf=%d\n", bo_exynos->gem);
775                                 return (tbm_bo_handle) NULL;
776                         }
777                         bo_exynos->dmabuf = arg.fd;
778                 }
779
780                 bo_handle.u32 = (uint32_t)bo_exynos->dmabuf;
781                 break;
782         default:
783                 TBM_ERR("Not supported device:%d\n", device);
784                 bo_handle.ptr = (void *) NULL;
785                 break;
786         }
787
788         return bo_handle;
789 }
790
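/* Plane-size calculations for NV12 buffers shared with the MFC (S5P FIMV)
 * video codec.  tbm_exynos_bufmgr_get_plane_data() below takes the larger
 * of the legacy and the new formula for each plane. */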
791 static int
792 _new_calc_plane_nv12(int width, int height)
793 {
794         int mbX, mbY;
795
796         mbX = DIV_ROUND_UP(width, S5P_FIMV_NUM_PIXELS_IN_MB_ROW);
797         mbY = DIV_ROUND_UP(height, S5P_FIMV_NUM_PIXELS_IN_MB_COL);
798
799         if (width * height < S5P_FIMV_MAX_FRAME_SIZE)
800                 mbY = (mbY + 1) / 2 * 2;
801
802         return ((mbX * S5P_FIMV_NUM_PIXELS_IN_MB_COL) * (mbY *
803                         S5P_FIMV_NUM_PIXELS_IN_MB_ROW));
804 }
805
806 static int
807 _calc_yplane_nv12(int width, int height)
808 {
809         int mbX, mbY;
810
811         mbX = SIZE_ALIGN(width + 24, S5P_FIMV_NV12MT_HALIGN);
812         mbY = SIZE_ALIGN(height + 16, S5P_FIMV_NV12MT_VALIGN);
813
814         return SIZE_ALIGN(mbX * mbY, S5P_FIMV_DEC_BUF_ALIGN);
815 }
816
817 static int
818 _calc_uvplane_nv12(int width, int height)
819 {
820         int mbX, mbY;
821
822         mbX = SIZE_ALIGN(width + 16, S5P_FIMV_NV12MT_HALIGN);
823         mbY = SIZE_ALIGN(height + 4, S5P_FIMV_NV12MT_VALIGN);
824
825         return SIZE_ALIGN((mbX * mbY) >> 1, S5P_FIMV_DEC_BUF_ALIGN);
826 }
827
828 static int
829 _new_calc_yplane_nv12(int width, int height)
830 {
831         return SIZE_ALIGN(_new_calc_plane_nv12(width,
832                                                 height) + S5P_FIMV_D_ALIGN_PLANE_SIZE,
833                            TBM_SURFACE_ALIGNMENT_PLANE_NV12);
834 }
835
836 static int
837 _new_calc_uvplane_nv12(int width, int height)
838 {
839         return SIZE_ALIGN((_new_calc_plane_nv12(width,
840                                                 height) >> 1) + S5P_FIMV_D_ALIGN_PLANE_SIZE,
841                           TBM_SURFACE_ALIGNMENT_PLANE_NV12);
842 }
843
844 static tbm_bufmgr_capability
845 tbm_exynos_bufmgr_get_capabilities(tbm_backend_bufmgr_data *bufmgr_data, tbm_error_e *error)
846 {
847         tbm_bufmgr_capability capabilities = TBM_BUFMGR_CAPABILITY_NONE;
848
849         capabilities = TBM_BUFMGR_CAPABILITY_SHARE_KEY|TBM_BUFMGR_CAPABILITY_SHARE_FD;
850
851         if (error)
852                 *error = TBM_ERROR_NONE;
853
854         return capabilities;
855 }
856
857 static tbm_error_e
858 tbm_exynos_bufmgr_bind_native_display(tbm_backend_bufmgr_data *bufmgr_data, tbm_native_display *native_display)
859 {
860         tbm_bufmgr_exynos bufmgr_exynos = (tbm_bufmgr_exynos)bufmgr_data;
861         TBM_RETURN_VAL_IF_FAIL(bufmgr_exynos != NULL, TBM_ERROR_INVALID_PARAMETER);
862
863         if (!tbm_drm_helper_wl_auth_server_init(native_display, bufmgr_exynos->fd,
864                                            bufmgr_exynos->device_name, 0)) {
865                 TBM_ERR("fail to tbm_drm_helper_wl_server_init\n");
866                 return TBM_ERROR_INVALID_OPERATION;
867         }
868
869         bufmgr_exynos->bind_display = native_display;
870
871         return TBM_ERROR_NONE;
872 }
873
874 static tbm_error_e
875 tbm_exynos_bufmgr_get_supported_formats(tbm_backend_bufmgr_data *bufmgr_data,
876                                                         uint32_t **formats, uint32_t *num)
877 {
878         const static uint32_t tbm_exynos_color_format_list[TBM_COLOR_FORMAT_COUNT] = {
879                                                                                         TBM_FORMAT_ARGB8888,
880                                                                                         TBM_FORMAT_XRGB8888,
881                                                                                         TBM_FORMAT_NV12,
882                                                                                         TBM_FORMAT_YUV420
883                                                                                 };
884
885         tbm_bufmgr_exynos bufmgr_exynos = (tbm_bufmgr_exynos)bufmgr_data;
886         uint32_t *color_formats;
887
888         TBM_RETURN_VAL_IF_FAIL(bufmgr_exynos != NULL, TBM_ERROR_INVALID_PARAMETER);
889
890         color_formats = (uint32_t *)calloc(1, sizeof(uint32_t) * TBM_COLOR_FORMAT_COUNT);
891         if (color_formats == NULL)
892                 return TBM_ERROR_OUT_OF_MEMORY;
893
894         memcpy(color_formats, tbm_exynos_color_format_list, sizeof(uint32_t)*TBM_COLOR_FORMAT_COUNT);
895
896         *formats = color_formats;
897         *num = TBM_COLOR_FORMAT_COUNT;
898
899         TBM_DBG("supported format count = %d\n", *num);
900
901         return TBM_ERROR_NONE;
902 }
903
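/* Report per-plane size, offset, pitch and bo index for the given format,
 * aligned to g_tbm_surface_alignment_pitch_rgb / g_tbm_surface_alignment_plane
 * for RGB formats and to the YUV pitch alignment for YCbCr formats. */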
904 static tbm_error_e
905 tbm_exynos_bufmgr_get_plane_data(tbm_backend_bufmgr_data *bufmgr_data,
906                                                         tbm_format format, int plane_idx, int width,
907                                                         int height, uint32_t *size, uint32_t *offset,
908                                                         uint32_t *pitch, int *bo_idx)
909 {
910         tbm_bufmgr_exynos bufmgr_exynos = (tbm_bufmgr_exynos)bufmgr_data;
911         int bpp;
912         int _offset = 0;
913         int _pitch = 0;
914         int _size = 0;
915         int _bo_idx = 0;
916
917         TBM_RETURN_VAL_IF_FAIL(bufmgr_exynos != NULL, TBM_ERROR_INVALID_PARAMETER);
918
919         switch (format) {
920                 /* 16 bpp RGB */
921         case TBM_FORMAT_XRGB4444:
922         case TBM_FORMAT_XBGR4444:
923         case TBM_FORMAT_RGBX4444:
924         case TBM_FORMAT_BGRX4444:
925         case TBM_FORMAT_ARGB4444:
926         case TBM_FORMAT_ABGR4444:
927         case TBM_FORMAT_RGBA4444:
928         case TBM_FORMAT_BGRA4444:
929         case TBM_FORMAT_XRGB1555:
930         case TBM_FORMAT_XBGR1555:
931         case TBM_FORMAT_RGBX5551:
932         case TBM_FORMAT_BGRX5551:
933         case TBM_FORMAT_ARGB1555:
934         case TBM_FORMAT_ABGR1555:
935         case TBM_FORMAT_RGBA5551:
936         case TBM_FORMAT_BGRA5551:
937         case TBM_FORMAT_RGB565:
938                 bpp = 16;
939                 _offset = 0;
940                 _pitch = SIZE_ALIGN((width * bpp) >> 3, g_tbm_surface_alignment_pitch_rgb);
941                 _size = SIZE_ALIGN(_pitch * height, g_tbm_surface_alignment_plane);
942                 _bo_idx = 0;
943                 break;
944                 /* 24 bpp RGB */
945         case TBM_FORMAT_RGB888:
946         case TBM_FORMAT_BGR888:
947                 bpp = 24;
948                 _offset = 0;
949                 _pitch = SIZE_ALIGN((width * bpp) >> 3, g_tbm_surface_alignment_pitch_rgb);
950                 _size = SIZE_ALIGN(_pitch * height, g_tbm_surface_alignment_plane);
951                 _bo_idx = 0;
952                 break;
953                 /* 32 bpp RGB */
954         case TBM_FORMAT_XRGB8888:
955         case TBM_FORMAT_XBGR8888:
956         case TBM_FORMAT_RGBX8888:
957         case TBM_FORMAT_BGRX8888:
958         case TBM_FORMAT_ARGB8888:
959         case TBM_FORMAT_ABGR8888:
960         case TBM_FORMAT_RGBA8888:
961         case TBM_FORMAT_BGRA8888:
962                 bpp = 32;
963                 _offset = 0;
964                 _pitch = SIZE_ALIGN((width * bpp) >> 3, g_tbm_surface_alignment_pitch_rgb);
965                 _size = SIZE_ALIGN(_pitch * height, g_tbm_surface_alignment_plane);
966                 _bo_idx = 0;
967                 break;
968
969                 /* packed YCbCr */
970         case TBM_FORMAT_YUYV:
971         case TBM_FORMAT_YVYU:
972         case TBM_FORMAT_UYVY:
973         case TBM_FORMAT_VYUY:
974         case TBM_FORMAT_AYUV:
975                 bpp = 32;
976                 _offset = 0;
977                 _pitch = SIZE_ALIGN((width * bpp) >> 3, TBM_SURFACE_ALIGNMENT_PITCH_YUV);
978                 _size = SIZE_ALIGN(_pitch * height, g_tbm_surface_alignment_plane);
979                 _bo_idx = 0;
980                 break;
981
982                 /*
983                 * 2 plane YCbCr
984                 * index 0 = Y plane, [7:0] Y
985                 * index 1 = Cr:Cb plane, [15:0] Cr:Cb little endian
986                 * or
987                 * index 1 = Cb:Cr plane, [15:0] Cb:Cr little endian
988                 */
989         case TBM_FORMAT_NV12:
990         case TBM_FORMAT_NV21:
991                 bpp = 12;
992                 if (plane_idx == 0) {
993                         _offset = 0;
994                         _pitch = SIZE_ALIGN(width, TBM_SURFACE_ALIGNMENT_PITCH_YUV);
995                         _size = MAX(_calc_yplane_nv12(width, height),
996                                                 _new_calc_yplane_nv12(width, height));
997                         _bo_idx = 0;
998                 } else if (plane_idx == 1) {
999                         _offset = 0;
1000                         _pitch = SIZE_ALIGN(width, TBM_SURFACE_ALIGNMENT_PITCH_YUV);
1001                         _size = MAX(_calc_uvplane_nv12(width, height),
1002                                                 _new_calc_uvplane_nv12(width, height));
1003                         _bo_idx = 1;
1004                 }
1005                 break;
1006         case TBM_FORMAT_NV16:
1007         case TBM_FORMAT_NV61:
1008                 bpp = 16;
1009                 /*if(plane_idx == 0)*/
1010                 {
1011                         _offset = 0;
1012                         _pitch = SIZE_ALIGN(width, TBM_SURFACE_ALIGNMENT_PITCH_YUV);
1013                         _size = SIZE_ALIGN(_pitch * height, g_tbm_surface_alignment_plane);
1014                         _bo_idx = 0;
1015                         if (plane_idx == 0)
1016                                 break;
1017                 }
1018                 /*else if( plane_idx ==1 )*/
1019                 {
1020                         _offset += _size;
1021                         _pitch = SIZE_ALIGN(width, TBM_SURFACE_ALIGNMENT_PITCH_YUV);
1022                         _size = SIZE_ALIGN(_pitch * height, g_tbm_surface_alignment_plane);
1023                         _bo_idx = 0;
1024                 }
1025                 break;
1026
1027                 /*
1028                 * 3 plane YCbCr
1029                 * index 0: Y plane, [7:0] Y
1030                 * index 1: Cb plane, [7:0] Cb
1031                 * index 2: Cr plane, [7:0] Cr
1032                 * or
1033                 * index 1: Cr plane, [7:0] Cr
1034                 * index 2: Cb plane, [7:0] Cb
1035                 */
1036
1037                 /*
1038                 * NATIVE_BUFFER_FORMAT_YV12
1039                 * NATIVE_BUFFER_FORMAT_I420
1040                 */
1041         case TBM_FORMAT_YUV410:
1042         case TBM_FORMAT_YVU410:
1043                 bpp = 9;
1044                 /*if(plane_idx == 0)*/
1045                 {
1046                         _offset = 0;
1047                         _pitch = SIZE_ALIGN(width, TBM_SURFACE_ALIGNMENT_PITCH_YUV);
1048                         _size = SIZE_ALIGN(_pitch * height, g_tbm_surface_alignment_plane);
1049                         _bo_idx = 0;
1050                         if (plane_idx == 0)
1051                                 break;
1052                 }
1053                 /*else if(plane_idx == 1)*/
1054                 {
1055                         _offset += _size;
1056                         _pitch = SIZE_ALIGN(width / 4, TBM_SURFACE_ALIGNMENT_PITCH_YUV / 4);
1057                         _size = SIZE_ALIGN(_pitch * (height / 4), g_tbm_surface_alignment_plane);
1058                         _bo_idx = 0;
1059                         if (plane_idx == 1)
1060                                 break;
1061                 }
1062                 /*else if (plane_idx == 2)*/
1063                 {
1064                         _offset += _size;
1065                         _pitch = SIZE_ALIGN(width / 4, TBM_SURFACE_ALIGNMENT_PITCH_YUV / 4);
1066                         _size = SIZE_ALIGN(_pitch * (height / 4), g_tbm_surface_alignment_plane);
1067                         _bo_idx = 0;
1068                 }
1069                 break;
1070         case TBM_FORMAT_YUV411:
1071         case TBM_FORMAT_YVU411:
1072         case TBM_FORMAT_YUV420:
1073         case TBM_FORMAT_YVU420:
1074                 bpp = 12;
1075                 /*if(plane_idx == 0)*/
1076                 {
1077                         _offset = 0;
1078                         _pitch = SIZE_ALIGN(width, TBM_SURFACE_ALIGNMENT_PITCH_YUV);
1079                         _size = SIZE_ALIGN(_pitch * height, g_tbm_surface_alignment_plane);
1080                         _bo_idx = 0;
1081                         if (plane_idx == 0)
1082                                 break;
1083                 }
1084                 /*else if(plane_idx == 1)*/
1085                 {
1086                         _offset += _size;
1087                         _pitch = SIZE_ALIGN(width / 2, TBM_SURFACE_ALIGNMENT_PITCH_YUV / 2);
1088                         _size = SIZE_ALIGN(_pitch * (height / 2), g_tbm_surface_alignment_plane);
1089                         _bo_idx = 0;
1090                         if (plane_idx == 1)
1091                                 break;
1092                 }
1093                 /*else if (plane_idx == 2)*/
1094                 {
1095                         _offset += _size;
1096                         _pitch = SIZE_ALIGN(width / 2, TBM_SURFACE_ALIGNMENT_PITCH_YUV / 2);
1097                         _size = SIZE_ALIGN(_pitch * (height / 2), g_tbm_surface_alignment_plane);
1098                         _bo_idx = 0;
1099                 }
1100                 break;
1101         case TBM_FORMAT_YUV422:
1102         case TBM_FORMAT_YVU422:
1103                 bpp = 16;
1104                 /*if(plane_idx == 0)*/
1105                 {
1106                         _offset = 0;
1107                         _pitch = SIZE_ALIGN(width, TBM_SURFACE_ALIGNMENT_PITCH_YUV);
1108                         _size = SIZE_ALIGN(_pitch * height, g_tbm_surface_alignment_plane);
1109                         _bo_idx = 0;
1110                         if (plane_idx == 0)
1111                                 break;
1112                 }
1113                 /*else if(plane_idx == 1)*/
1114                 {
1115                         _offset += _size;
1116                         _pitch = SIZE_ALIGN(width / 2, TBM_SURFACE_ALIGNMENT_PITCH_YUV / 2);
1117                         _size = SIZE_ALIGN(_pitch * (height), g_tbm_surface_alignment_plane);
1118                         _bo_idx = 0;
1119                         if (plane_idx == 1)
1120                                 break;
1121                 }
1122                 /*else if (plane_idx == 2)*/
1123                 {
1124                         _offset += _size;
1125                         _pitch = SIZE_ALIGN(width / 2, TBM_SURFACE_ALIGNMENT_PITCH_YUV / 2);
1126                         _size = SIZE_ALIGN(_pitch * (height), g_tbm_surface_alignment_plane);
1127                         _bo_idx = 0;
1128                 }
1129                 break;
1130         case TBM_FORMAT_YUV444:
1131         case TBM_FORMAT_YVU444:
1132                 bpp = 24;
1133                 /*if(plane_idx == 0)*/
1134                 {
1135                         _offset = 0;
1136                         _pitch = SIZE_ALIGN(width, TBM_SURFACE_ALIGNMENT_PITCH_YUV);
1137                         _size = SIZE_ALIGN(_pitch * height, g_tbm_surface_alignment_plane);
1138                         _bo_idx = 0;
1139                         if (plane_idx == 0)
1140                                 break;
1141                 }
1142                 /*else if(plane_idx == 1)*/
1143                 {
1144                         _offset += _size;
1145                         _pitch = SIZE_ALIGN(width, TBM_SURFACE_ALIGNMENT_PITCH_YUV);
1146                         _size = SIZE_ALIGN(_pitch * height, g_tbm_surface_alignment_plane);
1147                         _bo_idx = 0;
1148                         if (plane_idx == 1)
1149                                 break;
1150                 }
1151                 /*else if (plane_idx == 2)*/
1152                 {
1153                         _offset += _size;
1154                         _pitch = SIZE_ALIGN(width, TBM_SURFACE_ALIGNMENT_PITCH_YUV);
1155                         _size = SIZE_ALIGN(_pitch * height, g_tbm_surface_alignment_plane);
1156                         _bo_idx = 0;
1157                 }
1158                 break;
1159         default:
1160                 bpp = 0;
1161                 break;
1162         }
1163
1164         *size = _size;
1165         *offset = _offset;
1166         *pitch = _pitch;
1167         *bo_idx = _bo_idx;
1168
1169         return TBM_ERROR_NONE;
1170 }
1171
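/* Allocate a new GEM buffer with DRM_EXYNOS_GEM_CREATE.  Scanout requests of
 * 4KiB or less are forced to non-contiguous memory, the dma-buf fd is
 * exported up front when dma-fence is used, and the bo is registered in the
 * flink-name hash. */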
1172 static tbm_backend_bo_data *
1173 tbm_exynos_bufmgr_alloc_bo(tbm_backend_bufmgr_data *bufmgr_data, unsigned int size,
1174                                         tbm_bo_memory_type flags, tbm_error_e *error)
1175 {
1176         tbm_bufmgr_exynos bufmgr_exynos = (tbm_bufmgr_exynos)bufmgr_data;
1177         tbm_bo_exynos bo_exynos;
1178         unsigned int exynos_flags;
1179
1180         if (bufmgr_exynos == NULL) {
1181                 TBM_ERR("bufmgr_data is null\n");
1182                 if (error)
1183                         *error = TBM_ERROR_INVALID_PARAMETER;
1184                 return NULL;
1185         }
1186
1187         bo_exynos = calloc(1, sizeof(struct _tbm_bo_exynos));
1188         if (!bo_exynos) {
1189                 TBM_ERR("fail to allocate the bo_exynos private\n");
1190                 if (error)
1191                         *error = TBM_ERROR_OUT_OF_MEMORY;
1192                 return NULL;
1193         }
1194         bo_exynos->bufmgr_exynos = bufmgr_exynos;
1195
1196         exynos_flags = _get_exynos_flag_from_tbm(flags);
1197         if ((flags & TBM_BO_SCANOUT) &&
1198             size <= 4 * 1024) {
1199                 exynos_flags |= EXYNOS_BO_NONCONTIG;
1200         }
1201
1202         struct drm_exynos_gem_create arg = {0, };
1203
1204         arg.size = (uint64_t)size;
1205         arg.flags = exynos_flags;
1206         if (drmCommandWriteRead(bufmgr_exynos->fd, DRM_EXYNOS_GEM_CREATE, &arg,
1207                                 sizeof(arg))) {
1208                 TBM_ERR("Cannot create bo_exynos(flag:%x, size:%d)\n", arg.flags,
1209                                (unsigned int)arg.size);
1210                 free(bo_exynos);
1211                 if (error)
1212                         *error = TBM_ERROR_INVALID_OPERATION;
1213                 return NULL;
1214         }
1215
1216         bo_exynos->fd = bufmgr_exynos->fd;
1217         bo_exynos->gem = arg.handle;
1218         bo_exynos->size = size;
1219         bo_exynos->flags_tbm = flags;
1220         bo_exynos->flags_exynos = exynos_flags;
1221         bo_exynos->name = _get_name(bo_exynos->fd, bo_exynos->gem);
1222
1223         if (!_bo_init_cache_state(bufmgr_exynos, bo_exynos, 0)) {
1224                 TBM_ERR("fail init cache state(%d)\n", bo_exynos->name);
1225                 free(bo_exynos);
1226                 if (error)
1227                         *error = TBM_ERROR_INVALID_OPERATION;
1228                 return NULL;
1229         }
1230
1231         pthread_mutex_init(&bo_exynos->mutex, NULL);
1232
1233         if (bufmgr_exynos->use_dma_fence && !bo_exynos->dmabuf) {
1234                 struct drm_prime_handle arg = {0, };
1235
1236                 arg.handle = bo_exynos->gem;
1237                 if (drmIoctl(bo_exynos->fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &arg)) {
1238                         TBM_ERR("Cannot dmabuf=%d\n", bo_exynos->gem);
1239                         free(bo_exynos);
1240                         if (error)
1241                                 *error = TBM_ERROR_INVALID_OPERATION;
1242                         return NULL;
1243                 }
1244                 bo_exynos->dmabuf = arg.fd;
1245         }
1246
1247         /* add bo_exynos to hash */
1248         if (drmHashInsert(bufmgr_exynos->hashBos, bo_exynos->name, (void *)bo_exynos) < 0)
1249                 TBM_ERR("Cannot insert bo_exynos to Hash(%d)\n", bo_exynos->name);
1250
1251         TBM_DBG("     bo_exynos:%p, gem:%d(%d), flags:%d(%d), size:%d\n",
1252                 bo_exynos,
1253                 bo_exynos->gem, bo_exynos->name,
1254                 flags, exynos_flags,
1255                 bo_exynos->size);
1256
1257         if (error)
1258                 *error = TBM_ERROR_NONE;
1259
1260         return (tbm_backend_bo_data *)bo_exynos;
1261 }
1262
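/* Import a dma-buf fd: convert it to a GEM handle, reuse an already imported
 * bo with the same flink name if one exists, otherwise create a new bo whose
 * size comes from lseek() on the fd or, failing that, from DRM_EXYNOS_GEM_GET. */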
1263 static tbm_backend_bo_data *
1264 tbm_exynos_bufmgr_import_fd(tbm_backend_bufmgr_data *bufmgr_data, tbm_fd key, tbm_error_e *error)
1265 {
1266         tbm_bufmgr_exynos bufmgr_exynos = (tbm_bufmgr_exynos)bufmgr_data;
1267         tbm_bo_exynos bo_exynos;
1268         unsigned int gem = 0;
1269         unsigned int name;
1270         int ret;
1271         char buf[STRERR_BUFSIZE];
1272
1273         if (bufmgr_exynos == NULL) {
1274                 TBM_ERR("bufmgr_data is null\n");
1275                 if (error)
1276                         *error = TBM_ERROR_INVALID_PARAMETER;
1277                 return NULL;
1278         }
1279
1280         /*getting handle from fd*/
1281         struct drm_prime_handle arg = {0, };
1282
1283         arg.fd = key;
1284         arg.flags = 0;
1285         if (drmIoctl(bufmgr_exynos->fd, DRM_IOCTL_PRIME_FD_TO_HANDLE, &arg)) {
1286                 TBM_ERR("Cannot get gem handle from fd:%d (%s)\n",
1287                                arg.fd, strerror_r(errno, buf, STRERR_BUFSIZE));
1288                 if (error)
1289                         *error = TBM_ERROR_INVALID_OPERATION;
1290                 return NULL;
1291         }
1292         gem = arg.handle;
1293
1294         name = _get_name(bufmgr_exynos->fd, gem);
1295         if (!name) {
1296                 TBM_ERR("Cannot get name from gem:%d, fd:%d (%s)\n",
1297                                gem, key, strerror_r(errno, buf, STRERR_BUFSIZE));
1298                 if (error)
1299                         *error = TBM_ERROR_INVALID_OPERATION;
1300                 return NULL;
1301         }
1302
1303         ret = drmHashLookup(bufmgr_exynos->hashBos, name, (void **)&bo_exynos);
1304         if (ret == 0) {
1305                 if (gem == bo_exynos->gem) {
1306                         if (error)
1307                                 *error = TBM_ERROR_NONE;
1308                         return bo_exynos;
1309                 }
1310         }
1311
1312         /* Determine size of bo_exynos.  The fd-to-handle ioctl really should
1313          * return the size, but it doesn't.  If we have kernel 3.12 or
1314          * later, we can lseek on the prime fd to get the size.  Older
1315          * kernels will just fail, in which case we fall back to the
1316          * size reported by DRM_EXYNOS_GEM_GET.
1317          */
1318         unsigned int real_size = -1;
1319         struct drm_exynos_gem_info info = {0, };
1320
1321         real_size = lseek(key, 0, SEEK_END);
1322
1323         info.handle = gem;
1324         if (drmCommandWriteRead(bufmgr_exynos->fd,
1325                                 DRM_EXYNOS_GEM_GET,
1326                                 &info,
1327                                 sizeof(struct drm_exynos_gem_info))) {
1328                 TBM_ERR("Cannot get gem info from gem:%d, fd:%d (%s)\n",
1329                                gem, key, strerror_r(errno, buf, STRERR_BUFSIZE));
1330                 if (error)
1331                         *error = TBM_ERROR_INVALID_OPERATION;
1332                 return NULL;
1333         }
1334
1335         if (real_size == -1)
1336                 real_size = info.size;
1337
1338         bo_exynos = calloc(1, sizeof(struct _tbm_bo_exynos));
1339         if (!bo_exynos) {
1340                 TBM_ERR("bo_exynos:%p fail to allocate the bo_exynos\n", bo_exynos);
1341                 if (error)
1342                         *error = TBM_ERROR_OUT_OF_MEMORY;
1343                 return NULL;
1344         }
1345         bo_exynos->bufmgr_exynos = bufmgr_exynos;
1346
1347         bo_exynos->fd = bufmgr_exynos->fd;
1348         bo_exynos->gem = gem;
1349         bo_exynos->size = real_size;
1350         bo_exynos->flags_exynos = info.flags;
1351         bo_exynos->flags_tbm = _get_tbm_flag_from_exynos(bo_exynos->flags_exynos);
1352         bo_exynos->name = name;
1353
1354         if (!_bo_init_cache_state(bufmgr_exynos, bo_exynos, 1)) {
1355                 TBM_ERR("fail init cache state(%d)\n", bo_exynos->name);
1356                 free(bo_exynos);
1357                 if (error)
1358                         *error = TBM_ERROR_INVALID_OPERATION;
1359                 return NULL;
1360         }
1361
1362         /* add bo_exynos to hash */
1363         if (drmHashInsert(bufmgr_exynos->hashBos, bo_exynos->name, (void *)bo_exynos) < 0)
1364                 TBM_ERR("bo_exynos:%p Cannot insert bo_exynos to Hash(%d) from gem:%d, fd:%d\n",
1365                                bo_exynos, bo_exynos->name, gem, key);
1366
1367         TBM_DBG(" bo_exynos:%p, gem:%d(%d), fd:%d, key_fd:%d, flags:%d(%d), size:%d\n",
1368             bo_exynos,
1369             bo_exynos->gem, bo_exynos->name,
1370             bo_exynos->dmabuf,
1371             key,
1372             bo_exynos->flags_tbm, bo_exynos->flags_exynos,
1373             bo_exynos->size);
1374
1375         if (error)
1376                 *error = TBM_ERROR_NONE;
1377
1378         return (tbm_backend_bo_data *)bo_exynos;
1379 }
1380
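/* Import a bo by flink name: reuse it from the hash if already known,
 * otherwise open it with DRM_IOCTL_GEM_OPEN, query its flags with
 * DRM_EXYNOS_GEM_GET and export a dma-buf fd for it. */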
1381 static tbm_backend_bo_data *
1382 tbm_exynos_bufmgr_import_key(tbm_backend_bufmgr_data *bufmgr_data, tbm_key key, tbm_error_e *error)
1383 {
1384         tbm_bufmgr_exynos bufmgr_exynos = (tbm_bufmgr_exynos)bufmgr_data;
1385         tbm_bo_exynos bo_exynos;
1386         int ret;
1387
1388         if (bufmgr_exynos == NULL) {
1389                 TBM_ERR("bufmgr_data is null\n");
1390                 if (error)
1391                         *error = TBM_ERROR_INVALID_PARAMETER;
1392                 return NULL;
1393         }
1394
1395         ret = drmHashLookup(bufmgr_exynos->hashBos, key, (void **)&bo_exynos);
1396         if (ret == 0) {
1397                 if (error)
1398                         *error = TBM_ERROR_NONE;
1399                 return (tbm_backend_bo_data *)bo_exynos;
1400         }
1401
1402         struct drm_gem_open arg = {0, };
1403         struct drm_exynos_gem_info info = {0, };
1404
1405         arg.name = key;
1406         if (drmIoctl(bufmgr_exynos->fd, DRM_IOCTL_GEM_OPEN, &arg)) {
1407                 TBM_ERR("Cannot open gem name=%d\n", key);
1408                 if (error)
1409                         *error = TBM_ERROR_INVALID_OPERATION;
1410                 return NULL;
1411         }
1412
1413         info.handle = arg.handle;
1414         if (drmCommandWriteRead(bufmgr_exynos->fd,
1415                                 DRM_EXYNOS_GEM_GET,
1416                                 &info,
1417                                 sizeof(struct drm_exynos_gem_info))) {
1418                 TBM_ERR("Cannot get gem info=%d\n", key);
1419                 if (error)
1420                         *error = TBM_ERROR_INVALID_OPERATION;
1421                 return NULL;
1422         }
1423
1424         bo_exynos = calloc(1, sizeof(struct _tbm_bo_exynos));
1425         if (!bo_exynos) {
1426                 TBM_ERR("fail to allocate the bo_exynos private\n");
1427                 if (error)
1428                         *error = TBM_ERROR_OUT_OF_MEMORY;
1429                 return NULL;
1430         }
1431         bo_exynos->bufmgr_exynos = bufmgr_exynos;
1432
1433         bo_exynos->fd = bufmgr_exynos->fd;
1434         bo_exynos->gem = arg.handle;
1435         bo_exynos->size = arg.size;
1436         bo_exynos->flags_exynos = info.flags;
1437         bo_exynos->name = key;
1438         bo_exynos->flags_tbm = _get_tbm_flag_from_exynos(bo_exynos->flags_exynos);
1439
1440         if (!_bo_init_cache_state(bufmgr_exynos, bo_exynos, 1)) {
1441                 TBM_ERR("fail init cache state(%d)\n", bo_exynos->name);
1442                 free(bo_exynos);
1443                 if (error)
1444                         *error = TBM_ERROR_INVALID_OPERATION;
1445                 return NULL;
1446         }
1447
1448         if (!bo_exynos->dmabuf) {
1449                 struct drm_prime_handle arg = {0, };
1450
1451                 arg.handle = bo_exynos->gem;
1452                 if (drmIoctl(bo_exynos->fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &arg)) {
1453                         TBM_ERR("fail to DRM_IOCTL_PRIME_HANDLE_TO_FD gem=%d\n", bo_exynos->gem);
1454                         if (error)
1455                                 *error = TBM_ERROR_INVALID_OPERATION;
1456                         free(bo_exynos);
1457                         return NULL;
1458                 }
1459                 bo_exynos->dmabuf = arg.fd;
1460         }
1461
1462         /* add bo_exynos to hash */
1463         if (drmHashInsert(bufmgr_exynos->hashBos, bo_exynos->name, (void *)bo_exynos) < 0)
1464                 TBM_ERR("Cannot insert bo_exynos to Hash(%d)\n", bo_exynos->name);
1465
1466         TBM_DBG("    bo_exynos:%p, gem:%d(%d), fd:%d, flags:%d(%d), size:%d\n",
1467             bo_exynos,
1468             bo_exynos->gem, bo_exynos->name,
1469             bo_exynos->dmabuf,
1470             bo_exynos->flags_tbm, bo_exynos->flags_exynos,
1471             bo_exynos->size);
1472
1473         if (error)
1474                 *error = TBM_ERROR_NONE;
1475
1476         return (tbm_backend_bo_data *)bo_exynos;
1477 }
1478
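/* Release a bo: unmap the CPU mapping, close the dma-buf fd, drop it from
 * the flink-name hash, destroy its cache state and close the GEM handle. */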
1479 static void
1480 tbm_exynos_bo_free(tbm_backend_bo_data *bo_data)
1481 {
1482         tbm_bo_exynos bo_exynos = (tbm_bo_exynos)bo_data;
1483         tbm_bo_exynos temp = NULL;
1484         tbm_bufmgr_exynos bufmgr_exynos;
1485         char buf[STRERR_BUFSIZE];
1486         int ret;
1487
1488         if (!bo_data)
1489                 return;
1490
1491         bufmgr_exynos = bo_exynos->bufmgr_exynos;
1492         if (!bufmgr_exynos)
1493                 return;
1494
1495         TBM_DBG("      bo_exynos:%p, gem:%d(%d), fd:%d, size:%d\n",
1496             bo_exynos,
1497             bo_exynos->gem, bo_exynos->name,
1498             bo_exynos->dmabuf,
1499             bo_exynos->size);
1500
1501         if (bo_exynos->pBase) {
1502                 if (munmap(bo_exynos->pBase, bo_exynos->size) == -1) {
1503                         TBM_ERR("bo_exynos:%p fail to munmap(%s)\n",
1504                                        bo_exynos, strerror_r(errno, buf, STRERR_BUFSIZE));
1505                 }
1506         }
1507
1508         /* close dmabuf */
1509         if (bo_exynos->dmabuf) {
1510                 close(bo_exynos->dmabuf);
1511                 bo_exynos->dmabuf = 0;
1512         }
1513
1514         /* delete bo_exynos from hash */
1515         ret = drmHashLookup(bufmgr_exynos->hashBos, bo_exynos->name, (void **)&temp);
1516         if (ret == 0)
1517                 drmHashDelete(bufmgr_exynos->hashBos, bo_exynos->name);
1518         else
1519                 TBM_ERR("Cannot find bo_exynos to Hash(%d), ret=%d\n", bo_exynos->name, ret);
1520
1521         if (temp != bo_exynos)
1522                 TBM_ERR("hashBos probably has several BOs with same name!!!\n");
1523
1524         _bo_destroy_cache_state(bufmgr_exynos, bo_exynos);
1525
1526         /* Free gem handle */
1527         struct drm_gem_close arg = {0, };
1528
1529         memset(&arg, 0, sizeof(arg));
1530         arg.handle = bo_exynos->gem;
1531         if (drmIoctl(bo_exynos->fd, DRM_IOCTL_GEM_CLOSE, &arg))
1532                 TBM_ERR("bo_exynos:%p fail to gem close.(%s)\n",
1533                                bo_exynos, strerror_r(errno, buf, STRERR_BUFSIZE));
1534
1535         free(bo_exynos);
1536 }
1537
1538 static int
1539 tbm_exynos_bo_get_size(tbm_backend_bo_data *bo_data, tbm_error_e *error)
1540 {
1541         tbm_bo_exynos bo_exynos = (tbm_bo_exynos)bo_data;
1542
1543         if (!bo_exynos) {
1544                 if (error)
1545                         *error = TBM_ERROR_INVALID_PARAMETER;
1546                 return 0;
1547         }
1548
1549         if (error)
1550                 *error = TBM_ERROR_NONE;
1551
1552         return bo_exynos->size;
1553 }
1554
1555 static tbm_bo_memory_type
1556 tbm_exynos_bo_get_memory_type(tbm_backend_bo_data *bo_data, tbm_error_e *error)
1557 {
1558         tbm_bo_exynos bo_exynos = (tbm_bo_exynos)bo_data;
1559
1560         if (!bo_exynos) {
1561                 if (error)
1562                         *error = TBM_ERROR_INVALID_PARAMETER;
1563                 return TBM_BO_DEFAULT;
1564         }
1565
1566         if (error)
1567                 *error = TBM_ERROR_NONE;
1568
1569         return bo_exynos->flags_tbm;
1570 }
1571
1572 static tbm_bo_handle
1573 tbm_exynos_bo_get_handle(tbm_backend_bo_data *bo_data, tbm_bo_device_type device, tbm_error_e *error)
1574 {
1575         tbm_bo_exynos bo_exynos = (tbm_bo_exynos)bo_data;
1576         tbm_bo_handle bo_handle;
1577
1578         if (!bo_exynos) {
1579                 if (error)
1580                         *error = TBM_ERROR_INVALID_PARAMETER;
1581                 return (tbm_bo_handle) NULL;
1582         }
1583
1584         if (!bo_exynos->gem) {
1585                 TBM_ERR("Cannot map gem=%d\n", bo_exynos->gem);
1586                 if (error)
1587                         *error = TBM_ERROR_INVALID_PARAMETER;
1588                 return (tbm_bo_handle) NULL;
1589         }
1590
1591         TBM_DBG("bo_exynos:%p, gem:%d(%d), fd:%d, flags:%d(%d), size:%d, %s\n",
1592             bo_exynos,
1593             bo_exynos->gem, bo_exynos->name,
1594             bo_exynos->dmabuf,
1595             bo_exynos->flags_tbm, bo_exynos->flags_exynos,
1596             bo_exynos->size,
1597             STR_DEVICE[device]);
1598
1599         /*Get mapped bo_handle*/
1600         bo_handle = _exynos_bo_handle(bo_exynos, device);
1601         if (bo_handle.ptr == NULL) {
1602                 TBM_ERR("Cannot get handle: gem:%d, device:%d\n",
1603                         bo_exynos->gem, device);
1604                 if (error)
1605                         *error = TBM_ERROR_INVALID_OPERATION;
1606                 return (tbm_bo_handle) NULL;
1607         }
1608
1609         if (error)
1610                 *error = TBM_ERROR_NONE;
1611
1612         return bo_handle;
1613 }
1614
1615 static tbm_bo_handle
1616 tbm_exynos_bo_map(tbm_backend_bo_data *bo_data, tbm_bo_device_type device,
1617                                 tbm_bo_access_option opt, tbm_error_e *error)
1618 {
1619         tbm_bo_exynos bo_exynos = (tbm_bo_exynos)bo_data;
1620         tbm_bo_handle bo_handle;
1621         tbm_bufmgr_exynos bufmgr_exynos;
1622
1623         if (!bo_exynos) {
1624                 if (error)
1625                         *error = TBM_ERROR_INVALID_PARAMETER;
1626                 return (tbm_bo_handle) NULL;
1627         }
1628
1629         bufmgr_exynos = bo_exynos->bufmgr_exynos;
1630         if (!bufmgr_exynos) {
1631                 if (error)
1632                         *error = TBM_ERROR_INVALID_PARAMETER;
1633                 return (tbm_bo_handle) NULL;
1634         }
1635
1636         if (!bo_exynos->gem) {
1637                 TBM_ERR("Cannot map gem=%d\n", bo_exynos->gem);
1638                 if (error)
1639                         *error = TBM_ERROR_INVALID_PARAMETER;
1640                 return (tbm_bo_handle) NULL;
1641         }
1642
1643         TBM_DBG("       bo_exynos:%p, gem:%d(%d), fd:%d, %s, %s\n",
1644             bo_exynos,
1645             bo_exynos->gem, bo_exynos->name,
1646             bo_exynos->dmabuf,
1647             STR_DEVICE[device],
1648             STR_OPT[opt]);
1649
1650         /*Get mapped bo_handle*/
1651         bo_handle = _exynos_bo_handle(bo_exynos, device);
1652         if (bo_handle.ptr == NULL) {
1653                 TBM_ERR("Cannot get handle: gem:%d, device:%d, opt:%d\n",
1654                                bo_exynos->gem, device, opt);
1655                 if (error)
1656                         *error = TBM_ERROR_INVALID_OPERATION;
1657                 return (tbm_bo_handle) NULL;
1658         }
1659
1660         if (bo_exynos->map_cnt == 0)
1661                 _bo_set_cache_state(bufmgr_exynos, bo_exynos, device, opt);
1662
1663         bo_exynos->last_map_device = device;
1664
1665         bo_exynos->map_cnt++;
1666
1667         if (error)
1668                 *error = TBM_ERROR_NONE;
1669
1670         return bo_handle;
1671 }
1672
1673 static tbm_error_e
1674 tbm_exynos_bo_unmap(tbm_backend_bo_data *bo_data)
1675 {
1676         tbm_bo_exynos bo_exynos = (tbm_bo_exynos)bo_data;
1677         tbm_bufmgr_exynos bufmgr_exynos;
1678
1679         if (!bo_exynos)
1680                 return TBM_ERROR_INVALID_PARAMETER;
1681
1682         bufmgr_exynos = bo_exynos->bufmgr_exynos;
1683         if (!bufmgr_exynos)
1684                 return TBM_ERROR_INVALID_PARAMETER;
1685
1686         if (!bo_exynos->gem)
1687                 return TBM_ERROR_INVALID_PARAMETER;
1688
1689         bo_exynos->map_cnt--;
1690
1691         if (bo_exynos->map_cnt == 0)
1692                 _bo_save_cache_state(bufmgr_exynos, bo_exynos);
1693
1694         /* if backend cache control is enabled and the last map was for the CPU, flush the cache */
1695         if (g_enable_cache_ctrl && bo_exynos->last_map_device == TBM_DEVICE_CPU)
1696                 _exynos_cache_flush(bufmgr_exynos, bo_exynos, TBM_EXYNOS_CACHE_FLUSH_ALL);
1697
1698         bo_exynos->last_map_device = -1;
1699
1700         TBM_DBG("     bo_exynos:%p, gem:%d(%d), fd:%d\n",
1701             bo_exynos,
1702             bo_exynos->gem, bo_exynos->name,
1703             bo_exynos->dmabuf);
1704
1705         return TBM_ERROR_NONE;
1706 }
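
/* Usage sketch (illustrative, comment only): the map/unmap pair above is normally
 * driven through the libtbm frontend rather than called directly. Assuming the
 * standard frontend entry points tbm_bo_map(), tbm_bo_unmap() and tbm_bo_size(),
 * which live outside this file, a CPU access looks roughly like:
 *
 *   tbm_bo_handle h = tbm_bo_map(bo, TBM_DEVICE_CPU,
 *                                TBM_OPTION_READ | TBM_OPTION_WRITE);
 *   if (h.ptr)
 *       memset(h.ptr, 0, tbm_bo_size(bo));
 *   tbm_bo_unmap(bo);
 *
 * The first map (map_cnt == 0) applies the cache state with _bo_set_cache_state();
 * the last unmap saves it with _bo_save_cache_state(), and every unmap flushes the
 * CPU cache with _exynos_cache_flush() when backend cache control is enabled and
 * the buffer was last mapped for TBM_DEVICE_CPU.
 */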
1707
1708 static tbm_error_e
1709 tbm_exynos_bo_lock(tbm_backend_bo_data *bo_data, tbm_bo_device_type device,
1710                                 tbm_bo_access_option opt)
1711 {
1712 #ifndef ALWAYS_BACKEND_CTRL
1713         tbm_bo_exynos bo_exynos = (tbm_bo_exynos)bo_data;
1714         tbm_bufmgr_exynos bufmgr_exynos;
1715         struct dma_buf_fence fence;
1716         struct flock filelock;
1717         int ret = 0;
1718         char buf[STRERR_BUFSIZE];
1719
1720         if (!bo_exynos)
1721                 return TBM_ERROR_INVALID_PARAMETER;
1722
1723         bufmgr_exynos = bo_exynos->bufmgr_exynos;
1724         if (!bufmgr_exynos)
1725                 return TBM_ERROR_INVALID_PARAMETER;
1726
1727         if (device != TBM_DEVICE_3D && device != TBM_DEVICE_CPU) {
1728                 TBM_DBG("Not supported device type\n");
1729                 return TBM_ERROR_INVALID_OPERATION;
1730         }
1731
1732         memset(&fence, 0, sizeof(struct dma_buf_fence));
1733
1734         /* Check if the given type is valid or not. */
1735         if (opt & TBM_OPTION_WRITE) {
1736                 if (device == TBM_DEVICE_3D)
1737                         fence.type = DMA_BUF_ACCESS_WRITE | DMA_BUF_ACCESS_DMA;
1738         } else if (opt & TBM_OPTION_READ) {
1739                 if (device == TBM_DEVICE_3D)
1740                         fence.type = DMA_BUF_ACCESS_READ | DMA_BUF_ACCESS_DMA;
1741         } else {
1742                 TBM_ERR("Invalid argument\n");
1743                 return TBM_ERROR_INVALID_PARAMETER;
1744         }
1745
1746         /* Check if the tbm manager supports dma fence or not. */
1747         if (!bufmgr_exynos->use_dma_fence) {
1748                 TBM_ERR("Not support DMA FENCE(%s)\n", strerror_r(errno, buf, STRERR_BUFSIZE));
1749                 return TBM_ERROR_INVALID_OPERATION;
1750
1751         }
1752
1753         if (device == TBM_DEVICE_3D) {
1754                 ret = ioctl(bo_exynos->dmabuf, DMABUF_IOCTL_GET_FENCE, &fence);
1755                 if (ret < 0) {
1756                         TBM_ERR("Cannot set GET FENCE(%s)\n", strerror_r(errno, buf, STRERR_BUFSIZE));
1757                         return TBM_ERROR_INVALID_OPERATION;
1758                 }
1759         } else {
1760                 if (opt & TBM_OPTION_WRITE)
1761                         filelock.l_type = F_WRLCK;
1762                 else
1763                         filelock.l_type = F_RDLCK;
1764
1765                 filelock.l_whence = SEEK_CUR;
1766                 filelock.l_start = 0;
1767                 filelock.l_len = 0;
1768
1769                 if (-1 == fcntl(bo_exynos->dmabuf, F_SETLKW, &filelock))
1770                         return TBM_ERROR_INVALID_OPERATION;
1771         }
1772
1773         pthread_mutex_lock(&bo_exynos->mutex);
1774
1775         if (device == TBM_DEVICE_3D) {
1776                 int i;
1777
1778                 for (i = 0; i < DMA_FENCE_LIST_MAX; i++) {
1779                         if (bo_exynos->dma_fence[i].ctx == 0) {
1780                                 bo_exynos->dma_fence[i].type = fence.type;
1781                                 bo_exynos->dma_fence[i].ctx = fence.ctx;
1782                                 break;
1783                         }
1784                 }
1785
1786                 if (i == DMA_FENCE_LIST_MAX) {
1787                         /*TODO: if dma_fence list is full, it needs realloc. I will fix this. by minseok3.kim*/
1788                         TBM_ERR("fence list is full\n");
1789                 }
1790         }
1791
1792         pthread_mutex_unlock(&bo_exynos->mutex);
1793
1794         TBM_DBG("DMABUF_IOCTL_GET_FENCE! bo_exynos:%p, gem:%d(%d), fd:%d\n",
1795             bo_exynos,
1796             bo_exynos->gem, bo_exynos->name,
1797             bo_exynos->dmabuf);
1798 #endif /* ALWAYS_BACKEND_CTRL */
1799
1800         return TBM_ERROR_NONE;
1801 }
1802
1803 static tbm_error_e
1804 tbm_exynos_bo_unlock(tbm_backend_bo_data *bo_data)
1805 {
1806 #ifndef ALWAYS_BACKEND_CTRL
1807         tbm_bo_exynos bo_exynos = (tbm_bo_exynos)bo_data;
1808         struct dma_buf_fence fence;
1809         struct flock filelock;
1810         unsigned int dma_type = 0;
1811         int ret = 0;
1812         char buf[STRERR_BUFSIZE];
1813
1814         if (!bo_exynos)
1815                 return TBM_ERROR_INVALID_PARAMETER;
1817
1818         if (bo_exynos->dma_fence[0].type & DMA_BUF_ACCESS_DMA)
1819                 dma_type = 1;
1820
1821         if (!bo_exynos->dma_fence[0].ctx && dma_type) {
1822                 TBM_DBG("FENCE not support or ignored,\n");
1823                 return TBM_ERROR_INVALID_OPERATION;
1824         }
1830
1831         pthread_mutex_lock(&bo_exynos->mutex);
1832
1833         if (dma_type) {
1834                 fence.type = bo_exynos->dma_fence[0].type;
1835                 fence.ctx = bo_exynos->dma_fence[0].ctx;
1836                 int i;
1837
1838                 for (i = 1; i < DMA_FENCE_LIST_MAX; i++) {
1839                         bo_exynos->dma_fence[i - 1].type = bo_exynos->dma_fence[i].type;
1840                         bo_exynos->dma_fence[i - 1].ctx = bo_exynos->dma_fence[i].ctx;
1841                 }
1842                 bo_exynos->dma_fence[DMA_FENCE_LIST_MAX - 1].type = 0;
1843                 bo_exynos->dma_fence[DMA_FENCE_LIST_MAX - 1].ctx = 0;
1844         }
1845         pthread_mutex_unlock(&bo_exynos->mutex);
1846
1847         if (dma_type) {
1848                 ret = ioctl(bo_exynos->dmabuf, DMABUF_IOCTL_PUT_FENCE, &fence);
1849                 if (ret < 0) {
1850                         TBM_ERR("Can not set PUT FENCE(%s)\n", strerror_r(errno, buf, STRERR_BUFSIZE));
1851                         return TBM_ERROR_INVALID_OPERATION;
1852                 }
1853         } else {
1854                 filelock.l_type = F_UNLCK;
1855                 filelock.l_whence = SEEK_CUR;
1856                 filelock.l_start = 0;
1857                 filelock.l_len = 0;
1858
1859                 if (-1 == fcntl(bo_exynos->dmabuf, F_SETLKW, &filelock))
1860                         return TBM_ERROR_INVALID_OPERATION;
1861         }
1862
1863         TBM_DBG("DMABUF_IOCTL_PUT_FENCE! bo_exynos:%p, gem:%d(%d), fd:%d\n",
1864             bo_exynos,
1865             bo_exynos->gem, bo_exynos->name,
1866             bo_exynos->dmabuf);
1867 #endif /* ALWAYS_BACKEND_CTRL */
1868
1869         return TBM_ERROR_NONE;
1870 }
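
/* Locking sketch (illustrative, comment only): tbm_exynos_bo_lock() and
 * tbm_exynos_bo_unlock() bracket buffer access in two ways. For TBM_DEVICE_3D they
 * take and release a dmabuf_sync fence on the dma-buf fd (DMABUF_IOCTL_GET_FENCE and
 * DMABUF_IOCTL_PUT_FENCE); for TBM_DEVICE_CPU they fall back to a POSIX record lock
 * (fcntl F_SETLKW) on the same fd. Through the bo_func table registered in
 * tbm_exynos_init() below, a caller-side sequence would roughly be (the bo_data,
 * device and option values are assumptions for the example):
 *
 *   tbm_error_e err;
 *   err = bo_func->bo_lock(bo_data, TBM_DEVICE_3D, TBM_OPTION_WRITE);
 *   ... let the GPU write into the buffer ...
 *   err = bo_func->bo_unlock(bo_data);
 */
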
1871 static tbm_fd
1872 tbm_exynos_bo_export_fd(tbm_backend_bo_data *bo_data, tbm_error_e *error)
1873 {
1874         tbm_bo_exynos bo_exynos = (tbm_bo_exynos)bo_data;
1875         int ret;
1876         char buf[STRERR_BUFSIZE];
1877
1878         if (!bo_exynos) {
1879                 if (error)
1880                         *error = TBM_ERROR_INVALID_PARAMETER;
1881                 return -1;
1882         }
1883
1884         struct drm_prime_handle arg = {0, };
1885
1886         arg.handle = bo_exynos->gem;
1887         ret = drmIoctl(bo_exynos->fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &arg);
1888         if (ret) {
1889                 TBM_ERR("bo_exynos:%p Cannot dmabuf=%d (%s)\n",
1890                                bo_exynos, bo_exynos->gem, strerror_r(errno, buf, STRERR_BUFSIZE));
1891                 if (error)
1892                         *error = TBM_ERROR_INVALID_OPERATION;
1893                 return (tbm_fd) ret;
1894         }
1895
1896         TBM_DBG(" bo_exynos:%p, gem:%d(%d), fd:%d, key_fd:%d, flags:%d(%d), size:%d\n",
1897             bo_exynos,
1898             bo_exynos->gem, bo_exynos->name,
1899             bo_exynos->dmabuf,
1900             arg.fd,
1901             bo_exynos->flags_tbm, bo_exynos->flags_exynos,
1902             bo_exynos->size);
1903
1904         if (error)
1905                 *error = TBM_ERROR_NONE;
1906
1907         return (tbm_fd)arg.fd;
1908 }
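
/* Export sketch (illustrative, comment only): DRM_IOCTL_PRIME_HANDLE_TO_FD returns a
 * new dma-buf file descriptor that the caller owns and must eventually close. Paired
 * with the import callback registered in tbm_exynos_init() below, a round trip would
 * roughly be (the process boundary and fd passing are assumptions for the example):
 *
 *   tbm_error_e err;
 *   tbm_fd fd = bo_func->bo_export_fd(bo_data, &err);
 *   ... hand fd to another process, e.g. over a UNIX socket with SCM_RIGHTS ...
 *   tbm_backend_bo_data *bo2 = bufmgr_func->bufmgr_import_fd(bufmgr_data, fd, &err);
 *   close(fd);
 */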
1909
1910 static tbm_key
1911 tbm_exynos_bo_export_key(tbm_backend_bo_data *bo_data, tbm_error_e *error)
1912 {
1913         tbm_bo_exynos bo_exynos = (tbm_bo_exynos)bo_data;
1914
1915         if (!bo_exynos) {
1916                 if (error)
1917                         *error = TBM_ERROR_INVALID_PARAMETER;
1918                 return 0;
1919         }
1920
1921         if (!bo_exynos->name) {
1922                 bo_exynos->name = _get_name(bo_exynos->fd, bo_exynos->gem);
1923                 if (!bo_exynos->name) {
1924                         TBM_ERR("Cannot get name\n");
1925                         if (error)
1926                                 *error = TBM_ERROR_INVALID_PARAMETER;
1927                         return 0;
1928                 }
1929         }
1930
1931         TBM_DBG("    bo_exynos:%p, gem:%d(%d), fd:%d, flags:%d(%d), size:%d\n",
1932             bo_exynos,
1933             bo_exynos->gem, bo_exynos->name,
1934             bo_exynos->dmabuf,
1935             bo_exynos->flags_tbm, bo_exynos->flags_exynos,
1936             bo_exynos->size);
1937
1938         if (error)
1939                 *error = TBM_ERROR_NONE;
1940
1941         return (tbm_key)bo_exynos->name;
1942 }
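
/* Key export sketch (illustrative, comment only): the key returned here is the
 * per-buffer name obtained from _get_name() (typically a global GEM flink name), so
 * it can be imported in another process through the bufmgr_import_key callback
 * registered in tbm_exynos_init() below:
 *
 *   tbm_error_e err;
 *   tbm_key key = bo_func->bo_export_key(bo_data, &err);
 *   tbm_backend_bo_data *bo2 = bufmgr_func->bufmgr_import_key(bufmgr_data, key, &err);
 *
 * Unlike the dma-buf export above, no file descriptor changes hands; names of this
 * kind are not access-controlled, which is why the fd-based path is generally
 * preferred.
 */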
1943
1944 static void
1945 tbm_exynos_deinit(tbm_backend_bufmgr_data *bufmgr_data)
1946 {
1947         tbm_bufmgr_exynos bufmgr_exynos = (tbm_bufmgr_exynos)bufmgr_data;
1948         tbm_bufmgr bufmgr;
1950         unsigned long key;
1951         void *value;
1952
1953         TBM_RETURN_IF_FAIL(bufmgr_exynos != NULL);
1954
1955         bufmgr = bufmgr_exynos->bufmgr;
1956
1957         tbm_backend_bufmgr_free_bufmgr_func(bufmgr, bufmgr_exynos->bufmgr_func);
1958         tbm_backend_bufmgr_free_bo_func(bufmgr, bufmgr_exynos->bo_func);
1959
1960         if (bufmgr_exynos->hashBos) {
1961                 while (drmHashFirst(bufmgr_exynos->hashBos, &key, &value) > 0) {
1962                         free(value);
1963                         drmHashDelete(bufmgr_exynos->hashBos, key);
1964                 }
1965
1966                 drmHashDestroy(bufmgr_exynos->hashBos);
1967                 bufmgr_exynos->hashBos = NULL;
1968         }
1969
1970         _bufmgr_deinit_cache_state(bufmgr_exynos);
1971
1972         if (bufmgr_exynos->bind_display)
1973                 tbm_drm_helper_wl_auth_server_deinit();
1974
1975         if (bufmgr_exynos->device_name)
1976                 free(bufmgr_exynos->device_name);
1977
1978         if (tbm_drm_helper_get_master_fd() >= 0)
1979                 tbm_drm_helper_unset_tbm_master_fd();
1980         else
1981                 tbm_drm_helper_unset_fd();
1982
1983         close(bufmgr_exynos->fd);
1984
1985         free(bufmgr_exynos);
1986 }
1987
1988 static tbm_backend_bufmgr_data *
1989 tbm_exynos_init(tbm_bufmgr bufmgr, tbm_error_e *error)
1990 {
1991         tbm_bufmgr_exynos bufmgr_exynos = NULL;
1992         tbm_backend_bufmgr_func *bufmgr_func = NULL;
1993         tbm_backend_bo_func *bo_func = NULL;
1994         int fp;
1995         tbm_error_e err;
1996         char *value = NULL;
1997
1998         if (!bufmgr) {
1999                 TBM_ERR("bufmgr is null.\n");
2000                 if (error)
2001                         *error = TBM_ERROR_INVALID_PARAMETER;
2002                 return NULL;
2003         }
2004
2005         bufmgr_exynos = calloc(1, sizeof(struct _tbm_bufmgr_exynos));
2006         if (!bufmgr_exynos) {
2007                 TBM_ERR("fail to alloc bufmgr_exynos!\n");
2008                 if (error)
2009                         *error = TBM_ERROR_OUT_OF_MEMORY;
2010                 return NULL;
2011         }
2012
2013         /* check the master_fd which already had opened */
2014         bufmgr_exynos->fd = tbm_drm_helper_get_master_fd();
2015         if (bufmgr_exynos->fd < 0) {
2016                 bufmgr_exynos->fd = _tbm_exynos_open_drm();
2017                 if (bufmgr_exynos->fd < 0) {
2018                         TBM_ERR("fail to open drm!\n");
2019                         if (error)
2020                                 *error = TBM_ERROR_INVALID_OPERATION;
2021                         goto fail_open_drm;
2022                 }
2023
2024                 if (drmIsMaster(bufmgr_exynos->fd)) {
2025                         tbm_drm_helper_set_tbm_master_fd(bufmgr_exynos->fd);
2026
2027                         bufmgr_exynos->device_name = drmGetDeviceNameFromFd(bufmgr_exynos->fd);
2028                         if (!bufmgr_exynos->device_name) {
2029                                 TBM_ERR("fail to get device name!\n");
2030                                 tbm_drm_helper_unset_tbm_master_fd();
2031                                 if (error)
2032                                         *error = TBM_ERROR_INVALID_OPERATION;
2033                                 goto fail_get_device_name;
2034                         }
2035                         TBM_INFO("This is Master FD(%d) from open_drm.", bufmgr_exynos->fd);
2036                 } else {
2037                         /* close the fd and get the authenticated fd from the master fd */
2038                         close(bufmgr_exynos->fd);
2039 #ifdef USE_RENDER_NODE
2040                         bufmgr_exynos->fd = _get_render_node(0);
2041 #else
2042                         bufmgr_exynos->fd = -1;
2043 #endif
2044                         /* get the authenticated drm fd from the master fd */
2045                         if (!tbm_drm_helper_get_auth_info(&(bufmgr_exynos->fd), &(bufmgr_exynos->device_name), NULL)) {
2046                                 TBM_ERR("fail to get auth drm info!\n");
2047                                 if (error)
2048                                         *error = TBM_ERROR_INVALID_OPERATION;
2049                                 goto fail_get_auth_info;
2050                         }
2051                         TBM_INFO("This is Authenticated FD(%d)", bufmgr_exynos->fd);
2052                 }
2053         } else {
2054                 bufmgr_exynos->device_name = drmGetDeviceNameFromFd(bufmgr_exynos->fd);
2055                 if (!bufmgr_exynos->device_name) {
2056                         TBM_ERR("fail to get device name!\n");
2057                         tbm_drm_helper_unset_tbm_master_fd();
2058                         if (error)
2059                                 *error = TBM_ERROR_INVALID_OPERATION;
2060                         goto fail_get_device_name;
2061                 }
2062                 TBM_INFO("This is Master FD from tbm_drm_helper_get_master_fd(%d)", bufmgr_exynos->fd);
2063         }
2064         tbm_drm_helper_set_fd(bufmgr_exynos->fd);
2065
2066         //Check if the tbm manager supports dma fence or not.
2067         fp = open("/sys/module/dmabuf_sync/parameters/enabled", O_RDONLY);
2068         if (fp != -1) {
2069                 char buf[1];
2070                 int length = read(fp, buf, 1);
2071
2072                 if (length == 1 && buf[0] == '1')
2073                         bufmgr_exynos->use_dma_fence = 1;
2074
2075                 close(fp);
2076         }
2077
2078         /* Get the model name from capi-system-info.
2079          * The alignment_plane and alignment_pitch_rgb differ according to the target.
2080          * A stride issue will occur when the right alignment_plane and alignment_pitch_rgb
2081          * are not set in the backend.
2082          */
2083         if (system_info_get_platform_string("http://tizen.org/system/model_name", &value) != SYSTEM_INFO_ERROR_NONE) {
2084                 TBM_ERR("Cannot get the \"http://tizen.org/system/model_name\" key value from system-info.\n");
2085                 TBM_ERR("May not set the right value on libtbm-exynos backend.\n");
2086         } else {
2087                 if (!strncmp(value, "TW1", 4)) {
2088                         g_tbm_surface_alignment_plane = 8;
2089                         g_tbm_surface_alignment_pitch_rgb = 8;
2090                         g_enable_cache_ctrl = 1;
2091                 } else {
2092                         g_tbm_surface_alignment_plane = 64;
2093                         g_tbm_surface_alignment_pitch_rgb = 64;
2094                 }
2095         }
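
	/* Worked example (illustrative): assuming the pitch is rounded up with
	 * SIZE_ALIGN(), an ARGB8888 surface 1080 pixels wide needs 1080 * 4 = 4320 bytes
	 * per row; with the default 64-byte alignment chosen above that becomes
	 * SIZE_ALIGN(4320, 64) = 4352, while with the TW1 value of 8 it stays 4320. The
	 * surface width and format are assumptions for the example only.
	 */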
2096
2097         free(value);
2098
2099         if (!_bufmgr_init_cache_state(bufmgr_exynos)) {
2100                 TBM_ERR("fail to init bufmgr cache state\n");
2101                 if (error)
2102                         *error = TBM_ERROR_INVALID_OPERATION;
2103                 goto fail_init_cache_state;
2104         }
2105
2106         /*Create Hash Table*/
2107         bufmgr_exynos->hashBos = drmHashCreate();
2108
2109         /* alloc and register bufmgr_funcs */
2110         bufmgr_func = tbm_backend_bufmgr_alloc_bufmgr_func(bufmgr, &err);
2111         if (!bufmgr_func) {
2112                 TBM_ERR("fail to alloc bufmgr_func! err(%d)\n", err);
2113                 if (error)
2114                         *error = TBM_ERROR_OUT_OF_MEMORY;
2115                 goto fail_alloc_bufmgr_func;
2116         }
2117
2118         bufmgr_func->bufmgr_get_capabilities = tbm_exynos_bufmgr_get_capabilities;
2119         //if (tbm_backend_bufmgr_query_display_server(bufmgr, &err) && !_check_render_node())
2120         bufmgr_func->bufmgr_bind_native_display = tbm_exynos_bufmgr_bind_native_display;
2121         bufmgr_func->bufmgr_get_supported_formats = tbm_exynos_bufmgr_get_supported_formats;
2122         bufmgr_func->bufmgr_get_plane_data = tbm_exynos_bufmgr_get_plane_data;
2123         bufmgr_func->bufmgr_alloc_bo = tbm_exynos_bufmgr_alloc_bo;
2124         bufmgr_func->bufmgr_alloc_bo_with_format = NULL;
2125         bufmgr_func->bufmgr_import_fd = tbm_exynos_bufmgr_import_fd;
2126         bufmgr_func->bufmgr_import_key = tbm_exynos_bufmgr_import_key;
2127
2128         err = tbm_backend_bufmgr_register_bufmgr_func(bufmgr, bufmgr_func);
2129         if (err != TBM_ERROR_NONE) {
2130                 TBM_ERR("fail to register bufmgr_func! err(%d)\n", err);
2131                 if (error)
2132                         *error = TBM_ERROR_INVALID_OPERATION;
2133                 goto fail_register_bufmgr_func;
2134         }
2135         bufmgr_exynos->bufmgr_func = bufmgr_func;
2136
2137         /* alloc and register bo_funcs */
2138         bo_func = tbm_backend_bufmgr_alloc_bo_func(bufmgr, &err);
2139         if (!bo_func) {
2140                 TBM_ERR("fail to alloc bo_func! err(%d)\n", err);
2141                 if (error)
2142                         *error = TBM_ERROR_OUT_OF_MEMORY;
2143                 goto fail_alloc_bo_func;
2144         }
2145
2146         bo_func->bo_free = tbm_exynos_bo_free;
2147         bo_func->bo_get_size = tbm_exynos_bo_get_size;
2148         bo_func->bo_get_memory_types = tbm_exynos_bo_get_memory_type;
2149         bo_func->bo_get_handle = tbm_exynos_bo_get_handle;
2150         bo_func->bo_map = tbm_exynos_bo_map;
2151         bo_func->bo_unmap = tbm_exynos_bo_unmap;
2152         bo_func->bo_lock = tbm_exynos_bo_lock;
2153         bo_func->bo_unlock = tbm_exynos_bo_unlock;
2154         bo_func->bo_export_fd = tbm_exynos_bo_export_fd;
2155         bo_func->bo_export_key = tbm_exynos_bo_export_key;
2156
2157         err = tbm_backend_bufmgr_register_bo_func(bufmgr, bo_func);
2158         if (err != TBM_ERROR_NONE) {
2159                 TBM_ERR("fail to register bo_func! err(%d)\n", err);
2160                 if (error)
2161                         *error = TBM_ERROR_INVALID_OPERATION;
2162                 goto fail_register_bo_func;
2163         }
2164         bufmgr_exynos->bo_func = bo_func;
2165
2166         TBM_DBG("drm_fd:%d\n", bufmgr_exynos->fd);
2167
2168         if (error)
2169                 *error = TBM_ERROR_NONE;
2170
2171         bufmgr_exynos->bufmgr = bufmgr;
2172
2173         return (tbm_backend_bufmgr_data *)bufmgr_exynos;
2174
2175 fail_register_bo_func:
2176         tbm_backend_bufmgr_free_bo_func(bufmgr, bo_func);
2177 fail_alloc_bo_func:
2178 fail_register_bufmgr_func:
2179         tbm_backend_bufmgr_free_bufmgr_func(bufmgr, bufmgr_func);
2180 fail_alloc_bufmgr_func:
2181         _bufmgr_deinit_cache_state(bufmgr_exynos);
2182         if (bufmgr_exynos->hashBos)
2183                 drmHashDestroy(bufmgr_exynos->hashBos);
2184 fail_init_cache_state:
2185         if (tbm_drm_helper_get_master_fd() >= 0)
2186                 tbm_drm_helper_unset_tbm_master_fd();
2187         else
2188                 tbm_drm_helper_unset_fd();
2189 fail_get_device_name:
2190         if (bufmgr_exynos->fd >= 0)
2191                 close(bufmgr_exynos->fd);
2192 fail_get_auth_info:
2193 fail_open_drm:
2194         free(bufmgr_exynos);
2195         return NULL;
2196 }
2197
2198 tbm_backend_module tbm_backend_module_data = {
2199         "exynos",
2200         "Samsung",
2201         TBM_BACKEND_ABI_VERSION_3_0,
2202         tbm_exynos_init,
2203         tbm_exynos_deinit
2204 };
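
/* Loading sketch (illustrative, comment only; the exact lookup is an assumption and
 * lives in the libtbm frontend): the frontend opens this backend as a shared object,
 * resolves the tbm_backend_module_data symbol, checks the declared ABI version
 * (TBM_BACKEND_ABI_VERSION_3_0) and then calls tbm_exynos_init() when the buffer
 * manager is created and tbm_exynos_deinit() when it is destroyed:
 *
 *   void *handle = dlopen("libtbm_exynos.so", RTLD_NOW);
 *   tbm_backend_module *mod = dlsym(handle, "tbm_backend_module_data");
 *   ... version check, then drive the init and deinit hooks above ...
 */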