use g_enable_cache_ctrl for cache control
[platform/adaptation/samsung_exynos/libtbm-exynos.git] / src / tbm_bufmgr_exynos.c
1 /**************************************************************************
2
3 libtbm_exynos
4
5 Copyright 2012 Samsung Electronics co., Ltd. All Rights Reserved.
6
7 Contact: SooChan Lim <sc1.lim@samsung.com>, Sangjin Lee <lsj119@samsung.com>
8
9 Permission is hereby granted, free of charge, to any person obtaining a
10 copy of this software and associated documentation files (the
11 "Software"), to deal in the Software without restriction, including
12 without limitation the rights to use, copy, modify, merge, publish,
13 distribute, sub license, and/or sell copies of the Software, and to
14 permit persons to whom the Software is furnished to do so, subject to
15 the following conditions:
16
17 The above copyright notice and this permission notice (including the
18 next paragraph) shall be included in all copies or substantial portions
19 of the Software.
20
21 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
22 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
23 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
24 IN NO EVENT SHALL PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR
25 ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
26 TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
27 SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
28
29 **************************************************************************/
30
31 #ifdef HAVE_CONFIG_H
32 #include "config.h"
33 #endif
34
35 #include <stdio.h>
36 #include <stdlib.h>
37 #include <stdint.h>
38 #include <string.h>
39 #include <sys/ioctl.h>
40 #include <sys/types.h>
41 #include <unistd.h>
42 #include <sys/mman.h>
43 #include <sys/stat.h>
44 #include <fcntl.h>
45 #include <errno.h>
46 #include <xf86drm.h>
47 #include <tbm_bufmgr.h>
48 #include <tbm_bufmgr_backend.h>
49 #include <exynos_drm.h>
50 #include <pthread.h>
51 #include <tbm_surface.h>
52 #include <tbm_surface_internal.h>
53 #include <tbm_drm_helper.h>
54
55 #include <libudev.h>
56
57 #include <system_info.h>
58
59 #include "tbm_bufmgr_tgl.h"
60
61 #define DEBUG
62 #define USE_DMAIMPORT
63 #define TBM_COLOR_FORMAT_COUNT 4
64
65 #define EXYNOS_DRM_NAME "exynos"
66
67 #ifdef DEBUG
68 #define LOG_TAG "TBM_BACKEND"
69 #include <dlog.h>
70 static int bDebug;
71
72 char *target_name()
73 {
74         FILE *f;
75         char *slash;
76         static int initialized = 0;
77         static char app_name[128];
78
79         if (initialized)
80                 return app_name;
81
82         /* get the application name */
83         f = fopen("/proc/self/cmdline", "r");
84
85         if (!f)
86                 return 0;
87
88         memset(app_name, 0x00, sizeof(app_name));
89
90         if (fgets(app_name, 100, f) == NULL) {
91                 fclose(f);
92                 return 0;
93         }
94
95         fclose(f);
96
97         slash = strrchr(app_name, '/');
98         if (slash != NULL)
99                 memmove(app_name, slash + 1, strlen(slash));
100
101         initialized = 1;
102
103         return app_name;
104 }
105
106 #define TBM_EXYNOS_ERROR(fmt, args...)  LOGE("\033[31m"  "[%s] " fmt "\033[0m", target_name(), ##args)
107 #define TBM_EXYNOS_DEBUG(fmt, args...)  {if (bDebug&01) LOGD("[%s] " fmt, target_name(), ##args); }
108 #else
109 #define TBM_EXYNOS_ERROR(...)
110 #define TBM_EXYNOS_DEBUG(...)
111 #endif
112
113 #define STRERR_BUFSIZE 128
114
115 #define SIZE_ALIGN(value, base) (((value) + ((base) - 1)) & ~((base) - 1))
116 #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
117 #define MAX(a, b) ((a) > (b) ? (a) : (b))
118
119 static unsigned int g_tbm_surface_alignment_plane;
120 static unsigned int g_tbm_surface_alignment_pitch_rgb;
121
122 #define TBM_SURFACE_ALIGNMENT_PLANE_NV12 (4096)
123 #define TBM_SURFACE_ALIGNMENT_PITCH_YUV (16)
124
125 #define SZ_1M                                   0x00100000
126 #define S5P_FIMV_MAX_FRAME_SIZE                 (2 * SZ_1M)
127 #define S5P_FIMV_D_ALIGN_PLANE_SIZE             64
128 #define S5P_FIMV_NUM_PIXELS_IN_MB_ROW           16
129 #define S5P_FIMV_NUM_PIXELS_IN_MB_COL           16
130 #define S5P_FIMV_DEC_BUF_ALIGN                  (8 * 1024)
131 #define S5P_FIMV_NV12MT_HALIGN                  128
132 #define S5P_FIMV_NV12MT_VALIGN                  64
133
134 /* check condition */
135 #define EXYNOS_RETURN_IF_FAIL(cond) {\
136         if (!(cond)) {\
137                 TBM_EXYNOS_ERROR("[%s] : '%s' failed.\n", __func__, #cond);\
138                 return;\
139         } \
140 }
141
142 #define EXYNOS_RETURN_VAL_IF_FAIL(cond, val) {\
143         if (!(cond)) {\
144                 TBM_EXYNOS_ERROR("[%s] : '%s' failed.\n", __func__, #cond);\
145                 return val;\
146         } \
147 }
148
149 /* cache control at backend */
150 static unsigned int g_enable_cache_ctrl = 0;
151
152 struct dma_buf_info {
153         unsigned long   size;
154         unsigned int    fence_supported;
155         unsigned int    padding;
156 };
157
158 #define DMA_BUF_ACCESS_READ             0x1
159 #define DMA_BUF_ACCESS_WRITE            0x2
160 #define DMA_BUF_ACCESS_DMA              0x4
161 #define DMA_BUF_ACCESS_MAX              0x8
162
163 #define DMA_FENCE_LIST_MAX              5
164
165 struct dma_buf_fence {
166         unsigned long           ctx;
167         unsigned int            type;
168 };
169
170 #define DMABUF_IOCTL_BASE       'F'
171 #define DMABUF_IOWR(nr, type)   _IOWR(DMABUF_IOCTL_BASE, nr, type)
172
173 #define DMABUF_IOCTL_GET_INFO   DMABUF_IOWR(0x00, struct dma_buf_info)
174 #define DMABUF_IOCTL_GET_FENCE  DMABUF_IOWR(0x01, struct dma_buf_fence)
175 #define DMABUF_IOCTL_PUT_FENCE  DMABUF_IOWR(0x02, struct dma_buf_fence)
176
177 /* tgl key values */
178 #define GLOBAL_KEY   ((unsigned int)(-1))
179 /* TBM_CACHE */
180 #define TBM_EXYNOS_CACHE_INV       0x01 /**< cache invalidate  */
181 #define TBM_EXYNOS_CACHE_CLN       0x02 /**< cache clean */
182 #define TBM_EXYNOS_CACHE_ALL       0x10 /**< cache all */
183 #define TBM_EXYNOS_CACHE_FLUSH     (TBM_EXYNOS_CACHE_INV|TBM_EXYNOS_CACHE_CLN) /**< cache flush  */
184 #define TBM_EXYNOS_CACHE_FLUSH_ALL (TBM_EXYNOS_CACHE_FLUSH|TBM_EXYNOS_CACHE_ALL)        /**< cache flush all */
185
186 enum {
187         DEVICE_NONE = 0,
188         DEVICE_CA,                                      /* cache aware device */
189         DEVICE_CO                                       /* cache oblivious device */
190 };
191
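/* Per-bo cache state shared through the tgl driver. cntFlush snapshots the
 * global flush-all counter, isCached marks a CPU mapping, and isDirtied
 * records which kind of device wrote last (DEVICE_CA = cache-aware/CPU,
 * DEVICE_CO = cache-oblivious/DMA). */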
192 typedef union _tbm_bo_cache_state tbm_bo_cache_state;
193
194 union _tbm_bo_cache_state {
195         unsigned int val;
196         struct {
197                 unsigned int cntFlush:16;       /* snapshot of the global flush-all counter */
198                 unsigned int isCached:1;
199                 unsigned int isDirtied:2;
200         } data;
201 };
202
203 typedef struct _tbm_bufmgr_exynos *tbm_bufmgr_exynos;
204 typedef struct _tbm_bo_exynos *tbm_bo_exynos;
205
206 /* tbm buffer object for exynos */
207 struct _tbm_bo_exynos {
208         int fd;
209
210         unsigned int name;    /* FLINK ID */
211
212         unsigned int gem;     /* GEM Handle */
213
214         unsigned int dmabuf;  /* fd for dmabuf */
215
216         void *pBase;          /* virtual address */
217
218         unsigned int size;
219
220         unsigned int flags_exynos;
221         unsigned int flags_tbm;
222
223         pthread_mutex_t mutex;
224         struct dma_buf_fence dma_fence[DMA_FENCE_LIST_MAX];
225         int device;
226         int opt;
227
228         tbm_bo_cache_state cache_state;
229         unsigned int map_cnt;
230         int last_map_device;
231 };
232
233 /* tbm bufmgr private for exynos */
234 struct _tbm_bufmgr_exynos {
235         int fd;
236         int isLocal;
237         void *hashBos;
238
239         int use_dma_fence;
240
241         int tgl_fd;
242
243         char *device_name;
244         void *bind_display;
245 };
246
247 char *STR_DEVICE[] = {
248         "DEF",
249         "CPU",
250         "2D",
251         "3D",
252         "MM"
253 };
254
255 char *STR_OPT[] = {
256         "NONE",
257         "RD",
258         "WR",
259         "RDWR"
260 };
261
262
263 uint32_t tbm_exynos_color_format_list[TBM_COLOR_FORMAT_COUNT] = {
264                                                                                 TBM_FORMAT_ARGB8888,
265                                                                                 TBM_FORMAT_XRGB8888,
266                                                                                 TBM_FORMAT_NV12,
267                                                                                 TBM_FORMAT_YUV420
268                                                                         };
269
270 #ifdef TGL_GET_VERSION
271 static inline int
272 _tgl_get_version(int fd)
273 {
274         struct tgl_ver_data data;
275         int err;
276         char buf[STRERR_BUFSIZE];
277
278         err = ioctl(fd, TGL_IOCTL_GET_VERSION, &data);
279         if (err) {
280                 TBM_EXYNOS_ERROR("error(%s)\n", strerror_r(errno, buf, STRERR_BUFSIZE));
281                 return 0;
282         }
283
284         TBM_EXYNOS_DEBUG("tgl version is (%u, %u).\n", data.major, data.minor);
285
286         return 1;
287 }
288 #endif
289
290 static inline int
291 _tgl_init(int fd, unsigned int key)
292 {
293         struct tgl_reg_data data;
294         int err;
295         char buf[STRERR_BUFSIZE];
296
297         data.key = key;
298         data.timeout_ms = 1000;
299
300         err = ioctl(fd, TGL_IOCTL_REGISTER, &data);
301         if (err) {
302                 TBM_EXYNOS_ERROR("error(%s) key:%d\n", strerror_r(errno, buf, STRERR_BUFSIZE), key);
303                 return 0;
304         }
305
306         return 1;
307 }
308
309 static inline int
310 _tgl_destroy(int fd, unsigned int key)
311 {
312         struct tgl_reg_data data;
313         int err;
314         char buf[STRERR_BUFSIZE];
315
316         data.key = key;
317         err = ioctl(fd, TGL_IOCTL_UNREGISTER, &data);
318         if (err) {
319                 TBM_EXYNOS_ERROR("error(%s) key:%d\n", strerror_r(errno, buf, STRERR_BUFSIZE), key);
320                 return 0;
321         }
322
323         return 1;
324 }
325
326 static inline int
327 _tgl_lock(int fd, unsigned int key, int opt)
328 {
329         struct tgl_lock_data data;
330         enum tgl_type_data tgl_type;
331         int err;
332         char buf[STRERR_BUFSIZE];
333
334         switch (opt) {
335         case TBM_OPTION_READ:
336                 tgl_type = TGL_TYPE_READ;
337                 break;
338         case TBM_OPTION_WRITE:
339                 tgl_type = TGL_TYPE_WRITE;
340                 break;
341         default:
342                 tgl_type = TGL_TYPE_NONE;
343                 break;
344         }
345
346         data.key = key;
347         data.type = tgl_type;
348
349         err = ioctl(fd, TGL_IOCTL_LOCK, &data);
350         if (err) {
351                 TBM_EXYNOS_ERROR("error(%s) key:%d opt:%d\n",
352                         strerror_r(errno, buf, STRERR_BUFSIZE), key, opt);
353                 return 0;
354         }
355
356         return 1;
357 }
358
359 static inline int
360 _tgl_unlock(int fd, unsigned int key)
361 {
362         struct tgl_lock_data data;
363         int err;
364         char buf[STRERR_BUFSIZE];
365
366         data.key = key;
367         data.type = TGL_TYPE_NONE;
368
369         err = ioctl(fd, TGL_IOCTL_UNLOCK, &data);
370         if (err) {
371                 TBM_EXYNOS_ERROR("error(%s) key:%d\n",
372                         strerror_r(errno, buf, STRERR_BUFSIZE), key);
373                 return 0;
374         }
375
376         return 1;
377 }
378
379 static inline int
380 _tgl_set_data(int fd, unsigned int key, unsigned int val)
381 {
382         struct tgl_usr_data data;
383         int err;
384         char buf[STRERR_BUFSIZE];
385
386         data.key = key;
387         data.data1 = val;
388
389         err = ioctl(fd, TGL_IOCTL_SET_DATA, &data);
390         if (err) {
391                 TBM_EXYNOS_ERROR("error(%s) key:%d\n",
392                         strerror_r(errno, buf, STRERR_BUFSIZE), key);
393                 return 0;
394         }
395
396         return 1;
397 }
398
399 static inline unsigned int
400 _tgl_get_data(int fd, unsigned int key)
401 {
402         struct tgl_usr_data data = { 0, };
403         int err;
404         char buf[STRERR_BUFSIZE];
405
406         data.key = key;
407
408         err = ioctl(fd, TGL_IOCTL_GET_DATA, &data);
409         if (err) {
410                 TBM_EXYNOS_ERROR("error(%s) key:%d\n",
411                         strerror_r(errno, buf, STRERR_BUFSIZE), key);
412                 return 0;
413         }
414
415         return data.data1;
416 }
417
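/* Issue a cache maintenance request through DRM_EXYNOS_GEM_CACHE_OP.
 * With a bo the operation covers its mapped range; with bo_exynos == NULL
 * a flush-all is forced. When dma-fence is used the kernel already keeps
 * the caches coherent, so this is a no-op that reports success. */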
418 static int
419 _exynos_cache_flush(tbm_bufmgr_exynos bufmgr_exynos, tbm_bo_exynos bo_exynos, int flags)
420 {
421         EXYNOS_RETURN_VAL_IF_FAIL(bufmgr_exynos != NULL, 0);
422
423         /* cache flush is managed by kernel side when using dma-fence. */
424         if (bufmgr_exynos->use_dma_fence)
425                 return 1;
426
427         struct drm_exynos_gem_cache_op cache_op = {0, };
428         int ret;
429
430         /* if bo_exynos is null, do cache_flush_all */
431         if (bo_exynos) {
432                 cache_op.flags = 0;
433                 cache_op.usr_addr = (uint64_t)((uint32_t)bo_exynos->pBase);
434                 cache_op.size = bo_exynos->size;
435         } else {
436                 flags = TBM_EXYNOS_CACHE_FLUSH_ALL;
437                 cache_op.flags = 0;
438                 cache_op.usr_addr = 0;
439                 cache_op.size = 0;
440         }
441
442         if (flags & TBM_EXYNOS_CACHE_INV) {
443                 if (flags & TBM_EXYNOS_CACHE_ALL)
444                         cache_op.flags |= EXYNOS_DRM_CACHE_INV_ALL;
445                 else
446                         cache_op.flags |= EXYNOS_DRM_CACHE_INV_RANGE;
447         }
448
449         if (flags & TBM_EXYNOS_CACHE_CLN) {
450                 if (flags & TBM_EXYNOS_CACHE_ALL)
451                         cache_op.flags |= EXYNOS_DRM_CACHE_CLN_ALL;
452                 else
453                         cache_op.flags |= EXYNOS_DRM_CACHE_CLN_RANGE;
454         }
455
456         if (flags & TBM_EXYNOS_CACHE_ALL)
457                 cache_op.flags |= EXYNOS_DRM_ALL_CACHES_CORES;
458
459         ret = drmCommandWriteRead(bufmgr_exynos->fd, DRM_EXYNOS_GEM_CACHE_OP, &cache_op,
460                                   sizeof(cache_op));
461         if (ret) {
462                 TBM_EXYNOS_ERROR("fail to flush the cache.\n");
463                 return 0;
464         }
465
466         return 1;
467 }
468
469 static int
470 _bo_init_cache_state(tbm_bufmgr_exynos bufmgr_exynos, tbm_bo_exynos bo_exynos, int import)
471 {
472         /* check whether cache control should be done */
473         if (!g_enable_cache_ctrl)
474                 return 1;
475
476         EXYNOS_RETURN_VAL_IF_FAIL(bufmgr_exynos != NULL, 0);
477         EXYNOS_RETURN_VAL_IF_FAIL(bo_exynos != NULL, 0);
478
479         if (bufmgr_exynos->use_dma_fence)
480                 return 1;
481
482         _tgl_init(bufmgr_exynos->tgl_fd, bo_exynos->name);
483
484         tbm_bo_cache_state cache_state;
485
486         if (import == 0) {
487                 cache_state.data.isDirtied = DEVICE_NONE;
488                 cache_state.data.isCached = 0;
489                 cache_state.data.cntFlush = 0;
490
491                 _tgl_set_data(bufmgr_exynos->tgl_fd, bo_exynos->name, cache_state.val);
492         }
493
494         return 1;
495 }
496
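/* Update the shared cache state when a bo is about to be mapped and decide
 * whether an invalidate (CPU reading after a DMA write) or a clean-all
 * (a DMA device reading after an unflushed CPU write) is required. */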
497 static int
498 _bo_set_cache_state(tbm_bufmgr_exynos bufmgr_exynos, tbm_bo_exynos bo_exynos, int device, int opt)
499 {
500         /* check whether cache control should be done */
501         if (!g_enable_cache_ctrl)
502                 return 1;
503
504         EXYNOS_RETURN_VAL_IF_FAIL(bufmgr_exynos != NULL, 0);
505         EXYNOS_RETURN_VAL_IF_FAIL(bo_exynos != NULL, 0);
506
507         if (bufmgr_exynos->use_dma_fence)
508                 return 1;
509
510         char need_flush = 0;
511         unsigned short cntFlush = 0;
512
513         if (!(bo_exynos->flags_exynos & EXYNOS_BO_CACHABLE))
514                 return 1;
515
516         /* get cache state of a bo */
517         bo_exynos->cache_state.val = _tgl_get_data(bufmgr_exynos->tgl_fd,
518                                      bo_exynos->name);
519
520         /* get global cache flush count */
521         cntFlush = (unsigned short)_tgl_get_data(bufmgr_exynos->tgl_fd, GLOBAL_KEY);
522
523         if (device == TBM_DEVICE_CPU) {
524                 if (bo_exynos->cache_state.data.isDirtied == DEVICE_CO &&
525                     bo_exynos->cache_state.data.isCached)
526                         need_flush = TBM_EXYNOS_CACHE_INV;
527
528                 bo_exynos->cache_state.data.isCached = 1;
529                 if (opt & TBM_OPTION_WRITE)
530                         bo_exynos->cache_state.data.isDirtied = DEVICE_CA;
531                 else {
532                         if (bo_exynos->cache_state.data.isDirtied != DEVICE_CA)
533                                 bo_exynos->cache_state.data.isDirtied = DEVICE_NONE;
534                 }
535         } else {
536                 if (bo_exynos->cache_state.data.isDirtied == DEVICE_CA &&
537                     bo_exynos->cache_state.data.isCached &&
538                     bo_exynos->cache_state.data.cntFlush == cntFlush)
539                         need_flush = TBM_EXYNOS_CACHE_CLN | TBM_EXYNOS_CACHE_ALL;
540
541                 if (opt & TBM_OPTION_WRITE)
542                         bo_exynos->cache_state.data.isDirtied = DEVICE_CO;
543                 else {
544                         if (bo_exynos->cache_state.data.isDirtied != DEVICE_CO)
545                                 bo_exynos->cache_state.data.isDirtied = DEVICE_NONE;
546                 }
547         }
548
549         if (need_flush) {
550                 if (need_flush & TBM_EXYNOS_CACHE_ALL)
551                         _tgl_set_data(bufmgr_exynos->tgl_fd, GLOBAL_KEY, (unsigned int)(++cntFlush));
552
553                 /* call cache flush */
554                 _exynos_cache_flush(bufmgr_exynos, bo_exynos, need_flush);
555
556                 TBM_EXYNOS_DEBUG(" \tcache(%d,%d)....flush:0x%x, cntFlush(%d)\n",
557                     bo_exynos->cache_state.data.isCached,
558                     bo_exynos->cache_state.data.isDirtied,
559                     need_flush,
560                     cntFlush);
561         }
562
563         return 1;
564 }
565
566 static int
567 _bo_save_cache_state(tbm_bufmgr_exynos bufmgr_exynos, tbm_bo_exynos bo_exynos)
568 {
569         /* check whether cache control should be done */
570         if (!g_enable_cache_ctrl)
571                 return 1;
572
573         EXYNOS_RETURN_VAL_IF_FAIL(bufmgr_exynos != NULL, 0);
574         EXYNOS_RETURN_VAL_IF_FAIL(bo_exynos != NULL, 0);
575
576         if (bufmgr_exynos->use_dma_fence)
577                 return 1;
578
579         unsigned short cntFlush = 0;
580
581         /* get global cache flush count */
582         cntFlush = (unsigned short)_tgl_get_data(bufmgr_exynos->tgl_fd, GLOBAL_KEY);
583
584         /* save global cache flush count */
585         bo_exynos->cache_state.data.cntFlush = cntFlush;
586         _tgl_set_data(bufmgr_exynos->tgl_fd, bo_exynos->name,
587                       bo_exynos->cache_state.val);
588
589         return 1;
590 }
591
592 static void
593 _bo_destroy_cache_state(tbm_bufmgr_exynos bufmgr_exynos, tbm_bo_exynos bo_exynos)
594 {
595         /* check whether cache control should be done */
596         if (!g_enable_cache_ctrl)
597                 return;
598
599         EXYNOS_RETURN_IF_FAIL(bufmgr_exynos != NULL);
600         EXYNOS_RETURN_IF_FAIL(bo_exynos != NULL);
601
602         if (bufmgr_exynos->use_dma_fence)
603                 return;
604
605         _tgl_destroy(bufmgr_exynos->tgl_fd, bo_exynos->name);
606 }
607
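/* Open the tgl device that backs the shared cache-state data and register
 * the GLOBAL_KEY entry holding the system-wide flush-all counter. */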
608 static int
609 _bufmgr_init_cache_state(tbm_bufmgr_exynos bufmgr_exynos)
610 {
611         /* check whether cache control should be done */
612         if (!g_enable_cache_ctrl)
613                 return 1;
614
615         EXYNOS_RETURN_VAL_IF_FAIL(bufmgr_exynos != NULL, 0);
616
617         if (bufmgr_exynos->use_dma_fence)
618                 return 1;
619
620         /* open tgl fd for saving cache flush data */
621         bufmgr_exynos->tgl_fd = open(tgl_devfile, O_RDWR);
622
623         if (bufmgr_exynos->tgl_fd < 0) {
624                 bufmgr_exynos->tgl_fd = open(tgl_devfile1, O_RDWR);
625                 if (bufmgr_exynos->tgl_fd < 0) {
626                         TBM_EXYNOS_ERROR("fail to open global_lock:%s\n",
627                                        tgl_devfile1);
628                         return 0;
629                 }
630         }
631
632 #ifdef TGL_GET_VERSION
633         if (!_tgl_get_version(bufmgr_exynos->tgl_fd)) {
634                 TBM_EXYNOS_ERROR("fail to get tgl_version. tgl init failed.\n");
635                 close(bufmgr_exynos->tgl_fd);
636                 return 0;
637         }
638 #endif
639
640         if (!_tgl_init(bufmgr_exynos->tgl_fd, GLOBAL_KEY)) {
641                 TBM_EXYNOS_ERROR("fail to initialize the tgl\n");
642                 close(bufmgr_exynos->tgl_fd);
643                 return 0;
644         }
645
646         return 1;
647 }
648
649 static void
650 _bufmgr_deinit_cache_state(tbm_bufmgr_exynos bufmgr_exynos)
651 {
652         /* check whether cache control should be done */
653         if (!g_enable_cache_ctrl)
654                 return;
655
656         EXYNOS_RETURN_IF_FAIL(bufmgr_exynos != NULL);
657
658         if (bufmgr_exynos->use_dma_fence)
659                 return;
660
661         if (bufmgr_exynos->tgl_fd >= 0)
662                 close(bufmgr_exynos->tgl_fd);
663 }
664
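/* Open the exynos DRM device: try drmOpen() first and, if that fails, walk
 * the udev "drm" subsystem for a card node whose parent is "exynos-drm". */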
665 static int
666 _tbm_exynos_open_drm()
667 {
668         int fd = -1;
669
670         fd = drmOpen(EXYNOS_DRM_NAME, NULL);
671         if (fd < 0) {
672                 TBM_EXYNOS_ERROR("fail to open drm.(%s)\n", EXYNOS_DRM_NAME);
673         }
674
675         if (fd < 0) {
676                 struct udev *udev = NULL;
677                 struct udev_enumerate *e = NULL;
678                 struct udev_list_entry *entry = NULL;
679                 struct udev_device *device = NULL, *drm_device = NULL, *device_parent = NULL;
680                 const char *filepath;
681                 struct stat s;
682                 int ret;
683
684                 TBM_EXYNOS_DEBUG("search drm-device by udev\n");
685
686                 udev = udev_new();
687                 if (!udev) {
688                         TBM_EXYNOS_ERROR("udev_new() failed.\n");
689                         return -1;
690                 }
691
692                 e = udev_enumerate_new(udev);
693                 udev_enumerate_add_match_subsystem(e, "drm");
694                 udev_enumerate_add_match_sysname(e, "card[0-9]*");
695                 udev_enumerate_scan_devices(e);
696
697                 udev_list_entry_foreach(entry, udev_enumerate_get_list_entry(e)) {
698                         device = udev_device_new_from_syspath(udev_enumerate_get_udev(e),
699                                                               udev_list_entry_get_name(entry));
700                         device_parent = udev_device_get_parent(device);
701                         /* No need to unref device_parent; it shares the refcount of device */
702                         if (device_parent) {
703                                 if (strcmp(udev_device_get_sysname(device_parent), "exynos-drm") == 0) {
704                                         drm_device = device;
705                                         TBM_EXYNOS_DEBUG("[%s] Found drm device: '%s' (%s)\n",
706                                             target_name(),
707                                             udev_device_get_syspath(drm_device),
708                                             udev_device_get_sysname(device_parent));
709                                         break;
710                                 }
711                         }
712                         udev_device_unref(device);
713                 }
714
715                 udev_enumerate_unref(e);
716
717                 /* Get device file path. */
718                 filepath = udev_device_get_devnode(drm_device);
719                 if (!filepath) {
720                         TBM_EXYNOS_ERROR("udev_device_get_devnode() failed.\n");
721                         udev_device_unref(drm_device);
722                         udev_unref(udev);
723                         return -1;
724                 }
725
726                 /* Open DRM device file and check validity. */
727                 fd = open(filepath, O_RDWR | O_CLOEXEC);
728                 if (fd < 0) {
729                         TBM_EXYNOS_ERROR("open(%s, O_RDWR | O_CLOEXEC) failed.\n", filepath);
730                         udev_device_unref(drm_device);
731                         udev_unref(udev);
732                         return -1;
733                 }
734
735                 ret = fstat(fd, &s);
736                 if (ret) {
737                         TBM_EXYNOS_ERROR("fstat() failed.\n");
738                         close(fd);
739                         udev_device_unref(drm_device);
740                         udev_unref(udev);
741                         return -1;
742                 }
743
744                 udev_device_unref(drm_device);
745                 udev_unref(udev);
746         }
747
748         return fd;
749 }
750
751 static int
752 _check_render_node(void)
753 {
754         struct udev *udev = NULL;
755         struct udev_enumerate *e = NULL;
756         struct udev_list_entry *entry = NULL;
757         struct udev_device *device = NULL, *drm_device = NULL, *device_parent = NULL;
758
759 #ifndef USE_RENDER_NODE
760         return 0;
761 #endif
762
763         udev = udev_new();
764         if (!udev) {
765                 TBM_EXYNOS_ERROR("udev_new() failed.\n");
766                 return -1;
767         }
768
769         e = udev_enumerate_new(udev);
770         udev_enumerate_add_match_subsystem(e, "drm");
771         udev_enumerate_add_match_sysname(e, "renderD[0-9]*");
772         udev_enumerate_scan_devices(e);
773
774         udev_list_entry_foreach(entry, udev_enumerate_get_list_entry(e)) {
775                 device = udev_device_new_from_syspath(udev_enumerate_get_udev(e),
776                                                       udev_list_entry_get_name(entry));
777                 device_parent = udev_device_get_parent(device);
778                 /* No need to unref device_parent; it shares the refcount of device */
779                 if (device_parent) {
780                         if (strcmp(udev_device_get_sysname(device_parent), "exynos-drm") == 0) {
781                                 drm_device = device;
782                                 TBM_EXYNOS_DEBUG("Found render device: '%s' (%s)\n",
783                                     udev_device_get_syspath(drm_device),
784                                     udev_device_get_sysname(device_parent));
785                                 break;
786                         }
787                 }
788                 udev_device_unref(device);
789         }
790
791         udev_enumerate_unref(e);
792         udev_unref(udev);
793
794         if (!drm_device) {
795                 udev_device_unref(drm_device);
796                 return 0;
797         }
798
799         udev_device_unref(drm_device);
800         return 1;
801 }
802
803 static int
804 _get_render_node(void)
805 {
806         struct udev *udev = NULL;
807         struct udev_enumerate *e = NULL;
808         struct udev_list_entry *entry = NULL;
809         struct udev_device *device = NULL, *drm_device = NULL, *device_parent = NULL;
810         const char *filepath;
811         struct stat s;
812         int fd = -1;
813         int ret;
814
815         udev = udev_new();
816         if (!udev) {
817                 TBM_EXYNOS_ERROR("udev_new() failed.\n");
818                 return -1;
819         }
820
821         e = udev_enumerate_new(udev);
822         udev_enumerate_add_match_subsystem(e, "drm");
823         udev_enumerate_add_match_sysname(e, "renderD[0-9]*");
824         udev_enumerate_scan_devices(e);
825
826         udev_list_entry_foreach(entry, udev_enumerate_get_list_entry(e)) {
827                 device = udev_device_new_from_syspath(udev_enumerate_get_udev(e),
828                                                       udev_list_entry_get_name(entry));
829                 device_parent = udev_device_get_parent(device);
830                 /* No need to unref device_parent; it shares the refcount of device */
831                 if (device_parent) {
832                         if (strcmp(udev_device_get_sysname(device_parent), "exynos-drm") == 0) {
833                                 drm_device = device;
834                                 TBM_EXYNOS_DEBUG("Found render device: '%s' (%s)\n",
835                                     udev_device_get_syspath(drm_device),
836                                     udev_device_get_sysname(device_parent));
837                                 break;
838                         }
839                 }
840                 udev_device_unref(device);
841         }
842
843         udev_enumerate_unref(e);
844
845         /* Get device file path. */
846         filepath = udev_device_get_devnode(drm_device);
847         if (!filepath) {
848                 TBM_EXYNOS_ERROR("udev_device_get_devnode() failed.\n");
849                 udev_device_unref(drm_device);
850                 udev_unref(udev);
851                 return -1;
852         }
853
854         /* Open DRM device file and check validity. */
855         fd = open(filepath, O_RDWR | O_CLOEXEC);
856         if (fd < 0) {
857                 TBM_EXYNOS_ERROR("open(%s, O_RDWR | O_CLOEXEC) failed.\n", filepath);
858                 udev_device_unref(drm_device);
859                 udev_unref(udev);
860                 return -1;
861         }
862
863         ret = fstat(fd, &s);
864         if (ret) {
865                 TBM_EXYNOS_ERROR("fstat() failed.\n");
866                 udev_device_unref(drm_device);
867                 udev_unref(udev);
868                 close(fd);
869                 return -1;
870         }
871
872         udev_device_unref(drm_device);
873         udev_unref(udev);
874
875         return fd;
876 }
877
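/* Translate TBM bo flags into exynos GEM flags: SCANOUT buffers must be
 * physically contiguous, and the caching mode falls back to CACHABLE when
 * neither WC nor NONCACHABLE is requested. */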
878 static unsigned int
879 _get_exynos_flag_from_tbm(unsigned int ftbm)
880 {
881         unsigned int flags = 0;
882
883         if (ftbm & TBM_BO_SCANOUT)
884                 flags |= EXYNOS_BO_CONTIG;
885         else
886                 flags |= EXYNOS_BO_NONCONTIG;
887
888         if (ftbm & TBM_BO_WC)
889                 flags |= EXYNOS_BO_WC;
890         else if (ftbm & TBM_BO_NONCACHABLE)
891                 flags |= EXYNOS_BO_NONCACHABLE;
892         else
893                 flags |= EXYNOS_BO_CACHABLE;
894
895         return flags;
896 }
897
898 static unsigned int
899 _get_tbm_flag_from_exynos(unsigned int fexynos)
900 {
901         unsigned int flags = 0;
902
903         if (fexynos & EXYNOS_BO_NONCONTIG)
904                 flags |= TBM_BO_DEFAULT;
905         else
906                 flags |= TBM_BO_SCANOUT;
907
908         if (fexynos & EXYNOS_BO_WC)
909                 flags |= TBM_BO_WC;
910         else if (fexynos & EXYNOS_BO_CACHABLE)
911                 flags |= TBM_BO_DEFAULT;
912         else
913                 flags |= TBM_BO_NONCACHABLE;
914
915         return flags;
916 }
917
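/* Resolve the global flink name of a GEM handle; returns 0 on failure. */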
918 static unsigned int
919 _get_name(int fd, unsigned int gem)
920 {
921         struct drm_gem_flink arg = {0,};
922
923         arg.handle = gem;
924         if (drmIoctl(fd, DRM_IOCTL_GEM_FLINK, &arg)) {
925                 TBM_EXYNOS_ERROR("fail to DRM_IOCTL_GEM_FLINK gem:%d\n", gem);
926                 return 0;
927         }
928
929         return (unsigned int)arg.name;
930 }
931
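/* Return the device-specific handle of a bo: the GEM handle for DEFAULT/2D,
 * a lazily created CPU mapping (DRM_EXYNOS_GEM_MAP + mmap) for CPU, and a
 * lazily exported PRIME dma-buf fd for 3D/MM. */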
932 static tbm_bo_handle
933 _exynos_bo_handle(tbm_bo_exynos bo_exynos, int device)
934 {
935         tbm_bo_handle bo_handle;
936
937         memset(&bo_handle, 0x0, sizeof(uint64_t));
938
939         switch (device) {
940         case TBM_DEVICE_DEFAULT:
941         case TBM_DEVICE_2D:
942                 bo_handle.u32 = (uint32_t)bo_exynos->gem;
943                 break;
944         case TBM_DEVICE_CPU:
945                 if (!bo_exynos->pBase) {
946                         struct drm_exynos_gem_map arg = {0,};
947                         void *map = NULL;
948
949                         arg.handle = bo_exynos->gem;
950                         if (drmCommandWriteRead(bo_exynos->fd, DRM_EXYNOS_GEM_MAP, &arg,
951                                                 sizeof(arg))) {
952                                 TBM_EXYNOS_ERROR("Cannot map_dumb gem=%d\n", bo_exynos->gem);
953                                 return (tbm_bo_handle) NULL;
954                         }
955
956                         map = mmap(NULL, bo_exynos->size, PROT_READ | PROT_WRITE, MAP_SHARED,
957                                    bo_exynos->fd, arg.offset);
958                         if (map == MAP_FAILED) {
959                                 TBM_EXYNOS_ERROR("Cannot usrptr gem=%d\n", bo_exynos->gem);
960                                 return (tbm_bo_handle) NULL;
961                         }
962                         bo_exynos->pBase = map;
963                 }
964                 bo_handle.ptr = (void *)bo_exynos->pBase;
965                 break;
966         case TBM_DEVICE_3D:
967 #ifdef USE_DMAIMPORT
968                 if (bo_exynos->dmabuf) {
969                         bo_handle.u32 = (uint32_t)bo_exynos->dmabuf;
970                         break;
971                 }
972
973                 if (!bo_exynos->dmabuf) {
974                         struct drm_prime_handle arg = {0, };
975
976                         arg.handle = bo_exynos->gem;
977                         if (drmIoctl(bo_exynos->fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &arg)) {
978                                 TBM_EXYNOS_ERROR("Cannot dmabuf=%d\n", bo_exynos->gem);
979                                 return (tbm_bo_handle) NULL;
980                         }
981                         bo_exynos->dmabuf = arg.fd;
982                 }
983
984                 bo_handle.u32 = (uint32_t)bo_exynos->dmabuf;
985 #endif
986                 break;
987         case TBM_DEVICE_MM:
988                 if (!bo_exynos->dmabuf) {
989                         struct drm_prime_handle arg = {0, };
990
991                         arg.handle = bo_exynos->gem;
992                         if (drmIoctl(bo_exynos->fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &arg)) {
993                                 TBM_EXYNOS_ERROR("Cannot dmabuf=%d\n", bo_exynos->gem);
994                                 return (tbm_bo_handle) NULL;
995                         }
996                         bo_exynos->dmabuf = arg.fd;
997                 }
998
999                 bo_handle.u32 = (uint32_t)bo_exynos->dmabuf;
1000                 break;
1001         default:
1002                 TBM_EXYNOS_ERROR("Not supported device:%d\n", device);
1003                 bo_handle.ptr = (void *) NULL;
1004                 break;
1005         }
1006
1007         return bo_handle;
1008 }
1009
1010 static int
1011 tbm_exynos_bo_size(tbm_bo bo)
1012 {
1013         EXYNOS_RETURN_VAL_IF_FAIL(bo != NULL, 0);
1014
1015         tbm_bo_exynos bo_exynos;
1016
1017         bo_exynos = (tbm_bo_exynos)tbm_backend_get_bo_priv(bo);
1018         EXYNOS_RETURN_VAL_IF_FAIL(bo_exynos != NULL, 0);
1019
1020         return bo_exynos->size;
1021 }
1022
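/* bo_alloc backend entry: create a GEM object with DRM_EXYNOS_GEM_CREATE,
 * set up its cache state and (when dma-fence is used) its dma-buf fd, and
 * register the bo in the flink-name hash for later imports. */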
1023 static void *
1024 tbm_exynos_bo_alloc(tbm_bo bo, int size, int flags)
1025 {
1026         EXYNOS_RETURN_VAL_IF_FAIL(bo != NULL, 0);
1027
1028         tbm_bo_exynos bo_exynos;
1029         tbm_bufmgr_exynos bufmgr_exynos;
1030         unsigned int exynos_flags;
1031
1032         bufmgr_exynos = (tbm_bufmgr_exynos)tbm_backend_get_bufmgr_priv(bo);
1033         EXYNOS_RETURN_VAL_IF_FAIL(bufmgr_exynos != NULL, 0);
1034
1035         bo_exynos = calloc(1, sizeof(struct _tbm_bo_exynos));
1036         if (!bo_exynos) {
1037                 TBM_EXYNOS_ERROR("fail to allocate the bo private\n");
1038                 return 0;
1039         }
1040
1041         exynos_flags = _get_exynos_flag_from_tbm(flags);
1042         if ((flags & TBM_BO_SCANOUT) &&
1043             size <= 4 * 1024) {
1044                 exynos_flags |= EXYNOS_BO_NONCONTIG;
1045         }
1046
1047         struct drm_exynos_gem_create arg = {0, };
1048
1049         arg.size = (uint64_t)size;
1050         arg.flags = exynos_flags;
1051         if (drmCommandWriteRead(bufmgr_exynos->fd, DRM_EXYNOS_GEM_CREATE, &arg,
1052                                 sizeof(arg))) {
1053                 TBM_EXYNOS_ERROR("Cannot create bo(flag:%x, size:%d)\n", arg.flags,
1054                                (unsigned int)arg.size);
1055                 free(bo_exynos);
1056                 return 0;
1057         }
1058
1059         bo_exynos->fd = bufmgr_exynos->fd;
1060         bo_exynos->gem = arg.handle;
1061         bo_exynos->size = size;
1062         bo_exynos->flags_tbm = flags;
1063         bo_exynos->flags_exynos = exynos_flags;
1064         bo_exynos->name = _get_name(bo_exynos->fd, bo_exynos->gem);
1065
1066         if (!_bo_init_cache_state(bufmgr_exynos, bo_exynos, 0)) {
1067                 TBM_EXYNOS_ERROR("fail init cache state(%d)\n", bo_exynos->name);
1068                 free(bo_exynos);
1069                 return 0;
1070         }
1071
1072         pthread_mutex_init(&bo_exynos->mutex, NULL);
1073
1074         if (bufmgr_exynos->use_dma_fence
1075             && !bo_exynos->dmabuf) {
1076                 struct drm_prime_handle arg = {0, };
1077
1078                 arg.handle = bo_exynos->gem;
1079                 if (drmIoctl(bo_exynos->fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &arg)) {
1080                         TBM_EXYNOS_ERROR("Cannot dmabuf=%d\n", bo_exynos->gem);
1081                         free(bo_exynos);
1082                         return 0;
1083                 }
1084                 bo_exynos->dmabuf = arg.fd;
1085         }
1086
1087         if (drmHashInsert(bufmgr_exynos->hashBos, bo_exynos->name,
1088                           (void *)bo_exynos) < 0) {
1089                 TBM_EXYNOS_ERROR("Cannot insert bo to Hash(%d)\n", bo_exynos->name);
1090         }
1091
1092         TBM_EXYNOS_DEBUG("     bo:%p, gem:%d(%d), flags:%d(%d), size:%d\n",
1093             bo,
1094             bo_exynos->gem, bo_exynos->name,
1095             flags, exynos_flags,
1096             bo_exynos->size);
1097
1098         return (void *)bo_exynos;
1099 }
1100
1101 static void
1102 tbm_exynos_bo_free(tbm_bo bo)
1103 {
1104         tbm_bo_exynos bo_exynos;
1105         tbm_bufmgr_exynos bufmgr_exynos;
1106         char buf[STRERR_BUFSIZE];
1107
1108         if (!bo)
1109                 return;
1110
1111         bufmgr_exynos = (tbm_bufmgr_exynos)tbm_backend_get_bufmgr_priv(bo);
1112         EXYNOS_RETURN_IF_FAIL(bufmgr_exynos != NULL);
1113
1114         bo_exynos = (tbm_bo_exynos)tbm_backend_get_bo_priv(bo);
1115         EXYNOS_RETURN_IF_FAIL(bo_exynos != NULL);
1116
1117         TBM_EXYNOS_DEBUG("      bo:%p, gem:%d(%d), fd:%d, size:%d\n",
1118             bo,
1119             bo_exynos->gem, bo_exynos->name,
1120             bo_exynos->dmabuf,
1121             bo_exynos->size);
1122
1123         if (bo_exynos->pBase) {
1124                 if (munmap(bo_exynos->pBase, bo_exynos->size) == -1) {
1125                         TBM_EXYNOS_ERROR("bo:%p fail to munmap(%s)\n",
1126                                        bo, strerror_r(errno, buf, STRERR_BUFSIZE));
1127                 }
1128         }
1129
1130         /* close dmabuf */
1131         if (bo_exynos->dmabuf) {
1132                 close(bo_exynos->dmabuf);
1133                 bo_exynos->dmabuf = 0;
1134         }
1135
1136         /* delete bo from hash */
1137         int ret;
1138
1139         ret = drmHashLookup(bufmgr_exynos->hashBos, bo_exynos->name,
1140                              (void **)&bo_exynos);
1141         if (ret == 0) {
1142                 drmHashDelete(bufmgr_exynos->hashBos, bo_exynos->name);
1143         } else {
1144                 TBM_EXYNOS_ERROR("Cannot find bo to Hash(%d), ret=%d\n",
1145                         bo_exynos->name, ret);
1146         }
1147
1148         _bo_destroy_cache_state(bufmgr_exynos, bo_exynos);
1149
1150         /* Free gem handle */
1151         struct drm_gem_close arg = {0, };
1152
1153         memset(&arg, 0, sizeof(arg));
1154         arg.handle = bo_exynos->gem;
1155         if (drmIoctl(bo_exynos->fd, DRM_IOCTL_GEM_CLOSE, &arg)) {
1156                 TBM_EXYNOS_ERROR("bo:%p fail to gem close.(%s)\n",
1157                                bo, strerror_r(errno, buf, STRERR_BUFSIZE));
1158         }
1159
1160         free(bo_exynos);
1161 }
1162
1163
1164 static void *
1165 tbm_exynos_bo_import(tbm_bo bo, unsigned int key)
1166 {
1167         EXYNOS_RETURN_VAL_IF_FAIL(bo != NULL, 0);
1168
1169         tbm_bufmgr_exynos bufmgr_exynos;
1170         tbm_bo_exynos bo_exynos;
1171         int ret;
1172
1173         bufmgr_exynos = (tbm_bufmgr_exynos)tbm_backend_get_bufmgr_priv(bo);
1174         EXYNOS_RETURN_VAL_IF_FAIL(bufmgr_exynos != NULL, 0);
1175
1176         ret = drmHashLookup(bufmgr_exynos->hashBos, key, (void **)&bo_exynos);
1177         if (ret == 0)
1178                 return bo_exynos;
1179
1180         struct drm_gem_open arg = {0, };
1181         struct drm_exynos_gem_info info = {0, };
1182
1183         arg.name = key;
1184         if (drmIoctl(bufmgr_exynos->fd, DRM_IOCTL_GEM_OPEN, &arg)) {
1185                 TBM_EXYNOS_ERROR("Cannot open gem name=%d\n", key);
1186                 return 0;
1187         }
1188
1189         info.handle = arg.handle;
1190         if (drmCommandWriteRead(bufmgr_exynos->fd,
1191                                 DRM_EXYNOS_GEM_GET,
1192                                 &info,
1193                                 sizeof(struct drm_exynos_gem_info))) {
1194                 TBM_EXYNOS_ERROR("Cannot get gem info=%d\n", key);
1195                 return 0;
1196         }
1197
1198         bo_exynos = calloc(1, sizeof(struct _tbm_bo_exynos));
1199         if (!bo_exynos) {
1200                 TBM_EXYNOS_ERROR("fail to allocate the bo private\n");
1201                 return 0;
1202         }
1203
1204         bo_exynos->fd = bufmgr_exynos->fd;
1205         bo_exynos->gem = arg.handle;
1206         bo_exynos->size = arg.size;
1207         bo_exynos->flags_exynos = info.flags;
1208         bo_exynos->name = key;
1209         bo_exynos->flags_tbm = _get_tbm_flag_from_exynos(bo_exynos->flags_exynos);
1210
1211         if (!_bo_init_cache_state(bufmgr_exynos, bo_exynos, 1)) {
1212                 TBM_EXYNOS_ERROR("fail init cache state(%d)\n", bo_exynos->name);
1213                 free(bo_exynos);
1214                 return 0;
1215         }
1216
1217         if (!bo_exynos->dmabuf) {
1218                 struct drm_prime_handle arg = {0, };
1219
1220                 arg.handle = bo_exynos->gem;
1221                 if (drmIoctl(bo_exynos->fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &arg)) {
1222                         TBM_EXYNOS_ERROR("fail to DRM_IOCTL_PRIME_HANDLE_TO_FD gem=%d\n", bo_exynos->gem);
1223                         free(bo_exynos);
1224                         return 0;
1225                 }
1226                 bo_exynos->dmabuf = arg.fd;
1227         }
1228
1229         /* add bo to hash */
1230         if (drmHashInsert(bufmgr_exynos->hashBos, bo_exynos->name,
1231                            (void *)bo_exynos) < 0) {
1232                 TBM_EXYNOS_ERROR("Cannot insert bo to Hash(%d)\n", bo_exynos->name);
1233         }
1234
1235         TBM_EXYNOS_DEBUG("    bo:%p, gem:%d(%d), fd:%d, flags:%d(%d), size:%d\n",
1236             bo,
1237             bo_exynos->gem, bo_exynos->name,
1238             bo_exynos->dmabuf,
1239             bo_exynos->flags_tbm, bo_exynos->flags_exynos,
1240             bo_exynos->size);
1241
1242         return (void *)bo_exynos;
1243 }
1244
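/* bo_import_fd backend entry: convert the dma-buf fd to a GEM handle with
 * PRIME, reuse an already-imported bo found through its flink name, and
 * determine the size by lseek() on the fd, falling back to
 * DRM_EXYNOS_GEM_GET on older kernels. */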
1245 static void *
1246 tbm_exynos_bo_import_fd(tbm_bo bo, tbm_fd key)
1247 {
1248         EXYNOS_RETURN_VAL_IF_FAIL(bo != NULL, 0);
1249
1250         tbm_bufmgr_exynos bufmgr_exynos;
1251         tbm_bo_exynos bo_exynos;
1252         unsigned int name;
1253         int ret;
1254         char buf[STRERR_BUFSIZE];
1255
1256         bufmgr_exynos = (tbm_bufmgr_exynos)tbm_backend_get_bufmgr_priv(bo);
1257         EXYNOS_RETURN_VAL_IF_FAIL(bufmgr_exynos != NULL, 0);
1258
1259         /*getting handle from fd*/
1260         unsigned int gem = 0;
1261         struct drm_prime_handle arg = {0, };
1262
1263         arg.fd = key;
1264         arg.flags = 0;
1265         if (drmIoctl(bufmgr_exynos->fd, DRM_IOCTL_PRIME_FD_TO_HANDLE, &arg)) {
1266                 TBM_EXYNOS_ERROR("bo:%p Cannot get gem handle from fd:%d (%s)\n",
1267                                bo, arg.fd, strerror_r(errno, buf, STRERR_BUFSIZE));
1268                 return NULL;
1269         }
1270         gem = arg.handle;
1271
1272         name = _get_name(bufmgr_exynos->fd, gem);
1273         if (!name) {
1274                 TBM_EXYNOS_ERROR("bo:%p Cannot get name from gem:%d, fd:%d (%s)\n",
1275                                bo, gem, key, strerror_r(errno, buf, STRERR_BUFSIZE));
1276                 return 0;
1277         }
1278
1279         ret = drmHashLookup(bufmgr_exynos->hashBos, name, (void **)&bo_exynos);
1280         if (ret == 0) {
1281                 if (gem == bo_exynos->gem)
1282                         return bo_exynos;
1283         }
1284
1285         unsigned int real_size = -1;
1286         struct drm_exynos_gem_info info = {0, };
1287
1288         /* Determine size of bo.  The fd-to-handle ioctl really should
1289          * return the size, but it doesn't.  If we have kernel 3.12 or
1290          * later, we can lseek on the prime fd to get the size.  Older
1291          * kernels will just fail, in which case we fall back to the
1292          * size reported by DRM_EXYNOS_GEM_GET.
1293          */
1294         real_size = lseek(key, 0, SEEK_END);
1295
1296         info.handle = gem;
1297         if (drmCommandWriteRead(bufmgr_exynos->fd,
1298                                 DRM_EXYNOS_GEM_GET,
1299                                 &info,
1300                                 sizeof(struct drm_exynos_gem_info))) {
1301                 TBM_EXYNOS_ERROR("bo:%p Cannot get gem info from gem:%d, fd:%d (%s)\n",
1302                                bo, gem, key, strerror_r(errno, buf, STRERR_BUFSIZE));
1303                 return 0;
1304         }
1305
1306         if (real_size == -1)
1307                 real_size = info.size;
1308
1309         bo_exynos = calloc(1, sizeof(struct _tbm_bo_exynos));
1310         if (!bo_exynos) {
1311                 TBM_EXYNOS_ERROR("bo:%p fail to allocate the bo private\n", bo);
1312                 return 0;
1313         }
1314
1315         bo_exynos->fd = bufmgr_exynos->fd;
1316         bo_exynos->gem = gem;
1317         bo_exynos->size = real_size;
1318         bo_exynos->flags_exynos = info.flags;
1319         bo_exynos->flags_tbm = _get_tbm_flag_from_exynos(bo_exynos->flags_exynos);
1320         bo_exynos->name = name;
1321
1322         if (!_bo_init_cache_state(bufmgr_exynos, bo_exynos, 1)) {
1323                 TBM_EXYNOS_ERROR("fail init cache state(%d)\n", bo_exynos->name);
1324                 free(bo_exynos);
1325                 return 0;
1326         }
1327
1328         /* add bo to hash */
1329         if (drmHashInsert(bufmgr_exynos->hashBos, bo_exynos->name,
1330                            (void *)bo_exynos) < 0) {
1331                 TBM_EXYNOS_ERROR("bo:%p Cannot insert bo to Hash(%d) from gem:%d, fd:%d\n",
1332                                bo, bo_exynos->name, gem, key);
1333         }
1334
1335         TBM_EXYNOS_DEBUG(" bo:%p, gem:%d(%d), fd:%d, key_fd:%d, flags:%d(%d), size:%d\n",
1336             bo,
1337             bo_exynos->gem, bo_exynos->name,
1338             bo_exynos->dmabuf,
1339             key,
1340             bo_exynos->flags_tbm, bo_exynos->flags_exynos,
1341             bo_exynos->size);
1342
1343         return (void *)bo_exynos;
1344 }
1345
1346 static unsigned int
1347 tbm_exynos_bo_export(tbm_bo bo)
1348 {
1349         EXYNOS_RETURN_VAL_IF_FAIL(bo != NULL, 0);
1350
1351         tbm_bo_exynos bo_exynos;
1352
1353         bo_exynos = (tbm_bo_exynos)tbm_backend_get_bo_priv(bo);
1354         EXYNOS_RETURN_VAL_IF_FAIL(bo_exynos != NULL, 0);
1355
1356         if (!bo_exynos->name) {
1357                 bo_exynos->name = _get_name(bo_exynos->fd, bo_exynos->gem);
1358                 if (!bo_exynos->name) {
1359                         TBM_EXYNOS_ERROR("Cannot get name\n");
1360                         return 0;
1361                 }
1362         }
1363
1364         TBM_EXYNOS_DEBUG("    bo:%p, gem:%d(%d), fd:%d, flags:%d(%d), size:%d\n",
1365             bo,
1366             bo_exynos->gem, bo_exynos->name,
1367             bo_exynos->dmabuf,
1368             bo_exynos->flags_tbm, bo_exynos->flags_exynos,
1369             bo_exynos->size);
1370
1371         return (unsigned int)bo_exynos->name;
1372 }
1373
1374 tbm_fd
1375 tbm_exynos_bo_export_fd(tbm_bo bo)
1376 {
1377         EXYNOS_RETURN_VAL_IF_FAIL(bo != NULL, -1);
1378
1379         tbm_bo_exynos bo_exynos;
1380         int ret;
1381         char buf[STRERR_BUFSIZE];
1382
1383         bo_exynos = (tbm_bo_exynos)tbm_backend_get_bo_priv(bo);
1384         EXYNOS_RETURN_VAL_IF_FAIL(bo_exynos != NULL, -1);
1385
1386         struct drm_prime_handle arg = {0, };
1387
1388         arg.handle = bo_exynos->gem;
1389         ret = drmIoctl(bo_exynos->fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &arg);
1390         if (ret) {
1391                 TBM_EXYNOS_ERROR("bo:%p Cannot dmabuf=%d (%s)\n",
1392                                bo, bo_exynos->gem, strerror_r(errno, buf, STRERR_BUFSIZE));
1393                 return (tbm_fd) ret;
1394         }
1395
1396         TBM_EXYNOS_DEBUG(" bo:%p, gem:%d(%d), fd:%d, key_fd:%d, flags:%d(%d), size:%d\n",
1397             bo,
1398             bo_exynos->gem, bo_exynos->name,
1399             bo_exynos->dmabuf,
1400             arg.fd,
1401             bo_exynos->flags_tbm, bo_exynos->flags_exynos,
1402             bo_exynos->size);
1403
1404         return (tbm_fd)arg.fd;
1405 }
1406
1407 static tbm_bo_handle
1408 tbm_exynos_bo_get_handle(tbm_bo bo, int device)
1409 {
1410         EXYNOS_RETURN_VAL_IF_FAIL(bo != NULL, (tbm_bo_handle) NULL);
1411
1412         tbm_bo_handle bo_handle;
1413         tbm_bo_exynos bo_exynos;
1414
1415         bo_exynos = (tbm_bo_exynos)tbm_backend_get_bo_priv(bo);
1416         EXYNOS_RETURN_VAL_IF_FAIL(bo_exynos != NULL, (tbm_bo_handle) NULL);
1417
1418         if (!bo_exynos->gem) {
1419                 TBM_EXYNOS_ERROR("Cannot map gem=%d\n", bo_exynos->gem);
1420                 return (tbm_bo_handle) NULL;
1421         }
1422
1423         TBM_EXYNOS_DEBUG("bo:%p, gem:%d(%d), fd:%d, flags:%d(%d), size:%d, %s\n",
1424             bo,
1425             bo_exynos->gem, bo_exynos->name,
1426             bo_exynos->dmabuf,
1427             bo_exynos->flags_tbm, bo_exynos->flags_exynos,
1428             bo_exynos->size,
1429             STR_DEVICE[device]);
1430
1431         /*Get mapped bo_handle*/
1432         bo_handle = _exynos_bo_handle(bo_exynos, device);
1433         if (bo_handle.ptr == NULL) {
1434                 TBM_EXYNOS_ERROR("Cannot get handle: gem:%d, device:%d\n",
1435                         bo_exynos->gem, device);
1436                 return (tbm_bo_handle) NULL;
1437         }
1438
1439         return bo_handle;
1440 }
1441
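/* bo_map backend entry: hand out the device handle and, on the first
 * mapping, update the shared cache state (possibly flushing) for the
 * requested device and access options. */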
1442 static tbm_bo_handle
1443 tbm_exynos_bo_map(tbm_bo bo, int device, int opt)
1444 {
1445         EXYNOS_RETURN_VAL_IF_FAIL(bo != NULL, (tbm_bo_handle) NULL);
1446
1447         tbm_bo_handle bo_handle;
1448         tbm_bo_exynos bo_exynos;
1449         tbm_bufmgr_exynos bufmgr_exynos;
1450
1451         bufmgr_exynos = (tbm_bufmgr_exynos)tbm_backend_get_bufmgr_priv(bo);
1452         EXYNOS_RETURN_VAL_IF_FAIL(bufmgr_exynos != NULL, (tbm_bo_handle)NULL);
1453
1454         bo_exynos = (tbm_bo_exynos)tbm_backend_get_bo_priv(bo);
1455         EXYNOS_RETURN_VAL_IF_FAIL(bo_exynos != NULL, (tbm_bo_handle) NULL);
1456
1457         if (!bo_exynos->gem) {
1458                 TBM_EXYNOS_ERROR("Cannot map gem=%d\n", bo_exynos->gem);
1459                 return (tbm_bo_handle) NULL;
1460         }
1461
1462         TBM_EXYNOS_DEBUG("       bo:%p, gem:%d(%d), fd:%d, %s, %s\n",
1463             bo,
1464             bo_exynos->gem, bo_exynos->name,
1465             bo_exynos->dmabuf,
1466             STR_DEVICE[device],
1467             STR_OPT[opt]);
1468
1469         /*Get mapped bo_handle*/
1470         bo_handle = _exynos_bo_handle(bo_exynos, device);
1471         if (bo_handle.ptr == NULL) {
1472                 TBM_EXYNOS_ERROR("Cannot get handle: gem:%d, device:%d, opt:%d\n",
1473                                bo_exynos->gem, device, opt);
1474                 return (tbm_bo_handle) NULL;
1475         }
1476
1477         if (bo_exynos->map_cnt == 0)
1478                 _bo_set_cache_state(bufmgr_exynos, bo_exynos, device, opt);
1479
1480         bo_exynos->last_map_device = device;
1481
1482         bo_exynos->map_cnt++;
1483
1484         return bo_handle;
1485 }
1486
1487 static int
1488 tbm_exynos_bo_unmap(tbm_bo bo)
1489 {
1490         EXYNOS_RETURN_VAL_IF_FAIL(bo != NULL, 0);
1491
1492         tbm_bo_exynos bo_exynos;
1493         tbm_bufmgr_exynos bufmgr_exynos;
1494
1495         bufmgr_exynos = (tbm_bufmgr_exynos)tbm_backend_get_bufmgr_priv(bo);
1496         EXYNOS_RETURN_VAL_IF_FAIL(bufmgr_exynos != NULL, 0);
1497
1498         bo_exynos = (tbm_bo_exynos)tbm_backend_get_bo_priv(bo);
1499         EXYNOS_RETURN_VAL_IF_FAIL(bo_exynos != NULL, 0);
1500
1501
1502         if (!bo_exynos->gem)
1503                 return 0;
1504
1505         bo_exynos->map_cnt--;
1506
1507         if (bo_exynos->map_cnt == 0)
1508                 _bo_save_cache_state(bufmgr_exynos, bo_exynos);
1509
1510         /* check whether cache control should be done */
1511         if (g_enable_cache_ctrl && bo_exynos->last_map_device == TBM_DEVICE_CPU)
1512                 _exynos_cache_flush(bufmgr_exynos, bo_exynos, TBM_EXYNOS_CACHE_FLUSH_ALL);
1513
1514         bo_exynos->last_map_device = -1;
1515
1516         TBM_EXYNOS_DEBUG("     bo:%p, gem:%d(%d), fd:%d\n",
1517             bo,
1518             bo_exynos->gem, bo_exynos->name,
1519             bo_exynos->dmabuf);
1520
1521         return 1;
1522 }
1523
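/* bo_lock backend entry (body compiled out when ALWAYS_BACKEND_CTRL is
 * defined): 3D locks take a kernel dma-buf fence via DMABUF_IOCTL_GET_FENCE,
 * CPU locks fall back to an advisory fcntl() lock on the dma-buf fd. */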
1524 static int
1525 tbm_exynos_bo_lock(tbm_bo bo, int device, int opt)
1526 {
1527         EXYNOS_RETURN_VAL_IF_FAIL(bo != NULL, 0);
1528
1529 #ifndef ALWAYS_BACKEND_CTRL
1530         tbm_bufmgr_exynos bufmgr_exynos;
1531         tbm_bo_exynos bo_exynos;
1532         struct dma_buf_fence fence;
1533         struct flock filelock;
1534         int ret = 0;
1535         char buf[STRERR_BUFSIZE];
1536
1537         if (device != TBM_DEVICE_3D && device != TBM_DEVICE_CPU) {
1538                 TBM_EXYNOS_DEBUG("Not supported device type.\n");
1539                 return 0;
1540         }
1541
1542         bo_exynos = (tbm_bo_exynos)tbm_backend_get_bo_priv(bo);
1543         EXYNOS_RETURN_VAL_IF_FAIL(bo_exynos != NULL, 0);
1544
1545         bufmgr_exynos = (tbm_bufmgr_exynos)tbm_backend_get_bufmgr_priv(bo);
1546         EXYNOS_RETURN_VAL_IF_FAIL(bufmgr_exynos != NULL, 0);
1547
1548         memset(&fence, 0, sizeof(struct dma_buf_fence));
1549
1550         /* Check if the given type is valid or not. */
1551         if (opt & TBM_OPTION_WRITE) {
1552                 if (device == TBM_DEVICE_3D)
1553                         fence.type = DMA_BUF_ACCESS_WRITE | DMA_BUF_ACCESS_DMA;
1554         } else if (opt & TBM_OPTION_READ) {
1555                 if (device == TBM_DEVICE_3D)
1556                         fence.type = DMA_BUF_ACCESS_READ | DMA_BUF_ACCESS_DMA;
1557         } else {
1558                 TBM_EXYNOS_ERROR("Invalid argument\n");
1559                 return 0;
1560         }
1561
1562         /* Check if the tbm manager supports dma fence or not. */
1563         if (!bufmgr_exynos->use_dma_fence) {
1564                 TBM_EXYNOS_ERROR("Not support DMA FENCE(%s)\n", strerror_r(errno, buf, STRERR_BUFSIZE));
1565                 return 0;
1566
1567         }
1568
1569         if (device == TBM_DEVICE_3D) {
1570                 ret = ioctl(bo_exynos->dmabuf, DMABUF_IOCTL_GET_FENCE, &fence);
1571                 if (ret < 0) {
1572                         TBM_EXYNOS_ERROR("Cannot get fence (%s)\n", strerror_r(errno, buf, STRERR_BUFSIZE));
1573                         return 0;
1574                 }
1575         } else {
1576                 if (opt & TBM_OPTION_WRITE)
1577                         filelock.l_type = F_WRLCK;
1578                 else
1579                         filelock.l_type = F_RDLCK;
1580
1581                 filelock.l_whence = SEEK_CUR;
1582                 filelock.l_start = 0;
1583                 filelock.l_len = 0;
1584
1585                 if (-1 == fcntl(bo_exynos->dmabuf, F_SETLKW, &filelock))
1586                         return 0;
1587         }
1588
1589         pthread_mutex_lock(&bo_exynos->mutex);
1590
1591         if (device == TBM_DEVICE_3D) {
1592                 int i;
1593
1594                 for (i = 0; i < DMA_FENCE_LIST_MAX; i++) {
1595                         if (bo_exynos->dma_fence[i].ctx == 0) {
1596                                 bo_exynos->dma_fence[i].type = fence.type;
1597                                 bo_exynos->dma_fence[i].ctx = fence.ctx;
1598                                 break;
1599                         }
1600                 }
1601
1602                 if (i == DMA_FENCE_LIST_MAX) {
1603                         /* TODO: if the dma_fence list is full, it should be reallocated. (minseok3.kim) */
1604                         TBM_EXYNOS_ERROR("fence list is full\n");
1605                 }
1606         }
1607
1608         pthread_mutex_unlock(&bo_exynos->mutex);
1609
1610         TBM_EXYNOS_DEBUG("DMABUF_IOCTL_GET_FENCE! bo:%p, gem:%d(%d), fd:%d\n",
1611             bo,
1612             bo_exynos->gem, bo_exynos->name,
1613             bo_exynos->dmabuf);
1614 #endif /* ALWAYS_BACKEND_CTRL */
1615
1616         return 1;
1617 }
1618
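/*
 * tbm_exynos_bo_unlock(): releases the lock taken in tbm_exynos_bo_lock().
 * It pops the oldest entry from the per-bo fence list and returns the fence
 * via DMABUF_IOCTL_PUT_FENCE for 3D access, or drops the POSIX record lock
 * (F_UNLCK) for CPU access. The body is compiled out when ALWAYS_BACKEND_CTRL
 * is defined.
 */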
1619 static int
1620 tbm_exynos_bo_unlock(tbm_bo bo)
1621 {
1622         EXYNOS_RETURN_VAL_IF_FAIL(bo != NULL, 0);
1623
1624 #ifndef ALWAYS_BACKEND_CTRL
1625         tbm_bo_exynos bo_exynos;
1626         struct dma_buf_fence fence;
1627         struct flock filelock;
1628         unsigned int dma_type = 0;
1629         int ret = 0;
1630         char buf[STRERR_BUFSIZE];
1631
1632         bo_exynos = (tbm_bo_exynos)tbm_backend_get_bo_priv(bo);
1633         EXYNOS_RETURN_VAL_IF_FAIL(bo_exynos != NULL, 0);
1634
1635         if (bo_exynos->dma_fence[0].type & DMA_BUF_ACCESS_DMA)
1636                 dma_type = 1;
1637
1638         if (!bo_exynos->dma_fence[0].ctx && dma_type) {
1639                 TBM_EXYNOS_DEBUG("FENCE not supported or ignored.\n");
1640                 return 0;
1641         }
1647
1648         pthread_mutex_lock(&bo_exynos->mutex);
1649
1650         if (dma_type) {
1651                 fence.type = bo_exynos->dma_fence[0].type;
1652                 fence.ctx = bo_exynos->dma_fence[0].ctx;
1653                 int i;
1654
1655                 for (i = 1; i < DMA_FENCE_LIST_MAX; i++) {
1656                         bo_exynos->dma_fence[i - 1].type = bo_exynos->dma_fence[i].type;
1657                         bo_exynos->dma_fence[i - 1].ctx = bo_exynos->dma_fence[i].ctx;
1658                 }
1659                 bo_exynos->dma_fence[DMA_FENCE_LIST_MAX - 1].type = 0;
1660                 bo_exynos->dma_fence[DMA_FENCE_LIST_MAX - 1].ctx = 0;
1661         }
1662         pthread_mutex_unlock(&bo_exynos->mutex);
1663
1664         if (dma_type) {
1665                 ret = ioctl(bo_exynos->dmabuf, DMABUF_IOCTL_PUT_FENCE, &fence);
1666                 if (ret < 0) {
1667                         TBM_EXYNOS_ERROR("Cannot set PUT FENCE (%s)\n", strerror_r(errno, buf, STRERR_BUFSIZE));
1668                         return 0;
1669                 }
1670         } else {
1671                 filelock.l_type = F_UNLCK;
1672                 filelock.l_whence = SEEK_CUR;
1673                 filelock.l_start = 0;
1674                 filelock.l_len = 0;
1675
1676                 if (-1 == fcntl(bo_exynos->dmabuf, F_SETLKW, &filelock))
1677                         return 0;
1678         }
1679
1680         TBM_EXYNOS_DEBUG("DMABUF_IOCTL_PUT_FENCE! bo:%p, gem:%d(%d), fd:%d\n",
1681             bo,
1682             bo_exynos->gem, bo_exynos->name,
1683             bo_exynos->dmabuf);
1684 #endif /* ALWAYS_BACKEND_CTRL */
1685
1686         return 1;
1687 }
1688
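/*
 * tbm_exynos_bufmgr_deinit(): tears down the backend private data - frees the
 * bos remaining in the hash table, deinitializes the cache-state bookkeeping,
 * stops the Wayland auth server if one was started, releases the DRM fd and
 * finally frees the bufmgr private structure.
 */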
1689 static void
1690 tbm_exynos_bufmgr_deinit(void *priv)
1691 {
1692         EXYNOS_RETURN_IF_FAIL(priv != NULL);
1693
1694         tbm_bufmgr_exynos bufmgr_exynos;
1695
1696         bufmgr_exynos = (tbm_bufmgr_exynos)priv;
1697
1698         if (bufmgr_exynos->hashBos) {
1699                 unsigned long key;
1700                 void *value;
1701
1702                 while (drmHashFirst(bufmgr_exynos->hashBos, &key, &value) > 0) {
1703                         free(value);
1704                         drmHashDelete(bufmgr_exynos->hashBos, key);
1705                 }
1706
1707                 drmHashDestroy(bufmgr_exynos->hashBos);
1708                 bufmgr_exynos->hashBos = NULL;
1709         }
1710
1711         _bufmgr_deinit_cache_state(bufmgr_exynos);
1712
1713         if (bufmgr_exynos->bind_display)
1714                 tbm_drm_helper_wl_auth_server_deinit();
1715
1716         if (bufmgr_exynos->device_name)
1717                 free(bufmgr_exynos->device_name);
1718
1719         if (tbm_backend_is_display_server())
1720                 tbm_drm_helper_unset_tbm_master_fd();
1721         else
1722                 tbm_drm_helper_unset_fd();
1723
1724         close(bufmgr_exynos->fd);
1725
1726         free(bufmgr_exynos);
1727 }
1728
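/*
 * tbm_exynos_surface_supported_format(): reports the color formats this backend
 * supports for tbm_surface allocation. The returned array is heap allocated and
 * is expected to be freed by the caller (the libtbm frontend).
 */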
1729 int
1730 tbm_exynos_surface_supported_format(uint32_t **formats, uint32_t *num)
1731 {
1732         uint32_t *color_formats = NULL;
1733
1734         color_formats = (uint32_t *)calloc(1,
1735                                            sizeof(uint32_t) * TBM_COLOR_FORMAT_COUNT);
1736
1737         if (color_formats == NULL)
1738                 return 0;
1739
1740         memcpy(color_formats, tbm_exynos_color_format_list,
1741                sizeof(uint32_t)*TBM_COLOR_FORMAT_COUNT);
1742
1743         *formats = color_formats;
1744         *num = TBM_COLOR_FORMAT_COUNT;
1745
1746         TBM_EXYNOS_DEBUG("tbm_exynos_surface_supported_format count = %d\n", *num);
1747
1748         return 1;
1749 }
1750
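/*
 * NV12 plane-size helpers. The S5P MFC (FIMV) video codec expects macroblock
 * aligned plane sizes, so the Y and UV plane sizes are computed both with the
 * legacy formulas (_calc_*plane_nv12) and the newer macroblock-based formulas
 * (_new_calc_*plane_nv12); tbm_exynos_surface_get_plane_data() below uses the
 * larger of the two so the buffer is big enough for either layout.
 */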
1751 static int
1752 _new_calc_plane_nv12(int width, int height)
1753 {
1754         int mbX, mbY;
1755
1756         mbX = DIV_ROUND_UP(width, S5P_FIMV_NUM_PIXELS_IN_MB_ROW);
1757         mbY = DIV_ROUND_UP(height, S5P_FIMV_NUM_PIXELS_IN_MB_COL);
1758
1759         if (width * height < S5P_FIMV_MAX_FRAME_SIZE)
1760                 mbY = (mbY + 1) / 2 * 2;
1761
1762         return ((mbX * S5P_FIMV_NUM_PIXELS_IN_MB_COL) * (mbY *
1763                         S5P_FIMV_NUM_PIXELS_IN_MB_ROW));
1764 }
1765
1766 static int
1767 _calc_yplane_nv12(int width, int height)
1768 {
1769         int mbX, mbY;
1770
1771         mbX = SIZE_ALIGN(width + 24, S5P_FIMV_NV12MT_HALIGN);
1772         mbY = SIZE_ALIGN(height + 16, S5P_FIMV_NV12MT_VALIGN);
1773
1774         return SIZE_ALIGN(mbX * mbY, S5P_FIMV_DEC_BUF_ALIGN);
1775 }
1776
1777 static int
1778 _calc_uvplane_nv12(int width, int height)
1779 {
1780         int mbX, mbY;
1781
1782         mbX = SIZE_ALIGN(width + 16, S5P_FIMV_NV12MT_HALIGN);
1783         mbY = SIZE_ALIGN(height + 4, S5P_FIMV_NV12MT_VALIGN);
1784
1785         return SIZE_ALIGN((mbX * mbY) >> 1, S5P_FIMV_DEC_BUF_ALIGN);
1786 }
1787
1788 static int
1789 _new_calc_yplane_nv12(int width, int height)
1790 {
1791         return SIZE_ALIGN(_new_calc_plane_nv12(width,
1792                                                 height) + S5P_FIMV_D_ALIGN_PLANE_SIZE,
1793                            TBM_SURFACE_ALIGNMENT_PLANE_NV12);
1794 }
1795
1796 static int
1797 _new_calc_uvplane_nv12(int width, int height)
1798 {
1799         return SIZE_ALIGN((_new_calc_plane_nv12(width,
1800                                                 height) >> 1) + S5P_FIMV_D_ALIGN_PLANE_SIZE,
1801                           TBM_SURFACE_ALIGNMENT_PLANE_NV12);
1802 }
1803
1804 /**
1805  * @brief get the plane data of the surface.
1806  * @param[in] width : the width of the surface
1807  * @param[in] height : the height of the surface
1808  * @param[in] format : the format of the surface
1809  * @param[in] plane_idx : the index of the plane
1810  * @param[out] size : the size of the plane
1811  * @param[out] offset : the offset of the plane
1812  * @param[out] pitch : the pitch of the plane
1813  * @param[out] bo_idx : the bo index of the plane
1814  * @return 1 if this function succeeds, otherwise 0.
1815  */
1816 int
1817 tbm_exynos_surface_get_plane_data(int width, int height,
1818                                   tbm_format format, int plane_idx, uint32_t *size, uint32_t *offset,
1819                                   uint32_t *pitch, int *bo_idx)
1820 {
1821         int ret = 1;
1822         int bpp;
1823         int _offset = 0;
1824         int _pitch = 0;
1825         int _size = 0;
1826         int _bo_idx = 0;
1827
1828         switch (format) {
1829                 /* 16 bpp RGB */
1830         case TBM_FORMAT_XRGB4444:
1831         case TBM_FORMAT_XBGR4444:
1832         case TBM_FORMAT_RGBX4444:
1833         case TBM_FORMAT_BGRX4444:
1834         case TBM_FORMAT_ARGB4444:
1835         case TBM_FORMAT_ABGR4444:
1836         case TBM_FORMAT_RGBA4444:
1837         case TBM_FORMAT_BGRA4444:
1838         case TBM_FORMAT_XRGB1555:
1839         case TBM_FORMAT_XBGR1555:
1840         case TBM_FORMAT_RGBX5551:
1841         case TBM_FORMAT_BGRX5551:
1842         case TBM_FORMAT_ARGB1555:
1843         case TBM_FORMAT_ABGR1555:
1844         case TBM_FORMAT_RGBA5551:
1845         case TBM_FORMAT_BGRA5551:
1846         case TBM_FORMAT_RGB565:
1847                 bpp = 16;
1848                 _offset = 0;
1849                 _pitch = SIZE_ALIGN((width * bpp) >> 3, g_tbm_surface_alignment_pitch_rgb);
1850                 _size = SIZE_ALIGN(_pitch * height, g_tbm_surface_alignment_plane);
1851                 _bo_idx = 0;
1852                 break;
1853                 /* 24 bpp RGB */
1854         case TBM_FORMAT_RGB888:
1855         case TBM_FORMAT_BGR888:
1856                 bpp = 24;
1857                 _offset = 0;
1858                 _pitch = SIZE_ALIGN((width * bpp) >> 3, g_tbm_surface_alignment_pitch_rgb);
1859                 _size = SIZE_ALIGN(_pitch * height, g_tbm_surface_alignment_plane);
1860                 _bo_idx = 0;
1861                 break;
1862                 /* 32 bpp RGB */
1863         case TBM_FORMAT_XRGB8888:
1864         case TBM_FORMAT_XBGR8888:
1865         case TBM_FORMAT_RGBX8888:
1866         case TBM_FORMAT_BGRX8888:
1867         case TBM_FORMAT_ARGB8888:
1868         case TBM_FORMAT_ABGR8888:
1869         case TBM_FORMAT_RGBA8888:
1870         case TBM_FORMAT_BGRA8888:
1871                 bpp = 32;
1872                 _offset = 0;
1873                 _pitch = SIZE_ALIGN((width * bpp) >> 3, g_tbm_surface_alignment_pitch_rgb);
1874                 _size = SIZE_ALIGN(_pitch * height, g_tbm_surface_alignment_plane);
1875                 _bo_idx = 0;
1876                 break;
1877
1878                 /* packed YCbCr */
1879         case TBM_FORMAT_YUYV:
1880         case TBM_FORMAT_YVYU:
1881         case TBM_FORMAT_UYVY:
1882         case TBM_FORMAT_VYUY:
1883         case TBM_FORMAT_AYUV:
1884                 bpp = 32;
1885                 _offset = 0;
1886                 _pitch = SIZE_ALIGN((width * bpp) >> 3, TBM_SURFACE_ALIGNMENT_PITCH_YUV);
1887                 _size = SIZE_ALIGN(_pitch * height, g_tbm_surface_alignment_plane);
1888                 _bo_idx = 0;
1889                 break;
1890
1891                 /*
1892                 * 2 plane YCbCr
1893                 * index 0 = Y plane, [7:0] Y
1894                 * index 1 = Cr:Cb plane, [15:0] Cr:Cb little endian
1895                 * or
1896                 * index 1 = Cb:Cr plane, [15:0] Cb:Cr little endian
1897                 */
1898         case TBM_FORMAT_NV12:
1899                 bpp = 12;
1900                 if (plane_idx == 0) {
1901                         _offset = 0;
1902                         _pitch = SIZE_ALIGN(width, TBM_SURFACE_ALIGNMENT_PITCH_YUV);
1903                         _size = MAX(_calc_yplane_nv12(width, height), _new_calc_yplane_nv12(width,
1904                                         height));
1905                         _bo_idx = 0;
1906                 } else if (plane_idx == 1) {
1907                         _offset = 0;
1908                         _pitch = SIZE_ALIGN(width, TBM_SURFACE_ALIGNMENT_PITCH_YUV / 2);
1909                         _size = MAX(_calc_uvplane_nv12(width, height), _new_calc_uvplane_nv12(width,
1910                                         height));
1911                         _bo_idx = 1;
1912                 }
1913                 break;
1914         case TBM_FORMAT_NV21:
1915                 bpp = 12;
1916                 if (plane_idx == 0) {
1917                         _offset = 0;
1918                         _pitch = SIZE_ALIGN(width, TBM_SURFACE_ALIGNMENT_PITCH_YUV);
1919                         _size = SIZE_ALIGN(_pitch * height, g_tbm_surface_alignment_plane);
1920                         _bo_idx = 0;
1921                 } else if (plane_idx == 1) {
1922                         _offset = width * height;
1923                         _pitch = SIZE_ALIGN(width, TBM_SURFACE_ALIGNMENT_PITCH_YUV / 2);
1924                         _size = SIZE_ALIGN(_pitch * (height / 2), g_tbm_surface_alignment_plane);
1925                         _bo_idx = 0;
1926                 }
1927                 break;
1928
1929         case TBM_FORMAT_NV16:
1930         case TBM_FORMAT_NV61:
1931                 bpp = 16;
1932                 /*if(plane_idx == 0)*/
1933                 {
1934                         _offset = 0;
1935                         _pitch = SIZE_ALIGN(width, TBM_SURFACE_ALIGNMENT_PITCH_YUV);
1936                         _size = SIZE_ALIGN(_pitch * height, g_tbm_surface_alignment_plane);
1937                         _bo_idx = 0;
1938                         if (plane_idx == 0)
1939                                 break;
1940                 }
1941                 /*else if( plane_idx ==1 )*/
1942                 {
1943                         _offset += _size;
1944                         _pitch = SIZE_ALIGN(width, TBM_SURFACE_ALIGNMENT_PITCH_YUV / 2);
1945                         _size = SIZE_ALIGN(_pitch * height, g_tbm_surface_alignment_plane);
1946                         _bo_idx = 0;
1947                 }
1948                 break;
1949
1950                 /*
1951                 * 3 plane YCbCr
1952                 * index 0: Y plane, [7:0] Y
1953                 * index 1: Cb plane, [7:0] Cb
1954                 * index 2: Cr plane, [7:0] Cr
1955                 * or
1956                 * index 1: Cr plane, [7:0] Cr
1957                 * index 2: Cb plane, [7:0] Cb
1958                 */
1959
1960                 /*
1961                 * NATIVE_BUFFER_FORMAT_YV12
1962                 * NATIVE_BUFFER_FORMAT_I420
1963                 */
1964         case TBM_FORMAT_YUV410:
1965         case TBM_FORMAT_YVU410:
1966                 bpp = 9;
1967                 /*if(plane_idx == 0)*/
1968                 {
1969                         _offset = 0;
1970                         _pitch = SIZE_ALIGN(width, TBM_SURFACE_ALIGNMENT_PITCH_YUV);
1971                         _size = SIZE_ALIGN(_pitch * height, g_tbm_surface_alignment_plane);
1972                         _bo_idx = 0;
1973                         if (plane_idx == 0)
1974                                 break;
1975                 }
1976                 /*else if(plane_idx == 1)*/
1977                 {
1978                         _offset += _size;
1979                         _pitch = SIZE_ALIGN(width / 4, TBM_SURFACE_ALIGNMENT_PITCH_YUV / 4);
1980                         _size = SIZE_ALIGN(_pitch * (height / 4), g_tbm_surface_alignment_plane);
1981                         _bo_idx = 0;
1982                         if (plane_idx == 1)
1983                                 break;
1984                 }
1985                 /*else if (plane_idx == 2)*/
1986                 {
1987                         _offset += _size;
1988                         _pitch = SIZE_ALIGN(width / 4, TBM_SURFACE_ALIGNMENT_PITCH_YUV / 4);
1989                         _size = SIZE_ALIGN(_pitch * (height / 4), g_tbm_surface_alignment_plane);
1990                         _bo_idx = 0;
1991                 }
1992                 break;
1993         case TBM_FORMAT_YUV411:
1994         case TBM_FORMAT_YVU411:
1995         case TBM_FORMAT_YUV420:
1996         case TBM_FORMAT_YVU420:
1997                 bpp = 12;
1998                 /*if(plane_idx == 0)*/
1999                 {
2000                         _offset = 0;
2001                         _pitch = SIZE_ALIGN(width, TBM_SURFACE_ALIGNMENT_PITCH_YUV);
2002                         _size = SIZE_ALIGN(_pitch * height, g_tbm_surface_alignment_plane);
2003                         _bo_idx = 0;
2004                         if (plane_idx == 0)
2005                                 break;
2006                 }
2007                 /*else if(plane_idx == 1)*/
2008                 {
2009                         _offset += _size;
2010                         _pitch = SIZE_ALIGN(width / 2, TBM_SURFACE_ALIGNMENT_PITCH_YUV / 2);
2011                         _size = SIZE_ALIGN(_pitch * (height / 2), g_tbm_surface_alignment_plane);
2012                         _bo_idx = 0;
2013                         if (plane_idx == 1)
2014                                 break;
2015                 }
2016                 /*else if (plane_idx == 2)*/
2017                 {
2018                         _offset += _size;
2019                         _pitch = SIZE_ALIGN(width / 2, TBM_SURFACE_ALIGNMENT_PITCH_YUV / 2);
2020                         _size = SIZE_ALIGN(_pitch * (height / 2), g_tbm_surface_alignment_plane);
2021                         _bo_idx = 0;
2022                 }
2023                 break;
2024         case TBM_FORMAT_YUV422:
2025         case TBM_FORMAT_YVU422:
2026                 bpp = 16;
2027                 /*if(plane_idx == 0)*/
2028                 {
2029                         _offset = 0;
2030                         _pitch = SIZE_ALIGN(width, TBM_SURFACE_ALIGNMENT_PITCH_YUV);
2031                         _size = SIZE_ALIGN(_pitch * height, g_tbm_surface_alignment_plane);
2032                         _bo_idx = 0;
2033                         if (plane_idx == 0)
2034                                 break;
2035                 }
2036                 /*else if(plane_idx == 1)*/
2037                 {
2038                         _offset += _size;
2039                         _pitch = SIZE_ALIGN(width / 2, TBM_SURFACE_ALIGNMENT_PITCH_YUV / 2);
2040                         _size = SIZE_ALIGN(_pitch * (height), g_tbm_surface_alignment_plane);
2041                         _bo_idx = 0;
2042                         if (plane_idx == 1)
2043                                 break;
2044                 }
2045                 /*else if (plane_idx == 2)*/
2046                 {
2047                         _offset += _size;
2048                         _pitch = SIZE_ALIGN(width / 2, TBM_SURFACE_ALIGNMENT_PITCH_YUV / 2);
2049                         _size = SIZE_ALIGN(_pitch * (height), g_tbm_surface_alignment_plane);
2050                         _bo_idx = 0;
2051                 }
2052                 break;
2053         case TBM_FORMAT_YUV444:
2054         case TBM_FORMAT_YVU444:
2055                 bpp = 24;
2056                 /*if(plane_idx == 0)*/
2057                 {
2058                         _offset = 0;
2059                         _pitch = SIZE_ALIGN(width, TBM_SURFACE_ALIGNMENT_PITCH_YUV);
2060                         _size = SIZE_ALIGN(_pitch * height, g_tbm_surface_alignment_plane);
2061                         _bo_idx = 0;
2062                         if (plane_idx == 0)
2063                                 break;
2064                 }
2065                 /*else if(plane_idx == 1)*/
2066                 {
2067                         _offset += _size;
2068                         _pitch = SIZE_ALIGN(width, TBM_SURFACE_ALIGNMENT_PITCH_YUV);
2069                         _size = SIZE_ALIGN(_pitch * height, g_tbm_surface_alignment_plane);
2070                         _bo_idx = 0;
2071                         if (plane_idx == 1)
2072                                 break;
2073                 }
2074                 /*else if (plane_idx == 2)*/
2075                 {
2076                         _offset += _size;
2077                         _pitch = SIZE_ALIGN(width, TBM_SURFACE_ALIGNMENT_PITCH_YUV);
2078                         _size = SIZE_ALIGN(_pitch * height, g_tbm_surface_alignment_plane);
2079                         _bo_idx = 0;
2080                 }
2081                 break;
2082         default:
2083                 bpp = 0;
2084                 break;
2085         }
2086
2087         *size = _size;
2088         *offset = _offset;
2089         *pitch = _pitch;
2090         *bo_idx = _bo_idx;
2091
2092         return ret;
2093 }
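/*
 * Example (illustrative only): querying the UV plane of a 1920x1080 NV12
 * surface. The concrete size/pitch values depend on the alignment macros and
 * globals used above.
 *
 *     uint32_t size, offset, pitch;
 *     int bo_idx;
 *     tbm_exynos_surface_get_plane_data(1920, 1080, TBM_FORMAT_NV12, 1,
 *                                       &size, &offset, &pitch, &bo_idx);
 *     // for NV12 the UV plane is placed in its own bo (bo_idx == 1)
 */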
2094
2095 int
2096 tbm_exynos_bo_get_flags(tbm_bo bo)
2097 {
2098         EXYNOS_RETURN_VAL_IF_FAIL(bo != NULL, 0);
2099
2100         tbm_bo_exynos bo_exynos;
2101
2102         bo_exynos = (tbm_bo_exynos)tbm_backend_get_bo_priv(bo);
2103         EXYNOS_RETURN_VAL_IF_FAIL(bo_exynos != NULL, 0);
2104
2105         return bo_exynos->flags_tbm;
2106 }
2107
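/*
 * tbm_exynos_bufmgr_bind_native_display(): used only in the display server.
 * It starts the tbm-drm-helper Wayland auth server on the given native display
 * so that clients can authenticate against this DRM device.
 */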
2108 int
2109 tbm_exynos_bufmgr_bind_native_display(tbm_bufmgr bufmgr, void *native_display)
2110 {
2111         tbm_bufmgr_exynos bufmgr_exynos;
2112
2113         bufmgr_exynos = tbm_backend_get_priv_from_bufmgr(bufmgr);
2114         EXYNOS_RETURN_VAL_IF_FAIL(bufmgr_exynos != NULL, 0);
2115
2116         if (!tbm_drm_helper_wl_auth_server_init(native_display, bufmgr_exynos->fd,
2117                                            bufmgr_exynos->device_name, 0)) {
2118                 TBM_EXYNOS_ERROR("fail to tbm_drm_helper_wl_auth_server_init\n");
2119                 return 0;
2120         }
2121
2122         bufmgr_exynos->bind_display = native_display;
2123
2124         return 1;
2125 }
2126
2127 MODULEINITPPROTO(init_tbm_bufmgr_priv);
2128
2129 static TBMModuleVersionInfo ExynosVersRec = {
2130         "exynos",
2131         "Samsung",
2132         TBM_ABI_VERSION,
2133 };
2134
2135 TBMModuleData tbmModuleData = { &ExynosVersRec, init_tbm_bufmgr_priv};
2136
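/*
 * init_tbm_bufmgr_priv(): backend entry point called by libtbm. It opens the
 * exynos DRM device (master fd in the display server; render node or
 * authenticated fd otherwise), detects dma-buf fence support via
 * /sys/module/dmabuf_sync, initializes the cache-state bookkeeping, registers
 * the backend function table and finally picks per-target alignment values
 * from the system model name.
 */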
2137 int
2138 init_tbm_bufmgr_priv(tbm_bufmgr bufmgr, int fd)
2139 {
2140         tbm_bufmgr_backend bufmgr_backend;
2141         tbm_bufmgr_exynos bufmgr_exynos;
2142         int fp;
2143
2144         if (!bufmgr)
2145                 return 0;
2146
2147         bufmgr_exynos = calloc(1, sizeof(struct _tbm_bufmgr_exynos));
2148         if (!bufmgr_exynos) {
2149                 TBM_EXYNOS_ERROR("fail to alloc bufmgr_exynos!\n");
2150                 return 0;
2151         }
2152
2153         if (tbm_backend_is_display_server()) {
2154                 bufmgr_exynos->fd = tbm_drm_helper_get_master_fd();
2155                 if (bufmgr_exynos->fd < 0) {
2156                         bufmgr_exynos->fd = _tbm_exynos_open_drm();
2157                         if (bufmgr_exynos->fd < 0) {
2158                                 TBM_EXYNOS_ERROR("fail to open drm! (pid:%d)\n", getpid());
2159                                 goto fail_open_drm;
2160                         }
2161                 }
2162
2163                 tbm_drm_helper_set_tbm_master_fd(bufmgr_exynos->fd);
2164
2165                 bufmgr_exynos->device_name = drmGetDeviceNameFromFd(bufmgr_exynos->fd);
2166                 if (!bufmgr_exynos->device_name) {
2167                         TBM_EXYNOS_ERROR("fail to get device name! (pid:%d)\n", getpid());
2168
2169                         tbm_drm_helper_unset_tbm_master_fd();
2170                         goto fail_get_device_name;
2171                 }
2172         } else {
2173                 if (_check_render_node()) {
2174                         bufmgr_exynos->fd = _get_render_node();
2175                         if (bufmgr_exynos->fd < 0) {
2176                                 TBM_EXYNOS_ERROR("fail to get render node\n");
2177                                 goto fail_get_render_node;
2178                         }
2179                         TBM_EXYNOS_DEBUG("Use render node:%d\n", bufmgr_exynos->fd);
2180                 } else {
2181                         if (!tbm_drm_helper_get_auth_info(&(bufmgr_exynos->fd), &(bufmgr_exynos->device_name), NULL)) {
2182                                 TBM_EXYNOS_ERROR("fail to get auth drm info!\n");
2183                                 goto fail_get_auth_info;
2184                         }
2185
2186                         tbm_drm_helper_set_fd(bufmgr_exynos->fd);
2187                 }
2188         }
2189
2190         /* Check if the tbm manager supports dma fence or not. */
2191         fp = open("/sys/module/dmabuf_sync/parameters/enabled", O_RDONLY);
2192         if (fp != -1) {
2193                 char buf[1];
2194                 int length = read(fp, buf, 1);
2195
2196                 if (length == 1 && buf[0] == '1')
2197                         bufmgr_exynos->use_dma_fence = 1;
2198
2199                 close(fp);
2200         }
2201
2202         if (!_bufmgr_init_cache_state(bufmgr_exynos)) {
2203                 TBM_EXYNOS_ERROR("fail to init bufmgr cache state\n");
2204                 goto fail_init_cache_state;
2205         }
2206
2207         /*Create Hash Table*/
2208         bufmgr_exynos->hashBos = drmHashCreate();
2209
2210         bufmgr_backend = tbm_backend_alloc();
2211         if (!bufmgr_backend) {
2212                 TBM_EXYNOS_ERROR("fail to alloc backend!\n");
2213                 goto fail_alloc_backend;
2214         }
2215
2216         bufmgr_backend->priv = (void *)bufmgr_exynos;
2217         bufmgr_backend->bufmgr_deinit = tbm_exynos_bufmgr_deinit;
2218         bufmgr_backend->bo_size = tbm_exynos_bo_size;
2219         bufmgr_backend->bo_alloc = tbm_exynos_bo_alloc;
2220         bufmgr_backend->bo_free = tbm_exynos_bo_free;
2221         bufmgr_backend->bo_import = tbm_exynos_bo_import;
2222         bufmgr_backend->bo_import_fd = tbm_exynos_bo_import_fd;
2223         bufmgr_backend->bo_export = tbm_exynos_bo_export;
2224         bufmgr_backend->bo_export_fd = tbm_exynos_bo_export_fd;
2225         bufmgr_backend->bo_get_handle = tbm_exynos_bo_get_handle;
2226         bufmgr_backend->bo_map = tbm_exynos_bo_map;
2227         bufmgr_backend->bo_unmap = tbm_exynos_bo_unmap;
2228         bufmgr_backend->surface_get_plane_data = tbm_exynos_surface_get_plane_data;
2229         bufmgr_backend->surface_supported_format = tbm_exynos_surface_supported_format;
2230         bufmgr_backend->bo_get_flags = tbm_exynos_bo_get_flags;
2231         bufmgr_backend->bo_lock = tbm_exynos_bo_lock;
2232         bufmgr_backend->bo_unlock = tbm_exynos_bo_unlock;
2233
2234         if (tbm_backend_is_display_server() && !_check_render_node())
2235                 bufmgr_backend->bufmgr_bind_native_display = tbm_exynos_bufmgr_bind_native_display;
2236
2237         if (!tbm_backend_init(bufmgr, bufmgr_backend)) {
2238                 TBM_EXYNOS_ERROR("fail to init backend!\n");
2239                 goto fail_init_backend;
2240         }
2241
2242         /* Get the model name from capi-system-info.
2243          * The alignment_plane and alignment_pitch_rgb values differ according to the target.
2244          * Stride issues will occur when the right alignment_plane and alignment_pitch_rgb
2245          * are not set in the backend.
2246          */
2247         char *value = NULL;
2248         if (system_info_get_platform_string("http://tizen.org/system/model_name", &value) != SYSTEM_INFO_ERROR_NONE) {
2249                 TBM_EXYNOS_ERROR("Cannot get the \"http://tizen.org/system/model_name\" key value from system-info.\n");
2250                 TBM_EXYNOS_ERROR("The right alignment values may not be set in the libtbm-exynos backend.\n");
2251         } else {
2252                 if (!strncmp(value, "TW1", 4)) {
2253                         g_tbm_surface_alignment_plane = 8;
2254                         g_tbm_surface_alignment_pitch_rgb = 8;
2255                         g_enable_cache_ctrl = 1;
2256                 } else {
2257                         g_tbm_surface_alignment_plane = 64;
2258                         g_tbm_surface_alignment_pitch_rgb = 64;
2259                 }
2260         }
2261
2262         free(value);
2263
2264 #ifdef DEBUG
2265         {
2266                 char *env;
2267
2268                 env = getenv("TBM_EXYNOS_DEBUG");
2269                 if (env) {
2270                         bDebug = atoi(env);
2271                         TBM_EXYNOS_ERROR("TBM_EXYNOS_DEBUG=%s\n", env);
2272                 } else
2273                         bDebug = 0;
2274         }
2275 #endif
2276
2277         TBM_EXYNOS_DEBUG("drm_fd:%d\n", bufmgr_exynos->fd);
2278
2279         return 1;
2280
2281 fail_init_backend:
2282         tbm_backend_free(bufmgr_backend);
2283 fail_alloc_backend:
2284         if (bufmgr_exynos->hashBos)
2285                 drmHashDestroy(bufmgr_exynos->hashBos);
2286         _bufmgr_deinit_cache_state(bufmgr_exynos);
2287 fail_init_cache_state:
2288         if (tbm_backend_is_display_server())
2289                 tbm_drm_helper_unset_tbm_master_fd();
2290         else
2291                 tbm_drm_helper_unset_fd();
2292 fail_get_device_name:
2293         close(bufmgr_exynos->fd);
2294 fail_get_auth_info:
2295 fail_get_render_node:
2296 fail_open_drm:
2297         free(bufmgr_exynos);
2298         return 0;
2299 }
2300