tbm_bufmgr_exynos: clean up init_tbm_bufmgr_priv()
[platform/adaptation/samsung_exynos/libtbm-exynos.git] / src / tbm_bufmgr_exynos.c
1 /**************************************************************************
2
3 libtbm_exynos
4
5 Copyright 2012 Samsung Electronics co., Ltd. All Rights Reserved.
6
7 Contact: SooChan Lim <sc1.lim@samsung.com>, Sangjin Lee <lsj119@samsung.com>
8
9 Permission is hereby granted, free of charge, to any person obtaining a
10 copy of this software and associated documentation files (the
11 "Software"), to deal in the Software without restriction, including
12 without limitation the rights to use, copy, modify, merge, publish,
13 distribute, sub license, and/or sell copies of the Software, and to
14 permit persons to whom the Software is furnished to do so, subject to
15 the following conditions:
16
17 The above copyright notice and this permission notice (including the
18 next paragraph) shall be included in all copies or substantial portions
19 of the Software.
20
21 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
22 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
23 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
24 IN NO EVENT SHALL PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR
25 ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
26 TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
27 SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
28
29 **************************************************************************/
30
31 #ifdef HAVE_CONFIG_H
32 #include "config.h"
33 #endif
34
35 #include <stdio.h>
36 #include <stdlib.h>
37 #include <stdint.h>
38 #include <string.h>
39 #include <sys/ioctl.h>
40 #include <sys/types.h>
41 #include <unistd.h>
42 #include <sys/mman.h>
43 #include <sys/stat.h>
44 #include <fcntl.h>
45 #include <errno.h>
46 #include <xf86drm.h>
47 #include <tbm_bufmgr.h>
48 #include <tbm_bufmgr_backend.h>
49 #include <exynos_drm.h>
50 #include <pthread.h>
51 #include <tbm_surface.h>
52 #include <tbm_surface_internal.h>
53 #include <tbm_drm_helper.h>
54
55 #include <libudev.h>
56
57 #include "tbm_bufmgr_tgl.h"
58
59 #define DEBUG
60 #define USE_DMAIMPORT
61 #define TBM_COLOR_FORMAT_COUNT 8
62
63 #define EXYNOS_DRM_NAME "exynos"
64
65 #ifdef DEBUG
66 #define LOG_TAG "TBM_BACKEND"
67 #include <dlog.h>
68 static int bDebug;
69
70 char *target_name()
71 {
72         FILE *f;
73         char *slash;
74         static int initialized = 0;
75         static char app_name[128];
76
77         if (initialized)
78                 return app_name;
79
80         /* get the application name */
81         f = fopen("/proc/self/cmdline", "r");
82
83         if (!f)
84                 return 0;
85
86         memset(app_name, 0x00, sizeof(app_name));
87
88         if (fgets(app_name, 100, f) == NULL) {
89                 fclose(f);
90                 return 0;
91         }
92
93         fclose(f);
94
95         slash = strrchr(app_name, '/');
96         if (slash != NULL)
97                 memmove(app_name, slash + 1, strlen(slash));
98
99         initialized = 1;
100
101         return app_name;
102 }
103
104 #define TBM_EXYNOS_LOG(fmt, args...) LOGE("\033[31m"  "[%s]" fmt "\033[0m", target_name(), ##args)
105 #define DBG(fmt, args...)  do { if (bDebug & 01) LOGE(fmt, ##args); } while (0)
106 #else
107 #define TBM_EXYNOS_LOG(...)
108 #define DBG(...)
109 #endif
110
111 #define SIZE_ALIGN(value, base) (((value) + ((base) - 1)) & ~((base) - 1))
112 #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
113 #define MAX(a, b) ((a) > (b) ? (a) : (b))
114
115 #ifdef ALIGN_EIGHT
116 #define TBM_SURFACE_ALIGNMENT_PLANE (8)
117 #define TBM_SURFACE_ALIGNMENT_PITCH_RGB (8)
118 #else
119 #define TBM_SURFACE_ALIGNMENT_PLANE (64)
120 #define TBM_SURFACE_ALIGNMENT_PITCH_RGB (64)
121 #endif
122
123 #define TBM_SURFACE_ALIGNMENT_PLANE_NV12 (4096)
124 #define TBM_SURFACE_ALIGNMENT_PITCH_YUV (16)
125
126 #define SZ_1M                                   0x00100000
127 #define S5P_FIMV_MAX_FRAME_SIZE                 (2 * SZ_1M)
128 #define S5P_FIMV_D_ALIGN_PLANE_SIZE             64
129 #define S5P_FIMV_NUM_PIXELS_IN_MB_ROW           16
130 #define S5P_FIMV_NUM_PIXELS_IN_MB_COL           16
131 #define S5P_FIMV_DEC_BUF_ALIGN                  (8 * 1024)
132 #define S5P_FIMV_NV12MT_HALIGN                  128
133 #define S5P_FIMV_NV12MT_VALIGN                  64
134
135 /* check condition */
136 #define EXYNOS_RETURN_IF_FAIL(cond) {\
137         if (!(cond)) {\
138                 TBM_EXYNOS_LOG("[%s] : '%s' failed.\n", __func__, #cond);\
139                 return;\
140         } \
141 }
142
143 #define EXYNOS_RETURN_VAL_IF_FAIL(cond, val) {\
144         if (!(cond)) {\
145                 TBM_EXYNOS_LOG("[%s] : '%s' failed.\n", __func__, #cond);\
146                 return val;\
147         } \
148 }
149
150 struct dma_buf_info {
151         unsigned long   size;
152         unsigned int    fence_supported;
153         unsigned int    padding;
154 };
155
156 #define DMA_BUF_ACCESS_READ             0x1
157 #define DMA_BUF_ACCESS_WRITE            0x2
158 #define DMA_BUF_ACCESS_DMA              0x4
159 #define DMA_BUF_ACCESS_MAX              0x8
160
161 #define DMA_FENCE_LIST_MAX              5
162
163 struct dma_buf_fence {
164         unsigned long           ctx;
165         unsigned int            type;
166 };
167
168 #define DMABUF_IOCTL_BASE       'F'
169 #define DMABUF_IOWR(nr, type)   _IOWR(DMABUF_IOCTL_BASE, nr, type)
170
171 #define DMABUF_IOCTL_GET_INFO   DMABUF_IOWR(0x00, struct dma_buf_info)
172 #define DMABUF_IOCTL_GET_FENCE  DMABUF_IOWR(0x01, struct dma_buf_fence)
173 #define DMABUF_IOCTL_PUT_FENCE  DMABUF_IOWR(0x02, struct dma_buf_fence)
174
175 /* tgl key values */
176 #define GLOBAL_KEY   ((unsigned int)(-1))
177 /* TBM_CACHE */
178 #define TBM_EXYNOS_CACHE_INV       0x01 /**< cache invalidate  */
179 #define TBM_EXYNOS_CACHE_CLN       0x02 /**< cache clean */
180 #define TBM_EXYNOS_CACHE_ALL       0x10 /**< cache all */
181 #define TBM_EXYNOS_CACHE_FLUSH     (TBM_EXYNOS_CACHE_INV|TBM_EXYNOS_CACHE_CLN) /**< cache flush  */
182 #define TBM_EXYNOS_CACHE_FLUSH_ALL (TBM_EXYNOS_CACHE_FLUSH|TBM_EXYNOS_CACHE_ALL)        /**< cache flush all */
183
184 enum {
185         DEVICE_NONE = 0,
186         DEVICE_CA,                                      /* cache aware device */
187         DEVICE_CO                                       /* cache oblivious device */
188 };
189
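/*
 * Per-bo cache bookkeeping stored in the tgl driver, keyed by the bo's flink
 * name (see _bo_set_cache_state()/_bo_save_cache_state() below):
 *   cntFlush  - global flush count observed when the bo was last synced
 *   isCached  - set while the CPU cache may hold the buffer contents
 *   isDirtied - which kind of device (DEVICE_CA/DEVICE_CO) wrote last
 */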
190 typedef union _tbm_bo_cache_state tbm_bo_cache_state;
191
192 union _tbm_bo_cache_state {
193         unsigned int val;
194         struct {
195                 unsigned int cntFlush:16;       /* global cache-flush count at last sync */
196                 unsigned int isCached:1;
197                 unsigned int isDirtied:2;
198         } data;
199 };
200
201 typedef struct _tbm_bufmgr_exynos *tbm_bufmgr_exynos;
202 typedef struct _tbm_bo_exynos *tbm_bo_exynos;
203
204 typedef struct _exynos_private {
205         int ref_count;
206         struct _tbm_bo_exynos *bo_priv;
207 } PrivGem;
208
209 /* tbm buffer object for exynos */
210 struct _tbm_bo_exynos {
211         int fd;
212
213         unsigned int name;    /* FLINK ID */
214
215         unsigned int gem;     /* GEM Handle */
216
217         unsigned int dmabuf;  /* fd for dmabuf */
218
219         void *pBase;          /* virtual address */
220
221         unsigned int size;
222
223         unsigned int flags_exynos;
224         unsigned int flags_tbm;
225
226         PrivGem *private;
227
228         pthread_mutex_t mutex;
229         struct dma_buf_fence dma_fence[DMA_FENCE_LIST_MAX];
230         int device;
231         int opt;
232
233         tbm_bo_cache_state cache_state;
234         unsigned int map_cnt;
235         int last_map_device;
236 };
237
238 /* tbm bufmgr private for exynos */
239 struct _tbm_bufmgr_exynos {
240         int fd;
241         int isLocal;
242         void *hashBos;
243
244         int use_dma_fence;
245
246         int tgl_fd;
247
248         char *device_name;
249         void *bind_display;
250 };
251
252 char *STR_DEVICE[] = {
253         "DEF",
254         "CPU",
255         "2D",
256         "3D",
257         "MM"
258 };
259
260 char *STR_OPT[] = {
261         "NONE",
262         "RD",
263         "WR",
264         "RDWR"
265 };
266
267
268 uint32_t tbm_exynos_color_format_list[TBM_COLOR_FORMAT_COUNT] = {   TBM_FORMAT_RGBA8888,
269                                                                     TBM_FORMAT_BGRA8888,
270                                                                     TBM_FORMAT_RGBX8888,
271                                                                     TBM_FORMAT_RGB888,
272                                                                     TBM_FORMAT_NV12,
273                                                                     TBM_FORMAT_NV21,
274                                                                     TBM_FORMAT_YUV420,
275                                                                     TBM_FORMAT_YVU420
276                                                                 };
277
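/*
 * The _tgl_*() helpers below wrap ioctls on the global-lock ("tgl") device,
 * used here as a small key/value store (presumably shared across processes):
 * one entry per bo, keyed by its flink name, holds the tbm_bo_cache_state,
 * and GLOBAL_KEY holds the global cache-flush counter.  They are compiled in
 * only with ENABLE_CACHECRTL and used only when dma-fence is unavailable.
 */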
278 #ifdef ENABLE_CACHECRTL
279 static inline int
280 _tgl_init(int fd, unsigned int key)
281 {
282         struct tgl_attribute attr;
283         int err;
284
285         attr.key = key;
286         attr.timeout_ms = 1000;
287
288         err = ioctl(fd, TGL_IOC_INIT_LOCK, &attr);
289         if (err) {
290                 TBM_EXYNOS_LOG("[libtbm:%d] "
291                                "error(%s) %s:%d key:%d\n",
292                                getpid(), strerror(errno), __func__, __LINE__, key);
293                 return 0;
294         }
295
296         return 1;
297 }
298
299 static inline int
300 _tgl_destroy(int fd, unsigned int key)
301 {
302         int err;
303
304         err = ioctl(fd, TGL_IOC_DESTROY_LOCK, key);
305         if (err) {
306                 TBM_EXYNOS_LOG("[libtbm:%d] "
307                                "error(%s) %s:%d key:%d\n",
308                                getpid(), strerror(errno), __func__, __LINE__, key);
309                 return 0;
310         }
311
312         return 1;
313 }
314 static inline int
315 _tgl_set_data(int fd, unsigned int key, unsigned int val)
316 {
317         int err;
318
319         struct tgl_user_data arg;
320
321         arg.key = key;
322         arg.data1 = val;
323         err = ioctl(fd, TGL_IOC_SET_DATA, &arg);
324         if (err) {
325                 TBM_EXYNOS_LOG("[libtbm:%d] "
326                                "error(%s) %s:%d key:%d\n",
327                                getpid(), strerror(errno), __func__, __LINE__, key);
328                 return 0;
329         }
330
331         return 1;
332 }
333
334 static inline unsigned int
335 _tgl_get_data(int fd, unsigned int key)
336 {
337         int err;
338         struct tgl_user_data arg = { 0, };
339
340         arg.key = key;
341         err = ioctl(fd, TGL_IOC_GET_DATA, &arg);
342         if (err) {
343                 TBM_EXYNOS_LOG("[libtbm:%d] "
344                                "error(%s) %s:%d key:%d\n",
345                                getpid(), strerror(errno), __func__, __LINE__, key);
346                 return 0;
347         }
348
349         return arg.data1;
350 }
351
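/*
 * Translate TBM_EXYNOS_CACHE_* flags into a DRM_EXYNOS_GEM_CACHE_OP request.
 * Passing a NULL bo degrades the request to a flush of all caches
 * (TBM_EXYNOS_CACHE_FLUSH_ALL); with dma-fence enabled the kernel handles
 * cache maintenance itself, so this becomes a no-op.
 */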
352 static int
353 _exynos_cache_flush(tbm_bufmgr_exynos bufmgr_exynos, tbm_bo_exynos bo_exynos, int flags)
354 {
355         EXYNOS_RETURN_VAL_IF_FAIL(bufmgr_exynos != NULL, 0);
356
357         /* cache flush is managed by kernel side when using dma-fence. */
358         if (bufmgr_exynos->use_dma_fence)
359                 return 1;
360
361         struct drm_exynos_gem_cache_op cache_op = {0, };
362         int ret;
363
364         /* if bo_exynos is null, do cache_flush_all */
365         if (bo_exynos) {
366                 cache_op.flags = 0;
367                 cache_op.usr_addr = (uint64_t)((uintptr_t)bo_exynos->pBase);
368                 cache_op.size = bo_exynos->size;
369         } else {
370                 flags = TBM_EXYNOS_CACHE_FLUSH_ALL;
371                 cache_op.flags = 0;
372                 cache_op.usr_addr = 0;
373                 cache_op.size = 0;
374         }
375
376         if (flags & TBM_EXYNOS_CACHE_INV) {
377                 if (flags & TBM_EXYNOS_CACHE_ALL)
378                         cache_op.flags |= EXYNOS_DRM_CACHE_INV_ALL;
379                 else
380                         cache_op.flags |= EXYNOS_DRM_CACHE_INV_RANGE;
381         }
382
383         if (flags & TBM_EXYNOS_CACHE_CLN) {
384                 if (flags & TBM_EXYNOS_CACHE_ALL)
385                         cache_op.flags |= EXYNOS_DRM_CACHE_CLN_ALL;
386                 else
387                         cache_op.flags |= EXYNOS_DRM_CACHE_CLN_RANGE;
388         }
389
390         if (flags & TBM_EXYNOS_CACHE_ALL)
391                 cache_op.flags |= EXYNOS_DRM_ALL_CACHES_CORES;
392
393         ret = drmCommandWriteRead(bufmgr_exynos->fd, DRM_EXYNOS_GEM_CACHE_OP, &cache_op,
394                                   sizeof(cache_op));
395         if (ret) {
396                 TBM_EXYNOS_LOG("error fail to flush the cache.\n");
397                 return 0;
398         }
399
400         return 1;
401 }
402 #endif
403
404 static int
405 _bo_init_cache_state(tbm_bufmgr_exynos bufmgr_exynos, tbm_bo_exynos bo_exynos, int import)
406 {
407 #ifdef ENABLE_CACHECRTL
408         EXYNOS_RETURN_VAL_IF_FAIL(bufmgr_exynos != NULL, 0);
409         EXYNOS_RETURN_VAL_IF_FAIL(bo_exynos != NULL, 0);
410
411         if (bufmgr_exynos->use_dma_fence)
412                 return 1;
413
414         tbm_bo_cache_state cache_state;
415
416         _tgl_init(bufmgr_exynos->tgl_fd, bo_exynos->name);
417
418         if (import == 0) {
419                 cache_state.data.isDirtied = DEVICE_NONE;
420                 cache_state.data.isCached = 0;
421                 cache_state.data.cntFlush = 0;
422
423                 _tgl_set_data(bufmgr_exynos->tgl_fd, bo_exynos->name, cache_state.val);
424         }
425 #endif
426
427         return 1;
428 }
429
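/*
 * Called on the first map of a bo.  Decides whether a cache invalidate or
 * clean is needed based on who wrote the buffer last: a CPU map after a
 * cache-oblivious (DEVICE_CO) writer needs an invalidate, while a device map
 * after a cache-aware (DEVICE_CA, i.e. CPU) writer needs a clean of all
 * caches, tracked against the global flush counter.
 */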
430 static int
431 _bo_set_cache_state(tbm_bufmgr_exynos bufmgr_exynos, tbm_bo_exynos bo_exynos, int device, int opt)
432 {
433 #ifdef ENABLE_CACHECRTL
434         EXYNOS_RETURN_VAL_IF_FAIL(bufmgr_exynos != NULL, 0);
435         EXYNOS_RETURN_VAL_IF_FAIL(bo_exynos != NULL, 0);
436
437         if (bufmgr_exynos->use_dma_fence)
438                 return 1;
439
440         char need_flush = 0;
441         unsigned short cntFlush = 0;
442
443         if (!(bo_exynos->flags_exynos & EXYNOS_BO_CACHABLE))
444                 return 1;
445
446         /* get cache state of a bo */
447         bo_exynos->cache_state.val = _tgl_get_data(bufmgr_exynos->tgl_fd,
448                                      bo_exynos->name);
449
450         /* get global cache flush count */
451         cntFlush = (unsigned short)_tgl_get_data(bufmgr_exynos->tgl_fd, GLOBAL_KEY);
452
453         if (device == TBM_DEVICE_CPU) {
454                 if (bo_exynos->cache_state.data.isDirtied == DEVICE_CO &&
455                     bo_exynos->cache_state.data.isCached)
456                         need_flush = TBM_EXYNOS_CACHE_INV;
457
458                 bo_exynos->cache_state.data.isCached = 1;
459                 if (opt & TBM_OPTION_WRITE)
460                         bo_exynos->cache_state.data.isDirtied = DEVICE_CA;
461                 else {
462                         if (bo_exynos->cache_state.data.isDirtied != DEVICE_CA)
463                                 bo_exynos->cache_state.data.isDirtied = DEVICE_NONE;
464                 }
465         } else {
466                 if (bo_exynos->cache_state.data.isDirtied == DEVICE_CA &&
467                     bo_exynos->cache_state.data.isCached &&
468                     bo_exynos->cache_state.data.cntFlush == cntFlush)
469                         need_flush = TBM_EXYNOS_CACHE_CLN | TBM_EXYNOS_CACHE_ALL;
470
471                 if (opt & TBM_OPTION_WRITE)
472                         bo_exynos->cache_state.data.isDirtied = DEVICE_CO;
473                 else {
474                         if (bo_exynos->cache_state.data.isDirtied != DEVICE_CO)
475                                 bo_exynos->cache_state.data.isDirtied = DEVICE_NONE;
476                 }
477         }
478
479         if (need_flush) {
480                 if (need_flush & TBM_EXYNOS_CACHE_ALL)
481                         _tgl_set_data(bufmgr_exynos->tgl_fd, GLOBAL_KEY, (unsigned int)(++cntFlush));
482
483                 /* call cache flush */
484                 _exynos_cache_flush(bufmgr_exynos, bo_exynos, need_flush);
485
486                 DBG("[libtbm:%d] \tcache(%d,%d)....flush:0x%x, cntFlush(%d)\n",
487                     getpid(),
488                     bo_exynos->cache_state.data.isCached,
489                     bo_exynos->cache_state.data.isDirtied,
490                     need_flush,
491                     cntFlush);
492         }
493 #endif
494
495         return 1;
496 }
497
498 static int
499 _bo_save_cache_state(tbm_bufmgr_exynos bufmgr_exynos, tbm_bo_exynos bo_exynos)
500 {
501 #ifdef ENABLE_CACHECRTL
502         EXYNOS_RETURN_VAL_IF_FAIL(bufmgr_exynos != NULL, 0);
503         EXYNOS_RETURN_VAL_IF_FAIL(bo_exynos != NULL, 0);
504
505         if (bufmgr_exynos->use_dma_fence)
506                 return 1;
507
508         unsigned short cntFlush = 0;
509
510         /* get global cache flush count */
511         cntFlush = (unsigned short)_tgl_get_data(bufmgr_exynos->tgl_fd, GLOBAL_KEY);
512
513         /* save global cache flush count */
514         bo_exynos->cache_state.data.cntFlush = cntFlush;
515         _tgl_set_data(bufmgr_exynos->tgl_fd, bo_exynos->name,
516                       bo_exynos->cache_state.val);
517 #endif
518
519         return 1;
520 }
521
522 static void
523 _bo_destroy_cache_state(tbm_bufmgr_exynos bufmgr_exynos, tbm_bo_exynos bo_exynos)
524 {
525 #ifdef ENABLE_CACHECRTL
526         EXYNOS_RETURN_IF_FAIL(bufmgr_exynos != NULL);
527         EXYNOS_RETURN_IF_FAIL(bo_exynos != NULL);
528
529         if (bufmgr_exynos->use_dma_fence)
530                 return;
531
532         _tgl_destroy(bufmgr_exynos->tgl_fd, bo_exynos->name);
533 #endif
534 }
535
536 static int
537 _bufmgr_init_cache_state(tbm_bufmgr_exynos bufmgr_exynos)
538 {
539 #ifdef ENABLE_CACHECRTL
540         EXYNOS_RETURN_VAL_IF_FAIL(bufmgr_exynos != NULL, 0);
541
542         if (bufmgr_exynos->use_dma_fence)
543                 return 1;
544
545         /* open tgl fd for saving cache flush data */
546         bufmgr_exynos->tgl_fd = open(tgl_devfile, O_RDWR);
547
548         if (bufmgr_exynos->tgl_fd < 0) {
549                 bufmgr_exynos->tgl_fd = open(tgl_devfile1, O_RDWR);
550                 if (bufmgr_exynos->tgl_fd < 0) {
551                         TBM_EXYNOS_LOG("[libtbm-exynos:%d] "
552                                        "error: Fail to open global_lock:%s\n",
553                                        getpid(), tgl_devfile);
554                         return 0;
555                 }
556         }
557
558         if (!_tgl_init(bufmgr_exynos->tgl_fd, GLOBAL_KEY)) {
559                 TBM_EXYNOS_LOG("[libtbm-exynos:%d] "
560                                "error: Fail to initialize the tgl\n",
561                                getpid());
562
563                 close(bufmgr_exynos->tgl_fd);
564                 return 0;
565         }
566 #endif
567
568         return 1;
569 }
570
571 static void
572 _bufmgr_deinit_cache_state(tbm_bufmgr_exynos bufmgr_exynos)
573 {
574 #ifdef ENABLE_CACHECRTL
575         EXYNOS_RETURN_IF_FAIL(bufmgr_exynos != NULL);
576
577         if (bufmgr_exynos->use_dma_fence)
578                 return;
579
580         if (bufmgr_exynos->tgl_fd >= 0)
581                 close(bufmgr_exynos->tgl_fd);
582 #endif
583 }
584
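/*
 * Open the DRM device.  Try drmOpen("exynos") first; if that fails, walk the
 * udev "drm" subsystem for a card* node whose parent is "exynos-drm" and open
 * its device node directly.
 */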
585 static int
586 _tbm_exynos_open_drm()
587 {
588         int fd = -1;
589
590         fd = drmOpen(EXYNOS_DRM_NAME, NULL);
591         if (fd < 0) {
592                 TBM_EXYNOS_LOG("[libtbm-exynos:%d] "
593                               "warning %s:%d fail to open drm\n",
594                               getpid(), __FUNCTION__, __LINE__);
595         }
596
597         if (fd < 0) {
598                 struct udev *udev = NULL;
599                 struct udev_enumerate *e = NULL;
600                 struct udev_list_entry *entry = NULL;
601                 struct udev_device *device = NULL, *drm_device = NULL, *device_parent = NULL;
602                 const char *filepath;
603                 struct stat s;
604                 int ret;
605
606                 TBM_EXYNOS_LOG("[libtbm-exynos:%d] "
607                               "%s:%d search drm-device by udev\n",
608                               getpid(), __FUNCTION__, __LINE__);
609
610                 udev = udev_new();
611                 if (!udev) {
612                         TBM_EXYNOS_LOG("udev_new() failed.\n");
613                         return -1;
614                 }
615
616                 e = udev_enumerate_new(udev);
617                 udev_enumerate_add_match_subsystem(e, "drm");
618                 udev_enumerate_add_match_sysname(e, "card[0-9]*");
619                 udev_enumerate_scan_devices(e);
620
621                 udev_list_entry_foreach(entry, udev_enumerate_get_list_entry(e)) {
622                         device = udev_device_new_from_syspath(udev_enumerate_get_udev(e),
623                                                               udev_list_entry_get_name(entry));
624                         device_parent = udev_device_get_parent(device);
625                         /* No need to unref device_parent; it shares device's reference count */
626                         if (device_parent) {
627                                 if (strcmp(udev_device_get_sysname(device_parent), "exynos-drm") == 0) {
628                                         drm_device = device;
629                                         DBG("[%s] Found render device: '%s' (%s)\n",
630                                             target_name(),
631                                             udev_device_get_syspath(drm_device),
632                                             udev_device_get_sysname(device_parent));
633                                         break;
634                                 }
635                         }
636                         udev_device_unref(device);
637                 }
638
639                 udev_enumerate_unref(e);
640
641                 /* Get device file path. */
642                 filepath = udev_device_get_devnode(drm_device);
643                 if (!filepath) {
644                         TBM_EXYNOS_LOG("udev_device_get_devnode() failed.\n");
645                         udev_device_unref(drm_device);
646                         udev_unref(udev);
647                         return -1;
648                 }
649
650                 /* Open DRM device file and check validity. */
651                 fd = open(filepath, O_RDWR | O_CLOEXEC);
652                 if (fd < 0) {
653                         TBM_EXYNOS_LOG("open(%s, O_RDWR | O_CLOEXEC) failed.\n", filepath);
654                         udev_device_unref(drm_device);
655                         udev_unref(udev);
656                         return -1;
657                 }
658
659                 ret = fstat(fd, &s);
660                 if (ret) {
661                         TBM_EXYNOS_LOG("fstat() failed (%s).\n", strerror(errno));
662                         close(fd);
663                         udev_device_unref(drm_device);
664                         udev_unref(udev);
665                         return -1;
666                 }
667
668                 udev_device_unref(drm_device);
669                 udev_unref(udev);
670         }
671
672         return fd;
673 }
674
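/*
 * Render-node support: _check_render_node() reports whether a renderD* node
 * backed by "exynos-drm" exists (always 0 unless USE_RENDER_NODE is defined),
 * and _get_render_node() opens that node.
 */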
675 static int
676 _check_render_node(void)
677 {
678         struct udev *udev = NULL;
679         struct udev_enumerate *e = NULL;
680         struct udev_list_entry *entry = NULL;
681         struct udev_device *device = NULL, *drm_device = NULL, *device_parent = NULL;
682
683 #ifndef USE_RENDER_NODE
684         return 0;
685 #endif
686
687         udev = udev_new();
688         if (!udev) {
689                 TBM_EXYNOS_LOG("udev_new() failed.\n");
690                 return -1;
691         }
692
693         e = udev_enumerate_new(udev);
694         udev_enumerate_add_match_subsystem(e, "drm");
695         udev_enumerate_add_match_sysname(e, "renderD[0-9]*");
696         udev_enumerate_scan_devices(e);
697
698         udev_list_entry_foreach(entry, udev_enumerate_get_list_entry(e)) {
699                 device = udev_device_new_from_syspath(udev_enumerate_get_udev(e),
700                                                       udev_list_entry_get_name(entry));
701                 device_parent = udev_device_get_parent(device);
702                 /* No need to unref device_parent; it shares device's reference count */
703                 if (device_parent) {
704                         if (strcmp(udev_device_get_sysname(device_parent), "exynos-drm") == 0) {
705                                 drm_device = device;
706                                 DBG("[%s] Found render device: '%s' (%s)\n",
707                                     target_name(),
708                                     udev_device_get_syspath(drm_device),
709                                     udev_device_get_sysname(device_parent));
710                                 break;
711                         }
712                 }
713                 udev_device_unref(device);
714         }
715
716         udev_enumerate_unref(e);
717         udev_unref(udev);
718
719         if (!drm_device) {
720                 udev_device_unref(drm_device);
721                 return 0;
722         }
723
724         udev_device_unref(drm_device);
725         return 1;
726 }
727
728 static int
729 _get_render_node(void)
730 {
731         struct udev *udev = NULL;
732         struct udev_enumerate *e = NULL;
733         struct udev_list_entry *entry = NULL;
734         struct udev_device *device = NULL, *drm_device = NULL, *device_parent = NULL;
735         const char *filepath;
736         struct stat s;
737         int fd = -1;
738         int ret;
739
740         udev = udev_new();
741         if (!udev) {
742                 TBM_EXYNOS_LOG("udev_new() failed.\n");
743                 return -1;
744         }
745
746         e = udev_enumerate_new(udev);
747         udev_enumerate_add_match_subsystem(e, "drm");
748         udev_enumerate_add_match_sysname(e, "renderD[0-9]*");
749         udev_enumerate_scan_devices(e);
750
751         udev_list_entry_foreach(entry, udev_enumerate_get_list_entry(e)) {
752                 device = udev_device_new_from_syspath(udev_enumerate_get_udev(e),
753                                                       udev_list_entry_get_name(entry));
754                 device_parent = udev_device_get_parent(device);
755                 /* No need to unref device_parent; it shares device's reference count */
756                 if (device_parent) {
757                         if (strcmp(udev_device_get_sysname(device_parent), "exynos-drm") == 0) {
758                                 drm_device = device;
759                                 DBG("[%s] Found render device: '%s' (%s)\n",
760                                     target_name(),
761                                     udev_device_get_syspath(drm_device),
762                                     udev_device_get_sysname(device_parent));
763                                 break;
764                         }
765                 }
766                 udev_device_unref(device);
767         }
768
769         udev_enumerate_unref(e);
770
771         /* Get device file path. */
772         filepath = udev_device_get_devnode(drm_device);
773         if (!filepath) {
774                 TBM_EXYNOS_LOG("udev_device_get_devnode() failed.\n");
775                 udev_device_unref(drm_device);
776                 udev_unref(udev);
777                 return -1;
778         }
779
780         /* Open DRM device file and check validity. */
781         fd = open(filepath, O_RDWR | O_CLOEXEC);
782         if (fd < 0) {
783                 TBM_EXYNOS_LOG("open(%s, O_RDWR | O_CLOEXEC) failed.\n", filepath);
784                 udev_device_unref(drm_device);
785                 udev_unref(udev);
786                 return -1;
787         }
788
789         ret = fstat(fd, &s);
790         if (ret) {
791                 TBM_EXYNOS_LOG("fstat() failed (%s).\n", strerror(errno));
792                 udev_device_unref(drm_device);
793                 udev_unref(udev);
794                 close(fd);
795                 return -1;
796         }
797
798         udev_device_unref(drm_device);
799         udev_unref(udev);
800
801         return fd;
802 }
803
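/*
 * Flag translation between TBM and exynos-drm allocation flags:
 * TBM_BO_SCANOUT maps to physically contiguous memory (EXYNOS_BO_CONTIG),
 * TBM_BO_WC to write-combined, TBM_BO_NONCACHABLE to non-cachable, and the
 * default is cachable, non-contiguous memory.  _get_tbm_flag_from_exynos()
 * performs the reverse mapping for imported buffers.
 */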
804 static unsigned int
805 _get_exynos_flag_from_tbm(unsigned int ftbm)
806 {
807         unsigned int flags = 0;
808
809         if (ftbm & TBM_BO_SCANOUT)
810                 flags |= EXYNOS_BO_CONTIG;
811         else
812                 flags |= EXYNOS_BO_NONCONTIG;
813
814         if (ftbm & TBM_BO_WC)
815                 flags |= EXYNOS_BO_WC;
816         else if (ftbm & TBM_BO_NONCACHABLE)
817                 flags |= EXYNOS_BO_NONCACHABLE;
818         else
819                 flags |= EXYNOS_BO_CACHABLE;
820
821         return flags;
822 }
823
824 static unsigned int
825 _get_tbm_flag_from_exynos(unsigned int fexynos)
826 {
827         unsigned int flags = 0;
828
829         if (fexynos & EXYNOS_BO_NONCONTIG)
830                 flags |= TBM_BO_DEFAULT;
831         else
832                 flags |= TBM_BO_SCANOUT;
833
834         if (fexynos & EXYNOS_BO_WC)
835                 flags |= TBM_BO_WC;
836         else if (fexynos & EXYNOS_BO_CACHABLE)
837                 flags |= TBM_BO_DEFAULT;
838         else
839                 flags |= TBM_BO_NONCACHABLE;
840
841         return flags;
842 }
843
844 static unsigned int
845 _get_name(int fd, unsigned int gem)
846 {
847         struct drm_gem_flink arg = {0,};
848
849         arg.handle = gem;
850         if (drmIoctl(fd, DRM_IOCTL_GEM_FLINK, &arg)) {
851                 TBM_EXYNOS_LOG("error fail to get flink from gem:%d (DRM_IOCTL_GEM_FLINK)\n",
852                                gem);
853                 return 0;
854         }
855
856         return (unsigned int)arg.name;
857 }
858
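/*
 * Resolve the per-device handle for a bo: DEFAULT/2D return the GEM handle,
 * CPU lazily mmaps the buffer through DRM_EXYNOS_GEM_MAP, and 3D/MM export
 * (and cache) a dma-buf fd via DRM_IOCTL_PRIME_HANDLE_TO_FD.
 */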
859 static tbm_bo_handle
860 _exynos_bo_handle(tbm_bo_exynos bo_exynos, int device)
861 {
862         tbm_bo_handle bo_handle;
863
864         memset(&bo_handle, 0x0, sizeof(bo_handle));
865
866         switch (device) {
867         case TBM_DEVICE_DEFAULT:
868         case TBM_DEVICE_2D:
869                 bo_handle.u32 = (uint32_t)bo_exynos->gem;
870                 break;
871         case TBM_DEVICE_CPU:
872                 if (!bo_exynos->pBase) {
873                         struct drm_exynos_gem_map arg = {0,};
874                         void *map = NULL;
875
876                         arg.handle = bo_exynos->gem;
877                         if (drmCommandWriteRead(bo_exynos->fd, DRM_EXYNOS_GEM_MAP, &arg,
878                                                 sizeof(arg))) {
879                                 TBM_EXYNOS_LOG("error Cannot map_dumb gem=%d\n", bo_exynos->gem);
880                                 return (tbm_bo_handle) NULL;
881                         }
882
883                         map = mmap(NULL, bo_exynos->size, PROT_READ | PROT_WRITE, MAP_SHARED,
884                                    bo_exynos->fd, arg.offset);
885                         if (map == MAP_FAILED) {
886                                 TBM_EXYNOS_LOG("error Cannot usrptr gem=%d\n", bo_exynos->gem);
887                                 return (tbm_bo_handle) NULL;
888                         }
889                         bo_exynos->pBase = map;
890                 }
891                 bo_handle.ptr = (void *)bo_exynos->pBase;
892                 break;
893         case TBM_DEVICE_3D:
894 #ifdef USE_DMAIMPORT
895                 if (bo_exynos->dmabuf) {
896                         bo_handle.u32 = (uint32_t)bo_exynos->dmabuf;
897                         break;
898                 }
899
900                 if (!bo_exynos->dmabuf) {
901                         struct drm_prime_handle arg = {0, };
902
903                         arg.handle = bo_exynos->gem;
904                         if (drmIoctl(bo_exynos->fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &arg)) {
905                                 TBM_EXYNOS_LOG("error Cannot dmabuf=%d\n", bo_exynos->gem);
906                                 return (tbm_bo_handle) NULL;
907                         }
908                         bo_exynos->dmabuf = arg.fd;
909                 }
910
911                 bo_handle.u32 = (uint32_t)bo_exynos->dmabuf;
912 #endif
913                 break;
914         case TBM_DEVICE_MM:
915                 if (!bo_exynos->dmabuf) {
916                         struct drm_prime_handle arg = {0, };
917
918                         arg.handle = bo_exynos->gem;
919                         if (drmIoctl(bo_exynos->fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &arg)) {
920                                 TBM_EXYNOS_LOG("error Cannot dmabuf=%d\n", bo_exynos->gem);
921                                 return (tbm_bo_handle) NULL;
922                         }
923                         bo_exynos->dmabuf = arg.fd;
924                 }
925
926                 bo_handle.u32 = (uint32_t)bo_exynos->dmabuf;
927                 break;
928         default:
929                 TBM_EXYNOS_LOG("error Not supported device:%d\n", device);
930                 bo_handle.ptr = (void *) NULL;
931                 break;
932         }
933
934         return bo_handle;
935 }
936
937 static int
938 tbm_exynos_bo_size(tbm_bo bo)
939 {
940         EXYNOS_RETURN_VAL_IF_FAIL(bo != NULL, 0);
941
942         tbm_bo_exynos bo_exynos;
943
944         bo_exynos = (tbm_bo_exynos)tbm_backend_get_bo_priv(bo);
945         EXYNOS_RETURN_VAL_IF_FAIL(bo_exynos != NULL, 0);
946
947         return bo_exynos->size;
948 }
949
950 static void *
951 tbm_exynos_bo_alloc(tbm_bo bo, int size, int flags)
952 {
953         EXYNOS_RETURN_VAL_IF_FAIL(bo != NULL, 0);
954
955         tbm_bo_exynos bo_exynos;
956         tbm_bufmgr_exynos bufmgr_exynos;
957         unsigned int exynos_flags;
958
959         bufmgr_exynos = (tbm_bufmgr_exynos)tbm_backend_get_bufmgr_priv(bo);
960         EXYNOS_RETURN_VAL_IF_FAIL(bufmgr_exynos != NULL, 0);
961
962         bo_exynos = calloc(1, sizeof(struct _tbm_bo_exynos));
963         if (!bo_exynos) {
964                 TBM_EXYNOS_LOG("error fail to allocate the bo private\n");
965                 return 0;
966         }
967
968         exynos_flags = _get_exynos_flag_from_tbm(flags);
969         if ((flags & TBM_BO_SCANOUT) &&
970             size <= 4 * 1024) {
971                 exynos_flags |= EXYNOS_BO_NONCONTIG;
972         }
973
974         struct drm_exynos_gem_create arg = {0, };
975
976         arg.size = (uint64_t)size;
977         arg.flags = exynos_flags;
978         if (drmCommandWriteRead(bufmgr_exynos->fd, DRM_EXYNOS_GEM_CREATE, &arg,
979                                 sizeof(arg))) {
980                 TBM_EXYNOS_LOG("error Cannot create bo(flag:%x, size:%d)\n", arg.flags,
981                                (unsigned int)arg.size);
982                 free(bo_exynos);
983                 return 0;
984         }
985
986         bo_exynos->fd = bufmgr_exynos->fd;
987         bo_exynos->gem = arg.handle;
988         bo_exynos->size = size;
989         bo_exynos->flags_tbm = flags;
990         bo_exynos->flags_exynos = exynos_flags;
991         bo_exynos->name = _get_name(bo_exynos->fd, bo_exynos->gem);
992
993         if (!_bo_init_cache_state(bufmgr_exynos, bo_exynos, 0)) {
994                 TBM_EXYNOS_LOG("error fail init cache state(%d)\n", bo_exynos->name);
995                 free(bo_exynos);
996                 return 0;
997         }
998
999         pthread_mutex_init(&bo_exynos->mutex, NULL);
1000
1001         if (bufmgr_exynos->use_dma_fence
1002             && !bo_exynos->dmabuf) {
1003                 struct drm_prime_handle arg = {0, };
1004
1005                 arg.handle = bo_exynos->gem;
1006                 if (drmIoctl(bo_exynos->fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &arg)) {
1007                         TBM_EXYNOS_LOG("error Cannot dmabuf=%d\n", bo_exynos->gem);
1008                         free(bo_exynos);
1009                         return 0;
1010                 }
1011                 bo_exynos->dmabuf = arg.fd;
1012         }
1013
1014         /* add bo to hash */
1015         PrivGem *privGem = calloc(1, sizeof(PrivGem));
1016
1017         if (!privGem) {
1018                 TBM_EXYNOS_LOG("[libtbm-exynos:%d] "
1019                                "error %s:%d Fail to calloc privGem\n",
1020                                getpid(), __func__, __LINE__);
1021                 free(bo_exynos);
1022                 return 0;
1023         }
1024
1025         privGem->ref_count = 1;
1026         privGem->bo_priv = bo_exynos;
1027
1028         if (drmHashInsert(bufmgr_exynos->hashBos, bo_exynos->name,
1029                           (void *)privGem) < 0) {
1030                 TBM_EXYNOS_LOG("error Cannot insert bo to Hash(%d)\n", bo_exynos->name);
1031         }
1032
1033         DBG("     [%s] bo:%p, gem:%d(%d), flags:%d(%d), size:%d\n", target_name(),
1034             bo,
1035             bo_exynos->gem, bo_exynos->name,
1036             flags, exynos_flags,
1037             bo_exynos->size);
1038
1039         return (void *)bo_exynos;
1040 }
1041
1042 static void
1043 tbm_exynos_bo_free(tbm_bo bo)
1044 {
1045         tbm_bo_exynos bo_exynos;
1046         tbm_bufmgr_exynos bufmgr_exynos;
1047
1048         if (!bo)
1049                 return;
1050
1051         bufmgr_exynos = (tbm_bufmgr_exynos)tbm_backend_get_bufmgr_priv(bo);
1052         EXYNOS_RETURN_IF_FAIL(bufmgr_exynos != NULL);
1053
1054         bo_exynos = (tbm_bo_exynos)tbm_backend_get_bo_priv(bo);
1055         EXYNOS_RETURN_IF_FAIL(bo_exynos != NULL);
1056
1057         DBG("      [%s] bo:%p, gem:%d(%d), fd:%d, size:%d\n", target_name(),
1058             bo,
1059             bo_exynos->gem, bo_exynos->name,
1060             bo_exynos->dmabuf,
1061             bo_exynos->size);
1062
1063         if (bo_exynos->pBase) {
1064                 if (munmap(bo_exynos->pBase, bo_exynos->size) == -1) {
1065                         TBM_EXYNOS_LOG("error bo:%p fail to munmap(%s)\n",
1066                                        bo, strerror(errno));
1067                 }
1068         }
1069
1070         /* close dmabuf */
1071         if (bo_exynos->dmabuf) {
1072                 close(bo_exynos->dmabuf);
1073                 bo_exynos->dmabuf = 0;
1074         }
1075
1076         /* delete bo from hash */
1077         PrivGem *privGem = NULL;
1078         int ret;
1079
1080         ret = drmHashLookup(bufmgr_exynos->hashBos, bo_exynos->name,
1081                              (void **)&privGem);
1082         if (ret == 0) {
1083                 privGem->ref_count--;
1084                 if (privGem->ref_count == 0) {
1085                         drmHashDelete(bufmgr_exynos->hashBos, bo_exynos->name);
1086                         free(privGem);
1087                         privGem = NULL;
1088                 }
1089         } else {
1090                 TBM_EXYNOS_LOG("warning Cannot find bo to Hash(%d), ret=%d\n", bo_exynos->name,
1091                                ret);
1092         }
1093
1094         _bo_destroy_cache_state(bufmgr_exynos, bo_exynos);
1095
1096         /* Free gem handle */
1097         struct drm_gem_close arg = {0, };
1098
1099         memset(&arg, 0, sizeof(arg));
1100         arg.handle = bo_exynos->gem;
1101         if (drmIoctl(bo_exynos->fd, DRM_IOCTL_GEM_CLOSE, &arg)) {
1102                 TBM_EXYNOS_LOG("error bo:%p fail to gem close.(%s)\n",
1103                                bo, strerror(errno));
1104         }
1105
1106         free(bo_exynos);
1107 }
1108
1109
1110 static void *
1111 tbm_exynos_bo_import(tbm_bo bo, unsigned int key)
1112 {
1113         EXYNOS_RETURN_VAL_IF_FAIL(bo != NULL, 0);
1114
1115         tbm_bufmgr_exynos bufmgr_exynos;
1116         tbm_bo_exynos bo_exynos;
1117         PrivGem *privGem = NULL;
1118         int ret;
1119
1120         bufmgr_exynos = (tbm_bufmgr_exynos)tbm_backend_get_bufmgr_priv(bo);
1121         EXYNOS_RETURN_VAL_IF_FAIL(bufmgr_exynos != NULL, 0);
1122
1123         ret = drmHashLookup(bufmgr_exynos->hashBos, key, (void **)&privGem);
1124         if (ret == 0)
1125                 return privGem->bo_priv;
1126
1127         struct drm_gem_open arg = {0, };
1128         struct drm_exynos_gem_info info = {0, };
1129
1130         arg.name = key;
1131         if (drmIoctl(bufmgr_exynos->fd, DRM_IOCTL_GEM_OPEN, &arg)) {
1132                 TBM_EXYNOS_LOG("error Cannot open gem name=%d\n", key);
1133                 return 0;
1134         }
1135
1136         info.handle = arg.handle;
1137         if (drmCommandWriteRead(bufmgr_exynos->fd,
1138                                 DRM_EXYNOS_GEM_GET,
1139                                 &info,
1140                                 sizeof(struct drm_exynos_gem_info))) {
1141                 TBM_EXYNOS_LOG("error Cannot get gem info=%d\n", key);
1142                 return 0;
1143         }
1144
1145         bo_exynos = calloc(1, sizeof(struct _tbm_bo_exynos));
1146         if (!bo_exynos) {
1147                 TBM_EXYNOS_LOG("error fail to allocate the bo private\n");
1148                 return 0;
1149         }
1150
1151         bo_exynos->fd = bufmgr_exynos->fd;
1152         bo_exynos->gem = arg.handle;
1153         bo_exynos->size = arg.size;
1154         bo_exynos->flags_exynos = info.flags;
1155         bo_exynos->name = key;
1156         bo_exynos->flags_tbm = _get_tbm_flag_from_exynos(bo_exynos->flags_exynos);
1157
1158         if (!_bo_init_cache_state(bufmgr_exynos, bo_exynos, 1)) {
1159                 TBM_EXYNOS_LOG("error fail init cache state(%d)\n", bo_exynos->name);
1160                 free(bo_exynos);
1161                 return 0;
1162         }
1163
1164         if (!bo_exynos->dmabuf) {
1165                 struct drm_prime_handle arg = {0, };
1166
1167                 arg.handle = bo_exynos->gem;
1168                 if (drmIoctl(bo_exynos->fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &arg)) {
1169                         TBM_EXYNOS_LOG("error Cannot dmabuf=%d\n", bo_exynos->gem);
1170                         free(bo_exynos);
1171                         return 0;
1172                 }
1173                 bo_exynos->dmabuf = arg.fd;
1174         }
1175
1176         /* add bo to hash */
1177         privGem = NULL;
1178
1179         privGem = calloc(1, sizeof(PrivGem));
1180         if (!privGem) {
1181                 TBM_EXYNOS_LOG("[libtbm-exynos:%d] "
1182                                 "error %s:%d Fail to calloc privGem\n",
1183                                 getpid(), __func__, __LINE__);
1184                 free(bo_exynos);
1185                 return 0;
1186         }
1187
1188         privGem->ref_count = 1;
1189         privGem->bo_priv = bo_exynos;
1190
1191         if (drmHashInsert(bufmgr_exynos->hashBos, bo_exynos->name,
1192                            (void *)privGem) < 0) {
1193                 TBM_EXYNOS_LOG("error Cannot insert bo to Hash(%d)\n", bo_exynos->name);
1194         }
1195
1196         DBG("    [%s] bo:%p, gem:%d(%d), fd:%d, flags:%d(%d), size:%d\n",
1197             target_name(),
1198             bo,
1199             bo_exynos->gem, bo_exynos->name,
1200             bo_exynos->dmabuf,
1201             bo_exynos->flags_tbm, bo_exynos->flags_exynos,
1202             bo_exynos->size);
1203
1204         return (void *)bo_exynos;
1205 }
1206
1207 static void *
1208 tbm_exynos_bo_import_fd(tbm_bo bo, tbm_fd key)
1209 {
1210         EXYNOS_RETURN_VAL_IF_FAIL(bo != NULL, 0);
1211
1212         tbm_bufmgr_exynos bufmgr_exynos;
1213         tbm_bo_exynos bo_exynos;
1214         PrivGem *privGem = NULL;
1215         unsigned int name;
1216         int ret;
1217
1218         bufmgr_exynos = (tbm_bufmgr_exynos)tbm_backend_get_bufmgr_priv(bo);
1219         EXYNOS_RETURN_VAL_IF_FAIL(bufmgr_exynos != NULL, 0);
1220
1221         /* get the gem handle from the prime fd */
1222         unsigned int gem = 0;
1223         struct drm_prime_handle arg = {0, };
1224
1225         arg.fd = key;
1226         arg.flags = 0;
1227         if (drmIoctl(bufmgr_exynos->fd, DRM_IOCTL_PRIME_FD_TO_HANDLE, &arg)) {
1228                 TBM_EXYNOS_LOG("error bo:%p Cannot get gem handle from fd:%d (%s)\n",
1229                                bo, arg.fd, strerror(errno));
1230                 return NULL;
1231         }
1232         gem = arg.handle;
1233
1234         name = _get_name(bufmgr_exynos->fd, gem);
1235         if (!name) {
1236                 TBM_EXYNOS_LOG("error bo:%p Cannot get name from gem:%d, fd:%d (%s)\n",
1237                                bo, gem, key, strerror(errno));
1238                 return 0;
1239         }
1240
1241         ret = drmHashLookup(bufmgr_exynos->hashBos, name, (void **)&privGem);
1242         if (ret == 0) {
1243                 if (gem == privGem->bo_priv->gem)
1244                         return privGem->bo_priv;
1245         }
1246
1247         unsigned int real_size = -1;
1248         struct drm_exynos_gem_info info = {0, };
1249
1250         /* Determine the size of the bo.  The fd-to-handle ioctl really should
1251          * return the size, but it doesn't.  On kernel 3.12 or later we can
1252          * lseek on the prime fd to get the size.  Older kernels will just
1253          * fail, in which case we fall back to the size reported by
1254          * DRM_EXYNOS_GEM_GET below.
1255          */
1256         real_size = lseek(key, 0, SEEK_END);
1257
1258         info.handle = gem;
1259         if (drmCommandWriteRead(bufmgr_exynos->fd,
1260                                 DRM_EXYNOS_GEM_GET,
1261                                 &info,
1262                                 sizeof(struct drm_exynos_gem_info))) {
1263                 TBM_EXYNOS_LOG("error bo:%p Cannot get gem info from gem:%d, fd:%d (%s)\n",
1264                                bo, gem, key, strerror(errno));
1265                 return 0;
1266         }
1267
1268         if (real_size == -1)
1269                 real_size = info.size;
1270
1271         bo_exynos = calloc(1, sizeof(struct _tbm_bo_exynos));
1272         if (!bo_exynos) {
1273                 TBM_EXYNOS_LOG("error bo:%p fail to allocate the bo private\n", bo);
1274                 return 0;
1275         }
1276
1277         bo_exynos->fd = bufmgr_exynos->fd;
1278         bo_exynos->gem = gem;
1279         bo_exynos->size = real_size;
1280         bo_exynos->flags_exynos = info.flags;
1281         bo_exynos->flags_tbm = _get_tbm_flag_from_exynos(bo_exynos->flags_exynos);
1282         bo_exynos->name = name;
1283
1284         if (!_bo_init_cache_state(bufmgr_exynos, bo_exynos, 1)) {
1285                 TBM_EXYNOS_LOG("error fail init cache state(%d)\n", bo_exynos->name);
1286                 free(bo_exynos);
1287                 return 0;
1288         }
1289
1290         /* add bo to hash */
1291         privGem = NULL;
1292
1293         privGem = calloc(1, sizeof(PrivGem));
1294         if (!privGem) {
1295                 TBM_EXYNOS_LOG("[libtbm-exynos:%d] "
1296                                "error %s:%d Fail to calloc privGem\n",
1297                                getpid(), __func__, __LINE__);
1298                 free(bo_exynos);
1299                 return 0;
1300         }
1301
1302         privGem->ref_count = 1;
1303         privGem->bo_priv = bo_exynos;
1304
1305         if (drmHashInsert(bufmgr_exynos->hashBos, bo_exynos->name,
1306                            (void *)privGem) < 0) {
1307                 TBM_EXYNOS_LOG("error bo:%p Cannot insert bo to Hash(%d) from gem:%d, fd:%d\n",
1308                                bo, bo_exynos->name, gem, key);
1309         }
1310
1311         DBG(" [%s] bo:%p, gem:%d(%d), fd:%d, key_fd:%d, flags:%d(%d), size:%d\n",
1312             target_name(),
1313             bo,
1314             bo_exynos->gem, bo_exynos->name,
1315             bo_exynos->dmabuf,
1316             key,
1317             bo_exynos->flags_tbm, bo_exynos->flags_exynos,
1318             bo_exynos->size);
1319
1320         return (void *)bo_exynos;
1321 }
1322
1323 static unsigned int
1324 tbm_exynos_bo_export(tbm_bo bo)
1325 {
1326         EXYNOS_RETURN_VAL_IF_FAIL(bo != NULL, 0);
1327
1328         tbm_bo_exynos bo_exynos;
1329
1330         bo_exynos = (tbm_bo_exynos)tbm_backend_get_bo_priv(bo);
1331         EXYNOS_RETURN_VAL_IF_FAIL(bo_exynos != NULL, 0);
1332
1333         if (!bo_exynos->name) {
1334                 bo_exynos->name = _get_name(bo_exynos->fd, bo_exynos->gem);
1335                 if (!bo_exynos->name) {
1336                         TBM_EXYNOS_LOG("error Cannot get name\n");
1337                         return 0;
1338                 }
1339         }
1340
1341         DBG("    [%s] bo:%p, gem:%d(%d), fd:%d, flags:%d(%d), size:%d\n",
1342             target_name(),
1343             bo,
1344             bo_exynos->gem, bo_exynos->name,
1345             bo_exynos->dmabuf,
1346             bo_exynos->flags_tbm, bo_exynos->flags_exynos,
1347             bo_exynos->size);
1348
1349         return (unsigned int)bo_exynos->name;
1350 }
1351
1352 tbm_fd
1353 tbm_exynos_bo_export_fd(tbm_bo bo)
1354 {
1355         EXYNOS_RETURN_VAL_IF_FAIL(bo != NULL, -1);
1356
1357         tbm_bo_exynos bo_exynos;
1358         int ret;
1359
1360         bo_exynos = (tbm_bo_exynos)tbm_backend_get_bo_priv(bo);
1361         EXYNOS_RETURN_VAL_IF_FAIL(bo_exynos != NULL, -1);
1362
1363         struct drm_prime_handle arg = {0, };
1364
1365         arg.handle = bo_exynos->gem;
1366         ret = drmIoctl(bo_exynos->fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &arg);
1367         if (ret) {
1368                 TBM_EXYNOS_LOG("error bo:%p Cannot dmabuf=%d (%s)\n",
1369                                bo, bo_exynos->gem, strerror(errno));
1370                 return (tbm_fd) ret;
1371         }
1372
1373         DBG(" [%s] bo:%p, gem:%d(%d), fd:%d, key_fd:%d, flags:%d(%d), size:%d\n",
1374             target_name(),
1375             bo,
1376             bo_exynos->gem, bo_exynos->name,
1377             bo_exynos->dmabuf,
1378             arg.fd,
1379             bo_exynos->flags_tbm, bo_exynos->flags_exynos,
1380             bo_exynos->size);
1381
1382         return (tbm_fd)arg.fd;
1383 }
1384
1385 static tbm_bo_handle
1386 tbm_exynos_bo_get_handle(tbm_bo bo, int device)
1387 {
1388         EXYNOS_RETURN_VAL_IF_FAIL(bo != NULL, (tbm_bo_handle) NULL);
1389
1390         tbm_bo_handle bo_handle;
1391         tbm_bo_exynos bo_exynos;
1392
1393         bo_exynos = (tbm_bo_exynos)tbm_backend_get_bo_priv(bo);
1394         EXYNOS_RETURN_VAL_IF_FAIL(bo_exynos != NULL, (tbm_bo_handle) NULL);
1395
1396         if (!bo_exynos->gem) {
1397                 TBM_EXYNOS_LOG("error Cannot map gem=%d\n", bo_exynos->gem);
1398                 return (tbm_bo_handle) NULL;
1399         }
1400
1401         DBG("[%s] bo:%p, gem:%d(%d), fd:%d, flags:%d(%d), size:%d, %s\n",
1402             target_name(),
1403             bo,
1404             bo_exynos->gem, bo_exynos->name,
1405             bo_exynos->dmabuf,
1406             bo_exynos->flags_tbm, bo_exynos->flags_exynos,
1407             bo_exynos->size,
1408             STR_DEVICE[device]);
1409
1410         /*Get mapped bo_handle*/
1411         bo_handle = _exynos_bo_handle(bo_exynos, device);
1412         if (bo_handle.ptr == NULL) {
1413                 TBM_EXYNOS_LOG("error Cannot get handle: gem:%d, device:%d\n", bo_exynos->gem,
1414                                device);
1415                 return (tbm_bo_handle) NULL;
1416         }
1417
1418         return bo_handle;
1419 }
1420
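/*
 * bo_map/bo_unmap pair: the first map updates the cache state (and flushes if
 * needed) for the requested device, and the last unmap saves the state back
 * to the bo's tgl entry; with ENABLE_CACHECRTL a CPU unmap additionally
 * flushes all caches.
 */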
1421 static tbm_bo_handle
1422 tbm_exynos_bo_map(tbm_bo bo, int device, int opt)
1423 {
1424         EXYNOS_RETURN_VAL_IF_FAIL(bo != NULL, (tbm_bo_handle) NULL);
1425
1426         tbm_bo_handle bo_handle;
1427         tbm_bo_exynos bo_exynos;
1428         tbm_bufmgr_exynos bufmgr_exynos;
1429
1430         bufmgr_exynos = (tbm_bufmgr_exynos)tbm_backend_get_bufmgr_priv(bo);
1431         EXYNOS_RETURN_VAL_IF_FAIL(bufmgr_exynos != NULL, (tbm_bo_handle)NULL);
1432
1433         bo_exynos = (tbm_bo_exynos)tbm_backend_get_bo_priv(bo);
1434         EXYNOS_RETURN_VAL_IF_FAIL(bo_exynos != NULL, (tbm_bo_handle) NULL);
1435
1436         if (!bo_exynos->gem) {
1437                 TBM_EXYNOS_LOG("error Cannot map gem=%d\n", bo_exynos->gem);
1438                 return (tbm_bo_handle) NULL;
1439         }
1440
1441         DBG("       [%s] bo:%p, gem:%d(%d), fd:%d, %s, %s\n", target_name(),
1442             bo,
1443             bo_exynos->gem, bo_exynos->name,
1444             bo_exynos->dmabuf,
1445             STR_DEVICE[device],
1446             STR_OPT[opt]);
1447
1448         /*Get mapped bo_handle*/
1449         bo_handle = _exynos_bo_handle(bo_exynos, device);
1450         if (bo_handle.ptr == NULL) {
1451                 TBM_EXYNOS_LOG("error Cannot get handle: gem:%d, device:%d, opt:%d\n",
1452                                bo_exynos->gem, device, opt);
1453                 return (tbm_bo_handle) NULL;
1454         }
1455
1456         if (bo_exynos->map_cnt == 0)
1457                 _bo_set_cache_state(bufmgr_exynos, bo_exynos, device, opt);
1458
1459         bo_exynos->last_map_device = device;
1460
1461         bo_exynos->map_cnt++;
1462
1463         return bo_handle;
1464 }
1465
1466 static int
1467 tbm_exynos_bo_unmap(tbm_bo bo)
1468 {
1469         EXYNOS_RETURN_VAL_IF_FAIL(bo != NULL, 0);
1470
1471         tbm_bo_exynos bo_exynos;
1472         tbm_bufmgr_exynos bufmgr_exynos;
1473
1474         bufmgr_exynos = (tbm_bufmgr_exynos)tbm_backend_get_bufmgr_priv(bo);
1475         EXYNOS_RETURN_VAL_IF_FAIL(bufmgr_exynos != NULL, 0);
1476
1477         bo_exynos = (tbm_bo_exynos)tbm_backend_get_bo_priv(bo);
1478         EXYNOS_RETURN_VAL_IF_FAIL(bo_exynos != NULL, 0);
1479
1480
1481         if (!bo_exynos->gem)
1482                 return 0;
1483
1484         bo_exynos->map_cnt--;
1485
1486         if (bo_exynos->map_cnt == 0)
1487                 _bo_save_cache_state(bufmgr_exynos, bo_exynos);
1488
1489 #ifdef ENABLE_CACHECRTL
1490         if (bo_exynos->last_map_device == TBM_DEVICE_CPU)
1491                 _exynos_cache_flush(bufmgr_exynos, bo_exynos, TBM_EXYNOS_CACHE_FLUSH_ALL);
1492 #endif
1493
1494         bo_exynos->last_map_device = -1;
1495
1496         DBG("     [%s] bo:%p, gem:%d(%d), fd:%d\n", target_name(),
1497             bo,
1498             bo_exynos->gem, bo_exynos->name,
1499             bo_exynos->dmabuf);
1500
1501         return 1;
1502 }
1503
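/*
 * bo_lock/bo_unlock (compiled out when ALWAYS_BACKEND_CTRL is defined):
 * 3D access is serialized with kernel dma-buf fences
 * (DMABUF_IOCTL_GET_FENCE/PUT_FENCE), CPU access with an advisory file lock
 * on the dma-buf fd.  Acquired fences are queued in the bo's dma_fence[]
 * list and released in FIFO order on unlock.
 */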
1504 static int
1505 tbm_exynos_bo_lock(tbm_bo bo, int device, int opt)
1506 {
1507         EXYNOS_RETURN_VAL_IF_FAIL(bo != NULL, 0);
1508
1509 #ifndef ALWAYS_BACKEND_CTRL
1510         tbm_bufmgr_exynos bufmgr_exynos;
1511         tbm_bo_exynos bo_exynos;
1512         struct dma_buf_fence fence;
1513         struct flock filelock;
1514         int ret = 0;
1515
1516         if (device != TBM_DEVICE_3D && device != TBM_DEVICE_CPU) {
1517                 DBG("[libtbm-exynos:%d] %s not support device type,\n", getpid(),
1518                     __func__);
1519                 return 0;
1520         }
1521
1522         bo_exynos = (tbm_bo_exynos)tbm_backend_get_bo_priv(bo);
1523         EXYNOS_RETURN_VAL_IF_FAIL(bo_exynos != NULL, 0);
1524
1525         bufmgr_exynos = (tbm_bufmgr_exynos)tbm_backend_get_bufmgr_priv(bo);
1526         EXYNOS_RETURN_VAL_IF_FAIL(bufmgr_exynos != NULL, 0);
1527
1528         memset(&fence, 0, sizeof(struct dma_buf_fence));
1529
1530         /* Check if the given type is valid or not. */
1531         if (opt & TBM_OPTION_WRITE) {
1532                 if (device == TBM_DEVICE_3D)
1533                         fence.type = DMA_BUF_ACCESS_WRITE | DMA_BUF_ACCESS_DMA;
1534         } else if (opt & TBM_OPTION_READ) {
1535                 if (device == TBM_DEVICE_3D)
1536                         fence.type = DMA_BUF_ACCESS_READ | DMA_BUF_ACCESS_DMA;
1537         } else {
1538                 TBM_EXYNOS_LOG("error Invalid argument\n");
1539                 return 0;
1540         }
1541
1542         /* Check if the tbm manager supports dma fence or not. */
1543         if (!bufmgr_exynos->use_dma_fence) {
1544                 TBM_EXYNOS_LOG("error Not support DMA FENCE(%s)\n", strerror(errno));
1545                 return 0;
1546
1547         }
1548
1549         if (device == TBM_DEVICE_3D) {
1550                 ret = ioctl(bo_exynos->dmabuf, DMABUF_IOCTL_GET_FENCE, &fence);
1551                 if (ret < 0) {
1552                         TBM_EXYNOS_LOG("error Cannot set GET FENCE(%s)\n", strerror(errno));
1553                         return 0;
1554                 }
1555         } else {
1556                 if (opt & TBM_OPTION_WRITE)
1557                         filelock.l_type = F_WRLCK;
1558                 else
1559                         filelock.l_type = F_RDLCK;
1560
1561                 filelock.l_whence = SEEK_CUR;
1562                 filelock.l_start = 0;
1563                 filelock.l_len = 0;
1564
1565                 if (-1 == fcntl(bo_exynos->dmabuf, F_SETLKW, &filelock))
1566                         return 0;
1567         }
1568
1569         pthread_mutex_lock(&bo_exynos->mutex);
1570
1571         if (device == TBM_DEVICE_3D) {
1572                 int i;
1573
1574                 for (i = 0; i < DMA_FENCE_LIST_MAX; i++) {
1575                         if (bo_exynos->dma_fence[i].ctx == 0) {
1576                                 bo_exynos->dma_fence[i].type = fence.type;
1577                                 bo_exynos->dma_fence[i].ctx = fence.ctx;
1578                                 break;
1579                         }
1580                 }
1581
1582                 if (i == DMA_FENCE_LIST_MAX) {
1583                         /*TODO: if dma_fence list is full, it needs realloc. I will fix this. by minseok3.kim*/
1584                         TBM_EXYNOS_LOG("error fence list is full\n");
1585                 }
1586         }
1587
1588         pthread_mutex_unlock(&bo_exynos->mutex);
1589
1590         DBG("[%s] DMABUF_IOCTL_GET_FENCE! bo:%p, gem:%d(%d), fd:%d\n", target_name(),
1591             bo,
1592             bo_exynos->gem, bo_exynos->name,
1593             bo_exynos->dmabuf);
1594 #endif /* ALWAYS_BACKEND_CTRL */
1595
1596         return 1;
1597 }
1598
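/*
 * Unlock a buffer object: release the oldest recorded dma-buf fence via
 * DMABUF_IOCTL_PUT_FENCE for 3D locks, or drop the advisory file lock for
 * CPU locks. Compiled out, like bo_lock, when ALWAYS_BACKEND_CTRL is defined.
 */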
1599 static int
1600 tbm_exynos_bo_unlock(tbm_bo bo)
1601 {
1602         EXYNOS_RETURN_VAL_IF_FAIL(bo != NULL, 0);
1603
1604 #ifndef ALWAYS_BACKEND_CTRL
1605         tbm_bo_exynos bo_exynos;
1606         struct dma_buf_fence fence;
1607         struct flock filelock;
1608         unsigned int dma_type = 0;
1609         int ret = 0;
1610
1611         bo_exynos = (tbm_bo_exynos)tbm_backend_get_bo_priv(bo);
1612         EXYNOS_RETURN_VAL_IF_FAIL(bo_exynos != NULL, 0);
1613
1614         if (bo_exynos->dma_fence[0].type & DMA_BUF_ACCESS_DMA)
1615                 dma_type = 1;
1616
1617         if (!bo_exynos->dma_fence[0].ctx && dma_type) {
1618                 DBG("[libtbm-exynos:%d] %s FENCE not support or ignored,\n", getpid(),
1619                     __func__);
1620                 return 0;
1621         }
1622
1623         if (!bo_exynos->dma_fence[0].ctx && !dma_type) {
1624                 DBG("[libtbm-exynos:%d] %s device type is not 3D/CPU,\n", getpid(),
1625                     __func__);
1626                 return 0;
1627         }
1628
1629         pthread_mutex_lock(&bo_exynos->mutex);
1630
1631         if (dma_type) {
1632                 fence.type = bo_exynos->dma_fence[0].type;
1633                 fence.ctx = bo_exynos->dma_fence[0].ctx;
1634                 int i;
1635
1636                 for (i = 1; i < DMA_FENCE_LIST_MAX; i++) {
1637                         bo_exynos->dma_fence[i - 1].type = bo_exynos->dma_fence[i].type;
1638                         bo_exynos->dma_fence[i - 1].ctx = bo_exynos->dma_fence[i].ctx;
1639                 }
1640                 bo_exynos->dma_fence[DMA_FENCE_LIST_MAX - 1].type = 0;
1641                 bo_exynos->dma_fence[DMA_FENCE_LIST_MAX - 1].ctx = 0;
1642         }
1643         pthread_mutex_unlock(&bo_exynos->mutex);
1644
1645         if (dma_type) {
1646                 ret = ioctl(bo_exynos->dmabuf, DMABUF_IOCTL_PUT_FENCE, &fence);
1647                 if (ret < 0) {
1648                         TBM_EXYNOS_LOG("error Cannot set PUT FENCE(%s)\n", strerror(errno));
1649                         return 0;
1650                 }
1651         } else {
1652                 filelock.l_type = F_UNLCK;
1653                 filelock.l_whence = SEEK_CUR;
1654                 filelock.l_start = 0;
1655                 filelock.l_len = 0;
1656
1657                 if (-1 == fcntl(bo_exynos->dmabuf, F_SETLKW, &filelock))
1658                         return 0;
1659         }
1660
1661         DBG("[%s] DMABUF_IOCTL_PUT_FENCE! bo:%p, gem:%d(%d), fd:%d\n", target_name(),
1662             bo,
1663             bo_exynos->gem, bo_exynos->name,
1664             bo_exynos->dmabuf);
1665 #endif /* ALWAYS_BACKEND_CTRL */
1666
1667         return 1;
1668 }
1669
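/*
 * Tear down the backend: free any bos still tracked in the hash table,
 * release the cache-state bookkeeping, stop the wayland auth server if it
 * was started, and close the DRM fd.
 */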
1670 static void
1671 tbm_exynos_bufmgr_deinit(void *priv)
1672 {
1673         EXYNOS_RETURN_IF_FAIL(priv != NULL);
1674
1675         tbm_bufmgr_exynos bufmgr_exynos;
1676
1677         bufmgr_exynos = (tbm_bufmgr_exynos)priv;
1678
1679         if (bufmgr_exynos->hashBos) {
1680                 unsigned long key;
1681                 void *value;
1682
1683                 while (drmHashFirst(bufmgr_exynos->hashBos, &key, &value) > 0) {
1684                         free(value);
1685                         drmHashDelete(bufmgr_exynos->hashBos, key);
1686                 }
1687
1688                 drmHashDestroy(bufmgr_exynos->hashBos);
1689                 bufmgr_exynos->hashBos = NULL;
1690         }
1691
1692         _bufmgr_deinit_cache_state(bufmgr_exynos);
1693
1694         if (bufmgr_exynos->bind_display)
1695                 tbm_drm_helper_wl_auth_server_deinit();
1696
1697         if (bufmgr_exynos->device_name)
1698                 free(bufmgr_exynos->device_name);
1699
1700         if (tbm_backend_is_display_server())
1701                 tbm_drm_helper_unset_tbm_master_fd();
1702
1703         close(bufmgr_exynos->fd);
1704
1705         free(bufmgr_exynos);
1706 }
1707
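/* Return a newly allocated copy of the color-format list supported by this
 * backend; the caller takes ownership of the array. */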
1708 int
1709 tbm_exynos_surface_supported_format(uint32_t **formats, uint32_t *num)
1710 {
1711         uint32_t *color_formats = NULL;
1712
1713         color_formats = (uint32_t *)calloc(1,
1714                                            sizeof(uint32_t) * TBM_COLOR_FORMAT_COUNT);
1715
1716         if (color_formats == NULL)
1717                 return 0;
1718
1719         memcpy(color_formats, tbm_exynos_color_format_list,
1720                sizeof(uint32_t)*TBM_COLOR_FORMAT_COUNT);
1721
1722         *formats = color_formats;
1723         *num = TBM_COLOR_FORMAT_COUNT;
1724
1725         fprintf(stderr, "tbm_exynos_surface_supported_format count = %d\n", *num);
1726
1727         return 1;
1728 }
1729
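/*
 * NV12 plane-size helpers. The sizes below follow the S5P MFC (FIMV)
 * alignment macros, presumably so that surfaces can be shared directly with
 * the hardware codec; get_plane_data takes the MAX of the old and new
 * calculations for each NV12 plane.
 */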
1730 static int
1731 _new_calc_plane_nv12(int width, int height)
1732 {
1733         int mbX, mbY;
1734
1735         mbX = DIV_ROUND_UP(width, S5P_FIMV_NUM_PIXELS_IN_MB_ROW);
1736         mbY = DIV_ROUND_UP(height, S5P_FIMV_NUM_PIXELS_IN_MB_COL);
1737
1738         if (width * height < S5P_FIMV_MAX_FRAME_SIZE)
1739                 mbY = (mbY + 1) / 2 * 2;
1740
1741         return ((mbX * S5P_FIMV_NUM_PIXELS_IN_MB_COL) * (mbY *
1742                         S5P_FIMV_NUM_PIXELS_IN_MB_ROW));
1743 }
1744
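/* Y-plane size from the padded, FIMV-aligned width and height. */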
1745 static int
1746 _calc_yplane_nv12(int width, int height)
1747 {
1748         int mbX, mbY;
1749
1750         mbX = SIZE_ALIGN(width + 24, S5P_FIMV_NV12MT_HALIGN);
1751         mbY = SIZE_ALIGN(height + 16, S5P_FIMV_NV12MT_VALIGN);
1752
1753         return SIZE_ALIGN(mbX * mbY, S5P_FIMV_DEC_BUF_ALIGN);
1754 }
1755
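/* UV-plane size: half of the padded, FIMV-aligned luma area. */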
1756 static int
1757 _calc_uvplane_nv12(int width, int height)
1758 {
1759         int mbX, mbY;
1760
1761         mbX = SIZE_ALIGN(width + 16, S5P_FIMV_NV12MT_HALIGN);
1762         mbY = SIZE_ALIGN(height + 4, S5P_FIMV_NV12MT_VALIGN);
1763
1764         return SIZE_ALIGN((mbX * mbY) >> 1, S5P_FIMV_DEC_BUF_ALIGN);
1765 }
1766
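/* Y-plane size: macroblock-derived plane size plus decoder padding, aligned
 * for TBM NV12 planes. */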
1767 static int
1768 _new_calc_yplane_nv12(int width, int height)
1769 {
1770         return SIZE_ALIGN(_new_calc_plane_nv12(width,
1771                                                 height) + S5P_FIMV_D_ALIGN_PLANE_SIZE,
1772                            TBM_SURFACE_ALIGNMENT_PLANE_NV12);
1773 }
1774
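/* UV-plane size: half the macroblock-derived plane size plus decoder padding,
 * aligned for TBM NV12 planes. */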
1775 static int
1776 _new_calc_uvplane_nv12(int width, int height)
1777 {
1778         return SIZE_ALIGN((_new_calc_plane_nv12(width,
1779                                                 height) >> 1) + S5P_FIMV_D_ALIGN_PLANE_SIZE,
1780                           TBM_SURFACE_ALIGNMENT_PLANE_NV12);
1781 }
1782
1783 /**
1784  * @brief get the plane data of the surface.
1785  * @param[in] width : the width of the surface
1786  * @param[in] height : the height of the surface
1787  * @param[in] format : the format of the surface
1788  * @param[in] plane_idx : the plane index of the surface
1789  * @param[out] size : the size of the plane
1790  * @param[out] offset : the offset of the plane
1791  * @param[out] pitch : the pitch of the plane
1792  * @param[out] bo_idx : the bo index of the plane
1793  * @return 1 if this function succeeds, otherwise 0.
1794  */
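/*
 * A minimal caller-side sketch (illustrative only, not part of this backend):
 * for a two-plane format such as TBM_FORMAT_NV12, each plane would be queried
 * in turn, e.g.
 *
 *     uint32_t size, offset, pitch;
 *     int bo_idx;
 *     tbm_exynos_surface_get_plane_data(width, height, TBM_FORMAT_NV12,
 *                                       plane_idx, &size, &offset, &pitch,
 *                                       &bo_idx);
 */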
1795 int
1796 tbm_exynos_surface_get_plane_data(int width, int height,
1797                                   tbm_format format, int plane_idx, uint32_t *size, uint32_t *offset,
1798                                   uint32_t *pitch, int *bo_idx)
1799 {
1800         int ret = 1;
1801         int bpp;
1802         int _offset = 0;
1803         int _pitch = 0;
1804         int _size = 0;
1805         int _bo_idx = 0;
1806
1807         switch (format) {
1808                 /* 16 bpp RGB */
1809         case TBM_FORMAT_XRGB4444:
1810         case TBM_FORMAT_XBGR4444:
1811         case TBM_FORMAT_RGBX4444:
1812         case TBM_FORMAT_BGRX4444:
1813         case TBM_FORMAT_ARGB4444:
1814         case TBM_FORMAT_ABGR4444:
1815         case TBM_FORMAT_RGBA4444:
1816         case TBM_FORMAT_BGRA4444:
1817         case TBM_FORMAT_XRGB1555:
1818         case TBM_FORMAT_XBGR1555:
1819         case TBM_FORMAT_RGBX5551:
1820         case TBM_FORMAT_BGRX5551:
1821         case TBM_FORMAT_ARGB1555:
1822         case TBM_FORMAT_ABGR1555:
1823         case TBM_FORMAT_RGBA5551:
1824         case TBM_FORMAT_BGRA5551:
1825         case TBM_FORMAT_RGB565:
1826                 bpp = 16;
1827                 _offset = 0;
1828                 _pitch = SIZE_ALIGN((width * bpp) >> 3, TBM_SURFACE_ALIGNMENT_PITCH_RGB);
1829                 _size = SIZE_ALIGN(_pitch * height, TBM_SURFACE_ALIGNMENT_PLANE);
1830                 _bo_idx = 0;
1831                 break;
1832                 /* 24 bpp RGB */
1833         case TBM_FORMAT_RGB888:
1834         case TBM_FORMAT_BGR888:
1835                 bpp = 24;
1836                 _offset = 0;
1837                 _pitch = SIZE_ALIGN((width * bpp) >> 3, TBM_SURFACE_ALIGNMENT_PITCH_RGB);
1838                 _size = SIZE_ALIGN(_pitch * height, TBM_SURFACE_ALIGNMENT_PLANE);
1839                 _bo_idx = 0;
1840                 break;
1841                 /* 32 bpp RGB */
1842         case TBM_FORMAT_XRGB8888:
1843         case TBM_FORMAT_XBGR8888:
1844         case TBM_FORMAT_RGBX8888:
1845         case TBM_FORMAT_BGRX8888:
1846         case TBM_FORMAT_ARGB8888:
1847         case TBM_FORMAT_ABGR8888:
1848         case TBM_FORMAT_RGBA8888:
1849         case TBM_FORMAT_BGRA8888:
1850                 bpp = 32;
1851                 _offset = 0;
1852                 _pitch = SIZE_ALIGN((width * bpp) >> 3, TBM_SURFACE_ALIGNMENT_PITCH_RGB);
1853                 _size = SIZE_ALIGN(_pitch * height, TBM_SURFACE_ALIGNMENT_PLANE);
1854                 _bo_idx = 0;
1855                 break;
1856
1857                 /* packed YCbCr */
1858         case TBM_FORMAT_YUYV:
1859         case TBM_FORMAT_YVYU:
1860         case TBM_FORMAT_UYVY:
1861         case TBM_FORMAT_VYUY:
1862         case TBM_FORMAT_AYUV:
1863                 bpp = 32;
1864                 _offset = 0;
1865                 _pitch = SIZE_ALIGN((width * bpp) >> 3, TBM_SURFACE_ALIGNMENT_PITCH_YUV);
1866                 _size = SIZE_ALIGN(_pitch * height, TBM_SURFACE_ALIGNMENT_PLANE);
1867                 _bo_idx = 0;
1868                 break;
1869
1870                 /*
1871                 * 2 plane YCbCr
1872                 * index 0 = Y plane, [7:0] Y
1873                 * index 1 = Cr:Cb plane, [15:0] Cr:Cb little endian
1874                 * or
1875                 * index 1 = Cb:Cr plane, [15:0] Cb:Cr little endian
1876                 */
1877         case TBM_FORMAT_NV12:
1878                 bpp = 12;
1879                 if (plane_idx == 0) {
1880                         _offset = 0;
1881                         _pitch = SIZE_ALIGN(width, TBM_SURFACE_ALIGNMENT_PITCH_YUV);
1882                         _size = MAX(_calc_yplane_nv12(width, height), _new_calc_yplane_nv12(width,
1883                                         height));
1884                         _bo_idx = 0;
1885                 } else if (plane_idx == 1) {
1886                         _offset = 0;
1887                         _pitch = SIZE_ALIGN(width, TBM_SURFACE_ALIGNMENT_PITCH_YUV / 2);
1888                         _size = MAX(_calc_uvplane_nv12(width, height), _new_calc_uvplane_nv12(width,
1889                                         height));
1890                         _bo_idx = 1;
1891                 }
1892                 break;
1893         case TBM_FORMAT_NV21:
1894                 bpp = 12;
1895                 if (plane_idx == 0) {
1896                         _offset = 0;
1897                         _pitch = SIZE_ALIGN(width, TBM_SURFACE_ALIGNMENT_PITCH_YUV);
1898                         _size = SIZE_ALIGN(_pitch * height, TBM_SURFACE_ALIGNMENT_PLANE);
1899                         _bo_idx = 0;
1900                 } else if (plane_idx == 1) {
1901                         _offset = width * height;
1902                         _pitch = SIZE_ALIGN(width, TBM_SURFACE_ALIGNMENT_PITCH_YUV / 2);
1903                         _size = SIZE_ALIGN(_pitch * (height / 2), TBM_SURFACE_ALIGNMENT_PLANE);
1904                         _bo_idx = 0;
1905                 }
1906                 break;
1907
1908         case TBM_FORMAT_NV16:
1909         case TBM_FORMAT_NV61:
1910                 bpp = 16;
1911                 /*if(plane_idx == 0)*/
1912                 {
1913                         _offset = 0;
1914                         _pitch = SIZE_ALIGN(width, TBM_SURFACE_ALIGNMENT_PITCH_YUV);
1915                         _size = SIZE_ALIGN(_pitch * height, TBM_SURFACE_ALIGNMENT_PLANE);
1916                         _bo_idx = 0;
1917                         if (plane_idx == 0)
1918                                 break;
1919                 }
1920                 /*else if( plane_idx ==1 )*/
1921                 {
1922                         _offset += _size;
1923                         _pitch = SIZE_ALIGN(width, TBM_SURFACE_ALIGNMENT_PITCH_YUV / 2);
1924                         _size = SIZE_ALIGN(_pitch * height, TBM_SURFACE_ALIGNMENT_PLANE);
1925                         _bo_idx = 0;
1926                 }
1927                 break;
1928
1929                 /*
1930                 * 3 plane YCbCr
1931                 * index 0: Y plane, [7:0] Y
1932                 * index 1: Cb plane, [7:0] Cb
1933                 * index 2: Cr plane, [7:0] Cr
1934                 * or
1935                 * index 1: Cr plane, [7:0] Cr
1936                 * index 2: Cb plane, [7:0] Cb
1937                 */
1938
1939                 /*
1940                 * NATIVE_BUFFER_FORMAT_YV12
1941                 * NATIVE_BUFFER_FORMAT_I420
1942                 */
1943         case TBM_FORMAT_YUV410:
1944         case TBM_FORMAT_YVU410:
1945                 bpp = 9;
1946                 /*if(plane_idx == 0)*/
1947                 {
1948                         _offset = 0;
1949                         _pitch = SIZE_ALIGN(width, TBM_SURFACE_ALIGNMENT_PITCH_YUV);
1950                         _size = SIZE_ALIGN(_pitch * height, TBM_SURFACE_ALIGNMENT_PLANE);
1951                         _bo_idx = 0;
1952                         if (plane_idx == 0)
1953                                 break;
1954                 }
1955                 /*else if(plane_idx == 1)*/
1956                 {
1957                         _offset += _size;
1958                         _pitch = SIZE_ALIGN(width / 4, TBM_SURFACE_ALIGNMENT_PITCH_YUV / 4);
1959                         _size = SIZE_ALIGN(_pitch * (height / 4), TBM_SURFACE_ALIGNMENT_PLANE);
1960                         _bo_idx = 0;
1961                         if (plane_idx == 1)
1962                                 break;
1963                 }
1964                 /*else if (plane_idx == 2)*/
1965                 {
1966                         _offset += _size;
1967                         _pitch = SIZE_ALIGN(width / 4, TBM_SURFACE_ALIGNMENT_PITCH_YUV / 4);
1968                         _size = SIZE_ALIGN(_pitch * (height / 4), TBM_SURFACE_ALIGNMENT_PLANE);
1969                         _bo_idx = 0;
1970                 }
1971                 break;
1972         case TBM_FORMAT_YUV411:
1973         case TBM_FORMAT_YVU411:
1974         case TBM_FORMAT_YUV420:
1975         case TBM_FORMAT_YVU420:
1976                 bpp = 12;
1977                 /*if(plane_idx == 0)*/
1978                 {
1979                         _offset = 0;
1980                         _pitch = SIZE_ALIGN(width, TBM_SURFACE_ALIGNMENT_PITCH_YUV);
1981                         _size = SIZE_ALIGN(_pitch * height, TBM_SURFACE_ALIGNMENT_PLANE);
1982                         _bo_idx = 0;
1983                         if (plane_idx == 0)
1984                                 break;
1985                 }
1986                 /*else if(plane_idx == 1)*/
1987                 {
1988                         _offset += _size;
1989                         _pitch = SIZE_ALIGN(width / 2, TBM_SURFACE_ALIGNMENT_PITCH_YUV / 2);
1990                         _size = SIZE_ALIGN(_pitch * (height / 2), TBM_SURFACE_ALIGNMENT_PLANE);
1991                         _bo_idx = 0;
1992                         if (plane_idx == 1)
1993                                 break;
1994                 }
1995                 /*else if (plane_idx == 2)*/
1996                 {
1997                         _offset += _size;
1998                         _pitch = SIZE_ALIGN(width / 2, TBM_SURFACE_ALIGNMENT_PITCH_YUV / 2);
1999                         _size = SIZE_ALIGN(_pitch * (height / 2), TBM_SURFACE_ALIGNMENT_PLANE);
2000                         _bo_idx = 0;
2001                 }
2002                 break;
2003         case TBM_FORMAT_YUV422:
2004         case TBM_FORMAT_YVU422:
2005                 bpp = 16;
2006                 /*if(plane_idx == 0)*/
2007                 {
2008                         _offset = 0;
2009                         _pitch = SIZE_ALIGN(width, TBM_SURFACE_ALIGNMENT_PITCH_YUV);
2010                         _size = SIZE_ALIGN(_pitch * height, TBM_SURFACE_ALIGNMENT_PLANE);
2011                         _bo_idx = 0;
2012                         if (plane_idx == 0)
2013                                 break;
2014                 }
2015                 /*else if(plane_idx == 1)*/
2016                 {
2017                         _offset += _size;
2018                         _pitch = SIZE_ALIGN(width / 2, TBM_SURFACE_ALIGNMENT_PITCH_YUV / 2);
2019                         _size = SIZE_ALIGN(_pitch * (height), TBM_SURFACE_ALIGNMENT_PLANE);
2020                         _bo_idx = 0;
2021                         if (plane_idx == 1)
2022                                 break;
2023                 }
2024                 /*else if (plane_idx == 2)*/
2025                 {
2026                         _offset += _size;
2027                         _pitch = SIZE_ALIGN(width / 2, TBM_SURFACE_ALIGNMENT_PITCH_YUV / 2);
2028                         _size = SIZE_ALIGN(_pitch * (height), TBM_SURFACE_ALIGNMENT_PLANE);
2029                         _bo_idx = 0;
2030                 }
2031                 break;
2032         case TBM_FORMAT_YUV444:
2033         case TBM_FORMAT_YVU444:
2034                 bpp = 24;
2035                 /*if(plane_idx == 0)*/
2036                 {
2037                         _offset = 0;
2038                         _pitch = SIZE_ALIGN(width, TBM_SURFACE_ALIGNMENT_PITCH_YUV);
2039                         _size = SIZE_ALIGN(_pitch * height, TBM_SURFACE_ALIGNMENT_PLANE);
2040                         _bo_idx = 0;
2041                         if (plane_idx == 0)
2042                                 break;
2043                 }
2044                 /*else if(plane_idx == 1)*/
2045                 {
2046                         _offset += _size;
2047                         _pitch = SIZE_ALIGN(width, TBM_SURFACE_ALIGNMENT_PITCH_YUV);
2048                         _size = SIZE_ALIGN(_pitch * height, TBM_SURFACE_ALIGNMENT_PLANE);
2049                         _bo_idx = 0;
2050                         if (plane_idx == 1)
2051                                 break;
2052                 }
2053                 /*else if (plane_idx == 2)*/
2054                 {
2055                         _offset += _size;
2056                         _pitch = SIZE_ALIGN(width, TBM_SURFACE_ALIGNMENT_PITCH_YUV);
2057                         _size = SIZE_ALIGN(_pitch * height, TBM_SURFACE_ALIGNMENT_PLANE);
2058                         _bo_idx = 0;
2059                 }
2060                 break;
2061         default:
2062                 bpp = 0;
2063                 break;
2064         }
2065
2066         *size = _size;
2067         *offset = _offset;
2068         *pitch = _pitch;
2069         *bo_idx = _bo_idx;
2070
2071         return ret;
2072 }
2073
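/* Return the TBM memory flags the bo was allocated or imported with. */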
2074 int
2075 tbm_exynos_bo_get_flags(tbm_bo bo)
2076 {
2077         EXYNOS_RETURN_VAL_IF_FAIL(bo != NULL, 0);
2078
2079         tbm_bo_exynos bo_exynos;
2080
2081         bo_exynos = (tbm_bo_exynos)tbm_backend_get_bo_priv(bo);
2082         EXYNOS_RETURN_VAL_IF_FAIL(bo_exynos != NULL, 0);
2083
2084         return bo_exynos->flags_tbm;
2085 }
2086
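/*
 * Bind the native (wayland) display so that clients can authenticate against
 * this backend's DRM device through the tbm_drm_helper auth server.
 */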
2087 int
2088 tbm_exynos_bufmgr_bind_native_display(tbm_bufmgr bufmgr, void *native_display)
2089 {
2090         tbm_bufmgr_exynos bufmgr_exynos;
2091
2092         bufmgr_exynos = tbm_backend_get_priv_from_bufmgr(bufmgr);
2093         EXYNOS_RETURN_VAL_IF_FAIL(bufmgr_exynos != NULL, 0);
2094
2095         if (!tbm_drm_helper_wl_auth_server_init(native_display, bufmgr_exynos->fd,
2096                                            bufmgr_exynos->device_name, 0)) {
2097                 TBM_EXYNOS_LOG("[libtbm-exynos:%d] error: Fail to tbm_drm_helper_wl_auth_server_init\n", getpid());
2098                 return 0;
2099         }
2100
2101         bufmgr_exynos->bind_display = native_display;
2102
2103         return 1;
2104 }
2105
2106 MODULEINITPPROTO(init_tbm_bufmgr_priv);
2107
2108 static TBMModuleVersionInfo ExynosVersRec = {
2109         "exynos",
2110         "Samsung",
2111         TBM_ABI_VERSION,
2112 };
2113
2114 TBMModuleData tbmModuleData = { &ExynosVersRec, init_tbm_bufmgr_priv};
2115
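/*
 * Backend entry point. Opens (or receives) the DRM fd depending on whether
 * the process is the display server and whether a render node is usable,
 * probes dma-buf fence support, sets up the cache-state bookkeeping and the
 * bo hash table, and registers the backend callbacks with libtbm.
 */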
2116 int
2117 init_tbm_bufmgr_priv(tbm_bufmgr bufmgr, int fd)
2118 {
2119         tbm_bufmgr_backend bufmgr_backend;
2120         tbm_bufmgr_exynos bufmgr_exynos;
2121         int fp;
2122
2123         if (!bufmgr)
2124                 return 0;
2125
2126         bufmgr_exynos = calloc(1, sizeof(struct _tbm_bufmgr_exynos));
2127         if (!bufmgr_exynos) {
2128                 TBM_EXYNOS_LOG("error: Fail to alloc bufmgr_exynos!\n");
2129                 return 0;
2130         }
2131
2132         if (tbm_backend_is_display_server()) {
2133                 bufmgr_exynos->fd = tbm_drm_helper_get_master_fd();
2134                 if (bufmgr_exynos->fd < 0) {
2135                         bufmgr_exynos->fd = _tbm_exynos_open_drm();
2136                         if (bufmgr_exynos->fd < 0) {
2137                                 TBM_EXYNOS_LOG("[libtbm-exynos:%d] error: Fail to open drm!\n", getpid());
2138                                 goto fail_open_drm;
2139                         }
2140                 }
2141
2142                 tbm_drm_helper_set_tbm_master_fd(bufmgr_exynos->fd);
2143
2144                 bufmgr_exynos->device_name = drmGetDeviceNameFromFd(bufmgr_exynos->fd);
2145                 if (!bufmgr_exynos->device_name) {
2146                         TBM_EXYNOS_LOG("[libtbm-exynos:%d] error: Fail to get device name!\n", getpid());
2147
2148                         tbm_drm_helper_unset_tbm_master_fd();
2149                         goto fail_get_device_name;
2150                 }
2151         } else {
2152                 if (_check_render_node()) {
2153                         bufmgr_exynos->fd = _get_render_node();
2154                         if (bufmgr_exynos->fd < 0) {
2155                                 TBM_EXYNOS_LOG("[%s] get render node failed\n", target_name());
2156                                 goto fail_get_render_node;
2157                         }
2158                         DBG("[%s] Use render node:%d\n", target_name(), bufmgr_exynos->fd);
2159                 } else {
2160                         if (!tbm_drm_helper_get_auth_info(&(bufmgr_exynos->fd), &(bufmgr_exynos->device_name), NULL)) {
2161                                 TBM_EXYNOS_LOG("[libtbm-exynos:%d] error: Fail to get auth drm info!\n", getpid());
2162                                 goto fail_get_auth_info;
2163                         }
2164                 }
2165         }
2166
2167         /* Check if the tbm manager supports dma fence or not. */
2168         fp = open("/sys/module/dmabuf_sync/parameters/enabled", O_RDONLY);
2169         if (fp != -1) {
2170                 char buf[1];
2171                 int length = read(fp, buf, 1);
2172
2173                 if (length == 1 && buf[0] == '1')
2174                         bufmgr_exynos->use_dma_fence = 1;
2175
2176                 close(fp);
2177         }
2178
2179         if (!_bufmgr_init_cache_state(bufmgr_exynos)) {
2180                 TBM_EXYNOS_LOG("[libtbm-exynos:%d] error: Fail to init bufmgr cache state\n", getpid());
2181                 goto fail_init_cache_state;
2182         }
2183
2184         /*Create Hash Table*/
2185         bufmgr_exynos->hashBos = drmHashCreate();
2186
2187         bufmgr_backend = tbm_backend_alloc();
2188         if (!bufmgr_backend) {
2189                 TBM_EXYNOS_LOG("error: Fail to alloc backend!\n");
2190                 goto fail_alloc_backend;
2191         }
2192
2193         bufmgr_backend->priv = (void *)bufmgr_exynos;
2194         bufmgr_backend->bufmgr_deinit = tbm_exynos_bufmgr_deinit;
2195         bufmgr_backend->bo_size = tbm_exynos_bo_size;
2196         bufmgr_backend->bo_alloc = tbm_exynos_bo_alloc;
2197         bufmgr_backend->bo_free = tbm_exynos_bo_free;
2198         bufmgr_backend->bo_import = tbm_exynos_bo_import;
2199         bufmgr_backend->bo_import_fd = tbm_exynos_bo_import_fd;
2200         bufmgr_backend->bo_export = tbm_exynos_bo_export;
2201         bufmgr_backend->bo_export_fd = tbm_exynos_bo_export_fd;
2202         bufmgr_backend->bo_get_handle = tbm_exynos_bo_get_handle;
2203         bufmgr_backend->bo_map = tbm_exynos_bo_map;
2204         bufmgr_backend->bo_unmap = tbm_exynos_bo_unmap;
2205         bufmgr_backend->surface_get_plane_data = tbm_exynos_surface_get_plane_data;
2206         bufmgr_backend->surface_supported_format = tbm_exynos_surface_supported_format;
2207         bufmgr_backend->bo_get_flags = tbm_exynos_bo_get_flags;
2208         bufmgr_backend->bo_lock = tbm_exynos_bo_lock;
2209         bufmgr_backend->bo_unlock = tbm_exynos_bo_unlock;
2210
2211         if (tbm_backend_is_display_server() && !_check_render_node())
2212                 bufmgr_backend->bufmgr_bind_native_display = tbm_exynos_bufmgr_bind_native_display;
2213
2214         if (!tbm_backend_init(bufmgr, bufmgr_backend)) {
2215                 TBM_EXYNOS_LOG("error: Fail to init backend!\n");
2216                 goto fail_init_backend;
2217         }
2218
2219 #ifdef DEBUG
2220         {
2221                 char *env;
2222
2223                 env = getenv("TBM_EXYNOS_DEBUG");
2224                 if (env) {
2225                         bDebug = atoi(env);
2226                         TBM_EXYNOS_LOG("TBM_EXYNOS_DEBUG=%s\n", env);
2227                 } else
2228                         bDebug = 0;
2229         }
2230 #endif
2231
2232         DBG("[%s] drm_fd:%d\n", target_name(), bufmgr_exynos->fd);
2233
2234         return 1;
2235
2236 fail_init_backend:
2237         tbm_backend_free(bufmgr_backend);
2238 fail_alloc_backend:
2239         if (bufmgr_exynos->hashBos)
2240                 drmHashDestroy(bufmgr_exynos->hashBos);
2241         _bufmgr_deinit_cache_state(bufmgr_exynos);
2242 fail_init_cache_state:
2243         if (tbm_backend_is_display_server())
2244                 tbm_drm_helper_unset_tbm_master_fd();
2245 fail_get_device_name:
2246         close(bufmgr_exynos->fd);
2247 fail_get_auth_info:
2248 fail_get_render_node:
2249 fail_open_drm:
2250         free(bufmgr_exynos);
2251         return 0;
2252 }
2253