fix handle leak error
[archive/platform/adaptation/samsung_exynos/libtbm-exynos4412.git] / src / tbm_bufmgr_exynos4412.c
1 /**************************************************************************
2
3 libtbm_exynos
4
5 Copyright 2012 Samsung Electronics co., Ltd. All Rights Reserved.
6
7 Contact: SooChan Lim <sc1.lim@samsung.com>, Sangjin Lee <lsj119@samsung.com>
8
9 Permission is hereby granted, free of charge, to any person obtaining a
10 copy of this software and associated documentation files (the
11 "Software"), to deal in the Software without restriction, including
12 without limitation the rights to use, copy, modify, merge, publish,
13 distribute, sub license, and/or sell copies of the Software, and to
14 permit persons to whom the Software is furnished to do so, subject to
15 the following conditions:
16
17 The above copyright notice and this permission notice (including the
18 next paragraph) shall be included in all copies or substantial portions
19 of the Software.
20
21 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
22 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
23 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
24 IN NO EVENT SHALL PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR
25 ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
26 TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
27 SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
28
29 **************************************************************************/
30
31 #ifdef HAVE_CONFIG_H
32 #include "config.h"
33 #endif
34
35 #include <stdio.h>
36 #include <stdlib.h>
37 #include <stdint.h>
38 #include <string.h>
39 #include <sys/ioctl.h>
40 #include <sys/types.h>
41 #include <unistd.h>
42 #include <sys/mman.h>
43 #include <sys/stat.h>
44 #include <fcntl.h>
45 #include <errno.h>
46 #include <xf86drm.h>
47 #include <tbm_bufmgr.h>
48 #include <tbm_bufmgr_backend.h>
49 #include <exynos_drm.h>
50 #include <pthread.h>
51 #include <tbm_surface.h>
52 #include <tbm_surface_internal.h>
53 #include <tbm_drm_helper.h>
54
55 #include <libudev.h>
56
57 #include "tbm_bufmgr_tgl.h"
58
59 #define DEBUG
60 #define USE_DMAIMPORT
61 #define TBM_COLOR_FORMAT_COUNT 8
62
63 #define EXYNOS_DRM_NAME "exynos"
64
65 #ifdef DEBUG
66 #define LOG_TAG "TBM_BACKEND"
67 #include <dlog.h>
68 static int bDebug;
69
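/* Return the basename of the calling process, read once from
 * /proc/self/cmdline and cached in a static buffer; used as the
 * per-process prefix in the debug logs below.
 */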
70 char *target_name()
71 {
72         FILE *f;
73         char *slash;
74         static int initialized = 0;
75         static char app_name[128];
76
77         if (initialized)
78                 return app_name;
79
80         /* get the application name */
81         f = fopen("/proc/self/cmdline", "r");
82
83         if (!f)
84                 return 0;
85
86         memset(app_name, 0x00, sizeof(app_name));
87
88         if (fgets(app_name, 100, f) == NULL) {
89                 fclose(f);
90                 return 0;
91         }
92
93         fclose(f);
94
95         slash = strrchr(app_name, '/');
96         if (slash != NULL)
97                 memmove(app_name, slash + 1, strlen(slash));
98
99         initialized = 1;
100
101         return app_name;
102 }
103
104 #define TBM_EXYNOS_LOG(fmt, args...) LOGE("\033[31m"  "[%s]" fmt "\033[0m", target_name(), ##args)
105 #define DBG(fmt, args...)  {if (bDebug&01) LOGE(fmt, ##args);}
106 #else
107 #define TBM_EXYNOS_LOG(...)
108 #define DBG(...)
109 #endif
110
111 #define SIZE_ALIGN(value, base) (((value) + ((base) - 1)) & ~((base) - 1))
112 #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
113 #define MAX(a, b) ((a) > (b) ? (a) : (b))
114
115 #ifdef ALIGN_EIGHT
116 #define TBM_SURFACE_ALIGNMENT_PLANE (8)
117 #define TBM_SURFACE_ALIGNMENT_PITCH_RGB (8)
118 #else
119 #define TBM_SURFACE_ALIGNMENT_PLANE (64)
120 #define TBM_SURFACE_ALIGNMENT_PITCH_RGB (64)
121 #endif
122
123 #define TBM_SURFACE_ALIGNMENT_PLANE_NV12 (4096)
124 #define TBM_SURFACE_ALIGNMENT_PITCH_YUV (16)
125
126 #define SZ_1M                                   0x00100000
127 #define S5P_FIMV_MAX_FRAME_SIZE                 (2 * SZ_1M)
128 #define S5P_FIMV_D_ALIGN_PLANE_SIZE             64
129 #define S5P_FIMV_NUM_PIXELS_IN_MB_ROW           16
130 #define S5P_FIMV_NUM_PIXELS_IN_MB_COL           16
131 #define S5P_FIMV_DEC_BUF_ALIGN                  (8 * 1024)
132 #define S5P_FIMV_NV12MT_HALIGN                  128
133 #define S5P_FIMV_NV12MT_VALIGN                  64
134
135 /* check condition */
136 #define EXYNOS_RETURN_IF_FAIL(cond) {\
137         if (!(cond)) {\
138                 TBM_EXYNOS_LOG("[%s] : '%s' failed.\n", __func__, #cond);\
139                 return;\
140         } \
141 }
142
143 #define EXYNOS_RETURN_VAL_IF_FAIL(cond, val) {\
144         if (!(cond)) {\
145                 TBM_EXYNOS_LOG("[%s] : '%s' failed.\n", __func__, #cond);\
146                 return val;\
147         } \
148 }
149
150 struct dma_buf_info {
151         unsigned long   size;
152         unsigned int    fence_supported;
153         unsigned int    padding;
154 };
155
156 #define DMA_BUF_ACCESS_READ             0x1
157 #define DMA_BUF_ACCESS_WRITE            0x2
158 #define DMA_BUF_ACCESS_DMA              0x4
159 #define DMA_BUF_ACCESS_MAX              0x8
160
161 #define DMA_FENCE_LIST_MAX              5
162
163 struct dma_buf_fence {
164         unsigned long           ctx;
165         unsigned int            type;
166 };
167
168 #define DMABUF_IOCTL_BASE       'F'
169 #define DMABUF_IOWR(nr, type)   _IOWR(DMABUF_IOCTL_BASE, nr, type)
170
171 #define DMABUF_IOCTL_GET_INFO   DMABUF_IOWR(0x00, struct dma_buf_info)
172 #define DMABUF_IOCTL_GET_FENCE  DMABUF_IOWR(0x01, struct dma_buf_fence)
173 #define DMABUF_IOCTL_PUT_FENCE  DMABUF_IOWR(0x02, struct dma_buf_fence)
174
175 /* tgl key values */
176 #define GLOBAL_KEY   ((unsigned int)(-1))
177 /* TBM_CACHE */
178 #define TBM_EXYNOS_CACHE_INV       0x01 /**< cache invalidate  */
179 #define TBM_EXYNOS_CACHE_CLN       0x02 /**< cache clean */
180 #define TBM_EXYNOS_CACHE_ALL       0x10 /**< cache all */
181 #define TBM_EXYNOS_CACHE_FLUSH     (TBM_EXYNOS_CACHE_INV|TBM_EXYNOS_CACHE_CLN) /**< cache flush  */
182 #define TBM_EXYNOS_CACHE_FLUSH_ALL (TBM_EXYNOS_CACHE_FLUSH|TBM_EXYNOS_CACHE_ALL)        /**< cache flush all */
183
184 enum {
185         DEVICE_NONE = 0,
186         DEVICE_CA,                                      /* cache aware device */
187         DEVICE_CO                                       /* cache oblivious device */
188 };
189
190 typedef union _tbm_bo_cache_state tbm_bo_cache_state;
191
192 union _tbm_bo_cache_state {
193         unsigned int val;
194         struct {
195                 unsigned int cntFlush: 16;      /*Flush all index for sync */
196                 unsigned int isCached: 1;
197                 unsigned int isDirtied: 2;
198         } data;
199 };
200
201 typedef struct _tbm_bufmgr_exynos *tbm_bufmgr_exynos;
202 typedef struct _tbm_bo_exynos *tbm_bo_exynos;
203
204 typedef struct _exynos_private {
205         int ref_count;
206         struct _tbm_bo_exynos *bo_priv;
207 } PrivGem;
208
209 /* tbm buffer object for exynos */
210 struct _tbm_bo_exynos {
211         int fd;
212
213         unsigned int name;    /* FLINK ID */
214
215         unsigned int gem;     /* GEM Handle */
216
217         unsigned int dmabuf;  /* fd for dmabuf */
218
219         void *pBase;          /* virtual address */
220
221         unsigned int size;
222
223         unsigned int flags_exynos;
224         unsigned int flags_tbm;
225
226         PrivGem *private;
227
228         pthread_mutex_t mutex;
229         struct dma_buf_fence dma_fence[DMA_FENCE_LIST_MAX];
230         int device;
231         int opt;
232
233         tbm_bo_cache_state cache_state;
234         unsigned int map_cnt;
235 };
236
237 /* tbm bufmgr private for exynos */
238 struct _tbm_bufmgr_exynos {
239         int fd;
240         int isLocal;
241         void *hashBos;
242
243         int use_dma_fence;
244
245         int tgl_fd;
246
247         char *device_name;
248         void *bind_display;
249 };
250
251 char *STR_DEVICE[] = {
252         "DEF",
253         "CPU",
254         "2D",
255         "3D",
256         "MM"
257 };
258
259 char *STR_OPT[] = {
260         "NONE",
261         "RD",
262         "WR",
263         "RDWR"
264 };
265
266
267 uint32_t tbm_exynos_color_format_list[TBM_COLOR_FORMAT_COUNT] = {   TBM_FORMAT_RGBA8888,
268                                                                     TBM_FORMAT_BGRA8888,
269                                                                     TBM_FORMAT_RGBX8888,
270                                                                     TBM_FORMAT_RGB888,
271                                                                     TBM_FORMAT_NV12,
272                                                                     TBM_FORMAT_NV21,
273                                                                     TBM_FORMAT_YUV420,
274                                                                     TBM_FORMAT_YVU420
275                                                                 };
276
277 #ifdef ENABLE_CACHECRTL
278 static inline int
279 _tgl_init(int fd, unsigned int key)
280 {
281         struct tgl_attribute attr;
282         int err;
283
284         attr.key = key;
285         attr.timeout_ms = 1000;
286
287         err = ioctl(fd, TGL_IOC_INIT_LOCK, &attr);
288         if (err) {
289                 TBM_EXYNOS_LOG("[libtbm:%d] "
290                                "error(%s) %s:%d key:%d\n",
291                                getpid(), strerror(errno), __func__, __LINE__, key);
292                 return 0;
293         }
294
295         return 1;
296 }
297
298 static inline int
299 _tgl_destroy(int fd, unsigned int key)
300 {
301         int err;
302
303         err = ioctl(fd, TGL_IOC_DESTROY_LOCK, key);
304         if (err) {
305                 TBM_EXYNOS_LOG("[libtbm:%d] "
306                                "error(%s) %s:%d key:%d\n",
307                                getpid(), strerror(errno), __func__, __LINE__, key);
308                 return 0;
309         }
310
311         return 1;
312 }
313 static inline int
314 _tgl_set_data(int fd, unsigned int key, unsigned int val)
315 {
316         int err;
317
318         struct tgl_user_data arg;
319
320         arg.key = key;
321         arg.data1 = val;
322         err = ioctl(fd, TGL_IOC_SET_DATA, &arg);
323         if (err) {
324                 TBM_EXYNOS_LOG("[libtbm:%d] "
325                                "error(%s) %s:%d key:%d\n",
326                                getpid(), strerror(errno), __func__, __LINE__, key);
327                 return 0;
328         }
329
330         return 1;
331 }
332
333 static inline unsigned int
334 _tgl_get_data(int fd, unsigned int key)
335 {
336         int err;
337         struct tgl_user_data arg = { 0, };
338
339         arg.key = key;
340         err = ioctl(fd, TGL_IOC_GET_DATA, &arg);
341         if (err) {
342                 TBM_EXYNOS_LOG("[libtbm:%d] "
343                                "error(%s) %s:%d key:%d\n",
344                                getpid(), strerror(errno), __func__, __LINE__, key);
345                 return 0;
346         }
347
348         return arg.data1;
349 }
350
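/* Flush CPU caches for a bo (or all caches when TBM_EXYNOS_CACHE_ALL is
 * set) via DRM_EXYNOS_GEM_CACHE_OP.  Skipped when dma-fence is in use,
 * because the kernel keeps the caches coherent in that case.
 */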
351 static int
352 _exynos_cache_flush(tbm_bufmgr_exynos bufmgr_exynos, tbm_bo_exynos bo_exynos, int flags)
353 {
354         EXYNOS_RETURN_VAL_IF_FAIL(bufmgr_exynos != NULL, 0);
355
356         /* cache flush is managed by kernel side when using dma-fence. */
357         if (bufmgr_exynos->use_dma_fence)
358                 return 1;
359
360         EXYNOS_RETURN_VAL_IF_FAIL(bo_exynos != NULL, 0);
361
362         struct drm_exynos_gem_cache_op cache_op = {0, };
363         int ret;
364
365         /* if bo_exynos is null, do cache_flush_all */
366         if (bo_exynos) {
367                 cache_op.flags = 0;
368                 cache_op.usr_addr = (uint64_t)((uint32_t)bo_exynos->pBase);
369                 cache_op.size = bo_exynos->size;
370         } else {
371                 flags = TBM_EXYNOS_CACHE_FLUSH_ALL;
372                 cache_op.flags = 0;
373                 cache_op.usr_addr = 0;
374                 cache_op.size = 0;
375         }
376
377         if (flags & TBM_EXYNOS_CACHE_INV) {
378                 if (flags & TBM_EXYNOS_CACHE_ALL)
379                         cache_op.flags |= EXYNOS_DRM_CACHE_INV_ALL;
380                 else
381                         cache_op.flags |= EXYNOS_DRM_CACHE_INV_RANGE;
382         }
383
384         if (flags & TBM_EXYNOS_CACHE_CLN) {
385                 if (flags & TBM_EXYNOS_CACHE_ALL)
386                         cache_op.flags |= EXYNOS_DRM_CACHE_CLN_ALL;
387                 else
388                         cache_op.flags |= EXYNOS_DRM_CACHE_CLN_RANGE;
389         }
390
391         if (flags & TBM_EXYNOS_CACHE_ALL)
392                 cache_op.flags |= EXYNOS_DRM_ALL_CACHES_CORES;
393
394         ret = drmCommandWriteRead(bufmgr_exynos->fd, DRM_EXYNOS_GEM_CACHE_OP, &cache_op,
395                                   sizeof(cache_op));
396         if (ret) {
397                 TBM_EXYNOS_LOG("error fail to flush the cache.\n");
398                 return 0;
399         }
400
401         return 1;
402 }
403 #endif
404
405 static int
406 _bo_init_cache_state(tbm_bufmgr_exynos bufmgr_exynos, tbm_bo_exynos bo_exynos, int import)
407 {
408 #ifdef ENABLE_CACHECRTL
409         EXYNOS_RETURN_VAL_IF_FAIL(bufmgr_exynos != NULL, 0);
410         EXYNOS_RETURN_VAL_IF_FAIL(bo_exynos != NULL, 0);
411
412         if (bufmgr_exynos->use_dma_fence)
413                 return 1;
414
415         tbm_bo_cache_state cache_state;
416
417         _tgl_init(bufmgr_exynos->tgl_fd, bo_exynos->name);
418
419         if (import == 0) {
420                 cache_state.data.isDirtied = DEVICE_NONE;
421                 cache_state.data.isCached = 0;
422                 cache_state.data.cntFlush = 0;
423
424                 _tgl_set_data(bufmgr_exynos->tgl_fd, bo_exynos->name, cache_state.val);
425         }
426 #endif
427
428         return 1;
429 }
430
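/* Update the per-bo cache state kept in the tgl device and decide whether
 * a flush is needed before this access: an invalidate before CPU access,
 * or a clean of all caches before device access, depending on which side
 * last dirtied the bo and on the global flush counter under GLOBAL_KEY.
 */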
431 static int
432 _bo_set_cache_state(tbm_bufmgr_exynos bufmgr_exynos, tbm_bo_exynos bo_exynos, int device, int opt)
433 {
434 #ifdef ENABLE_CACHECRTL
435         EXYNOS_RETURN_VAL_IF_FAIL(bufmgr_exynos != NULL, 0);
436         EXYNOS_RETURN_VAL_IF_FAIL(bo_exynos != NULL, 0);
437
438         if (bufmgr_exynos->use_dma_fence)
439                 return 1;
440
441         char need_flush = 0;
442         unsigned short cntFlush = 0;
443
444         if (bo_exynos->flags_exynos & EXYNOS_BO_NONCACHABLE)
445                 return 1;
446
447         /* get cache state of a bo */
448         bo_exynos->cache_state.val = _tgl_get_data(bufmgr_exynos->tgl_fd,
449                                      bo_exynos->name);
450
451         /* get global cache flush count */
452         cntFlush = (unsigned short)_tgl_get_data(bufmgr_exynos->tgl_fd, GLOBAL_KEY);
453
454         if (device == TBM_DEVICE_CPU) {
455                 if (bo_exynos->cache_state.data.isDirtied == DEVICE_CO &&
456                     bo_exynos->cache_state.data.isCached)
457                         need_flush = TBM_EXYNOS_CACHE_INV;
458
459                 bo_exynos->cache_state.data.isCached = 1;
460                 if (opt & TBM_OPTION_WRITE)
461                         bo_exynos->cache_state.data.isDirtied = DEVICE_CA;
462                 else {
463                         if (bo_exynos->cache_state.data.isDirtied != DEVICE_CA)
464                                 bo_exynos->cache_state.data.isDirtied = DEVICE_NONE;
465                 }
466         } else {
467                 if (bo_exynos->cache_state.data.isDirtied == DEVICE_CA &&
468                     bo_exynos->cache_state.data.isCached &&
469                     bo_exynos->cache_state.data.cntFlush == cntFlush)
470                         need_flush = TBM_EXYNOS_CACHE_CLN | TBM_EXYNOS_CACHE_ALL;
471
472                 if (opt & TBM_OPTION_WRITE)
473                         bo_exynos->cache_state.data.isDirtied = DEVICE_CO;
474                 else {
475                         if (bo_exynos->cache_state.data.isDirtied != DEVICE_CO)
476                                 bo_exynos->cache_state.data.isDirtied = DEVICE_NONE;
477                 }
478         }
479
480         if (need_flush) {
481                 if (need_flush & TBM_EXYNOS_CACHE_ALL)
482                         _tgl_set_data(bufmgr_exynos->tgl_fd, GLOBAL_KEY, (unsigned int)(++cntFlush));
483
484                 /* call cache flush */
485                 _exynos_cache_flush(bufmgr_exynos, bo_exynos, need_flush);
486
487                 DBG("[libtbm:%d] \tcache(%d,%d)....flush:0x%x, cntFlush(%d)\n",
488                     getpid(),
489                     bo_exynos->cache_state.data.isCached,
490                     bo_exynos->cache_state.data.isDirtied,
491                     need_flush,
492                     cntFlush);
493         }
494 #endif
495
496         return 1;
497 }
498
499 static int
500 _bo_save_cache_state(tbm_bufmgr_exynos bufmgr_exynos, tbm_bo_exynos bo_exynos)
501 {
502 #ifdef ENABLE_CACHECRTL
503         EXYNOS_RETURN_VAL_IF_FAIL(bufmgr_exynos != NULL, 0);
504         EXYNOS_RETURN_VAL_IF_FAIL(bo_exynos != NULL, 0);
505
506         if (bufmgr_exynos->use_dma_fence)
507                 return 1;
508
509         unsigned short cntFlush = 0;
510
511         /* get global cache flush count */
512         cntFlush = (unsigned short)_tgl_get_data(bufmgr_exynos->tgl_fd, GLOBAL_KEY);
513
514         /* save global cache flush count */
515         bo_exynos->cache_state.data.cntFlush = cntFlush;
516         _tgl_set_data(bufmgr_exynos->tgl_fd, bo_exynos->name,
517                       bo_exynos->cache_state.val);
518 #endif
519
520         return 1;
521 }
522
523 static void
524 _bo_destroy_cache_state(tbm_bufmgr_exynos bufmgr_exynos, tbm_bo_exynos bo_exynos)
525 {
526 #ifdef ENABLE_CACHECRTL
527         EXYNOS_RETURN_IF_FAIL(bufmgr_exynos != NULL);
528         EXYNOS_RETURN_IF_FAIL(bo_exynos != NULL);
529
530         if (bufmgr_exynos->use_dma_fence)
531                 return ;
532
533         _tgl_destroy(bufmgr_exynos->tgl_fd, bo_exynos->name);
534 #endif
535 }
536
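/* Open the tgl device used for the shared cache-state bookkeeping and
 * initialize the entry for GLOBAL_KEY; a no-op when dma-fence is used.
 */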
537 static int
538 _bufmgr_init_cache_state(tbm_bufmgr_exynos bufmgr_exynos)
539 {
540 #ifdef ENABLE_CACHECRTL
541         EXYNOS_RETURN_VAL_IF_FAIL(bufmgr_exynos != NULL, 0);
542
543         if (bufmgr_exynos->use_dma_fence)
544                 return 1;
545
546         /* open tgl fd for saving cache flush data */
547         bufmgr_exynos->tgl_fd = open(tgl_devfile, O_RDWR);
548
549         if (bufmgr_exynos->tgl_fd < 0) {
550                 bufmgr_exynos->tgl_fd = open(tgl_devfile1, O_RDWR);
551                 if (bufmgr_exynos->tgl_fd < 0) {
552                         TBM_EXYNOS_LOG("[libtbm-exynos:%d] "
553                                        "error: Fail to open global_lock:%s\n",
554                                        getpid(), tgl_devfile);
555
557                         return 0;
558                 }
559         }
560
561         if (!_tgl_init(bufmgr_exynos->tgl_fd, GLOBAL_KEY)) {
562                 TBM_EXYNOS_LOG("[libtbm-exynos:%d] "
563                                "error: Fail to initialize the tgl\n",
564                                getpid());
565
566                 close(bufmgr_exynos->tgl_fd);
567                 return 0;
568         }
569 #endif
570
571         return 1;
572 }
573
574 static void
575 _bufmgr_deinit_cache_state(tbm_bufmgr_exynos bufmgr_exynos)
576 {
577 #ifdef ENABLE_CACHECRTL
578         EXYNOS_RETURN_IF_FAIL(bufmgr_exynos != NULL);
579
580         if (bufmgr_exynos->use_dma_fence)
581                 return;
582
583         if (bufmgr_exynos->tgl_fd >= 0)
584                 close(bufmgr_exynos->tgl_fd);
585 #endif
586 }
587
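/* Open the exynos DRM device.  Try drmOpen() first and, if that fails,
 * fall back to scanning the drm "card*" nodes with udev and opening the
 * one whose parent device is "exynos-drm".
 */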
588 static int
589 _tbm_exynos_open_drm()
590 {
591         int fd = -1;
592
593         fd = drmOpen(EXYNOS_DRM_NAME, NULL);
594         if (fd < 0) {
595                 TBM_EXYNOS_LOG ("[libtbm-exynos:%d] "
596                               "warning %s:%d fail to open drm\n",
597                               getpid(), __FUNCTION__, __LINE__);
598         }
599
600         if (fd < 0) {
601                 struct udev *udev = NULL;
602                 struct udev_enumerate *e = NULL;
603                 struct udev_list_entry *entry = NULL;
604                 struct udev_device *device = NULL, *drm_device = NULL, *device_parent = NULL;
605                 const char *filepath;
606                 struct stat s;
607                 int ret;
608
609                 TBM_EXYNOS_LOG ("[libtbm-exynos:%d] "
610                               "%s:%d search drm-device by udev\n",
611                               getpid(), __FUNCTION__, __LINE__);
612
613                 udev = udev_new();
614                 if (!udev) {
615                         TBM_EXYNOS_LOG("udev_new() failed.\n");
616                         return -1;
617                 }
618
619                 e = udev_enumerate_new(udev);
620                 udev_enumerate_add_match_subsystem(e, "drm");
621                 udev_enumerate_add_match_sysname(e, "card[0-9]*");
622                 udev_enumerate_scan_devices(e);
623
624                 udev_list_entry_foreach(entry, udev_enumerate_get_list_entry(e)) {
625                         device = udev_device_new_from_syspath(udev_enumerate_get_udev(e),
626                                                               udev_list_entry_get_name(entry));
627                         device_parent = udev_device_get_parent(device);
628                         /* No need to unref device_parent; it shares device's reference count */
629                         if (device_parent) {
630                                 if (strcmp(udev_device_get_sysname(device_parent), "exynos-drm") == 0) {
631                                         drm_device = device;
632                                         DBG("[%s] Found render device: '%s' (%s)\n",
633                                             target_name(),
634                                             udev_device_get_syspath(drm_device),
635                                             udev_device_get_sysname(device_parent));
636                                         break;
637                                 }
638                         }
639                         udev_device_unref(device);
640                 }
641
642                 udev_enumerate_unref(e);
643
644                 /* Get device file path. */
645                 filepath = udev_device_get_devnode(drm_device);
646                 if (!filepath) {
647                         TBM_EXYNOS_LOG("udev_device_get_devnode() failed.\n");
648                         udev_device_unref(drm_device);
649                         udev_unref(udev);
650                         return -1;
651                 }
652
653                 /* Open DRM device file and check validity. */
654                 fd = open(filepath, O_RDWR | O_CLOEXEC);
655                 if (fd < 0) {
656                         TBM_EXYNOS_LOG("open(%s, O_RDWR | O_CLOEXEC) failed.\n", filepath);
657                         udev_device_unref(drm_device);
658                         udev_unref(udev);
659                         return -1;
660                 }
661
662                 ret = fstat(fd, &s);
663                 if (ret) {
664                         TBM_EXYNOS_LOG("fstat() failed %s.\n", strerror(errno));
665                         close(fd);
666                         udev_device_unref(drm_device);
667                         udev_unref(udev);
668                         return -1;
669                 }
670
671                 udev_device_unref(drm_device);
672                 udev_unref(udev);
673         }
674
675         return fd;
676 }
677
678 static int
679 _check_render_node(void)
680 {
681         struct udev *udev = NULL;
682         struct udev_enumerate *e = NULL;
683         struct udev_list_entry *entry = NULL;
684         struct udev_device *device = NULL, *drm_device = NULL, *device_parent = NULL;
685
686         udev = udev_new();
687         if (!udev) {
688                 TBM_EXYNOS_LOG("udev_new() failed.\n");
689                 return -1;
690         }
691
692         e = udev_enumerate_new(udev);
693         udev_enumerate_add_match_subsystem(e, "drm");
694         udev_enumerate_add_match_sysname(e, "renderD[0-9]*");
695         udev_enumerate_scan_devices(e);
696
697         udev_list_entry_foreach(entry, udev_enumerate_get_list_entry(e)) {
698                 device = udev_device_new_from_syspath(udev_enumerate_get_udev(e),
699                                                       udev_list_entry_get_name(entry));
700                 device_parent = udev_device_get_parent(device);
701                 /* No need to unref device_parent; it shares device's reference count */
702                 if (device_parent) {
703                         if (strcmp(udev_device_get_sysname(device_parent), "exynos-drm") == 0) {
704                                 drm_device = device;
705                                 DBG("[%s] Found render device: '%s' (%s)\n",
706                                     target_name(),
707                                     udev_device_get_syspath(drm_device),
708                                     udev_device_get_sysname(device_parent));
709                                 break;
710                         }
711                 }
712                 udev_device_unref(device);
713         }
714
715         udev_enumerate_unref(e);
716         udev_unref(udev);
717
718         if (!drm_device)
719                 return 0;
722
723         udev_device_unref(drm_device);
724         return 1;
725 }
726
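/* Open the exynos render node ("renderD*") found via udev; returns the
 * opened fd on success or -1 on failure.
 */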
727 static int
728 _get_render_node(void)
729 {
730         struct udev *udev = NULL;
731         struct udev_enumerate *e = NULL;
732         struct udev_list_entry *entry = NULL;
733         struct udev_device *device = NULL, *drm_device = NULL, *device_parent = NULL;
734         const char *filepath;
735         struct stat s;
736         int fd = -1;
737         int ret;
738
739         udev = udev_new();
740         if (!udev) {
741                 TBM_EXYNOS_LOG("udev_new() failed.\n");
742                 return -1;
743         }
744
745         e = udev_enumerate_new(udev);
746         udev_enumerate_add_match_subsystem(e, "drm");
747         udev_enumerate_add_match_sysname(e, "renderD[0-9]*");
748         udev_enumerate_scan_devices(e);
749
750         udev_list_entry_foreach(entry, udev_enumerate_get_list_entry(e)) {
751                 device = udev_device_new_from_syspath(udev_enumerate_get_udev(e),
752                                                       udev_list_entry_get_name(entry));
753                 device_parent = udev_device_get_parent(device);
754                 /* No need to unref device_parent; it shares device's reference count */
755                 if (device_parent) {
756                         if (strcmp(udev_device_get_sysname(device_parent), "exynos-drm") == 0) {
757                                 drm_device = device;
758                                 DBG("[%s] Found render device: '%s' (%s)\n",
759                                     target_name(),
760                                     udev_device_get_syspath(drm_device),
761                                     udev_device_get_sysname(device_parent));
762                                 break;
763                         }
764                 }
765                 udev_device_unref(device);
766         }
767
768         udev_enumerate_unref(e);
769
770         /* Get device file path. */
771         filepath = udev_device_get_devnode(drm_device);
772         if (!filepath) {
773                 TBM_EXYNOS_LOG("udev_device_get_devnode() failed.\n");
774                 udev_device_unref(drm_device);
775                 udev_unref(udev);
776                 return -1;
777         }
778
779         /* Open DRM device file and check validity. */
780         fd = open(filepath, O_RDWR | O_CLOEXEC);
781         if (fd < 0) {
782                 TBM_EXYNOS_LOG("open(%s, O_RDWR | O_CLOEXEC) failed.\n", filepath);
783                 udev_device_unref(drm_device);
784                 udev_unref(udev);
785                 return -1;
786         }
787
788         ret = fstat(fd, &s);
789         if (ret) {
790                 TBM_EXYNOS_LOG("fstat() failed %s.\n", strerror(errno));
791                 udev_device_unref(drm_device);
792                 udev_unref(udev);
793                 close(fd);
794                 return -1;
795         }
796
797         udev_device_unref(drm_device);
798         udev_unref(udev);
799
800         return fd;
801 }
802
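/* Translate TBM allocation flags into exynos GEM flags: scanout buffers
 * must be physically contiguous, and the WC / non-cachable / cachable
 * options map directly.
 */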
803 static unsigned int
804 _get_exynos_flag_from_tbm(unsigned int ftbm)
805 {
806         unsigned int flags = 0;
807
808         if (ftbm & TBM_BO_SCANOUT)
809                 flags |= EXYNOS_BO_CONTIG;
810         else
811                 flags |= EXYNOS_BO_NONCONTIG;
812
813         if (ftbm & TBM_BO_WC)
814                 flags |= EXYNOS_BO_WC;
815         else if (ftbm & TBM_BO_NONCACHABLE)
816                 flags |= EXYNOS_BO_NONCACHABLE;
817         else
818                 flags |= EXYNOS_BO_CACHABLE;
819
820         return flags;
821 }
822
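/* Inverse of the above: derive TBM flags from exynos GEM flags. */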
823 static unsigned int
824 _get_tbm_flag_from_exynos(unsigned int fexynos)
825 {
826         unsigned int flags = 0;
827
828         if (fexynos & EXYNOS_BO_NONCONTIG)
829                 flags |= TBM_BO_DEFAULT;
830         else
831                 flags |= TBM_BO_SCANOUT;
832
833         if (fexynos & EXYNOS_BO_WC)
834                 flags |= TBM_BO_WC;
835         else if (fexynos & EXYNOS_BO_CACHABLE)
836                 flags |= TBM_BO_DEFAULT;
837         else
838                 flags |= TBM_BO_NONCACHABLE;
839
840         return flags;
841 }
842
843 static unsigned int
844 _get_name(int fd, unsigned int gem)
845 {
846         struct drm_gem_flink arg = {0,};
847
848         arg.handle = gem;
849         if (drmIoctl(fd, DRM_IOCTL_GEM_FLINK, &arg)) {
850                 TBM_EXYNOS_LOG("error fail to get flink from gem:%d (DRM_IOCTL_GEM_FLINK)\n",
851                                gem);
852                 return 0;
853         }
854
855         return (unsigned int)arg.name;
856 }
857
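/* Return the handle each device type actually consumes: the GEM handle
 * for DEFAULT/2D, a lazily created mmap()ing of the buffer for CPU, and
 * a lazily exported dma-buf (PRIME) fd for 3D and MM.
 */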
858 static tbm_bo_handle
859 _exynos_bo_handle(tbm_bo_exynos bo_exynos, int device)
860 {
861         tbm_bo_handle bo_handle;
862
863         memset(&bo_handle, 0x0, sizeof(bo_handle));
864
865         switch (device) {
866         case TBM_DEVICE_DEFAULT:
867         case TBM_DEVICE_2D:
868                 bo_handle.u32 = (uint32_t)bo_exynos->gem;
869                 break;
870         case TBM_DEVICE_CPU:
871                 if (!bo_exynos->pBase) {
872                         struct drm_exynos_gem_map arg = {0,};
873                         void *map = NULL;
874
875                         arg.handle = bo_exynos->gem;
876                         if (drmCommandWriteRead(bo_exynos->fd, DRM_EXYNOS_GEM_MAP, &arg,
877                                                 sizeof(arg))) {
878                                 TBM_EXYNOS_LOG("error Cannot map_dumb gem=%d\n", bo_exynos->gem);
879                                 return (tbm_bo_handle) NULL;
880                         }
881
882                         map = mmap(NULL, bo_exynos->size, PROT_READ | PROT_WRITE, MAP_SHARED,
883                                    bo_exynos->fd, arg.offset);
884                         if (map == MAP_FAILED) {
885                                 TBM_EXYNOS_LOG("error Cannot usrptr gem=%d\n", bo_exynos->gem);
886                                 return (tbm_bo_handle) NULL;
887                         }
888                         bo_exynos->pBase = map;
889                 }
890                 bo_handle.ptr = (void *)bo_exynos->pBase;
891                 break;
892         case TBM_DEVICE_3D:
893 #ifdef USE_DMAIMPORT
894                 if (bo_exynos->dmabuf) {
895                         bo_handle.u32 = (uint32_t)bo_exynos->dmabuf;
896                         break;
897                 }
898
899                 if (!bo_exynos->dmabuf) {
900                         struct drm_prime_handle arg = {0, };
901
902                         arg.handle = bo_exynos->gem;
903                         if (drmIoctl(bo_exynos->fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &arg)) {
904                                 TBM_EXYNOS_LOG("error Cannot dmabuf=%d\n", bo_exynos->gem);
905                                 return (tbm_bo_handle) NULL;
906                         }
907                         bo_exynos->dmabuf = arg.fd;
908                 }
909
910                 bo_handle.u32 = (uint32_t)bo_exynos->dmabuf;
911 #endif
912                 break;
913         case TBM_DEVICE_MM:
914                 if (!bo_exynos->dmabuf) {
915                         struct drm_prime_handle arg = {0, };
916
917                         arg.handle = bo_exynos->gem;
918                         if (drmIoctl(bo_exynos->fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &arg)) {
919                                 TBM_EXYNOS_LOG("error Cannot dmabuf=%d\n", bo_exynos->gem);
920                                 return (tbm_bo_handle) NULL;
921                         }
922                         bo_exynos->dmabuf = arg.fd;
923                 }
924
925                 bo_handle.u32 = (uint32_t)bo_exynos->dmabuf;
926                 break;
927         default:
928                 TBM_EXYNOS_LOG("error Not supported device:%d\n", device);
929                 bo_handle.ptr = (void *) NULL;
930                 break;
931         }
932
933         return bo_handle;
934 }
935
936 static int
937 tbm_exynos_bo_size(tbm_bo bo)
938 {
939         EXYNOS_RETURN_VAL_IF_FAIL(bo != NULL, 0);
940
941         tbm_bo_exynos bo_exynos;
942
943         bo_exynos = (tbm_bo_exynos)tbm_backend_get_bo_priv(bo);
944         EXYNOS_RETURN_VAL_IF_FAIL(bo_exynos != NULL, 0);
945
946         return bo_exynos->size;
947 }
948
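/* Backend bo_alloc: create a GEM object with DRM_EXYNOS_GEM_CREATE,
 * record its FLINK name, set up cache-state tracking and register the
 * bo in the per-name hash so later imports can share it.
 */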
949 static void *
950 tbm_exynos_bo_alloc(tbm_bo bo, int size, int flags)
951 {
952         EXYNOS_RETURN_VAL_IF_FAIL(bo != NULL, 0);
953
954         tbm_bo_exynos bo_exynos;
955         tbm_bufmgr_exynos bufmgr_exynos;
956         unsigned int exynos_flags;
957
958         bufmgr_exynos = (tbm_bufmgr_exynos)tbm_backend_get_bufmgr_priv(bo);
959         EXYNOS_RETURN_VAL_IF_FAIL(bufmgr_exynos != NULL, 0);
960
961         bo_exynos = calloc(1, sizeof(struct _tbm_bo_exynos));
962         if (!bo_exynos) {
963                 TBM_EXYNOS_LOG("error fail to allocate the bo private\n");
964                 return 0;
965         }
966
967         exynos_flags = _get_exynos_flag_from_tbm(flags);
968         if ((flags & TBM_BO_SCANOUT) &&
969             size <= 4 * 1024) {
970                 exynos_flags |= EXYNOS_BO_NONCONTIG;
971         }
972
973         struct drm_exynos_gem_create arg = {0, };
974
975         arg.size = (uint64_t)size;
976         arg.flags = exynos_flags;
977         if (drmCommandWriteRead(bufmgr_exynos->fd, DRM_EXYNOS_GEM_CREATE, &arg,
978                                 sizeof(arg))) {
979                 TBM_EXYNOS_LOG("error Cannot create bo(flag:%x, size:%d)\n", arg.flags,
980                                (unsigned int)arg.size);
981                 free(bo_exynos);
982                 return 0;
983         }
984
985         bo_exynos->fd = bufmgr_exynos->fd;
986         bo_exynos->gem = arg.handle;
987         bo_exynos->size = size;
988         bo_exynos->flags_tbm = flags;
989         bo_exynos->flags_exynos = exynos_flags;
990         bo_exynos->name = _get_name(bo_exynos->fd, bo_exynos->gem);
991
992         if (!_bo_init_cache_state(bufmgr_exynos, bo_exynos, 0)) {
993                 TBM_EXYNOS_LOG("error fail init cache state(%d)\n", bo_exynos->name);
994                 free(bo_exynos);
995                 return 0;
996         }
997
998         pthread_mutex_init(&bo_exynos->mutex, NULL);
999
1000         if (bufmgr_exynos->use_dma_fence
1001             && !bo_exynos->dmabuf) {
1002                 struct drm_prime_handle arg = {0, };
1003
1004                 arg.handle = bo_exynos->gem;
1005                 if (drmIoctl(bo_exynos->fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &arg)) {
1006                         TBM_EXYNOS_LOG("error Cannot dmabuf=%d\n", bo_exynos->gem);
1007                         free(bo_exynos);
1008                         return 0;
1009                 }
1010                 bo_exynos->dmabuf = arg.fd;
1011         }
1012
1013         /* add bo to hash */
1014         PrivGem *privGem = calloc(1, sizeof(PrivGem));
1015
1016         if (!privGem) {
1017                 TBM_EXYNOS_LOG("[libtbm-exynos:%d] "
1018                                "error %s:%d Fail to calloc privGem\n",
1019                                getpid(), __func__, __LINE__);
1020                 free(bo_exynos);
1021                 return 0;
1022         }
1023
1024         privGem->ref_count = 1;
1025         privGem->bo_priv = bo_exynos;
1026
1027         if (drmHashInsert(bufmgr_exynos->hashBos, bo_exynos->name,
1028                           (void *)privGem) < 0) {
1029                 TBM_EXYNOS_LOG("error Cannot insert bo to Hash(%d)\n", bo_exynos->name);
1030         }
1031
1032         DBG("     [%s] bo:%p, gem:%d(%d), flags:%d(%d), size:%d\n", target_name(),
1033             bo,
1034             bo_exynos->gem, bo_exynos->name,
1035             flags, exynos_flags,
1036             bo_exynos->size);
1037
1038         return (void *)bo_exynos;
1039 }
1040
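/* Backend bo_free: unmap any CPU mapping, close the exported dma-buf fd,
 * drop the hash reference, tear down the cache state and finally close
 * the GEM handle.
 */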
1041 static void
1042 tbm_exynos_bo_free(tbm_bo bo)
1043 {
1044         tbm_bo_exynos bo_exynos;
1045         tbm_bufmgr_exynos bufmgr_exynos;
1046
1047         if (!bo)
1048                 return;
1049
1050         bufmgr_exynos = (tbm_bufmgr_exynos)tbm_backend_get_bufmgr_priv(bo);
1051         EXYNOS_RETURN_IF_FAIL(bufmgr_exynos != NULL);
1052
1053         bo_exynos = (tbm_bo_exynos)tbm_backend_get_bo_priv(bo);
1054         EXYNOS_RETURN_IF_FAIL(bo_exynos != NULL);
1055
1056         DBG("      [%s] bo:%p, gem:%d(%d), fd:%d, size:%d\n", target_name(),
1057             bo,
1058             bo_exynos->gem, bo_exynos->name,
1059             bo_exynos->dmabuf,
1060             bo_exynos->size);
1061
1062         if (bo_exynos->pBase) {
1063                 if (munmap(bo_exynos->pBase, bo_exynos->size) == -1) {
1064                         TBM_EXYNOS_LOG("error bo:%p fail to munmap(%s)\n",
1065                                        bo, strerror(errno));
1066                 }
1067         }
1068
1069         /* close dmabuf */
1070         if (bo_exynos->dmabuf) {
1071                 close(bo_exynos->dmabuf);
1072                 bo_exynos->dmabuf = 0;
1073         }
1074
1075         /* delete bo from hash */
1076         PrivGem *privGem = NULL;
1077         int ret;
1078
1079         ret = drmHashLookup(bufmgr_exynos->hashBos, bo_exynos->name,
1080                              (void **)&privGem);
1081         if (ret == 0) {
1082                 privGem->ref_count--;
1083                 if (privGem->ref_count == 0) {
1084                         drmHashDelete(bufmgr_exynos->hashBos, bo_exynos->name);
1085                         free(privGem);
1086                         privGem = NULL;
1087                 }
1088         } else {
1089                 TBM_EXYNOS_LOG("warning Cannot find bo to Hash(%d), ret=%d\n", bo_exynos->name,
1090                                ret);
1091         }
1092
1093         _bo_destroy_cache_state(bufmgr_exynos, bo_exynos);
1094
1095         /* Free gem handle */
1096         struct drm_gem_close arg = {0, };
1097
1098         memset(&arg, 0, sizeof(arg));
1099         arg.handle = bo_exynos->gem;
1100         if (drmIoctl(bo_exynos->fd, DRM_IOCTL_GEM_CLOSE, &arg)) {
1101                 TBM_EXYNOS_LOG("error bo:%p fail to gem close.(%s)\n",
1102                                bo, strerror(errno));
1103         }
1104
1105         free(bo_exynos);
1106 }
1107
1108
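/* Backend bo_import (by FLINK name): return the existing private bo when
 * the name is already in the hash; otherwise open the GEM object, query
 * its size and flags, and register a new hash entry.
 */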
1109 static void *
1110 tbm_exynos_bo_import(tbm_bo bo, unsigned int key)
1111 {
1112         EXYNOS_RETURN_VAL_IF_FAIL(bo != NULL, 0);
1113
1114         tbm_bufmgr_exynos bufmgr_exynos;
1115         tbm_bo_exynos bo_exynos;
1116         PrivGem *privGem = NULL;
1117         int ret;
1118
1119         bufmgr_exynos = (tbm_bufmgr_exynos)tbm_backend_get_bufmgr_priv(bo);
1120         EXYNOS_RETURN_VAL_IF_FAIL(bufmgr_exynos != NULL, 0);
1121
1122         ret = drmHashLookup(bufmgr_exynos->hashBos, key, (void **)&privGem);
1123         if (ret == 0) {
1124                 return privGem->bo_priv;
1125         }
1126
1127         struct drm_gem_open arg = {0, };
1128         struct drm_exynos_gem_info info = {0, };
1129
1130         arg.name = key;
1131         if (drmIoctl(bufmgr_exynos->fd, DRM_IOCTL_GEM_OPEN, &arg)) {
1132                 TBM_EXYNOS_LOG("error Cannot open gem name=%d\n", key);
1133                 return 0;
1134         }
1135
1136         info.handle = arg.handle;
1137         if (drmCommandWriteRead(bufmgr_exynos->fd,
1138                                 DRM_EXYNOS_GEM_GET,
1139                                 &info,
1140                                 sizeof(struct drm_exynos_gem_info))) {
1141                 TBM_EXYNOS_LOG("error Cannot get gem info=%d\n", key);
1142                 /* close the gem handle opened above so it does not leak */
1143                 struct drm_gem_close gem_close = {0, };
1144                 gem_close.handle = arg.handle;
1145                 drmIoctl(bufmgr_exynos->fd, DRM_IOCTL_GEM_CLOSE, &gem_close);
1146                 return 0;
1143         }
1144
1145         bo_exynos = calloc(1, sizeof(struct _tbm_bo_exynos));
1146         if (!bo_exynos) {
1147                 TBM_EXYNOS_LOG("error fail to allocate the bo private\n");
1148                 return 0;
1149         }
1150
1151         bo_exynos->fd = bufmgr_exynos->fd;
1152         bo_exynos->gem = arg.handle;
1153         bo_exynos->size = arg.size;
1154         bo_exynos->flags_exynos = info.flags;
1155         bo_exynos->name = key;
1156         bo_exynos->flags_tbm = _get_tbm_flag_from_exynos(bo_exynos->flags_exynos);
1157
1158         if (!_bo_init_cache_state(bufmgr_exynos, bo_exynos, 1)) {
1159                 TBM_EXYNOS_LOG("error fail init cache state(%d)\n", bo_exynos->name);
1160                 free(bo_exynos);
1161                 return 0;
1162         }
1163
1164         if (!bo_exynos->dmabuf) {
1165                 struct drm_prime_handle arg = {0, };
1166
1167                 arg.handle = bo_exynos->gem;
1168                 if (drmIoctl(bo_exynos->fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &arg)) {
1169                         TBM_EXYNOS_LOG("error Cannot dmabuf=%d\n", bo_exynos->gem);
1170                         free(bo_exynos);
1171                         return 0;
1172                 }
1173                 bo_exynos->dmabuf = arg.fd;
1174         }
1175
1176         /* add bo to hash */
1177         privGem = NULL;
1178
1179         privGem = calloc(1, sizeof(PrivGem));
1180         if (!privGem) {
1181                 TBM_EXYNOS_LOG("[libtbm-exynos:%d] "
1182                                 "error %s:%d Fail to calloc privGem\n",
1183                                 getpid(), __func__, __LINE__);
1184                 free(bo_exynos);
1185                 return 0;
1186         }
1187
1188         privGem->ref_count = 1;
1189         privGem->bo_priv = bo_exynos;
1190
1191         if (drmHashInsert(bufmgr_exynos->hashBos, bo_exynos->name,
1192                            (void *)privGem) < 0) {
1193                 TBM_EXYNOS_LOG("error Cannot insert bo to Hash(%d)\n", bo_exynos->name);
1194         }
1195
1196         DBG("    [%s] bo:%p, gem:%d(%d), fd:%d, flags:%d(%d), size:%d\n",
1197             target_name(),
1198             bo,
1199             bo_exynos->gem, bo_exynos->name,
1200             bo_exynos->dmabuf,
1201             bo_exynos->flags_tbm, bo_exynos->flags_exynos,
1202             bo_exynos->size);
1203
1204         return (void *)bo_exynos;
1205 }
1206
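/* Backend bo_import_fd: convert the dma-buf fd to a GEM handle with
 * PRIME_FD_TO_HANDLE, reuse the hashed bo if one already exists for that
 * FLINK name, and otherwise build a new private bo (sizing it via lseek()
 * on the fd when the kernel supports it).
 */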
1207 static void *
1208 tbm_exynos_bo_import_fd(tbm_bo bo, tbm_fd key)
1209 {
1210         EXYNOS_RETURN_VAL_IF_FAIL(bo != NULL, 0);
1211
1212         tbm_bufmgr_exynos bufmgr_exynos;
1213         tbm_bo_exynos bo_exynos;
1214         PrivGem *privGem = NULL;
1215         unsigned int name;
1216         int ret;
1217
1218         bufmgr_exynos = (tbm_bufmgr_exynos)tbm_backend_get_bufmgr_priv(bo);
1219         EXYNOS_RETURN_VAL_IF_FAIL(bufmgr_exynos != NULL, 0);
1220
1221         /*getting handle from fd*/
1222         unsigned int gem = 0;
1223         struct drm_prime_handle arg = {0, };
1224
1225         arg.fd = key;
1226         arg.flags = 0;
1227         if (drmIoctl(bufmgr_exynos->fd, DRM_IOCTL_PRIME_FD_TO_HANDLE, &arg)) {
1228                 TBM_EXYNOS_LOG("error bo:%p Cannot get gem handle from fd:%d (%s)\n",
1229                                bo, arg.fd, strerror(errno));
1230                 return NULL;
1231         }
1232         gem = arg.handle;
1233
1234         name = _get_name(bufmgr_exynos->fd, gem);
1235         if (!name) {
1236                 TBM_EXYNOS_LOG("error bo:%p Cannot get name from gem:%d, fd:%d (%s)\n",
1237                                bo, gem, key, strerror(errno));
1238                 return 0;
1239         }
1240
1241         ret = drmHashLookup(bufmgr_exynos->hashBos, name, (void **)&privGem);
1242         if (ret == 0) {
1243                 if (gem == privGem->bo_priv->gem) {
1244                         return privGem->bo_priv;
1245                 }
1246         }
1247
1248         unsigned int real_size = -1;
1249         struct drm_exynos_gem_info info = {0, };
1250
1251         /* Determine size of bo.  The fd-to-handle ioctl really should
1252          * return the size, but it doesn't.  If we have kernel 3.12 or
1253          * later, we can lseek on the prime fd to get the size.  Older
1254          * kernels will just fail, in which case we fall back to the
1255          * size reported by DRM_EXYNOS_GEM_GET below.
1256          */
1257         real_size = lseek(key, 0, SEEK_END);
1258
1259         info.handle = gem;
1260         if (drmCommandWriteRead(bufmgr_exynos->fd,
1261                                 DRM_EXYNOS_GEM_GET,
1262                                 &info,
1263                                 sizeof(struct drm_exynos_gem_info))) {
1264                 TBM_EXYNOS_LOG("error bo:%p Cannot get gem info from gem:%d, fd:%d (%s)\n",
1265                                bo, gem, key, strerror(errno));
1266                 return 0;
1267         }
1268
1269         if (real_size == -1)
1270                 real_size = info.size;
1271
1272         bo_exynos = calloc(1, sizeof(struct _tbm_bo_exynos));
1273         if (!bo_exynos) {
1274                 TBM_EXYNOS_LOG("error bo:%p fail to allocate the bo private\n", bo);
1275                 return 0;
1276         }
1277
1278         bo_exynos->fd = bufmgr_exynos->fd;
1279         bo_exynos->gem = gem;
1280         bo_exynos->size = real_size;
1281         bo_exynos->flags_exynos = info.flags;
1282         bo_exynos->flags_tbm = _get_tbm_flag_from_exynos(bo_exynos->flags_exynos);
1283         bo_exynos->name = name;
1284
1285         if (!_bo_init_cache_state(bufmgr_exynos, bo_exynos, 1)) {
1286                 TBM_EXYNOS_LOG("error fail init cache state(%d)\n", bo_exynos->name);
1287                 free(bo_exynos);
1288                 return 0;
1289         }
1290
1291         /* add bo to hash */
1292         privGem = NULL;
1293
1294         privGem = calloc(1, sizeof(PrivGem));
1295         if (!privGem) {
1296                 TBM_EXYNOS_LOG("[libtbm-exynos:%d] "
1297                                "error %s:%d Fail to calloc privGem\n",
1298                                getpid(), __func__, __LINE__);
1299                 free(bo_exynos);
1300                 return 0;
1301         }
1302
1303         privGem->ref_count = 1;
1304         privGem->bo_priv = bo_exynos;
1305
1306         if (drmHashInsert(bufmgr_exynos->hashBos, bo_exynos->name,
1307                            (void *)privGem) < 0) {
1308                 TBM_EXYNOS_LOG("error bo:%p Cannot insert bo to Hash(%d) from gem:%d, fd:%d\n",
1309                                bo, bo_exynos->name, gem, key);
1310         }
1311
1312         DBG(" [%s] bo:%p, gem:%d(%d), fd:%d, key_fd:%d, flags:%d(%d), size:%d\n",
1313             target_name(),
1314             bo,
1315             bo_exynos->gem, bo_exynos->name,
1316             bo_exynos->dmabuf,
1317             key,
1318             bo_exynos->flags_tbm, bo_exynos->flags_exynos,
1319             bo_exynos->size);
1320
1321         return (void *)bo_exynos;
1322 }
1323
1324 static unsigned int
1325 tbm_exynos_bo_export(tbm_bo bo)
1326 {
1327         EXYNOS_RETURN_VAL_IF_FAIL(bo != NULL, 0);
1328
1329         tbm_bo_exynos bo_exynos;
1330
1331         bo_exynos = (tbm_bo_exynos)tbm_backend_get_bo_priv(bo);
1332         EXYNOS_RETURN_VAL_IF_FAIL(bo_exynos != NULL, 0);
1333
1334         if (!bo_exynos->name) {
1335                 bo_exynos->name = _get_name(bo_exynos->fd, bo_exynos->gem);
1336                 if (!bo_exynos->name) {
1337                         TBM_EXYNOS_LOG("error Cannot get name\n");
1338                         return 0;
1339                 }
1340         }
1341
1342         DBG("    [%s] bo:%p, gem:%d(%d), fd:%d, flags:%d(%d), size:%d\n",
1343             target_name(),
1344             bo,
1345             bo_exynos->gem, bo_exynos->name,
1346             bo_exynos->dmabuf,
1347             bo_exynos->flags_tbm, bo_exynos->flags_exynos,
1348             bo_exynos->size);
1349
1350         return (unsigned int)bo_exynos->name;
1351 }
1352
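/* Backend bo_export_fd: export the GEM handle as a dma-buf fd with
 * PRIME_HANDLE_TO_FD; the returned fd is not cached in the bo.
 */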
1353 tbm_fd
1354 tbm_exynos_bo_export_fd(tbm_bo bo)
1355 {
1356         EXYNOS_RETURN_VAL_IF_FAIL(bo != NULL, -1);
1357
1358         tbm_bo_exynos bo_exynos;
1359         int ret;
1360
1361         bo_exynos = (tbm_bo_exynos)tbm_backend_get_bo_priv(bo);
1362         EXYNOS_RETURN_VAL_IF_FAIL(bo_exynos != NULL, -1);
1363
1364         struct drm_prime_handle arg = {0, };
1365
1366         arg.handle = bo_exynos->gem;
1367         ret = drmIoctl(bo_exynos->fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &arg);
1368         if (ret) {
1369                 TBM_EXYNOS_LOG("error bo:%p Cannot dmabuf=%d (%s)\n",
1370                                bo, bo_exynos->gem, strerror(errno));
1371                 return (tbm_fd) ret;
1372         }
1373
1374         DBG(" [%s] bo:%p, gem:%d(%d), fd:%d, key_fd:%d, flags:%d(%d), size:%d\n",
1375             target_name(),
1376             bo,
1377             bo_exynos->gem, bo_exynos->name,
1378             bo_exynos->dmabuf,
1379             arg.fd,
1380             bo_exynos->flags_tbm, bo_exynos->flags_exynos,
1381             bo_exynos->size);
1382
1383         return (tbm_fd)arg.fd;
1384 }
1385
1386 static tbm_bo_handle
1387 tbm_exynos_bo_get_handle(tbm_bo bo, int device)
1388 {
1389         EXYNOS_RETURN_VAL_IF_FAIL(bo != NULL, (tbm_bo_handle) NULL);
1390
1391         tbm_bo_handle bo_handle;
1392         tbm_bo_exynos bo_exynos;
1393
1394         bo_exynos = (tbm_bo_exynos)tbm_backend_get_bo_priv(bo);
1395         EXYNOS_RETURN_VAL_IF_FAIL(bo_exynos != NULL, (tbm_bo_handle) NULL);
1396
1397         if (!bo_exynos->gem) {
1398                 TBM_EXYNOS_LOG("error Cannot map gem=%d\n", bo_exynos->gem);
1399                 return (tbm_bo_handle) NULL;
1400         }
1401
1402         DBG("[%s] bo:%p, gem:%d(%d), fd:%d, flags:%d(%d), size:%d, %s\n",
1403             target_name(),
1404             bo,
1405             bo_exynos->gem, bo_exynos->name,
1406             bo_exynos->dmabuf,
1407             bo_exynos->flags_tbm, bo_exynos->flags_exynos,
1408             bo_exynos->size,
1409             STR_DEVICE[device]);
1410
1411         /*Get mapped bo_handle*/
1412         bo_handle = _exynos_bo_handle(bo_exynos, device);
1413         if (bo_handle.ptr == NULL) {
1414                 TBM_EXYNOS_LOG("error Cannot get handle: gem:%d, device:%d\n", bo_exynos->gem,
1415                                device);
1416                 return (tbm_bo_handle) NULL;
1417         }
1418
1419         return bo_handle;
1420 }
1421
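/* Backend bo_map: resolve the per-device handle and, on the first
 * mapping, update the cache state so any required flush happens before
 * the caller touches the buffer.
 */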
1422 static tbm_bo_handle
1423 tbm_exynos_bo_map(tbm_bo bo, int device, int opt)
1424 {
1425         EXYNOS_RETURN_VAL_IF_FAIL(bo != NULL, (tbm_bo_handle) NULL);
1426
1427         tbm_bo_handle bo_handle;
1428         tbm_bo_exynos bo_exynos;
1429         tbm_bufmgr_exynos bufmgr_exynos;
1430
1431         bufmgr_exynos = (tbm_bufmgr_exynos)tbm_backend_get_bufmgr_priv(bo);
1432         EXYNOS_RETURN_VAL_IF_FAIL(bufmgr_exynos != NULL, (tbm_bo_handle)NULL);
1433
1434         bo_exynos = (tbm_bo_exynos)tbm_backend_get_bo_priv(bo);
1435         EXYNOS_RETURN_VAL_IF_FAIL(bo_exynos != NULL, (tbm_bo_handle) NULL);
1436
1437         if (!bo_exynos->gem) {
1438                 TBM_EXYNOS_LOG("error Cannot map gem=%d\n", bo_exynos->gem);
1439                 return (tbm_bo_handle) NULL;
1440         }
1441
1442         DBG("       [%s] bo:%p, gem:%d(%d), fd:%d, %s, %s\n", target_name(),
1443             bo,
1444             bo_exynos->gem, bo_exynos->name,
1445             bo_exynos->dmabuf,
1446             STR_DEVICE[device],
1447             STR_OPT[opt]);
1448
1449         /*Get mapped bo_handle*/
1450         bo_handle = _exynos_bo_handle(bo_exynos, device);
1451         if (bo_handle.ptr == NULL) {
1452                 TBM_EXYNOS_LOG("error Cannot get handle: gem:%d, device:%d, opt:%d\n",
1453                                bo_exynos->gem, device, opt);
1454                 return (tbm_bo_handle) NULL;
1455         }
1456
1457         if (bo_exynos->map_cnt == 0)
1458                 _bo_set_cache_state(bufmgr_exynos, bo_exynos, device, opt);
1459
1460         bo_exynos->map_cnt++;
1461
1462         return bo_handle;
1463 }
1464
1465 static int
1466 tbm_exynos_bo_unmap(tbm_bo bo)
1467 {
1468         EXYNOS_RETURN_VAL_IF_FAIL(bo != NULL, 0);
1469
1470         tbm_bo_exynos bo_exynos;
1471         tbm_bufmgr_exynos bufmgr_exynos;
1472
1473         bufmgr_exynos = (tbm_bufmgr_exynos)tbm_backend_get_bufmgr_priv(bo);
1474         EXYNOS_RETURN_VAL_IF_FAIL(bufmgr_exynos != NULL, 0);
1475
1476         bo_exynos = (tbm_bo_exynos)tbm_backend_get_bo_priv(bo);
1477         EXYNOS_RETURN_VAL_IF_FAIL(bo_exynos != NULL, 0);
1478
1479         if (!bo_exynos->gem)
1480                 return 0;
1481
1482         bo_exynos->map_cnt--;
1483
1484         if (bo_exynos->map_cnt == 0)
1485                 _bo_save_cache_state(bufmgr_exynos, bo_exynos);
1486
1487         DBG("     [%s] bo:%p, gem:%d(%d), fd:%d\n", target_name(),
1488             bo,
1489             bo_exynos->gem, bo_exynos->name,
1490             bo_exynos->dmabuf);
1491
1492         return 1;
1493 }
1494
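/* Backend bo_lock: for TBM_DEVICE_3D take a dma-buf fence through
 * DMABUF_IOCTL_GET_FENCE and remember it in the bo's fence list; for
 * TBM_DEVICE_CPU fall back to an fcntl() record lock on the dma-buf fd.
 * Compiled out when ALWAYS_BACKEND_CTRL is defined.
 */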
1495 static int
1496 tbm_exynos_bo_lock(tbm_bo bo, int device, int opt)
1497 {
1498         EXYNOS_RETURN_VAL_IF_FAIL(bo != NULL, 0);
1499
1500 #ifndef ALWAYS_BACKEND_CTRL
1501         tbm_bufmgr_exynos bufmgr_exynos;
1502         tbm_bo_exynos bo_exynos;
1503         struct dma_buf_fence fence;
1504         struct flock filelock;
1505         int ret = 0;
1506
1507         if (device != TBM_DEVICE_3D && device != TBM_DEVICE_CPU) {
1508                 DBG("[libtbm-exynos:%d] %s not support device type,\n", getpid(),
1509                     __func__);
1510                 return 0;
1511         }
1512
1513         bo_exynos = (tbm_bo_exynos)tbm_backend_get_bo_priv(bo);
1514         EXYNOS_RETURN_VAL_IF_FAIL(bo_exynos != NULL, 0);
1515
1516         bufmgr_exynos = (tbm_bufmgr_exynos)tbm_backend_get_bufmgr_priv(bo);
1517         EXYNOS_RETURN_VAL_IF_FAIL(bufmgr_exynos != NULL, 0);
1518
1519         memset(&fence, 0, sizeof(struct dma_buf_fence));
1520
1521         /* Check if the given type is valid or not. */
1522         if (opt & TBM_OPTION_WRITE) {
1523                 if (device == TBM_DEVICE_3D)
1524                         fence.type = DMA_BUF_ACCESS_WRITE | DMA_BUF_ACCESS_DMA;
1525         } else if (opt & TBM_OPTION_READ) {
1526                 if (device == TBM_DEVICE_3D)
1527                         fence.type = DMA_BUF_ACCESS_READ | DMA_BUF_ACCESS_DMA;
1528         } else {
1529                 TBM_EXYNOS_LOG("error Invalid argument\n");
1530                 return 0;
1531         }
1532
1533         /* Check if the tbm manager supports dma fence or not. */
1534         if (!bufmgr_exynos->use_dma_fence) {
1535                 TBM_EXYNOS_LOG("error Not support DMA FENCE(%s)\n", strerror(errno));
1536                 return 0;
1537
1538         }
1539
1540         if (device == TBM_DEVICE_3D) {
1541                 ret = ioctl(bo_exynos->dmabuf, DMABUF_IOCTL_GET_FENCE, &fence);
1542                 if (ret < 0) {
1543                         TBM_EXYNOS_LOG("error Cannot set GET FENCE(%s)\n", strerror(errno));
1544                         return 0;
1545                 }
1546         } else {
1547                 if (opt & TBM_OPTION_WRITE)
1548                         filelock.l_type = F_WRLCK;
1549                 else
1550                         filelock.l_type = F_RDLCK;
1551
1552                 filelock.l_whence = SEEK_CUR;
1553                 filelock.l_start = 0;
1554                 filelock.l_len = 0;
1555
1556                 if (-1 == fcntl(bo_exynos->dmabuf, F_SETLKW, &filelock))
1557                         return 0;
1558         }
1559
1560         pthread_mutex_lock(&bo_exynos->mutex);
1561
1562         if (device == TBM_DEVICE_3D) {
1563                 int i;
1564
1565                 for (i = 0; i < DMA_FENCE_LIST_MAX; i++) {
1566                         if (bo_exynos->dma_fence[i].ctx == 0) {
1567                                 bo_exynos->dma_fence[i].type = fence.type;
1568                                 bo_exynos->dma_fence[i].ctx = fence.ctx;
1569                                 break;
1570                         }
1571                 }
1572
1573                 if (i == DMA_FENCE_LIST_MAX) {
1574                         /* TODO: grow (realloc) the dma_fence list when it is full. -- minseok3.kim */
1575                         TBM_EXYNOS_LOG("error: dma_fence list is full\n");
1576                 }
1577         }
1578
1579         pthread_mutex_unlock(&bo_exynos->mutex);
1580
1581         DBG("[%s] DMABUF_IOCTL_GET_FENCE! bo:%p, gem:%d(%d), fd:%d\n", target_name(),
1582             bo,
1583             bo_exynos->gem, bo_exynos->name,
1584             bo_exynos->dmabuf);
1585 #endif /* ALWAYS_BACKEND_CTRL */
1586
1587         return 1;
1588 }
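
/*
 * Note on the two lock paths above (descriptive comment only): for
 * TBM_DEVICE_3D the backend takes a dmabuf sync fence with
 * DMABUF_IOCTL_GET_FENCE, composing the access type from the caller's
 * options, e.g. a 3D write lock uses
 *
 *   fence.type = DMA_BUF_ACCESS_WRITE | DMA_BUF_ACCESS_DMA;
 *
 * For TBM_DEVICE_CPU it falls back to a POSIX advisory record lock
 * (fcntl(F_SETLKW)) on the dmabuf fd, which only serializes processes that
 * cooperate by using the same locking scheme.
 */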
1589
1590 static int
1591 tbm_exynos_bo_unlock(tbm_bo bo)
1592 {
1593         EXYNOS_RETURN_VAL_IF_FAIL(bo != NULL, 0);
1594
1595 #ifndef ALWAYS_BACKEND_CTRL
1596         tbm_bo_exynos bo_exynos;
1597         struct dma_buf_fence fence;
1598         struct flock filelock;
1599         unsigned int dma_type = 0;
1600         int ret = 0;
1601
1602         bo_exynos = (tbm_bo_exynos)tbm_backend_get_bo_priv(bo);
1603         EXYNOS_RETURN_VAL_IF_FAIL(bo_exynos != NULL, 0);
1604
1605         if (bo_exynos->dma_fence[0].type & DMA_BUF_ACCESS_DMA)
1606                 dma_type = 1;
1607
1608         if (!bo_exynos->dma_fence[0].ctx && dma_type) {
1609                 DBG("[libtbm-exynos:%d] %s: FENCE not supported or ignored\n", getpid(),
1610                     __func__);
1611                 return 0;
1612         }
1613
1620         pthread_mutex_lock(&bo_exynos->mutex);
1621
1622         if (dma_type) {
1623                 fence.type = bo_exynos->dma_fence[0].type;
1624                 fence.ctx = bo_exynos->dma_fence[0].ctx;
1625                 int i;
1626
1627                 for (i = 1; i < DMA_FENCE_LIST_MAX; i++) {
1628                         bo_exynos->dma_fence[i - 1].type = bo_exynos->dma_fence[i].type;
1629                         bo_exynos->dma_fence[i - 1].ctx = bo_exynos->dma_fence[i].ctx;
1630                 }
1631                 bo_exynos->dma_fence[DMA_FENCE_LIST_MAX - 1].type = 0;
1632                 bo_exynos->dma_fence[DMA_FENCE_LIST_MAX - 1].ctx = 0;
1633         }
1634         pthread_mutex_unlock(&bo_exynos->mutex);
1635
1636         if (dma_type) {
1637                 ret = ioctl(bo_exynos->dmabuf, DMABUF_IOCTL_PUT_FENCE, &fence);
1638                 if (ret < 0) {
1639                         TBM_EXYNOS_LOG("error: DMABUF_IOCTL_PUT_FENCE failed (%s)\n", strerror(errno));
1640                         return 0;
1641                 }
1642         } else {
1643                 filelock.l_type = F_UNLCK;
1644                 filelock.l_whence = SEEK_CUR;
1645                 filelock.l_start = 0;
1646                 filelock.l_len = 0;
1647
1648                 if (-1 == fcntl(bo_exynos->dmabuf, F_SETLKW, &filelock))
1649                         return 0;
1650         }
1651
1652         DBG("[%s] DMABUF_IOCTL_PUT_FENCE! bo:%p, gem:%d(%d), fd:%d\n", target_name(),
1653             bo,
1654             bo_exynos->gem, bo_exynos->name,
1655             bo_exynos->dmabuf);
1656 #endif /* ALWAYS_BACKEND_CTRL */
1657
1658         return 1;
1659 }
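
/*
 * Note: the dma_fence array acts as a small FIFO. Unlock releases the fence
 * stored at index 0 with DMABUF_IOCTL_PUT_FENCE and shifts the remaining
 * entries down one slot, so nested 3D locks are released in the order they
 * were taken. For CPU locks the matching F_UNLCK simply drops the advisory
 * record lock taken in tbm_exynos_bo_lock().
 */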
1660
1661 static void
1662 tbm_exynos_bufmgr_deinit(void *priv)
1663 {
1664         EXYNOS_RETURN_IF_FAIL(priv != NULL);
1665
1666         tbm_bufmgr_exynos bufmgr_exynos;
1667
1668         bufmgr_exynos = (tbm_bufmgr_exynos)priv;
1669
1670         if (bufmgr_exynos->hashBos) {
1671                 unsigned long key;
1672                 void *value;
1673
1674                 while (drmHashFirst(bufmgr_exynos->hashBos, &key, &value) > 0) {
1675                         free(value);
1676                         drmHashDelete(bufmgr_exynos->hashBos, key);
1677                 }
1678
1679                 drmHashDestroy(bufmgr_exynos->hashBos);
1680                 bufmgr_exynos->hashBos = NULL;
1681         }
1682
1683         _bufmgr_deinit_cache_state(bufmgr_exynos);
1684
1685         if (bufmgr_exynos->bind_display)
1686                 tbm_drm_helper_wl_auth_server_deinit();
1687
1688         if (bufmgr_exynos->device_name)
1689                 free(bufmgr_exynos->device_name);
1690
1691         if (tbm_backend_is_display_server())
1692                 tbm_drm_helper_unset_tbm_master_fd();
1693
1694         close(bufmgr_exynos->fd);
1695
1696         free(bufmgr_exynos);
1697 }
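
/*
 * Note: deinit releases resources roughly in reverse order of
 * init_tbm_bufmgr_priv(): any per-bo private data still left in the hash
 * table is freed, the cache-state bookkeeping is torn down, the optional
 * wayland auth server is stopped, the device name is released, and finally
 * the DRM fd is closed. Keeping this symmetric with the error paths in init
 * is what avoids fd and memory leaks on teardown.
 */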
1698
1699 int
1700 tbm_exynos_surface_supported_format(uint32_t **formats, uint32_t *num)
1701 {
1702         uint32_t *color_formats = NULL;
1703
1704         color_formats = (uint32_t *)calloc(1,
1705                                            sizeof(uint32_t) * TBM_COLOR_FORMAT_COUNT);
1706
1707         if (color_formats == NULL)
1708                 return 0;
1709
1710         memcpy(color_formats, tbm_exynos_color_format_list,
1711                sizeof(uint32_t)*TBM_COLOR_FORMAT_COUNT);
1712
1713         *formats = color_formats;
1714         *num = TBM_COLOR_FORMAT_COUNT;
1715
1716         fprintf(stderr, "tbm_exynos_surface_supported_format count = %u\n", *num);
1717
1718         return 1;
1719 }
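
/*
 * Usage sketch (illustration only): the format list allocated above reaches
 * applications through tbm_surface_query_formats(), and the caller is
 * expected to release it with free(), e.g.
 *
 *   uint32_t *formats = NULL, num = 0;
 *   if (tbm_surface_query_formats(&formats, &num)) {
 *           /- inspect formats[0 .. num - 1], then release the list -/
 *           free(formats);
 *   }
 *
 * The exact ownership rules are defined by the libtbm frontend, not by this
 * backend.
 */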
1720
1721 static int
1722 _new_calc_plane_nv12(int width, int height)
1723 {
1724         int mbX, mbY;
1725
1726         mbX = DIV_ROUND_UP(width, S5P_FIMV_NUM_PIXELS_IN_MB_ROW);
1727         mbY = DIV_ROUND_UP(height, S5P_FIMV_NUM_PIXELS_IN_MB_COL);
1728
1729         if (width * height < S5P_FIMV_MAX_FRAME_SIZE)
1730                 mbY = (mbY + 1) / 2 * 2;
1731
1732         return ((mbX * S5P_FIMV_NUM_PIXELS_IN_MB_COL) * (mbY *
1733                         S5P_FIMV_NUM_PIXELS_IN_MB_ROW));
1734 }
1735
1736 static int
1737 _calc_yplane_nv12(int width, int height)
1738 {
1739         int mbX, mbY;
1740
1741         mbX = SIZE_ALIGN(width + 24, S5P_FIMV_NV12MT_HALIGN);
1742         mbY = SIZE_ALIGN(height + 16, S5P_FIMV_NV12MT_VALIGN);
1743
1744         return SIZE_ALIGN(mbX * mbY, S5P_FIMV_DEC_BUF_ALIGN);
1745 }
1746
1747 static int
1748 _calc_uvplane_nv12(int width, int height)
1749 {
1750         int mbX, mbY;
1751
1752         mbX = SIZE_ALIGN(width + 16, S5P_FIMV_NV12MT_HALIGN);
1753         mbY = SIZE_ALIGN(height + 4, S5P_FIMV_NV12MT_VALIGN);
1754
1755         return SIZE_ALIGN((mbX * mbY) >> 1, S5P_FIMV_DEC_BUF_ALIGN);
1756 }
1757
1758 static int
1759 _new_calc_yplane_nv12(int width, int height)
1760 {
1761         return SIZE_ALIGN(_new_calc_plane_nv12(width,
1762                                                 height) + S5P_FIMV_D_ALIGN_PLANE_SIZE,
1763                            TBM_SURFACE_ALIGNMENT_PLANE_NV12);
1764 }
1765
1766 static int
1767 _new_calc_uvplane_nv12(int width, int height)
1768 {
1769         return SIZE_ALIGN((_new_calc_plane_nv12(width,
1770                                                 height) >> 1) + S5P_FIMV_D_ALIGN_PLANE_SIZE,
1771                           TBM_SURFACE_ALIGNMENT_PLANE_NV12);
1772 }
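
/*
 * Note on the two NV12 size calculations above: _calc_yplane_nv12() and
 * _calc_uvplane_nv12() follow the legacy MFC (S5P_FIMV_*) alignment rules,
 * while _new_calc_yplane_nv12()/_new_calc_uvplane_nv12() derive the size
 * from macroblock counts (S5P_FIMV_NUM_PIXELS_IN_MB_ROW/COL, typically 16)
 * plus an S5P_FIMV_D_ALIGN_PLANE_SIZE margin. The NV12 case below takes the
 * MAX() of both results, presumably so the allocated planes are large enough
 * for either variant of the hardware codec path.
 */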
1773
1774 /**
1775  * @brief get the plane data of the surface.
1776  * @param[in] width : the width of the surface
1777  * @param[in] height : the height of the surface
1778  * @param[in] format : the format of the surface
1779  * @param[in] plane_idx : the index of the plane
1780  * @param[out] size : the size of the plane
1781  * @param[out] offset : the offset of the plane
1782  * @param[out] pitch : the pitch of the plane
1783  * @param[out] bo_idx : the bo index of the plane
1784  * @return 1 if this function succeeds, otherwise 0.
1785  */
1786 int
1787 tbm_exynos_surface_get_plane_data(int width, int height,
1788                                   tbm_format format, int plane_idx, uint32_t *size, uint32_t *offset,
1789                                   uint32_t *pitch, int *bo_idx)
1790 {
1791         int ret = 1;
1792         int bpp;
1793         int _offset = 0;
1794         int _pitch = 0;
1795         int _size = 0;
1796         int _bo_idx = 0;
1797
1798         switch (format) {
1799                 /* 16 bpp RGB */
1800         case TBM_FORMAT_XRGB4444:
1801         case TBM_FORMAT_XBGR4444:
1802         case TBM_FORMAT_RGBX4444:
1803         case TBM_FORMAT_BGRX4444:
1804         case TBM_FORMAT_ARGB4444:
1805         case TBM_FORMAT_ABGR4444:
1806         case TBM_FORMAT_RGBA4444:
1807         case TBM_FORMAT_BGRA4444:
1808         case TBM_FORMAT_XRGB1555:
1809         case TBM_FORMAT_XBGR1555:
1810         case TBM_FORMAT_RGBX5551:
1811         case TBM_FORMAT_BGRX5551:
1812         case TBM_FORMAT_ARGB1555:
1813         case TBM_FORMAT_ABGR1555:
1814         case TBM_FORMAT_RGBA5551:
1815         case TBM_FORMAT_BGRA5551:
1816         case TBM_FORMAT_RGB565:
1817                 bpp = 16;
1818                 _offset = 0;
1819                 _pitch = SIZE_ALIGN((width * bpp) >> 3, TBM_SURFACE_ALIGNMENT_PITCH_RGB);
1820                 _size = SIZE_ALIGN(_pitch * height, TBM_SURFACE_ALIGNMENT_PLANE);
1821                 _bo_idx = 0;
1822                 break;
1823                 /* 24 bpp RGB */
1824         case TBM_FORMAT_RGB888:
1825         case TBM_FORMAT_BGR888:
1826                 bpp = 24;
1827                 _offset = 0;
1828                 _pitch = SIZE_ALIGN((width * bpp) >> 3, TBM_SURFACE_ALIGNMENT_PITCH_RGB);
1829                 _size = SIZE_ALIGN(_pitch * height, TBM_SURFACE_ALIGNMENT_PLANE);
1830                 _bo_idx = 0;
1831                 break;
1832                 /* 32 bpp RGB */
1833         case TBM_FORMAT_XRGB8888:
1834         case TBM_FORMAT_XBGR8888:
1835         case TBM_FORMAT_RGBX8888:
1836         case TBM_FORMAT_BGRX8888:
1837         case TBM_FORMAT_ARGB8888:
1838         case TBM_FORMAT_ABGR8888:
1839         case TBM_FORMAT_RGBA8888:
1840         case TBM_FORMAT_BGRA8888:
1841                 bpp = 32;
1842                 _offset = 0;
1843                 _pitch = SIZE_ALIGN((width * bpp) >> 3, TBM_SURFACE_ALIGNMENT_PITCH_RGB);
1844                 _size = SIZE_ALIGN(_pitch * height, TBM_SURFACE_ALIGNMENT_PLANE);
1845                 _bo_idx = 0;
1846                 break;
1847
1848                 /* packed YCbCr */
1849         case TBM_FORMAT_YUYV:
1850         case TBM_FORMAT_YVYU:
1851         case TBM_FORMAT_UYVY:
1852         case TBM_FORMAT_VYUY:
1853         case TBM_FORMAT_AYUV:
1854                 bpp = 32;
1855                 _offset = 0;
1856                 _pitch = SIZE_ALIGN((width * bpp) >> 3, TBM_SURFACE_ALIGNMENT_PITCH_YUV);
1857                 _size = SIZE_ALIGN(_pitch * height, TBM_SURFACE_ALIGNMENT_PLANE);
1858                 _bo_idx = 0;
1859                 break;
1860
1861                 /*
1862                 * 2 plane YCbCr
1863                 * index 0 = Y plane, [7:0] Y
1864                 * index 1 = Cr:Cb plane, [15:0] Cr:Cb little endian
1865                 * or
1866                 * index 1 = Cb:Cr plane, [15:0] Cb:Cr little endian
1867                 */
1868         case TBM_FORMAT_NV12:
1869                 bpp = 12;
1870                 if (plane_idx == 0) {
1871                         _offset = 0;
1872                         _pitch = SIZE_ALIGN(width, TBM_SURFACE_ALIGNMENT_PITCH_YUV);
1873                         _size = MAX(_calc_yplane_nv12(width, height), _new_calc_yplane_nv12(width,
1874                                         height));
1875                         _bo_idx = 0;
1876                 } else if (plane_idx == 1) {
1877                         _offset = 0;
1878                         _pitch = SIZE_ALIGN(width, TBM_SURFACE_ALIGNMENT_PITCH_YUV / 2);
1879                         _size = MAX(_calc_uvplane_nv12(width, height), _new_calc_uvplane_nv12(width,
1880                                         height));
1881                         _bo_idx = 1;
1882                 }
1883                 break;
1884         case TBM_FORMAT_NV21:
1885                 bpp = 12;
1886                 if (plane_idx == 0) {
1887                         _offset = 0;
1888                         _pitch = SIZE_ALIGN(width, TBM_SURFACE_ALIGNMENT_PITCH_YUV);
1889                         _size = SIZE_ALIGN(_pitch * height, TBM_SURFACE_ALIGNMENT_PLANE);
1890                         _bo_idx = 0;
1891                 } else if (plane_idx == 1) {
1892                         _offset = width * height;
1893                         _pitch = SIZE_ALIGN(width, TBM_SURFACE_ALIGNMENT_PITCH_YUV / 2);
1894                         _size = SIZE_ALIGN(_pitch * (height / 2), TBM_SURFACE_ALIGNMENT_PLANE);
1895                         _bo_idx = 0;
1896                 }
1897                 break;
1898
1899         case TBM_FORMAT_NV16:
1900         case TBM_FORMAT_NV61:
1901                 bpp = 16;
1902                 /*if(plane_idx == 0)*/
1903                 {
1904                         _offset = 0;
1905                         _pitch = SIZE_ALIGN(width, TBM_SURFACE_ALIGNMENT_PITCH_YUV);
1906                         _size = SIZE_ALIGN(_pitch * height, TBM_SURFACE_ALIGNMENT_PLANE);
1907                         _bo_idx = 0;
1908                         if (plane_idx == 0)
1909                                 break;
1910                 }
1911                 /*else if( plane_idx ==1 )*/
1912                 {
1913                         _offset += _size;
1914                         _pitch = SIZE_ALIGN(width, TBM_SURFACE_ALIGNMENT_PITCH_YUV / 2);
1915                         _size = SIZE_ALIGN(_pitch * height, TBM_SURFACE_ALIGNMENT_PLANE);
1916                         _bo_idx = 0;
1917                 }
1918                 break;
1919
1920                 /*
1921                 * 3 plane YCbCr
1922                 * index 0: Y plane, [7:0] Y
1923                 * index 1: Cb plane, [7:0] Cb
1924                 * index 2: Cr plane, [7:0] Cr
1925                 * or
1926                 * index 1: Cr plane, [7:0] Cr
1927                 * index 2: Cb plane, [7:0] Cb
1928                 */
1929
1930                 /*
1931                 * NATIVE_BUFFER_FORMAT_YV12
1932                 * NATIVE_BUFFER_FORMAT_I420
1933                 */
1934         case TBM_FORMAT_YUV410:
1935         case TBM_FORMAT_YVU410:
1936                 bpp = 9;
1937                 _bo_idx = 0;
1938                 break;
1939         case TBM_FORMAT_YUV411:
1940         case TBM_FORMAT_YVU411:
1941         case TBM_FORMAT_YUV420:
1942         case TBM_FORMAT_YVU420:
1943                 bpp = 12;
1944                 /*if(plane_idx == 0)*/
1945                 {
1946                         _offset = 0;
1947                         _pitch = SIZE_ALIGN(width, TBM_SURFACE_ALIGNMENT_PITCH_YUV);
1948                         _size = SIZE_ALIGN(_pitch * height, TBM_SURFACE_ALIGNMENT_PLANE);
1949                         _bo_idx = 0;
1950                         if (plane_idx == 0)
1951                                 break;
1952                 }
1953                 /*else if(plane_idx == 1)*/
1954                 {
1955                         _offset += _size;
1956                         _pitch = SIZE_ALIGN(width / 2, TBM_SURFACE_ALIGNMENT_PITCH_YUV / 2);
1957                         _size = SIZE_ALIGN(_pitch * (height / 2), TBM_SURFACE_ALIGNMENT_PLANE);
1958                         _bo_idx = 0;
1959                         if (plane_idx == 1)
1960                                 break;
1961                 }
1962                 /*else if (plane_idx == 2)*/
1963                 {
1964                         _offset += _size;
1965                         _pitch = SIZE_ALIGN(width / 2, TBM_SURFACE_ALIGNMENT_PITCH_YUV / 2);
1966                         _size = SIZE_ALIGN(_pitch * (height / 2), TBM_SURFACE_ALIGNMENT_PLANE);
1967                         _bo_idx = 0;
1968                 }
1969                 break;
1970         case TBM_FORMAT_YUV422:
1971         case TBM_FORMAT_YVU422:
1972                 bpp = 16;
1973                 /*if(plane_idx == 0)*/
1974                 {
1975                         _offset = 0;
1976                         _pitch = SIZE_ALIGN(width, TBM_SURFACE_ALIGNMENT_PITCH_YUV);
1977                         _size = SIZE_ALIGN(_pitch * height, TBM_SURFACE_ALIGNMENT_PLANE);
1978                         _bo_idx = 0;
1979                         if (plane_idx == 0)
1980                                 break;
1981                 }
1982                 /*else if(plane_idx == 1)*/
1983                 {
1984                         _offset += _size;
1985                         _pitch = SIZE_ALIGN(width / 2, TBM_SURFACE_ALIGNMENT_PITCH_YUV / 2);
1986                         _size = SIZE_ALIGN(_pitch * (height), TBM_SURFACE_ALIGNMENT_PLANE);
1987                         _bo_idx = 0;
1988                         if (plane_idx == 1)
1989                                 break;
1990                 }
1991                 /*else if (plane_idx == 2)*/
1992                 {
1993                         _offset += _size;
1994                         _pitch = SIZE_ALIGN(width / 2, TBM_SURFACE_ALIGNMENT_PITCH_YUV / 2);
1995                         _size = SIZE_ALIGN(_pitch * (height), TBM_SURFACE_ALIGNMENT_PLANE);
1996                         _bo_idx = 0;
1997                 }
1998                 break;
1999         case TBM_FORMAT_YUV444:
2000         case TBM_FORMAT_YVU444:
2001                 bpp = 24;
2002                 /*if(plane_idx == 0)*/
2003                 {
2004                         _offset = 0;
2005                         _pitch = SIZE_ALIGN(width, TBM_SURFACE_ALIGNMENT_PITCH_YUV);
2006                         _size = SIZE_ALIGN(_pitch * height, TBM_SURFACE_ALIGNMENT_PLANE);
2007                         _bo_idx = 0;
2008                         if (plane_idx == 0)
2009                                 break;
2010                 }
2011                 /*else if(plane_idx == 1)*/
2012                 {
2013                         _offset += _size;
2014                         _pitch = SIZE_ALIGN(width, TBM_SURFACE_ALIGNMENT_PITCH_YUV);
2015                         _size = SIZE_ALIGN(_pitch * height, TBM_SURFACE_ALIGNMENT_PLANE);
2016                         _bo_idx = 0;
2017                         if (plane_idx == 1)
2018                                 break;
2019                 }
2020                 /*else if (plane_idx == 2)*/
2021                 {
2022                         _offset += _size;
2023                         _pitch = SIZE_ALIGN(width, TBM_SURFACE_ALIGNMENT_PITCH_YUV);
2024                         _size = SIZE_ALIGN(_pitch * height, TBM_SURFACE_ALIGNMENT_PLANE);
2025                         _bo_idx = 0;
2026                 }
2027                 break;
2028         default:
2029                 bpp = 0;
2030                 break;
2031         }
2032
2033         *size = _size;
2034         *offset = _offset;
2035         *pitch = _pitch;
2036         *bo_idx = _bo_idx;
2037
2038         return ret;
2039 }
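
/*
 * Worked example (illustration only; it assumes TBM_SURFACE_ALIGNMENT_PITCH_RGB
 * and TBM_SURFACE_ALIGNMENT_PLANE are both 64 -- check the macro definitions
 * earlier in this file for the values actually in effect): for
 * TBM_FORMAT_ARGB8888 at 1920x1080 the function above yields
 *
 *   pitch  = SIZE_ALIGN(1920 * 32 / 8, 64) = 7680
 *   size   = SIZE_ALIGN(7680 * 1080, 64)   = 8294400
 *   offset = 0, bo_idx = 0
 *
 * i.e. a single bo holding the whole surface. Multi-planar YUV formats
 * instead report a per-plane offset/size, and NV12 additionally places the
 * chroma plane in a second bo (bo_idx = 1).
 */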
2040
2041 int
2042 tbm_exynos_bo_get_flags(tbm_bo bo)
2043 {
2044         EXYNOS_RETURN_VAL_IF_FAIL(bo != NULL, 0);
2045
2046         tbm_bo_exynos bo_exynos;
2047
2048         bo_exynos = (tbm_bo_exynos)tbm_backend_get_bo_priv(bo);
2049         EXYNOS_RETURN_VAL_IF_FAIL(bo_exynos != NULL, 0);
2050
2051         return bo_exynos->flags_tbm;
2052 }
2053
2054 int
2055 tbm_exynos_bufmgr_bind_native_display(tbm_bufmgr bufmgr, void *native_display)
2056 {
2057         tbm_bufmgr_exynos bufmgr_exynos;
2058
2059         bufmgr_exynos = tbm_backend_get_priv_from_bufmgr(bufmgr);
2060         EXYNOS_RETURN_VAL_IF_FAIL(bufmgr_exynos != NULL, 0);
2061
2062         if (!tbm_drm_helper_wl_auth_server_init(native_display, bufmgr_exynos->fd,
2063                                            bufmgr_exynos->device_name, 0)) {
2064                 TBM_EXYNOS_LOG("[libtbm-exynos:%d] error: Fail to tbm_drm_helper_wl_auth_server_init\n", getpid());
2065                 return 0;
2066         }
2067
2068         bufmgr_exynos->bind_display = native_display;
2069
2070         return 1;
2071 }
2072
2073 MODULEINITPPROTO(init_tbm_bufmgr_priv);
2074
2075 static TBMModuleVersionInfo ExynosVersRec = {
2076         "exynos",
2077         "Samsung",
2078         TBM_ABI_VERSION,
2079 };
2080
2081 TBMModuleData tbmModuleData = { &ExynosVersRec, init_tbm_bufmgr_priv};
2082
2083 int
2084 init_tbm_bufmgr_priv(tbm_bufmgr bufmgr, int fd)
2085 {
2086         tbm_bufmgr_exynos bufmgr_exynos;
2087         tbm_bufmgr_backend bufmgr_backend;
2088
2089         if (!bufmgr)
2090                 return 0;
2091
2092         bufmgr_exynos = calloc(1, sizeof(struct _tbm_bufmgr_exynos));
2093         if (!bufmgr_exynos) {
2094                 TBM_EXYNOS_LOG("error: Fail to alloc bufmgr_exynos!\n");
2095                 return 0;
2096         }
2097
2098         if (tbm_backend_is_display_server()) {
2101                 bufmgr_exynos->fd = tbm_drm_helper_get_master_fd();
2102                 if (bufmgr_exynos->fd < 0) {
2103                         bufmgr_exynos->fd = _tbm_exynos_open_drm();
2104                 }
2105
2106                 if (bufmgr_exynos->fd < 0) {
2107                         TBM_EXYNOS_LOG("[libtbm-exynos:%d] error: Fail to open drm device!\n", getpid());
2108                         free(bufmgr_exynos);
2109                         return 0;
2110                 }
2111
2112                 tbm_drm_helper_set_tbm_master_fd(bufmgr_exynos->fd);
2113
2114                 bufmgr_exynos->device_name = drmGetDeviceNameFromFd(bufmgr_exynos->fd);
2115
2116                 if (!bufmgr_exynos->device_name)
2117                 {
2118                         TBM_EXYNOS_LOG("[libtbm-exynos:%d] error: Fail to get device name!\n", getpid());
2119
2120                         tbm_drm_helper_unset_tbm_master_fd();
2121                         close(bufmgr_exynos->fd);
2122                         free(bufmgr_exynos);
2123                         return 0;
2124                 }
2125
2126         } else {
2127                 if (_check_render_node()) {
2128                         bufmgr_exynos->fd = _get_render_node();
2129                         if (bufmgr_exynos->fd < 0) {
2130                                 TBM_EXYNOS_LOG("[%s] error: Fail to get render node\n", target_name());
2131                                 free(bufmgr_exynos);
2132                                 return 0;
2133                         }
2134                         DBG("[%s] Use render node:%d\n", target_name(), bufmgr_exynos->fd);
2135                 }
2136                 else {
2137                         if (!tbm_drm_helper_get_auth_info(&(bufmgr_exynos->fd), &(bufmgr_exynos->device_name), NULL)) {
2138                                 TBM_EXYNOS_LOG("[libtbm-exynos:%d] error: Fail to get auth drm info!\n", getpid());
2139                                 free(bufmgr_exynos);
2140                                 return 0;
2141                         }
2142                 }
2143         }
2144
2145         /* Check if the tbm manager supports dma fence or not. */
2146         int fp = open("/sys/module/dmabuf_sync/parameters/enabled", O_RDONLY);
2147         int length;
2148         char buf[1];
2149         if (fp != -1) {
2150                 length = read(fp, buf, 1);
2151
2152                 if (length == 1 && buf[0] == '1')
2153                         bufmgr_exynos->use_dma_fence = 1;
2154
2155                 close(fp);
2156         }
2157
2158         if (!_bufmgr_init_cache_state(bufmgr_exynos)) {
2159                 TBM_EXYNOS_LOG("[libtbm-exynos:%d] error: init bufmgr cache state failed!\n", getpid());
2160
2161                 if (tbm_backend_is_display_server())
2162                         tbm_drm_helper_unset_tbm_master_fd();
2163
2164                 close(bufmgr_exynos->fd);
2165
2166                 free(bufmgr_exynos);
2167                 return 0;
2168         }
2169
2170         /*Create Hash Table*/
2171         bufmgr_exynos->hashBos = drmHashCreate();
2172
2173         bufmgr_backend = tbm_backend_alloc();
2174         if (!bufmgr_backend) {
2175                 TBM_EXYNOS_LOG("error: Fail to alloc backend!\n");
2176                 if (bufmgr_exynos->hashBos)
2177                         drmHashDestroy(bufmgr_exynos->hashBos);
2178
2179                 _bufmgr_deinit_cache_state(bufmgr_exynos);
2180
2181                 if (tbm_backend_is_display_server())
2182                         tbm_drm_helper_unset_tbm_master_fd();
2183
2184                 close(bufmgr_exynos->fd);
2185
2186                 free(bufmgr_exynos);
2187                 return 0;
2188         }
2189
2190         bufmgr_backend->priv = (void *)bufmgr_exynos;
2191         bufmgr_backend->bufmgr_deinit = tbm_exynos_bufmgr_deinit;
2192         bufmgr_backend->bo_size = tbm_exynos_bo_size;
2193         bufmgr_backend->bo_alloc = tbm_exynos_bo_alloc;
2194         bufmgr_backend->bo_free = tbm_exynos_bo_free;
2195         bufmgr_backend->bo_import = tbm_exynos_bo_import;
2196         bufmgr_backend->bo_import_fd = tbm_exynos_bo_import_fd;
2197         bufmgr_backend->bo_export = tbm_exynos_bo_export;
2198         bufmgr_backend->bo_export_fd = tbm_exynos_bo_export_fd;
2199         bufmgr_backend->bo_get_handle = tbm_exynos_bo_get_handle;
2200         bufmgr_backend->bo_map = tbm_exynos_bo_map;
2201         bufmgr_backend->bo_unmap = tbm_exynos_bo_unmap;
2202         bufmgr_backend->surface_get_plane_data = tbm_exynos_surface_get_plane_data;
2203         bufmgr_backend->surface_supported_format = tbm_exynos_surface_supported_format;
2204         bufmgr_backend->bo_get_flags = tbm_exynos_bo_get_flags;
2205         bufmgr_backend->bo_lock = tbm_exynos_bo_lock;
2206         bufmgr_backend->bo_unlock = tbm_exynos_bo_unlock;
2207
2208         if (tbm_backend_is_display_server() && !_check_render_node()) {
2209                 bufmgr_backend->bufmgr_bind_native_display = tbm_exynos_bufmgr_bind_native_display;
2210         }
2211
2212         if (!tbm_backend_init(bufmgr, bufmgr_backend)) {
2213                 TBM_EXYNOS_LOG("error: Fail to init backend!\n");
2214                 tbm_backend_free(bufmgr_backend);
2215
2216                 _bufmgr_deinit_cache_state(bufmgr_exynos);
2217
2218                 if (tbm_backend_is_display_server())
2219                         tbm_drm_helper_unset_tbm_master_fd();
2220
2221                 close(bufmgr_exynos->fd);
2222
2223                 free(bufmgr_exynos);
2224                 return 0;
2225         }
2226
2227 #ifdef DEBUG
2228         {
2229                 char *env;
2230
2231                 env = getenv("TBM_EXYNOS_DEBUG");
2232                 if (env) {
2233                         bDebug = atoi(env);
2234                         TBM_EXYNOS_LOG("TBM_EXYNOS_DEBUG=%s\n", env);
2235                 } else {
2236                         bDebug = 0;
2237                 }
2238         }
2239 #endif
2240
2241         DBG("[%s] drm_fd:%d\n", target_name(),
2242             bufmgr_exynos->fd);
2243
2244         return 1;
2245 }
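
/*
 * Loading sketch (illustration only): the libtbm frontend typically
 * discovers this backend by dlopen()ing the module built from this file,
 * looking up the exported "tbmModuleData" symbol declared above, comparing
 * the ABI version recorded in ExynosVersRec against TBM_ABI_VERSION, and
 * then calling init_tbm_bufmgr_priv(bufmgr, fd). Returning 0 from this
 * function makes initialization fail, which is why every error path above
 * undoes the master-fd registration and closes the DRM fd it opened.
 */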
2246