1 /**************************************************************************
5 * Copyright 2015 Samsung Electronics co., Ltd. All Rights Reserved.
7 * Contact: Eunchul Kim <chulspro.kim@samsung.com>,
8 * JinYoung Jeon <jy0.jeon@samsung.com>,
9 * Taeheon Kim <th908.kim@samsung.com>,
10 * YoungJun Cho <yj44.cho@samsung.com>,
11 * SooChan Lim <sc1.lim@samsung.com>,
12 * Boram Park <boram1288.park@samsung.com>
14 * Permission is hereby granted, free of charge, to any person obtaining a
15 * copy of this software and associated documentation files (the
16 * "Software"), to deal in the Software without restriction, including
17 * without limitation the rights to use, copy, modify, merge, publish,
18 * distribute, sub license, and/or sell copies of the Software, and to
19 * permit persons to whom the Software is furnished to do so, subject to
20 * the following conditions:
22 * The above copyright notice and this permission notice (including the
23 * next paragraph) shall be included in all copies or substantial portions
26 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
27 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
28 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
29 * IN NO EVENT SHALL PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR
30 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
31 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
32 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
34 **************************************************************************/
40 #include "tdm_private.h"
/* Common prologue for public tdm_layer_* entry points that return tdm_error.
 * Declares the private display/output/layer locals and a default-OK 'ret',
 * validates the caller-supplied 'layer' (returns TDM_ERROR_INVALID_PARAMETER
 * on NULL) and derives the private pointers from it. Intentionally ends
 * without ';' so the call site supplies it: LAYER_FUNC_ENTRY(); */
44 #define LAYER_FUNC_ENTRY() \
45 tdm_private_display *private_display; \
46 tdm_private_output *private_output; \
47 tdm_private_layer *private_layer; \
48 tdm_error ret = TDM_ERROR_NONE; /* default TDM_ERROR_NONE */\
49 TDM_RETURN_VAL_IF_FAIL(layer != NULL, TDM_ERROR_INVALID_PARAMETER); \
50 private_layer = (tdm_private_layer*)layer; \
51 private_output = private_layer->private_output; \
52 private_display = private_output->private_display
/* Variant of LAYER_FUNC_ENTRY for entry points that return a pointer and
 * report failures through an out 'error' parameter: on NULL 'layer' it sets
 * *error to TDM_ERROR_INVALID_PARAMETER (via the _WITH_ERROR macro) and
 * returns NULL. */
54 #define LAYER_FUNC_ENTRY_ERROR() \
55 tdm_private_display *private_display; \
56 tdm_private_output *private_output; \
57 tdm_private_layer *private_layer; \
58 tdm_error ret = TDM_ERROR_NONE; /* default TDM_ERROR_NONE */\
59 TDM_RETURN_VAL_IF_FAIL_WITH_ERROR(layer != NULL, TDM_ERROR_INVALID_PARAMETER, NULL); \
60 private_layer = (tdm_private_layer*)layer; \
61 private_output = private_layer->private_output; \
62 private_display = private_output->private_display
/* Variant of LAYER_FUNC_ENTRY for void entry points: on NULL 'layer' it
 * simply returns. */
64 #define LAYER_FUNC_ENTRY_VOID_RETURN() \
65 tdm_private_display *private_display; \
66 tdm_private_output *private_output; \
67 tdm_private_layer *private_layer; \
68 tdm_error ret = TDM_ERROR_NONE; /* default TDM_ERROR_NONE */\
69 TDM_RETURN_IF_FAIL(layer != NULL); \
70 private_layer = (tdm_private_layer*)layer; \
71 private_output = private_layer->private_output; \
72 private_display = private_output->private_display
/* Rejects legacy layer API calls on outputs that advertise the HWC
 * capability: logs, drops the display lock and bails out with
 * TDM_ERROR_OPERATION_FAILED. Must run while private_display->lock is held.
 * NOTE(review): chunk is sampled; the closing brace line of this macro is
 * not visible here. */
74 #define OUTPUT_HWC_CAP_CHECK() \
75 if (private_output->caps.capabilities & TDM_OUTPUT_CAPABILITY_HWC) { \
76 TDM_ERR("output(%p) support HWC. Use HWC functions", private_output); \
77 _pthread_mutex_unlock(&private_display->lock); \
78 return TDM_ERROR_OPERATION_FAILED; \
/* As OUTPUT_HWC_CAP_CHECK, but for pointer-returning entry points: stores
 * TDM_ERROR_OPERATION_FAILED in the optional out 'error' before unlocking.
 * NOTE(review): the 'return NULL' / closing-brace lines are not visible in
 * this sampled view. */
81 #define OUTPUT_HWC_CAP_CHECK_ERROR() \
82 if (private_output->caps.capabilities & TDM_OUTPUT_CAPABILITY_HWC) { \
83 TDM_ERR("output(%p) support HWC. Use HWC functions", private_output); \
84 if (error) *error = TDM_ERROR_OPERATION_FAILED; \
85 _pthread_mutex_unlock(&private_display->lock); \
/* As OUTPUT_HWC_CAP_CHECK, but for void entry points (plain return after
 * unlocking). NOTE(review): the 'return' / closing-brace lines are not
 * visible in this sampled view. */
89 #define OUTPUT_HWC_CAP_CHECK_VOID_RETURN() \
90 if (private_output->caps.capabilities & TDM_OUTPUT_CAPABILITY_HWC) { \
91 TDM_ERR("output(%p) support HWC. Use HWC functions", private_output); \
92 _pthread_mutex_unlock(&private_display->lock); \
/* Forward declarations for the static callbacks used by the layer-commit
 * machinery below (vblank wait, tbm queue acquirable/destroy, output commit)
 * and for the pending-data reset helper. */
96 static void _tdm_layer_cb_wait_vblank(tdm_vblank *vblank, tdm_error error, unsigned int sequence,
97 unsigned int tv_sec, unsigned int tv_usec, void *user_data);
98 static void _tbm_layer_queue_acquirable_cb(tbm_surface_queue_h surface_queue, void *data);
99 static void _tbm_layer_queue_destroy_cb(tbm_surface_queue_h surface_queue, void *data);
100 static void _tdm_layer_cb_output_commit(tdm_output *output, unsigned int sequence,
101 unsigned int tv_sec, unsigned int tv_usec, void *user_data);
102 static void _tdm_layer_reset_pending_data(tdm_private_layer *private_layer);
/* Returns the tdm_output that owns this layer; sets *error to
 * TDM_ERROR_NONE on success (NULL layer handled by the ENTRY_ERROR macro).
 * Reads private_layer->private_output under the display lock.
 * NOTE(review): return-type line, 'output' declaration and the NULL check
 * on 'error' are sampled out of this view. */
105 tdm_layer_get_output(tdm_layer *layer, tdm_error *error)
109 LAYER_FUNC_ENTRY_ERROR();
111 _pthread_mutex_lock(&private_display->lock);
114 *error = TDM_ERROR_NONE;
116 output = private_layer->private_output;
118 _pthread_mutex_unlock(&private_display->lock);
/* Stores this layer's index in *index. Rejects a NULL out-pointer with
 * TDM_ERROR_INVALID_PARAMETER; the copy happens under the display lock. */
124 tdm_layer_get_index(tdm_layer *layer, int *index)
128 TDM_RETURN_VAL_IF_FAIL(index != NULL, TDM_ERROR_INVALID_PARAMETER);
130 _pthread_mutex_lock(&private_display->lock);
132 *index = private_layer->index;
134 _pthread_mutex_unlock(&private_display->lock);
/* Copies the layer's capability flags (from the backend-provided caps)
 * into *capabilities under the display lock. */
140 tdm_layer_get_capabilities(tdm_layer *layer, tdm_layer_capability *capabilities)
144 TDM_RETURN_VAL_IF_FAIL(capabilities != NULL, TDM_ERROR_INVALID_PARAMETER);
146 _pthread_mutex_lock(&private_display->lock);
148 *capabilities = private_layer->caps.capabilities;
150 _pthread_mutex_unlock(&private_display->lock);
/* Exposes the backend's format table by reference: *formats points at the
 * caps-owned array (caller must NOT free it) and *count gets its length. */
156 tdm_layer_get_available_formats(tdm_layer *layer, const tbm_format **formats, int *count)
160 TDM_RETURN_VAL_IF_FAIL(formats != NULL, TDM_ERROR_INVALID_PARAMETER);
161 TDM_RETURN_VAL_IF_FAIL(count != NULL, TDM_ERROR_INVALID_PARAMETER);
163 _pthread_mutex_lock(&private_display->lock);
165 *formats = (const tbm_format *)private_layer->caps.formats;
166 *count = private_layer->caps.format_count;
168 _pthread_mutex_unlock(&private_display->lock);
/* Exposes the backend's property table by reference: *props points at the
 * caps-owned array (caller must NOT free it) and *count gets its length. */
174 tdm_layer_get_available_properties(tdm_layer *layer, const tdm_prop **props, int *count)
178 TDM_RETURN_VAL_IF_FAIL(props != NULL, TDM_ERROR_INVALID_PARAMETER);
179 TDM_RETURN_VAL_IF_FAIL(count != NULL, TDM_ERROR_INVALID_PARAMETER);
181 _pthread_mutex_lock(&private_display->lock);
183 *props = (const tdm_prop *)private_layer->caps.props;
184 *count = private_layer->caps.prop_count;
186 _pthread_mutex_unlock(&private_display->lock);
/* Stores the layer's z-position (stacking order, from backend caps) in
 * *zpos under the display lock. */
192 tdm_layer_get_zpos(tdm_layer *layer, int *zpos)
196 TDM_RETURN_VAL_IF_FAIL(zpos != NULL, TDM_ERROR_INVALID_PARAMETER);
198 _pthread_mutex_lock(&private_display->lock);
200 *zpos = private_layer->caps.zpos;
202 _pthread_mutex_unlock(&private_display->lock);
/* Sets a backend layer property (id/value) via func_layer->layer_set_property.
 * Side effect: marks the layer as "in use" (usable = 0) — logging the
 * transition when it was previously usable — since configuring a property
 * claims the layer. Returns TDM_ERROR_NOT_IMPLEMENTED if the backend does
 * not provide the hook. */
208 tdm_layer_set_property(tdm_layer *layer, unsigned int id, tdm_value value)
210 tdm_private_module *private_module;
211 tdm_func_layer *func_layer;
214 _pthread_mutex_lock(&private_display->lock);
216 private_module = private_layer->private_module;
217 func_layer = &private_module->func_layer;
219 if (private_layer->usable)
220 TDM_INFO("layer(%p,%d) not usable", private_layer, private_layer->index);
222 private_layer->usable = 0;
224 if (!func_layer->layer_set_property) {
225 /* LCOV_EXCL_START */
226 _pthread_mutex_unlock(&private_display->lock);
227 TDM_ERR("not implemented!!");
228 return TDM_ERROR_NOT_IMPLEMENTED;
232 ret = func_layer->layer_set_property(private_layer->layer_backend, id, value);
234 _pthread_mutex_unlock(&private_display->lock);
/* Reads a backend layer property (by id) into *value via
 * func_layer->layer_get_property. Unlike set_property, this does not
 * change the layer's 'usable' state. Returns TDM_ERROR_NOT_IMPLEMENTED
 * if the backend does not provide the hook. */
240 tdm_layer_get_property(tdm_layer *layer, unsigned int id, tdm_value *value)
242 tdm_private_module *private_module;
243 tdm_func_layer *func_layer;
246 TDM_RETURN_VAL_IF_FAIL(value != NULL, TDM_ERROR_INVALID_PARAMETER);
248 _pthread_mutex_lock(&private_display->lock);
250 private_module = private_layer->private_module;
251 func_layer = &private_module->func_layer;
253 if (!func_layer->layer_get_property) {
254 /* LCOV_EXCL_START */
255 _pthread_mutex_unlock(&private_display->lock);
256 TDM_ERR("not implemented!!");
257 return TDM_ERROR_NOT_IMPLEMENTED;
261 ret = func_layer->layer_get_property(private_layer->layer_backend, id, value);
263 _pthread_mutex_unlock(&private_display->lock);
/* Records the source/destination geometry for the layer. The new info is
 * NOT pushed to the backend here: it is stashed in pending_info (with
 * pending_info_changed set) and applied later by
 * tdm_layer_commit_pending_data(). Also claims the layer (usable = 0) and
 * logs a human-readable summary of the geometry.
 * Caller must hold the display lock. */
269 tdm_layer_set_info_internal(tdm_private_layer *private_layer, tdm_info_layer *info)
271 tdm_private_module *private_module;
272 tdm_func_layer *func_layer;
275 private_module = private_layer->private_module;
276 func_layer = &private_module->func_layer;
278 if (private_layer->usable)
279 TDM_INFO("layer(%p,%d) not usable", private_layer, private_layer->index);
281 private_layer->usable = 0;
283 if (!func_layer->layer_set_info) {
284 /* LCOV_EXCL_START */
285 TDM_ERR("not implemented!!");
286 return TDM_ERROR_NOT_IMPLEMENTED;
/* Render the fourcc format as a 4-char string ("NONE" when unset) for the
 * info log below. NOTE(review): fmtstr's declaration and the 'else' line
 * are sampled out of this view. */
290 if (info->src_config.format)
291 snprintf(fmtstr, 128, "%c%c%c%c", FOURCC_STR(info->src_config.format));
293 snprintf(fmtstr, 128, "NONE");
295 TDM_INFO("layer(%p,%d) info: src(%ux%u %u,%u %ux%u %s) dst(%u,%u %ux%u) trans(%d)",
296 private_layer, private_layer->index,
297 info->src_config.size.h, info->src_config.size.v,
298 info->src_config.pos.x, info->src_config.pos.y,
299 info->src_config.pos.w, info->src_config.pos.h,
301 info->dst_pos.x, info->dst_pos.y,
302 info->dst_pos.w, info->dst_pos.h,
/* Defer: mark changed and copy by value; applied at commit time. */
305 private_layer->pending_info_changed = 1;
306 private_layer->pending_info = *info;
308 return TDM_ERROR_NONE;
/* Public wrapper: validates 'info', takes the display lock and delegates
 * to tdm_layer_set_info_internal(). */
312 tdm_layer_set_info(tdm_layer *layer, tdm_info_layer *info)
316 TDM_RETURN_VAL_IF_FAIL(info != NULL, TDM_ERROR_INVALID_PARAMETER);
318 _pthread_mutex_lock(&private_display->lock);
320 ret = tdm_layer_set_info_internal(private_layer, info);
322 _pthread_mutex_unlock(&private_display->lock);
/* Queries the layer's current geometry straight from the backend
 * (func_layer->layer_get_info) — note this reflects the applied state,
 * not any still-pending info set via tdm_layer_set_info(). */
328 tdm_layer_get_info(tdm_layer *layer, tdm_info_layer *info)
330 tdm_private_module *private_module;
331 tdm_func_layer *func_layer;
334 TDM_RETURN_VAL_IF_FAIL(info != NULL, TDM_ERROR_INVALID_PARAMETER);
336 _pthread_mutex_lock(&private_display->lock);
338 private_module = private_layer->private_module;
339 func_layer = &private_module->func_layer;
341 if (!func_layer->layer_get_info) {
342 /* LCOV_EXCL_START */
343 _pthread_mutex_unlock(&private_display->lock);
344 TDM_ERR("not implemented!!");
345 return TDM_ERROR_NOT_IMPLEMENTED;
349 ret = func_layer->layer_get_info(private_layer->layer_backend, info);
351 _pthread_mutex_unlock(&private_display->lock);
356 /* LCOV_EXCL_START */
/* Debug helper: dumps 'buffer' to a file whose name encodes the output
 * pipe, this layer's zpos and the buffers currently showing on every layer
 * of the output (so dumps can be correlated across layers). Uses
 * tbm_surface_internal_dump_buffer(); only called when tdm_dump_enable is
 * set (see tdm_layer_set_buffer_internal).
 * NOTE(review): declarations of 'pipe', 'p', 'remain', 'n' and the
 * buffer-name accumulation start are sampled out of this view. */
358 _tdm_layer_dump_buffer(tdm_layer *layer, tbm_surface_h buffer)
360 tdm_private_layer *private_layer = (tdm_private_layer*)layer;
361 tdm_private_output *private_output = private_layer->private_output;
363 char fname[TDM_PATH_LEN], bufs[TDM_PATH_LEN];
364 int zpos, len = TDM_PATH_LEN;
365 tdm_private_layer *l = NULL;
370 pipe = private_output->pipe;
371 zpos = private_layer->caps.zpos;
/* Append "_<ptr>" for each layer that currently has a showing buffer. */
374 LIST_FOR_EACH_ENTRY(l, &private_output->layer_list, link) {
375 if (!l->showing_buffer)
377 TDM_SNPRINTF(p, remain, "_%p", l->showing_buffer->buffer);
380 n = snprintf(fname, sizeof(fname), "tdm_%d_lyr_%d%s", pipe, zpos, bufs);
381 if ((size_t)n >= sizeof(fname)) {
/* snprintf truncated; force NUL-termination of the name. */
382 fname[sizeof(fname) - 1] = '\0';
385 tbm_surface_internal_dump_buffer(buffer, fname);
386 TDM_DBG("%s dump excute", fname);
/* Releases one tdm_private_layer_buffer: unlinks it from whatever list it
 * is on, drops the backend buffer reference and, when the layer is fed by
 * a tbm surface queue, releases the surface back to that queue. The display
 * lock is dropped around the tbm/tdm_buffer calls because those may invoke
 * user callbacks that re-enter tdm.
 * NOTE(review): the free() of 'layer_buffer' itself is sampled out of
 * this view. */
393 tdm_layer_free_buffer(tdm_private_layer *private_layer, tdm_private_layer_buffer *layer_buffer)
395 tdm_private_display *private_display;
400 private_display = private_layer->private_output->private_display;
402 LIST_DEL(&layer_buffer->link);
403 if (layer_buffer->buffer) {
404 _pthread_mutex_unlock(&private_display->lock);
405 tdm_buffer_unref_backend(layer_buffer->buffer);
406 if (private_layer->buffer_queue)
407 tbm_surface_queue_release(private_layer->buffer_queue, layer_buffer->buffer);
408 _pthread_mutex_lock(&private_display->lock);
/* Tears down everything buffer-related on a layer: pending data, the
 * waiting/committed/showing buffers, any commit handlers for this layer
 * still queued on the output (both the in-flight and the pending lists),
 * and finally the tbm surface queue callbacks. Handlers are first moved
 * onto a local 'clone_list' so they can be processed without racing the
 * output lists. Caller must hold the display lock. */
414 _tdm_layer_free_all_buffers(tdm_private_layer *private_layer)
416 tdm_private_output *private_output = private_layer->private_output;
417 tdm_private_layer_commit_handler *lm = NULL, *lmm = NULL;
418 struct list_head clone_list;
420 LIST_INITHEAD(&clone_list);
422 _tdm_layer_reset_pending_data(private_layer);
424 if (private_layer->waiting_buffer) {
425 tdm_layer_free_buffer(private_layer, private_layer->waiting_buffer);
426 private_layer->waiting_buffer = NULL;
428 if (tdm_debug_module & TDM_DEBUG_BUFFER)
429 TDM_INFO("layer(%p,%d) waiting_buffer(%p)",
430 private_layer, private_layer->index, private_layer->waiting_buffer);
433 if (private_layer->committed_buffer) {
434 tdm_layer_free_buffer(private_layer, private_layer->committed_buffer);
435 private_layer->committed_buffer = NULL;
437 if (tdm_debug_module & TDM_DEBUG_BUFFER)
438 TDM_INFO("layer(%p,%d) committed_buffer(%p)",
439 private_layer, private_layer->index, private_layer->committed_buffer);
442 if (private_layer->showing_buffer) {
443 tdm_layer_free_buffer(private_layer, private_layer->showing_buffer);
444 private_layer->showing_buffer = NULL;
446 if (tdm_debug_module & TDM_DEBUG_BUFFER)
447 TDM_INFO("layer(%p,%d) showing_buffer(%p)",
448 private_layer, private_layer->index, private_layer->showing_buffer);
/* Collect this layer's in-flight commit handlers, then detach their
 * output-commit callbacks and free their buffers. */
451 LIST_FOR_EACH_ENTRY_SAFE(lm, lmm, &private_output->layer_commit_handler_list, link) {
452 if (lm->private_layer != private_layer)
455 LIST_ADDTAIL(&lm->link, &clone_list);
458 LIST_FOR_EACH_ENTRY_SAFE(lm, lmm, &clone_list, link) {
460 tdm_output_remove_commit_handler_internal(private_output, _tdm_layer_cb_output_commit, lm);
461 tdm_layer_free_buffer(private_layer, lm->committed_buffer);
/* Same for handlers still waiting on the pending (per-vblank) list. */
465 LIST_FOR_EACH_ENTRY_SAFE(lm, lmm, &private_output->pending_commit_handler_list, link) {
466 if (lm->private_layer != private_layer)
469 LIST_ADDTAIL(&lm->link, &clone_list);
472 LIST_FOR_EACH_ENTRY_SAFE(lm, lmm, &clone_list, link) {
474 tdm_layer_free_buffer(private_layer, lm->committed_buffer);
478 if (private_layer->buffer_queue) {
479 tbm_surface_queue_remove_acquirable_cb(private_layer->buffer_queue, _tbm_layer_queue_acquirable_cb, private_layer);
480 tbm_surface_queue_remove_destroy_cb(private_layer->buffer_queue, _tbm_layer_queue_destroy_cb, private_layer);
481 private_layer->buffer_queue = NULL;
/* Stages 'buffer' as the layer's next frame. The buffer is NOT handed to
 * the backend here: it is ref'd and stored in pending_buffer (replacing —
 * and logging/tracing the drop of — any frame staged earlier but never
 * committed), to be applied by tdm_layer_commit_pending_data(). Also
 * handles the debug dump paths and claims the layer (usable = 0).
 * Caller must hold the display lock. */
486 tdm_layer_set_buffer_internal(tdm_private_layer *private_layer, tbm_surface_h buffer)
488 tdm_private_module *private_module;
489 tdm_private_output *private_output = private_layer->private_output;
490 tdm_func_layer *func_layer;
492 /* LCOV_EXCL_START */
/* Debug-only dump paths; video layers are skipped in both. */
494 if (tdm_dump_enable && !(private_layer->caps.capabilities & TDM_LAYER_CAPABILITY_VIDEO))
495 _tdm_layer_dump_buffer(private_layer, buffer);
497 if (tdm_debug_dump & TDM_DUMP_FLAG_LAYER &&
498 !(private_layer->caps.capabilities & TDM_LAYER_CAPABILITY_VIDEO)) {
499 char str[TDM_PATH_LEN];
501 snprintf(str, TDM_PATH_LEN, "layer_%d_%d_%03d",
502 private_output->index, private_layer->index, i++);
503 tdm_helper_dump_buffer_str(buffer, tdm_debug_dump_dir, str);
507 private_module = private_layer->private_module;
508 func_layer = &private_module->func_layer;
510 if (private_layer->usable)
511 TDM_INFO("layer(%p,%d) not usable", private_layer, private_layer->index);
513 private_layer->usable = 0;
515 if (!func_layer->layer_set_buffer) {
516 /* LCOV_EXCL_START */
517 TDM_ERR("not implemented!!");
518 return TDM_ERROR_NOT_IMPLEMENTED;
519 /* LCOV_EXCL_STOP */
522 private_layer->pending_buffer_changed = 1;
/* A previously staged but uncommitted frame is being replaced: log it,
 * close its ttrace span and drop our reference. */
524 if (private_layer->pending_buffer) {
526 if (tdm_debug_module & TDM_DEBUG_BUFFER)
527 TDM_INFO("layer(%p,%d) pending_buffer(%p) skipped",
528 private_layer, private_layer->index, private_layer->pending_buffer);
530 if (tdm_ttrace_module & TDM_TTRACE_LAYER) {
531 tbm_bo bo = tbm_surface_internal_get_bo(private_layer->pending_buffer, 0);
532 TDM_TRACE_ASYNC_END((intptr_t)private_layer, "[LAYER] %d", tbm_bo_export(bo));
535 tbm_surface_internal_unref(private_layer->pending_buffer);
/* Take our own reference on the new frame; released on commit/reset. */
538 tbm_surface_internal_ref(buffer);
539 private_layer->pending_buffer = buffer;
541 if (tdm_debug_module & TDM_DEBUG_BUFFER) {
542 tbm_bo bo = tbm_surface_internal_get_bo(private_layer->pending_buffer, 0);
543 int flags = tbm_bo_get_flags(bo);
544 TDM_INFO("layer(%p,%d) pending_buffer(%p) bo_flags(%x)",
545 private_layer, private_layer->index, private_layer->pending_buffer, flags);
548 if (tdm_ttrace_module & TDM_TTRACE_LAYER) {
549 tbm_bo bo = tbm_surface_internal_get_bo(private_layer->pending_buffer, 0);
550 TDM_TRACE_ASYNC_BEGIN((intptr_t)private_layer, "[LAYER] %d", tbm_bo_export(bo));
553 return TDM_ERROR_NONE;
/* Public wrapper: validates 'buffer', takes the display lock and delegates
 * to tdm_layer_set_buffer_internal(). */
557 tdm_layer_set_buffer(tdm_layer *layer, tbm_surface_h buffer)
561 TDM_RETURN_VAL_IF_FAIL(buffer != NULL, TDM_ERROR_INVALID_PARAMETER);
563 _pthread_mutex_lock(&private_display->lock);
565 ret = tdm_layer_set_buffer_internal(private_layer, buffer);
567 _pthread_mutex_unlock(&private_display->lock);
/* Detaches everything from the layer: frees all staged/committed/showing
 * buffers and queued handlers, marks the layer usable again, then asks the
 * backend to unset its buffer. A failing backend call is only warned about.
 * Caller must hold the display lock. */
573 tdm_layer_unset_buffer_internal(tdm_private_layer *private_layer)
575 tdm_private_module *private_module;
576 tdm_func_layer *func_layer;
577 tdm_error ret = TDM_ERROR_NONE;
579 private_module = private_layer->private_module;
580 func_layer = &private_module->func_layer;
582 _tdm_layer_free_all_buffers(private_layer);
584 private_layer->usable = 1;
586 if (private_layer->usable)
587 TDM_INFO("layer(%p,%d) now usable", private_layer, private_layer->index);
589 if (!func_layer->layer_unset_buffer) {
590 /* LCOV_EXCL_START */
591 TDM_ERR("not implemented!!");
592 return TDM_ERROR_NOT_IMPLEMENTED;
593 /* LCOV_EXCL_STOP */
596 ret = func_layer->layer_unset_buffer(private_layer->layer_backend);
597 TDM_WARNING_IF_FAIL(ret == TDM_ERROR_NONE);
/* Public wrapper: takes the display lock and delegates to
 * tdm_layer_unset_buffer_internal(), warning on failure. */
603 tdm_layer_unset_buffer(tdm_layer *layer)
607 _pthread_mutex_lock(&private_display->lock);
609 ret = tdm_layer_unset_buffer_internal(private_layer);
610 TDM_WARNING_IF_FAIL(ret == TDM_ERROR_NONE);
612 _pthread_mutex_unlock(&private_display->lock);
/* Promotes a committed buffer to "showing": frees the buffer that was on
 * screen (closing its ttrace span), then transfers ownership of
 * *committed_buffer to private_layer->showing_buffer and NULLs the
 * caller's pointer. Also maintains the optional per-layer FPS counter.
 * Caller must hold the display lock. */
618 tdm_layer_committed(tdm_private_layer *private_layer, tdm_private_layer_buffer **committed_buffer)
620 tdm_private_output *private_output = private_layer->private_output;
621 tdm_private_display *private_display = private_output->private_display;
623 /* LCOV_EXCL_START */
/* FPS accounting: count frames and report once per second while
 * print_fps is on; reset the counters when it is switched off. */
624 if (private_display->print_fps) {
625 double curr = tdm_helper_get_time();
626 if (private_layer->fps_stamp == 0) {
627 private_layer->fps_stamp = curr;
628 } else if ((curr - private_layer->fps_stamp) > 1.0) {
629 TDM_INFO("output(%d) layer(%p,%d) fps: %d",
630 private_output->index, private_layer, private_layer->index, private_layer->fps_count);
631 private_layer->fps_count = 0;
632 private_layer->fps_stamp = curr;
634 private_layer->fps_count++;
635 } else if (private_layer->fps_stamp != 0) {
636 private_layer->fps_stamp = 0;
637 private_layer->fps_count = 0;
/* The previously showing buffer is now replaced on screen — release it. */
641 if (private_layer->showing_buffer) {
642 if (tdm_ttrace_module & TDM_TTRACE_LAYER) {
643 tbm_bo bo = tbm_surface_internal_get_bo(private_layer->showing_buffer->buffer, 0);
644 TDM_TRACE_ASYNC_END((intptr_t)private_layer, "[LAYER] %d", tbm_bo_export(bo));
647 tdm_layer_free_buffer(private_layer, private_layer->showing_buffer);
/* Ownership transfer: caller's committed buffer becomes the showing one. */
650 private_layer->showing_buffer = *committed_buffer;
651 *committed_buffer = NULL;
653 if (tdm_debug_module & TDM_DEBUG_BUFFER)
/* NOTE(review): *committed_buffer is always NULL at this point (set just
 * above), so the first %p in this log is constant — confirm intent. */
654 TDM_INFO("layer(%p,%d) committed_buffer(%p) showing_buffer(%p)",
655 private_layer, private_layer->index, *committed_buffer,
656 (private_layer->showing_buffer) ? private_layer->showing_buffer->buffer : NULL);
/* Vblank handler for the commit-per-vblank path. Steps:
 *  1) snapshot the in-flight and pending handler lists into local clones
 *     (clearing each layer's 'committing' flag);
 *  2) for every in-flight handler: promote its buffer to showing
 *     (tdm_layer_committed) and invoke the user callback with the lock
 *     dropped;
 *  3) if anything is pending: issue one output commit for the batch, wait
 *     for the next vblank (unless DPMS has vsync off), and move the pending
 *     handlers onto the in-flight list;
 *  4) with vsync off, call this handler again directly so step 2 still
 *     runs for them.
 * On any failure after commit, the 'wait_failed' path below guarantees the
 * handlers are still called (see the CAUTION comment).
 * Caller must hold the display lock.
 * NOTE(review): the 'wait_failed:' label line and the frees after the
 * final loop are sampled out of this view. */
660 _tdm_layer_got_output_vblank(tdm_private_output *private_output, unsigned int sequence,
661 unsigned int tv_sec, unsigned int tv_usec)
663 tdm_private_layer_commit_handler *lm = NULL, *lmm = NULL;
664 tdm_private_display *private_display;
665 struct list_head clone_list, pending_clone_list;
666 tdm_error ret = TDM_ERROR_NONE;
668 private_display = private_output->private_display;
670 private_output->layer_waiting_vblank = 0;
672 LIST_INITHEAD(&clone_list);
673 LIST_INITHEAD(&pending_clone_list);
675 LIST_FOR_EACH_ENTRY_SAFE(lm, lmm, &private_output->layer_commit_handler_list, link) {
677 lm->private_layer->committing = 0;
678 LIST_ADDTAIL(&lm->link, &clone_list);
681 LIST_FOR_EACH_ENTRY_SAFE(lm, lmm, &private_output->pending_commit_handler_list, link) {
683 lm->private_layer->committing = 0;
684 LIST_ADDTAIL(&lm->link, &pending_clone_list);
687 LIST_FOR_EACH_ENTRY_SAFE(lm, lmm, &clone_list, link) {
688 if (tdm_debug_module & TDM_DEBUG_COMMIT)
689 TDM_INFO("layer(%p,%d) committed. handle(%p) commited_buffer(%p)",
690 lm->private_layer, lm->private_layer->index, lm, (lm->committed_buffer) ? lm->committed_buffer->buffer : NULL);
693 tdm_layer_committed(lm->private_layer, &lm->committed_buffer);
/* User callback runs unlocked; it may re-enter tdm. */
694 _pthread_mutex_unlock(&private_display->lock);
696 lm->func(lm->private_layer, sequence, tv_sec, tv_usec, lm->user_data);
697 _pthread_mutex_lock(&private_display->lock);
698 if (lm->committed_buffer)
699 tdm_layer_free_buffer(lm->private_layer, lm->committed_buffer);
703 if (LIST_IS_EMPTY(&pending_clone_list))
706 TDM_GOTO_IF_FAIL(private_output->vblank != NULL, wait_failed);
/* Batch all pending layer changes into a single output commit. */
708 ret = tdm_output_commit_internal(private_output, 0, NULL, NULL);
709 TDM_GOTO_IF_FAIL(ret == TDM_ERROR_NONE, wait_failed);
711 if (tdm_debug_module & TDM_DEBUG_COMMIT)
712 TDM_INFO("layer commit: output(%d) commit", private_output->pipe);
714 if (!TDM_OUTPUT_DPMS_VSYNC_IS_OFF(private_output->current_dpms_value)) {
715 /* tdm_vblank APIs is for server. it should be called in unlock status*/
716 if (!private_output->layer_waiting_vblank) {
717 _pthread_mutex_unlock(&private_display->lock);
718 ret = tdm_vblank_wait(private_output->vblank, 0, 0, 1, _tdm_layer_cb_wait_vblank, private_output);
719 _pthread_mutex_lock(&private_display->lock);
720 TDM_GOTO_IF_FAIL(ret == TDM_ERROR_NONE, wait_failed);
721 private_output->layer_waiting_vblank = 1;
725 if (tdm_debug_module & TDM_DEBUG_COMMIT)
726 TDM_INFO("layer commit: output(%d) wait vblank", private_output->pipe);
/* Pending handlers are now in flight: they complete on the next vblank. */
728 LIST_FOR_EACH_ENTRY_SAFE(lm, lmm, &pending_clone_list, link) {
730 LIST_ADDTAIL(&lm->link, &private_output->layer_commit_handler_list);
733 if (TDM_OUTPUT_DPMS_VSYNC_IS_OFF(private_output->current_dpms_value)) {
734 TDM_WRN("dpms %s. Directly call vblank callback.", tdm_dpms_str(private_output->current_dpms_value));
735 _pthread_mutex_unlock(&private_display->lock);
736 _tdm_layer_cb_wait_vblank(private_output->vblank, 0, 0, 0, 0, private_output);
737 _pthread_mutex_lock(&private_display->lock);
742 /* CAUTION: Once _tdm_layer_commit returns success, the layer commit handler MUST be called always.
743 * That is, even if we get error in _tdm_layer_got_output_vblank() function for some reasons,
744 * the layer commit handler MUST be called.
746 LIST_FOR_EACH_ENTRY_SAFE(lm, lmm, &pending_clone_list, link) {
747 TDM_WRN("Directly call layer commit handlers: ret(%d)\n", ret);
749 _pthread_mutex_unlock(&private_display->lock);
751 lm->func(lm->private_layer, sequence, tv_sec, tv_usec, lm->user_data);
752 _pthread_mutex_lock(&private_display->lock);
753 tdm_layer_free_buffer(lm->private_layer, lm->committed_buffer);
/* Output-commit callback for the non-per-vblank path (user_data is the
 * layer's commit handler). Verifies the handler is still on the output's
 * list (it may have been freed by _tdm_layer_free_all_buffers in the
 * meantime), unlinks it, promotes the committed buffer to showing, calls
 * the user's commit handler with the lock dropped, and frees the handler.
 * NOTE(review): the early-return when the handler is NOT found on the list
 * is sampled out of this view — confirm against full source. */
761 _tdm_layer_cb_output_commit(tdm_output *output, unsigned int sequence,
762 unsigned int tv_sec, unsigned int tv_usec, void *user_data)
764 tdm_private_layer_commit_handler *layer_commit_handler = user_data;
765 tdm_private_layer_commit_handler *lm = NULL;
766 tdm_private_display *private_display;
767 tdm_private_output *private_output = output;
768 tdm_private_layer *private_layer;
771 TDM_RETURN_IF_FAIL(layer_commit_handler != NULL);
773 private_display = private_output->private_display;
/* Guard against a handler freed while the backend commit was in flight. */
775 LIST_FOR_EACH_ENTRY(lm, &private_output->layer_commit_handler_list, link) {
776 if (layer_commit_handler == lm) {
785 LIST_DEL(&layer_commit_handler->link);
787 private_layer = layer_commit_handler->private_layer;
789 private_layer->committing = 0;
791 if (tdm_debug_module & TDM_DEBUG_COMMIT)
792 TDM_INFO("layer(%p,%d) commit: output(%d) committed. handle(%p)",
793 private_layer, private_layer->index, private_output->pipe, layer_commit_handler);
795 _pthread_mutex_lock(&private_display->lock);
797 tdm_layer_committed(private_layer, &layer_commit_handler->committed_buffer);
799 if (layer_commit_handler->func) {
/* User callback runs unlocked; it may re-enter tdm. */
800 _pthread_mutex_unlock(&private_display->lock);
801 layer_commit_handler->func(private_layer, sequence,
802 tv_sec, tv_usec, layer_commit_handler->user_data);
803 _pthread_mutex_lock(&private_display->lock);
806 free(layer_commit_handler);
808 _pthread_mutex_unlock(&private_display->lock);
/* tdm_vblank callback (user_data is the private output): takes the display
 * lock and forwards the timestamp to _tdm_layer_got_output_vblank(), which
 * completes the in-flight layer commits. Also invoked directly (with zeroed
 * arguments) when DPMS has vsync off. */
812 _tdm_layer_cb_wait_vblank(tdm_vblank *vblank, tdm_error error, unsigned int sequence,
813 unsigned int tv_sec, unsigned int tv_usec, void *user_data)
815 tdm_private_output *private_output = user_data;
816 tdm_private_display *private_display;
818 TDM_RETURN_IF_FAIL(private_output != NULL);
820 private_display = private_output->private_display;
822 _pthread_mutex_lock(&private_display->lock);
824 if (tdm_debug_module & TDM_DEBUG_COMMIT)
825 TDM_INFO("layer commit: output(%d) got vblank", private_output->pipe);
827 _tdm_layer_got_output_vblank(private_output, sequence, tv_sec, tv_usec);
829 _pthread_mutex_unlock(&private_display->lock);
/* Counts the layers of this output that are currently claimed
 * (usable == 0). Used by _tdm_layer_commit_possible() below.
 * NOTE(review): the count increment and 'return count' lines are sampled
 * out of this view. */
833 _tdm_layer_get_output_used_layer_count(tdm_private_output *private_output)
835 tdm_private_layer *private_layer = NULL;
836 unsigned int count = 0;
838 LIST_FOR_EACH_ENTRY(private_layer, &private_output->layer_list, link) {
839 if (!private_layer->usable)
846 /* commit_per_vblank == 1: we can commit if
847 * - there is no previous commit request
848 * - only 1 layer is used
849 * commit_per_vblank == 2: we can commit if
850 * - there is no previous commit request
/* Decides whether a layer commit may go to the output right now or must be
 * queued on pending_commit_handler_list until the next vblank. Returns
 * nonzero ("possible") when commit_per_vblank is unset, and applies the
 * rules documented above otherwise.
 * NOTE(review): the 'return 0' lines of the two failure branches and the
 * final 'return 1' are sampled out of this view. */
853 _tdm_layer_commit_possible(tdm_private_layer *private_layer)
855 tdm_private_output *private_output = private_layer->private_output;
857 TDM_RETURN_VAL_IF_FAIL(private_output->commit_per_vblank > 0, 1);
859 /* There is a previous commit request which is not done and displayed on screen yet.
860 * We can't commit at this time.
862 if (!LIST_IS_EMPTY(&private_output->layer_commit_handler_list)) {
863 if (tdm_debug_module & TDM_DEBUG_COMMIT)
864 TDM_INFO("layer(%p,%d) commit: not possible(previous commit)",
865 private_layer, private_layer->index);
869 if (private_output->commit_per_vblank == 1 && _tdm_layer_get_output_used_layer_count(private_output) > 1) {
870 if (tdm_debug_module & TDM_DEBUG_COMMIT)
871 TDM_INFO("layer(%p,%d) commit: not possible(more than 2 layers)",
872 private_layer, private_layer->index);
876 if (tdm_debug_module & TDM_DEBUG_COMMIT)
877 TDM_INFO("layer(%p,%d) commit: possible", private_layer, private_layer->index);
/* Discards the layer's staged-but-uncommitted state: clears the pending
 * info (and its changed flag) and drops our reference on any pending
 * buffer. Called after the pending data is applied, and on teardown. */
883 _tdm_layer_reset_pending_data(tdm_private_layer *private_layer)
885 private_layer->pending_info_changed = 0;
886 memset(&private_layer->pending_info, 0, sizeof private_layer->pending_info);
888 private_layer->pending_buffer_changed = 0;
889 if (private_layer->pending_buffer) {
890 tbm_surface_internal_unref(private_layer->pending_buffer);
891 private_layer->pending_buffer = NULL;
/* Pushes the staged info/buffer to the backend. If info changed, applies
 * it via layer_set_info (aborting on failure). If the buffer changed,
 * wraps it in a new tdm_private_layer_buffer, hands it to layer_set_buffer
 * and, on success, installs it as waiting_buffer (taking a backend
 * reference; any previous waiting buffer is freed). Always ends by
 * clearing the pending state via _tdm_layer_reset_pending_data().
 * Caller must hold the display lock.
 * NOTE(review): the 'else' before line 935, the 'done:' label and the
 * final return are sampled out of this view. */
896 tdm_layer_commit_pending_data(tdm_private_layer *private_layer)
898 tdm_private_module *private_module;
899 tdm_func_layer *func_layer;
900 tdm_error ret = TDM_ERROR_NONE;
902 private_module = private_layer->private_module;
903 func_layer = &private_module->func_layer;
905 if (private_layer->pending_info_changed) {
906 ret = func_layer->layer_set_info(private_layer->layer_backend, &private_layer->pending_info);
907 TDM_GOTO_IF_FAIL(ret == TDM_ERROR_NONE, done);
910 if (private_layer->pending_buffer_changed) {
911 tdm_private_layer_buffer *layer_buffer;
913 layer_buffer = calloc(1, sizeof(tdm_private_layer_buffer));
914 TDM_GOTO_IF_FAIL(layer_buffer != NULL, done);
916 LIST_INITHEAD(&layer_buffer->link);
918 ret = func_layer->layer_set_buffer(private_layer->layer_backend, private_layer->pending_buffer);
919 TDM_WARNING_IF_FAIL(ret == TDM_ERROR_NONE);
921 if (tdm_debug_module & TDM_DEBUG_BUFFER)
922 TDM_INFO("layer(%p,%d) pending_buffer(%p) committed",
923 private_layer, private_layer->index, private_layer->pending_buffer);
925 if (ret == TDM_ERROR_NONE) {
/* Replace any frame that was waiting but never reached the screen. */
926 if (private_layer->waiting_buffer)
927 tdm_layer_free_buffer(private_layer, private_layer->waiting_buffer);
929 private_layer->waiting_buffer = layer_buffer;
930 private_layer->waiting_buffer->buffer = tdm_buffer_ref_backend(private_layer->pending_buffer);
931 if (tdm_debug_module & TDM_DEBUG_BUFFER)
932 TDM_INFO("layer(%p,%d) waiting_buffer(%p)",
933 private_layer, private_layer->index, private_layer->waiting_buffer->buffer);
935 tdm_layer_free_buffer(private_layer, layer_buffer);
939 _tdm_layer_reset_pending_data(private_layer);
944 /* CAUTION: Once _tdm_layer_commit returns success, the layer commit handler MUST be called always.
945 * That is, even if we get error in _tdm_layer_got_output_vblank() function for some reasons,
946 * the layer commit handler MUST be called.
/* Core commit: allocates a commit handler owning the layer's waiting
 * buffer (which becomes committed_buffer), then either
 *  - no commit-per-vblank: queues the handler and commits the output
 *    immediately with _tdm_layer_cb_output_commit as completion; or
 *  - commit-per-vblank: commits now if _tdm_layer_commit_possible(),
 *    otherwise parks the handler on pending_commit_handler_list; in both
 *    cases ensures a tdm_vblank object exists and a vblank wait is armed
 *    so _tdm_layer_cb_wait_vblank can complete/flush the handlers.
 * On failure, 'commit_failed' returns the buffer to waiting_buffer and
 * frees the handler. Caller must hold the display lock.
 * NOTE(review): the function's opening lines (return type, ENTRY macro)
 * and the commit_failed label/return lines are sampled out of this view. */
949 _tdm_layer_commit(tdm_layer *layer, tdm_layer_commit_handler func, void *user_data)
951 tdm_private_layer_commit_handler *layer_commit_handler;
954 layer_commit_handler = calloc(1, sizeof(tdm_private_layer_commit_handler));
955 if (!layer_commit_handler) {
956 /* LCOV_EXCL_START */
957 TDM_ERR("failed: alloc memory");
958 return TDM_ERROR_OUT_OF_MEMORY;
962 if (tdm_debug_module & TDM_DEBUG_COMMIT)
963 TDM_INFO("layer(%p,%d) commit: handle(%p)", private_layer, private_layer->index, layer_commit_handler);
965 LIST_INITHEAD(&layer_commit_handler->link);
966 layer_commit_handler->private_layer = private_layer;
967 layer_commit_handler->func = func;
968 layer_commit_handler->user_data = user_data;
/* Ownership of the waiting buffer moves into this handler. */
970 layer_commit_handler->committed_buffer = private_layer->waiting_buffer;
971 private_layer->waiting_buffer = NULL;
973 if (!private_layer->committing && layer_commit_handler->committed_buffer)
974 private_layer->commiting_buffer = layer_commit_handler->committed_buffer->buffer;
976 if (private_layer->committing)
977 TDM_WRN("layer(%p,%d) too many commit", private_layer, private_layer->index);
979 private_layer->committing = 1;
981 if (tdm_debug_module & TDM_DEBUG_BUFFER)
982 TDM_INFO("layer(%p,%d) waiting_buffer(%p) committed_buffer(%p)",
983 private_layer, private_layer->index, private_layer->waiting_buffer,
984 (layer_commit_handler->committed_buffer) ? layer_commit_handler->committed_buffer->buffer : NULL);
986 if (!private_output->commit_per_vblank) {
/* Output-style commit: the commit type must not have been switched. */
987 TDM_GOTO_IF_FAIL(private_output->commit_type == TDM_COMMIT_TYPE_OUTPUT, commit_failed);
989 LIST_ADDTAIL(&layer_commit_handler->link, &private_output->layer_commit_handler_list);
990 ret = tdm_output_commit_internal(private_layer->private_output, 0, _tdm_layer_cb_output_commit, layer_commit_handler);
991 TDM_GOTO_IF_FAIL(ret == TDM_ERROR_NONE, commit_failed);
993 if (tdm_debug_module & TDM_DEBUG_COMMIT)
994 TDM_INFO("layer(%p,%d) commit: no commit-per-vblank", private_layer, private_layer->index);
996 TDM_GOTO_IF_FAIL(private_output->commit_type == TDM_COMMIT_TYPE_LAYER, commit_failed);
998 if (_tdm_layer_commit_possible(private_layer)) {
999 /* add to layer_commit_handler_list */
1000 LIST_ADDTAIL(&layer_commit_handler->link, &private_output->layer_commit_handler_list);
1001 ret = tdm_output_commit_internal(private_layer->private_output, 0, _tdm_layer_cb_output_commit, layer_commit_handler);
1002 TDM_GOTO_IF_FAIL(ret == TDM_ERROR_NONE, commit_failed);
1004 if (tdm_debug_module & TDM_DEBUG_COMMIT)
1005 TDM_INFO("layer(%p,%d) commit: output", private_layer, private_layer->index);
1007 /* add to pending_commit_handler_list. It will be commited when a vblank occurs */
1008 LIST_ADDTAIL(&layer_commit_handler->link, &private_output->pending_commit_handler_list);
1010 if (tdm_debug_module & TDM_DEBUG_COMMIT)
1011 TDM_INFO("layer(%p,%d) commit: pending", private_layer, private_layer->index);
1014 if (!private_output->vblank) {
1015 /* tdm_vblank APIs is for server. it should be called in unlock status*/
1016 _pthread_mutex_unlock(&private_display->lock);
1017 private_output->vblank = tdm_vblank_create(private_display, private_output, NULL);
1018 _pthread_mutex_lock(&private_display->lock);
1019 TDM_GOTO_IF_FAIL(private_output->vblank != NULL, commit_failed);
1022 if (!private_output->layer_waiting_vblank) {
1023 /* tdm_vblank APIs is for server. it should be called in unlock status*/
1024 _pthread_mutex_unlock(&private_display->lock);
1025 ret = tdm_vblank_wait(private_output->vblank, 0, 0, 1, _tdm_layer_cb_wait_vblank, private_output);
1026 _pthread_mutex_lock(&private_display->lock);
1027 if (ret != TDM_ERROR_NONE) {
1028 if (TDM_OUTPUT_DPMS_VSYNC_IS_OFF(private_output->current_dpms_value)) {
1029 /* dpms off : the allocated memory was free in tdm_output_commit_internal */
1035 private_output->layer_waiting_vblank = 1;
1037 if (tdm_debug_module & TDM_DEBUG_COMMIT)
1038 TDM_INFO("layer(%p,%d) commit: wait vblank", private_layer, private_layer->index);
/* Failure path: undo the ownership transfer and drop the handler. */
1045 if (layer_commit_handler) {
1046 private_layer->waiting_buffer = layer_commit_handler->committed_buffer;
1047 LIST_DEL(&layer_commit_handler->link);
1048 free(layer_commit_handler);
/* Commit this layer's current pending state to the display hardware.
 * Internal variant: the display lock must already be held by the caller.
 * Returns TDM_ERROR_NONE on success, TDM_ERROR_DPMS_OFF when vsync is
 * unavailable, or the error propagated from the commit helpers. */
1054 tdm_layer_commit_internal(tdm_private_layer *private_layer, tdm_layer_commit_handler func, void *user_data)
1056 tdm_private_output *private_output = private_layer->private_output;
1057 tdm_error ret = TDM_ERROR_NONE; /* default TDM_ERROR_NONE */
/* First commit on this output latches the commit granularity:
 * whole-output commits when commit-per-vblank is disabled, otherwise
 * per-layer commits (presumably the alternative branch — confirm against
 * the full source). */
1059 if (private_output->commit_type == TDM_COMMIT_TYPE_NONE) {
1060 if (!private_output->commit_per_vblank)
1061 private_output->commit_type = TDM_COMMIT_TYPE_OUTPUT;
1063 private_output->commit_type = TDM_COMMIT_TYPE_LAYER;
/* A commit completes on vblank; refuse while DPMS has vsync turned off. */
1066 if (TDM_OUTPUT_DPMS_VSYNC_IS_OFF(private_output->current_dpms_value)) {
1067 TDM_ERR("layer(%p,%d)'s output(%d) dpms: %s", private_layer, private_layer->index, private_output->pipe,
1068 tdm_dpms_str(private_output->current_dpms_value));
1069 return TDM_ERROR_DPMS_OFF;
/* Flush any data queued before this commit.
 * don't call this inside of _tdm_layer_commit */
1073 ret = tdm_layer_commit_pending_data(private_layer);
1074 if (ret != TDM_ERROR_NONE) {
1075 TDM_ERR("layer(%p,%d) committing pending data failed", private_layer, private_layer->index);
/* Perform the actual commit; func/user_data are invoked on completion. */
1079 ret = _tdm_layer_commit(private_layer, func, user_data);
/* Public API: commit the layer's pending state.
 * Thin locked wrapper around tdm_layer_commit_internal(); the display
 * lock is taken for the duration of the internal call. */
1085 tdm_layer_commit(tdm_layer *layer, tdm_layer_commit_handler func, void *user_data)
1089 _pthread_mutex_lock(&private_display->lock);
1091 ret = tdm_layer_commit_internal(private_layer, func, user_data);
1093 _pthread_mutex_unlock(&private_display->lock);
/* Public API: query whether this layer currently has a commit in flight.
 * @param committing  [out] receives the layer's committing flag; must be
 *                    non-NULL (TDM_ERROR_INVALID_PARAMETER otherwise).
 * The flag is read under the display lock for a consistent snapshot. */
1099 tdm_layer_is_committing(tdm_layer *layer, unsigned int *committing)
1103 TDM_RETURN_VAL_IF_FAIL(committing != NULL, TDM_ERROR_INVALID_PARAMETER);
1105 _pthread_mutex_lock(&private_display->lock);
1107 *committing = private_layer->committing;
1109 _pthread_mutex_unlock(&private_display->lock);
/* Remove every commit handler on this layer matching (func, user_data),
 * from both the in-flight list and the pending (waiting-for-vblank) list.
 * Caller must hold the display lock (asserted below). Matched handlers
 * are unlinked, their output-level handler is removed, and the committed
 * buffer they held is released. */
1115 tdm_layer_remove_commit_handler_internal(tdm_layer *layer, tdm_layer_commit_handler func, void *user_data)
/* NOTE(review): private_layer/private_output are dereferenced here before
 * the TDM_RETURN_IF_FAIL(private_layer != NULL) check below — the check
 * is ineffective if layer can actually be NULL; confirm callers. */
1117 tdm_private_layer *private_layer = (tdm_private_layer*)layer;
1118 tdm_private_output *private_output = private_layer->private_output;
1119 tdm_private_layer_commit_handler *lm = NULL, *lmm = NULL;
/* Nothing to match against; bail out early. */
1121 if (!func && !user_data)
1124 TDM_RETURN_IF_FAIL(private_layer != NULL);
1125 TDM_RETURN_IF_FAIL(TDM_MUTEX_IS_LOCKED());
/* Handlers already submitted to the output commit machinery. */
1127 LIST_FOR_EACH_ENTRY_SAFE(lm, lmm, &private_output->layer_commit_handler_list, link) {
1128 if (lm->func == func && lm->user_data == user_data) {
1129 LIST_DEL(&lm->link);
1130 tdm_output_remove_commit_handler_internal(private_output, _tdm_layer_cb_output_commit, lm);
1131 tdm_layer_free_buffer(private_layer, lm->committed_buffer);
/* Handlers still queued, waiting for a vblank before being committed. */
1137 LIST_FOR_EACH_ENTRY_SAFE(lm, lmm, &private_output->pending_commit_handler_list, link) {
1138 if (lm->func == func && lm->user_data == user_data) {
1139 LIST_DEL(&lm->link);
1140 tdm_output_remove_commit_handler_internal(private_output, _tdm_layer_cb_output_commit, lm);
1141 tdm_layer_free_buffer(private_layer, lm->committed_buffer);
/* Public API: remove commit handlers matching (func, user_data).
 * Locked wrapper around tdm_layer_remove_commit_handler_internal(). */
1149 tdm_layer_remove_commit_handler(tdm_layer *layer, tdm_layer_commit_handler func, void *user_data)
1153 _pthread_mutex_lock(&private_display->lock);
1155 tdm_layer_remove_commit_handler_internal(layer, func, user_data);
1157 _pthread_mutex_unlock(&private_display->lock);
/* Public API: return the tbm surface currently being displayed by this
 * layer, or report via @error when no buffer is showing.
 * @param error  [out, optional] receives TDM_ERROR_NONE on success
 *               (other error values set on paths elided from this view). */
1162 EXTERN tbm_surface_h
1163 tdm_layer_get_displaying_buffer(tdm_layer *layer, tdm_error *error)
1165 tbm_surface_h buffer;
1166 LAYER_FUNC_ENTRY_ERROR();
1168 _pthread_mutex_lock(&private_display->lock);
1171 *error = TDM_ERROR_NONE;
/* showing_buffer tracks the buffer last flipped onto the screen. */
1173 if (private_layer->showing_buffer) {
1174 buffer = private_layer->showing_buffer->buffer;
1176 _pthread_mutex_unlock(&private_display->lock);
1177 TDM_DBG("layer(%p,%d) showing_buffer is null", private_layer, private_layer->index);
1180 _pthread_mutex_unlock(&private_display->lock);
/* tbm_surface_queue "acquirable" callback: fires when a client buffer
 * becomes ready in the queue attached via tdm_layer_set_buffer_queue().
 * Acquires the buffer, sets it on the layer and commits — driving the
 * layer directly from the queue without an explicit API call. */
1186 _tbm_layer_queue_acquirable_cb(tbm_surface_queue_h surface_queue, void *data)
1188 TDM_RETURN_IF_FAIL(data != NULL);
1189 tdm_layer *layer = data;
1190 tdm_private_module *private_module;
1191 tdm_func_layer *func_layer;
1192 tbm_surface_h buffer = NULL;
1193 LAYER_FUNC_ENTRY_VOID_RETURN();
1195 _pthread_mutex_lock(&private_display->lock);
1197 private_module = private_layer->private_module;
1198 func_layer = &private_module->func_layer;
/* Backend must implement layer_set_buffer for queue-driven updates. */
1200 if (!func_layer->layer_set_buffer) {
1201 /* LCOV_EXCL_START */
1202 _pthread_mutex_unlock(&private_display->lock);
1204 /* LCOV_EXCL_STOP */
/* Pull the ready buffer out of the queue; bail if acquisition fails. */
1207 if (TBM_SURFACE_QUEUE_ERROR_NONE != tbm_surface_queue_acquire(private_layer->buffer_queue, &buffer) ||
1209 /* LCOV_EXCL_START */
1210 TDM_ERR("layer(%p,%d) tbm_surface_queue_acquire() failed surface:%p",
1211 private_layer, private_layer->index, buffer);
1212 _pthread_mutex_unlock(&private_display->lock);
1214 /* LCOV_EXCL_STOP */
1217 ret = tdm_layer_set_buffer_internal(private_layer, buffer);
1218 if (ret != TDM_ERROR_NONE) {
1219 TDM_ERR("tdm_layer_set_buffer_internal failed");
1220 _pthread_mutex_unlock(&private_display->lock);
/* Kick off the commit so the newly-set buffer reaches the screen. */
1224 ret = tdm_layer_commit_internal(private_layer, NULL, NULL);
1225 if (ret != TDM_ERROR_NONE) {
1226 TDM_ERR("tdm_layer_commit_internal failed");
1227 _pthread_mutex_unlock(&private_display->lock);
1231 _pthread_mutex_unlock(&private_display->lock);
/* tbm_surface_queue "destroy" callback: the attached queue is going away.
 * Detach it from the layer and release every buffer the layer still
 * holds, so no reference to a destroyed queue's surface survives. */
1235 _tbm_layer_queue_destroy_cb(tbm_surface_queue_h surface_queue, void *data)
1237 TDM_RETURN_IF_FAIL(data != NULL);
1238 tdm_layer *layer = data;
1239 LAYER_FUNC_ENTRY_VOID_RETURN();
1240 TDM_RETURN_IF_FAIL(ret == TDM_ERROR_NONE);
1242 _pthread_mutex_lock(&private_display->lock);
1244 private_layer->buffer_queue = NULL;
1246 _tdm_layer_free_all_buffers(private_layer);
1248 _pthread_mutex_unlock(&private_display->lock);
/* Public API: attach a tbm_surface_queue to this layer. Once attached,
 * buffers released into the queue are picked up automatically by
 * _tbm_layer_queue_acquirable_cb() and committed to the layer.
 * Marks the layer as no longer "usable" (i.e. now occupied).
 * @param buffer_queue  must be non-NULL (TDM_ERROR_INVALID_PARAMETER).
 * Returns TDM_ERROR_NOT_IMPLEMENTED if the backend lacks
 * layer_set_buffer; TDM_ERROR_NONE if the same queue is already set. */
1252 tdm_layer_set_buffer_queue(tdm_layer *layer, tbm_surface_queue_h buffer_queue)
1254 tdm_private_module *private_module;
1255 tdm_func_layer *func_layer;
1258 TDM_RETURN_VAL_IF_FAIL(buffer_queue != NULL, TDM_ERROR_INVALID_PARAMETER);
1260 _pthread_mutex_lock(&private_display->lock);
1262 private_module = private_layer->private_module;
1263 func_layer = &private_module->func_layer;
/* Log the transition: the layer was free and is about to be claimed. */
1265 if (private_layer->usable)
1266 TDM_INFO("layer(%p,%d) not usable", private_layer, private_layer->index);
1268 private_layer->usable = 0;
1270 if (!func_layer->layer_set_buffer) {
1271 /* LCOV_EXCL_START */
1272 _pthread_mutex_unlock(&private_display->lock);
1273 TDM_ERR("not implemented!!");
1274 return TDM_ERROR_NOT_IMPLEMENTED;
1275 /* LCOV_EXCL_STOP */
/* Re-attaching the same queue is a no-op. */
1278 if (buffer_queue == private_layer->buffer_queue) {
1279 _pthread_mutex_unlock(&private_display->lock);
1280 return TDM_ERROR_NONE;
/* Drop any buffer still waiting from the previous configuration. */
1283 if (private_layer->waiting_buffer) {
1284 tdm_layer_free_buffer(private_layer, private_layer->waiting_buffer);
1285 private_layer->waiting_buffer = NULL;
1287 if (tdm_debug_module & TDM_DEBUG_BUFFER)
1288 TDM_INFO("layer(%p,%d) waiting_buffer(%p)",
1289 private_layer, private_layer->index, private_layer->waiting_buffer);
/* Hook the queue's lifecycle: acquirable buffers drive commits, and
 * queue destruction detaches/cleans up (see callbacks above). */
1292 private_layer->buffer_queue = buffer_queue;
1293 tbm_surface_queue_add_acquirable_cb(private_layer->buffer_queue,
1294 _tbm_layer_queue_acquirable_cb,
1296 tbm_surface_queue_add_destroy_cb(private_layer->buffer_queue,
1297 _tbm_layer_queue_destroy_cb,
1299 _pthread_mutex_unlock(&private_display->lock);
/* Public API: detach the buffer queue from this layer.
 * Delegates to tdm_layer_unset_buffer(), which performs the shared
 * teardown for both plain buffers and queue-fed buffers. */
1305 tdm_layer_unset_buffer_queue(tdm_layer *layer)
1307 return tdm_layer_unset_buffer(layer);
/* Public API: query whether this layer is free for a client to claim.
 * @param usable  [out] receives the layer's usable flag; must be
 *                non-NULL (TDM_ERROR_INVALID_PARAMETER otherwise).
 * Read under the display lock for a consistent snapshot. */
1311 tdm_layer_is_usable(tdm_layer *layer, unsigned int *usable)
1315 TDM_RETURN_VAL_IF_FAIL(usable != NULL, TDM_ERROR_INVALID_PARAMETER);
1317 _pthread_mutex_lock(&private_display->lock);
1319 *usable = private_layer->usable;
1321 _pthread_mutex_unlock(&private_display->lock);
/* Public API: set the z-order position of a video layer.
 * Only valid on layers with TDM_LAYER_CAPABILITY_VIDEO; returns
 * TDM_ERROR_BAD_REQUEST otherwise, TDM_ERROR_NOT_IMPLEMENTED when the
 * backend provides no layer_set_video_pos hook, else the backend's
 * result. */
1327 tdm_layer_set_video_pos(tdm_layer *layer, int zpos)
1329 tdm_private_module *private_module;
1330 tdm_func_layer *func_layer;
1333 _pthread_mutex_lock(&private_display->lock);
1335 private_module = private_layer->private_module;
1336 func_layer = &private_module->func_layer;
/* zpos-for-video only makes sense on a video-capable layer. */
1338 if (!(private_layer->caps.capabilities & TDM_LAYER_CAPABILITY_VIDEO)) {
1339 TDM_ERR("layer(%p,%d) is not video layer", private_layer, private_layer->index);
1340 _pthread_mutex_unlock(&private_display->lock);
1341 return TDM_ERROR_BAD_REQUEST;
1344 if (!func_layer->layer_set_video_pos) {
1345 /* LCOV_EXCL_START */
1346 _pthread_mutex_unlock(&private_display->lock);
1347 TDM_ERR("layer(%p,%d) not implemented!!", private_layer, private_layer->index);
1348 return TDM_ERROR_NOT_IMPLEMENTED;
1349 /* LCOV_EXCL_STOP */
/* Delegate to the backend implementation. */
1352 ret = func_layer->layer_set_video_pos(private_layer->layer_backend, zpos);
1354 _pthread_mutex_unlock(&private_display->lock);
/* Public API: create a capture object that records this layer's output.
 * @param error  [out, optional] receives the result code (filled by
 *               tdm_capture_create_layer_internal).
 * Returns the new capture handle, or NULL on failure. */
1359 EXTERN tdm_capture *
1360 tdm_layer_create_capture(tdm_layer *layer, tdm_error *error)
1362 tdm_capture *capture = NULL;
1364 LAYER_FUNC_ENTRY_ERROR();
1366 _pthread_mutex_lock(&private_display->lock);
1368 capture = (tdm_capture *)tdm_capture_create_layer_internal(private_layer, error);
1370 _pthread_mutex_unlock(&private_display->lock);
1376 tdm_layer_get_buffer_flags(tdm_layer *layer, unsigned int *flags)
1378 tdm_private_module *private_module;
1379 tdm_func_layer *func_layer;
1381 TDM_RETURN_VAL_IF_FAIL(flags != NULL, TDM_ERROR_INVALID_PARAMETER);
1383 _pthread_mutex_lock(&private_display->lock);
1385 private_module = private_layer->private_module;
1386 func_layer = &private_module->func_layer;
1388 if (!func_layer->layer_get_buffer_flags) {
1389 /* LCOV_EXCL_START */
1391 _pthread_mutex_unlock(&private_display->lock);
1392 TDM_INFO("not implemented!!");
1393 return TDM_ERROR_NONE;
1394 /* LCOV_EXCL_STOP */
1397 ret = func_layer->layer_get_buffer_flags(private_layer->layer_backend, flags);
1399 _pthread_mutex_unlock(&private_display->lock);