1 /**************************************************************************
5 * Copyright 2015 Samsung Electronics co., Ltd. All Rights Reserved.
7 * Contact: Eunchul Kim <chulspro.kim@samsung.com>,
8 * JinYoung Jeon <jy0.jeon@samsung.com>,
9 * Taeheon Kim <th908.kim@samsung.com>,
10 * YoungJun Cho <yj44.cho@samsung.com>,
11 * SooChan Lim <sc1.lim@samsung.com>,
12 * Boram Park <boram1288.park@samsung.com>
14 * Permission is hereby granted, free of charge, to any person obtaining a
15 * copy of this software and associated documentation files (the
16 * "Software"), to deal in the Software without restriction, including
17 * without limitation the rights to use, copy, modify, merge, publish,
18 * distribute, sub license, and/or sell copies of the Software, and to
19 * permit persons to whom the Software is furnished to do so, subject to
20 * the following conditions:
22 * The above copyright notice and this permission notice (including the
23 * next paragraph) shall be included in all copies or substantial portions of the Software.
26 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
27 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
28 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
29 * IN NO EVENT SHALL PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR
30 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
31 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
32 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
34 **************************************************************************/
40 #include "tdm_private.h"
44 #define LAYER_FUNC_ENTRY() \
45 tdm_private_display *private_display; \
46 tdm_private_output *private_output; \
47 tdm_private_layer *private_layer; \
48 tdm_error ret = TDM_ERROR_NONE; /* default TDM_ERROR_NONE */\
49 TDM_RETURN_VAL_IF_FAIL(layer != NULL, TDM_ERROR_INVALID_PARAMETER); \
50 private_layer = (tdm_private_layer*)layer; \
51 private_output = private_layer->private_output; \
52 private_display = private_output->private_display
54 #define LAYER_FUNC_ENTRY_ERROR() \
55 tdm_private_display *private_display; \
56 tdm_private_output *private_output; \
57 tdm_private_layer *private_layer; \
58 tdm_error ret = TDM_ERROR_NONE; /* default TDM_ERROR_NONE */\
59 TDM_RETURN_VAL_IF_FAIL_WITH_ERROR(layer != NULL, TDM_ERROR_INVALID_PARAMETER, NULL); \
60 private_layer = (tdm_private_layer*)layer; \
61 private_output = private_layer->private_output; \
62 private_display = private_output->private_display
64 #define LAYER_FUNC_ENTRY_VOID_RETURN() \
65 tdm_private_display *private_display; \
66 tdm_private_output *private_output; \
67 tdm_private_layer *private_layer; \
68 tdm_error ret = TDM_ERROR_NONE; /* default TDM_ERROR_NONE */\
69 TDM_RETURN_IF_FAIL(layer != NULL); \
70 private_layer = (tdm_private_layer*)layer; \
71 private_output = private_layer->private_output; \
72 private_display = private_output->private_display
74 #define OUTPUT_HWC_CAP_CHECK() \
75 if (private_output->caps.capabilities & TDM_OUTPUT_CAPABILITY_HWC) { \
76 TDM_ERR("output(%p) support HWC. Use HWC functions", private_output); \
77 _pthread_mutex_unlock(&private_display->lock); \
78 return TDM_ERROR_OPERATION_FAILED; \
81 #define OUTPUT_HWC_CAP_CHECK_ERROR() \
82 if (private_output->caps.capabilities & TDM_OUTPUT_CAPABILITY_HWC) { \
83 TDM_ERR("output(%p) support HWC. Use HWC functions", private_output); \
84 if (error) *error = TDM_ERROR_OPERATION_FAILED; \
85 _pthread_mutex_unlock(&private_display->lock); \
89 #define OUTPUT_HWC_CAP_CHECK_VOID_RETURN() \
90 if (private_output->caps.capabilities & TDM_OUTPUT_CAPABILITY_HWC) { \
91 TDM_ERR("output(%p) support HWC. Use HWC functions", private_output); \
92 _pthread_mutex_unlock(&private_display->lock); \
97 static void _tdm_layer_cb_wait_vblank(tdm_vblank *vblank, tdm_error error, unsigned int sequence,
98 unsigned int tv_sec, unsigned int tv_usec, void *user_data);
99 static void _tbm_layer_queue_acquirable_cb(tbm_surface_queue_h surface_queue, void *data);
100 static void _tbm_layer_queue_destroy_cb(tbm_surface_queue_h surface_queue, void *data);
101 static void _tdm_layer_cb_output_commit(tdm_output *output, unsigned int sequence,
102 unsigned int tv_sec, unsigned int tv_usec, void *user_data);
103 static void _tdm_layer_reset_pending_data(tdm_private_layer *private_layer);
106 tdm_layer_get_output(tdm_layer *layer, tdm_error *error)
110 LAYER_FUNC_ENTRY_ERROR();
112 _pthread_mutex_lock(&private_display->lock);
114 OUTPUT_HWC_CAP_CHECK_ERROR();
117 *error = TDM_ERROR_NONE;
119 output = private_layer->private_output;
121 _pthread_mutex_unlock(&private_display->lock);
127 tdm_layer_get_index(tdm_layer *layer, int *index)
131 TDM_RETURN_VAL_IF_FAIL(index != NULL, TDM_ERROR_INVALID_PARAMETER);
133 _pthread_mutex_lock(&private_display->lock);
135 OUTPUT_HWC_CAP_CHECK();
137 *index = private_layer->index;
139 _pthread_mutex_unlock(&private_display->lock);
145 tdm_layer_get_capabilities(tdm_layer *layer, tdm_layer_capability *capabilities)
149 TDM_RETURN_VAL_IF_FAIL(capabilities != NULL, TDM_ERROR_INVALID_PARAMETER);
151 _pthread_mutex_lock(&private_display->lock);
153 OUTPUT_HWC_CAP_CHECK();
155 *capabilities = private_layer->caps.capabilities;
157 _pthread_mutex_unlock(&private_display->lock);
163 tdm_layer_get_available_formats(tdm_layer *layer, const tbm_format **formats, int *count)
167 TDM_RETURN_VAL_IF_FAIL(formats != NULL, TDM_ERROR_INVALID_PARAMETER);
168 TDM_RETURN_VAL_IF_FAIL(count != NULL, TDM_ERROR_INVALID_PARAMETER);
170 _pthread_mutex_lock(&private_display->lock);
172 OUTPUT_HWC_CAP_CHECK();
174 *formats = (const tbm_format *)private_layer->caps.formats;
175 *count = private_layer->caps.format_count;
177 _pthread_mutex_unlock(&private_display->lock);
183 tdm_layer_get_available_properties(tdm_layer *layer, const tdm_prop **props, int *count)
187 TDM_RETURN_VAL_IF_FAIL(props != NULL, TDM_ERROR_INVALID_PARAMETER);
188 TDM_RETURN_VAL_IF_FAIL(count != NULL, TDM_ERROR_INVALID_PARAMETER);
190 _pthread_mutex_lock(&private_display->lock);
192 OUTPUT_HWC_CAP_CHECK();
194 *props = (const tdm_prop *)private_layer->caps.props;
195 *count = private_layer->caps.prop_count;
197 _pthread_mutex_unlock(&private_display->lock);
203 tdm_layer_get_zpos(tdm_layer *layer, int *zpos)
207 TDM_RETURN_VAL_IF_FAIL(zpos != NULL, TDM_ERROR_INVALID_PARAMETER);
209 _pthread_mutex_lock(&private_display->lock);
211 OUTPUT_HWC_CAP_CHECK();
213 *zpos = private_layer->caps.zpos;
215 _pthread_mutex_unlock(&private_display->lock);
221 tdm_layer_set_property(tdm_layer *layer, unsigned int id, tdm_value value)
223 tdm_private_module *private_module;
224 tdm_func_layer *func_layer;
227 _pthread_mutex_lock(&private_display->lock);
229 OUTPUT_HWC_CAP_CHECK();
231 private_module = private_layer->private_module;
232 func_layer = &private_module->func_layer;
234 if (private_layer->usable)
235 TDM_INFO("layer(%p,%d) not usable", private_layer, private_layer->index);
237 private_layer->usable = 0;
239 if (!func_layer->layer_set_property) {
240 _pthread_mutex_unlock(&private_display->lock);
241 TDM_ERR("not implemented!!");
242 return TDM_ERROR_NOT_IMPLEMENTED;
245 ret = func_layer->layer_set_property(private_layer->layer_backend, id, value);
247 _pthread_mutex_unlock(&private_display->lock);
253 tdm_layer_get_property(tdm_layer *layer, unsigned int id, tdm_value *value)
255 tdm_private_module *private_module;
256 tdm_func_layer *func_layer;
259 TDM_RETURN_VAL_IF_FAIL(value != NULL, TDM_ERROR_INVALID_PARAMETER);
261 _pthread_mutex_lock(&private_display->lock);
263 OUTPUT_HWC_CAP_CHECK();
265 private_module = private_layer->private_module;
266 func_layer = &private_module->func_layer;
268 if (!func_layer->layer_get_property) {
269 _pthread_mutex_unlock(&private_display->lock);
270 TDM_ERR("not implemented!!");
271 return TDM_ERROR_NOT_IMPLEMENTED;
274 ret = func_layer->layer_get_property(private_layer->layer_backend, id, value);
276 _pthread_mutex_unlock(&private_display->lock);
282 tdm_layer_set_info_internal(tdm_private_layer *private_layer, tdm_info_layer *info)
284 tdm_private_module *private_module;
285 tdm_func_layer *func_layer;
288 private_module = private_layer->private_module;
289 func_layer = &private_module->func_layer;
291 if (private_layer->usable)
292 TDM_INFO("layer(%p,%d) not usable", private_layer, private_layer->index);
294 private_layer->usable = 0;
296 if (!func_layer->layer_set_info) {
297 TDM_ERR("not implemented!!");
298 return TDM_ERROR_NOT_IMPLEMENTED;
301 if (info->src_config.format)
302 snprintf(fmtstr, 128, "%c%c%c%c", FOURCC_STR(info->src_config.format));
304 snprintf(fmtstr, 128, "NONE");
306 TDM_INFO("layer(%p,%d) info: src(%ux%u %u,%u %ux%u %s) dst(%u,%u %ux%u) trans(%d)",
307 private_layer, private_layer->index,
308 info->src_config.size.h, info->src_config.size.v,
309 info->src_config.pos.x, info->src_config.pos.y,
310 info->src_config.pos.w, info->src_config.pos.h,
312 info->dst_pos.x, info->dst_pos.y,
313 info->dst_pos.w, info->dst_pos.h,
316 private_layer->pending_info_changed = 1;
317 private_layer->pending_info = *info;
319 return TDM_ERROR_NONE;
323 tdm_layer_set_info(tdm_layer *layer, tdm_info_layer *info)
327 TDM_RETURN_VAL_IF_FAIL(info != NULL, TDM_ERROR_INVALID_PARAMETER);
329 _pthread_mutex_lock(&private_display->lock);
331 OUTPUT_HWC_CAP_CHECK();
333 ret = tdm_layer_set_info_internal(private_layer, info);
335 _pthread_mutex_unlock(&private_display->lock);
341 tdm_layer_get_info(tdm_layer *layer, tdm_info_layer *info)
343 tdm_private_module *private_module;
344 tdm_func_layer *func_layer;
347 TDM_RETURN_VAL_IF_FAIL(info != NULL, TDM_ERROR_INVALID_PARAMETER);
349 _pthread_mutex_lock(&private_display->lock);
351 OUTPUT_HWC_CAP_CHECK();
353 private_module = private_layer->private_module;
354 func_layer = &private_module->func_layer;
356 if (!func_layer->layer_get_info) {
357 _pthread_mutex_unlock(&private_display->lock);
358 TDM_ERR("not implemented!!");
359 return TDM_ERROR_NOT_IMPLEMENTED;
362 ret = func_layer->layer_get_info(private_layer->layer_backend, info);
364 _pthread_mutex_unlock(&private_display->lock);
370 _tdm_layer_dump_buffer(tdm_layer *layer, tbm_surface_h buffer)
372 tdm_private_layer *private_layer = (tdm_private_layer*)layer;
373 tdm_private_output *private_output = private_layer->private_output;
375 char fname[TDM_PATH_LEN], bufs[TDM_PATH_LEN];
376 int zpos, len = TDM_PATH_LEN;
377 tdm_private_layer *l = NULL;
382 pipe = private_output->pipe;
383 zpos = private_layer->caps.zpos;
386 LIST_FOR_EACH_ENTRY(l, &private_output->layer_list, link) {
387 if (!l->showing_buffer)
389 TDM_SNPRINTF(p, remain, "_%p", l->showing_buffer->buffer);
392 n = snprintf(fname, sizeof(fname), "tdm_%d_lyr_%d%s", pipe, zpos, bufs);
393 if ((size_t)n >= sizeof(fname)) {
394 fname[sizeof(fname) - 1] = '\0';
397 tbm_surface_internal_dump_buffer(buffer, fname);
398 TDM_DBG("%s dump excute", fname);
404 tdm_layer_free_buffer(tdm_private_layer *private_layer, tdm_private_layer_buffer *layer_buffer)
406 tdm_private_display *private_display;
411 private_display = private_layer->private_output->private_display;
413 LIST_DEL(&layer_buffer->link);
414 if (layer_buffer->buffer) {
415 _pthread_mutex_unlock(&private_display->lock);
416 tdm_buffer_unref_backend(layer_buffer->buffer);
417 if (private_layer->buffer_queue)
418 tbm_surface_queue_release(private_layer->buffer_queue, layer_buffer->buffer);
419 _pthread_mutex_lock(&private_display->lock);
425 _tdm_layer_free_all_buffers(tdm_private_layer *private_layer)
427 tdm_private_output *private_output = private_layer->private_output;
428 tdm_private_layer_commit_handler *lm = NULL, *lmm = NULL;
429 struct list_head clone_list;
431 LIST_INITHEAD(&clone_list);
433 _tdm_layer_reset_pending_data(private_layer);
435 if (private_layer->waiting_buffer) {
436 tdm_layer_free_buffer(private_layer, private_layer->waiting_buffer);
437 private_layer->waiting_buffer = NULL;
439 if (tdm_debug_module & TDM_DEBUG_BUFFER)
440 TDM_INFO("layer(%p,%d) waiting_buffer(%p)",
441 private_layer, private_layer->index, private_layer->waiting_buffer);
444 if (private_layer->committed_buffer) {
445 tdm_layer_free_buffer(private_layer, private_layer->committed_buffer);
446 private_layer->committed_buffer = NULL;
448 if (tdm_debug_module & TDM_DEBUG_BUFFER)
449 TDM_INFO("layer(%p,%d) committed_buffer(%p)",
450 private_layer, private_layer->index, private_layer->committed_buffer);
453 if (private_layer->showing_buffer) {
454 tdm_layer_free_buffer(private_layer, private_layer->showing_buffer);
455 private_layer->showing_buffer = NULL;
457 if (tdm_debug_module & TDM_DEBUG_BUFFER)
458 TDM_INFO("layer(%p,%d) showing_buffer(%p)",
459 private_layer, private_layer->index, private_layer->showing_buffer);
462 LIST_FOR_EACH_ENTRY_SAFE(lm, lmm, &private_output->layer_commit_handler_list, link) {
463 if (lm->private_layer != private_layer)
466 LIST_ADDTAIL(&lm->link, &clone_list);
469 LIST_FOR_EACH_ENTRY_SAFE(lm, lmm, &clone_list, link) {
471 tdm_output_remove_commit_handler_internal(private_output, _tdm_layer_cb_output_commit, lm);
472 tdm_layer_free_buffer(private_layer, lm->committed_buffer);
476 LIST_FOR_EACH_ENTRY_SAFE(lm, lmm, &private_output->pending_commit_handler_list, link) {
477 if (lm->private_layer != private_layer)
480 LIST_ADDTAIL(&lm->link, &clone_list);
483 LIST_FOR_EACH_ENTRY_SAFE(lm, lmm, &clone_list, link) {
485 tdm_layer_free_buffer(private_layer, lm->committed_buffer);
489 if (private_layer->buffer_queue) {
490 tbm_surface_queue_remove_acquirable_cb(private_layer->buffer_queue, _tbm_layer_queue_acquirable_cb, private_layer);
491 tbm_surface_queue_remove_destroy_cb(private_layer->buffer_queue, _tbm_layer_queue_destroy_cb, private_layer);
492 private_layer->buffer_queue = NULL;
497 tdm_layer_set_buffer_internal(tdm_private_layer *private_layer, tbm_surface_h buffer)
499 tdm_private_module *private_module;
500 tdm_private_output *private_output = private_layer->private_output;
501 tdm_func_layer *func_layer;
504 if (tdm_dump_enable && !(private_layer->caps.capabilities & TDM_LAYER_CAPABILITY_VIDEO))
505 _tdm_layer_dump_buffer(private_layer, buffer);
507 if (tdm_debug_dump & TDM_DUMP_FLAG_LAYER &&
508 !(private_layer->caps.capabilities & TDM_LAYER_CAPABILITY_VIDEO)) {
509 char str[TDM_PATH_LEN];
511 snprintf(str, TDM_PATH_LEN, "layer_%d_%d_%03d",
512 private_output->index, private_layer->index, i++);
513 tdm_helper_dump_buffer_str(buffer, tdm_debug_dump_dir, str);
516 private_module = private_layer->private_module;
517 func_layer = &private_module->func_layer;
519 if (private_layer->usable)
520 TDM_INFO("layer(%p,%d) not usable", private_layer, private_layer->index);
522 private_layer->usable = 0;
524 if (!func_layer->layer_set_buffer) {
525 TDM_ERR("not implemented!!");
526 return TDM_ERROR_NOT_IMPLEMENTED;
529 private_layer->pending_buffer_changed = 1;
531 if (private_layer->pending_buffer) {
533 if (tdm_debug_module & TDM_DEBUG_BUFFER)
534 TDM_INFO("layer(%p,%d) pending_buffer(%p) skipped",
535 private_layer, private_layer->index, private_layer->pending_buffer);
537 if (tdm_ttrace_module & TDM_TTRACE_LAYER) {
538 tbm_bo bo = tbm_surface_internal_get_bo(private_layer->pending_buffer, 0);
539 TDM_TRACE_ASYNC_END((intptr_t)private_layer, "[LAYER] %d", tbm_bo_export(bo));
542 tbm_surface_internal_unref(private_layer->pending_buffer);
545 tbm_surface_internal_ref(buffer);
546 private_layer->pending_buffer = buffer;
548 if (tdm_debug_module & TDM_DEBUG_BUFFER) {
549 tbm_bo bo = tbm_surface_internal_get_bo(private_layer->pending_buffer, 0);
550 int flags = tbm_bo_get_flags(bo);
551 TDM_INFO("layer(%p,%d) pending_buffer(%p) bo_flags(%x)",
552 private_layer, private_layer->index, private_layer->pending_buffer, flags);
555 if (tdm_ttrace_module & TDM_TTRACE_LAYER) {
556 tbm_bo bo = tbm_surface_internal_get_bo(private_layer->pending_buffer, 0);
557 TDM_TRACE_ASYNC_BEGIN((intptr_t)private_layer, "[LAYER] %d", tbm_bo_export(bo));
560 return TDM_ERROR_NONE;
564 tdm_layer_set_buffer(tdm_layer *layer, tbm_surface_h buffer)
568 TDM_RETURN_VAL_IF_FAIL(buffer != NULL, TDM_ERROR_INVALID_PARAMETER);
570 _pthread_mutex_lock(&private_display->lock);
572 OUTPUT_HWC_CAP_CHECK();
574 ret = tdm_layer_set_buffer_internal(private_layer, buffer);
576 _pthread_mutex_unlock(&private_display->lock);
582 tdm_layer_unset_buffer_internal(tdm_private_layer *private_layer)
584 tdm_private_module *private_module;
585 tdm_func_layer *func_layer;
586 tdm_error ret = TDM_ERROR_NONE;
588 private_module = private_layer->private_module;
589 func_layer = &private_module->func_layer;
591 _tdm_layer_free_all_buffers(private_layer);
593 private_layer->usable = 1;
595 if (private_layer->usable)
596 TDM_INFO("layer(%p,%d) now usable", private_layer, private_layer->index);
598 if (!func_layer->layer_unset_buffer) {
599 TDM_ERR("not implemented!!");
600 return TDM_ERROR_NOT_IMPLEMENTED;
603 ret = func_layer->layer_unset_buffer(private_layer->layer_backend);
604 TDM_WARNING_IF_FAIL(ret == TDM_ERROR_NONE);
610 tdm_layer_unset_buffer(tdm_layer *layer)
614 _pthread_mutex_lock(&private_display->lock);
616 OUTPUT_HWC_CAP_CHECK();
618 ret = tdm_layer_unset_buffer_internal(private_layer);
619 TDM_WARNING_IF_FAIL(ret == TDM_ERROR_NONE);
621 _pthread_mutex_unlock(&private_display->lock);
627 tdm_layer_committed(tdm_private_layer *private_layer, tdm_private_layer_buffer **committed_buffer)
629 tdm_private_output *private_output = private_layer->private_output;
630 tdm_private_display *private_display = private_output->private_display;
632 if (private_display->print_fps) {
633 double curr = tdm_helper_get_time();
634 if (private_layer->fps_stamp == 0) {
635 private_layer->fps_stamp = curr;
636 } else if ((curr - private_layer->fps_stamp) > 1.0) {
637 TDM_INFO("output(%d) layer(%p,%d) fps: %d",
638 private_output->index, private_layer, private_layer->index, private_layer->fps_count);
639 private_layer->fps_count = 0;
640 private_layer->fps_stamp = curr;
642 private_layer->fps_count++;
643 } else if (private_layer->fps_stamp != 0) {
644 private_layer->fps_stamp = 0;
645 private_layer->fps_count = 0;
648 if (private_layer->showing_buffer) {
649 if (tdm_ttrace_module & TDM_TTRACE_LAYER) {
650 tbm_bo bo = tbm_surface_internal_get_bo(private_layer->showing_buffer->buffer, 0);
651 TDM_TRACE_ASYNC_END((intptr_t)private_layer, "[LAYER] %d", tbm_bo_export(bo));
654 tdm_layer_free_buffer(private_layer, private_layer->showing_buffer);
657 private_layer->showing_buffer = *committed_buffer;
658 *committed_buffer = NULL;
660 if (tdm_debug_module & TDM_DEBUG_BUFFER)
661 TDM_INFO("layer(%p,%d) committed_buffer(%p) showing_buffer(%p)",
662 private_layer, private_layer->index, *committed_buffer,
663 (private_layer->showing_buffer) ? private_layer->showing_buffer->buffer : NULL);
667 _tdm_layer_got_output_vblank(tdm_private_output *private_output, unsigned int sequence,
668 unsigned int tv_sec, unsigned int tv_usec)
670 tdm_private_layer_commit_handler *lm = NULL, *lmm = NULL;
671 tdm_private_display *private_display;
672 struct list_head clone_list, pending_clone_list;
673 tdm_error ret = TDM_ERROR_NONE;
675 private_display = private_output->private_display;
677 private_output->layer_waiting_vblank = 0;
679 LIST_INITHEAD(&clone_list);
680 LIST_INITHEAD(&pending_clone_list);
682 LIST_FOR_EACH_ENTRY_SAFE(lm, lmm, &private_output->layer_commit_handler_list, link) {
684 lm->private_layer->committing = 0;
685 LIST_ADDTAIL(&lm->link, &clone_list);
688 LIST_FOR_EACH_ENTRY_SAFE(lm, lmm, &private_output->pending_commit_handler_list, link) {
690 lm->private_layer->committing = 0;
691 LIST_ADDTAIL(&lm->link, &pending_clone_list);
694 LIST_FOR_EACH_ENTRY_SAFE(lm, lmm, &clone_list, link) {
695 if (tdm_debug_module & TDM_DEBUG_COMMIT)
696 TDM_INFO("layer(%p,%d) committed. handle(%p) commited_buffer(%p)",
697 lm->private_layer, lm->private_layer->index, lm, (lm->committed_buffer) ? lm->committed_buffer->buffer : NULL);
700 tdm_layer_committed(lm->private_layer, &lm->committed_buffer);
701 _pthread_mutex_unlock(&private_display->lock);
703 lm->func(lm->private_layer, sequence, tv_sec, tv_usec, lm->user_data);
704 _pthread_mutex_lock(&private_display->lock);
705 if (lm->committed_buffer)
706 tdm_layer_free_buffer(lm->private_layer, lm->committed_buffer);
710 if (LIST_IS_EMPTY(&pending_clone_list))
713 TDM_GOTO_IF_FAIL(private_output->vblank != NULL, wait_failed);
715 ret = tdm_output_commit_internal(private_output, 0, NULL, NULL);
716 TDM_GOTO_IF_FAIL(ret == TDM_ERROR_NONE, wait_failed);
718 if (tdm_debug_module & TDM_DEBUG_COMMIT)
719 TDM_INFO("layer commit: output(%d) commit", private_output->pipe);
721 if (!TDM_OUTPUT_DPMS_VSYNC_IS_OFF(private_output->current_dpms_value)) {
722 /* tdm_vblank APIs is for server. it should be called in unlock status*/
723 if (!private_output->layer_waiting_vblank) {
724 _pthread_mutex_unlock(&private_display->lock);
725 ret = tdm_vblank_wait(private_output->vblank, 0, 0, 1, _tdm_layer_cb_wait_vblank, private_output);
726 _pthread_mutex_lock(&private_display->lock);
727 TDM_GOTO_IF_FAIL(ret == TDM_ERROR_NONE, wait_failed);
728 private_output->layer_waiting_vblank = 1;
732 if (tdm_debug_module & TDM_DEBUG_COMMIT)
733 TDM_INFO("layer commit: output(%d) wait vblank", private_output->pipe);
735 LIST_FOR_EACH_ENTRY_SAFE(lm, lmm, &pending_clone_list, link) {
737 LIST_ADDTAIL(&lm->link, &private_output->layer_commit_handler_list);
740 if (TDM_OUTPUT_DPMS_VSYNC_IS_OFF(private_output->current_dpms_value)) {
741 TDM_WRN("dpms %s. Directly call vblank callback.", tdm_dpms_str(private_output->current_dpms_value));
742 _pthread_mutex_unlock(&private_display->lock);
743 _tdm_layer_cb_wait_vblank(private_output->vblank, 0, 0, 0, 0, private_output);
744 _pthread_mutex_lock(&private_display->lock);
749 /* CAUTION: Once _tdm_layer_commit returns success, the layer commit handler MUST be called always.
750 * That is, even if we get error in _tdm_layer_got_output_vblank() function for some reasons,
751 * the layer commit handler MUST be called.
753 LIST_FOR_EACH_ENTRY_SAFE(lm, lmm, &pending_clone_list, link) {
754 TDM_WRN("Directly call layer commit handlers: ret(%d)\n", ret);
756 _pthread_mutex_unlock(&private_display->lock);
758 lm->func(lm->private_layer, sequence, tv_sec, tv_usec, lm->user_data);
759 _pthread_mutex_lock(&private_display->lock);
760 tdm_layer_free_buffer(lm->private_layer, lm->committed_buffer);
768 _tdm_layer_cb_output_commit(tdm_output *output, unsigned int sequence,
769 unsigned int tv_sec, unsigned int tv_usec, void *user_data)
771 tdm_private_layer_commit_handler *layer_commit_handler = user_data;
772 tdm_private_layer_commit_handler *lm = NULL;
773 tdm_private_display *private_display;
774 tdm_private_output *private_output = output;
775 tdm_private_layer *private_layer;
778 TDM_RETURN_IF_FAIL(layer_commit_handler != NULL);
780 private_display = private_output->private_display;
782 LIST_FOR_EACH_ENTRY(lm, &private_output->layer_commit_handler_list, link) {
783 if (layer_commit_handler == lm) {
792 LIST_DEL(&layer_commit_handler->link);
794 private_layer = layer_commit_handler->private_layer;
796 private_layer->committing = 0;
798 if (tdm_debug_module & TDM_DEBUG_COMMIT)
799 TDM_INFO("layer(%p,%d) commit: output(%d) committed. handle(%p)",
800 private_layer, private_layer->index, private_output->pipe, layer_commit_handler);
802 _pthread_mutex_lock(&private_display->lock);
804 tdm_layer_committed(private_layer, &layer_commit_handler->committed_buffer);
806 if (layer_commit_handler->func) {
807 _pthread_mutex_unlock(&private_display->lock);
808 layer_commit_handler->func(private_layer, sequence,
809 tv_sec, tv_usec, layer_commit_handler->user_data);
810 _pthread_mutex_lock(&private_display->lock);
813 free(layer_commit_handler);
815 _pthread_mutex_unlock(&private_display->lock);
819 _tdm_layer_cb_wait_vblank(tdm_vblank *vblank, tdm_error error, unsigned int sequence,
820 unsigned int tv_sec, unsigned int tv_usec, void *user_data)
822 tdm_private_output *private_output = user_data;
823 tdm_private_display *private_display;
825 TDM_RETURN_IF_FAIL(private_output != NULL);
827 private_display = private_output->private_display;
829 _pthread_mutex_lock(&private_display->lock);
831 if (tdm_debug_module & TDM_DEBUG_COMMIT)
832 TDM_INFO("layer commit: output(%d) got vblank", private_output->pipe);
834 _tdm_layer_got_output_vblank(private_output, sequence, tv_sec, tv_usec);
836 _pthread_mutex_unlock(&private_display->lock);
840 _tdm_layer_get_output_used_layer_count(tdm_private_output *private_output)
842 tdm_private_layer *private_layer = NULL;
843 unsigned int count = 0;
845 LIST_FOR_EACH_ENTRY(private_layer, &private_output->layer_list, link) {
846 if (!private_layer->usable)
853 /* commit_per_vblank == 1: we can commit if
854 * - there is no previous commit request
855 * - only 1 layer is used
856 * commit_per_vblank == 2: we can commit if
857 * - there is no previous commit request
860 _tdm_layer_commit_possible(tdm_private_layer *private_layer)
862 tdm_private_output *private_output = private_layer->private_output;
864 TDM_RETURN_VAL_IF_FAIL(private_output->commit_per_vblank > 0, 1);
866 /* There is a previous commit request which is not done and displayed on screen yet.
867 * We can't commit at this time.
869 if (!LIST_IS_EMPTY(&private_output->layer_commit_handler_list)) {
870 if (tdm_debug_module & TDM_DEBUG_COMMIT)
871 TDM_INFO("layer(%p,%d) commit: not possible(previous commit)",
872 private_layer, private_layer->index);
876 if (private_output->commit_per_vblank == 1 && _tdm_layer_get_output_used_layer_count(private_output) > 1) {
877 if (tdm_debug_module & TDM_DEBUG_COMMIT)
878 TDM_INFO("layer(%p,%d) commit: not possible(more than 2 layers)",
879 private_layer, private_layer->index);
883 if (tdm_debug_module & TDM_DEBUG_COMMIT)
884 TDM_INFO("layer(%p,%d) commit: possible", private_layer, private_layer->index);
890 _tdm_layer_reset_pending_data(tdm_private_layer *private_layer)
892 private_layer->pending_info_changed = 0;
893 memset(&private_layer->pending_info, 0, sizeof private_layer->pending_info);
895 private_layer->pending_buffer_changed = 0;
896 if (private_layer->pending_buffer) {
897 tbm_surface_internal_unref(private_layer->pending_buffer);
898 private_layer->pending_buffer = NULL;
903 tdm_layer_commit_pending_data(tdm_private_layer *private_layer)
905 tdm_private_module *private_module;
906 tdm_func_layer *func_layer;
907 tdm_error ret = TDM_ERROR_NONE;
909 private_module = private_layer->private_module;
910 func_layer = &private_module->func_layer;
912 if (private_layer->pending_info_changed) {
913 ret = func_layer->layer_set_info(private_layer->layer_backend, &private_layer->pending_info);
914 TDM_GOTO_IF_FAIL(ret == TDM_ERROR_NONE, done);
917 if (private_layer->pending_buffer_changed) {
918 tdm_private_layer_buffer *layer_buffer;
920 layer_buffer = calloc(1, sizeof(tdm_private_layer_buffer));
921 TDM_GOTO_IF_FAIL(layer_buffer != NULL, done);
923 LIST_INITHEAD(&layer_buffer->link);
925 ret = func_layer->layer_set_buffer(private_layer->layer_backend, private_layer->pending_buffer);
926 TDM_WARNING_IF_FAIL(ret == TDM_ERROR_NONE);
928 if (tdm_debug_module & TDM_DEBUG_BUFFER)
929 TDM_INFO("layer(%p,%d) pending_buffer(%p) committed",
930 private_layer, private_layer->index, private_layer->pending_buffer);
932 if (ret == TDM_ERROR_NONE) {
933 if (private_layer->waiting_buffer)
934 tdm_layer_free_buffer(private_layer, private_layer->waiting_buffer);
936 private_layer->waiting_buffer = layer_buffer;
937 private_layer->waiting_buffer->buffer = tdm_buffer_ref_backend(private_layer->pending_buffer);
938 if (tdm_debug_module & TDM_DEBUG_BUFFER)
939 TDM_INFO("layer(%p,%d) waiting_buffer(%p)",
940 private_layer, private_layer->index, private_layer->waiting_buffer->buffer);
942 tdm_layer_free_buffer(private_layer, layer_buffer);
946 _tdm_layer_reset_pending_data(private_layer);
951 /* CAUTION: Once _tdm_layer_commit returns success, the layer commit handler MUST be called always.
952 * That is, even if we get error in _tdm_layer_got_output_vblank() function for some reasons,
953 * the layer commit handler MUST be called.
956 _tdm_layer_commit(tdm_layer *layer, tdm_layer_commit_handler func, void *user_data)
958 tdm_private_layer_commit_handler *layer_commit_handler;
961 layer_commit_handler = calloc(1, sizeof(tdm_private_layer_commit_handler));
962 if (!layer_commit_handler) {
963 TDM_ERR("failed: alloc memory");
964 return TDM_ERROR_OUT_OF_MEMORY;
967 if (tdm_debug_module & TDM_DEBUG_COMMIT)
968 TDM_INFO("layer(%p,%d) commit: handle(%p)", private_layer, private_layer->index, layer_commit_handler);
970 LIST_INITHEAD(&layer_commit_handler->link);
971 layer_commit_handler->private_layer = private_layer;
972 layer_commit_handler->func = func;
973 layer_commit_handler->user_data = user_data;
975 layer_commit_handler->committed_buffer = private_layer->waiting_buffer;
976 private_layer->waiting_buffer = NULL;
978 if (!private_layer->committing && layer_commit_handler->committed_buffer)
979 private_layer->commiting_buffer = layer_commit_handler->committed_buffer->buffer;
981 if (private_layer->committing)
982 TDM_WRN("layer(%p,%d) too many commit", private_layer, private_layer->index);
984 private_layer->committing = 1;
986 if (tdm_debug_module & TDM_DEBUG_BUFFER)
987 TDM_INFO("layer(%p,%d) waiting_buffer(%p) committed_buffer(%p)",
988 private_layer, private_layer->index, private_layer->waiting_buffer,
989 (layer_commit_handler->committed_buffer) ? layer_commit_handler->committed_buffer->buffer : NULL);
991 if (!private_output->commit_per_vblank) {
992 TDM_GOTO_IF_FAIL(private_output->commit_type == TDM_COMMIT_TYPE_OUTPUT, commit_failed);
994 LIST_ADDTAIL(&layer_commit_handler->link, &private_output->layer_commit_handler_list);
995 ret = tdm_output_commit_internal(private_layer->private_output, 0, _tdm_layer_cb_output_commit, layer_commit_handler);
996 TDM_GOTO_IF_FAIL(ret == TDM_ERROR_NONE, commit_failed);
/* NOTE(review): tail of a layer-commit routine whose head lies above this
 * chunk (presumably _tdm_layer_commit — TODO confirm against upstream).
 * The embedded numbering has gaps (1000, 1002, 1008, 1011, 1014, 1017-1018,
 * 1025-1026, 1035-1039, 1041, 1044-1049, 1054-1058): braces, else branches
 * and return statements were lost in extraction, so the control flow below
 * must be re-diffed against the original file before any logic change. */
998 if (tdm_debug_module & TDM_DEBUG_COMMIT)
999 TDM_INFO("layer(%p,%d) commit: no commit-per-vblank", private_layer, private_layer->index);
/* per-layer commits are only legal once the output committed layer-by-layer */
1001 TDM_GOTO_IF_FAIL(private_output->commit_type == TDM_COMMIT_TYPE_LAYER, commit_failed);
/* commit immediately when possible, otherwise park the handler until vblank */
1003 if (_tdm_layer_commit_possible(private_layer)) {
1004 /* add to layer_commit_handler_list */
1005 LIST_ADDTAIL(&layer_commit_handler->link, &private_output->layer_commit_handler_list);
1006 ret = tdm_output_commit_internal(private_layer->private_output, 0, _tdm_layer_cb_output_commit, layer_commit_handler);
1007 TDM_GOTO_IF_FAIL(ret == TDM_ERROR_NONE, commit_failed);
1009 if (tdm_debug_module & TDM_DEBUG_COMMIT)
1010 TDM_INFO("layer(%p,%d) commit: output", private_layer, private_layer->index);
/* NOTE(review): an else branch separating the pending path from the
 * immediate path above appears to have been lost here (gap at 1011). */
1012 /* add to pending_commit_handler_list. It will be commited when a vblank occurs */
1013 LIST_ADDTAIL(&layer_commit_handler->link, &private_output->pending_commit_handler_list);
1015 if (tdm_debug_module & TDM_DEBUG_COMMIT)
1016 TDM_INFO("layer(%p,%d) commit: pending", private_layer, private_layer->index);
/* lazily create the per-output vblank object used to pace layer commits */
1019 if (!private_output->vblank) {
1020 /* tdm_vblank APIs is for server. it should be called in unlock status*/
1021 _pthread_mutex_unlock(&private_display->lock);
1022 private_output->vblank = tdm_vblank_create(private_display, private_output, NULL);
1023 _pthread_mutex_lock(&private_display->lock);
1024 TDM_GOTO_IF_FAIL(private_output->vblank != NULL, commit_failed);
/* only one outstanding vblank wait per output; the callback drains the
 * pending list when it fires */
1027 if (!private_output->layer_waiting_vblank) {
1028 /* tdm_vblank APIs is for server. it should be called in unlock status*/
1029 _pthread_mutex_unlock(&private_display->lock);
1030 ret = tdm_vblank_wait(private_output->vblank, 0, 0, 1, _tdm_layer_cb_wait_vblank, private_output);
1031 _pthread_mutex_lock(&private_display->lock);
1032 if (ret != TDM_ERROR_NONE) {
1033 if (TDM_OUTPUT_DPMS_VSYNC_IS_OFF(private_output->current_dpms_value)) {
1034 /* dpms off : the allocated memory was free in tdm_output_commit_internal */
/* NOTE(review): lines 1035-1039 (the dpms-off recovery/error path) were
 * lost in extraction — do not assume this branch falls through. */
1040 private_output->layer_waiting_vblank = 1;
1042 if (tdm_debug_module & TDM_DEBUG_COMMIT)
1043 TDM_INFO("layer(%p,%d) commit: wait vblank", private_layer, private_layer->index);
/* failure path (the commit_failed label itself sat in the 1044-1049 gap):
 * roll back — re-own the buffer, unlink and free the handler */
1050 if (layer_commit_handler) {
1051 private_layer->waiting_buffer = layer_commit_handler->committed_buffer;
1052 LIST_DEL(&layer_commit_handler->link);
1053 free(layer_commit_handler);
1059 tdm_layer_commit_internal(tdm_private_layer *private_layer, tdm_layer_commit_handler func, void *user_data)
1061 tdm_private_output *private_output = private_layer->private_output;
1062 tdm_error ret = TDM_ERROR_NONE; /* default TDM_ERROR_NONE */
1064 if (private_output->commit_type == TDM_COMMIT_TYPE_NONE) {
1065 if (!private_output->commit_per_vblank)
1066 private_output->commit_type = TDM_COMMIT_TYPE_OUTPUT;
1068 private_output->commit_type = TDM_COMMIT_TYPE_LAYER;
1071 if (TDM_OUTPUT_DPMS_VSYNC_IS_OFF(private_output->current_dpms_value)) {
1072 TDM_ERR("layer(%p,%d)'s output(%d) dpms: %s", private_layer, private_layer->index, private_output->pipe,
1073 tdm_dpms_str(private_output->current_dpms_value));
1074 return TDM_ERROR_DPMS_OFF;
1077 /* don't call this inside of _tdm_layer_commit */
1078 ret = tdm_layer_commit_pending_data(private_layer);
1079 if (ret != TDM_ERROR_NONE) {
1080 TDM_ERR("layer(%p,%d) committing pending data failed", private_layer, private_layer->index);
1084 ret = _tdm_layer_commit(private_layer, func, user_data);
1090 tdm_layer_commit(tdm_layer *layer, tdm_layer_commit_handler func, void *user_data)
1094 _pthread_mutex_lock(&private_display->lock);
1096 OUTPUT_HWC_CAP_CHECK();
1098 ret = tdm_layer_commit_internal(private_layer, func, user_data);
1100 _pthread_mutex_unlock(&private_display->lock);
1106 tdm_layer_is_committing(tdm_layer *layer, unsigned int *committing)
1110 TDM_RETURN_VAL_IF_FAIL(committing != NULL, TDM_ERROR_INVALID_PARAMETER);
1112 _pthread_mutex_lock(&private_display->lock);
1114 OUTPUT_HWC_CAP_CHECK();
1116 *committing = private_layer->committing;
1118 _pthread_mutex_unlock(&private_display->lock);
1124 tdm_layer_remove_commit_handler_internal(tdm_layer *layer, tdm_layer_commit_handler func, void *user_data)
1126 tdm_private_layer *private_layer = (tdm_private_layer*)layer;
1127 tdm_private_output *private_output = private_layer->private_output;
1128 tdm_private_layer_commit_handler *lm = NULL, *lmm = NULL;
1130 if (!func && !user_data)
1133 TDM_RETURN_IF_FAIL(private_layer != NULL);
1134 TDM_RETURN_IF_FAIL(TDM_MUTEX_IS_LOCKED());
1136 LIST_FOR_EACH_ENTRY_SAFE(lm, lmm, &private_output->layer_commit_handler_list, link) {
1137 if (lm->func == func && lm->user_data == user_data) {
1138 LIST_DEL(&lm->link);
1139 tdm_output_remove_commit_handler_internal(private_output, _tdm_layer_cb_output_commit, lm);
1140 tdm_layer_free_buffer(private_layer, lm->committed_buffer);
1146 LIST_FOR_EACH_ENTRY_SAFE(lm, lmm, &private_output->pending_commit_handler_list, link) {
1147 if (lm->func == func && lm->user_data == user_data) {
1148 LIST_DEL(&lm->link);
1149 tdm_output_remove_commit_handler_internal(private_output, _tdm_layer_cb_output_commit, lm);
1150 tdm_layer_free_buffer(private_layer, lm->committed_buffer);
1158 tdm_layer_remove_commit_handler(tdm_layer *layer, tdm_layer_commit_handler func, void *user_data)
1162 _pthread_mutex_lock(&private_display->lock);
1164 OUTPUT_HWC_CAP_CHECK();
1166 tdm_layer_remove_commit_handler_internal(layer, func, user_data);
1168 _pthread_mutex_unlock(&private_display->lock);
1173 EXTERN tbm_surface_h
1174 tdm_layer_get_displaying_buffer(tdm_layer *layer, tdm_error *error)
1176 tbm_surface_h buffer;
1177 LAYER_FUNC_ENTRY_ERROR();
1179 _pthread_mutex_lock(&private_display->lock);
1181 OUTPUT_HWC_CAP_CHECK_ERROR();
1184 *error = TDM_ERROR_NONE;
1186 if (private_layer->showing_buffer) {
1187 buffer = private_layer->showing_buffer->buffer;
1189 _pthread_mutex_unlock(&private_display->lock);
1190 TDM_DBG("layer(%p,%d) showing_buffer is null", private_layer, private_layer->index);
1193 _pthread_mutex_unlock(&private_display->lock);
1199 _tbm_layer_queue_acquirable_cb(tbm_surface_queue_h surface_queue, void *data)
1201 TDM_RETURN_IF_FAIL(data != NULL);
1202 tdm_layer *layer = data;
1203 tdm_private_module *private_module;
1204 tdm_func_layer *func_layer;
1205 tbm_surface_h buffer = NULL;
1206 LAYER_FUNC_ENTRY_VOID_RETURN();
1208 _pthread_mutex_lock(&private_display->lock);
1210 private_module = private_layer->private_module;
1211 func_layer = &private_module->func_layer;
1213 if (!func_layer->layer_set_buffer) {
1214 _pthread_mutex_unlock(&private_display->lock);
1218 if (TBM_SURFACE_QUEUE_ERROR_NONE != tbm_surface_queue_acquire(private_layer->buffer_queue, &buffer) ||
1220 TDM_ERR("layer(%p,%d) tbm_surface_queue_acquire() failed surface:%p",
1221 private_layer, private_layer->index, buffer);
1222 _pthread_mutex_unlock(&private_display->lock);
1226 ret = tdm_layer_set_buffer_internal(private_layer, buffer);
1227 if (ret != TDM_ERROR_NONE) {
1228 TDM_ERR("tdm_layer_set_buffer_internal failed");
1229 _pthread_mutex_unlock(&private_display->lock);
1233 ret = tdm_layer_commit_internal(private_layer, NULL, NULL);
1234 if (ret != TDM_ERROR_NONE) {
1235 TDM_ERR("tdm_layer_commit_internal failed");
1236 _pthread_mutex_unlock(&private_display->lock);
1240 _pthread_mutex_unlock(&private_display->lock);
1244 _tbm_layer_queue_destroy_cb(tbm_surface_queue_h surface_queue, void *data)
1246 TDM_RETURN_IF_FAIL(data != NULL);
1247 tdm_layer *layer = data;
1248 LAYER_FUNC_ENTRY_VOID_RETURN();
1249 TDM_RETURN_IF_FAIL(ret == TDM_ERROR_NONE);
1251 _pthread_mutex_lock(&private_display->lock);
1253 private_layer->buffer_queue = NULL;
1255 _tdm_layer_free_all_buffers(private_layer);
1257 _pthread_mutex_unlock(&private_display->lock);
1261 tdm_layer_set_buffer_queue(tdm_layer *layer, tbm_surface_queue_h buffer_queue)
1263 tdm_private_module *private_module;
1264 tdm_func_layer *func_layer;
1267 TDM_RETURN_VAL_IF_FAIL(buffer_queue != NULL, TDM_ERROR_INVALID_PARAMETER);
1269 _pthread_mutex_lock(&private_display->lock);
1271 OUTPUT_HWC_CAP_CHECK();
1273 private_module = private_layer->private_module;
1274 func_layer = &private_module->func_layer;
1276 if (private_layer->usable)
1277 TDM_INFO("layer(%p,%d) not usable", private_layer, private_layer->index);
1279 private_layer->usable = 0;
1281 if (!func_layer->layer_set_buffer) {
1282 _pthread_mutex_unlock(&private_display->lock);
1283 TDM_ERR("not implemented!!");
1284 return TDM_ERROR_NOT_IMPLEMENTED;
1287 if (buffer_queue == private_layer->buffer_queue) {
1288 _pthread_mutex_unlock(&private_display->lock);
1289 return TDM_ERROR_NONE;
1292 if (private_layer->waiting_buffer) {
1293 tdm_layer_free_buffer(private_layer, private_layer->waiting_buffer);
1294 private_layer->waiting_buffer = NULL;
1296 if (tdm_debug_module & TDM_DEBUG_BUFFER)
1297 TDM_INFO("layer(%p,%d) waiting_buffer(%p)",
1298 private_layer, private_layer->index, private_layer->waiting_buffer);
1301 private_layer->buffer_queue = buffer_queue;
1302 tbm_surface_queue_add_acquirable_cb(private_layer->buffer_queue,
1303 _tbm_layer_queue_acquirable_cb,
1305 tbm_surface_queue_add_destroy_cb(private_layer->buffer_queue,
1306 _tbm_layer_queue_destroy_cb,
1308 _pthread_mutex_unlock(&private_display->lock);
1314 tdm_layer_unset_buffer_queue(tdm_layer *layer)
1316 return tdm_layer_unset_buffer(layer);
1320 tdm_layer_is_usable(tdm_layer *layer, unsigned int *usable)
1324 TDM_RETURN_VAL_IF_FAIL(usable != NULL, TDM_ERROR_INVALID_PARAMETER);
1326 _pthread_mutex_lock(&private_display->lock);
1328 OUTPUT_HWC_CAP_CHECK();
1330 *usable = private_layer->usable;
1332 _pthread_mutex_unlock(&private_display->lock);
1338 tdm_layer_set_video_pos(tdm_layer *layer, int zpos)
1340 tdm_private_module *private_module;
1341 tdm_func_layer *func_layer;
1344 _pthread_mutex_lock(&private_display->lock);
1346 OUTPUT_HWC_CAP_CHECK();
1348 private_module = private_layer->private_module;
1349 func_layer = &private_module->func_layer;
1351 if (!(private_layer->caps.capabilities & TDM_LAYER_CAPABILITY_VIDEO)) {
1352 TDM_ERR("layer(%p,%d) is not video layer", private_layer, private_layer->index);
1353 _pthread_mutex_unlock(&private_display->lock);
1354 return TDM_ERROR_BAD_REQUEST;
1357 if (!func_layer->layer_set_video_pos) {
1358 _pthread_mutex_unlock(&private_display->lock);
1359 TDM_ERR("layer(%p,%d) not implemented!!", private_layer, private_layer->index);
1360 return TDM_ERROR_NOT_IMPLEMENTED;
1363 ret = func_layer->layer_set_video_pos(private_layer->layer_backend, zpos);
1365 _pthread_mutex_unlock(&private_display->lock);
1370 EXTERN tdm_capture *
1371 tdm_layer_create_capture(tdm_layer *layer, tdm_error *error)
1373 tdm_capture *capture = NULL;
1375 LAYER_FUNC_ENTRY_ERROR();
1377 _pthread_mutex_lock(&private_display->lock);
1379 OUTPUT_HWC_CAP_CHECK_ERROR();
1381 capture = (tdm_capture *)tdm_capture_create_layer_internal(private_layer, error);
1383 _pthread_mutex_unlock(&private_display->lock);
1389 tdm_layer_get_buffer_flags(tdm_layer *layer, unsigned int *flags)
1391 tdm_private_module *private_module;
1392 tdm_func_layer *func_layer;
1394 TDM_RETURN_VAL_IF_FAIL(flags != NULL, TDM_ERROR_INVALID_PARAMETER);
1396 _pthread_mutex_lock(&private_display->lock);
1398 OUTPUT_HWC_CAP_CHECK();
1400 private_module = private_layer->private_module;
1401 func_layer = &private_module->func_layer;
1403 if (!func_layer->layer_get_buffer_flags) {
1405 _pthread_mutex_unlock(&private_display->lock);
1406 TDM_INFO("not implemented!!");
1407 return TDM_ERROR_NONE;
1410 ret = func_layer->layer_get_buffer_flags(private_layer->layer_backend, flags);
1412 _pthread_mutex_unlock(&private_display->lock);
1416 /* LCOV_EXCL_STOP */