1 /**************************************************************************
5 * Copyright 2015 Samsung Electronics co., Ltd. All Rights Reserved.
7 * Contact: Eunchul Kim <chulspro.kim@samsung.com>,
8 * JinYoung Jeon <jy0.jeon@samsung.com>,
9 * Taeheon Kim <th908.kim@samsung.com>,
10 * YoungJun Cho <yj44.cho@samsung.com>,
11 * SooChan Lim <sc1.lim@samsung.com>,
12 * Boram Park <boram1288.park@samsung.com>
14 * Permission is hereby granted, free of charge, to any person obtaining a
15 * copy of this software and associated documentation files (the
16 * "Software"), to deal in the Software without restriction, including
17 * without limitation the rights to use, copy, modify, merge, publish,
18 * distribute, sub license, and/or sell copies of the Software, and to
19 * permit persons to whom the Software is furnished to do so, subject to
20 * the following conditions:
22 * The above copyright notice and this permission notice (including the
23 * next paragraph) shall be included in all copies or substantial portions of the Software.
26 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
27 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
28 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
29 * IN NO EVENT SHALL PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR
30 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
31 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
32 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
34 **************************************************************************/
40 #include "tdm_private.h"
/* Shared prologue for public layer APIs that return tdm_error: declares the
 * private display/output/layer locals, validates the 'layer' argument, and
 * derives the private pointers from it.  'ret' defaults to TDM_ERROR_NONE. */
44 #define LAYER_FUNC_ENTRY() \
45 tdm_private_display *private_display; \
46 tdm_private_output *private_output; \
47 tdm_private_layer *private_layer; \
48 tdm_error ret = TDM_ERROR_NONE; /* default TDM_ERROR_NONE */\
49 TDM_RETURN_VAL_IF_FAIL(layer != NULL, TDM_ERROR_INVALID_PARAMETER); \
50 private_layer = (tdm_private_layer*)layer; \
51 private_output = private_layer->private_output; \
52 private_display = private_output->private_display
/* Prologue variant for APIs that return a pointer: on a NULL 'layer' it sets
 * the caller's error (via TDM_RETURN_VAL_IF_FAIL_WITH_ERROR) and returns NULL. */
54 #define LAYER_FUNC_ENTRY_ERROR() \
55 tdm_private_display *private_display; \
56 tdm_private_output *private_output; \
57 tdm_private_layer *private_layer; \
58 tdm_error ret = TDM_ERROR_NONE; /* default TDM_ERROR_NONE */\
59 TDM_RETURN_VAL_IF_FAIL_WITH_ERROR(layer != NULL, TDM_ERROR_INVALID_PARAMETER, NULL); \
60 private_layer = (tdm_private_layer*)layer; \
61 private_output = private_layer->private_output; \
62 private_display = private_output->private_display
/* Prologue variant for void-returning APIs: bails out silently on NULL layer. */
64 #define LAYER_FUNC_ENTRY_VOID_RETURN() \
65 tdm_private_display *private_display; \
66 tdm_private_output *private_output; \
67 tdm_private_layer *private_layer; \
68 tdm_error ret = TDM_ERROR_NONE; /* default TDM_ERROR_NONE */\
69 TDM_RETURN_IF_FAIL(layer != NULL); \
70 private_layer = (tdm_private_layer*)layer; \
71 private_output = private_layer->private_output; \
72 private_display = private_output->private_display
/* Reject the layer API call when the owning output advertises the HWC
 * capability: HWC outputs must be driven through the HWC functions instead.
 * Drops the display lock before failing with TDM_ERROR_OPERATION_FAILED.
 * NOTE(review): the macro's closing "}" continuation line is elided in this
 * listing (source line numbers skip 79-80); code kept byte-identical. */
74 #define OUTPUT_HWC_CAP_CHECK() \
75 if (private_output->caps.capabilities & TDM_OUTPUT_CAPABILITY_HWC) { \
76 TDM_ERR("output(%p) support HWC. Use HWC functions", private_output); \
77 _pthread_mutex_unlock(&private_display->lock); \
78 return TDM_ERROR_OPERATION_FAILED; \
/* HWC guard for pointer-returning APIs: reports the failure through the
 * optional 'error' out-parameter before unlocking.
 * NOTE(review): the trailing "return NULL; }" continuation lines are elided
 * in this listing (source line numbers skip 86-88); code kept byte-identical. */
81 #define OUTPUT_HWC_CAP_CHECK_ERROR() \
82 if (private_output->caps.capabilities & TDM_OUTPUT_CAPABILITY_HWC) { \
83 TDM_ERR("output(%p) support HWC. Use HWC functions", private_output); \
84 if (error) *error = TDM_ERROR_OPERATION_FAILED; \
85 _pthread_mutex_unlock(&private_display->lock); \
/* HWC guard for void-returning APIs.
 * NOTE(review): the trailing "return; }" continuation lines are elided in
 * this listing (source line numbers skip 93-94); code kept byte-identical. */
89 #define OUTPUT_HWC_CAP_CHECK_VOID_RETURN() \
90 if (private_output->caps.capabilities & TDM_OUTPUT_CAPABILITY_HWC) { \
91 TDM_ERR("output(%p) support HWC. Use HWC functions", private_output); \
92 _pthread_mutex_unlock(&private_display->lock); \
96 static void _tdm_layer_cb_wait_vblank(tdm_vblank *vblank, tdm_error error, unsigned int sequence,
97 unsigned int tv_sec, unsigned int tv_usec, void *user_data);
98 static void _tbm_layer_queue_acquirable_cb(tbm_surface_queue_h surface_queue, void *data);
99 static void _tbm_layer_queue_destroy_cb(tbm_surface_queue_h surface_queue, void *data);
100 static void _tdm_layer_cb_output_commit(tdm_output *output, unsigned int sequence,
101 unsigned int tv_sec, unsigned int tv_usec, void *user_data);
102 static void _tdm_layer_reset_pending_data(tdm_private_layer *private_layer);
/* Returns the tdm_output that owns @layer; sets *error to TDM_ERROR_NONE on
 * success.  Takes the display lock for the duration of the lookup.
 * NOTE(review): the local 'output' declaration, braces and the trailing
 * "return output;" are elided in this listing; code kept byte-identical. */
105 tdm_layer_get_output(tdm_layer *layer, tdm_error *error)
109 LAYER_FUNC_ENTRY_ERROR();
111 _pthread_mutex_lock(&private_display->lock);
113 OUTPUT_HWC_CAP_CHECK_ERROR();
116 *error = TDM_ERROR_NONE;
118 output = private_layer->private_output;
120 _pthread_mutex_unlock(&private_display->lock);
/* Stores @layer's index (position within its output's layer list) in *index.
 * NOTE(review): the LAYER_FUNC_ENTRY() invocation that declares
 * private_display/private_layer, and the trailing "return ret;", are elided
 * in this listing; code kept byte-identical. */
126 tdm_layer_get_index(tdm_layer *layer, int *index)
130 TDM_RETURN_VAL_IF_FAIL(index != NULL, TDM_ERROR_INVALID_PARAMETER);
132 _pthread_mutex_lock(&private_display->lock);
134 OUTPUT_HWC_CAP_CHECK();
136 *index = private_layer->index;
138 _pthread_mutex_unlock(&private_display->lock);
/* Copies the backend-reported layer capability bitmask into *capabilities. */
144 tdm_layer_get_capabilities(tdm_layer *layer, tdm_layer_capability *capabilities)
148 TDM_RETURN_VAL_IF_FAIL(capabilities != NULL, TDM_ERROR_INVALID_PARAMETER);
150 _pthread_mutex_lock(&private_display->lock);
152 OUTPUT_HWC_CAP_CHECK();
154 *capabilities = private_layer->caps.capabilities;
156 _pthread_mutex_unlock(&private_display->lock);
/* Exposes the backend's supported-format array.  The returned pointer aliases
 * internal capability data — the caller must not modify or free it. */
162 tdm_layer_get_available_formats(tdm_layer *layer, const tbm_format **formats, int *count)
166 TDM_RETURN_VAL_IF_FAIL(formats != NULL, TDM_ERROR_INVALID_PARAMETER);
167 TDM_RETURN_VAL_IF_FAIL(count != NULL, TDM_ERROR_INVALID_PARAMETER);
169 _pthread_mutex_lock(&private_display->lock);
171 OUTPUT_HWC_CAP_CHECK();
173 *formats = (const tbm_format *)private_layer->caps.formats;
174 *count = private_layer->caps.format_count;
176 _pthread_mutex_unlock(&private_display->lock);
/* Exposes the backend's supported-property array; same aliasing caveat as
 * tdm_layer_get_available_formats — caller must not modify or free *props. */
182 tdm_layer_get_available_properties(tdm_layer *layer, const tdm_prop **props, int *count)
186 TDM_RETURN_VAL_IF_FAIL(props != NULL, TDM_ERROR_INVALID_PARAMETER);
187 TDM_RETURN_VAL_IF_FAIL(count != NULL, TDM_ERROR_INVALID_PARAMETER);
189 _pthread_mutex_lock(&private_display->lock);
191 OUTPUT_HWC_CAP_CHECK();
193 *props = (const tdm_prop *)private_layer->caps.props;
194 *count = private_layer->caps.prop_count;
196 _pthread_mutex_unlock(&private_display->lock);
/* Stores the layer's stacking position (z-order) in *zpos. */
202 tdm_layer_get_zpos(tdm_layer *layer, int *zpos)
206 TDM_RETURN_VAL_IF_FAIL(zpos != NULL, TDM_ERROR_INVALID_PARAMETER);
208 _pthread_mutex_lock(&private_display->lock);
210 OUTPUT_HWC_CAP_CHECK();
212 *zpos = private_layer->caps.zpos;
214 _pthread_mutex_unlock(&private_display->lock);
/* Forwards a property change (id/value) to the backend's layer_set_property.
 * Also marks the layer as "in use": once a client touches a layer, it is no
 * longer available for reallocation (usable = 0).
 * NOTE(review): LAYER_FUNC_ENTRY() and the closing "}/return ret;" lines are
 * elided in this listing; code kept byte-identical. */
220 tdm_layer_set_property(tdm_layer *layer, unsigned int id, tdm_value value)
222 tdm_private_module *private_module;
223 tdm_func_layer *func_layer;
226 _pthread_mutex_lock(&private_display->lock);
228 OUTPUT_HWC_CAP_CHECK();
230 private_module = private_layer->private_module;
231 func_layer = &private_module->func_layer;
/* Log only on the 1->0 transition: the layer is about to become unusable. */
233 if (private_layer->usable)
234 TDM_INFO("layer(%p,%d) not usable", private_layer, private_layer->index);
236 private_layer->usable = 0;
238 if (!func_layer->layer_set_property) {
239 /* LCOV_EXCL_START */
240 _pthread_mutex_unlock(&private_display->lock);
241 TDM_ERR("not implemented!!");
242 return TDM_ERROR_NOT_IMPLEMENTED;
246 ret = func_layer->layer_set_property(private_layer->layer_backend, id, value);
248 _pthread_mutex_unlock(&private_display->lock);
/* Reads a property value from the backend via layer_get_property.  Unlike
 * set_property this does not mark the layer as used. */
254 tdm_layer_get_property(tdm_layer *layer, unsigned int id, tdm_value *value)
256 tdm_private_module *private_module;
257 tdm_func_layer *func_layer;
260 TDM_RETURN_VAL_IF_FAIL(value != NULL, TDM_ERROR_INVALID_PARAMETER);
262 _pthread_mutex_lock(&private_display->lock);
264 OUTPUT_HWC_CAP_CHECK();
266 private_module = private_layer->private_module;
267 func_layer = &private_module->func_layer;
269 if (!func_layer->layer_get_property) {
270 /* LCOV_EXCL_START */
271 _pthread_mutex_unlock(&private_display->lock);
272 TDM_ERR("not implemented!!");
273 return TDM_ERROR_NOT_IMPLEMENTED;
277 ret = func_layer->layer_get_property(private_layer->layer_backend, id, value);
279 _pthread_mutex_unlock(&private_display->lock);
/* Records @info as the layer's pending geometry/format configuration.  The
 * new info is NOT pushed to the backend here — it is stored in pending_info
 * and applied later by tdm_layer_commit_pending_data().  Caller must hold the
 * display lock.
 * NOTE(review): the 'fmtstr' buffer declaration (presumably char[128], to
 * match the snprintf size below), the else branch and the transform argument
 * of the TDM_INFO call are elided in this listing; code kept byte-identical. */
285 tdm_layer_set_info_internal(tdm_private_layer *private_layer, tdm_info_layer *info)
287 tdm_private_module *private_module;
288 tdm_func_layer *func_layer;
291 private_module = private_layer->private_module;
292 func_layer = &private_module->func_layer;
/* Touching the layer config claims the layer (see tdm_layer_set_property). */
294 if (private_layer->usable)
295 TDM_INFO("layer(%p,%d) not usable", private_layer, private_layer->index);
297 private_layer->usable = 0;
299 if (!func_layer->layer_set_info) {
300 /* LCOV_EXCL_START */
301 TDM_ERR("not implemented!!");
302 return TDM_ERROR_NOT_IMPLEMENTED;
/* Render the FOURCC format as a 4-char string for the log line. */
306 if (info->src_config.format)
307 snprintf(fmtstr, 128, "%c%c%c%c", FOURCC_STR(info->src_config.format));
309 snprintf(fmtstr, 128, "NONE");
311 TDM_INFO("layer(%p,%d) info: src(%ux%u %u,%u %ux%u %s) dst(%u,%u %ux%u) trans(%d)",
312 private_layer, private_layer->index,
313 info->src_config.size.h, info->src_config.size.v,
314 info->src_config.pos.x, info->src_config.pos.y,
315 info->src_config.pos.w, info->src_config.pos.h,
317 info->dst_pos.x, info->dst_pos.y,
318 info->dst_pos.w, info->dst_pos.h,
/* Defer: remember the new info until the next commit applies it. */
321 private_layer->pending_info_changed = 1;
322 private_layer->pending_info = *info;
324 return TDM_ERROR_NONE;
/* Public wrapper: locks the display and delegates to
 * tdm_layer_set_info_internal(). */
328 tdm_layer_set_info(tdm_layer *layer, tdm_info_layer *info)
332 TDM_RETURN_VAL_IF_FAIL(info != NULL, TDM_ERROR_INVALID_PARAMETER);
334 _pthread_mutex_lock(&private_display->lock);
336 OUTPUT_HWC_CAP_CHECK();
338 ret = tdm_layer_set_info_internal(private_layer, info);
340 _pthread_mutex_unlock(&private_display->lock);
/* Queries the CURRENT layer configuration from the backend (layer_get_info),
 * not the pending one stored by tdm_layer_set_info(). */
346 tdm_layer_get_info(tdm_layer *layer, tdm_info_layer *info)
348 tdm_private_module *private_module;
349 tdm_func_layer *func_layer;
352 TDM_RETURN_VAL_IF_FAIL(info != NULL, TDM_ERROR_INVALID_PARAMETER);
354 _pthread_mutex_lock(&private_display->lock);
356 OUTPUT_HWC_CAP_CHECK();
358 private_module = private_layer->private_module;
359 func_layer = &private_module->func_layer;
361 if (!func_layer->layer_get_info) {
362 /* LCOV_EXCL_START */
363 _pthread_mutex_unlock(&private_display->lock);
364 TDM_ERR("not implemented!!");
365 return TDM_ERROR_NOT_IMPLEMENTED;
369 ret = func_layer->layer_get_info(private_layer->layer_backend, info);
371 _pthread_mutex_unlock(&private_display->lock);
376 /* LCOV_EXCL_START */
/* Debug helper: dumps @buffer to a file whose name encodes the output pipe,
 * the layer zpos, and the buffers currently showing on every layer of the
 * output — so a dump can be correlated with the full on-screen state.
 * NOTE(review): declarations of 'pipe'/'p'/'remain'/'n', the loop's continue,
 * and the closing brace are elided in this listing; code kept byte-identical. */
378 _tdm_layer_dump_buffer(tdm_layer *layer, tbm_surface_h buffer)
380 tdm_private_layer *private_layer = (tdm_private_layer*)layer;
381 tdm_private_output *private_output = private_layer->private_output;
383 char fname[TDM_PATH_LEN], bufs[TDM_PATH_LEN];
384 int zpos, len = TDM_PATH_LEN;
385 tdm_private_layer *l = NULL;
390 pipe = private_output->pipe;
391 zpos = private_layer->caps.zpos;
/* Append each layer's showing-buffer pointer to the filename suffix. */
394 LIST_FOR_EACH_ENTRY(l, &private_output->layer_list, link) {
395 if (!l->showing_buffer)
397 TDM_SNPRINTF(p, remain, "_%p", l->showing_buffer->buffer);
400 n = snprintf(fname, sizeof(fname), "tdm_%d_lyr_%d%s", pipe, zpos, bufs);
/* Defensive truncation check on the composed filename. */
401 if ((size_t)n >= sizeof(fname)) {
402 fname[sizeof(fname) - 1] = '\0';
405 tbm_surface_internal_dump_buffer(buffer, fname);
406 TDM_DBG("%s dump excute", fname);
/* Releases one tracked layer buffer: unlinks it, drops the backend buffer
 * reference, and returns the surface to the layer's tbm queue if one exists.
 * The display lock is released around the unref/release because those calls
 * may re-enter TDM through buffer-release callbacks.
 * NOTE(review): the free() of layer_buffer itself and the closing brace are
 * elided in this listing; code kept byte-identical. */
413 tdm_layer_free_buffer(tdm_private_layer *private_layer, tdm_private_layer_buffer *layer_buffer)
415 tdm_private_display *private_display;
420 private_display = private_layer->private_output->private_display;
422 LIST_DEL(&layer_buffer->link);
423 if (layer_buffer->buffer) {
424 _pthread_mutex_unlock(&private_display->lock);
425 tdm_buffer_unref_backend(layer_buffer->buffer);
426 if (private_layer->buffer_queue)
427 tbm_surface_queue_release(private_layer->buffer_queue, layer_buffer->buffer);
428 _pthread_mutex_lock(&private_display->lock);
/* Tears down every buffer the layer still tracks: pending data, the
 * waiting/committed/showing buffers, any commit handlers (active or pending)
 * that reference this layer, and finally the tbm queue callbacks.  Handlers
 * are first moved onto a local clone list so they can be freed without
 * walking the shared output lists while mutating them. */
434 _tdm_layer_free_all_buffers(tdm_private_layer *private_layer)
436 tdm_private_output *private_output = private_layer->private_output;
437 tdm_private_layer_commit_handler *lm = NULL, *lmm = NULL;
438 struct list_head clone_list;
440 LIST_INITHEAD(&clone_list);
442 _tdm_layer_reset_pending_data(private_layer);
444 if (private_layer->waiting_buffer) {
445 tdm_layer_free_buffer(private_layer, private_layer->waiting_buffer);
446 private_layer->waiting_buffer = NULL;
/* NOTE(review): this logs after the pointer is NULLed, so it prints (nil);
 * kept as-is to preserve behavior. */
448 if (tdm_debug_module & TDM_DEBUG_BUFFER)
449 TDM_INFO("layer(%p,%d) waiting_buffer(%p)",
450 private_layer, private_layer->index, private_layer->waiting_buffer);
453 if (private_layer->committed_buffer) {
454 tdm_layer_free_buffer(private_layer, private_layer->committed_buffer);
455 private_layer->committed_buffer = NULL;
457 if (tdm_debug_module & TDM_DEBUG_BUFFER)
458 TDM_INFO("layer(%p,%d) committed_buffer(%p)",
459 private_layer, private_layer->index, private_layer->committed_buffer);
462 if (private_layer->showing_buffer) {
463 tdm_layer_free_buffer(private_layer, private_layer->showing_buffer);
464 private_layer->showing_buffer = NULL;
466 if (tdm_debug_module & TDM_DEBUG_BUFFER)
467 TDM_INFO("layer(%p,%d) showing_buffer(%p)",
468 private_layer, private_layer->index, private_layer->showing_buffer);
/* Collect this layer's active commit handlers, then free them (and drop the
 * associated output commit handler) off-list. */
471 LIST_FOR_EACH_ENTRY_SAFE(lm, lmm, &private_output->layer_commit_handler_list, link) {
472 if (lm->private_layer != private_layer)
475 LIST_ADDTAIL(&lm->link, &clone_list);
478 LIST_FOR_EACH_ENTRY_SAFE(lm, lmm, &clone_list, link) {
480 tdm_output_remove_commit_handler_internal(private_output, _tdm_layer_cb_output_commit, lm);
481 tdm_layer_free_buffer(private_layer, lm->committed_buffer);
/* Same treatment for handlers still waiting on a vblank. */
485 LIST_FOR_EACH_ENTRY_SAFE(lm, lmm, &private_output->pending_commit_handler_list, link) {
486 if (lm->private_layer != private_layer)
489 LIST_ADDTAIL(&lm->link, &clone_list);
492 LIST_FOR_EACH_ENTRY_SAFE(lm, lmm, &clone_list, link) {
494 tdm_layer_free_buffer(private_layer, lm->committed_buffer);
/* Detach from the tbm queue; the queue itself is owned elsewhere. */
498 if (private_layer->buffer_queue) {
499 tbm_surface_queue_remove_acquirable_cb(private_layer->buffer_queue, _tbm_layer_queue_acquirable_cb, private_layer);
500 tbm_surface_queue_remove_destroy_cb(private_layer->buffer_queue, _tbm_layer_queue_destroy_cb, private_layer);
501 private_layer->buffer_queue = NULL;
/* Stores @buffer as the layer's pending buffer (applied at the next commit).
 * Takes a tbm ref on the new buffer and drops the ref on any previously
 * pending buffer it replaces.  Also feeds the optional dump/trace debug
 * machinery.  Caller must hold the display lock.
 * NOTE(review): the 'static int i' dump counter declaration appears elided in
 * this listing (used by the snprintf below); code kept byte-identical. */
506 tdm_layer_set_buffer_internal(tdm_private_layer *private_layer, tbm_surface_h buffer)
508 tdm_private_module *private_module;
509 tdm_private_output *private_output = private_layer->private_output;
510 tdm_func_layer *func_layer;
512 /* LCOV_EXCL_START */
/* Optional debug dumps; video layers are skipped (their buffers are not
 * dumpable RGB surfaces). */
514 if (tdm_dump_enable && !(private_layer->caps.capabilities & TDM_LAYER_CAPABILITY_VIDEO))
515 _tdm_layer_dump_buffer(private_layer, buffer);
517 if (tdm_debug_dump & TDM_DUMP_FLAG_LAYER &&
518 !(private_layer->caps.capabilities & TDM_LAYER_CAPABILITY_VIDEO)) {
519 char str[TDM_PATH_LEN];
521 snprintf(str, TDM_PATH_LEN, "layer_%d_%d_%03d",
522 private_output->index, private_layer->index, i++);
523 tdm_helper_dump_buffer_str(buffer, tdm_debug_dump_dir, str);
527 private_module = private_layer->private_module;
528 func_layer = &private_module->func_layer;
/* Setting a buffer claims the layer (see tdm_layer_set_property). */
530 if (private_layer->usable)
531 TDM_INFO("layer(%p,%d) not usable", private_layer, private_layer->index);
533 private_layer->usable = 0;
535 if (!func_layer->layer_set_buffer) {
536 /* LCOV_EXCL_START */
537 TDM_ERR("not implemented!!");
538 return TDM_ERROR_NOT_IMPLEMENTED;
539 /* LCOV_EXCL_STOP */
542 private_layer->pending_buffer_changed = 1;
/* A still-pending buffer is being superseded before it was ever committed:
 * close its trace span and drop our reference. */
544 if (private_layer->pending_buffer) {
546 if (tdm_debug_module & TDM_DEBUG_BUFFER)
547 TDM_INFO("layer(%p,%d) pending_buffer(%p) skipped",
548 private_layer, private_layer->index, private_layer->pending_buffer);
550 if (tdm_ttrace_module & TDM_TTRACE_LAYER) {
551 tbm_bo bo = tbm_surface_internal_get_bo(private_layer->pending_buffer, 0);
552 TDM_TRACE_ASYNC_END((intptr_t)private_layer, "[LAYER] %d", tbm_bo_export(bo));
555 tbm_surface_internal_unref(private_layer->pending_buffer);
/* Take ownership of the new pending buffer. */
558 tbm_surface_internal_ref(buffer);
559 private_layer->pending_buffer = buffer;
561 if (tdm_debug_module & TDM_DEBUG_BUFFER) {
562 tbm_bo bo = tbm_surface_internal_get_bo(private_layer->pending_buffer, 0);
563 int flags = tbm_bo_get_flags(bo);
564 TDM_INFO("layer(%p,%d) pending_buffer(%p) bo_flags(%x)",
565 private_layer, private_layer->index, private_layer->pending_buffer, flags);
568 if (tdm_ttrace_module & TDM_TTRACE_LAYER) {
569 tbm_bo bo = tbm_surface_internal_get_bo(private_layer->pending_buffer, 0);
570 TDM_TRACE_ASYNC_BEGIN((intptr_t)private_layer, "[LAYER] %d", tbm_bo_export(bo));
573 return TDM_ERROR_NONE;
/* Public wrapper: locks the display and delegates to
 * tdm_layer_set_buffer_internal(). */
577 tdm_layer_set_buffer(tdm_layer *layer, tbm_surface_h buffer)
581 TDM_RETURN_VAL_IF_FAIL(buffer != NULL, TDM_ERROR_INVALID_PARAMETER);
583 _pthread_mutex_lock(&private_display->lock);
585 OUTPUT_HWC_CAP_CHECK();
587 ret = tdm_layer_set_buffer_internal(private_layer, buffer);
589 _pthread_mutex_unlock(&private_display->lock);
/* Detaches all buffers from the layer, marks it usable again (available for
 * reallocation), and tells the backend to stop scanning out from it.  Caller
 * must hold the display lock. */
595 tdm_layer_unset_buffer_internal(tdm_private_layer *private_layer)
597 tdm_private_module *private_module;
598 tdm_func_layer *func_layer;
599 tdm_error ret = TDM_ERROR_NONE;
601 private_module = private_layer->private_module;
602 func_layer = &private_module->func_layer;
604 _tdm_layer_free_all_buffers(private_layer);
606 private_layer->usable = 1;
608 if (private_layer->usable)
609 TDM_INFO("layer(%p,%d) now usable", private_layer, private_layer->index);
611 if (!func_layer->layer_unset_buffer) {
612 /* LCOV_EXCL_START */
613 TDM_ERR("not implemented!!");
614 return TDM_ERROR_NOT_IMPLEMENTED;
615 /* LCOV_EXCL_STOP */
618 ret = func_layer->layer_unset_buffer(private_layer->layer_backend);
619 TDM_WARNING_IF_FAIL(ret == TDM_ERROR_NONE);
/* Public wrapper: locks the display and delegates to
 * tdm_layer_unset_buffer_internal(). */
625 tdm_layer_unset_buffer(tdm_layer *layer)
629 _pthread_mutex_lock(&private_display->lock);
631 OUTPUT_HWC_CAP_CHECK();
633 ret = tdm_layer_unset_buffer_internal(private_layer);
634 TDM_WARNING_IF_FAIL(ret == TDM_ERROR_NONE);
636 _pthread_mutex_unlock(&private_display->lock);
/* Promotes a committed buffer to "showing": frees the previously showing
 * buffer, moves *committed_buffer into showing_buffer and NULLs the caller's
 * pointer (ownership transfer).  Also maintains the optional per-layer FPS
 * counter. */
642 tdm_layer_committed(tdm_private_layer *private_layer, tdm_private_layer_buffer **committed_buffer)
644 tdm_private_output *private_output = private_layer->private_output;
645 tdm_private_display *private_display = private_output->private_display;
647 /* LCOV_EXCL_START */
/* FPS accounting: count frames, print and reset once per second. */
648 if (private_display->print_fps) {
649 double curr = tdm_helper_get_time();
650 if (private_layer->fps_stamp == 0) {
651 private_layer->fps_stamp = curr;
652 } else if ((curr - private_layer->fps_stamp) > 1.0) {
653 TDM_INFO("output(%d) layer(%p,%d) fps: %d",
654 private_output->index, private_layer, private_layer->index, private_layer->fps_count);
655 private_layer->fps_count = 0;
656 private_layer->fps_stamp = curr;
658 private_layer->fps_count++;
659 } else if (private_layer->fps_stamp != 0) {
660 private_layer->fps_stamp = 0;
661 private_layer->fps_count = 0;
/* The old showing buffer is replaced on screen: end its trace span and free it. */
665 if (private_layer->showing_buffer) {
666 if (tdm_ttrace_module & TDM_TTRACE_LAYER) {
667 tbm_bo bo = tbm_surface_internal_get_bo(private_layer->showing_buffer->buffer, 0);
668 TDM_TRACE_ASYNC_END((intptr_t)private_layer, "[LAYER] %d", tbm_bo_export(bo));
671 tdm_layer_free_buffer(private_layer, private_layer->showing_buffer);
674 private_layer->showing_buffer = *committed_buffer;
675 *committed_buffer = NULL;
/* NOTE(review): *committed_buffer was just NULLed, so this prints (nil) for
 * the first value; kept as-is to preserve behavior. */
677 if (tdm_debug_module & TDM_DEBUG_BUFFER)
678 TDM_INFO("layer(%p,%d) committed_buffer(%p) showing_buffer(%p)",
679 private_layer, private_layer->index, *committed_buffer,
680 (private_layer->showing_buffer) ? private_layer->showing_buffer->buffer : NULL);
/* Vblank handler for commit-per-vblank mode.  Called with the display lock
 * held.  Completes all handlers that were committed before this vblank
 * (promote buffer, fire user callback), then flushes the pending handlers:
 * commits the output once for all of them and re-arms the vblank wait.  On
 * any failure, falls through to wait_failed and fires the pending handlers
 * directly, because a successfully queued layer commit must always deliver
 * its callback.
 * NOTE(review): interior lines (continue statements, closing braces, the
 * 'return;' before wait_failed:) are elided in this listing. */
684 _tdm_layer_got_output_vblank(tdm_private_output *private_output, unsigned int sequence,
685 unsigned int tv_sec, unsigned int tv_usec)
687 tdm_private_layer_commit_handler *lm = NULL, *lmm = NULL;
688 tdm_private_display *private_display;
689 struct list_head clone_list, pending_clone_list;
690 tdm_error ret = TDM_ERROR_NONE;
692 private_display = private_output->private_display;
694 private_output->layer_waiting_vblank = 0;
/* Snapshot both handler lists locally so user callbacks (which run unlocked
 * and may re-enter TDM) cannot corrupt the shared lists mid-walk. */
696 LIST_INITHEAD(&clone_list);
697 LIST_INITHEAD(&pending_clone_list);
699 LIST_FOR_EACH_ENTRY_SAFE(lm, lmm, &private_output->layer_commit_handler_list, link) {
701 lm->private_layer->committing = 0;
702 LIST_ADDTAIL(&lm->link, &clone_list);
705 LIST_FOR_EACH_ENTRY_SAFE(lm, lmm, &private_output->pending_commit_handler_list, link) {
707 lm->private_layer->committing = 0;
708 LIST_ADDTAIL(&lm->link, &pending_clone_list);
/* Complete the handlers whose commit has now hit the screen. */
711 LIST_FOR_EACH_ENTRY_SAFE(lm, lmm, &clone_list, link) {
712 if (tdm_debug_module & TDM_DEBUG_COMMIT)
713 TDM_INFO("layer(%p,%d) committed. handle(%p) commited_buffer(%p)",
714 lm->private_layer, lm->private_layer->index, lm, (lm->committed_buffer) ? lm->committed_buffer->buffer : NULL);
717 tdm_layer_committed(lm->private_layer, &lm->committed_buffer);
/* User callback runs without the lock (may call back into TDM). */
718 _pthread_mutex_unlock(&private_display->lock);
720 lm->func(lm->private_layer, sequence, tv_sec, tv_usec, lm->user_data);
721 _pthread_mutex_lock(&private_display->lock);
722 if (lm->committed_buffer)
723 tdm_layer_free_buffer(lm->private_layer, lm->committed_buffer);
727 if (LIST_IS_EMPTY(&pending_clone_list))
/* Flush the deferred commits accumulated since the last vblank. */
730 TDM_GOTO_IF_FAIL(private_output->vblank != NULL, wait_failed);
732 ret = tdm_output_commit_internal(private_output, 0, NULL, NULL);
733 TDM_GOTO_IF_FAIL(ret == TDM_ERROR_NONE, wait_failed);
735 if (tdm_debug_module & TDM_DEBUG_COMMIT)
736 TDM_INFO("layer commit: output(%d) commit", private_output->pipe);
738 if (!TDM_OUTPUT_DPMS_VSYNC_IS_OFF(private_output->current_dpms_value)) {
739 /* tdm_vblank APIs are server-side and must be called with the lock released */
740 if (!private_output->layer_waiting_vblank) {
741 _pthread_mutex_unlock(&private_display->lock);
742 ret = tdm_vblank_wait(private_output->vblank, 0, 0, 1, _tdm_layer_cb_wait_vblank, private_output);
743 _pthread_mutex_lock(&private_display->lock);
744 TDM_GOTO_IF_FAIL(ret == TDM_ERROR_NONE, wait_failed);
745 private_output->layer_waiting_vblank = 1;
749 if (tdm_debug_module & TDM_DEBUG_COMMIT)
750 TDM_INFO("layer commit: output(%d) wait vblank", private_output->pipe);
/* The just-committed pending handlers become the active set for the next vblank. */
752 LIST_FOR_EACH_ENTRY_SAFE(lm, lmm, &pending_clone_list, link) {
754 LIST_ADDTAIL(&lm->link, &private_output->layer_commit_handler_list);
/* DPMS off: no hardware vblank will arrive, so synthesize one immediately. */
757 if (TDM_OUTPUT_DPMS_VSYNC_IS_OFF(private_output->current_dpms_value)) {
758 TDM_WRN("dpms %s. Directly call vblank callback.", tdm_dpms_str(private_output->current_dpms_value));
759 _pthread_mutex_unlock(&private_display->lock);
760 _tdm_layer_cb_wait_vblank(private_output->vblank, 0, 0, 0, 0, private_output);
761 _pthread_mutex_lock(&private_display->lock);
766 /* CAUTION: Once _tdm_layer_commit returns success, the layer commit handler MUST be called always.
767 * That is, even if we get error in _tdm_layer_got_output_vblank() function for some reasons,
768 * the layer commit handler MUST be called.
770 LIST_FOR_EACH_ENTRY_SAFE(lm, lmm, &pending_clone_list, link) {
771 TDM_WRN("Directly call layer commit handlers: ret(%d)\n", ret);
773 _pthread_mutex_unlock(&private_display->lock);
775 lm->func(lm->private_layer, sequence, tv_sec, tv_usec, lm->user_data);
776 _pthread_mutex_lock(&private_display->lock);
777 tdm_layer_free_buffer(lm->private_layer, lm->committed_buffer);
/* Output-commit completion callback for the non-per-vblank path.  Validates
 * that the handler is still queued (it may have been freed by
 * _tdm_layer_free_all_buffers in the meantime), promotes the committed
 * buffer, fires the user callback, and frees the handler.
 * NOTE(review): the early-return path when the handler is not found in the
 * list is elided in this listing; code kept byte-identical. */
785 _tdm_layer_cb_output_commit(tdm_output *output, unsigned int sequence,
786 unsigned int tv_sec, unsigned int tv_usec, void *user_data)
788 tdm_private_layer_commit_handler *layer_commit_handler = user_data;
789 tdm_private_layer_commit_handler *lm = NULL;
790 tdm_private_display *private_display;
791 tdm_private_output *private_output = output;
792 tdm_private_layer *private_layer;
795 TDM_RETURN_IF_FAIL(layer_commit_handler != NULL);
797 private_display = private_output->private_display;
/* Guard against a stale handler: only proceed if it is still queued. */
799 LIST_FOR_EACH_ENTRY(lm, &private_output->layer_commit_handler_list, link) {
800 if (layer_commit_handler == lm) {
809 LIST_DEL(&layer_commit_handler->link);
811 private_layer = layer_commit_handler->private_layer;
813 private_layer->committing = 0;
815 if (tdm_debug_module & TDM_DEBUG_COMMIT)
816 TDM_INFO("layer(%p,%d) commit: output(%d) committed. handle(%p)",
817 private_layer, private_layer->index, private_output->pipe, layer_commit_handler);
819 _pthread_mutex_lock(&private_display->lock);
821 tdm_layer_committed(private_layer, &layer_commit_handler->committed_buffer);
/* User callback runs without the display lock (may re-enter TDM). */
823 if (layer_commit_handler->func) {
824 _pthread_mutex_unlock(&private_display->lock);
825 layer_commit_handler->func(private_layer, sequence,
826 tv_sec, tv_usec, layer_commit_handler->user_data);
827 _pthread_mutex_lock(&private_display->lock);
830 free(layer_commit_handler);
832 _pthread_mutex_unlock(&private_display->lock);
/* tdm_vblank wait callback: re-acquires the display lock and hands off to
 * _tdm_layer_got_output_vblank() to complete/flush the commit handlers. */
836 _tdm_layer_cb_wait_vblank(tdm_vblank *vblank, tdm_error error, unsigned int sequence,
837 unsigned int tv_sec, unsigned int tv_usec, void *user_data)
839 tdm_private_output *private_output = user_data;
840 tdm_private_display *private_display;
842 TDM_RETURN_IF_FAIL(private_output != NULL);
844 private_display = private_output->private_display;
846 _pthread_mutex_lock(&private_display->lock);
848 if (tdm_debug_module & TDM_DEBUG_COMMIT)
849 TDM_INFO("layer commit: output(%d) got vblank", private_output->pipe);
851 _tdm_layer_got_output_vblank(private_output, sequence, tv_sec, tv_usec);
853 _pthread_mutex_unlock(&private_display->lock);
/* Counts the output's layers that are in use (usable == 0).
 * NOTE(review): the count increment, the closing brace and "return count;"
 * are elided in this listing; code kept byte-identical. */
857 _tdm_layer_get_output_used_layer_count(tdm_private_output *private_output)
859 tdm_private_layer *private_layer = NULL;
860 unsigned int count = 0;
862 LIST_FOR_EACH_ENTRY(private_layer, &private_output->layer_list, link) {
863 if (!private_layer->usable)
870 /* commit_per_vblank == 1: we can commit if
871 * - there is no previous commit request
872 * - only 1 layer is used
873 * commit_per_vblank == 2: we can commit if
874 * - there is no previous commit request
/* Decides whether a layer commit can go to the output right now, or must be
 * queued on pending_commit_handler_list until the next vblank.  Returns
 * non-zero (commit now) when commit-per-vblank is disabled, and otherwise
 * applies the policy described above.
 * NOTE(review): the "return 0;" lines of the two refusal branches and the
 * final "return 1;" are elided in this listing; code kept byte-identical. */
877 _tdm_layer_commit_possible(tdm_private_layer *private_layer)
879 tdm_private_output *private_output = private_layer->private_output;
881 TDM_RETURN_VAL_IF_FAIL(private_output->commit_per_vblank > 0, 1);
883 /* There is a previous commit request which is not done and displayed on screen yet.
884 * We can't commit at this time.
886 if (!LIST_IS_EMPTY(&private_output->layer_commit_handler_list)) {
887 if (tdm_debug_module & TDM_DEBUG_COMMIT)
888 TDM_INFO("layer(%p,%d) commit: not possible(previous commit)",
889 private_layer, private_layer->index);
893 if (private_output->commit_per_vblank == 1 && _tdm_layer_get_output_used_layer_count(private_output) > 1) {
894 if (tdm_debug_module & TDM_DEBUG_COMMIT)
895 TDM_INFO("layer(%p,%d) commit: not possible(more than 2 layers)",
896 private_layer, private_layer->index);
900 if (tdm_debug_module & TDM_DEBUG_COMMIT)
901 TDM_INFO("layer(%p,%d) commit: possible", private_layer, private_layer->index);
/* Discards the layer's pending info and pending buffer without applying
 * them: clears the changed flags, zeroes pending_info, and drops the tbm
 * reference held on pending_buffer. */
907 _tdm_layer_reset_pending_data(tdm_private_layer *private_layer)
909 private_layer->pending_info_changed = 0;
910 memset(&private_layer->pending_info, 0, sizeof private_layer->pending_info);
912 private_layer->pending_buffer_changed = 0;
913 if (private_layer->pending_buffer) {
914 tbm_surface_internal_unref(private_layer->pending_buffer);
915 private_layer->pending_buffer = NULL;
/* Applies the deferred state recorded by tdm_layer_set_info/set_buffer to
 * the backend: pushes pending_info via layer_set_info, then pending_buffer
 * via layer_set_buffer.  On success the buffer becomes the layer's
 * waiting_buffer (a backend ref is taken); finally the pending slots are
 * reset.  Caller must hold the display lock.
 * NOTE(review): closing braces, the 'done:' label and "return ret;" are
 * elided in this listing; code kept byte-identical. */
920 tdm_layer_commit_pending_data(tdm_private_layer *private_layer)
922 tdm_private_module *private_module;
923 tdm_func_layer *func_layer;
924 tdm_error ret = TDM_ERROR_NONE;
926 private_module = private_layer->private_module;
927 func_layer = &private_module->func_layer;
929 if (private_layer->pending_info_changed) {
930 ret = func_layer->layer_set_info(private_layer->layer_backend, &private_layer->pending_info);
931 TDM_GOTO_IF_FAIL(ret == TDM_ERROR_NONE, done);
934 if (private_layer->pending_buffer_changed) {
935 tdm_private_layer_buffer *layer_buffer;
937 layer_buffer = calloc(1, sizeof(tdm_private_layer_buffer));
938 TDM_GOTO_IF_FAIL(layer_buffer != NULL, done);
940 LIST_INITHEAD(&layer_buffer->link);
942 ret = func_layer->layer_set_buffer(private_layer->layer_backend, private_layer->pending_buffer);
943 TDM_WARNING_IF_FAIL(ret == TDM_ERROR_NONE);
945 if (tdm_debug_module & TDM_DEBUG_BUFFER)
946 TDM_INFO("layer(%p,%d) pending_buffer(%p) committed",
947 private_layer, private_layer->index, private_layer->pending_buffer);
/* On success, the buffer replaces any stale waiting_buffer; on failure the
 * freshly allocated tracking node is released. */
949 if (ret == TDM_ERROR_NONE) {
950 if (private_layer->waiting_buffer)
951 tdm_layer_free_buffer(private_layer, private_layer->waiting_buffer);
953 private_layer->waiting_buffer = layer_buffer;
954 private_layer->waiting_buffer->buffer = tdm_buffer_ref_backend(private_layer->pending_buffer);
955 if (tdm_debug_module & TDM_DEBUG_BUFFER)
956 TDM_INFO("layer(%p,%d) waiting_buffer(%p)",
957 private_layer, private_layer->index, private_layer->waiting_buffer->buffer);
959 tdm_layer_free_buffer(private_layer, layer_buffer);
963 _tdm_layer_reset_pending_data(private_layer);
968 /* CAUTION: Once _tdm_layer_commit returns success, the layer commit handler MUST be called always.
969 * That is, even if we get error in _tdm_layer_got_output_vblank() function for some reasons,
970 * the layer commit handler MUST be called.
/* Core commit: allocates a commit handler carrying the waiting buffer and
 * the user callback, then either commits the output immediately (no
 * commit-per-vblank, or the policy in _tdm_layer_commit_possible allows it)
 * or queues the handler until the next vblank.  Also lazily creates the
 * output's tdm_vblank object and arms the vblank wait.  Caller must hold the
 * display lock (the LAYER_FUNC_ENTRY prologue and several closing braces /
 * return statements are elided in this listing; code kept byte-identical). */
973 _tdm_layer_commit(tdm_layer *layer, tdm_layer_commit_handler func, void *user_data)
975 tdm_private_layer_commit_handler *layer_commit_handler;
978 layer_commit_handler = calloc(1, sizeof(tdm_private_layer_commit_handler));
979 if (!layer_commit_handler) {
980 /* LCOV_EXCL_START */
981 TDM_ERR("failed: alloc memory");
982 return TDM_ERROR_OUT_OF_MEMORY;
986 if (tdm_debug_module & TDM_DEBUG_COMMIT)
987 TDM_INFO("layer(%p,%d) commit: handle(%p)", private_layer, private_layer->index, layer_commit_handler);
989 LIST_INITHEAD(&layer_commit_handler->link);
990 layer_commit_handler->private_layer = private_layer;
991 layer_commit_handler->func = func;
992 layer_commit_handler->user_data = user_data;
/* The handler takes ownership of the waiting buffer for this commit. */
994 layer_commit_handler->committed_buffer = private_layer->waiting_buffer;
995 private_layer->waiting_buffer = NULL;
997 if (!private_layer->committing && layer_commit_handler->committed_buffer)
998 private_layer->commiting_buffer = layer_commit_handler->committed_buffer->buffer;
1000 if (private_layer->committing)
1001 TDM_WRN("layer(%p,%d) too many commit", private_layer, private_layer->index);
1003 private_layer->committing = 1;
1005 if (tdm_debug_module & TDM_DEBUG_BUFFER)
1006 TDM_INFO("layer(%p,%d) waiting_buffer(%p) committed_buffer(%p)",
1007 private_layer, private_layer->index, private_layer->waiting_buffer,
1008 (layer_commit_handler->committed_buffer) ? layer_commit_handler->committed_buffer->buffer : NULL);
/* Path 1: commit-per-vblank disabled — commit the output right away and let
 * _tdm_layer_cb_output_commit complete the handler. */
1010 if (!private_output->commit_per_vblank) {
1011 TDM_GOTO_IF_FAIL(private_output->commit_type == TDM_COMMIT_TYPE_OUTPUT, commit_failed);
1013 LIST_ADDTAIL(&layer_commit_handler->link, &private_output->layer_commit_handler_list);
1014 ret = tdm_output_commit_internal(private_layer->private_output, 0, _tdm_layer_cb_output_commit, layer_commit_handler);
1015 TDM_GOTO_IF_FAIL(ret == TDM_ERROR_NONE, commit_failed);
1017 if (tdm_debug_module & TDM_DEBUG_COMMIT)
1018 TDM_INFO("layer(%p,%d) commit: no commit-per-vblank", private_layer, private_layer->index);
1020 TDM_GOTO_IF_FAIL(private_output->commit_type == TDM_COMMIT_TYPE_LAYER, commit_failed);
/* Path 2: commit-per-vblank — commit now if policy allows, else defer. */
1022 if (_tdm_layer_commit_possible(private_layer)) {
1023 /* add to layer_commit_handler_list */
1024 LIST_ADDTAIL(&layer_commit_handler->link, &private_output->layer_commit_handler_list);
1025 ret = tdm_output_commit_internal(private_layer->private_output, 0, _tdm_layer_cb_output_commit, layer_commit_handler);
1026 TDM_GOTO_IF_FAIL(ret == TDM_ERROR_NONE, commit_failed);
1028 if (tdm_debug_module & TDM_DEBUG_COMMIT)
1029 TDM_INFO("layer(%p,%d) commit: output", private_layer, private_layer->index);
1031 /* add to pending_commit_handler_list. It will be commited when a vblank occurs */
1032 LIST_ADDTAIL(&layer_commit_handler->link, &private_output->pending_commit_handler_list);
1034 if (tdm_debug_module & TDM_DEBUG_COMMIT)
1035 TDM_INFO("layer(%p,%d) commit: pending", private_layer, private_layer->index);
/* Lazily create the per-output vblank object on first use. */
1038 if (!private_output->vblank) {
1039 /* tdm_vblank APIs are server-side and must be called with the lock released */
1040 _pthread_mutex_unlock(&private_display->lock);
1041 private_output->vblank = tdm_vblank_create(private_display, private_output, NULL);
1042 _pthread_mutex_lock(&private_display->lock);
1043 TDM_GOTO_IF_FAIL(private_output->vblank != NULL, commit_failed);
1046 if (!private_output->layer_waiting_vblank) {
1047 /* tdm_vblank APIs are server-side and must be called with the lock released */
1048 _pthread_mutex_unlock(&private_display->lock);
1049 ret = tdm_vblank_wait(private_output->vblank, 0, 0, 1, _tdm_layer_cb_wait_vblank, private_output);
1050 _pthread_mutex_lock(&private_display->lock);
1051 if (ret != TDM_ERROR_NONE) {
1052 if (TDM_OUTPUT_DPMS_VSYNC_IS_OFF(private_output->current_dpms_value)) {
1053 /* dpms off : the allocated memory was free in tdm_output_commit_internal */
1059 private_output->layer_waiting_vblank = 1;
1061 if (tdm_debug_module & TDM_DEBUG_COMMIT)
1062 TDM_INFO("layer(%p,%d) commit: wait vblank", private_layer, private_layer->index);
/* commit_failed: undo the ownership transfer so the caller's waiting buffer
 * is not leaked, then free the handler. */
1069 if (layer_commit_handler) {
1070 private_layer->waiting_buffer = layer_commit_handler->committed_buffer;
1071 LIST_DEL(&layer_commit_handler->link);
1072 free(layer_commit_handler);
/* Commit this layer's pending state to the output.
 *
 * - Latches the output's commit type on the first commit (OUTPUT vs LAYER,
 *   depending on commit_per_vblank).
 * - Refuses to commit while DPMS keeps vsync off (a commit needs a vblank).
 * - Flushes pending per-layer data, then performs the actual commit via
 *   _tdm_layer_commit(); func/user_data are forwarded as the completion
 *   callback.
 * Returns TDM_ERROR_NONE, TDM_ERROR_DPMS_OFF, or an error from the helpers.
 * NOTE(review): callers such as tdm_layer_commit() take
 * private_display->lock before calling this — presumably the display lock
 * must be held here; confirm against the rest of the file.
 */
tdm_layer_commit_internal(tdm_private_layer *private_layer, tdm_layer_commit_handler func, void *user_data)
tdm_private_output *private_output = private_layer->private_output;
tdm_error ret = TDM_ERROR_NONE; /* default TDM_ERROR_NONE */
/* First commit decides the commit granularity for this output:
 * without commit-per-vblank the whole output commits at once,
 * otherwise each layer commits on its own vblank. */
if (private_output->commit_type == TDM_COMMIT_TYPE_NONE) {
if (!private_output->commit_per_vblank)
private_output->commit_type = TDM_COMMIT_TYPE_OUTPUT;
/* NOTE(review): an 'else' line appears to be elided from this dump
 * between the two assignments — verify against the full source. */
private_output->commit_type = TDM_COMMIT_TYPE_LAYER;
/* DPMS off (no vsync) means no vblank will ever arrive: bail out early. */
if (TDM_OUTPUT_DPMS_VSYNC_IS_OFF(private_output->current_dpms_value)) {
TDM_ERR("layer(%p,%d)'s output(%d) dpms: %s", private_layer, private_layer->index, private_output->pipe,
tdm_dpms_str(private_output->current_dpms_value));
return TDM_ERROR_DPMS_OFF;
/* don't call this inside of _tdm_layer_commit */
ret = tdm_layer_commit_pending_data(private_layer);
if (ret != TDM_ERROR_NONE) {
TDM_ERR("layer(%p,%d) committing pending data failed", private_layer, private_layer->index);
ret = _tdm_layer_commit(private_layer, func, user_data);
/* Public API: commit a layer under the display lock.
 * func/user_data: optional completion callback forwarded to
 * tdm_layer_commit_internal().
 * NOTE(review): the LAYER_FUNC_ENTRY() macro invocation (see top of file)
 * that declares private_display/private_layer/ret appears elided from this
 * dump between the signature and the lock. */
tdm_layer_commit(tdm_layer *layer, tdm_layer_commit_handler func, void *user_data)
_pthread_mutex_lock(&private_display->lock);
OUTPUT_HWC_CAP_CHECK(); /* presumably rejects layer API on HWC-capable outputs — macro body not visible here */
ret = tdm_layer_commit_internal(private_layer, func, user_data);
_pthread_mutex_unlock(&private_display->lock);
/* Public API: report whether this layer currently has a commit in flight.
 * committing: out parameter; receives private_layer->committing.
 * Returns TDM_ERROR_INVALID_PARAMETER when committing is NULL. */
tdm_layer_is_committing(tdm_layer *layer, unsigned int *committing)
TDM_RETURN_VAL_IF_FAIL(committing != NULL, TDM_ERROR_INVALID_PARAMETER);
_pthread_mutex_lock(&private_display->lock);
OUTPUT_HWC_CAP_CHECK(); /* presumably rejects layer API on HWC-capable outputs — macro body not visible here */
/* Snapshot taken under the display lock. */
*committing = private_layer->committing;
_pthread_mutex_unlock(&private_display->lock);
1143 tdm_layer_remove_commit_handler_internal(tdm_layer *layer, tdm_layer_commit_handler func, void *user_data)
1145 tdm_private_layer *private_layer = (tdm_private_layer*)layer;
1146 tdm_private_output *private_output = private_layer->private_output;
1147 tdm_private_layer_commit_handler *lm = NULL, *lmm = NULL;
1149 if (!func && !user_data)
1152 TDM_RETURN_IF_FAIL(private_layer != NULL);
1153 TDM_RETURN_IF_FAIL(TDM_MUTEX_IS_LOCKED());
1155 LIST_FOR_EACH_ENTRY_SAFE(lm, lmm, &private_output->layer_commit_handler_list, link) {
1156 if (lm->func == func && lm->user_data == user_data) {
1157 LIST_DEL(&lm->link);
1158 tdm_output_remove_commit_handler_internal(private_output, _tdm_layer_cb_output_commit, lm);
1159 tdm_layer_free_buffer(private_layer, lm->committed_buffer);
1165 LIST_FOR_EACH_ENTRY_SAFE(lm, lmm, &private_output->pending_commit_handler_list, link) {
1166 if (lm->func == func && lm->user_data == user_data) {
1167 LIST_DEL(&lm->link);
1168 tdm_output_remove_commit_handler_internal(private_output, _tdm_layer_cb_output_commit, lm);
1169 tdm_layer_free_buffer(private_layer, lm->committed_buffer);
/* Public API: remove a layer commit handler under the display lock.
 * Thin locked wrapper around tdm_layer_remove_commit_handler_internal(). */
tdm_layer_remove_commit_handler(tdm_layer *layer, tdm_layer_commit_handler func, void *user_data)
_pthread_mutex_lock(&private_display->lock);
OUTPUT_HWC_CAP_CHECK(); /* presumably rejects layer API on HWC-capable outputs — macro body not visible here */
tdm_layer_remove_commit_handler_internal(layer, func, user_data);
_pthread_mutex_unlock(&private_display->lock);
/* Public API: return the tbm_surface currently on screen for this layer,
 * or NULL when nothing is showing yet.
 * error: optional out parameter receiving a tdm_error code.
 * NOTE(review): the unguarded '*error =' below is presumably preceded by an
 * 'if (error)' on a line elided from this dump — confirm before relying on
 * error being optional. */
EXTERN tbm_surface_h
tdm_layer_get_displaying_buffer(tdm_layer *layer, tdm_error *error)
tbm_surface_h buffer;
LAYER_FUNC_ENTRY_ERROR();
_pthread_mutex_lock(&private_display->lock);
OUTPUT_HWC_CAP_CHECK_ERROR();
*error = TDM_ERROR_NONE;
/* showing_buffer tracks the buffer the backend last displayed. */
if (private_layer->showing_buffer) {
buffer = private_layer->showing_buffer->buffer;
_pthread_mutex_unlock(&private_display->lock);
TDM_DBG("layer(%p,%d) showing_buffer is null", private_layer, private_layer->index);
_pthread_mutex_unlock(&private_display->lock);
/* tbm_surface_queue "acquirable" callback, registered by
 * tdm_layer_set_buffer_queue(): fired when a client buffer becomes ready in
 * the queue. Acquires the buffer, sets it on the layer and commits, so the
 * queue drives the layer automatically.
 * data: the tdm_layer this queue was attached to. */
_tbm_layer_queue_acquirable_cb(tbm_surface_queue_h surface_queue, void *data)
TDM_RETURN_IF_FAIL(data != NULL);
tdm_layer *layer = data;
tdm_private_module *private_module;
tdm_func_layer *func_layer;
tbm_surface_h buffer = NULL;
LAYER_FUNC_ENTRY_VOID_RETURN();
_pthread_mutex_lock(&private_display->lock);
private_module = private_layer->private_module;
func_layer = &private_module->func_layer;
/* Bail out when the backend cannot take a buffer at all. */
if (!func_layer->layer_set_buffer) {
/* LCOV_EXCL_START */
_pthread_mutex_unlock(&private_display->lock);
/* LCOV_EXCL_STOP */
/* NOTE(review): the '||' condition is truncated in this dump — the second
 * operand (presumably 'buffer == NULL') is on an elided line. */
if (TBM_SURFACE_QUEUE_ERROR_NONE != tbm_surface_queue_acquire(private_layer->buffer_queue, &buffer) ||
/* LCOV_EXCL_START */
TDM_ERR("layer(%p,%d) tbm_surface_queue_acquire() failed surface:%p",
private_layer, private_layer->index, buffer);
_pthread_mutex_unlock(&private_display->lock);
/* LCOV_EXCL_STOP */
/* Hand the acquired buffer to the layer, then commit it. */
ret = tdm_layer_set_buffer_internal(private_layer, buffer);
if (ret != TDM_ERROR_NONE) {
TDM_ERR("tdm_layer_set_buffer_internal failed");
_pthread_mutex_unlock(&private_display->lock);
ret = tdm_layer_commit_internal(private_layer, NULL, NULL);
if (ret != TDM_ERROR_NONE) {
TDM_ERR("tdm_layer_commit_internal failed");
_pthread_mutex_unlock(&private_display->lock);
_pthread_mutex_unlock(&private_display->lock);
/* tbm_surface_queue "destroy" callback, registered by
 * tdm_layer_set_buffer_queue(): the queue is going away, so drop the
 * layer's reference to it and release every buffer the layer still holds.
 * data: the tdm_layer this queue was attached to. */
_tbm_layer_queue_destroy_cb(tbm_surface_queue_h surface_queue, void *data)
TDM_RETURN_IF_FAIL(data != NULL);
tdm_layer *layer = data;
LAYER_FUNC_ENTRY_VOID_RETURN();
TDM_RETURN_IF_FAIL(ret == TDM_ERROR_NONE);
_pthread_mutex_lock(&private_display->lock);
/* Forget the queue first so no new acquire can start on it. */
private_layer->buffer_queue = NULL;
_tdm_layer_free_all_buffers(private_layer);
_pthread_mutex_unlock(&private_display->lock);
/* Public API: attach a tbm_surface_queue to this layer. From then on the
 * queue feeds the layer automatically via _tbm_layer_queue_acquirable_cb,
 * and _tbm_layer_queue_destroy_cb cleans up when the queue dies.
 * Marks the layer as no longer "usable" (i.e. no longer free for grabbing).
 * Returns TDM_ERROR_NONE, TDM_ERROR_INVALID_PARAMETER or
 * TDM_ERROR_NOT_IMPLEMENTED. */
tdm_layer_set_buffer_queue(tdm_layer *layer, tbm_surface_queue_h buffer_queue)
tdm_private_module *private_module;
tdm_func_layer *func_layer;
TDM_RETURN_VAL_IF_FAIL(buffer_queue != NULL, TDM_ERROR_INVALID_PARAMETER);
_pthread_mutex_lock(&private_display->lock);
OUTPUT_HWC_CAP_CHECK(); /* presumably rejects layer API on HWC-capable outputs — macro body not visible here */
private_module = private_layer->private_module;
func_layer = &private_module->func_layer;
/* Log the usable->not-usable transition before clearing the flag. */
if (private_layer->usable)
TDM_INFO("layer(%p,%d) not usable", private_layer, private_layer->index);
private_layer->usable = 0;
if (!func_layer->layer_set_buffer) {
/* LCOV_EXCL_START */
_pthread_mutex_unlock(&private_display->lock);
TDM_ERR("not implemented!!");
return TDM_ERROR_NOT_IMPLEMENTED;
/* LCOV_EXCL_STOP */
/* Same queue already attached: nothing to do. */
if (buffer_queue == private_layer->buffer_queue) {
_pthread_mutex_unlock(&private_display->lock);
return TDM_ERROR_NONE;
/* Drop any buffer still waiting from the previous source. */
if (private_layer->waiting_buffer) {
tdm_layer_free_buffer(private_layer, private_layer->waiting_buffer);
private_layer->waiting_buffer = NULL;
if (tdm_debug_module & TDM_DEBUG_BUFFER)
TDM_INFO("layer(%p,%d) waiting_buffer(%p)",
private_layer, private_layer->index, private_layer->waiting_buffer);
private_layer->buffer_queue = buffer_queue;
/* NOTE(review): the final 'layer' argument of both add-callback calls is
 * on lines elided from this dump. */
tbm_surface_queue_add_acquirable_cb(private_layer->buffer_queue,
_tbm_layer_queue_acquirable_cb,
tbm_surface_queue_add_destroy_cb(private_layer->buffer_queue,
_tbm_layer_queue_destroy_cb,
_pthread_mutex_unlock(&private_display->lock);
/* Public API: detach the buffer queue from the layer.
 * Delegates to tdm_layer_unset_buffer(), which performs the actual reset. */
tdm_layer_unset_buffer_queue(tdm_layer *layer)
return tdm_layer_unset_buffer(layer);
/* Public API: report whether this layer is free to be grabbed by a client.
 * usable: out parameter; receives private_layer->usable.
 * Returns TDM_ERROR_INVALID_PARAMETER when usable is NULL. */
tdm_layer_is_usable(tdm_layer *layer, unsigned int *usable)
TDM_RETURN_VAL_IF_FAIL(usable != NULL, TDM_ERROR_INVALID_PARAMETER);
_pthread_mutex_lock(&private_display->lock);
OUTPUT_HWC_CAP_CHECK(); /* presumably rejects layer API on HWC-capable outputs — macro body not visible here */
/* Snapshot taken under the display lock. */
*usable = private_layer->usable;
_pthread_mutex_unlock(&private_display->lock);
/* Public API: set the z-position of a VIDEO-capable layer via the backend.
 * zpos: requested z-order value, passed through to the backend.
 * Returns TDM_ERROR_BAD_REQUEST when the layer lacks the VIDEO capability,
 * TDM_ERROR_NOT_IMPLEMENTED when the backend has no layer_set_video_pos,
 * otherwise the backend's result. */
tdm_layer_set_video_pos(tdm_layer *layer, int zpos)
tdm_private_module *private_module;
tdm_func_layer *func_layer;
_pthread_mutex_lock(&private_display->lock);
OUTPUT_HWC_CAP_CHECK(); /* presumably rejects layer API on HWC-capable outputs — macro body not visible here */
private_module = private_layer->private_module;
func_layer = &private_module->func_layer;
/* Only video layers may have their position changed through this call. */
if (!(private_layer->caps.capabilities & TDM_LAYER_CAPABILITY_VIDEO)) {
TDM_ERR("layer(%p,%d) is not video layer", private_layer, private_layer->index);
_pthread_mutex_unlock(&private_display->lock);
return TDM_ERROR_BAD_REQUEST;
if (!func_layer->layer_set_video_pos) {
/* LCOV_EXCL_START */
_pthread_mutex_unlock(&private_display->lock);
TDM_ERR("layer(%p,%d) not implemented!!", private_layer, private_layer->index);
return TDM_ERROR_NOT_IMPLEMENTED;
/* LCOV_EXCL_STOP */
ret = func_layer->layer_set_video_pos(private_layer->layer_backend, zpos);
_pthread_mutex_unlock(&private_display->lock);
/* Public API: create a capture object bound to this layer, so its content
 * can be captured. Ownership of the returned tdm_capture transfers to the
 * caller; NULL on failure with the reason stored in *error (when non-NULL,
 * per tdm_capture_create_layer_internal's contract — not visible here). */
EXTERN tdm_capture *
tdm_layer_create_capture(tdm_layer *layer, tdm_error *error)
tdm_capture *capture = NULL;
LAYER_FUNC_ENTRY_ERROR();
_pthread_mutex_lock(&private_display->lock);
OUTPUT_HWC_CAP_CHECK_ERROR();
capture = (tdm_capture *)tdm_capture_create_layer_internal(private_layer, error);
_pthread_mutex_unlock(&private_display->lock);
1416 tdm_layer_get_buffer_flags(tdm_layer *layer, unsigned int *flags)
1418 tdm_private_module *private_module;
1419 tdm_func_layer *func_layer;
1421 TDM_RETURN_VAL_IF_FAIL(flags != NULL, TDM_ERROR_INVALID_PARAMETER);
1423 _pthread_mutex_lock(&private_display->lock);
1425 OUTPUT_HWC_CAP_CHECK();
1427 private_module = private_layer->private_module;
1428 func_layer = &private_module->func_layer;
1430 if (!func_layer->layer_get_buffer_flags) {
1431 /* LCOV_EXCL_START */
1433 _pthread_mutex_unlock(&private_display->lock);
1434 TDM_INFO("not implemented!!");
1435 return TDM_ERROR_NONE;
1436 /* LCOV_EXCL_STOP */
1439 ret = func_layer->layer_get_buffer_flags(private_layer->layer_backend, flags);
1441 _pthread_mutex_unlock(&private_display->lock);