1 /**************************************************************************
5 * Copyright 2015 Samsung Electronics co., Ltd. All Rights Reserved.
7 * Contact: Eunchul Kim <chulspro.kim@samsung.com>,
8 * JinYoung Jeon <jy0.jeon@samsung.com>,
9 * Taeheon Kim <th908.kim@samsung.com>,
10 * YoungJun Cho <yj44.cho@samsung.com>,
11 * SooChan Lim <sc1.lim@samsung.com>,
12 * Boram Park <sc1.lim@samsung.com>
14 * Permission is hereby granted, free of charge, to any person obtaining a
15 * copy of this software and associated documentation files (the
16 * "Software"), to deal in the Software without restriction, including
17 * without limitation the rights to use, copy, modify, merge, publish,
18 * distribute, sub license, and/or sell copies of the Software, and to
19 * permit persons to whom the Software is furnished to do so, subject to
20 * the following conditions:
22 * The above copyright notice and this permission notice (including the
23 * next paragraph) shall be included in all copies or substantial portions
26 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
27 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
28 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
29 * IN NO EVENT SHALL PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR
30 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
31 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
32 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
34 **************************************************************************/
40 #include "tdm_private.h"
/* Common prologue for public layer entry points returning a tdm_error:
 * validates 'layer' and derives the private display/output/layer handles.
 * Expands to declarations, so it must appear before any statement. */
44 #define LAYER_FUNC_ENTRY() \
45 tdm_private_display *private_display; \
46 tdm_private_output *private_output; \
47 tdm_private_layer *private_layer; \
48 tdm_error ret = TDM_ERROR_NONE; /* default TDM_ERROR_NONE */\
49 TDM_RETURN_VAL_IF_FAIL(layer != NULL, TDM_ERROR_INVALID_PARAMETER); \
50 private_layer = (tdm_private_layer*)layer; \
51 private_output = private_layer->private_output; \
52 private_display = private_output->private_display
/* Prologue variant for entry points that return a pointer and report the
 * tdm_error through an out-parameter: on a NULL 'layer' it stores
 * TDM_ERROR_INVALID_PARAMETER via the macro and returns NULL. */
54 #define LAYER_FUNC_ENTRY_ERROR() \
55 tdm_private_display *private_display; \
56 tdm_private_output *private_output; \
57 tdm_private_layer *private_layer; \
58 tdm_error ret = TDM_ERROR_NONE; /* default TDM_ERROR_NONE */\
59 TDM_RETURN_VAL_IF_FAIL_WITH_ERROR(layer != NULL, TDM_ERROR_INVALID_PARAMETER, NULL); \
60 private_layer = (tdm_private_layer*)layer; \
61 private_output = private_layer->private_output; \
62 private_display = private_output->private_display
/* Prologue variant for void entry points (callbacks): on a NULL 'layer'
 * it simply returns. Note 'ret' is still declared for later use. */
64 #define LAYER_FUNC_ENTRY_VOID_RETURN() \
65 tdm_private_display *private_display; \
66 tdm_private_output *private_output; \
67 tdm_private_layer *private_layer; \
68 tdm_error ret = TDM_ERROR_NONE; /* default TDM_ERROR_NONE */\
69 TDM_RETURN_IF_FAIL(layer != NULL); \
70 private_layer = (tdm_private_layer*)layer; \
71 private_output = private_layer->private_output; \
72 private_display = private_output->private_display
/* Forward declarations for internal callbacks referenced before their
 * definitions later in this file. */
74 static void _tdm_layer_cb_wait_vblank(tdm_vblank *vblank, tdm_error error, unsigned int sequence,
75 unsigned int tv_sec, unsigned int tv_usec, void *user_data);
76 static void _tbm_layer_queue_acquirable_cb(tbm_surface_queue_h surface_queue, void *data);
77 static void _tbm_layer_queue_destroy_cb(tbm_surface_queue_h surface_queue, void *data);
78 static void _tdm_layer_cb_output_commit(tdm_output *output, unsigned int sequence,
79 unsigned int tv_sec, unsigned int tv_usec, void *user_data);
80 static void _tdm_layer_reset_pending_data(tdm_private_layer *private_layer);
/* Returns the tdm_output that owns 'layer'. On success '*error' (when
 * provided) is set to TDM_ERROR_NONE. Locks the display while reading. */
83 tdm_layer_get_output(tdm_layer *layer, tdm_error *error)
87 LAYER_FUNC_ENTRY_ERROR();
89 _pthread_mutex_lock(&private_display->lock);
92 *error = TDM_ERROR_NONE;
94 output = private_layer->private_output;
96 _pthread_mutex_unlock(&private_display->lock);
/* Copies the layer's capability flags (cached in caps at probe time) into
 * '*capabilities'. Fails with TDM_ERROR_INVALID_PARAMETER on NULL output arg. */
102 tdm_layer_get_capabilities(tdm_layer *layer, tdm_layer_capability *capabilities)
106 TDM_RETURN_VAL_IF_FAIL(capabilities != NULL, TDM_ERROR_INVALID_PARAMETER);
108 _pthread_mutex_lock(&private_display->lock);
110 *capabilities = private_layer->caps.capabilities;
112 _pthread_mutex_unlock(&private_display->lock);
/* Exposes the backend-provided format array. The returned pointer aliases
 * internal storage (caps.formats) — callers must not modify or free it. */
118 tdm_layer_get_available_formats(tdm_layer *layer, const tbm_format **formats, int *count)
122 TDM_RETURN_VAL_IF_FAIL(formats != NULL, TDM_ERROR_INVALID_PARAMETER);
123 TDM_RETURN_VAL_IF_FAIL(count != NULL, TDM_ERROR_INVALID_PARAMETER);
125 _pthread_mutex_lock(&private_display->lock);
127 *formats = (const tbm_format *)private_layer->caps.formats;
128 *count = private_layer->caps.format_count;
130 _pthread_mutex_unlock(&private_display->lock);
/* Exposes the backend-provided property list. As with formats, the returned
 * pointer aliases internal storage (caps.props) — read-only for callers. */
136 tdm_layer_get_available_properties(tdm_layer *layer, const tdm_prop **props, int *count)
140 TDM_RETURN_VAL_IF_FAIL(props != NULL, TDM_ERROR_INVALID_PARAMETER);
141 TDM_RETURN_VAL_IF_FAIL(count != NULL, TDM_ERROR_INVALID_PARAMETER);
143 _pthread_mutex_lock(&private_display->lock);
145 *props = (const tdm_prop *)private_layer->caps.props;
146 *count = private_layer->caps.prop_count;
148 _pthread_mutex_unlock(&private_display->lock);
/* Reads the layer's stacking position (z-order) from the cached caps. */
154 tdm_layer_get_zpos(tdm_layer *layer, int *zpos)
158 TDM_RETURN_VAL_IF_FAIL(zpos != NULL, TDM_ERROR_INVALID_PARAMETER);
160 _pthread_mutex_lock(&private_display->lock);
162 *zpos = private_layer->caps.zpos;
164 _pthread_mutex_unlock(&private_display->lock);
/* Sets a backend property on the layer. Touching a layer claims it:
 * 'usable' is cleared so the layer is no longer offered as free.
 * Returns TDM_ERROR_NOT_IMPLEMENTED if the backend lacks layer_set_property. */
170 tdm_layer_set_property(tdm_layer *layer, unsigned int id, tdm_value value)
172 tdm_private_module *private_module;
173 tdm_func_layer *func_layer;
176 _pthread_mutex_lock(&private_display->lock);
178 private_module = private_layer->private_module;
179 func_layer = &private_module->func_layer;
/* log the transition from usable -> in-use (message describes the new state) */
181 if (private_layer->usable)
182 TDM_INFO("layer(%d) not usable", private_layer->index);
184 private_layer->usable = 0;
186 if (!func_layer->layer_set_property) {
187 /* LCOV_EXCL_START */
188 _pthread_mutex_unlock(&private_display->lock);
189 TDM_ERR("not implemented!!");
190 return TDM_ERROR_NOT_IMPLEMENTED;
/* delegate to the backend implementation */
194 ret = func_layer->layer_set_property(private_layer->layer_backend, id, value);
196 _pthread_mutex_unlock(&private_display->lock);
/* Reads a backend property value into '*value'. Unlike set_property this
 * does not claim the layer (usable is left untouched). */
202 tdm_layer_get_property(tdm_layer *layer, unsigned int id, tdm_value *value)
204 tdm_private_module *private_module;
205 tdm_func_layer *func_layer;
208 TDM_RETURN_VAL_IF_FAIL(value != NULL, TDM_ERROR_INVALID_PARAMETER);
210 _pthread_mutex_lock(&private_display->lock);
212 private_module = private_layer->private_module;
213 func_layer = &private_module->func_layer;
215 if (!func_layer->layer_get_property) {
216 /* LCOV_EXCL_START */
217 _pthread_mutex_unlock(&private_display->lock);
218 TDM_ERR("not implemented!!");
219 return TDM_ERROR_NOT_IMPLEMENTED;
223 ret = func_layer->layer_get_property(private_layer->layer_backend, id, value);
225 _pthread_mutex_unlock(&private_display->lock);
/* Internal (lock already held) variant of tdm_layer_set_info: records the
 * geometry/format info as *pending* state; it is pushed to the backend later
 * by tdm_layer_commit_pending_data(). Claims the layer (usable = 0). */
231 tdm_layer_set_info_internal(tdm_private_layer *private_layer, tdm_info_layer *info)
233 tdm_private_module *private_module;
234 tdm_func_layer *func_layer;
237 private_module = private_layer->private_module;
238 func_layer = &private_module->func_layer;
240 if (private_layer->usable)
241 TDM_INFO("layer(%p) not usable", private_layer);
243 private_layer->usable = 0;
/* bail out early so a commit never hits a missing backend hook */
245 if (!func_layer->layer_set_info) {
246 /* LCOV_EXCL_START */
247 TDM_ERR("not implemented!!");
248 return TDM_ERROR_NOT_IMPLEMENTED;
/* render the FOURCC source format as a 4-char string for logging */
252 if (info->src_config.format)
253 snprintf(fmtstr, 128, "%c%c%c%c", FOURCC_STR(info->src_config.format));
255 snprintf(fmtstr, 128, "NONE");
257 TDM_INFO("layer(%p) info: src(%ux%u %u,%u %ux%u %s) dst(%u,%u %ux%u) trans(%d)",
258 private_layer, info->src_config.size.h, info->src_config.size.v,
259 info->src_config.pos.x, info->src_config.pos.y,
260 info->src_config.pos.w, info->src_config.pos.h,
262 info->dst_pos.x, info->dst_pos.y,
263 info->dst_pos.w, info->dst_pos.h,
/* stash as pending; applied on the next commit */
266 private_layer->pending_info_changed = 1;
267 private_layer->pending_info = *info;
269 return TDM_ERROR_NONE;
/* Public wrapper: takes the display lock and delegates to
 * tdm_layer_set_info_internal(). */
273 tdm_layer_set_info(tdm_layer *layer, tdm_info_layer *info)
277 TDM_RETURN_VAL_IF_FAIL(info != NULL, TDM_ERROR_INVALID_PARAMETER);
279 _pthread_mutex_lock(&private_display->lock);
281 ret = tdm_layer_set_info_internal(private_layer, info);
283 _pthread_mutex_unlock(&private_display->lock);
/* Queries the layer's *current* (backend-side) info — note this reflects what
 * the backend holds, not any still-pending info set via tdm_layer_set_info. */
289 tdm_layer_get_info(tdm_layer *layer, tdm_info_layer *info)
291 tdm_private_module *private_module;
292 tdm_func_layer *func_layer;
295 TDM_RETURN_VAL_IF_FAIL(info != NULL, TDM_ERROR_INVALID_PARAMETER);
297 _pthread_mutex_lock(&private_display->lock);
299 private_module = private_layer->private_module;
300 func_layer = &private_module->func_layer;
302 if (!func_layer->layer_get_info) {
303 /* LCOV_EXCL_START */
304 _pthread_mutex_unlock(&private_display->lock);
305 TDM_ERR("not implemented!!");
306 return TDM_ERROR_NOT_IMPLEMENTED;
310 ret = func_layer->layer_get_info(private_layer->layer_backend, info);
312 _pthread_mutex_unlock(&private_display->lock);
317 /* LCOV_EXCL_START */
/* Debug helper: dumps 'buffer' to a file whose name encodes the output pipe,
 * this layer's zpos, and the showing-buffer pointers of all sibling layers
 * (so a dump can be correlated with what was on screen). Debug-only path. */
319 _tdm_layer_dump_buffer(tdm_layer *layer, tbm_surface_h buffer)
321 tdm_private_layer *private_layer = (tdm_private_layer*)layer;
322 tdm_private_output *private_output = private_layer->private_output;
324 char fname[TDM_PATH_LEN], bufs[TDM_PATH_LEN];
325 int zpos, len = TDM_PATH_LEN;
326 tdm_private_layer *l = NULL;
330 pipe = private_output->pipe;
331 zpos = private_layer->caps.zpos;
/* append "_<ptr>" for every sibling layer currently showing a buffer */
334 LIST_FOR_EACH_ENTRY(l, &private_output->layer_list, link) {
335 if (!l->showing_buffer)
337 TDM_SNPRINTF(p, remain, "_%p", l->showing_buffer->buffer);
340 snprintf(fname, sizeof(fname), "tdm_%d_lyr_%d%s", pipe, zpos, bufs);
342 tbm_surface_internal_dump_buffer(buffer, fname);
343 TDM_DBG("%s dump excute", fname);
/* Releases a tdm_private_layer_buffer: unlinks it, drops the backend buffer
 * reference and, when the layer is queue-backed, returns the surface to the
 * tbm queue. The display lock is dropped around the external tbm/buffer calls
 * to avoid re-entrant deadlocks, then re-acquired. */
350 tdm_layer_free_buffer(tdm_private_layer *private_layer, tdm_private_layer_buffer *layer_buffer)
352 tdm_private_display *private_display;
357 private_display = private_layer->private_output->private_display;
359 LIST_DEL(&layer_buffer->link);
360 if (layer_buffer->buffer) {
361 _pthread_mutex_unlock(&private_display->lock);
362 tdm_buffer_unref_backend(layer_buffer->buffer);
363 if (private_layer->buffer_queue)
364 tbm_surface_queue_release(private_layer->buffer_queue, layer_buffer->buffer);
365 _pthread_mutex_lock(&private_display->lock);
/* Tears down every buffer reference this layer holds: pending data, the
 * waiting/committed/showing buffers, and any commit handlers for this layer
 * still queued on the output (both the active and the pending handler lists).
 * Handlers are first moved to a local clone_list so they can be freed without
 * iterating the output lists while they are being modified.
 * Finally detaches from the tbm surface queue, if any. */
371 _tdm_layer_free_all_buffers(tdm_private_layer *private_layer)
373 tdm_private_output *private_output = private_layer->private_output;
374 tdm_private_layer_commit_handler *lm = NULL, *lmm = NULL;
375 struct list_head clone_list;
377 LIST_INITHEAD(&clone_list);
379 _tdm_layer_reset_pending_data(private_layer);
381 if (private_layer->waiting_buffer) {
382 tdm_layer_free_buffer(private_layer, private_layer->waiting_buffer);
383 private_layer->waiting_buffer = NULL;
/* NOTE(review): logged after the pointer is NULLed, so this prints (nil) */
385 if (tdm_debug_module & TDM_DEBUG_BUFFER)
386 TDM_INFO("layer(%p) waiting_buffer(%p)",
387 private_layer, private_layer->waiting_buffer);
390 if (private_layer->committed_buffer) {
391 tdm_layer_free_buffer(private_layer, private_layer->committed_buffer);
392 private_layer->committed_buffer = NULL;
394 if (tdm_debug_module & TDM_DEBUG_BUFFER)
395 TDM_INFO("layer(%p) committed_buffer(%p)",
396 private_layer, private_layer->committed_buffer);
399 if (private_layer->showing_buffer) {
400 tdm_layer_free_buffer(private_layer, private_layer->showing_buffer);
401 private_layer->showing_buffer = NULL;
403 if (tdm_debug_module & TDM_DEBUG_BUFFER)
404 TDM_INFO("layer(%p) showing_buffer(%p)",
405 private_layer, private_layer->showing_buffer);
/* collect this layer's handlers from the active commit list */
408 LIST_FOR_EACH_ENTRY_SAFE(lm, lmm, &private_output->layer_commit_handler_list, link) {
409 if (lm->private_layer != private_layer)
412 LIST_ADDTAIL(&lm->link, &clone_list);
415 LIST_FOR_EACH_ENTRY_SAFE(lm, lmm, &clone_list, link) {
/* also drop the matching output-level commit handler */
417 tdm_output_remove_commit_handler_internal(private_output, _tdm_layer_cb_output_commit, lm);
418 tdm_layer_free_buffer(private_layer, lm->committed_buffer);
/* same drain for handlers still waiting for a vblank (pending list) */
422 LIST_FOR_EACH_ENTRY_SAFE(lm, lmm, &private_output->pending_commit_handler_list, link) {
423 if (lm->private_layer != private_layer)
426 LIST_ADDTAIL(&lm->link, &clone_list);
429 LIST_FOR_EACH_ENTRY_SAFE(lm, lmm, &clone_list, link) {
431 tdm_layer_free_buffer(private_layer, lm->committed_buffer);
435 if (private_layer->buffer_queue) {
436 tbm_surface_queue_remove_acquirable_cb(private_layer->buffer_queue, _tbm_layer_queue_acquirable_cb, private_layer);
437 tbm_surface_queue_remove_destroy_cb(private_layer->buffer_queue, _tbm_layer_queue_destroy_cb, private_layer);
438 private_layer->buffer_queue = NULL;
/* Internal (lock held) variant of tdm_layer_set_buffer: stages 'buffer' as
 * the layer's *pending* buffer (ref'd), replacing and unref'ing any previous
 * pending buffer that was never committed. The backend is only told about the
 * buffer later, in tdm_layer_commit_pending_data(). Claims the layer. */
443 tdm_layer_set_buffer_internal(tdm_private_layer *private_layer, tbm_surface_h buffer)
445 tdm_private_module *private_module;
446 tdm_private_output *private_output = private_layer->private_output;
447 tdm_func_layer *func_layer;
449 /* LCOV_EXCL_START */
/* optional debug dumping of the incoming buffer (skipped for video layers) */
451 if (tdm_dump_enable && !(private_layer->caps.capabilities & TDM_LAYER_CAPABILITY_VIDEO))
452 _tdm_layer_dump_buffer(private_layer, buffer);
454 if (tdm_debug_dump & TDM_DUMP_FLAG_LAYER &&
455 !(private_layer->caps.capabilities & TDM_LAYER_CAPABILITY_VIDEO)) {
456 char str[TDM_PATH_LEN];
458 snprintf(str, TDM_PATH_LEN, "layer_%d_%d_%03d",
459 private_output->index, private_layer->index, i++);
460 tdm_helper_dump_buffer_str(buffer, tdm_debug_dump_dir, str);
464 private_module = private_layer->private_module;
465 func_layer = &private_module->func_layer;
467 if (private_layer->usable)
468 TDM_INFO("layer(%p) not usable", private_layer);
470 private_layer->usable = 0;
472 if (!func_layer->layer_set_buffer) {
473 /* LCOV_EXCL_START */
474 TDM_ERR("not implemented!!");
475 return TDM_ERROR_NOT_IMPLEMENTED;
476 /* LCOV_EXCL_START */ /* NOTE(review): looks like this should be LCOV_EXCL_STOP */
479 private_layer->pending_buffer_changed = 1;
/* an older pending buffer that never got committed is skipped: close its
 * trace span and drop the ref taken when it was staged */
481 if (private_layer->pending_buffer) {
483 if (tdm_debug_module & TDM_DEBUG_BUFFER)
484 TDM_INFO("layer(%p) pending_buffer(%p) skipped",
485 private_layer, private_layer->pending_buffer);
487 if (tdm_ttrace_module & TDM_TTRACE_LAYER) {
488 tbm_bo bo = tbm_surface_internal_get_bo(private_layer->pending_buffer, 0);
489 TDM_TRACE_ASYNC_END((intptr_t)private_layer, "[LAYER] %d", tbm_bo_export(bo));
492 tbm_surface_internal_unref(private_layer->pending_buffer);
/* stage the new buffer; the ref is held until commit or replacement */
495 tbm_surface_internal_ref(buffer);
496 private_layer->pending_buffer = buffer;
498 if (tdm_debug_module & TDM_DEBUG_BUFFER)
499 TDM_INFO("layer(%p) pending_buffer(%p)",
500 private_layer, private_layer->pending_buffer);
502 if (tdm_ttrace_module & TDM_TTRACE_LAYER) {
503 tbm_bo bo = tbm_surface_internal_get_bo(private_layer->pending_buffer, 0);
504 TDM_TRACE_ASYNC_BEGIN((intptr_t)private_layer, "[LAYER] %d", tbm_bo_export(bo));
507 return TDM_ERROR_NONE;
/* Public wrapper: takes the display lock and delegates to
 * tdm_layer_set_buffer_internal(). */
511 tdm_layer_set_buffer(tdm_layer *layer, tbm_surface_h buffer)
515 TDM_RETURN_VAL_IF_FAIL(buffer != NULL, TDM_ERROR_INVALID_PARAMETER);
517 _pthread_mutex_lock(&private_display->lock);
519 ret = tdm_layer_set_buffer_internal(private_layer, buffer);
521 _pthread_mutex_unlock(&private_display->lock);
/* Internal (lock held) variant of tdm_layer_unset_buffer: frees every buffer
 * and pending commit handler owned by the layer, marks it usable again, and
 * tells the backend to unset its buffer. */
527 tdm_layer_unset_buffer_internal(tdm_private_layer *private_layer)
529 tdm_private_module *private_module;
530 tdm_func_layer *func_layer;
531 tdm_error ret = TDM_ERROR_NONE;
533 private_module = private_layer->private_module;
534 func_layer = &private_module->func_layer;
536 _tdm_layer_free_all_buffers(private_layer);
/* layer is released back to the "free" pool */
538 private_layer->usable = 1;
540 if (private_layer->usable)
541 TDM_INFO("layer(%p) now usable", private_layer);
543 if (!func_layer->layer_unset_buffer) {
544 /* LCOV_EXCL_START */
545 TDM_ERR("not implemented!!");
546 return TDM_ERROR_NOT_IMPLEMENTED;
547 /* LCOV_EXCL_START */ /* NOTE(review): looks like this should be LCOV_EXCL_STOP */
550 ret = func_layer->layer_unset_buffer(private_layer->layer_backend);
551 TDM_WARNING_IF_FAIL(ret == TDM_ERROR_NONE);
/* Public wrapper: takes the display lock and delegates to
 * tdm_layer_unset_buffer_internal(), warning on failure. */
557 tdm_layer_unset_buffer(tdm_layer *layer)
561 _pthread_mutex_lock(&private_display->lock);
563 ret = tdm_layer_unset_buffer_internal(private_layer);
564 TDM_WARNING_IF_FAIL(ret == TDM_ERROR_NONE);
566 _pthread_mutex_unlock(&private_display->lock);
/* Called when a commit has landed on screen: promotes '*committed_buffer' to
 * be the layer's showing_buffer (freeing the previously shown one) and clears
 * the caller's pointer to transfer ownership. Also maintains the per-layer
 * FPS counter when fps printing is enabled. */
572 tdm_layer_committed(tdm_private_layer *private_layer, tdm_private_layer_buffer **committed_buffer)
574 tdm_private_output *private_output = private_layer->private_output;
575 tdm_private_display *private_display = private_output->private_display;
577 /* LCOV_EXCL_START */
/* fps accounting: stamp on first frame, report+reset once per second */
578 if (private_display->print_fps) {
579 double curr = tdm_helper_get_time();
580 if (private_layer->fps_stamp == 0) {
581 private_layer->fps_stamp = curr;
582 } else if ((curr - private_layer->fps_stamp) > 1.0) {
583 TDM_INFO("output(%d) layer(%d) fps: %d", private_output->index, private_layer->index, private_layer->fps_count);
584 private_layer->fps_count = 0;
585 private_layer->fps_stamp = curr;
587 private_layer->fps_count++;
588 } else if (private_layer->fps_stamp != 0) {
/* fps printing just turned off: reset counters */
589 private_layer->fps_stamp = 0;
590 private_layer->fps_count = 0;
/* retire the buffer that was on screen until now */
594 if (private_layer->showing_buffer) {
595 if (tdm_ttrace_module & TDM_TTRACE_LAYER) {
596 tbm_bo bo = tbm_surface_internal_get_bo(private_layer->showing_buffer->buffer, 0);
597 TDM_TRACE_ASYNC_END((intptr_t)private_layer, "[LAYER] %d", tbm_bo_export(bo));
600 tdm_layer_free_buffer(private_layer, private_layer->showing_buffer);
/* ownership moves from the commit handler to the layer */
603 private_layer->showing_buffer = *committed_buffer;
604 *committed_buffer = NULL;
/* NOTE(review): *committed_buffer is already NULL here, so this always logs (nil) */
606 if (tdm_debug_module & TDM_DEBUG_BUFFER)
607 TDM_INFO("layer(%p) committed_buffer(%p) showing_buffer(%p)",
608 private_layer, *committed_buffer,
609 (private_layer->showing_buffer) ? private_layer->showing_buffer->buffer : NULL);
/* Vblank handler for commit-per-vblank mode. Completes all commits that were
 * waiting for this vblank (clone_list): marks their buffers as showing and
 * invokes the user handlers (lock dropped around user callbacks). Then, if
 * commits were queued while waiting (pending_clone_list), issues a new output
 * commit and arms the next vblank wait; on failure those pending handlers are
 * still invoked directly — see the CAUTION note below. */
613 _tdm_layer_got_output_vblank(tdm_private_output *private_output, unsigned int sequence,
614 unsigned int tv_sec, unsigned int tv_usec)
616 tdm_private_layer_commit_handler *lm = NULL, *lmm = NULL;
617 tdm_private_display *private_display;
618 struct list_head clone_list, pending_clone_list;
619 tdm_error ret = TDM_ERROR_NONE;
621 private_display = private_output->private_display;
/* this vblank consumed the outstanding wait */
623 private_output->layer_waiting_vblank = 0;
625 LIST_INITHEAD(&clone_list);
626 LIST_INITHEAD(&pending_clone_list);
/* detach both lists first so user callbacks can safely queue new commits */
628 LIST_FOR_EACH_ENTRY_SAFE(lm, lmm, &private_output->layer_commit_handler_list, link) {
630 lm->private_layer->committing = 0;
631 LIST_ADDTAIL(&lm->link, &clone_list);
634 LIST_FOR_EACH_ENTRY_SAFE(lm, lmm, &private_output->pending_commit_handler_list, link) {
636 lm->private_layer->committing = 0;
637 LIST_ADDTAIL(&lm->link, &pending_clone_list);
/* complete the commits that just hit the screen */
640 LIST_FOR_EACH_ENTRY_SAFE(lm, lmm, &clone_list, link) {
641 if (tdm_debug_module & TDM_DEBUG_COMMIT)
642 TDM_INFO("layer(%p) committed. handle(%p) commited_buffer(%p)",
643 lm->private_layer, lm, (lm->committed_buffer) ? lm->committed_buffer->buffer : NULL);
646 tdm_layer_committed(lm->private_layer, &lm->committed_buffer);
/* user callback runs unlocked to avoid re-entrant deadlock */
647 _pthread_mutex_unlock(&private_display->lock);
649 lm->func(lm->private_layer, sequence, tv_sec, tv_usec, lm->user_data);
650 _pthread_mutex_lock(&private_display->lock);
651 if (lm->committed_buffer)
652 tdm_layer_free_buffer(lm->private_layer, lm->committed_buffer);
/* nothing queued while we were waiting -> done */
656 if (LIST_IS_EMPTY(&pending_clone_list))
659 TDM_GOTO_IF_FAIL(private_output->vblank != NULL, wait_failed);
/* flush the commits that accumulated during the previous frame */
661 ret = tdm_output_commit_internal(private_output, 0, NULL, NULL);
662 TDM_GOTO_IF_FAIL(ret == TDM_ERROR_NONE, wait_failed);
664 if (tdm_debug_module & TDM_DEBUG_COMMIT)
665 TDM_INFO("layer commit: output(%d) commit", private_output->pipe);
667 if (!TDM_OUTPUT_DPMS_VSYNC_IS_OFF(private_output->current_dpms_value)) {
668 /* tdm_vblank APIs is for server. it should be called in unlock status*/
669 if (!private_output->layer_waiting_vblank) {
670 _pthread_mutex_unlock(&private_display->lock);
671 ret = tdm_vblank_wait(private_output->vblank, 0, 0, 1, _tdm_layer_cb_wait_vblank, private_output);
672 _pthread_mutex_lock(&private_display->lock);
673 TDM_GOTO_IF_FAIL(ret == TDM_ERROR_NONE, wait_failed);
674 private_output->layer_waiting_vblank = 1;
678 if (tdm_debug_module & TDM_DEBUG_COMMIT)
679 TDM_INFO("layer commit: output(%d) wait vblank", private_output->pipe);
/* the pending handlers become the active set for the vblank just armed */
681 LIST_FOR_EACH_ENTRY_SAFE(lm, lmm, &pending_clone_list, link) {
683 LIST_ADDTAIL(&lm->link, &private_output->layer_commit_handler_list);
/* with vsync off there is no hardware vblank: emulate it immediately */
686 if (TDM_OUTPUT_DPMS_VSYNC_IS_OFF(private_output->current_dpms_value)) {
687 TDM_WRN("dpms %s. Directly call vblank callback.", tdm_dpms_str(private_output->current_dpms_value));
688 _pthread_mutex_unlock(&private_display->lock);
689 _tdm_layer_cb_wait_vblank(private_output->vblank, 0, 0, 0, 0, private_output);
690 _pthread_mutex_lock(&private_display->lock);
695 /* CAUTION: Once _tdm_layer_commit returns success, the layer commit handler MUST be called always.
696 * That is, even if we get error in _tdm_layer_got_output_vblank() function for some reasons,
697 * the layer commit handler MUST be called.
699 LIST_FOR_EACH_ENTRY_SAFE(lm, lmm, &pending_clone_list, link) {
700 TDM_WRN("Directly call layer commit handlers: ret(%d)\n", ret);
702 _pthread_mutex_unlock(&private_display->lock);
704 lm->func(lm->private_layer, sequence, tv_sec, tv_usec, lm->user_data);
705 _pthread_mutex_lock(&private_display->lock);
706 tdm_layer_free_buffer(lm->private_layer, lm->committed_buffer);
/* Output-commit callback for the non-per-vblank path: fired when the output
 * commit carrying this layer's buffer has completed. Validates that the
 * handler is still queued (it may have been removed meanwhile), promotes the
 * committed buffer to showing, and invokes the user handler unlocked. */
714 _tdm_layer_cb_output_commit(tdm_output *output, unsigned int sequence,
715 unsigned int tv_sec, unsigned int tv_usec, void *user_data)
717 tdm_private_layer_commit_handler *layer_commit_handler = user_data;
718 tdm_private_layer_commit_handler *lm = NULL;
719 tdm_private_display *private_display;
720 tdm_private_output *private_output = output;
721 tdm_private_layer *private_layer;
724 TDM_RETURN_IF_FAIL(layer_commit_handler != NULL);
726 private_display = private_output->private_display;
/* ignore stale callbacks whose handler is no longer in the list */
728 LIST_FOR_EACH_ENTRY(lm, &private_output->layer_commit_handler_list, link) {
729 if (layer_commit_handler == lm) {
738 LIST_DEL(&layer_commit_handler->link);
740 private_layer = layer_commit_handler->private_layer;
742 private_layer->committing = 0;
744 if (tdm_debug_module & TDM_DEBUG_COMMIT)
745 TDM_INFO("layer(%p) commit: output(%d) committed. handle(%p)",
746 private_layer, private_output->pipe, layer_commit_handler);
748 _pthread_mutex_lock(&private_display->lock);
750 tdm_layer_committed(private_layer, &layer_commit_handler->committed_buffer);
752 if (layer_commit_handler->func) {
/* user callback runs unlocked to avoid re-entrant deadlock */
753 _pthread_mutex_unlock(&private_display->lock);
754 layer_commit_handler->func(private_layer, sequence,
755 tv_sec, tv_usec, layer_commit_handler->user_data);
756 _pthread_mutex_lock(&private_display->lock);
759 free(layer_commit_handler);
761 _pthread_mutex_unlock(&private_display->lock);
/* tdm_vblank callback: takes the display lock and forwards the vblank event
 * to _tdm_layer_got_output_vblank(), which completes the queued commits. */
765 _tdm_layer_cb_wait_vblank(tdm_vblank *vblank, tdm_error error, unsigned int sequence,
766 unsigned int tv_sec, unsigned int tv_usec, void *user_data)
768 tdm_private_output *private_output = user_data;
769 tdm_private_display *private_display;
771 TDM_RETURN_IF_FAIL(private_output != NULL);
773 private_display = private_output->private_display;
775 _pthread_mutex_lock(&private_display->lock);
777 if (tdm_debug_module & TDM_DEBUG_COMMIT)
778 TDM_INFO("layer commit: output(%d) got vblank", private_output->pipe);
780 _tdm_layer_got_output_vblank(private_output, sequence, tv_sec, tv_usec);
782 _pthread_mutex_unlock(&private_display->lock);
/* Counts how many layers of 'private_output' are currently in use
 * (usable == 0). Note: "lauer" in the name is a historical typo, kept
 * because internal callers reference it. */
786 _tdm_lauer_get_output_used_layer_count(tdm_private_output *private_output)
788 tdm_private_layer *private_layer = NULL;
789 unsigned int count = 0;
791 LIST_FOR_EACH_ENTRY(private_layer, &private_output->layer_list, link) {
792 if (!private_layer->usable)
799 /* commit_per_vblank == 1: we can commit if
800 * - there is no previous commit request
801 * - only 1 layer is used
802 * commit_per_vblank == 2: we can commit if
803 * - there is no previous commit request
/* Decides whether a layer commit can go to the hardware now (returns
 * true/false); when commit_per_vblank is 0 the answer is always "possible". */
806 _tdm_layer_commit_possible(tdm_private_layer *private_layer)
808 tdm_private_output *private_output = private_layer->private_output;
810 TDM_RETURN_VAL_IF_FAIL(private_output->commit_per_vblank > 0, 1);
812 /* There is a previous commit request which is not done and displayed on screen yet.
813 * We can't commit at this time.
815 if (!LIST_IS_EMPTY(&private_output->layer_commit_handler_list)) {
816 if (tdm_debug_module & TDM_DEBUG_COMMIT)
817 TDM_INFO("layer(%p) commit: not possible(previous commit)", private_layer);
/* mode 1 additionally requires at most one layer in use */
821 if (private_output->commit_per_vblank == 1 && _tdm_lauer_get_output_used_layer_count(private_output) > 1) {
822 if (tdm_debug_module & TDM_DEBUG_COMMIT)
823 TDM_INFO("layer(%p) commit: not possible(more than 2 layers)", private_layer);
827 if (tdm_debug_module & TDM_DEBUG_COMMIT)
828 TDM_INFO("layer(%p) commit: possible", private_layer);
/* Discards any staged-but-uncommitted state: clears the pending info and
 * drops the reference on the pending buffer, if one was staged. */
834 _tdm_layer_reset_pending_data(tdm_private_layer *private_layer)
836 private_layer->pending_info_changed = 0;
837 memset(&private_layer->pending_info, 0, sizeof private_layer->pending_info);
839 private_layer->pending_buffer_changed = 0;
840 if (private_layer->pending_buffer) {
/* release the ref taken by tdm_layer_set_buffer_internal */
841 tbm_surface_internal_unref(private_layer->pending_buffer);
842 private_layer->pending_buffer = NULL;
/* Pushes the staged (pending) info and buffer down to the backend, turning
 * the pending buffer into the layer's waiting_buffer (a wrapper that holds a
 * backend buffer ref until the commit completes). Pending state is reset at
 * the end regardless of outcome. */
847 tdm_layer_commit_pending_data(tdm_private_layer *private_layer)
849 tdm_private_module *private_module;
850 tdm_func_layer *func_layer;
851 tdm_error ret = TDM_ERROR_NONE;
853 private_module = private_layer->private_module;
854 func_layer = &private_module->func_layer;
/* apply geometry/format first so the buffer lands with the right config */
856 if (private_layer->pending_info_changed) {
857 ret = func_layer->layer_set_info(private_layer->layer_backend, &private_layer->pending_info);
858 TDM_GOTO_IF_FAIL(ret == TDM_ERROR_NONE, done);
861 if (private_layer->pending_buffer_changed) {
862 tdm_private_layer_buffer *layer_buffer;
864 layer_buffer = calloc(1, sizeof(tdm_private_layer_buffer));
865 TDM_GOTO_IF_FAIL(layer_buffer != NULL, done);
867 LIST_INITHEAD(&layer_buffer->link);
869 ret = func_layer->layer_set_buffer(private_layer->layer_backend, private_layer->pending_buffer);
870 TDM_WARNING_IF_FAIL(ret == TDM_ERROR_NONE);
872 if (tdm_debug_module & TDM_DEBUG_BUFFER)
873 TDM_INFO("layer(%p) pending_buffer(%p) committed",
874 private_layer, private_layer->pending_buffer);
876 if (ret == TDM_ERROR_NONE) {
/* a superseded waiting buffer is released before replacement */
877 if (private_layer->waiting_buffer)
878 tdm_layer_free_buffer(private_layer, private_layer->waiting_buffer);
880 private_layer->waiting_buffer = layer_buffer;
/* ref the backend buffer for the lifetime of the wait */
881 private_layer->waiting_buffer->buffer = tdm_buffer_ref_backend(private_layer->pending_buffer);
882 if (tdm_debug_module & TDM_DEBUG_BUFFER)
883 TDM_INFO("layer(%p) waiting_buffer(%p)",
884 private_layer, private_layer->waiting_buffer->buffer);
/* backend refused the buffer: discard the freshly allocated wrapper */
886 tdm_layer_free_buffer(private_layer, layer_buffer);
890 _tdm_layer_reset_pending_data(private_layer);
895 /* CAUTION: Once _tdm_layer_commit returns success, the layer commit handler MUST be called always.
896 * That is, even if we get error in _tdm_layer_got_output_vblank() function for some reasons,
897 * the layer commit handler MUST be called.
/* Core commit: wraps (func, user_data) plus the layer's waiting buffer in a
 * commit handler, then either commits the output immediately (no
 * commit-per-vblank, or commit currently possible) or parks the handler on
 * the pending list to be flushed at the next vblank. On any failure the
 * waiting buffer is handed back to the layer and the handler freed. */
900 _tdm_layer_commit(tdm_layer *layer, tdm_layer_commit_handler func, void *user_data)
902 tdm_private_layer_commit_handler *layer_commit_handler;
905 layer_commit_handler = calloc(1, sizeof(tdm_private_layer_commit_handler));
906 if (!layer_commit_handler) {
907 /* LCOV_EXCL_START */
908 TDM_ERR("failed: alloc memory");
909 return TDM_ERROR_OUT_OF_MEMORY;
913 if (tdm_debug_module & TDM_DEBUG_COMMIT)
914 TDM_INFO("layer(%p) commit: handle(%p)", private_layer, layer_commit_handler);
916 LIST_INITHEAD(&layer_commit_handler->link);
917 layer_commit_handler->private_layer = private_layer;
918 layer_commit_handler->func = func;
919 layer_commit_handler->user_data = user_data;
/* ownership of the waiting buffer transfers to the handler */
921 layer_commit_handler->committed_buffer = private_layer->waiting_buffer;
922 private_layer->waiting_buffer = NULL;
924 if (private_layer->committing)
925 TDM_WRN("layer(%d) too many commit", private_layer->index);
927 private_layer->committing = 1;
929 if (tdm_debug_module & TDM_DEBUG_BUFFER)
930 TDM_INFO("layer(%p) waiting_buffer(%p) committed_buffer(%p)",
931 private_layer, private_layer->waiting_buffer,
932 (layer_commit_handler->committed_buffer) ? layer_commit_handler->committed_buffer->buffer : NULL);
/* path 1: no commit-per-vblank -> commit the output right away and let
 * _tdm_layer_cb_output_commit complete the handler */
934 if (!private_output->commit_per_vblank) {
935 TDM_GOTO_IF_FAIL(private_output->commit_type == TDM_COMMIT_TYPE_OUTPUT, commit_failed);
937 LIST_ADDTAIL(&layer_commit_handler->link, &private_output->layer_commit_handler_list);
938 ret = tdm_output_commit_internal(private_layer->private_output, 0, _tdm_layer_cb_output_commit, layer_commit_handler);
939 TDM_GOTO_IF_FAIL(ret == TDM_ERROR_NONE, commit_failed);
941 if (tdm_debug_module & TDM_DEBUG_COMMIT)
942 TDM_INFO("layer(%p) commit: no commit-per-vblank", private_layer);
944 TDM_GOTO_IF_FAIL(private_output->commit_type == TDM_COMMIT_TYPE_LAYER, commit_failed);
/* path 2: commit-per-vblank with no commit in flight -> commit now */
946 if (_tdm_layer_commit_possible(private_layer)) {
947 /* add to layer_commit_handler_list */
948 LIST_ADDTAIL(&layer_commit_handler->link, &private_output->layer_commit_handler_list);
949 ret = tdm_output_commit_internal(private_layer->private_output, 0, _tdm_layer_cb_output_commit, layer_commit_handler);
950 TDM_GOTO_IF_FAIL(ret == TDM_ERROR_NONE, commit_failed);
952 if (tdm_debug_module & TDM_DEBUG_COMMIT)
953 TDM_INFO("layer(%p) commit: output", private_layer);
955 /* add to pending_commit_handler_list. It will be commited when a vblank occurs */
956 LIST_ADDTAIL(&layer_commit_handler->link, &private_output->pending_commit_handler_list);
958 if (tdm_debug_module & TDM_DEBUG_COMMIT)
959 TDM_INFO("layer(%p) commit: pending", private_layer);
/* lazily create the vblank object on first use */
962 if (!private_output->vblank) {
963 /* tdm_vblank APIs is for server. it should be called in unlock status*/
964 _pthread_mutex_unlock(&private_display->lock);
965 private_output->vblank = tdm_vblank_create(private_display, private_output, NULL);
966 _pthread_mutex_lock(&private_display->lock);
967 TDM_GOTO_IF_FAIL(private_output->vblank != NULL, commit_failed);
/* arm a single vblank wait shared by all layers of this output */
970 if (!private_output->layer_waiting_vblank) {
971 /* tdm_vblank APIs is for server. it should be called in unlock status*/
972 _pthread_mutex_unlock(&private_display->lock);
973 ret = tdm_vblank_wait(private_output->vblank, 0, 0, 1, _tdm_layer_cb_wait_vblank, private_output);
974 _pthread_mutex_lock(&private_display->lock);
975 TDM_GOTO_IF_FAIL(ret == TDM_ERROR_NONE, commit_failed);
976 private_output->layer_waiting_vblank = 1;
978 if (tdm_debug_module & TDM_DEBUG_COMMIT)
979 TDM_INFO("layer(%p) commit: wait vblank", private_layer);
/* failure: return the buffer to the layer and undo the handler */
986 if (layer_commit_handler) {
987 private_layer->waiting_buffer = layer_commit_handler->committed_buffer;
988 LIST_DEL(&layer_commit_handler->link);
989 free(layer_commit_handler);
/* Internal (lock held) commit entry: pins the output's commit type on first
 * use (OUTPUT vs LAYER, mixing is rejected later), refuses to commit while
 * DPMS has vsync off, flushes pending info/buffer to the backend, then runs
 * the actual commit via _tdm_layer_commit(). */
995 tdm_layer_commit_internal(tdm_private_layer *private_layer, tdm_layer_commit_handler func, void *user_data)
997 tdm_private_output *private_output = private_layer->private_output;
998 tdm_error ret = TDM_ERROR_NONE; /* default TDM_ERROR_NONE */
1000 if (private_output->commit_type == TDM_COMMIT_TYPE_NONE) {
1001 if (!private_output->commit_per_vblank)
1002 private_output->commit_type = TDM_COMMIT_TYPE_OUTPUT;
1004 private_output->commit_type = TDM_COMMIT_TYPE_LAYER;
1007 if (TDM_OUTPUT_DPMS_VSYNC_IS_OFF(private_output->current_dpms_value)) {
1008 TDM_ERR("layer(%p)'s output(%d) dpms: %s", private_layer, private_output->pipe,
1009 tdm_dpms_str(private_output->current_dpms_value));
1010 return TDM_ERROR_DPMS_OFF;
1013 /* don't call this inside of _tdm_layer_commit */
1014 ret = tdm_layer_commit_pending_data(private_layer);
1015 if (ret != TDM_ERROR_NONE) {
1016 TDM_ERR("layer(%p) committing pending data failed", private_layer);
1020 ret = _tdm_layer_commit(private_layer, func, user_data);
/* Public wrapper: takes the display lock and delegates to
 * tdm_layer_commit_internal(). */
1026 tdm_layer_commit(tdm_layer *layer, tdm_layer_commit_handler func, void *user_data)
1030 _pthread_mutex_lock(&private_display->lock);
1032 ret = tdm_layer_commit_internal(private_layer, func, user_data);
1034 _pthread_mutex_unlock(&private_display->lock);
/* Reports whether a commit issued on this layer is still in flight
 * (set in _tdm_layer_commit, cleared when the commit completes). */
1040 tdm_layer_is_committing(tdm_layer *layer, unsigned int *committing)
1044 TDM_RETURN_VAL_IF_FAIL(committing != NULL, TDM_ERROR_INVALID_PARAMETER);
1046 _pthread_mutex_lock(&private_display->lock);
1048 *committing = private_layer->committing;
1050 _pthread_mutex_unlock(&private_display->lock);
/* Internal (lock held): removes every queued commit handler matching
 * (func, user_data) from both the active and the pending handler lists,
 * unregistering the paired output commit handler and freeing the buffer the
 * handler owned. A (NULL, NULL) pair is a no-op. */
1056 tdm_layer_remove_commit_handler_internal(tdm_layer *layer, tdm_layer_commit_handler func, void *user_data)
1058 tdm_private_layer *private_layer = (tdm_private_layer*)layer;
1059 tdm_private_output *private_output = private_layer->private_output;
1060 tdm_private_layer_commit_handler *lm = NULL, *lmm = NULL;
1062 if (!func && !user_data)
1065 TDM_RETURN_IF_FAIL(private_layer != NULL);
1066 TDM_RETURN_IF_FAIL(TDM_MUTEX_IS_LOCKED());
1068 LIST_FOR_EACH_ENTRY_SAFE(lm, lmm, &private_output->layer_commit_handler_list, link) {
1069 if (lm->func == func && lm->user_data == user_data) {
1070 LIST_DEL(&lm->link);
1071 tdm_output_remove_commit_handler_internal(private_output, _tdm_layer_cb_output_commit, lm);
1072 tdm_layer_free_buffer(private_layer, lm->committed_buffer);
/* same removal for handlers still waiting on a vblank */
1078 LIST_FOR_EACH_ENTRY_SAFE(lm, lmm, &private_output->pending_commit_handler_list, link) {
1079 if (lm->func == func && lm->user_data == user_data) {
1080 LIST_DEL(&lm->link);
1081 tdm_output_remove_commit_handler_internal(private_output, _tdm_layer_cb_output_commit, lm);
1082 tdm_layer_free_buffer(private_layer, lm->committed_buffer);
1090 tdm_layer_remove_commit_handler(tdm_layer *layer, tdm_layer_commit_handler func, void *user_data)
1094 _pthread_mutex_lock(&private_display->lock);
1096 tdm_layer_remove_commit_handler_internal(layer, func, user_data);
1098 _pthread_mutex_unlock(&private_display->lock);
1103 EXTERN tbm_surface_h
1104 tdm_layer_get_displaying_buffer(tdm_layer *layer, tdm_error *error)
1106 tbm_surface_h buffer;
1107 LAYER_FUNC_ENTRY_ERROR();
1109 _pthread_mutex_lock(&private_display->lock);
1112 *error = TDM_ERROR_NONE;
1114 if (private_layer->showing_buffer) {
1115 buffer = private_layer->showing_buffer->buffer;
1117 _pthread_mutex_unlock(&private_display->lock);
1118 TDM_DBG("layer(%p) showing_buffer is null", private_layer);
1121 _pthread_mutex_unlock(&private_display->lock);
1127 _tbm_layer_queue_acquirable_cb(tbm_surface_queue_h surface_queue, void *data)
1129 TDM_RETURN_IF_FAIL(data != NULL);
1130 tdm_layer *layer = data;
1131 tdm_private_module *private_module;
1132 tdm_func_layer *func_layer;
1133 tbm_surface_h buffer = NULL;
1134 LAYER_FUNC_ENTRY_VOID_RETURN();
1136 _pthread_mutex_lock(&private_display->lock);
1138 private_module = private_layer->private_module;
1139 func_layer = &private_module->func_layer;
1141 if (!func_layer->layer_set_buffer) {
1142 /* LCOV_EXCL_START */
1143 _pthread_mutex_unlock(&private_display->lock);
1145 /* LCOV_EXCL_STOP */
1148 if (TBM_SURFACE_QUEUE_ERROR_NONE != tbm_surface_queue_acquire(private_layer->buffer_queue, &buffer) ||
1150 /* LCOV_EXCL_START */
1151 TDM_ERR("layer(%p) tbm_surface_queue_acquire() failed surface:%p",
1152 private_layer, buffer);
1153 _pthread_mutex_unlock(&private_display->lock);
1155 /* LCOV_EXCL_STOP */
1158 ret = tdm_layer_set_buffer_internal(private_layer, buffer);
1159 if (ret != TDM_ERROR_NONE) {
1160 TDM_ERR("tdm_layer_set_buffer_internal failed");
1161 _pthread_mutex_unlock(&private_display->lock);
1165 ret = tdm_layer_commit_internal(private_layer, NULL, NULL);
1166 if (ret != TDM_ERROR_NONE) {
1167 TDM_ERR("tdm_layer_commit_internal failed");
1168 _pthread_mutex_unlock(&private_display->lock);
1172 _pthread_mutex_unlock(&private_display->lock);
1176 _tbm_layer_queue_destroy_cb(tbm_surface_queue_h surface_queue, void *data)
1178 TDM_RETURN_IF_FAIL(data != NULL);
1179 tdm_layer *layer = data;
1180 LAYER_FUNC_ENTRY_VOID_RETURN();
1181 TDM_RETURN_IF_FAIL(ret == TDM_ERROR_NONE);
1183 _pthread_mutex_lock(&private_display->lock);
1185 private_layer->buffer_queue = NULL;
1187 _tdm_layer_free_all_buffers(private_layer);
1189 _pthread_mutex_unlock(&private_display->lock);
1193 tdm_layer_set_buffer_queue(tdm_layer *layer, tbm_surface_queue_h buffer_queue)
1195 tdm_private_module *private_module;
1196 tdm_func_layer *func_layer;
1199 TDM_RETURN_VAL_IF_FAIL(buffer_queue != NULL, TDM_ERROR_INVALID_PARAMETER);
1201 _pthread_mutex_lock(&private_display->lock);
1203 private_module = private_layer->private_module;
1204 func_layer = &private_module->func_layer;
1206 if (private_layer->usable)
1207 TDM_INFO("layer(%p) not usable", private_layer);
1209 private_layer->usable = 0;
1211 if (!func_layer->layer_set_buffer) {
1212 /* LCOV_EXCL_START */
1213 _pthread_mutex_unlock(&private_display->lock);
1214 TDM_ERR("not implemented!!");
1215 return TDM_ERROR_NOT_IMPLEMENTED;
1216 /* LCOV_EXCL_STOP */
1219 if (buffer_queue == private_layer->buffer_queue) {
1220 _pthread_mutex_unlock(&private_display->lock);
1221 return TDM_ERROR_NONE;
1224 if (private_layer->waiting_buffer) {
1225 tdm_layer_free_buffer(private_layer, private_layer->waiting_buffer);
1226 private_layer->waiting_buffer = NULL;
1228 if (tdm_debug_module & TDM_DEBUG_BUFFER)
1229 TDM_INFO("layer(%p) waiting_buffer(%p)",
1230 private_layer, private_layer->waiting_buffer);
1233 private_layer->buffer_queue = buffer_queue;
1234 tbm_surface_queue_add_acquirable_cb(private_layer->buffer_queue,
1235 _tbm_layer_queue_acquirable_cb,
1237 tbm_surface_queue_add_destroy_cb(private_layer->buffer_queue,
1238 _tbm_layer_queue_destroy_cb,
1240 _pthread_mutex_unlock(&private_display->lock);
1246 tdm_layer_unset_buffer_queue(tdm_layer *layer)
1248 return tdm_layer_unset_buffer(layer);
1252 tdm_layer_is_usable(tdm_layer *layer, unsigned int *usable)
1256 TDM_RETURN_VAL_IF_FAIL(usable != NULL, TDM_ERROR_INVALID_PARAMETER);
1258 _pthread_mutex_lock(&private_display->lock);
1260 *usable = private_layer->usable;
1262 _pthread_mutex_unlock(&private_display->lock);
1268 tdm_layer_set_video_pos(tdm_layer *layer, int zpos)
1270 tdm_private_module *private_module;
1271 tdm_func_layer *func_layer;
1274 _pthread_mutex_lock(&private_display->lock);
1276 private_module = private_layer->private_module;
1277 func_layer = &private_module->func_layer;
1279 if (!(private_layer->caps.capabilities & TDM_LAYER_CAPABILITY_VIDEO)) {
1280 TDM_ERR("layer(%p) is not video layer", private_layer);
1281 _pthread_mutex_unlock(&private_display->lock);
1282 return TDM_ERROR_BAD_REQUEST;
1285 if (!func_layer->layer_set_video_pos) {
1286 /* LCOV_EXCL_START */
1287 _pthread_mutex_unlock(&private_display->lock);
1288 TDM_ERR("not implemented!!");
1289 return TDM_ERROR_NOT_IMPLEMENTED;
1290 /* LCOV_EXCL_STOP */
1293 ret = func_layer->layer_set_video_pos(private_layer->layer_backend, zpos);
1295 _pthread_mutex_unlock(&private_display->lock);
1300 EXTERN tdm_capture *
1301 tdm_layer_create_capture(tdm_layer *layer, tdm_error *error)
1303 tdm_capture *capture = NULL;
1305 LAYER_FUNC_ENTRY_ERROR();
1307 _pthread_mutex_lock(&private_display->lock);
1309 capture = (tdm_capture *)tdm_capture_create_layer_internal(private_layer, error);
1311 _pthread_mutex_unlock(&private_display->lock);
1317 tdm_layer_get_buffer_flags(tdm_layer *layer, unsigned int *flags)
1319 tdm_private_module *private_module;
1320 tdm_func_layer *func_layer;
1322 TDM_RETURN_VAL_IF_FAIL(flags != NULL, TDM_ERROR_INVALID_PARAMETER);
1324 _pthread_mutex_lock(&private_display->lock);
1326 private_module = private_layer->private_module;
1327 func_layer = &private_module->func_layer;
1329 if (!func_layer->layer_get_buffer_flags) {
1330 /* LCOV_EXCL_START */
1332 _pthread_mutex_unlock(&private_display->lock);
1333 TDM_INFO("not implemented!!");
1334 return TDM_ERROR_NONE;
1335 /* LCOV_EXCL_STOP */
1338 ret = func_layer->layer_get_buffer_flags(private_layer->layer_backend, flags);
1340 _pthread_mutex_unlock(&private_display->lock);