1 /**************************************************************************
5 * Copyright 2015 Samsung Electronics co., Ltd. All Rights Reserved.
7 * Contact: Eunchul Kim <chulspro.kim@samsung.com>,
8 * JinYoung Jeon <jy0.jeon@samsung.com>,
9 * Taeheon Kim <th908.kim@samsung.com>,
10 * YoungJun Cho <yj44.cho@samsung.com>,
11 * SooChan Lim <sc1.lim@samsung.com>,
 * Boram Park <boram1288.park@samsung.com>
14 * Permission is hereby granted, free of charge, to any person obtaining a
15 * copy of this software and associated documentation files (the
16 * "Software"), to deal in the Software without restriction, including
17 * without limitation the rights to use, copy, modify, merge, publish,
18 * distribute, sub license, and/or sell copies of the Software, and to
19 * permit persons to whom the Software is furnished to do so, subject to
20 * the following conditions:
22 * The above copyright notice and this permission notice (including the
23 * next paragraph) shall be included in all copies or substantial portions
26 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
27 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
28 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
29 * IN NO EVENT SHALL PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR
30 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
31 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
32 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
34 **************************************************************************/
40 #include "tdm_private.h"
44 #define LAYER_FUNC_ENTRY() \
45 tdm_private_display *private_display; \
46 tdm_private_output *private_output; \
47 tdm_private_layer *private_layer; \
48 tdm_error ret = TDM_ERROR_NONE; /* default TDM_ERROR_NONE */\
49 TDM_RETURN_VAL_IF_FAIL(layer != NULL, TDM_ERROR_INVALID_PARAMETER); \
50 private_layer = (tdm_private_layer*)layer; \
51 private_output = private_layer->private_output; \
52 private_display = private_output->private_display
54 #define LAYER_FUNC_ENTRY_ERROR() \
55 tdm_private_display *private_display; \
56 tdm_private_output *private_output; \
57 tdm_private_layer *private_layer; \
58 tdm_error ret = TDM_ERROR_NONE; /* default TDM_ERROR_NONE */\
59 TDM_RETURN_VAL_IF_FAIL_WITH_ERROR(layer != NULL, TDM_ERROR_INVALID_PARAMETER, NULL); \
60 private_layer = (tdm_private_layer*)layer; \
61 private_output = private_layer->private_output; \
62 private_display = private_output->private_display
64 #define LAYER_FUNC_ENTRY_VOID_RETURN() \
65 tdm_private_display *private_display; \
66 tdm_private_output *private_output; \
67 tdm_private_layer *private_layer; \
68 tdm_error ret = TDM_ERROR_NONE; /* default TDM_ERROR_NONE */\
69 TDM_RETURN_IF_FAIL(layer != NULL); \
70 private_layer = (tdm_private_layer*)layer; \
71 private_output = private_layer->private_output; \
72 private_display = private_output->private_display
74 static void _tdm_layer_free_buffer(tdm_private_layer *private_layer, tdm_private_layer_buffer *layer_buffer);
75 static void _tdm_layer_cb_wait_vblank(tdm_vblank *vblank, tdm_error error, unsigned int sequence,
76 unsigned int tv_sec, unsigned int tv_usec, void *user_data);
77 static void _tbm_layer_queue_acquirable_cb(tbm_surface_queue_h surface_queue, void *data);
78 static void _tbm_layer_queue_destroy_cb(tbm_surface_queue_h surface_queue, void *data);
79 static void _tdm_layer_cb_output_commit(tdm_output *output, unsigned int sequence,
80 unsigned int tv_sec, unsigned int tv_usec, void *user_data);
81 static void _tdm_layer_reset_pending_data(tdm_private_layer *private_layer);
84 tdm_layer_get_output(tdm_layer *layer, tdm_error *error)
88 LAYER_FUNC_ENTRY_ERROR();
90 _pthread_mutex_lock(&private_display->lock);
93 *error = TDM_ERROR_NONE;
95 output = private_layer->private_output;
97 _pthread_mutex_unlock(&private_display->lock);
103 tdm_layer_get_capabilities(tdm_layer *layer, tdm_layer_capability *capabilities)
107 TDM_RETURN_VAL_IF_FAIL(capabilities != NULL, TDM_ERROR_INVALID_PARAMETER);
109 _pthread_mutex_lock(&private_display->lock);
111 *capabilities = private_layer->caps.capabilities;
113 _pthread_mutex_unlock(&private_display->lock);
119 tdm_layer_get_available_formats(tdm_layer *layer, const tbm_format **formats, int *count)
123 TDM_RETURN_VAL_IF_FAIL(formats != NULL, TDM_ERROR_INVALID_PARAMETER);
124 TDM_RETURN_VAL_IF_FAIL(count != NULL, TDM_ERROR_INVALID_PARAMETER);
126 _pthread_mutex_lock(&private_display->lock);
128 *formats = (const tbm_format *)private_layer->caps.formats;
129 *count = private_layer->caps.format_count;
131 _pthread_mutex_unlock(&private_display->lock);
137 tdm_layer_get_available_properties(tdm_layer *layer, const tdm_prop **props, int *count)
141 TDM_RETURN_VAL_IF_FAIL(props != NULL, TDM_ERROR_INVALID_PARAMETER);
142 TDM_RETURN_VAL_IF_FAIL(count != NULL, TDM_ERROR_INVALID_PARAMETER);
144 _pthread_mutex_lock(&private_display->lock);
146 *props = (const tdm_prop *)private_layer->caps.props;
147 *count = private_layer->caps.prop_count;
149 _pthread_mutex_unlock(&private_display->lock);
155 tdm_layer_get_zpos(tdm_layer *layer, int *zpos)
159 TDM_RETURN_VAL_IF_FAIL(zpos != NULL, TDM_ERROR_INVALID_PARAMETER);
161 _pthread_mutex_lock(&private_display->lock);
163 *zpos = private_layer->caps.zpos;
165 _pthread_mutex_unlock(&private_display->lock);
171 tdm_layer_set_property(tdm_layer *layer, unsigned int id, tdm_value value)
173 tdm_private_backend *private_backend;
174 tdm_func_layer *func_layer;
177 _pthread_mutex_lock(&private_display->lock);
179 private_backend = private_layer->private_backend;
180 func_layer = &private_backend->func_layer;
182 if (private_layer->usable)
183 TDM_INFO("layer(%d) not usable", private_layer->index);
185 private_layer->usable = 0;
187 if (!func_layer->layer_set_property) {
188 /* LCOV_EXCL_START */
189 _pthread_mutex_unlock(&private_display->lock);
190 TDM_ERR("not implemented!!");
191 return TDM_ERROR_NOT_IMPLEMENTED;
195 ret = func_layer->layer_set_property(private_layer->layer_backend, id, value);
197 _pthread_mutex_unlock(&private_display->lock);
203 tdm_layer_get_property(tdm_layer *layer, unsigned int id, tdm_value *value)
205 tdm_private_backend *private_backend;
206 tdm_func_layer *func_layer;
209 TDM_RETURN_VAL_IF_FAIL(value != NULL, TDM_ERROR_INVALID_PARAMETER);
211 _pthread_mutex_lock(&private_display->lock);
213 private_backend = private_layer->private_backend;
214 func_layer = &private_backend->func_layer;
216 if (!func_layer->layer_get_property) {
217 /* LCOV_EXCL_START */
218 _pthread_mutex_unlock(&private_display->lock);
219 TDM_ERR("not implemented!!");
220 return TDM_ERROR_NOT_IMPLEMENTED;
224 ret = func_layer->layer_get_property(private_layer->layer_backend, id, value);
226 _pthread_mutex_unlock(&private_display->lock);
232 tdm_layer_set_info_internal(tdm_private_layer *private_layer, tdm_info_layer *info)
234 tdm_private_backend *private_backend;
235 tdm_func_layer *func_layer;
238 private_backend = private_layer->private_backend;
239 func_layer = &private_backend->func_layer;
241 if (private_layer->usable)
242 TDM_INFO("layer(%p) not usable", private_layer);
244 private_layer->usable = 0;
246 if (!func_layer->layer_set_info) {
247 /* LCOV_EXCL_START */
248 TDM_ERR("not implemented!!");
249 return TDM_ERROR_NOT_IMPLEMENTED;
253 if (info->src_config.format)
254 snprintf(fmtstr, 128, "%c%c%c%c", FOURCC_STR(info->src_config.format));
256 snprintf(fmtstr, 128, "NONE");
258 TDM_INFO("layer(%p) info: src(%ux%u %u,%u %ux%u %s) dst(%u,%u %ux%u) trans(%d)",
259 private_layer, info->src_config.size.h, info->src_config.size.v,
260 info->src_config.pos.x, info->src_config.pos.y,
261 info->src_config.pos.w, info->src_config.pos.h,
263 info->dst_pos.x, info->dst_pos.y,
264 info->dst_pos.w, info->dst_pos.h,
267 private_layer->pending_info_changed = 1;
268 private_layer->pending_info = *info;
270 return TDM_ERROR_NONE;
274 tdm_layer_set_info(tdm_layer *layer, tdm_info_layer *info)
278 TDM_RETURN_VAL_IF_FAIL(info != NULL, TDM_ERROR_INVALID_PARAMETER);
280 _pthread_mutex_lock(&private_display->lock);
282 ret = tdm_layer_set_info_internal(private_layer, info);
284 _pthread_mutex_unlock(&private_display->lock);
290 tdm_layer_get_info(tdm_layer *layer, tdm_info_layer *info)
292 tdm_private_backend *private_backend;
293 tdm_func_layer *func_layer;
296 TDM_RETURN_VAL_IF_FAIL(info != NULL, TDM_ERROR_INVALID_PARAMETER);
298 _pthread_mutex_lock(&private_display->lock);
300 private_backend = private_layer->private_backend;
301 func_layer = &private_backend->func_layer;
303 if (!func_layer->layer_get_info) {
304 /* LCOV_EXCL_START */
305 _pthread_mutex_unlock(&private_display->lock);
306 TDM_ERR("not implemented!!");
307 return TDM_ERROR_NOT_IMPLEMENTED;
311 ret = func_layer->layer_get_info(private_layer->layer_backend, info);
313 _pthread_mutex_unlock(&private_display->lock);
318 /* LCOV_EXCL_START */
320 _tdm_layer_dump_buffer(tdm_layer *layer, tbm_surface_h buffer)
322 tdm_private_layer *private_layer = (tdm_private_layer*)layer;
323 tdm_private_output *private_output = private_layer->private_output;
325 char fname[TDM_PATH_LEN], bufs[TDM_PATH_LEN];
326 int zpos, len = TDM_PATH_LEN;
327 tdm_private_layer *l = NULL;
331 pipe = private_output->pipe;
332 zpos = private_layer->caps.zpos;
335 LIST_FOR_EACH_ENTRY(l, &private_output->layer_list, link) {
336 if (!l->showing_buffer)
338 TDM_SNPRINTF(p, remain, "_%p", l->showing_buffer->buffer);
341 snprintf(fname, sizeof(fname), "tdm_%d_lyr_%d%s", pipe, zpos, bufs);
343 tbm_surface_internal_dump_buffer(buffer, fname);
344 TDM_DBG("%s dump excute", fname);
351 _tdm_layer_free_buffer(tdm_private_layer *private_layer, tdm_private_layer_buffer *layer_buffer)
353 tdm_private_display *private_display;
358 private_display = private_layer->private_output->private_display;
360 LIST_DEL(&layer_buffer->link);
361 if (layer_buffer->buffer) {
362 _pthread_mutex_unlock(&private_display->lock);
363 tdm_buffer_unref_backend(layer_buffer->buffer);
364 if (private_layer->buffer_queue)
365 tbm_surface_queue_release(private_layer->buffer_queue, layer_buffer->buffer);
366 _pthread_mutex_lock(&private_display->lock);
372 _tdm_layer_free_all_buffers(tdm_private_layer *private_layer)
374 tdm_private_output *private_output = private_layer->private_output;
375 tdm_private_layer_commit_handler *lm = NULL, *lmm = NULL;
376 struct list_head clone_list;
378 LIST_INITHEAD(&clone_list);
380 _tdm_layer_reset_pending_data(private_layer);
382 if (private_layer->waiting_buffer) {
383 _tdm_layer_free_buffer(private_layer, private_layer->waiting_buffer);
384 private_layer->waiting_buffer = NULL;
386 if (tdm_debug_module & TDM_DEBUG_BUFFER)
387 TDM_INFO("layer(%p) waiting_buffer(%p)",
388 private_layer, private_layer->waiting_buffer);
391 if (private_layer->committed_buffer) {
392 _tdm_layer_free_buffer(private_layer, private_layer->committed_buffer);
393 private_layer->committed_buffer = NULL;
395 if (tdm_debug_module & TDM_DEBUG_BUFFER)
396 TDM_INFO("layer(%p) committed_buffer(%p)",
397 private_layer, private_layer->committed_buffer);
400 if (private_layer->showing_buffer) {
401 _tdm_layer_free_buffer(private_layer, private_layer->showing_buffer);
402 private_layer->showing_buffer = NULL;
404 if (tdm_debug_module & TDM_DEBUG_BUFFER)
405 TDM_INFO("layer(%p) showing_buffer(%p)",
406 private_layer, private_layer->showing_buffer);
409 LIST_FOR_EACH_ENTRY_SAFE(lm, lmm, &private_output->layer_commit_handler_list, link) {
410 if (lm->private_layer != private_layer)
413 LIST_ADDTAIL(&lm->link, &clone_list);
416 LIST_FOR_EACH_ENTRY_SAFE(lm, lmm, &clone_list, link) {
418 tdm_output_remove_commit_handler_internal(private_output, _tdm_layer_cb_output_commit, lm);
419 _tdm_layer_free_buffer(private_layer, lm->committed_buffer);
423 LIST_FOR_EACH_ENTRY_SAFE(lm, lmm, &private_output->pending_commit_handler_list, link) {
424 if (lm->private_layer != private_layer)
427 LIST_ADDTAIL(&lm->link, &clone_list);
430 LIST_FOR_EACH_ENTRY_SAFE(lm, lmm, &clone_list, link) {
432 _tdm_layer_free_buffer(private_layer, lm->committed_buffer);
436 if (private_layer->buffer_queue) {
437 tbm_surface_queue_remove_acquirable_cb(private_layer->buffer_queue, _tbm_layer_queue_acquirable_cb, private_layer);
438 tbm_surface_queue_remove_destroy_cb(private_layer->buffer_queue, _tbm_layer_queue_destroy_cb, private_layer);
439 private_layer->buffer_queue = NULL;
444 tdm_layer_set_buffer_internal(tdm_private_layer *private_layer, tbm_surface_h buffer)
446 tdm_private_backend *private_backend;
447 tdm_private_output *private_output = private_layer->private_output;
448 tdm_func_layer *func_layer;
450 /* LCOV_EXCL_START */
452 if (tdm_dump_enable && !(private_layer->caps.capabilities & TDM_LAYER_CAPABILITY_VIDEO))
453 _tdm_layer_dump_buffer(private_layer, buffer);
455 if (tdm_debug_dump & TDM_DUMP_FLAG_LAYER &&
456 !(private_layer->caps.capabilities & TDM_LAYER_CAPABILITY_VIDEO)) {
457 char str[TDM_PATH_LEN];
459 snprintf(str, TDM_PATH_LEN, "layer_%d_%d_%03d",
460 private_output->index, private_layer->index, i++);
461 tdm_helper_dump_buffer_str(buffer, tdm_debug_dump_dir, str);
465 private_backend = private_layer->private_backend;
466 func_layer = &private_backend->func_layer;
468 if (private_layer->usable)
469 TDM_INFO("layer(%p) not usable", private_layer);
471 private_layer->usable = 0;
473 if (!func_layer->layer_set_buffer) {
474 /* LCOV_EXCL_START */
475 TDM_ERR("not implemented!!");
476 return TDM_ERROR_NOT_IMPLEMENTED;
477 /* LCOV_EXCL_START */
480 private_layer->pending_buffer_changed = 1;
482 if (private_layer->pending_buffer) {
484 if (tdm_debug_module & TDM_DEBUG_BUFFER)
485 TDM_INFO("layer(%p) pending_buffer(%p) skipped",
486 private_layer, private_layer->pending_buffer);
488 if (tdm_ttrace_module & TDM_TTRACE_LAYER) {
489 tbm_bo bo = tbm_surface_internal_get_bo(private_layer->pending_buffer, 0);
490 TDM_TRACE_ASYNC_END((intptr_t)private_layer, "[LAYER] %d", tbm_bo_export(bo));
493 tbm_surface_internal_unref(private_layer->pending_buffer);
496 tbm_surface_internal_ref(buffer);
497 private_layer->pending_buffer = buffer;
499 if (tdm_debug_module & TDM_DEBUG_BUFFER)
500 TDM_INFO("layer(%p) pending_buffer(%p)",
501 private_layer, private_layer->pending_buffer);
503 if (tdm_ttrace_module & TDM_TTRACE_LAYER) {
504 tbm_bo bo = tbm_surface_internal_get_bo(private_layer->pending_buffer, 0);
505 TDM_TRACE_ASYNC_BEGIN((intptr_t)private_layer, "[LAYER] %d", tbm_bo_export(bo));
508 return TDM_ERROR_NONE;
512 tdm_layer_set_buffer(tdm_layer *layer, tbm_surface_h buffer)
516 TDM_RETURN_VAL_IF_FAIL(buffer != NULL, TDM_ERROR_INVALID_PARAMETER);
518 _pthread_mutex_lock(&private_display->lock);
520 ret = tdm_layer_set_buffer_internal(private_layer, buffer);
522 _pthread_mutex_unlock(&private_display->lock);
528 tdm_layer_unset_buffer_internal(tdm_private_layer *private_layer)
530 tdm_private_backend *private_backend;
531 tdm_func_layer *func_layer;
532 tdm_error ret = TDM_ERROR_NONE;
534 private_backend = private_layer->private_backend;
535 func_layer = &private_backend->func_layer;
537 _tdm_layer_free_all_buffers(private_layer);
539 private_layer->usable = 1;
541 if (private_layer->usable)
542 TDM_INFO("layer(%p) now usable", private_layer);
544 if (!func_layer->layer_unset_buffer) {
545 /* LCOV_EXCL_START */
546 TDM_ERR("not implemented!!");
547 return TDM_ERROR_NOT_IMPLEMENTED;
548 /* LCOV_EXCL_START */
551 ret = func_layer->layer_unset_buffer(private_layer->layer_backend);
552 TDM_WARNING_IF_FAIL(ret == TDM_ERROR_NONE);
558 tdm_layer_unset_buffer(tdm_layer *layer)
562 _pthread_mutex_lock(&private_display->lock);
564 ret = tdm_layer_unset_buffer_internal(private_layer);
565 TDM_WARNING_IF_FAIL(ret == TDM_ERROR_NONE);
567 _pthread_mutex_unlock(&private_display->lock);
573 tdm_layer_committed(tdm_private_layer *private_layer, tdm_private_layer_buffer **committed_buffer)
575 tdm_private_output *private_output = private_layer->private_output;
576 tdm_private_display *private_display = private_output->private_display;
578 /* LCOV_EXCL_START */
579 if (private_display->print_fps) {
580 double curr = tdm_helper_get_time();
581 if (private_layer->fps_stamp == 0) {
582 private_layer->fps_stamp = curr;
583 } else if ((curr - private_layer->fps_stamp) > 1.0) {
584 TDM_INFO("output(%d) layer(%d) fps: %d", private_output->index, private_layer->index, private_layer->fps_count);
585 private_layer->fps_count = 0;
586 private_layer->fps_stamp = curr;
588 private_layer->fps_count++;
589 } else if (private_layer->fps_stamp != 0) {
590 private_layer->fps_stamp = 0;
591 private_layer->fps_count = 0;
595 if (private_layer->showing_buffer) {
596 if (tdm_ttrace_module & TDM_TTRACE_LAYER) {
597 tbm_bo bo = tbm_surface_internal_get_bo(private_layer->showing_buffer->buffer, 0);
598 TDM_TRACE_ASYNC_END((intptr_t)private_layer, "[LAYER] %d", tbm_bo_export(bo));
601 _tdm_layer_free_buffer(private_layer, private_layer->showing_buffer);
604 private_layer->showing_buffer = *committed_buffer;
605 *committed_buffer = NULL;
607 if (tdm_debug_module & TDM_DEBUG_BUFFER)
608 TDM_INFO("layer(%p) committed_buffer(%p) showing_buffer(%p)",
609 private_layer, *committed_buffer,
610 (private_layer->showing_buffer) ? private_layer->showing_buffer->buffer : NULL);
614 _tdm_layer_got_output_vblank(tdm_private_output *private_output, unsigned int sequence,
615 unsigned int tv_sec, unsigned int tv_usec)
617 tdm_private_layer_commit_handler *lm = NULL, *lmm = NULL;
618 tdm_private_display *private_display;
619 struct list_head clone_list, pending_clone_list;
620 tdm_error ret = TDM_ERROR_NONE;
622 private_display = private_output->private_display;
624 private_output->layer_waiting_vblank = 0;
626 LIST_INITHEAD(&clone_list);
627 LIST_INITHEAD(&pending_clone_list);
629 LIST_FOR_EACH_ENTRY_SAFE(lm, lmm, &private_output->layer_commit_handler_list, link) {
631 lm->private_layer->committing = 0;
632 LIST_ADDTAIL(&lm->link, &clone_list);
635 LIST_FOR_EACH_ENTRY_SAFE(lm, lmm, &private_output->pending_commit_handler_list, link) {
637 lm->private_layer->committing = 0;
638 LIST_ADDTAIL(&lm->link, &pending_clone_list);
641 LIST_FOR_EACH_ENTRY_SAFE(lm, lmm, &clone_list, link) {
642 if (tdm_debug_module & TDM_DEBUG_COMMIT)
643 TDM_INFO("layer(%p) committed. handle(%p) commited_buffer(%p)",
644 lm->private_layer, lm, (lm->committed_buffer) ? lm->committed_buffer->buffer : NULL);
647 tdm_layer_committed(lm->private_layer, &lm->committed_buffer);
648 _pthread_mutex_unlock(&private_display->lock);
650 lm->func(lm->private_layer, sequence, tv_sec, tv_usec, lm->user_data);
651 _pthread_mutex_lock(&private_display->lock);
652 if (lm->committed_buffer)
653 _tdm_layer_free_buffer(lm->private_layer, lm->committed_buffer);
657 if (LIST_IS_EMPTY(&pending_clone_list))
660 TDM_GOTO_IF_FAIL(private_output->vblank != NULL, wait_failed);
662 ret = tdm_output_commit_internal(private_output, 0, NULL, NULL);
663 TDM_GOTO_IF_FAIL(ret == TDM_ERROR_NONE, wait_failed);
665 if (tdm_debug_module & TDM_DEBUG_COMMIT)
666 TDM_INFO("layer commit: output(%d) commit", private_output->pipe);
668 if (!TDM_OUTPUT_DPMS_VSYNC_IS_OFF(private_output->current_dpms_value)) {
669 /* tdm_vblank APIs is for server. it should be called in unlock status*/
670 if (!private_output->layer_waiting_vblank) {
671 _pthread_mutex_unlock(&private_display->lock);
672 ret = tdm_vblank_wait(private_output->vblank, 0, 0, 1, _tdm_layer_cb_wait_vblank, private_output);
673 _pthread_mutex_lock(&private_display->lock);
674 TDM_GOTO_IF_FAIL(ret == TDM_ERROR_NONE, wait_failed);
675 private_output->layer_waiting_vblank = 1;
679 if (tdm_debug_module & TDM_DEBUG_COMMIT)
680 TDM_INFO("layer commit: output(%d) wait vblank", private_output->pipe);
682 LIST_FOR_EACH_ENTRY_SAFE(lm, lmm, &pending_clone_list, link) {
684 LIST_ADDTAIL(&lm->link, &private_output->layer_commit_handler_list);
687 if (TDM_OUTPUT_DPMS_VSYNC_IS_OFF(private_output->current_dpms_value)) {
688 TDM_WRN("dpms %s. Directly call vblank callback.", tdm_dpms_str(private_output->current_dpms_value));
689 _pthread_mutex_unlock(&private_display->lock);
690 _tdm_layer_cb_wait_vblank(private_output->vblank, 0, 0, 0, 0, private_output);
691 _pthread_mutex_lock(&private_display->lock);
696 /* CAUTION: Once _tdm_layer_commit returns success, the layer commit handler MUST be called always.
697 * That is, even if we get error in _tdm_layer_got_output_vblank() function for some reasons,
698 * the layer commit handler MUST be called.
700 LIST_FOR_EACH_ENTRY_SAFE(lm, lmm, &pending_clone_list, link) {
701 TDM_WRN("Directly call layer commit handlers: ret(%d)\n", ret);
703 _pthread_mutex_unlock(&private_display->lock);
705 lm->func(lm->private_layer, sequence, tv_sec, tv_usec, lm->user_data);
706 _pthread_mutex_lock(&private_display->lock);
707 _tdm_layer_free_buffer(lm->private_layer, lm->committed_buffer);
715 _tdm_layer_cb_output_commit(tdm_output *output, unsigned int sequence,
716 unsigned int tv_sec, unsigned int tv_usec, void *user_data)
718 tdm_private_layer_commit_handler *layer_commit_handler = user_data;
719 tdm_private_layer_commit_handler *lm = NULL;
720 tdm_private_display *private_display;
721 tdm_private_output *private_output = output;
722 tdm_private_layer *private_layer;
725 TDM_RETURN_IF_FAIL(layer_commit_handler != NULL);
727 private_display = private_output->private_display;
729 LIST_FOR_EACH_ENTRY(lm, &private_output->layer_commit_handler_list, link) {
730 if (layer_commit_handler == lm) {
739 LIST_DEL(&layer_commit_handler->link);
741 private_layer = layer_commit_handler->private_layer;
743 private_layer->committing = 0;
745 if (tdm_debug_module & TDM_DEBUG_COMMIT)
746 TDM_INFO("layer(%p) commit: output(%d) committed. handle(%p)",
747 private_layer, private_output->pipe, layer_commit_handler);
749 _pthread_mutex_lock(&private_display->lock);
751 tdm_layer_committed(private_layer, &layer_commit_handler->committed_buffer);
753 if (layer_commit_handler->func) {
754 _pthread_mutex_unlock(&private_display->lock);
755 layer_commit_handler->func(private_output, sequence,
756 tv_sec, tv_usec, layer_commit_handler->user_data);
757 _pthread_mutex_lock(&private_display->lock);
760 free(layer_commit_handler);
762 _pthread_mutex_unlock(&private_display->lock);
766 _tdm_layer_cb_wait_vblank(tdm_vblank *vblank, tdm_error error, unsigned int sequence,
767 unsigned int tv_sec, unsigned int tv_usec, void *user_data)
769 tdm_private_output *private_output = user_data;
770 tdm_private_display *private_display;
772 TDM_RETURN_IF_FAIL(private_output != NULL);
774 private_display = private_output->private_display;
776 _pthread_mutex_lock(&private_display->lock);
778 if (tdm_debug_module & TDM_DEBUG_COMMIT)
779 TDM_INFO("layer commit: output(%d) got vblank", private_output->pipe);
781 _tdm_layer_got_output_vblank(private_output, sequence, tv_sec, tv_usec);
783 _pthread_mutex_unlock(&private_display->lock);
787 _tdm_lauer_get_output_used_layer_count(tdm_private_output *private_output)
789 tdm_private_layer *private_layer = NULL;
790 unsigned int count = 0;
792 LIST_FOR_EACH_ENTRY(private_layer, &private_output->layer_list, link) {
793 if (!private_layer->usable)
800 /* commit_per_vblank == 1: we can commit if
801 * - there is no previous commit request
802 * - only 1 layer is used
803 * commit_per_vblank == 2: we can commit if
804 * - there is no previous commit request
807 _tdm_layer_commit_possible(tdm_private_layer *private_layer)
809 tdm_private_output *private_output = private_layer->private_output;
811 TDM_RETURN_VAL_IF_FAIL(private_output->commit_per_vblank > 0, 1);
813 /* There is a previous commit request which is not done and displayed on screen yet.
814 * We can't commit at this time.
816 if (!LIST_IS_EMPTY(&private_output->layer_commit_handler_list)) {
817 if (tdm_debug_module & TDM_DEBUG_COMMIT)
818 TDM_INFO("layer(%p) commit: not possible(previous commit)", private_layer);
822 if (private_output->commit_per_vblank == 1 && _tdm_lauer_get_output_used_layer_count(private_output) > 1) {
823 if (tdm_debug_module & TDM_DEBUG_COMMIT)
824 TDM_INFO("layer(%p) commit: not possible(more than 2 layers)", private_layer);
828 if (tdm_debug_module & TDM_DEBUG_COMMIT)
829 TDM_INFO("layer(%p) commit: possible", private_layer);
835 _tdm_layer_reset_pending_data(tdm_private_layer *private_layer)
837 private_layer->pending_info_changed = 0;
838 memset(&private_layer->pending_info, 0, sizeof private_layer->pending_info);
840 private_layer->pending_buffer_changed = 0;
841 if (private_layer->pending_buffer) {
842 tbm_surface_internal_unref(private_layer->pending_buffer);
843 private_layer->pending_buffer = NULL;
848 tdm_layer_commit_pending_data(tdm_private_layer *private_layer)
850 tdm_private_backend *private_backend;
851 tdm_func_layer *func_layer;
852 tdm_error ret = TDM_ERROR_NONE;
854 private_backend = private_layer->private_backend;
855 func_layer = &private_backend->func_layer;
857 if (private_layer->pending_info_changed) {
858 ret = func_layer->layer_set_info(private_layer->layer_backend, &private_layer->pending_info);
859 TDM_GOTO_IF_FAIL(ret == TDM_ERROR_NONE, done);
862 if (private_layer->pending_buffer_changed) {
863 tdm_private_layer_buffer *layer_buffer;
865 layer_buffer = calloc(1, sizeof(tdm_private_layer_buffer));
866 TDM_GOTO_IF_FAIL(layer_buffer != NULL, done);
868 LIST_INITHEAD(&layer_buffer->link);
870 ret = func_layer->layer_set_buffer(private_layer->layer_backend, private_layer->pending_buffer);
871 TDM_WARNING_IF_FAIL(ret == TDM_ERROR_NONE);
873 if (tdm_debug_module & TDM_DEBUG_BUFFER)
874 TDM_INFO("layer(%p) pending_buffer(%p) committed",
875 private_layer, private_layer->pending_buffer);
877 if (ret == TDM_ERROR_NONE) {
878 if (private_layer->waiting_buffer)
879 _tdm_layer_free_buffer(private_layer, private_layer->waiting_buffer);
881 private_layer->waiting_buffer = layer_buffer;
882 private_layer->waiting_buffer->buffer = tdm_buffer_ref_backend(private_layer->pending_buffer);
883 if (tdm_debug_module & TDM_DEBUG_BUFFER)
884 TDM_INFO("layer(%p) waiting_buffer(%p)",
885 private_layer, private_layer->waiting_buffer->buffer);
887 _tdm_layer_free_buffer(private_layer, layer_buffer);
891 _tdm_layer_reset_pending_data(private_layer);
896 /* CAUTION: Once _tdm_layer_commit returns success, the layer commit handler MUST be called always.
897 * That is, even if we get error in _tdm_layer_got_output_vblank() function for some reasons,
898 * the layer commit handler MUST be called.
901 _tdm_layer_commit(tdm_layer *layer, tdm_layer_commit_handler func, void *user_data)
903 tdm_private_layer_commit_handler *layer_commit_handler;
906 layer_commit_handler = calloc(1, sizeof(tdm_private_layer_commit_handler));
907 if (!layer_commit_handler) {
908 /* LCOV_EXCL_START */
909 TDM_ERR("failed: alloc memory");
910 return TDM_ERROR_OUT_OF_MEMORY;
914 if (tdm_debug_module & TDM_DEBUG_COMMIT)
915 TDM_INFO("layer(%p) commit: handle(%p)", private_layer, layer_commit_handler);
917 LIST_INITHEAD(&layer_commit_handler->link);
918 layer_commit_handler->private_layer = private_layer;
919 layer_commit_handler->func = func;
920 layer_commit_handler->user_data = user_data;
922 layer_commit_handler->committed_buffer = private_layer->waiting_buffer;
923 private_layer->waiting_buffer = NULL;
925 if (private_layer->committing)
926 TDM_WRN("layer(%d) too many commit", private_layer->index);
928 private_layer->committing = 1;
930 if (tdm_debug_module & TDM_DEBUG_BUFFER)
931 TDM_INFO("layer(%p) waiting_buffer(%p) committed_buffer(%p)",
932 private_layer, private_layer->waiting_buffer,
933 (layer_commit_handler->committed_buffer) ? layer_commit_handler->committed_buffer->buffer : NULL);
935 if (!private_output->commit_per_vblank) {
936 TDM_GOTO_IF_FAIL(private_output->commit_type == TDM_COMMIT_TYPE_OUTPUT, commit_failed);
938 LIST_ADDTAIL(&layer_commit_handler->link, &private_output->layer_commit_handler_list);
939 ret = tdm_output_commit_internal(private_layer->private_output, 0, _tdm_layer_cb_output_commit, layer_commit_handler);
940 TDM_GOTO_IF_FAIL(ret == TDM_ERROR_NONE, commit_failed);
942 if (tdm_debug_module & TDM_DEBUG_COMMIT)
943 TDM_INFO("layer(%p) commit: no commit-per-vblank", private_layer);
945 TDM_GOTO_IF_FAIL(private_output->commit_type == TDM_COMMIT_TYPE_LAYER, commit_failed);
947 if (_tdm_layer_commit_possible(private_layer)) {
948 /* add to layer_commit_handler_list */
949 LIST_ADDTAIL(&layer_commit_handler->link, &private_output->layer_commit_handler_list);
950 ret = tdm_output_commit_internal(private_layer->private_output, 0, NULL, NULL);
951 TDM_GOTO_IF_FAIL(ret == TDM_ERROR_NONE, commit_failed);
953 if (tdm_debug_module & TDM_DEBUG_COMMIT)
954 TDM_INFO("layer(%p) commit: output", private_layer);
956 /* add to pending_commit_handler_list. It will be commited when a vblank occurs */
957 LIST_ADDTAIL(&layer_commit_handler->link, &private_output->pending_commit_handler_list);
959 if (tdm_debug_module & TDM_DEBUG_COMMIT)
960 TDM_INFO("layer(%p) commit: pending", private_layer);
963 if (!private_output->vblank) {
964 /* tdm_vblank APIs is for server. it should be called in unlock status*/
965 _pthread_mutex_unlock(&private_display->lock);
966 private_output->vblank = tdm_vblank_create(private_display, private_output, NULL);
967 _pthread_mutex_lock(&private_display->lock);
968 TDM_GOTO_IF_FAIL(private_output->vblank != NULL, commit_failed);
971 if (!private_output->layer_waiting_vblank) {
972 /* tdm_vblank APIs is for server. it should be called in unlock status*/
973 _pthread_mutex_unlock(&private_display->lock);
974 ret = tdm_vblank_wait(private_output->vblank, 0, 0, 1, _tdm_layer_cb_wait_vblank, private_output);
975 _pthread_mutex_lock(&private_display->lock);
976 TDM_GOTO_IF_FAIL(ret == TDM_ERROR_NONE, commit_failed);
977 private_output->layer_waiting_vblank = 1;
979 if (tdm_debug_module & TDM_DEBUG_COMMIT)
980 TDM_INFO("layer(%p) commit: wait vblank", private_layer);
987 if (layer_commit_handler) {
988 private_layer->waiting_buffer = layer_commit_handler->committed_buffer;
989 LIST_DEL(&layer_commit_handler->link);
990 free(layer_commit_handler);
996 tdm_layer_commit_internal(tdm_private_layer *private_layer, tdm_layer_commit_handler func, void *user_data)
998 tdm_private_output *private_output = private_layer->private_output;
999 tdm_error ret = TDM_ERROR_NONE; /* default TDM_ERROR_NONE */
1001 if (private_output->commit_type == TDM_COMMIT_TYPE_NONE) {
1002 if (!private_output->commit_per_vblank)
1003 private_output->commit_type = TDM_COMMIT_TYPE_OUTPUT;
1005 private_output->commit_type = TDM_COMMIT_TYPE_LAYER;
1008 if (TDM_OUTPUT_DPMS_VSYNC_IS_OFF(private_output->current_dpms_value)) {
1009 TDM_ERR("layer(%p)'s output(%d) dpms: %s", private_layer, private_output->pipe,
1010 tdm_dpms_str(private_output->current_dpms_value));
1011 return TDM_ERROR_DPMS_OFF;
1014 /* don't call this inside of _tdm_layer_commit */
1015 ret = tdm_layer_commit_pending_data(private_layer);
1016 if (ret != TDM_ERROR_NONE) {
1017 TDM_ERR("layer(%p) committing pending data failed", private_layer);
1021 ret = _tdm_layer_commit(private_layer, func, user_data);
1027 tdm_layer_commit(tdm_layer *layer, tdm_layer_commit_handler func, void *user_data)
1031 _pthread_mutex_lock(&private_display->lock);
1033 ret = tdm_layer_commit_internal(private_layer, func, user_data);
1035 _pthread_mutex_unlock(&private_display->lock);
1041 tdm_layer_is_committing(tdm_layer *layer, unsigned int *committing)
1045 TDM_RETURN_VAL_IF_FAIL(committing != NULL, TDM_ERROR_INVALID_PARAMETER);
1047 _pthread_mutex_lock(&private_display->lock);
1049 *committing = private_layer->committing;
1051 _pthread_mutex_unlock(&private_display->lock);
/* Remove every commit handler registered with the given (func, user_data)
 * pair from both the active and the pending commit-handler lists of the
 * layer's output, unregistering the matching output-level handler and
 * freeing the committed buffer each time. Requires the display mutex held.
 * NOTE(review): private_layer is dereferenced at line 1060 BEFORE the
 * NULL check at line 1066, so that TDM_RETURN_IF_FAIL can never trigger —
 * the check should precede the dereference. Flagged only, not fixed, as
 * this listing elides lines. */
1057 tdm_layer_remove_commit_handler_internal(tdm_layer *layer, tdm_layer_commit_handler func, void *user_data)
1059 tdm_private_layer *private_layer = (tdm_private_layer*)layer;
1060 tdm_private_output *private_output = private_layer->private_output;
1061 tdm_private_layer_commit_handler *lm = NULL, *lmm = NULL;
/* Nothing to match against: both filter values are empty. */
1063 if (!func && !user_data)
1066 TDM_RETURN_IF_FAIL(private_layer != NULL);
1067 TDM_RETURN_IF_FAIL(TDM_MUTEX_IS_LOCKED());
/* Pass 1: handlers already attached to the output's commit list. */
1069 LIST_FOR_EACH_ENTRY_SAFE(lm, lmm, &private_output->layer_commit_handler_list, link) {
1070 if (lm->func == func && lm->user_data == user_data) {
1071 LIST_DEL(&lm->link);
1072 tdm_output_remove_commit_handler_internal(private_output, _tdm_layer_cb_output_commit, lm);
1073 _tdm_layer_free_buffer(private_layer, lm->committed_buffer);
/* Pass 2: handlers still waiting on the pending list. */
1079 LIST_FOR_EACH_ENTRY_SAFE(lm, lmm, &private_output->pending_commit_handler_list, link) {
1080 if (lm->func == func && lm->user_data == user_data) {
1081 LIST_DEL(&lm->link);
1082 tdm_output_remove_commit_handler_internal(private_output, _tdm_layer_cb_output_commit, lm);
1083 _tdm_layer_free_buffer(private_layer, lm->committed_buffer);
/* Public API: locked wrapper around
 * tdm_layer_remove_commit_handler_internal(). */
1091 tdm_layer_remove_commit_handler(tdm_layer *layer, tdm_layer_commit_handler func, void *user_data)
1095 _pthread_mutex_lock(&private_display->lock);
1097 tdm_layer_remove_commit_handler_internal(layer, func, user_data);
1099 _pthread_mutex_unlock(&private_display->lock);
/* Public API: return the tbm buffer currently being displayed on this
 * layer, or log (at debug level) when nothing is showing.
 * NOTE(review): in the visible lines the store to *error at 1113 is not
 * guarded by an "if (error)" — presumably the guard sits on an elided
 * line, since 'error' is documented as optional elsewhere in TDM; confirm
 * against the full source. */
1104 EXTERN tbm_surface_h
1105 tdm_layer_get_displaying_buffer(tdm_layer *layer, tdm_error *error)
1107 tbm_surface_h buffer;
1108 LAYER_FUNC_ENTRY_ERROR();
1110 _pthread_mutex_lock(&private_display->lock);
1113 *error = TDM_ERROR_NONE;
/* Showing buffer present: hand back the underlying tbm surface. */
1115 if (private_layer->showing_buffer) {
1116 buffer = private_layer->showing_buffer->buffer;
1118 _pthread_mutex_unlock(&private_display->lock);
1119 TDM_DBG("layer(%p) showing_buffer is null", private_layer);
1122 _pthread_mutex_unlock(&private_display->lock);
/* tbm_surface_queue "acquirable" callback: when a new buffer becomes
 * available in the queue attached to this layer, acquire it, set it as
 * the layer's buffer and commit the layer — all under the display lock.
 * 'data' carries the tdm_layer handle registered in
 * tdm_layer_set_buffer_queue(). */
1128 _tbm_layer_queue_acquirable_cb(tbm_surface_queue_h surface_queue, void *data)
1130 TDM_RETURN_IF_FAIL(data != NULL);
1131 tdm_layer *layer = data;
1132 tdm_private_backend *private_backend;
1133 tdm_func_layer *func_layer;
1134 tbm_surface_h buffer = NULL;
1135 tdm_private_layer_buffer *layer_buffer;
1136 LAYER_FUNC_ENTRY_VOID_RETURN();
1138 _pthread_mutex_lock(&private_display->lock);
1140 private_backend = private_layer->private_backend;
1141 func_layer = &private_backend->func_layer;
/* Backend without layer_set_buffer cannot display queue buffers; bail. */
1143 if (!func_layer->layer_set_buffer) {
1144 /* LCOV_EXCL_START */
1145 _pthread_mutex_unlock(&private_display->lock);
1147 /* LCOV_EXCL_STOP */
1150 layer_buffer = calloc(1, sizeof(tdm_private_layer_buffer));
1151 if (!layer_buffer) {
1152 /* LCOV_EXCL_START */
1153 _pthread_mutex_unlock(&private_display->lock);
1154 TDM_ERR("alloc failed");
1156 /* LCOV_EXCL_STOP */
1158 LIST_INITHEAD(&layer_buffer->link);
/* Pull the ready buffer out of the queue. */
1160 if (TBM_SURFACE_QUEUE_ERROR_NONE != tbm_surface_queue_acquire(private_layer->buffer_queue, &buffer) ||
1162 /* LCOV_EXCL_START */
1163 TDM_ERR("layer(%p) tbm_surface_queue_acquire() failed surface:%p",
1164 private_layer, buffer);
1165 _pthread_mutex_unlock(&private_display->lock);
/* NOTE(review): no free(layer_buffer) is visible on this failure path —
 * possible leak unless it happens on an elided line; verify. */
1168 /* LCOV_EXCL_STOP */
1171 ret = tdm_layer_set_buffer_internal(private_layer, buffer);
1172 if (ret != TDM_ERROR_NONE) {
1173 TDM_ERR("tdm_layer_set_buffer_internal failed");
1174 _pthread_mutex_unlock(&private_display->lock);
/* Buffer attached; push it to the display. */
1178 ret = tdm_layer_commit_internal(private_layer, NULL, NULL);
1179 if (ret != TDM_ERROR_NONE) {
1180 TDM_ERR("tdm_layer_commit_internal failed");
1181 _pthread_mutex_unlock(&private_display->lock);
1185 _pthread_mutex_unlock(&private_display->lock);
/* tbm_surface_queue destroy callback: the queue attached via
 * tdm_layer_set_buffer_queue() is going away, so drop our reference to it
 * and release every buffer the layer still holds. */
1189 _tbm_layer_queue_destroy_cb(tbm_surface_queue_h surface_queue, void *data)
1191 TDM_RETURN_IF_FAIL(data != NULL);
1192 tdm_layer *layer = data;
1193 LAYER_FUNC_ENTRY_VOID_RETURN();
1194 TDM_RETURN_IF_FAIL(ret == TDM_ERROR_NONE);
1196 _pthread_mutex_lock(&private_display->lock);
/* Queue is being destroyed: forget it before anyone dereferences it. */
1198 private_layer->buffer_queue = NULL;
1200 _tdm_layer_free_all_buffers(private_layer);
1202 _pthread_mutex_unlock(&private_display->lock);
/* Public API: attach a tbm_surface_queue to this layer. Once attached,
 * buffers released into the queue are automatically acquired, set and
 * committed by _tbm_layer_queue_acquirable_cb(); the queue's destruction
 * is tracked via _tbm_layer_queue_destroy_cb().
 * Marks the layer as no longer "usable" (claimed by this client). */
1206 tdm_layer_set_buffer_queue(tdm_layer *layer, tbm_surface_queue_h buffer_queue)
1208 tdm_private_backend *private_backend;
1209 tdm_func_layer *func_layer;
1212 TDM_RETURN_VAL_IF_FAIL(buffer_queue != NULL, TDM_ERROR_INVALID_PARAMETER);
1214 _pthread_mutex_lock(&private_display->lock);
1216 private_backend = private_layer->private_backend;
1217 func_layer = &private_backend->func_layer;
/* Layer was free until now; log that it is being claimed (becoming
 * not-usable) before clearing the flag below. */
1219 if (private_layer->usable)
1220 TDM_INFO("layer(%p) not usable", private_layer);
1222 private_layer->usable = 0;
1224 if (!func_layer->layer_set_buffer) {
1225 /* LCOV_EXCL_START */
1226 _pthread_mutex_unlock(&private_display->lock);
1227 TDM_ERR("not implemented!!");
1228 return TDM_ERROR_NOT_IMPLEMENTED;
1229 /* LCOV_EXCL_STOP */
/* Same queue already attached: nothing to do.
 * NOTE(review): 'usable' has already been cleared above even on this
 * no-op path — confirm that is intentional. */
1232 if (buffer_queue == private_layer->buffer_queue) {
1233 _pthread_mutex_unlock(&private_display->lock);
1234 return TDM_ERROR_NONE;
/* Drop any buffer still waiting from a previous queue/set_buffer. */
1237 if (private_layer->waiting_buffer) {
1238 _tdm_layer_free_buffer(private_layer, private_layer->waiting_buffer);
1239 private_layer->waiting_buffer = NULL;
1241 if (tdm_debug_module & TDM_DEBUG_BUFFER)
1242 TDM_INFO("layer(%p) waiting_buffer(%p)",
1243 private_layer, private_layer->waiting_buffer);
/* Adopt the queue and hook its acquirable/destroy notifications. */
1246 private_layer->buffer_queue = buffer_queue;
1247 tbm_surface_queue_add_acquirable_cb(private_layer->buffer_queue,
1248 _tbm_layer_queue_acquirable_cb,
1250 tbm_surface_queue_add_destroy_cb(private_layer->buffer_queue,
1251 _tbm_layer_queue_destroy_cb,
1253 _pthread_mutex_unlock(&private_display->lock);
/* Public API: detach the buffer queue from this layer.
 * Thin alias — unsetting the queue is implemented as unsetting the
 * layer's buffer. */
1259 tdm_layer_unset_buffer_queue(tdm_layer *layer)
1261 return tdm_layer_unset_buffer(layer);
/* Public API: report whether this layer is still unclaimed ("usable").
 * Simple locked read of private_layer->usable into *usable. */
1265 tdm_layer_is_usable(tdm_layer *layer, unsigned int *usable)
1269 TDM_RETURN_VAL_IF_FAIL(usable != NULL, TDM_ERROR_INVALID_PARAMETER);
1271 _pthread_mutex_lock(&private_display->lock);
1273 *usable = private_layer->usable;
1275 _pthread_mutex_unlock(&private_display->lock);
/* Public API: set the z-position of a VIDEO-capable layer via the
 * backend's layer_set_video_pos hook.
 * Returns TDM_ERROR_BAD_REQUEST for non-video layers and
 * TDM_ERROR_NOT_IMPLEMENTED when the backend lacks the hook. */
1281 tdm_layer_set_video_pos(tdm_layer *layer, int zpos)
1283 tdm_private_backend *private_backend;
1284 tdm_func_layer *func_layer;
1287 _pthread_mutex_lock(&private_display->lock);
1289 private_backend = private_layer->private_backend;
1290 func_layer = &private_backend->func_layer;
/* Video z-position only makes sense on a layer with the VIDEO capability. */
1292 if (!(private_layer->caps.capabilities & TDM_LAYER_CAPABILITY_VIDEO)) {
1293 TDM_ERR("layer(%p) is not video layer", private_layer);
1294 _pthread_mutex_unlock(&private_display->lock);
1295 return TDM_ERROR_BAD_REQUEST;
1298 if (!func_layer->layer_set_video_pos) {
1299 /* LCOV_EXCL_START */
1300 _pthread_mutex_unlock(&private_display->lock);
1301 TDM_ERR("not implemented!!");
1302 return TDM_ERROR_NOT_IMPLEMENTED;
1303 /* LCOV_EXCL_STOP */
/* Delegate to the backend implementation. */
1306 ret = func_layer->layer_set_video_pos(private_layer->layer_backend, zpos);
1308 _pthread_mutex_unlock(&private_display->lock);
/* Public API: create a capture object bound to this layer.
 * Locked delegation to tdm_capture_create_layer_internal(); 'error'
 * (optional) receives the failure reason from the internal call. */
1313 EXTERN tdm_capture *
1314 tdm_layer_create_capture(tdm_layer *layer, tdm_error *error)
1316 tdm_capture *capture = NULL;
1318 LAYER_FUNC_ENTRY_ERROR();
1320 _pthread_mutex_lock(&private_display->lock);
1322 capture = (tdm_capture *)tdm_capture_create_layer_internal(private_layer, error);
1324 _pthread_mutex_unlock(&private_display->lock);
1330 tdm_layer_get_buffer_flags(tdm_layer *layer, unsigned int *flags)
1332 tdm_private_backend *private_backend;
1333 tdm_func_layer *func_layer;
1335 TDM_RETURN_VAL_IF_FAIL(flags != NULL, TDM_ERROR_INVALID_PARAMETER);
1337 _pthread_mutex_lock(&private_display->lock);
1339 private_backend = private_layer->private_backend;
1340 func_layer = &private_backend->func_layer;
1342 if (!func_layer->layer_get_buffer_flags) {
1343 /* LCOV_EXCL_START */
1345 _pthread_mutex_unlock(&private_display->lock);
1346 TDM_ERR("not implemented!!");
1347 return TDM_ERROR_NOT_IMPLEMENTED;
1348 /* LCOV_EXCL_STOP */
1351 ret = func_layer->layer_get_buffer_flags(private_layer->layer_backend, flags);
1353 _pthread_mutex_unlock(&private_display->lock);