1 /**************************************************************************
5 * Copyright 2015 Samsung Electronics co., Ltd. All Rights Reserved.
7 * Contact: Eunchul Kim <chulspro.kim@samsung.com>,
8 * JinYoung Jeon <jy0.jeon@samsung.com>,
9 * Taeheon Kim <th908.kim@samsung.com>,
10 * YoungJun Cho <yj44.cho@samsung.com>,
11 * SooChan Lim <sc1.lim@samsung.com>,
12 * Boram Park <sc1.lim@samsung.com>
14 * Permission is hereby granted, free of charge, to any person obtaining a
15 * copy of this software and associated documentation files (the
16 * "Software"), to deal in the Software without restriction, including
17 * without limitation the rights to use, copy, modify, merge, publish,
18 * distribute, sub license, and/or sell copies of the Software, and to
19 * permit persons to whom the Software is furnished to do so, subject to
20 * the following conditions:
22 * The above copyright notice and this permission notice (including the
23 * next paragraph) shall be included in all copies or substantial portions
26 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
27 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
28 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
29 * IN NO EVENT SHALL PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR
30 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
31 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
32 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
34 **************************************************************************/
41 #include "tdm_backend.h"
42 #include "tdm_private.h"
43 #include "tdm_helper.h"
/* Common prologue for public layer APIs that return tdm_error:
 * validates the opaque `layer` handle, then resolves the private
 * layer/output/display triple and declares a default-success `ret`. */
#define LAYER_FUNC_ENTRY() \
	tdm_private_display *private_display; \
	tdm_private_output *private_output; \
	tdm_private_layer *private_layer; \
	tdm_error ret = TDM_ERROR_NONE; /* default TDM_ERROR_NONE */\
	TDM_RETURN_VAL_IF_FAIL(layer != NULL, TDM_ERROR_INVALID_PARAMETER); \
	private_layer = (tdm_private_layer*)layer; \
	private_output = private_layer->private_output; \
	private_display = private_output->private_display
/* Variant of LAYER_FUNC_ENTRY for APIs that return a pointer/value and
 * report failure via an out-parameter: on a NULL handle it returns NULL
 * after setting the error to TDM_ERROR_INVALID_PARAMETER. */
#define LAYER_FUNC_ENTRY_ERROR() \
	tdm_private_display *private_display; \
	tdm_private_output *private_output; \
	tdm_private_layer *private_layer; \
	tdm_error ret = TDM_ERROR_NONE; /* default TDM_ERROR_NONE */\
	TDM_RETURN_VAL_IF_FAIL_WITH_ERROR(layer != NULL, TDM_ERROR_INVALID_PARAMETER, NULL); \
	private_layer = (tdm_private_layer*)layer; \
	private_output = private_layer->private_output; \
	private_display = private_output->private_display
/* Variant of LAYER_FUNC_ENTRY for void-returning functions (callbacks):
 * simply returns on a NULL handle. */
#define LAYER_FUNC_ENTRY_VOID_RETURN() \
	tdm_private_display *private_display; \
	tdm_private_output *private_output; \
	tdm_private_layer *private_layer; \
	tdm_error ret = TDM_ERROR_NONE; /* default TDM_ERROR_NONE */\
	TDM_RETURN_IF_FAIL(layer != NULL); \
	private_layer = (tdm_private_layer*)layer; \
	private_output = private_layer->private_output; \
	private_display = private_output->private_display
77 static void _tdm_layer_free_buffer(tdm_private_layer *private_layer, tdm_private_layer_buffer *layer_buffer);
78 static void _tdm_layer_cb_wait_vblank(tdm_vblank *vblank, tdm_error error, unsigned int sequence,
79 unsigned int tv_sec, unsigned int tv_usec, void *user_data);
80 static void _tbm_layer_queue_acquirable_cb(tbm_surface_queue_h surface_queue, void *data);
81 static void _tbm_layer_queue_destroy_cb(tbm_surface_queue_h surface_queue, void *data);
82 static void _tdm_layer_cb_output_commit(tdm_output *output, unsigned int sequence,
83 unsigned int tv_sec, unsigned int tv_usec, void *user_data);
84 static void _tdm_layer_reset_pending_data(tdm_private_layer *private_layer);
/* Return the capability flags of @layer via @capabilities.
 * Reads the caps snapshot held in private_layer (presumably filled when
 * the layer was created — not visible in this chunk). */
tdm_layer_get_capabilities(tdm_layer *layer, tdm_layer_capability *capabilities)
	TDM_RETURN_VAL_IF_FAIL(capabilities != NULL, TDM_ERROR_INVALID_PARAMETER);
	_pthread_mutex_lock(&private_display->lock);
	*capabilities = private_layer->caps.capabilities;
	_pthread_mutex_unlock(&private_display->lock);
/* Expose the layer's supported pixel formats.
 * @formats receives a pointer to the internal caps array (caller must
 * NOT free or modify it); @count receives the number of entries. */
tdm_layer_get_available_formats(tdm_layer *layer, const tbm_format **formats, int *count)
	TDM_RETURN_VAL_IF_FAIL(formats != NULL, TDM_ERROR_INVALID_PARAMETER);
	TDM_RETURN_VAL_IF_FAIL(count != NULL, TDM_ERROR_INVALID_PARAMETER);
	_pthread_mutex_lock(&private_display->lock);
	*formats = (const tbm_format *)private_layer->caps.formats;
	*count = private_layer->caps.format_count;
	_pthread_mutex_unlock(&private_display->lock);
/* Expose the layer's backend-defined properties.
 * Like the formats getter, returns pointers into the cached caps data;
 * the caller only borrows them. */
tdm_layer_get_available_properties(tdm_layer *layer, const tdm_prop **props, int *count)
	TDM_RETURN_VAL_IF_FAIL(props != NULL, TDM_ERROR_INVALID_PARAMETER);
	TDM_RETURN_VAL_IF_FAIL(count != NULL, TDM_ERROR_INVALID_PARAMETER);
	_pthread_mutex_lock(&private_display->lock);
	*props = (const tdm_prop *)private_layer->caps.props;
	*count = private_layer->caps.prop_count;
	_pthread_mutex_unlock(&private_display->lock);
/* Return the layer's stacking position (z-order) from the caps cache. */
tdm_layer_get_zpos(tdm_layer *layer, int *zpos)
	TDM_RETURN_VAL_IF_FAIL(zpos != NULL, TDM_ERROR_INVALID_PARAMETER);
	_pthread_mutex_lock(&private_display->lock);
	*zpos = private_layer->caps.zpos;
	_pthread_mutex_unlock(&private_display->lock);
/* Forward a property change (id/value) to the backend's layer_set_property().
 * Also marks the layer as claimed (usable = 0); the INFO log fires only on
 * the usable -> in-use transition. Returns TDM_ERROR_NOT_IMPLEMENTED when
 * the backend does not provide the hook. */
tdm_layer_set_property(tdm_layer *layer, unsigned int id, tdm_value value)
	tdm_func_layer *func_layer;
	_pthread_mutex_lock(&private_display->lock);
	func_layer = &private_display->func_layer;
	if (private_layer->usable)
		TDM_INFO("layer(%d) not usable", private_layer->index);
	private_layer->usable = 0;
	if (!func_layer->layer_set_property) {
		/* LCOV_EXCL_START */
		_pthread_mutex_unlock(&private_display->lock);
		TDM_ERR("not implemented!!");
		return TDM_ERROR_NOT_IMPLEMENTED;
	/* delegate to the backend module */
	ret = func_layer->layer_set_property(private_layer->layer_backend, id, value);
	_pthread_mutex_unlock(&private_display->lock);
/* Query a backend property value by id. Pure read: unlike the setter it
 * does not touch the layer's `usable` state. */
tdm_layer_get_property(tdm_layer *layer, unsigned int id, tdm_value *value)
	tdm_func_layer *func_layer;
	TDM_RETURN_VAL_IF_FAIL(value != NULL, TDM_ERROR_INVALID_PARAMETER);
	_pthread_mutex_lock(&private_display->lock);
	func_layer = &private_display->func_layer;
	if (!func_layer->layer_get_property) {
		/* LCOV_EXCL_START */
		_pthread_mutex_unlock(&private_display->lock);
		TDM_ERR("not implemented!!");
		return TDM_ERROR_NOT_IMPLEMENTED;
	ret = func_layer->layer_get_property(private_layer->layer_backend, id, value);
	_pthread_mutex_unlock(&private_display->lock);
/* Stage new geometry/format info for the layer. The info is NOT pushed to
 * the backend here; it is recorded as pending and applied later by
 * tdm_layer_commit_pending_data(). Marks the layer as claimed. */
tdm_layer_set_info(tdm_layer *layer, tdm_info_layer *info)
	tdm_func_layer *func_layer;
	TDM_RETURN_VAL_IF_FAIL(info != NULL, TDM_ERROR_INVALID_PARAMETER);
	_pthread_mutex_lock(&private_display->lock);
	func_layer = &private_display->func_layer;
	if (private_layer->usable)
		TDM_INFO("layer(%p) not usable", private_layer);
	private_layer->usable = 0;
	if (!func_layer->layer_set_info) {
		/* LCOV_EXCL_START */
		_pthread_mutex_unlock(&private_display->lock);
		TDM_ERR("not implemented!!");
		return TDM_ERROR_NOT_IMPLEMENTED;
	/* render the fourcc format as a printable 4-char string for the log
	 * (fmtstr is declared on a line elided from this chunk) */
	if (info->src_config.format)
		snprintf(fmtstr, 128, "%c%c%c%c", FOURCC_STR(info->src_config.format));
		snprintf(fmtstr, 128, "NONE");
	TDM_INFO("layer(%p) info: src(%ux%u %u,%u %ux%u %s) dst(%u,%u %ux%u) trans(%d)",
			 private_layer, info->src_config.size.h, info->src_config.size.v,
			 info->src_config.pos.x, info->src_config.pos.y,
			 info->src_config.pos.w, info->src_config.pos.h,
			 info->dst_pos.x, info->dst_pos.y,
			 info->dst_pos.w, info->dst_pos.h,
	/* remember the request; applied on the next commit */
	private_layer->pending_info_changed = 1;
	private_layer->pending_info = *info;
	_pthread_mutex_unlock(&private_display->lock);
/* Fetch the layer's current (applied) info straight from the backend.
 * Note: this returns what the backend has, not the pending_info staged
 * by tdm_layer_set_info(). */
tdm_layer_get_info(tdm_layer *layer, tdm_info_layer *info)
	tdm_func_layer *func_layer;
	TDM_RETURN_VAL_IF_FAIL(info != NULL, TDM_ERROR_INVALID_PARAMETER);
	_pthread_mutex_lock(&private_display->lock);
	func_layer = &private_display->func_layer;
	if (!func_layer->layer_get_info) {
		/* LCOV_EXCL_START */
		_pthread_mutex_unlock(&private_display->lock);
		TDM_ERR("not implemented!!");
		return TDM_ERROR_NOT_IMPLEMENTED;
	ret = func_layer->layer_get_info(private_layer->layer_backend, info);
	_pthread_mutex_unlock(&private_display->lock);
/* LCOV_EXCL_START */
/* Debug helper: dump @buffer to a file named after the output pipe, this
 * layer's zpos, and the buffers currently showing on every layer of the
 * output (so dumps can be correlated across layers).
 * `p`/`remain` for TDM_SNPRINTF are set up on lines elided here. */
_tdm_layer_dump_buffer(tdm_layer *layer, tbm_surface_h buffer)
	tdm_private_layer *private_layer = (tdm_private_layer*)layer;
	tdm_private_output *private_output = private_layer->private_output;
	char fname[PATH_MAX], bufs[PATH_MAX];
	int zpos, len = PATH_MAX;
	tdm_private_layer *l = NULL;
	pipe = private_output->pipe;
	zpos = private_layer->caps.zpos;
	/* append the showing buffer pointer of each sibling layer to the name */
	LIST_FOR_EACH_ENTRY(l, &private_output->layer_list, link) {
		if (!l->showing_buffer)
		TDM_SNPRINTF(p, remain, "_%p", l->showing_buffer->buffer);
	snprintf(fname, sizeof(fname), "tdm_%d_lyr_%d%s", pipe, zpos, bufs);
	tbm_surface_internal_dump_buffer(buffer, fname);
	TDM_DBG("%s dump excute", fname);
/* Unlink @layer_buffer and drop its buffer reference.
 * Called with the display lock held: the lock is released around
 * tdm_buffer_unref_backend()/tbm_surface_queue_release() because those can
 * re-enter TDM user callbacks — callers must not rely on list state
 * surviving this call unchanged. */
_tdm_layer_free_buffer(tdm_private_layer *private_layer, tdm_private_layer_buffer *layer_buffer)
	tdm_private_display *private_display;
	private_display = private_layer->private_output->private_display;
	LIST_DEL(&layer_buffer->link);
	if (layer_buffer->buffer) {
		_pthread_mutex_unlock(&private_display->lock);
		tdm_buffer_unref_backend(layer_buffer->buffer);
		/* hand the surface back to its queue when the layer uses one */
		if (private_layer->buffer_queue)
			tbm_surface_queue_release(private_layer->buffer_queue, layer_buffer->buffer);
		_pthread_mutex_lock(&private_display->lock);
/* Release every buffer the layer holds in any stage of the pipeline
 * (pending, waiting, committed, showing), cancel this layer's outstanding
 * commit handlers on the output, and detach from the tbm surface queue.
 * Expected to run with the display lock held. */
_tdm_layer_free_all_buffers(tdm_private_layer *private_layer)
	tdm_private_output *private_output = private_layer->private_output;
	tdm_private_layer_commit_handler *lm = NULL, *lmm = NULL;
	struct list_head clone_list;
	LIST_INITHEAD(&clone_list);
	/* drop not-yet-committed pending info/buffer first */
	_tdm_layer_reset_pending_data(private_layer);
	if (private_layer->waiting_buffer) {
		_tdm_layer_free_buffer(private_layer, private_layer->waiting_buffer);
		private_layer->waiting_buffer = NULL;
	/* NOTE: logs after the pointer was nulled, so it prints NULL by design(?) */
	if (tdm_debug_module & TDM_DEBUG_BUFFER)
		TDM_INFO("layer(%p) waiting_buffer(%p)",
				 private_layer, private_layer->waiting_buffer);
	if (private_layer->committed_buffer) {
		_tdm_layer_free_buffer(private_layer, private_layer->committed_buffer);
		private_layer->committed_buffer = NULL;
	if (tdm_debug_module & TDM_DEBUG_BUFFER)
		TDM_INFO("layer(%p) committed_buffer(%p)",
				 private_layer, private_layer->committed_buffer);
	if (private_layer->showing_buffer) {
		_tdm_layer_free_buffer(private_layer, private_layer->showing_buffer);
		private_layer->showing_buffer = NULL;
	if (tdm_debug_module & TDM_DEBUG_BUFFER)
		TDM_INFO("layer(%p) showing_buffer(%p)",
				 private_layer, private_layer->showing_buffer);
	/* move this layer's handlers to a private list first: freeing a buffer
	 * drops the display lock, so iterating the shared list directly while
	 * freeing would not be safe */
	LIST_FOR_EACH_ENTRY_SAFE(lm, lmm, &private_output->layer_commit_handler_list, link) {
		if (lm->private_layer != private_layer)
		LIST_ADDTAIL(&lm->link, &clone_list);
	LIST_FOR_EACH_ENTRY_SAFE(lm, lmm, &clone_list, link) {
		tdm_output_remove_commit_handler_internal(private_output, _tdm_layer_cb_output_commit, lm);
		_tdm_layer_free_buffer(private_layer, lm->committed_buffer);
	/* same treatment for commits still pending a vblank */
	LIST_FOR_EACH_ENTRY_SAFE(lm, lmm, &private_output->pending_commit_handler_list, link) {
		if (lm->private_layer != private_layer)
		LIST_ADDTAIL(&lm->link, &clone_list);
	LIST_FOR_EACH_ENTRY_SAFE(lm, lmm, &clone_list, link) {
		_tdm_layer_free_buffer(private_layer, lm->committed_buffer);
	/* stop listening to the queue; it will no longer feed this layer */
	if (private_layer->buffer_queue) {
		tbm_surface_queue_remove_acquirable_cb(private_layer->buffer_queue, _tbm_layer_queue_acquirable_cb, private_layer);
		tbm_surface_queue_remove_destroy_cb(private_layer->buffer_queue, _tbm_layer_queue_destroy_cb, private_layer);
		private_layer->buffer_queue = NULL;
/* Stage @buffer as the layer's next scanout buffer. The buffer is only
 * recorded as pending (with a tbm ref held); it reaches the backend when
 * tdm_layer_commit() flushes pending data. A previously staged, never
 * committed pending buffer is silently replaced (and unref'd). */
tdm_layer_set_buffer(tdm_layer *layer, tbm_surface_h buffer)
	tdm_func_layer *func_layer;
	TDM_RETURN_VAL_IF_FAIL(buffer != NULL, TDM_ERROR_INVALID_PARAMETER);
	_pthread_mutex_lock(&private_display->lock);
	/* LCOV_EXCL_START */
	/* debug dump paths; skipped for video layers */
	if (tdm_dump_enable && !(private_layer->caps.capabilities & TDM_LAYER_CAPABILITY_VIDEO))
		_tdm_layer_dump_buffer(private_layer, buffer);
	if (tdm_debug_dump & TDM_DUMP_FLAG_LAYER &&
		!(private_layer->caps.capabilities & TDM_LAYER_CAPABILITY_VIDEO)) {
		char str[TDM_PATH_LEN];
		/* `i` is a dump sequence counter declared on an elided line */
		snprintf(str, TDM_PATH_LEN, "layer_%d_%d_%03d",
				 private_output->index, private_layer->index, i++);
		tdm_helper_dump_buffer_str(buffer, tdm_debug_dump_dir, str);
	func_layer = &private_display->func_layer;
	if (private_layer->usable)
		TDM_INFO("layer(%p) not usable", private_layer);
	private_layer->usable = 0;
	if (!func_layer->layer_set_buffer) {
		/* LCOV_EXCL_START */
		_pthread_mutex_unlock(&private_display->lock);
		TDM_ERR("not implemented!!");
		return TDM_ERROR_NOT_IMPLEMENTED;
		/* LCOV_EXCL_STOP */
	private_layer->pending_buffer_changed = 1;
	/* drop a previously staged-but-uncommitted buffer */
	if (private_layer->pending_buffer) {
		if (tdm_debug_module & TDM_DEBUG_BUFFER)
			TDM_INFO("layer(%p) pending_buffer(%p) skipped",
					 private_layer, private_layer->pending_buffer);
		if (tdm_ttrace_module & TDM_TTRACE_LAYER) {
			tbm_bo bo = tbm_surface_internal_get_bo(private_layer->pending_buffer, 0);
			TDM_TRACE_ASYNC_END((intptr_t)private_layer, "[LAYER] %d", tbm_bo_export(bo));
		tbm_surface_internal_unref(private_layer->pending_buffer);
	/* hold a ref until the buffer is consumed by a commit */
	tbm_surface_internal_ref(buffer);
	private_layer->pending_buffer = buffer;
	if (tdm_debug_module & TDM_DEBUG_BUFFER)
		TDM_INFO("layer(%p) pending_buffer(%p)",
				 private_layer, private_layer->pending_buffer);
	if (tdm_ttrace_module & TDM_TTRACE_LAYER) {
		tbm_bo bo = tbm_surface_internal_get_bo(private_layer->pending_buffer, 0);
		TDM_TRACE_ASYNC_BEGIN((intptr_t)private_layer, "[LAYER] %d", tbm_bo_export(bo));
	_pthread_mutex_unlock(&private_display->lock);
/* Detach all buffers from the layer, tell the backend to stop scanning it
 * out, and mark the layer available again (usable = 1). */
tdm_layer_unset_buffer(tdm_layer *layer)
	tdm_func_layer *func_layer;
	_pthread_mutex_lock(&private_display->lock);
	func_layer = &private_display->func_layer;
	/* releases pending/waiting/committed/showing buffers and handlers */
	_tdm_layer_free_all_buffers(private_layer);
	private_layer->usable = 1;
	if (private_layer->usable)
		TDM_INFO("layer(%p) now usable", private_layer);
	if (!func_layer->layer_unset_buffer) {
		/* LCOV_EXCL_START */
		_pthread_mutex_unlock(&private_display->lock);
		TDM_ERR("not implemented!!");
		return TDM_ERROR_NOT_IMPLEMENTED;
		/* LCOV_EXCL_STOP */
	ret = func_layer->layer_unset_buffer(private_layer->layer_backend);
	TDM_WARNING_IF_FAIL(ret == TDM_ERROR_NONE);
	_pthread_mutex_unlock(&private_display->lock);
/* Promote *committed_buffer to the layer's showing_buffer: the previous
 * showing buffer is released, ownership of the committed buffer transfers
 * to the layer, and *committed_buffer is cleared so the caller will not
 * free it. Also maintains the optional per-layer FPS counter. */
tdm_layer_committed(tdm_private_layer *private_layer, tdm_private_layer_buffer **committed_buffer)
	tdm_private_output *private_output = private_layer->private_output;
	tdm_private_display *private_display = private_output->private_display;
	/* LCOV_EXCL_START */
	/* fps accounting: reset the window every second, clear when disabled */
	if (private_display->print_fps) {
		double curr = tdm_helper_get_time();
		if (private_layer->fps_stamp == 0) {
			private_layer->fps_stamp = curr;
		} else if ((curr - private_layer->fps_stamp) > 1.0) {
			TDM_INFO("output(%d) layer(%d) fps: %d", private_output->index, private_layer->index, private_layer->fps_count);
			private_layer->fps_count = 0;
			private_layer->fps_stamp = curr;
		private_layer->fps_count++;
	} else if (private_layer->fps_stamp != 0) {
		private_layer->fps_stamp = 0;
		private_layer->fps_count = 0;
	/* end the trace span of the buffer leaving the screen */
	if (private_layer->showing_buffer) {
		if (tdm_ttrace_module & TDM_TTRACE_LAYER) {
			tbm_bo bo = tbm_surface_internal_get_bo(private_layer->showing_buffer->buffer, 0);
			TDM_TRACE_ASYNC_END((intptr_t)private_layer, "[LAYER] %d", tbm_bo_export(bo));
		_tdm_layer_free_buffer(private_layer, private_layer->showing_buffer);
	/* take ownership; caller's pointer is nulled on purpose */
	private_layer->showing_buffer = *committed_buffer;
	*committed_buffer = NULL;
	/* NOTE(review): *committed_buffer is always NULL here (nulled just
	 * above), so the first %p in this log is never informative — confirm
	 * whether the intent was to log the value before the transfer */
	if (tdm_debug_module & TDM_DEBUG_BUFFER)
		TDM_INFO("layer(%p) committed_buffer(%p) showing_buffer(%p)",
				 private_layer, *committed_buffer,
				 (private_layer->showing_buffer) ? private_layer->showing_buffer->buffer : NULL);
/* Vblank bottom-half for layer commits. Two phases:
 *  1) handlers whose backend commit completed (layer_commit_handler_list)
 *     are flipped to "showing" and their user callbacks invoked;
 *  2) commits that were deferred (pending_commit_handler_list) trigger a
 *     fresh output commit plus a new vblank wait, and move onto the
 *     completed list for the next vblank.
 * Runs with the display lock held; the lock is dropped around every call
 * into user callbacks / tdm_vblank APIs. On any failure in phase 2 the
 * handlers are still invoked directly (see the CAUTION note below) so the
 * "handler always fires" contract is kept. */
_tdm_layer_got_output_vblank(tdm_private_output *private_output, unsigned int sequence,
							 unsigned int tv_sec, unsigned int tv_usec)
	tdm_private_layer_commit_handler *lm = NULL, *lmm = NULL;
	tdm_private_display *private_display;
	struct list_head clone_list, pending_clone_list;
	tdm_error ret = TDM_ERROR_NONE;
	private_display = private_output->private_display;
	/* this vblank consumed the outstanding wait */
	private_output->layer_waiting_vblank = 0;
	LIST_INITHEAD(&clone_list);
	LIST_INITHEAD(&pending_clone_list);
	/* snapshot both lists locally: callbacks below may mutate the originals */
	LIST_FOR_EACH_ENTRY_SAFE(lm, lmm, &private_output->layer_commit_handler_list, link) {
		lm->private_layer->committing = 0;
		LIST_ADDTAIL(&lm->link, &clone_list);
	LIST_FOR_EACH_ENTRY_SAFE(lm, lmm, &private_output->pending_commit_handler_list, link) {
		lm->private_layer->committing = 0;
		LIST_ADDTAIL(&lm->link, &pending_clone_list);
	/* phase 1: completed commits — show the buffer, fire the callback */
	LIST_FOR_EACH_ENTRY_SAFE(lm, lmm, &clone_list, link) {
		if (tdm_debug_module & TDM_DEBUG_COMMIT)
			TDM_INFO("layer(%p) committed. handle(%p) commited_buffer(%p)",
					 lm->private_layer, lm, (lm->committed_buffer) ? lm->committed_buffer->buffer : NULL);
		tdm_layer_committed(lm->private_layer, &lm->committed_buffer);
		_pthread_mutex_unlock(&private_display->lock);
		lm->func(lm->private_layer, sequence, tv_sec, tv_usec, lm->user_data);
		_pthread_mutex_lock(&private_display->lock);
		if (lm->committed_buffer)
			_tdm_layer_free_buffer(lm->private_layer, lm->committed_buffer);
	/* phase 2: nothing deferred -> done */
	if (LIST_IS_EMPTY(&pending_clone_list))
	TDM_GOTO_IF_FAIL(private_output->vblank != NULL, wait_failed);
	/* flush the deferred layer state with one output commit */
	ret = tdm_output_commit_internal(private_output, 0, NULL, NULL);
	TDM_GOTO_IF_FAIL(ret == TDM_ERROR_NONE, wait_failed);
	if (tdm_debug_module & TDM_DEBUG_COMMIT)
		TDM_INFO("layer commit: output(%d) commit", private_output->pipe);
	if (!TDM_OUTPUT_DPMS_VSYNC_IS_OFF(private_output->current_dpms_value)) {
		/* tdm_vblank APIs is for server. it should be called in unlock status*/
		if (!private_output->layer_waiting_vblank) {
			_pthread_mutex_unlock(&private_display->lock);
			ret = tdm_vblank_wait(private_output->vblank, 0, 0, 1, _tdm_layer_cb_wait_vblank, private_output);
			_pthread_mutex_lock(&private_display->lock);
			TDM_GOTO_IF_FAIL(ret == TDM_ERROR_NONE, wait_failed);
			private_output->layer_waiting_vblank = 1;
	if (tdm_debug_module & TDM_DEBUG_COMMIT)
		TDM_INFO("layer commit: output(%d) wait vblank", private_output->pipe);
	/* the deferred commits will complete on the next vblank */
	LIST_FOR_EACH_ENTRY_SAFE(lm, lmm, &pending_clone_list, link) {
		LIST_ADDTAIL(&lm->link, &private_output->layer_commit_handler_list);
	/* with DPMS vsync off there is no real vblank: synthesize one now */
	if (TDM_OUTPUT_DPMS_VSYNC_IS_OFF(private_output->current_dpms_value)) {
		TDM_WRN("dpms %s. Directly call vblank callback.", tdm_dpms_str(private_output->current_dpms_value));
		_pthread_mutex_unlock(&private_display->lock);
		_tdm_layer_cb_wait_vblank(private_output->vblank, 0, 0, 0, 0, private_output);
		_pthread_mutex_lock(&private_display->lock);
	/* CAUTION: Once _tdm_layer_commit returns success, the layer commit handler MUST be called always.
	 * That is, even if we get error in _tdm_layer_got_output_vblank() function for some reasons,
	 * the layer commit handler MUST be called.
	LIST_FOR_EACH_ENTRY_SAFE(lm, lmm, &pending_clone_list, link) {
		TDM_WRN("Directly call layer commit handlers: ret(%d)\n", ret);
		_pthread_mutex_unlock(&private_display->lock);
		lm->func(lm->private_layer, sequence, tv_sec, tv_usec, lm->user_data);
		_pthread_mutex_lock(&private_display->lock);
		_tdm_layer_free_buffer(lm->private_layer, lm->committed_buffer);
/* Output-commit completion callback used when commit_per_vblank is off.
 * Validates that the handler is still registered (it may have been removed
 * while the commit was in flight), promotes the committed buffer to
 * showing, fires the user's layer commit handler, and frees the handler. */
_tdm_layer_cb_output_commit(tdm_output *output, unsigned int sequence,
							unsigned int tv_sec, unsigned int tv_usec, void *user_data)
	tdm_private_layer_commit_handler *layer_commit_handler = user_data;
	tdm_private_layer_commit_handler *lm = NULL;
	tdm_private_display *private_display;
	tdm_private_output *private_output = output;
	tdm_private_layer *private_layer;
	TDM_RETURN_IF_FAIL(layer_commit_handler != NULL);
	private_display = private_output->private_display;
	/* make sure the handler is still on the list before touching it */
	LIST_FOR_EACH_ENTRY(lm, &private_output->layer_commit_handler_list, link) {
		if (layer_commit_handler == lm) {
	LIST_DEL(&layer_commit_handler->link);
	private_layer = layer_commit_handler->private_layer;
	if (tdm_debug_module & TDM_DEBUG_COMMIT)
		TDM_INFO("layer(%p) commit: output(%d) committed. handle(%p)",
				 private_layer, private_output->pipe, layer_commit_handler);
	_pthread_mutex_lock(&private_display->lock);
	tdm_layer_committed(private_layer, &layer_commit_handler->committed_buffer);
	/* user callback runs unlocked, like everywhere else in this file */
	if (layer_commit_handler->func) {
		_pthread_mutex_unlock(&private_display->lock);
		layer_commit_handler->func(private_output, sequence,
								   tv_sec, tv_usec, layer_commit_handler->user_data);
		_pthread_mutex_lock(&private_display->lock);
	free(layer_commit_handler);
	_pthread_mutex_unlock(&private_display->lock);
/* tdm_vblank callback: takes the display lock and defers all real work to
 * _tdm_layer_got_output_vblank(). Also invoked synthetically (with zeroed
 * timestamps) when DPMS vsync is off. */
_tdm_layer_cb_wait_vblank(tdm_vblank *vblank, tdm_error error, unsigned int sequence,
						  unsigned int tv_sec, unsigned int tv_usec, void *user_data)
	tdm_private_output *private_output = user_data;
	tdm_private_display *private_display;
	TDM_RETURN_IF_FAIL(private_output != NULL);
	private_display = private_output->private_display;
	_pthread_mutex_lock(&private_display->lock);
	if (tdm_debug_module & TDM_DEBUG_COMMIT)
		TDM_INFO("layer commit: output(%d) got vblank", private_output->pipe);
	_tdm_layer_got_output_vblank(private_output, sequence, tv_sec, tv_usec);
	_pthread_mutex_unlock(&private_display->lock);
/* Count the output's layers currently in use (usable == 0).
 * NOTE: "lauer" is a long-standing typo for "layer"; the name is kept
 * because internal callers reference it as-is. */
_tdm_lauer_get_output_used_layer_count(tdm_private_output *private_output)
	tdm_private_layer *private_layer = NULL;
	unsigned int count = 0;
	LIST_FOR_EACH_ENTRY(private_layer, &private_output->layer_list, link) {
		if (!private_layer->usable)
/* commit_per_vblank == 1: we can commit if
 * - there is no previous commit request
 * - only 1 layer is used
 * commit_per_vblank == 2: we can commit if
 * - there is no previous commit request
/* Decide whether this layer's commit may go to the output immediately or
 * must be queued until the next vblank. Always "possible" when
 * commit-per-vblank throttling is disabled (<= 0). */
_tdm_layer_commit_possible(tdm_private_layer *private_layer)
	tdm_private_output *private_output = private_layer->private_output;
	TDM_RETURN_VAL_IF_FAIL(private_output->commit_per_vblank > 0, 1);
	/* There is a previous commit request which is not done and displayed on screen yet.
	 * We can't commit at this time.
	if (!LIST_IS_EMPTY(&private_output->layer_commit_handler_list)) {
		if (tdm_debug_module & TDM_DEBUG_COMMIT)
			TDM_INFO("layer(%p) commit: not possible(previous commit)", private_layer);
	/* mode 1 additionally forbids immediate commits with multiple layers active */
	if (private_output->commit_per_vblank == 1 && _tdm_lauer_get_output_used_layer_count(private_output) > 1) {
		if (tdm_debug_module & TDM_DEBUG_COMMIT)
			TDM_INFO("layer(%p) commit: not possible(more than 2 layers)", private_layer);
	if (tdm_debug_module & TDM_DEBUG_COMMIT)
		TDM_INFO("layer(%p) commit: possible", private_layer);
/* Discard staged-but-uncommitted state: clear pending_info and release the
 * tbm reference held on pending_buffer (taken in tdm_layer_set_buffer). */
_tdm_layer_reset_pending_data(tdm_private_layer *private_layer)
	private_layer->pending_info_changed = 0;
	memset(&private_layer->pending_info, 0, sizeof private_layer->pending_info);
	private_layer->pending_buffer_changed = 0;
	if (private_layer->pending_buffer) {
		tbm_surface_internal_unref(private_layer->pending_buffer);
		private_layer->pending_buffer = NULL;
/* Push the layer's staged info/buffer down to the backend.
 * On a successful layer_set_buffer() the buffer moves from "pending" to
 * "waiting" (wrapped in a tdm_private_layer_buffer with a backend ref);
 * on failure the freshly allocated wrapper is released. Pending state is
 * always cleared before returning. */
tdm_layer_commit_pending_data(tdm_private_layer *private_layer)
	tdm_private_output *private_output = private_layer->private_output;
	tdm_private_display *private_display = private_output->private_display;
	tdm_func_layer *func_layer;
	tdm_error ret = TDM_ERROR_NONE;
	func_layer = &private_display->func_layer;
	/* geometry first, so the buffer lands with the right config */
	if (private_layer->pending_info_changed) {
		ret = func_layer->layer_set_info(private_layer->layer_backend, &private_layer->pending_info);
		TDM_GOTO_IF_FAIL(ret == TDM_ERROR_NONE, done);
	if (private_layer->pending_buffer_changed) {
		tdm_private_layer_buffer *layer_buffer;
		layer_buffer = calloc(1, sizeof(tdm_private_layer_buffer));
		TDM_GOTO_IF_FAIL(layer_buffer != NULL, done);
		LIST_INITHEAD(&layer_buffer->link);
		ret = func_layer->layer_set_buffer(private_layer->layer_backend, private_layer->pending_buffer);
		TDM_WARNING_IF_FAIL(ret == TDM_ERROR_NONE);
		if (tdm_debug_module & TDM_DEBUG_BUFFER)
			TDM_INFO("layer(%p) pending_buffer(%p) committed",
					 private_layer, private_layer->pending_buffer);
		if (ret == TDM_ERROR_NONE) {
			/* an older waiting buffer was never scanned out: drop it */
			if (private_layer->waiting_buffer)
				_tdm_layer_free_buffer(private_layer, private_layer->waiting_buffer);
			private_layer->waiting_buffer = layer_buffer;
			private_layer->waiting_buffer->buffer = tdm_buffer_ref_backend(private_layer->pending_buffer);
			if (tdm_debug_module & TDM_DEBUG_BUFFER)
				TDM_INFO("layer(%p) waiting_buffer(%p)",
						 private_layer, private_layer->waiting_buffer->buffer);
			_tdm_layer_free_buffer(private_layer, layer_buffer);
	_tdm_layer_reset_pending_data(private_layer);
/* CAUTION: Once _tdm_layer_commit returns success, the layer commit handler MUST be called always.
 * That is, even if we get error in _tdm_layer_got_output_vblank() function for some reasons,
 * the layer commit handler MUST be called.
/* Core of tdm_layer_commit(): allocates a commit handler that carries the
 * waiting buffer plus the user's callback, then routes it down one of two
 * paths depending on the output's commit model:
 *  - no commit-per-vblank: commit the output directly, callback fires via
 *    _tdm_layer_cb_output_commit;
 *  - commit-per-vblank: either commit now or park the handler on the
 *    pending list, then make sure a vblank wait is armed.
 * On any failure the waiting buffer is handed back to the layer and the
 * handler freed (commit_failed path; label line elided from this chunk). */
_tdm_layer_commit(tdm_layer *layer, tdm_layer_commit_handler func, void *user_data)
	tdm_private_layer_commit_handler *layer_commit_handler;
	layer_commit_handler = calloc(1, sizeof(tdm_private_layer_commit_handler));
	if (!layer_commit_handler) {
		/* LCOV_EXCL_START */
		TDM_ERR("failed: alloc memory");
		return TDM_ERROR_OUT_OF_MEMORY;
	if (tdm_debug_module & TDM_DEBUG_COMMIT)
		TDM_INFO("layer(%p) commit: handle(%p)", private_layer, layer_commit_handler);
	LIST_INITHEAD(&layer_commit_handler->link);
	layer_commit_handler->private_layer = private_layer;
	layer_commit_handler->func = func;
	layer_commit_handler->user_data = user_data;
	/* the handler takes ownership of the waiting buffer for this commit */
	layer_commit_handler->committed_buffer = private_layer->waiting_buffer;
	private_layer->waiting_buffer = NULL;
	if (tdm_debug_module & TDM_DEBUG_BUFFER)
		TDM_INFO("layer(%p) waiting_buffer(%p) committed_buffer(%p)",
				 private_layer, private_layer->waiting_buffer,
				 (layer_commit_handler->committed_buffer) ? layer_commit_handler->committed_buffer->buffer : NULL);
	if (!private_output->commit_per_vblank) {
		/* direct path: the output must be in OUTPUT commit mode */
		TDM_GOTO_IF_FAIL(private_output->commit_type == TDM_COMMIT_TYPE_OUTPUT, commit_failed);
		LIST_ADDTAIL(&layer_commit_handler->link, &private_output->layer_commit_handler_list);
		ret = tdm_output_commit_internal(private_layer->private_output, 0, _tdm_layer_cb_output_commit, layer_commit_handler);
		TDM_GOTO_IF_FAIL(ret == TDM_ERROR_NONE, commit_failed);
		if (tdm_debug_module & TDM_DEBUG_COMMIT)
			TDM_INFO("layer(%p) commit: no commit-per-vblank", private_layer);
		TDM_GOTO_IF_FAIL(private_output->commit_type == TDM_COMMIT_TYPE_LAYER, commit_failed);
		/* warn (but proceed) on overlapping commits from the same layer */
		if (private_layer->committing)
			TDM_WRN("layer(%d) too many commit", private_layer->index);
		private_layer->committing = 1;
		if (_tdm_layer_commit_possible(private_layer)) {
			/* add to layer_commit_handler_list */
			LIST_ADDTAIL(&layer_commit_handler->link, &private_output->layer_commit_handler_list);
			ret = tdm_output_commit_internal(private_layer->private_output, 0, NULL, NULL);
			TDM_GOTO_IF_FAIL(ret == TDM_ERROR_NONE, commit_failed);
			if (tdm_debug_module & TDM_DEBUG_COMMIT)
				TDM_INFO("layer(%p) commit: output", private_layer);
			/* add to pending_commit_handler_list. It will be commited when a vblank occurs */
			LIST_ADDTAIL(&layer_commit_handler->link, &private_output->pending_commit_handler_list);
			if (tdm_debug_module & TDM_DEBUG_COMMIT)
				TDM_INFO("layer(%p) commit: pending", private_layer);
		/* lazily create the per-output vblank object */
		if (!private_output->vblank) {
			/* tdm_vblank APIs is for server. it should be called in unlock status*/
			_pthread_mutex_unlock(&private_display->lock);
			private_output->vblank = tdm_vblank_create(private_display, private_output, NULL);
			_pthread_mutex_lock(&private_display->lock);
			TDM_GOTO_IF_FAIL(private_output->vblank != NULL, commit_failed);
		/* arm exactly one vblank wait per output at a time */
		if (!private_output->layer_waiting_vblank) {
			/* tdm_vblank APIs is for server. it should be called in unlock status*/
			_pthread_mutex_unlock(&private_display->lock);
			ret = tdm_vblank_wait(private_output->vblank, 0, 0, 1, _tdm_layer_cb_wait_vblank, private_output);
			_pthread_mutex_lock(&private_display->lock);
			TDM_GOTO_IF_FAIL(ret == TDM_ERROR_NONE, commit_failed);
			private_output->layer_waiting_vblank = 1;
		if (tdm_debug_module & TDM_DEBUG_COMMIT)
			TDM_INFO("layer(%p) commit: wait vblank", private_layer);
	/* failure: give the buffer back so a later commit can retry it */
	if (layer_commit_handler) {
		private_layer->waiting_buffer = layer_commit_handler->committed_buffer;
		LIST_DEL(&layer_commit_handler->link);
		free(layer_commit_handler);
/* Public entry: commit the layer's staged info/buffer and register @func
 * to be called when the commit is displayed.
 * Locks the output into LAYER (or OUTPUT) commit mode on first use,
 * refuses to commit while DPMS has vsync off, flushes pending data to the
 * backend, then hands off to _tdm_layer_commit(). */
tdm_layer_commit(tdm_layer *layer, tdm_layer_commit_handler func, void *user_data)
	_pthread_mutex_lock(&private_display->lock);
	/* first commit on this output fixes its commit model */
	if (private_output->commit_type == TDM_COMMIT_TYPE_NONE) {
		if (!private_output->commit_per_vblank)
			private_output->commit_type = TDM_COMMIT_TYPE_OUTPUT;
			private_output->commit_type = TDM_COMMIT_TYPE_LAYER;
	if (TDM_OUTPUT_DPMS_VSYNC_IS_OFF(private_output->current_dpms_value)) {
		TDM_ERR("layer(%p)'s output(%d) dpms: %s", layer, private_output->pipe,
				tdm_dpms_str(private_output->current_dpms_value));
		_pthread_mutex_unlock(&private_display->lock);
		return TDM_ERROR_DPMS_OFF;
	/* don't call this inside of _tdm_layer_commit */
	ret = tdm_layer_commit_pending_data(private_layer);
	if (ret != TDM_ERROR_NONE) {
		TDM_ERR("layer(%p) committing pending data failed", layer);
		_pthread_mutex_unlock(&private_display->lock);
	ret = _tdm_layer_commit(private_layer, func, user_data);
	_pthread_mutex_unlock(&private_display->lock);
/* Report whether a layer commit is in flight (set in _tdm_layer_commit,
 * cleared when its vblank arrives). */
tdm_layer_is_committing(tdm_layer *layer, unsigned int *committing)
	TDM_RETURN_VAL_IF_FAIL(committing != NULL, TDM_ERROR_INVALID_PARAMETER);
	_pthread_mutex_lock(&private_display->lock);
	*committing = private_layer->committing;
	_pthread_mutex_unlock(&private_display->lock);
/* Remove every commit handler matching (func, user_data) from both the
 * active and pending lists, deregister it from the output, and release
 * its committed buffer. Caller must hold the display lock (asserted). */
tdm_layer_remove_commit_handler_internal(tdm_layer *layer, tdm_layer_commit_handler func, void *user_data)
	tdm_private_layer *private_layer = (tdm_private_layer*)layer;
	tdm_private_output *private_output = private_layer->private_output;
	tdm_private_layer_commit_handler *lm = NULL, *lmm = NULL;
	TDM_RETURN_IF_FAIL(private_layer != NULL);
	TDM_RETURN_IF_FAIL(TDM_MUTEX_IS_LOCKED());
	LIST_FOR_EACH_ENTRY_SAFE(lm, lmm, &private_output->layer_commit_handler_list, link) {
		if (lm->func == func && lm->user_data == user_data) {
			LIST_DEL(&lm->link);
			tdm_output_remove_commit_handler_internal(private_output, _tdm_layer_cb_output_commit, lm);
			_tdm_layer_free_buffer(private_layer, lm->committed_buffer);
	/* also purge commits still waiting for a vblank */
	LIST_FOR_EACH_ENTRY_SAFE(lm, lmm, &private_output->pending_commit_handler_list, link) {
		if (lm->func == func && lm->user_data == user_data) {
			LIST_DEL(&lm->link);
			tdm_output_remove_commit_handler_internal(private_output, _tdm_layer_cb_output_commit, lm);
			_tdm_layer_free_buffer(private_layer, lm->committed_buffer);
/* Public locked wrapper around tdm_layer_remove_commit_handler_internal(). */
tdm_layer_remove_commit_handler(tdm_layer *layer, tdm_layer_commit_handler func, void *user_data)
	_pthread_mutex_lock(&private_display->lock);
	tdm_layer_remove_commit_handler_internal(layer, func, user_data);
	_pthread_mutex_unlock(&private_display->lock);
/* Return the tbm surface currently on screen for this layer, or NULL
 * (with *error = TDM_ERROR_OPERATION_FAILED) when nothing is showing.
 * NOTE(review): `error` is dereferenced without a NULL check on the lines
 * visible here — confirm whether callers may legally pass NULL. */
EXTERN tbm_surface_h
tdm_layer_get_displaying_buffer(tdm_layer *layer, tdm_error *error)
	tbm_surface_h buffer;
	LAYER_FUNC_ENTRY_ERROR();
	_pthread_mutex_lock(&private_display->lock);
	*error = TDM_ERROR_NONE;
	if (private_layer->showing_buffer) {
		buffer = private_layer->showing_buffer->buffer;
		*error = TDM_ERROR_OPERATION_FAILED;
		_pthread_mutex_unlock(&private_display->lock);
		TDM_DBG("layer(%p) showing_buffer is null", private_layer);
	_pthread_mutex_unlock(&private_display->lock);
/* tbm surface-queue "acquirable" callback, registered by
 * tdm_layer_set_buffer_queue(). Fired when a client has queued a new buffer:
 * acquire it from the queue, hand it to the backend via layer_set_buffer(),
 * make it this layer's waiting_buffer, and immediately commit (output-wide
 * or per-layer depending on the output's commit type). */
1063 _tbm_layer_queue_acquirable_cb(tbm_surface_queue_h surface_queue, void *data)
1065 TDM_RETURN_IF_FAIL(data != NULL);
1066 tdm_layer *layer = data;
1067 tdm_func_layer *func_layer;
1068 tbm_surface_h surface = NULL;
1069 tdm_private_layer_buffer *layer_buffer;
1070 LAYER_FUNC_ENTRY_VOID_RETURN();
1072 _pthread_mutex_lock(&private_display->lock);
/* backend must implement layer_set_buffer for queue-driven updates */
1074 func_layer = &private_display->func_layer;
1075 if (!func_layer->layer_set_buffer) {
1076 /* LCOV_EXCL_START */
1077 _pthread_mutex_unlock(&private_display->lock);
1079 /* LCOV_EXCL_STOP */
/* wrapper that tracks the acquired surface while it waits to be shown */
1082 layer_buffer = calloc(1, sizeof(tdm_private_layer_buffer));
1083 if (!layer_buffer) {
1084 /* LCOV_EXCL_START */
1085 _pthread_mutex_unlock(&private_display->lock);
1086 TDM_ERR("alloc failed");
1088 /* LCOV_EXCL_STOP */
1090 LIST_INITHEAD(&layer_buffer->link);
1092 if (TBM_SURFACE_QUEUE_ERROR_NONE != tbm_surface_queue_acquire(private_layer->buffer_queue, &surface) ||
1094 /* LCOV_EXCL_START */
1095 TDM_ERR("layer(%p) tbm_surface_queue_acquire() failed surface:%p",
1096 private_layer, surface);
1097 _pthread_mutex_unlock(&private_display->lock);
1100 /* LCOV_EXCL_STOP */
1103 /* we don't need to handle pending data here because the changes in this function
1104 * should be applied immediately. we can't expect calling tdm_layer_commit.
1106 ret = func_layer->layer_set_buffer(private_layer->layer_backend, surface);
1107 TDM_WARNING_IF_FAIL(ret == TDM_ERROR_NONE);
1109 if (ret == TDM_ERROR_NONE) {
/* a buffer already waiting but never shown is superseded: drop it */
1110 if (private_layer->waiting_buffer) {
1111 TDM_DBG("layer(%p) drop waiting_buffer(%p)", private_layer, private_layer->waiting_buffer->buffer);
1112 _tdm_layer_free_buffer(private_layer, private_layer->waiting_buffer);
1115 private_layer->waiting_buffer = layer_buffer;
1116 private_layer->waiting_buffer->buffer = tdm_buffer_ref_backend(surface);
1118 if (tdm_debug_module & TDM_DEBUG_BUFFER)
1119 TDM_INFO("layer(%p) waiting_buffer(%p)",
1120 private_layer, private_layer->waiting_buffer->buffer);
/* push the new buffer to screen right away, using whichever commit path
 * this output is configured for */
1122 if (private_output->commit_type == TDM_COMMIT_TYPE_OUTPUT) {
1123 ret = tdm_output_commit_internal(private_layer->private_output, 0, NULL, NULL);
1124 if (ret != TDM_ERROR_NONE)
1125 TDM_ERR("tdm_output_commit_internal() is fail");
1126 } else if (private_output->commit_type == TDM_COMMIT_TYPE_LAYER) {
1127 ret = _tdm_layer_commit(private_layer, NULL, NULL);
1128 if (ret != TDM_ERROR_NONE)
1129 TDM_ERR("layer(%p) _tdm_layer_commit() is fail", private_layer);
/* any other commit type should be impossible */
1131 TDM_NEVER_GET_HERE();
/* layer_set_buffer failed: release the wrapper (and acquired surface) */
1134 _tdm_layer_free_buffer(private_layer, layer_buffer);
1136 _pthread_mutex_unlock(&private_display->lock);
/* tbm surface-queue destroy callback: the queue attached via
 * tdm_layer_set_buffer_queue() is being destroyed, so forget our reference
 * to it and release every buffer this layer still holds. */
1140 _tbm_layer_queue_destroy_cb(tbm_surface_queue_h surface_queue, void *data)
1142 TDM_RETURN_IF_FAIL(data != NULL);
1143 tdm_layer *layer = data;
1144 LAYER_FUNC_ENTRY_VOID_RETURN();
1145 TDM_RETURN_IF_FAIL(ret == TDM_ERROR_NONE);
1147 _pthread_mutex_lock(&private_display->lock);
1149 private_layer->buffer_queue = NULL;
1151 _tdm_layer_free_all_buffers(private_layer);
1153 _pthread_mutex_unlock(&private_display->lock);
/* Attach a tbm surface queue to this layer. From then on, buffers queued by
 * a client are picked up by _tbm_layer_queue_acquirable_cb() and displayed
 * automatically. Marks the layer as no longer "usable" (i.e. reserved).
 * Returns TDM_ERROR_NONE on success, TDM_ERROR_NOT_IMPLEMENTED when the
 * backend lacks layer_set_buffer, TDM_ERROR_INVALID_PARAMETER for bad args. */
1157 tdm_layer_set_buffer_queue(tdm_layer *layer, tbm_surface_queue_h buffer_queue)
1159 tdm_func_layer *func_layer;
1162 TDM_RETURN_VAL_IF_FAIL(buffer_queue != NULL, TDM_ERROR_INVALID_PARAMETER);
1164 _pthread_mutex_lock(&private_display->lock);
1166 func_layer = &private_display->func_layer;
/* log the usable -> not-usable transition before claiming the layer */
1168 if (private_layer->usable)
1169 TDM_INFO("layer(%p) not usable", private_layer);
1171 private_layer->usable = 0;
1173 if (!func_layer->layer_set_buffer) {
1174 /* LCOV_EXCL_START */
1175 _pthread_mutex_unlock(&private_display->lock);
1176 TDM_ERR("not implemented!!");
1177 return TDM_ERROR_NOT_IMPLEMENTED;
1178 /* LCOV_EXCL_STOP */
/* same queue already attached: nothing to do */
1181 if (buffer_queue == private_layer->buffer_queue) {
1182 _pthread_mutex_unlock(&private_display->lock);
1183 return TDM_ERROR_NONE;
/* a buffer from the previous queue that never reached the screen is dropped */
1186 if (private_layer->waiting_buffer) {
1187 _tdm_layer_free_buffer(private_layer, private_layer->waiting_buffer);
1188 private_layer->waiting_buffer = NULL;
/* NOTE(review): waiting_buffer was just set to NULL, so this debug log
 * always prints (nil) -- was the intent to log the freed pointer? */
1190 if (tdm_debug_module & TDM_DEBUG_BUFFER)
1191 TDM_INFO("layer(%p) waiting_buffer(%p)",
1192 private_layer, private_layer->waiting_buffer);
/* adopt the new queue and hook its acquirable/destroy events */
1195 private_layer->buffer_queue = buffer_queue;
1196 tbm_surface_queue_add_acquirable_cb(private_layer->buffer_queue,
1197 _tbm_layer_queue_acquirable_cb,
1199 tbm_surface_queue_add_destroy_cb(private_layer->buffer_queue,
1200 _tbm_layer_queue_destroy_cb,
1202 _pthread_mutex_unlock(&private_display->lock);
/* Detach the buffer queue from a layer. Detaching the queue and unsetting
 * the layer's buffer are the same operation, so simply delegate. */
1208 tdm_layer_unset_buffer_queue(tdm_layer *layer)
1210 return tdm_layer_unset_buffer(layer);
/* Query whether this layer is still free for a client to grab.
 * @param usable  out: the layer's current "usable" flag (non-NULL required) */
1214 tdm_layer_is_usable(tdm_layer *layer, unsigned int *usable)
1218 TDM_RETURN_VAL_IF_FAIL(usable != NULL, TDM_ERROR_INVALID_PARAMETER);
1220 _pthread_mutex_lock(&private_display->lock);
1222 *usable = private_layer->usable;
1224 _pthread_mutex_unlock(&private_display->lock);
/* Set the stacking position (zpos) of a video layer via the backend.
 * Only valid for layers with TDM_LAYER_CAPABILITY_VIDEO; returns
 * TDM_ERROR_INVALID_PARAMETER otherwise, or TDM_ERROR_NOT_IMPLEMENTED if
 * the backend does not provide layer_set_video_pos. */
1230 tdm_layer_set_video_pos(tdm_layer *layer, int zpos)
1232 tdm_func_layer *func_layer;
1235 _pthread_mutex_lock(&private_display->lock);
1237 func_layer = &private_display->func_layer;
/* reject non-video layers up front */
1239 if (!(private_layer->caps.capabilities & TDM_LAYER_CAPABILITY_VIDEO)) {
1240 TDM_ERR("layer(%p) is not video layer", private_layer);
1241 _pthread_mutex_unlock(&private_display->lock);
1242 return TDM_ERROR_INVALID_PARAMETER;
1245 if (!func_layer->layer_set_video_pos) {
1246 /* LCOV_EXCL_START */
1247 _pthread_mutex_unlock(&private_display->lock);
1248 TDM_ERR("not implemented!!");
1249 return TDM_ERROR_NOT_IMPLEMENTED;
1250 /* LCOV_EXCL_STOP */
/* delegate to the backend implementation */
1253 ret = func_layer->layer_set_video_pos(private_layer->layer_backend, zpos);
1255 _pthread_mutex_unlock(&private_display->lock);
/* Create a capture object bound to this layer. Locked wrapper around
 * tdm_capture_create_layer_internal(); error details are reported through
 * the optional error out parameter. Caller owns the returned capture. */
1260 EXTERN tdm_capture *
1261 tdm_layer_create_capture(tdm_layer *layer, tdm_error *error)
1263 tdm_capture *capture = NULL;
1265 LAYER_FUNC_ENTRY_ERROR();
1267 _pthread_mutex_lock(&private_display->lock);
1269 capture = (tdm_capture *)tdm_capture_create_layer_internal(private_layer, error);
1271 _pthread_mutex_unlock(&private_display->lock);
1277 tdm_layer_get_buffer_flags(tdm_layer *layer, unsigned int *flags)
1279 tdm_func_layer *func_layer;
1281 TDM_RETURN_VAL_IF_FAIL(flags != NULL, TDM_ERROR_INVALID_PARAMETER);
1283 _pthread_mutex_lock(&private_display->lock);
1285 func_layer = &private_display->func_layer;
1287 if (!func_layer->layer_get_buffer_flags) {
1288 /* LCOV_EXCL_START */
1289 _pthread_mutex_unlock(&private_display->lock);
1290 TDM_ERR("not implemented!!");
1291 return TDM_ERROR_NOT_IMPLEMENTED;
1292 /* LCOV_EXCL_STOP */
1295 ret = func_layer->layer_get_buffer_flags(private_layer->layer_backend, flags);
1297 _pthread_mutex_unlock(&private_display->lock);