1 /**************************************************************************
5 * Copyright 2015 Samsung Electronics co., Ltd. All Rights Reserved.
7 * Contact: Eunchul Kim <chulspro.kim@samsung.com>,
8 * JinYoung Jeon <jy0.jeon@samsung.com>,
9 * Taeheon Kim <th908.kim@samsung.com>,
10 * YoungJun Cho <yj44.cho@samsung.com>,
11 * SooChan Lim <sc1.lim@samsung.com>,
12 * Boram Park <sc1.lim@samsung.com>
14 * Permission is hereby granted, free of charge, to any person obtaining a
15 * copy of this software and associated documentation files (the
16 * "Software"), to deal in the Software without restriction, including
17 * without limitation the rights to use, copy, modify, merge, publish,
18 * distribute, sub license, and/or sell copies of the Software, and to
19 * permit persons to whom the Software is furnished to do so, subject to
20 * the following conditions:
22 * The above copyright notice and this permission notice (including the
23 * next paragraph) shall be included in all copies or substantial portions
26 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
27 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
28 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
29 * IN NO EVENT SHALL PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR
30 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
31 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
32 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
34 **************************************************************************/
41 #include "tdm_backend.h"
42 #include "tdm_private.h"
43 #include "tdm_helper.h"
/* Common prologue for tdm_layer_* entry points returning tdm_error:
 * declares private_display/private_output/private_layer and a default
 * 'ret', validates the |layer| handle (returning
 * TDM_ERROR_INVALID_PARAMETER from the caller when NULL) and resolves
 * the private layer -> output -> display pointer chain. */
47 #define LAYER_FUNC_ENTRY() \
48 tdm_private_display *private_display; \
49 tdm_private_output *private_output; \
50 tdm_private_layer *private_layer; \
51 tdm_error ret = TDM_ERROR_NONE; /* default TDM_ERROR_NONE */\
52 TDM_RETURN_VAL_IF_FAIL(layer != NULL, TDM_ERROR_INVALID_PARAMETER); \
53 private_layer = (tdm_private_layer*)layer; \
54 private_output = private_layer->private_output; \
55 private_display = private_output->private_display
/* Variant of LAYER_FUNC_ENTRY() for entry points that return a pointer:
 * on a NULL |layer| the caller returns NULL while the error code is
 * reported through the *_WITH_ERROR macro (presumably via an out
 * parameter -- the macro definition is not visible in this chunk). */
57 #define LAYER_FUNC_ENTRY_ERROR() \
58 tdm_private_display *private_display; \
59 tdm_private_output *private_output; \
60 tdm_private_layer *private_layer; \
61 tdm_error ret = TDM_ERROR_NONE; /* default TDM_ERROR_NONE */\
62 TDM_RETURN_VAL_IF_FAIL_WITH_ERROR(layer != NULL, TDM_ERROR_INVALID_PARAMETER, NULL); \
63 private_layer = (tdm_private_layer*)layer; \
64 private_output = private_layer->private_output; \
65 private_display = private_output->private_display
/* Variant of LAYER_FUNC_ENTRY() for void entry points (callbacks):
 * simply returns from the caller when |layer| is NULL. */
67 #define LAYER_FUNC_ENTRY_VOID_RETURN() \
68 tdm_private_display *private_display; \
69 tdm_private_output *private_output; \
70 tdm_private_layer *private_layer; \
71 tdm_error ret = TDM_ERROR_NONE; /* default TDM_ERROR_NONE */\
72 TDM_RETURN_IF_FAIL(layer != NULL); \
73 private_layer = (tdm_private_layer*)layer; \
74 private_output = private_layer->private_output; \
75 private_display = private_output->private_display
/* Forward declarations for the static helpers/callbacks used by the
 * layer commit path below (buffer bookkeeping, vblank and tbm surface
 * queue callbacks, output commit completion). */
77 static void _tdm_layer_free_buffer(tdm_private_layer *private_layer, tdm_private_layer_buffer *layer_buffer);
78 static void _tdm_layer_cb_wait_vblank(tdm_vblank *vblank, tdm_error error, unsigned int sequence,
79 unsigned int tv_sec, unsigned int tv_usec, void *user_data);
80 static void _tbm_layer_queue_acquirable_cb(tbm_surface_queue_h surface_queue, void *data);
81 static void _tbm_layer_queue_destroy_cb(tbm_surface_queue_h surface_queue, void *data);
82 static void _tdm_layer_cb_output_commit(tdm_output *output, unsigned int sequence,
83 unsigned int tv_sec, unsigned int tv_usec, void *user_data);
/* Return the capability flags of |layer| through |capabilities|.
 * Reads the cached caps under the display lock; no backend call. */
86 tdm_layer_get_capabilities(tdm_layer *layer, tdm_layer_capability *capabilities)
90 TDM_RETURN_VAL_IF_FAIL(capabilities != NULL, TDM_ERROR_INVALID_PARAMETER);
92 _pthread_mutex_lock(&private_display->lock);
94 *capabilities = private_layer->caps.capabilities;
96 _pthread_mutex_unlock(&private_display->lock);
/* Return the list of tbm formats the layer supports.
 * |formats| points into the cached caps array (owned by TDM; caller
 * must not free it); |count| receives the number of entries. */
102 tdm_layer_get_available_formats(tdm_layer *layer, const tbm_format **formats, int *count)
106 TDM_RETURN_VAL_IF_FAIL(formats != NULL, TDM_ERROR_INVALID_PARAMETER);
107 TDM_RETURN_VAL_IF_FAIL(count != NULL, TDM_ERROR_INVALID_PARAMETER);
109 _pthread_mutex_lock(&private_display->lock);
111 *formats = (const tbm_format *)private_layer->caps.formats;
112 *count = private_layer->caps.format_count;
114 _pthread_mutex_unlock(&private_display->lock);
/* Return the list of backend properties available on the layer.
 * |props| points into the cached caps array (owned by TDM);
 * |count| receives the number of entries. */
120 tdm_layer_get_available_properties(tdm_layer *layer, const tdm_prop **props, int *count)
124 TDM_RETURN_VAL_IF_FAIL(props != NULL, TDM_ERROR_INVALID_PARAMETER);
125 TDM_RETURN_VAL_IF_FAIL(count != NULL, TDM_ERROR_INVALID_PARAMETER);
127 _pthread_mutex_lock(&private_display->lock);
129 *props = (const tdm_prop *)private_layer->caps.props;
130 *count = private_layer->caps.prop_count;
132 _pthread_mutex_unlock(&private_display->lock);
/* Return the layer's z-position (stacking order) from the cached caps. */
138 tdm_layer_get_zpos(tdm_layer *layer, int *zpos)
142 TDM_RETURN_VAL_IF_FAIL(zpos != NULL, TDM_ERROR_INVALID_PARAMETER);
144 _pthread_mutex_lock(&private_display->lock);
146 *zpos = private_layer->caps.zpos;
148 _pthread_mutex_unlock(&private_display->lock);
/* Set a backend property |id| to |value| on the layer.
 * Marks the layer as taken (usable = 0) and forwards the call to the
 * backend's layer_set_property hook.
 * Returns TDM_ERROR_NOT_IMPLEMENTED when the backend lacks the hook. */
154 tdm_layer_set_property(tdm_layer *layer, unsigned int id, tdm_value value)
156 tdm_func_layer *func_layer;
159 _pthread_mutex_lock(&private_display->lock);
161 func_layer = &private_display->func_layer;
/* Log the transition only when the layer was still usable before this call. */
163 if (private_layer->usable)
164 TDM_INFO("layer(%d) not usable", private_layer->index);
166 private_layer->usable = 0;
168 if (!func_layer->layer_set_property) {
169 _pthread_mutex_unlock(&private_display->lock);
170 TDM_ERR("not implemented!!");
171 return TDM_ERROR_NOT_IMPLEMENTED;
174 ret = func_layer->layer_set_property(private_layer->layer_backend, id, value);
176 _pthread_mutex_unlock(&private_display->lock);
/* Read a backend property |id| of the layer into |value|.
 * Pure query: does not change the layer's 'usable' state.
 * Returns TDM_ERROR_NOT_IMPLEMENTED when the backend lacks the hook. */
182 tdm_layer_get_property(tdm_layer *layer, unsigned int id, tdm_value *value)
184 tdm_func_layer *func_layer;
187 TDM_RETURN_VAL_IF_FAIL(value != NULL, TDM_ERROR_INVALID_PARAMETER);
189 _pthread_mutex_lock(&private_display->lock);
191 func_layer = &private_display->func_layer;
193 if (!func_layer->layer_get_property) {
194 _pthread_mutex_unlock(&private_display->lock);
195 TDM_ERR("not implemented!!");
196 return TDM_ERROR_NOT_IMPLEMENTED;
199 ret = func_layer->layer_get_property(private_layer->layer_backend, id, value);
201 _pthread_mutex_unlock(&private_display->lock);
/* Apply geometry/format info (source config, destination position,
 * transform) to the layer via the backend's layer_set_info hook.
 * Marks the layer as taken (usable = 0) and logs the full info for
 * debugging. Backend failure is only warned about; 'ret' still carries
 * the backend result to the caller. */
207 tdm_layer_set_info(tdm_layer *layer, tdm_info_layer *info)
209 tdm_func_layer *func_layer;
/* NOTE(review): the declaration of 'fmtstr' (used below with size 128)
 * is on a line not visible in this chunk -- presumably char fmtstr[128]. */
214 TDM_RETURN_VAL_IF_FAIL(info != NULL, TDM_ERROR_INVALID_PARAMETER);
216 _pthread_mutex_lock(&private_display->lock);
218 func_layer = &private_display->func_layer;
220 if (private_layer->usable)
221 TDM_INFO("layer(%p) not usable", private_layer);
223 private_layer->usable = 0;
225 if (!func_layer->layer_set_info) {
226 _pthread_mutex_unlock(&private_display->lock);
227 TDM_ERR("not implemented!!");
228 return TDM_ERROR_NOT_IMPLEMENTED;
/* Render the fourcc of the source format as text ("NONE" when 0). */
231 if (info->src_config.format)
232 snprintf(fmtstr, 128, "%c%c%c%c", FOURCC_STR(info->src_config.format));
234 snprintf(fmtstr, 128, "NONE");
236 TDM_INFO("layer(%p) info: src(%dx%d %d,%d %dx%d %s) dst(%d,%d %dx%d) trans(%d)",
237 private_layer, info->src_config.size.h, info->src_config.size.v,
238 info->src_config.pos.x, info->src_config.pos.y,
239 info->src_config.pos.w, info->src_config.pos.h,
241 info->dst_pos.x, info->dst_pos.y,
242 info->dst_pos.w, info->dst_pos.h,
245 ret = func_layer->layer_set_info(private_layer->layer_backend, info);
246 TDM_WARNING_IF_FAIL(ret == TDM_ERROR_NONE);
248 _pthread_mutex_unlock(&private_display->lock);
/* Retrieve the layer's current geometry/format info from the backend.
 * Returns TDM_ERROR_NOT_IMPLEMENTED when the backend lacks the hook. */
254 tdm_layer_get_info(tdm_layer *layer, tdm_info_layer *info)
256 tdm_func_layer *func_layer;
259 TDM_RETURN_VAL_IF_FAIL(info != NULL, TDM_ERROR_INVALID_PARAMETER);
261 _pthread_mutex_lock(&private_display->lock);
263 func_layer = &private_display->func_layer;
265 if (!func_layer->layer_get_info) {
266 _pthread_mutex_unlock(&private_display->lock);
267 TDM_ERR("not implemented!!");
268 return TDM_ERROR_NOT_IMPLEMENTED;
271 ret = func_layer->layer_get_info(private_layer->layer_backend, info);
273 _pthread_mutex_unlock(&private_display->lock);
/* Dump |buffer| via tbm's internal dump facility, tagged with the
 * output pipe and layer zpos ("tdm_<pipe>_lyr_<zpos>"). Debug aid only.
 * NOTE(review): declarations of 'pipe' and 'zpos' are on lines not
 * visible in this chunk. The log string "dump excute" contains a typo
 * ("execute") -- it is a runtime string, left untouched here. */
279 _tdm_layer_dump_buffer(tdm_layer *layer, tbm_surface_h buffer)
281 tdm_private_layer *private_layer = (tdm_private_layer*)layer;
282 tdm_private_output *private_output = private_layer->private_output;
285 char fname[PATH_MAX];
287 pipe = private_output->pipe;
288 zpos = private_layer->caps.zpos;
290 snprintf(fname, sizeof(fname), "tdm_%d_lyr_%d", pipe, zpos);
292 tbm_surface_internal_dump_buffer(buffer, fname);
293 TDM_DBG("%s dump excute", fname);
/* Release one tracked layer buffer: unlink it from whatever list it is
 * on, drop the backend reference and give the surface back to the
 * layer's tbm queue (when one is attached).
 * The display lock is temporarily released around the unref/release
 * calls -- they may invoke user callbacks that re-enter TDM.
 * NOTE(review): the free() of |layer_buffer| itself is presumably on a
 * line not visible in this chunk. */
299 _tdm_layer_free_buffer(tdm_private_layer *private_layer, tdm_private_layer_buffer *layer_buffer)
301 tdm_private_display *private_display;
306 private_display = private_layer->private_output->private_display;
308 LIST_DEL(&layer_buffer->link);
309 if (layer_buffer->buffer) {
310 _pthread_mutex_unlock(&private_display->lock);
311 tdm_buffer_unref_backend(layer_buffer->buffer);
312 if (private_layer->buffer_queue)
313 tbm_surface_queue_release(private_layer->buffer_queue, layer_buffer->buffer);
314 _pthread_mutex_lock(&private_display->lock);
/* Drop every buffer the layer still holds: waiting, committed and
 * showing buffers, plus any commit handlers for this layer still queued
 * on the output's active and pending commit-handler lists. Handlers are
 * first moved to a local clone list so they can be processed safely
 * while the output lists are being walked. Finally detach this layer's
 * callbacks from its tbm surface queue, if any. */
320 _tdm_layer_free_all_buffers(tdm_private_layer *private_layer)
322 tdm_private_output *private_output = private_layer->private_output;
323 tdm_private_layer_commit_handler *lm = NULL, *lmm = NULL;
324 struct list_head clone_list;
326 LIST_INITHEAD(&clone_list);
328 if (private_layer->waiting_buffer) {
329 _tdm_layer_free_buffer(private_layer, private_layer->waiting_buffer);
330 private_layer->waiting_buffer = NULL;
/* NOTE(review): this debug log runs after the pointer was cleared,
 * so it always prints NULL for waiting_buffer. Same pattern below. */
332 if (tdm_debug_module & TDM_DEBUG_BUFFER)
333 TDM_INFO("layer(%p) waiting_buffer(%p)",
334 private_layer, private_layer->waiting_buffer);
337 if (private_layer->committed_buffer) {
338 _tdm_layer_free_buffer(private_layer, private_layer->committed_buffer);
339 private_layer->committed_buffer = NULL;
341 if (tdm_debug_module & TDM_DEBUG_BUFFER)
342 TDM_INFO("layer(%p) committed_buffer(%p)",
343 private_layer, private_layer->committed_buffer);
346 if (private_layer->showing_buffer) {
347 _tdm_layer_free_buffer(private_layer, private_layer->showing_buffer);
348 private_layer->showing_buffer = NULL;
350 if (tdm_debug_module & TDM_DEBUG_BUFFER)
351 TDM_INFO("layer(%p) showing_buffer(%p)",
352 private_layer, private_layer->showing_buffer);
/* Collect this layer's handlers from the active commit list ... */
355 LIST_FOR_EACH_ENTRY_SAFE(lm, lmm, &private_output->layer_commit_handler_list, link) {
356 if (lm->private_layer != private_layer)
359 LIST_ADDTAIL(&lm->link, &clone_list);
/* ... then drop their output-commit callbacks and buffers. */
362 LIST_FOR_EACH_ENTRY_SAFE(lm, lmm, &clone_list, link) {
364 tdm_output_remove_commit_handler_internal(private_output, _tdm_layer_cb_output_commit, lm);
365 _tdm_layer_free_buffer(private_layer, lm->committed_buffer);
/* Same for handlers still waiting on the pending list. */
369 LIST_FOR_EACH_ENTRY_SAFE(lm, lmm, &private_output->pending_commit_handler_list, link) {
370 if (lm->private_layer != private_layer)
373 LIST_ADDTAIL(&lm->link, &clone_list);
376 LIST_FOR_EACH_ENTRY_SAFE(lm, lmm, &clone_list, link) {
378 _tdm_layer_free_buffer(private_layer, lm->committed_buffer);
/* Detach queue callbacks; the queue handle itself is not owned here. */
382 if (private_layer->buffer_queue) {
383 tbm_surface_queue_remove_acquirable_cb(private_layer->buffer_queue, _tbm_layer_queue_acquirable_cb, private_layer);
384 tbm_surface_queue_remove_destroy_cb(private_layer->buffer_queue, _tbm_layer_queue_destroy_cb, private_layer);
385 private_layer->buffer_queue = NULL;
/* Hand |buffer| to the layer: forwards it to the backend's
 * layer_set_buffer hook and, on success, stores a backend reference as
 * the layer's new waiting_buffer (replacing any previous one). The
 * buffer is actually displayed by a later commit.
 * Marks the layer as taken (usable = 0). May dump the buffer to disk
 * when the corresponding debug flags are set (video layers excluded). */
390 tdm_layer_set_buffer(tdm_layer *layer, tbm_surface_h buffer)
392 tdm_func_layer *func_layer;
393 tdm_private_layer_buffer *layer_buffer;
397 TDM_RETURN_VAL_IF_FAIL(buffer != NULL, TDM_ERROR_INVALID_PARAMETER);
399 _pthread_mutex_lock(&private_display->lock);
/* Optional per-frame dump for debugging (skipped for video layers).
 * NOTE(review): the declaration of the frame counter 'i' (presumably a
 * static int) is on a line not visible in this chunk. */
401 if (tdm_debug_dump & TDM_DUMP_FLAG_LAYER &&
402 !(private_layer->caps.capabilities & TDM_LAYER_CAPABILITY_VIDEO)) {
403 char str[TDM_PATH_LEN];
405 snprintf(str, TDM_PATH_LEN, "layer_%d_%d_%03d",
406 private_output->index, private_layer->index, i++);
407 tdm_helper_dump_buffer_str(buffer, tdm_debug_dump_dir, str);
410 func_layer = &private_display->func_layer;
412 if (private_layer->usable)
413 TDM_INFO("layer(%p) not usable", private_layer);
415 private_layer->usable = 0;
417 if (!func_layer->layer_set_buffer) {
418 _pthread_mutex_unlock(&private_display->lock);
419 TDM_ERR("not implemented!!");
420 return TDM_ERROR_NOT_IMPLEMENTED;
/* Tracking node for the buffer; freed again if the backend call fails. */
423 layer_buffer = calloc(1, sizeof(tdm_private_layer_buffer));
425 _pthread_mutex_unlock(&private_display->lock);
426 TDM_ERR("alloc failed");
427 return TDM_ERROR_OUT_OF_MEMORY;
429 LIST_INITHEAD(&layer_buffer->link);
431 ret = func_layer->layer_set_buffer(private_layer->layer_backend, buffer);
432 TDM_WARNING_IF_FAIL(ret == TDM_ERROR_NONE);
435 if (tdm_dump_enable && !(private_layer->caps.capabilities & TDM_LAYER_CAPABILITY_VIDEO))
436 _tdm_layer_dump_buffer(layer, buffer);
438 if (ret == TDM_ERROR_NONE) {
/* Replace any buffer that was queued but never committed. */
439 if (private_layer->waiting_buffer)
440 _tdm_layer_free_buffer(private_layer, private_layer->waiting_buffer);
442 private_layer->waiting_buffer = layer_buffer;
443 private_layer->waiting_buffer->buffer = tdm_buffer_ref_backend(buffer);
444 if (tdm_debug_module & TDM_DEBUG_BUFFER)
445 TDM_INFO("layer(%p) waiting_buffer(%p)",
446 private_layer, private_layer->waiting_buffer->buffer);
448 _tdm_layer_free_buffer(private_layer, layer_buffer);
450 _pthread_mutex_unlock(&private_display->lock);
/* Detach everything from the layer: free all tracked buffers and queued
 * commit handlers, mark the layer usable again, and tell the backend to
 * unset its buffer via layer_unset_buffer. */
456 tdm_layer_unset_buffer(tdm_layer *layer)
458 tdm_func_layer *func_layer;
461 _pthread_mutex_lock(&private_display->lock);
463 func_layer = &private_display->func_layer;
465 _tdm_layer_free_all_buffers(private_layer);
467 private_layer->usable = 1;
/* NOTE(review): this condition is always true -- 'usable' was just set
 * to 1 above. Possibly meant to log only on an actual transition. */
469 if (private_layer->usable)
470 TDM_INFO("layer(%p) now usable", private_layer);
472 if (!func_layer->layer_unset_buffer) {
473 _pthread_mutex_unlock(&private_display->lock);
474 TDM_ERR("not implemented!!");
475 return TDM_ERROR_NOT_IMPLEMENTED;
478 ret = func_layer->layer_unset_buffer(private_layer->layer_backend);
479 TDM_WARNING_IF_FAIL(ret == TDM_ERROR_NONE);
481 _pthread_mutex_unlock(&private_display->lock);
/* Promote a committed buffer to "showing": frees the previous showing
 * buffer, moves *|committed_buffer| into showing_buffer and clears the
 * caller's pointer (ownership transfer). Also maintains a simple
 * per-layer FPS counter when print_fps is enabled. */
487 tdm_layer_committed(tdm_private_layer *private_layer, tdm_private_layer_buffer **committed_buffer)
489 tdm_private_output *private_output = private_layer->private_output;
490 tdm_private_display *private_display = private_output->private_display;
492 if (private_display->print_fps) {
493 double curr = tdm_helper_get_time();
494 if (private_layer->fps_stamp == 0) {
495 private_layer->fps_stamp = curr;
496 } else if ((curr - private_layer->fps_stamp) > 1.0) {
497 TDM_INFO("output(%d) layer(%d) fps: %d", private_output->index, private_layer->index, private_layer->fps_count);
498 private_layer->fps_count = 0;
499 private_layer->fps_stamp = curr;
501 private_layer->fps_count++;
/* print_fps turned off: reset the counter state. */
502 } else if (private_layer->fps_stamp != 0) {
503 private_layer->fps_stamp = 0;
504 private_layer->fps_count = 0;
507 if (private_layer->showing_buffer)
508 _tdm_layer_free_buffer(private_layer, private_layer->showing_buffer);
510 private_layer->showing_buffer = *committed_buffer;
511 *committed_buffer = NULL;
/* NOTE(review): *committed_buffer is printed after being set to NULL
 * above, so this debug log always shows NULL for it. */
513 if (tdm_debug_module & TDM_DEBUG_BUFFER)
514 TDM_INFO("layer(%p) committed_buffer(%p) showing_buffer(%p)",
515 private_layer, *committed_buffer,
516 (private_layer->showing_buffer) ? private_layer->showing_buffer->buffer : NULL);
/* Vblank arrival handler for the layer-commit path. Two phases:
 * 1) every handler on the active commit list is "done": promote its
 *    buffer to showing, invoke its user callback (with the display lock
 *    dropped) and free the handler's buffer bookkeeping;
 * 2) handlers on the pending list are moved to the active list, the
 *    output is committed again and a new vblank wait is armed.
 * On any failure in phase 2 the pending handlers' callbacks are still
 * invoked directly (see the CAUTION note below) so callers always get
 * their completion notification. Called with the display lock held. */
520 _tdm_layer_got_output_vblank(tdm_private_output *private_output, unsigned int sequence,
521 unsigned int tv_sec, unsigned int tv_usec)
523 tdm_private_layer_commit_handler *lm = NULL, *lmm = NULL;
524 tdm_private_display *private_display;
525 struct list_head clone_list, pending_clone_list;
526 tdm_error ret = TDM_ERROR_NONE;
528 private_display = private_output->private_display;
530 private_output->layer_waiting_vblank = 0;
532 LIST_INITHEAD(&clone_list);
533 LIST_INITHEAD(&pending_clone_list);
/* Snapshot both handler lists onto local clones so callbacks that
 * re-enter TDM cannot corrupt the iteration. */
535 LIST_FOR_EACH_ENTRY_SAFE(lm, lmm, &private_output->layer_commit_handler_list, link) {
537 lm->private_layer->committing = 0;
538 LIST_ADDTAIL(&lm->link, &clone_list);
541 LIST_FOR_EACH_ENTRY_SAFE(lm, lmm, &private_output->pending_commit_handler_list, link) {
543 lm->private_layer->committing = 0;
544 LIST_ADDTAIL(&lm->link, &pending_clone_list);
547 LIST_FOR_EACH_ENTRY_SAFE(lm, lmm, &clone_list, link) {
548 if (tdm_debug_module & TDM_DEBUG_COMMIT)
549 TDM_INFO("layer(%p) committed. handle(%p) commited_buffer(%p)",
550 lm->private_layer, lm, (lm->committed_buffer) ? lm->committed_buffer->buffer : NULL);
553 tdm_layer_committed(lm->private_layer, &lm->committed_buffer);
/* User callback must run without the display lock held. */
554 _pthread_mutex_unlock(&private_display->lock);
556 lm->func(lm->private_layer, sequence, tv_sec, tv_usec, lm->user_data);
557 _pthread_mutex_lock(&private_display->lock);
558 if (lm->committed_buffer)
559 _tdm_layer_free_buffer(lm->private_layer, lm->committed_buffer);
563 if (LIST_IS_EMPTY(&pending_clone_list))
/* Flush the pending commits now that the previous ones displayed. */
566 TDM_GOTO_IF_FAIL(private_output->vblank != NULL, wait_failed);
568 ret = tdm_output_commit_internal(private_output, 0, NULL, NULL);
569 TDM_GOTO_IF_FAIL(ret == TDM_ERROR_NONE, wait_failed);
571 if (tdm_debug_module & TDM_DEBUG_COMMIT)
572 TDM_INFO("layer commit: output(%d) commit", private_output->pipe);
574 if (private_output->current_dpms_value == TDM_OUTPUT_DPMS_ON) {
575 /* tdm_vblank APIs is for server. it should be called in unlock status*/
576 if (!private_output->layer_waiting_vblank) {
577 _pthread_mutex_unlock(&private_display->lock);
578 ret = tdm_vblank_wait(private_output->vblank, 0, 0, 1, _tdm_layer_cb_wait_vblank, private_output);
579 _pthread_mutex_lock(&private_display->lock);
580 TDM_GOTO_IF_FAIL(ret == TDM_ERROR_NONE, wait_failed);
581 private_output->layer_waiting_vblank = 1;
585 if (tdm_debug_module & TDM_DEBUG_COMMIT)
586 TDM_INFO("layer commit: output(%d) wait vblank", private_output->pipe);
/* Pending handlers become the new active set for the next vblank. */
588 LIST_FOR_EACH_ENTRY_SAFE(lm, lmm, &pending_clone_list, link) {
590 LIST_ADDTAIL(&lm->link, &private_output->layer_commit_handler_list);
/* With DPMS off there is no hardware vblank: complete synchronously. */
593 if (private_output->current_dpms_value != TDM_OUTPUT_DPMS_ON) {
594 TDM_WRN("TDM_OUTPUT_DPMS_OFF. Directly call vblank callback.");
595 _pthread_mutex_unlock(&private_display->lock);
596 _tdm_layer_cb_wait_vblank(private_output->vblank, 0, 0, 0, 0, private_output);
597 _pthread_mutex_lock(&private_display->lock);
602 /* CAUTION: Once _tdm_layer_commit returns success, the layer commit handler MUST be called always.
603 * That is, even if we get error in _tdm_layer_got_output_vblank() function for some reasons,
604 * the layer commit handler MUST be called.
606 LIST_FOR_EACH_ENTRY_SAFE(lm, lmm, &pending_clone_list, link) {
607 TDM_WRN("Directly call layer commit handlers: ret(%d)\n", ret);
609 _pthread_mutex_unlock(&private_display->lock);
611 lm->func(lm->private_layer, sequence, tv_sec, tv_usec, lm->user_data);
612 _pthread_mutex_lock(&private_display->lock);
613 _tdm_layer_free_buffer(lm->private_layer, lm->committed_buffer);
/* Output-commit completion callback for the no-commit-per-vblank path.
 * Verifies the handler is still queued on the output's active list
 * (it may have been removed meanwhile), promotes the committed buffer
 * to showing, invokes the user's layer commit handler with the lock
 * dropped, then unlinks and frees the handler.
 * NOTE(review): the body of the membership-check loop is on lines not
 * visible in this chunk -- presumably sets a 'found' flag / bails out
 * early when the handler is no longer listed. */
621 _tdm_layer_cb_output_commit(tdm_output *output, unsigned int sequence,
622 unsigned int tv_sec, unsigned int tv_usec, void *user_data)
624 tdm_private_layer_commit_handler *layer_commit_handler = user_data;
625 tdm_private_layer_commit_handler *lm = NULL;
626 tdm_private_display *private_display;
627 tdm_private_output *private_output = output;
628 tdm_private_layer *private_layer;
631 TDM_RETURN_IF_FAIL(layer_commit_handler != NULL);
633 private_display = private_output->private_display;
635 LIST_FOR_EACH_ENTRY(lm, &private_output->layer_commit_handler_list, link) {
636 if (layer_commit_handler == lm) {
645 private_layer = layer_commit_handler->private_layer;
647 if (tdm_debug_module & TDM_DEBUG_COMMIT)
648 TDM_INFO("layer(%p) commit: output(%d) committed. handle(%p)",
649 private_layer, private_output->pipe, layer_commit_handler);
651 _pthread_mutex_lock(&private_display->lock);
653 tdm_layer_committed(private_layer, &layer_commit_handler->committed_buffer);
655 if (layer_commit_handler->func) {
/* User callback runs without the display lock held. */
656 _pthread_mutex_unlock(&private_display->lock);
657 layer_commit_handler->func(private_output, sequence,
658 tv_sec, tv_usec, layer_commit_handler->user_data);
659 _pthread_mutex_lock(&private_display->lock);
662 LIST_DEL(&layer_commit_handler->link);
663 free(layer_commit_handler);
665 _pthread_mutex_unlock(&private_display->lock);
/* tdm_vblank wait callback: takes the display lock and delegates to
 * _tdm_layer_got_output_vblank() to complete/advance layer commits. */
669 _tdm_layer_cb_wait_vblank(tdm_vblank *vblank, tdm_error error, unsigned int sequence,
670 unsigned int tv_sec, unsigned int tv_usec, void *user_data)
672 tdm_private_output *private_output = user_data;
673 tdm_private_display *private_display;
675 TDM_RETURN_IF_FAIL(private_output != NULL);
677 private_display = private_output->private_display;
679 _pthread_mutex_lock(&private_display->lock);
681 if (tdm_debug_module & TDM_DEBUG_COMMIT)
682 TDM_INFO("layer commit: output(%d) got vblank", private_output->pipe);
684 _tdm_layer_got_output_vblank(private_output, sequence, tv_sec, tv_usec);
686 _pthread_mutex_unlock(&private_display->lock);
/* Count the layers of |private_output| currently in use (usable == 0).
 * NOTE(review): "lauer" in the function name looks like a typo for
 * "layer"; renaming requires updating the caller in
 * _tdm_layer_commit_possible() as well, so it is only flagged here.
 * NOTE(review): the loop body (count increment / return) is on lines
 * not visible in this chunk. */
690 _tdm_lauer_get_output_used_layer_count(tdm_private_output *private_output)
692 tdm_private_layer *private_layer = NULL;
693 unsigned int count = 0;
695 LIST_FOR_EACH_ENTRY(private_layer, &private_output->layer_list, link) {
696 if (!private_layer->usable)
703 /* commit_per_vblank == 1: we can commit if
704 * - there is no previous commit request
705 * - only 1 layer is used
706 * commit_per_vblank == 2: we can commit if
707 * - there is no previous commit request
/* Decide whether a layer commit may be forwarded to the output right
 * now, or must be queued on the pending list until the next vblank.
 * Always possible when commit_per_vblank is disabled (<= 0). */
710 _tdm_layer_commit_possible(tdm_private_layer *private_layer)
712 tdm_private_output *private_output = private_layer->private_output;
713 tdm_private_display *private_display = private_output->private_display;
715 TDM_RETURN_VAL_IF_FAIL(!(private_display->commit_per_vblank > 0), 1);
717 /* There is a previous commit request which is not done and displayed on screen yet.
718 * We can't commit at this time.
720 if (!LIST_IS_EMPTY(&private_output->layer_commit_handler_list)) {
721 if (tdm_debug_module & TDM_DEBUG_COMMIT)
722 TDM_INFO("layer(%p) commit: not possible(previous commit)", private_layer);
/* Mode 1 additionally requires that at most one layer is in use. */
726 if (private_display->commit_per_vblank == 1 && _tdm_lauer_get_output_used_layer_count(private_output) > 1) {
727 if (tdm_debug_module & TDM_DEBUG_COMMIT)
728 TDM_INFO("layer(%p) commit: not possible(more than 2 layers)", private_layer);
732 TDM_INFO("layer(%p) commit: possible", private_layer);
737 /* CAUTION: Once _tdm_layer_commit returns success, the layer commit handler MUST be called always.
738 * That is, even if we get error in _tdm_layer_got_output_vblank() function for some reasons,
739 * the layer commit handler MUST be called.
/* Core layer-commit implementation (called with the display lock held).
 * Allocates a commit handler that takes ownership of the layer's
 * waiting_buffer, then, depending on the display's commit mode:
 *  - commit_per_vblank off (TDM_COMMIT_TYPE_OUTPUT): queue the handler
 *    and commit the output immediately with a completion callback;
 *  - commit_per_vblank on (TDM_COMMIT_TYPE_LAYER): commit now when
 *    _tdm_layer_commit_possible() allows it, otherwise park the handler
 *    on the pending list; in both cases make sure a vblank object
 *    exists and a vblank wait is armed to drive completion.
 * On failure, ownership of the buffer is returned to waiting_buffer and
 * the handler is freed. */
742 _tdm_layer_commit(tdm_layer *layer, tdm_layer_commit_handler func, void *user_data)
744 tdm_private_layer_commit_handler *layer_commit_handler;
747 layer_commit_handler = calloc(1, sizeof(tdm_private_layer_commit_handler));
748 if (!layer_commit_handler) {
749 TDM_ERR("failed: alloc memory");
750 return TDM_ERROR_OUT_OF_MEMORY;
753 if (tdm_debug_module & TDM_DEBUG_COMMIT)
754 TDM_INFO("layer(%p) commit: handle(%p)", private_layer, layer_commit_handler);
756 LIST_INITHEAD(&layer_commit_handler->link);
757 layer_commit_handler->private_layer = private_layer;
758 layer_commit_handler->func = func;
759 layer_commit_handler->user_data = user_data;
/* The waiting buffer becomes the committed buffer (ownership moves). */
761 layer_commit_handler->committed_buffer = private_layer->waiting_buffer;
762 private_layer->waiting_buffer = NULL;
764 if (tdm_debug_module & TDM_DEBUG_BUFFER)
765 TDM_INFO("layer(%p) waiting_buffer(%p) committed_buffer(%p)",
766 private_layer, private_layer->waiting_buffer,
767 (layer_commit_handler->committed_buffer) ? layer_commit_handler->committed_buffer->buffer : NULL);
769 if (!private_display->commit_per_vblank) {
770 TDM_GOTO_IF_FAIL(private_display->commit_type == TDM_COMMIT_TYPE_OUTPUT, commit_failed);
772 LIST_ADDTAIL(&layer_commit_handler->link, &private_output->layer_commit_handler_list);
773 ret = tdm_output_commit_internal(private_layer->private_output, 0, _tdm_layer_cb_output_commit, layer_commit_handler);
774 TDM_GOTO_IF_FAIL(ret == TDM_ERROR_NONE, commit_failed);
776 if (tdm_debug_module & TDM_DEBUG_COMMIT)
777 TDM_INFO("layer(%p) commit: no commit-per-vblank", private_layer);
779 TDM_GOTO_IF_FAIL(private_display->commit_type == TDM_COMMIT_TYPE_LAYER, commit_failed);
781 if (private_layer->committing)
782 TDM_WRN("layer(%d) too many commit", private_layer->index);
784 private_layer->committing = 1;
786 if (_tdm_layer_commit_possible(private_layer)) {
787 /* add to layer_commit_handler_list */
788 LIST_ADDTAIL(&layer_commit_handler->link, &private_output->layer_commit_handler_list);
789 ret = tdm_output_commit_internal(private_layer->private_output, 0, NULL, NULL);
790 TDM_GOTO_IF_FAIL(ret == TDM_ERROR_NONE, commit_failed);
792 if (tdm_debug_module & TDM_DEBUG_COMMIT)
793 TDM_INFO("layer(%p) commit: output", private_layer);
795 /* add to pending_commit_handler_list. It will be commited when a vblank occurs */
796 LIST_ADDTAIL(&layer_commit_handler->link, &private_output->pending_commit_handler_list);
798 if (tdm_debug_module & TDM_DEBUG_COMMIT)
799 TDM_INFO("layer(%p) commit: pending", private_layer);
/* Lazily create the per-output vblank object on first use. */
802 if (!private_output->vblank) {
803 /* tdm_vblank APIs is for server. it should be called in unlock status*/
804 _pthread_mutex_unlock(&private_display->lock);
805 private_output->vblank = tdm_vblank_create(private_display, private_output, NULL);
806 _pthread_mutex_lock(&private_display->lock);
807 TDM_GOTO_IF_FAIL(private_output->vblank != NULL, commit_failed);
810 if (!private_output->layer_waiting_vblank) {
811 /* tdm_vblank APIs is for server. it should be called in unlock status*/
812 _pthread_mutex_unlock(&private_display->lock);
813 ret = tdm_vblank_wait(private_output->vblank, 0, 0, 1, _tdm_layer_cb_wait_vblank, private_output);
814 _pthread_mutex_lock(&private_display->lock);
815 TDM_GOTO_IF_FAIL(ret == TDM_ERROR_NONE, commit_failed);
816 private_output->layer_waiting_vblank = 1;
818 if (tdm_debug_module & TDM_DEBUG_COMMIT)
819 TDM_INFO("layer(%p) commit: wait vblank", private_layer);
/* Failure path: give the buffer back to waiting_buffer, undo queueing. */
826 if (layer_commit_handler) {
827 private_layer->waiting_buffer = layer_commit_handler->committed_buffer;
828 LIST_DEL(&layer_commit_handler->link);
829 free(layer_commit_handler);
/* Public commit entry point. Latches the display's commit type (OUTPUT
 * vs LAYER, derived from commit_per_vblank) on first use, refuses to
 * commit while the output's DPMS state is not ON, then delegates to
 * _tdm_layer_commit(). |func| is invoked when the commit has been
 * displayed. */
835 tdm_layer_commit(tdm_layer *layer, tdm_layer_commit_handler func, void *user_data)
839 _pthread_mutex_lock(&private_display->lock);
841 if (private_display->commit_type == TDM_COMMIT_TYPE_NONE) {
842 if (!private_display->commit_per_vblank)
843 private_display->commit_type = TDM_COMMIT_TYPE_OUTPUT;
845 private_display->commit_type = TDM_COMMIT_TYPE_LAYER;
848 if (private_output->current_dpms_value > TDM_OUTPUT_DPMS_ON) {
849 TDM_ERR("layer(%p)'s output(%d) dpms: %s", layer, private_output->pipe,
850 tdm_dpms_str(private_output->current_dpms_value));
851 _pthread_mutex_unlock(&private_display->lock);
852 return TDM_ERROR_DPMS_OFF;
855 ret = _tdm_layer_commit(private_layer, func, user_data);
857 _pthread_mutex_unlock(&private_display->lock);
/* Report whether a commit for this layer is still in flight
 * (set in _tdm_layer_commit, cleared when the vblank arrives). */
863 tdm_layer_is_committing(tdm_layer *layer, unsigned int *committing)
867 TDM_RETURN_VAL_IF_FAIL(committing != NULL, TDM_ERROR_INVALID_PARAMETER);
869 _pthread_mutex_lock(&private_display->lock);
871 *committing = private_layer->committing;
873 _pthread_mutex_unlock(&private_display->lock);
/* Remove every queued commit handler matching |func|+|user_data| from
 * both the active and pending commit lists of the layer's output,
 * dropping the associated output-commit callback and buffer.
 * Internal variant: requires the display lock to already be held. */
879 tdm_layer_remove_commit_handler_internal(tdm_layer *layer, tdm_layer_commit_handler func, void *user_data)
881 tdm_private_layer *private_layer = (tdm_private_layer*)layer;
882 tdm_private_output *private_output = private_layer->private_output;
883 tdm_private_layer_commit_handler *lm = NULL, *lmm = NULL;
885 TDM_RETURN_IF_FAIL(private_layer != NULL);
886 TDM_RETURN_IF_FAIL(TDM_MUTEX_IS_LOCKED());
888 LIST_FOR_EACH_ENTRY_SAFE(lm, lmm, &private_output->layer_commit_handler_list, link) {
889 if (lm->func == func && lm->user_data == user_data) {
891 tdm_output_remove_commit_handler_internal(private_output, _tdm_layer_cb_output_commit, lm);
892 _tdm_layer_free_buffer(private_layer, lm->committed_buffer);
898 LIST_FOR_EACH_ENTRY_SAFE(lm, lmm, &private_output->pending_commit_handler_list, link) {
899 if (lm->func == func && lm->user_data == user_data) {
901 tdm_output_remove_commit_handler_internal(private_output, _tdm_layer_cb_output_commit, lm);
902 _tdm_layer_free_buffer(private_layer, lm->committed_buffer);
/* Public wrapper: take the display lock and delegate to
 * tdm_layer_remove_commit_handler_internal(). */
910 tdm_layer_remove_commit_handler(tdm_layer *layer, tdm_layer_commit_handler func, void *user_data)
914 _pthread_mutex_lock(&private_display->lock);
916 tdm_layer_remove_commit_handler_internal(layer, func, user_data);
918 _pthread_mutex_unlock(&private_display->lock);
/* Return the tbm surface currently showing on the layer, or report
 * TDM_ERROR_OPERATION_FAILED through |error| when nothing is showing.
 * NOTE(review): |error| is dereferenced here; a NULL check for it is
 * presumably on a line not visible in this chunk -- confirm. */
924 tdm_layer_get_displaying_buffer(tdm_layer *layer, tdm_error *error)
926 tbm_surface_h buffer;
927 LAYER_FUNC_ENTRY_ERROR();
929 _pthread_mutex_lock(&private_display->lock);
932 *error = TDM_ERROR_NONE;
934 if (private_layer->showing_buffer) {
935 buffer = private_layer->showing_buffer->buffer;
938 *error = TDM_ERROR_OPERATION_FAILED;
939 _pthread_mutex_unlock(&private_display->lock);
940 TDM_DBG("layer(%p) showing_buffer is null", private_layer);
943 _pthread_mutex_unlock(&private_display->lock);
/* tbm surface-queue "acquirable" callback: a client has queued a new
 * surface. Acquire it, hand it to the backend via layer_set_buffer,
 * make it the layer's waiting_buffer (dropping any previous one back to
 * the queue) and trigger a commit according to the display's commit
 * type. On any failure the tracking node is freed again. */
949 _tbm_layer_queue_acquirable_cb(tbm_surface_queue_h surface_queue, void *data)
951 TDM_RETURN_IF_FAIL(data != NULL);
952 tdm_layer *layer = data;
953 tdm_func_layer *func_layer;
954 tbm_surface_h surface = NULL;
955 tdm_private_layer_buffer *layer_buffer;
956 LAYER_FUNC_ENTRY_VOID_RETURN();
958 _pthread_mutex_lock(&private_display->lock);
960 func_layer = &private_display->func_layer;
961 if (!func_layer->layer_set_buffer) {
962 _pthread_mutex_unlock(&private_display->lock);
966 layer_buffer = calloc(1, sizeof(tdm_private_layer_buffer));
968 _pthread_mutex_unlock(&private_display->lock);
969 TDM_ERR("alloc failed");
972 LIST_INITHEAD(&layer_buffer->link);
974 if (TBM_SURFACE_QUEUE_ERROR_NONE != tbm_surface_queue_acquire(private_layer->buffer_queue, &surface) ||
976 TDM_ERR("layer(%p) tbm_surface_queue_acquire() failed surface:%p",
977 private_layer, surface);
978 _pthread_mutex_unlock(&private_display->lock);
983 ret = func_layer->layer_set_buffer(private_layer->layer_backend, surface);
984 TDM_WARNING_IF_FAIL(ret == TDM_ERROR_NONE);
986 if (ret == TDM_ERROR_NONE) {
/* A not-yet-committed previous buffer is superseded and released. */
987 if (private_layer->waiting_buffer) {
988 TDM_DBG("layer(%p) drop waiting_buffer(%p)", private_layer, private_layer->waiting_buffer->buffer);
989 _tdm_layer_free_buffer(private_layer, private_layer->waiting_buffer);
992 private_layer->waiting_buffer = layer_buffer;
993 private_layer->waiting_buffer->buffer = tdm_buffer_ref_backend(surface);
995 if (tdm_debug_module & TDM_DEBUG_BUFFER)
996 TDM_INFO("layer(%p) waiting_buffer(%p)",
997 private_layer, private_layer->waiting_buffer->buffer);
/* Kick the appropriate commit path for the current commit mode. */
999 if (private_display->commit_type == TDM_COMMIT_TYPE_OUTPUT) {
1000 ret = tdm_output_commit_internal(private_layer->private_output, 0, NULL, NULL);
1001 if (ret != TDM_ERROR_NONE)
1002 TDM_ERR("tdm_output_commit_internal() is fail");
1003 } else if (private_display->commit_type == TDM_COMMIT_TYPE_LAYER) {
1004 ret = _tdm_layer_commit(private_layer, NULL, NULL);
1005 if (ret != TDM_ERROR_NONE)
1006 TDM_ERR("layer(%p) _tdm_layer_commit() is fail", private_layer);
1008 TDM_NEVER_GET_HERE();
1011 _tdm_layer_free_buffer(private_layer, layer_buffer);
1013 _pthread_mutex_unlock(&private_display->lock);
/* tbm surface-queue "destroy" callback: the queue is going away.
 * Clear the layer's queue reference first (so buffer teardown does not
 * try to release surfaces back to a dying queue) and free everything
 * the layer holds. */
1017 _tbm_layer_queue_destroy_cb(tbm_surface_queue_h surface_queue, void *data)
1019 TDM_RETURN_IF_FAIL(data != NULL);
1020 tdm_layer *layer = data;
1021 LAYER_FUNC_ENTRY_VOID_RETURN();
1022 TDM_RETURN_IF_FAIL(ret == TDM_ERROR_NONE);
1024 _pthread_mutex_lock(&private_display->lock);
1026 private_layer->buffer_queue = NULL;
1028 _tdm_layer_free_all_buffers(private_layer);
1030 _pthread_mutex_unlock(&private_display->lock);
/* Attach a tbm surface queue to the layer: subsequent queued surfaces
 * are picked up by _tbm_layer_queue_acquirable_cb() and displayed
 * automatically. Marks the layer as taken (usable = 0). A no-op when
 * the same queue is already attached. Any pending waiting_buffer from
 * the direct set-buffer path is dropped first. */
1034 tdm_layer_set_buffer_queue(tdm_layer *layer, tbm_surface_queue_h buffer_queue)
1036 tdm_func_layer *func_layer;
1039 TDM_RETURN_VAL_IF_FAIL(buffer_queue != NULL, TDM_ERROR_INVALID_PARAMETER);
1041 _pthread_mutex_lock(&private_display->lock);
1043 func_layer = &private_display->func_layer;
1045 if (private_layer->usable)
1046 TDM_INFO("layer(%p) not usable", private_layer);
1048 private_layer->usable = 0;
/* The queue path feeds the backend through layer_set_buffer too. */
1050 if (!func_layer->layer_set_buffer) {
1051 _pthread_mutex_unlock(&private_display->lock);
1052 TDM_ERR("not implemented!!");
1053 return TDM_ERROR_NOT_IMPLEMENTED;
1056 if (buffer_queue == private_layer->buffer_queue) {
1057 _pthread_mutex_unlock(&private_display->lock);
1058 return TDM_ERROR_NONE;
1061 if (private_layer->waiting_buffer) {
1062 _tdm_layer_free_buffer(private_layer, private_layer->waiting_buffer);
1063 private_layer->waiting_buffer = NULL;
1065 if (tdm_debug_module & TDM_DEBUG_BUFFER)
1066 TDM_INFO("layer(%p) waiting_buffer(%p)",
1067 private_layer, private_layer->waiting_buffer);
1070 private_layer->buffer_queue = buffer_queue;
1071 tbm_surface_queue_add_acquirable_cb(private_layer->buffer_queue,
1072 _tbm_layer_queue_acquirable_cb,
1074 tbm_surface_queue_add_destroy_cb(private_layer->buffer_queue,
1075 _tbm_layer_queue_destroy_cb,
1077 _pthread_mutex_unlock(&private_display->lock);
/* Detach the buffer queue from the layer. Delegates to
 * tdm_layer_unset_buffer(), which frees all buffers and removes the
 * queue callbacks via _tdm_layer_free_all_buffers(). */
1083 tdm_layer_unset_buffer_queue(tdm_layer *layer)
1085 return tdm_layer_unset_buffer(layer);
/* Report whether the layer is free for use (1) or already taken by a
 * set_property/set_info/set_buffer/set_buffer_queue call (0). */
1089 tdm_layer_is_usable(tdm_layer *layer, unsigned int *usable)
1093 TDM_RETURN_VAL_IF_FAIL(usable != NULL, TDM_ERROR_INVALID_PARAMETER);
1095 _pthread_mutex_lock(&private_display->lock);
1097 *usable = private_layer->usable;
1099 _pthread_mutex_unlock(&private_display->lock);
/* Set the z-position of a video layer via the backend's
 * layer_set_video_pos hook. Only valid for layers with the
 * TDM_LAYER_CAPABILITY_VIDEO capability; rejects other layers with
 * TDM_ERROR_INVALID_PARAMETER. */
1105 tdm_layer_set_video_pos(tdm_layer *layer, int zpos)
1107 tdm_func_layer *func_layer;
1110 _pthread_mutex_lock(&private_display->lock);
1112 func_layer = &private_display->func_layer;
1114 if (!(private_layer->caps.capabilities & TDM_LAYER_CAPABILITY_VIDEO)) {
1115 TDM_ERR("layer(%p) is not video layer", private_layer);
1116 _pthread_mutex_unlock(&private_display->lock);
1117 return TDM_ERROR_INVALID_PARAMETER;
1120 if (!func_layer->layer_set_video_pos) {
1121 _pthread_mutex_unlock(&private_display->lock);
1122 TDM_ERR("not implemented!!");
1123 return TDM_ERROR_NOT_IMPLEMENTED;
1126 ret = func_layer->layer_set_video_pos(private_layer->layer_backend, zpos);
1128 _pthread_mutex_unlock(&private_display->lock);
/* Create a capture object bound to this layer. Returns the new capture
 * handle (or NULL on failure, with the reason stored in |error| by the
 * internal helper). */
1133 EXTERN tdm_capture *
1134 tdm_layer_create_capture(tdm_layer *layer, tdm_error *error)
1136 tdm_capture *capture = NULL;
1138 LAYER_FUNC_ENTRY_ERROR();
1140 _pthread_mutex_lock(&private_display->lock);
1142 capture = (tdm_capture *)tdm_capture_create_layer_internal(private_layer, error);
1144 _pthread_mutex_unlock(&private_display->lock);
/* Query backend-specific buffer allocation flags for this layer via the
 * layer_get_buffer_flags hook. Returns TDM_ERROR_NOT_IMPLEMENTED when
 * the backend lacks the hook. (Definition continues past the end of
 * this chunk.) */
1150 tdm_layer_get_buffer_flags(tdm_layer *layer, unsigned int *flags)
1152 tdm_func_layer *func_layer;
1155 _pthread_mutex_lock(&private_display->lock);
1157 func_layer = &private_display->func_layer;
1159 if (!func_layer->layer_get_buffer_flags) {
1160 _pthread_mutex_unlock(&private_display->lock);
1161 TDM_ERR("not implemented!!");
1162 return TDM_ERROR_NOT_IMPLEMENTED;
1165 ret = func_layer->layer_get_buffer_flags(private_layer->layer_backend, flags);
1167 _pthread_mutex_unlock(&private_display->lock);