1 /**************************************************************************
5 * Copyright 2015 Samsung Electronics co., Ltd. All Rights Reserved.
7 * Contact: Eunchul Kim <chulspro.kim@samsung.com>,
8 * JinYoung Jeon <jy0.jeon@samsung.com>,
9 * Taeheon Kim <th908.kim@samsung.com>,
10 * YoungJun Cho <yj44.cho@samsung.com>,
11 * SooChan Lim <sc1.lim@samsung.com>,
12 * Boram Park <sc1.lim@samsung.com>
14 * Permission is hereby granted, free of charge, to any person obtaining a
15 * copy of this software and associated documentation files (the
16 * "Software"), to deal in the Software without restriction, including
17 * without limitation the rights to use, copy, modify, merge, publish,
18 * distribute, sub license, and/or sell copies of the Software, and to
19 * permit persons to whom the Software is furnished to do so, subject to
20 * the following conditions:
22 * The above copyright notice and this permission notice (including the
23 * next paragraph) shall be included in all copies or substantial portions
26 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
27 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
28 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
29 * IN NO EVENT SHALL PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR
30 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
31 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
32 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
34 **************************************************************************/
40 #include "tdm_private.h"
/* Common prologue for public tdm_output_* entry points: declares
 * private_display / private_output / ret, validates the caller-supplied
 * 'output' handle (returning TDM_ERROR_INVALID_PARAMETER from the enclosing
 * function on failure), then resolves the private output and display. */
44 #define OUTPUT_FUNC_ENTRY() \
45 tdm_private_display *private_display; \
46 tdm_private_output *private_output; \
47 tdm_error ret = TDM_ERROR_NONE; /* default TDM_ERROR_NONE */\
48 TDM_RETURN_VAL_IF_FAIL(tdm_output_is_valid(output), TDM_ERROR_INVALID_PARAMETER); \
49 private_output = (tdm_private_output*)output; \
50 private_display = private_output->private_display
/* Variant of OUTPUT_FUNC_ENTRY for entry points that return a pointer:
 * on an invalid handle it returns NULL and reports
 * TDM_ERROR_INVALID_PARAMETER through the _WITH_ERROR macro
 * (presumably via the function's 'error' out-parameter — confirm the
 * TDM_RETURN_VAL_IF_FAIL_WITH_ERROR definition in tdm_private.h). */
52 #define OUTPUT_FUNC_ENTRY_ERROR() \
53 tdm_private_display *private_display; \
54 tdm_private_output *private_output; \
55 tdm_error ret = TDM_ERROR_NONE; /* default TDM_ERROR_NONE */\
56 TDM_RETURN_VAL_IF_FAIL_WITH_ERROR(tdm_output_is_valid(output), TDM_ERROR_INVALID_PARAMETER, NULL); \
57 private_output = (tdm_private_output*)output; \
58 private_display = private_output->private_display
/* Forward declaration: (re)arm the per-output vblank timeout timer.
 * NOTE(review): the return type line appears elided in this chunk. */
61 _tdm_output_vblank_timeout_update(tdm_private_output *private_output, int ms_delay);
/* Timer callback fired when a requested vblank has not arrived in time.
 * Logs an error for the output and dumps every pending vblank handler
 * (interval / sync / sent_to_frontend / owner thread) to aid debugging,
 * then returns TDM_ERROR_NONE. Purely diagnostic — no state is changed. */
64 _tdm_output_vblank_timeout_cb(void *user_data)
66 tdm_private_output *private_output = user_data;
67 tdm_private_output_vblank_handler *v = NULL;
69 TDM_RETURN_VAL_IF_FAIL(private_output != NULL, TDM_ERROR_OPERATION_FAILED);
71 TDM_ERR("TDM output(%d) vblank TIMEOUT!!", private_output->pipe);
73 LIST_FOR_EACH_ENTRY(v, &private_output->vblank_handler_list, link) {
74 TDM_ERR("vblank_handler(%p) interval(%d) sync(%d) sent_to_frontend(%u) owner_tid(%d)",
75 v, v->interval, v->sync, v->sent_to_frontend, v->owner_tid);
78 return TDM_ERROR_NONE;
/* Diagnostic helper: log every pending vblank handler on this output whose
 * user_data matches the given pointer. Both arguments must be non-NULL.
 * Read-only; intended for debugging stuck vblank waits. */
82 tdm_output_vblank_print_wait_information(tdm_private_output *private_output, void *user_data)
84 tdm_private_output_vblank_handler *v = NULL;
86 TDM_RETURN_IF_FAIL(private_output != NULL);
87 TDM_RETURN_IF_FAIL(user_data != NULL);
89 TDM_ERR("TDM output(%d) vblank user_data(%p) info!!", private_output->pipe, user_data);
91 LIST_FOR_EACH_ENTRY(v, &private_output->vblank_handler_list, link) {
92 if (v->user_data != user_data)
94 TDM_ERR("vblank_handler(%p) interval(%d) sync(%d) sent_to_frontend(%u) owner_tid(%d)",
95 v, v->interval, v->sync, v->sent_to_frontend, v->owner_tid);
/* (Re)arm the output's vblank timeout timer with 'ms_delay' milliseconds.
 * Lazily creates the timer on first use via the event loop, using
 * _tdm_output_vblank_timeout_cb as the expiry handler; a delay of 0 is used
 * by callers to disarm it (see _tdm_output_thread_cb_vblank). Errors from
 * timer creation/update are logged but otherwise non-fatal here. */
100 _tdm_output_vblank_timeout_update(tdm_private_output *private_output, int ms_delay)
104 if (!private_output->vblank_timeout_timer) {
105 private_output->vblank_timeout_timer =
106 tdm_event_loop_add_timer_handler(private_output->private_display,
107 _tdm_output_vblank_timeout_cb,
110 if (!private_output->vblank_timeout_timer) {
111 TDM_ERR("output(%d) couldn't add timer", private_output->pipe);
114 TDM_INFO("output(%d) create vblank timeout timer", private_output->pipe);
117 ret = tdm_event_loop_source_timer_update(private_output->vblank_timeout_timer, ms_delay);
118 if (ret != TDM_ERROR_NONE) {
119 TDM_ERR("output(%d) couldn't update timer", private_output->pipe);
/* Look up the frontend hwc-window wrapper that corresponds to a backend
 * hwc_window handle by walking the output's hwc_window_list.
 * Returns the match, or (per the visible control flow) falls through when
 * no entry matches — the NULL-return tail appears elided in this chunk. */
124 static tdm_private_hwc_window *
125 _tdm_output_find_private_hwc_window(tdm_private_output *private_output,
126 tdm_hwc_window *hwc_window_backend)
128 tdm_private_hwc_window *private_hwc_window = NULL;
130 LIST_FOR_EACH_ENTRY(private_hwc_window, &private_output->hwc_window_list, link) {
131 if (private_hwc_window->hwc_window_backend == hwc_window_backend)
132 return private_hwc_window;
/* One-time module init: register the object-lookup function used by the
 * thread-callback layer to map an object stamp back to its tdm_output for
 * the commit / vblank / change callback types. Always succeeds. */
139 tdm_output_init(tdm_private_display *private_display)
141 tdm_thread_cb_set_find_func(TDM_THREAD_CB_OUTPUT_COMMIT, tdm_display_find_output_stamp);
142 tdm_thread_cb_set_find_func(TDM_THREAD_CB_OUTPUT_VBLANK, tdm_display_find_output_stamp);
143 tdm_thread_cb_set_find_func(TDM_THREAD_CB_OUTPUT_CHANGE, tdm_display_find_output_stamp);
145 return TDM_ERROR_NONE;
/* Return the output's maker/model/name strings from its cached capabilities,
 * under the display lock. The visible statements suggest each out-pointer is
 * optional (each assignment presumably guarded by an elided NULL check —
 * confirm against the full source). Returned strings are owned by TDM. */
149 tdm_output_get_model_info(tdm_output *output, const char **maker,
150 const char **model, const char **name)
154 _pthread_mutex_lock(&private_display->lock);
157 *maker = private_output->caps.maker;
159 *model = private_output->caps.model;
161 *name = private_output->caps.name;
163 _pthread_mutex_unlock(&private_display->lock);
/* Copy the output's capability flags (tdm_output_capability bitmask) from the
 * cached caps into *capabilities, under the display lock. 'capabilities'
 * must be non-NULL. */
169 tdm_output_get_capabilities(tdm_output *output, tdm_output_capability *capabilities)
173 TDM_RETURN_VAL_IF_FAIL(capabilities != NULL, TDM_ERROR_INVALID_PARAMETER);
175 _pthread_mutex_lock(&private_display->lock);
177 *capabilities = private_output->caps.capabilities;
179 _pthread_mutex_unlock(&private_display->lock);
/* Report the cached connection status (connected/disconnected/mode-setted)
 * of the output into *status, under the display lock. 'status' must be
 * non-NULL. */
185 tdm_output_get_conn_status(tdm_output *output, tdm_output_conn_status *status)
189 TDM_RETURN_VAL_IF_FAIL(status != NULL, TDM_ERROR_INVALID_PARAMETER);
191 _pthread_mutex_lock(&private_display->lock);
193 *status = private_output->caps.status;
195 _pthread_mutex_unlock(&private_display->lock);
200 /* LCOV_EXCL_START */
/* Refresh the frontend's cached state for one output by re-querying the
 * backend (used when a hotplug/status change is reported). 'user_data' is
 * the owning tdm_private_output. Failure of the display update is logged
 * via the macro and the function simply returns. */
202 _tdm_output_update(tdm_output *output_backend, void *user_data)
204 tdm_private_display *private_display;
205 tdm_private_output *private_output = user_data;
208 TDM_RETURN_IF_FAIL(private_output);
210 private_display = private_output->private_display;
212 ret = tdm_display_update_output(private_display, output_backend, private_output->pipe);
213 TDM_RETURN_IF_FAIL(ret == TDM_ERROR_NONE);
/* Thread-callback trampoline for TDM_THREAD_CB_OUTPUT_CHANGE events:
 * invokes one registered user change handler with the change type/value.
 * Must be entered with the display lock held (asserted via
 * TDM_MUTEX_IS_LOCKED); the lock is dropped around the user callback so the
 * handler may re-enter the TDM API, then re-acquired. The assert checks the
 * handler runs on the thread that registered it. */
218 tdm_output_thread_cb_change(tdm_private_display *private_display, void *object, tdm_thread_cb_base *cb_base, void *user_data)
220 tdm_private_output *private_output = object;
221 tdm_thread_cb_output_change *output_change = (tdm_thread_cb_output_change *)cb_base;
222 tdm_private_output_change_handler *change_handler = user_data;
224 TDM_RETURN_IF_FAIL(TDM_MUTEX_IS_LOCKED());
226 assert(change_handler->owner_tid == syscall(SYS_gettid));
228 _pthread_mutex_unlock(&private_display->lock);
229 change_handler->func(private_output, output_change->type, output_change->value, change_handler->user_data);
230 _pthread_mutex_lock(&private_display->lock);
/* Build a TDM_THREAD_CB_OUTPUT_CHANGE message for this output (synchronous:
 * base.sync = 1, so the call blocks until handlers have run) and dispatch it
 * through tdm_thread_cb_call. A dispatch failure is only warned about;
 * TDM_ERROR_NONE is always returned to the caller. */
234 _tdm_output_call_thread_cb_change(tdm_private_output *private_output, tdm_output_change_type type, tdm_value value)
236 tdm_thread_cb_output_change output_change;
239 memset(&output_change, 0, sizeof output_change);
240 output_change.base.type = TDM_THREAD_CB_OUTPUT_CHANGE;
241 output_change.base.length = sizeof output_change;
242 output_change.base.object_stamp = private_output->stamp;
243 output_change.base.data = NULL;
244 output_change.base.sync = 1;
245 output_change.type = type;
246 output_change.value = value;
248 ret = tdm_thread_cb_call(private_output, &output_change.base);
249 TDM_WARNING_IF_FAIL(ret == TDM_ERROR_NONE);
251 return TDM_ERROR_NONE;
/* Backend callback for connector status changes (hotplug). Re-queries the
 * output's state from the backend, then notifies registered change handlers
 * with TDM_OUTPUT_CHANGE_CONNECTION. 'value' construction appears elided in
 * this chunk — presumably it wraps 'status'; confirm against full source. */
255 tdm_output_cb_status(tdm_output *output_backend, tdm_output_conn_status status, void *user_data)
257 tdm_private_output *private_output = user_data;
261 TDM_INFO("output(%d) main %s", private_output->pipe, tdm_status_str(status));
263 _tdm_output_update(output_backend, user_data);
267 ret = _tdm_output_call_thread_cb_change(private_output, TDM_OUTPUT_CHANGE_CONNECTION, value);
268 TDM_WARNING_IF_FAIL(ret == TDM_ERROR_NONE);
/* Backend callback for an asynchronous DPMS change completing. Records the
 * new DPMS value, clears the waiting flag, and notifies change handlers with
 * TDM_OUTPUT_CHANGE_DPMS. 'value' construction appears elided in this chunk
 * — presumably it wraps 'dpms'; confirm against full source. */
272 tdm_output_cb_dpms(tdm_output *output_backend, tdm_output_dpms dpms, void *user_data)
274 tdm_private_output *private_output = user_data;
278 TDM_INFO("output(%d) %s", private_output->pipe, tdm_status_str(dpms));
280 private_output->current_dpms_value = dpms;
281 private_output->waiting_dpms_change = 0;
282 TDM_INFO("output(%d) dpms async '%s' done", private_output->pipe, tdm_dpms_str(dpms));
286 ret = _tdm_output_call_thread_cb_change(private_output, TDM_OUTPUT_CHANGE_DPMS, value);
287 TDM_WARNING_IF_FAIL(ret == TDM_ERROR_NONE);
/* Register 'func' to be called (with 'user_data') whenever this output's
 * connection or DPMS state changes. Allocates a handler record, hooks it
 * into the thread-callback system for TDM_THREAD_CB_OUTPUT_CHANGE, records
 * the registering thread id (handlers are delivered on that thread), and
 * appends it to the output's change_handler_list — all under the display
 * lock. On allocation or registration failure everything is rolled back and
 * TDM_ERROR_OUT_OF_MEMORY / TDM_ERROR_OPERATION_FAILED is returned. */
291 tdm_output_add_change_handler(tdm_output *output,
292 tdm_output_change_handler func,
295 tdm_private_output_change_handler *change_handler;
298 TDM_RETURN_VAL_IF_FAIL(func != NULL, TDM_ERROR_INVALID_PARAMETER);
300 _pthread_mutex_lock(&private_display->lock);
302 change_handler = calloc(1, sizeof(tdm_private_output_change_handler));
303 if (!change_handler) {
304 /* LCOV_EXCL_START */
305 TDM_ERR("failed: alloc memory");
306 _pthread_mutex_unlock(&private_display->lock);
307 return TDM_ERROR_OUT_OF_MEMORY;
311 ret = tdm_thread_cb_add(private_output, TDM_THREAD_CB_OUTPUT_CHANGE, NULL, tdm_output_thread_cb_change, change_handler);
312 if (ret != TDM_ERROR_NONE) {
313 /* LCOV_EXCL_START */
314 TDM_ERR("tdm_thread_cb_add failed");
315 free(change_handler);
316 _pthread_mutex_unlock(&private_display->lock);
317 return TDM_ERROR_OPERATION_FAILED;
321 change_handler->private_output = private_output;
322 change_handler->func = func;
323 change_handler->user_data = user_data;
324 change_handler->owner_tid = syscall(SYS_gettid);
326 LIST_ADDTAIL(&change_handler->link, &private_output->change_handler_list);
328 _pthread_mutex_unlock(&private_display->lock);
/* Unregister a change handler previously added with
 * tdm_output_add_change_handler, matching on both 'func' and 'user_data'.
 * Uses the SAFE list iterator because the matching entry is unlinked and
 * freed inside the loop; the thread-callback registration is removed first.
 * Both the found and not-found paths release the display lock and return. */
334 tdm_output_remove_change_handler(tdm_output *output,
335 tdm_output_change_handler func,
338 tdm_private_display *private_display;
339 tdm_private_output *private_output;
340 tdm_private_output_change_handler *change_handler = NULL, *hh = NULL;
342 TDM_RETURN_IF_FAIL(tdm_output_is_valid(output));
343 TDM_RETURN_IF_FAIL(func != NULL);
345 private_output = (tdm_private_output*)output;
346 private_display = private_output->private_display;
348 _pthread_mutex_lock(&private_display->lock);
350 LIST_FOR_EACH_ENTRY_SAFE(change_handler, hh, &private_output->change_handler_list, link) {
351 if (change_handler->func != func || change_handler->user_data != user_data)
354 tdm_thread_cb_remove(private_output, TDM_THREAD_CB_OUTPUT_CHANGE, NULL, tdm_output_thread_cb_change, change_handler);
356 LIST_DEL(&change_handler->link);
357 free(change_handler);
359 _pthread_mutex_unlock(&private_display->lock);
364 _pthread_mutex_unlock(&private_display->lock);
/* Copy the connector type (tdm_output_type, e.g. HDMI/LVDS) from the cached
 * caps into *type, under the display lock. 'type' must be non-NULL. */
368 tdm_output_get_output_type(tdm_output *output, tdm_output_type *type)
372 TDM_RETURN_VAL_IF_FAIL(type != NULL, TDM_ERROR_INVALID_PARAMETER);
374 _pthread_mutex_lock(&private_display->lock);
376 *type = private_output->caps.type;
378 _pthread_mutex_unlock(&private_display->lock);
/* Count the layers attached to this output by walking layer_list and store
 * the total in *count. Rejected with TDM_ERROR_BAD_REQUEST when the output
 * advertises TDM_OUTPUT_CAPABILITY_HWC — hwc-capable outputs must use the
 * hwc-window API instead of layers. The increment statement of the counting
 * loop appears elided in this chunk. */
384 tdm_output_get_layer_count(tdm_output *output, int *count)
386 tdm_private_layer *private_layer = NULL;
390 TDM_RETURN_VAL_IF_FAIL(count != NULL, TDM_ERROR_INVALID_PARAMETER);
392 _pthread_mutex_lock(&private_display->lock);
394 if (private_output->caps.capabilities & TDM_OUTPUT_CAPABILITY_HWC) {
395 TDM_ERR("output(%p) support HWC. Use HWC functions", private_output);
397 _pthread_mutex_unlock(&private_display->lock);
398 return TDM_ERROR_BAD_REQUEST;
402 LIST_FOR_EACH_ENTRY(private_layer, &private_output->layer_list, link)
405 _pthread_mutex_unlock(&private_display->lock);
406 return TDM_ERROR_NONE;
409 _pthread_mutex_unlock(&private_display->lock);
/* Return the layer with the given index, or NULL (with *error set) when the
 * output is hwc-capable (TDM_ERROR_BAD_REQUEST) or no layer matches. The
 * 'error' out-parameter assignment on line 425 is presumably guarded by an
 * elided NULL check — confirm against full source. Uses
 * OUTPUT_FUNC_ENTRY_ERROR so an invalid handle yields NULL. */
416 tdm_output_get_layer(tdm_output *output, int index, tdm_error *error)
418 tdm_private_layer *private_layer = NULL;
420 OUTPUT_FUNC_ENTRY_ERROR();
422 _pthread_mutex_lock(&private_display->lock);
425 *error = TDM_ERROR_NONE;
427 if (private_output->caps.capabilities & TDM_OUTPUT_CAPABILITY_HWC) {
428 _pthread_mutex_unlock(&private_display->lock);
429 TDM_ERR("output(%p) support HWC. Use HWC functions", private_output);
431 *error = TDM_ERROR_BAD_REQUEST;
435 LIST_FOR_EACH_ENTRY(private_layer, &private_output->layer_list, link) {
436 if (private_layer->index == index) {
437 _pthread_mutex_unlock(&private_display->lock);
438 return private_layer;
442 _pthread_mutex_unlock(&private_display->lock);
/* Expose the output's property table: points *props at the cached caps.props
 * array (owned by TDM, do not free) and stores its length in *count, under
 * the display lock. Both out-pointers must be non-NULL. */
448 tdm_output_get_available_properties(tdm_output *output, const tdm_prop **props,
453 TDM_RETURN_VAL_IF_FAIL(props != NULL, TDM_ERROR_INVALID_PARAMETER);
454 TDM_RETURN_VAL_IF_FAIL(count != NULL, TDM_ERROR_INVALID_PARAMETER);
456 _pthread_mutex_lock(&private_display->lock);
458 *props = (const tdm_prop *)private_output->caps.props;
459 *count = private_output->caps.prop_count;
461 _pthread_mutex_unlock(&private_display->lock);
/* Expose the output's mode list: points *modes at the cached caps.modes
 * array (owned by TDM, do not free) and stores its length in *count, under
 * the display lock. Both out-pointers must be non-NULL. */
467 tdm_output_get_available_modes(tdm_output *output,
468 const tdm_output_mode **modes, int *count)
472 TDM_RETURN_VAL_IF_FAIL(modes != NULL, TDM_ERROR_INVALID_PARAMETER);
473 TDM_RETURN_VAL_IF_FAIL(count != NULL, TDM_ERROR_INVALID_PARAMETER);
475 _pthread_mutex_lock(&private_display->lock);
477 *modes = (const tdm_output_mode *)private_output->caps.modes;
478 *count = private_output->caps.mode_count;
480 _pthread_mutex_unlock(&private_display->lock);
/* Report the backend's supported size constraints (min/max width/height and
 * preferred alignment) from the cached caps, filtered through
 * TDM_FRONT_VALUE. The visible statements suggest each out-pointer is
 * optional (assignments presumably guarded by elided NULL checks — confirm
 * against full source). */
486 tdm_output_get_available_size(tdm_output *output, int *min_w, int *min_h,
487 int *max_w, int *max_h, int *preferred_align)
491 _pthread_mutex_lock(&private_display->lock);
494 *min_w = TDM_FRONT_VALUE(private_output->caps.min_w);
496 *min_h = TDM_FRONT_VALUE(private_output->caps.min_h);
498 *max_w = TDM_FRONT_VALUE(private_output->caps.max_w);
500 *max_h = TDM_FRONT_VALUE(private_output->caps.max_h);
502 *preferred_align = TDM_FRONT_VALUE(private_output->caps.preferred_align);
504 _pthread_mutex_unlock(&private_display->lock);
/* Cursor-plane size constraints. Requires backend module ABI >= 1.5: older
 * backends get sentinel values (the visible *preferred_align = -1; the other
 * out-assignments for that path appear elided) and TDM_ERROR_BAD_MODULE.
 * Otherwise behaves like tdm_output_get_available_size but for the cursor
 * min/max/align fields. */
510 tdm_output_get_cursor_available_size(tdm_output *output, int *min_w, int *min_h,
511 int *max_w, int *max_h, int *preferred_align)
515 _pthread_mutex_lock(&private_display->lock);
517 if (!tdm_display_check_module_abi(private_display, 1, 5)) {
528 *preferred_align = -1;
530 _pthread_mutex_unlock(&private_display->lock);
532 return TDM_ERROR_BAD_MODULE;
536 *min_w = TDM_FRONT_VALUE(private_output->caps.cursor_min_w);
538 *min_h = TDM_FRONT_VALUE(private_output->caps.cursor_min_h);
540 *max_w = TDM_FRONT_VALUE(private_output->caps.cursor_max_w);
542 *max_h = TDM_FRONT_VALUE(private_output->caps.cursor_max_h);
544 *preferred_align = TDM_FRONT_VALUE(private_output->caps.cursor_preferred_align);
546 _pthread_mutex_unlock(&private_display->lock);
/* Report the display's physical dimensions in millimetres from the cached
 * caps. The out-pointers appear optional (assignments presumably guarded by
 * elided NULL checks — confirm against full source). */
552 tdm_output_get_physical_size(tdm_output *output, unsigned int *mmWidth,
553 unsigned int *mmHeight)
557 _pthread_mutex_lock(&private_display->lock);
560 *mmWidth = private_output->caps.mmWidth;
562 *mmHeight = private_output->caps.mmHeight;
564 _pthread_mutex_unlock(&private_display->lock);
/* Copy the output's subpixel layout value from the cached caps into
 * *subpixel, under the display lock. 'subpixel' must be non-NULL. */
570 tdm_output_get_subpixel(tdm_output *output, unsigned int *subpixel)
573 TDM_RETURN_VAL_IF_FAIL(subpixel != NULL, TDM_ERROR_INVALID_PARAMETER);
575 _pthread_mutex_lock(&private_display->lock);
577 *subpixel = private_output->caps.subpixel;
579 _pthread_mutex_unlock(&private_display->lock);
/* Report the output's pipe index (its position in the display's output
 * list) into *pipe, under the display lock. 'pipe' must be non-NULL. */
585 tdm_output_get_pipe(tdm_output *output, unsigned int *pipe)
588 TDM_RETURN_VAL_IF_FAIL(pipe != NULL, TDM_ERROR_INVALID_PARAMETER);
590 _pthread_mutex_lock(&private_display->lock);
592 *pipe = private_output->pipe;
594 _pthread_mutex_unlock(&private_display->lock);
/* Find the layer flagged TDM_LAYER_CAPABILITY_PRIMARY on this output and
 * store its index in *index. The loop's early exit (break) after a match
 * appears elided in this chunk. 'index' must be non-NULL. */
600 tdm_output_get_primary_index(tdm_output *output, int *index)
602 tdm_private_layer *private_layer = NULL;
605 TDM_RETURN_VAL_IF_FAIL(index != NULL, TDM_ERROR_INVALID_PARAMETER);
607 _pthread_mutex_lock(&private_display->lock);
609 LIST_FOR_EACH_ENTRY(private_layer, &private_output->layer_list, link) {
610 if (private_layer->caps.capabilities & TDM_LAYER_CAPABILITY_PRIMARY) {
611 *index = private_layer->index;
616 _pthread_mutex_unlock(&private_display->lock);
/* Forward a property write (id/value) to the backend's output_set_property
 * hook, under the display lock. Returns TDM_ERROR_NOT_IMPLEMENTED when the
 * backend does not provide the hook; otherwise propagates the backend's
 * return code. */
622 tdm_output_set_property(tdm_output *output, unsigned int id, tdm_value value)
624 tdm_func_output *func_output;
627 _pthread_mutex_lock(&private_display->lock);
629 func_output = &private_display->func_output;
631 if (!func_output->output_set_property) {
632 /* LCOV_EXCL_START */
633 _pthread_mutex_unlock(&private_display->lock);
634 TDM_ERR("not implemented!!");
635 return TDM_ERROR_NOT_IMPLEMENTED;
639 ret = func_output->output_set_property(private_output->output_backend, id,
642 _pthread_mutex_unlock(&private_display->lock);
/* Forward a property read (id) to the backend's output_get_property hook,
 * storing the result via 'value' (must be non-NULL), under the display lock.
 * Returns TDM_ERROR_NOT_IMPLEMENTED when the backend lacks the hook;
 * otherwise propagates the backend's return code. */
648 tdm_output_get_property(tdm_output *output, unsigned int id, tdm_value *value)
650 tdm_func_output *func_output;
653 TDM_RETURN_VAL_IF_FAIL(value != NULL, TDM_ERROR_INVALID_PARAMETER);
655 _pthread_mutex_lock(&private_display->lock);
657 func_output = &private_display->func_output;
659 if (!func_output->output_get_property) {
660 /* LCOV_EXCL_START */
661 _pthread_mutex_unlock(&private_display->lock);
662 TDM_ERR("not implemented!!");
663 return TDM_ERROR_NOT_IMPLEMENTED;
667 ret = func_output->output_get_property(private_output->output_backend, id,
670 _pthread_mutex_unlock(&private_display->lock);
/* Thread-side delivery of a vblank event (TDM_THREAD_CB_OUTPUT_VBLANK).
 * Runs with the display lock held (asserted). Steps visible here:
 *  - clear sent_to_frontend on the triggering handler and disarm the
 *    vblank timeout timer (delay 0);
 *  - unregister this one-shot thread callback;
 *  - move every handler matching the same (interval, sync, owner_tid) onto
 *    a local clone_list (SAFE iteration: entries are re-linked in-loop);
 *  - drop the lock and invoke each cloned handler's func with the event's
 *    sequence/tv_sec/tv_usec, then re-take the lock.
 * NOTE(review): declarations of 'interval'/'sync' and the freeing of cloned
 * entries appear elided in this chunk — confirm against the full source. */
676 _tdm_output_thread_cb_vblank(tdm_private_display *private_display, void *object, tdm_thread_cb_base *cb_base, void *user_data)
678 tdm_thread_cb_output_vblank *output_vblank = (tdm_thread_cb_output_vblank *)cb_base;
679 tdm_private_output_vblank_handler *vblank_handler = output_vblank->base.data;
680 tdm_private_output_vblank_handler *v = NULL, *vv = NULL;
681 tdm_private_output *private_output = object;
682 struct list_head clone_list;
684 pid_t tid = syscall(SYS_gettid);
686 TDM_RETURN_IF_FAIL(TDM_MUTEX_IS_LOCKED());
688 assert(vblank_handler->owner_tid == tid);
690 vblank_handler->sent_to_frontend = 0;
692 _tdm_output_vblank_timeout_update(private_output, 0);
694 tdm_thread_cb_remove(private_output, TDM_THREAD_CB_OUTPUT_VBLANK, vblank_handler, _tdm_output_thread_cb_vblank, NULL);
696 interval = vblank_handler->interval;
697 sync = vblank_handler->sync;
699 LIST_INITHEAD(&clone_list);
701 LIST_FOR_EACH_ENTRY_SAFE(v, vv, &private_output->vblank_handler_list, link) {
702 if (v->interval != interval || v->sync != sync || v->owner_tid != tid)
706 LIST_ADDTAIL(&v->link, &clone_list);
709 if (tdm_debug_module & TDM_DEBUG_COMMIT)
710 TDM_INFO("----------------------------------------- output(%d) got vblank", private_output->pipe);
712 _pthread_mutex_unlock(&private_display->lock);
713 LIST_FOR_EACH_ENTRY_SAFE(v, vv, &clone_list, link) {
714 if (tdm_debug_module & TDM_DEBUG_COMMIT)
715 TDM_INFO("handler(%p)", v);
720 v->func(v->private_output,
721 output_vblank->sequence,
722 output_vblank->tv_sec,
723 output_vblank->tv_usec,
728 _pthread_mutex_lock(&private_display->lock);
730 if (tdm_debug_module & TDM_DEBUG_COMMIT)
731 TDM_INFO("-----------------------------------------...");
/* Backend callback for a completed wait_vblank request. Packages the event
 * (sequence + timestamp) into an asynchronous TDM_THREAD_CB_OUTPUT_VBLANK
 * message (base.sync = 0) carrying the originating vblank_handler as data,
 * marks the handler as forwarded (sent_to_frontend = 1), and dispatches it
 * to the owner thread via tdm_thread_cb_call. Dispatch failure is warned. */
735 _tdm_output_cb_vblank(tdm_output *output_backend, unsigned int sequence,
736 unsigned int tv_sec, unsigned int tv_usec, void *user_data)
738 tdm_private_output_vblank_handler *vblank_handler = user_data;
739 tdm_thread_cb_output_vblank output_vblank;
742 memset(&output_vblank, 0, sizeof output_vblank);
743 output_vblank.base.type = TDM_THREAD_CB_OUTPUT_VBLANK;
744 output_vblank.base.length = sizeof output_vblank;
745 output_vblank.base.object_stamp = vblank_handler->private_output->stamp;
746 output_vblank.base.data = vblank_handler;
747 output_vblank.base.sync = 0;
748 output_vblank.sequence = sequence;
749 output_vblank.tv_sec = tv_sec;
750 output_vblank.tv_usec = tv_usec;
752 vblank_handler->sent_to_frontend = 1;
754 ret = tdm_thread_cb_call(vblank_handler->private_output, &output_vblank.base);
755 TDM_WARNING_IF_FAIL(ret == TDM_ERROR_NONE);
/* Thread-side delivery of a commit-done event (TDM_THREAD_CB_OUTPUT_COMMIT).
 * Runs with the display lock held (asserted); a NULL handler means nothing
 * to deliver. Unregisters the one-shot callback, unlinks the handler, and
 * — for output-level commits only — notifies each layer with a committed
 * buffer via tdm_layer_committed (layer commits handle this in their own
 * callback). The user's commit handler, if set, is invoked with the lock
 * dropped so it may re-enter TDM; the handler record is then freed. */
759 _tdm_output_thread_cb_commit(tdm_private_display *private_display, void *object, tdm_thread_cb_base *cb_base, void *user_data)
761 tdm_thread_cb_output_commit *output_commit = (tdm_thread_cb_output_commit *)cb_base;
762 tdm_private_output_commit_handler *output_commit_handler = output_commit->base.data;
763 tdm_private_output *private_output = object;
764 tdm_private_layer *private_layer = NULL;
766 TDM_RETURN_IF_FAIL(TDM_MUTEX_IS_LOCKED());
768 if (!output_commit_handler)
771 assert(output_commit_handler->owner_tid == syscall(SYS_gettid));
773 tdm_thread_cb_remove(private_output, TDM_THREAD_CB_OUTPUT_COMMIT, output_commit_handler, _tdm_output_thread_cb_commit, NULL);
775 LIST_DEL(&output_commit_handler->link);
777 if (tdm_debug_module & TDM_DEBUG_COMMIT) {
778 TDM_INFO("----------------------------------------- output(%d) committed", private_output->pipe);
779 TDM_INFO("handler(%p)", output_commit_handler);
782 if (private_output->commit_type == TDM_COMMIT_TYPE_OUTPUT) {
783 /* In case of layer commit, the below will be handled in the layer commit callback */
784 LIST_FOR_EACH_ENTRY(private_layer, &private_output->layer_list, link) {
785 if (private_layer->committed_buffer)
786 tdm_layer_committed(private_layer, &private_layer->committed_buffer);
790 if (output_commit_handler->func) {
791 _pthread_mutex_unlock(&private_display->lock);
792 output_commit_handler->func(private_output,
793 output_commit->sequence,
794 output_commit->tv_sec,
795 output_commit->tv_usec,
796 output_commit_handler->user_data);
797 _pthread_mutex_lock(&private_display->lock);
800 free(output_commit_handler);
802 if (tdm_debug_module & TDM_DEBUG_COMMIT)
803 TDM_INFO("-----------------------------------------...");
/* Backend callback for a finished commit. Resolves the private output either
 * from the commit handler or (when the backend passed no handler) by looking
 * the backend handle up in the display — the else-branch keyword for line
 * 818 appears elided in this chunk. Packages sequence/timestamps into an
 * async TDM_THREAD_CB_OUTPUT_COMMIT message and dispatches it to the owner
 * thread; dispatch failure is warned. */
807 _tdm_output_cb_commit(tdm_output *output_backend, unsigned int sequence,
808 unsigned int tv_sec, unsigned int tv_usec, void *user_data)
810 tdm_private_output_commit_handler *output_commit_handler = user_data;
811 tdm_private_output *private_output;
812 tdm_thread_cb_output_commit output_commit;
815 if (output_commit_handler)
816 private_output = output_commit_handler->private_output;
818 private_output = tdm_display_find_private_output(tdm_display_get(), output_backend);
820 memset(&output_commit, 0, sizeof output_commit);
821 output_commit.base.type = TDM_THREAD_CB_OUTPUT_COMMIT;
822 output_commit.base.length = sizeof output_commit;
823 output_commit.base.object_stamp = private_output->stamp;
824 output_commit.base.data = output_commit_handler;
825 output_commit.base.sync = 0;
826 output_commit.sequence = sequence;
827 output_commit.tv_sec = tv_sec;
828 output_commit.tv_usec = tv_usec;
830 ret = tdm_thread_cb_call(private_output, &output_commit.base);
831 TDM_WARNING_IF_FAIL(ret == TDM_ERROR_NONE);
834 /* add_front: To distinguish between the user vblank handlers and the layer
835 * commit vblank handlers. The layer commit handlers will be called
836 * before calling the user vblank handlers.
/* Core vblank-wait implementation (caller holds the display lock).
 *  - Lazily registers _tdm_output_cb_vblank with the backend on first use.
 *  - Allocates a handler record and inserts it at the head (add_front) or
 *    tail of vblank_handler_list; the list scan over matching
 *    (interval, sync, owner_tid) entries presumably sets 'skip_request' so
 *    a duplicate backend request is avoided — the assignment appears elided
 *    in this chunk, as do the skip_request guard and the interval clamp
 *    mentioned on line 851.
 *  - Registers the thread callback, asks the backend to wait, and arms a
 *    1000 ms timeout watchdog.
 * On failure, rolls back the thread-callback registration, unlinks and
 * frees the handler (the wait_failed label itself is elided here). */
839 _tdm_output_wait_vblank(tdm_private_output *private_output, int interval, int sync,
840 tdm_output_vblank_handler func, void *user_data,
841 unsigned int add_front)
843 tdm_func_output *func_output;
844 tdm_private_output_vblank_handler *vblank_handler = NULL, *v = NULL;
845 unsigned int skip_request = 0;
846 pid_t tid = syscall(SYS_gettid);
847 tdm_error ret = TDM_ERROR_NONE;
849 func_output = &private_output->private_display->func_output;
851 /* interval SHOULD be at least 1 */
855 if (!func_output->output_wait_vblank) {
856 /* LCOV_EXCL_START */
857 TDM_ERR("not implemented!!");
858 return TDM_ERROR_NOT_IMPLEMENTED;
862 if (!private_output->regist_vblank_cb) {
863 private_output->regist_vblank_cb = 1;
864 ret = func_output->output_set_vblank_handler(private_output->output_backend,
865 _tdm_output_cb_vblank);
868 vblank_handler = calloc(1, sizeof(tdm_private_output_vblank_handler));
869 if (!vblank_handler) {
870 /* LCOV_EXCL_START */
871 TDM_ERR("failed: alloc memory");
872 return TDM_ERROR_OUT_OF_MEMORY;
876 if (tdm_debug_module & TDM_DEBUG_COMMIT)
877 TDM_INFO("output(%d) wait_vblank: handler(%p)", private_output->pipe, vblank_handler);
879 LIST_FOR_EACH_ENTRY(v, &private_output->vblank_handler_list, link) {
880 if (v->interval == interval && v->sync == sync && v->owner_tid == tid) {
887 LIST_ADD(&vblank_handler->link, &private_output->vblank_handler_list);
889 LIST_ADDTAIL(&vblank_handler->link, &private_output->vblank_handler_list);
891 vblank_handler->private_output = private_output;
892 vblank_handler->interval = interval;
893 vblank_handler->sync = sync;
894 vblank_handler->func = func;
895 vblank_handler->user_data = user_data;
896 vblank_handler->owner_tid = tid;
898 /* If there is the previous request, we can skip to call output_wait_vblank() */
900 ret = tdm_thread_cb_add(private_output, TDM_THREAD_CB_OUTPUT_VBLANK, vblank_handler, _tdm_output_thread_cb_vblank, NULL);
901 if (ret != TDM_ERROR_NONE) {
902 TDM_ERR("tdm_thread_cb_add failed");
906 ret = func_output->output_wait_vblank(private_output->output_backend, interval,
907 sync, vblank_handler);
908 TDM_GOTO_IF_FAIL(ret == TDM_ERROR_NONE, wait_failed);
910 _tdm_output_vblank_timeout_update(private_output, 1000);
912 if (tdm_debug_module & TDM_DEBUG_COMMIT)
913 TDM_INFO("output(%d) backend wait_vblank", private_output->pipe);
919 /* LCOV_EXCL_START */
920 if (vblank_handler) {
921 tdm_thread_cb_remove(private_output, TDM_THREAD_CB_OUTPUT_VBLANK, vblank_handler, _tdm_output_thread_cb_vblank, NULL);
922 LIST_DEL(&vblank_handler->link);
923 free(vblank_handler);
/* Public API: schedule 'func' to run after 'interval' vblanks. Rejected
 * with TDM_ERROR_DPMS_OFF when the output's DPMS state suppresses vsync.
 * Delegates to _tdm_output_wait_vblank with add_front = 0 (user handlers
 * run after layer-commit handlers), all under the display lock. */
931 tdm_output_wait_vblank(tdm_output *output, int interval, int sync,
931 tdm_output_vblank_handler func, void *user_data)
935 _pthread_mutex_lock(&private_display->lock);
937 if (TDM_OUTPUT_DPMS_VSYNC_IS_OFF(private_output->current_dpms_value)) {
938 TDM_WRN("output(%d) dpms: %s", private_output->pipe,
939 tdm_dpms_str(private_output->current_dpms_value));
940 _pthread_mutex_unlock(&private_display->lock);
941 return TDM_ERROR_DPMS_OFF;
944 ret = _tdm_output_wait_vblank(private_output, interval, sync, func, user_data, 0);
946 _pthread_mutex_unlock(&private_display->lock);
951 /* LCOV_EXCL_START */
/* Like tdm_output_wait_vblank but inserts the handler at the FRONT of the
 * list (add_front = 1) so it runs before user handlers — used internally
 * for layer-commit vblank handling. Same DPMS-off rejection. */
953 tdm_output_wait_vblank_add_front(tdm_output *output, int interval, int sync,
954 tdm_output_vblank_handler func, void *user_data)
958 _pthread_mutex_lock(&private_display->lock);
960 if (TDM_OUTPUT_DPMS_VSYNC_IS_OFF(private_output->current_dpms_value)) {
961 TDM_WRN("output(%d) dpms: %s", private_output->pipe,
962 tdm_dpms_str(private_output->current_dpms_value));
963 _pthread_mutex_unlock(&private_display->lock);
964 return TDM_ERROR_DPMS_OFF;
967 ret = _tdm_output_wait_vblank(private_output, interval, sync, func, user_data, 1);
969 _pthread_mutex_unlock(&private_display->lock);
/* Lock-held helper: cancel a pending vblank handler matching func/user_data.
 * As the comment notes, the entry is not unlinked or freed here — its
 * func/user_data are presumably just NULLed (those assignments appear
 * elided in this chunk); the record is reclaimed when the event fires. */
976 tdm_output_remove_vblank_handler_internal(tdm_output *output, tdm_output_vblank_handler func, void *user_data)
978 tdm_private_output *private_output = (tdm_private_output*)output;
979 tdm_private_output_vblank_handler *v = NULL;
981 TDM_RETURN_IF_FAIL(private_output != NULL);
982 TDM_RETURN_IF_FAIL(TDM_MUTEX_IS_LOCKED());
984 LIST_FOR_EACH_ENTRY(v, &private_output->vblank_handler_list, link) {
985 if (v->func == func && v->user_data == user_data) {
986 /* only set func & user_data to NULL. It will be freed when an event occurs */
/* Lock-held helper: cancel a pending commit handler matching func/user_data.
 * Only neutralizes the entry (user_data NULLed here; the matching c->func
 * assignment appears elided in this chunk) — the record itself is freed
 * when the commit event is delivered. */
995 tdm_output_remove_commit_handler_internal(tdm_output *output, tdm_output_commit_handler func, void *user_data)
997 tdm_private_output *private_output = (tdm_private_output*)output;
998 tdm_private_output_commit_handler *c = NULL;
1000 TDM_RETURN_IF_FAIL(private_output != NULL);
1001 TDM_RETURN_IF_FAIL(TDM_MUTEX_IS_LOCKED());
1003 LIST_FOR_EACH_ENTRY(c, &private_output->output_commit_handler_list, link) {
1004 if (c->func == func && c->user_data == user_data) {
1005 /* only set func & user_data to NULL. It will be freed when an event occurs */
1007 c->user_data = NULL;
/* Public wrapper: take the display lock and delegate to
 * tdm_output_remove_vblank_handler_internal. */
1014 tdm_output_remove_vblank_handler(tdm_output *output, tdm_output_vblank_handler func, void *user_data)
1016 OUTPUT_FUNC_ENTRY();
1018 _pthread_mutex_lock(&private_display->lock);
1020 tdm_output_remove_vblank_handler_internal(output, func, user_data);
1022 _pthread_mutex_unlock(&private_display->lock);
/* Public wrapper: take the display lock and delegate to
 * tdm_output_remove_commit_handler_internal. */
1028 tdm_output_remove_commit_handler(tdm_output *output, tdm_output_commit_handler func, void *user_data)
1030 OUTPUT_FUNC_ENTRY();
1032 _pthread_mutex_lock(&private_display->lock);
1034 tdm_output_remove_commit_handler_internal(output, func, user_data);
1036 _pthread_mutex_unlock(&private_display->lock);
/* Core commit path (display lock held by callers). When DPMS allows vsync:
 * lazily registers the backend commit callback, allocates a commit-handler
 * record, registers the thread callback, links the record, and calls the
 * backend's output_commit. Regardless of DPMS, each layer's waiting_buffer
 * is promoted to committed_buffer (needed later by tdm_layer_committed).
 * With vsync-suppressing DPMS the user 'func' is invoked directly instead
 * of committing. On backend failure the commit_failed path unwinds the
 * thread callback, list link and allocation.
 * NOTE(review): the debug log at line 1112 prints waiting_buffer after it
 * was set to NULL on line 1110 — it will always log NULL; looks like a
 * pre-existing ordering quirk, confirm against upstream before changing. */
1042 tdm_output_commit_internal(tdm_output *output, int sync, tdm_output_commit_handler func, void *user_data)
1044 tdm_func_output *func_output;
1045 tdm_private_output_commit_handler *output_commit_handler = NULL;
1046 tdm_private_layer *private_layer = NULL;
1047 tdm_output_dpms dpms_value = TDM_OUTPUT_DPMS_ON;
1049 OUTPUT_FUNC_ENTRY();
1051 func_output = &private_display->func_output;
1053 if (!func_output->output_commit) {
1054 /* LCOV_EXCL_START */
1055 TDM_ERR("not implemented!!");
1056 return TDM_ERROR_NOT_IMPLEMENTED;
1057 /* LCOV_EXCL_STOP */
1060 ret = tdm_output_get_dpms_internal(output, &dpms_value);
1061 TDM_RETURN_VAL_IF_FAIL(ret == TDM_ERROR_NONE, ret);
1063 if (!TDM_OUTPUT_DPMS_VSYNC_IS_OFF(dpms_value)) {
1065 if (!private_output->regist_commit_cb) {
1066 private_output->regist_commit_cb = 1;
1067 ret = func_output->output_set_commit_handler(private_output->output_backend, _tdm_output_cb_commit);
1068 TDM_GOTO_IF_FAIL(ret == TDM_ERROR_NONE, commit_failed);
1071 output_commit_handler = calloc(1, sizeof(tdm_private_output_commit_handler));
1072 if (!output_commit_handler) {
1073 /* LCOV_EXCL_START */
1074 TDM_ERR("failed: alloc memory");
1075 return TDM_ERROR_OUT_OF_MEMORY;
1076 /* LCOV_EXCL_STOP */
1079 ret = tdm_thread_cb_add(private_output, TDM_THREAD_CB_OUTPUT_COMMIT, output_commit_handler, _tdm_output_thread_cb_commit, NULL);
1080 if (ret != TDM_ERROR_NONE) {
1081 TDM_ERR("tdm_thread_cb_add failed");
1082 free(output_commit_handler);
1086 LIST_ADDTAIL(&output_commit_handler->link, &private_output->output_commit_handler_list);
1087 output_commit_handler->private_output = private_output;
1088 output_commit_handler->func = func;
1089 output_commit_handler->user_data = user_data;
1090 output_commit_handler->owner_tid = syscall(SYS_gettid);
1093 ret = func_output->output_commit(private_output->output_backend, sync,
1094 output_commit_handler);
1095 TDM_GOTO_IF_FAIL(ret == TDM_ERROR_NONE, commit_failed);
1097 if (tdm_debug_module & TDM_DEBUG_COMMIT)
1098 TDM_INFO("output(%d) backend commit: handle(%p) func(%p) user_data(%p)",
1099 private_output->pipe, output_commit_handler, func, user_data);
1102 /* Even if DPMS is off, committed_buffer should be changed because it will be referred
1103 * for tdm_layer_committed() function.
1105 LIST_FOR_EACH_ENTRY(private_layer, &private_output->layer_list, link) {
1106 if (!private_layer->waiting_buffer)
1109 private_layer->committed_buffer = private_layer->waiting_buffer;
1110 private_layer->waiting_buffer = NULL;
1111 if (tdm_debug_module & TDM_DEBUG_BUFFER)
1112 TDM_INFO("layer(%p) waiting_buffer(%p) committed_buffer(%p)",
1113 private_layer, private_layer->waiting_buffer,
1114 private_layer->committed_buffer->buffer);
1117 if (TDM_OUTPUT_DPMS_VSYNC_IS_OFF(dpms_value)) {
1118 TDM_WRN("dpms %s. Directly call commit handler instead of commit.", tdm_dpms_str(dpms_value));
1120 func(output, 0, 0, 0, user_data);
1126 /* LCOV_EXCL_START */
1127 if (output_commit_handler) {
1128 tdm_thread_cb_remove(private_output, TDM_THREAD_CB_OUTPUT_COMMIT, output_commit_handler, _tdm_output_thread_cb_commit, NULL);
1129 LIST_DEL(&output_commit_handler->link);
1130 free(output_commit_handler);
1133 /* LCOV_EXCL_STOP */
/* Public output-level commit. Locks this output into
 * TDM_COMMIT_TYPE_OUTPUT on first use; rejected with TDM_ERROR_BAD_REQUEST
 * if the output is already driven by layer commits or commit_per_vblank,
 * and with TDM_ERROR_DPMS_OFF if DPMS suppresses vsync. Flushes each
 * layer's pending data, then delegates to tdm_output_commit_internal —
 * all under the display lock. */
1137 tdm_output_commit(tdm_output *output, int sync, tdm_output_commit_handler func,
1140 tdm_private_layer *private_layer = NULL;
1142 OUTPUT_FUNC_ENTRY();
1144 _pthread_mutex_lock(&private_display->lock);
1146 if (private_output->commit_type == TDM_COMMIT_TYPE_NONE)
1147 private_output->commit_type = TDM_COMMIT_TYPE_OUTPUT;
1148 else if (private_output->commit_type == TDM_COMMIT_TYPE_LAYER) {
1149 TDM_ERR("Can't supported. Use tdm_layer_commit");
1150 _pthread_mutex_unlock(&private_display->lock);
1151 return TDM_ERROR_BAD_REQUEST;
1154 if (private_output->commit_per_vblank) {
1155 TDM_ERR("Use tdm_layer_commit");
1156 _pthread_mutex_unlock(&private_display->lock);
1157 return TDM_ERROR_BAD_REQUEST;
1160 if (TDM_OUTPUT_DPMS_VSYNC_IS_OFF(private_output->current_dpms_value)) {
1161 TDM_ERR("output(%d) dpms: %s", private_output->pipe,
1162 tdm_dpms_str(private_output->current_dpms_value));
1163 _pthread_mutex_unlock(&private_display->lock);
1164 return TDM_ERROR_DPMS_OFF;
1167 if (tdm_debug_module & TDM_DEBUG_COMMIT)
1168 TDM_INFO("output(%d) commit", private_output->pipe);
1170 /* apply the pending data of all layers */
1171 LIST_FOR_EACH_ENTRY(private_layer, &private_output->layer_list, link) {
1172 tdm_layer_commit_pending_data(private_layer);
1175 ret = tdm_output_commit_internal(output, sync, func, user_data);
1177 _pthread_mutex_unlock(&private_display->lock);
/* Set the display mode of the output via the backend.
 * On success the mode pointer is cached in current_mode and
 * need_set_target_info is raised so the HWC target-buffer geometry is
 * recomputed on the next commit. The caller-provided 'mode' must stay
 * valid for as long as it is current (only the pointer is stored). */
1183 tdm_output_set_mode(tdm_output *output, const tdm_output_mode *mode)
1185 tdm_func_output *func_output;
1186 OUTPUT_FUNC_ENTRY();
1188 TDM_RETURN_VAL_IF_FAIL(mode != NULL, TDM_ERROR_INVALID_PARAMETER);
1190 _pthread_mutex_lock(&private_display->lock);
1192 func_output = &private_display->func_output;
1194 if (!func_output->output_set_mode) {
1195 /* LCOV_EXCL_START */
1196 _pthread_mutex_unlock(&private_display->lock);
1197 TDM_ERR("not implemented!!");
1198 return TDM_ERROR_NOT_IMPLEMENTED;
1199 /* LCOV_EXCL_STOP */
1202 ret = func_output->output_set_mode(private_output->output_backend, mode);
1203 if (ret == TDM_ERROR_NONE) {
1204 private_output->current_mode = mode;
1205 private_output->need_set_target_info = 1;
1206 TDM_INFO("mode: %dx%d %dhz", mode->hdisplay, mode->vdisplay, mode->vrefresh);
1209 _pthread_mutex_unlock(&private_display->lock);
/* Return the currently cached display mode (may be NULL if none was set).
 * The returned pointer is owned by TDM; the caller must not free it. */
1215 tdm_output_get_mode(tdm_output *output, const tdm_output_mode **mode)
1217 OUTPUT_FUNC_ENTRY();
1219 TDM_RETURN_VAL_IF_FAIL(mode != NULL, TDM_ERROR_INVALID_PARAMETER);
1221 _pthread_mutex_lock(&private_display->lock);
1223 *mode = private_output->current_mode;
1225 _pthread_mutex_unlock(&private_display->lock);
/* Synchronously change the DPMS state of the output.
 * Extended (non-standard) DPMS values above TDM_OUTPUT_DPMS_OFF are allowed
 * only when the backend advertises TDM_OUTPUT_CAPABILITY_EXTENDED_DPMS and
 * must not use the low 4 bits (reserved for the standard modes).
 * Rejected while an asynchronous DPMS change is still pending.
 * On success the cached state is updated and a TDM_OUTPUT_CHANGE_DPMS
 * thread callback is emitted; on failure the cache is resynchronized
 * from the backend. */
1231 tdm_output_set_dpms(tdm_output *output, tdm_output_dpms dpms_value)
1233 tdm_func_output *func_output;
1234 OUTPUT_FUNC_ENTRY();
1236 if (dpms_value > TDM_OUTPUT_DPMS_OFF) {
1237 if (dpms_value & TDM_OUTPUT_DPMS_DEFAULT_MASK) {
1238 TDM_ERR("Don't use the low-4bit for an extended DPMS mode: dpms_value(%x)", dpms_value);
1239 return TDM_ERROR_BAD_REQUEST;
1242 if (!(private_output->caps.capabilities & TDM_OUTPUT_CAPABILITY_EXTENDED_DPMS)) {
1243 TDM_ERR("output(%d) doesn't support the extended DPMS control: '%s'",
1244 private_output->pipe, tdm_dpms_str(dpms_value));
1245 return TDM_ERROR_BAD_REQUEST;
1249 _pthread_mutex_lock(&private_display->lock);
1251 if (private_output->waiting_dpms_change) {
1252 TDM_ERR("DPMS is not changed yet. Can't be changed twice");
1253 _pthread_mutex_unlock(&private_display->lock);
1254 return TDM_ERROR_BAD_REQUEST;
1257 func_output = &private_display->func_output;
1259 TDM_INFO("output(%d) dpms '%s'", private_output->pipe, tdm_dpms_str(dpms_value));
1261 if (func_output->output_set_dpms)
1262 ret = func_output->output_set_dpms(private_output->output_backend, dpms_value);
/* backend without set_dpms: treat as a silent success */
1264 /* LCOV_EXCL_START */
1265 ret = TDM_ERROR_NONE;
1266 TDM_WRN("not implemented!!");
1268 /* LCOV_EXCL_STOP */
1272 if (ret == TDM_ERROR_NONE) {
1273 if (private_output->current_dpms_value != dpms_value) {
/* NOTE(review): 'value' is declared on a line lost in extraction
 * (presumably 'tdm_value value;') -- confirm against upstream */
1275 private_output->current_dpms_value = dpms_value;
1276 value.u32 = dpms_value;
1277 _tdm_output_call_thread_cb_change(private_output, TDM_OUTPUT_CHANGE_DPMS, value);
1278 TDM_INFO("output(%d) dpms '%s' done", private_output->pipe, tdm_dpms_str(dpms_value));
1281 tdm_output_dpms temp = TDM_OUTPUT_DPMS_OFF;
1283 /* update current_dpms_value forcely */
1284 tdm_output_get_dpms_internal(output, &temp);
1286 TDM_ERR("output(%d) set_dpms failed: dpms '%s'", private_output->pipe, tdm_dpms_str(temp));
1289 _pthread_mutex_unlock(&private_display->lock);
1294 /* LCOV_EXCL_START */
/* Asynchronously change the DPMS state of the output.
 * Requires TDM_OUTPUT_CAPABILITY_ASYNC_DPMS; the same extended-DPMS rules
 * as tdm_output_set_dpms() apply. The backend completion is delivered via
 * tdm_output_cb_dpms (registered lazily, once per output). While the
 * change is in flight, waiting_dpms_change blocks further DPMS requests. */
1296 tdm_output_set_dpms_async(tdm_output *output, tdm_output_dpms dpms_value)
1298 tdm_func_output *func_output;
1299 OUTPUT_FUNC_ENTRY();
1301 if (!(private_output->caps.capabilities & TDM_OUTPUT_CAPABILITY_ASYNC_DPMS)) {
1302 TDM_ERR("output doesn't support the asynchronous DPMS control!");
1303 return TDM_ERROR_BAD_REQUEST;
1306 if (dpms_value > TDM_OUTPUT_DPMS_OFF) {
1307 if (dpms_value & TDM_OUTPUT_DPMS_DEFAULT_MASK) {
1308 TDM_ERR("Don't use the low-4bit for an extended DPMS mode: dpms_value(%x)", dpms_value);
1309 return TDM_ERROR_BAD_REQUEST;
1312 if (!(private_output->caps.capabilities & TDM_OUTPUT_CAPABILITY_EXTENDED_DPMS)) {
1313 TDM_ERR("output(%d) doesn't support the extended DPMS control: '%s'",
1314 private_output->pipe, tdm_dpms_str(dpms_value));
1315 return TDM_ERROR_BAD_REQUEST;
1319 _pthread_mutex_lock(&private_display->lock);
1321 if (private_output->waiting_dpms_change) {
1322 TDM_ERR("DPMS is not changed yet. Can't be changed twice");
1323 _pthread_mutex_unlock(&private_display->lock);
1324 return TDM_ERROR_BAD_REQUEST;
1327 func_output = &private_display->func_output;
1328 if (!func_output->output_set_dpms_handler) {
1329 TDM_ERR("not implemented: output_set_dpms_handler");
1330 _pthread_mutex_unlock(&private_display->lock);
1331 return TDM_ERROR_NOT_IMPLEMENTED;
1334 if (!func_output->output_set_dpms_async) {
1335 TDM_ERR("not implemented: output_set_dpms_async");
1336 _pthread_mutex_unlock(&private_display->lock);
1337 return TDM_ERROR_NOT_IMPLEMENTED;
/* register the completion callback only once per output */
1340 if (!private_output->regist_dpms_cb) {
1341 private_output->regist_dpms_cb = 1;
1342 ret = func_output->output_set_dpms_handler(private_output->output_backend,
1343 tdm_output_cb_dpms, private_output);
1344 if (ret != TDM_ERROR_NONE) {
1345 _pthread_mutex_unlock(&private_display->lock);
1346 TDM_ERR("Can't set the dpms handler!!");
1351 TDM_INFO("output(%d) dpms async '%s'", private_output->pipe, tdm_dpms_str(dpms_value));
1353 ret = func_output->output_set_dpms_async(private_output->output_backend, dpms_value);
1355 if (ret == TDM_ERROR_NONE) {
1356 private_output->waiting_dpms_change = 1;
1357 TDM_INFO("output(%d) dpms async '%s' waiting", private_output->pipe, tdm_dpms_str(dpms_value));
1359 tdm_output_dpms temp = TDM_OUTPUT_DPMS_OFF;
1361 /* update current_dpms_value forcely */
1362 tdm_output_get_dpms_internal(output, &temp);
1364 TDM_ERR("output(%d) set_dpms_async failed: dpms '%s'", private_output->pipe, tdm_dpms_str(temp));
1367 _pthread_mutex_unlock(&private_display->lock);
1371 /* LCOV_EXCL_STOP */
1374 tdm_output_get_dpms_internal(tdm_output *output, tdm_output_dpms *dpms_value)
1376 tdm_func_output *func_output;
1377 OUTPUT_FUNC_ENTRY();
1379 TDM_RETURN_VAL_IF_FAIL(private_output != NULL, TDM_ERROR_INVALID_PARAMETER);
1381 /* TODO: this is ugly. But before calling backend's output_get_dpms(), we have
1382 * to check if all backends's DPMS operation has no problem. In future, we'd
1383 * better use new env instead of using commit_per_vblank variable to distinguish
1384 * whether we use the stored value or backend's output_get_dpms.
1386 if (!private_output->commit_per_vblank) {
1387 *dpms_value = private_output->current_dpms_value;
1388 return TDM_ERROR_NONE;
1391 func_output = &private_display->func_output;
1393 if (!func_output->output_get_dpms) {
1394 /* LCOV_EXCL_START */
1395 *dpms_value = private_output->current_dpms_value;
1396 TDM_WRN("not implemented!!");
1397 return TDM_ERROR_NONE;
1398 /* LCOV_EXCL_STOP */
1401 ret = func_output->output_get_dpms(private_output->output_backend, dpms_value);
1402 if (ret != TDM_ERROR_NONE) {
1403 /* LCOV_EXCL_START */
1404 TDM_ERR("output_get_dpms failed");
1405 *dpms_value = TDM_OUTPUT_DPMS_OFF;
1406 /* LCOV_EXCL_STOP */
1409 /* checking with backend's value */
1410 if (*dpms_value != private_output->current_dpms_value) {
1412 TDM_ERR("output(%d) dpms changed suddenly: %s -> %s",
1413 private_output->pipe, private_output->current_dpms_value,
1414 tdm_dpms_str(*dpms_value));
1415 private_output->current_dpms_value = *dpms_value;
1416 value.u32 = *dpms_value;
1417 _tdm_output_call_thread_cb_change(private_output, TDM_OUTPUT_CHANGE_DPMS, value);
/* Public DPMS query: takes the display lock and delegates to
 * tdm_output_get_dpms_internal(). */
1424 tdm_output_get_dpms(tdm_output *output, tdm_output_dpms *dpms_value)
1426 OUTPUT_FUNC_ENTRY();
1428 TDM_RETURN_VAL_IF_FAIL(dpms_value != NULL, TDM_ERROR_INVALID_PARAMETER);
1430 _pthread_mutex_lock(&private_display->lock);
1432 ret = tdm_output_get_dpms_internal(output, dpms_value);
1434 _pthread_mutex_unlock(&private_display->lock);
/* Create a capture object bound to this output.
 * Returns NULL on failure with the reason stored in *error. */
1439 EXTERN tdm_capture *
1440 tdm_output_create_capture(tdm_output *output, tdm_error *error)
1442 tdm_capture *capture = NULL;
1444 OUTPUT_FUNC_ENTRY_ERROR();
1446 _pthread_mutex_lock(&private_display->lock);
1448 capture = (tdm_capture *)tdm_capture_create_output_internal(private_output, error);
1450 _pthread_mutex_unlock(&private_display->lock);
/* Create a normal (non-video) HWC window on this output.
 * Only valid when the output advertises TDM_OUTPUT_CAPABILITY_HWC;
 * otherwise NULL is returned with *error = TDM_ERROR_BAD_REQUEST. */
1455 EXTERN tdm_hwc_window *
1456 tdm_output_hwc_create_window(tdm_output *output, tdm_error *error)
1458 tdm_hwc_window *hwc_window = NULL;
1460 OUTPUT_FUNC_ENTRY_ERROR();
1462 _pthread_mutex_lock(&private_display->lock);
/* second argument 0 selects a non-video window */
1464 if (private_output->caps.capabilities & TDM_OUTPUT_CAPABILITY_HWC)
1465 hwc_window = (tdm_hwc_window *)tdm_hwc_window_create_internal(private_output, 0, error);
1467 /* LCOV_EXCL_START */
1468 TDM_ERR("output(%p) not support HWC", private_output);
1470 *error = TDM_ERROR_BAD_REQUEST;
1471 /* LCOV_EXCL_STOP */
1474 _pthread_mutex_unlock(&private_display->lock);
/* Create a video HWC window on this output (same contract as
 * tdm_output_hwc_create_window, but the window is flagged as video). */
1479 EXTERN tdm_hwc_window *
1480 tdm_output_hwc_create_video_window(tdm_output *output, tdm_error *error)
1482 tdm_hwc_window *hwc_window = NULL;
1484 OUTPUT_FUNC_ENTRY_ERROR();
1486 _pthread_mutex_lock(&private_display->lock);
/* second argument 1 selects a video window */
1488 if (private_output->caps.capabilities & TDM_OUTPUT_CAPABILITY_HWC)
1489 hwc_window = (tdm_hwc_window *)tdm_hwc_window_create_internal(private_output, 1, error);
1491 /* LCOV_EXCL_START */
1492 TDM_ERR("output(%p) not support HWC", private_output);
1494 *error = TDM_ERROR_BAD_REQUEST;
1495 /* LCOV_EXCL_STOP */
1498 _pthread_mutex_unlock(&private_display->lock);
/* Destroy an HWC window previously created on this output. */
1504 tdm_output_hwc_destroy_window(tdm_output *output, tdm_hwc_window *hwc_window)
1506 OUTPUT_FUNC_ENTRY();
1508 TDM_RETURN_VAL_IF_FAIL(hwc_window != NULL, TDM_ERROR_INVALID_PARAMETER);
1510 _pthread_mutex_lock(&private_display->lock);
1512 ret = tdm_hwc_window_destroy_internal(hwc_window);
1514 _pthread_mutex_unlock(&private_display->lock);
/* Ask the backend to validate the given set of composited HWC windows.
 * Frontend window handles are translated to their backend counterparts in
 * a temporary array before the call; *num_types receives the number of
 * windows whose composition type the backend wants to change.
 * num_wnds == 0 is a valid request (validate with no client windows). */
1520 tdm_output_hwc_validate(tdm_output *output, tdm_hwc_window **composited_wnds,
1521 uint32_t num_wnds, uint32_t *num_types)
1523 tdm_func_output *func_output = NULL;
1524 tdm_private_hwc_window **composited_wnds_frontend = NULL;
1525 tdm_hwc_window **composited_wnds_backend = NULL;
1528 OUTPUT_FUNC_ENTRY();
1530 TDM_RETURN_VAL_IF_FAIL(num_types != NULL, TDM_ERROR_INVALID_PARAMETER);
1532 _pthread_mutex_lock(&private_display->lock);
1534 if (!(private_output->caps.capabilities & TDM_OUTPUT_CAPABILITY_HWC)) {
1535 TDM_ERR("output(%p) not support HWC", private_output);
1536 _pthread_mutex_unlock(&private_display->lock);
1537 return TDM_ERROR_BAD_REQUEST;
1540 func_output = &private_display->func_output;
1542 if (!func_output->output_hwc_validate) {
1543 /* LCOV_EXCL_START */
1544 _pthread_mutex_unlock(&private_display->lock);
1545 TDM_ERR("not implemented!!");
1546 return TDM_ERROR_NOT_IMPLEMENTED;
1547 /* LCOV_EXCL_STOP */
/* fast path: nothing to translate */
1550 if (num_wnds == 0) {
1551 ret = func_output->output_hwc_validate(private_output->output_backend, NULL, 0, num_types);
1553 _pthread_mutex_unlock(&private_display->lock);
/* build the backend-handle array for the backend call */
1558 composited_wnds_backend = calloc(num_wnds, sizeof(tdm_hwc_window *));
1559 if (!composited_wnds_backend) {
1560 /* LCOV_EXCL_START */
1561 _pthread_mutex_unlock(&private_display->lock);
1562 return TDM_ERROR_OUT_OF_MEMORY;
1563 /* LCOV_EXCL_STOP */
1566 composited_wnds_frontend = (tdm_private_hwc_window **)composited_wnds;
1568 for (i = 0; i < num_wnds; i++)
1569 composited_wnds_backend[i] = composited_wnds_frontend[i]->hwc_window_backend;
1571 ret = func_output->output_hwc_validate(private_output->output_backend, composited_wnds_backend, num_wnds, num_types);
1573 free(composited_wnds_backend);
1575 _pthread_mutex_unlock(&private_display->lock);
/* Register the (single) handler invoked when the backend asks for a
 * revalidation of this HWC output. Re-registration is refused. */
1581 tdm_output_hwc_set_need_validate_handler(tdm_output *output,
1582 tdm_output_need_validate_handler hndl)
1584 OUTPUT_FUNC_ENTRY();
1586 TDM_RETURN_VAL_IF_FAIL(hndl != NULL, TDM_ERROR_INVALID_PARAMETER);
1588 _pthread_mutex_lock(&private_display->lock);
1590 if (!(private_output->caps.capabilities & TDM_OUTPUT_CAPABILITY_HWC)) {
1591 TDM_ERR("output(%p) not support HWC", private_output);
1592 _pthread_mutex_unlock(&private_display->lock);
1593 return TDM_ERROR_BAD_REQUEST;
1596 /* there's no reason to allow this */
1597 if (private_output->need_validate.hndl) {
1599 _pthread_mutex_unlock(&private_display->lock);
1600 return TDM_ERROR_OPERATION_FAILED;
1603 private_output->need_validate.hndl = hndl;
1605 _pthread_mutex_unlock(&private_display->lock);
/* Fetch the composition-type changes requested by the last validate call.
 * When 'hwc_window'/'composition_types' are NULL only *num_elements is
 * filled; otherwise the backend window handles returned by the backend
 * are mapped back to their frontend wrappers in place. */
1611 tdm_output_hwc_get_changed_composition_types(tdm_output *output,
1612 uint32_t *num_elements,
1613 tdm_hwc_window **hwc_window,
1614 tdm_hwc_window_composition *composition_types)
1616 tdm_func_output *func_output = NULL;
1617 tdm_private_hwc_window * private_hwc_window = NULL;
1620 OUTPUT_FUNC_ENTRY();
1622 TDM_RETURN_VAL_IF_FAIL(num_elements != NULL, TDM_ERROR_INVALID_PARAMETER);
1624 _pthread_mutex_lock(&private_display->lock);
1626 if (!(private_output->caps.capabilities & TDM_OUTPUT_CAPABILITY_HWC)) {
1627 TDM_ERR("output(%p) not support HWC", private_output);
1628 _pthread_mutex_unlock(&private_display->lock);
1629 return TDM_ERROR_BAD_REQUEST;
1632 func_output = &private_display->func_output;
1634 if (!func_output->output_hwc_get_changed_composition_types) {
1635 /* LCOV_EXCL_START */
1636 _pthread_mutex_unlock(&private_display->lock);
1637 TDM_ERR("not implemented!!");
1638 return TDM_ERROR_NOT_IMPLEMENTED;
1639 /* LCOV_EXCL_STOP */
1642 ret = func_output->output_hwc_get_changed_composition_types(private_output->output_backend,
1643 num_elements, hwc_window, composition_types);
1644 if (ret != TDM_ERROR_NONE) {
1645 /* LCOV_EXCL_START */
1646 _pthread_mutex_unlock(&private_display->lock);
1648 /* LCOV_EXCL_STOP */
/* count-only query: nothing to translate */
1651 if (hwc_window == NULL || composition_types == NULL) {
1652 _pthread_mutex_unlock(&private_display->lock);
1653 return TDM_ERROR_NONE;
/* translate backend handles back to frontend wrappers */
1656 for (i = 0; i < *num_elements; i++) {
1658 private_hwc_window = _tdm_output_find_private_hwc_window(private_output, hwc_window[i]);
1660 if (private_hwc_window == NULL) {
1661 /* LCOV_EXCL_START */
1662 TDM_ERR("failed! This should never happen!");
1663 func_output->output_hwc_destroy_window(private_output->output_backend, hwc_window[i]);
1665 _pthread_mutex_unlock(&private_display->lock);
1666 return TDM_ERROR_OPERATION_FAILED;
1667 /* LCOV_EXCL_STOP */
1670 hwc_window[i] = (tdm_hwc_window*)private_hwc_window;
1673 _pthread_mutex_unlock(&private_display->lock);
1679 tdm_output_hwc_accept_changes(tdm_output *output)
1681 tdm_func_output *func_output = NULL;
1683 OUTPUT_FUNC_ENTRY();
1685 _pthread_mutex_lock(&private_display->lock);
1687 if (!(private_output->caps.capabilities & TDM_OUTPUT_CAPABILITY_HWC)) {
1688 TDM_ERR("output(%p) not support HWC", private_output);
1689 _pthread_mutex_unlock(&private_display->lock);
1690 return TDM_ERROR_BAD_REQUEST;
1693 func_output = &private_display->func_output;
1695 if (!func_output->output_hwc_validate) {
1696 /* LCOV_EXCL_START */
1697 _pthread_mutex_unlock(&private_display->lock);
1698 TDM_ERR("not implemented!!");
1699 return TDM_ERROR_NOT_IMPLEMENTED;
1700 /* LCOV_EXCL_STOP */
1703 ret = func_output->output_hwc_accept_changes(private_output->output_backend);
1705 _pthread_mutex_unlock(&private_display->lock);
/* Return the tbm_surface_queue used for the client (composited) target
 * buffer of this HWC output; NULL on failure with the reason in *error. */
1711 tdm_output_hwc_get_target_buffer_queue(tdm_output *output, tdm_error *error)
1713 tdm_func_output *func_output = NULL;
1714 tbm_surface_queue_h queue = NULL;
1716 OUTPUT_FUNC_ENTRY_ERROR();
1718 _pthread_mutex_lock(&private_display->lock);
1720 if (!(private_output->caps.capabilities & TDM_OUTPUT_CAPABILITY_HWC)) {
1721 TDM_ERR("output(%p) not support HWC", private_output);
1723 *error = TDM_ERROR_BAD_REQUEST;
1724 _pthread_mutex_unlock(&private_display->lock);
1728 func_output = &private_display->func_output;
1730 if (!func_output->output_hwc_get_target_buffer_queue) {
1731 /* LCOV_EXCL_START */
1732 _pthread_mutex_unlock(&private_display->lock);
1733 TDM_ERR("not implemented!!");
/* NOTE(review): the '*error = TDM_ERROR_NOT_IMPLEMENTED' / 'return NULL'
 * lines of this branch were lost in extraction -- confirm upstream */
1735 /* LCOV_EXCL_STOP */
1738 queue = func_output->output_hwc_get_target_buffer_queue(private_output->output_backend, error);
1740 _pthread_mutex_unlock(&private_display->lock);
/* Hand the client (composited) target buffer and its damage region to the
 * backend. With TDM_DUMP_FLAG_WINDOW debugging enabled, the buffer is also
 * dumped to tdm_debug_dump_dir for inspection. */
1746 tdm_output_hwc_set_client_target_buffer(tdm_output *output, tbm_surface_h target_buffer, tdm_hwc_region damage)
1748 tdm_func_output *func_output = NULL;
1750 OUTPUT_FUNC_ENTRY();
1752 _pthread_mutex_lock(&private_display->lock);
1754 if (!(private_output->caps.capabilities & TDM_OUTPUT_CAPABILITY_HWC)) {
1755 TDM_ERR("output(%p) not support HWC", private_output);
1756 _pthread_mutex_unlock(&private_display->lock);
1757 return TDM_ERROR_BAD_REQUEST;
1760 if (tdm_debug_dump & TDM_DUMP_FLAG_WINDOW) {
1761 /* LCOV_EXCL_START */
1762 char str[TDM_PATH_LEN];
/* NOTE(review): the declaration of the dump counter 'i' (presumably
 * 'static int i;') was lost in extraction -- confirm upstream */
1764 snprintf(str, TDM_PATH_LEN, "target_window_%d_%03d",
1765 private_output->index, i++);
1766 tdm_helper_dump_buffer_str(target_buffer, tdm_debug_dump_dir, str);
1767 /* LCOV_EXCL_STOP */
1770 func_output = &private_display->func_output;
1772 if (!func_output->output_hwc_set_client_target_buffer) {
1773 /* LCOV_EXCL_START */
1774 _pthread_mutex_unlock(&private_display->lock);
1775 TDM_ERR("not implemented!!");
1776 return TDM_ERROR_NOT_IMPLEMENTED;
1777 /* LCOV_EXCL_STOP */
1780 ret = func_output->output_hwc_set_client_target_buffer(private_output->output_backend, target_buffer, damage);
1782 _pthread_mutex_unlock(&private_display->lock);
/* Detach the client target buffer from the backend (counterpart of
 * tdm_output_hwc_set_client_target_buffer). */
1788 tdm_output_hwc_unset_client_target_buffer(tdm_output *output)
1790 tdm_func_output *func_output = NULL;
1792 OUTPUT_FUNC_ENTRY();
1794 _pthread_mutex_lock(&private_display->lock);
1796 if (!(private_output->caps.capabilities & TDM_OUTPUT_CAPABILITY_HWC)) {
1797 TDM_ERR("output(%p) not support HWC", private_output);
1798 _pthread_mutex_unlock(&private_display->lock);
1799 return TDM_ERROR_BAD_REQUEST;
1802 func_output = &private_display->func_output;
1804 if (!func_output->output_hwc_unset_client_target_buffer) {
1805 /* LCOV_EXCL_START */
1806 _pthread_mutex_unlock(&private_display->lock);
1807 TDM_ERR("not implemented!!");
1808 return TDM_ERROR_NOT_IMPLEMENTED;
1809 /* LCOV_EXCL_STOP */
1812 ret = func_output->output_hwc_unset_client_target_buffer(private_output->output_backend);
1814 _pthread_mutex_unlock(&private_display->lock);
/* Layer-commit completion trampoline for the HWC target buffer: unwraps the
 * tdm_private_output_hwc_target_buffer_commit_handler allocated by
 * tdm_output_hwc_commit_client_target_buffer(), forwards the vblank
 * timestamp to the user's callback and frees the wrapper.
 * NOTE(review): expects 'user_data' to point at that wrapper struct. */
1820 _tdm_output_hwc_layer_commit_handler(tdm_layer *layer, unsigned int sequence,
1821 unsigned int tv_sec, unsigned int tv_usec,
1824 tdm_private_output_hwc_target_buffer_commit_handler *output_hwc_target_buffer_commit_handler = (tdm_private_output_hwc_target_buffer_commit_handler *)user_data;
1825 tdm_output_hwc_target_buffer_commit_handler func = output_hwc_target_buffer_commit_handler->func;
1826 tdm_output *output = (tdm_output *)output_hwc_target_buffer_commit_handler->private_output;
1827 void *data = output_hwc_target_buffer_commit_handler->user_data;
1829 func(output, sequence, tv_sec, tv_usec, data);
1831 free(output_hwc_target_buffer_commit_handler);
1835 tdm_output_hwc_commit_client_target_buffer(tdm_output *output, tdm_output_hwc_target_buffer_commit_handler func, void *user_data)
1837 tdm_func_output *func_output;
1838 tdm_private_output_hwc_target_buffer_commit_handler *output_hwc_target_buffer_commit_handler;
1839 tdm_layer *layer = NULL;
1840 tdm_private_layer *private_layer;
1841 const tdm_output_mode *mode;
1842 tbm_surface_h buffer;
1844 OUTPUT_FUNC_ENTRY();
1846 _pthread_mutex_lock(&private_display->lock);
1848 if (!(private_output->caps.capabilities & TDM_OUTPUT_CAPABILITY_HWC)) {
1849 TDM_ERR("output(%p) not support HWC", private_output);
1850 _pthread_mutex_unlock(&private_display->lock);
1851 return TDM_ERROR_BAD_REQUEST;
1854 func_output = &private_display->func_output;
1856 if (!func_output->output_hwc_get_client_target_buffer_layer) {
1857 /* LCOV_EXCL_START */
1858 _pthread_mutex_unlock(&private_display->lock);
1859 TDM_ERR("not implemented!!");
1860 return TDM_ERROR_NOT_IMPLEMENTED;
1861 /* LCOV_EXCL_STOP */
1864 layer = func_output->output_hwc_get_client_target_buffer_layer(private_output->output_backend,
1867 /* LCOV_EXCL_START */
1868 _pthread_mutex_unlock(&private_display->lock);
1869 TDM_ERR("no assigned layer!!");
1870 return TDM_ERROR_INVALID_PARAMETER;
1871 /* LCOV_EXCL_STOP */
1874 private_layer = (tdm_private_layer*)layer;
1876 if (!func_output->output_hwc_get_client_target_buffer) {
1877 /* LCOV_EXCL_START */
1878 _pthread_mutex_unlock(&private_display->lock);
1879 TDM_ERR("not implemented!!");
1880 return TDM_ERROR_NOT_IMPLEMENTED;
1881 /* LCOV_EXCL_STOP */
1884 buffer = func_output->output_hwc_get_client_target_buffer(private_output->output_backend,
1887 ret = tdm_layer_set_buffer_internal(private_layer, buffer);
1889 ret = tdm_layer_unset_buffer_internal(private_layer);
1890 if (ret != TDM_ERROR_NONE) {
1891 /* LCOV_EXCL_START */
1892 TDM_ERR("failed: layer set info(window)");
1893 /* LCOV_EXCL_STOP */
1897 if (private_output->need_set_target_info) {
1898 mode = private_output->current_mode;
1899 private_output->target_buffer_info.src_config.size.h = mode->hdisplay;
1900 private_output->target_buffer_info.src_config.size.v = mode->vdisplay;
1901 private_output->target_buffer_info.src_config.pos.x = 0;
1902 private_output->target_buffer_info.src_config.pos.y = 0;
1903 private_output->target_buffer_info.src_config.pos.w = mode->hdisplay;
1904 private_output->target_buffer_info.src_config.pos.h = mode->vdisplay;
1905 private_output->target_buffer_info.dst_pos.x = 0;
1906 private_output->target_buffer_info.dst_pos.y = 0;
1907 private_output->target_buffer_info.dst_pos.w = mode->hdisplay;
1908 private_output->target_buffer_info.dst_pos.h = mode->vdisplay;
1909 private_output->target_buffer_info.transform = TDM_TRANSFORM_NORMAL;
1911 ret = tdm_layer_set_info_internal(private_layer, &private_output->target_buffer_info);
1912 if (ret != TDM_ERROR_NONE) {
1913 /* LCOV_EXCL_START */
1914 TDM_ERR("failed: layer set info(window)");
1915 /* LCOV_EXCL_STOP */
1919 private_output->need_set_target_info = 0;
1922 output_hwc_target_buffer_commit_handler = calloc(1, sizeof(tdm_private_output_hwc_target_buffer_commit_handler));
1923 if (!output_hwc_target_buffer_commit_handler) {
1924 /* LCOV_EXCL_START */
1925 TDM_ERR("failed: alloc memory");
1926 return TDM_ERROR_OUT_OF_MEMORY;
1927 /* LCOV_EXCL_STOP */
1930 output_hwc_target_buffer_commit_handler->private_output = private_output;
1931 output_hwc_target_buffer_commit_handler->func = func;
1932 output_hwc_target_buffer_commit_handler->user_data = user_data;
1934 ret = tdm_layer_commit_internal(private_layer, _tdm_output_hwc_layer_commit_handler, user_data);
1935 if (ret != TDM_ERROR_NONE) {
1936 /* LCOV_EXCL_START */
1937 TDM_ERR("failed: commit layer(target buffer)");
1938 free(output_hwc_target_buffer_commit_handler);
1939 /* LCOV_EXCL_STOP */
1943 _pthread_mutex_unlock(&private_display->lock);
/* Query the tbm formats the backend supports for HWC video windows.
 * The returned array is owned by the backend; only pointers are returned. */
1949 tdm_output_hwc_get_video_supported_formats(tdm_output *output, const tbm_format **formats,
1952 tdm_func_output *func_output;
1953 OUTPUT_FUNC_ENTRY();
1955 TDM_RETURN_VAL_IF_FAIL(formats != NULL, TDM_ERROR_INVALID_PARAMETER);
1956 TDM_RETURN_VAL_IF_FAIL(count != NULL, TDM_ERROR_INVALID_PARAMETER);
1958 _pthread_mutex_lock(&private_display->lock);
1960 func_output = &private_display->func_output;
1962 if (!func_output->output_hwc_get_video_supported_formats) {
1963 /* LCOV_EXCL_START */
1964 _pthread_mutex_unlock(&private_display->lock);
1965 TDM_ERR("not implemented!!");
1966 return TDM_ERROR_NOT_IMPLEMENTED;
1967 /* LCOV_EXCL_STOP */
1970 ret = func_output->output_hwc_get_video_supported_formats(
1971 private_output->output_backend, formats, count);
1973 _pthread_mutex_unlock(&private_display->lock);
/* Check whether 'private_output' is still registered in the display's
 * output list as an HWC-capable output. Needed because a need-validate
 * event may arrive after the output it refers to has been destroyed.
 * tdm_display_init(NULL) only bumps the refcount of the already-initialized
 * display; it is balanced by tdm_display_deinit() on every path. */
1979 _is_hwc_output_still_existed(tdm_private_output *private_output)
1981 tdm_private_display *dpy;
1982 tdm_private_output *o = NULL;
1984 dpy = tdm_display_init(NULL);
1985 TDM_RETURN_VAL_IF_FAIL(dpy != NULL, TDM_ERROR_OPERATION_FAILED);
1987 LIST_FOR_EACH_ENTRY(o, &dpy->output_list, link) {
/* skip non-HWC outputs; match found -> deinit and report existence */
1988 if (!(o->caps.capabilities & TDM_OUTPUT_CAPABILITY_HWC))
1991 if (o == private_output)
1995 tdm_display_deinit(dpy);
1999 tdm_display_deinit(dpy);
2003 /* gets called on behalf of the ecore-main-loop thread */
/* Dispatch a backend 'need revalidate' request to the user handler
 * registered via tdm_output_hwc_set_need_validate_handler(). The output's
 * continued existence is verified first, since the event may outlive it.
 * 'cb_base' and 'user_data' are part of the thread-callback signature but
 * unused here. */
2005 tdm_output_need_validate_handler_thread(tdm_private_display *private_display, void *object, tdm_thread_cb_base *cb_base, void *user_data)
2007 tdm_private_output *private_output = object;
2009 TDM_RETURN_IF_FAIL(private_output != NULL);
2011 _pthread_mutex_lock(&private_display->lock);
2013 /* as we get 'private_output' within an event, an output this 'private_output'
2014 * points to can be destroyed already */
2015 if (!_is_hwc_output_still_existed(private_output)) {
2016 _pthread_mutex_unlock(&private_display->lock);
2020 _pthread_mutex_unlock(&private_display->lock);
2022 TDM_INFO("tdm-backend asks for revalidation for the output:%p.", private_output);
/* user handler is invoked without the display lock held */
2024 if (private_output->need_validate.hndl)
2025 private_output->need_validate.hndl((tdm_output*)private_output);
2028 /* gets called on behalf of the tdm-thread */
/* eventfd readiness handler: drains the need_validate eventfd and forwards
 * a TDM_THREAD_CB_NEED_VALIDATE event to the output's thread callbacks
 * (eventually reaching tdm_output_need_validate_handler_thread). */
2030 _need_validate_handler(int fd, tdm_event_loop_mask mask, void *user_data)
2032 tdm_thread_cb_need_validate ev;
2033 tdm_private_output *private_output;
2037 private_output = (tdm_private_output *)user_data;
/* drain the eventfd counter so the fd stops polling readable */
2039 if (read(private_output->need_validate.event_fd, &value, sizeof(value)) < 0) {
2040 TDM_ERR("error while trying to read from a need_validate.event_fd fd.");
2041 return TDM_ERROR_OPERATION_FAILED;
2044 memset(&ev, 0, sizeof ev);
2045 ev.base.type = TDM_THREAD_CB_NEED_VALIDATE;
2046 ev.base.length = sizeof ev;
2047 ev.base.object_stamp = private_output->stamp;
2048 ev.base.data = NULL;
2051 ret = tdm_thread_cb_call(private_output, &ev.base);
2052 TDM_WARNING_IF_FAIL(ret == TDM_ERROR_NONE);
2054 TDM_INFO("tdm-thread: get a 'need to revalidate' event for the ouptut:%p.", private_output);
2056 /* who cares about this? */
2057 return TDM_ERROR_NONE;
/* Wire an eventfd into the tdm-thread event loop so the backend can wake
 * the frontend with 'need revalidate' requests for this HWC output.
 * Must be called with the display lock held (asserted below).
 * NOTE(review): the declaration/creation of 'fd' (presumably via
 * eventfd()) sits on lines lost in extraction -- confirm upstream. */
2061 tdm_output_need_validate_event_init(tdm_output *output)
2065 OUTPUT_FUNC_ENTRY();
2067 TDM_RETURN_VAL_IF_FAIL(TDM_MUTEX_IS_LOCKED(), TDM_ERROR_OPERATION_FAILED);
2069 if (!(private_output->caps.capabilities & TDM_OUTPUT_CAPABILITY_HWC)) {
2070 TDM_ERR("output(%p) not support HWC", private_output);
2071 return TDM_ERROR_BAD_REQUEST;
2074 /* build in eventfd fds into event_loop listened & handled by the tdm-thread */
2076 TDM_WARNING_IF_FAIL(fd >= 0);
2078 private_output->need_validate.event_source = tdm_event_loop_add_fd_handler(private_display,
2079 fd, TDM_EVENT_LOOP_READABLE, _need_validate_handler, private_output, &ret);
2080 TDM_WARNING_IF_FAIL(ret == TDM_ERROR_NONE);
2082 private_output->need_validate.event_fd = fd;
2084 TDM_INFO("register an output:%p for the revalidation, event_fd:%d.", private_output, fd);
/* Select the commit-per-vblank policy for an output.
 * mode: 0 = disabled, 1 = enabled for a single layer,
 *       2 = enabled relative to the previous commit.
 * Any other value is rejected with TDM_ERROR_INVALID_PARAMETER. */
2090 tdm_output_choose_commit_per_vblank_mode(tdm_private_output *private_output, int mode)
2092 if (!private_output)
2093 return TDM_ERROR_INVALID_PARAMETER;
2095 if (mode < 0 || mode > 2)
2096 return TDM_ERROR_INVALID_PARAMETER;
2098 private_output->commit_per_vblank = mode;
2100 if (private_output->commit_per_vblank == 0)
2101 TDM_INFO("commit per vblank: disable");
2102 else if (private_output->commit_per_vblank == 1)
2103 TDM_INFO("commit per vblank: enable (1 layer)");
2104 else if (private_output->commit_per_vblank == 2)
2105 TDM_INFO("commit per vblank: enable (previous commit)");
2107 return TDM_ERROR_NONE;