1 /**************************************************************************
5 * Copyright 2015 Samsung Electronics co., Ltd. All Rights Reserved.
7 * Contact: Eunchul Kim <chulspro.kim@samsung.com>,
8 * JinYoung Jeon <jy0.jeon@samsung.com>,
9 * Taeheon Kim <th908.kim@samsung.com>,
10 * YoungJun Cho <yj44.cho@samsung.com>,
11 * SooChan Lim <sc1.lim@samsung.com>,
12 * Boram Park <sc1.lim@samsung.com>
14 * Permission is hereby granted, free of charge, to any person obtaining a
15 * copy of this software and associated documentation files (the
16 * "Software"), to deal in the Software without restriction, including
17 * without limitation the rights to use, copy, modify, merge, publish,
18 * distribute, sub license, and/or sell copies of the Software, and to
19 * permit persons to whom the Software is furnished to do so, subject to
20 * the following conditions:
22 * The above copyright notice and this permission notice (including the
23 * next paragraph) shall be included in all copies or substantial portions
26 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
27 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
28 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
29 * IN NO EVENT SHALL PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR
30 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
31 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
32 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
34 **************************************************************************/
40 #include "tdm_private.h"
/* Common prologue for tdm_output_* entry points returning tdm_error:
 * validates the opaque `output` handle, casts it to tdm_private_output and
 * resolves the owning tdm_private_display. Declares `ret` defaulting to
 * TDM_ERROR_NONE. On an invalid handle the enclosing function returns
 * TDM_ERROR_INVALID_PARAMETER immediately. */
44 #define OUTPUT_FUNC_ENTRY() \
45 tdm_private_display *private_display; \
46 tdm_private_output *private_output; \
47 tdm_error ret = TDM_ERROR_NONE; /* default TDM_ERROR_NONE */\
48 TDM_RETURN_VAL_IF_FAIL(tdm_output_is_valid(output), TDM_ERROR_INVALID_PARAMETER); \
49 private_output = (tdm_private_output*)output; \
50 private_display = private_output->private_display
/* Variant of OUTPUT_FUNC_ENTRY() for entry points that return a pointer:
 * on an invalid handle it sets the caller-visible error to
 * TDM_ERROR_INVALID_PARAMETER (via TDM_RETURN_VAL_IF_FAIL_WITH_ERROR) and
 * returns NULL from the enclosing function. */
52 #define OUTPUT_FUNC_ENTRY_ERROR() \
53 tdm_private_display *private_display; \
54 tdm_private_output *private_output; \
55 tdm_error ret = TDM_ERROR_NONE; /* default TDM_ERROR_NONE */\
56 TDM_RETURN_VAL_IF_FAIL_WITH_ERROR(tdm_output_is_valid(output), TDM_ERROR_INVALID_PARAMETER, NULL); \
57 private_output = (tdm_private_output*)output; \
58 private_display = private_output->private_display
/* Forward declaration: (re)arms the per-output vblank timeout timer with
 * ms_delay milliseconds; see the definition below.
 * NOTE(review): the return-type line of this declaration is missing from
 * this extraction. */
61 _tdm_output_vblank_timeout_update(tdm_private_output *private_output, int ms_delay);
/* Timer callback invoked when a requested vblank did not arrive in time.
 * Bumps the per-output expiry counter, logs the timeout, and dumps every
 * vblank handler still pending on this output for debugging.
 * NOTE(review): the return-type line of the signature is missing from this
 * extraction; the body returns tdm_error. */
64 _tdm_output_vblank_timeout_cb(void *user_data)
66 tdm_private_output *private_output = user_data;
67 tdm_private_output_vblank_handler *v = NULL;
69 TDM_RETURN_VAL_IF_FAIL(private_output != NULL, TDM_ERROR_OPERATION_FAILED);
/* count consecutive timeouts so the log below can show "(N times)" */
71 private_output->vblank_timeout_timer_expired++;
73 TDM_ERR("TDM output(%d) vblank TIMEOUT!! (%d time%s)",
75 private_output->vblank_timeout_timer_expired,
76 (private_output->vblank_timeout_timer_expired > 1) ? "s" : "");
/* dump all handlers still waiting for a vblank on this output */
78 LIST_FOR_EACH_ENTRY(v, &private_output->vblank_handler_list, link) {
79 TDM_ERR("vblank_handler(%p) interval(%d) sync(%d) sent_to_frontend(%u) owner_tid(%d)",
80 v, v->interval, v->sync, v->sent_to_frontend, v->owner_tid);
83 return TDM_ERROR_NONE;
/* Debug helper: logs every pending vblank handler on this output that was
 * registered with the given user_data. */
87 tdm_output_vblank_print_wait_information(tdm_private_output *private_output, void *user_data)
89 tdm_private_output_vblank_handler *v = NULL;
91 TDM_RETURN_IF_FAIL(private_output != NULL);
92 TDM_RETURN_IF_FAIL(user_data != NULL);
94 TDM_ERR("TDM output(%d) vblank user_data(%p) info!!", private_output->pipe, user_data);
96 LIST_FOR_EACH_ENTRY(v, &private_output->vblank_handler_list, link) {
/* only report handlers belonging to this user_data */
97 if (v->user_data != user_data)
99 TDM_ERR("vblank_handler(%p) interval(%d) sync(%d) sent_to_frontend(%u) owner_tid(%d)",
100 v, v->interval, v->sync, v->sent_to_frontend, v->owner_tid);
/* Lazily creates the vblank timeout timer on first use (resetting the
 * expiry counter), then updates its deadline to ms_delay milliseconds via
 * the event loop. Failures are logged; the error path lines are not fully
 * visible in this extraction. */
105 _tdm_output_vblank_timeout_update(tdm_private_output *private_output, int ms_delay)
/* create the timer once per output */
109 if (!private_output->vblank_timeout_timer) {
110 private_output->vblank_timeout_timer =
111 tdm_event_loop_add_timer_handler(private_output->private_display,
112 _tdm_output_vblank_timeout_cb,
115 if (!private_output->vblank_timeout_timer) {
116 TDM_ERR("output(%d) couldn't add timer", private_output->pipe);
119 TDM_INFO("output(%d) create vblank timeout timer", private_output->pipe);
120 private_output->vblank_timeout_timer_expired = 0;
/* (re)arm the timer; ms_delay semantics are defined by the event loop */
123 ret = tdm_event_loop_source_timer_update(private_output->vblank_timeout_timer, ms_delay);
124 if (ret != TDM_ERROR_NONE) {
125 TDM_ERR("output(%d) couldn't update timer", private_output->pipe);
/* Returns the tdm_private_hwc_window that wraps the given backend hwc
 * window, searching this output's hwc_window_list; the not-found return
 * (presumably NULL) is outside this extraction's visible lines. */
130 static tdm_private_hwc_window *
131 _tdm_output_find_private_hwc_window(tdm_private_output *private_output,
132 tdm_hwc_window *hwc_window_backend)
134 tdm_private_hwc_window *private_hwc_window = NULL;
136 LIST_FOR_EACH_ENTRY(private_hwc_window, &private_output->hwc_window_list, link) {
137 if (private_hwc_window->hwc_window_backend == hwc_window_backend)
138 return private_hwc_window;
/* One-time module init: registers tdm_display_find_output_stamp as the
 * object-lookup function for the three output-related thread callback
 * types, so cross-thread messages can be routed back to their output. */
145 tdm_output_init(tdm_private_display *private_display)
147 tdm_thread_cb_set_find_func(TDM_THREAD_CB_OUTPUT_COMMIT, tdm_display_find_output_stamp);
148 tdm_thread_cb_set_find_func(TDM_THREAD_CB_OUTPUT_VBLANK, tdm_display_find_output_stamp);
149 tdm_thread_cb_set_find_func(TDM_THREAD_CB_OUTPUT_CHANGE, tdm_display_find_output_stamp);
151 return TDM_ERROR_NONE;
/* Returns the backend object owning this output. Takes the display lock
 * around the read; on success the optional `error` out-parameter is set to
 * TDM_ERROR_NONE (the NULL-check for `error` is not visible here). */
155 tdm_output_get_backend(tdm_output *output, tdm_error *error)
157 tdm_private_backend *private_backend;
159 OUTPUT_FUNC_ENTRY_ERROR();
161 _pthread_mutex_lock(&private_display->lock);
163 private_backend = private_output->private_backend;
166 *error = TDM_ERROR_NONE;
168 _pthread_mutex_unlock(&private_display->lock);
170 return private_backend;
/* Copies the cached EDID-style identity strings (maker/model/name) from
 * the output capabilities under the display lock. Each out-parameter is
 * presumably optional (per-pointer NULL checks fall on lines missing from
 * this extraction). Returned strings point into caps — caller must not
 * free them. */
174 tdm_output_get_model_info(tdm_output *output, const char **maker,
175 const char **model, const char **name)
179 _pthread_mutex_lock(&private_display->lock);
182 *maker = private_output->caps.maker;
184 *model = private_output->caps.model;
186 *name = private_output->caps.name;
188 _pthread_mutex_unlock(&private_display->lock);
/* Returns the cached capability bitmask of this output (e.g. the HWC bit
 * checked elsewhere in this file) under the display lock. */
194 tdm_output_get_capabilities(tdm_output *output, tdm_output_capability *capabilities)
198 TDM_RETURN_VAL_IF_FAIL(capabilities != NULL, TDM_ERROR_INVALID_PARAMETER);
200 _pthread_mutex_lock(&private_display->lock);
202 *capabilities = private_output->caps.capabilities;
204 _pthread_mutex_unlock(&private_display->lock);
/* Returns the cached connection status of this output under the display
 * lock. */
210 tdm_output_get_conn_status(tdm_output *output, tdm_output_conn_status *status)
214 TDM_RETURN_VAL_IF_FAIL(status != NULL, TDM_ERROR_INVALID_PARAMETER);
216 _pthread_mutex_lock(&private_display->lock);
218 *status = private_output->caps.status;
220 _pthread_mutex_unlock(&private_display->lock);
225 /* LCOV_EXCL_START */
/* Refreshes the frontend's cached data for this output by re-querying the
 * backend (called from the connection-status callback on hotplug). */
227 _tdm_output_update(tdm_output *output_backend, void *user_data)
229 tdm_private_output *private_output = user_data;
232 TDM_RETURN_IF_FAIL(private_output);
234 ret = tdm_display_update_output(private_output->private_backend, output_backend, private_output->pipe);
235 TDM_RETURN_IF_FAIL(ret == TDM_ERROR_NONE);
/* Thread-callback trampoline for TDM_THREAD_CB_OUTPUT_CHANGE: runs on the
 * handler's owner thread (asserted below) and invokes the user's change
 * handler with the display lock temporarily dropped so the user callback
 * can safely re-enter TDM APIs. */
240 tdm_output_thread_cb_change(tdm_private_display *private_display, void *object, tdm_thread_cb_base *cb_base, void *user_data)
242 tdm_private_output *private_output = object;
243 tdm_thread_cb_output_change *output_change = (tdm_thread_cb_output_change *)cb_base;
244 tdm_private_output_change_handler *change_handler = user_data;
246 TDM_RETURN_IF_FAIL(TDM_MUTEX_IS_LOCKED());
/* must be delivered on the thread that registered the handler */
248 assert(change_handler->owner_tid == syscall(SYS_gettid));
250 _pthread_mutex_unlock(&private_display->lock);
251 change_handler->func(private_output, output_change->type, output_change->value, change_handler->user_data);
252 _pthread_mutex_lock(&private_display->lock);
/* Builds a synchronous (base.sync = 1) TDM_THREAD_CB_OUTPUT_CHANGE message
 * carrying `type` and `value` and dispatches it through the thread-callback
 * machinery to every registered change handler. Always returns
 * TDM_ERROR_NONE; a dispatch failure is only warned about. */
256 _tdm_output_call_thread_cb_change(tdm_private_output *private_output, tdm_output_change_type type, tdm_value value)
258 tdm_thread_cb_output_change output_change;
261 memset(&output_change, 0, sizeof output_change);
262 output_change.base.type = TDM_THREAD_CB_OUTPUT_CHANGE;
263 output_change.base.length = sizeof output_change;
264 output_change.base.object_stamp = private_output->stamp;
265 output_change.base.data = NULL;
266 output_change.base.sync = 1;
267 output_change.type = type;
268 output_change.value = value;
270 ret = tdm_thread_cb_call(private_output, &output_change.base);
271 TDM_WARNING_IF_FAIL(ret == TDM_ERROR_NONE);
273 return TDM_ERROR_NONE;
/* Backend callback for a connection-status change: refreshes the cached
 * output data, then notifies change handlers with
 * TDM_OUTPUT_CHANGE_CONNECTION. NOTE(review): the line initializing
 * `value` from `status` is missing from this extraction. */
277 tdm_output_cb_status(tdm_output *output_backend, tdm_output_conn_status status, void *user_data)
279 tdm_private_output *private_output = user_data;
283 TDM_INFO("output(%d) main %s", private_output->pipe, tdm_status_str(status));
/* re-query backend caps/modes for the (dis)connected output */
285 _tdm_output_update(output_backend, user_data);
289 ret = _tdm_output_call_thread_cb_change(private_output, TDM_OUTPUT_CHANGE_CONNECTION, value);
290 TDM_WARNING_IF_FAIL(ret == TDM_ERROR_NONE);
/* Backend callback for an (async) DPMS change: records the new DPMS value,
 * clears the waiting flag, and notifies change handlers with
 * TDM_OUTPUT_CHANGE_DPMS. NOTE(review): the line initializing `value` from
 * `dpms` is missing from this extraction. */
294 tdm_output_cb_dpms(tdm_output *output_backend, tdm_output_dpms dpms, void *user_data)
296 tdm_private_output *private_output = user_data;
300 TDM_INFO("output(%d) %s", private_output->pipe, tdm_status_str(dpms));
302 private_output->current_dpms_value = dpms;
/* async DPMS request completed */
303 private_output->waiting_dpms_change = 0;
304 TDM_INFO("output(%d) dpms async '%s' done", private_output->pipe, tdm_dpms_str(dpms));
308 ret = _tdm_output_call_thread_cb_change(private_output, TDM_OUTPUT_CHANGE_DPMS, value);
309 TDM_WARNING_IF_FAIL(ret == TDM_ERROR_NONE);
/* Registers a change handler (connection/DPMS notifications) for this
 * output. Rejects duplicates, allocates the handler record, hooks it into
 * the thread-callback machinery, records the registering thread id (so the
 * callback can later be delivered on the same thread), and appends it to
 * the output's change_handler_list. All under the display lock. */
313 tdm_output_add_change_handler(tdm_output *output,
314 tdm_output_change_handler func,
317 tdm_private_output_change_handler *change_handler = NULL;
320 TDM_RETURN_VAL_IF_FAIL(func != NULL, TDM_ERROR_INVALID_PARAMETER);
322 _pthread_mutex_lock(&private_display->lock);
/* refuse to register the same func/user_data twice */
324 LIST_FOR_EACH_ENTRY(change_handler, &private_output->change_handler_list, link) {
325 if (change_handler->func == func || change_handler->user_data == user_data) {
326 TDM_ERR("can't add twice");
327 _pthread_mutex_unlock(&private_display->lock);
328 return TDM_ERROR_BAD_REQUEST;
332 change_handler = calloc(1, sizeof(tdm_private_output_change_handler));
333 if (!change_handler) {
334 /* LCOV_EXCL_START */
335 TDM_ERR("failed: alloc memory");
336 _pthread_mutex_unlock(&private_display->lock);
337 return TDM_ERROR_OUT_OF_MEMORY;
/* route TDM_THREAD_CB_OUTPUT_CHANGE events to this handler */
341 ret = tdm_thread_cb_add(private_output, TDM_THREAD_CB_OUTPUT_CHANGE, NULL, tdm_output_thread_cb_change, change_handler);
342 if (ret != TDM_ERROR_NONE) {
343 /* LCOV_EXCL_START */
344 TDM_ERR("tdm_thread_cb_add failed");
345 free(change_handler);
346 _pthread_mutex_unlock(&private_display->lock);
347 return TDM_ERROR_OPERATION_FAILED;
351 change_handler->private_output = private_output;
352 change_handler->func = func;
353 change_handler->user_data = user_data;
/* remember the registering thread for same-thread delivery */
354 change_handler->owner_tid = syscall(SYS_gettid);
356 LIST_ADDTAIL(&change_handler->link, &private_output->change_handler_list);
358 _pthread_mutex_unlock(&private_display->lock);
/* Unregisters a change handler previously added with
 * tdm_output_add_change_handler, matching on both func and user_data.
 * Removes the thread-callback routing, unlinks and frees the record.
 * Uses the safe list iterator because the entry is deleted mid-walk. */
364 tdm_output_remove_change_handler(tdm_output *output,
365 tdm_output_change_handler func,
368 tdm_private_display *private_display;
369 tdm_private_output *private_output;
370 tdm_private_output_change_handler *change_handler = NULL, *hh = NULL;
372 TDM_RETURN_IF_FAIL(tdm_output_is_valid(output));
373 TDM_RETURN_IF_FAIL(func != NULL);
375 private_output = (tdm_private_output*)output;
376 private_display = private_output->private_display;
378 _pthread_mutex_lock(&private_display->lock);
380 LIST_FOR_EACH_ENTRY_SAFE(change_handler, hh, &private_output->change_handler_list, link) {
/* skip entries that do not match both func and user_data */
381 if (change_handler->func != func || change_handler->user_data != user_data)
384 tdm_thread_cb_remove(private_output, TDM_THREAD_CB_OUTPUT_CHANGE, NULL, tdm_output_thread_cb_change, change_handler);
386 LIST_DEL(&change_handler->link);
387 free(change_handler);
389 _pthread_mutex_unlock(&private_display->lock);
394 _pthread_mutex_unlock(&private_display->lock);
/* Returns the cached connector type of this output under the display
 * lock. */
398 tdm_output_get_output_type(tdm_output *output, tdm_output_type *type)
402 TDM_RETURN_VAL_IF_FAIL(type != NULL, TDM_ERROR_INVALID_PARAMETER);
404 _pthread_mutex_lock(&private_display->lock);
406 *type = private_output->caps.type;
408 _pthread_mutex_unlock(&private_display->lock);
/* Counts the layers of this output by walking layer_list. Rejects outputs
 * with the HWC capability (layer APIs do not apply to HWC outputs; HWC
 * window functions must be used instead). NOTE(review): the line that
 * increments the count inside the loop is missing from this extraction. */
414 tdm_output_get_layer_count(tdm_output *output, int *count)
416 tdm_private_layer *private_layer = NULL;
420 TDM_RETURN_VAL_IF_FAIL(count != NULL, TDM_ERROR_INVALID_PARAMETER);
422 _pthread_mutex_lock(&private_display->lock);
424 if (private_output->caps.capabilities & TDM_OUTPUT_CAPABILITY_HWC) {
425 TDM_ERR("output(%p) support HWC. Use HWC functions", private_output);
427 _pthread_mutex_unlock(&private_display->lock);
428 return TDM_ERROR_BAD_REQUEST;
432 LIST_FOR_EACH_ENTRY(private_layer, &private_output->layer_list, link)
435 _pthread_mutex_unlock(&private_display->lock);
436 return TDM_ERROR_NONE;
439 _pthread_mutex_unlock(&private_display->lock);
/* Returns the layer with the given index, or NULL (with *error set) when
 * the output is an HWC output or — on lines not visible here — when no
 * layer matches. Lock is always released before returning. */
446 tdm_output_get_layer(tdm_output *output, int index, tdm_error *error)
448 tdm_private_layer *private_layer = NULL;
450 OUTPUT_FUNC_ENTRY_ERROR();
452 _pthread_mutex_lock(&private_display->lock);
455 *error = TDM_ERROR_NONE;
/* layer APIs are not available on HWC-capable outputs */
457 if (private_output->caps.capabilities & TDM_OUTPUT_CAPABILITY_HWC) {
458 _pthread_mutex_unlock(&private_display->lock);
459 TDM_ERR("output(%p) support HWC. Use HWC functions", private_output);
461 *error = TDM_ERROR_BAD_REQUEST;
465 LIST_FOR_EACH_ENTRY(private_layer, &private_output->layer_list, link) {
466 if (private_layer->index == index) {
467 _pthread_mutex_unlock(&private_display->lock);
468 return private_layer;
472 _pthread_mutex_unlock(&private_display->lock);
/* Exposes the cached backend property table (pointer + count) for this
 * output under the display lock; the returned array is owned by TDM. */
478 tdm_output_get_available_properties(tdm_output *output, const tdm_prop **props,
483 TDM_RETURN_VAL_IF_FAIL(props != NULL, TDM_ERROR_INVALID_PARAMETER);
484 TDM_RETURN_VAL_IF_FAIL(count != NULL, TDM_ERROR_INVALID_PARAMETER);
486 _pthread_mutex_lock(&private_display->lock);
488 *props = (const tdm_prop *)private_output->caps.props;
489 *count = private_output->caps.prop_count;
491 _pthread_mutex_unlock(&private_display->lock);
/* Exposes the cached display mode table (pointer + count) for this output
 * under the display lock; the returned array is owned by TDM. */
497 tdm_output_get_available_modes(tdm_output *output,
498 const tdm_output_mode **modes, int *count)
502 TDM_RETURN_VAL_IF_FAIL(modes != NULL, TDM_ERROR_INVALID_PARAMETER);
503 TDM_RETURN_VAL_IF_FAIL(count != NULL, TDM_ERROR_INVALID_PARAMETER);
505 _pthread_mutex_lock(&private_display->lock);
507 *modes = (const tdm_output_mode *)private_output->caps.modes;
508 *count = private_output->caps.mode_count;
510 _pthread_mutex_unlock(&private_display->lock);
/* Returns the size constraints (min/max width/height, preferred align)
 * from the cached caps, filtered through TDM_FRONT_VALUE. Out-parameters
 * are presumably optional (per-pointer NULL checks fall on lines missing
 * from this extraction). */
516 tdm_output_get_available_size(tdm_output *output, int *min_w, int *min_h,
517 int *max_w, int *max_h, int *preferred_align)
521 _pthread_mutex_lock(&private_display->lock);
524 *min_w = TDM_FRONT_VALUE(private_output->caps.min_w);
526 *min_h = TDM_FRONT_VALUE(private_output->caps.min_h);
528 *max_w = TDM_FRONT_VALUE(private_output->caps.max_w);
530 *max_h = TDM_FRONT_VALUE(private_output->caps.max_h);
532 *preferred_align = TDM_FRONT_VALUE(private_output->caps.preferred_align);
534 _pthread_mutex_unlock(&private_display->lock);
/* Like tdm_output_get_available_size but for hardware-cursor constraints.
 * Requires backend module ABI >= 1.5 (cursor caps were added then);
 * returns TDM_ERROR_BAD_REQUEST for older backends. */
540 tdm_output_get_cursor_available_size(tdm_output *output, int *min_w, int *min_h,
541 int *max_w, int *max_h, int *preferred_align)
545 _pthread_mutex_lock(&private_display->lock);
547 if (!tdm_backend_check_module_abi(private_output->private_backend, 1, 5)) {
548 _pthread_mutex_unlock(&private_display->lock);
549 return TDM_ERROR_BAD_REQUEST;
553 *min_w = TDM_FRONT_VALUE(private_output->caps.cursor_min_w);
555 *min_h = TDM_FRONT_VALUE(private_output->caps.cursor_min_h);
557 *max_w = TDM_FRONT_VALUE(private_output->caps.cursor_max_w);
559 *max_h = TDM_FRONT_VALUE(private_output->caps.cursor_max_h);
561 *preferred_align = TDM_FRONT_VALUE(private_output->caps.cursor_preferred_align);
563 _pthread_mutex_unlock(&private_display->lock);
/* Returns the physical dimensions of the display in millimeters from the
 * cached caps, under the display lock. */
569 tdm_output_get_physical_size(tdm_output *output, unsigned int *mmWidth,
570 unsigned int *mmHeight)
574 _pthread_mutex_lock(&private_display->lock);
577 *mmWidth = private_output->caps.mmWidth;
579 *mmHeight = private_output->caps.mmHeight;
581 _pthread_mutex_unlock(&private_display->lock);
/* Returns the cached subpixel layout value of this output under the
 * display lock. */
587 tdm_output_get_subpixel(tdm_output *output, unsigned int *subpixel)
590 TDM_RETURN_VAL_IF_FAIL(subpixel != NULL, TDM_ERROR_INVALID_PARAMETER);
592 _pthread_mutex_lock(&private_display->lock);
594 *subpixel = private_output->caps.subpixel;
596 _pthread_mutex_unlock(&private_display->lock);
/* Returns this output's pipe index (its position among the display's
 * outputs) under the display lock. */
602 tdm_output_get_pipe(tdm_output *output, unsigned int *pipe)
605 TDM_RETURN_VAL_IF_FAIL(pipe != NULL, TDM_ERROR_INVALID_PARAMETER);
607 _pthread_mutex_lock(&private_display->lock);
609 *pipe = private_output->pipe;
611 _pthread_mutex_unlock(&private_display->lock);
/* Finds the layer carrying TDM_LAYER_CAPABILITY_PRIMARY and returns its
 * index; the behavior when no primary layer exists falls on lines missing
 * from this extraction. */
617 tdm_output_get_primary_index(tdm_output *output, int *index)
619 tdm_private_layer *private_layer = NULL;
622 TDM_RETURN_VAL_IF_FAIL(index != NULL, TDM_ERROR_INVALID_PARAMETER);
624 _pthread_mutex_lock(&private_display->lock);
626 LIST_FOR_EACH_ENTRY(private_layer, &private_output->layer_list, link) {
627 if (private_layer->caps.capabilities & TDM_LAYER_CAPABILITY_PRIMARY) {
628 *index = private_layer->index;
633 _pthread_mutex_unlock(&private_display->lock);
/* Forwards a property write (id/value) to the backend's
 * output_set_property hook; returns TDM_ERROR_NOT_IMPLEMENTED when the
 * backend does not provide the hook. Backend result is returned in `ret`
 * (the trailing `value` argument / return line falls outside the visible
 * lines). */
639 tdm_output_set_property(tdm_output *output, unsigned int id, tdm_value value)
641 tdm_private_backend *private_backend;
642 tdm_func_output *func_output;
645 _pthread_mutex_lock(&private_display->lock);
647 private_backend = private_output->private_backend;
648 func_output = &private_backend->func_output;
650 if (!func_output->output_set_property) {
651 /* LCOV_EXCL_START */
652 _pthread_mutex_unlock(&private_display->lock);
653 TDM_WRN("not implemented!!");
654 return TDM_ERROR_NOT_IMPLEMENTED;
658 ret = func_output->output_set_property(private_output->output_backend, id,
661 _pthread_mutex_unlock(&private_display->lock);
/* Forwards a property read (id) to the backend's output_get_property hook;
 * returns TDM_ERROR_NOT_IMPLEMENTED when the backend does not provide the
 * hook. Backend result is returned in `ret` (the trailing `value`
 * argument / return line falls outside the visible lines). */
667 tdm_output_get_property(tdm_output *output, unsigned int id, tdm_value *value)
669 tdm_private_backend *private_backend;
670 tdm_func_output *func_output;
673 TDM_RETURN_VAL_IF_FAIL(value != NULL, TDM_ERROR_INVALID_PARAMETER);
675 _pthread_mutex_lock(&private_display->lock);
677 private_backend = private_output->private_backend;
678 func_output = &private_backend->func_output;
680 if (!func_output->output_get_property) {
681 /* LCOV_EXCL_START */
682 _pthread_mutex_unlock(&private_display->lock);
683 TDM_WRN("not implemented!!");
684 return TDM_ERROR_NOT_IMPLEMENTED;
688 ret = func_output->output_get_property(private_output->output_backend, id,
691 _pthread_mutex_unlock(&private_display->lock);
/* Thread-callback for TDM_THREAD_CB_OUTPUT_VBLANK, delivered on the
 * handler's owner thread (asserted below). Disarms the vblank timeout
 * timer, detaches the triggering handler from the thread-callback routing,
 * moves every handler on this output with the same interval/sync/owner_tid
 * onto a local clone_list, and invokes their user callbacks with the
 * display lock dropped. NOTE(review): several lines (declarations of
 * `interval`/`sync`, per-handler free, loop closers) are missing from this
 * extraction. */
697 _tdm_output_thread_cb_vblank(tdm_private_display *private_display, void *object, tdm_thread_cb_base *cb_base, void *user_data)
699 tdm_thread_cb_output_vblank *output_vblank = (tdm_thread_cb_output_vblank *)cb_base;
700 tdm_private_output_vblank_handler *vblank_handler = output_vblank->base.data;
701 tdm_private_output_vblank_handler *v = NULL, *vv = NULL;
702 tdm_private_output *private_output = object;
703 struct list_head clone_list;
705 pid_t tid = syscall(SYS_gettid);
707 TDM_RETURN_IF_FAIL(TDM_MUTEX_IS_LOCKED());
/* must run on the thread that requested the vblank */
709 assert(vblank_handler->owner_tid == tid);
711 vblank_handler->sent_to_frontend = 0;
/* vblank arrived: disarm the timeout watchdog */
713 _tdm_output_vblank_timeout_update(private_output, 0);
715 tdm_thread_cb_remove(private_output, TDM_THREAD_CB_OUTPUT_VBLANK, vblank_handler, _tdm_output_thread_cb_vblank, NULL);
717 interval = vblank_handler->interval;
718 sync = vblank_handler->sync;
720 LIST_INITHEAD(&clone_list);
/* batch all handlers matching this vblank request onto clone_list so the
 * main list can be mutated safely while user callbacks run */
722 LIST_FOR_EACH_ENTRY_SAFE(v, vv, &private_output->vblank_handler_list, link) {
723 if (v->interval != interval || v->sync != sync || v->owner_tid != tid)
727 LIST_ADDTAIL(&v->link, &clone_list);
730 if (tdm_debug_module & TDM_DEBUG_COMMIT)
731 TDM_INFO("----------------------------------------- output(%d) got vblank", private_output->pipe);
/* drop the lock: user callbacks may re-enter TDM APIs */
733 _pthread_mutex_unlock(&private_display->lock);
734 LIST_FOR_EACH_ENTRY_SAFE(v, vv, &clone_list, link) {
735 if (tdm_debug_module & TDM_DEBUG_COMMIT)
736 TDM_INFO("handler(%p)", v);
741 v->func(v->private_output,
742 output_vblank->sequence,
743 output_vblank->tv_sec,
744 output_vblank->tv_usec,
749 _pthread_mutex_lock(&private_display->lock);
751 if (tdm_debug_module & TDM_DEBUG_COMMIT)
752 TDM_INFO("-----------------------------------------...");
/* Backend vblank callback (may fire on the TDM event thread): wraps the
 * vblank data in an asynchronous (base.sync = 0) TDM_THREAD_CB_OUTPUT_VBLANK
 * message and posts it so _tdm_output_thread_cb_vblank runs on the
 * handler's owner thread. */
756 _tdm_output_cb_vblank(tdm_output *output_backend, unsigned int sequence,
757 unsigned int tv_sec, unsigned int tv_usec, void *user_data)
759 tdm_private_output_vblank_handler *vblank_handler = user_data;
760 tdm_thread_cb_output_vblank output_vblank;
763 memset(&output_vblank, 0, sizeof output_vblank);
764 output_vblank.base.type = TDM_THREAD_CB_OUTPUT_VBLANK;
765 output_vblank.base.length = sizeof output_vblank;
766 output_vblank.base.object_stamp = vblank_handler->private_output->stamp;
767 output_vblank.base.data = vblank_handler;
768 output_vblank.base.sync = 0;
769 output_vblank.sequence = sequence;
770 output_vblank.tv_sec = tv_sec;
771 output_vblank.tv_usec = tv_usec;
/* flag used by the timeout dump to show delivery state */
773 vblank_handler->sent_to_frontend = 1;
775 ret = tdm_thread_cb_call(vblank_handler->private_output, &output_vblank.base);
776 TDM_WARNING_IF_FAIL(ret == TDM_ERROR_NONE);
/* Thread-callback for TDM_THREAD_CB_OUTPUT_COMMIT, delivered on the commit
 * requester's thread (asserted below). Detaches the one-shot commit
 * handler, marks committed buffers done for output-style commits, invokes
 * the user commit callback with the display lock dropped, then frees the
 * handler record. */
780 _tdm_output_thread_cb_commit(tdm_private_display *private_display, void *object, tdm_thread_cb_base *cb_base, void *user_data)
782 tdm_thread_cb_output_commit *output_commit = (tdm_thread_cb_output_commit *)cb_base;
783 tdm_private_output_commit_handler *output_commit_handler = output_commit->base.data;
784 tdm_private_output *private_output = object;
785 tdm_private_layer *private_layer = NULL;
787 TDM_RETURN_IF_FAIL(TDM_MUTEX_IS_LOCKED());
/* commit may have been issued without a handler record */
789 if (!output_commit_handler)
792 assert(output_commit_handler->owner_tid == syscall(SYS_gettid));
/* the commit handler is one-shot: unhook and unlink it now */
794 tdm_thread_cb_remove(private_output, TDM_THREAD_CB_OUTPUT_COMMIT, output_commit_handler, _tdm_output_thread_cb_commit, NULL);
796 LIST_DEL(&output_commit_handler->link);
798 if (tdm_debug_module & TDM_DEBUG_COMMIT) {
799 TDM_INFO("----------------------------------------- output(%d) committed", private_output->pipe);
800 TDM_INFO("handler(%p)", output_commit_handler);
803 if (private_output->commit_type == TDM_COMMIT_TYPE_OUTPUT) {
804 /* In case of layer commit, the below will be handled in the layer commit callback */
805 LIST_FOR_EACH_ENTRY(private_layer, &private_output->layer_list, link) {
806 if (private_layer->committed_buffer)
807 tdm_layer_committed(private_layer, &private_layer->committed_buffer);
/* drop the lock while running the user's commit callback */
811 if (output_commit_handler->func) {
812 _pthread_mutex_unlock(&private_display->lock);
813 output_commit_handler->func(private_output,
814 output_commit->sequence,
815 output_commit->tv_sec,
816 output_commit->tv_usec,
817 output_commit_handler->user_data);
818 _pthread_mutex_lock(&private_display->lock);
821 free(output_commit_handler);
823 if (tdm_debug_module & TDM_DEBUG_COMMIT)
824 TDM_INFO("-----------------------------------------...");
/* Backend commit-done callback: resolves the private output (from the
 * handler record, or by backend lookup when user_data is NULL), wraps the
 * completion data in an asynchronous TDM_THREAD_CB_OUTPUT_COMMIT message
 * and posts it for _tdm_output_thread_cb_commit. */
828 _tdm_output_cb_commit(tdm_output *output_backend, unsigned int sequence,
829 unsigned int tv_sec, unsigned int tv_usec, void *user_data)
831 tdm_private_output_commit_handler *output_commit_handler = user_data;
832 tdm_private_output *private_output;
833 tdm_thread_cb_output_commit output_commit;
836 if (output_commit_handler)
837 private_output = output_commit_handler->private_output;
/* no handler record: look the output up from the backend object */
839 private_output = tdm_display_find_private_output(tdm_display_get(), output_backend);
841 memset(&output_commit, 0, sizeof output_commit);
842 output_commit.base.type = TDM_THREAD_CB_OUTPUT_COMMIT;
843 output_commit.base.length = sizeof output_commit;
844 output_commit.base.object_stamp = private_output->stamp;
845 output_commit.base.data = output_commit_handler;
846 output_commit.base.sync = 0;
847 output_commit.sequence = sequence;
848 output_commit.tv_sec = tv_sec;
849 output_commit.tv_usec = tv_usec;
851 ret = tdm_thread_cb_call(private_output, &output_commit.base);
852 TDM_WARNING_IF_FAIL(ret == TDM_ERROR_NONE);
855 /* add_front: To distinguish between the user vblank handlers and the layer
856 * commit vblank handlers. The layer commit handlers will be called
857 * before calling the user vblank handlers.
/* Core vblank-wait implementation shared by tdm_output_wait_vblank and
 * tdm_output_wait_vblank_add_front (caller holds the display lock).
 * Registers the backend vblank handler once per output, allocates a
 * handler record, links it at the head (add_front) or tail of
 * vblank_handler_list, hooks the thread callback, asks the backend to wait
 * for the vblank, and arms a 1000 ms timeout watchdog. A matching pending
 * request (same interval/sync/owner_tid) lets the backend call be skipped
 * (`skip_request`; the lines applying it are missing from this
 * extraction). */
860 _tdm_output_wait_vblank(tdm_private_output *private_output, int interval, int sync,
861 tdm_output_vblank_handler func, void *user_data,
862 unsigned int add_front)
864 tdm_private_backend *private_backend;
865 tdm_func_output *func_output;
866 tdm_private_output_vblank_handler *vblank_handler = NULL, *v = NULL;
867 unsigned int skip_request = 0;
868 pid_t tid = syscall(SYS_gettid);
869 tdm_error ret = TDM_ERROR_NONE;
871 private_backend = private_output->private_backend;
872 func_output = &private_backend->func_output;
874 /* interval SHOULD be at least 1 */
878 if (!func_output->output_wait_vblank) {
879 /* LCOV_EXCL_START */
880 TDM_WRN("not implemented!!");
881 return TDM_ERROR_NOT_IMPLEMENTED;
/* register the backend vblank callback only once per output */
885 if (!private_output->regist_vblank_cb) {
886 private_output->regist_vblank_cb = 1;
887 ret = func_output->output_set_vblank_handler(private_output->output_backend,
888 _tdm_output_cb_vblank);
891 vblank_handler = calloc(1, sizeof(tdm_private_output_vblank_handler));
892 if (!vblank_handler) {
893 /* LCOV_EXCL_START */
894 TDM_ERR("failed: alloc memory");
895 return TDM_ERROR_OUT_OF_MEMORY;
899 if (tdm_debug_module & TDM_DEBUG_COMMIT)
900 TDM_INFO("output(%d) wait_vblank: handler(%p)", private_output->pipe, vblank_handler);
/* detect an equivalent request already in flight */
902 LIST_FOR_EACH_ENTRY(v, &private_output->vblank_handler_list, link) {
903 if (v->interval == interval && v->sync == sync && v->owner_tid == tid) {
/* add_front: head insertion so layer-commit handlers run first */
910 LIST_ADD(&vblank_handler->link, &private_output->vblank_handler_list);
912 LIST_ADDTAIL(&vblank_handler->link, &private_output->vblank_handler_list);
914 vblank_handler->private_output = private_output;
915 vblank_handler->interval = interval;
916 vblank_handler->sync = sync;
917 vblank_handler->func = func;
918 vblank_handler->user_data = user_data;
919 vblank_handler->owner_tid = tid;
921 /* If there is the previous request, we can skip to call output_wait_vblank() */
923 ret = tdm_thread_cb_add(private_output, TDM_THREAD_CB_OUTPUT_VBLANK, vblank_handler, _tdm_output_thread_cb_vblank, NULL);
924 if (ret != TDM_ERROR_NONE) {
925 TDM_ERR("tdm_thread_cb_add failed");
929 ret = func_output->output_wait_vblank(private_output->output_backend, interval,
930 sync, vblank_handler);
931 TDM_GOTO_IF_FAIL(ret == TDM_ERROR_NONE, wait_failed);
/* watchdog: log loudly if the vblank does not arrive within 1 s */
933 _tdm_output_vblank_timeout_update(private_output, 1000);
935 if (tdm_debug_module & TDM_DEBUG_COMMIT)
936 TDM_INFO("output(%d) backend wait_vblank", private_output->pipe);
942 /* LCOV_EXCL_START */
/* error path: undo thread-cb registration and free the handler */
943 if (vblank_handler) {
944 tdm_thread_cb_remove(private_output, TDM_THREAD_CB_OUTPUT_VBLANK, vblank_handler, _tdm_output_thread_cb_vblank, NULL);
945 LIST_DEL(&vblank_handler->link);
946 free(vblank_handler);
/* Public API: waits for a vblank on this output and calls `func` when it
 * arrives. Fails with TDM_ERROR_DPMS_OFF when DPMS currently suppresses
 * vsync; otherwise delegates to _tdm_output_wait_vblank with
 * add_front = 0 (handler appended after pending layer-commit handlers). */
953 tdm_output_wait_vblank(tdm_output *output, int interval, int sync,
954 tdm_output_vblank_handler func, void *user_data)
958 _pthread_mutex_lock(&private_display->lock);
960 if (TDM_OUTPUT_DPMS_VSYNC_IS_OFF(private_output->current_dpms_value)) {
961 TDM_WRN("output(%d) dpms: %s", private_output->pipe,
962 tdm_dpms_str(private_output->current_dpms_value));
963 _pthread_mutex_unlock(&private_display->lock);
964 return TDM_ERROR_DPMS_OFF;
967 ret = _tdm_output_wait_vblank(private_output, interval, sync, func, user_data, 0);
969 _pthread_mutex_unlock(&private_display->lock);
974 /* LCOV_EXCL_START */
/* Internal variant of tdm_output_wait_vblank with add_front = 1: the
 * handler is inserted at the head of the list so layer-commit vblank
 * handlers run before user vblank handlers (see the comment above
 * _tdm_output_wait_vblank). */
976 tdm_output_wait_vblank_add_front(tdm_output *output, int interval, int sync,
977 tdm_output_vblank_handler func, void *user_data)
981 _pthread_mutex_lock(&private_display->lock);
983 if (TDM_OUTPUT_DPMS_VSYNC_IS_OFF(private_output->current_dpms_value)) {
984 TDM_WRN("output(%d) dpms: %s", private_output->pipe,
985 tdm_dpms_str(private_output->current_dpms_value));
986 _pthread_mutex_unlock(&private_display->lock);
987 return TDM_ERROR_DPMS_OFF;
990 ret = _tdm_output_wait_vblank(private_output, interval, sync, func, user_data, 1);
992 _pthread_mutex_unlock(&private_display->lock);
/* Lock-held helper: neutralizes a pending vblank handler matching
 * func/user_data. The record is NOT freed here — only its callback fields
 * are cleared (func-clearing line is missing from this extraction); the
 * record itself is reclaimed when its vblank event fires. */
1001 tdm_private_output *private_output = (tdm_private_output*)output;
1002 tdm_private_output_vblank_handler *v = NULL;
1004 TDM_RETURN_IF_FAIL(private_output != NULL);
1005 TDM_RETURN_IF_FAIL(TDM_MUTEX_IS_LOCKED());
1007 LIST_FOR_EACH_ENTRY(v, &private_output->vblank_handler_list, link) {
1008 if (v->func == func && v->user_data == user_data) {
1009 /* only set func & user_data to NULL. It will be freed when an event occurs */
1011 v->user_data = NULL;
/* Lock-held helper: neutralizes a pending commit handler matching
 * func/user_data. Like the vblank variant, the record is not freed here —
 * only its callback fields are cleared (func-clearing line is missing from
 * this extraction); it is reclaimed when the commit event fires. */
1020 tdm_private_output *private_output = (tdm_private_output*)output;
1021 tdm_private_output_commit_handler *c = NULL;
1023 TDM_RETURN_IF_FAIL(private_output != NULL);
1024 TDM_RETURN_IF_FAIL(TDM_MUTEX_IS_LOCKED());
1026 LIST_FOR_EACH_ENTRY(c, &private_output->output_commit_handler_list, link) {
1027 if (c->func == func && c->user_data == user_data) {
1028 /* only set func & user_data to NULL. It will be freed when an event occurs */
1030 c->user_data = NULL;
/* Public API: cancels a pending vblank handler. Thin locking wrapper
 * around tdm_output_remove_vblank_handler_internal. */
1039 OUTPUT_FUNC_ENTRY();
1041 _pthread_mutex_lock(&private_display->lock);
1043 tdm_output_remove_vblank_handler_internal(output, func, user_data);
1045 _pthread_mutex_unlock(&private_display->lock);
/* Public API: cancels a pending commit handler. Thin locking wrapper
 * around tdm_output_remove_commit_handler_internal. */
1053 OUTPUT_FUNC_ENTRY();
1055 _pthread_mutex_lock(&private_display->lock);
1057 tdm_output_remove_commit_handler_internal(output, func, user_data);
1059 _pthread_mutex_unlock(&private_display->lock);
/* Core commit implementation (caller holds the display lock). When vsync
 * is active, registers the backend commit-done callback once, allocates a
 * one-shot commit handler record, hooks the thread callback and asks the
 * backend to commit. In all cases promotes each layer's waiting_buffer to
 * committed_buffer (needed later by tdm_layer_committed()). When DPMS has
 * vsync off, the backend commit is skipped and `func` is invoked directly
 * with zeroed timing. */
1065 tdm_output_commit_internal(tdm_output *output, int sync, tdm_output_commit_handler func, void *user_data)
1067 tdm_private_output *private_output;
1068 tdm_private_backend *private_backend;
1069 tdm_func_output *func_output;
1070 tdm_private_output_commit_handler *output_commit_handler = NULL;
1071 tdm_private_layer *private_layer = NULL;
1072 tdm_output_dpms dpms_value = TDM_OUTPUT_DPMS_ON;
1073 tdm_error ret = TDM_ERROR_NONE;
1075 TDM_RETURN_VAL_IF_FAIL(tdm_output_is_valid(output), TDM_ERROR_INVALID_PARAMETER);
1077 private_output = (tdm_private_output*)output;
1078 private_backend = private_output->private_backend;
1079 func_output = &private_backend->func_output;
1081 if (!func_output->output_commit) {
1082 /* LCOV_EXCL_START */
1083 TDM_WRN("not implemented!!");
1084 return TDM_ERROR_NOT_IMPLEMENTED;
1085 /* LCOV_EXCL_STOP */
1088 ret = tdm_output_get_dpms_internal(output, &dpms_value);
1089 TDM_RETURN_VAL_IF_FAIL(ret == TDM_ERROR_NONE, ret);
/* real backend commit only when vsync is active */
1091 if (!TDM_OUTPUT_DPMS_VSYNC_IS_OFF(dpms_value)) {
/* register the backend commit-done callback once per output */
1093 if (!private_output->regist_commit_cb) {
1094 private_output->regist_commit_cb = 1;
1095 ret = func_output->output_set_commit_handler(private_output->output_backend, _tdm_output_cb_commit);
1096 TDM_GOTO_IF_FAIL(ret == TDM_ERROR_NONE, commit_failed);
1099 output_commit_handler = calloc(1, sizeof(tdm_private_output_commit_handler));
1100 if (!output_commit_handler) {
1101 /* LCOV_EXCL_START */
1102 TDM_ERR("failed: alloc memory");
1103 return TDM_ERROR_OUT_OF_MEMORY;
1104 /* LCOV_EXCL_STOP */
1107 ret = tdm_thread_cb_add(private_output, TDM_THREAD_CB_OUTPUT_COMMIT, output_commit_handler, _tdm_output_thread_cb_commit, NULL);
1108 if (ret != TDM_ERROR_NONE) {
1109 TDM_ERR("tdm_thread_cb_add failed");
1110 free(output_commit_handler);
1114 LIST_ADDTAIL(&output_commit_handler->link, &private_output->output_commit_handler_list);
1115 output_commit_handler->private_output = private_output;
1116 output_commit_handler->func = func;
1117 output_commit_handler->user_data = user_data;
1118 output_commit_handler->owner_tid = syscall(SYS_gettid);
1121 ret = func_output->output_commit(private_output->output_backend, sync,
1122 output_commit_handler);
1123 TDM_GOTO_IF_FAIL(ret == TDM_ERROR_NONE, commit_failed);
1125 if (tdm_debug_module & TDM_DEBUG_COMMIT)
1126 TDM_INFO("output(%d) backend commit: handle(%p) func(%p) user_data(%p)",
1127 private_output->pipe, output_commit_handler, func, user_data);
1130 /* Even if DPMS is off, committed_buffer should be changed because it will be referred
1131 * for tdm_layer_committed() function.
1133 LIST_FOR_EACH_ENTRY(private_layer, &private_output->layer_list, link) {
1134 if (!private_layer->waiting_buffer)
1137 private_layer->committed_buffer = private_layer->waiting_buffer;
1138 private_layer->waiting_buffer = NULL;
1139 if (tdm_debug_module & TDM_DEBUG_BUFFER)
1140 TDM_INFO("layer(%p) waiting_buffer(%p) committed_buffer(%p)",
1141 private_layer, private_layer->waiting_buffer,
1142 private_layer->committed_buffer->buffer);
/* DPMS off: no vblank will arrive, so complete the commit synchronously */
1145 if (TDM_OUTPUT_DPMS_VSYNC_IS_OFF(dpms_value)) {
1146 TDM_WRN("dpms %s. Directly call commit handler instead of commit.", tdm_dpms_str(dpms_value));
1148 func(output, 0, 0, 0, user_data);
1154 /* LCOV_EXCL_START */
/* error path: undo thread-cb registration and free the handler */
1155 if (output_commit_handler) {
1156 tdm_thread_cb_remove(private_output, TDM_THREAD_CB_OUTPUT_COMMIT, output_commit_handler, _tdm_output_thread_cb_commit, NULL);
1157 LIST_DEL(&output_commit_handler->link);
1158 free(output_commit_handler);
1161 /* LCOV_EXCL_STOP */
/**
 * Commit the pending changes of all layers of @output to the screen at once.
 * Fails if the output is already driven by per-layer commits, if
 * commit-per-vblank mode is enabled, or if DPMS is (vsync-)off.
 * NOTE(review): the return-type line and the trailing "void *user_data)"
 * parameter line appear to be missing from this extract.
 */
tdm_output_commit(tdm_output *output, int sync, tdm_output_commit_handler func,
	tdm_private_layer *private_layer = NULL;

	OUTPUT_FUNC_ENTRY(); /* declares private_display/private_output/ret and validates output */

	_pthread_mutex_lock(&private_display->lock);

	/* the first commit call fixes the commit style of this output;
	 * mixing tdm_output_commit() with tdm_layer_commit() is rejected */
	if (private_output->commit_type == TDM_COMMIT_TYPE_NONE)
		private_output->commit_type = TDM_COMMIT_TYPE_OUTPUT;
	else if (private_output->commit_type == TDM_COMMIT_TYPE_LAYER) {
		TDM_ERR("Can't supported. Use tdm_layer_commit");
		_pthread_mutex_unlock(&private_display->lock);
		return TDM_ERROR_BAD_REQUEST;

	/* commit-per-vblank mode only works with per-layer commits */
	if (private_output->commit_per_vblank) {
		TDM_ERR("Use tdm_layer_commit");
		_pthread_mutex_unlock(&private_display->lock);
		return TDM_ERROR_BAD_REQUEST;

	/* committing while DPMS is off cannot produce a vblank/commit event */
	if (TDM_OUTPUT_DPMS_VSYNC_IS_OFF(private_output->current_dpms_value)) {
		TDM_ERR("output(%d) dpms: %s", private_output->pipe,
			tdm_dpms_str(private_output->current_dpms_value));
		_pthread_mutex_unlock(&private_display->lock);
		return TDM_ERROR_DPMS_OFF;

	if (tdm_debug_module & TDM_DEBUG_COMMIT)
		TDM_INFO("output(%d) commit", private_output->pipe);

	/* apply the pending data of all layers */
	LIST_FOR_EACH_ENTRY(private_layer, &private_output->layer_list, link) {
		tdm_layer_commit_pending_data(private_layer);

	ret = tdm_output_commit_internal(output, sync, func, user_data);

	_pthread_mutex_unlock(&private_display->lock);
/**
 * Set the display mode of @output via the backend's output_set_mode().
 * On success the caller-supplied mode pointer is cached as current_mode and
 * need_set_target_info is raised so the HWC target-buffer layer geometry is
 * recomputed for the new resolution.
 */
tdm_output_set_mode(tdm_output *output, const tdm_output_mode *mode)
	tdm_private_backend *private_backend;
	tdm_func_output *func_output;

	OUTPUT_FUNC_ENTRY();

	TDM_RETURN_VAL_IF_FAIL(mode != NULL, TDM_ERROR_INVALID_PARAMETER);

	_pthread_mutex_lock(&private_display->lock);

	private_backend = private_output->private_backend;
	func_output = &private_backend->func_output;

	if (!func_output->output_set_mode) {
		/* LCOV_EXCL_START */
		_pthread_mutex_unlock(&private_display->lock);
		TDM_WRN("not implemented!!");
		return TDM_ERROR_NOT_IMPLEMENTED;
		/* LCOV_EXCL_STOP */

	ret = func_output->output_set_mode(private_output->output_backend, mode);
	if (ret == TDM_ERROR_NONE) {
		/* NOTE(review): @mode is cached by pointer — it must outlive the
		 * output; presumably it points into the backend's mode array.
		 * TODO confirm against callers. */
		private_output->current_mode = mode;
		private_output->need_set_target_info = 1;
		TDM_INFO("mode: %dx%d %dhz", mode->hdisplay, mode->vdisplay, mode->vrefresh);

	_pthread_mutex_unlock(&private_display->lock);
1245 tdm_output_get_mode(tdm_output *output, const tdm_output_mode **mode)
1247 OUTPUT_FUNC_ENTRY();
1249 TDM_RETURN_VAL_IF_FAIL(mode != NULL, TDM_ERROR_INVALID_PARAMETER);
1251 _pthread_mutex_lock(&private_display->lock);
1253 *mode = private_output->current_mode;
1255 _pthread_mutex_unlock(&private_display->lock);
/**
 * Change the DPMS state of @output synchronously.
 * Extended DPMS values (numerically above TDM_OUTPUT_DPMS_OFF) must leave the
 * low 4 bits clear and require TDM_OUTPUT_CAPABILITY_EXTENDED_DPMS.
 * A still-pending asynchronous DPMS change blocks this call.
 */
tdm_output_set_dpms(tdm_output *output, tdm_output_dpms dpms_value)
	tdm_private_backend *private_backend;
	tdm_func_output *func_output;

	OUTPUT_FUNC_ENTRY();

	/* validate extended DPMS requests before taking the display lock */
	if (dpms_value > TDM_OUTPUT_DPMS_OFF) {
		if (dpms_value & TDM_OUTPUT_DPMS_DEFAULT_MASK) {
			TDM_ERR("Don't use the low-4bit for an extended DPMS mode: dpms_value(%x)", dpms_value);
			return TDM_ERROR_BAD_REQUEST;

		if (!(private_output->caps.capabilities & TDM_OUTPUT_CAPABILITY_EXTENDED_DPMS)) {
			TDM_ERR("output(%d) doesn't support the extended DPMS control: '%s'",
				private_output->pipe, tdm_dpms_str(dpms_value));
			return TDM_ERROR_BAD_REQUEST;

	_pthread_mutex_lock(&private_display->lock);

	/* an earlier asynchronous DPMS change must complete first */
	if (private_output->waiting_dpms_change) {
		TDM_ERR("DPMS is not changed yet. Can't be changed twice");
		_pthread_mutex_unlock(&private_display->lock);
		return TDM_ERROR_BAD_REQUEST;

	private_backend = private_output->private_backend;
	func_output = &private_backend->func_output;

	TDM_INFO("output(%d) dpms '%s'", private_output->pipe, tdm_dpms_str(dpms_value));

	if (func_output->output_set_dpms)
		ret = func_output->output_set_dpms(private_output->output_backend, dpms_value);
	/* LCOV_EXCL_START */
	/* NOTE(review): an "else" line appears elided here — a missing backend op
	 * is treated as a no-op success */
	ret = TDM_ERROR_NONE;
	TDM_WRN("not implemented!!");
	/* LCOV_EXCL_STOP */

	if (ret == TDM_ERROR_NONE) {
		if (private_output->current_dpms_value != dpms_value) {
			/* NOTE(review): a "tdm_value value;" declaration appears elided */
			private_output->current_dpms_value = dpms_value;
			value.u32 = dpms_value;
			/* notify DPMS-change listeners on their own threads */
			_tdm_output_call_thread_cb_change(private_output, TDM_OUTPUT_CHANGE_DPMS, value);
			TDM_INFO("output(%d) dpms '%s' done", private_output->pipe, tdm_dpms_str(dpms_value));
		/* failure path: re-read the real state from the backend */
		tdm_output_dpms temp = TDM_OUTPUT_DPMS_OFF;

		/* update current_dpms_value forcely */
		tdm_output_get_dpms_internal(output, &temp);

		TDM_ERR("output(%d) set_dpms failed: dpms '%s'", private_output->pipe, tdm_dpms_str(temp));

	_pthread_mutex_unlock(&private_display->lock);
/* LCOV_EXCL_START */
/**
 * Change the DPMS state of @output asynchronously.
 * Requires TDM_OUTPUT_CAPABILITY_ASYNC_DPMS. Completion is reported through
 * the backend's dpms handler (tdm_output_cb_dpms, registered lazily here);
 * until then waiting_dpms_change blocks further DPMS requests.
 */
tdm_output_set_dpms_async(tdm_output *output, tdm_output_dpms dpms_value)
	tdm_private_backend *private_backend;
	tdm_func_output *func_output;

	OUTPUT_FUNC_ENTRY();

	if (!(private_output->caps.capabilities & TDM_OUTPUT_CAPABILITY_ASYNC_DPMS)) {
		TDM_ERR("output doesn't support the asynchronous DPMS control!");
		return TDM_ERROR_BAD_REQUEST;

	/* extended DPMS values must leave the low 4 bits clear and need the
	 * extended-DPMS capability (same rules as tdm_output_set_dpms) */
	if (dpms_value > TDM_OUTPUT_DPMS_OFF) {
		if (dpms_value & TDM_OUTPUT_DPMS_DEFAULT_MASK) {
			TDM_ERR("Don't use the low-4bit for an extended DPMS mode: dpms_value(%x)", dpms_value);
			return TDM_ERROR_BAD_REQUEST;

		if (!(private_output->caps.capabilities & TDM_OUTPUT_CAPABILITY_EXTENDED_DPMS)) {
			TDM_ERR("output(%d) doesn't support the extended DPMS control: '%s'",
				private_output->pipe, tdm_dpms_str(dpms_value));
			return TDM_ERROR_BAD_REQUEST;

	_pthread_mutex_lock(&private_display->lock);

	if (private_output->waiting_dpms_change) {
		TDM_ERR("DPMS is not changed yet. Can't be changed twice");
		_pthread_mutex_unlock(&private_display->lock);
		return TDM_ERROR_BAD_REQUEST;

	private_backend = private_output->private_backend;
	func_output = &private_backend->func_output;

	/* both the async op and its completion handler must exist */
	if (!func_output->output_set_dpms_handler) {
		TDM_WRN("not implemented: output_set_dpms_handler");
		_pthread_mutex_unlock(&private_display->lock);
		return TDM_ERROR_NOT_IMPLEMENTED;

	if (!func_output->output_set_dpms_async) {
		TDM_WRN("not implemented: output_set_dpms_async");
		_pthread_mutex_unlock(&private_display->lock);
		return TDM_ERROR_NOT_IMPLEMENTED;

	/* lazily register the backend DPMS-done callback once per output */
	if (!private_output->regist_dpms_cb) {
		private_output->regist_dpms_cb = 1;

		ret = func_output->output_set_dpms_handler(private_output->output_backend,
			tdm_output_cb_dpms, private_output);
		if (ret != TDM_ERROR_NONE) {
			_pthread_mutex_unlock(&private_display->lock);
			TDM_ERR("Can't set the dpms handler!!");
			/* NOTE(review): the "return ret;" of this error path appears to
			 * be missing from this extract */

	TDM_INFO("output(%d) dpms async '%s'", private_output->pipe, tdm_dpms_str(dpms_value));

	ret = func_output->output_set_dpms_async(private_output->output_backend, dpms_value);

	if (ret == TDM_ERROR_NONE) {
		private_output->waiting_dpms_change = 1;
		TDM_INFO("output(%d) dpms async '%s' waiting", private_output->pipe, tdm_dpms_str(dpms_value));
		/* NOTE(review): an "else {" line appears elided before this failure
		 * path, which re-reads the real state from the backend */
		tdm_output_dpms temp = TDM_OUTPUT_DPMS_OFF;

		/* update current_dpms_value forcely */
		tdm_output_get_dpms_internal(output, &temp);

		TDM_ERR("output(%d) set_dpms_async failed: dpms '%s'", private_output->pipe, tdm_dpms_str(temp));

	_pthread_mutex_unlock(&private_display->lock);
/* LCOV_EXCL_STOP */
1408 tdm_output_get_dpms_internal(tdm_output *output, tdm_output_dpms *dpms_value)
1410 tdm_private_output *private_output;
1411 tdm_private_backend *private_backend;
1412 tdm_func_output *func_output;
1413 tdm_error ret = TDM_ERROR_NONE;
1415 TDM_RETURN_VAL_IF_FAIL(tdm_output_is_valid(output), TDM_ERROR_INVALID_PARAMETER);
1417 private_output = (tdm_private_output*)output;
1419 /* TODO: this is ugly. But before calling backend's output_get_dpms(), we have
1420 * to check if all backends's DPMS operation has no problem. In future, we'd
1421 * better use new env instead of using commit_per_vblank variable to distinguish
1422 * whether we use the stored value or backend's output_get_dpms.
1424 if (!private_output->commit_per_vblank) {
1425 *dpms_value = private_output->current_dpms_value;
1426 return TDM_ERROR_NONE;
1429 private_backend = private_output->private_backend;
1430 func_output = &private_backend->func_output;
1432 if (!func_output->output_get_dpms) {
1433 /* LCOV_EXCL_START */
1434 *dpms_value = private_output->current_dpms_value;
1435 TDM_WRN("not implemented!!");
1436 return TDM_ERROR_NONE;
1437 /* LCOV_EXCL_STOP */
1440 ret = func_output->output_get_dpms(private_output->output_backend, dpms_value);
1441 if (ret != TDM_ERROR_NONE) {
1442 /* LCOV_EXCL_START */
1443 TDM_ERR("output_get_dpms failed");
1444 *dpms_value = TDM_OUTPUT_DPMS_OFF;
1445 /* LCOV_EXCL_STOP */
1448 /* checking with backend's value */
1449 if (*dpms_value != private_output->current_dpms_value) {
1451 TDM_ERR("output(%d) dpms changed suddenly: %s -> %s",
1452 private_output->pipe, private_output->current_dpms_value,
1453 tdm_dpms_str(*dpms_value));
1454 private_output->current_dpms_value = *dpms_value;
1455 value.u32 = *dpms_value;
1456 _tdm_output_call_thread_cb_change(private_output, TDM_OUTPUT_CHANGE_DPMS, value);
1463 tdm_output_get_dpms(tdm_output *output, tdm_output_dpms *dpms_value)
1465 OUTPUT_FUNC_ENTRY();
1467 TDM_RETURN_VAL_IF_FAIL(dpms_value != NULL, TDM_ERROR_INVALID_PARAMETER);
1469 _pthread_mutex_lock(&private_display->lock);
1471 ret = tdm_output_get_dpms_internal(output, dpms_value);
1473 _pthread_mutex_unlock(&private_display->lock);
/**
 * Create a capture object bound to @output.
 * @param error  optional; receives the error code (semantics of
 *               OUTPUT_FUNC_ENTRY_ERROR — declared outside this extract)
 * @return the new capture object, or NULL on failure
 */
EXTERN tdm_capture *
tdm_output_create_capture(tdm_output *output, tdm_error *error)
	tdm_capture *capture = NULL;

	OUTPUT_FUNC_ENTRY_ERROR();

	_pthread_mutex_lock(&private_display->lock);

	capture = (tdm_capture *)tdm_capture_create_output_internal(private_output, error);

	_pthread_mutex_unlock(&private_display->lock);
/**
 * Create a (non-video) hwc window on @output.
 * Requires TDM_OUTPUT_CAPABILITY_HWC; otherwise *error is set to
 * TDM_ERROR_BAD_REQUEST and NULL is returned.
 */
EXTERN tdm_hwc_window *
tdm_output_hwc_create_window(tdm_output *output, tdm_error *error)
	tdm_hwc_window *hwc_window = NULL;

	OUTPUT_FUNC_ENTRY_ERROR();

	_pthread_mutex_lock(&private_display->lock);

	/* second argument 0 = regular (non-video) window */
	if (private_output->caps.capabilities & TDM_OUTPUT_CAPABILITY_HWC)
		hwc_window = (tdm_hwc_window *)tdm_hwc_window_create_internal(private_output, 0, error);
	/* LCOV_EXCL_START */
	/* NOTE(review): an "else {" line appears elided before this error path */
	TDM_ERR("output(%p) not support HWC", private_output);
	*error = TDM_ERROR_BAD_REQUEST;
	/* LCOV_EXCL_STOP */

	_pthread_mutex_unlock(&private_display->lock);
/**
 * Create a video hwc window on @output.
 * Identical to tdm_output_hwc_create_window() except the window is created
 * with the video flag (second argument 1).
 */
EXTERN tdm_hwc_window *
tdm_output_hwc_create_video_window(tdm_output *output, tdm_error *error)
	tdm_hwc_window *hwc_window = NULL;

	OUTPUT_FUNC_ENTRY_ERROR();

	_pthread_mutex_lock(&private_display->lock);

	if (private_output->caps.capabilities & TDM_OUTPUT_CAPABILITY_HWC)
		hwc_window = (tdm_hwc_window *)tdm_hwc_window_create_internal(private_output, 1, error);
	/* LCOV_EXCL_START */
	/* NOTE(review): an "else {" line appears elided before this error path */
	TDM_ERR("output(%p) not support HWC", private_output);
	*error = TDM_ERROR_BAD_REQUEST;
	/* LCOV_EXCL_STOP */

	_pthread_mutex_unlock(&private_display->lock);
1543 tdm_output_hwc_destroy_window(tdm_output *output, tdm_hwc_window *hwc_window)
1545 OUTPUT_FUNC_ENTRY();
1547 TDM_RETURN_VAL_IF_FAIL(hwc_window != NULL, TDM_ERROR_INVALID_PARAMETER);
1549 _pthread_mutex_lock(&private_display->lock);
1551 ret = tdm_hwc_window_destroy_internal(hwc_window);
1553 _pthread_mutex_unlock(&private_display->lock);
/**
 * Ask the backend to validate the given composited hwc windows for @output.
 * Frontend window handles are translated into backend handles before the
 * call. @num_types receives how many windows the backend wants recomposed
 * (fetch details via tdm_output_hwc_get_changed_composition_types()).
 */
tdm_output_hwc_validate(tdm_output *output, tdm_hwc_window **composited_wnds,
	uint32_t num_wnds, uint32_t *num_types)
	tdm_private_backend *private_backend;
	tdm_func_output *func_output = NULL;
	tdm_private_hwc_window **composited_wnds_frontend = NULL;
	tdm_hwc_window **composited_wnds_backend = NULL;
	/* NOTE(review): a loop-index declaration (e.g. "uint32_t i;") appears elided */

	OUTPUT_FUNC_ENTRY();

	TDM_RETURN_VAL_IF_FAIL(num_types != NULL, TDM_ERROR_INVALID_PARAMETER);

	_pthread_mutex_lock(&private_display->lock);

	if (!(private_output->caps.capabilities & TDM_OUTPUT_CAPABILITY_HWC)) {
		TDM_ERR("output(%p) not support HWC", private_output);
		_pthread_mutex_unlock(&private_display->lock);
		return TDM_ERROR_BAD_REQUEST;

	private_backend = private_output->private_backend;
	func_output = &private_backend->func_output;

	if (!func_output->output_hwc_validate) {
		/* LCOV_EXCL_START */
		_pthread_mutex_unlock(&private_display->lock);
		TDM_WRN("not implemented!!");
		return TDM_ERROR_NOT_IMPLEMENTED;
		/* LCOV_EXCL_STOP */

	/* empty window list: validate directly without the translation buffer */
	if (num_wnds == 0) {
		ret = func_output->output_hwc_validate(private_output->output_backend, NULL, 0, num_types);

		_pthread_mutex_unlock(&private_display->lock);

	/* translate frontend window handles into backend handles */
	composited_wnds_backend = calloc(num_wnds, sizeof(tdm_hwc_window *));
	if (!composited_wnds_backend) {
		/* LCOV_EXCL_START */
		_pthread_mutex_unlock(&private_display->lock);
		return TDM_ERROR_OUT_OF_MEMORY;
		/* LCOV_EXCL_STOP */

	composited_wnds_frontend = (tdm_private_hwc_window **)composited_wnds;

	for (i = 0; i < num_wnds; i++)
		composited_wnds_backend[i] = composited_wnds_frontend[i]->hwc_window_backend;

	ret = func_output->output_hwc_validate(private_output->output_backend, composited_wnds_backend, num_wnds, num_types);

	free(composited_wnds_backend);

	_pthread_mutex_unlock(&private_display->lock);
/**
 * Register the (single) "need revalidate" handler for @output.
 * The handler is invoked from tdm_output_need_validate_handler_thread() when
 * the backend asks for revalidation. Only one handler may ever be set;
 * a second registration fails with TDM_ERROR_OPERATION_FAILED.
 */
tdm_output_hwc_set_need_validate_handler(tdm_output *output,
	tdm_output_need_validate_handler hndl)
	OUTPUT_FUNC_ENTRY();

	TDM_RETURN_VAL_IF_FAIL(hndl != NULL, TDM_ERROR_INVALID_PARAMETER);

	_pthread_mutex_lock(&private_display->lock);

	if (!(private_output->caps.capabilities & TDM_OUTPUT_CAPABILITY_HWC)) {
		TDM_ERR("output(%p) not support HWC", private_output);
		_pthread_mutex_unlock(&private_display->lock);
		return TDM_ERROR_BAD_REQUEST;

	/* there's no reason to allow this */
	if (private_output->need_validate.hndl) {
		_pthread_mutex_unlock(&private_display->lock);
		return TDM_ERROR_OPERATION_FAILED;

	private_output->need_validate.hndl = hndl;

	_pthread_mutex_unlock(&private_display->lock);
/**
 * Fetch the windows whose composition type the backend changed during the
 * last tdm_output_hwc_validate().
 * The backend fills @hwc_window with backend handles; they are translated
 * back to frontend handles in place before returning. Passing NULL for
 * @hwc_window/@composition_types queries only the count into @num_elements.
 */
tdm_output_hwc_get_changed_composition_types(tdm_output *output,
	uint32_t *num_elements,
	tdm_hwc_window **hwc_window,
	tdm_hwc_window_composition *composition_types)
	tdm_private_backend *private_backend;
	tdm_func_output *func_output = NULL;
	tdm_private_hwc_window * private_hwc_window = NULL;
	/* NOTE(review): a loop-index declaration (e.g. "uint32_t i;") appears elided */

	OUTPUT_FUNC_ENTRY();

	TDM_RETURN_VAL_IF_FAIL(num_elements != NULL, TDM_ERROR_INVALID_PARAMETER);

	_pthread_mutex_lock(&private_display->lock);

	if (!(private_output->caps.capabilities & TDM_OUTPUT_CAPABILITY_HWC)) {
		TDM_ERR("output(%p) not support HWC", private_output);
		_pthread_mutex_unlock(&private_display->lock);
		return TDM_ERROR_BAD_REQUEST;

	private_backend = private_output->private_backend;
	func_output = &private_backend->func_output;

	if (!func_output->output_hwc_get_changed_composition_types) {
		/* LCOV_EXCL_START */
		_pthread_mutex_unlock(&private_display->lock);
		TDM_WRN("not implemented!!");
		return TDM_ERROR_NOT_IMPLEMENTED;
		/* LCOV_EXCL_STOP */

	ret = func_output->output_hwc_get_changed_composition_types(private_output->output_backend,
		num_elements, hwc_window, composition_types);
	if (ret != TDM_ERROR_NONE) {
		/* LCOV_EXCL_START */
		_pthread_mutex_unlock(&private_display->lock);
		/* NOTE(review): the "return ret;" of this error path appears elided */
		/* LCOV_EXCL_STOP */

	/* count-only query: nothing to translate */
	if (hwc_window == NULL || composition_types == NULL) {
		_pthread_mutex_unlock(&private_display->lock);
		return TDM_ERROR_NONE;

	/* map each backend handle back to its frontend wrapper */
	for (i = 0; i < *num_elements; i++) {
		private_hwc_window = _tdm_output_find_private_hwc_window(private_output, hwc_window[i]);

		if (private_hwc_window == NULL) {
			/* LCOV_EXCL_START */
			TDM_ERR("failed! This should never happen!");
			/* orphan backend window: destroy it rather than leak it */
			func_output->output_hwc_destroy_window(private_output->output_backend, hwc_window[i]);

			_pthread_mutex_unlock(&private_display->lock);
			return TDM_ERROR_OPERATION_FAILED;
			/* LCOV_EXCL_STOP */

		hwc_window[i] = (tdm_hwc_window*)private_hwc_window;

	_pthread_mutex_unlock(&private_display->lock);
1722 tdm_output_hwc_accept_changes(tdm_output *output)
1724 tdm_private_backend *private_backend;
1725 tdm_func_output *func_output = NULL;
1727 OUTPUT_FUNC_ENTRY();
1729 _pthread_mutex_lock(&private_display->lock);
1731 if (!(private_output->caps.capabilities & TDM_OUTPUT_CAPABILITY_HWC)) {
1732 TDM_ERR("output(%p) not support HWC", private_output);
1733 _pthread_mutex_unlock(&private_display->lock);
1734 return TDM_ERROR_BAD_REQUEST;
1737 private_backend = private_output->private_backend;
1738 func_output = &private_backend->func_output;
1740 if (!func_output->output_hwc_validate) {
1741 /* LCOV_EXCL_START */
1742 _pthread_mutex_unlock(&private_display->lock);
1743 TDM_WRN("not implemented!!");
1744 return TDM_ERROR_NOT_IMPLEMENTED;
1745 /* LCOV_EXCL_STOP */
1748 ret = func_output->output_hwc_accept_changes(private_output->output_backend);
1750 _pthread_mutex_unlock(&private_display->lock);
/**
 * Get the tbm surface queue used for the client (GL composited) target
 * buffer of @output.
 * @param error  receives the error code
 * @return the backend-provided queue, or NULL on failure
 */
tdm_output_hwc_get_target_buffer_queue(tdm_output *output, tdm_error *error)
	tdm_private_backend *private_backend;
	tdm_func_output *func_output = NULL;
	tbm_surface_queue_h queue = NULL;

	OUTPUT_FUNC_ENTRY_ERROR();

	_pthread_mutex_lock(&private_display->lock);

	if (!(private_output->caps.capabilities & TDM_OUTPUT_CAPABILITY_HWC)) {
		TDM_ERR("output(%p) not support HWC", private_output);
		*error = TDM_ERROR_BAD_REQUEST;
		_pthread_mutex_unlock(&private_display->lock);
		/* NOTE(review): the "return NULL;" of this error path appears elided */

	private_backend = private_output->private_backend;
	func_output = &private_backend->func_output;

	if (!func_output->output_hwc_get_target_buffer_queue) {
		/* LCOV_EXCL_START */
		_pthread_mutex_unlock(&private_display->lock);
		TDM_WRN("not implemented!!");
		/* NOTE(review): setting *error and "return NULL;" appear elided here */
		/* LCOV_EXCL_STOP */

	queue = func_output->output_hwc_get_target_buffer_queue(private_output->output_backend, error);

	_pthread_mutex_unlock(&private_display->lock);
/**
 * Hand the client (GL composited) target buffer plus its damage region to
 * the backend for @output. With TDM_DUMP_FLAG_WINDOW debugging, the buffer
 * is also dumped to tdm_debug_dump_dir.
 */
tdm_output_hwc_set_client_target_buffer(tdm_output *output, tbm_surface_h target_buffer, tdm_hwc_region damage)
	tdm_private_backend *private_backend;
	tdm_func_output *func_output = NULL;

	OUTPUT_FUNC_ENTRY();

	_pthread_mutex_lock(&private_display->lock);

	if (!(private_output->caps.capabilities & TDM_OUTPUT_CAPABILITY_HWC)) {
		TDM_ERR("output(%p) not support HWC", private_output);
		_pthread_mutex_unlock(&private_display->lock);
		return TDM_ERROR_BAD_REQUEST;

	if (tdm_debug_dump & TDM_DUMP_FLAG_WINDOW) {
		/* LCOV_EXCL_START */
		char str[TDM_PATH_LEN];
		/* NOTE(review): the declaration of the dump counter "i" (presumably
		 * "static int i;") appears elided from this extract */

		snprintf(str, TDM_PATH_LEN, "target_window_%d_%03d",
			private_output->index, i++);
		tdm_helper_dump_buffer_str(target_buffer, tdm_debug_dump_dir, str);
		/* LCOV_EXCL_STOP */

	private_backend = private_output->private_backend;
	func_output = &private_backend->func_output;

	if (!func_output->output_hwc_set_client_target_buffer) {
		/* LCOV_EXCL_START */
		_pthread_mutex_unlock(&private_display->lock);
		TDM_WRN("not implemented!!");
		return TDM_ERROR_NOT_IMPLEMENTED;
		/* LCOV_EXCL_STOP */

	ret = func_output->output_hwc_set_client_target_buffer(private_output->output_backend, target_buffer, damage);

	_pthread_mutex_unlock(&private_display->lock);
/**
 * Drop the client target buffer previously set on @output.
 * NOTE(review): sibling not-implemented paths log with TDM_WRN; this one
 * uses TDM_ERR — possibly intentional, but inconsistent.
 */
tdm_output_hwc_unset_client_target_buffer(tdm_output *output)
	tdm_private_backend *private_backend;
	tdm_func_output *func_output = NULL;

	OUTPUT_FUNC_ENTRY();

	_pthread_mutex_lock(&private_display->lock);

	if (!(private_output->caps.capabilities & TDM_OUTPUT_CAPABILITY_HWC)) {
		TDM_ERR("output(%p) not support HWC", private_output);
		_pthread_mutex_unlock(&private_display->lock);
		return TDM_ERROR_BAD_REQUEST;

	private_backend = private_output->private_backend;
	func_output = &private_backend->func_output;

	if (!func_output->output_hwc_unset_client_target_buffer) {
		/* LCOV_EXCL_START */
		_pthread_mutex_unlock(&private_display->lock);
		TDM_ERR("not implemented!!");
		return TDM_ERROR_NOT_IMPLEMENTED;
		/* LCOV_EXCL_STOP */

	ret = func_output->output_hwc_unset_client_target_buffer(private_output->output_backend);

	_pthread_mutex_unlock(&private_display->lock);
1871 _tdm_output_hwc_layer_commit_handler(tdm_layer *layer, unsigned int sequence,
1872 unsigned int tv_sec, unsigned int tv_usec,
1875 tdm_private_output_hwc_target_buffer_commit_handler *output_hwc_target_buffer_commit_handler = (tdm_private_output_hwc_target_buffer_commit_handler *)user_data;
1876 tdm_output_hwc_target_buffer_commit_handler func = output_hwc_target_buffer_commit_handler->func;
1877 tdm_output *output = (tdm_output *)output_hwc_target_buffer_commit_handler->private_output;
1878 void *data = output_hwc_target_buffer_commit_handler->user_data;
1880 func(output, sequence, tv_sec, tv_usec, data);
1882 free(output_hwc_target_buffer_commit_handler);
1886 tdm_output_hwc_commit_client_target_buffer(tdm_output *output, tdm_output_hwc_target_buffer_commit_handler func, void *user_data)
1888 tdm_private_backend *private_backend;
1889 tdm_func_output *func_output;
1890 tdm_private_output_hwc_target_buffer_commit_handler *output_hwc_target_buffer_commit_handler;
1891 tdm_layer *layer = NULL;
1892 tdm_private_layer *private_layer;
1893 const tdm_output_mode *mode;
1894 tbm_surface_h buffer;
1896 OUTPUT_FUNC_ENTRY();
1898 _pthread_mutex_lock(&private_display->lock);
1900 if (!(private_output->caps.capabilities & TDM_OUTPUT_CAPABILITY_HWC)) {
1901 TDM_ERR("output(%p) not support HWC", private_output);
1902 _pthread_mutex_unlock(&private_display->lock);
1903 return TDM_ERROR_BAD_REQUEST;
1906 private_backend = private_output->private_backend;
1907 func_output = &private_backend->func_output;
1909 if (!func_output->output_hwc_get_client_target_buffer_layer) {
1910 /* LCOV_EXCL_START */
1911 _pthread_mutex_unlock(&private_display->lock);
1912 TDM_ERR("not implemented!!");
1913 return TDM_ERROR_NOT_IMPLEMENTED;
1914 /* LCOV_EXCL_STOP */
1917 layer = func_output->output_hwc_get_client_target_buffer_layer(private_output->output_backend,
1920 /* LCOV_EXCL_START */
1921 _pthread_mutex_unlock(&private_display->lock);
1922 TDM_ERR("no assigned layer!!");
1923 return TDM_ERROR_INVALID_PARAMETER;
1924 /* LCOV_EXCL_STOP */
1927 private_layer = (tdm_private_layer*)layer;
1929 if (!func_output->output_hwc_get_client_target_buffer) {
1930 /* LCOV_EXCL_START */
1931 _pthread_mutex_unlock(&private_display->lock);
1932 TDM_ERR("not implemented!!");
1933 return TDM_ERROR_NOT_IMPLEMENTED;
1934 /* LCOV_EXCL_STOP */
1937 buffer = func_output->output_hwc_get_client_target_buffer(private_output->output_backend,
1940 ret = tdm_layer_set_buffer_internal(private_layer, buffer);
1942 ret = tdm_layer_unset_buffer_internal(private_layer);
1943 if (ret != TDM_ERROR_NONE) {
1944 /* LCOV_EXCL_START */
1945 TDM_ERR("failed: layer set info(window)");
1946 /* LCOV_EXCL_STOP */
1950 if (private_output->need_set_target_info) {
1951 mode = private_output->current_mode;
1952 private_output->target_buffer_info.src_config.size.h = mode->hdisplay;
1953 private_output->target_buffer_info.src_config.size.v = mode->vdisplay;
1954 private_output->target_buffer_info.src_config.pos.x = 0;
1955 private_output->target_buffer_info.src_config.pos.y = 0;
1956 private_output->target_buffer_info.src_config.pos.w = mode->hdisplay;
1957 private_output->target_buffer_info.src_config.pos.h = mode->vdisplay;
1958 private_output->target_buffer_info.dst_pos.x = 0;
1959 private_output->target_buffer_info.dst_pos.y = 0;
1960 private_output->target_buffer_info.dst_pos.w = mode->hdisplay;
1961 private_output->target_buffer_info.dst_pos.h = mode->vdisplay;
1962 private_output->target_buffer_info.transform = TDM_TRANSFORM_NORMAL;
1964 ret = tdm_layer_set_info_internal(private_layer, &private_output->target_buffer_info);
1965 if (ret != TDM_ERROR_NONE) {
1966 /* LCOV_EXCL_START */
1967 TDM_ERR("failed: layer set info(window)");
1968 /* LCOV_EXCL_STOP */
1972 private_output->need_set_target_info = 0;
1975 output_hwc_target_buffer_commit_handler = calloc(1, sizeof(tdm_private_output_hwc_target_buffer_commit_handler));
1976 if (!output_hwc_target_buffer_commit_handler) {
1977 /* LCOV_EXCL_START */
1978 TDM_ERR("failed: alloc memory");
1979 return TDM_ERROR_OUT_OF_MEMORY;
1980 /* LCOV_EXCL_STOP */
1983 output_hwc_target_buffer_commit_handler->private_output = private_output;
1984 output_hwc_target_buffer_commit_handler->func = func;
1985 output_hwc_target_buffer_commit_handler->user_data = user_data;
1987 ret = tdm_layer_commit_internal(private_layer, _tdm_output_hwc_layer_commit_handler, user_data);
1988 if (ret != TDM_ERROR_NONE) {
1989 /* LCOV_EXCL_START */
1990 TDM_ERR("failed: commit layer(target buffer)");
1991 free(output_hwc_target_buffer_commit_handler);
1992 /* LCOV_EXCL_STOP */
1996 _pthread_mutex_unlock(&private_display->lock);
/**
 * Query the tbm formats supported by video hwc windows on @output.
 * @param formats  receives a pointer to the backend-owned format array
 * @param count    receives the number of entries (parameter declared on an
 *                 elided continuation line of the signature)
 */
tdm_output_hwc_get_video_supported_formats(tdm_output *output, const tbm_format **formats,
	tdm_private_backend *private_backend;
	tdm_func_output *func_output;

	OUTPUT_FUNC_ENTRY();

	TDM_RETURN_VAL_IF_FAIL(formats != NULL, TDM_ERROR_INVALID_PARAMETER);
	TDM_RETURN_VAL_IF_FAIL(count != NULL, TDM_ERROR_INVALID_PARAMETER);

	_pthread_mutex_lock(&private_display->lock);

	private_backend = private_output->private_backend;
	func_output = &private_backend->func_output;

	if (!func_output->output_hwc_get_video_supported_formats) {
		/* LCOV_EXCL_START */
		_pthread_mutex_unlock(&private_display->lock);
		TDM_WRN("not implemented!!");
		return TDM_ERROR_NOT_IMPLEMENTED;
		/* LCOV_EXCL_STOP */

	ret = func_output->output_hwc_get_video_supported_formats(
		private_output->output_backend, formats, count);

	_pthread_mutex_unlock(&private_display->lock);
/* Check whether @private_output is still present (as an HWC-capable output)
 * in its backend's output list — used to guard against events that arrive
 * after the output was destroyed. */
_is_hwc_output_still_existed(tdm_private_output *private_output)
	tdm_private_backend *private_backend = private_output->private_backend;
	tdm_private_output *o = NULL;

	LIST_FOR_EACH_ENTRY(o, &private_backend->output_list, link) {
		/* skip outputs without the HWC capability */
		if (!(o->caps.capabilities & TDM_OUTPUT_CAPABILITY_HWC))
			/* NOTE(review): the skip statement (presumably "continue;") is
			 * elided from this extract */
		if (o == private_output)
			/* NOTE(review): the found/not-found return statements appear
			 * elided from this extract */
/* gets called on behalf of the ecore-main-loop thread */
/**
 * Thread-callback stage of the "need revalidate" event: verifies the output
 * still exists, then invokes the user handler registered via
 * tdm_output_hwc_set_need_validate_handler().
 * NOTE(review): the user handler is deliberately called after releasing the
 * display lock — presumably to let it re-enter TDM; confirm.
 */
tdm_output_need_validate_handler_thread(tdm_private_display *private_display, void *object, tdm_thread_cb_base *cb_base, void *user_data)
	tdm_private_output *private_output = object;

	TDM_RETURN_IF_FAIL(private_output != NULL);

	_pthread_mutex_lock(&private_display->lock);

	/* as we get 'private_output' within an event, an output this 'private_output'
	 * points to can be destroyed already */
	if (!_is_hwc_output_still_existed(private_output)) {
		_pthread_mutex_unlock(&private_display->lock);
		/* NOTE(review): the early "return;" of this path appears elided */

	_pthread_mutex_unlock(&private_display->lock);

	TDM_INFO("tdm-backend asks for revalidation for the output:%p.", private_output);

	if (private_output->need_validate.hndl)
		private_output->need_validate.hndl((tdm_output*)private_output);
/* gets called on behalf of the tdm-thread */
/**
 * Event-loop fd handler for the per-output need-validate eventfd: drains the
 * counter and forwards a TDM_THREAD_CB_NEED_VALIDATE event to the thread
 * callback machinery (handled by tdm_output_need_validate_handler_thread()).
 * NOTE(review): declarations of the eventfd counter (presumably
 * "uint64_t value;") and "tdm_error ret;" appear elided from this extract.
 */
_need_validate_handler(int fd, tdm_event_loop_mask mask, void *user_data)
	tdm_thread_cb_need_validate ev;
	tdm_private_output *private_output;

	private_output = (tdm_private_output *)user_data;

	/* drain the eventfd so it can signal again */
	if (read(private_output->need_validate.event_fd, &value, sizeof(value)) < 0) {
		TDM_ERR("error while trying to read from a need_validate.event_fd fd.");
		return TDM_ERROR_OPERATION_FAILED;

	memset(&ev, 0, sizeof ev);
	ev.base.type = TDM_THREAD_CB_NEED_VALIDATE;
	ev.base.length = sizeof ev;
	ev.base.object_stamp = private_output->stamp;
	ev.base.data = NULL;

	ret = tdm_thread_cb_call(private_output, &ev.base);
	TDM_WARNING_IF_FAIL(ret == TDM_ERROR_NONE);

	TDM_INFO("tdm-thread: get a 'need to revalidate' event for the ouptut:%p.", private_output);

	/* who cares about this? */
	return TDM_ERROR_NONE;
/**
 * Wire the per-output "need revalidate" eventfd into the tdm-thread event
 * loop. Must be called with the display lock held.
 * NOTE(review): the creation of "fd" (presumably an eventfd(2) call and its
 * declaration) appears elided from this extract.
 */
tdm_output_need_validate_event_init(tdm_output *output)
	OUTPUT_FUNC_ENTRY();

	TDM_RETURN_VAL_IF_FAIL(TDM_MUTEX_IS_LOCKED(), TDM_ERROR_OPERATION_FAILED);

	if (!(private_output->caps.capabilities & TDM_OUTPUT_CAPABILITY_HWC)) {
		TDM_ERR("output(%p) not support HWC", private_output);
		return TDM_ERROR_BAD_REQUEST;

	/* build in eventfd fds into event_loop listened & handled by the tdm-thread */
	TDM_WARNING_IF_FAIL(fd >= 0);

	private_output->need_validate.event_source = tdm_event_loop_add_fd_handler(private_display,
		fd, TDM_EVENT_LOOP_READABLE, _need_validate_handler, private_output, &ret);
	TDM_WARNING_IF_FAIL(ret == TDM_ERROR_NONE);

	private_output->need_validate.event_fd = fd;

	TDM_INFO("register an output:%p for the revalidation, event_fd:%d.", private_output, fd);
2140 tdm_output_choose_commit_per_vblank_mode(tdm_private_output *private_output, int mode)
2142 if (!private_output)
2143 return TDM_ERROR_INVALID_PARAMETER;
2145 if (mode < 0 || mode > 2)
2146 return TDM_ERROR_INVALID_PARAMETER;
2148 private_output->commit_per_vblank = mode;
2150 if (private_output->commit_per_vblank == 0)
2151 TDM_INFO("commit per vblank: disable");
2152 else if (private_output->commit_per_vblank == 1)
2153 TDM_INFO("commit per vblank: enable (1 layer)");
2154 else if (private_output->commit_per_vblank == 2)
2155 TDM_INFO("commit per vblank: enable (previous commit)");
2157 return TDM_ERROR_NONE;