1 /**************************************************************************
5 * Copyright 2015 Samsung Electronics co., Ltd. All Rights Reserved.
7 * Contact: Eunchul Kim <chulspro.kim@samsung.com>,
8 * JinYoung Jeon <jy0.jeon@samsung.com>,
9 * Taeheon Kim <th908.kim@samsung.com>,
10 * YoungJun Cho <yj44.cho@samsung.com>,
11 * SooChan Lim <sc1.lim@samsung.com>,
12 * Boram Park <sc1.lim@samsung.com>
14 * Permission is hereby granted, free of charge, to any person obtaining a
15 * copy of this software and associated documentation files (the
16 * "Software"), to deal in the Software without restriction, including
17 * without limitation the rights to use, copy, modify, merge, publish,
18 * distribute, sub license, and/or sell copies of the Software, and to
19 * permit persons to whom the Software is furnished to do so, subject to
20 * the following conditions:
22 * The above copyright notice and this permission notice (including the
23 * next paragraph) shall be included in all copies or substantial portions
26 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
27 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
28 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
29 * IN NO EVENT SHALL PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR
30 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
31 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
32 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
34 **************************************************************************/
40 #include "tdm_private.h"
/* Common prologue for public tdm_output entry points that return a tdm_error:
 * validates the output handle (returning TDM_ERROR_INVALID_PARAMETER on
 * failure) and derives the private_output/private_display locals. Expects a
 * parameter named 'output' to be in scope at the expansion site. */
#define OUTPUT_FUNC_ENTRY() \
	tdm_private_display *private_display; \
	tdm_private_output *private_output; \
	tdm_error ret = TDM_ERROR_NONE; /* default TDM_ERROR_NONE */\
	TDM_RETURN_VAL_IF_FAIL(tdm_output_is_valid(output), TDM_ERROR_INVALID_PARAMETER); \
	private_output = (tdm_private_output*)output; \
	private_display = private_output->private_display
/* Variant of OUTPUT_FUNC_ENTRY for entry points that return a pointer:
 * on an invalid handle it returns NULL and reports the failure through the
 * caller-visible 'error' out parameter (via TDM_RETURN_VAL_IF_FAIL_WITH_ERROR). */
#define OUTPUT_FUNC_ENTRY_ERROR() \
	tdm_private_display *private_display; \
	tdm_private_output *private_output; \
	tdm_error ret = TDM_ERROR_NONE; /* default TDM_ERROR_NONE */\
	TDM_RETURN_VAL_IF_FAIL_WITH_ERROR(tdm_output_is_valid(output), TDM_ERROR_INVALID_PARAMETER, NULL); \
	private_output = (tdm_private_output*)output; \
	private_display = private_output->private_display
/* forward declaration: (re)arm the per-output vblank timeout timer
 * (return-type line not visible in this view) */
_tdm_output_vblank_timeout_update(tdm_private_output *private_output, int ms_delay);

/* Timer callback fired when a requested vblank did not arrive in time.
 * Bumps the expiry counter, logs the timeout, and dumps every handler still
 * waiting on this output so the stuck waiter can be identified.
 * (return-type line not visible; returns a tdm_error judging by the body) */
_tdm_output_vblank_timeout_cb(void *user_data)
	tdm_private_output *private_output = user_data;
	tdm_private_output_vblank_handler *v = NULL;

	TDM_RETURN_VAL_IF_FAIL(private_output != NULL, TDM_ERROR_OPERATION_FAILED);

	/* how many times this timer has fired without a vblank arriving */
	private_output->vblank_timeout_timer_expired++;

	/* NOTE(review): the format string has three conversions but only two
	 * argument lines are visible here — the first argument (presumably
	 * private_output->pipe) appears to be on a line missing from this view. */
	TDM_ERR("TDM output(%d) vblank TIMEOUT!! (%d time%s)",
			private_output->vblank_timeout_timer_expired,
			(private_output->vblank_timeout_timer_expired > 1) ? "s" : "");

	/* dump every vblank handler still pending on this output */
	LIST_FOR_EACH_ENTRY(v, &private_output->vblank_handler_list, link) {
		TDM_ERR("vblank_handler(%p) interval(%d) sync(%d) sent_to_frontend(%u) owner_tid(%d)",
				v, v->interval, v->sync, v->sent_to_frontend, v->owner_tid);

	return TDM_ERROR_NONE;
/* Debug helper: log every pending vblank handler on this output that was
 * registered with the given user_data. (return-type line not visible here) */
tdm_output_vblank_print_wait_information(tdm_private_output *private_output, void *user_data)
	tdm_private_output_vblank_handler *v = NULL;

	TDM_RETURN_IF_FAIL(private_output != NULL);
	TDM_RETURN_IF_FAIL(user_data != NULL);

	TDM_ERR("TDM output(%d) vblank user_data(%p) info!!", private_output->pipe, user_data);

	LIST_FOR_EACH_ENTRY(v, &private_output->vblank_handler_list, link) {
		/* skip handlers with a different user_data (the 'continue' line is
		 * presumably in a gap not visible in this view) */
		if (v->user_data != user_data)
		TDM_ERR("vblank_handler(%p) interval(%d) sync(%d) sent_to_frontend(%u) owner_tid(%d)",
				v, v->interval, v->sync, v->sent_to_frontend, v->owner_tid);
/* (Re)program the per-output vblank timeout timer to fire after ms_delay
 * milliseconds, creating the event-loop timer lazily on first use.
 * (return-type line and the local 'ret' declaration are not visible here) */
_tdm_output_vblank_timeout_update(tdm_private_output *private_output, int ms_delay)
	/* create the timer on demand; the callback receives private_output
	 * (trailing argument lines of the add call are not visible here) */
	if (!private_output->vblank_timeout_timer) {
		private_output->vblank_timeout_timer =
			tdm_event_loop_add_timer_handler(private_output->private_display,
											 _tdm_output_vblank_timeout_cb,
		if (!private_output->vblank_timeout_timer) {
			TDM_ERR("output(%d) couldn't add timer", private_output->pipe);
		TDM_INFO("output(%d) create vblank timeout timer", private_output->pipe);
		/* fresh timer starts with a clean expiry count */
		private_output->vblank_timeout_timer_expired = 0;

	ret = tdm_event_loop_source_timer_update(private_output->vblank_timeout_timer, ms_delay);
	if (ret != TDM_ERROR_NONE) {
		TDM_ERR("output(%d) couldn't update timer", private_output->pipe);
/* Linear search of the output's hwc window list for the entry wrapping the
 * given backend hwc_window handle; the NULL-not-found return is presumably
 * on a line not visible in this view. */
static tdm_private_hwc_window *
_tdm_output_find_private_hwc_window(tdm_private_output *private_output,
									tdm_hwc_window *hwc_window_backend)
	tdm_private_hwc_window *private_hwc_window = NULL;

	LIST_FOR_EACH_ENTRY(private_hwc_window, &private_output->hwc_window_list, link) {
		if (private_hwc_window->hwc_window_backend == hwc_window_backend)
			return private_hwc_window;
/* Module init: register the stamp->object lookup function used by the thread
 * callback dispatcher for the three output event types. Always succeeds. */
tdm_output_init(tdm_private_display *private_display)
	tdm_thread_cb_set_find_func(TDM_THREAD_CB_OUTPUT_COMMIT, tdm_display_find_output_stamp);
	tdm_thread_cb_set_find_func(TDM_THREAD_CB_OUTPUT_VBLANK, tdm_display_find_output_stamp);
	tdm_thread_cb_set_find_func(TDM_THREAD_CB_OUTPUT_CHANGE, tdm_display_find_output_stamp);

	return TDM_ERROR_NONE;
/* Return the backend object that owns this output. 'error' receives
 * TDM_ERROR_NONE on success (an 'if (error)' guard is presumably on a line
 * not visible in this view). */
tdm_output_get_backend(tdm_output *output, tdm_error *error)
	tdm_private_backend *private_backend;

	OUTPUT_FUNC_ENTRY_ERROR();

	_pthread_mutex_lock(&private_display->lock);

	private_backend = private_output->private_backend;

	*error = TDM_ERROR_NONE;

	_pthread_mutex_unlock(&private_display->lock);

	return private_backend;
/* Fill the optional maker/model/name out parameters from the cached output
 * capabilities; per-pointer NULL guards are presumably on lines not visible
 * in this view. Strings point into the caps and must not be freed by callers. */
tdm_output_get_model_info(tdm_output *output, const char **maker,
						  const char **model, const char **name)
	_pthread_mutex_lock(&private_display->lock);

	*maker = private_output->caps.maker;

	*model = private_output->caps.model;

	*name = private_output->caps.name;

	_pthread_mutex_unlock(&private_display->lock);
/* Copy the cached output capability flags into *capabilities. */
tdm_output_get_capabilities(tdm_output *output, tdm_output_capability *capabilities)
	TDM_RETURN_VAL_IF_FAIL(capabilities != NULL, TDM_ERROR_INVALID_PARAMETER);

	_pthread_mutex_lock(&private_display->lock);

	*capabilities = private_output->caps.capabilities;

	_pthread_mutex_unlock(&private_display->lock);
/* Copy the cached connection status into *status. */
tdm_output_get_conn_status(tdm_output *output, tdm_output_conn_status *status)
	TDM_RETURN_VAL_IF_FAIL(status != NULL, TDM_ERROR_INVALID_PARAMETER);

	_pthread_mutex_lock(&private_display->lock);

	*status = private_output->caps.status;

	_pthread_mutex_unlock(&private_display->lock);
/* Thread-side dispatcher for an output-change event: invokes the user's
 * registered change handler with the display lock dropped so the callback
 * can re-enter the TDM API safely. */
tdm_output_thread_cb_change(tdm_private_display *private_display, void *object, tdm_thread_cb_base *cb_base, void *user_data)
	tdm_private_output *private_output = object;
	tdm_thread_cb_output_change *output_change = (tdm_thread_cb_output_change *)cb_base;
	tdm_private_output_change_handler *change_handler = user_data;

	TDM_RETURN_IF_FAIL(TDM_MUTEX_IS_LOCKED());

	/* change handlers must be delivered on the thread that registered them */
	assert(change_handler->owner_tid == syscall(SYS_gettid));

	/* unlock around the user callback, then re-take the lock */
	_pthread_mutex_unlock(&private_display->lock);
	change_handler->func(private_output, output_change->type, output_change->value, change_handler->user_data);
	_pthread_mutex_lock(&private_display->lock);
/* Build a synchronous output-change event (type + value) and dispatch it to
 * all registered change handlers via the thread callback machinery.
 * Always returns TDM_ERROR_NONE; a dispatch failure only logs a warning.
 * (return-type line and the local 'ret' declaration are not visible here) */
_tdm_output_call_thread_cb_change(tdm_private_output *private_output, tdm_output_change_type type, tdm_value value)
	tdm_thread_cb_output_change output_change;

	memset(&output_change, 0, sizeof output_change);
	output_change.base.type = TDM_THREAD_CB_OUTPUT_CHANGE;
	output_change.base.length = sizeof output_change;
	output_change.base.object_stamp = private_output->stamp;
	output_change.base.data = NULL;
	/* sync=1: the caller blocks until all handlers have run */
	output_change.base.sync = 1;
	output_change.type = type;
	output_change.value = value;

	ret = tdm_thread_cb_call(private_output, &output_change.base);
	TDM_WARNING_IF_FAIL(ret == TDM_ERROR_NONE);

	return TDM_ERROR_NONE;
/* Backend callback: the output's connection status changed. On a transition
 * into or out of the disconnected state, re-query the output from the
 * backend before recording the new status, then broadcast a CONNECTION
 * change event. (return-type line and the local 'ret'/'value' declarations
 * and the value assignment are not visible in this view) */
tdm_output_cb_status(tdm_output *output_backend, tdm_output_conn_status status, void *user_data)
	tdm_private_output *private_output = user_data;

	TDM_RETURN_IF_FAIL(private_output);

	TDM_INFO("output(%d) %s", private_output->pipe, tdm_status_str(status));

	/* full update only when crossing the connected/disconnected boundary */
	if ((private_output->caps.status == TDM_OUTPUT_CONN_STATUS_DISCONNECTED && status != TDM_OUTPUT_CONN_STATUS_DISCONNECTED) ||
		(private_output->caps.status != TDM_OUTPUT_CONN_STATUS_DISCONNECTED && status == TDM_OUTPUT_CONN_STATUS_DISCONNECTED)) {
		ret = tdm_display_update_output(private_output->private_backend, output_backend, private_output->pipe, 1);
		TDM_RETURN_IF_FAIL(ret == TDM_ERROR_NONE);

	private_output->caps.status = status;

	ret = _tdm_output_call_thread_cb_change(private_output, TDM_OUTPUT_CHANGE_CONNECTION, value);
	TDM_WARNING_IF_FAIL(ret == TDM_ERROR_NONE);
/* Backend callback: an asynchronous DPMS change completed. Record the new
 * DPMS value, clear the waiting flag, and broadcast a DPMS change event.
 * (return-type line and the 'ret'/'value' declarations are not visible here;
 * note the first TDM_INFO passes a dpms value through tdm_status_str —
 * presumably intentional, but worth confirming against tdm_dpms_str) */
tdm_output_cb_dpms(tdm_output *output_backend, tdm_output_dpms dpms, void *user_data)
	tdm_private_output *private_output = user_data;

	TDM_INFO("output(%d) %s", private_output->pipe, tdm_status_str(dpms));

	private_output->current_dpms_value = dpms;
	/* unblock anyone waiting for the async DPMS request to finish */
	private_output->waiting_dpms_change = 0;
	TDM_INFO("output(%d) dpms async '%s' done", private_output->pipe, tdm_dpms_str(dpms));

	ret = _tdm_output_call_thread_cb_change(private_output, TDM_OUTPUT_CHANGE_DPMS, value);
	TDM_WARNING_IF_FAIL(ret == TDM_ERROR_NONE);
/* Register a change handler (connection/DPMS events) for this output.
 * Rejects an identical (func, user_data) pair that is already registered,
 * records the registering thread's tid so events are delivered back on the
 * same thread, and appends the handler to the output's list. */
tdm_output_add_change_handler(tdm_output *output,
							  tdm_output_change_handler func,
	tdm_private_output_change_handler *change_handler = NULL;

	TDM_RETURN_VAL_IF_FAIL(func != NULL, TDM_ERROR_INVALID_PARAMETER);

	_pthread_mutex_lock(&private_display->lock);

	/* duplicate registrations are a caller bug */
	LIST_FOR_EACH_ENTRY(change_handler, &private_output->change_handler_list, link) {
		if (change_handler->func == func && change_handler->user_data == user_data) {
			TDM_ERR("can't add twice");
			_pthread_mutex_unlock(&private_display->lock);
			return TDM_ERROR_BAD_REQUEST;

	change_handler = calloc(1, sizeof(tdm_private_output_change_handler));
	if (!change_handler) {
		/* LCOV_EXCL_START */
		TDM_ERR("failed: alloc memory");
		_pthread_mutex_unlock(&private_display->lock);
		return TDM_ERROR_OUT_OF_MEMORY;

	/* hook the handler into the thread callback machinery */
	ret = tdm_thread_cb_add(private_output, TDM_THREAD_CB_OUTPUT_CHANGE, NULL, tdm_output_thread_cb_change, change_handler);
	if (ret != TDM_ERROR_NONE) {
		/* LCOV_EXCL_START */
		TDM_ERR("tdm_thread_cb_add failed");
		free(change_handler);
		_pthread_mutex_unlock(&private_display->lock);
		return TDM_ERROR_OPERATION_FAILED;

	change_handler->private_output = private_output;
	change_handler->func = func;
	change_handler->user_data = user_data;
	/* remember the registering thread: callbacks run on this tid */
	change_handler->owner_tid = syscall(SYS_gettid);

	LIST_ADDTAIL(&change_handler->link, &private_output->change_handler_list);

	_pthread_mutex_unlock(&private_display->lock);
/* Unregister a change handler previously added with
 * tdm_output_add_change_handler, matching on both func and user_data.
 * Silently does nothing if no matching handler exists. */
tdm_output_remove_change_handler(tdm_output *output,
								 tdm_output_change_handler func,
	tdm_private_display *private_display;
	tdm_private_output *private_output;
	tdm_private_output_change_handler *change_handler = NULL, *hh = NULL;

	TDM_RETURN_IF_FAIL(tdm_output_is_valid(output));
	TDM_RETURN_IF_FAIL(func != NULL);

	private_output = (tdm_private_output*)output;
	private_display = private_output->private_display;

	_pthread_mutex_lock(&private_display->lock);

	LIST_FOR_EACH_ENTRY_SAFE(change_handler, hh, &private_output->change_handler_list, link) {
		/* skip non-matching handlers (the 'continue' line is presumably in
		 * a gap not visible in this view) */
		if (change_handler->func != func || change_handler->user_data != user_data)

		tdm_thread_cb_remove(private_output, TDM_THREAD_CB_OUTPUT_CHANGE, NULL, tdm_output_thread_cb_change, change_handler);

		LIST_DEL(&change_handler->link);
		free(change_handler);

		_pthread_mutex_unlock(&private_display->lock);

	_pthread_mutex_unlock(&private_display->lock);
/* Copy the cached connector type (HDMI, LVDS, ...) into *type. */
tdm_output_get_output_type(tdm_output *output, tdm_output_type *type)
	TDM_RETURN_VAL_IF_FAIL(type != NULL, TDM_ERROR_INVALID_PARAMETER);

	_pthread_mutex_lock(&private_display->lock);

	*type = private_output->caps.type;

	_pthread_mutex_unlock(&private_display->lock);
/* Count the layers of this output into *count. Not available on outputs
 * with the HWC capability (layers are managed by the hwc API instead).
 * (the *count initialization/increment lines are presumably in gaps not
 * visible in this view) */
tdm_output_get_layer_count(tdm_output *output, int *count)
	tdm_private_layer *private_layer = NULL;

	TDM_RETURN_VAL_IF_FAIL(count != NULL, TDM_ERROR_INVALID_PARAMETER);

	_pthread_mutex_lock(&private_display->lock);

	/* HWC-capable outputs expose no layer list to the frontend */
	if (private_output->caps.capabilities & TDM_OUTPUT_CAPABILITY_HWC) {
		TDM_ERR("output(%p) support HWC. Use HWC functions", private_output);

		_pthread_mutex_unlock(&private_display->lock);
		return TDM_ERROR_BAD_REQUEST;

	LIST_FOR_EACH_ENTRY(private_layer, &private_output->layer_list, link)

		_pthread_mutex_unlock(&private_display->lock);
		return TDM_ERROR_NONE;

	_pthread_mutex_unlock(&private_display->lock);
/* Return the layer with the given index, or NULL (with *error set) when the
 * output is HWC-capable or no such index exists; the not-found return path
 * is presumably on lines not visible in this view. */
tdm_output_get_layer(tdm_output *output, int index, tdm_error *error)
	tdm_private_layer *private_layer = NULL;

	OUTPUT_FUNC_ENTRY_ERROR();

	_pthread_mutex_lock(&private_display->lock);

	*error = TDM_ERROR_NONE;

	/* HWC-capable outputs expose no layer list to the frontend */
	if (private_output->caps.capabilities & TDM_OUTPUT_CAPABILITY_HWC) {
		_pthread_mutex_unlock(&private_display->lock);
		TDM_ERR("output(%p) support HWC. Use HWC functions", private_output);

		*error = TDM_ERROR_BAD_REQUEST;

	LIST_FOR_EACH_ENTRY(private_layer, &private_output->layer_list, link) {
		if (private_layer->index == index) {
			_pthread_mutex_unlock(&private_display->lock);
			return private_layer;

	_pthread_mutex_unlock(&private_display->lock);
/* Expose the output's property table (pointer into the cached caps — callers
 * must not free it) and its element count. */
tdm_output_get_available_properties(tdm_output *output, const tdm_prop **props,
	TDM_RETURN_VAL_IF_FAIL(props != NULL, TDM_ERROR_INVALID_PARAMETER);
	TDM_RETURN_VAL_IF_FAIL(count != NULL, TDM_ERROR_INVALID_PARAMETER);

	_pthread_mutex_lock(&private_display->lock);

	*props = (const tdm_prop *)private_output->caps.props;
	*count = private_output->caps.prop_count;

	_pthread_mutex_unlock(&private_display->lock);
/* Expose the output's mode table (pointer into the cached caps — callers
 * must not free it) and its element count. */
tdm_output_get_available_modes(tdm_output *output,
							   const tdm_output_mode **modes, int *count)
	TDM_RETURN_VAL_IF_FAIL(modes != NULL, TDM_ERROR_INVALID_PARAMETER);
	TDM_RETURN_VAL_IF_FAIL(count != NULL, TDM_ERROR_INVALID_PARAMETER);

	_pthread_mutex_lock(&private_display->lock);

	*modes = (const tdm_output_mode *)private_output->caps.modes;
	*count = private_output->caps.mode_count;

	_pthread_mutex_unlock(&private_display->lock);
/* Report the output's min/max size constraints and preferred alignment from
 * the cached caps; per-pointer NULL guards are presumably on lines not
 * visible in this view. TDM_FRONT_VALUE converts backend values for the
 * frontend — semantics defined elsewhere. */
tdm_output_get_available_size(tdm_output *output, int *min_w, int *min_h,
							  int *max_w, int *max_h, int *preferred_align)
	_pthread_mutex_lock(&private_display->lock);

	*min_w = TDM_FRONT_VALUE(private_output->caps.min_w);

	*min_h = TDM_FRONT_VALUE(private_output->caps.min_h);

	*max_w = TDM_FRONT_VALUE(private_output->caps.max_w);

	*max_h = TDM_FRONT_VALUE(private_output->caps.max_h);

	*preferred_align = TDM_FRONT_VALUE(private_output->caps.preferred_align);

	_pthread_mutex_unlock(&private_display->lock);
/* Report cursor size constraints. Requires backend module ABI >= 1.5 (the
 * cursor caps fields were introduced then); otherwise TDM_ERROR_BAD_REQUEST.
 * Per-pointer NULL guards are presumably on lines not visible in this view. */
tdm_output_get_cursor_available_size(tdm_output *output, int *min_w, int *min_h,
									 int *max_w, int *max_h, int *preferred_align)
	_pthread_mutex_lock(&private_display->lock);

	/* cursor caps only exist in backends built against ABI 1.5+ */
	if (!tdm_backend_check_module_abi(private_output->private_backend, 1, 5)) {
		_pthread_mutex_unlock(&private_display->lock);
		return TDM_ERROR_BAD_REQUEST;

	*min_w = TDM_FRONT_VALUE(private_output->caps.cursor_min_w);

	*min_h = TDM_FRONT_VALUE(private_output->caps.cursor_min_h);

	*max_w = TDM_FRONT_VALUE(private_output->caps.cursor_max_w);

	*max_h = TDM_FRONT_VALUE(private_output->caps.cursor_max_h);

	*preferred_align = TDM_FRONT_VALUE(private_output->caps.cursor_preferred_align);

	_pthread_mutex_unlock(&private_display->lock);
/* Report the physical panel dimensions in millimetres from the cached caps;
 * per-pointer NULL guards are presumably on lines not visible in this view. */
tdm_output_get_physical_size(tdm_output *output, unsigned int *mmWidth,
							 unsigned int *mmHeight)
	_pthread_mutex_lock(&private_display->lock);

	*mmWidth = private_output->caps.mmWidth;

	*mmHeight = private_output->caps.mmHeight;

	_pthread_mutex_unlock(&private_display->lock);
/* Copy the cached subpixel layout value into *subpixel. */
tdm_output_get_subpixel(tdm_output *output, unsigned int *subpixel)
	TDM_RETURN_VAL_IF_FAIL(subpixel != NULL, TDM_ERROR_INVALID_PARAMETER);

	_pthread_mutex_lock(&private_display->lock);

	*subpixel = private_output->caps.subpixel;

	_pthread_mutex_unlock(&private_display->lock);
/* Copy this output's pipe index (its position in the display) into *pipe. */
tdm_output_get_pipe(tdm_output *output, unsigned int *pipe)
	TDM_RETURN_VAL_IF_FAIL(pipe != NULL, TDM_ERROR_INVALID_PARAMETER);

	_pthread_mutex_lock(&private_display->lock);

	*pipe = private_output->pipe;

	_pthread_mutex_unlock(&private_display->lock);
/* Find the layer with the PRIMARY capability and store its index in *index.
 * (the loop-exit 'break' after the match is presumably on a line not
 * visible in this view) */
tdm_output_get_primary_index(tdm_output *output, int *index)
	tdm_private_layer *private_layer = NULL;

	TDM_RETURN_VAL_IF_FAIL(index != NULL, TDM_ERROR_INVALID_PARAMETER);

	_pthread_mutex_lock(&private_display->lock);

	LIST_FOR_EACH_ENTRY(private_layer, &private_output->layer_list, link) {
		if (private_layer->caps.capabilities & TDM_LAYER_CAPABILITY_PRIMARY) {
			*index = private_layer->index;

	_pthread_mutex_unlock(&private_display->lock);
/* Forward a property write (id, value) to the backend's output_set_property.
 * Returns TDM_ERROR_NOT_IMPLEMENTED when the backend lacks the hook.
 * (the 'value' argument line of the backend call is not visible here) */
tdm_output_set_property(tdm_output *output, unsigned int id, tdm_value value)
	tdm_private_backend *private_backend;
	tdm_func_output *func_output;

	_pthread_mutex_lock(&private_display->lock);

	private_backend = private_output->private_backend;
	func_output = &private_backend->func_output;

	/* optional backend hook */
	if (!func_output->output_set_property) {
		/* LCOV_EXCL_START */
		_pthread_mutex_unlock(&private_display->lock);
		TDM_WRN("not implemented!!");
		return TDM_ERROR_NOT_IMPLEMENTED;

	ret = func_output->output_set_property(private_output->output_backend, id,

	_pthread_mutex_unlock(&private_display->lock);
/* Forward a property read (id) to the backend's output_get_property, storing
 * the result in *value. Returns TDM_ERROR_NOT_IMPLEMENTED when the backend
 * lacks the hook. (the 'value' argument line of the backend call is not
 * visible in this view) */
tdm_output_get_property(tdm_output *output, unsigned int id, tdm_value *value)
	tdm_private_backend *private_backend;
	tdm_func_output *func_output;

	TDM_RETURN_VAL_IF_FAIL(value != NULL, TDM_ERROR_INVALID_PARAMETER);

	_pthread_mutex_lock(&private_display->lock);

	private_backend = private_output->private_backend;
	func_output = &private_backend->func_output;

	/* optional backend hook */
	if (!func_output->output_get_property) {
		/* LCOV_EXCL_START */
		_pthread_mutex_unlock(&private_display->lock);
		TDM_WRN("not implemented!!");
		return TDM_ERROR_NOT_IMPLEMENTED;

	ret = func_output->output_get_property(private_output->output_backend, id,

	_pthread_mutex_unlock(&private_display->lock);
/* Thread-side dispatcher for a vblank event. Disarms the timeout timer,
 * then collects every handler matching this event's (interval, sync,
 * owner_tid) into a private clone list and invokes their callbacks with the
 * display lock dropped. (declarations of 'interval'/'sync', the list
 * unlink into clone_list, and the per-handler free are presumably on lines
 * not visible in this view) */
_tdm_output_thread_cb_vblank(tdm_private_display *private_display, void *object, tdm_thread_cb_base *cb_base, void *user_data)
	tdm_thread_cb_output_vblank *output_vblank = (tdm_thread_cb_output_vblank *)cb_base;
	tdm_private_output_vblank_handler *vblank_handler = output_vblank->base.data;
	tdm_private_output_vblank_handler *v = NULL, *vv = NULL;
	tdm_private_output *private_output = object;
	struct list_head clone_list;

	pid_t tid = syscall(SYS_gettid);

	TDM_RETURN_IF_FAIL(TDM_MUTEX_IS_LOCKED());

	/* vblank events must be delivered on the thread that requested them */
	assert(vblank_handler->owner_tid == tid);

	vblank_handler->sent_to_frontend = 0;

	/* event arrived: disarm the vblank timeout timer */
	_tdm_output_vblank_timeout_update(private_output, 0);

	tdm_thread_cb_remove(private_output, TDM_THREAD_CB_OUTPUT_VBLANK, vblank_handler, _tdm_output_thread_cb_vblank, NULL);

	interval = vblank_handler->interval;
	sync = vblank_handler->sync;

	LIST_INITHEAD(&clone_list);

	/* one backend vblank satisfies every handler with the same parameters
	 * registered by the same thread */
	LIST_FOR_EACH_ENTRY_SAFE(v, vv, &private_output->vblank_handler_list, link) {
		if (v->interval != interval || v->sync != sync || v->owner_tid != tid)

		LIST_ADDTAIL(&v->link, &clone_list);

	if (tdm_debug_module & TDM_DEBUG_COMMIT)
		TDM_INFO("----------------------------------------- output(%d) got vblank", private_output->pipe);

	/* call user handlers without holding the display lock */
	_pthread_mutex_unlock(&private_display->lock);
	LIST_FOR_EACH_ENTRY_SAFE(v, vv, &clone_list, link) {
		if (tdm_debug_module & TDM_DEBUG_COMMIT)
			TDM_INFO("handler(%p)", v);

		v->func(v->private_output,
				output_vblank->sequence,
				output_vblank->tv_sec,
				output_vblank->tv_usec,

	_pthread_mutex_lock(&private_display->lock);

	if (tdm_debug_module & TDM_DEBUG_COMMIT)
		TDM_INFO("-----------------------------------------...");
/* Backend callback: a vblank arrived. Package the timestamp/sequence into an
 * asynchronous thread event and post it toward the frontend thread.
 * (the local 'ret' declaration is not visible in this view) */
_tdm_output_cb_vblank(tdm_output *output_backend, unsigned int sequence,
					  unsigned int tv_sec, unsigned int tv_usec, void *user_data)
	tdm_private_output_vblank_handler *vblank_handler = user_data;
	tdm_thread_cb_output_vblank output_vblank;

	memset(&output_vblank, 0, sizeof output_vblank);
	output_vblank.base.type = TDM_THREAD_CB_OUTPUT_VBLANK;
	output_vblank.base.length = sizeof output_vblank;
	output_vblank.base.object_stamp = vblank_handler->private_output->stamp;
	output_vblank.base.data = vblank_handler;
	/* sync=0: the backend thread does not wait for delivery */
	output_vblank.base.sync = 0;
	output_vblank.sequence = sequence;
	output_vblank.tv_sec = tv_sec;
	output_vblank.tv_usec = tv_usec;

	/* mark as in flight so the timeout dump can tell waiting from sent */
	vblank_handler->sent_to_frontend = 1;

	ret = tdm_thread_cb_call(vblank_handler->private_output, &output_vblank.base);
	TDM_WARNING_IF_FAIL(ret == TDM_ERROR_NONE);
/* Thread-side dispatcher for an output commit completion. Unregisters and
 * unlinks the one-shot commit handler, releases committed layer buffers
 * (output-commit mode only), invokes the user's commit callback with the
 * lock dropped, then frees the handler. */
_tdm_output_thread_cb_commit(tdm_private_display *private_display, void *object, tdm_thread_cb_base *cb_base, void *user_data)
	tdm_thread_cb_output_commit *output_commit = (tdm_thread_cb_output_commit *)cb_base;
	tdm_private_output_commit_handler *output_commit_handler = output_commit->base.data;
	tdm_private_output *private_output = object;
	tdm_private_layer *private_layer = NULL;

	TDM_RETURN_IF_FAIL(TDM_MUTEX_IS_LOCKED());

	/* commits issued without a handler carry no per-commit state
	 * (the early-return body is presumably on a line not visible here) */
	if (!output_commit_handler)

	/* commit events must be delivered on the thread that issued the commit */
	assert(output_commit_handler->owner_tid == syscall(SYS_gettid));

	tdm_thread_cb_remove(private_output, TDM_THREAD_CB_OUTPUT_COMMIT, output_commit_handler, _tdm_output_thread_cb_commit, NULL);

	LIST_DEL(&output_commit_handler->link);

	if (tdm_debug_module & TDM_DEBUG_COMMIT) {
		TDM_INFO("----------------------------------------- output(%d) committed", private_output->pipe);
		TDM_INFO("handler(%p)", output_commit_handler);

	if (private_output->commit_type == TDM_COMMIT_TYPE_OUTPUT) {
		/* In case of layer commit, the below will be handled in the layer commit callback */
		LIST_FOR_EACH_ENTRY(private_layer, &private_output->layer_list, link) {
			if (private_layer->committed_buffer)
				tdm_layer_committed(private_layer, &private_layer->committed_buffer);

	/* user callback runs with the display lock dropped */
	if (output_commit_handler->func) {
		_pthread_mutex_unlock(&private_display->lock);
		output_commit_handler->func(private_output,
									output_commit->sequence,
									output_commit->tv_sec,
									output_commit->tv_usec,
									output_commit_handler->user_data);
		_pthread_mutex_lock(&private_display->lock);

	free(output_commit_handler);

	if (tdm_debug_module & TDM_DEBUG_COMMIT)
		TDM_INFO("-----------------------------------------...");
/* Backend callback: an output commit completed. Resolve the owning
 * private_output (from the handler if present, otherwise via a display
 * lookup on the backend handle — the 'else' line is presumably in a gap
 * not visible here), then post an asynchronous commit event toward the
 * frontend thread. (the local 'ret' declaration is not visible here) */
_tdm_output_cb_commit(tdm_output *output_backend, unsigned int sequence,
					  unsigned int tv_sec, unsigned int tv_usec, void *user_data)
	tdm_private_output_commit_handler *output_commit_handler = user_data;
	tdm_private_output *private_output;
	tdm_thread_cb_output_commit output_commit;

	if (output_commit_handler)
		private_output = output_commit_handler->private_output;

	private_output = tdm_display_find_private_output(tdm_display_get(), output_backend);

	memset(&output_commit, 0, sizeof output_commit);
	output_commit.base.type = TDM_THREAD_CB_OUTPUT_COMMIT;
	output_commit.base.length = sizeof output_commit;
	output_commit.base.object_stamp = private_output->stamp;
	output_commit.base.data = output_commit_handler;
	/* sync=0: the backend thread does not wait for delivery */
	output_commit.base.sync = 0;
	output_commit.sequence = sequence;
	output_commit.tv_sec = tv_sec;
	output_commit.tv_usec = tv_usec;

	ret = tdm_thread_cb_call(private_output, &output_commit.base);
	TDM_WARNING_IF_FAIL(ret == TDM_ERROR_NONE);
/* add_front: To distinguish between the user vblank handlers and the layer
 * commit vblank handlers. The layer commit handlers will be called
 * before calling the user vblank handlers.
 */
/* Core vblank-wait implementation (called with the display lock held).
 * Registers the backend vblank handler once per output, allocates a handler
 * record, and — unless an identical (interval, sync, tid) request is already
 * pending — asks the backend to wait for a vblank and arms a 1s timeout.
 * (several lines are not visible in this view: the return-type line, the
 * interval clamp, the skip_request set/test, the final return, and the
 * error-path return) */
_tdm_output_wait_vblank(tdm_private_output *private_output, int interval, int sync,
						tdm_output_vblank_handler func, void *user_data,
						unsigned int add_front)
	tdm_private_backend *private_backend;
	tdm_func_output *func_output;
	tdm_private_output_vblank_handler *vblank_handler = NULL, *v = NULL;
	unsigned int skip_request = 0;
	pid_t tid = syscall(SYS_gettid);
	tdm_error ret = TDM_ERROR_NONE;

	private_backend = private_output->private_backend;
	func_output = &private_backend->func_output;

	/* interval SHOULD be at least 1 */

	if (!func_output->output_wait_vblank) {
		/* LCOV_EXCL_START */
		TDM_WRN("not implemented!!");
		return TDM_ERROR_NOT_IMPLEMENTED;

	/* register the backend->frontend vblank callback once per output */
	if (!private_output->regist_vblank_cb) {
		private_output->regist_vblank_cb = 1;
		ret = func_output->output_set_vblank_handler(private_output->output_backend,
													 _tdm_output_cb_vblank);

	vblank_handler = calloc(1, sizeof(tdm_private_output_vblank_handler));
	if (!vblank_handler) {
		/* LCOV_EXCL_START */
		TDM_ERR("failed: alloc memory");
		return TDM_ERROR_OUT_OF_MEMORY;

	if (tdm_debug_module & TDM_DEBUG_COMMIT)
		TDM_INFO("output(%d) wait_vblank: handler(%p)", private_output->pipe, vblank_handler);

	/* an identical pending request means the backend wait can be skipped;
	 * the skip_request assignment is presumably in a gap not visible here */
	LIST_FOR_EACH_ENTRY(v, &private_output->vblank_handler_list, link) {
		if (v->interval == interval && v->sync == sync && v->owner_tid == tid) {

	/* add_front chooses head vs tail ordering (see comment above) */
	LIST_ADD(&vblank_handler->link, &private_output->vblank_handler_list);

	LIST_ADDTAIL(&vblank_handler->link, &private_output->vblank_handler_list);

	vblank_handler->private_output = private_output;
	vblank_handler->interval = interval;
	vblank_handler->sync = sync;
	vblank_handler->func = func;
	vblank_handler->user_data = user_data;
	vblank_handler->owner_tid = tid;

	/* If there is the previous request, we can skip to call output_wait_vblank() */

	ret = tdm_thread_cb_add(private_output, TDM_THREAD_CB_OUTPUT_VBLANK, vblank_handler, _tdm_output_thread_cb_vblank, NULL);
	if (ret != TDM_ERROR_NONE) {
		TDM_ERR("tdm_thread_cb_add failed");

	ret = func_output->output_wait_vblank(private_output->output_backend, interval,
										  sync, vblank_handler);
	TDM_GOTO_IF_FAIL(ret == TDM_ERROR_NONE, wait_failed);

	/* expect the vblank within one second; otherwise the timeout cb logs */
	_tdm_output_vblank_timeout_update(private_output, 1000);

	if (tdm_debug_module & TDM_DEBUG_COMMIT)
		TDM_INFO("output(%d) backend wait_vblank", private_output->pipe);

	/* LCOV_EXCL_START */
	/* error path: undo registration and free the handler
	 * (the 'wait_failed:' label line is presumably in a gap above) */
	if (vblank_handler) {
		tdm_thread_cb_remove(private_output, TDM_THREAD_CB_OUTPUT_VBLANK, vblank_handler, _tdm_output_thread_cb_vblank, NULL);
		LIST_DEL(&vblank_handler->link);
		free(vblank_handler);
/* Public API: wait 'interval' vblanks on this output, then invoke func.
 * Fails with TDM_ERROR_DPMS_OFF while vsync is unavailable due to DPMS. */
tdm_output_wait_vblank(tdm_output *output, int interval, int sync,
					   tdm_output_vblank_handler func, void *user_data)
	TDM_RETURN_VAL_IF_FAIL(interval > 0, TDM_ERROR_INVALID_PARAMETER);

	_pthread_mutex_lock(&private_display->lock);

	/* no vblanks are generated while DPMS has vsync off */
	if (TDM_OUTPUT_DPMS_VSYNC_IS_OFF(private_output->current_dpms_value)) {
		TDM_WRN("output(%d) dpms: %s", private_output->pipe,
				tdm_dpms_str(private_output->current_dpms_value));
		_pthread_mutex_unlock(&private_display->lock);
		return TDM_ERROR_DPMS_OFF;

	/* add_front=0: user handlers go to the tail of the handler list */
	ret = _tdm_output_wait_vblank(private_output, interval, sync, func, user_data, 0);

	_pthread_mutex_unlock(&private_display->lock);
/* LCOV_EXCL_START */
/* Like tdm_output_wait_vblank, but adds the handler to the FRONT of the
 * list so it runs before user handlers (used for layer-commit vblanks —
 * see the add_front comment above _tdm_output_wait_vblank). */
tdm_output_wait_vblank_add_front(tdm_output *output, int interval, int sync,
								 tdm_output_vblank_handler func, void *user_data)
	_pthread_mutex_lock(&private_display->lock);

	/* no vblanks are generated while DPMS has vsync off */
	if (TDM_OUTPUT_DPMS_VSYNC_IS_OFF(private_output->current_dpms_value)) {
		TDM_WRN("output(%d) dpms: %s", private_output->pipe,
				tdm_dpms_str(private_output->current_dpms_value));
		_pthread_mutex_unlock(&private_display->lock);
		return TDM_ERROR_DPMS_OFF;

	/* add_front=1: this handler precedes user vblank handlers */
	ret = _tdm_output_wait_vblank(private_output, interval, sync, func, user_data, 1);

	_pthread_mutex_unlock(&private_display->lock);
/* Neutralize (not free) a pending vblank handler matching (func, user_data).
 * Must be called with the display lock held. The record itself is freed
 * later, when its vblank event is processed. */
tdm_output_remove_vblank_handler_internal(tdm_output *output, tdm_output_vblank_handler func, void *user_data)
	tdm_private_output *private_output = (tdm_private_output*)output;
	tdm_private_output_vblank_handler *v = NULL;

	TDM_RETURN_IF_FAIL(private_output != NULL);
	TDM_RETURN_IF_FAIL(TDM_MUTEX_IS_LOCKED());

	LIST_FOR_EACH_ENTRY(v, &private_output->vblank_handler_list, link) {
		if (v->func == func && v->user_data == user_data) {
			/* only set func & user_data to NULL. It will be freed when an event occurs */
			v->user_data = NULL;
/* Neutralize (not free) a pending commit handler matching (func, user_data).
 * Must be called with the display lock held. The record itself is freed
 * later, when its commit event is processed. */
tdm_output_remove_commit_handler_internal(tdm_output *output, tdm_output_commit_handler func, void *user_data)
	tdm_private_output *private_output = (tdm_private_output*)output;
	tdm_private_output_commit_handler *c = NULL;

	TDM_RETURN_IF_FAIL(private_output != NULL);
	TDM_RETURN_IF_FAIL(TDM_MUTEX_IS_LOCKED());

	LIST_FOR_EACH_ENTRY(c, &private_output->output_commit_handler_list, link) {
		if (c->func == func && c->user_data == user_data) {
			/* only set func & user_data to NULL. It will be freed when an event occurs */
			c->user_data = NULL;
/* Public wrapper: take the display lock and neutralize the matching
 * pending vblank handler. */
tdm_output_remove_vblank_handler(tdm_output *output, tdm_output_vblank_handler func, void *user_data)
	OUTPUT_FUNC_ENTRY();

	_pthread_mutex_lock(&private_display->lock);

	tdm_output_remove_vblank_handler_internal(output, func, user_data);

	_pthread_mutex_unlock(&private_display->lock);
/* Public wrapper: take the display lock and neutralize the matching
 * pending commit handler. */
tdm_output_remove_commit_handler(tdm_output *output, tdm_output_commit_handler func, void *user_data)
	OUTPUT_FUNC_ENTRY();

	_pthread_mutex_lock(&private_display->lock);

	tdm_output_remove_commit_handler_internal(output, func, user_data);

	_pthread_mutex_unlock(&private_display->lock);
/* Core output commit (called with the display lock held). When DPMS allows
 * vsync: registers the backend commit callback once, allocates a one-shot
 * commit handler, hooks it into the thread callback machinery, and issues
 * the backend commit. Regardless of DPMS, promotes each layer's
 * waiting_buffer to committed_buffer; with vsync off, the user's handler is
 * invoked directly instead of committing. (the return-type line, some
 * closing braces, the 'commit_failed:' label, and the final returns are not
 * visible in this view) */
tdm_output_commit_internal(tdm_output *output, int sync, tdm_output_commit_handler func, void *user_data)
	tdm_private_output *private_output;
	tdm_private_backend *private_backend;
	tdm_func_output *func_output;
	tdm_private_output_commit_handler *output_commit_handler = NULL;
	tdm_private_layer *private_layer = NULL;
	tdm_output_dpms dpms_value = TDM_OUTPUT_DPMS_ON;
	tdm_error ret = TDM_ERROR_NONE;

	TDM_RETURN_VAL_IF_FAIL(tdm_output_is_valid(output), TDM_ERROR_INVALID_PARAMETER);

	private_output = (tdm_private_output*)output;
	private_backend = private_output->private_backend;
	func_output = &private_backend->func_output;

	/* mandatory backend hook for committing */
	if (!func_output->output_commit) {
		/* LCOV_EXCL_START */
		TDM_WRN("not implemented!!");
		return TDM_ERROR_NOT_IMPLEMENTED;
		/* LCOV_EXCL_STOP */

	ret = tdm_output_get_dpms_internal(output, &dpms_value);
	TDM_RETURN_VAL_IF_FAIL(ret == TDM_ERROR_NONE, ret);

	/* real backend commit only happens while vsync is available */
	if (!TDM_OUTPUT_DPMS_VSYNC_IS_OFF(dpms_value)) {
		/* register the backend->frontend commit callback once per output */
		if (!private_output->regist_commit_cb) {
			private_output->regist_commit_cb = 1;
			ret = func_output->output_set_commit_handler(private_output->output_backend, _tdm_output_cb_commit);
			TDM_GOTO_IF_FAIL(ret == TDM_ERROR_NONE, commit_failed);

		output_commit_handler = calloc(1, sizeof(tdm_private_output_commit_handler));
		if (!output_commit_handler) {
			/* LCOV_EXCL_START */
			TDM_ERR("failed: alloc memory");
			return TDM_ERROR_OUT_OF_MEMORY;
			/* LCOV_EXCL_STOP */

		ret = tdm_thread_cb_add(private_output, TDM_THREAD_CB_OUTPUT_COMMIT, output_commit_handler, _tdm_output_thread_cb_commit, NULL);
		if (ret != TDM_ERROR_NONE) {
			TDM_ERR("tdm_thread_cb_add failed");
			free(output_commit_handler);

		LIST_ADDTAIL(&output_commit_handler->link, &private_output->output_commit_handler_list);
		output_commit_handler->private_output = private_output;
		output_commit_handler->func = func;
		output_commit_handler->user_data = user_data;
		/* commit completion is delivered back on this thread */
		output_commit_handler->owner_tid = syscall(SYS_gettid);

		ret = func_output->output_commit(private_output->output_backend, sync,
										 output_commit_handler);
		TDM_GOTO_IF_FAIL(ret == TDM_ERROR_NONE, commit_failed);

		if (tdm_debug_module & TDM_DEBUG_COMMIT)
			TDM_INFO("output(%d) backend commit: handle(%p) func(%p) user_data(%p)",
					 private_output->pipe, output_commit_handler, func, user_data);

	/* Even if DPMS is off, committed_buffer should be changed because it will be referred
	 * for tdm_layer_committed() function.
	 */
	LIST_FOR_EACH_ENTRY(private_layer, &private_output->layer_list, link) {
		/* layers without a pending buffer are left untouched
		 * (the 'continue' line is presumably in a gap not visible here) */
		if (!private_layer->waiting_buffer)

		private_layer->committed_buffer = private_layer->waiting_buffer;
		private_layer->waiting_buffer = NULL;
		/* NOTE(review): waiting_buffer is logged after being set to NULL —
		 * this trace always prints waiting_buffer as nil; confirm intent */
		if (tdm_debug_module & TDM_DEBUG_BUFFER)
			TDM_INFO("layer(%p) waiting_buffer(%p) committed_buffer(%p)",
					 private_layer, private_layer->waiting_buffer,
					 private_layer->committed_buffer->buffer);

	/* vsync off: no commit occurs; call the handler directly with zeros */
	if (TDM_OUTPUT_DPMS_VSYNC_IS_OFF(dpms_value)) {
		TDM_WRN("dpms %s. Directly call commit handler instead of commit.", tdm_dpms_str(dpms_value));

		func(output, 0, 0, 0, user_data);

	/* LCOV_EXCL_START */
	/* error path: undo registration and free the handler
	 * (the 'commit_failed:' label line is presumably in a gap above) */
	if (output_commit_handler) {
		tdm_thread_cb_remove(private_output, TDM_THREAD_CB_OUTPUT_COMMIT, output_commit_handler, _tdm_output_thread_cb_commit, NULL);
		LIST_DEL(&output_commit_handler->link);
		free(output_commit_handler);

	/* LCOV_EXCL_STOP */
/* Commit all layers' pending data of this output at once.
 * Locks private_display->lock for the whole operation; actual backend work
 * is delegated to tdm_output_commit_internal(). Fails with BAD_REQUEST if
 * the output is already committed per-layer or per-vblank, and with
 * DPMS_OFF when vsync is unavailable.
 * NOTE(review): the second half of the signature (void *user_data) and the
 * trailing "return ret; }" are not visible in this excerpt — confirm. */
1160 tdm_output_commit(tdm_output *output, int sync, tdm_output_commit_handler func,
1163 tdm_private_layer *private_layer = NULL;
1165 OUTPUT_FUNC_ENTRY();
1167 _pthread_mutex_lock(&private_display->lock);
/* Lock the output into "output commit" mode; layer-style commits become illegal. */
1169 if (private_output->commit_type == TDM_COMMIT_TYPE_NONE)
1170 private_output->commit_type = TDM_COMMIT_TYPE_OUTPUT;
1171 else if (private_output->commit_type == TDM_COMMIT_TYPE_LAYER) {
1172 TDM_ERR("Can't supported. Use tdm_layer_commit");
1173 _pthread_mutex_unlock(&private_display->lock);
1174 return TDM_ERROR_BAD_REQUEST;
/* commit-per-vblank outputs must go through tdm_layer_commit instead */
1177 if (private_output->commit_per_vblank) {
1178 TDM_ERR("Use tdm_layer_commit");
1179 _pthread_mutex_unlock(&private_display->lock);
1180 return TDM_ERROR_BAD_REQUEST;
/* no vsync while DPMS is off: committing now could never complete */
1183 if (TDM_OUTPUT_DPMS_VSYNC_IS_OFF(private_output->current_dpms_value)) {
1184 TDM_ERR("output(%d) dpms: %s", private_output->pipe,
1185 tdm_dpms_str(private_output->current_dpms_value));
1186 _pthread_mutex_unlock(&private_display->lock);
1187 return TDM_ERROR_DPMS_OFF;
1190 if (tdm_debug_module & TDM_DEBUG_COMMIT)
1191 TDM_INFO("output(%d) commit", private_output->pipe);
1193 /* apply the pending data of all layers */
1194 LIST_FOR_EACH_ENTRY(private_layer, &private_output->layer_list, link) {
1195 tdm_layer_commit_pending_data(private_layer);
1198 ret = tdm_output_commit_internal(output, sync, func, user_data);
1200 _pthread_mutex_unlock(&private_display->lock);
/* Set the display mode of the output via the backend's output_set_mode().
 * On success caches the mode pointer in current_mode (the backend's mode
 * table must stay alive) and marks need_set_target_info so the hwc target
 * buffer geometry is refreshed on the next commit.
 * Returns NOT_IMPLEMENTED when the backend lacks the callback. */
1206 tdm_output_set_mode(tdm_output *output, const tdm_output_mode *mode)
1208 tdm_private_backend *private_backend;
1209 tdm_func_output *func_output;
1210 OUTPUT_FUNC_ENTRY();
1212 TDM_RETURN_VAL_IF_FAIL(mode != NULL, TDM_ERROR_INVALID_PARAMETER);
1214 _pthread_mutex_lock(&private_display->lock);
1216 private_backend = private_output->private_backend;
1217 func_output = &private_backend->func_output;
1219 if (!func_output->output_set_mode) {
1220 /* LCOV_EXCL_START */
1221 _pthread_mutex_unlock(&private_display->lock);
1222 TDM_WRN("not implemented!!");
1223 return TDM_ERROR_NOT_IMPLEMENTED;
1224 /* LCOV_EXCL_STOP */
1227 ret = func_output->output_set_mode(private_output->output_backend, mode);
1228 if (ret == TDM_ERROR_NONE) {
/* cache only on success; callers read current_mode via tdm_output_get_mode() */
1229 private_output->current_mode = mode;
1230 private_output->need_set_target_info = 1;
1231 TDM_INFO("mode: %dx%d %dhz", mode->hdisplay, mode->vdisplay, mode->vrefresh);
1234 _pthread_mutex_unlock(&private_display->lock);
1240 tdm_output_get_mode(tdm_output *output, const tdm_output_mode **mode)
1242 OUTPUT_FUNC_ENTRY();
1244 TDM_RETURN_VAL_IF_FAIL(mode != NULL, TDM_ERROR_INVALID_PARAMETER);
1246 _pthread_mutex_lock(&private_display->lock);
1248 *mode = private_output->current_mode;
1250 _pthread_mutex_unlock(&private_display->lock);
/* Synchronously change the DPMS state of the output.
 * Extended (vendor) DPMS values are only accepted when the output advertises
 * TDM_OUTPUT_CAPABILITY_EXTENDED_DPMS and the low 4 bits are clear.
 * On success, updates current_dpms_value and notifies change listeners;
 * on backend failure, re-reads the real value from the backend. */
1256 tdm_output_set_dpms(tdm_output *output, tdm_output_dpms dpms_value)
1258 tdm_private_backend *private_backend;
1259 tdm_func_output *func_output;
1260 OUTPUT_FUNC_ENTRY();
/* values above DPMS_OFF are extended modes and need extra validation */
1262 if (dpms_value > TDM_OUTPUT_DPMS_OFF) {
1263 if (dpms_value & TDM_OUTPUT_DPMS_DEFAULT_MASK) {
1264 TDM_ERR("Don't use the low-4bit for an extended DPMS mode: dpms_value(%x)", dpms_value);
1265 return TDM_ERROR_BAD_REQUEST;
1268 if (!(private_output->caps.capabilities & TDM_OUTPUT_CAPABILITY_EXTENDED_DPMS)) {
1269 TDM_ERR("output(%d) doesn't support the extended DPMS control: '%s'",
1270 private_output->pipe, tdm_dpms_str(dpms_value));
1271 return TDM_ERROR_BAD_REQUEST;
1275 _pthread_mutex_lock(&private_display->lock);
/* an async DPMS change is still in flight; reject a second request */
1277 if (private_output->waiting_dpms_change) {
1278 TDM_ERR("DPMS is not changed yet. Can't be changed twice");
1279 _pthread_mutex_unlock(&private_display->lock);
1280 return TDM_ERROR_BAD_REQUEST;
1283 private_backend = private_output->private_backend;
1284 func_output = &private_backend->func_output;
1286 TDM_INFO("output(%d) dpms '%s'", private_output->pipe, tdm_dpms_str(dpms_value));
/* missing backend callback is treated as success (state tracked locally) */
1288 if (func_output->output_set_dpms)
1289 ret = func_output->output_set_dpms(private_output->output_backend, dpms_value);
1291 /* LCOV_EXCL_START */
1292 ret = TDM_ERROR_NONE;
1293 TDM_WRN("not implemented!!");
1295 /* LCOV_EXCL_STOP */
1299 if (ret == TDM_ERROR_NONE) {
1300 if (private_output->current_dpms_value != dpms_value) {
/* NOTE(review): 'value' declaration line is not visible in this excerpt */
1302 private_output->current_dpms_value = dpms_value;
1303 value.u32 = dpms_value;
1304 _tdm_output_call_thread_cb_change(private_output, TDM_OUTPUT_CHANGE_DPMS, value);
1305 TDM_INFO("output(%d) dpms '%s' done", private_output->pipe, tdm_dpms_str(dpms_value));
1308 tdm_output_dpms temp = TDM_OUTPUT_DPMS_OFF;
1310 /* update current_dpms_value forcely */
1311 tdm_output_get_dpms_internal(output, &temp);
1313 TDM_ERR("output(%d) set_dpms failed: dpms '%s'", private_output->pipe, tdm_dpms_str(temp));
1316 _pthread_mutex_unlock(&private_display->lock);
1321 /* LCOV_EXCL_START */
/* Asynchronously change the DPMS state. Requires ASYNC_DPMS capability and
 * both output_set_dpms_handler and output_set_dpms_async backend callbacks.
 * On success sets waiting_dpms_change; completion is reported through
 * tdm_output_cb_dpms registered once per output. */
1323 tdm_output_set_dpms_async(tdm_output *output, tdm_output_dpms dpms_value)
1325 tdm_private_backend *private_backend;
1326 tdm_func_output *func_output;
1327 OUTPUT_FUNC_ENTRY();
1329 if (!(private_output->caps.capabilities & TDM_OUTPUT_CAPABILITY_ASYNC_DPMS)) {
1330 TDM_ERR("output doesn't support the asynchronous DPMS control!");
1331 return TDM_ERROR_BAD_REQUEST;
/* same extended-DPMS validation as the synchronous path */
1334 if (dpms_value > TDM_OUTPUT_DPMS_OFF) {
1335 if (dpms_value & TDM_OUTPUT_DPMS_DEFAULT_MASK) {
1336 TDM_ERR("Don't use the low-4bit for an extended DPMS mode: dpms_value(%x)", dpms_value);
1337 return TDM_ERROR_BAD_REQUEST;
1340 if (!(private_output->caps.capabilities & TDM_OUTPUT_CAPABILITY_EXTENDED_DPMS)) {
1341 TDM_ERR("output(%d) doesn't support the extended DPMS control: '%s'",
1342 private_output->pipe, tdm_dpms_str(dpms_value));
1343 return TDM_ERROR_BAD_REQUEST;
1347 _pthread_mutex_lock(&private_display->lock);
1349 if (private_output->waiting_dpms_change) {
1350 TDM_ERR("DPMS is not changed yet. Can't be changed twice");
1351 _pthread_mutex_unlock(&private_display->lock);
1352 return TDM_ERROR_BAD_REQUEST;
1355 private_backend = private_output->private_backend;
1356 func_output = &private_backend->func_output;
1357 if (!func_output->output_set_dpms_handler) {
1358 TDM_WRN("not implemented: output_set_dpms_handler");
1359 _pthread_mutex_unlock(&private_display->lock);
1360 return TDM_ERROR_NOT_IMPLEMENTED;
1363 if (!func_output->output_set_dpms_async) {
1364 TDM_WRN("not implemented: output_set_dpms_async");
1365 _pthread_mutex_unlock(&private_display->lock);
1366 return TDM_ERROR_NOT_IMPLEMENTED;
/* register the completion handler only once per output lifetime */
1369 if (!private_output->regist_dpms_cb) {
1370 private_output->regist_dpms_cb = 1;
1371 ret = func_output->output_set_dpms_handler(private_output->output_backend,
1372 tdm_output_cb_dpms, private_output);
1373 if (ret != TDM_ERROR_NONE) {
1374 _pthread_mutex_unlock(&private_display->lock);
1375 TDM_ERR("Can't set the dpms handler!!");
1380 TDM_INFO("output(%d) dpms async '%s'", private_output->pipe, tdm_dpms_str(dpms_value));
1382 ret = func_output->output_set_dpms_async(private_output->output_backend, dpms_value);
1384 if (ret == TDM_ERROR_NONE) {
/* blocks further DPMS requests until the backend reports completion */
1385 private_output->waiting_dpms_change = 1;
1386 TDM_INFO("output(%d) dpms async '%s' waiting", private_output->pipe, tdm_dpms_str(dpms_value));
1388 tdm_output_dpms temp = TDM_OUTPUT_DPMS_OFF;
1390 /* update current_dpms_value forcely */
1391 tdm_output_get_dpms_internal(output, &temp);
1393 TDM_ERR("output(%d) set_dpms_async failed: dpms '%s'", private_output->pipe, tdm_dpms_str(temp));
1396 _pthread_mutex_unlock(&private_display->lock);
1400 /* LCOV_EXCL_STOP */
1403 tdm_output_get_dpms_internal(tdm_output *output, tdm_output_dpms *dpms_value)
1405 tdm_private_output *private_output;
1406 tdm_private_backend *private_backend;
1407 tdm_func_output *func_output;
1408 tdm_error ret = TDM_ERROR_NONE;
1410 TDM_RETURN_VAL_IF_FAIL(tdm_output_is_valid(output), TDM_ERROR_INVALID_PARAMETER);
1412 private_output = (tdm_private_output*)output;
1414 /* TODO: this is ugly. But before calling backend's output_get_dpms(), we have
1415 * to check if all backends's DPMS operation has no problem. In future, we'd
1416 * better use new env instead of using commit_per_vblank variable to distinguish
1417 * whether we use the stored value or backend's output_get_dpms.
1419 if (!private_output->commit_per_vblank) {
1420 *dpms_value = private_output->current_dpms_value;
1421 return TDM_ERROR_NONE;
1424 private_backend = private_output->private_backend;
1425 func_output = &private_backend->func_output;
1427 if (!func_output->output_get_dpms) {
1428 /* LCOV_EXCL_START */
1429 *dpms_value = private_output->current_dpms_value;
1430 TDM_WRN("not implemented!!");
1431 return TDM_ERROR_NONE;
1432 /* LCOV_EXCL_STOP */
1435 ret = func_output->output_get_dpms(private_output->output_backend, dpms_value);
1436 if (ret != TDM_ERROR_NONE) {
1437 /* LCOV_EXCL_START */
1438 TDM_ERR("output_get_dpms failed");
1439 *dpms_value = TDM_OUTPUT_DPMS_OFF;
1440 /* LCOV_EXCL_STOP */
1443 /* checking with backend's value */
1444 if (*dpms_value != private_output->current_dpms_value) {
1446 TDM_ERR("output(%d) dpms changed suddenly: %s -> %s",
1447 private_output->pipe, private_output->current_dpms_value,
1448 tdm_dpms_str(*dpms_value));
1449 private_output->current_dpms_value = *dpms_value;
1450 value.u32 = *dpms_value;
1451 _tdm_output_call_thread_cb_change(private_output, TDM_OUTPUT_CHANGE_DPMS, value);
1458 tdm_output_get_dpms(tdm_output *output, tdm_output_dpms *dpms_value)
1460 OUTPUT_FUNC_ENTRY();
1462 TDM_RETURN_VAL_IF_FAIL(dpms_value != NULL, TDM_ERROR_INVALID_PARAMETER);
1464 _pthread_mutex_lock(&private_display->lock);
1466 ret = tdm_output_get_dpms_internal(output, dpms_value);
1468 _pthread_mutex_unlock(&private_display->lock);
1473 EXTERN tdm_capture *
1474 tdm_output_create_capture(tdm_output *output, tdm_error *error)
1476 tdm_capture *capture = NULL;
1478 OUTPUT_FUNC_ENTRY_ERROR();
1480 _pthread_mutex_lock(&private_display->lock);
1482 capture = (tdm_capture *)tdm_capture_create_output_internal(private_output, error);
1484 _pthread_mutex_unlock(&private_display->lock);
1489 EXTERN tdm_hwc_window *
/* Create a normal (non-video) hwc window on this output.
 * Requires TDM_OUTPUT_CAPABILITY_HWC; otherwise NULL is returned with
 * *error = TDM_ERROR_BAD_REQUEST. The '0' argument selects a non-video
 * window (compare tdm_output_hwc_create_video_window, which passes 1). */
1490 tdm_output_hwc_create_window(tdm_output *output, tdm_error *error)
1492 tdm_hwc_window *hwc_window = NULL;
1494 OUTPUT_FUNC_ENTRY_ERROR();
1496 _pthread_mutex_lock(&private_display->lock);
1498 if (private_output->caps.capabilities & TDM_OUTPUT_CAPABILITY_HWC)
1499 hwc_window = (tdm_hwc_window *)tdm_hwc_window_create_internal(private_output, 0, error);
1501 /* LCOV_EXCL_START */
1502 TDM_ERR("output(%p) not support HWC", private_output);
1504 *error = TDM_ERROR_BAD_REQUEST;
1505 /* LCOV_EXCL_STOP */
1508 _pthread_mutex_unlock(&private_display->lock);
1513 EXTERN tdm_hwc_window *
/* Create a video hwc window on this output ('1' selects the video kind).
 * Same capability/error contract as tdm_output_hwc_create_window(). */
1514 tdm_output_hwc_create_video_window(tdm_output *output, tdm_error *error)
1516 tdm_hwc_window *hwc_window = NULL;
1518 OUTPUT_FUNC_ENTRY_ERROR();
1520 _pthread_mutex_lock(&private_display->lock);
1522 if (private_output->caps.capabilities & TDM_OUTPUT_CAPABILITY_HWC)
1523 hwc_window = (tdm_hwc_window *)tdm_hwc_window_create_internal(private_output, 1, error);
1525 /* LCOV_EXCL_START */
1526 TDM_ERR("output(%p) not support HWC", private_output);
1528 *error = TDM_ERROR_BAD_REQUEST;
1529 /* LCOV_EXCL_STOP */
1532 _pthread_mutex_unlock(&private_display->lock);
1538 tdm_output_hwc_destroy_window(tdm_output *output, tdm_hwc_window *hwc_window)
1540 OUTPUT_FUNC_ENTRY();
1542 TDM_RETURN_VAL_IF_FAIL(hwc_window != NULL, TDM_ERROR_INVALID_PARAMETER);
1544 _pthread_mutex_lock(&private_display->lock);
1546 ret = tdm_hwc_window_destroy_internal(hwc_window);
1548 _pthread_mutex_unlock(&private_display->lock);
/* Ask the backend to validate the given composited hwc windows.
 * Frontend window handles are translated into backend handles before the
 * call; *num_types receives the number of windows whose composition type
 * the backend wants to change (fetch with
 * tdm_output_hwc_get_changed_composition_types). */
1554 tdm_output_hwc_validate(tdm_output *output, tdm_hwc_window **composited_wnds,
1555 uint32_t num_wnds, uint32_t *num_types)
1557 tdm_private_backend *private_backend;
1558 tdm_func_output *func_output = NULL;
1559 tdm_private_hwc_window **composited_wnds_frontend = NULL;
1560 tdm_hwc_window **composited_wnds_backend = NULL;
1563 OUTPUT_FUNC_ENTRY();
1565 TDM_RETURN_VAL_IF_FAIL(num_types != NULL, TDM_ERROR_INVALID_PARAMETER);
1567 _pthread_mutex_lock(&private_display->lock);
1569 if (!(private_output->caps.capabilities & TDM_OUTPUT_CAPABILITY_HWC)) {
1570 TDM_ERR("output(%p) not support HWC", private_output);
1571 _pthread_mutex_unlock(&private_display->lock);
1572 return TDM_ERROR_BAD_REQUEST;
1575 private_backend = private_output->private_backend;
1576 func_output = &private_backend->func_output;
1578 if (!func_output->output_hwc_validate) {
1579 /* LCOV_EXCL_START */
1580 _pthread_mutex_unlock(&private_display->lock);
1581 TDM_WRN("not implemented!!");
1582 return TDM_ERROR_NOT_IMPLEMENTED;
1583 /* LCOV_EXCL_STOP */
/* empty window list: validate with NULL and skip the translation array */
1586 if (num_wnds == 0) {
1587 ret = func_output->output_hwc_validate(private_output->output_backend, NULL, 0, num_types);
1589 _pthread_mutex_unlock(&private_display->lock);
/* translate frontend handles to the backend handles the driver knows */
1594 composited_wnds_backend = calloc(num_wnds, sizeof(tdm_hwc_window *));
1595 if (!composited_wnds_backend) {
1596 /* LCOV_EXCL_START */
1597 _pthread_mutex_unlock(&private_display->lock);
1598 return TDM_ERROR_OUT_OF_MEMORY;
1599 /* LCOV_EXCL_STOP */
1602 composited_wnds_frontend = (tdm_private_hwc_window **)composited_wnds;
1604 for (i = 0; i < num_wnds; i++)
1605 composited_wnds_backend[i] = composited_wnds_frontend[i]->hwc_window_backend;
1607 ret = func_output->output_hwc_validate(private_output->output_backend, composited_wnds_backend, num_wnds, num_types);
1609 free(composited_wnds_backend);
1611 _pthread_mutex_unlock(&private_display->lock);
/* Register the (single, write-once) handler invoked when the backend asks
 * for revalidation of this hwc output. A second registration fails with
 * TDM_ERROR_OPERATION_FAILED. */
1617 tdm_output_hwc_set_need_validate_handler(tdm_output *output,
1618 tdm_output_need_validate_handler hndl)
1620 OUTPUT_FUNC_ENTRY();
1622 TDM_RETURN_VAL_IF_FAIL(hndl != NULL, TDM_ERROR_INVALID_PARAMETER);
1624 _pthread_mutex_lock(&private_display->lock);
1626 if (!(private_output->caps.capabilities & TDM_OUTPUT_CAPABILITY_HWC)) {
1627 TDM_ERR("output(%p) not support HWC", private_output);
1628 _pthread_mutex_unlock(&private_display->lock);
1629 return TDM_ERROR_BAD_REQUEST;
1632 /* there's no reason to allow this */
1633 if (private_output->need_validate.hndl) {
1635 _pthread_mutex_unlock(&private_display->lock);
1636 return TDM_ERROR_OPERATION_FAILED;
1639 private_output->need_validate.hndl = hndl;
1641 _pthread_mutex_unlock(&private_display->lock);
/* Fetch the composition-type changes requested by the last validate call.
 * With hwc_window/composition_types NULL only *num_elements is filled.
 * Backend window handles returned by the driver are translated back into
 * frontend handles in-place in the hwc_window array. */
1647 tdm_output_hwc_get_changed_composition_types(tdm_output *output,
1648 uint32_t *num_elements,
1649 tdm_hwc_window **hwc_window,
1650 tdm_hwc_window_composition *composition_types)
1652 tdm_private_backend *private_backend;
1653 tdm_func_output *func_output = NULL;
1654 tdm_private_hwc_window * private_hwc_window = NULL;
1657 OUTPUT_FUNC_ENTRY();
1659 TDM_RETURN_VAL_IF_FAIL(num_elements != NULL, TDM_ERROR_INVALID_PARAMETER);
1661 _pthread_mutex_lock(&private_display->lock);
1663 if (!(private_output->caps.capabilities & TDM_OUTPUT_CAPABILITY_HWC)) {
1664 TDM_ERR("output(%p) not support HWC", private_output);
1665 _pthread_mutex_unlock(&private_display->lock);
1666 return TDM_ERROR_BAD_REQUEST;
1669 private_backend = private_output->private_backend;
1670 func_output = &private_backend->func_output;
1672 if (!func_output->output_hwc_get_changed_composition_types) {
1673 /* LCOV_EXCL_START */
1674 _pthread_mutex_unlock(&private_display->lock);
1675 TDM_WRN("not implemented!!");
1676 return TDM_ERROR_NOT_IMPLEMENTED;
1677 /* LCOV_EXCL_STOP */
1680 ret = func_output->output_hwc_get_changed_composition_types(private_output->output_backend,
1681 num_elements, hwc_window, composition_types);
1682 if (ret != TDM_ERROR_NONE) {
1683 /* LCOV_EXCL_START */
1684 _pthread_mutex_unlock(&private_display->lock);
1686 /* LCOV_EXCL_STOP */
/* count-only query: nothing to translate */
1689 if (hwc_window == NULL || composition_types == NULL) {
1690 _pthread_mutex_unlock(&private_display->lock);
1691 return TDM_ERROR_NONE;
/* map each backend handle back to its frontend wrapper */
1694 for (i = 0; i < *num_elements; i++) {
1696 private_hwc_window = _tdm_output_find_private_hwc_window(private_output, hwc_window[i]);
1698 if (private_hwc_window == NULL) {
1699 /* LCOV_EXCL_START */
1700 TDM_ERR("failed! This should never happen!");
1701 func_output->output_hwc_destroy_window(private_output->output_backend, hwc_window[i]);
1703 _pthread_mutex_unlock(&private_display->lock);
1704 return TDM_ERROR_OPERATION_FAILED;
1705 /* LCOV_EXCL_STOP */
1708 hwc_window[i] = (tdm_hwc_window*)private_hwc_window;
1711 _pthread_mutex_unlock(&private_display->lock);
1717 tdm_output_hwc_accept_changes(tdm_output *output)
1719 tdm_private_backend *private_backend;
1720 tdm_func_output *func_output = NULL;
1722 OUTPUT_FUNC_ENTRY();
1724 _pthread_mutex_lock(&private_display->lock);
1726 if (!(private_output->caps.capabilities & TDM_OUTPUT_CAPABILITY_HWC)) {
1727 TDM_ERR("output(%p) not support HWC", private_output);
1728 _pthread_mutex_unlock(&private_display->lock);
1729 return TDM_ERROR_BAD_REQUEST;
1732 private_backend = private_output->private_backend;
1733 func_output = &private_backend->func_output;
1735 if (!func_output->output_hwc_validate) {
1736 /* LCOV_EXCL_START */
1737 _pthread_mutex_unlock(&private_display->lock);
1738 TDM_WRN("not implemented!!");
1739 return TDM_ERROR_NOT_IMPLEMENTED;
1740 /* LCOV_EXCL_STOP */
1743 ret = func_output->output_hwc_accept_changes(private_output->output_backend);
1745 _pthread_mutex_unlock(&private_display->lock);
/* Return the tbm surface queue backing the hwc client-target buffer, or
 * NULL with *error set on failure.
 * NOTE(review): the not-implemented branch below unlocks and warns but its
 * return statement is not visible in this excerpt — confirm it returns NULL
 * and sets *error to TDM_ERROR_NOT_IMPLEMENTED. */
1751 tdm_output_hwc_get_target_buffer_queue(tdm_output *output, tdm_error *error)
1753 tdm_private_backend *private_backend;
1754 tdm_func_output *func_output = NULL;
1755 tbm_surface_queue_h queue = NULL;
1757 OUTPUT_FUNC_ENTRY_ERROR();
1759 _pthread_mutex_lock(&private_display->lock);
1761 if (!(private_output->caps.capabilities & TDM_OUTPUT_CAPABILITY_HWC)) {
1762 TDM_ERR("output(%p) not support HWC", private_output);
1764 *error = TDM_ERROR_BAD_REQUEST;
1765 _pthread_mutex_unlock(&private_display->lock);
1769 private_backend = private_output->private_backend;
1770 func_output = &private_backend->func_output;
1772 if (!func_output->output_hwc_get_target_buffer_queue) {
1773 /* LCOV_EXCL_START */
1774 _pthread_mutex_unlock(&private_display->lock);
1775 TDM_WRN("not implemented!!");
1777 /* LCOV_EXCL_STOP */
1780 queue = func_output->output_hwc_get_target_buffer_queue(private_output->output_backend, error);
1782 _pthread_mutex_unlock(&private_display->lock);
/* Hand the composited client-target buffer (plus damage region) to the
 * backend. When window dumping is enabled, the buffer is also written to
 * the debug dump directory. */
1788 tdm_output_hwc_set_client_target_buffer(tdm_output *output, tbm_surface_h target_buffer, tdm_hwc_region damage)
1790 tdm_private_backend *private_backend;
1791 tdm_func_output *func_output = NULL;
1793 OUTPUT_FUNC_ENTRY();
1795 _pthread_mutex_lock(&private_display->lock);
1797 if (!(private_output->caps.capabilities & TDM_OUTPUT_CAPABILITY_HWC)) {
1798 TDM_ERR("output(%p) not support HWC", private_output);
1799 _pthread_mutex_unlock(&private_display->lock);
1800 return TDM_ERROR_BAD_REQUEST;
/* optional debug dump of the incoming target buffer */
1803 if (tdm_debug_dump & TDM_DUMP_FLAG_WINDOW) {
1804 /* LCOV_EXCL_START */
1805 char str[TDM_PATH_LEN];
/* NOTE(review): the declaration of the dump counter 'i' is not visible in
 * this excerpt — presumably a static int; confirm. */
1807 snprintf(str, TDM_PATH_LEN, "target_window_%d_%03d",
1808 private_output->index, i++);
1809 tdm_helper_dump_buffer_str(target_buffer, tdm_debug_dump_dir, str);
1810 /* LCOV_EXCL_STOP */
1813 private_backend = private_output->private_backend;
1814 func_output = &private_backend->func_output;
1816 if (!func_output->output_hwc_set_client_target_buffer) {
1817 /* LCOV_EXCL_START */
1818 _pthread_mutex_unlock(&private_display->lock);
1819 TDM_WRN("not implemented!!");
1820 return TDM_ERROR_NOT_IMPLEMENTED;
1821 /* LCOV_EXCL_STOP */
1824 ret = func_output->output_hwc_set_client_target_buffer(private_output->output_backend, target_buffer, damage);
1826 _pthread_mutex_unlock(&private_display->lock);
1832 tdm_output_hwc_unset_client_target_buffer(tdm_output *output)
1834 tdm_private_backend *private_backend;
1835 tdm_func_output *func_output = NULL;
1837 OUTPUT_FUNC_ENTRY();
1839 _pthread_mutex_lock(&private_display->lock);
1841 if (!(private_output->caps.capabilities & TDM_OUTPUT_CAPABILITY_HWC)) {
1842 TDM_ERR("output(%p) not support HWC", private_output);
1843 _pthread_mutex_unlock(&private_display->lock);
1844 return TDM_ERROR_BAD_REQUEST;
1847 private_backend = private_output->private_backend;
1848 func_output = &private_backend->func_output;
1850 if (!func_output->output_hwc_unset_client_target_buffer) {
1851 /* LCOV_EXCL_START */
1852 _pthread_mutex_unlock(&private_display->lock);
1853 TDM_ERR("not implemented!!");
1854 return TDM_ERROR_NOT_IMPLEMENTED;
1855 /* LCOV_EXCL_STOP */
1858 ret = func_output->output_hwc_unset_client_target_buffer(private_output->output_backend);
1860 _pthread_mutex_unlock(&private_display->lock);
1866 _tdm_output_hwc_layer_commit_handler(tdm_layer *layer, unsigned int sequence,
1867 unsigned int tv_sec, unsigned int tv_usec,
1870 tdm_private_output_hwc_target_buffer_commit_handler *output_hwc_target_buffer_commit_handler = (tdm_private_output_hwc_target_buffer_commit_handler *)user_data;
1871 tdm_output_hwc_target_buffer_commit_handler func = output_hwc_target_buffer_commit_handler->func;
1872 tdm_output *output = (tdm_output *)output_hwc_target_buffer_commit_handler->private_output;
1873 void *data = output_hwc_target_buffer_commit_handler->user_data;
1875 func(output, sequence, tv_sec, tv_usec, data);
1877 free(output_hwc_target_buffer_commit_handler);
1881 tdm_output_hwc_commit_client_target_buffer(tdm_output *output, tdm_output_hwc_target_buffer_commit_handler func, void *user_data)
1883 tdm_private_backend *private_backend;
1884 tdm_func_output *func_output;
1885 tdm_private_output_hwc_target_buffer_commit_handler *output_hwc_target_buffer_commit_handler;
1886 tdm_layer *layer = NULL;
1887 tdm_private_layer *private_layer;
1888 const tdm_output_mode *mode;
1889 tbm_surface_h buffer;
1891 OUTPUT_FUNC_ENTRY();
1893 _pthread_mutex_lock(&private_display->lock);
1895 if (!(private_output->caps.capabilities & TDM_OUTPUT_CAPABILITY_HWC)) {
1896 TDM_ERR("output(%p) not support HWC", private_output);
1897 _pthread_mutex_unlock(&private_display->lock);
1898 return TDM_ERROR_BAD_REQUEST;
1901 private_backend = private_output->private_backend;
1902 func_output = &private_backend->func_output;
1904 if (!func_output->output_hwc_get_client_target_buffer_layer) {
1905 /* LCOV_EXCL_START */
1906 _pthread_mutex_unlock(&private_display->lock);
1907 TDM_ERR("not implemented!!");
1908 return TDM_ERROR_NOT_IMPLEMENTED;
1909 /* LCOV_EXCL_STOP */
1912 layer = func_output->output_hwc_get_client_target_buffer_layer(private_output->output_backend,
1915 /* LCOV_EXCL_START */
1916 _pthread_mutex_unlock(&private_display->lock);
1917 TDM_ERR("no assigned layer!!");
1918 return TDM_ERROR_INVALID_PARAMETER;
1919 /* LCOV_EXCL_STOP */
1922 private_layer = (tdm_private_layer*)layer;
1924 if (!func_output->output_hwc_get_client_target_buffer) {
1925 /* LCOV_EXCL_START */
1926 _pthread_mutex_unlock(&private_display->lock);
1927 TDM_ERR("not implemented!!");
1928 return TDM_ERROR_NOT_IMPLEMENTED;
1929 /* LCOV_EXCL_STOP */
1932 buffer = func_output->output_hwc_get_client_target_buffer(private_output->output_backend,
1935 ret = tdm_layer_set_buffer_internal(private_layer, buffer);
1937 ret = tdm_layer_unset_buffer_internal(private_layer);
1938 if (ret != TDM_ERROR_NONE) {
1939 /* LCOV_EXCL_START */
1940 TDM_ERR("failed: layer set info(window)");
1941 /* LCOV_EXCL_STOP */
1945 if (private_output->need_set_target_info) {
1946 mode = private_output->current_mode;
1947 private_output->target_buffer_info.src_config.size.h = mode->hdisplay;
1948 private_output->target_buffer_info.src_config.size.v = mode->vdisplay;
1949 private_output->target_buffer_info.src_config.pos.x = 0;
1950 private_output->target_buffer_info.src_config.pos.y = 0;
1951 private_output->target_buffer_info.src_config.pos.w = mode->hdisplay;
1952 private_output->target_buffer_info.src_config.pos.h = mode->vdisplay;
1953 private_output->target_buffer_info.dst_pos.x = 0;
1954 private_output->target_buffer_info.dst_pos.y = 0;
1955 private_output->target_buffer_info.dst_pos.w = mode->hdisplay;
1956 private_output->target_buffer_info.dst_pos.h = mode->vdisplay;
1957 private_output->target_buffer_info.transform = TDM_TRANSFORM_NORMAL;
1959 ret = tdm_layer_set_info_internal(private_layer, &private_output->target_buffer_info);
1960 if (ret != TDM_ERROR_NONE) {
1961 /* LCOV_EXCL_START */
1962 TDM_ERR("failed: layer set info(window)");
1963 /* LCOV_EXCL_STOP */
1967 private_output->need_set_target_info = 0;
1970 output_hwc_target_buffer_commit_handler = calloc(1, sizeof(tdm_private_output_hwc_target_buffer_commit_handler));
1971 if (!output_hwc_target_buffer_commit_handler) {
1972 /* LCOV_EXCL_START */
1973 TDM_ERR("failed: alloc memory");
1974 return TDM_ERROR_OUT_OF_MEMORY;
1975 /* LCOV_EXCL_STOP */
1978 output_hwc_target_buffer_commit_handler->private_output = private_output;
1979 output_hwc_target_buffer_commit_handler->func = func;
1980 output_hwc_target_buffer_commit_handler->user_data = user_data;
1982 ret = tdm_layer_commit_internal(private_layer, _tdm_output_hwc_layer_commit_handler, user_data);
1983 if (ret != TDM_ERROR_NONE) {
1984 /* LCOV_EXCL_START */
1985 TDM_ERR("failed: commit layer(target buffer)");
1986 free(output_hwc_target_buffer_commit_handler);
1987 /* LCOV_EXCL_STOP */
1991 _pthread_mutex_unlock(&private_display->lock);
/* Query the tbm formats supported by video hwc windows on this output.
 * The backend owns the returned format array. */
1997 tdm_output_hwc_get_video_supported_formats(tdm_output *output, const tbm_format **formats,
2000 tdm_private_backend *private_backend;
2001 tdm_func_output *func_output;
2002 OUTPUT_FUNC_ENTRY();
2004 TDM_RETURN_VAL_IF_FAIL(formats != NULL, TDM_ERROR_INVALID_PARAMETER);
2005 TDM_RETURN_VAL_IF_FAIL(count != NULL, TDM_ERROR_INVALID_PARAMETER);
2007 _pthread_mutex_lock(&private_display->lock);
2009 private_backend = private_output->private_backend;
2010 func_output = &private_backend->func_output;
2012 if (!func_output->output_hwc_get_video_supported_formats) {
2013 /* LCOV_EXCL_START */
2014 _pthread_mutex_unlock(&private_display->lock);
2015 TDM_WRN("not implemented!!");
2016 return TDM_ERROR_NOT_IMPLEMENTED;
2017 /* LCOV_EXCL_STOP */
2020 ret = func_output->output_hwc_get_video_supported_formats(
2021 private_output->output_backend, formats, count);
2023 _pthread_mutex_unlock(&private_display->lock);
2029 _is_hwc_output_still_existed(tdm_private_output *private_output)
2031 tdm_private_backend *private_backend = private_output->private_backend;
2032 tdm_private_output *o = NULL;
2034 LIST_FOR_EACH_ENTRY(o, &private_backend->output_list, link) {
2035 if (!(o->caps.capabilities & TDM_OUTPUT_CAPABILITY_HWC))
2038 if (o == private_output)
2048 /* gets called on behalf of the ecore-main-loop thread */
/* Thread-callback entry for TDM_THREAD_CB_NEED_VALIDATE: verifies the
 * output still exists (under the display lock), then invokes the
 * user-registered need_validate handler outside the lock. */
2050 tdm_output_need_validate_handler_thread(tdm_private_display *private_display, void *object, tdm_thread_cb_base *cb_base, void *user_data)
2052 tdm_private_output *private_output = object;
2054 TDM_RETURN_IF_FAIL(private_output != NULL);
2056 _pthread_mutex_lock(&private_display->lock);
2058 /* as we get 'private_output' within an event, an output this 'private_output'
2059 * points to can be destroyed already */
2060 if (!_is_hwc_output_still_existed(private_output)) {
2061 _pthread_mutex_unlock(&private_display->lock);
2065 _pthread_mutex_unlock(&private_display->lock);
2067 TDM_INFO("tdm-backend asks for revalidation for the output:%p.", private_output);
/* handler runs unlocked so it may freely re-enter the tdm API */
2069 if (private_output->need_validate.hndl)
2070 private_output->need_validate.hndl((tdm_output*)private_output);
2073 /* gets called on behalf of the tdm-thread */
/* Drains the need_validate eventfd and forwards a NEED_VALIDATE thread
 * callback for the output; always reports TDM_ERROR_NONE to the loop
 * except on a failed read. */
2075 _need_validate_handler(int fd, tdm_event_loop_mask mask, void *user_data)
2077 tdm_thread_cb_need_validate ev;
2078 tdm_private_output *private_output;
2082 private_output = (tdm_private_output *)user_data;
/* consume the eventfd counter so the fd stops signalling readable */
2084 if (read(private_output->need_validate.event_fd, &value, sizeof(value)) < 0) {
2085 TDM_ERR("error while trying to read from a need_validate.event_fd fd.");
2086 return TDM_ERROR_OPERATION_FAILED;
2089 memset(&ev, 0, sizeof ev);
2090 ev.base.type = TDM_THREAD_CB_NEED_VALIDATE;
2091 ev.base.length = sizeof ev;
2092 ev.base.object_stamp = private_output->stamp;
2093 ev.base.data = NULL;
2096 ret = tdm_thread_cb_call(private_output, &ev.base);
2097 TDM_WARNING_IF_FAIL(ret == TDM_ERROR_NONE);
2099 TDM_INFO("tdm-thread: get a 'need to revalidate' event for the ouptut:%p.", private_output);
2101 /* who cares about this? */
2102 return TDM_ERROR_NONE;
/* Wire an eventfd into the tdm-thread event loop so the backend can wake
 * the frontend to ask for revalidation of this hwc output.
 * Must be called with the display lock held (asserted below).
 * NOTE(review): the line creating 'fd' (presumably eventfd(...)) is not
 * visible in this excerpt — confirm where fd is initialized. */
2106 tdm_output_need_validate_event_init(tdm_output *output)
2110 OUTPUT_FUNC_ENTRY();
2112 TDM_RETURN_VAL_IF_FAIL(TDM_MUTEX_IS_LOCKED(), TDM_ERROR_OPERATION_FAILED);
2114 if (!(private_output->caps.capabilities & TDM_OUTPUT_CAPABILITY_HWC)) {
2115 TDM_ERR("output(%p) not support HWC", private_output);
2116 return TDM_ERROR_BAD_REQUEST;
2119 /* build in eventfd fds into event_loop listened & handled by the tdm-thread */
2121 TDM_WARNING_IF_FAIL(fd >= 0);
2123 private_output->need_validate.event_source = tdm_event_loop_add_fd_handler(private_display,
2124 fd, TDM_EVENT_LOOP_READABLE, _need_validate_handler, private_output, &ret);
2125 TDM_WARNING_IF_FAIL(ret == TDM_ERROR_NONE);
2127 private_output->need_validate.event_fd = fd;
2129 TDM_INFO("register an output:%p for the revalidation, event_fd:%d.", private_output, fd);
2135 tdm_output_choose_commit_per_vblank_mode(tdm_private_output *private_output, int mode)
2137 if (!private_output)
2138 return TDM_ERROR_INVALID_PARAMETER;
2140 if (mode < 0 || mode > 2)
2141 return TDM_ERROR_INVALID_PARAMETER;
2143 private_output->commit_per_vblank = mode;
2145 if (private_output->commit_per_vblank == 0)
2146 TDM_INFO("commit per vblank: disable");
2147 else if (private_output->commit_per_vblank == 1)
2148 TDM_INFO("commit per vblank: enable (1 layer)");
2149 else if (private_output->commit_per_vblank == 2)
2150 TDM_INFO("commit per vblank: enable (previous commit)");
2152 return TDM_ERROR_NONE;