1 /**************************************************************************
5 * Copyright 2015 Samsung Electronics co., Ltd. All Rights Reserved.
7 * Contact: Eunchul Kim <chulspro.kim@samsung.com>,
8 * JinYoung Jeon <jy0.jeon@samsung.com>,
9 * Taeheon Kim <th908.kim@samsung.com>,
10 * YoungJun Cho <yj44.cho@samsung.com>,
11 * SooChan Lim <sc1.lim@samsung.com>,
12 * Boram Park <sc1.lim@samsung.com>
14 * Permission is hereby granted, free of charge, to any person obtaining a
15 * copy of this software and associated documentation files (the
16 * "Software"), to deal in the Software without restriction, including
17 * without limitation the rights to use, copy, modify, merge, publish,
18 * distribute, sub license, and/or sell copies of the Software, and to
19 * permit persons to whom the Software is furnished to do so, subject to
20 * the following conditions:
22 * The above copyright notice and this permission notice (including the
23 * next paragraph) shall be included in all copies or substantial portions
26 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
27 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
28 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
29 * IN NO EVENT SHALL PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR
30 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
31 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
32 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
34 **************************************************************************/
40 #include "tdm_private.h"
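/* Common entry boilerplate for the public output functions below: validate the
 * tdm_output handle and resolve the private output/display pointers. The
 * _ERROR variant is used by entry points that return a pointer instead of a
 * tdm_error code.
 */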
44 #define OUTPUT_FUNC_ENTRY() \
45 tdm_private_display *private_display; \
46 tdm_private_output *private_output; \
47 tdm_error ret = TDM_ERROR_NONE; /* default TDM_ERROR_NONE */\
48 TDM_RETURN_VAL_IF_FAIL(tdm_output_is_valid(output), TDM_ERROR_INVALID_PARAMETER); \
49 private_output = (tdm_private_output*)output; \
50 private_display = private_output->private_display
52 #define OUTPUT_FUNC_ENTRY_ERROR() \
53 tdm_private_display *private_display; \
54 tdm_private_output *private_output; \
55 tdm_error ret = TDM_ERROR_NONE; /* default TDM_ERROR_NONE */\
56 TDM_RETURN_VAL_IF_FAIL_WITH_ERROR(tdm_output_is_valid(output), TDM_ERROR_INVALID_PARAMETER, NULL); \
57 private_output = (tdm_private_output*)output; \
58 private_display = private_output->private_display
61 _tdm_output_vblank_timeout_update(tdm_private_output *private_output, int ms_delay);
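/* Timer callback fired when a requested vblank has not arrived in time. It
 * only logs the vblank handlers that are still pending, for debugging; it does
 * not cancel the pending requests.
 */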
64 _tdm_output_vblank_timeout_cb(void *user_data)
66 tdm_private_output *private_output = user_data;
67 tdm_private_output_vblank_handler *v = NULL;
69 TDM_RETURN_VAL_IF_FAIL(private_output != NULL, TDM_ERROR_OPERATION_FAILED);
71 private_output->vblank_timeout_timer_expired++;
73 TDM_ERR("TDM output(%d) vblank TIMEOUT!! (%d time%s)",
75 private_output->vblank_timeout_timer_expired,
76 (private_output->vblank_timeout_timer_expired > 1) ? "s" : "");
78 LIST_FOR_EACH_ENTRY(v, &private_output->vblank_handler_list, link) {
79 TDM_ERR("vblank_handler(%p) interval(%d) sync(%d) sent_to_frontend(%u) owner_tid(%d)",
80 v, v->interval, v->sync, v->sent_to_frontend, v->owner_tid);
83 return TDM_ERROR_NONE;
87 tdm_output_vblank_print_wait_information(tdm_private_output *private_output, void *user_data)
89 tdm_private_output_vblank_handler *v = NULL;
91 TDM_RETURN_IF_FAIL(private_output != NULL);
92 TDM_RETURN_IF_FAIL(user_data != NULL);
94 TDM_ERR("TDM output(%d) vblank user_data(%p) info!!", private_output->pipe, user_data);
96 LIST_FOR_EACH_ENTRY(v, &private_output->vblank_handler_list, link) {
97 if (v->user_data != user_data)
99 TDM_ERR("vblank_handler(%p) interval(%d) sync(%d) sent_to_frontend(%u) owner_tid(%d)",
100 v, v->interval, v->sync, v->sent_to_frontend, v->owner_tid);
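/* Create the per-output vblank timeout timer on first use and (re)arm it with
 * ms_delay. A delay of 0 is used to disarm it again once a vblank arrives (see
 * _tdm_output_thread_cb_vblank below).
 */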
105 _tdm_output_vblank_timeout_update(tdm_private_output *private_output, int ms_delay)
109 if (!private_output->vblank_timeout_timer) {
110 private_output->vblank_timeout_timer =
111 tdm_event_loop_add_timer_handler(private_output->private_display,
112 _tdm_output_vblank_timeout_cb,
115 if (!private_output->vblank_timeout_timer) {
116 TDM_ERR("output(%d) couldn't add timer", private_output->pipe);
119 TDM_INFO("output(%d) created the vblank timeout timer", private_output->pipe);
120 private_output->vblank_timeout_timer_expired = 0;
123 ret = tdm_event_loop_source_timer_update(private_output->vblank_timeout_timer, ms_delay);
124 if (ret != TDM_ERROR_NONE) {
125 TDM_ERR("output(%d) couldn't update timer", private_output->pipe);
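/* Map a backend hwc_window handle back to the frontend tdm_private_hwc_window
 * that wraps it.
 */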
130 static tdm_private_hwc_window *
131 _tdm_output_find_private_hwc_window(tdm_private_output *private_output,
132 tdm_hwc_window *hwc_window_backend)
134 tdm_private_hwc_window *private_hwc_window = NULL;
136 LIST_FOR_EACH_ENTRY(private_hwc_window, &private_output->hwc_window_list, link) {
137 if (private_hwc_window->hwc_window_backend == hwc_window_backend)
138 return private_hwc_window;
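/* Register the find functions that the thread callback framework uses to
 * resolve an output stamp back to its tdm_private_output when dispatching
 * commit, vblank, status and dpms events.
 */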
145 tdm_output_init(tdm_private_display *private_display)
147 tdm_thread_cb_set_find_func(TDM_THREAD_CB_OUTPUT_COMMIT, tdm_display_find_output_stamp);
148 tdm_thread_cb_set_find_func(TDM_THREAD_CB_OUTPUT_VBLANK, tdm_display_find_output_stamp);
149 tdm_thread_cb_set_find_func(TDM_THREAD_CB_OUTPUT_STATUS, tdm_display_find_output_stamp);
150 tdm_thread_cb_set_find_func(TDM_THREAD_CB_OUTPUT_DPMS, tdm_display_find_output_stamp);
153 return TDM_ERROR_NONE;
157 tdm_output_get_backend_module(tdm_output *output, tdm_error *error)
159 tdm_private_module *private_module;
161 OUTPUT_FUNC_ENTRY_ERROR();
163 _pthread_mutex_lock(&private_display->lock);
165 private_module = private_output->private_module;
168 *error = TDM_ERROR_NONE;
170 _pthread_mutex_unlock(&private_display->lock);
172 return private_module;
176 tdm_output_get_model_info(tdm_output *output, const char **maker,
177 const char **model, const char **name)
181 _pthread_mutex_lock(&private_display->lock);
184 *maker = private_output->caps.maker;
186 *model = private_output->caps.model;
188 *name = private_output->caps.name;
190 _pthread_mutex_unlock(&private_display->lock);
196 tdm_output_get_capabilities(tdm_output *output, tdm_output_capability *capabilities)
200 TDM_RETURN_VAL_IF_FAIL(capabilities != NULL, TDM_ERROR_INVALID_PARAMETER);
202 _pthread_mutex_lock(&private_display->lock);
204 *capabilities = private_output->caps.capabilities;
206 _pthread_mutex_unlock(&private_display->lock);
212 tdm_output_get_conn_status(tdm_output *output, tdm_output_conn_status *status)
216 TDM_RETURN_VAL_IF_FAIL(status != NULL, TDM_ERROR_INVALID_PARAMETER);
218 _pthread_mutex_lock(&private_display->lock);
220 *status = private_output->caps.status;
222 _pthread_mutex_unlock(&private_display->lock);
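/* Deliver a connection-status or DPMS change to one registered change handler.
 * This runs on the thread that added the handler (owner_tid); the display lock
 * is released around the user callback.
 */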
228 tdm_output_thread_cb_change(tdm_private_display *private_display, void *object, tdm_thread_cb_base *cb_base, void *user_data)
230 tdm_private_output *private_output = object;
231 tdm_private_output_change_handler *change_handler = user_data;
232 tdm_output_change_type type = TDM_OUTPUT_CHANGE_CONNECTION;
233 tdm_value value = {.u32 = 0 };
235 TDM_RETURN_IF_FAIL(TDM_MUTEX_IS_LOCKED());
237 assert(change_handler->owner_tid == syscall(SYS_gettid));
239 if (cb_base->type == TDM_THREAD_CB_OUTPUT_STATUS) {
240 tdm_thread_cb_output_status *output_status = (tdm_thread_cb_output_status *)cb_base;
241 type = TDM_OUTPUT_CHANGE_CONNECTION;
242 value.u32 = output_status->status;
243 } else if (cb_base->type == TDM_THREAD_CB_OUTPUT_DPMS) {
244 tdm_thread_cb_output_dpms *output_dpms = (tdm_thread_cb_output_dpms *)cb_base;
245 type = TDM_OUTPUT_CHANGE_DPMS;
246 value.u32 = output_dpms->dpms;
248 TDM_NEVER_GET_HERE();
252 _pthread_mutex_unlock(&private_display->lock);
253 change_handler->func(private_output, type, value, change_handler->user_data);
254 _pthread_mutex_lock(&private_display->lock);
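/* Package a connection-status change as a thread callback event. Status events
 * are delivered synchronously (base.sync = 1), unlike the DPMS events below.
 */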
259 _tdm_output_call_thread_cb_status(tdm_private_output *private_output, tdm_output_conn_status status)
261 tdm_thread_cb_output_status output_status;
264 memset(&output_status, 0, sizeof output_status);
265 output_status.base.type = TDM_THREAD_CB_OUTPUT_STATUS;
266 output_status.base.length = sizeof output_status;
267 output_status.base.object_stamp = private_output->stamp;
268 output_status.base.data = NULL;
269 output_status.base.sync = 1;
270 output_status.status = status;
272 ret = tdm_thread_cb_call(private_output, &output_status.base);
273 TDM_WARNING_IF_FAIL(ret == TDM_ERROR_NONE);
275 return TDM_ERROR_NONE;
279 _tdm_output_call_thread_cb_dpms(tdm_private_output *private_output, tdm_output_dpms dpms)
281 tdm_thread_cb_output_dpms output_dpms;
284 memset(&output_dpms, 0, sizeof output_dpms);
285 output_dpms.base.type = TDM_THREAD_CB_OUTPUT_DPMS;
286 output_dpms.base.length = sizeof output_dpms;
287 output_dpms.base.object_stamp = private_output->stamp;
288 output_dpms.base.data = NULL;
289 output_dpms.base.sync = 0;
290 output_dpms.dpms = dpms;
292 ret = tdm_thread_cb_call(private_output, &output_dpms.base);
293 TDM_WARNING_IF_FAIL(ret == TDM_ERROR_NONE);
295 return TDM_ERROR_NONE;
299 tdm_output_cb_status(tdm_output *output_backend, tdm_output_conn_status status, void *user_data)
301 tdm_private_output *private_output = user_data;
304 TDM_RETURN_IF_FAIL(private_output);
306 TDM_INFO("output(%d) %s", private_output->pipe, tdm_status_str(status));
308 if ((private_output->caps.status == TDM_OUTPUT_CONN_STATUS_DISCONNECTED && status != TDM_OUTPUT_CONN_STATUS_DISCONNECTED) ||
309 (private_output->caps.status != TDM_OUTPUT_CONN_STATUS_DISCONNECTED && status == TDM_OUTPUT_CONN_STATUS_DISCONNECTED)) {
310 ret = tdm_display_update_output(private_output->private_module, output_backend, private_output->pipe, 1);
311 TDM_RETURN_IF_FAIL(ret == TDM_ERROR_NONE);
313 private_output->caps.status = status;
316 ret = _tdm_output_call_thread_cb_status(private_output, status);
317 TDM_WARNING_IF_FAIL(ret == TDM_ERROR_NONE);
321 tdm_output_cb_dpms(tdm_output *output_backend, tdm_output_dpms dpms, void *user_data)
323 tdm_private_output *private_output = user_data;
326 TDM_INFO("output(%d) %s", private_output->pipe, tdm_dpms_str(dpms));
328 private_output->current_dpms_value = dpms;
329 private_output->waiting_dpms_change = 0;
330 TDM_INFO("output(%d) dpms async '%s' done", private_output->pipe, tdm_dpms_str(dpms));
332 ret = _tdm_output_call_thread_cb_dpms(private_output, dpms);
333 TDM_WARNING_IF_FAIL(ret == TDM_ERROR_NONE);
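/* Illustrative usage sketch (the handler name is hypothetical; the signature
 * follows tdm_output_change_handler):
 *
 *   static void
 *   _cb_output_change(tdm_output *output, tdm_output_change_type type,
 *                     tdm_value value, void *user_data)
 *   {
 *       if (type == TDM_OUTPUT_CHANGE_DPMS)
 *           printf("dpms: %u\n", value.u32);
 *   }
 *
 *   tdm_output_add_change_handler(output, _cb_output_change, NULL);
 *   ...
 *   tdm_output_remove_change_handler(output, _cb_output_change, NULL);
 */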
337 tdm_output_add_change_handler(tdm_output *output,
338 tdm_output_change_handler func,
341 tdm_private_output_change_handler *change_handler = NULL;
344 TDM_RETURN_VAL_IF_FAIL(func != NULL, TDM_ERROR_INVALID_PARAMETER);
346 _pthread_mutex_lock(&private_display->lock);
348 LIST_FOR_EACH_ENTRY(change_handler, &private_output->change_handler_list, link) {
349 if (change_handler->func == func && change_handler->user_data == user_data) {
350 TDM_ERR("can't add the same handler twice");
351 _pthread_mutex_unlock(&private_display->lock);
352 return TDM_ERROR_BAD_REQUEST;
356 change_handler = calloc(1, sizeof(tdm_private_output_change_handler));
357 if (!change_handler) {
358 /* LCOV_EXCL_START */
359 TDM_ERR("failed: alloc memory");
360 _pthread_mutex_unlock(&private_display->lock);
361 return TDM_ERROR_OUT_OF_MEMORY;
365 ret = tdm_thread_cb_add(private_output, TDM_THREAD_CB_OUTPUT_DPMS, NULL, tdm_output_thread_cb_change, change_handler);
366 if (ret != TDM_ERROR_NONE) {
367 /* LCOV_EXCL_START */
368 TDM_ERR("tdm_thread_cb_add failed");
369 free(change_handler);
370 _pthread_mutex_unlock(&private_display->lock);
371 return TDM_ERROR_OPERATION_FAILED;
375 ret = tdm_thread_cb_add(private_output, TDM_THREAD_CB_OUTPUT_STATUS, NULL, tdm_output_thread_cb_change, change_handler);
376 if (ret != TDM_ERROR_NONE) {
377 /* LCOV_EXCL_START */
378 TDM_ERR("tdm_thread_cb_add failed");
379 tdm_thread_cb_remove(private_output, TDM_THREAD_CB_OUTPUT_DPMS, NULL, tdm_output_thread_cb_change, change_handler);
380 free(change_handler);
381 _pthread_mutex_unlock(&private_display->lock);
382 return TDM_ERROR_OPERATION_FAILED;
386 change_handler->private_output = private_output;
387 change_handler->func = func;
388 change_handler->user_data = user_data;
389 change_handler->owner_tid = syscall(SYS_gettid);
391 LIST_ADDTAIL(&change_handler->link, &private_output->change_handler_list);
393 _pthread_mutex_unlock(&private_display->lock);
399 tdm_output_remove_change_handler(tdm_output *output,
400 tdm_output_change_handler func,
403 tdm_private_display *private_display;
404 tdm_private_output *private_output;
405 tdm_private_output_change_handler *change_handler = NULL, *hh = NULL;
407 TDM_RETURN_IF_FAIL(tdm_output_is_valid(output));
408 TDM_RETURN_IF_FAIL(func != NULL);
410 private_output = (tdm_private_output*)output;
411 private_display = private_output->private_display;
413 _pthread_mutex_lock(&private_display->lock);
415 LIST_FOR_EACH_ENTRY_SAFE(change_handler, hh, &private_output->change_handler_list, link) {
416 if (change_handler->func != func || change_handler->user_data != user_data)
419 tdm_thread_cb_remove(private_output, TDM_THREAD_CB_OUTPUT_DPMS, NULL, tdm_output_thread_cb_change, change_handler);
420 tdm_thread_cb_remove(private_output, TDM_THREAD_CB_OUTPUT_STATUS, NULL, tdm_output_thread_cb_change, change_handler);
422 LIST_DEL(&change_handler->link);
423 free(change_handler);
425 _pthread_mutex_unlock(&private_display->lock);
430 _pthread_mutex_unlock(&private_display->lock);
434 tdm_output_get_output_type(tdm_output *output, tdm_output_type *type)
438 TDM_RETURN_VAL_IF_FAIL(type != NULL, TDM_ERROR_INVALID_PARAMETER);
440 _pthread_mutex_lock(&private_display->lock);
442 *type = private_output->caps.type;
444 _pthread_mutex_unlock(&private_display->lock);
450 tdm_output_get_layer_count(tdm_output *output, int *count)
452 tdm_private_layer *private_layer = NULL;
456 TDM_RETURN_VAL_IF_FAIL(count != NULL, TDM_ERROR_INVALID_PARAMETER);
458 _pthread_mutex_lock(&private_display->lock);
460 if (private_output->caps.capabilities & TDM_OUTPUT_CAPABILITY_HWC) {
461 TDM_ERR("output(%p) supports HWC. Use the HWC functions instead", private_output);
463 _pthread_mutex_unlock(&private_display->lock);
464 return TDM_ERROR_BAD_REQUEST;
468 LIST_FOR_EACH_ENTRY(private_layer, &private_output->layer_list, link)
471 _pthread_mutex_unlock(&private_display->lock);
472 return TDM_ERROR_NONE;
475 _pthread_mutex_unlock(&private_display->lock);
482 tdm_output_get_layer(tdm_output *output, int index, tdm_error *error)
484 tdm_private_layer *private_layer = NULL;
486 OUTPUT_FUNC_ENTRY_ERROR();
488 _pthread_mutex_lock(&private_display->lock);
491 *error = TDM_ERROR_NONE;
493 if (private_output->caps.capabilities & TDM_OUTPUT_CAPABILITY_HWC) {
494 _pthread_mutex_unlock(&private_display->lock);
495 TDM_ERR("output(%p) supports HWC. Use the HWC functions instead", private_output);
497 *error = TDM_ERROR_BAD_REQUEST;
501 LIST_FOR_EACH_ENTRY(private_layer, &private_output->layer_list, link) {
502 if (private_layer->index == index) {
503 _pthread_mutex_unlock(&private_display->lock);
504 return private_layer;
508 _pthread_mutex_unlock(&private_display->lock);
514 tdm_output_get_available_properties(tdm_output *output, const tdm_prop **props,
519 TDM_RETURN_VAL_IF_FAIL(props != NULL, TDM_ERROR_INVALID_PARAMETER);
520 TDM_RETURN_VAL_IF_FAIL(count != NULL, TDM_ERROR_INVALID_PARAMETER);
522 _pthread_mutex_lock(&private_display->lock);
524 *props = (const tdm_prop *)private_output->caps.props;
525 *count = private_output->caps.prop_count;
527 _pthread_mutex_unlock(&private_display->lock);
533 tdm_output_get_available_modes(tdm_output *output,
534 const tdm_output_mode **modes, int *count)
538 TDM_RETURN_VAL_IF_FAIL(modes != NULL, TDM_ERROR_INVALID_PARAMETER);
539 TDM_RETURN_VAL_IF_FAIL(count != NULL, TDM_ERROR_INVALID_PARAMETER);
541 _pthread_mutex_lock(&private_display->lock);
543 *modes = (const tdm_output_mode *)private_output->caps.modes;
544 *count = private_output->caps.mode_count;
546 _pthread_mutex_unlock(&private_display->lock);
552 tdm_output_get_available_size(tdm_output *output, int *min_w, int *min_h,
553 int *max_w, int *max_h, int *preferred_align)
557 _pthread_mutex_lock(&private_display->lock);
560 *min_w = TDM_FRONT_VALUE(private_output->caps.min_w);
562 *min_h = TDM_FRONT_VALUE(private_output->caps.min_h);
564 *max_w = TDM_FRONT_VALUE(private_output->caps.max_w);
566 *max_h = TDM_FRONT_VALUE(private_output->caps.max_h);
568 *preferred_align = TDM_FRONT_VALUE(private_output->caps.preferred_align);
570 _pthread_mutex_unlock(&private_display->lock);
576 tdm_output_get_cursor_available_size(tdm_output *output, int *min_w, int *min_h,
577 int *max_w, int *max_h, int *preferred_align)
581 _pthread_mutex_lock(&private_display->lock);
583 if (!tdm_module_check_abi(private_output->private_module, 1, 5)) {
584 _pthread_mutex_unlock(&private_display->lock);
585 return TDM_ERROR_BAD_REQUEST;
589 *min_w = TDM_FRONT_VALUE(private_output->caps.cursor_min_w);
591 *min_h = TDM_FRONT_VALUE(private_output->caps.cursor_min_h);
593 *max_w = TDM_FRONT_VALUE(private_output->caps.cursor_max_w);
595 *max_h = TDM_FRONT_VALUE(private_output->caps.cursor_max_h);
597 *preferred_align = TDM_FRONT_VALUE(private_output->caps.cursor_preferred_align);
599 _pthread_mutex_unlock(&private_display->lock);
605 tdm_output_get_physical_size(tdm_output *output, unsigned int *mmWidth,
606 unsigned int *mmHeight)
610 _pthread_mutex_lock(&private_display->lock);
613 *mmWidth = private_output->caps.mmWidth;
615 *mmHeight = private_output->caps.mmHeight;
617 _pthread_mutex_unlock(&private_display->lock);
623 tdm_output_get_subpixel(tdm_output *output, unsigned int *subpixel)
626 TDM_RETURN_VAL_IF_FAIL(subpixel != NULL, TDM_ERROR_INVALID_PARAMETER);
628 _pthread_mutex_lock(&private_display->lock);
630 *subpixel = private_output->caps.subpixel;
632 _pthread_mutex_unlock(&private_display->lock);
638 tdm_output_get_pipe(tdm_output *output, unsigned int *pipe)
641 TDM_RETURN_VAL_IF_FAIL(pipe != NULL, TDM_ERROR_INVALID_PARAMETER);
643 _pthread_mutex_lock(&private_display->lock);
645 *pipe = private_output->pipe;
647 _pthread_mutex_unlock(&private_display->lock);
653 tdm_output_get_primary_index(tdm_output *output, int *index)
655 tdm_private_layer *private_layer = NULL;
658 TDM_RETURN_VAL_IF_FAIL(index != NULL, TDM_ERROR_INVALID_PARAMETER);
660 _pthread_mutex_lock(&private_display->lock);
662 LIST_FOR_EACH_ENTRY(private_layer, &private_output->layer_list, link) {
663 if (private_layer->caps.capabilities & TDM_LAYER_CAPABILITY_PRIMARY) {
664 *index = private_layer->index;
669 _pthread_mutex_unlock(&private_display->lock);
675 tdm_output_set_property(tdm_output *output, unsigned int id, tdm_value value)
677 tdm_private_module *private_module;
678 tdm_func_output *func_output;
681 _pthread_mutex_lock(&private_display->lock);
683 private_module = private_output->private_module;
684 func_output = &private_module->func_output;
686 if (!func_output->output_set_property) {
687 /* LCOV_EXCL_START */
688 _pthread_mutex_unlock(&private_display->lock);
689 TDM_WRN("not implemented!!");
690 return TDM_ERROR_NOT_IMPLEMENTED;
694 ret = func_output->output_set_property(private_output->output_backend, id,
697 _pthread_mutex_unlock(&private_display->lock);
703 tdm_output_get_property(tdm_output *output, unsigned int id, tdm_value *value)
705 tdm_private_module *private_module;
706 tdm_func_output *func_output;
709 TDM_RETURN_VAL_IF_FAIL(value != NULL, TDM_ERROR_INVALID_PARAMETER);
711 _pthread_mutex_lock(&private_display->lock);
713 private_module = private_output->private_module;
714 func_output = &private_module->func_output;
716 if (!func_output->output_get_property) {
717 /* LCOV_EXCL_START */
718 _pthread_mutex_unlock(&private_display->lock);
719 TDM_WRN("not implemented!!");
720 return TDM_ERROR_NOT_IMPLEMENTED;
724 ret = func_output->output_get_property(private_output->output_backend, id,
727 _pthread_mutex_unlock(&private_display->lock);
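/* Handle a vblank event on the handler's owner thread: disarm the timeout
 * timer, collect every handler that waited with the same interval, sync and
 * owner thread into a local list, and call the user callbacks with the display
 * lock released.
 */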
733 _tdm_output_thread_cb_vblank(tdm_private_display *private_display, void *object, tdm_thread_cb_base *cb_base, void *user_data)
735 tdm_thread_cb_output_vblank *output_vblank = (tdm_thread_cb_output_vblank *)cb_base;
736 tdm_private_output_vblank_handler *vblank_handler = output_vblank->base.data;
737 tdm_private_output_vblank_handler *v = NULL, *vv = NULL;
738 tdm_private_output *private_output = object;
739 struct list_head clone_list;
741 pid_t tid = syscall(SYS_gettid);
743 TDM_RETURN_IF_FAIL(TDM_MUTEX_IS_LOCKED());
745 assert(vblank_handler->owner_tid == tid);
747 vblank_handler->sent_to_frontend = 0;
749 _tdm_output_vblank_timeout_update(private_output, 0);
751 tdm_thread_cb_remove(private_output, TDM_THREAD_CB_OUTPUT_VBLANK, vblank_handler, _tdm_output_thread_cb_vblank, NULL);
753 interval = vblank_handler->interval;
754 sync = vblank_handler->sync;
756 LIST_INITHEAD(&clone_list);
758 LIST_FOR_EACH_ENTRY_SAFE(v, vv, &private_output->vblank_handler_list, link) {
759 if (v->interval != interval || v->sync != sync || v->owner_tid != tid)
763 LIST_ADDTAIL(&v->link, &clone_list);
766 if (tdm_debug_module & TDM_DEBUG_COMMIT)
767 TDM_INFO("----------------------------------------- output(%d) got vblank", private_output->pipe);
769 _pthread_mutex_unlock(&private_display->lock);
770 LIST_FOR_EACH_ENTRY_SAFE(v, vv, &clone_list, link) {
771 if (tdm_debug_module & TDM_DEBUG_COMMIT)
772 TDM_INFO("handler(%p)", v);
777 v->func(v->private_output,
778 output_vblank->sequence,
779 output_vblank->tv_sec,
780 output_vblank->tv_usec,
785 _pthread_mutex_lock(&private_display->lock);
787 if (tdm_debug_module & TDM_DEBUG_COMMIT)
788 TDM_INFO("-----------------------------------------...");
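/* Vblank callback invoked by the backend. It packages the event into a
 * tdm_thread_cb_output_vblank and forwards it to the handler's owner thread
 * via tdm_thread_cb_call().
 */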
792 _tdm_output_cb_vblank(tdm_output *output_backend, unsigned int sequence,
793 unsigned int tv_sec, unsigned int tv_usec, void *user_data)
795 tdm_private_output_vblank_handler *vblank_handler = user_data;
796 tdm_thread_cb_output_vblank output_vblank;
799 memset(&output_vblank, 0, sizeof output_vblank);
800 output_vblank.base.type = TDM_THREAD_CB_OUTPUT_VBLANK;
801 output_vblank.base.length = sizeof output_vblank;
802 output_vblank.base.object_stamp = vblank_handler->private_output->stamp;
803 output_vblank.base.data = vblank_handler;
804 output_vblank.base.sync = 0;
805 output_vblank.sequence = sequence;
806 output_vblank.tv_sec = tv_sec;
807 output_vblank.tv_usec = tv_usec;
809 vblank_handler->sent_to_frontend = 1;
811 if (tdm_debug_module & TDM_DEBUG_COMMIT)
812 TDM_INFO("output(%d) wait_vblank: handler(%p)", vblank_handler->private_output->pipe, vblank_handler);
814 ret = tdm_thread_cb_call(vblank_handler->private_output, &output_vblank.base);
815 TDM_WARNING_IF_FAIL(ret == TDM_ERROR_NONE);
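/* Handle a commit-done event on the frontend thread: for output commits, hand
 * each layer's committed buffer to tdm_layer_committed(), then call the user
 * commit handler with the display lock released and free the handler.
 */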
819 _tdm_output_thread_cb_commit(tdm_private_display *private_display, void *object, tdm_thread_cb_base *cb_base, void *user_data)
821 tdm_thread_cb_output_commit *output_commit = (tdm_thread_cb_output_commit *)cb_base;
822 tdm_private_output_commit_handler *output_commit_handler = output_commit->base.data;
823 tdm_private_output *private_output = object;
824 tdm_private_layer *private_layer = NULL;
826 TDM_RETURN_IF_FAIL(TDM_MUTEX_IS_LOCKED());
828 if (!output_commit_handler)
831 assert(output_commit_handler->owner_tid == syscall(SYS_gettid));
833 tdm_thread_cb_remove(private_output, TDM_THREAD_CB_OUTPUT_COMMIT, output_commit_handler, _tdm_output_thread_cb_commit, NULL);
835 LIST_DEL(&output_commit_handler->link);
837 if (tdm_debug_module & TDM_DEBUG_COMMIT) {
838 TDM_INFO("----------------------------------------- output(%d) committed", private_output->pipe);
839 TDM_INFO("handler(%p)", output_commit_handler);
842 if (private_output->commit_type == TDM_COMMIT_TYPE_OUTPUT) {
843 /* In the case of a layer commit, this will be handled in the layer commit callback */
844 LIST_FOR_EACH_ENTRY(private_layer, &private_output->layer_list, link) {
845 if (private_layer->committed_buffer)
846 tdm_layer_committed(private_layer, &private_layer->committed_buffer);
850 if (output_commit_handler->func) {
851 _pthread_mutex_unlock(&private_display->lock);
852 output_commit_handler->func(private_output,
853 output_commit->sequence,
854 output_commit->tv_sec,
855 output_commit->tv_usec,
856 output_commit_handler->user_data);
857 _pthread_mutex_lock(&private_display->lock);
860 free(output_commit_handler);
862 if (tdm_debug_module & TDM_DEBUG_COMMIT)
863 TDM_INFO("-----------------------------------------...");
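/* Commit-done callback invoked by the backend. It resolves the private output
 * (from the handler, or by searching the display when no handler was passed)
 * and forwards the event to the frontend thread via tdm_thread_cb_call().
 */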
867 _tdm_output_cb_commit(tdm_output *output_backend, unsigned int sequence,
868 unsigned int tv_sec, unsigned int tv_usec, void *user_data)
870 tdm_private_output_commit_handler *output_commit_handler = user_data;
871 tdm_private_output *private_output;
872 tdm_thread_cb_output_commit output_commit;
875 if (output_commit_handler)
876 private_output = output_commit_handler->private_output;
878 private_output = tdm_display_find_private_output(tdm_display_get(), output_backend);
880 memset(&output_commit, 0, sizeof output_commit);
881 output_commit.base.type = TDM_THREAD_CB_OUTPUT_COMMIT;
882 output_commit.base.length = sizeof output_commit;
883 output_commit.base.object_stamp = private_output->stamp;
884 output_commit.base.data = output_commit_handler;
885 output_commit.base.sync = 0;
886 output_commit.sequence = sequence;
887 output_commit.tv_sec = tv_sec;
888 output_commit.tv_usec = tv_usec;
890 ret = tdm_thread_cb_call(private_output, &output_commit.base);
891 TDM_WARNING_IF_FAIL(ret == TDM_ERROR_NONE);
894 /* add_front: To distinguish between the user vblank handlers and the layer
895 * commit vblank handlers. The layer commit handlers will be called
896 * before calling the user vblank handlers.
899 _tdm_output_wait_vblank(tdm_private_output *private_output, int interval, int sync,
900 tdm_output_vblank_handler func, void *user_data,
901 unsigned int add_front)
903 tdm_private_module *private_module;
904 tdm_func_output *func_output;
905 tdm_private_output_vblank_handler *vblank_handler = NULL, *v = NULL;
906 unsigned int skip_request = 0;
907 pid_t tid = syscall(SYS_gettid);
908 tdm_error ret = TDM_ERROR_NONE;
910 private_module = private_output->private_module;
911 func_output = &private_module->func_output;
913 /* interval SHOULD be at least 1 */
917 if (!func_output->output_wait_vblank) {
918 /* LCOV_EXCL_START */
919 TDM_WRN("not implemented!!");
920 return TDM_ERROR_NOT_IMPLEMENTED;
924 if (!private_output->regist_vblank_cb) {
925 private_output->regist_vblank_cb = 1;
926 ret = func_output->output_set_vblank_handler(private_output->output_backend,
927 _tdm_output_cb_vblank);
930 vblank_handler = calloc(1, sizeof(tdm_private_output_vblank_handler));
931 if (!vblank_handler) {
932 /* LCOV_EXCL_START */
933 TDM_ERR("failed: alloc memory");
934 return TDM_ERROR_OUT_OF_MEMORY;
938 if (tdm_debug_module & TDM_DEBUG_COMMIT)
939 TDM_INFO("output(%d) wait_vblank: handler(%p)", private_output->pipe, vblank_handler);
941 LIST_FOR_EACH_ENTRY(v, &private_output->vblank_handler_list, link) {
942 if (v->interval == interval && v->sync == sync && v->owner_tid == tid) {
949 LIST_ADD(&vblank_handler->link, &private_output->vblank_handler_list);
951 LIST_ADDTAIL(&vblank_handler->link, &private_output->vblank_handler_list);
953 vblank_handler->private_output = private_output;
954 vblank_handler->interval = interval;
955 vblank_handler->sync = sync;
956 vblank_handler->func = func;
957 vblank_handler->user_data = user_data;
958 vblank_handler->owner_tid = tid;
960 /* If there is a previous request, we can skip calling output_wait_vblank() */
962 ret = tdm_thread_cb_add(private_output, TDM_THREAD_CB_OUTPUT_VBLANK, vblank_handler, _tdm_output_thread_cb_vblank, NULL);
963 if (ret != TDM_ERROR_NONE) {
964 TDM_ERR("tdm_thread_cb_add failed");
968 ret = func_output->output_wait_vblank(private_output->output_backend, interval,
969 sync, vblank_handler);
970 TDM_GOTO_IF_FAIL(ret == TDM_ERROR_NONE, wait_failed);
972 _tdm_output_vblank_timeout_update(private_output, 1000);
974 if (tdm_debug_module & TDM_DEBUG_COMMIT)
975 TDM_INFO("output(%d) backend wait_vblank", private_output->pipe);
981 /* LCOV_EXCL_START */
982 if (vblank_handler) {
983 tdm_thread_cb_remove(private_output, TDM_THREAD_CB_OUTPUT_VBLANK, vblank_handler, _tdm_output_thread_cb_vblank, NULL);
984 LIST_DEL(&vblank_handler->link);
985 free(vblank_handler);
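/* Illustrative usage sketch (the callback name is hypothetical; the signature
 * follows tdm_output_vblank_handler):
 *
 *   static void
 *   _cb_vblank(tdm_output *output, unsigned int sequence, unsigned int tv_sec,
 *              unsigned int tv_usec, void *user_data)
 *   {
 *       ...
 *   }
 *
 *   tdm_output_wait_vblank(output, 1, 0, _cb_vblank, NULL);
 */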
992 tdm_output_wait_vblank(tdm_output *output, int interval, int sync,
993 tdm_output_vblank_handler func, void *user_data)
996 TDM_RETURN_VAL_IF_FAIL(interval > 0, TDM_ERROR_INVALID_PARAMETER);
998 _pthread_mutex_lock(&private_display->lock);
1000 if (TDM_OUTPUT_DPMS_VSYNC_IS_OFF(private_output->current_dpms_value)) {
1001 TDM_WRN("output(%d) dpms: %s", private_output->pipe,
1002 tdm_dpms_str(private_output->current_dpms_value));
1003 _pthread_mutex_unlock(&private_display->lock);
1004 return TDM_ERROR_DPMS_OFF;
1007 ret = _tdm_output_wait_vblank(private_output, interval, sync, func, user_data, 0);
1009 _pthread_mutex_unlock(&private_display->lock);
1014 /* LCOV_EXCL_START */
1016 tdm_output_wait_vblank_add_front(tdm_output *output, int interval, int sync,
1017 tdm_output_vblank_handler func, void *user_data)
1019 OUTPUT_FUNC_ENTRY();
1021 _pthread_mutex_lock(&private_display->lock);
1023 if (TDM_OUTPUT_DPMS_VSYNC_IS_OFF(private_output->current_dpms_value)) {
1024 TDM_WRN("output(%d) dpms: %s", private_output->pipe,
1025 tdm_dpms_str(private_output->current_dpms_value));
1026 _pthread_mutex_unlock(&private_display->lock);
1027 return TDM_ERROR_DPMS_OFF;
1030 ret = _tdm_output_wait_vblank(private_output, interval, sync, func, user_data, 1);
1032 _pthread_mutex_unlock(&private_display->lock);
1036 /* LCOV_EXCL_STOP */
1039 tdm_output_remove_vblank_handler_internal(tdm_output *output, tdm_output_vblank_handler func, void *user_data)
1041 tdm_private_output *private_output = (tdm_private_output*)output;
1042 tdm_private_output_vblank_handler *v = NULL;
1044 TDM_RETURN_IF_FAIL(private_output != NULL);
1045 TDM_RETURN_IF_FAIL(TDM_MUTEX_IS_LOCKED());
1047 LIST_FOR_EACH_ENTRY(v, &private_output->vblank_handler_list, link) {
1048 if (v->func == func && v->user_data == user_data) {
1049 /* only set func & user_data to NULL. It will be freed when an event occurs */
1050 v->func = NULL;
1051 v->user_data = NULL;
1058 tdm_output_remove_commit_handler_internal(tdm_output *output, tdm_output_commit_handler func, void *user_data)
1060 tdm_private_output *private_output = (tdm_private_output*)output;
1061 tdm_private_output_commit_handler *c = NULL;
1063 TDM_RETURN_IF_FAIL(private_output != NULL);
1064 TDM_RETURN_IF_FAIL(TDM_MUTEX_IS_LOCKED());
1066 LIST_FOR_EACH_ENTRY(c, &private_output->output_commit_handler_list, link) {
1067 if (c->func == func && c->user_data == user_data) {
1068 /* only set func & user_data to NULL. It will be freed when an event occurs */
1069 c->func = NULL;
1070 c->user_data = NULL;
1077 tdm_output_remove_vblank_handler(tdm_output *output, tdm_output_vblank_handler func, void *user_data)
1079 OUTPUT_FUNC_ENTRY();
1081 _pthread_mutex_lock(&private_display->lock);
1083 tdm_output_remove_vblank_handler_internal(output, func, user_data);
1085 _pthread_mutex_unlock(&private_display->lock);
1091 tdm_output_remove_commit_handler(tdm_output *output, tdm_output_commit_handler func, void *user_data)
1093 OUTPUT_FUNC_ENTRY();
1095 _pthread_mutex_lock(&private_display->lock);
1097 tdm_output_remove_commit_handler_internal(output, func, user_data);
1099 _pthread_mutex_unlock(&private_display->lock);
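/* Internal commit path shared by tdm_output_commit(): register the backend
 * commit handler on first use, call the backend output_commit() and move each
 * layer's waiting_buffer to committed_buffer. When DPMS/vsync is off, the user
 * handler is called directly instead of committing to the backend.
 */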
1105 tdm_output_commit_internal(tdm_output *output, int sync, tdm_output_commit_handler func, void *user_data)
1107 tdm_private_output *private_output;
1108 tdm_private_module *private_module;
1109 tdm_func_output *func_output;
1110 tdm_private_output_commit_handler *output_commit_handler = NULL;
1111 tdm_private_layer *private_layer = NULL;
1112 tdm_output_dpms dpms_value = TDM_OUTPUT_DPMS_ON;
1113 tdm_error ret = TDM_ERROR_NONE;
1115 TDM_RETURN_VAL_IF_FAIL(tdm_output_is_valid(output), TDM_ERROR_INVALID_PARAMETER);
1117 private_output = (tdm_private_output*)output;
1118 private_module = private_output->private_module;
1119 func_output = &private_module->func_output;
1121 if (!func_output->output_commit) {
1122 /* LCOV_EXCL_START */
1123 TDM_WRN("not implemented!!");
1124 return TDM_ERROR_NOT_IMPLEMENTED;
1125 /* LCOV_EXCL_STOP */
1128 ret = tdm_output_get_dpms_internal(output, &dpms_value);
1129 TDM_RETURN_VAL_IF_FAIL(ret == TDM_ERROR_NONE, ret);
1131 if (!TDM_OUTPUT_DPMS_VSYNC_IS_OFF(dpms_value)) {
1132 if (!private_output->regist_commit_cb) {
1133 private_output->regist_commit_cb = 1;
1134 ret = func_output->output_set_commit_handler(private_output->output_backend, _tdm_output_cb_commit);
1135 TDM_GOTO_IF_FAIL(ret == TDM_ERROR_NONE, commit_failed);
1138 output_commit_handler = calloc(1, sizeof(tdm_private_output_commit_handler));
1139 if (!output_commit_handler) {
1140 /* LCOV_EXCL_START */
1141 TDM_ERR("failed: alloc memory");
1142 return TDM_ERROR_OUT_OF_MEMORY;
1143 /* LCOV_EXCL_STOP */
1146 ret = tdm_thread_cb_add(private_output, TDM_THREAD_CB_OUTPUT_COMMIT, output_commit_handler, _tdm_output_thread_cb_commit, NULL);
1147 if (ret != TDM_ERROR_NONE) {
1148 TDM_ERR("tdm_thread_cb_add failed");
1149 free(output_commit_handler);
1153 LIST_ADDTAIL(&output_commit_handler->link, &private_output->output_commit_handler_list);
1154 output_commit_handler->private_output = private_output;
1155 output_commit_handler->func = func;
1156 output_commit_handler->user_data = user_data;
1157 output_commit_handler->owner_tid = syscall(SYS_gettid);
1159 ret = func_output->output_commit(private_output->output_backend, sync,
1160 output_commit_handler);
1161 TDM_GOTO_IF_FAIL(ret == TDM_ERROR_NONE, commit_failed);
1163 if (tdm_debug_module & TDM_DEBUG_COMMIT)
1164 TDM_INFO("output(%d) backend commit: handle(%p) func(%p) user_data(%p)",
1165 private_output->pipe, output_commit_handler, func, user_data);
1168 /* Even if DPMS is off, committed_buffer should be changed because it will be referred
1169 * to by the tdm_layer_committed() function.
1171 LIST_FOR_EACH_ENTRY(private_layer, &private_output->layer_list, link) {
1172 if (!private_layer->waiting_buffer)
1175 if (private_layer->committed_buffer)
1176 tdm_layer_free_buffer(private_layer, private_layer->committed_buffer);
1178 private_layer->committed_buffer = private_layer->waiting_buffer;
1179 private_layer->waiting_buffer = NULL;
1180 if (tdm_debug_module & TDM_DEBUG_BUFFER)
1181 TDM_INFO("layer(%p) waiting_buffer(%p) committed_buffer(%p)",
1182 private_layer, private_layer->waiting_buffer,
1183 private_layer->committed_buffer->buffer);
1186 if (TDM_OUTPUT_DPMS_VSYNC_IS_OFF(dpms_value)) {
1187 TDM_WRN("dpms %s. Calling the commit handler directly instead of committing.", tdm_dpms_str(dpms_value));
1189 func(output, 0, 0, 0, user_data);
1195 /* LCOV_EXCL_START */
1196 if (output_commit_handler) {
1197 tdm_thread_cb_remove(private_output, TDM_THREAD_CB_OUTPUT_COMMIT, output_commit_handler, _tdm_output_thread_cb_commit, NULL);
1198 LIST_DEL(&output_commit_handler->link);
1199 free(output_commit_handler);
1202 /* LCOV_EXCL_STOP */
1206 tdm_output_commit(tdm_output *output, int sync, tdm_output_commit_handler func,
1209 tdm_private_layer *private_layer = NULL;
1211 OUTPUT_FUNC_ENTRY();
1213 _pthread_mutex_lock(&private_display->lock);
1215 if (private_output->commit_type == TDM_COMMIT_TYPE_NONE)
1216 private_output->commit_type = TDM_COMMIT_TYPE_OUTPUT;
1217 else if (private_output->commit_type == TDM_COMMIT_TYPE_LAYER) {
1218 TDM_ERR("Not supported. Use tdm_layer_commit instead");
1219 _pthread_mutex_unlock(&private_display->lock);
1220 return TDM_ERROR_BAD_REQUEST;
1223 if (private_output->commit_per_vblank) {
1224 TDM_ERR("Use tdm_layer_commit");
1225 _pthread_mutex_unlock(&private_display->lock);
1226 return TDM_ERROR_BAD_REQUEST;
1229 if (TDM_OUTPUT_DPMS_VSYNC_IS_OFF(private_output->current_dpms_value)) {
1230 TDM_ERR("output(%d) dpms: %s", private_output->pipe,
1231 tdm_dpms_str(private_output->current_dpms_value));
1232 _pthread_mutex_unlock(&private_display->lock);
1233 return TDM_ERROR_DPMS_OFF;
1236 if (tdm_debug_module & TDM_DEBUG_COMMIT)
1237 TDM_INFO("output(%d) commit", private_output->pipe);
1239 /* apply the pending data of all layers */
1240 LIST_FOR_EACH_ENTRY(private_layer, &private_output->layer_list, link) {
1241 tdm_layer_commit_pending_data(private_layer);
1244 ret = tdm_output_commit_internal(output, sync, func, user_data);
1246 _pthread_mutex_unlock(&private_display->lock);
1252 tdm_output_set_mode(tdm_output *output, const tdm_output_mode *mode)
1254 tdm_private_module *private_module;
1255 tdm_func_output *func_output;
1256 OUTPUT_FUNC_ENTRY();
1258 TDM_RETURN_VAL_IF_FAIL(mode != NULL, TDM_ERROR_INVALID_PARAMETER);
1260 _pthread_mutex_lock(&private_display->lock);
1262 private_module = private_output->private_module;
1263 func_output = &private_module->func_output;
1265 if (!func_output->output_set_mode) {
1266 /* LCOV_EXCL_START */
1267 _pthread_mutex_unlock(&private_display->lock);
1268 TDM_WRN("not implemented!!");
1269 return TDM_ERROR_NOT_IMPLEMENTED;
1270 /* LCOV_EXCL_STOP */
1273 ret = func_output->output_set_mode(private_output->output_backend, mode);
1274 if (ret == TDM_ERROR_NONE) {
1275 private_output->current_mode = mode;
1276 private_output->need_set_target_info = 1;
1277 TDM_INFO("mode: %dx%d %dhz", mode->hdisplay, mode->vdisplay, mode->vrefresh);
1280 _pthread_mutex_unlock(&private_display->lock);
1286 tdm_output_get_mode(tdm_output *output, const tdm_output_mode **mode)
1288 OUTPUT_FUNC_ENTRY();
1290 TDM_RETURN_VAL_IF_FAIL(mode != NULL, TDM_ERROR_INVALID_PARAMETER);
1292 _pthread_mutex_lock(&private_display->lock);
1294 *mode = private_output->current_mode;
1296 _pthread_mutex_unlock(&private_display->lock);
1302 tdm_output_set_dpms(tdm_output *output, tdm_output_dpms dpms_value)
1304 tdm_private_module *private_module;
1305 tdm_func_output *func_output;
1306 OUTPUT_FUNC_ENTRY();
1308 if (dpms_value > TDM_OUTPUT_DPMS_OFF) {
1309 if (dpms_value & TDM_OUTPUT_DPMS_DEFAULT_MASK) {
1310 TDM_ERR("Don't use the low-4bit for an extended DPMS mode: dpms_value(%x)", dpms_value);
1311 return TDM_ERROR_BAD_REQUEST;
1314 if (!(private_output->caps.capabilities & TDM_OUTPUT_CAPABILITY_EXTENDED_DPMS)) {
1315 TDM_ERR("output(%d) doesn't support the extended DPMS control: '%s'",
1316 private_output->pipe, tdm_dpms_str(dpms_value));
1317 return TDM_ERROR_BAD_REQUEST;
1321 _pthread_mutex_lock(&private_display->lock);
1323 if (private_output->waiting_dpms_change) {
1324 TDM_ERR("The previous DPMS change is still pending. Can't change DPMS again yet");
1325 _pthread_mutex_unlock(&private_display->lock);
1326 return TDM_ERROR_BAD_REQUEST;
1329 private_module = private_output->private_module;
1330 func_output = &private_module->func_output;
1332 TDM_INFO("output(%d) dpms '%s'", private_output->pipe, tdm_dpms_str(dpms_value));
1334 if (func_output->output_set_dpms)
1335 ret = func_output->output_set_dpms(private_output->output_backend, dpms_value);
1337 /* LCOV_EXCL_START */
1338 ret = TDM_ERROR_NONE;
1339 TDM_WRN("not implemented!!");
1341 /* LCOV_EXCL_STOP */
1345 if (ret == TDM_ERROR_NONE) {
1346 if (private_output->current_dpms_value != dpms_value) {
1347 private_output->current_dpms_value = dpms_value;
1348 _tdm_output_call_thread_cb_dpms(private_output, dpms_value);
1349 TDM_INFO("output(%d) dpms '%s' done", private_output->pipe, tdm_dpms_str(dpms_value));
1352 tdm_output_dpms temp = TDM_OUTPUT_DPMS_OFF;
1354 /* forcibly update current_dpms_value */
1355 tdm_output_get_dpms_internal(output, &temp);
1357 TDM_ERR("output(%d) set_dpms failed: dpms '%s'", private_output->pipe, tdm_dpms_str(temp));
1360 _pthread_mutex_unlock(&private_display->lock);
1365 /* LCOV_EXCL_START */
1367 tdm_output_set_dpms_async(tdm_output *output, tdm_output_dpms dpms_value)
1369 tdm_private_module *private_module;
1370 tdm_func_output *func_output;
1371 OUTPUT_FUNC_ENTRY();
1373 if (!(private_output->caps.capabilities & TDM_OUTPUT_CAPABILITY_ASYNC_DPMS)) {
1374 TDM_ERR("output doesn't support the asynchronous DPMS control!");
1375 return TDM_ERROR_BAD_REQUEST;
1378 if (dpms_value > TDM_OUTPUT_DPMS_OFF) {
1379 if (dpms_value & TDM_OUTPUT_DPMS_DEFAULT_MASK) {
1380 TDM_ERR("Don't use the low-4bit for an extended DPMS mode: dpms_value(%x)", dpms_value);
1381 return TDM_ERROR_BAD_REQUEST;
1384 if (!(private_output->caps.capabilities & TDM_OUTPUT_CAPABILITY_EXTENDED_DPMS)) {
1385 TDM_ERR("output(%d) doesn't support the extended DPMS control: '%s'",
1386 private_output->pipe, tdm_dpms_str(dpms_value));
1387 return TDM_ERROR_BAD_REQUEST;
1391 _pthread_mutex_lock(&private_display->lock);
1393 if (private_output->waiting_dpms_change) {
1394 TDM_ERR("The previous DPMS change is still pending. Can't change DPMS again yet");
1395 _pthread_mutex_unlock(&private_display->lock);
1396 return TDM_ERROR_BAD_REQUEST;
1399 private_module = private_output->private_module;
1400 func_output = &private_module->func_output;
1401 if (!func_output->output_set_dpms_handler) {
1402 TDM_WRN("not implemented: output_set_dpms_handler");
1403 _pthread_mutex_unlock(&private_display->lock);
1404 return TDM_ERROR_NOT_IMPLEMENTED;
1407 if (!func_output->output_set_dpms_async) {
1408 TDM_WRN("not implemented: output_set_dpms_async");
1409 _pthread_mutex_unlock(&private_display->lock);
1410 return TDM_ERROR_NOT_IMPLEMENTED;
1413 if (!private_output->regist_dpms_cb) {
1414 private_output->regist_dpms_cb = 1;
1415 ret = func_output->output_set_dpms_handler(private_output->output_backend,
1416 tdm_output_cb_dpms, private_output);
1417 if (ret != TDM_ERROR_NONE) {
1418 _pthread_mutex_unlock(&private_display->lock);
1419 TDM_ERR("Can't set the dpms handler!!");
1424 TDM_INFO("output(%d) dpms async '%s'", private_output->pipe, tdm_dpms_str(dpms_value));
1426 ret = func_output->output_set_dpms_async(private_output->output_backend, dpms_value);
1428 if (ret == TDM_ERROR_NONE) {
1429 private_output->waiting_dpms_change = 1;
1430 TDM_INFO("output(%d) dpms async '%s' waiting", private_output->pipe, tdm_dpms_str(dpms_value));
1432 tdm_output_dpms temp = TDM_OUTPUT_DPMS_OFF;
1434 /* forcibly update current_dpms_value */
1435 tdm_output_get_dpms_internal(output, &temp);
1437 TDM_ERR("output(%d) set_dpms_async failed: dpms '%s'", private_output->pipe, tdm_dpms_str(temp));
1440 _pthread_mutex_unlock(&private_display->lock);
1444 /* LCOV_EXCL_STOP */
1447 tdm_output_get_dpms_internal(tdm_output *output, tdm_output_dpms *dpms_value)
1449 tdm_private_output *private_output;
1450 tdm_private_module *private_module;
1451 tdm_func_output *func_output;
1452 tdm_error ret = TDM_ERROR_NONE;
1454 TDM_RETURN_VAL_IF_FAIL(tdm_output_is_valid(output), TDM_ERROR_INVALID_PARAMETER);
1456 private_output = (tdm_private_output*)output;
1458 /* TODO: this is ugly. But before calling the backend's output_get_dpms(), we have
1459 * to check that every backend's DPMS operation works correctly. In the future, we'd
1460 * better use a new environment variable instead of the commit_per_vblank variable to
1461 * distinguish whether we use the stored value or the backend's output_get_dpms().
1463 if (!private_output->commit_per_vblank) {
1464 *dpms_value = private_output->current_dpms_value;
1465 return TDM_ERROR_NONE;
1468 private_module = private_output->private_module;
1469 func_output = &private_module->func_output;
1471 if (!func_output->output_get_dpms) {
1472 /* LCOV_EXCL_START */
1473 *dpms_value = private_output->current_dpms_value;
1474 TDM_WRN("not implemented!!");
1475 return TDM_ERROR_NONE;
1476 /* LCOV_EXCL_STOP */
1479 ret = func_output->output_get_dpms(private_output->output_backend, dpms_value);
1480 if (ret != TDM_ERROR_NONE) {
1481 /* LCOV_EXCL_START */
1482 TDM_ERR("output_get_dpms failed");
1483 *dpms_value = TDM_OUTPUT_DPMS_OFF;
1484 /* LCOV_EXCL_STOP */
1487 /* checking with backend's value */
1488 if (*dpms_value != private_output->current_dpms_value) {
1489 TDM_ERR("output(%d) dpms changed suddenly: %s -> %s",
1490 private_output->pipe, tdm_dpms_str(private_output->current_dpms_value),
1491 tdm_dpms_str(*dpms_value));
1492 private_output->current_dpms_value = *dpms_value;
1493 _tdm_output_call_thread_cb_dpms(private_output, *dpms_value);
1500 tdm_output_get_dpms(tdm_output *output, tdm_output_dpms *dpms_value)
1502 OUTPUT_FUNC_ENTRY();
1504 TDM_RETURN_VAL_IF_FAIL(dpms_value != NULL, TDM_ERROR_INVALID_PARAMETER);
1506 _pthread_mutex_lock(&private_display->lock);
1508 ret = tdm_output_get_dpms_internal(output, dpms_value);
1510 _pthread_mutex_unlock(&private_display->lock);
1516 tdm_output_has_capture_capability(tdm_output *output, unsigned int *has_capability)
1518 tdm_private_module *private_module;
1520 OUTPUT_FUNC_ENTRY();
1522 TDM_RETURN_VAL_IF_FAIL(has_capability != NULL, TDM_ERROR_INVALID_PARAMETER);
1524 _pthread_mutex_lock(&private_display->lock);
1526 private_module = private_output->private_module;
1528 if (!(private_module->capabilities & TDM_DISPLAY_CAPABILITY_CAPTURE))
1529 *has_capability = 0;
1530 else if (!(private_module->caps_capture.capabilities & TDM_CAPTURE_CAPABILITY_OUTPUT))
1531 *has_capability = 0;
1533 *has_capability = 1;
1535 _pthread_mutex_unlock(&private_display->lock);
1540 EXTERN tdm_capture *
1541 tdm_output_create_capture(tdm_output *output, tdm_error *error)
1543 tdm_capture *capture = NULL;
1545 OUTPUT_FUNC_ENTRY_ERROR();
1547 _pthread_mutex_lock(&private_display->lock);
1549 capture = (tdm_capture *)tdm_capture_create_output_internal(private_output, error);
1551 _pthread_mutex_unlock(&private_display->lock);
1556 EXTERN tdm_hwc_window *
1557 tdm_output_hwc_create_window(tdm_output *output, tdm_error *error)
1559 tdm_hwc_window *hwc_window = NULL;
1561 OUTPUT_FUNC_ENTRY_ERROR();
1563 _pthread_mutex_lock(&private_display->lock);
1565 if (private_output->caps.capabilities & TDM_OUTPUT_CAPABILITY_HWC)
1566 hwc_window = (tdm_hwc_window *)tdm_hwc_window_create_internal(private_output, 0, error);
1568 /* LCOV_EXCL_START */
1569 TDM_ERR("output(%p) does not support HWC", private_output);
1571 *error = TDM_ERROR_BAD_REQUEST;
1572 /* LCOV_EXCL_STOP */
1575 _pthread_mutex_unlock(&private_display->lock);
1580 EXTERN tdm_hwc_window *
1581 tdm_output_hwc_create_video_window(tdm_output *output, tdm_error *error)
1583 tdm_hwc_window *hwc_window = NULL;
1585 OUTPUT_FUNC_ENTRY_ERROR();
1587 _pthread_mutex_lock(&private_display->lock);
1589 if (private_output->caps.capabilities & TDM_OUTPUT_CAPABILITY_HWC)
1590 hwc_window = (tdm_hwc_window *)tdm_hwc_window_create_internal(private_output, 1, error);
1592 /* LCOV_EXCL_START */
1593 TDM_ERR("output(%p) does not support HWC", private_output);
1595 *error = TDM_ERROR_BAD_REQUEST;
1596 /* LCOV_EXCL_STOP */
1599 _pthread_mutex_unlock(&private_display->lock);
1605 tdm_output_hwc_destroy_window(tdm_output *output, tdm_hwc_window *hwc_window)
1607 OUTPUT_FUNC_ENTRY();
1609 TDM_RETURN_VAL_IF_FAIL(hwc_window != NULL, TDM_ERROR_INVALID_PARAMETER);
1611 _pthread_mutex_lock(&private_display->lock);
1613 ret = tdm_hwc_window_destroy_internal(hwc_window);
1615 _pthread_mutex_unlock(&private_display->lock);
1621 tdm_output_hwc_validate(tdm_output *output, tdm_hwc_window **composited_wnds,
1622 uint32_t num_wnds, uint32_t *num_types)
1624 tdm_private_module *private_module;
1625 tdm_func_output *func_output = NULL;
1626 tdm_private_hwc_window **composited_wnds_frontend = NULL;
1627 tdm_hwc_window **composited_wnds_backend = NULL;
1630 OUTPUT_FUNC_ENTRY();
1632 TDM_RETURN_VAL_IF_FAIL(num_types != NULL, TDM_ERROR_INVALID_PARAMETER);
1634 _pthread_mutex_lock(&private_display->lock);
1636 if (!(private_output->caps.capabilities & TDM_OUTPUT_CAPABILITY_HWC)) {
1637 TDM_ERR("output(%p) does not support HWC", private_output);
1638 _pthread_mutex_unlock(&private_display->lock);
1639 return TDM_ERROR_BAD_REQUEST;
1642 private_module = private_output->private_module;
1643 func_output = &private_module->func_output;
1645 if (!func_output->output_hwc_validate) {
1646 /* LCOV_EXCL_START */
1647 _pthread_mutex_unlock(&private_display->lock);
1648 TDM_WRN("not implemented!!");
1649 return TDM_ERROR_NOT_IMPLEMENTED;
1650 /* LCOV_EXCL_STOP */
1653 if (num_wnds == 0) {
1654 ret = func_output->output_hwc_validate(private_output->output_backend, NULL, 0, num_types);
1656 _pthread_mutex_unlock(&private_display->lock);
1661 composited_wnds_backend = calloc(num_wnds, sizeof(tdm_hwc_window *));
1662 if (!composited_wnds_backend) {
1663 /* LCOV_EXCL_START */
1664 _pthread_mutex_unlock(&private_display->lock);
1665 return TDM_ERROR_OUT_OF_MEMORY;
1666 /* LCOV_EXCL_STOP */
1669 composited_wnds_frontend = (tdm_private_hwc_window **)composited_wnds;
1671 for (i = 0; i < num_wnds; i++)
1672 composited_wnds_backend[i] = composited_wnds_frontend[i]->hwc_window_backend;
1674 ret = func_output->output_hwc_validate(private_output->output_backend, composited_wnds_backend, num_wnds, num_types);
1676 free(composited_wnds_backend);
1678 _pthread_mutex_unlock(&private_display->lock);
1684 tdm_output_hwc_set_need_validate_handler(tdm_output *output,
1685 tdm_output_need_validate_handler hndl)
1687 OUTPUT_FUNC_ENTRY();
1689 TDM_RETURN_VAL_IF_FAIL(hndl != NULL, TDM_ERROR_INVALID_PARAMETER);
1691 _pthread_mutex_lock(&private_display->lock);
1693 if (!(private_output->caps.capabilities & TDM_OUTPUT_CAPABILITY_HWC)) {
1694 TDM_ERR("output(%p) does not support HWC", private_output);
1695 _pthread_mutex_unlock(&private_display->lock);
1696 return TDM_ERROR_BAD_REQUEST;
1699 /* there's no reason to allow this */
1700 if (private_output->need_validate.hndl) {
1702 _pthread_mutex_unlock(&private_display->lock);
1703 return TDM_ERROR_OPERATION_FAILED;
1706 private_output->need_validate.hndl = hndl;
1708 _pthread_mutex_unlock(&private_display->lock);
1714 tdm_output_hwc_get_changed_composition_types(tdm_output *output,
1715 uint32_t *num_elements,
1716 tdm_hwc_window **hwc_window,
1717 tdm_hwc_window_composition *composition_types)
1719 tdm_private_module *private_module;
1720 tdm_func_output *func_output = NULL;
1721 tdm_private_hwc_window * private_hwc_window = NULL;
1724 OUTPUT_FUNC_ENTRY();
1726 TDM_RETURN_VAL_IF_FAIL(num_elements != NULL, TDM_ERROR_INVALID_PARAMETER);
1728 _pthread_mutex_lock(&private_display->lock);
1730 if (!(private_output->caps.capabilities & TDM_OUTPUT_CAPABILITY_HWC)) {
1731 TDM_ERR("output(%p) does not support HWC", private_output);
1732 _pthread_mutex_unlock(&private_display->lock);
1733 return TDM_ERROR_BAD_REQUEST;
1736 private_module = private_output->private_module;
1737 func_output = &private_module->func_output;
1739 if (!func_output->output_hwc_get_changed_composition_types) {
1740 /* LCOV_EXCL_START */
1741 _pthread_mutex_unlock(&private_display->lock);
1742 TDM_WRN("not implemented!!");
1743 return TDM_ERROR_NOT_IMPLEMENTED;
1744 /* LCOV_EXCL_STOP */
1747 ret = func_output->output_hwc_get_changed_composition_types(private_output->output_backend,
1748 num_elements, hwc_window, composition_types);
1749 if (ret != TDM_ERROR_NONE) {
1750 /* LCOV_EXCL_START */
1751 _pthread_mutex_unlock(&private_display->lock);
1753 /* LCOV_EXCL_STOP */
1756 if (hwc_window == NULL || composition_types == NULL) {
1757 _pthread_mutex_unlock(&private_display->lock);
1758 return TDM_ERROR_NONE;
1761 for (i = 0; i < *num_elements; i++) {
1763 private_hwc_window = _tdm_output_find_private_hwc_window(private_output, hwc_window[i]);
1765 if (private_hwc_window == NULL) {
1766 /* LCOV_EXCL_START */
1767 TDM_ERR("failed! This should never happen!");
1768 func_output->output_hwc_destroy_window(private_output->output_backend, hwc_window[i]);
1770 _pthread_mutex_unlock(&private_display->lock);
1771 return TDM_ERROR_OPERATION_FAILED;
1772 /* LCOV_EXCL_STOP */
1775 hwc_window[i] = (tdm_hwc_window*)private_hwc_window;
1778 _pthread_mutex_unlock(&private_display->lock);
1784 tdm_output_hwc_accept_changes(tdm_output *output)
1786 tdm_private_module *private_module;
1787 tdm_func_output *func_output = NULL;
1789 OUTPUT_FUNC_ENTRY();
1791 _pthread_mutex_lock(&private_display->lock);
1793 if (!(private_output->caps.capabilities & TDM_OUTPUT_CAPABILITY_HWC)) {
1794 TDM_ERR("output(%p) does not support HWC", private_output);
1795 _pthread_mutex_unlock(&private_display->lock);
1796 return TDM_ERROR_BAD_REQUEST;
1799 private_module = private_output->private_module;
1800 func_output = &private_module->func_output;
1802 if (!func_output->output_hwc_validate) {
1803 /* LCOV_EXCL_START */
1804 _pthread_mutex_unlock(&private_display->lock);
1805 TDM_WRN("not implemented!!");
1806 return TDM_ERROR_NOT_IMPLEMENTED;
1807 /* LCOV_EXCL_STOP */
1810 ret = func_output->output_hwc_accept_changes(private_output->output_backend);
1812 _pthread_mutex_unlock(&private_display->lock);
1818 tdm_output_hwc_get_target_buffer_queue(tdm_output *output, tdm_error *error)
1820 tdm_private_module *private_module;
1821 tdm_func_output *func_output = NULL;
1822 tbm_surface_queue_h queue = NULL;
1824 OUTPUT_FUNC_ENTRY_ERROR();
1826 _pthread_mutex_lock(&private_display->lock);
1828 if (!(private_output->caps.capabilities & TDM_OUTPUT_CAPABILITY_HWC)) {
1829 TDM_ERR("output(%p) does not support HWC", private_output);
1831 *error = TDM_ERROR_BAD_REQUEST;
1832 _pthread_mutex_unlock(&private_display->lock);
1836 private_module = private_output->private_module;
1837 func_output = &private_module->func_output;
1839 if (!func_output->output_hwc_get_target_buffer_queue) {
1840 /* LCOV_EXCL_START */
1841 _pthread_mutex_unlock(&private_display->lock);
1842 TDM_WRN("not implemented!!");
1844 /* LCOV_EXCL_STOP */
1847 queue = func_output->output_hwc_get_target_buffer_queue(private_output->output_backend, error);
1849 _pthread_mutex_unlock(&private_display->lock);
1855 tdm_output_hwc_set_client_target_buffer(tdm_output *output, tbm_surface_h target_buffer, tdm_hwc_region damage)
1857 tdm_private_module *private_module;
1858 tdm_func_output *func_output = NULL;
1860 OUTPUT_FUNC_ENTRY();
1862 _pthread_mutex_lock(&private_display->lock);
1864 if (!(private_output->caps.capabilities & TDM_OUTPUT_CAPABILITY_HWC)) {
1865 TDM_ERR("output(%p) does not support HWC", private_output);
1866 _pthread_mutex_unlock(&private_display->lock);
1867 return TDM_ERROR_BAD_REQUEST;
1870 if (tdm_debug_dump & TDM_DUMP_FLAG_WINDOW) {
1871 /* LCOV_EXCL_START */
1872 char str[TDM_PATH_LEN];
1874 snprintf(str, TDM_PATH_LEN, "target_window_%d_%03d",
1875 private_output->index, i++);
1876 tdm_helper_dump_buffer_str(target_buffer, tdm_debug_dump_dir, str);
1877 /* LCOV_EXCL_STOP */
1880 private_module = private_output->private_module;
1881 func_output = &private_module->func_output;
1883 if (!func_output->output_hwc_set_client_target_buffer) {
1884 /* LCOV_EXCL_START */
1885 _pthread_mutex_unlock(&private_display->lock);
1886 TDM_WRN("not implemented!!");
1887 return TDM_ERROR_NOT_IMPLEMENTED;
1888 /* LCOV_EXCL_STOP */
1891 ret = func_output->output_hwc_set_client_target_buffer(private_output->output_backend, target_buffer, damage);
1893 _pthread_mutex_unlock(&private_display->lock);
1899 tdm_output_hwc_unset_client_target_buffer(tdm_output *output)
1901 tdm_private_module *private_module;
1902 tdm_func_output *func_output = NULL;
1904 OUTPUT_FUNC_ENTRY();
1906 _pthread_mutex_lock(&private_display->lock);
1908 if (!(private_output->caps.capabilities & TDM_OUTPUT_CAPABILITY_HWC)) {
1909 TDM_ERR("output(%p) does not support HWC", private_output);
1910 _pthread_mutex_unlock(&private_display->lock);
1911 return TDM_ERROR_BAD_REQUEST;
1914 private_module = private_output->private_module;
1915 func_output = &private_module->func_output;
1917 if (!func_output->output_hwc_unset_client_target_buffer) {
1918 /* LCOV_EXCL_START */
1919 _pthread_mutex_unlock(&private_display->lock);
1920 TDM_ERR("not implemented!!");
1921 return TDM_ERROR_NOT_IMPLEMENTED;
1922 /* LCOV_EXCL_STOP */
1925 ret = func_output->output_hwc_unset_client_target_buffer(private_output->output_backend);
1927 _pthread_mutex_unlock(&private_display->lock);
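/* Layer commit handler used by tdm_output_hwc_commit_client_target_buffer():
 * it unwraps the stored user callback, invokes it with the commit timestamp
 * and frees the wrapper.
 */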
1933 _tdm_output_hwc_layer_commit_handler(tdm_layer *layer, unsigned int sequence,
1934 unsigned int tv_sec, unsigned int tv_usec,
1937 tdm_private_output_hwc_target_buffer_commit_handler *output_hwc_target_buffer_commit_handler = (tdm_private_output_hwc_target_buffer_commit_handler *)user_data;
1938 tdm_output_hwc_target_buffer_commit_handler func = output_hwc_target_buffer_commit_handler->func;
1939 tdm_output *output = (tdm_output *)output_hwc_target_buffer_commit_handler->private_output;
1940 void *data = output_hwc_target_buffer_commit_handler->user_data;
1942 func(output, sequence, tv_sec, tv_usec, data);
1944 free(output_hwc_target_buffer_commit_handler);
1948 tdm_output_hwc_commit_client_target_buffer(tdm_output *output, tdm_output_hwc_target_buffer_commit_handler func, void *user_data)
1950 tdm_private_module *private_module;
1951 tdm_func_output *func_output;
1952 tdm_private_output_hwc_target_buffer_commit_handler *output_hwc_target_buffer_commit_handler;
1953 tdm_layer *layer = NULL;
1954 tdm_private_layer *private_layer;
1955 const tdm_output_mode *mode;
1956 tbm_surface_h buffer;
1958 OUTPUT_FUNC_ENTRY();
1960 _pthread_mutex_lock(&private_display->lock);
1962 if (!(private_output->caps.capabilities & TDM_OUTPUT_CAPABILITY_HWC)) {
1963 TDM_ERR("output(%p) does not support HWC", private_output);
1964 _pthread_mutex_unlock(&private_display->lock);
1965 return TDM_ERROR_BAD_REQUEST;
1968 private_module = private_output->private_module;
1969 func_output = &private_module->func_output;
1971 if (!func_output->output_hwc_get_client_target_buffer_layer) {
1972 /* LCOV_EXCL_START */
1973 _pthread_mutex_unlock(&private_display->lock);
1974 TDM_ERR("not implemented!!");
1975 return TDM_ERROR_NOT_IMPLEMENTED;
1976 /* LCOV_EXCL_STOP */
1979 layer = func_output->output_hwc_get_client_target_buffer_layer(private_output->output_backend,
1982 /* LCOV_EXCL_START */
1983 _pthread_mutex_unlock(&private_display->lock);
1984 TDM_ERR("no assigned layer!!");
1985 return TDM_ERROR_INVALID_PARAMETER;
1986 /* LCOV_EXCL_STOP */
1989 private_layer = (tdm_private_layer*)layer;
1991 if (!func_output->output_hwc_get_client_target_buffer) {
1992 /* LCOV_EXCL_START */
1993 _pthread_mutex_unlock(&private_display->lock);
1994 TDM_ERR("not implemented!!");
1995 return TDM_ERROR_NOT_IMPLEMENTED;
1996 /* LCOV_EXCL_STOP */
1999 buffer = func_output->output_hwc_get_client_target_buffer(private_output->output_backend,
2002 ret = tdm_layer_set_buffer_internal(private_layer, buffer);
2004 ret = tdm_layer_unset_buffer_internal(private_layer);
2005 if (ret != TDM_ERROR_NONE) {
2006 /* LCOV_EXCL_START */
2007 TDM_ERR("failed: layer set info(window)");
2008 _pthread_mutex_unlock(&private_display->lock);
2009 /* LCOV_EXCL_STOP */
2013 if (private_output->need_set_target_info) {
2014 mode = private_output->current_mode;
2015 private_output->target_buffer_info.src_config.size.h = mode->hdisplay;
2016 private_output->target_buffer_info.src_config.size.v = mode->vdisplay;
2017 private_output->target_buffer_info.src_config.pos.x = 0;
2018 private_output->target_buffer_info.src_config.pos.y = 0;
2019 private_output->target_buffer_info.src_config.pos.w = mode->hdisplay;
2020 private_output->target_buffer_info.src_config.pos.h = mode->vdisplay;
2021 private_output->target_buffer_info.dst_pos.x = 0;
2022 private_output->target_buffer_info.dst_pos.y = 0;
2023 private_output->target_buffer_info.dst_pos.w = mode->hdisplay;
2024 private_output->target_buffer_info.dst_pos.h = mode->vdisplay;
2025 private_output->target_buffer_info.transform = TDM_TRANSFORM_NORMAL;
2027 ret = tdm_layer_set_info_internal(private_layer, &private_output->target_buffer_info);
2028 if (ret != TDM_ERROR_NONE) {
2029 /* LCOV_EXCL_START */
2030 TDM_ERR("failed: layer set info(window)");
2031 _pthread_mutex_unlock(&private_display->lock);
2032 /* LCOV_EXCL_STOP */
2036 private_output->need_set_target_info = 0;
2039 output_hwc_target_buffer_commit_handler = calloc(1, sizeof(tdm_private_output_hwc_target_buffer_commit_handler));
2040 if (!output_hwc_target_buffer_commit_handler) {
2041 /* LCOV_EXCL_START */
2042 TDM_ERR("failed: alloc memory");
2043 _pthread_mutex_unlock(&private_display->lock);
2044 return TDM_ERROR_OUT_OF_MEMORY;
2045 /* LCOV_EXCL_STOP */
2048 output_hwc_target_buffer_commit_handler->private_output = private_output;
2049 output_hwc_target_buffer_commit_handler->func = func;
2050 output_hwc_target_buffer_commit_handler->user_data = user_data;
2052 ret = tdm_layer_commit_internal(private_layer, _tdm_output_hwc_layer_commit_handler, output_hwc_target_buffer_commit_handler);
2053 if (ret != TDM_ERROR_NONE) {
2054 /* LCOV_EXCL_START */
2055 TDM_ERR("failed: commit layer(target buffer)");
2056 free(output_hwc_target_buffer_commit_handler);
2057 _pthread_mutex_unlock(&private_display->lock);
2058 /* LCOV_EXCL_STOP */
2062 _pthread_mutex_unlock(&private_display->lock);

EXTERN tdm_error
tdm_output_hwc_get_video_supported_formats(tdm_output *output, const tbm_format **formats,
										   int *count)
{
	tdm_private_module *private_module;
	tdm_func_output *func_output;
	OUTPUT_FUNC_ENTRY();

	TDM_RETURN_VAL_IF_FAIL(formats != NULL, TDM_ERROR_INVALID_PARAMETER);
	TDM_RETURN_VAL_IF_FAIL(count != NULL, TDM_ERROR_INVALID_PARAMETER);

	_pthread_mutex_lock(&private_display->lock);

	private_module = private_output->private_module;
	func_output = &private_module->func_output;

	if (!func_output->output_hwc_get_video_supported_formats) {
		/* LCOV_EXCL_START */
		_pthread_mutex_unlock(&private_display->lock);
		TDM_WRN("not implemented!!");
		return TDM_ERROR_NOT_IMPLEMENTED;
		/* LCOV_EXCL_STOP */
	}

	ret = func_output->output_hwc_get_video_supported_formats(
		private_output->output_backend, formats, count);

	_pthread_mutex_unlock(&private_display->lock);

	return ret;
}
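
/* checks whether the hwc-capable output is still present in its module's
 * output list; it may have been destroyed while an event was in flight */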
static int
_is_hwc_output_still_existed(tdm_private_output *private_output)
{
	tdm_private_module *private_module = private_output->private_module;
	tdm_private_output *o = NULL;

	LIST_FOR_EACH_ENTRY(o, &private_module->output_list, link) {
		if (!(o->caps.capabilities & TDM_OUTPUT_CAPABILITY_HWC))
			continue;

		if (o == private_output)
			return 1;
	}

	return 0;
}

/* gets called on behalf of the ecore-main-loop thread */
INTERN void
tdm_output_need_validate_handler_thread(tdm_private_display *private_display, void *object, tdm_thread_cb_base *cb_base, void *user_data)
{
	tdm_private_output *private_output = object;

	TDM_RETURN_IF_FAIL(private_output != NULL);

	_pthread_mutex_lock(&private_display->lock);

	/* 'private_output' arrives within an event, so the output it points to
	 * may already have been destroyed */
	if (!_is_hwc_output_still_existed(private_output)) {
		_pthread_mutex_unlock(&private_display->lock);
		return;
	}

	_pthread_mutex_unlock(&private_display->lock);

	TDM_INFO("tdm-backend asks for revalidation for the output:%p.", private_output);

	if (private_output->need_validate.hndl)
		private_output->need_validate.hndl((tdm_output*)private_output);
}
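
/* relays a 'need to revalidate' request, written by the backend to the eventfd,
 * to the frontend as a TDM_THREAD_CB_NEED_VALIDATE event */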
/* gets called on behalf of the tdm-thread */
static tdm_error
_need_validate_handler(int fd, tdm_event_loop_mask mask, void *user_data)
{
	tdm_thread_cb_need_validate ev;
	tdm_private_output *private_output;
	tdm_error ret;
	uint64_t value;

	private_output = (tdm_private_output *)user_data;

	if (read(private_output->need_validate.event_fd, &value, sizeof(value)) < 0) {
		TDM_ERR("error while trying to read from the need_validate.event_fd.");
		return TDM_ERROR_OPERATION_FAILED;
	}

	memset(&ev, 0, sizeof ev);
	ev.base.type = TDM_THREAD_CB_NEED_VALIDATE;
	ev.base.length = sizeof ev;
	ev.base.object_stamp = private_output->stamp;
	ev.base.data = NULL;

	ret = tdm_thread_cb_call(private_output, &ev.base);
	TDM_WARNING_IF_FAIL(ret == TDM_ERROR_NONE);

	TDM_INFO("tdm-thread: got a 'need to revalidate' event for the output:%p.", private_output);

	/* who cares about this? */
	return TDM_ERROR_NONE;
}

INTERN tdm_error
tdm_output_need_validate_event_init(tdm_output *output)
{
	int fd = -1;

	OUTPUT_FUNC_ENTRY();

	TDM_RETURN_VAL_IF_FAIL(TDM_MUTEX_IS_LOCKED(), TDM_ERROR_OPERATION_FAILED);

	if (!(private_output->caps.capabilities & TDM_OUTPUT_CAPABILITY_HWC)) {
		TDM_ERR("output(%p) not support HWC", private_output);
		return TDM_ERROR_BAD_REQUEST;
	}

	/* hook an eventfd into the event loop listened to & handled by the tdm-thread */
	fd = eventfd(0, 0);
	TDM_WARNING_IF_FAIL(fd >= 0);

	private_output->need_validate.event_source = tdm_event_loop_add_fd_handler(private_display,
			fd, TDM_EVENT_LOOP_READABLE, _need_validate_handler, private_output, &ret);
	TDM_WARNING_IF_FAIL(ret == TDM_ERROR_NONE);

	private_output->need_validate.event_fd = fd;

	TDM_INFO("register an output:%p for the revalidation, event_fd:%d.", private_output, fd);

	return TDM_ERROR_NONE;
}
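
/* commit-per-vblank mode: 0: disable, 1: enable (1 layer), 2: enable (previous commit) */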
INTERN tdm_error
tdm_output_choose_commit_per_vblank_mode(tdm_private_output *private_output, int mode)
{
	if (!private_output)
		return TDM_ERROR_INVALID_PARAMETER;

	if (mode < 0 || mode > 2)
		return TDM_ERROR_INVALID_PARAMETER;

	private_output->commit_per_vblank = mode;

	if (private_output->commit_per_vblank == 0)
		TDM_INFO("commit per vblank: disable");
	else if (private_output->commit_per_vblank == 1)
		TDM_INFO("commit per vblank: enable (1 layer)");
	else if (private_output->commit_per_vblank == 2)
		TDM_INFO("commit per vblank: enable (previous commit)");

	return TDM_ERROR_NONE;
}