1 /**************************************************************************
5 * Copyright 2015 Samsung Electronics co., Ltd. All Rights Reserved.
7 * Contact: Eunchul Kim <chulspro.kim@samsung.com>,
8 * JinYoung Jeon <jy0.jeon@samsung.com>,
9 * Taeheon Kim <th908.kim@samsung.com>,
10 * YoungJun Cho <yj44.cho@samsung.com>,
11 * SooChan Lim <sc1.lim@samsung.com>,
12 * Boram Park <boram1288.park@samsung.com>
14 * Permission is hereby granted, free of charge, to any person obtaining a
15 * copy of this software and associated documentation files (the
16 * "Software"), to deal in the Software without restriction, including
17 * without limitation the rights to use, copy, modify, merge, publish,
18 * distribute, sub license, and/or sell copies of the Software, and to
19 * permit persons to whom the Software is furnished to do so, subject to
20 * the following conditions:
22 * The above copyright notice and this permission notice (including the
23 * next paragraph) shall be included in all copies or substantial portions of the Software.
26 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
27 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
28 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
29 * IN NO EVENT SHALL PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR
30 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
31 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
32 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
34 **************************************************************************/
40 #include "tdm_private.h"
/* Common entry boilerplate for tdm_output_* API functions that return a
 * tdm_error: validates the caller-supplied 'output' handle and derives the
 * private output/display pointers used by the function body.
 * NOTE(review): blank/continuation lines appear to be missing from this copy
 * of the file; code text is kept byte-identical. */
44 #define OUTPUT_FUNC_ENTRY() \
45 tdm_private_display *private_display; \
46 tdm_private_output *private_output; \
47 tdm_error ret = TDM_ERROR_NONE; /* default TDM_ERROR_NONE */\
48 TDM_RETURN_VAL_IF_FAIL(output != NULL, TDM_ERROR_INVALID_PARAMETER); \
49 private_output = (tdm_private_output*)output; \
50 private_display = private_output->private_display
/* Variant of OUTPUT_FUNC_ENTRY() for API functions that return a pointer and
 * report failure through an out 'error' parameter: on a NULL 'output' it
 * returns NULL instead of an error code. */
52 #define OUTPUT_FUNC_ENTRY_ERROR() \
53 tdm_private_display *private_display; \
54 tdm_private_output *private_output; \
55 tdm_error ret = TDM_ERROR_NONE; /* default TDM_ERROR_NONE */\
56 TDM_RETURN_VAL_IF_FAIL_WITH_ERROR(output != NULL, TDM_ERROR_INVALID_PARAMETER, NULL); \
57 private_output = (tdm_private_output*)output; \
58 private_display = private_output->private_display
/* Forward declaration: (re)arms this output's vblank-timeout timer.
 * NOTE(review): the return-type line (presumably 'static tdm_error') seems to
 * be missing from this copy — see the definition below. */
61 _tdm_output_vblank_timeout_update(tdm_private_output *private_output, int ms_delay);
/* Timer callback fired when a requested vblank has not arrived in time.
 * Logs the timeout count and dumps every pending vblank handler on the output
 * so a stuck backend can be diagnosed. user_data is the tdm_private_output.
 * NOTE(review): brace/argument lines (e.g. 'private_output->pipe,' for the
 * first TDM_ERR) appear missing from this copy; text kept byte-identical. */
64 _tdm_output_vblank_timeout_cb(void *user_data)
66 tdm_private_output *private_output = user_data;
67 tdm_private_output_vblank_handler *v = NULL;
69 TDM_RETURN_VAL_IF_FAIL(private_output != NULL, TDM_ERROR_OPERATION_FAILED);
/* Count consecutive expirations; reset when the timer is (re)created. */
71 private_output->vblank_timeout_timer_expired++;
73 TDM_ERR("TDM output(%d) vblank TIMEOUT!! (%d time%s)",
75 private_output->vblank_timeout_timer_expired,
76 (private_output->vblank_timeout_timer_expired > 1) ? "s" : "");
/* Dump all handlers still waiting for a vblank on this output. */
78 LIST_FOR_EACH_ENTRY(v, &private_output->vblank_handler_list, link) {
79 TDM_ERR("vblank_handler(%p) interval(%d) sync(%d) sent_to_frontend(%u) owner_tid(%d)",
80 v, v->interval, v->sync, v->sent_to_frontend, v->owner_tid);
83 return TDM_ERROR_NONE;
/* Debug helper: logs every vblank handler on 'private_output' registered with
 * the given 'user_data' (used to inspect what a particular waiter is waiting
 * for). NOTE(review): the 'continue;' after the user_data mismatch test and
 * the surrounding braces appear missing from this copy. */
87 tdm_output_vblank_print_wait_information(tdm_private_output *private_output, void *user_data)
89 tdm_private_output_vblank_handler *v = NULL;
91 TDM_RETURN_IF_FAIL(private_output != NULL);
92 TDM_RETURN_IF_FAIL(user_data != NULL);
94 TDM_ERR("TDM output(%d) vblank user_data(%p) info!!", private_output->pipe, user_data);
/* Only report handlers belonging to this user_data. */
96 LIST_FOR_EACH_ENTRY(v, &private_output->vblank_handler_list, link) {
97 if (v->user_data != user_data)
99 TDM_ERR("vblank_handler(%p) interval(%d) sync(%d) sent_to_frontend(%u) owner_tid(%d)",
100 v, v->interval, v->sync, v->sent_to_frontend, v->owner_tid);
/* (Re)arms the per-output vblank timeout timer. Lazily creates the timer on
 * first use (resetting the expiration counter) and then updates its delay to
 * 'ms_delay' milliseconds; a delay of 0 presumably disarms it — TODO confirm
 * against tdm_event_loop_source_timer_update(). */
105 _tdm_output_vblank_timeout_update(tdm_private_output *private_output, int ms_delay)
109 if (!private_output->vblank_timeout_timer) {
110 private_output->vblank_timeout_timer =
111 tdm_event_loop_add_timer_handler(private_output->private_display,
112 _tdm_output_vblank_timeout_cb,
/* Creation failure is logged; error-return line not visible in this copy. */
115 if (!private_output->vblank_timeout_timer) {
116 TDM_ERR("output(%d) couldn't add timer", private_output->pipe);
119 TDM_INFO("output(%d) create vblank timeout timer", private_output->pipe);
120 private_output->vblank_timeout_timer_expired = 0;
123 ret = tdm_event_loop_source_timer_update(private_output->vblank_timeout_timer, ms_delay);
124 if (ret != TDM_ERROR_NONE) {
125 TDM_ERR("output(%d) couldn't update timer", private_output->pipe);
/* Looks up the frontend hwc-window wrapper that owns the given backend
 * hwc_window handle by scanning this output's hwc_window_list.
 * Returns the match; the not-found 'return NULL;' line is not visible in
 * this copy of the file. */
130 static tdm_private_hwc_window *
131 _tdm_output_find_private_hwc_window(tdm_private_output *private_output,
132 tdm_hwc_window *hwc_window_backend)
134 tdm_private_hwc_window *private_hwc_window = NULL;
136 LIST_FOR_EACH_ENTRY(private_hwc_window, &private_output->hwc_window_list, link) {
137 if (private_hwc_window->hwc_window_backend == hwc_window_backend)
138 return private_hwc_window;
/* One-time init for the output module: registers the stamp-based lookup
 * function for every output-related thread-callback type so cross-thread
 * events can be routed back to their tdm_private_output. */
145 tdm_output_init(tdm_private_display *private_display)
147 tdm_thread_cb_set_find_func(TDM_THREAD_CB_OUTPUT_COMMIT, tdm_display_find_output_stamp);
148 tdm_thread_cb_set_find_func(TDM_THREAD_CB_OUTPUT_VBLANK, tdm_display_find_output_stamp);
149 tdm_thread_cb_set_find_func(TDM_THREAD_CB_OUTPUT_STATUS, tdm_display_find_output_stamp);
150 tdm_thread_cb_set_find_func(TDM_THREAD_CB_OUTPUT_DPMS, tdm_display_find_output_stamp);
153 return TDM_ERROR_NONE;
/* Returns the backend module that owns this output. Takes the display lock
 * only to read private_output->private_module; 'error' (when non-NULL —
 * the NULL-check line is not visible here) is set to TDM_ERROR_NONE. */
157 tdm_output_get_backend_module(tdm_output *output, tdm_error *error)
159 tdm_private_module *private_module;
161 OUTPUT_FUNC_ENTRY_ERROR();
163 _pthread_mutex_lock(&private_display->lock);
165 private_module = private_output->private_module;
168 *error = TDM_ERROR_NONE;
170 _pthread_mutex_unlock(&private_display->lock);
172 return private_module;
/* Getter for the output's EDID-style identity strings (maker/model/name).
 * Each out-pointer is optional; the per-pointer NULL checks appear on lines
 * missing from this copy. Returned strings are owned by the output caps. */
176 tdm_output_get_model_info(tdm_output *output, const char **maker,
177 const char **model, const char **name)
181 _pthread_mutex_lock(&private_display->lock);
184 *maker = private_output->caps.maker;
186 *model = private_output->caps.model;
188 *name = private_output->caps.name;
190 _pthread_mutex_unlock(&private_display->lock);
/* Getter for the output capability bitmask (e.g. TDM_OUTPUT_CAPABILITY_HWC)
 * cached in the output's caps; read under the display lock. */
196 tdm_output_get_capabilities(tdm_output *output, tdm_output_capability *capabilities)
200 TDM_RETURN_VAL_IF_FAIL(capabilities != NULL, TDM_ERROR_INVALID_PARAMETER);
202 _pthread_mutex_lock(&private_display->lock);
204 *capabilities = private_output->caps.capabilities;
206 _pthread_mutex_unlock(&private_display->lock);
/* Getter for the cached connector status (connected/disconnected/...);
 * read under the display lock. */
212 tdm_output_get_conn_status(tdm_output *output, tdm_output_conn_status *status)
216 TDM_RETURN_VAL_IF_FAIL(status != NULL, TDM_ERROR_INVALID_PARAMETER);
218 _pthread_mutex_lock(&private_display->lock);
220 *status = private_output->caps.status;
222 _pthread_mutex_unlock(&private_display->lock);
/* Thread-callback trampoline for output change events (connection status or
 * DPMS). Translates the generic cb_base payload into a (type, value) pair and
 * invokes the user's change handler on the handler's owner thread, dropping
 * the display lock around the user callback to avoid re-entrancy deadlocks. */
228 tdm_output_thread_cb_change(tdm_private_display *private_display, void *object, tdm_thread_cb_base *cb_base, void *user_data)
230 tdm_private_output *private_output = object;
231 tdm_private_output_change_handler *change_handler = user_data;
232 tdm_output_change_type type = TDM_OUTPUT_CHANGE_CONNECTION;
233 tdm_value value = {.u32 = 0 };
235 TDM_RETURN_IF_FAIL(TDM_MUTEX_IS_LOCKED());
/* Must run on the thread that registered the handler. */
237 assert(change_handler->owner_tid == syscall(SYS_gettid));
239 if (cb_base->type == TDM_THREAD_CB_OUTPUT_STATUS) {
240 tdm_thread_cb_output_status *output_status = (tdm_thread_cb_output_status *)cb_base;
241 type = TDM_OUTPUT_CHANGE_CONNECTION;
242 value.u32 = output_status->status;
243 } else if (cb_base->type == TDM_THREAD_CB_OUTPUT_DPMS) {
244 tdm_thread_cb_output_dpms *output_dpms = (tdm_thread_cb_output_dpms *)cb_base;
245 type = TDM_OUTPUT_CHANGE_DPMS;
246 value.u32 = output_dpms->dpms;
/* Any other cb type is a programming error (an 'else'/return appears on
 * lines missing from this copy). */
248 TDM_NEVER_GET_HERE();
/* Unlock across the user callback; relock before returning. */
252 _pthread_mutex_unlock(&private_display->lock);
253 change_handler->func(private_output, type, value, change_handler->user_data);
254 _pthread_mutex_lock(&private_display->lock);
/* Builds a TDM_THREAD_CB_OUTPUT_STATUS event for the given status and
 * dispatches it via tdm_thread_cb_call. Note base.sync = 1: the status change
 * is delivered synchronously (unlike DPMS below). Always returns
 * TDM_ERROR_NONE; a dispatch failure is only warned about. */
259 _tdm_output_call_thread_cb_status(tdm_private_output *private_output, tdm_output_conn_status status)
261 tdm_thread_cb_output_status output_status;
264 memset(&output_status, 0, sizeof output_status);
265 output_status.base.type = TDM_THREAD_CB_OUTPUT_STATUS;
266 output_status.base.length = sizeof output_status;
267 output_status.base.object_stamp = private_output->stamp;
268 output_status.base.data = NULL;
269 output_status.base.sync = 1;
270 output_status.status = status;
272 ret = tdm_thread_cb_call(private_output, &output_status.base, 1);
273 TDM_WARNING_IF_FAIL(ret == TDM_ERROR_NONE);
275 return TDM_ERROR_NONE;
/* Builds a TDM_THREAD_CB_OUTPUT_DPMS event for the given DPMS value and
 * dispatches it via tdm_thread_cb_call. base.sync = 0: delivered
 * asynchronously. Always returns TDM_ERROR_NONE; dispatch failure is only
 * warned about. */
279 _tdm_output_call_thread_cb_dpms(tdm_private_output *private_output, tdm_output_dpms dpms)
281 tdm_thread_cb_output_dpms output_dpms;
284 memset(&output_dpms, 0, sizeof output_dpms);
285 output_dpms.base.type = TDM_THREAD_CB_OUTPUT_DPMS;
286 output_dpms.base.length = sizeof output_dpms;
287 output_dpms.base.object_stamp = private_output->stamp;
288 output_dpms.base.data = NULL;
289 output_dpms.base.sync = 0;
290 output_dpms.dpms = dpms;
292 ret = tdm_thread_cb_call(private_output, &output_dpms.base, 1);
293 TDM_WARNING_IF_FAIL(ret == TDM_ERROR_NONE);
295 return TDM_ERROR_NONE;
/* Backend callback invoked when the connector status changes. On a
 * connect<->disconnect transition it re-reads the output's capabilities from
 * the backend (tdm_display_update_output) before caching the new status and
 * forwarding the event to frontend change handlers. */
299 tdm_output_cb_status(tdm_output *output_backend, tdm_output_conn_status status, void *user_data)
301 tdm_private_output *private_output = user_data;
304 TDM_RETURN_IF_FAIL(private_output);
306 TDM_INFO("output(%d) %s", private_output->pipe, tdm_status_str(status));
/* Only a transition to/from DISCONNECTED requires re-probing the output. */
308 if ((private_output->caps.status == TDM_OUTPUT_CONN_STATUS_DISCONNECTED && status != TDM_OUTPUT_CONN_STATUS_DISCONNECTED) ||
309 (private_output->caps.status != TDM_OUTPUT_CONN_STATUS_DISCONNECTED && status == TDM_OUTPUT_CONN_STATUS_DISCONNECTED)) {
310 ret = tdm_display_update_output(private_output->private_module, output_backend, private_output->pipe, 1);
311 TDM_RETURN_IF_FAIL(ret == TDM_ERROR_NONE);
313 private_output->caps.status = status;
316 ret = _tdm_output_call_thread_cb_status(private_output, status);
317 TDM_WARNING_IF_FAIL(ret == TDM_ERROR_NONE);
/* Backend callback invoked when an asynchronous DPMS change completes:
 * records the new DPMS value, clears the waiting flag, and forwards the
 * event to frontend change handlers.
 * NOTE(review): unlike tdm_output_cb_status above, no NULL check on
 * private_output is visible here — possibly on a missing line. */
321 tdm_output_cb_dpms(tdm_output *output_backend, tdm_output_dpms dpms, void *user_data)
323 tdm_private_output *private_output = user_data;
326 TDM_INFO("output(%d) %s", private_output->pipe, tdm_status_str(dpms));
328 private_output->current_dpms_value = dpms;
329 private_output->waiting_dpms_change = 0;
330 TDM_INFO("output(%d) dpms async '%s' done", private_output->pipe, tdm_dpms_str(dpms));
332 ret = _tdm_output_call_thread_cb_dpms(private_output, dpms);
333 TDM_WARNING_IF_FAIL(ret == TDM_ERROR_NONE);
/* Registers a change handler called on connection-status and DPMS changes.
 * Rejects duplicate (func, user_data) pairs, allocates a handler record,
 * subscribes it to both the DPMS and STATUS thread-callback types (rolling
 * back the first on failure of the second), and records the calling thread
 * id so events are delivered on the registering thread. */
337 tdm_output_add_change_handler(tdm_output *output,
338 tdm_output_change_handler func,
341 tdm_private_output_change_handler *change_handler = NULL;
344 TDM_RETURN_VAL_IF_FAIL(func != NULL, TDM_ERROR_INVALID_PARAMETER);
346 _pthread_mutex_lock(&private_display->lock);
/* Duplicate registration is a caller bug. */
348 LIST_FOR_EACH_ENTRY(change_handler, &private_output->change_handler_list, link) {
349 if (change_handler->func == func && change_handler->user_data == user_data) {
350 TDM_ERR("can't add twice");
351 _pthread_mutex_unlock(&private_display->lock);
352 return TDM_ERROR_BAD_REQUEST;
356 change_handler = calloc(1, sizeof(tdm_private_output_change_handler));
357 if (!change_handler) {
358 /* LCOV_EXCL_START */
359 TDM_ERR("failed: alloc memory");
360 _pthread_mutex_unlock(&private_display->lock);
361 return TDM_ERROR_OUT_OF_MEMORY;
365 ret = tdm_thread_cb_add(private_output, TDM_THREAD_CB_OUTPUT_DPMS, NULL, tdm_output_thread_cb_change, change_handler);
366 if (ret != TDM_ERROR_NONE) {
367 /* LCOV_EXCL_START */
368 TDM_ERR("tdm_thread_cb_add failed");
369 free(change_handler);
370 _pthread_mutex_unlock(&private_display->lock);
371 return TDM_ERROR_OPERATION_FAILED;
375 ret = tdm_thread_cb_add(private_output, TDM_THREAD_CB_OUTPUT_STATUS, NULL, tdm_output_thread_cb_change, change_handler);
376 if (ret != TDM_ERROR_NONE) {
377 /* LCOV_EXCL_START */
378 TDM_ERR("tdm_thread_cb_add failed");
/* Roll back the DPMS subscription added above. */
379 tdm_thread_cb_remove(private_output, TDM_THREAD_CB_OUTPUT_DPMS, NULL, tdm_output_thread_cb_change, change_handler);
380 free(change_handler);
381 _pthread_mutex_unlock(&private_display->lock);
382 return TDM_ERROR_OPERATION_FAILED;
386 change_handler->private_output = private_output;
387 change_handler->func = func;
388 change_handler->user_data = user_data;
/* Events are dispatched back to the registering thread. */
389 change_handler->owner_tid = syscall(SYS_gettid);
391 LIST_ADDTAIL(&change_handler->link, &private_output->change_handler_list);
393 _pthread_mutex_unlock(&private_display->lock);
/* Unregisters a change handler previously added with
 * tdm_output_add_change_handler: finds the matching (func, user_data) record,
 * removes both thread-callback subscriptions, unlinks and frees it.
 * NOTE(review): the 'continue;' after the mismatch test and the early return
 * after freeing appear on lines missing from this copy. */
399 tdm_output_remove_change_handler(tdm_output *output,
400 tdm_output_change_handler func,
403 tdm_private_display *private_display;
404 tdm_private_output *private_output;
405 tdm_private_output_change_handler *change_handler = NULL, *hh = NULL;
407 TDM_RETURN_IF_FAIL(output != NULL);
408 TDM_RETURN_IF_FAIL(func != NULL);
410 private_output = (tdm_private_output*)output;
411 private_display = private_output->private_display;
413 _pthread_mutex_lock(&private_display->lock);
415 LIST_FOR_EACH_ENTRY_SAFE(change_handler, hh, &private_output->change_handler_list, link) {
416 if (change_handler->func != func || change_handler->user_data != user_data)
419 tdm_thread_cb_remove(private_output, TDM_THREAD_CB_OUTPUT_DPMS, NULL, tdm_output_thread_cb_change, change_handler);
420 tdm_thread_cb_remove(private_output, TDM_THREAD_CB_OUTPUT_STATUS, NULL, tdm_output_thread_cb_change, change_handler);
422 LIST_DEL(&change_handler->link);
423 free(change_handler);
425 _pthread_mutex_unlock(&private_display->lock);
/* Fall-through unlock for the no-match case. */
430 _pthread_mutex_unlock(&private_display->lock);
/* Getter for the output connector type (e.g. LVDS/HDMI) cached in caps;
 * read under the display lock. */
434 tdm_output_get_output_type(tdm_output *output, tdm_output_type *type)
438 TDM_RETURN_VAL_IF_FAIL(type != NULL, TDM_ERROR_INVALID_PARAMETER);
440 _pthread_mutex_lock(&private_display->lock);
442 *type = private_output->caps.type;
444 _pthread_mutex_unlock(&private_display->lock);
/* Counts the layers on this output by walking layer_list. Not allowed for
 * HWC-capable outputs — layer APIs are replaced by the HWC API there.
 * NOTE(review): the '*count = 0;' init and the '(*count)++' loop body appear
 * on lines missing from this copy. */
450 tdm_output_get_layer_count(tdm_output *output, int *count)
452 tdm_private_layer *private_layer = NULL;
456 TDM_RETURN_VAL_IF_FAIL(count != NULL, TDM_ERROR_INVALID_PARAMETER);
458 _pthread_mutex_lock(&private_display->lock);
460 if (private_output->caps.capabilities & TDM_OUTPUT_CAPABILITY_HWC) {
461 TDM_ERR("output(%p) support HWC. Use HWC functions", private_output);
463 _pthread_mutex_unlock(&private_display->lock);
464 return TDM_ERROR_BAD_REQUEST;
468 LIST_FOR_EACH_ENTRY(private_layer, &private_output->layer_list, link)
471 _pthread_mutex_unlock(&private_display->lock);
472 return TDM_ERROR_NONE;
475 _pthread_mutex_unlock(&private_display->lock);
/* Returns the layer with the given index, or reports an error through
 * 'error'. Rejected for HWC-capable outputs. The error-pointer NULL checks
 * and the not-found tail (setting *error and returning NULL) appear on lines
 * missing from this copy. */
482 tdm_output_get_layer(tdm_output *output, int index, tdm_error *error)
484 tdm_private_layer *private_layer = NULL;
486 OUTPUT_FUNC_ENTRY_ERROR();
488 _pthread_mutex_lock(&private_display->lock);
491 *error = TDM_ERROR_NONE;
493 if (private_output->caps.capabilities & TDM_OUTPUT_CAPABILITY_HWC) {
494 _pthread_mutex_unlock(&private_display->lock);
495 TDM_ERR("output(%p) support HWC. Use HWC functions", private_output);
497 *error = TDM_ERROR_BAD_REQUEST;
/* Linear scan by index. */
501 LIST_FOR_EACH_ENTRY(private_layer, &private_output->layer_list, link) {
502 if (private_layer->index == index) {
503 _pthread_mutex_unlock(&private_display->lock);
504 return private_layer;
508 _pthread_mutex_unlock(&private_display->lock);
/* Getter for the backend-exposed output property table; returns pointers
 * into the cached caps (caller must not free them). */
514 tdm_output_get_available_properties(tdm_output *output, const tdm_prop **props,
519 TDM_RETURN_VAL_IF_FAIL(props != NULL, TDM_ERROR_INVALID_PARAMETER);
520 TDM_RETURN_VAL_IF_FAIL(count != NULL, TDM_ERROR_INVALID_PARAMETER);
522 _pthread_mutex_lock(&private_display->lock);
524 *props = (const tdm_prop *)private_output->caps.props;
525 *count = private_output->caps.prop_count;
527 _pthread_mutex_unlock(&private_display->lock);
/* Getter for the output's mode list; returns pointers into the cached caps
 * (valid until the output is re-probed). */
533 tdm_output_get_available_modes(tdm_output *output,
534 const tdm_output_mode **modes, int *count)
538 TDM_RETURN_VAL_IF_FAIL(modes != NULL, TDM_ERROR_INVALID_PARAMETER);
539 TDM_RETURN_VAL_IF_FAIL(count != NULL, TDM_ERROR_INVALID_PARAMETER);
541 _pthread_mutex_lock(&private_display->lock);
543 *modes = (const tdm_output_mode *)private_output->caps.modes;
544 *count = private_output->caps.mode_count;
546 _pthread_mutex_unlock(&private_display->lock);
/* Getter for the output's size constraints (min/max w/h and preferred
 * alignment). All out-pointers are optional; the per-pointer NULL checks
 * appear on lines missing from this copy. TDM_FRONT_VALUE presumably
 * normalizes backend values for the frontend — TODO confirm. */
552 tdm_output_get_available_size(tdm_output *output, int *min_w, int *min_h,
553 int *max_w, int *max_h, int *preferred_align)
557 _pthread_mutex_lock(&private_display->lock);
560 *min_w = TDM_FRONT_VALUE(private_output->caps.min_w);
562 *min_h = TDM_FRONT_VALUE(private_output->caps.min_h);
564 *max_w = TDM_FRONT_VALUE(private_output->caps.max_w);
566 *max_h = TDM_FRONT_VALUE(private_output->caps.max_h);
568 *preferred_align = TDM_FRONT_VALUE(private_output->caps.preferred_align);
570 _pthread_mutex_unlock(&private_display->lock);
/* Getter for cursor-plane size constraints. Requires backend ABI >= 1.5
 * (the cursor_* caps fields were added then); otherwise returns
 * TDM_ERROR_BAD_REQUEST. Out-pointers are optional, as above. */
576 tdm_output_get_cursor_available_size(tdm_output *output, int *min_w, int *min_h,
577 int *max_w, int *max_h, int *preferred_align)
581 _pthread_mutex_lock(&private_display->lock);
583 if (!tdm_module_check_abi(private_output->private_module, 1, 5)) {
584 _pthread_mutex_unlock(&private_display->lock);
585 return TDM_ERROR_BAD_REQUEST;
589 *min_w = TDM_FRONT_VALUE(private_output->caps.cursor_min_w);
591 *min_h = TDM_FRONT_VALUE(private_output->caps.cursor_min_h);
593 *max_w = TDM_FRONT_VALUE(private_output->caps.cursor_max_w);
595 *max_h = TDM_FRONT_VALUE(private_output->caps.cursor_max_h);
597 *preferred_align = TDM_FRONT_VALUE(private_output->caps.cursor_preferred_align);
599 _pthread_mutex_unlock(&private_display->lock);
/* Getter for the display's physical dimensions in millimeters; out-pointers
 * are optional (NULL checks on lines missing from this copy). */
605 tdm_output_get_physical_size(tdm_output *output, unsigned int *mmWidth,
606 unsigned int *mmHeight)
610 _pthread_mutex_lock(&private_display->lock);
613 *mmWidth = private_output->caps.mmWidth;
615 *mmHeight = private_output->caps.mmHeight;
617 _pthread_mutex_unlock(&private_display->lock);
/* Getter for the output's subpixel layout value cached in caps. */
623 tdm_output_get_subpixel(tdm_output *output, unsigned int *subpixel)
626 TDM_RETURN_VAL_IF_FAIL(subpixel != NULL, TDM_ERROR_INVALID_PARAMETER);
628 _pthread_mutex_lock(&private_display->lock);
630 *subpixel = private_output->caps.subpixel;
632 _pthread_mutex_unlock(&private_display->lock);
/* Getter for this output's pipe index (its position among the outputs). */
638 tdm_output_get_pipe(tdm_output *output, unsigned int *pipe)
641 TDM_RETURN_VAL_IF_FAIL(pipe != NULL, TDM_ERROR_INVALID_PARAMETER);
643 _pthread_mutex_lock(&private_display->lock);
645 *pipe = private_output->pipe;
647 _pthread_mutex_unlock(&private_display->lock);
/* Finds the index of the primary layer (the one with
 * TDM_LAYER_CAPABILITY_PRIMARY). The loop 'break' and the behavior when no
 * primary layer exists are on lines missing from this copy. */
653 tdm_output_get_primary_index(tdm_output *output, int *index)
655 tdm_private_layer *private_layer = NULL;
658 TDM_RETURN_VAL_IF_FAIL(index != NULL, TDM_ERROR_INVALID_PARAMETER);
660 _pthread_mutex_lock(&private_display->lock);
662 LIST_FOR_EACH_ENTRY(private_layer, &private_output->layer_list, link) {
663 if (private_layer->caps.capabilities & TDM_LAYER_CAPABILITY_PRIMARY) {
664 *index = private_layer->index;
669 _pthread_mutex_unlock(&private_display->lock);
/* Sets a backend output property by id. Delegates to the backend's
 * output_set_property hook; returns TDM_ERROR_NOT_IMPLEMENTED when the
 * backend does not provide one. The 'value' argument continuation of the
 * backend call is on a line missing from this copy. */
675 tdm_output_set_property(tdm_output *output, unsigned int id, tdm_value value)
677 tdm_private_module *private_module;
678 tdm_func_output *func_output;
681 _pthread_mutex_lock(&private_display->lock);
683 private_module = private_output->private_module;
684 func_output = &private_module->func_output;
686 if (!func_output->output_set_property) {
687 /* LCOV_EXCL_START */
688 _pthread_mutex_unlock(&private_display->lock);
689 TDM_WRN("not implemented!!");
690 return TDM_ERROR_NOT_IMPLEMENTED;
694 ret = func_output->output_set_property(private_output->output_backend, id,
697 _pthread_mutex_unlock(&private_display->lock);
/* Reads a backend output property by id into 'value'. Mirrors
 * tdm_output_set_property: NOT_IMPLEMENTED when the backend lacks the hook;
 * the 'value' argument continuation of the backend call is on a missing
 * line. */
703 tdm_output_get_property(tdm_output *output, unsigned int id, tdm_value *value)
705 tdm_private_module *private_module;
706 tdm_func_output *func_output;
709 TDM_RETURN_VAL_IF_FAIL(value != NULL, TDM_ERROR_INVALID_PARAMETER);
711 _pthread_mutex_lock(&private_display->lock);
713 private_module = private_output->private_module;
714 func_output = &private_module->func_output;
716 if (!func_output->output_get_property) {
717 /* LCOV_EXCL_START */
718 _pthread_mutex_unlock(&private_display->lock);
719 TDM_WRN("not implemented!!");
720 return TDM_ERROR_NOT_IMPLEMENTED;
724 ret = func_output->output_get_property(private_output->output_backend, id,
727 _pthread_mutex_unlock(&private_display->lock);
/* Thread-callback delivering a vblank event on the waiter's thread. Disarms
 * the timeout timer, unsubscribes this handler, then moves every handler
 * matching the same (interval, sync, owner_tid) onto a local clone list so
 * all can be completed by this one hardware vblank; user callbacks run with
 * the display lock dropped. Declarations of 'interval'/'sync' and the
 * free()/LIST_DEL of completed handlers appear on lines missing from this
 * copy. */
733 _tdm_output_thread_cb_vblank(tdm_private_display *private_display, void *object, tdm_thread_cb_base *cb_base, void *user_data)
735 tdm_thread_cb_output_vblank *output_vblank = (tdm_thread_cb_output_vblank *)cb_base;
736 tdm_private_output_vblank_handler *vblank_handler = output_vblank->base.data;
737 tdm_private_output_vblank_handler *v = NULL, *vv = NULL;
738 tdm_private_output *private_output = object;
739 struct list_head clone_list;
741 pid_t tid = syscall(SYS_gettid);
743 TDM_RETURN_IF_FAIL(TDM_MUTEX_IS_LOCKED());
/* Must run on the thread that requested the vblank. */
745 assert(vblank_handler->owner_tid == tid);
747 vblank_handler->sent_to_frontend = 0;
/* The vblank arrived: disarm the timeout watchdog. */
749 _tdm_output_vblank_timeout_update(private_output, 0);
751 tdm_thread_cb_remove(private_output, TDM_THREAD_CB_OUTPUT_VBLANK, vblank_handler, _tdm_output_thread_cb_vblank, NULL);
753 interval = vblank_handler->interval;
754 sync = vblank_handler->sync;
756 LIST_INITHEAD(&clone_list);
/* Collect all handlers satisfied by this same vblank. */
758 LIST_FOR_EACH_ENTRY_SAFE(v, vv, &private_output->vblank_handler_list, link) {
759 if (v->interval != interval || v->sync != sync || v->owner_tid != tid)
763 LIST_ADDTAIL(&v->link, &clone_list);
766 if (tdm_debug_module & TDM_DEBUG_COMMIT)
767 TDM_INFO("----------------------------------------- output(%d) got vblank", private_output->pipe);
/* Call user handlers without holding the display lock. */
769 _pthread_mutex_unlock(&private_display->lock);
770 LIST_FOR_EACH_ENTRY_SAFE(v, vv, &clone_list, link) {
771 if (tdm_debug_module & TDM_DEBUG_COMMIT)
772 TDM_INFO("handler(%p)", v);
777 v->func(v->private_output,
778 output_vblank->sequence,
779 output_vblank->tv_sec,
780 output_vblank->tv_usec,
785 _pthread_mutex_lock(&private_display->lock);
787 if (tdm_debug_module & TDM_DEBUG_COMMIT)
788 TDM_INFO("-----------------------------------------...");
/* Backend vblank callback (may fire on the backend/event thread): packages
 * the vblank timestamp into a TDM_THREAD_CB_OUTPUT_VBLANK event and posts it
 * so _tdm_output_thread_cb_vblank runs on the waiter's thread. */
792 _tdm_output_cb_vblank(tdm_output *output_backend, unsigned int sequence,
793 unsigned int tv_sec, unsigned int tv_usec, void *user_data)
795 tdm_private_output_vblank_handler *vblank_handler = user_data;
796 tdm_thread_cb_output_vblank output_vblank;
799 memset(&output_vblank, 0, sizeof output_vblank);
800 output_vblank.base.type = TDM_THREAD_CB_OUTPUT_VBLANK;
801 output_vblank.base.length = sizeof output_vblank;
802 output_vblank.base.object_stamp = vblank_handler->private_output->stamp;
803 output_vblank.base.data = vblank_handler;
804 output_vblank.base.sync = 0;
805 output_vblank.sequence = sequence;
806 output_vblank.tv_sec = tv_sec;
807 output_vblank.tv_usec = tv_usec;
/* Mark as handed off so the timeout dump can distinguish it. */
809 vblank_handler->sent_to_frontend = 1;
811 if (tdm_debug_module & TDM_DEBUG_COMMIT)
812 TDM_INFO("output(%d) wait_vblank: handler(%p)", vblank_handler->private_output->pipe, vblank_handler);
814 ret = tdm_thread_cb_call(vblank_handler->private_output, &output_vblank.base, 1);
815 TDM_WARNING_IF_FAIL(ret == TDM_ERROR_NONE);
/* Thread-callback delivering a commit-done event on the committer's thread:
 * unsubscribes and unlinks the commit handler, releases per-layer committed
 * buffers for output-style commits, invokes the user's commit callback with
 * the display lock dropped, then frees the handler. */
819 _tdm_output_thread_cb_commit(tdm_private_display *private_display, void *object, tdm_thread_cb_base *cb_base, void *user_data)
821 tdm_thread_cb_output_commit *output_commit = (tdm_thread_cb_output_commit *)cb_base;
822 tdm_private_output_commit_handler *output_commit_handler = output_commit->base.data;
823 tdm_private_output *private_output = object;
824 tdm_private_layer *private_layer = NULL;
826 TDM_RETURN_IF_FAIL(TDM_MUTEX_IS_LOCKED());
/* A commit without a handler needs no frontend processing. */
828 if (!output_commit_handler)
831 assert(output_commit_handler->owner_tid == syscall(SYS_gettid));
833 tdm_thread_cb_remove(private_output, TDM_THREAD_CB_OUTPUT_COMMIT, output_commit_handler, _tdm_output_thread_cb_commit, NULL);
835 LIST_DEL(&output_commit_handler->link);
837 if (tdm_debug_module & TDM_DEBUG_COMMIT) {
838 TDM_INFO("----------------------------------------- output(%d) committed", private_output->pipe);
839 TDM_INFO("handler(%p)", output_commit_handler);
842 if (private_output->commit_type == TDM_COMMIT_TYPE_OUTPUT) {
843 /* In case of layer commit, the below will be handled in the layer commit callback */
844 LIST_FOR_EACH_ENTRY(private_layer, &private_output->layer_list, link) {
845 if (private_layer->committed_buffer)
846 tdm_layer_committed(private_layer, &private_layer->committed_buffer);
850 if (output_commit_handler->func) {
/* User callback runs unlocked to avoid re-entrancy deadlocks. */
851 _pthread_mutex_unlock(&private_display->lock);
852 output_commit_handler->func(private_output,
853 output_commit->sequence,
854 output_commit->tv_sec,
855 output_commit->tv_usec,
856 output_commit_handler->user_data);
857 _pthread_mutex_lock(&private_display->lock);
860 free(output_commit_handler);
862 if (tdm_debug_module & TDM_DEBUG_COMMIT)
863 TDM_INFO("-----------------------------------------...");
/* Backend commit-done callback: resolves the private output (from the
 * handler, or by backend-handle lookup when user_data is NULL — the 'else'
 * line is missing from this copy) and posts a TDM_THREAD_CB_OUTPUT_COMMIT
 * event to the committer's thread. */
867 _tdm_output_cb_commit(tdm_output *output_backend, unsigned int sequence,
868 unsigned int tv_sec, unsigned int tv_usec, void *user_data)
870 tdm_private_output_commit_handler *output_commit_handler = user_data;
871 tdm_private_output *private_output;
872 tdm_thread_cb_output_commit output_commit;
875 if (output_commit_handler)
876 private_output = output_commit_handler->private_output;
878 private_output = tdm_display_find_private_output(tdm_display_get(), output_backend);
880 memset(&output_commit, 0, sizeof output_commit);
881 output_commit.base.type = TDM_THREAD_CB_OUTPUT_COMMIT;
882 output_commit.base.length = sizeof output_commit;
883 output_commit.base.object_stamp = private_output->stamp;
884 output_commit.base.data = output_commit_handler;
885 output_commit.base.sync = 0;
886 output_commit.sequence = sequence;
887 output_commit.tv_sec = tv_sec;
888 output_commit.tv_usec = tv_usec;
890 ret = tdm_thread_cb_call(private_output, &output_commit.base, 1);
891 TDM_WARNING_IF_FAIL(ret == TDM_ERROR_NONE);
894 /* add_front: To distinguish between the user vblank handlers and the layer
895 * commit vblank handlers. The layer commit handlers will be called
896 * before calling the user vblank handlers.
/* Core vblank-wait implementation (called with the display lock held).
 * Registers a per-request handler; if another handler with the same
 * (interval, sync, owner_tid) already exists, the backend request is skipped
 * and this handler piggybacks on the pending one. On backend failure the
 * handler is unwound. The 'skip_request' set/test lines and the final
 * 'return ret;' appear on lines missing from this copy. */
899 _tdm_output_wait_vblank(tdm_private_output *private_output, int interval, int sync,
900 tdm_output_vblank_handler func, void *user_data,
901 unsigned int add_front)
903 tdm_private_module *private_module;
904 tdm_func_output *func_output;
905 tdm_private_output_vblank_handler *vblank_handler = NULL, *v = NULL;
906 unsigned int skip_request = 0;
907 pid_t tid = syscall(SYS_gettid);
908 tdm_error ret = TDM_ERROR_NONE;
910 private_module = private_output->private_module;
911 func_output = &private_module->func_output;
913 /* interval SHOULD be at least 1 */
917 if (!func_output->output_wait_vblank) {
918 /* LCOV_EXCL_START */
919 TDM_WRN("not implemented!!");
920 return TDM_ERROR_NOT_IMPLEMENTED;
/* Lazily register the backend vblank callback exactly once per output. */
924 if (!private_output->regist_vblank_cb) {
925 private_output->regist_vblank_cb = 1;
926 ret = func_output->output_set_vblank_handler(private_output->output_backend,
927 _tdm_output_cb_vblank);
930 vblank_handler = calloc(1, sizeof(tdm_private_output_vblank_handler));
931 if (!vblank_handler) {
932 /* LCOV_EXCL_START */
933 TDM_ERR("failed: alloc memory");
934 return TDM_ERROR_OUT_OF_MEMORY;
938 if (tdm_debug_module & TDM_DEBUG_COMMIT)
939 TDM_INFO("output(%d) wait_vblank: handler(%p)", private_output->pipe, vblank_handler);
/* An equivalent pending request lets us skip the backend call below. */
941 LIST_FOR_EACH_ENTRY(v, &private_output->vblank_handler_list, link) {
942 if (v->interval == interval && v->sync == sync && v->owner_tid == tid) {
949 LIST_ADD(&vblank_handler->link, &private_output->vblank_handler_list);
951 LIST_ADDTAIL(&vblank_handler->link, &private_output->vblank_handler_list);
953 vblank_handler->private_output = private_output;
954 vblank_handler->interval = interval;
955 vblank_handler->sync = sync;
956 vblank_handler->func = func;
957 vblank_handler->user_data = user_data;
958 vblank_handler->owner_tid = tid;
960 /* If there is the previous request, we can skip to call output_wait_vblank() */
962 ret = tdm_thread_cb_add(private_output, TDM_THREAD_CB_OUTPUT_VBLANK, vblank_handler, _tdm_output_thread_cb_vblank, NULL);
963 if (ret != TDM_ERROR_NONE) {
964 TDM_ERR("tdm_thread_cb_add failed");
968 ret = func_output->output_wait_vblank(private_output->output_backend, interval,
969 sync, vblank_handler);
970 TDM_GOTO_IF_FAIL(ret == TDM_ERROR_NONE, wait_failed);
/* Arm a 1s watchdog so a silent backend is reported. */
972 _tdm_output_vblank_timeout_update(private_output, 1000);
974 if (tdm_debug_module & TDM_DEBUG_COMMIT)
975 TDM_INFO("output(%d) backend wait_vblank", private_output->pipe);
/* Error unwind: drop the thread callback and the handler we added. */
981 /* LCOV_EXCL_START */
982 if (vblank_handler) {
983 tdm_thread_cb_remove(private_output, TDM_THREAD_CB_OUTPUT_VBLANK, vblank_handler, _tdm_output_thread_cb_vblank, NULL);
984 LIST_DEL(&vblank_handler->link);
985 free(vblank_handler);
/* Public API: waits 'interval' vblanks (must be > 0) then calls 'func'.
 * Rejected with TDM_ERROR_DPMS_OFF while vsync is suppressed by the current
 * DPMS state. Appends the handler (add_front = 0). */
992 tdm_output_wait_vblank(tdm_output *output, int interval, int sync,
993 tdm_output_vblank_handler func, void *user_data)
996 TDM_RETURN_VAL_IF_FAIL(interval > 0, TDM_ERROR_INVALID_PARAMETER);
998 _pthread_mutex_lock(&private_display->lock);
1000 if (TDM_OUTPUT_DPMS_VSYNC_IS_OFF(private_output->current_dpms_value)) {
1001 TDM_WRN("output(%d) dpms: %s", private_output->pipe,
1002 tdm_dpms_str(private_output->current_dpms_value));
1003 _pthread_mutex_unlock(&private_display->lock);
1004 return TDM_ERROR_DPMS_OFF;
1007 ret = _tdm_output_wait_vblank(private_output, interval, sync, func, user_data, 0);
1009 _pthread_mutex_unlock(&private_display->lock);
1014 /* LCOV_EXCL_START */
/* Internal variant of tdm_output_wait_vblank that prepends the handler
 * (add_front = 1) so layer-commit vblank handlers run before user vblank
 * handlers (see the add_front note above _tdm_output_wait_vblank). */
1016 tdm_output_wait_vblank_add_front(tdm_output *output, int interval, int sync,
1017 tdm_output_vblank_handler func, void *user_data)
1019 OUTPUT_FUNC_ENTRY();
1021 _pthread_mutex_lock(&private_display->lock);
1023 if (TDM_OUTPUT_DPMS_VSYNC_IS_OFF(private_output->current_dpms_value)) {
1024 TDM_WRN("output(%d) dpms: %s", private_output->pipe,
1025 tdm_dpms_str(private_output->current_dpms_value));
1026 _pthread_mutex_unlock(&private_display->lock);
1027 return TDM_ERROR_DPMS_OFF;
1030 ret = _tdm_output_wait_vblank(private_output, interval, sync, func, user_data, 1);
1032 _pthread_mutex_unlock(&private_display->lock);
1036 /* LCOV_EXCL_STOP */
/* Internal (lock must already be held): neutralizes a pending vblank handler
 * matching (func, user_data) by clearing its func/user_data rather than
 * freeing it — the record is reclaimed when its vblank event fires. The
 * 'v->func = NULL;' line appears to be missing from this copy. */
1039 tdm_output_remove_vblank_handler_internal(tdm_output *output, tdm_output_vblank_handler func, void *user_data)
1041 tdm_private_output *private_output = (tdm_private_output*)output;
1042 tdm_private_output_vblank_handler *v = NULL;
1044 TDM_RETURN_IF_FAIL(private_output != NULL);
1045 TDM_RETURN_IF_FAIL(TDM_MUTEX_IS_LOCKED());
1047 LIST_FOR_EACH_ENTRY(v, &private_output->vblank_handler_list, link) {
1048 if (v->func == func && v->user_data == user_data) {
1049 /* only set func & user_data to NULL. It will be freed when an event occurs */
1051 v->user_data = NULL;
/* Internal (lock must already be held): neutralizes a pending commit handler
 * matching (func, user_data) the same way as the vblank variant above — the
 * record is freed later by _tdm_output_thread_cb_commit. The 'c->func =
 * NULL;' line appears to be missing from this copy. */
1058 tdm_output_remove_commit_handler_internal(tdm_output *output, tdm_output_commit_handler func, void *user_data)
1060 tdm_private_output *private_output = (tdm_private_output*)output;
1061 tdm_private_output_commit_handler *c = NULL;
1063 TDM_RETURN_IF_FAIL(private_output != NULL);
1064 TDM_RETURN_IF_FAIL(TDM_MUTEX_IS_LOCKED());
1066 LIST_FOR_EACH_ENTRY(c, &private_output->output_commit_handler_list, link) {
1067 if (c->func == func && c->user_data == user_data) {
1068 /* only set func & user_data to NULL. It will be freed when an event occurs */
1070 c->user_data = NULL;
/* Public API wrapper: takes the display lock and delegates to
 * tdm_output_remove_vblank_handler_internal. */
1077 tdm_output_remove_vblank_handler(tdm_output *output, tdm_output_vblank_handler func, void *user_data)
1079 OUTPUT_FUNC_ENTRY();
1081 _pthread_mutex_lock(&private_display->lock);
1083 tdm_output_remove_vblank_handler_internal(output, func, user_data);
1085 _pthread_mutex_unlock(&private_display->lock);
/* Public API wrapper: takes the display lock and delegates to
 * tdm_output_remove_commit_handler_internal. */
1091 tdm_output_remove_commit_handler(tdm_output *output, tdm_output_commit_handler func, void *user_data)
1093 OUTPUT_FUNC_ENTRY();
1095 _pthread_mutex_lock(&private_display->lock);
1097 tdm_output_remove_commit_handler_internal(output, func, user_data);
1099 _pthread_mutex_unlock(&private_display->lock);
tdm_output_commit_internal(tdm_output *output, int sync, tdm_output_commit_handler func, void *user_data)
	/* Internal commit: push the output's current state to the backend.
	 * Caller holds the display lock.
	 * If DPMS(vsync) is on, a per-commit handler node is allocated and
	 * the backend's output_commit() is invoked; 'func' fires later from
	 * the commit event. If DPMS(vsync) is off, the backend commit is
	 * skipped and 'func' is invoked directly at the end (with zeroed
	 * sequence/timestamps). Either way the per-layer waiting_buffer is
	 * promoted to committed_buffer so tdm_layer_committed() stays correct.
	 */
	tdm_private_output *private_output;
	tdm_private_module *private_module;
	tdm_func_output *func_output;
	tdm_private_output_commit_handler *output_commit_handler = NULL;
	tdm_private_layer *private_layer = NULL;
	tdm_output_dpms dpms_value = TDM_OUTPUT_DPMS_ON;
	tdm_error ret = TDM_ERROR_NONE;

	TDM_RETURN_VAL_IF_FAIL(output != NULL, TDM_ERROR_INVALID_PARAMETER);

	private_output = (tdm_private_output*)output;
	private_module = private_output->private_module;
	func_output = &private_module->func_output;

	/* output_commit is a mandatory backend entry point for this path */
	if (!func_output->output_commit) {
		/* LCOV_EXCL_START */
		TDM_WRN("not implemented!!");
		return TDM_ERROR_NOT_IMPLEMENTED;
		/* LCOV_EXCL_STOP */

	ret = tdm_output_get_dpms_internal(output, &dpms_value);
	TDM_RETURN_VAL_IF_FAIL(ret == TDM_ERROR_NONE, ret);

	if (!TDM_OUTPUT_DPMS_VSYNC_IS_OFF(dpms_value)) {
		/* lazily register the backend commit callback exactly once */
		if (!private_output->regist_commit_cb) {
			private_output->regist_commit_cb = 1;
			ret = func_output->output_set_commit_handler(private_output->output_backend, _tdm_output_cb_commit);
			TDM_GOTO_IF_FAIL(ret == TDM_ERROR_NONE, commit_failed);

		output_commit_handler = calloc(1, sizeof(tdm_private_output_commit_handler));
		if (!output_commit_handler) {
			/* LCOV_EXCL_START */
			TDM_ERR("failed: alloc memory");
			return TDM_ERROR_OUT_OF_MEMORY;
			/* LCOV_EXCL_STOP */

		ret = tdm_thread_cb_add(private_output, TDM_THREAD_CB_OUTPUT_COMMIT, output_commit_handler, _tdm_output_thread_cb_commit, NULL);
		if (ret != TDM_ERROR_NONE) {
			TDM_ERR("tdm_thread_cb_add failed");
			free(output_commit_handler);

		LIST_ADDTAIL(&output_commit_handler->link, &private_output->output_commit_handler_list);
		output_commit_handler->private_output = private_output;
		output_commit_handler->func = func;
		output_commit_handler->user_data = user_data;
		/* remember the calling thread so the event is routed back to it */
		output_commit_handler->owner_tid = syscall(SYS_gettid);

		ret = func_output->output_commit(private_output->output_backend, sync,
										 output_commit_handler);
		TDM_GOTO_IF_FAIL(ret == TDM_ERROR_NONE, commit_failed);

		if (tdm_debug_module & TDM_DEBUG_COMMIT)
			TDM_INFO("output(%d) backend commit: handle(%p) func(%p) user_data(%p)",
					 private_output->pipe, output_commit_handler, func, user_data);

	/* Even if DPMS is off, committed_buffer should be changed because it will be referred
	 * for tdm_layer_committed() function.
	 */
	LIST_FOR_EACH_ENTRY(private_layer, &private_output->layer_list, link) {
		if (!private_layer->waiting_buffer)

		/* release the previously committed buffer before promoting */
		if (private_layer->committed_buffer) {
			tdm_layer_free_buffer(private_layer, private_layer->committed_buffer);
			private_layer->committed_buffer = NULL;
			if (tdm_debug_module & TDM_DEBUG_BUFFER)
				TDM_INFO("layer(%p) waiting_buffer(%p) committed_buffer(%p)",
						 private_layer, private_layer->waiting_buffer->buffer,
						 private_layer->committed_buffer);

		private_layer->committed_buffer = private_layer->waiting_buffer;
		private_layer->waiting_buffer = NULL;
		/* NOTE(review): this log runs after waiting_buffer was set to NULL,
		 * so it always prints waiting_buffer as (nil) — confirm intended. */
		if (tdm_debug_module & TDM_DEBUG_BUFFER)
			TDM_INFO("layer(%p) waiting_buffer(%p) committed_buffer(%p)",
					 private_layer, private_layer->waiting_buffer,
					 private_layer->committed_buffer->buffer);

	/* DPMS(vsync) off: no vblank will ever come, so call the handler now */
	if (TDM_OUTPUT_DPMS_VSYNC_IS_OFF(dpms_value)) {
		TDM_WRN("dpms %s. Directly call commit handler instead of commit.", tdm_dpms_str(dpms_value));
		func(output, 0, 0, 0, user_data);

	/* error path: undo the thread callback and handler allocation */
	/* LCOV_EXCL_START */
	if (output_commit_handler) {
		tdm_thread_cb_remove(private_output, TDM_THREAD_CB_OUTPUT_COMMIT, output_commit_handler, _tdm_output_thread_cb_commit, NULL);
		LIST_DEL(&output_commit_handler->link);
		free(output_commit_handler);

	/* LCOV_EXCL_STOP */
tdm_output_commit(tdm_output *output, int sync, tdm_output_commit_handler func,
	/* Public commit entry point. The first successful caller fixes the
	 * output's commit type to OUTPUT; outputs already driven by
	 * tdm_layer_commit or by commit-per-vblank are rejected. All layers'
	 * pending data is flushed before the actual commit. */
	tdm_private_layer *private_layer = NULL;

	OUTPUT_FUNC_ENTRY();

	_pthread_mutex_lock(&private_display->lock);

	if (private_output->commit_type == TDM_COMMIT_TYPE_NONE)
		private_output->commit_type = TDM_COMMIT_TYPE_OUTPUT;
	else if (private_output->commit_type == TDM_COMMIT_TYPE_LAYER) {
		TDM_ERR("Can't supported. Use tdm_layer_commit");
		_pthread_mutex_unlock(&private_display->lock);
		return TDM_ERROR_BAD_REQUEST;

	if (private_output->commit_per_vblank) {
		TDM_ERR("Use tdm_layer_commit");
		_pthread_mutex_unlock(&private_display->lock);
		return TDM_ERROR_BAD_REQUEST;

	/* committing with DPMS(vsync) off would never complete */
	if (TDM_OUTPUT_DPMS_VSYNC_IS_OFF(private_output->current_dpms_value)) {
		TDM_ERR("output(%d) dpms: %s", private_output->pipe,
				tdm_dpms_str(private_output->current_dpms_value));
		_pthread_mutex_unlock(&private_display->lock);
		return TDM_ERROR_DPMS_OFF;

	if (tdm_debug_module & TDM_DEBUG_COMMIT)
		TDM_INFO("output(%d) commit", private_output->pipe);

	/* apply the pending data of all layers */
	LIST_FOR_EACH_ENTRY(private_layer, &private_output->layer_list, link) {
		tdm_layer_commit_pending_data(private_layer);

	ret = tdm_output_commit_internal(output, sync, func, user_data);

	_pthread_mutex_unlock(&private_display->lock);
tdm_output_set_mode(tdm_output *output, const tdm_output_mode *mode)
	/* Public API: apply a display mode via the backend. On success the
	 * mode pointer is cached and the HWC target-buffer info is flagged
	 * for re-derivation (need_set_target_info). */
	tdm_private_module *private_module;
	tdm_func_output *func_output;
	OUTPUT_FUNC_ENTRY();

	TDM_RETURN_VAL_IF_FAIL(mode != NULL, TDM_ERROR_INVALID_PARAMETER);

	_pthread_mutex_lock(&private_display->lock);

	private_module = private_output->private_module;
	func_output = &private_module->func_output;

	if (!func_output->output_set_mode) {
		/* LCOV_EXCL_START */
		_pthread_mutex_unlock(&private_display->lock);
		TDM_WRN("not implemented!!");
		return TDM_ERROR_NOT_IMPLEMENTED;
		/* LCOV_EXCL_STOP */

	ret = func_output->output_set_mode(private_output->output_backend, mode);
	if (ret == TDM_ERROR_NONE) {
		/* caller-owned mode pointer is cached; it must outlive the output */
		private_output->current_mode = mode;
		private_output->need_set_target_info = 1;
		TDM_INFO("mode: %dx%d %dhz", mode->hdisplay, mode->vdisplay, mode->vrefresh);

	_pthread_mutex_unlock(&private_display->lock);
tdm_output_get_mode(tdm_output *output, const tdm_output_mode **mode)
	/* Public API: return the cached current mode (may be NULL if no mode
	 * was ever set). No backend call is made. */
	OUTPUT_FUNC_ENTRY();

	TDM_RETURN_VAL_IF_FAIL(mode != NULL, TDM_ERROR_INVALID_PARAMETER);

	_pthread_mutex_lock(&private_display->lock);

	*mode = private_output->current_mode;

	_pthread_mutex_unlock(&private_display->lock);
tdm_output_set_dpms(tdm_output *output, tdm_output_dpms dpms_value)
	/* Public API: synchronous DPMS change.
	 * Extended DPMS values (> OFF) must not use the low 4 bits (reserved
	 * for the default modes) and require the EXTENDED_DPMS capability.
	 * Rejected while an async DPMS change is still pending. */
	tdm_private_module *private_module;
	tdm_func_output *func_output;
	OUTPUT_FUNC_ENTRY();

	if (dpms_value > TDM_OUTPUT_DPMS_OFF) {
		if (dpms_value & TDM_OUTPUT_DPMS_DEFAULT_MASK) {
			TDM_ERR("Don't use the low-4bit for an extended DPMS mode: dpms_value(%x)", dpms_value);
			return TDM_ERROR_BAD_REQUEST;

		if (!(private_output->caps.capabilities & TDM_OUTPUT_CAPABILITY_EXTENDED_DPMS)) {
			TDM_ERR("output(%d) doesn't support the extended DPMS control: '%s'",
					private_output->pipe, tdm_dpms_str(dpms_value));
			return TDM_ERROR_BAD_REQUEST;

	_pthread_mutex_lock(&private_display->lock);

	if (private_output->waiting_dpms_change) {
		TDM_ERR("DPMS is not changed yet. Can't be changed twice");
		_pthread_mutex_unlock(&private_display->lock);
		return TDM_ERROR_BAD_REQUEST;

	private_module = private_output->private_module;
	func_output = &private_module->func_output;

	TDM_INFO("output(%d) dpms '%s'", private_output->pipe, tdm_dpms_str(dpms_value));

	/* a missing backend hook is treated as success (no-op) */
	if (func_output->output_set_dpms)
		ret = func_output->output_set_dpms(private_output->output_backend, dpms_value);
	/* LCOV_EXCL_START */
		ret = TDM_ERROR_NONE;
		TDM_WRN("not implemented!!");
	/* LCOV_EXCL_STOP */

	if (ret == TDM_ERROR_NONE) {
		/* only notify thread callbacks when the value actually changed */
		if (private_output->current_dpms_value != dpms_value) {
			private_output->current_dpms_value = dpms_value;
			_tdm_output_call_thread_cb_dpms(private_output, dpms_value);

		TDM_INFO("output(%d) dpms '%s' done", private_output->pipe, tdm_dpms_str(dpms_value));
		tdm_output_dpms temp = TDM_OUTPUT_DPMS_OFF;

		/* update current_dpms_value forcely */
		tdm_output_get_dpms_internal(output, &temp);

		TDM_ERR("output(%d) set_dpms failed: dpms '%s'", private_output->pipe, tdm_dpms_str(temp));

	_pthread_mutex_unlock(&private_display->lock);
/* LCOV_EXCL_START */
tdm_output_set_dpms_async(tdm_output *output, tdm_output_dpms dpms_value)
	/* Public API: asynchronous DPMS change.
	 * Requires the ASYNC_DPMS capability plus both backend hooks
	 * (output_set_dpms_handler and output_set_dpms_async). On success,
	 * waiting_dpms_change is set and cleared later by the DPMS callback;
	 * further DPMS changes are rejected until then. */
	tdm_private_module *private_module;
	tdm_func_output *func_output;
	OUTPUT_FUNC_ENTRY();

	if (!(private_output->caps.capabilities & TDM_OUTPUT_CAPABILITY_ASYNC_DPMS)) {
		TDM_ERR("output doesn't support the asynchronous DPMS control!");
		return TDM_ERROR_BAD_REQUEST;

	/* same extended-DPMS validation as tdm_output_set_dpms() */
	if (dpms_value > TDM_OUTPUT_DPMS_OFF) {
		if (dpms_value & TDM_OUTPUT_DPMS_DEFAULT_MASK) {
			TDM_ERR("Don't use the low-4bit for an extended DPMS mode: dpms_value(%x)", dpms_value);
			return TDM_ERROR_BAD_REQUEST;

		if (!(private_output->caps.capabilities & TDM_OUTPUT_CAPABILITY_EXTENDED_DPMS)) {
			TDM_ERR("output(%d) doesn't support the extended DPMS control: '%s'",
					private_output->pipe, tdm_dpms_str(dpms_value));
			return TDM_ERROR_BAD_REQUEST;

	_pthread_mutex_lock(&private_display->lock);

	if (private_output->waiting_dpms_change) {
		TDM_ERR("DPMS is not changed yet. Can't be changed twice");
		_pthread_mutex_unlock(&private_display->lock);
		return TDM_ERROR_BAD_REQUEST;

	private_module = private_output->private_module;
	func_output = &private_module->func_output;
	if (!func_output->output_set_dpms_handler) {
		TDM_WRN("not implemented: output_set_dpms_handler");
		_pthread_mutex_unlock(&private_display->lock);
		return TDM_ERROR_NOT_IMPLEMENTED;

	if (!func_output->output_set_dpms_async) {
		TDM_WRN("not implemented: output_set_dpms_async");
		_pthread_mutex_unlock(&private_display->lock);
		return TDM_ERROR_NOT_IMPLEMENTED;

	/* lazily register the backend DPMS-done callback exactly once */
	if (!private_output->regist_dpms_cb) {
		private_output->regist_dpms_cb = 1;
		ret = func_output->output_set_dpms_handler(private_output->output_backend,
				tdm_output_cb_dpms, private_output);
		if (ret != TDM_ERROR_NONE) {
			_pthread_mutex_unlock(&private_display->lock);
			TDM_ERR("Can't set the dpms handler!!");

	TDM_INFO("output(%d) dpms async '%s'", private_output->pipe, tdm_dpms_str(dpms_value));

	ret = func_output->output_set_dpms_async(private_output->output_backend, dpms_value);

	if (ret == TDM_ERROR_NONE) {
		private_output->waiting_dpms_change = 1;
		TDM_INFO("output(%d) dpms async '%s' waiting", private_output->pipe, tdm_dpms_str(dpms_value));
		tdm_output_dpms temp = TDM_OUTPUT_DPMS_OFF;

		/* update current_dpms_value forcely */
		tdm_output_get_dpms_internal(output, &temp);

		TDM_ERR("output(%d) set_dpms_async failed: dpms '%s'", private_output->pipe, tdm_dpms_str(temp));

	_pthread_mutex_unlock(&private_display->lock);
tdm_output_get_dpms_internal(tdm_output *output, tdm_output_dpms *dpms_value)
	/* Internal: read the DPMS state.
	 * Uses the cached value unless commit_per_vblank is enabled, in which
	 * case the backend is queried and any unexpected change is adopted
	 * into the cache (and broadcast to thread callbacks). */
	tdm_private_output *private_output;
	tdm_private_module *private_module;
	tdm_func_output *func_output;
	tdm_error ret = TDM_ERROR_NONE;

	TDM_RETURN_VAL_IF_FAIL(output != NULL, TDM_ERROR_INVALID_PARAMETER);

	private_output = (tdm_private_output*)output;

	/* TODO: this is ugly. But before calling backend's output_get_dpms(), we have
	 * to check if all backends's DPMS operation has no problem. In future, we'd
	 * better use new env instead of using commit_per_vblank variable to distinguish
	 * whether we use the stored value or backend's output_get_dpms.
	 */
	if (!private_output->commit_per_vblank) {
		*dpms_value = private_output->current_dpms_value;
		return TDM_ERROR_NONE;

	private_module = private_output->private_module;
	func_output = &private_module->func_output;

	/* no backend hook: fall back to the cached value */
	if (!func_output->output_get_dpms) {
		/* LCOV_EXCL_START */
		*dpms_value = private_output->current_dpms_value;
		TDM_WRN("not implemented!!");
		return TDM_ERROR_NONE;
		/* LCOV_EXCL_STOP */

	ret = func_output->output_get_dpms(private_output->output_backend, dpms_value);
	if (ret != TDM_ERROR_NONE) {
		/* LCOV_EXCL_START */
		TDM_ERR("output_get_dpms failed");
		*dpms_value = TDM_OUTPUT_DPMS_OFF;
		/* LCOV_EXCL_STOP */

	/* checking with backend's value */
	if (*dpms_value != private_output->current_dpms_value) {
		TDM_ERR("output(%d) dpms changed suddenly: %s -> %s",
				private_output->pipe, tdm_dpms_str(private_output->current_dpms_value),
				tdm_dpms_str(*dpms_value));
		private_output->current_dpms_value = *dpms_value;
		_tdm_output_call_thread_cb_dpms(private_output, *dpms_value);
tdm_output_get_dpms(tdm_output *output, tdm_output_dpms *dpms_value)
	/* Public API: locking wrapper around tdm_output_get_dpms_internal(). */
	OUTPUT_FUNC_ENTRY();

	TDM_RETURN_VAL_IF_FAIL(dpms_value != NULL, TDM_ERROR_INVALID_PARAMETER);

	_pthread_mutex_lock(&private_display->lock);

	ret = tdm_output_get_dpms_internal(output, dpms_value);

	_pthread_mutex_unlock(&private_display->lock);
tdm_output_has_capture_capability(tdm_output *output, unsigned int *has_capability)
	/* Public API: report whether this output's backend supports output
	 * capture (module has CAPTURE capability AND capture supports OUTPUT). */
	tdm_private_module *private_module;

	OUTPUT_FUNC_ENTRY();

	TDM_RETURN_VAL_IF_FAIL(has_capability != NULL, TDM_ERROR_INVALID_PARAMETER);

	_pthread_mutex_lock(&private_display->lock);

	private_module = private_output->private_module;

	if (!(private_module->capabilities & TDM_DISPLAY_CAPABILITY_CAPTURE))
		*has_capability = 0;
	else if (!(private_module->caps_capture.capabilities & TDM_CAPTURE_CAPABILITY_OUTPUT))
		*has_capability = 0;
		*has_capability = 1;

	_pthread_mutex_unlock(&private_display->lock);
EXTERN tdm_capture *
tdm_output_create_capture(tdm_output *output, tdm_error *error)
	/* Public API: create a capture object bound to this output.
	 * Returns NULL on failure; 'error' receives the reason. */
	tdm_capture *capture = NULL;

	OUTPUT_FUNC_ENTRY_ERROR();

	_pthread_mutex_lock(&private_display->lock);

	capture = (tdm_capture *)tdm_capture_create_output_internal(private_output, error);

	_pthread_mutex_unlock(&private_display->lock);
EXTERN tdm_hwc_window *
tdm_output_hwc_create_window(tdm_output *output, tdm_error *error)
	/* Public API: create a (non-video) HWC window on an HWC-capable output.
	 * Returns NULL and sets 'error' to BAD_REQUEST when HWC is unsupported. */
	tdm_hwc_window *hwc_window = NULL;

	OUTPUT_FUNC_ENTRY_ERROR();

	_pthread_mutex_lock(&private_display->lock);

	if (private_output->caps.capabilities & TDM_OUTPUT_CAPABILITY_HWC)
		hwc_window = (tdm_hwc_window *)tdm_hwc_window_create_internal(private_output, 0, error);
	/* LCOV_EXCL_START */
		TDM_ERR("output(%p) not support HWC", private_output);
		/* NOTE(review): 'error' is dereferenced here without a visible NULL
		 * check — presumably guaranteed non-NULL by OUTPUT_FUNC_ENTRY_ERROR;
		 * confirm. */
		*error = TDM_ERROR_BAD_REQUEST;
	/* LCOV_EXCL_STOP */

	_pthread_mutex_unlock(&private_display->lock);
EXTERN tdm_hwc_window *
tdm_output_hwc_create_video_window(tdm_output *output, tdm_error *error)
	/* Public API: create a video HWC window (second arg 1 = video) on an
	 * HWC-capable output. Mirrors tdm_output_hwc_create_window(). */
	tdm_hwc_window *hwc_window = NULL;

	OUTPUT_FUNC_ENTRY_ERROR();

	_pthread_mutex_lock(&private_display->lock);

	if (private_output->caps.capabilities & TDM_OUTPUT_CAPABILITY_HWC)
		hwc_window = (tdm_hwc_window *)tdm_hwc_window_create_internal(private_output, 1, error);
	/* LCOV_EXCL_START */
		TDM_ERR("output(%p) not support HWC", private_output);
		*error = TDM_ERROR_BAD_REQUEST;
	/* LCOV_EXCL_STOP */

	_pthread_mutex_unlock(&private_display->lock);
tdm_output_hwc_destroy_window(tdm_output *output, tdm_hwc_window *hwc_window)
	/* Public API: destroy an HWC window. Locking wrapper around the
	 * internal destroy; 'output' is only used for parameter validation. */
	OUTPUT_FUNC_ENTRY();

	TDM_RETURN_VAL_IF_FAIL(hwc_window != NULL, TDM_ERROR_INVALID_PARAMETER);

	_pthread_mutex_lock(&private_display->lock);

	ret = tdm_hwc_window_destroy_internal(hwc_window);

	_pthread_mutex_unlock(&private_display->lock);
tdm_output_hwc_validate(tdm_output *output, tdm_hwc_window **composited_wnds,
	/* Public API: ask the backend to validate the given composited windows.
	 * Frontend window handles are translated to their backend counterparts
	 * in a temporary array before the call; 'num_types' returns how many
	 * windows need composition-type changes. */
						uint32_t num_wnds, uint32_t *num_types)
	tdm_private_module *private_module;
	tdm_func_output *func_output = NULL;
	tdm_private_hwc_window **composited_wnds_frontend = NULL;
	tdm_hwc_window **composited_wnds_backend = NULL;

	OUTPUT_FUNC_ENTRY();

	TDM_RETURN_VAL_IF_FAIL(num_types != NULL, TDM_ERROR_INVALID_PARAMETER);

	_pthread_mutex_lock(&private_display->lock);

	if (!(private_output->caps.capabilities & TDM_OUTPUT_CAPABILITY_HWC)) {
		TDM_ERR("output(%p) not support HWC", private_output);
		_pthread_mutex_unlock(&private_display->lock);
		return TDM_ERROR_BAD_REQUEST;

	private_module = private_output->private_module;
	func_output = &private_module->func_output;

	if (!func_output->output_hwc_validate) {
		/* LCOV_EXCL_START */
		_pthread_mutex_unlock(&private_display->lock);
		TDM_WRN("not implemented!!");
		return TDM_ERROR_NOT_IMPLEMENTED;
		/* LCOV_EXCL_STOP */

	/* fast path: nothing to translate when no windows are passed */
	if (num_wnds == 0) {
		ret = func_output->output_hwc_validate(private_output->output_backend, NULL, 0, num_types);

		_pthread_mutex_unlock(&private_display->lock);

	composited_wnds_backend = calloc(num_wnds, sizeof(tdm_hwc_window *));
	if (!composited_wnds_backend) {
		/* LCOV_EXCL_START */
		_pthread_mutex_unlock(&private_display->lock);
		return TDM_ERROR_OUT_OF_MEMORY;
		/* LCOV_EXCL_STOP */

	/* frontend handles are really tdm_private_hwc_window*; unwrap them */
	composited_wnds_frontend = (tdm_private_hwc_window **)composited_wnds;

	for (i = 0; i < num_wnds; i++)
		composited_wnds_backend[i] = composited_wnds_frontend[i]->hwc_window_backend;

	ret = func_output->output_hwc_validate(private_output->output_backend, composited_wnds_backend, num_wnds, num_types);

	free(composited_wnds_backend);

	_pthread_mutex_unlock(&private_display->lock);
tdm_output_hwc_set_need_validate_handler(tdm_output *output,
	/* Public API: register the (single) "need revalidation" handler for
	 * an HWC-capable output. Re-registration is rejected. */
										 tdm_output_need_validate_handler hndl)
	OUTPUT_FUNC_ENTRY();

	TDM_RETURN_VAL_IF_FAIL(hndl != NULL, TDM_ERROR_INVALID_PARAMETER);

	_pthread_mutex_lock(&private_display->lock);

	if (!(private_output->caps.capabilities & TDM_OUTPUT_CAPABILITY_HWC)) {
		TDM_ERR("output(%p) not support HWC", private_output);
		_pthread_mutex_unlock(&private_display->lock);
		return TDM_ERROR_BAD_REQUEST;

	/* there's no reason to allow this */
	if (private_output->need_validate.hndl) {
		_pthread_mutex_unlock(&private_display->lock);
		return TDM_ERROR_OPERATION_FAILED;

	private_output->need_validate.hndl = hndl;

	_pthread_mutex_unlock(&private_display->lock);
tdm_output_hwc_get_changed_composition_types(tdm_output *output,
	/* Public API: fetch windows whose composition type changed during the
	 * last validate. The backend fills backend window handles; they are
	 * mapped back to frontend handles in-place before returning. Passing
	 * NULL arrays queries only the count. */
											 uint32_t *num_elements,
											 tdm_hwc_window **hwc_window,
											 tdm_hwc_window_composition *composition_types)
	tdm_private_module *private_module;
	tdm_func_output *func_output = NULL;
	tdm_private_hwc_window * private_hwc_window = NULL;

	OUTPUT_FUNC_ENTRY();

	TDM_RETURN_VAL_IF_FAIL(num_elements != NULL, TDM_ERROR_INVALID_PARAMETER);

	_pthread_mutex_lock(&private_display->lock);

	if (!(private_output->caps.capabilities & TDM_OUTPUT_CAPABILITY_HWC)) {
		TDM_ERR("output(%p) not support HWC", private_output);
		_pthread_mutex_unlock(&private_display->lock);
		return TDM_ERROR_BAD_REQUEST;

	private_module = private_output->private_module;
	func_output = &private_module->func_output;

	if (!func_output->output_hwc_get_changed_composition_types) {
		/* LCOV_EXCL_START */
		_pthread_mutex_unlock(&private_display->lock);
		TDM_WRN("not implemented!!");
		return TDM_ERROR_NOT_IMPLEMENTED;
		/* LCOV_EXCL_STOP */

	ret = func_output->output_hwc_get_changed_composition_types(private_output->output_backend,
			num_elements, hwc_window, composition_types);
	if (ret != TDM_ERROR_NONE) {
		/* LCOV_EXCL_START */
		_pthread_mutex_unlock(&private_display->lock);

		/* LCOV_EXCL_STOP */

	/* count-only query: nothing to translate */
	if (hwc_window == NULL || composition_types == NULL) {
		_pthread_mutex_unlock(&private_display->lock);
		return TDM_ERROR_NONE;

	/* translate backend handles to the frontend wrappers callers expect */
	for (i = 0; i < *num_elements; i++) {
		private_hwc_window = _tdm_output_find_private_hwc_window(private_output, hwc_window[i]);

		if (private_hwc_window == NULL) {
			/* LCOV_EXCL_START */
			TDM_ERR("failed! This should never happen!");
			/* orphan backend window: destroy it so it can't leak */
			func_output->output_hwc_destroy_window(private_output->output_backend, hwc_window[i]);

			_pthread_mutex_unlock(&private_display->lock);
			return TDM_ERROR_OPERATION_FAILED;
			/* LCOV_EXCL_STOP */

		hwc_window[i] = (tdm_hwc_window*)private_hwc_window;

	_pthread_mutex_unlock(&private_display->lock);
1790 tdm_output_hwc_accept_changes(tdm_output *output)
1792 tdm_private_module *private_module;
1793 tdm_func_output *func_output = NULL;
1795 OUTPUT_FUNC_ENTRY();
1797 _pthread_mutex_lock(&private_display->lock);
1799 if (!(private_output->caps.capabilities & TDM_OUTPUT_CAPABILITY_HWC)) {
1800 TDM_ERR("output(%p) not support HWC", private_output);
1801 _pthread_mutex_unlock(&private_display->lock);
1802 return TDM_ERROR_BAD_REQUEST;
1805 private_module = private_output->private_module;
1806 func_output = &private_module->func_output;
1808 if (!func_output->output_hwc_validate) {
1809 /* LCOV_EXCL_START */
1810 _pthread_mutex_unlock(&private_display->lock);
1811 TDM_WRN("not implemented!!");
1812 return TDM_ERROR_NOT_IMPLEMENTED;
1813 /* LCOV_EXCL_STOP */
1816 ret = func_output->output_hwc_accept_changes(private_output->output_backend);
1818 _pthread_mutex_unlock(&private_display->lock);
tdm_output_hwc_get_target_buffer_queue(tdm_output *output, tdm_error *error)
	/* Public API: obtain the tbm_surface_queue used for the HWC client
	 * target (composited) buffer. Returns NULL on failure with 'error' set. */
	tdm_private_module *private_module;
	tdm_func_output *func_output = NULL;
	tbm_surface_queue_h queue = NULL;

	OUTPUT_FUNC_ENTRY_ERROR();

	_pthread_mutex_lock(&private_display->lock);

	if (!(private_output->caps.capabilities & TDM_OUTPUT_CAPABILITY_HWC)) {
		TDM_ERR("output(%p) not support HWC", private_output);
		*error = TDM_ERROR_BAD_REQUEST;
		_pthread_mutex_unlock(&private_display->lock);

	private_module = private_output->private_module;
	func_output = &private_module->func_output;

	if (!func_output->output_hwc_get_target_buffer_queue) {
		/* LCOV_EXCL_START */
		_pthread_mutex_unlock(&private_display->lock);
		TDM_WRN("not implemented!!");
		/* LCOV_EXCL_STOP */

	queue = func_output->output_hwc_get_target_buffer_queue(private_output->output_backend, error);

	_pthread_mutex_unlock(&private_display->lock);
tdm_output_hwc_set_client_target_buffer(tdm_output *output, tbm_surface_h target_buffer, tdm_hwc_region damage)
	/* Public API: hand the composited client target buffer (plus its damage
	 * region) to the backend. When window dumping is enabled, the buffer is
	 * also dumped to tdm_debug_dump_dir. */
	tdm_private_module *private_module;
	tdm_func_output *func_output = NULL;

	OUTPUT_FUNC_ENTRY();

	_pthread_mutex_lock(&private_display->lock);

	if (!(private_output->caps.capabilities & TDM_OUTPUT_CAPABILITY_HWC)) {
		TDM_ERR("output(%p) not support HWC", private_output);
		_pthread_mutex_unlock(&private_display->lock);
		return TDM_ERROR_BAD_REQUEST;

	if (tdm_debug_dump & TDM_DUMP_FLAG_WINDOW) {
		/* LCOV_EXCL_START */
		char str[TDM_PATH_LEN];
		/* NOTE(review): 'i' is a dump-sequence counter declared out of view
		 * (presumably static) — confirm its declaration/lifetime. */
		snprintf(str, TDM_PATH_LEN, "target_window_%d_%03d",
				 private_output->index, i++);
		tdm_helper_dump_buffer_str(target_buffer, tdm_debug_dump_dir, str);
		/* LCOV_EXCL_STOP */

	private_module = private_output->private_module;
	func_output = &private_module->func_output;

	if (!func_output->output_hwc_set_client_target_buffer) {
		/* LCOV_EXCL_START */
		_pthread_mutex_unlock(&private_display->lock);
		TDM_WRN("not implemented!!");
		return TDM_ERROR_NOT_IMPLEMENTED;
		/* LCOV_EXCL_STOP */

	ret = func_output->output_hwc_set_client_target_buffer(private_output->output_backend, target_buffer, damage);

	_pthread_mutex_unlock(&private_display->lock);
1905 tdm_output_hwc_unset_client_target_buffer(tdm_output *output)
1907 tdm_private_module *private_module;
1908 tdm_func_output *func_output = NULL;
1910 OUTPUT_FUNC_ENTRY();
1912 _pthread_mutex_lock(&private_display->lock);
1914 if (!(private_output->caps.capabilities & TDM_OUTPUT_CAPABILITY_HWC)) {
1915 TDM_ERR("output(%p) not support HWC", private_output);
1916 _pthread_mutex_unlock(&private_display->lock);
1917 return TDM_ERROR_BAD_REQUEST;
1920 private_module = private_output->private_module;
1921 func_output = &private_module->func_output;
1923 if (!func_output->output_hwc_unset_client_target_buffer) {
1924 /* LCOV_EXCL_START */
1925 _pthread_mutex_unlock(&private_display->lock);
1926 TDM_ERR("not implemented!!");
1927 return TDM_ERROR_NOT_IMPLEMENTED;
1928 /* LCOV_EXCL_STOP */
1931 ret = func_output->output_hwc_unset_client_target_buffer(private_output->output_backend);
1933 _pthread_mutex_unlock(&private_display->lock);
_tdm_output_hwc_layer_commit_handler(tdm_layer *layer, unsigned int sequence,
	/* Layer-commit trampoline for the HWC client-target commit: unwraps the
	 * per-commit context, forwards the event to the user's handler with the
	 * output handle, then frees the one-shot context allocated in
	 * tdm_output_hwc_commit_client_target_buffer(). */
									 unsigned int tv_sec, unsigned int tv_usec,
	tdm_private_output_hwc_target_buffer_commit_handler *output_hwc_target_buffer_commit_handler = (tdm_private_output_hwc_target_buffer_commit_handler *)user_data;
	tdm_output_hwc_target_buffer_commit_handler func = output_hwc_target_buffer_commit_handler->func;
	tdm_output *output = (tdm_output *)output_hwc_target_buffer_commit_handler->private_output;
	void *data = output_hwc_target_buffer_commit_handler->user_data;

	func(output, sequence, tv_sec, tv_usec, data);

	free(output_hwc_target_buffer_commit_handler);
tdm_output_hwc_commit_client_target_buffer(tdm_output *output, tdm_output_hwc_target_buffer_commit_handler func, void *user_data)
	/* Public API: commit the HWC client target buffer through its assigned
	 * layer. Steps: resolve the target layer from the backend, set/unset
	 * the buffer on it, (re)apply layer geometry from the current mode when
	 * flagged, then perform a layer commit whose completion fires 'func'
	 * via _tdm_output_hwc_layer_commit_handler. */
	tdm_private_module *private_module;
	tdm_func_output *func_output;
	tdm_private_output_hwc_target_buffer_commit_handler *output_hwc_target_buffer_commit_handler;
	tdm_layer *layer = NULL;
	tdm_private_layer *private_layer;
	const tdm_output_mode *mode;
	tbm_surface_h buffer;

	OUTPUT_FUNC_ENTRY();

	_pthread_mutex_lock(&private_display->lock);

	if (!(private_output->caps.capabilities & TDM_OUTPUT_CAPABILITY_HWC)) {
		TDM_ERR("output(%p) not support HWC", private_output);
		_pthread_mutex_unlock(&private_display->lock);
		return TDM_ERROR_BAD_REQUEST;

	private_module = private_output->private_module;
	func_output = &private_module->func_output;

	if (!func_output->output_hwc_get_client_target_buffer_layer) {
		/* LCOV_EXCL_START */
		_pthread_mutex_unlock(&private_display->lock);
		TDM_ERR("not implemented!!");
		return TDM_ERROR_NOT_IMPLEMENTED;
		/* LCOV_EXCL_STOP */

	/* the backend decides which layer carries the client target buffer */
	layer = func_output->output_hwc_get_client_target_buffer_layer(private_output->output_backend,
		/* LCOV_EXCL_START */
		_pthread_mutex_unlock(&private_display->lock);
		TDM_ERR("no assigned layer!!");
		return TDM_ERROR_INVALID_PARAMETER;
		/* LCOV_EXCL_STOP */

	private_layer = (tdm_private_layer*)layer;

	if (!func_output->output_hwc_get_client_target_buffer) {
		/* LCOV_EXCL_START */
		_pthread_mutex_unlock(&private_display->lock);
		TDM_ERR("not implemented!!");
		return TDM_ERROR_NOT_IMPLEMENTED;
		/* LCOV_EXCL_STOP */

	/* a NULL buffer means "unset" (see the two branches below) */
	buffer = func_output->output_hwc_get_client_target_buffer(private_output->output_backend,
	ret = tdm_layer_set_buffer_internal(private_layer, buffer);
	ret = tdm_layer_unset_buffer_internal(private_layer);
	if (ret != TDM_ERROR_NONE) {
		/* LCOV_EXCL_START */
		TDM_ERR("failed: layer set info(window)");
		_pthread_mutex_unlock(&private_display->lock);
		/* LCOV_EXCL_STOP */

	/* mode changed since last commit: rebuild the full-screen layer info */
	if (private_output->need_set_target_info) {
		mode = private_output->current_mode;
		private_output->target_buffer_info.src_config.size.h = mode->hdisplay;
		private_output->target_buffer_info.src_config.size.v = mode->vdisplay;
		private_output->target_buffer_info.src_config.pos.x = 0;
		private_output->target_buffer_info.src_config.pos.y = 0;
		private_output->target_buffer_info.src_config.pos.w = mode->hdisplay;
		private_output->target_buffer_info.src_config.pos.h = mode->vdisplay;
		private_output->target_buffer_info.dst_pos.x = 0;
		private_output->target_buffer_info.dst_pos.y = 0;
		private_output->target_buffer_info.dst_pos.w = mode->hdisplay;
		private_output->target_buffer_info.dst_pos.h = mode->vdisplay;
		private_output->target_buffer_info.transform = TDM_TRANSFORM_NORMAL;

		ret = tdm_layer_set_info_internal(private_layer, &private_output->target_buffer_info);
		if (ret != TDM_ERROR_NONE) {
			/* LCOV_EXCL_START */
			TDM_ERR("failed: layer set info(window)");
			_pthread_mutex_unlock(&private_display->lock);
			/* LCOV_EXCL_STOP */

		private_output->need_set_target_info = 0;

	/* one-shot context; freed by _tdm_output_hwc_layer_commit_handler */
	output_hwc_target_buffer_commit_handler = calloc(1, sizeof(tdm_private_output_hwc_target_buffer_commit_handler));
	if (!output_hwc_target_buffer_commit_handler) {
		/* LCOV_EXCL_START */
		TDM_ERR("failed: alloc memory");
		_pthread_mutex_unlock(&private_display->lock);
		return TDM_ERROR_OUT_OF_MEMORY;
		/* LCOV_EXCL_STOP */

	output_hwc_target_buffer_commit_handler->private_output = private_output;
	output_hwc_target_buffer_commit_handler->func = func;
	output_hwc_target_buffer_commit_handler->user_data = user_data;

	ret = tdm_layer_commit_internal(private_layer, _tdm_output_hwc_layer_commit_handler, output_hwc_target_buffer_commit_handler);
	if (ret != TDM_ERROR_NONE) {
		/* LCOV_EXCL_START */
		TDM_ERR("failed: commit layer(target buffer)");
		free(output_hwc_target_buffer_commit_handler);
		_pthread_mutex_unlock(&private_display->lock);
		/* LCOV_EXCL_STOP */

	_pthread_mutex_unlock(&private_display->lock);
tdm_output_hwc_get_video_supported_formats(tdm_output *output, const tbm_format **formats,
	/* Public API: query the tbm formats supported for HWC video windows.
	 * 'formats' receives a backend-owned array; 'count' its length. */
	tdm_private_module *private_module;
	tdm_func_output *func_output;
	OUTPUT_FUNC_ENTRY();

	TDM_RETURN_VAL_IF_FAIL(formats != NULL, TDM_ERROR_INVALID_PARAMETER);
	TDM_RETURN_VAL_IF_FAIL(count != NULL, TDM_ERROR_INVALID_PARAMETER);

	_pthread_mutex_lock(&private_display->lock);

	private_module = private_output->private_module;
	func_output = &private_module->func_output;

	if (!func_output->output_hwc_get_video_supported_formats) {
		/* LCOV_EXCL_START */
		_pthread_mutex_unlock(&private_display->lock);
		TDM_WRN("not implemented!!");
		return TDM_ERROR_NOT_IMPLEMENTED;
		/* LCOV_EXCL_STOP */

	ret = func_output->output_hwc_get_video_supported_formats(
			private_output->output_backend, formats, count);

	_pthread_mutex_unlock(&private_display->lock);
_is_hwc_output_still_existed(tdm_private_output *private_output)
	/* Check whether 'private_output' is still present in its module's
	 * output list as an HWC-capable output (it may have been destroyed
	 * between event emission and delivery). */
	tdm_private_module *private_module = private_output->private_module;
	tdm_private_output *o = NULL;

	LIST_FOR_EACH_ENTRY(o, &private_module->output_list, link) {
		if (!(o->caps.capabilities & TDM_OUTPUT_CAPABILITY_HWC))

		if (o == private_output)
/* gets called on behalf of the ecore-main-loop thread */
tdm_output_need_validate_handler_thread(tdm_private_display *private_display, void *object, tdm_thread_cb_base *cb_base, void *user_data)
	/* Deliver a "need revalidation" event to the user handler on the
	 * main-loop thread. Verifies the output still exists first, since the
	 * event may outlive the output it was queued for. */
	tdm_private_output *private_output = object;

	TDM_RETURN_IF_FAIL(private_output != NULL);

	_pthread_mutex_lock(&private_display->lock);

	/* as we get 'private_output' within an event, an output this 'private_output'
	 * points to can be destroyed already */
	if (!_is_hwc_output_still_existed(private_output)) {
		_pthread_mutex_unlock(&private_display->lock);

	_pthread_mutex_unlock(&private_display->lock);

	TDM_INFO("tdm-backend asks for revalidation for the output:%p.", private_output);

	/* NOTE(review): hndl is read after the lock is dropped — a racing
	 * output destruction between unlock and this call looks possible;
	 * confirm against the destruction path. */
	if (private_output->need_validate.hndl)
		private_output->need_validate.hndl((tdm_output*)private_output);
/* gets called on behalf of the tdm-thread */
_need_validate_handler(int fd, tdm_event_loop_mask mask, void *user_data)
	/* eventfd readiness callback: drain the eventfd counter, then forward
	 * a TDM_THREAD_CB_NEED_VALIDATE event towards the main-loop thread
	 * (handled by tdm_output_need_validate_handler_thread). */
	tdm_thread_cb_need_validate ev;
	tdm_private_output *private_output;

	private_output = (tdm_private_output *)user_data;

	/* the eventfd must be drained or the fd stays readable forever */
	if (read(private_output->need_validate.event_fd, &value, sizeof(value)) < 0) {
		TDM_ERR("error while trying to read from a need_validate.event_fd fd.");
		return TDM_ERROR_OPERATION_FAILED;

	memset(&ev, 0, sizeof ev);
	ev.base.type = TDM_THREAD_CB_NEED_VALIDATE;
	ev.base.length = sizeof ev;
	ev.base.object_stamp = private_output->stamp;
	ev.base.data = NULL;

	ret = tdm_thread_cb_call(private_output, &ev.base, 1);
	TDM_WARNING_IF_FAIL(ret == TDM_ERROR_NONE);

	TDM_INFO("tdm-thread: get a 'need to revalidate' event for the ouptut:%p.", private_output);

	/* who cares about this? */
	return TDM_ERROR_NONE;
tdm_output_need_validate_event_init(tdm_output *output)
	/* Internal (lock held): create the eventfd-based "need revalidation"
	 * channel for an HWC-capable output and register it with the tdm-thread
	 * event loop, dispatching to _need_validate_handler. */
	OUTPUT_FUNC_ENTRY();

	TDM_RETURN_VAL_IF_FAIL(TDM_MUTEX_IS_LOCKED(), TDM_ERROR_OPERATION_FAILED);

	if (!(private_output->caps.capabilities & TDM_OUTPUT_CAPABILITY_HWC)) {
		TDM_ERR("output(%p) not support HWC", private_output);
		return TDM_ERROR_BAD_REQUEST;

	/* build in eventfd fds into event_loop listened & handled by the tdm-thread */
	TDM_WARNING_IF_FAIL(fd >= 0);

	private_output->need_validate.event_source = tdm_event_loop_add_fd_handler(private_display,
			fd, TDM_EVENT_LOOP_READABLE, _need_validate_handler, private_output, &ret);
	TDM_WARNING_IF_FAIL(ret == TDM_ERROR_NONE);

	private_output->need_validate.event_fd = fd;

	TDM_INFO("register an output:%p for the revalidation, event_fd:%d.", private_output, fd);
2212 tdm_output_choose_commit_per_vblank_mode(tdm_private_output *private_output, int mode)
2214 if (!private_output)
2215 return TDM_ERROR_INVALID_PARAMETER;
2217 if (mode < 0 || mode > 2)
2218 return TDM_ERROR_INVALID_PARAMETER;
2220 private_output->commit_per_vblank = mode;
2222 if (private_output->commit_per_vblank == 0)
2223 TDM_INFO("commit per vblank: disable");
2224 else if (private_output->commit_per_vblank == 1)
2225 TDM_INFO("commit per vblank: enable (1 layer)");
2226 else if (private_output->commit_per_vblank == 2)
2227 TDM_INFO("commit per vblank: enable (previous commit)");
2229 return TDM_ERROR_NONE;