1 /**************************************************************************
5 * Copyright 2015 Samsung Electronics co., Ltd. All Rights Reserved.
7 * Contact: Eunchul Kim <chulspro.kim@samsung.com>,
8 * JinYoung Jeon <jy0.jeon@samsung.com>,
9 * Taeheon Kim <th908.kim@samsung.com>,
10 * YoungJun Cho <yj44.cho@samsung.com>,
11 * SooChan Lim <sc1.lim@samsung.com>,
12 * Boram Park <sc1.lim@samsung.com>
14 * Permission is hereby granted, free of charge, to any person obtaining a
15 * copy of this software and associated documentation files (the
16 * "Software"), to deal in the Software without restriction, including
17 * without limitation the rights to use, copy, modify, merge, publish,
18 * distribute, sub license, and/or sell copies of the Software, and to
19 * permit persons to whom the Software is furnished to do so, subject to
20 * the following conditions:
22 * The above copyright notice and this permission notice (including the
23 * next paragraph) shall be included in all copies or substantial portions
26 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
27 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
28 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
29 * IN NO EVENT SHALL PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR
30 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
31 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
32 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
34 **************************************************************************/
40 #include "tdm_private.h"
/* Common prologue for tdm_output API entry points: validates the public
 * `output` handle, then resolves the `private_output`/`private_display`
 * locals used by the function body. Also declares `ret`, defaulted to
 * TDM_ERROR_NONE, for the caller's use. Returns TDM_ERROR_INVALID_PARAMETER
 * from the enclosing function on an invalid handle. */
44 #define OUTPUT_FUNC_ENTRY() \
45 tdm_private_display *private_display; \
46 tdm_private_output *private_output; \
47 tdm_error ret = TDM_ERROR_NONE; /* default TDM_ERROR_NONE */\
48 TDM_RETURN_VAL_IF_FAIL(tdm_output_is_valid(output), TDM_ERROR_INVALID_PARAMETER); \
49 private_output = (tdm_private_output*)output; \
50 private_display = private_output->private_display
/* Variant of OUTPUT_FUNC_ENTRY() for pointer-returning entry points:
 * on an invalid handle it returns NULL and (via the _WITH_ERROR macro)
 * reports TDM_ERROR_INVALID_PARAMETER through the caller's out-parameter. */
52 #define OUTPUT_FUNC_ENTRY_ERROR() \
53 tdm_private_display *private_display; \
54 tdm_private_output *private_output; \
55 tdm_error ret = TDM_ERROR_NONE; /* default TDM_ERROR_NONE */\
56 TDM_RETURN_VAL_IF_FAIL_WITH_ERROR(tdm_output_is_valid(output), TDM_ERROR_INVALID_PARAMETER, NULL); \
57 private_output = (tdm_private_output*)output; \
58 private_display = private_output->private_display
61 _tdm_output_vblank_timeout_update(tdm_private_output *private_output, int ms_delay);
/* Event-loop timer callback fired when a requested vblank has not arrived
 * within the timeout window. Bumps the expiry counter and dumps every
 * pending vblank handler to the error log for debugging; always returns
 * TDM_ERROR_NONE. NOTE(review): this listing is elided in places (e.g. the
 * pipe argument of the first TDM_ERR is not visible here). */
64 _tdm_output_vblank_timeout_cb(void *user_data)
66 tdm_private_output *private_output = user_data;
67 tdm_private_output_vblank_handler *v = NULL;
69 TDM_RETURN_VAL_IF_FAIL(private_output != NULL, TDM_ERROR_OPERATION_FAILED);
71 private_output->vblank_timeout_timer_expired++;
73 TDM_ERR("TDM output(%d) vblank TIMEOUT!! (%d time%s)",
75 private_output->vblank_timeout_timer_expired,
76 (private_output->vblank_timeout_timer_expired > 1) ? "s" : "");
78 LIST_FOR_EACH_ENTRY(v, &private_output->vblank_handler_list, link) {
79 TDM_ERR("vblank_handler(%p) interval(%d) sync(%d) sent_to_frontend(%u) owner_tid(%d)",
80 v, v->interval, v->sync, v->sent_to_frontend, v->owner_tid);
83 return TDM_ERROR_NONE;
/* Debug helper: logs every pending vblank handler on this output whose
 * user_data matches the given one. Handlers with a different user_data
 * are skipped (the `continue` after the filter test is elided here). */
87 tdm_output_vblank_print_wait_information(tdm_private_output *private_output, void *user_data)
89 tdm_private_output_vblank_handler *v = NULL;
91 TDM_RETURN_IF_FAIL(private_output != NULL);
92 TDM_RETURN_IF_FAIL(user_data != NULL);
94 TDM_ERR("TDM output(%d) vblank user_data(%p) info!!", private_output->pipe, user_data);
96 LIST_FOR_EACH_ENTRY(v, &private_output->vblank_handler_list, link) {
97 if (v->user_data != user_data)
99 TDM_ERR("vblank_handler(%p) interval(%d) sync(%d) sent_to_frontend(%u) owner_tid(%d)",
100 v, v->interval, v->sync, v->sent_to_frontend, v->owner_tid);
/* (Re)arms the per-output vblank timeout timer. Lazily creates the timer
 * source on first use (resetting the expiry counter), then updates its
 * delay to ms_delay. Callers pass 0 here after a vblank arrives —
 * presumably a 0 delay disarms the timer; confirm against
 * tdm_event_loop_source_timer_update() semantics. */
105 _tdm_output_vblank_timeout_update(tdm_private_output *private_output, int ms_delay)
109 if (!private_output->vblank_timeout_timer) {
110 private_output->vblank_timeout_timer =
111 tdm_event_loop_add_timer_handler(private_output->private_display,
112 _tdm_output_vblank_timeout_cb,
115 if (!private_output->vblank_timeout_timer) {
116 TDM_ERR("output(%d) couldn't add timer", private_output->pipe);
119 TDM_INFO("output(%d) create vblank timeout timer", private_output->pipe);
120 private_output->vblank_timeout_timer_expired = 0;
123 ret = tdm_event_loop_source_timer_update(private_output->vblank_timeout_timer, ms_delay);
124 if (ret != TDM_ERROR_NONE) {
125 TDM_ERR("output(%d) couldn't update timer", private_output->pipe);
/* Linear search of the output's hwc_window_list for the wrapper owning the
 * given backend hwc-window handle. Returns the wrapper, or (on the elided
 * fall-through path) presumably NULL when no match is found. */
130 static tdm_private_hwc_window *
131 _tdm_output_find_private_hwc_window(tdm_private_output *private_output,
132 tdm_hwc_window *hwc_window_backend)
134 tdm_private_hwc_window *private_hwc_window = NULL;
136 LIST_FOR_EACH_ENTRY(private_hwc_window, &private_output->hwc_window_list, link) {
137 if (private_hwc_window->hwc_window_backend == hwc_window_backend)
138 return private_hwc_window;
/* One-time module init: registers tdm_display_find_output_stamp() as the
 * object-lookup function for all four output-related thread-callback
 * types, so cross-thread events can be routed back to their output. */
145 tdm_output_init(tdm_private_display *private_display)
147 tdm_thread_cb_set_find_func(TDM_THREAD_CB_OUTPUT_COMMIT, tdm_display_find_output_stamp);
148 tdm_thread_cb_set_find_func(TDM_THREAD_CB_OUTPUT_VBLANK, tdm_display_find_output_stamp);
149 tdm_thread_cb_set_find_func(TDM_THREAD_CB_OUTPUT_STATUS, tdm_display_find_output_stamp);
150 tdm_thread_cb_set_find_func(TDM_THREAD_CB_OUTPUT_DPMS, tdm_display_find_output_stamp);
153 return TDM_ERROR_NONE;
/* Returns the backend module that owns this output. `error`, when
 * non-NULL, receives TDM_ERROR_NONE (the NULL guard around the *error
 * store is elided in this listing — presumably `if (error)`). */
157 tdm_output_get_backend_module(tdm_output *output, tdm_error *error)
159 tdm_private_module *private_module;
161 OUTPUT_FUNC_ENTRY_ERROR();
163 _pthread_mutex_lock(&private_display->lock);
165 private_module = private_output->private_module;
168 *error = TDM_ERROR_NONE;
170 _pthread_mutex_unlock(&private_display->lock);
172 return private_module;
/* Copies the cached maker/model/name strings from the output caps under
 * the display lock. Each out-parameter appears to be individually
 * NULL-guarded (the `if (...)` lines are elided in this listing). The
 * returned pointers reference caps storage — callers must not free them. */
176 tdm_output_get_model_info(tdm_output *output, const char **maker,
177 const char **model, const char **name)
181 _pthread_mutex_lock(&private_display->lock);
184 *maker = private_output->caps.maker;
186 *model = private_output->caps.model;
188 *name = private_output->caps.name;
190 _pthread_mutex_unlock(&private_display->lock);
/* Reads the cached capability bitmask (tdm_output_capability flags)
 * for this output under the display lock. */
196 tdm_output_get_capabilities(tdm_output *output, tdm_output_capability *capabilities)
200 TDM_RETURN_VAL_IF_FAIL(capabilities != NULL, TDM_ERROR_INVALID_PARAMETER);
202 _pthread_mutex_lock(&private_display->lock);
204 *capabilities = private_output->caps.capabilities;
206 _pthread_mutex_unlock(&private_display->lock);
/* Reads the cached connection status for this output under the display
 * lock. The cache is kept current by tdm_output_cb_status() below. */
212 tdm_output_get_conn_status(tdm_output *output, tdm_output_conn_status *status)
216 TDM_RETURN_VAL_IF_FAIL(status != NULL, TDM_ERROR_INVALID_PARAMETER);
218 _pthread_mutex_lock(&private_display->lock);
220 *status = private_output->caps.status;
222 _pthread_mutex_unlock(&private_display->lock);
/* Thread-callback trampoline for output change events. Translates a
 * STATUS or DPMS thread event into the public (type, value) pair and
 * invokes the user's change handler. Must run on the handler's owner
 * thread (asserted). The display lock is dropped around the user callback
 * so the handler can safely re-enter TDM APIs. */
228 tdm_output_thread_cb_change(tdm_private_display *private_display, void *object, tdm_thread_cb_base *cb_base, void *user_data)
230 tdm_private_output *private_output = object;
231 tdm_private_output_change_handler *change_handler = user_data;
232 tdm_output_change_type type = TDM_OUTPUT_CHANGE_CONNECTION;
233 tdm_value value = {.u32 = 0 };
235 TDM_RETURN_IF_FAIL(TDM_MUTEX_IS_LOCKED());
237 assert(change_handler->owner_tid == syscall(SYS_gettid));
239 if (cb_base->type == TDM_THREAD_CB_OUTPUT_STATUS) {
240 tdm_thread_cb_output_status *output_status = (tdm_thread_cb_output_status *)cb_base;
241 type = TDM_OUTPUT_CHANGE_CONNECTION;
242 value.u32 = output_status->status;
243 } else if (cb_base->type == TDM_THREAD_CB_OUTPUT_DPMS) {
244 tdm_thread_cb_output_dpms *output_dpms = (tdm_thread_cb_output_dpms *)cb_base;
245 type = TDM_OUTPUT_CHANGE_DPMS;
246 value.u32 = output_dpms->dpms;
248 TDM_NEVER_GET_HERE();
252 _pthread_mutex_unlock(&private_display->lock);
253 change_handler->func(private_output, type, value, change_handler->user_data);
254 _pthread_mutex_lock(&private_display->lock);
/* Builds a STATUS thread event for this output and dispatches it via
 * tdm_thread_cb_call(). Delivered synchronously (base.sync = 1), so
 * registered change handlers run before this returns. A dispatch failure
 * is only warned about; TDM_ERROR_NONE is returned regardless. */
261 tdm_thread_cb_output_status output_status;
264 memset(&output_status, 0, sizeof output_status);
265 output_status.base.type = TDM_THREAD_CB_OUTPUT_STATUS;
266 output_status.base.length = sizeof output_status;
267 output_status.base.object_stamp = private_output->stamp;
268 output_status.base.data = NULL;
269 output_status.base.sync = 1;
270 output_status.status = status;
272 ret = tdm_thread_cb_call(private_output, &output_status.base);
273 TDM_WARNING_IF_FAIL(ret == TDM_ERROR_NONE);
275 return TDM_ERROR_NONE;
/* Builds a DPMS thread event for this output and dispatches it via
 * tdm_thread_cb_call(). Unlike the status event above, delivery is
 * asynchronous (base.sync = 0). A dispatch failure is only warned
 * about; TDM_ERROR_NONE is returned regardless. */
281 tdm_thread_cb_output_dpms output_dpms;
284 memset(&output_dpms, 0, sizeof output_dpms);
285 output_dpms.base.type = TDM_THREAD_CB_OUTPUT_DPMS;
286 output_dpms.base.length = sizeof output_dpms;
287 output_dpms.base.object_stamp = private_output->stamp;
288 output_dpms.base.data = NULL;
289 output_dpms.base.sync = 0;
290 output_dpms.dpms = dpms;
292 ret = tdm_thread_cb_call(private_output, &output_dpms.base);
293 TDM_WARNING_IF_FAIL(ret == TDM_ERROR_NONE);
295 return TDM_ERROR_NONE;
/* Backend callback: the physical connection status changed. On any
 * transition into or out of DISCONNECTED, re-probes the output via
 * tdm_display_update_output() before caching the new status, then
 * forwards the event to change handlers through the thread-callback path. */
299 tdm_output_cb_status(tdm_output *output_backend, tdm_output_conn_status status, void *user_data)
301 tdm_private_output *private_output = user_data;
304 TDM_RETURN_IF_FAIL(private_output);
306 TDM_INFO("output(%d) %s", private_output->pipe, tdm_status_str(status));
308 if ((private_output->caps.status == TDM_OUTPUT_CONN_STATUS_DISCONNECTED && status != TDM_OUTPUT_CONN_STATUS_DISCONNECTED) ||
309 (private_output->caps.status != TDM_OUTPUT_CONN_STATUS_DISCONNECTED && status == TDM_OUTPUT_CONN_STATUS_DISCONNECTED)) {
310 ret = tdm_display_update_output(private_output->private_module, output_backend, private_output->pipe, 1);
311 TDM_RETURN_IF_FAIL(ret == TDM_ERROR_NONE);
313 private_output->caps.status = status;
316 ret = _tdm_output_call_thread_cb_status(private_output, status);
317 TDM_WARNING_IF_FAIL(ret == TDM_ERROR_NONE);
/* Backend callback: an (async) DPMS change completed. Caches the new DPMS
 * value, clears the waiting flag, and forwards the event to change
 * handlers through the thread-callback path.
 * FIX: the first TDM_INFO stringified the tdm_output_dpms value with
 * tdm_status_str(), which is the tdm_output_conn_status stringifier (see
 * tdm_output_cb_status above); the same function already uses
 * tdm_dpms_str(dpms) two lines below. Use tdm_dpms_str() here too. */
321 tdm_output_cb_dpms(tdm_output *output_backend, tdm_output_dpms dpms, void *user_data)
323 tdm_private_output *private_output = user_data;
326 TDM_INFO("output(%d) %s", private_output->pipe, tdm_dpms_str(dpms));
328 private_output->current_dpms_value = dpms;
329 private_output->waiting_dpms_change = 0;
330 TDM_INFO("output(%d) dpms async '%s' done", private_output->pipe, tdm_dpms_str(dpms));
332 ret = _tdm_output_call_thread_cb_dpms(private_output, dpms);
333 TDM_WARNING_IF_FAIL(ret == TDM_ERROR_NONE);
/* Registers a change handler (connection / DPMS) on this output.
 * Rejects exact (func, user_data) duplicates with TDM_ERROR_BAD_REQUEST.
 * The handler is wired to both the DPMS and STATUS thread-callback types;
 * on partial failure the first registration is rolled back. The handler
 * records its owner thread id so events are delivered on that thread. */
337 tdm_output_add_change_handler(tdm_output *output,
338 tdm_output_change_handler func,
341 tdm_private_output_change_handler *change_handler = NULL;
344 TDM_RETURN_VAL_IF_FAIL(func != NULL, TDM_ERROR_INVALID_PARAMETER);
346 _pthread_mutex_lock(&private_display->lock);
348 LIST_FOR_EACH_ENTRY(change_handler, &private_output->change_handler_list, link) {
349 if (change_handler->func == func && change_handler->user_data == user_data) {
350 TDM_ERR("can't add twice");
351 _pthread_mutex_unlock(&private_display->lock);
352 return TDM_ERROR_BAD_REQUEST;
356 change_handler = calloc(1, sizeof(tdm_private_output_change_handler));
357 if (!change_handler) {
358 /* LCOV_EXCL_START */
359 TDM_ERR("failed: alloc memory");
360 _pthread_mutex_unlock(&private_display->lock);
361 return TDM_ERROR_OUT_OF_MEMORY;
365 ret = tdm_thread_cb_add(private_output, TDM_THREAD_CB_OUTPUT_DPMS, NULL, tdm_output_thread_cb_change, change_handler);
366 if (ret != TDM_ERROR_NONE) {
367 /* LCOV_EXCL_START */
368 TDM_ERR("tdm_thread_cb_add failed");
369 free(change_handler);
370 _pthread_mutex_unlock(&private_display->lock);
371 return TDM_ERROR_OPERATION_FAILED;
375 ret = tdm_thread_cb_add(private_output, TDM_THREAD_CB_OUTPUT_STATUS, NULL, tdm_output_thread_cb_change, change_handler);
376 if (ret != TDM_ERROR_NONE) {
377 /* LCOV_EXCL_START */
378 TDM_ERR("tdm_thread_cb_add failed");
379 tdm_thread_cb_remove(private_output, TDM_THREAD_CB_OUTPUT_DPMS, NULL, tdm_output_thread_cb_change, change_handler);
380 free(change_handler);
381 _pthread_mutex_unlock(&private_display->lock);
382 return TDM_ERROR_OPERATION_FAILED;
386 change_handler->private_output = private_output;
387 change_handler->func = func;
388 change_handler->user_data = user_data;
389 change_handler->owner_tid = syscall(SYS_gettid);
391 LIST_ADDTAIL(&change_handler->link, &private_output->change_handler_list);
393 _pthread_mutex_unlock(&private_display->lock);
/* Unregisters the change handler matching (func, user_data): detaches it
 * from both thread-callback types, unlinks and frees it. Silently does
 * nothing when no matching handler exists. (The early-return after the
 * in-loop unlock is elided in this listing; the trailing unlock covers
 * the not-found path.) */
399 tdm_output_remove_change_handler(tdm_output *output,
400 tdm_output_change_handler func,
403 tdm_private_display *private_display;
404 tdm_private_output *private_output;
405 tdm_private_output_change_handler *change_handler = NULL, *hh = NULL;
407 TDM_RETURN_IF_FAIL(tdm_output_is_valid(output));
408 TDM_RETURN_IF_FAIL(func != NULL);
410 private_output = (tdm_private_output*)output;
411 private_display = private_output->private_display;
413 _pthread_mutex_lock(&private_display->lock);
415 LIST_FOR_EACH_ENTRY_SAFE(change_handler, hh, &private_output->change_handler_list, link) {
416 if (change_handler->func != func || change_handler->user_data != user_data)
419 tdm_thread_cb_remove(private_output, TDM_THREAD_CB_OUTPUT_DPMS, NULL, tdm_output_thread_cb_change, change_handler);
420 tdm_thread_cb_remove(private_output, TDM_THREAD_CB_OUTPUT_STATUS, NULL, tdm_output_thread_cb_change, change_handler);
422 LIST_DEL(&change_handler->link);
423 free(change_handler);
425 _pthread_mutex_unlock(&private_display->lock);
430 _pthread_mutex_unlock(&private_display->lock);
/* Reads the cached connector type (tdm_output_type) under the display lock. */
434 tdm_output_get_output_type(tdm_output *output, tdm_output_type *type)
438 TDM_RETURN_VAL_IF_FAIL(type != NULL, TDM_ERROR_INVALID_PARAMETER);
440 _pthread_mutex_lock(&private_display->lock);
442 *type = private_output->caps.type;
444 _pthread_mutex_unlock(&private_display->lock);
/* Counts the layers on this output by walking layer_list. Refused with
 * TDM_ERROR_BAD_REQUEST on HWC-capable outputs, which expose windows via
 * the HWC API instead of layers. (The per-iteration count increment is
 * elided in this listing.) */
450 tdm_output_get_layer_count(tdm_output *output, int *count)
452 tdm_private_layer *private_layer = NULL;
456 TDM_RETURN_VAL_IF_FAIL(count != NULL, TDM_ERROR_INVALID_PARAMETER);
458 _pthread_mutex_lock(&private_display->lock);
460 if (private_output->caps.capabilities & TDM_OUTPUT_CAPABILITY_HWC) {
461 TDM_ERR("output(%p) support HWC. Use HWC functions", private_output);
463 _pthread_mutex_unlock(&private_display->lock);
464 return TDM_ERROR_BAD_REQUEST;
468 LIST_FOR_EACH_ENTRY(private_layer, &private_output->layer_list, link)
471 _pthread_mutex_unlock(&private_display->lock);
472 return TDM_ERROR_NONE;
475 _pthread_mutex_unlock(&private_display->lock);
/* Looks up the layer with the given index on this output. Refused with
 * TDM_ERROR_BAD_REQUEST on HWC-capable outputs. Returns the matching
 * layer handle, or (on the elided not-found path) presumably NULL with
 * an appropriate *error. */
482 tdm_output_get_layer(tdm_output *output, int index, tdm_error *error)
484 tdm_private_layer *private_layer = NULL;
486 OUTPUT_FUNC_ENTRY_ERROR();
488 _pthread_mutex_lock(&private_display->lock);
491 *error = TDM_ERROR_NONE;
493 if (private_output->caps.capabilities & TDM_OUTPUT_CAPABILITY_HWC) {
494 _pthread_mutex_unlock(&private_display->lock);
495 TDM_ERR("output(%p) support HWC. Use HWC functions", private_output);
497 *error = TDM_ERROR_BAD_REQUEST;
501 LIST_FOR_EACH_ENTRY(private_layer, &private_output->layer_list, link) {
502 if (private_layer->index == index) {
503 _pthread_mutex_unlock(&private_display->lock);
504 return private_layer;
508 _pthread_mutex_unlock(&private_display->lock);
/* Exposes the backend-provided output property table (pointer into caps
 * storage — not a copy; callers must not free or modify it). */
514 tdm_output_get_available_properties(tdm_output *output, const tdm_prop **props,
519 TDM_RETURN_VAL_IF_FAIL(props != NULL, TDM_ERROR_INVALID_PARAMETER);
520 TDM_RETURN_VAL_IF_FAIL(count != NULL, TDM_ERROR_INVALID_PARAMETER);
522 _pthread_mutex_lock(&private_display->lock);
524 *props = (const tdm_prop *)private_output->caps.props;
525 *count = private_output->caps.prop_count;
527 _pthread_mutex_unlock(&private_display->lock);
/* Exposes the backend-provided mode list (pointer into caps storage —
 * not a copy; callers must not free or modify it). */
533 tdm_output_get_available_modes(tdm_output *output,
534 const tdm_output_mode **modes, int *count)
538 TDM_RETURN_VAL_IF_FAIL(modes != NULL, TDM_ERROR_INVALID_PARAMETER);
539 TDM_RETURN_VAL_IF_FAIL(count != NULL, TDM_ERROR_INVALID_PARAMETER);
541 _pthread_mutex_lock(&private_display->lock);
543 *modes = (const tdm_output_mode *)private_output->caps.modes;
544 *count = private_output->caps.mode_count;
546 _pthread_mutex_unlock(&private_display->lock);
/* Reads the output's min/max size limits and preferred alignment from
 * caps, each filtered through TDM_FRONT_VALUE. Each out-parameter appears
 * to be individually NULL-guarded (the `if` lines are elided here). */
552 tdm_output_get_available_size(tdm_output *output, int *min_w, int *min_h,
553 int *max_w, int *max_h, int *preferred_align)
557 _pthread_mutex_lock(&private_display->lock);
560 *min_w = TDM_FRONT_VALUE(private_output->caps.min_w);
562 *min_h = TDM_FRONT_VALUE(private_output->caps.min_h);
564 *max_w = TDM_FRONT_VALUE(private_output->caps.max_w);
566 *max_h = TDM_FRONT_VALUE(private_output->caps.max_h);
568 *preferred_align = TDM_FRONT_VALUE(private_output->caps.preferred_align);
570 _pthread_mutex_unlock(&private_display->lock);
/* Same as tdm_output_get_available_size() but for cursor planes.
 * Requires backend ABI >= 1.5 (the cursor caps fields were added then);
 * older backends get TDM_ERROR_BAD_REQUEST. */
576 tdm_output_get_cursor_available_size(tdm_output *output, int *min_w, int *min_h,
577 int *max_w, int *max_h, int *preferred_align)
581 _pthread_mutex_lock(&private_display->lock);
583 if (!tdm_module_check_abi(private_output->private_module, 1, 5)) {
584 _pthread_mutex_unlock(&private_display->lock);
585 return TDM_ERROR_BAD_REQUEST;
589 *min_w = TDM_FRONT_VALUE(private_output->caps.cursor_min_w);
591 *min_h = TDM_FRONT_VALUE(private_output->caps.cursor_min_h);
593 *max_w = TDM_FRONT_VALUE(private_output->caps.cursor_max_w);
595 *max_h = TDM_FRONT_VALUE(private_output->caps.cursor_max_h);
597 *preferred_align = TDM_FRONT_VALUE(private_output->caps.cursor_preferred_align);
599 _pthread_mutex_unlock(&private_display->lock);
/* Reads the cached physical panel dimensions in millimetres. */
605 tdm_output_get_physical_size(tdm_output *output, unsigned int *mmWidth,
606 unsigned int *mmHeight)
610 _pthread_mutex_lock(&private_display->lock);
613 *mmWidth = private_output->caps.mmWidth;
615 *mmHeight = private_output->caps.mmHeight;
617 _pthread_mutex_unlock(&private_display->lock);
/* Reads the cached subpixel layout value under the display lock. */
623 tdm_output_get_subpixel(tdm_output *output, unsigned int *subpixel)
626 TDM_RETURN_VAL_IF_FAIL(subpixel != NULL, TDM_ERROR_INVALID_PARAMETER);
628 _pthread_mutex_lock(&private_display->lock);
630 *subpixel = private_output->caps.subpixel;
632 _pthread_mutex_unlock(&private_display->lock);
/* Reads this output's pipe (zero-based output index) under the display lock. */
638 tdm_output_get_pipe(tdm_output *output, unsigned int *pipe)
641 TDM_RETURN_VAL_IF_FAIL(pipe != NULL, TDM_ERROR_INVALID_PARAMETER);
643 _pthread_mutex_lock(&private_display->lock);
645 *pipe = private_output->pipe;
647 _pthread_mutex_unlock(&private_display->lock);
/* Finds the index of the layer flagged TDM_LAYER_CAPABILITY_PRIMARY on
 * this output. (The loop's `break` after the match is elided here.) */
653 tdm_output_get_primary_index(tdm_output *output, int *index)
655 tdm_private_layer *private_layer = NULL;
658 TDM_RETURN_VAL_IF_FAIL(index != NULL, TDM_ERROR_INVALID_PARAMETER);
660 _pthread_mutex_lock(&private_display->lock);
662 LIST_FOR_EACH_ENTRY(private_layer, &private_output->layer_list, link) {
663 if (private_layer->caps.capabilities & TDM_LAYER_CAPABILITY_PRIMARY) {
664 *index = private_layer->index;
669 _pthread_mutex_unlock(&private_display->lock);
/* Sets an output property by id, delegating to the backend's
 * output_set_property(). Returns TDM_ERROR_NOT_IMPLEMENTED when the
 * backend does not provide the hook. (The `value` argument line of the
 * backend call is elided in this listing.) */
675 tdm_output_set_property(tdm_output *output, unsigned int id, tdm_value value)
677 tdm_private_module *private_module;
678 tdm_func_output *func_output;
681 _pthread_mutex_lock(&private_display->lock);
683 private_module = private_output->private_module;
684 func_output = &private_module->func_output;
686 if (!func_output->output_set_property) {
687 /* LCOV_EXCL_START */
688 _pthread_mutex_unlock(&private_display->lock);
689 TDM_WRN("not implemented!!");
690 return TDM_ERROR_NOT_IMPLEMENTED;
694 ret = func_output->output_set_property(private_output->output_backend, id,
697 _pthread_mutex_unlock(&private_display->lock);
/* Reads an output property by id, delegating to the backend's
 * output_get_property(). Returns TDM_ERROR_NOT_IMPLEMENTED when the
 * backend does not provide the hook. */
703 tdm_output_get_property(tdm_output *output, unsigned int id, tdm_value *value)
705 tdm_private_module *private_module;
706 tdm_func_output *func_output;
709 TDM_RETURN_VAL_IF_FAIL(value != NULL, TDM_ERROR_INVALID_PARAMETER);
711 _pthread_mutex_lock(&private_display->lock);
713 private_module = private_output->private_module;
714 func_output = &private_module->func_output;
716 if (!func_output->output_get_property) {
717 /* LCOV_EXCL_START */
718 _pthread_mutex_unlock(&private_display->lock);
719 TDM_WRN("not implemented!!");
720 return TDM_ERROR_NOT_IMPLEMENTED;
724 ret = func_output->output_get_property(private_output->output_backend, id,
727 _pthread_mutex_unlock(&private_display->lock);
/* Thread-callback trampoline for a vblank event. Runs on the waiting
 * handler's owner thread (asserted). Disarms the vblank timeout timer,
 * detaches the thread callback, then collects every handler with the same
 * (interval, sync, owner_tid) into a private clone_list so they can all be
 * invoked for this single hardware vblank. User callbacks are invoked with
 * the display lock dropped. NOTE(review): `interval`/`sync` declarations
 * and the per-handler cleanup inside the second loop are elided in this
 * listing. */
733 _tdm_output_thread_cb_vblank(tdm_private_display *private_display, void *object, tdm_thread_cb_base *cb_base, void *user_data)
735 tdm_thread_cb_output_vblank *output_vblank = (tdm_thread_cb_output_vblank *)cb_base;
736 tdm_private_output_vblank_handler *vblank_handler = output_vblank->base.data;
737 tdm_private_output_vblank_handler *v = NULL, *vv = NULL;
738 tdm_private_output *private_output = object;
739 struct list_head clone_list;
741 pid_t tid = syscall(SYS_gettid);
743 TDM_RETURN_IF_FAIL(TDM_MUTEX_IS_LOCKED());
745 assert(vblank_handler->owner_tid == tid);
747 vblank_handler->sent_to_frontend = 0;
749 _tdm_output_vblank_timeout_update(private_output, 0);
751 tdm_thread_cb_remove(private_output, TDM_THREAD_CB_OUTPUT_VBLANK, vblank_handler, _tdm_output_thread_cb_vblank, NULL);
753 interval = vblank_handler->interval;
754 sync = vblank_handler->sync;
756 LIST_INITHEAD(&clone_list);
758 LIST_FOR_EACH_ENTRY_SAFE(v, vv, &private_output->vblank_handler_list, link) {
759 if (v->interval != interval || v->sync != sync || v->owner_tid != tid)
763 LIST_ADDTAIL(&v->link, &clone_list);
766 if (tdm_debug_module & TDM_DEBUG_COMMIT)
767 TDM_INFO("----------------------------------------- output(%d) got vblank", private_output->pipe);
769 _pthread_mutex_unlock(&private_display->lock);
770 LIST_FOR_EACH_ENTRY_SAFE(v, vv, &clone_list, link) {
771 if (tdm_debug_module & TDM_DEBUG_COMMIT)
772 TDM_INFO("handler(%p)", v);
777 v->func(v->private_output,
778 output_vblank->sequence,
779 output_vblank->tv_sec,
780 output_vblank->tv_usec,
785 _pthread_mutex_lock(&private_display->lock);
787 if (tdm_debug_module & TDM_DEBUG_COMMIT)
788 TDM_INFO("-----------------------------------------...");
/* Backend vblank callback (may fire on the backend/event thread).
 * Packages the vblank data into a thread event addressed to the waiting
 * handler's output and dispatches it asynchronously so the handler runs
 * on its owner thread via _tdm_output_thread_cb_vblank(). */
792 _tdm_output_cb_vblank(tdm_output *output_backend, unsigned int sequence,
793 unsigned int tv_sec, unsigned int tv_usec, void *user_data)
795 tdm_private_output_vblank_handler *vblank_handler = user_data;
796 tdm_thread_cb_output_vblank output_vblank;
799 memset(&output_vblank, 0, sizeof output_vblank);
800 output_vblank.base.type = TDM_THREAD_CB_OUTPUT_VBLANK;
801 output_vblank.base.length = sizeof output_vblank;
802 output_vblank.base.object_stamp = vblank_handler->private_output->stamp;
803 output_vblank.base.data = vblank_handler;
804 output_vblank.base.sync = 0;
805 output_vblank.sequence = sequence;
806 output_vblank.tv_sec = tv_sec;
807 output_vblank.tv_usec = tv_usec;
809 vblank_handler->sent_to_frontend = 1;
811 if (tdm_debug_module & TDM_DEBUG_COMMIT)
812 TDM_INFO("output(%d) wait_vblank: handler(%p)", vblank_handler->private_output->pipe, vblank_handler);
814 ret = tdm_thread_cb_call(vblank_handler->private_output, &output_vblank.base);
815 TDM_WARNING_IF_FAIL(ret == TDM_ERROR_NONE);
/* Thread-callback trampoline for a commit-done event. Runs on the commit
 * handler's owner thread (asserted). Detaches and unlinks the one-shot
 * handler, lets each layer retire its committed buffer (output-commit path
 * only — the layer-commit path handles this in the layer callback), then
 * invokes the user's commit callback with the display lock dropped, and
 * frees the handler. */
819 _tdm_output_thread_cb_commit(tdm_private_display *private_display, void *object, tdm_thread_cb_base *cb_base, void *user_data)
821 tdm_thread_cb_output_commit *output_commit = (tdm_thread_cb_output_commit *)cb_base;
822 tdm_private_output_commit_handler *output_commit_handler = output_commit->base.data;
823 tdm_private_output *private_output = object;
824 tdm_private_layer *private_layer = NULL;
826 TDM_RETURN_IF_FAIL(TDM_MUTEX_IS_LOCKED());
828 if (!output_commit_handler)
831 assert(output_commit_handler->owner_tid == syscall(SYS_gettid));
833 tdm_thread_cb_remove(private_output, TDM_THREAD_CB_OUTPUT_COMMIT, output_commit_handler, _tdm_output_thread_cb_commit, NULL);
835 LIST_DEL(&output_commit_handler->link);
837 if (tdm_debug_module & TDM_DEBUG_COMMIT) {
838 TDM_INFO("----------------------------------------- output(%d) committed", private_output->pipe);
839 TDM_INFO("handler(%p)", output_commit_handler);
842 if (private_output->commit_type == TDM_COMMIT_TYPE_OUTPUT) {
843 /* In case of layer commit, the below will be handled in the layer commit callback */
844 LIST_FOR_EACH_ENTRY(private_layer, &private_output->layer_list, link) {
845 if (private_layer->committed_buffer)
846 tdm_layer_committed(private_layer, &private_layer->committed_buffer);
850 if (output_commit_handler->func) {
851 _pthread_mutex_unlock(&private_display->lock);
852 output_commit_handler->func(private_output,
853 output_commit->sequence,
854 output_commit->tv_sec,
855 output_commit->tv_usec,
856 output_commit_handler->user_data);
857 _pthread_mutex_lock(&private_display->lock);
860 free(output_commit_handler);
862 if (tdm_debug_module & TDM_DEBUG_COMMIT)
863 TDM_INFO("-----------------------------------------...");
/* Backend commit-done callback (may fire on the backend/event thread).
 * Resolves the owning private_output either from the handler or — when
 * the backend passed no handler — by looking the backend output up in the
 * display. Packages the commit data into an async thread event so the
 * user callback runs on its owner thread via _tdm_output_thread_cb_commit(). */
867 _tdm_output_cb_commit(tdm_output *output_backend, unsigned int sequence,
868 unsigned int tv_sec, unsigned int tv_usec, void *user_data)
870 tdm_private_output_commit_handler *output_commit_handler = user_data;
871 tdm_private_output *private_output;
872 tdm_thread_cb_output_commit output_commit;
875 if (output_commit_handler)
876 private_output = output_commit_handler->private_output;
878 private_output = tdm_display_find_private_output(tdm_display_get(), output_backend);
880 memset(&output_commit, 0, sizeof output_commit);
881 output_commit.base.type = TDM_THREAD_CB_OUTPUT_COMMIT;
882 output_commit.base.length = sizeof output_commit;
883 output_commit.base.object_stamp = private_output->stamp;
884 output_commit.base.data = output_commit_handler;
885 output_commit.base.sync = 0;
886 output_commit.sequence = sequence;
887 output_commit.tv_sec = tv_sec;
888 output_commit.tv_usec = tv_usec;
890 ret = tdm_thread_cb_call(private_output, &output_commit.base);
891 TDM_WARNING_IF_FAIL(ret == TDM_ERROR_NONE);
894 /* add_front: To distinguish between the user vblank handlers and the layer
895 * commit vblank handlers. The layer commit handlers will be called
896 * before calling the user vblank handlers.
/* Core vblank-wait implementation (called with the display lock held).
 * Lazily registers the backend vblank callback, allocates a handler record
 * and inserts it at the head (add_front, for layer-commit handlers) or
 * tail (user handlers) of vblank_handler_list. If another handler with
 * the same (interval, sync, owner_tid) already has a backend request in
 * flight, the backend call is skipped (skip_request) and this handler
 * piggy-backs on the pending vblank. On success, arms a 1000 ms vblank
 * timeout watchdog. NOTE(review): several lines are elided here
 * (the interval clamp, the skip_request assignment, the success return,
 * and the wait_failed epilogue's return). */
899 _tdm_output_wait_vblank(tdm_private_output *private_output, int interval, int sync,
900 tdm_output_vblank_handler func, void *user_data,
901 unsigned int add_front)
903 tdm_private_module *private_module;
904 tdm_func_output *func_output;
905 tdm_private_output_vblank_handler *vblank_handler = NULL, *v = NULL;
906 unsigned int skip_request = 0;
907 pid_t tid = syscall(SYS_gettid);
908 tdm_error ret = TDM_ERROR_NONE;
910 private_module = private_output->private_module;
911 func_output = &private_module->func_output;
913 /* interval SHOULD be at least 1 */
917 if (!func_output->output_wait_vblank) {
918 /* LCOV_EXCL_START */
919 TDM_WRN("not implemented!!");
920 return TDM_ERROR_NOT_IMPLEMENTED;
924 if (!private_output->regist_vblank_cb) {
925 private_output->regist_vblank_cb = 1;
926 ret = func_output->output_set_vblank_handler(private_output->output_backend,
927 _tdm_output_cb_vblank);
930 vblank_handler = calloc(1, sizeof(tdm_private_output_vblank_handler));
931 if (!vblank_handler) {
932 /* LCOV_EXCL_START */
933 TDM_ERR("failed: alloc memory");
934 return TDM_ERROR_OUT_OF_MEMORY;
938 if (tdm_debug_module & TDM_DEBUG_COMMIT)
939 TDM_INFO("output(%d) wait_vblank: handler(%p)", private_output->pipe, vblank_handler);
941 LIST_FOR_EACH_ENTRY(v, &private_output->vblank_handler_list, link) {
942 if (v->interval == interval && v->sync == sync && v->owner_tid == tid) {
949 LIST_ADD(&vblank_handler->link, &private_output->vblank_handler_list);
951 LIST_ADDTAIL(&vblank_handler->link, &private_output->vblank_handler_list);
953 vblank_handler->private_output = private_output;
954 vblank_handler->interval = interval;
955 vblank_handler->sync = sync;
956 vblank_handler->func = func;
957 vblank_handler->user_data = user_data;
958 vblank_handler->owner_tid = tid;
960 /* If there is the previous request, we can skip to call output_wait_vblank() */
962 ret = tdm_thread_cb_add(private_output, TDM_THREAD_CB_OUTPUT_VBLANK, vblank_handler, _tdm_output_thread_cb_vblank, NULL);
963 if (ret != TDM_ERROR_NONE) {
964 TDM_ERR("tdm_thread_cb_add failed");
968 ret = func_output->output_wait_vblank(private_output->output_backend, interval,
969 sync, vblank_handler);
970 TDM_GOTO_IF_FAIL(ret == TDM_ERROR_NONE, wait_failed);
972 _tdm_output_vblank_timeout_update(private_output, 1000);
974 if (tdm_debug_module & TDM_DEBUG_COMMIT)
975 TDM_INFO("output(%d) backend wait_vblank", private_output->pipe);
981 /* LCOV_EXCL_START */
982 if (vblank_handler) {
983 tdm_thread_cb_remove(private_output, TDM_THREAD_CB_OUTPUT_VBLANK, vblank_handler, _tdm_output_thread_cb_vblank, NULL);
984 LIST_DEL(&vblank_handler->link);
985 free(vblank_handler);
/* Public vblank wait: validates interval > 0, refuses with
 * TDM_ERROR_DPMS_OFF when the output's DPMS state has vsync disabled,
 * then delegates to _tdm_output_wait_vblank() with add_front = 0
 * (handler appended after any pending layer-commit handlers). */
992 tdm_output_wait_vblank(tdm_output *output, int interval, int sync,
993 tdm_output_vblank_handler func, void *user_data)
996 TDM_RETURN_VAL_IF_FAIL(interval > 0, TDM_ERROR_INVALID_PARAMETER);
998 _pthread_mutex_lock(&private_display->lock);
1000 if (TDM_OUTPUT_DPMS_VSYNC_IS_OFF(private_output->current_dpms_value)) {
1001 TDM_WRN("output(%d) dpms: %s", private_output->pipe,
1002 tdm_dpms_str(private_output->current_dpms_value));
1003 _pthread_mutex_unlock(&private_display->lock);
1004 return TDM_ERROR_DPMS_OFF;
1007 ret = _tdm_output_wait_vblank(private_output, interval, sync, func, user_data, 0);
1009 _pthread_mutex_unlock(&private_display->lock);
1014 /* LCOV_EXCL_START */
/* Internal variant of tdm_output_wait_vblank(): identical except the
 * handler is inserted at the FRONT of the list (add_front = 1), so
 * layer-commit vblank handlers run before user vblank handlers — see the
 * add_front comment above _tdm_output_wait_vblank(). */
1016 tdm_output_wait_vblank_add_front(tdm_output *output, int interval, int sync,
1017 tdm_output_vblank_handler func, void *user_data)
1019 OUTPUT_FUNC_ENTRY();
1021 _pthread_mutex_lock(&private_display->lock);
1023 if (TDM_OUTPUT_DPMS_VSYNC_IS_OFF(private_output->current_dpms_value)) {
1024 TDM_WRN("output(%d) dpms: %s", private_output->pipe,
1025 tdm_dpms_str(private_output->current_dpms_value));
1026 _pthread_mutex_unlock(&private_display->lock);
1027 return TDM_ERROR_DPMS_OFF;
1030 ret = _tdm_output_wait_vblank(private_output, interval, sync, func, user_data, 1);
1032 _pthread_mutex_unlock(&private_display->lock);
1036 /* LCOV_EXCL_STOP */
/* Lock-held helper: neutralizes the vblank handler matching
 * (func, user_data) by NULLing its func/user_data fields rather than
 * freeing it — the record itself is reclaimed when its vblank event
 * eventually fires (the `v->func = NULL;` line is elided here). */
1039 tdm_output_remove_vblank_handler_internal(tdm_output *output, tdm_output_vblank_handler func, void *user_data)
1041 tdm_private_output *private_output = (tdm_private_output*)output;
1042 tdm_private_output_vblank_handler *v = NULL;
1044 TDM_RETURN_IF_FAIL(private_output != NULL);
1045 TDM_RETURN_IF_FAIL(TDM_MUTEX_IS_LOCKED());
1047 LIST_FOR_EACH_ENTRY(v, &private_output->vblank_handler_list, link) {
1048 if (v->func == func && v->user_data == user_data) {
1049 /* only set func & user_data to NULL. It will be freed when an event occurs */
1051 v->user_data = NULL;
/* Lock-held helper: neutralizes the commit handler matching
 * (func, user_data) by NULLing its func/user_data fields; the record is
 * freed later by _tdm_output_thread_cb_commit() when the commit event
 * fires (the `c->func = NULL;` line is elided here). */
1058 tdm_output_remove_commit_handler_internal(tdm_output *output, tdm_output_commit_handler func, void *user_data)
1060 tdm_private_output *private_output = (tdm_private_output*)output;
1061 tdm_private_output_commit_handler *c = NULL;
1063 TDM_RETURN_IF_FAIL(private_output != NULL);
1064 TDM_RETURN_IF_FAIL(TDM_MUTEX_IS_LOCKED());
1066 LIST_FOR_EACH_ENTRY(c, &private_output->output_commit_handler_list, link) {
1067 if (c->func == func && c->user_data == user_data) {
1068 /* only set func & user_data to NULL. It will be freed when an event occurs */
1070 c->user_data = NULL;
/* Public wrapper: removes a vblank handler under the display lock. */
1077 tdm_output_remove_vblank_handler(tdm_output *output, tdm_output_vblank_handler func, void *user_data)
1079 OUTPUT_FUNC_ENTRY();
1081 _pthread_mutex_lock(&private_display->lock);
1083 tdm_output_remove_vblank_handler_internal(output, func, user_data);
1085 _pthread_mutex_unlock(&private_display->lock);
/* Public wrapper: removes a commit handler under the display lock. */
1091 tdm_output_remove_commit_handler(tdm_output *output, tdm_output_commit_handler func, void *user_data)
1093 OUTPUT_FUNC_ENTRY();
1095 _pthread_mutex_lock(&private_display->lock);
1097 tdm_output_remove_commit_handler_internal(output, func, user_data);
1099 _pthread_mutex_unlock(&private_display->lock);
/*
 * Core commit path for an output (caller holds the display lock).
 * Registers the backend commit callback once, queues a per-commit handler
 * object, calls the backend's output_commit(), and rotates each layer's
 * waiting_buffer into committed_buffer. When DPMS is (vsync-)off the
 * backend commit is skipped and 'func' is invoked synchronously instead.
 */
1105 tdm_output_commit_internal(tdm_output *output, int sync, tdm_output_commit_handler func, void *user_data)
1107 tdm_private_output *private_output;
1108 tdm_private_module *private_module;
1109 tdm_func_output *func_output;
1110 tdm_private_output_commit_handler *output_commit_handler = NULL;
1111 tdm_private_layer *private_layer = NULL;
1112 tdm_output_dpms dpms_value = TDM_OUTPUT_DPMS_ON;
1113 tdm_error ret = TDM_ERROR_NONE;
1115 TDM_RETURN_VAL_IF_FAIL(tdm_output_is_valid(output), TDM_ERROR_INVALID_PARAMETER);
1117 private_output = (tdm_private_output*)output;
1118 private_module = private_output->private_module;
1119 func_output = &private_module->func_output;
/* Backend must implement output_commit for any commit to be possible. */
1121 if (!func_output->output_commit) {
1122 /* LCOV_EXCL_START */
1123 TDM_WRN("not implemented!!");
1124 return TDM_ERROR_NOT_IMPLEMENTED;
1125 /* LCOV_EXCL_STOP */
1128 ret = tdm_output_get_dpms_internal(output, &dpms_value);
1129 TDM_RETURN_VAL_IF_FAIL(ret == TDM_ERROR_NONE, ret);
/* Only talk to the backend when vsync is active for the current DPMS state. */
1131 if (!TDM_OUTPUT_DPMS_VSYNC_IS_OFF(dpms_value)) {
/* Lazily register the backend commit-done callback exactly once per output. */
1133 if (!private_output->regist_commit_cb) {
1134 private_output->regist_commit_cb = 1;
1135 ret = func_output->output_set_commit_handler(private_output->output_backend, _tdm_output_cb_commit);
1136 TDM_GOTO_IF_FAIL(ret == TDM_ERROR_NONE, commit_failed);
/* One handler object per commit; freed when the commit event is delivered
 * (or on the commit_failed cleanup path below). */
1139 output_commit_handler = calloc(1, sizeof(tdm_private_output_commit_handler));
1140 if (!output_commit_handler) {
1141 /* LCOV_EXCL_START */
1142 TDM_ERR("failed: alloc memory");
1143 return TDM_ERROR_OUT_OF_MEMORY;
1144 /* LCOV_EXCL_STOP */
1147 ret = tdm_thread_cb_add(private_output, TDM_THREAD_CB_OUTPUT_COMMIT, output_commit_handler, _tdm_output_thread_cb_commit, NULL);
1148 if (ret != TDM_ERROR_NONE) {
1149 TDM_ERR("tdm_thread_cb_add failed");
1150 free(output_commit_handler);
1154 LIST_ADDTAIL(&output_commit_handler->link, &private_output->output_commit_handler_list);
1155 output_commit_handler->private_output = private_output;
1156 output_commit_handler->func = func;
1157 output_commit_handler->user_data = user_data;
/* Remember the calling thread so the completion callback can be routed back. */
1158 output_commit_handler->owner_tid = syscall(SYS_gettid);
1161 ret = func_output->output_commit(private_output->output_backend, sync,
1162 output_commit_handler);
1163 TDM_GOTO_IF_FAIL(ret == TDM_ERROR_NONE, commit_failed);
1165 if (tdm_debug_module & TDM_DEBUG_COMMIT)
1166 TDM_INFO("output(%d) backend commit: handle(%p) func(%p) user_data(%p)",
1167 private_output->pipe, output_commit_handler, func, user_data);
1170 /* Even if DPMS is off, committed_buffer should be changed because it will be referred
1171 * for tdm_layer_committed() function.
/* Rotate waiting_buffer -> committed_buffer on every layer that has one. */
1173 LIST_FOR_EACH_ENTRY(private_layer, &private_output->layer_list, link) {
1174 if (!private_layer->waiting_buffer)
1177 if (private_layer->committed_buffer)
1178 tdm_layer_free_buffer(private_layer, private_layer->committed_buffer);
1180 private_layer->committed_buffer = private_layer->waiting_buffer;
1181 private_layer->waiting_buffer = NULL;
1182 if (tdm_debug_module & TDM_DEBUG_BUFFER)
1183 TDM_INFO("layer(%p) waiting_buffer(%p) committed_buffer(%p)",
1184 private_layer, private_layer->waiting_buffer,
1185 private_layer->committed_buffer->buffer);
/* DPMS vsync-off: no hardware vblank will arrive, so complete synchronously.
 * The zero arguments are placeholder sequence/timestamp values. */
1188 if (TDM_OUTPUT_DPMS_VSYNC_IS_OFF(dpms_value)) {
1189 TDM_WRN("dpms %s. Directly call commit handler instead of commit.", tdm_dpms_str(dpms_value));
1191 func(output, 0, 0, 0, user_data);
/* Error path: undo thread-cb registration, unlink and free the handler. */
1197 /* LCOV_EXCL_START */
1198 if (output_commit_handler) {
1199 tdm_thread_cb_remove(private_output, TDM_THREAD_CB_OUTPUT_COMMIT, output_commit_handler, _tdm_output_thread_cb_commit, NULL);
1200 LIST_DEL(&output_commit_handler->link);
1201 free(output_commit_handler);
1204 /* LCOV_EXCL_STOP */
/*
 * Public API: commit all pending layer data of an output.
 * Rejects mixed usage with tdm_layer_commit() (commit_type/commit_per_vblank
 * guards) and refuses to commit while DPMS has vsync off. Flushes each
 * layer's pending data before delegating to tdm_output_commit_internal().
 */
1208 tdm_output_commit(tdm_output *output, int sync, tdm_output_commit_handler func,
1211 tdm_private_layer *private_layer = NULL;
1213 OUTPUT_FUNC_ENTRY();
1215 _pthread_mutex_lock(&private_display->lock);
/* First commit on this output fixes the commit style to per-output. */
1217 if (private_output->commit_type == TDM_COMMIT_TYPE_NONE)
1218 private_output->commit_type = TDM_COMMIT_TYPE_OUTPUT;
1219 else if (private_output->commit_type == TDM_COMMIT_TYPE_LAYER) {
1220 TDM_ERR("Can't supported. Use tdm_layer_commit");
1221 _pthread_mutex_unlock(&private_display->lock);
1222 return TDM_ERROR_BAD_REQUEST;
/* commit-per-vblank mode is driven through the layer API only. */
1225 if (private_output->commit_per_vblank) {
1226 TDM_ERR("Use tdm_layer_commit");
1227 _pthread_mutex_unlock(&private_display->lock);
1228 return TDM_ERROR_BAD_REQUEST;
1231 if (TDM_OUTPUT_DPMS_VSYNC_IS_OFF(private_output->current_dpms_value)) {
1232 TDM_ERR("output(%d) dpms: %s", private_output->pipe,
1233 tdm_dpms_str(private_output->current_dpms_value));
1234 _pthread_mutex_unlock(&private_display->lock);
1235 return TDM_ERROR_DPMS_OFF;
1238 if (tdm_debug_module & TDM_DEBUG_COMMIT)
1239 TDM_INFO("output(%d) commit", private_output->pipe);
1241 /* apply the pending data of all layers */
1242 LIST_FOR_EACH_ENTRY(private_layer, &private_output->layer_list, link) {
1243 tdm_layer_commit_pending_data(private_layer);
1246 ret = tdm_output_commit_internal(output, sync, func, user_data);
1248 _pthread_mutex_unlock(&private_display->lock);
/*
 * Public API: set the display mode of an output via the backend.
 * On success caches the mode pointer and marks the HWC target-buffer
 * info as stale (need_set_target_info) so it is rebuilt on next commit.
 */
1254 tdm_output_set_mode(tdm_output *output, const tdm_output_mode *mode)
1256 tdm_private_module *private_module;
1257 tdm_func_output *func_output;
1258 OUTPUT_FUNC_ENTRY();
1260 TDM_RETURN_VAL_IF_FAIL(mode != NULL, TDM_ERROR_INVALID_PARAMETER);
1262 _pthread_mutex_lock(&private_display->lock);
1264 private_module = private_output->private_module;
1265 func_output = &private_module->func_output;
1267 if (!func_output->output_set_mode) {
1268 /* LCOV_EXCL_START */
1269 _pthread_mutex_unlock(&private_display->lock);
1270 TDM_WRN("not implemented!!");
1271 return TDM_ERROR_NOT_IMPLEMENTED;
1272 /* LCOV_EXCL_STOP */
1275 ret = func_output->output_set_mode(private_output->output_backend, mode);
1276 if (ret == TDM_ERROR_NONE) {
/* NOTE(review): the caller's 'mode' pointer is cached, not copied — the
 * caller must keep it alive (typically it points into caps.modes). */
1277 private_output->current_mode = mode;
1278 private_output->need_set_target_info = 1;
1279 TDM_INFO("mode: %dx%d %dhz", mode->hdisplay, mode->vdisplay, mode->vrefresh);
1282 _pthread_mutex_unlock(&private_display->lock);
/*
 * Public API: return the currently cached display mode (may be NULL if
 * no mode has been set yet).
 */
1288 tdm_output_get_mode(tdm_output *output, const tdm_output_mode **mode)
1290 OUTPUT_FUNC_ENTRY();
1292 TDM_RETURN_VAL_IF_FAIL(mode != NULL, TDM_ERROR_INVALID_PARAMETER);
1294 _pthread_mutex_lock(&private_display->lock);
1296 *mode = private_output->current_mode;
1298 _pthread_mutex_unlock(&private_display->lock);
/*
 * Public API: set the DPMS state synchronously.
 * Extended (above-OFF) DPMS values require TDM_OUTPUT_CAPABILITY_EXTENDED_DPMS
 * and must not use the low 4 bits reserved for the standard modes.
 * Rejected while an async DPMS change is still pending.
 */
1304 tdm_output_set_dpms(tdm_output *output, tdm_output_dpms dpms_value)
1306 tdm_private_module *private_module;
1307 tdm_func_output *func_output;
1308 OUTPUT_FUNC_ENTRY();
/* Validate extended DPMS requests before taking the lock. */
1310 if (dpms_value > TDM_OUTPUT_DPMS_OFF) {
1311 if (dpms_value & TDM_OUTPUT_DPMS_DEFAULT_MASK) {
1312 TDM_ERR("Don't use the low-4bit for an extended DPMS mode: dpms_value(%x)", dpms_value);
1313 return TDM_ERROR_BAD_REQUEST;
1316 if (!(private_output->caps.capabilities & TDM_OUTPUT_CAPABILITY_EXTENDED_DPMS)) {
1317 TDM_ERR("output(%d) doesn't support the extended DPMS control: '%s'",
1318 private_output->pipe, tdm_dpms_str(dpms_value));
1319 return TDM_ERROR_BAD_REQUEST;
1323 _pthread_mutex_lock(&private_display->lock);
1325 if (private_output->waiting_dpms_change) {
1326 TDM_ERR("DPMS is not changed yet. Can't be changed twice");
1327 _pthread_mutex_unlock(&private_display->lock);
1328 return TDM_ERROR_BAD_REQUEST;
1331 private_module = private_output->private_module;
1332 func_output = &private_module->func_output;
1334 TDM_INFO("output(%d) dpms '%s'", private_output->pipe, tdm_dpms_str(dpms_value));
/* Backends without output_set_dpms are treated as succeeding (state is
 * tracked frontend-side only). */
1336 if (func_output->output_set_dpms)
1337 ret = func_output->output_set_dpms(private_output->output_backend, dpms_value);
1339 /* LCOV_EXCL_START */
1340 ret = TDM_ERROR_NONE;
1341 TDM_WRN("not implemented!!");
1343 /* LCOV_EXCL_STOP */
1347 if (ret == TDM_ERROR_NONE) {
/* Only fire the DPMS thread callback when the value actually changed. */
1348 if (private_output->current_dpms_value != dpms_value) {
1349 private_output->current_dpms_value = dpms_value;
1350 _tdm_output_call_thread_cb_dpms(private_output, dpms_value);
1351 TDM_INFO("output(%d) dpms '%s' done", private_output->pipe, tdm_dpms_str(dpms_value));
1354 tdm_output_dpms temp = TDM_OUTPUT_DPMS_OFF;
1356 /* update current_dpms_value forcely */
1357 tdm_output_get_dpms_internal(output, &temp);
1359 TDM_ERR("output(%d) set_dpms failed: dpms '%s'", private_output->pipe, tdm_dpms_str(temp));
1362 _pthread_mutex_unlock(&private_display->lock);
1367 /* LCOV_EXCL_START */
/*
 * Public API: request an asynchronous DPMS change.
 * Requires TDM_OUTPUT_CAPABILITY_ASYNC_DPMS plus backend support for both
 * output_set_dpms_handler and output_set_dpms_async. On success,
 * waiting_dpms_change blocks further DPMS requests until the backend
 * reports completion via tdm_output_cb_dpms.
 */
1369 tdm_output_set_dpms_async(tdm_output *output, tdm_output_dpms dpms_value)
1371 tdm_private_module *private_module;
1372 tdm_func_output *func_output;
1373 OUTPUT_FUNC_ENTRY();
1375 if (!(private_output->caps.capabilities & TDM_OUTPUT_CAPABILITY_ASYNC_DPMS)) {
1376 TDM_ERR("output doesn't support the asynchronous DPMS control!");
1377 return TDM_ERROR_BAD_REQUEST;
/* Same extended-DPMS validation as the synchronous path. */
1380 if (dpms_value > TDM_OUTPUT_DPMS_OFF) {
1381 if (dpms_value & TDM_OUTPUT_DPMS_DEFAULT_MASK) {
1382 TDM_ERR("Don't use the low-4bit for an extended DPMS mode: dpms_value(%x)", dpms_value);
1383 return TDM_ERROR_BAD_REQUEST;
1386 if (!(private_output->caps.capabilities & TDM_OUTPUT_CAPABILITY_EXTENDED_DPMS)) {
1387 TDM_ERR("output(%d) doesn't support the extended DPMS control: '%s'",
1388 private_output->pipe, tdm_dpms_str(dpms_value));
1389 return TDM_ERROR_BAD_REQUEST;
1393 _pthread_mutex_lock(&private_display->lock);
1395 if (private_output->waiting_dpms_change) {
1396 TDM_ERR("DPMS is not changed yet. Can't be changed twice");
1397 _pthread_mutex_unlock(&private_display->lock);
1398 return TDM_ERROR_BAD_REQUEST;
1401 private_module = private_output->private_module;
1402 func_output = &private_module->func_output;
1403 if (!func_output->output_set_dpms_handler) {
1404 TDM_WRN("not implemented: output_set_dpms_handler");
1405 _pthread_mutex_unlock(&private_display->lock);
1406 return TDM_ERROR_NOT_IMPLEMENTED;
1409 if (!func_output->output_set_dpms_async) {
1410 TDM_WRN("not implemented: output_set_dpms_async");
1411 _pthread_mutex_unlock(&private_display->lock);
1412 return TDM_ERROR_NOT_IMPLEMENTED;
/* Register the backend DPMS-done callback exactly once per output. */
1415 if (!private_output->regist_dpms_cb) {
1416 private_output->regist_dpms_cb = 1;
1417 ret = func_output->output_set_dpms_handler(private_output->output_backend,
1418 tdm_output_cb_dpms, private_output);
1419 if (ret != TDM_ERROR_NONE) {
1420 _pthread_mutex_unlock(&private_display->lock);
1421 TDM_ERR("Can't set the dpms handler!!");
1426 TDM_INFO("output(%d) dpms async '%s'", private_output->pipe, tdm_dpms_str(dpms_value));
1428 ret = func_output->output_set_dpms_async(private_output->output_backend, dpms_value);
1430 if (ret == TDM_ERROR_NONE) {
1431 private_output->waiting_dpms_change = 1;
1432 TDM_INFO("output(%d) dpms async '%s' waiting", private_output->pipe, tdm_dpms_str(dpms_value));
1434 tdm_output_dpms temp = TDM_OUTPUT_DPMS_OFF;
1436 /* update current_dpms_value forcely */
1437 tdm_output_get_dpms_internal(output, &temp);
1439 TDM_ERR("output(%d) set_dpms_async failed: dpms '%s'", private_output->pipe, tdm_dpms_str(temp));
1442 _pthread_mutex_unlock(&private_display->lock);
1446 /* LCOV_EXCL_STOP */
/*
 * Internal DPMS getter. Uses the frontend-cached value unless
 * commit_per_vblank is enabled, in which case the backend is queried and
 * any mismatch with the cache triggers a resync plus the DPMS callback.
 */
1449 tdm_output_get_dpms_internal(tdm_output *output, tdm_output_dpms *dpms_value)
1451 tdm_private_output *private_output;
1452 tdm_private_module *private_module;
1453 tdm_func_output *func_output;
1454 tdm_error ret = TDM_ERROR_NONE;
1456 TDM_RETURN_VAL_IF_FAIL(tdm_output_is_valid(output), TDM_ERROR_INVALID_PARAMETER);
1458 private_output = (tdm_private_output*)output;
1460 /* TODO: this is ugly. But before calling backend's output_get_dpms(), we have
1461 * to check if all backends's DPMS operation has no problem. In future, we'd
1462 * better use new env instead of using commit_per_vblank variable to distinguish
1463 * whether we use the stored value or backend's output_get_dpms.
1465 if (!private_output->commit_per_vblank) {
1466 *dpms_value = private_output->current_dpms_value;
1467 return TDM_ERROR_NONE;
1470 private_module = private_output->private_module;
1471 func_output = &private_module->func_output;
/* No backend getter: fall back to the cached value. */
1473 if (!func_output->output_get_dpms) {
1474 /* LCOV_EXCL_START */
1475 *dpms_value = private_output->current_dpms_value;
1476 TDM_WRN("not implemented!!");
1477 return TDM_ERROR_NONE;
1478 /* LCOV_EXCL_STOP */
1481 ret = func_output->output_get_dpms(private_output->output_backend, dpms_value);
1482 if (ret != TDM_ERROR_NONE) {
1483 /* LCOV_EXCL_START */
1484 TDM_ERR("output_get_dpms failed");
/* On backend failure report OFF as the conservative value. */
1485 *dpms_value = TDM_OUTPUT_DPMS_OFF;
1486 /* LCOV_EXCL_STOP */
1489 /* checking with backend's value */
1490 if (*dpms_value != private_output->current_dpms_value) {
1491 TDM_ERR("output(%d) dpms changed suddenly: %s -> %s",
1492 private_output->pipe, tdm_dpms_str(private_output->current_dpms_value),
1493 tdm_dpms_str(*dpms_value));
1494 private_output->current_dpms_value = *dpms_value;
1495 _tdm_output_call_thread_cb_dpms(private_output, *dpms_value);
/*
 * Public API: get the DPMS state. Locking wrapper around
 * tdm_output_get_dpms_internal().
 */
1502 tdm_output_get_dpms(tdm_output *output, tdm_output_dpms *dpms_value)
1504 OUTPUT_FUNC_ENTRY();
1506 TDM_RETURN_VAL_IF_FAIL(dpms_value != NULL, TDM_ERROR_INVALID_PARAMETER);
1508 _pthread_mutex_lock(&private_display->lock);
1510 ret = tdm_output_get_dpms_internal(output, dpms_value);
1512 _pthread_mutex_unlock(&private_display->lock);
/*
 * Public API: report whether this output's backend module can capture an
 * output (needs both the module-level CAPTURE capability and the capture
 * backend's OUTPUT capability).
 */
1518 tdm_output_has_capture_capability(tdm_output *output, unsigned int *has_capability)
1520 tdm_private_module *private_module;
1522 OUTPUT_FUNC_ENTRY();
1524 TDM_RETURN_VAL_IF_FAIL(has_capability != NULL, TDM_ERROR_INVALID_PARAMETER);
1526 _pthread_mutex_lock(&private_display->lock);
1528 private_module = private_output->private_module;
1530 if (!(private_module->capabilities & TDM_DISPLAY_CAPABILITY_CAPTURE))
1531 *has_capability = 0;
1532 else if (!(private_module->caps_capture.capabilities & TDM_CAPTURE_CAPABILITY_OUTPUT))
1533 *has_capability = 0;
1535 *has_capability = 1;
1537 _pthread_mutex_unlock(&private_display->lock);
/*
 * Public API: create a capture object bound to this output.
 * Returns NULL on failure with 'error' (if non-NULL) set by the internal call.
 */
1542 EXTERN tdm_capture *
1543 tdm_output_create_capture(tdm_output *output, tdm_error *error)
1545 tdm_capture *capture = NULL;
1547 OUTPUT_FUNC_ENTRY_ERROR();
1549 _pthread_mutex_lock(&private_display->lock);
1551 capture = (tdm_capture *)tdm_capture_create_output_internal(private_output, error);
1553 _pthread_mutex_unlock(&private_display->lock);
/*
 * Public API: create a (non-video) HWC window on an HWC-capable output.
 * Returns NULL with *error = TDM_ERROR_BAD_REQUEST when the output lacks
 * the HWC capability.
 */
1558 EXTERN tdm_hwc_window *
1559 tdm_output_hwc_create_window(tdm_output *output, tdm_error *error)
1561 tdm_hwc_window *hwc_window = NULL;
1563 OUTPUT_FUNC_ENTRY_ERROR();
1565 _pthread_mutex_lock(&private_display->lock);
/* The '0' argument selects a normal (non-video) window. */
1567 if (private_output->caps.capabilities & TDM_OUTPUT_CAPABILITY_HWC)
1568 hwc_window = (tdm_hwc_window *)tdm_hwc_window_create_internal(private_output, 0, error);
1570 /* LCOV_EXCL_START */
1571 TDM_ERR("output(%p) not support HWC", private_output);
1573 *error = TDM_ERROR_BAD_REQUEST;
1574 /* LCOV_EXCL_STOP */
1577 _pthread_mutex_unlock(&private_display->lock);
/*
 * Public API: create a video HWC window on an HWC-capable output.
 * Identical to tdm_output_hwc_create_window() except the video flag is set.
 */
1582 EXTERN tdm_hwc_window *
1583 tdm_output_hwc_create_video_window(tdm_output *output, tdm_error *error)
1585 tdm_hwc_window *hwc_window = NULL;
1587 OUTPUT_FUNC_ENTRY_ERROR();
1589 _pthread_mutex_lock(&private_display->lock);
/* The '1' argument selects a video window. */
1591 if (private_output->caps.capabilities & TDM_OUTPUT_CAPABILITY_HWC)
1592 hwc_window = (tdm_hwc_window *)tdm_hwc_window_create_internal(private_output, 1, error);
1594 /* LCOV_EXCL_START */
1595 TDM_ERR("output(%p) not support HWC", private_output);
1597 *error = TDM_ERROR_BAD_REQUEST;
1598 /* LCOV_EXCL_STOP */
1601 _pthread_mutex_unlock(&private_display->lock);
/*
 * Public API: destroy an HWC window. Locking wrapper around
 * tdm_hwc_window_destroy_internal().
 */
1607 tdm_output_hwc_destroy_window(tdm_output *output, tdm_hwc_window *hwc_window)
1609 OUTPUT_FUNC_ENTRY();
1611 TDM_RETURN_VAL_IF_FAIL(hwc_window != NULL, TDM_ERROR_INVALID_PARAMETER);
1613 _pthread_mutex_lock(&private_display->lock);
1615 ret = tdm_hwc_window_destroy_internal(hwc_window);
1617 _pthread_mutex_unlock(&private_display->lock);
/*
 * Public API: ask the backend to validate the given composited windows.
 * Translates frontend hwc_window handles into backend handles before the
 * call; 'num_types' receives the number of composition-type changes the
 * backend requests.
 */
1623 tdm_output_hwc_validate(tdm_output *output, tdm_hwc_window **composited_wnds,
1624 uint32_t num_wnds, uint32_t *num_types)
1626 tdm_private_module *private_module;
1627 tdm_func_output *func_output = NULL;
1628 tdm_private_hwc_window **composited_wnds_frontend = NULL;
1629 tdm_hwc_window **composited_wnds_backend = NULL;
1632 OUTPUT_FUNC_ENTRY();
1634 TDM_RETURN_VAL_IF_FAIL(num_types != NULL, TDM_ERROR_INVALID_PARAMETER);
1636 _pthread_mutex_lock(&private_display->lock);
1638 if (!(private_output->caps.capabilities & TDM_OUTPUT_CAPABILITY_HWC)) {
1639 TDM_ERR("output(%p) not support HWC", private_output);
1640 _pthread_mutex_unlock(&private_display->lock);
1641 return TDM_ERROR_BAD_REQUEST;
1644 private_module = private_output->private_module;
1645 func_output = &private_module->func_output;
1647 if (!func_output->output_hwc_validate) {
1648 /* LCOV_EXCL_START */
1649 _pthread_mutex_unlock(&private_display->lock);
1650 TDM_WRN("not implemented!!");
1651 return TDM_ERROR_NOT_IMPLEMENTED;
1652 /* LCOV_EXCL_STOP */
/* Empty window list: validate directly without building a backend array. */
1655 if (num_wnds == 0) {
1656 ret = func_output->output_hwc_validate(private_output->output_backend, NULL, 0, num_types);
1658 _pthread_mutex_unlock(&private_display->lock);
/* Temporary array of backend window handles, freed after the call. */
1663 composited_wnds_backend = calloc(num_wnds, sizeof(tdm_hwc_window *));
1664 if (!composited_wnds_backend) {
1665 /* LCOV_EXCL_START */
1666 _pthread_mutex_unlock(&private_display->lock);
1667 return TDM_ERROR_OUT_OF_MEMORY;
1668 /* LCOV_EXCL_STOP */
1671 composited_wnds_frontend = (tdm_private_hwc_window **)composited_wnds;
1673 for (i = 0; i < num_wnds; i++)
1674 composited_wnds_backend[i] = composited_wnds_frontend[i]->hwc_window_backend;
1676 ret = func_output->output_hwc_validate(private_output->output_backend, composited_wnds_backend, num_wnds, num_types);
1678 free(composited_wnds_backend);
1680 _pthread_mutex_unlock(&private_display->lock);
/*
 * Public API: register the single "need revalidate" callback for an
 * HWC-capable output. Re-registration is rejected.
 */
1686 tdm_output_hwc_set_need_validate_handler(tdm_output *output,
1687 tdm_output_need_validate_handler hndl)
1689 OUTPUT_FUNC_ENTRY();
1691 TDM_RETURN_VAL_IF_FAIL(hndl != NULL, TDM_ERROR_INVALID_PARAMETER);
1693 _pthread_mutex_lock(&private_display->lock);
1695 if (!(private_output->caps.capabilities & TDM_OUTPUT_CAPABILITY_HWC)) {
1696 TDM_ERR("output(%p) not support HWC", private_output);
1697 _pthread_mutex_unlock(&private_display->lock);
1698 return TDM_ERROR_BAD_REQUEST;
1701 /* there's no reason to allow this */
/* Only one handler may be set for the output's lifetime. */
1702 if (private_output->need_validate.hndl) {
1704 _pthread_mutex_unlock(&private_display->lock);
1705 return TDM_ERROR_OPERATION_FAILED;
1708 private_output->need_validate.hndl = hndl;
1710 _pthread_mutex_unlock(&private_display->lock);
/*
 * Public API: fetch the composition-type changes the backend requested in
 * the last validate. Backend window handles returned in 'hwc_window' are
 * translated in place back to frontend handles.
 */
1716 tdm_output_hwc_get_changed_composition_types(tdm_output *output,
1717 uint32_t *num_elements,
1718 tdm_hwc_window **hwc_window,
1719 tdm_hwc_window_composition *composition_types)
1721 tdm_private_module *private_module;
1722 tdm_func_output *func_output = NULL;
1723 tdm_private_hwc_window * private_hwc_window = NULL;
1726 OUTPUT_FUNC_ENTRY();
1728 TDM_RETURN_VAL_IF_FAIL(num_elements != NULL, TDM_ERROR_INVALID_PARAMETER);
1730 _pthread_mutex_lock(&private_display->lock);
1732 if (!(private_output->caps.capabilities & TDM_OUTPUT_CAPABILITY_HWC)) {
1733 TDM_ERR("output(%p) not support HWC", private_output);
1734 _pthread_mutex_unlock(&private_display->lock);
1735 return TDM_ERROR_BAD_REQUEST;
1738 private_module = private_output->private_module;
1739 func_output = &private_module->func_output;
1741 if (!func_output->output_hwc_get_changed_composition_types) {
1742 /* LCOV_EXCL_START */
1743 _pthread_mutex_unlock(&private_display->lock);
1744 TDM_WRN("not implemented!!");
1745 return TDM_ERROR_NOT_IMPLEMENTED;
1746 /* LCOV_EXCL_STOP */
1749 ret = func_output->output_hwc_get_changed_composition_types(private_output->output_backend,
1750 num_elements, hwc_window, composition_types)
1751 if (ret != TDM_ERROR_NONE) {
1752 /* LCOV_EXCL_START */
1753 _pthread_mutex_unlock(&private_display->lock);
1755 /* LCOV_EXCL_STOP */
/* Query-size-only mode: callers may pass NULL arrays to get the count. */
1758 if (hwc_window == NULL || composition_types == NULL) {
1759 _pthread_mutex_unlock(&private_display->lock);
1760 return TDM_ERROR_NONE;
/* Map each backend handle back to its frontend wrapper. */
1763 for (i = 0; i < *num_elements; i++) {
1765 private_hwc_window = _tdm_output_find_private_hwc_window(private_output, hwc_window[i]);
1767 if (private_hwc_window == NULL) {
1768 /* LCOV_EXCL_START */
1769 TDM_ERR("failed! This should never happen!");
/* Orphan backend window: destroy it to avoid leaking backend state. */
1770 func_output->output_hwc_destroy_window(private_output->output_backend, hwc_window[i]);
1772 _pthread_mutex_unlock(&private_display->lock);
1773 return TDM_ERROR_OPERATION_FAILED;
1774 /* LCOV_EXCL_STOP */
1777 hwc_window[i] = (tdm_hwc_window*)private_hwc_window;
1780 _pthread_mutex_unlock(&private_display->lock);
/*
 * Public API: accept the composition-type changes proposed by the last
 * validate, via the backend's output_hwc_accept_changes.
 */
1786 tdm_output_hwc_accept_changes(tdm_output *output)
1788 tdm_private_module *private_module;
1789 tdm_func_output *func_output = NULL;
1791 OUTPUT_FUNC_ENTRY();
1793 _pthread_mutex_lock(&private_display->lock);
1795 if (!(private_output->caps.capabilities & TDM_OUTPUT_CAPABILITY_HWC)) {
1796 TDM_ERR("output(%p) not support HWC", private_output);
1797 _pthread_mutex_unlock(&private_display->lock);
1798 return TDM_ERROR_BAD_REQUEST;
1801 private_module = private_output->private_module;
1802 func_output = &private_module->func_output;
/* NOTE(review): this guard checks output_hwc_validate but the function
 * called below is output_hwc_accept_changes — if a backend implements
 * validate without accept_changes this would call a NULL pointer.
 * The guard should probably test output_hwc_accept_changes; confirm. */
1804 if (!func_output->output_hwc_validate) {
1805 /* LCOV_EXCL_START */
1806 _pthread_mutex_unlock(&private_display->lock);
1807 TDM_WRN("not implemented!!");
1808 return TDM_ERROR_NOT_IMPLEMENTED;
1809 /* LCOV_EXCL_STOP */
1812 ret = func_output->output_hwc_accept_changes(private_output->output_backend);
1814 _pthread_mutex_unlock(&private_display->lock);
/*
 * Public API: return the tbm_surface_queue used for the HWC client target
 * buffer, or NULL on failure.
 */
1820 tdm_output_hwc_get_target_buffer_queue(tdm_output *output, tdm_error *error)
1822 tdm_private_module *private_module;
1823 tdm_func_output *func_output = NULL;
1824 tbm_surface_queue_h queue = NULL;
1826 OUTPUT_FUNC_ENTRY_ERROR();
1828 _pthread_mutex_lock(&private_display->lock);
1830 if (!(private_output->caps.capabilities & TDM_OUTPUT_CAPABILITY_HWC)) {
1831 TDM_ERR("output(%p) not support HWC", private_output);
1833 *error = TDM_ERROR_BAD_REQUEST;
1834 _pthread_mutex_unlock(&private_display->lock);
1838 private_module = private_output->private_module;
1839 func_output = &private_module->func_output;
/* NOTE(review): on this not-implemented path no *error assignment is
 * visible here — verify 'error' is set (e.g. TDM_ERROR_NOT_IMPLEMENTED)
 * before returning NULL. */
1841 if (!func_output->output_hwc_get_target_buffer_queue) {
1842 /* LCOV_EXCL_START */
1843 _pthread_mutex_unlock(&private_display->lock);
1844 TDM_WRN("not implemented!!");
1846 /* LCOV_EXCL_STOP */
/* Backend fills 'error' itself on this call. */
1849 queue = func_output->output_hwc_get_target_buffer_queue(private_output->output_backend, error);
1851 _pthread_mutex_unlock(&private_display->lock);
/*
 * Public API: hand the composited client target buffer (plus damage region)
 * to the backend. Optionally dumps the buffer when window dumping is on.
 */
1857 tdm_output_hwc_set_client_target_buffer(tdm_output *output, tbm_surface_h target_buffer, tdm_hwc_region damage)
1859 tdm_private_module *private_module;
1860 tdm_func_output *func_output = NULL;
1862 OUTPUT_FUNC_ENTRY();
1864 _pthread_mutex_lock(&private_display->lock);
1866 if (!(private_output->caps.capabilities & TDM_OUTPUT_CAPABILITY_HWC)) {
1867 TDM_ERR("output(%p) not support HWC", private_output);
1868 _pthread_mutex_unlock(&private_display->lock);
1869 return TDM_ERROR_BAD_REQUEST;
/* Debug aid: dump each incoming target buffer to tdm_debug_dump_dir. */
1872 if (tdm_debug_dump & TDM_DUMP_FLAG_WINDOW) {
1873 /* LCOV_EXCL_START */
1874 char str[TDM_PATH_LEN];
1876 snprintf(str, TDM_PATH_LEN, "target_window_%d_%03d",
1877 private_output->index, i++);
1878 tdm_helper_dump_buffer_str(target_buffer, tdm_debug_dump_dir, str);
1879 /* LCOV_EXCL_STOP */
1882 private_module = private_output->private_module;
1883 func_output = &private_module->func_output;
1885 if (!func_output->output_hwc_set_client_target_buffer) {
1886 /* LCOV_EXCL_START */
1887 _pthread_mutex_unlock(&private_display->lock);
1888 TDM_WRN("not implemented!!");
1889 return TDM_ERROR_NOT_IMPLEMENTED;
1890 /* LCOV_EXCL_STOP */
1893 ret = func_output->output_hwc_set_client_target_buffer(private_output->output_backend, target_buffer, damage);
1895 _pthread_mutex_unlock(&private_display->lock);
/*
 * Public API: clear the previously set HWC client target buffer in the
 * backend.
 */
1901 tdm_output_hwc_unset_client_target_buffer(tdm_output *output)
1903 tdm_private_module *private_module;
1904 tdm_func_output *func_output = NULL;
1906 OUTPUT_FUNC_ENTRY();
1908 _pthread_mutex_lock(&private_display->lock);
1910 if (!(private_output->caps.capabilities & TDM_OUTPUT_CAPABILITY_HWC)) {
1911 TDM_ERR("output(%p) not support HWC", private_output);
1912 _pthread_mutex_unlock(&private_display->lock);
1913 return TDM_ERROR_BAD_REQUEST;
1916 private_module = private_output->private_module;
1917 func_output = &private_module->func_output;
1919 if (!func_output->output_hwc_unset_client_target_buffer) {
1920 /* LCOV_EXCL_START */
1921 _pthread_mutex_unlock(&private_display->lock);
1922 TDM_ERR("not implemented!!");
1923 return TDM_ERROR_NOT_IMPLEMENTED;
1924 /* LCOV_EXCL_STOP */
1927 ret = func_output->output_hwc_unset_client_target_buffer(private_output->output_backend);
1929 _pthread_mutex_unlock(&private_display->lock);
/*
 * Layer-commit completion trampoline for the HWC client-target commit.
 * Unpacks the per-commit handler object allocated in
 * tdm_output_hwc_commit_client_target_buffer(), forwards the event to the
 * user callback, then frees the object (single-shot ownership).
 */
1935 _tdm_output_hwc_layer_commit_handler(tdm_layer *layer, unsigned int sequence,
1936 unsigned int tv_sec, unsigned int tv_usec,
1939 tdm_private_output_hwc_target_buffer_commit_handler *output_hwc_target_buffer_commit_handler = (tdm_private_output_hwc_target_buffer_commit_handler *)user_data;
1940 tdm_output_hwc_target_buffer_commit_handler func = output_hwc_target_buffer_commit_handler->func;
1941 tdm_output *output = (tdm_output *)output_hwc_target_buffer_commit_handler->private_output;
1942 void *data = output_hwc_target_buffer_commit_handler->user_data;
1944 func(output, sequence, tv_sec, tv_usec, data);
1946 free(output_hwc_target_buffer_commit_handler);
/*
 * Public API: commit the HWC client target buffer.
 * Resolves the backend-assigned layer for the client target, attaches the
 * current target buffer to that layer (or unsets it when NULL), refreshes
 * the layer geometry from the current mode when marked stale, then issues
 * a layer commit whose completion is forwarded through
 * _tdm_output_hwc_layer_commit_handler().
 */
1950 tdm_output_hwc_commit_client_target_buffer(tdm_output *output, tdm_output_hwc_target_buffer_commit_handler func, void *user_data)
1952 tdm_private_module *private_module;
1953 tdm_func_output *func_output;
1954 tdm_private_output_hwc_target_buffer_commit_handler *output_hwc_target_buffer_commit_handler;
1955 tdm_layer *layer = NULL;
1956 tdm_private_layer *private_layer;
1957 const tdm_output_mode *mode;
1958 tbm_surface_h buffer;
1960 OUTPUT_FUNC_ENTRY();
1962 _pthread_mutex_lock(&private_display->lock);
1964 if (!(private_output->caps.capabilities & TDM_OUTPUT_CAPABILITY_HWC)) {
1965 TDM_ERR("output(%p) not support HWC", private_output);
1966 _pthread_mutex_unlock(&private_display->lock);
1967 return TDM_ERROR_BAD_REQUEST;
1970 private_module = private_output->private_module;
1971 func_output = &private_module->func_output;
1973 if (!func_output->output_hwc_get_client_target_buffer_layer) {
1974 /* LCOV_EXCL_START */
1975 _pthread_mutex_unlock(&private_display->lock);
1976 TDM_ERR("not implemented!!");
1977 return TDM_ERROR_NOT_IMPLEMENTED;
1978 /* LCOV_EXCL_STOP */
/* Ask the backend which layer currently carries the client target. */
1981 layer = func_output->output_hwc_get_client_target_buffer_layer(private_output->output_backend,
1984 /* LCOV_EXCL_START */
1985 _pthread_mutex_unlock(&private_display->lock);
1986 TDM_ERR("no assigned layer!!");
1987 return TDM_ERROR_INVALID_PARAMETER;
1988 /* LCOV_EXCL_STOP */
1991 private_layer = (tdm_private_layer*)layer;
1993 if (!func_output->output_hwc_get_client_target_buffer) {
1994 /* LCOV_EXCL_START */
1995 _pthread_mutex_unlock(&private_display->lock);
1996 TDM_ERR("not implemented!!");
1997 return TDM_ERROR_NOT_IMPLEMENTED;
1998 /* LCOV_EXCL_STOP */
/* A NULL buffer means "unset" (handled by the branch below). */
2001 buffer = func_output->output_hwc_get_client_target_buffer(private_output->output_backend,
2004 ret = tdm_layer_set_buffer_internal(private_layer, buffer);
2006 ret = tdm_layer_unset_buffer_internal(private_layer);
2007 if (ret != TDM_ERROR_NONE) {
2008 /* LCOV_EXCL_START */
2009 TDM_ERR("failed: layer set info(window)");
2010 _pthread_mutex_unlock(&private_display->lock);
2011 /* LCOV_EXCL_STOP */
/* Rebuild full-screen layer geometry after a mode change. */
2015 if (private_output->need_set_target_info) {
2016 mode = private_output->current_mode;
2017 private_output->target_buffer_info.src_config.size.h = mode->hdisplay;
2018 private_output->target_buffer_info.src_config.size.v = mode->vdisplay;
2019 private_output->target_buffer_info.src_config.pos.x = 0;
2020 private_output->target_buffer_info.src_config.pos.y = 0;
2021 private_output->target_buffer_info.src_config.pos.w = mode->hdisplay;
2022 private_output->target_buffer_info.src_config.pos.h = mode->vdisplay;
2023 private_output->target_buffer_info.dst_pos.x = 0;
2024 private_output->target_buffer_info.dst_pos.y = 0;
2025 private_output->target_buffer_info.dst_pos.w = mode->hdisplay;
2026 private_output->target_buffer_info.dst_pos.h = mode->vdisplay;
2027 private_output->target_buffer_info.transform = TDM_TRANSFORM_NORMAL;
2029 ret = tdm_layer_set_info_internal(private_layer, &private_output->target_buffer_info);
2030 if (ret != TDM_ERROR_NONE) {
2031 /* LCOV_EXCL_START */
2032 TDM_ERR("failed: layer set info(window)");
2033 _pthread_mutex_unlock(&private_display->lock);
2034 /* LCOV_EXCL_STOP */
2038 private_output->need_set_target_info = 0;
/* Per-commit handler object; freed by the trampoline on completion or
 * here on commit failure. */
2041 output_hwc_target_buffer_commit_handler = calloc(1, sizeof(tdm_private_output_hwc_target_buffer_commit_handler));
2042 if (!output_hwc_target_buffer_commit_handler) {
2043 /* LCOV_EXCL_START */
2044 TDM_ERR("failed: alloc memory");
2045 _pthread_mutex_unlock(&private_display->lock);
2046 return TDM_ERROR_OUT_OF_MEMORY;
2047 /* LCOV_EXCL_STOP */
2050 output_hwc_target_buffer_commit_handler->private_output = private_output;
2051 output_hwc_target_buffer_commit_handler->func = func;
2052 output_hwc_target_buffer_commit_handler->user_data = user_data;
2054 ret = tdm_layer_commit_internal(private_layer, _tdm_output_hwc_layer_commit_handler, output_hwc_target_buffer_commit_handler);
2055 if (ret != TDM_ERROR_NONE) {
2056 /* LCOV_EXCL_START */
2057 TDM_ERR("failed: commit layer(target buffer)");
2058 free(output_hwc_target_buffer_commit_handler);
2059 _pthread_mutex_unlock(&private_display->lock);
2060 /* LCOV_EXCL_STOP */
2064 _pthread_mutex_unlock(&private_display->lock);
/*
 * Public API: query the tbm formats the backend supports for HWC video
 * windows. 'formats' receives a backend-owned array, 'count' its length.
 */
2070 tdm_output_hwc_get_video_supported_formats(tdm_output *output, const tbm_format **formats,
2073 tdm_private_module *private_module;
2074 tdm_func_output *func_output;
2075 OUTPUT_FUNC_ENTRY();
2077 TDM_RETURN_VAL_IF_FAIL(formats != NULL, TDM_ERROR_INVALID_PARAMETER);
2078 TDM_RETURN_VAL_IF_FAIL(count != NULL, TDM_ERROR_INVALID_PARAMETER);
2080 _pthread_mutex_lock(&private_display->lock);
2082 private_module = private_output->private_module;
2083 func_output = &private_module->func_output;
2085 if (!func_output->output_hwc_get_video_supported_formats) {
2086 /* LCOV_EXCL_START */
2087 _pthread_mutex_unlock(&private_display->lock);
2088 TDM_WRN("not implemented!!");
2089 return TDM_ERROR_NOT_IMPLEMENTED;
2090 /* LCOV_EXCL_STOP */
2093 ret = func_output->output_hwc_get_video_supported_formats(
2094 private_output->output_backend, formats, count);
2096 _pthread_mutex_unlock(&private_display->lock);
/*
 * Check whether 'private_output' is still present in its module's output
 * list (HWC-capable entries only). Used to guard against handling events
 * for outputs destroyed in the meantime.
 */
2102 _is_hwc_output_still_existed(tdm_private_output *private_output)
2104 tdm_private_module *private_module = private_output->private_module;
2105 tdm_private_output *o = NULL;
2107 LIST_FOR_EACH_ENTRY(o, &private_module->output_list, link) {
2108 if (!(o->caps.capabilities & TDM_OUTPUT_CAPABILITY_HWC))
2111 if (o == private_output)
2121 /* gets called on behalf of the ecore-main-loop thread */
/*
 * Deliver a "need revalidate" event to the user callback on the main loop
 * thread, after confirming the output still exists.
 */
2123 tdm_output_need_validate_handler_thread(tdm_private_display *private_display, void *object, tdm_thread_cb_base *cb_base, void *user_data)
2125 tdm_private_output *private_output = object;
2127 TDM_RETURN_IF_FAIL(private_output != NULL);
2129 _pthread_mutex_lock(&private_display->lock);
2131 /* as we get 'private_output' within an event, an output this 'private_output'
2132 * points to can be destroyed already */
2133 if (!_is_hwc_output_still_existed(private_output)) {
2134 _pthread_mutex_unlock(&private_display->lock);
/* NOTE(review): the handler below is read and invoked after releasing the
 * lock — presumably safe because it is set once and never cleared; verify. */
2138 _pthread_mutex_unlock(&private_display->lock);
2140 TDM_INFO("tdm-backend asks for revalidation for the output:%p.", private_output);
2142 if (private_output->need_validate.hndl)
2143 private_output->need_validate.hndl((tdm_output*)private_output);
2146 /* gets called on behalf of the tdm-thread */
/*
 * tdm-thread fd handler: drain the eventfd armed by the backend and fan the
 * event out as a TDM_THREAD_CB_NEED_VALIDATE callback.
 */
2148 _need_validate_handler(int fd, tdm_event_loop_mask mask, void *user_data)
2150 tdm_thread_cb_need_validate ev;
2151 tdm_private_output *private_output;
2155 private_output = (tdm_private_output *)user_data;
/* eventfd reads consume the counter; only the error case is fatal here. */
2157 if (read(private_output->need_validate.event_fd, &value, sizeof(value)) < 0) {
2158 TDM_ERR("error while trying to read from a need_validate.event_fd fd.");
2159 return TDM_ERROR_OPERATION_FAILED;
2162 memset(&ev, 0, sizeof ev);
2163 ev.base.type = TDM_THREAD_CB_NEED_VALIDATE;
2164 ev.base.length = sizeof ev;
2165 ev.base.object_stamp = private_output->stamp;
2166 ev.base.data = NULL;
2169 ret = tdm_thread_cb_call(private_output, &ev.base);
2170 TDM_WARNING_IF_FAIL(ret == TDM_ERROR_NONE);
2172 TDM_INFO("tdm-thread: get a 'need to revalidate' event for the ouptut:%p.", private_output);
2174 /* who cares about this? */
2175 return TDM_ERROR_NONE;
/*
 * Wire up the revalidation eventfd for an HWC output: register the fd with
 * the tdm-thread event loop and remember it on the output. Caller must hold
 * the display lock (asserted below).
 */
2179 tdm_output_need_validate_event_init(tdm_output *output)
2183 OUTPUT_FUNC_ENTRY();
2185 TDM_RETURN_VAL_IF_FAIL(TDM_MUTEX_IS_LOCKED(), TDM_ERROR_OPERATION_FAILED);
2187 if (!(private_output->caps.capabilities & TDM_OUTPUT_CAPABILITY_HWC)) {
2188 TDM_ERR("output(%p) not support HWC", private_output);
2189 return TDM_ERROR_BAD_REQUEST;
2192 /* build in eventfd fds into event_loop listened & handled by the tdm-thread */
2194 TDM_WARNING_IF_FAIL(fd >= 0);
2196 private_output->need_validate.event_source = tdm_event_loop_add_fd_handler(private_display,
2197 fd, TDM_EVENT_LOOP_READABLE, _need_validate_handler, private_output, &ret);
2198 TDM_WARNING_IF_FAIL(ret == TDM_ERROR_NONE);
2200 private_output->need_validate.event_fd = fd;
2202 TDM_INFO("register an output:%p for the revalidation, event_fd:%d.", private_output, fd);
/*
 * Select the commit-per-vblank mode for an output.
 *   0: disabled, 1: enabled with a single layer, 2: enabled keyed on the
 *   previous commit. Any other value is rejected.
 */
2208 tdm_output_choose_commit_per_vblank_mode(tdm_private_output *private_output, int mode)
2210 if (!private_output)
2211 return TDM_ERROR_INVALID_PARAMETER;
2213 if (mode < 0 || mode > 2)
2214 return TDM_ERROR_INVALID_PARAMETER;
2216 private_output->commit_per_vblank = mode;
2218 if (private_output->commit_per_vblank == 0)
2219 TDM_INFO("commit per vblank: disable");
2220 else if (private_output->commit_per_vblank == 1)
2221 TDM_INFO("commit per vblank: enable (1 layer)");
2222 else if (private_output->commit_per_vblank == 2)
2223 TDM_INFO("commit per vblank: enable (previous commit)");
2225 return TDM_ERROR_NONE;