shut up stupid emotion debug!
author    barbieri <barbieri>
Thu, 7 Oct 2010 22:16:43 +0000 (22:16 +0000)
committer barbieri <barbieri@7cbeb6ba-43b4-40fd-8cce-4c39aea84d33>
Thu, 7 Oct 2010 22:16:43 +0000 (22:16 +0000)
to get the debug messages back, set EINA_LOG_LEVELS for each module or use the catch-all:

   export EINA_LOG_LEVELS_GLOB='emotion*:4'
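
or per module, using the log domains registered in this commit (level 4 = debug):

   export EINA_LOG_LEVELS='emotion:4,emotion-gstreamer:4,emotion-xine:4'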

ah, that was making me sick while playing Enjoy... :-D

git-svn-id: http://svn.enlightenment.org/svn/e/trunk/emotion@53170 7cbeb6ba-43b4-40fd-8cce-4c39aea84d33

src/lib/emotion_smart.c
src/modules/gstreamer/emotion_gstreamer.c
src/modules/gstreamer/emotion_gstreamer.h
src/modules/gstreamer/emotion_gstreamer_pipeline.c
src/modules/gstreamer/emotion_gstreamer_pipeline_cdda.c
src/modules/gstreamer/emotion_gstreamer_pipeline_dvd.c
src/modules/xine/emotion_xine.c
src/modules/xine/emotion_xine.h
src/modules/xine/emotion_xine_vo_out.c

src/lib/emotion_smart.c
index 34b6329..0526bb1 100644
       if (strcmp(_e_smart_str, type)) return ret; \
    }
 
+#define DBG(...) EINA_LOG_DOM_DBG(_log_domain, __VA_ARGS__)
+#define INF(...) EINA_LOG_DOM_INFO(_log_domain, __VA_ARGS__)
+#define WRN(...) EINA_LOG_DOM_WARN(_log_domain, __VA_ARGS__)
+#define ERR(...) EINA_LOG_DOM_ERR(_log_domain, __VA_ARGS__)
+#define CRITICAL(...) EINA_LOG_DOM_CRIT(_log_domain, __VA_ARGS__)
+
 #define E_OBJ_NAME "emotion_object"
 
 typedef struct _Smart_Data Smart_Data;
@@ -88,6 +94,7 @@ static void _smart_clip_unset(Evas_Object * obj);
 static Evas_Smart  *smart = NULL;
 static Eina_Hash *_backends = NULL;
 static Eina_Array *_modules = NULL;
+static int _log_domain = -1;
 
 static const char *_backend_priority[] = {
   "xine",
@@ -160,7 +167,7 @@ _emotion_module_open(const char *name, Evas_Object *obj, Emotion_Video_Module **
    E_SMART_OBJ_GET_RETURN(sd, obj, E_OBJ_NAME, 0);
    if (!_backends)
      {
-       fprintf(stderr, "No backend loaded\n");
+        ERR("No backend loaded");
        return NULL;
      }
 
@@ -175,7 +182,7 @@ _emotion_module_open(const char *name, Evas_Object *obj, Emotion_Video_Module **
        if (index != 0 && index < (sizeof (_backend_priority) / sizeof (char*)))
          goto retry;
 
-       fprintf(stderr, "No backend loaded\n");
+       ERR("No backend loaded");
        return EINA_FALSE;
      }
 
@@ -191,7 +198,7 @@ _emotion_module_open(const char *name, Evas_Object *obj, Emotion_Video_Module **
    if (index != 0 && index < (sizeof (_backend_priority) / sizeof (char*)))
      goto retry;
 
-   fprintf (stderr, "Unable to load module %s\n", name);
+   ERR("Unable to load module: %s", name);
 
    return NULL;
 }
@@ -298,6 +305,7 @@ emotion_object_file_set(Evas_Object *obj, const char *file)
 
    E_SMART_OBJ_GET(sd, obj, E_OBJ_NAME);
 
+   DBG("file=%s", file);
    if (!sd->module) return;
 
    if ((file) && (sd->file) && (!strcmp(file, sd->file))) return;
@@ -344,6 +352,7 @@ emotion_object_play_set(Evas_Object *obj, Eina_Bool play)
    Smart_Data *sd;
 
    E_SMART_OBJ_GET(sd, obj, E_OBJ_NAME);
+   DBG("play=%hhu, was=%hhu", play, sd->play);
    if (play == sd->play) return;
    if (!sd->module) return;
    if (!sd->video) return;
@@ -369,6 +378,7 @@ emotion_object_position_set(Evas_Object *obj, double sec)
    Smart_Data *sd;
 
    E_SMART_OBJ_GET(sd, obj, E_OBJ_NAME);
+   DBG("sec=%f", sec);
    if (!sd->module) return;
    if (!sd->video) return;
    sd->seek_pos = sec;
@@ -513,6 +523,7 @@ emotion_object_audio_volume_set(Evas_Object *obj, double vol)
    Smart_Data *sd;
 
    E_SMART_OBJ_GET(sd, obj, E_OBJ_NAME);
+   DBG("vol=%f", vol);
    if (!sd->module) return;
    if (!sd->video) return;
    sd->module->audio_channel_volume_set(sd->video, vol);
@@ -535,6 +546,7 @@ emotion_object_audio_mute_set(Evas_Object *obj, Eina_Bool mute)
    Smart_Data *sd;
 
    E_SMART_OBJ_GET(sd, obj, E_OBJ_NAME);
+   DBG("mute=%hhu", mute);
    if (!sd->module) return;
    if (!sd->video) return;
    sd->module->audio_channel_mute_set(sd->video, mute);
@@ -579,6 +591,7 @@ emotion_object_audio_channel_set(Evas_Object *obj, int channel)
    Smart_Data *sd;
 
    E_SMART_OBJ_GET(sd, obj, E_OBJ_NAME);
+   DBG("channel=%d", channel);
    if (!sd->module) return;
    if (!sd->video) return;
    sd->module->audio_channel_set(sd->video, channel);
@@ -601,6 +614,7 @@ emotion_object_video_mute_set(Evas_Object *obj, Eina_Bool mute)
    Smart_Data *sd;
 
    E_SMART_OBJ_GET(sd, obj, E_OBJ_NAME);
+   DBG("mute=%hhu", mute);
    if (!sd->module) return;
    if (!sd->video) return;
    sd->module->video_channel_mute_set(sd->video, mute);
@@ -645,6 +659,7 @@ emotion_object_video_channel_set(Evas_Object *obj, int channel)
    Smart_Data *sd;
 
    E_SMART_OBJ_GET(sd, obj, E_OBJ_NAME);
+   DBG("channel=%d", channel);
    if (!sd->module) return;
    if (!sd->video) return;
    sd->module->video_channel_set(sd->video, channel);
@@ -667,6 +682,7 @@ emotion_object_spu_mute_set(Evas_Object *obj, Eina_Bool mute)
    Smart_Data *sd;
 
    E_SMART_OBJ_GET(sd, obj, E_OBJ_NAME);
+   DBG("mute=%hhu", mute);
    if (!sd->module) return;
    if (!sd->video) return;
    sd->module->spu_channel_mute_set(sd->video, mute);
@@ -711,6 +727,7 @@ emotion_object_spu_channel_set(Evas_Object *obj, int channel)
    Smart_Data *sd;
 
    E_SMART_OBJ_GET(sd, obj, E_OBJ_NAME);
+   DBG("channel=%d", channel);
    if (!sd->module) return;
    if (!sd->video) return;
    sd->module->spu_channel_set(sd->video, channel);
@@ -744,6 +761,7 @@ emotion_object_chapter_set(Evas_Object *obj, int chapter)
    Smart_Data *sd;
 
    E_SMART_OBJ_GET(sd, obj, E_OBJ_NAME);
+   DBG("chapter=%d", chapter);
    if (!sd->module) return;
    if (!sd->video) return;
    sd->module->chapter_set(sd->video, chapter);
@@ -777,6 +795,7 @@ emotion_object_play_speed_set(Evas_Object *obj, double speed)
    Smart_Data *sd;
 
    E_SMART_OBJ_GET(sd, obj, E_OBJ_NAME);
+   DBG("speed=%f", speed);
    if (!sd->module) return;
    if (!sd->video) return;
    sd->module->speed_set(sd->video, speed);
@@ -910,6 +929,7 @@ emotion_object_vis_set(Evas_Object *obj, Emotion_Vis visualization)
    Smart_Data *sd;
 
    E_SMART_OBJ_GET(sd, obj, E_OBJ_NAME);
+   DBG("visualization=%d", visualization);
    if (!sd->module) return;
    if (!sd->video) return;
    if (!sd->module->vis_set) return;
@@ -1315,6 +1335,14 @@ _smart_init(void)
      {
        eina_init();
 
+        _log_domain = eina_log_domain_register("emotion", EINA_COLOR_LIGHTCYAN);
+        if (_log_domain < 0)
+          {
+             EINA_LOG_CRIT("Could not register log domain 'emotion'");
+             eina_shutdown();
+             return;
+          }
+
        _backends = eina_hash_string_small_new(free);
 
        _modules = eina_module_list_get(NULL, PACKAGE_LIB_DIR "/emotion/", 0, NULL, NULL);
@@ -1333,8 +1361,8 @@ _smart_init(void)
 
        if (!_modules)
          {
-            fprintf(stderr, "No module found !\n");
-            return ;
+            ERR("No module found!");
+            return;
          }
 
        eina_module_list_load(_modules);
src/modules/gstreamer/emotion_gstreamer.c
index 7670f49..435e1ba 100644
@@ -8,6 +8,8 @@
 #include "emotion_gstreamer_pipeline.h"
 #include "Emotion.h"
 
+int _emotion_gstreamer_log_domain = -1;
+
 /* Callbacks to get the eos */
 static Eina_Bool  _eos_timer_fct   (void *data);
 static void _em_buffer_read(void *data, void *buffer, unsigned int nbyte);
@@ -345,10 +347,10 @@ em_file_open(const char   *file,
             device = NULL;
             sscanf(file, "cdda://%d", &track);
          }
-       fprintf(stderr, "[Emotion] [gst] build CD Audio pipeline\n");
+       DBG("Build CD Audio pipeline");
        if (!(emotion_pipeline_cdda_build(ev, device, track)))
          {
-            fprintf(stderr, "[Emotion] [gst] error while building CD Audio pipeline\n");
+            ERR("Could not build CD Audio pipeline");
             gst_object_unref(ev->pipeline);
             return 0;
          }
@@ -357,10 +359,10 @@ em_file_open(const char   *file,
    else if (strstr(file, "dvd://"))
      {
 
-       fprintf(stderr, "[Emotion] [gst] build DVD pipeline\n");
+       DBG("Build DVD pipeline");
        if (!(emotion_pipeline_dvd_build(ev, NULL)))
          {
-            fprintf(stderr, "[Emotion] [gst] error while building DVD pipeline\n");
+             ERR("Could not build DVD pipeline");
             gst_object_unref(ev->pipeline);
             return 0;
          }
@@ -368,10 +370,10 @@ em_file_open(const char   *file,
    /* http */
    else if (strstr(file, "http://"))
      {
-       fprintf(stderr, "[Emotion] [gst] build URI pipeline\n");
+       DBG("Build URI pipeline");
        if (!(emotion_pipeline_uri_build(ev, file)))
          {
-            fprintf(stderr, "[Emotion] [gst] error while building URI pipeline\n");
+            ERR("Could not build URI pipeline");
             gst_object_unref(ev->pipeline);
             return 0;
          }
@@ -379,10 +381,10 @@ em_file_open(const char   *file,
    /* v4l */
    else if (strstr(file, "v4l://"))
      {
-       fprintf(stderr, "[Emotion] [gst] build V4L pipeline\n");
+       DBG("Build V4L pipeline");
        if (!(emotion_pipeline_v4l_build(ev, file)))
          {
-            fprintf(stderr, "[Emotion] [gst] error while building V4L pipeline\n");
+            ERR("Could not build V4L pipeline");
             gst_object_unref(ev->pipeline);
             return 0;
          }
@@ -396,10 +398,10 @@ em_file_open(const char   *file,
                   ? file + strlen("file://")
                   : file;
 
-       fprintf(stderr, "[Emotion] [gst] build file pipeline\n");
+       DBG("Build file pipeline");
        if (!(emotion_pipeline_file_build(ev, filename)))
          {
-            fprintf(stderr, "[Emotion] [gst] error while building File pipeline\n");
+            ERR("Could not build File pipeline");
             gst_object_unref(ev->pipeline);
             return 0;
          }
@@ -415,22 +417,19 @@ em_file_open(const char   *file,
        vsink = (Emotion_Video_Sink *)eina_list_data_get(ev->video_sinks);
        if (vsink)
          {
-            fprintf(stderr, "video : \n");
-            fprintf(stderr, "  size   : %dx%d\n", vsink->width, vsink->height);
-            fprintf(stderr, "  fps    : %d/%d\n", vsink->fps_num, vsink->fps_den);
-            fprintf(stderr, "  fourcc : %" GST_FOURCC_FORMAT "\n", GST_FOURCC_ARGS(vsink->fourcc));
-            fprintf(stderr, "  length : %" GST_TIME_FORMAT "\n\n",
-                    GST_TIME_ARGS((guint64)(vsink->length_time * GST_SECOND)));
+             DBG("video size=%dx%d, fps=%d/%d, "
+                 "fourcc=%"GST_FOURCC_FORMAT", length=%"GST_TIME_FORMAT,
+                 vsink->width, vsink->height, vsink->fps_num, vsink->fps_den,
+                 GST_FOURCC_ARGS(vsink->fourcc),
+                 GST_TIME_ARGS((guint64)(vsink->length_time * GST_SECOND)));
          }
 
        asink = (Emotion_Audio_Sink *)eina_list_data_get(ev->audio_sinks);
        if (asink)
          {
-            fprintf(stderr, "audio : \n");
-            fprintf(stderr, "  chan   : %d\n", asink->channels);
-            fprintf(stderr, "  rate   : %d\n", asink->samplerate);
-            fprintf(stderr, "  length : %" GST_TIME_FORMAT "\n\n",
-                    GST_TIME_ARGS((guint64)(asink->length_time * GST_SECOND)));
+             DBG("audio channels=%d, rate=%d, length=%"GST_TIME_FORMAT,
+                 asink->channels, asink->samplerate,
+                 GST_TIME_ARGS((guint64)(asink->length_time * GST_SECOND)));
          }
      }
 
@@ -598,7 +597,7 @@ em_len_get(void *video)
 
    if (fmt != GST_FORMAT_TIME)
      {
-       fprintf(stderr, "requrested duration in time, but got %s instead.",
+       DBG("requrested duration in time, but got %s instead.",
                gst_format_get_name(fmt));
        goto fallback;
      }
@@ -681,8 +680,8 @@ em_pos_get(void *video)
 
    if (fmt != GST_FORMAT_TIME)
      {
-       fprintf(stderr, "requrested position in time, but got %s instead.",
-               gst_format_get_name(fmt));
+        ERR("requrested position in time, but got %s instead.",
+            gst_format_get_name(fmt));
        return ev->position;
      }
 
@@ -1199,6 +1198,19 @@ module_open(Evas_Object           *obj,
    if (!module)
      return EINA_FALSE;
 
+   if (_emotion_gstreamer_log_domain < 0)
+     {
+        eina_threads_init();
+        eina_log_threads_enable();
+        _emotion_gstreamer_log_domain = eina_log_domain_register
+          ("emotion-gstreamer", EINA_COLOR_LIGHTCYAN);
+        if (_emotion_gstreamer_log_domain < 0)
+          {
+             EINA_LOG_CRIT("Could not register log domain 'emotion-gstreamer'");
+             return EINA_FALSE;
+          }
+     }
+
    if (!em_module.init(obj, video, opt))
      return EINA_FALSE;
 
@@ -1404,7 +1416,7 @@ _eos_timer_fct(void *data)
                   gst_message_parse_error(msg, &err, &debug);
                   g_free(debug);
 
-                  g_print("Error: %s\n", err->message);
+                  ERR("Error: %s", err->message);
                   g_error_free(err);
 
                   break;
src/modules/gstreamer/emotion_gstreamer.h
index fa59353..4892525 100644
@@ -92,4 +92,11 @@ struct _Emotion_Gstreamer_Video
    unsigned char     audio_mute : 1;
 };
 
+extern int _emotion_gstreamer_log_domain;
+#define DBG(...) EINA_LOG_DOM_DBG(_emotion_gstreamer_log_domain, __VA_ARGS__)
+#define INF(...) EINA_LOG_DOM_INFO(_emotion_gstreamer_log_domain, __VA_ARGS__)
+#define WRN(...) EINA_LOG_DOM_WARN(_emotion_gstreamer_log_domain, __VA_ARGS__)
+#define ERR(...) EINA_LOG_DOM_ERR(_emotion_gstreamer_log_domain, __VA_ARGS__)
+#define CRITICAL(...) EINA_LOG_DOM_CRIT(_emotion_gstreamer_log_domain, __VA_ARGS__)
+
 #endif /* __EMOTION_GSTREAMER_H__ */
src/modules/gstreamer/emotion_gstreamer_pipeline.c
index 3411172..c71bd11 100644
@@ -14,14 +14,14 @@ emotion_pipeline_pause(GstElement *pipeline)
    res = gst_element_set_state((pipeline), GST_STATE_PAUSED);
    if (res == GST_STATE_CHANGE_FAILURE)
      {
-       g_print("Emotion-Gstreamer ERROR: could not pause\n");
+       ERR("could not pause");
        return 0;
      }
 
    res = gst_element_get_state((pipeline), NULL, NULL, GST_CLOCK_TIME_NONE);
    if (res != GST_STATE_CHANGE_SUCCESS)
      {
-       g_print("Emotion-Gstreamer ERROR: could not complete pause\n");
+       ERR("could not complete pause");
        return 0;
      }
 
@@ -487,7 +487,7 @@ emotion_streams_sinks_get(Emotion_Gstreamer_Video *ev, GstElement *decoder)
 
        caps = gst_pad_get_caps(pad);
        str = gst_caps_to_string(caps);
-       g_print("caps !! %s\n", str);
+       DBG("caps %s", str);
 
        /* video stream */
        if (g_str_has_prefix(str, "video/"))
src/modules/gstreamer/emotion_gstreamer_pipeline_cdda.c
index e0bec27..3bd9db1 100644
@@ -21,7 +21,7 @@ emotion_pipeline_cdda_build(void *video, const char * device, unsigned int track
    cdiocddasrc = gst_element_factory_make("cdiocddasrc", "src");
    if (!cdiocddasrc)
      {
-       g_print("cdiocddasrc element missing. Install it.\n");
+       ERR("cdiocddasrc gstreamer element missing. Install it.");
        goto failure_cdiocddasrc;
      }
 
src/modules/gstreamer/emotion_gstreamer_pipeline_dvd.c
index 738b93c..8f5f5a0 100644
@@ -9,7 +9,7 @@ static void dvd_pad_added_cb    (GstElement *dvddemuxer,
 static void dvd_no_more_pads_cb (GstElement *dvddemuxer,
                                  gpointer    user_data);
 
-static int no_more_pads = 0;
+static volatile int no_more_pads = 0;
 
 
 int
@@ -47,7 +47,7 @@ emotion_pipeline_dvd_build(void *video, const char *device)
 
    while (no_more_pads == 0)
      {
-       g_print("toto\n");
+       DBG("toto");
      }
    no_more_pads = 0;
 
@@ -70,7 +70,7 @@ emotion_pipeline_dvd_build(void *video, const char *device)
 
             caps = gst_pad_get_caps(pad);
             str = gst_caps_to_string(caps);
-            g_print("caps !! %s\n", str);
+            DBG("caps %s", str);
             /* video stream */
             if (g_str_has_prefix(str, "video/mpeg"))
               {
@@ -83,7 +83,7 @@ emotion_pipeline_dvd_build(void *video, const char *device)
                  sink_pad = gst_element_get_pad(gst_bin_get_by_name(GST_BIN(ev->pipeline), "mpeg2dec"), "src");
                  sink_caps = gst_pad_get_caps(sink_pad);
                  str = gst_caps_to_string(sink_caps);
-                 g_print(" ** caps v !! %s\n", str);
+                 DBG("caps video %s", str);
 
                  emotion_video_sink_fill(vsink, sink_pad, sink_caps);
 
src/modules/xine/emotion_xine.c
index 0afbde7..874270f 100644
@@ -2,6 +2,8 @@
 #include "emotion_private.h"
 #include "emotion_xine.h"
 
+int _emotion_xine_log_domain = -1;
+
 /* module api */
 static unsigned char  em_init                    (Evas_Object *obj, void **emotion_video, Emotion_Module_Options *opt);
 static int            em_shutdown                (void *ef);
@@ -112,11 +114,11 @@ _em_slave(void *par)
                                 xine_config_update_entry(ev->decoder, &cf);
                              }
                         }
-                      printf("OPEN VIDEO PLUGIN...\n");
+                      DBG("OPEN VIDEO PLUGIN...");
                       if (!ev->opt_no_video)
                         ev->video = xine_open_video_driver(ev->decoder, "emotion",
                                                            XINE_VISUAL_TYPE_NONE, ev);
-                      printf("RESULT: xine_open_video_driver() = %p\n", ev->video);
+                      DBG("RESULT: xine_open_video_driver() = %p", ev->video);
                       // Let xine autodetect the best audio output driver
                       if (!ev->opt_no_audio)
                         ev->audio = xine_open_audio_driver(ev->decoder, NULL, ev);
@@ -136,31 +138,31 @@ _em_slave(void *par)
                case 3: /* shutdown */
                    {
                       _em_module_event(ev, 3);
-                      printf("EX shutdown stop\n");
+                      DBG("shutdown stop");
                       xine_stop(ev->stream);
                       //   pthread_mutex_lock(&(ev->get_pos_len_mutex));
                       if (!ev->get_pos_thread_deleted)
                         {
-                           printf("closing get_pos thread, %p\n", ev);
+                           DBG("closing get_pos thread, %p", ev);
                            pthread_mutex_lock(&(ev->get_pos_len_mutex));
                            pthread_cond_broadcast(&(ev->get_pos_len_cond));
                            pthread_mutex_unlock(&(ev->get_pos_len_mutex));
                            while (ev->get_poslen);
                         }
-                      printf("EX dispose %p\n", ev);
+                      DBG("dispose %p", ev);
                       xine_dispose(ev->stream);
-                      printf("EX dispose evq %p\n", ev);
+                      DBG("dispose evq %p", ev);
                       xine_event_dispose_queue(ev->queue);
-                      printf("EX close video drv %p\n", ev);
+                      DBG("close video drv %p", ev);
                       if (ev->video) xine_close_video_driver(ev->decoder, ev->video);
-                      printf("EX wait for vo to go\n");
+                      DBG("wait for vo to go");
                       while (ev->have_vo);
-                      printf("EX vo gone\n");
-                      printf("EX close audio drv %p\n", ev);
+                      DBG("vo gone");
+                      DBG("close audio drv %p", ev);
                       if (ev->audio) xine_close_audio_driver(ev->decoder, ev->audio);
-                      printf("EX xine exit %p\n", ev);
+                      DBG("xine exit %p", ev);
                       xine_exit(ev->decoder);
-                      printf("EX DONE %p\n", ev);
+                      DBG("DONE %p", ev);
                       close(ev->fd_write);
                       close(ev->fd_read);
                       close(ev->fd_ev_write);
@@ -183,7 +185,7 @@ _em_slave(void *par)
                       char *file;
                       
                       file = eev->xine_event;
-                      printf("OPN STREAM %s\n", file);
+                      DBG("OPEN STREAM %s", file);
                       if (xine_open(ev->stream, file))
                         {
                            if (xine_get_pos_length(ev->stream, &pos_stream, &pos_time, &length_time))
@@ -222,13 +224,13 @@ _em_slave(void *par)
                  break;
                case 11: /* file close */
                    {
-                      printf("EX done %p\n", ev);
+                      DBG("done %p", ev);
                       em_frame_done(ev); 
-                      printf("EX stop %p\n", ev);
+                      DBG("stop %p", ev);
                       xine_stop(ev->stream);
-                      printf("EX close %p\n", ev);
+                      DBG("close %p", ev);
                       xine_close(ev->stream);
-                      printf("EX close done %p\n", ev);
+                      DBG("close done %p", ev);
                       _em_module_event(ev, 11);
                    }
                  break;
@@ -428,7 +430,7 @@ em_shutdown(void *ef)
    ev = (Emotion_Xine_Video *)ef;
    ev->closing = 1;
    ev->delete_me = 1;
-   printf("EXM del fds %p\n", ev);
+   DBG("del fds %p", ev);
    ecore_main_fd_handler_del(ev->fd_handler);
    ev->fd_handler = NULL;
    ecore_main_fd_handler_del(ev->fd_ev_handler);
@@ -441,7 +443,7 @@ em_shutdown(void *ef)
    
    ev->closing = 1;
    _em_slave_event(ev, 3, NULL);
-   printf("EXM done %p\n", ev);
+   DBG("done %p", ev);
    return 1;
 }
 
@@ -1367,7 +1369,7 @@ _em_fd_ev_active(void *data, Ecore_Fd_Handler *fdh)
                       break;
                     case XINE_EVENT_UI_MESSAGE:
                         {
-                           printf("EV: UI Message [FIXME: break this out to emotion api]\n");
+                           WRN("UI Message [FIXME: break this out to emotion api]");
                            // e->type = error type(XINE_MSG_NO_ERROR, XINE_MSG_GENERAL_WARNING, XINE_MSG_UNKNOWN_HOST etc.)
                            // e->messages is a list of messages DOUBLE null terminated
                         }
@@ -1375,7 +1377,7 @@ _em_fd_ev_active(void *data, Ecore_Fd_Handler *fdh)
                     case XINE_EVENT_AUDIO_LEVEL:
                         {
                            _emotion_audio_level_change(ev->obj);
-                           printf("EV: Audio Level [FIXME: break this out to emotion api]\n");
+                           WRN("Audio Level [FIXME: break this out to emotion api]");
                            // e->left (0->100) 
                            // e->right
                            // e->mute
@@ -1386,7 +1388,7 @@ _em_fd_ev_active(void *data, Ecore_Fd_Handler *fdh)
                            xine_progress_data_t *e;
                            
                            e = (xine_progress_data_t *)eev->xine_event;
-                           printf("PROGRESS: %i\n", e->percent);
+                           DBG("PROGRESS: %i", e->percent);
                            _emotion_progress_set(ev->obj, (char *)e->description, (double)e->percent / 100.0);
                         }
                       break;
@@ -1422,13 +1424,13 @@ _em_fd_ev_active(void *data, Ecore_Fd_Handler *fdh)
                            xine_dropped_frames_t *e;
                            
                            e = (xine_dropped_frames_t *)eev->xine_event;
-                           printf("EV: Dropped Frames (skipped %i) (discarded %i) [FIXME: break this out to the emotion api]\n", e->skipped_frames, e->discarded_frames);
+                           WRN("Dropped Frames (skipped %i) (discarded %i) [FIXME: break this out to the emotion api]", e->skipped_frames, e->discarded_frames);
                            // e->skipped_frames = % frames skipped * 10
                            // e->discarded_frames = % frames skipped * 10
                         }
                       break;
                     default:
-                      // printf("EV: unknown event type %i\n", eev->type);
+                      // DBG("unknown event type %i", eev->type);
                       break;
                    }
               }
@@ -1474,7 +1476,7 @@ _em_get_pos_len_th(void *par)
               }
             ev->get_poslen = 0;
              _em_module_event(ev, 15); /* event - getpos done */
-            //printf("get pos %3.3f\n", ev->pos);
+            //DBG("get pos %3.3f", ev->pos);
          }
        if (ev->delete_me)
          {
@@ -1563,6 +1565,19 @@ module_open(Evas_Object *obj, const Emotion_Video_Module **module, void **video,
    if (!module)
       return EINA_FALSE;
 
+   if (_emotion_xine_log_domain < 0)
+     {
+        eina_threads_init();
+        eina_log_threads_enable();
+        _emotion_xine_log_domain = eina_log_domain_register
+          ("emotion-xine", EINA_COLOR_LIGHTCYAN);
+        if (_emotion_xine_log_domain < 0)
+          {
+             EINA_LOG_CRIT("Could not register log domain 'emotion-xine'");
+             return EINA_FALSE;
+          }
+     }
+
    if (!em_module.init(obj, video, opt))
       return EINA_FALSE;
 
@@ -1637,48 +1652,46 @@ em_debug(Emotion_Xine_Video *ev)
    spu_channel = xine_get_param(ev->stream, XINE_PARAM_SPU_CHANNEL);
    video_ratio = xine_get_stream_info(ev->stream, XINE_STREAM_INFO_VIDEO_RATIO);
    audio_mode = xine_get_stream_info(ev->stream, XINE_STREAM_INFO_AUDIO_MODE);
-   printf("has_chapters = %i\n", has_chapters);
-   printf("max_spu = %i\n", max_spu);
-   printf("max_audio = %i\n", max_audio);
-   printf("video_channels = %i\n", video_channels);
-   printf("video_streams = %i\n", video_streams);
-   printf("video_seekable = %i\n", video_seekable);
-   printf("title = %s\n", title);
-   printf("comment = %s\n", comment);
-   printf("artist = %s\n", artist);
-   printf("genre = %s\n", genre);
-   printf("album = %s\n", album);
-   printf("year = %s\n", year);
-   printf("cdindex_discid = %s\n", cdindex_discid);
-   printf("video_channel = %i\n", video_channel);
-   printf("audio_channel = %i\n", audio_channel);
-   printf("spu_channels = %i\n", spu_channel);
-   printf("video_ratio = %i\n", video_ratio);
-   printf("audio_mode = %i\n", audio_mode);
+   DBG("has_chapters = %i", has_chapters);
+   DBG("max_spu = %i", max_spu);
+   DBG("max_audio = %i", max_audio);
+   DBG("video_channels = %i", video_channels);
+   DBG("video_streams = %i", video_streams);
+   DBG("video_seekable = %i", video_seekable);
+   DBG("title = %s", title);
+   DBG("comment = %s", comment);
+   DBG("artist = %s", artist);
+   DBG("genre = %s", genre);
+   DBG("album = %s", album);
+   DBG("year = %s", year);
+   DBG("cdindex_discid = %s", cdindex_discid);
+   DBG("video_channel = %i", video_channel);
+   DBG("audio_channel = %i", audio_channel);
+   DBG("spu_channels = %i", spu_channel);
+   DBG("video_ratio = %i", video_ratio);
+   DBG("audio_mode = %i", audio_mode);
      {
        int i;
        
        for (i = 0; i <= max_audio; i++)
          {
             char lang[XINE_LANG_MAX + 1];
-            
+             char buf[128] = "NONE";
+
             lang[0] = 0;
-            printf("  AUDIO %i = ", i);
             if (xine_get_audio_lang(ev->stream, i, lang))
-              printf("%s\n", lang);
-            else
-              printf("NONE\n");
+              eina_strlcpy(buf, lang, sizeof(buf));
+            DBG("  AUDIO %i = %s", i, buf);
          }
        for (i = 0; i <= max_spu; i++)
          {
             char lang[XINE_LANG_MAX + 1];
-            
+             char buf[128] = "NONE";
+
             lang[0] = 0;
-            printf("  SPU %i = ", i);
             if (xine_get_spu_lang(ev->stream, i, lang))
-              printf("%s\n", lang);
-            else
-              printf("NONE\n");
+               eina_strlcpy(buf, lang, sizeof(buf));
+            DBG("  SPU %i = %s", i, buf);
          }
      }
 }
src/modules/xine/emotion_xine.h
index fa6c082..97aed72 100644
@@ -87,4 +87,11 @@ struct _Emotion_Xine_Event
    int   mtype;
 };
 
+extern int _emotion_xine_log_domain;
+#define DBG(...) EINA_LOG_DOM_DBG(_emotion_xine_log_domain, __VA_ARGS__)
+#define INF(...) EINA_LOG_DOM_INFO(_emotion_xine_log_domain, __VA_ARGS__)
+#define WRN(...) EINA_LOG_DOM_WARN(_emotion_xine_log_domain, __VA_ARGS__)
+#define ERR(...) EINA_LOG_DOM_ERR(_emotion_xine_log_domain, __VA_ARGS__)
+#define CRITICAL(...) EINA_LOG_DOM_CRIT(_emotion_xine_log_domain, __VA_ARGS__)
+
 #endif
src/modules/xine/emotion_xine_vo_out.c
index f8741bd..6ecb947 100644
@@ -115,7 +115,7 @@ _emotion_class_init(xine_t *xine, void *visual __UNUSED__)
 {
    Emotion_Class *cl;
    
-//   printf("emotion: _emotion_class_init()\n");
+//   DBG("");
    cl = (Emotion_Class *) malloc(sizeof(Emotion_Class));
    if (!cl) return NULL;
    cl->driver_class.open_plugin     = _emotion_open;
@@ -158,7 +158,7 @@ _emotion_open(video_driver_class_t *driver_class, const void *visual)
    
    cl = (Emotion_Class *)driver_class;
    /* visual here is the data ptr passed to xine_open_video_driver() */
-//   printf("emotion: _emotion_open()\n");
+//   DBG("");
    dv = (Emotion_Driver *)malloc(sizeof(Emotion_Driver));
    if (!dv) return NULL;
    
@@ -180,7 +180,7 @@ _emotion_open(video_driver_class_t *driver_class, const void *visual)
    dv->vo_driver.redraw_needed        = _emotion_redraw;
    dv->ev                             = (Emotion_Xine_Video *)visual;
    dv->ev->have_vo = 1;
-   printf("emotion: _emotion_open = %p\n", &dv->vo_driver);
+   DBG("vo_driver = %p", &dv->vo_driver);
    return &dv->vo_driver;
 }    
 
@@ -191,7 +191,7 @@ _emotion_dispose(vo_driver_t *vo_driver)
    
    dv = (Emotion_Driver *)vo_driver;
    dv->ev->have_vo = 0;
-   printf("emotion: _emotion_dispose(%p)\n", dv);
+   DBG("vo_driver = %p", dv);
    free(dv);
 }
 
@@ -199,7 +199,7 @@ _emotion_dispose(vo_driver_t *vo_driver)
 static int
 _emotion_redraw(vo_driver_t *vo_driver __UNUSED__)
 {
-//   printf("emotion: _emotion_redraw()\n");
+//   DBG("");
    return 0;
 }
 
@@ -207,7 +207,7 @@ _emotion_redraw(vo_driver_t *vo_driver __UNUSED__)
 static uint32_t
 _emotion_capabilities_get(vo_driver_t *vo_driver __UNUSED__)
 {
-//   printf("emotion: _emotion_capabilities_get()\n");
+//   DBG("");
    return VO_CAP_YV12 | VO_CAP_YUY2;
 }
 
@@ -215,7 +215,7 @@ _emotion_capabilities_get(vo_driver_t *vo_driver __UNUSED__)
 static int
 _emotion_gui_data_exchange(vo_driver_t *vo_driver __UNUSED__, int data_type, void *data __UNUSED__)
 {
-//   printf("emotion: _emotion_gui_data_exchange()\n");
+//   DBG("");
    switch (data_type)
      {
       case XINE_GUI_SEND_COMPLETION_EVENT:
@@ -243,13 +243,13 @@ _emotion_property_set(vo_driver_t *vo_driver, int property, int value)
    Emotion_Driver *dv;
    
    dv = (Emotion_Driver *)vo_driver;
-//   printf("emotion: _emotion_property_set()\n");
+//   DBG("");
    switch (property)
      {
       case VO_PROP_ASPECT_RATIO:
        if (value >= XINE_VO_ASPECT_NUM_RATIOS)
          value = XINE_VO_ASPECT_AUTO;
-//     printf("DRIVER RATIO SET %i!\n", value);
+//     DBG("DRIVER RATIO SET %i!", value);
        dv->ratio = value;
        break;
       default:
@@ -264,7 +264,7 @@ _emotion_property_get(vo_driver_t *vo_driver, int property)
    Emotion_Driver *dv;
    
    dv = (Emotion_Driver *)vo_driver;
-//   printf("emotion: _emotion_property_get()\n");
+//   DBG("");
    switch (property)
      {
       case VO_PROP_ASPECT_RATIO:
@@ -279,7 +279,7 @@ _emotion_property_get(vo_driver_t *vo_driver, int property)
 static void
 _emotion_property_min_max_get(vo_driver_t *vo_driver __UNUSED__, int property __UNUSED__, int *min, int *max)
 {
-//   printf("emotion: _emotion_property_min_max_get()\n");
+//   DBG("");
    *min = 0;
    *max = 0;
 }
@@ -290,7 +290,7 @@ _emotion_frame_alloc(vo_driver_t *vo_driver __UNUSED__)
 {
    Emotion_Frame *fr;
    
-//   printf("emotion: _emotion_frame_alloc()\n");
+//   DBG("");
    fr = (Emotion_Frame *)calloc(1, sizeof(Emotion_Frame));
    if (!fr) return NULL;
    
@@ -313,7 +313,7 @@ _emotion_frame_dispose(vo_frame_t *vo_frame)
    Emotion_Frame *fr;
    
    fr = (Emotion_Frame *)vo_frame;
-//   printf("emotion: _emotion_frame_dispose()\n");
+//   DBG("");
    _emotion_frame_data_free(fr);  
    free(fr);
 }
@@ -330,7 +330,7 @@ _emotion_frame_format_update(vo_driver_t *vo_driver, vo_frame_t *vo_frame, uint3
    if ((fr->width != width) ||  (fr->height != height) || 
        (fr->format != format) || (!fr->vo_frame.base[0]))
      {
-//     printf("emotion: _emotion_frame_format_update()\n");
+//   DBG("");
        _emotion_frame_data_free(fr);
        
        fr->width  = width;
@@ -417,8 +417,7 @@ _emotion_frame_display(vo_driver_t *vo_driver, vo_frame_t *vo_frame)
    
    dv = (Emotion_Driver *)vo_driver;
    fr = (Emotion_Frame *)vo_frame;
-//   printf("emotion: _emotion_frame_display()\n");
-//   printf("EX VO: fq %i %p\n", dv->ev->fq, dv->ev);
+//   DBG("fq %i %p", dv->ev->fq, dv->ev);
 // if my frame queue is too deep ( > 4 frames) simply block and wait for them
 // to drain
 //   while (dv->ev->fq > 4) usleep(1);
@@ -436,9 +435,9 @@ _emotion_frame_display(vo_driver_t *vo_driver, vo_frame_t *vo_frame)
        fr->frame.timestamp = (double)fr->vo_frame.vpts / 90000.0;
        fr->frame.done_func = _emotion_frame_data_unlock;
        fr->frame.done_data = fr;
-//     printf("FRAME FOR %p\n", dv->ev);
+//     DBG("FRAME FOR %p", dv->ev);
        write(dv->ev->fd_write, &buf, sizeof(void *));
-//     printf("-- FRAME DEC %p == %i\n", fr->frame.obj, ret);
+//     DBG("-- FRAME DEC %p == %i", fr->frame.obj, ret);
        fr->in_use = 1;
        dv->ev->fq++;
      }
@@ -449,7 +448,7 @@ _emotion_frame_display(vo_driver_t *vo_driver, vo_frame_t *vo_frame)
 static void
 _emotion_frame_field(vo_frame_t *vo_frame __UNUSED__, int which_field __UNUSED__)
 {
-//   printf("emotion: _emotion_frame_field()\n");
+//   DBG("");
 }
 
 /***************************************************************************/
@@ -476,7 +475,7 @@ _emotion_frame_data_free(Emotion_Frame *fr)
 static void
 _emotion_frame_data_unlock(Emotion_Frame *fr)
 {
-//   printf("emotion: _emotion_frame_data_unlock()\n");
+//   DBG("");
    if (fr->in_use)
      {
        fr->vo_frame.free(&fr->vo_frame);
@@ -488,13 +487,13 @@ _emotion_frame_data_unlock(Emotion_Frame *fr)
 static void
 _emotion_overlay_begin(vo_driver_t *vo_driver __UNUSED__, vo_frame_t *vo_frame __UNUSED__, int changed __UNUSED__)
 {
-//   printf("emotion: _emotion_overlay_begin()\n");
+//   DBG("");
 }
 
 static void
 _emotion_overlay_end(vo_driver_t *vo_driver __UNUSED__, vo_frame_t *vo_frame __UNUSED__)
 {
-//   printf("emotion: _emotion_overlay_end()\n");
+//   DBG("");
 }
 
 static void
@@ -503,7 +502,7 @@ _emotion_overlay_blend(vo_driver_t *vo_driver __UNUSED__, vo_frame_t *vo_frame,
    Emotion_Frame *fr;
    
    fr = (Emotion_Frame *)vo_frame;
-//   printf("emotion: _emotion_overlay_blend()\n");
+//   DBG("");
    _emotion_overlay_blend_yuv(fr->vo_frame.base, vo_overlay,
                              fr->width, fr->height, 
                              fr->vo_frame.pitches);