* Copyright (C) 2008 Ole André Vadla Ravnås <ole.andre.ravnas@tandberg.com>
* Copyright (C) 2013 Collabora Ltd.
* Author: Sebastian Dröge <sebastian.droege@collabora.co.uk>
+ * Copyright (C) 2018 Centricular Ltd.
+ * Author: Nirbheek Chauhan <nirbheek@centricular.com>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Library General Public
#include "gstwasapisink.h"
+#include <mmdeviceapi.h>
+
GST_DEBUG_CATEGORY_STATIC (gst_wasapi_sink_debug);
#define GST_CAT_DEFAULT gst_wasapi_sink_debug
GST_PAD_SINK,
GST_PAD_ALWAYS,
GST_STATIC_CAPS ("audio/x-raw, "
- "format = (string) S16LE, "
+ "format = (string) " GST_AUDIO_FORMATS_ALL ", "
"layout = (string) interleaved, "
- "rate = (int) 44100, " "channels = (int) 2"));
+ "rate = " GST_AUDIO_RATE_RANGE ", channels = (int) [1, 2]"));
+
+#define DEFAULT_ROLE GST_WASAPI_DEVICE_ROLE_CONSOLE
+#define DEFAULT_MUTE FALSE
+
/* Property IDs for wasapisink */
enum
{
  PROP_0,
  PROP_ROLE,    /* device role (console/multimedia/comms), mapped to an ERole */
  PROP_MUTE,    /* when TRUE, buffers are released with AUDCLNT_BUFFERFLAGS_SILENT */
  PROP_DEVICE   /* device GUID string; NULL selects the default device */
};
static void gst_wasapi_sink_dispose (GObject * object);
static void gst_wasapi_sink_finalize (GObject * object);
+static void gst_wasapi_sink_set_property (GObject * object, guint prop_id,
+ const GValue * value, GParamSpec * pspec);
+static void gst_wasapi_sink_get_property (GObject * object, guint prop_id,
+ GValue * value, GParamSpec * pspec);
static GstCaps *gst_wasapi_sink_get_caps (GstBaseSink * bsink,
GstCaps * filter);
+
static gboolean gst_wasapi_sink_prepare (GstAudioSink * asink,
GstAudioRingBufferSpec * spec);
static gboolean gst_wasapi_sink_unprepare (GstAudioSink * asink);
static guint gst_wasapi_sink_delay (GstAudioSink * asink);
static void gst_wasapi_sink_reset (GstAudioSink * asink);
+#define gst_wasapi_sink_parent_class parent_class
G_DEFINE_TYPE (GstWasapiSink, gst_wasapi_sink, GST_TYPE_AUDIO_SINK);
static void
gobject_class->dispose = gst_wasapi_sink_dispose;
gobject_class->finalize = gst_wasapi_sink_finalize;
+ gobject_class->set_property = gst_wasapi_sink_set_property;
+ gobject_class->get_property = gst_wasapi_sink_get_property;
+
+ g_object_class_install_property (gobject_class,
+ PROP_ROLE,
+ g_param_spec_enum ("role", "Role",
+ "Role of the device: communications, multimedia, etc",
+ GST_WASAPI_DEVICE_TYPE_ROLE, DEFAULT_ROLE, G_PARAM_READWRITE |
+ G_PARAM_STATIC_STRINGS | GST_PARAM_MUTABLE_READY));
+
+ g_object_class_install_property (gobject_class,
+ PROP_MUTE,
+ g_param_spec_boolean ("mute", "Mute", "Mute state of this stream",
+ DEFAULT_MUTE, G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS |
+ GST_PARAM_MUTABLE_PLAYING));
+
+ g_object_class_install_property (gobject_class,
+ PROP_DEVICE,
+ g_param_spec_string ("device", "Device",
+ "WASAPI playback device as a GUID string",
+ NULL, G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
gst_element_class_add_static_pad_template (gstelement_class, &sink_template);
gst_element_class_set_static_metadata (gstelement_class, "WasapiSrc",
G_OBJECT_CLASS (gst_wasapi_sink_parent_class)->finalize (object);
}
+static void
+gst_wasapi_sink_set_property (GObject * object, guint prop_id,
+ const GValue * value, GParamSpec * pspec)
+{
+ GstWasapiSink *self = GST_WASAPI_SINK (object);
+
+ switch (prop_id) {
+ case PROP_ROLE:
+ self->role = gst_wasapi_device_role_to_erole (g_value_get_enum (value));
+ break;
+ case PROP_MUTE:
+ self->mute = g_value_get_boolean (value);
+ break;
+ case PROP_DEVICE:
+ {
+ gchar *device = g_value_get_string (value);
+ g_free (self->device);
+ self->device =
+ device ? g_utf8_to_utf16 (device, 0, NULL, NULL, NULL) : NULL;
+ break;
+ }
+ default:
+ G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec);
+ break;
+ }
+}
+
+static void
+gst_wasapi_sink_get_property (GObject * object, guint prop_id,
+ GValue * value, GParamSpec * pspec)
+{
+ GstWasapiSink *self = GST_WASAPI_SINK (object);
+
+ switch (prop_id) {
+ case PROP_ROLE:
+ g_value_set_enum (value, gst_wasapi_erole_to_device_role (self->role));
+ break;
+ case PROP_MUTE:
+ g_value_set_boolean (value, self->mute);
+ break;
+ case PROP_DEVICE:
+ g_value_take_string (value, self->device ?
+ g_utf16_to_utf8 (self->device, 0, NULL, NULL, NULL) : NULL);
+ break;
+ default:
+ G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec);
+ break;
+ }
+}
+
/* GstBaseSink::get_caps implementation.
 * Probes the shared-mode mix format from the device (opening it on demand),
 * converts it to caps, and caches both. Returns NULL on failure.
 * The returned mix format stays owned by self->mix_format for prepare(). */
static GstCaps *
gst_wasapi_sink_get_caps (GstBaseSink * bsink, GstCaps * filter)
{
  GstWasapiSink *self = GST_WASAPI_SINK (bsink);
  WAVEFORMATEX *format = NULL;
  GstCaps *caps = NULL;
  HRESULT hr;

  GST_DEBUG_OBJECT (self, "entering get caps");

  if (self->cached_caps) {
    caps = gst_caps_ref (self->cached_caps);
  } else {
    GstCaps *template_caps;

    template_caps = gst_pad_get_pad_template_caps (bsink->sinkpad);

    if (!self->client)
      gst_wasapi_sink_open (GST_AUDIO_SINK (bsink));

    /* open() can fail (it already posted an error); don't deref NULL below */
    if (!self->client) {
      gst_caps_unref (template_caps);
      goto out;
    }

    hr = IAudioClient_GetMixFormat (self->client, &format);
    if (hr != S_OK || format == NULL) {
      GST_ELEMENT_ERROR (self, STREAM, FORMAT, (NULL),
          ("GetMixFormat failed: %s", gst_wasapi_util_hresult_to_string (hr)));
      gst_caps_unref (template_caps);
      goto out;
    }

    caps =
        gst_wasapi_util_waveformatex_to_caps ((WAVEFORMATEXTENSIBLE *) format,
        template_caps);
    /* template caps are no longer needed on any path past this point */
    gst_caps_unref (template_caps);
    if (caps == NULL) {
      GST_ELEMENT_ERROR (self, STREAM, FORMAT, (NULL), ("unknown format"));
      /* GetMixFormat allocates with CoTaskMemAlloc; don't leak it */
      CoTaskMemFree (format);
      goto out;
    }

    /* keep the mix format for IAudioClient_Initialize in prepare() */
    self->mix_format = format;
    gst_caps_replace (&self->cached_caps, caps);
  }

  if (filter) {
    GstCaps *filtered =
        gst_caps_intersect_full (filter, caps, GST_CAPS_INTERSECT_FIRST);
    gst_caps_unref (caps);
    caps = filtered;
  }

  GST_DEBUG_OBJECT (self, "returning caps %" GST_PTR_FORMAT, caps);

out:
  return caps;
}
static gboolean
gboolean res = FALSE;
IAudioClient *client = NULL;
- if (!gst_wasapi_util_get_default_device_client (GST_ELEMENT (self), FALSE,
- &client)) {
- GST_ELEMENT_ERROR (self, RESOURCE, OPEN_READ, (NULL),
- ("Failed to get default device"));
+ GST_DEBUG_OBJECT (self, "opening device");
+
+ if (self->client)
+ return TRUE;
+
+ if (!gst_wasapi_util_get_device_client (GST_ELEMENT (self), FALSE,
+ self->role, self->device, &client)) {
+ if (!self->device)
+ GST_ELEMENT_ERROR (self, RESOURCE, OPEN_READ, (NULL),
+ ("Failed to get default device"));
+ else
+ GST_ELEMENT_ERROR (self, RESOURCE, OPEN_READ, (NULL),
+ ("Failed to open device %S", self->device));
goto beach;
}
GstWasapiSink *self = GST_WASAPI_SINK (asink);
gboolean res = FALSE;
HRESULT hr;
- REFERENCE_TIME latency_rt, def_period, min_period;
- WAVEFORMATEXTENSIBLE format;
+ REFERENCE_TIME latency_rt;
IAudioRenderClient *render_client = NULL;
- hr = IAudioClient_GetDevicePeriod (self->client, &def_period, &min_period);
- if (hr != S_OK) {
- GST_ERROR_OBJECT (self, "IAudioClient::GetDevicePeriod () failed");
- goto beach;
- }
-
- gst_wasapi_util_audio_info_to_waveformatex (&spec->info, &format);
- self->info = spec->info;
-
hr = IAudioClient_Initialize (self->client, AUDCLNT_SHAREMODE_SHARED,
AUDCLNT_STREAMFLAGS_EVENTCALLBACK,
- spec->buffer_time / 100, 0, (WAVEFORMATEX *) & format, NULL);
+ spec->buffer_time * 10, 0, self->mix_format, NULL);
if (hr != S_OK) {
GST_ELEMENT_ERROR (self, RESOURCE, OPEN_READ, (NULL),
("IAudioClient::Initialize () failed: %s",
goto beach;
}
+ /* Get latency for logging */
hr = IAudioClient_GetStreamLatency (self->client, &latency_rt);
if (hr != S_OK) {
- GST_ERROR_OBJECT (self, "IAudioClient::GetStreamLatency () failed");
+ GST_ERROR_OBJECT (self, "IAudioClient::GetStreamLatency failed");
goto beach;
}
+ GST_INFO_OBJECT (self, "wasapi stream latency: %" G_GINT64_FORMAT " (%"
+ G_GINT64_FORMAT "ms)", latency_rt, latency_rt / 10000);
- GST_INFO_OBJECT (self, "default period: %d (%d ms), "
- "minimum period: %d (%d ms), "
- "latency: %d (%d ms)",
- (guint32) def_period, (guint32) def_period / 10000,
- (guint32) min_period, (guint32) min_period / 10000,
- (guint32) latency_rt, (guint32) latency_rt / 10000);
-
- /* FIXME: What to do with the latency? */
-
+ /* Set the event handler which will trigger writes */
hr = IAudioClient_SetEventHandle (self->client, self->event_handle);
if (hr != S_OK) {
- GST_ERROR_OBJECT (self, "IAudioClient::SetEventHandle () failed");
+ GST_ERROR_OBJECT (self, "IAudioClient::SetEventHandle failed");
+ goto beach;
+ }
+
+ /* Total size of the allocated buffer that we will write to
+ * XXX: Will this ever change while playing? */
+ hr = IAudioClient_GetBufferSize (self->client, &self->buffer_frame_count);
+ if (hr != S_OK) {
+ GST_ERROR_OBJECT (self, "IAudioClient::GetBufferSize failed");
goto beach;
}
+ GST_INFO_OBJECT (self, "frame count is %i, blockAlign is %i, "
+ "buffer_time is %" G_GINT64_FORMAT, self->buffer_frame_count,
+ self->mix_format->nBlockAlign, spec->buffer_time);
+ /* Get render sink client and start it up */
if (!gst_wasapi_util_get_render_client (GST_ELEMENT (self), self->client,
&render_client)) {
goto beach;
}
+ GST_INFO_OBJECT (self, "got render client");
+
hr = IAudioClient_Start (self->client);
if (hr != S_OK) {
GST_ERROR_OBJECT (self, "IAudioClient::Start failed");
GstWasapiSink *self = GST_WASAPI_SINK (asink);
HRESULT hr;
gint16 *dst = NULL;
- guint nsamples;
+ guint pending = length;
- nsamples = length / self->info.bpf;
+ while (pending > 0) {
+ guint have_frames, can_frames, n_frames, n_frames_padding, write_len;
- WaitForSingleObject (self->event_handle, INFINITE);
+ /* We have N frames to be written out */
+ have_frames = pending / (self->mix_format->nBlockAlign);
- hr = IAudioRenderClient_GetBuffer (self->render_client, nsamples,
- (BYTE **) & dst);
- if (hr != S_OK) {
- GST_ELEMENT_ERROR (self, RESOURCE, WRITE, (NULL),
- ("IAudioRenderClient::GetBuffer () failed: %s",
- gst_wasapi_util_hresult_to_string (hr)));
- length = 0;
- goto beach;
- }
+ WaitForSingleObject (self->event_handle, INFINITE);
- memcpy (dst, data, length);
+ /* Frames the card hasn't rendered yet */
+ hr = IAudioClient_GetCurrentPadding (self->client, &n_frames_padding);
+ if (hr != S_OK) {
+ GST_ERROR_OBJECT (self, "IAudioClient::GetCurrentPadding failed: %s",
+ gst_wasapi_util_hresult_to_string (hr));
+ length = 0;
+ goto beach;
+ }
- hr = IAudioRenderClient_ReleaseBuffer (self->render_client, nsamples, 0);
- if (hr != S_OK) {
- GST_ERROR_OBJECT (self, "IAudioRenderClient::ReleaseBuffer () failed: %s",
- gst_wasapi_util_hresult_to_string (hr));
- length = 0;
- goto beach;
+ /* We can write out these many frames */
+ can_frames = self->buffer_frame_count - n_frames_padding;
+
+ /* We will write out these many frames, and this much length */
+ n_frames = MIN (can_frames, have_frames);
+ write_len = n_frames * self->mix_format->nBlockAlign;
+
+ GST_TRACE_OBJECT (self, "total: %i, unread: %i, have: %i (%i bytes), "
+ "will write: %i (%i bytes)", self->buffer_frame_count, n_frames_padding,
+ have_frames, pending, n_frames, write_len);
+
+ hr = IAudioRenderClient_GetBuffer (self->render_client, n_frames,
+ (BYTE **) & dst);
+ if (hr != S_OK) {
+ GST_ELEMENT_ERROR (self, RESOURCE, WRITE, (NULL),
+ ("IAudioRenderClient::GetBuffer failed: %s",
+ gst_wasapi_util_hresult_to_string (hr)));
+ length = 0;
+ goto beach;
+ }
+
+ memcpy (dst, data, write_len);
+
+ hr = IAudioRenderClient_ReleaseBuffer (self->render_client, n_frames,
+ self->mute ? AUDCLNT_BUFFERFLAGS_SILENT : 0);
+ if (hr != S_OK) {
+ GST_ERROR_OBJECT (self, "IAudioRenderClient::ReleaseBuffer failed: %s",
+ gst_wasapi_util_hresult_to_string (hr));
+ length = 0;
+ goto beach;
+ }
+
+ pending -= write_len;
}
beach:
/* GstAudioSink::delay implementation: report how many frames are sitting in
 * the device buffer, i.e. written by us but not yet rendered. Returns 0 if
 * the query fails (an element error is posted in that case). */
static guint
gst_wasapi_sink_delay (GstAudioSink * asink)
{
  GstWasapiSink *self = GST_WASAPI_SINK (asink);
  HRESULT hr;
  guint frames_queued = 0;

  hr = IAudioClient_GetCurrentPadding (self->client, &frames_queued);
  if (hr != S_OK)
    GST_ELEMENT_ERROR (self, RESOURCE, READ, (NULL),
        ("IAudioClient::GetCurrentPadding failed %s",
            gst_wasapi_util_hresult_to_string (hr)));

  return frames_queued;
}
static void
#include "gstwasapiutil.h"
G_BEGIN_DECLS
-
#define GST_TYPE_WASAPI_SINK \
(gst_wasapi_sink_get_type ())
#define GST_WASAPI_SINK(obj) \
(G_TYPE_CHECK_INSTANCE_TYPE ((obj), GST_TYPE_WASAPI_SINK))
#define GST_IS_WASAPI_SINK_CLASS(klass) \
(G_TYPE_CHECK_CLASS_TYPE ((klass), GST_TYPE_WASAPI_SINK))
-
-typedef struct _GstWasapiSink GstWasapiSink;
+typedef struct _GstWasapiSink GstWasapiSink;
typedef struct _GstWasapiSinkClass GstWasapiSinkClass;
struct _GstWasapiSink
{
  GstAudioSink parent;

  /* WASAPI client for the selected render device; created in open() */
  IAudioClient *client;
  /* render service used to get/release the shared buffer in write() */
  IAudioRenderClient *render_client;
  /* event signalled by WASAPI when buffer space becomes available */
  HANDLE event_handle;

  /* Actual size of the allocated buffer */
  guint buffer_frame_count;
  /* The mix format that wasapi prefers in shared mode */
  WAVEFORMATEX *mix_format;
  /* The probed caps that we can accept */
  GstCaps *cached_caps;

  /* properties */
  gint role;        /* native ERole value (converted in set_property) */
  gboolean mute;    /* release buffers with AUDCLNT_BUFFERFLAGS_SILENT */
  wchar_t *device;  /* device GUID as UTF-16; NULL = default device */
};
struct _GstWasapiSinkClass
GType gst_wasapi_sink_get_type (void);
G_END_DECLS
-
#endif /* __GST_WASAPI_SINK_H__ */
-
/*
* Copyright (C) 2008 Ole André Vadla Ravnås <ole.andre.ravnas@tandberg.com>
+ * Copyright (C) 2018 Centricular Ltd.
+ * Author: Nirbheek Chauhan <nirbheek@centricular.com>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Library General Public
#include "gstwasapisrc.h"
+#include <mmdeviceapi.h>
+
GST_DEBUG_CATEGORY_STATIC (gst_wasapi_src_debug);
#define GST_CAT_DEFAULT gst_wasapi_src_debug
GST_PAD_SRC,
GST_PAD_ALWAYS,
GST_STATIC_CAPS ("audio/x-raw, "
- "format = (string) S16LE, "
+ "format = (string) " GST_AUDIO_FORMATS_ALL ", "
"layout = (string) interleaved, "
- "rate = (int) 44100, " "channels = (int) 1"));
+ "rate = " GST_AUDIO_RATE_RANGE ", channels = (int) [1, 2]"));
+
+#define DEFAULT_ROLE GST_WASAPI_DEVICE_ROLE_CONSOLE
+
/* Property IDs for wasapisrc */
enum
{
  PROP_0,
  PROP_ROLE,    /* device role (console/multimedia/comms), mapped to an ERole */
  PROP_DEVICE   /* device GUID string; NULL selects the default device */
};
static void gst_wasapi_src_dispose (GObject * object);
static void gst_wasapi_src_finalize (GObject * object);
+static void gst_wasapi_src_set_property (GObject * object, guint prop_id,
+ const GValue * value, GParamSpec * pspec);
+static void gst_wasapi_src_get_property (GObject * object, guint prop_id,
+ GValue * value, GParamSpec * pspec);
static GstCaps *gst_wasapi_src_get_caps (GstBaseSrc * bsrc, GstCaps * filter);
static GstClockTime gst_wasapi_src_get_time (GstClock * clock,
gpointer user_data);
+#define gst_wasapi_src_parent_class parent_class
G_DEFINE_TYPE (GstWasapiSrc, gst_wasapi_src, GST_TYPE_AUDIO_SRC);
static void
gobject_class->dispose = gst_wasapi_src_dispose;
gobject_class->finalize = gst_wasapi_src_finalize;
+ gobject_class->set_property = gst_wasapi_src_set_property;
+ gobject_class->get_property = gst_wasapi_src_get_property;
+
+ g_object_class_install_property (gobject_class,
+ PROP_ROLE,
+ g_param_spec_enum ("role", "Role",
+ "Role of the device: communications, multimedia, etc",
+ GST_WASAPI_DEVICE_TYPE_ROLE, DEFAULT_ROLE, G_PARAM_READWRITE |
+ G_PARAM_STATIC_STRINGS | GST_PARAM_MUTABLE_READY));
+
+ g_object_class_install_property (gobject_class,
+ PROP_DEVICE,
+ g_param_spec_string ("device", "Device",
+ "WASAPI playback device as a GUID string",
+ NULL, G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
gst_element_class_add_static_pad_template (gstelement_class, &src_template);
gst_element_class_set_static_metadata (gstelement_class, "WasapiSrc",
self->event_handle = NULL;
}
- G_OBJECT_CLASS (gst_wasapi_src_parent_class)->dispose (object);
+ G_OBJECT_CLASS (parent_class)->dispose (object);
}
static void
{
CoUninitialize ();
- G_OBJECT_CLASS (gst_wasapi_src_parent_class)->finalize (object);
+ G_OBJECT_CLASS (parent_class)->finalize (object);
+}
+
+static void
+gst_wasapi_src_set_property (GObject * object, guint prop_id,
+ const GValue * value, GParamSpec * pspec)
+{
+ GstWasapiSrc *self = GST_WASAPI_SRC (object);
+
+ switch (prop_id) {
+ case PROP_ROLE:
+ self->role = gst_wasapi_device_role_to_erole (g_value_get_enum (value));
+ break;
+ case PROP_DEVICE:
+ {
+ gchar *device = g_value_get_string (value);
+ g_free (self->device);
+ self->device =
+ device ? g_utf8_to_utf16 (device, 0, NULL, NULL, NULL) : NULL;
+ break;
+ }
+ default:
+ G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec);
+ break;
+ }
+}
+
+static void
+gst_wasapi_src_get_property (GObject * object, guint prop_id,
+ GValue * value, GParamSpec * pspec)
+{
+ GstWasapiSrc *self = GST_WASAPI_SRC (object);
+
+ switch (prop_id) {
+ case PROP_ROLE:
+ g_value_set_enum (value, gst_wasapi_erole_to_device_role (self->role));
+ break;
+ case PROP_DEVICE:
+ g_value_take_string (value, self->device ?
+ g_utf16_to_utf8 (self->device, 0, NULL, NULL, NULL) : NULL);
+ break;
+ default:
+ G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec);
+ break;
+ }
}
/* GstBaseSrc::get_caps implementation.
 * Probes the shared-mode mix format from the device (opening it on demand),
 * converts it to caps, and caches both. Returns NULL on failure.
 * The returned mix format stays owned by self->mix_format for prepare(). */
static GstCaps *
gst_wasapi_src_get_caps (GstBaseSrc * bsrc, GstCaps * filter)
{
  GstWasapiSrc *self = GST_WASAPI_SRC (bsrc);
  WAVEFORMATEX *format = NULL;
  GstCaps *caps = NULL;
  HRESULT hr;

  GST_DEBUG_OBJECT (self, "entering get caps");

  if (self->cached_caps) {
    caps = gst_caps_ref (self->cached_caps);
  } else {
    GstCaps *template_caps;

    template_caps = gst_pad_get_pad_template_caps (bsrc->srcpad);

    if (!self->client)
      gst_wasapi_src_open (GST_AUDIO_SRC (bsrc));

    /* open() can fail (it already posted an error); don't deref NULL below */
    if (!self->client) {
      gst_caps_unref (template_caps);
      goto out;
    }

    hr = IAudioClient_GetMixFormat (self->client, &format);
    if (hr != S_OK || format == NULL) {
      GST_ELEMENT_ERROR (self, STREAM, FORMAT, (NULL),
          ("GetMixFormat failed: %s", gst_wasapi_util_hresult_to_string (hr)));
      gst_caps_unref (template_caps);
      goto out;
    }

    caps =
        gst_wasapi_util_waveformatex_to_caps ((WAVEFORMATEXTENSIBLE *) format,
        template_caps);
    /* template caps are no longer needed on any path past this point */
    gst_caps_unref (template_caps);
    if (caps == NULL) {
      GST_ELEMENT_ERROR (self, STREAM, FORMAT, (NULL), ("unknown format"));
      /* GetMixFormat allocates with CoTaskMemAlloc; don't leak it */
      CoTaskMemFree (format);
      goto out;
    }

    /* keep the mix format for IAudioClient_Initialize in prepare() */
    self->mix_format = format;
    gst_caps_replace (&self->cached_caps, caps);
  }

  if (filter) {
    GstCaps *filtered =
        gst_caps_intersect_full (filter, caps, GST_CAPS_INTERSECT_FIRST);
    gst_caps_unref (caps);
    caps = filtered;
  }

  GST_DEBUG_OBJECT (self, "returning caps %" GST_PTR_FORMAT, caps);

out:
  return caps;
}
static gboolean
gboolean res = FALSE;
IAudioClient *client = NULL;
- if (!gst_wasapi_util_get_default_device_client (GST_ELEMENT (self), TRUE,
- &client)) {
- GST_ELEMENT_ERROR (self, RESOURCE, OPEN_READ, (NULL),
- ("Failed to get default device"));
+ if (self->client)
+ return TRUE;
+
+ if (!gst_wasapi_util_get_device_client (GST_ELEMENT (self), TRUE,
+ self->role, self->device, &client)) {
+ if (!self->device)
+ GST_ELEMENT_ERROR (self, RESOURCE, OPEN_READ, (NULL),
+ ("Failed to get default device"));
+ else
+ GST_ELEMENT_ERROR (self, RESOURCE, OPEN_READ, (NULL),
+ ("Failed to open device %S", self->device));
goto beach;
}
IAudioClock *client_clock = NULL;
guint64 client_clock_freq = 0;
IAudioCaptureClient *capture_client = NULL;
- REFERENCE_TIME latency_rt, def_period, min_period;
- WAVEFORMATEXTENSIBLE format;
+ REFERENCE_TIME latency_rt;
HRESULT hr;
- hr = IAudioClient_GetDevicePeriod (self->client, &def_period, &min_period);
- if (hr != S_OK) {
- GST_ERROR_OBJECT (self, "IAudioClient::GetDevicePeriod () failed");
- goto beach;
- }
-
- gst_wasapi_util_audio_info_to_waveformatex (&spec->info, &format);
- self->info = spec->info;
-
hr = IAudioClient_Initialize (self->client, AUDCLNT_SHAREMODE_SHARED,
- AUDCLNT_STREAMFLAGS_EVENTCALLBACK, spec->buffer_time / 100, 0,
- (WAVEFORMATEX *) & format, NULL);
+ AUDCLNT_STREAMFLAGS_EVENTCALLBACK, spec->buffer_time * 10, 0,
+ self->mix_format, NULL);
if (hr != S_OK) {
GST_ELEMENT_ERROR (self, RESOURCE, OPEN_READ, (NULL),
- ("IAudioClient::Initialize () failed: %s",
+ ("IAudioClient::Initialize failed: %s",
gst_wasapi_util_hresult_to_string (hr)));
goto beach;
}
+ /* Get latency for logging */
hr = IAudioClient_GetStreamLatency (self->client, &latency_rt);
if (hr != S_OK) {
- GST_ERROR_OBJECT (self, "IAudioClient::GetStreamLatency () failed");
+ GST_ERROR_OBJECT (self, "IAudioClient::GetStreamLatency failed");
goto beach;
}
+ GST_INFO_OBJECT (self, "wasapi stream latency: %" G_GINT64_FORMAT " (%"
+ G_GINT64_FORMAT " ms)", latency_rt, latency_rt / 10000);
- GST_INFO_OBJECT (self, "default period: %d (%d ms), "
- "minimum period: %d (%d ms), "
- "latency: %d (%d ms)",
- (guint32) def_period, (guint32) def_period / 10000,
- (guint32) min_period, (guint32) min_period / 10000,
- (guint32) latency_rt, (guint32) latency_rt / 10000);
-
- /* FIXME: What to do with the latency? */
-
+ /* Set the event handler which will trigger reads */
hr = IAudioClient_SetEventHandle (self->client, self->event_handle);
if (hr != S_OK) {
- GST_ERROR_OBJECT (self, "IAudioClient::SetEventHandle () failed");
+ GST_ERROR_OBJECT (self, "IAudioClient::SetEventHandle failed");
goto beach;
}
+ GST_INFO_OBJECT (self, "we got till here");
+
+ /* Get the clock and the clock freq */
if (!gst_wasapi_util_get_clock (GST_ELEMENT (self), self->client,
&client_clock)) {
goto beach;
hr = IAudioClock_GetFrequency (client_clock, &client_clock_freq);
if (hr != S_OK) {
- GST_ERROR_OBJECT (self, "IAudioClock::GetFrequency () failed");
+ GST_ERROR_OBJECT (self, "IAudioClock::GetFrequency failed");
goto beach;
}
+ /* Total size of the allocated buffer that we will read from
+ * XXX: Will this ever change while playing? */
+ hr = IAudioClient_GetBufferSize (self->client, &self->buffer_frame_count);
+ if (hr != S_OK) {
+ GST_ERROR_OBJECT (self, "IAudioClient::GetBufferSize failed");
+ goto beach;
+ }
+ GST_INFO_OBJECT (self, "frame count is %i, blockAlign is %i, "
+ "buffer_time is %" G_GINT64_FORMAT, self->buffer_frame_count,
+ self->mix_format->nBlockAlign, spec->buffer_time);
+
+ /* Get capture source client and start it up */
if (!gst_wasapi_util_get_capture_client (GST_ELEMENT (self), self->client,
&capture_client)) {
goto beach;
{
GstWasapiSrc *self = GST_WASAPI_SRC (asrc);
HRESULT hr;
- gint16 *samples = NULL;
- guint32 nsamples = 0, length_samples;
- DWORD flags = 0;
- guint64 devpos;
- guint i;
- gint16 *dst;
+ gint16 *from = NULL;
+ guint wanted = length;
+ DWORD flags;
- WaitForSingleObject (self->event_handle, INFINITE);
+ while (wanted > 0) {
+ guint have_frames, n_frames, want_frames, read_len;
+
+ /* Wait for data to become available */
+ WaitForSingleObject (self->event_handle, INFINITE);
- do {
hr = IAudioCaptureClient_GetBuffer (self->capture_client,
- (BYTE **) & samples, &nsamples, &flags, &devpos, NULL);
- }
- while (hr == AUDCLNT_S_BUFFER_EMPTY);
+ (BYTE **) & from, &have_frames, &flags, NULL, NULL);
+ if (hr != S_OK) {
+ GST_ERROR_OBJECT (self, "IAudioCaptureClient::GetBuffer () failed: %s",
+ gst_wasapi_util_hresult_to_string (hr));
+ length = 0;
+ goto beach;
+ }
- if (hr != S_OK) {
- GST_ERROR_OBJECT (self, "IAudioCaptureClient::GetBuffer () failed: %s",
- gst_wasapi_util_hresult_to_string (hr));
- length = 0;
- goto beach;
- }
+ if (flags != 0)
+ GST_INFO_OBJECT (self, "buffer flags=%#08x", (guint) flags);
- if (flags != 0) {
- GST_WARNING_OBJECT (self, "devpos %" G_GUINT64_FORMAT ": flags=0x%08x",
- devpos, (guint) flags);
- }
+ /* XXX: How do we handle AUDCLNT_BUFFERFLAGS_SILENT? We're supposed to write
+ * out silence when that flag is set? See:
+ * https://msdn.microsoft.com/en-us/library/windows/desktop/dd370800(v=vs.85).aspx */
- length_samples = length / self->info.bpf;
- nsamples = MIN (length_samples, nsamples);
- length = nsamples * self->info.bpf;
+ if (flags & AUDCLNT_BUFFERFLAGS_DATA_DISCONTINUITY)
+ GST_WARNING_OBJECT (self, "WASAPI reported glitch in buffer");
- dst = (gint16 *) data;
- for (i = 0; i < nsamples; i++) {
- *dst = *samples;
+ want_frames = wanted / self->mix_format->nBlockAlign;
- samples += 2;
- dst++;
- }
+ /* If GetBuffer is returning more frames than we can handle, all we can do is
+ * hope that this is temporary and that things will settle down later. */
+ if (G_UNLIKELY (have_frames > want_frames))
+ GST_WARNING_OBJECT (self, "captured too many frames: have %i, want %i",
+ have_frames, want_frames);
- hr = IAudioCaptureClient_ReleaseBuffer (self->capture_client, nsamples);
- if (hr != S_OK) {
- GST_ERROR_OBJECT (self, "IAudioCaptureClient::ReleaseBuffer () failed: %s",
- gst_wasapi_util_hresult_to_string (hr));
- goto beach;
+ /* Only copy data that will fit into the allocated buffer of size @length */
+ n_frames = MIN (have_frames, want_frames);
+ read_len = n_frames * self->mix_format->nBlockAlign;
+
+ {
+ guint bpf = self->mix_format->nBlockAlign;
+ GST_TRACE_OBJECT (self, "have: %i (%i bytes), can read: %i (%i bytes), "
+ "will read: %i (%i bytes)", have_frames, have_frames * bpf,
+ want_frames, wanted, n_frames, read_len);
+ }
+
+ memcpy (data, from, read_len);
+ wanted -= read_len;
+
+ /* Always release all captured buffers if we've captured any at all */
+ hr = IAudioCaptureClient_ReleaseBuffer (self->capture_client, have_frames);
+ if (hr != S_OK) {
+ GST_ERROR_OBJECT (self,
+ "IAudioCaptureClient::ReleaseBuffer () failed: %s",
+ gst_wasapi_util_hresult_to_string (hr));
+ goto beach;
+ }
}
+
beach:
return length;
/* GstAudioSrc::delay implementation: report how many captured frames are
 * buffered in the device but not yet read by us. Returns 0 if the query
 * fails (an element error is posted in that case). */
static guint
gst_wasapi_src_delay (GstAudioSrc * asrc)
{
  GstWasapiSrc *self = GST_WASAPI_SRC (asrc);
  HRESULT hr;
  guint frames_pending = 0;

  hr = IAudioClient_GetCurrentPadding (self->client, &frames_pending);
  if (hr != S_OK)
    GST_ELEMENT_ERROR (self, RESOURCE, READ, (NULL),
        ("IAudioClient::GetCurrentPadding failed %s",
            gst_wasapi_util_hresult_to_string (hr)));

  return frames_pending;
}
static void
#include "gstwasapiutil.h"
G_BEGIN_DECLS
-
#define GST_TYPE_WASAPI_SRC \
(gst_wasapi_src_get_type ())
#define GST_WASAPI_SRC(obj) \
(G_TYPE_CHECK_INSTANCE_TYPE ((obj), GST_TYPE_WASAPI_SRC))
#define GST_IS_WASAPI_SRC_CLASS(klass) \
(G_TYPE_CHECK_CLASS_TYPE ((klass), GST_TYPE_WASAPI_SRC))
-
-typedef struct _GstWasapiSrc GstWasapiSrc;
+typedef struct _GstWasapiSrc GstWasapiSrc;
typedef struct _GstWasapiSrcClass GstWasapiSrcClass;
struct _GstWasapiSrc
{
  GstAudioSrc parent;

  /* WASAPI client for the selected capture device; created in open() */
  IAudioClient *client;
  /* device clock, used to provide a GstClock */
  IAudioClock *client_clock;
  /* frequency of client_clock, from IAudioClock::GetFrequency */
  guint64 client_clock_freq;
  /* capture service used to get/release the shared buffer in read() */
  IAudioCaptureClient *capture_client;
  /* event signalled by WASAPI when captured data becomes available */
  HANDLE event_handle;

  /* Actual size of the allocated buffer */
  guint buffer_frame_count;
  /* The mix format that wasapi prefers in shared mode */
  WAVEFORMATEX *mix_format;
  /* The probed caps that we can accept */
  GstCaps *cached_caps;

  /* properties */
  gint role;        /* native ERole value (converted in set_property) */
  wchar_t *device;  /* device GUID as UTF-16; NULL = default device */
};
struct _GstWasapiSrcClass
GType gst_wasapi_src_get_type (void);
G_END_DECLS
-
#endif /* __GST_WASAPI_SRC_H__ */
-
/*
* Copyright (C) 2008 Ole André Vadla Ravnås <ole.andre.ravnas@tandberg.com>
+ * Copyright (C) 2018 Centricular Ltd.
+ * Author: Nirbheek Chauhan <nirbheek@centricular.com>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Library General Public
#include <mmdeviceapi.h>
-/* These seem to be missing in the Windows SDK... */
+#ifdef __uuidof
+const CLSID CLSID_MMDeviceEnumerator = __uuidof (MMDeviceEnumerator);
+const IID IID_IMMDeviceEnumerator = __uuidof (IMMDeviceEnumerator);
+const IID IID_IAudioClient = __uuidof (IAudioClient);
+const IID IID_IAudioRenderClient = __uuidof (IAudioRenderClient);
+const IID IID_IAudioCaptureClient = __uuidof (IAudioCaptureClient);
+const IID IID_IAudioClock = __uuidof (IAudioClock);
+#else
+/* __uuidof is not implemented in our Cerbero's ancient MinGW toolchain so we
+ * hard-code the GUID values for all these. This is ok because these are ABI. */
const CLSID CLSID_MMDeviceEnumerator = { 0xbcde0395, 0xe52f, 0x467c,
{0x8e, 0x3d, 0xc4, 0x57, 0x92, 0x91, 0x69, 0x2e}
};
const IID IID_IAudioRenderClient = { 0xf294acfc, 0x3146, 0x4483,
{0xa7, 0xbf, 0xad, 0xdc, 0xa7, 0xc2, 0x60, 0xe2}
};
+#endif
+
+GType
+gst_wasapi_device_role_get_type (void)
+{
+ static const GEnumValue values[] = {
+ {GST_WASAPI_DEVICE_ROLE_CONSOLE,
+ "Games, system notifications, voice commands", "console"},
+ {GST_WASAPI_DEVICE_ROLE_MULTIMEDIA, "Music, movies, recorded media",
+ "multimedia"},
+ {GST_WASAPI_DEVICE_ROLE_COMMS, "Voice communications", "comms"},
+ {0, NULL, NULL}
+ };
+ static volatile GType id = 0;
+
+ if (g_once_init_enter ((gsize *) & id)) {
+ GType _id;
+
+ _id = g_enum_register_static ("GstWasapiDeviceRole", values);
+
+ g_once_init_leave ((gsize *) & id, _id);
+ }
+
+ return id;
+}
+
+gint
+gst_wasapi_device_role_to_erole (gint role)
+{
+ switch (role) {
+ case GST_WASAPI_DEVICE_ROLE_CONSOLE:
+ return eConsole;
+ case GST_WASAPI_DEVICE_ROLE_MULTIMEDIA:
+ return eMultimedia;
+ case GST_WASAPI_DEVICE_ROLE_COMMS:
+ return eCommunications;
+ default:
+ g_assert_not_reached ();
+ }
+}
+
+gint
+gst_wasapi_erole_to_device_role (gint erole)
+{
+ switch (erole) {
+ case eConsole:
+ return GST_WASAPI_DEVICE_ROLE_CONSOLE;
+ case eMultimedia:
+ return GST_WASAPI_DEVICE_ROLE_MULTIMEDIA;
+ case eCommunications:
+ return GST_WASAPI_DEVICE_ROLE_COMMS;
+ default:
+ g_assert_not_reached ();
+ }
+}
/* Map a WASAPI/COM HRESULT to a printable error-code name for log messages.
 * Codes missing from this table yield "unknown error".
 * NOTE(review): this is a diff fragment — switch cases elided between hunks
 * are unchanged from the original file. */
const gchar *
gst_wasapi_util_hresult_to_string (HRESULT hr)
{
-  const gchar *s = "AUDCLNT_E_UNKNOWN";
+  const gchar *s = "unknown error";
  switch (hr) {
    case AUDCLNT_E_NOT_INITIALIZED:
    case AUDCLNT_E_UNSUPPORTED_FORMAT:
      s = "AUDCLNT_E_UNSUPPORTED_FORMAT";
      break;
+    case AUDCLNT_E_INVALID_DEVICE_PERIOD:
+      s = "AUDCLNT_E_INVALID_DEVICE_PERIOD";
+      break;
    case AUDCLNT_E_INVALID_SIZE:
      s = "AUDCLNT_E_INVALID_SIZE";
      break;
    case AUDCLNT_E_BUFFER_OPERATION_PENDING:
      s = "AUDCLNT_E_BUFFER_OPERATION_PENDING";
      break;
+    case AUDCLNT_E_BUFFER_SIZE_ERROR:
+      s = "AUDCLNT_E_BUFFER_SIZE_ERROR";
+      break;
+    case AUDCLNT_E_BUFFER_SIZE_NOT_ALIGNED:
+      s = "AUDCLNT_E_BUFFER_SIZE_NOT_ALIGNED";
+      break;
    case AUDCLNT_E_THREAD_NOT_REGISTERED:
      s = "AUDCLNT_E_THREAD_NOT_REGISTERED";
      break;
    case AUDCLNT_E_INCORRECT_BUFFER_SIZE:
      s = "AUDCLNT_E_INCORRECT_BUFFER_SIZE";
      break;
-    case AUDCLNT_E_BUFFER_SIZE_ERROR:
-      s = "AUDCLNT_E_BUFFER_SIZE_ERROR";
-      break;
    case AUDCLNT_E_CPUUSAGE_EXCEEDED:
      s = "AUDCLNT_E_CPUUSAGE_EXCEEDED";
      break;
    case AUDCLNT_S_POSITION_STALLED:
      s = "AUDCLNT_S_POSITION_STALLED";
      break;
+    case E_INVALIDARG:
+      s = "E_INVALIDARG";
+      break;
  }
  return s;
}
/* NOTE(review): diff hunks from several helper functions follow; context
 * elided between hunks (local declarations, function headers, resource
 * releases) is unchanged from the original file. */

/* Look up an IMMDevice — the default endpoint for @role when @device_name
 * is NULL, otherwise the device with that WASAPI ID string — and activate
 * an IAudioClient on it, returned via @ret_client. Returns TRUE on success. */
gboolean
-gst_wasapi_util_get_default_device_client (GstElement * element,
-    gboolean capture, IAudioClient ** ret_client)
+gst_wasapi_util_get_device_client (GstElement * element,
+    gboolean capture, gint role, const wchar_t * device_name,
+    IAudioClient ** ret_client)
{
  gboolean res = FALSE;
  HRESULT hr;
  /* 'enumerator', 'device' and 'client' are declared in elided context */
  hr = CoCreateInstance (&CLSID_MMDeviceEnumerator, NULL, CLSCTX_ALL,
      &IID_IMMDeviceEnumerator, (void **) &enumerator);
  if (hr != S_OK) {
-    GST_ERROR_OBJECT (element, "CoCreateInstance (MMDeviceEnumerator) failed");
+    GST_ERROR ("CoCreateInstance (MMDeviceEnumerator) failed: %s",
+        gst_wasapi_util_hresult_to_string (hr));
    goto beach;
  }
  /* Default endpoint for the requested role, or explicit device by ID */
-  hr = IMMDeviceEnumerator_GetDefaultAudioEndpoint (enumerator,
-      (capture) ? eCapture : eRender, eCommunications, &device);
-  if (hr != S_OK) {
-    GST_ERROR_OBJECT (element,
-        "IMMDeviceEnumerator::GetDefaultAudioEndpoint () failed");
-    goto beach;
+  if (!device_name) {
+    hr = IMMDeviceEnumerator_GetDefaultAudioEndpoint (enumerator,
+        capture ? eCapture : eRender, role, &device);
+    if (hr != S_OK) {
+      GST_ERROR ("IMMDeviceEnumerator::GetDefaultAudioEndpoint () failed: %s",
+          gst_wasapi_util_hresult_to_string (hr));
+      goto beach;
+    }
+  } else {
+    hr = IMMDeviceEnumerator_GetDevice (enumerator, device_name, &device);
+    if (hr != S_OK) {
+      GST_ERROR ("IMMDeviceEnumerator::GetDevice (\"%S\") failed", device_name);
+      goto beach;
+    }
  }
  hr = IMMDevice_Activate (device, &IID_IAudioClient, CLSCTX_ALL, NULL,
      (void **) &client);
  if (hr != S_OK) {
-    GST_ERROR_OBJECT (element, "IMMDevice::Activate (IID_IAudioClient) failed");
+    GST_ERROR ("IMMDevice::Activate (IID_IAudioClient) failed: %s",
+        gst_wasapi_util_hresult_to_string (hr));
    goto beach;
  }
  /* --- fragment of gst_wasapi_util_get_render_client (header elided) --- */
  hr = IAudioClient_GetService (client, &IID_IAudioRenderClient,
      (void **) &render_client);
  if (hr != S_OK) {
-    GST_ERROR_OBJECT (element, "IAudioClient::GetService "
-        "(IID_IAudioRenderClient) failed");
+    GST_ERROR ("IAudioClient::GetService (IID_IAudioRenderClient) failed: %s",
+        gst_wasapi_util_hresult_to_string (hr));
    goto beach;
  }
  *ret_render_client = render_client;
+  res = TRUE;
beach:
  return res;
  /* --- fragment of gst_wasapi_util_get_capture_client (header elided) --- */
  hr = IAudioClient_GetService (client, &IID_IAudioCaptureClient,
      (void **) &capture_client);
  if (hr != S_OK) {
-    GST_ERROR_OBJECT (element, "IAudioClient::GetService "
-        "(IID_IAudioCaptureClient) failed");
+    GST_ERROR ("IAudioClient::GetService (IID_IAudioCaptureClient) failed: %s",
+        gst_wasapi_util_hresult_to_string (hr));
    goto beach;
  }
  *ret_capture_client = capture_client;
+  res = TRUE;
beach:
  return res;
  /* --- fragment of gst_wasapi_util_get_clock (header elided) --- */
  HRESULT hr;
  IAudioClock *clock = NULL;
-  hr = IAudioClient_GetService (client, &IID_IAudioClock,
-      (void **) &clock);
+  hr = IAudioClient_GetService (client, &IID_IAudioClock, (void **) &clock);
  if (hr != S_OK) {
-    GST_ERROR_OBJECT (element, "IAudioClient::GetService "
-        "(IID_IAudioClock) failed");
+    GST_ERROR ("IAudioClient::GetService (IID_IAudioClock) failed: %s",
+        gst_wasapi_util_hresult_to_string (hr));
    goto beach;
  }
  *ret_clock = clock;
+  res = TRUE;
beach:
  return res;
}
-void
-gst_wasapi_util_audio_info_to_waveformatex (GstAudioInfo * info,
- WAVEFORMATEXTENSIBLE * format)
-{
- memset (format, 0, sizeof (*format));
- format->Format.cbSize = sizeof (*format) - sizeof (format->Format);
- format->Format.wFormatTag = WAVE_FORMAT_EXTENSIBLE;
- format->Format.nChannels = info->channels;
- format->Format.nSamplesPerSec = info->rate;
- format->Format.wBitsPerSample = (info->bpf * 8) / format->Format.nChannels;
- format->Format.nBlockAlign = info->bpf;
- format->Format.nAvgBytesPerSec =
- format->Format.nSamplesPerSec * format->Format.nBlockAlign;
- format->Samples.wValidBitsPerSample = info->finfo->depth;
- /* FIXME: Implement something here */
- format->dwChannelMask = SPEAKER_FRONT_LEFT | SPEAKER_FRONT_RIGHT;
- format->SubFormat = KSDATAFORMAT_SUBTYPE_PCM;
-}
-
-#if 0
-static WAVEFORMATEXTENSIBLE *
-gst_wasapi_src_probe_device_format (GstWasapiSrc * self, IMMDevice * device)
+const gchar *
+gst_waveformatex_to_audio_format (WAVEFORMATEXTENSIBLE * format)
{
- HRESULT hr;
- IPropertyStore *props = NULL;
- PROPVARIANT format_prop;
- WAVEFORMATEXTENSIBLE *format = NULL;
-
- hr = IMMDevice_OpenPropertyStore (device, STGM_READ, &props);
- if (hr != S_OK)
- goto beach;
-
- PropVariantInit (&format_prop);
- hr = IPropertyStore_GetValue (props, &PKEY_AudioEngine_DeviceFormat,
- &format_prop);
- if (hr != S_OK)
- goto beach;
+ const gchar *fmt_str = NULL;
+ GstAudioFormat fmt = GST_AUDIO_FORMAT_UNKNOWN;
+
+ if (format->Format.wFormatTag == WAVE_FORMAT_PCM) {
+ fmt = gst_audio_format_build_integer (TRUE, G_LITTLE_ENDIAN,
+ format->Format.wBitsPerSample, format->Format.wBitsPerSample);
+ } else if (format->Format.wFormatTag == WAVE_FORMAT_IEEE_FLOAT) {
+ if (format->Format.wBitsPerSample == 32)
+ fmt = GST_AUDIO_FORMAT_F32LE;
+ else if (format->Format.wBitsPerSample == 64)
+ fmt = GST_AUDIO_FORMAT_F64LE;
+ } else if (format->Format.wFormatTag == WAVE_FORMAT_EXTENSIBLE) {
+ if (IsEqualGUID (&format->SubFormat, &KSDATAFORMAT_SUBTYPE_PCM)) {
+ fmt = gst_audio_format_build_integer (TRUE, G_LITTLE_ENDIAN,
+ format->Format.wBitsPerSample, format->Samples.wValidBitsPerSample);
+ } else if (IsEqualGUID (&format->SubFormat,
+ &KSDATAFORMAT_SUBTYPE_IEEE_FLOAT)) {
+ if (format->Format.wBitsPerSample == 32
+ && format->Samples.wValidBitsPerSample == 32)
+ fmt = GST_AUDIO_FORMAT_F32LE;
+ else if (format->Format.wBitsPerSample == 64 &&
+ format->Samples.wValidBitsPerSample == 64)
+ fmt = GST_AUDIO_FORMAT_F64LE;
+ }
+ }
- format = (WAVEFORMATEXTENSIBLE *) format_prop.blob.pBlobData;
+ if (fmt != GST_AUDIO_FORMAT_UNKNOWN)
+ fmt_str = gst_audio_format_to_string (fmt);
- /* hmm: HKEY_LOCAL_MACHINE\SOFTWARE\Microsoft\Windows\CurrentVersion\MMDevices\Audio\Capture\{64adb8b7-9716-4c02-8929-96e53f5642da}\Properties */
+ return fmt_str;
+}
-beach:
- if (props != NULL)
- IUnknown_Release (props);
+GstCaps *
+gst_wasapi_util_waveformatex_to_caps (WAVEFORMATEXTENSIBLE * format,
+ GstCaps * template_caps)
+{
+ int ii;
+ const gchar *afmt;
+ GstCaps *caps = gst_caps_copy (template_caps);
+
+ /* TODO: handle SPDIF and other encoded formats */
+
+ /* 1 or 2 channels <= 16 bits sample size OR
+ * 1 or 2 channels > 16 bits sample size or >2 channels */
+ if (format->Format.wFormatTag != WAVE_FORMAT_PCM &&
+ format->Format.wFormatTag != WAVE_FORMAT_IEEE_FLOAT &&
+ format->Format.wFormatTag != WAVE_FORMAT_EXTENSIBLE)
+ /* Unhandled format tag */
+ return NULL;
+
+ /* WASAPI can only tell us one canonical mix format that it will accept. The
+ * alternative is calling IsFormatSupported on all combinations of formats.
+ * Instead, it's simpler and faster to require conversion inside gstreamer */
+ afmt = gst_waveformatex_to_audio_format (format);
+ if (afmt == NULL)
+ return NULL;
+
+ for (ii = 0; ii < gst_caps_get_size (caps); ii++) {
+ GstStructure *s = gst_caps_get_structure (caps, ii);
+
+ gst_structure_set (s,
+ "format", G_TYPE_STRING, afmt,
+ "channels", G_TYPE_INT, format->Format.nChannels,
+ "rate", G_TYPE_INT, format->Format.nSamplesPerSec, NULL);
+ }
- return format;
+ return caps;
}
-#endif
#include <audioclient.h>
-const gchar *
-gst_wasapi_util_hresult_to_string (HRESULT hr);
+/* Device role enum property */
+/* Mirrors the WASAPI ERole values (eConsole / eMultimedia /
+ * eCommunications); convert with gst_wasapi_device_role_to_erole() below. */
+typedef enum
+{
+  GST_WASAPI_DEVICE_ROLE_CONSOLE,
+  GST_WASAPI_DEVICE_ROLE_MULTIMEDIA,
+  GST_WASAPI_DEVICE_ROLE_COMMS
+} GstWasapiDeviceRole;
+#define GST_WASAPI_DEVICE_TYPE_ROLE (gst_wasapi_device_role_get_type())
+/* Registered GEnum GType for GstWasapiDeviceRole (for GObject properties) */
+GType gst_wasapi_device_role_get_type (void);
+
+/* Utilities */
+
+/* GstWasapiDeviceRole -> WASAPI ERole */
+gint gst_wasapi_device_role_to_erole (gint role);
+
+/* WASAPI ERole -> GstWasapiDeviceRole */
+gint gst_wasapi_erole_to_device_role (gint erole);
+
+/* Printable name for a WASAPI/COM HRESULT, for error messages */
+const gchar *gst_wasapi_util_hresult_to_string (HRESULT hr);
gboolean
-gst_wasapi_util_get_default_device_client (GstElement * element,
- gboolean capture,
- IAudioClient ** ret_client);
+gst_wasapi_util_get_device_client (GstElement * element,
+ gboolean capture,
+ gint role, const wchar_t * device_name, IAudioClient ** ret_client);
gboolean gst_wasapi_util_get_render_client (GstElement * element,
- IAudioClient *client,
- IAudioRenderClient ** ret_render_client);
+ IAudioClient * client, IAudioRenderClient ** ret_render_client);
gboolean gst_wasapi_util_get_capture_client (GstElement * element,
- IAudioClient * client,
- IAudioCaptureClient ** ret_capture_client);
+ IAudioClient * client, IAudioCaptureClient ** ret_capture_client);
gboolean gst_wasapi_util_get_clock (GstElement * element,
- IAudioClient * client,
- IAudioClock ** ret_clock);
+ IAudioClient * client, IAudioClock ** ret_clock);
-void
-gst_wasapi_util_audio_info_to_waveformatex (GstAudioInfo *info,
- WAVEFORMATEXTENSIBLE *format);
+GstCaps *gst_wasapi_util_waveformatex_to_caps (WAVEFORMATEXTENSIBLE * format,
+ GstCaps * template_caps);
#endif /* __GST_WASAPI_UTIL_H__ */
-