#include "libavformat/avformat.h"
#include "libavutil/pixdesc.h"
-#include "debug_ch.h"
+#include "util/new_debug_ch.h"
/* define debug channel */
-MULTI_DEBUG_CHANNEL(qemu, brillcodec);
+DECLARE_DEBUG_CHANNEL(brillcodec);
// device memory
#define CODEC_META_DATA_SIZE (256)
qemu_mutex_unlock(&profile_mutex);
total_latency_time = decoding_time + copying_time;
- INFO("decoding fps=%d, latency=%f(decode=%f + get_picture=%f)\n",
+ LOG_INFO("decoding fps=%d, latency=%f(decode=%f + get_picture=%f)\n",
decoding_fps, total_latency_time/decoding_fps,
decoding_time/decoding_fps, copying_time/decoding_fps);
} else {
memcpy(&readbuf_size, device_mem, sizeof(readbuf_size));
- TRACE("readbuf size: %d\n", readbuf_size);
+ LOG_TRACE("readbuf size: %d\n", readbuf_size);
if (readbuf_size == 0) {
- TRACE("inbuf size is 0. api_id %d, ctx_id %d, mem_offset %x\n",
+ LOG_TRACE("inbuf size is 0. api_id %d, ctx_id %d, mem_offset %x\n",
ioparam->api_index, ioparam->ctx_index, ioparam->mem_offset);
} else {
readbuf = g_malloc0(readbuf_size);
- TRACE("copy input buffer from guest. ctx_id: %d, mem_offset: %x\n",
+ LOG_TRACE("copy input buffer from guest. ctx_id: %d, mem_offset: %x\n",
ioparam->ctx_index, ioparam->mem_offset);
memcpy(readbuf, device_mem + size, readbuf_size);
}
case DEINIT:
case FLUSH_BUFFERS:
default:
- TRACE("no buffer from guest\n");
+ LOG_TRACE("no buffer from guest\n");
break;
}
DeviceMemEntry *elem = NULL;
uint32_t mem_offset = 0;
- TRACE("enter: %s\n", __func__);
+ LOG_TRACE("enter: %s\n", __func__);
if (ctx_idx < 1 || ctx_idx > (CODEC_CONTEXT_MAX - 1)) {
- ERR("invalid buffer index. %d\n", ctx_idx);
+ LOG_SEVERE("invalid buffer index. %d\n", ctx_idx);
return;
}
- TRACE("pop_writeqeue. context index: %d\n", ctx_idx);
+ LOG_TRACE("pop_writequeue. context index: %d\n", ctx_idx);
elem = entry[ctx_idx];
if (elem) {
mem_offset = s->ioparam.mem_offset;
if (mem_offset < CODEC_MEM_SIZE) {
elem->get_data(s->vaddr + mem_offset, elem->opaque, elem->data_size);
} else {
- TRACE("mem_offset is corrupted!!\n");
+ LOG_TRACE("mem_offset is corrupted!!\n");
}
- TRACE("pop_writequeue. release elem: %p\n", elem);
+ LOG_TRACE("pop_writequeue. release elem: %p\n", elem);
g_free(elem);
entry[ctx_idx] = NULL;
} else {
- TRACE("there is no buffer to copy data to guest\n");
+ LOG_TRACE("there is no buffer to copy data to guest\n");
}
- TRACE("leave: %s\n", __func__);
+ LOG_TRACE("leave: %s\n", __func__);
}
// threads
{
int index;
- TRACE("enter: %s\n", __func__);
+ LOG_TRACE("enter: %s\n", __func__);
/* stop to run dedicated threads. */
s->is_thread_running = false;
qemu_thread_join(&s->threadpool.threads[index]);
}
- TRACE("destroy mutex and conditional.\n");
+ LOG_TRACE("destroy mutex and conditional.\n");
qemu_mutex_destroy(&s->threadpool.mutex);
qemu_cond_destroy(&s->threadpool.cond);
s->threadpool.threads = NULL;
}
- TRACE("leave: %s\n", __func__);
+ LOG_TRACE("leave: %s\n", __func__);
}
void brillcodec_wakeup_threads(MaruBrillCodecState *s, int api_index)
memcpy(ioparam, &s->ioparam, sizeof(CodecParam));
- TRACE("wakeup thread. ctx_id: %u, api_id: %u, mem_offset: 0x%x\n",
+ LOG_TRACE("wakeup thread. ctx_id: %u, api_id: %u, mem_offset: 0x%x\n",
ioparam->ctx_index, ioparam->api_index, ioparam->mem_offset);
qemu_mutex_lock(&s->context_mutex);
if (ioparam->api_index != INIT) {
if (!CONTEXT(s, ioparam->ctx_index)->opened_context) {
- INFO("abandon api %d for context %d\n",
+ LOG_INFO("abandon api %d for context %d\n",
ioparam->api_index, ioparam->ctx_index);
qemu_mutex_unlock(&s->context_mutex);
return;
// W/A for threads starvation.
while (s->idle_thread_cnt == 0) {
qemu_mutex_unlock(&s->context_mutex);
- TRACE("Worker threads are exhausted\n");
+ LOG_TRACE("Worker threads are exhausted\n");
usleep(2000); // wait 2ms.
qemu_mutex_lock(&s->context_mutex);
}
qemu_cond_signal(&s->threadpool.cond);
qemu_mutex_unlock(&s->context_mutex);
- TRACE("after sending conditional signal\n");
+ LOG_TRACE("after sending conditional signal\n");
}
void *brillcodec_threads(void *opaque)
MaruBrillCodecState *s = (MaruBrillCodecState *)opaque;
bool ret = false;
- TRACE("enter: %s\n", __func__);
+ LOG_TRACE("enter: %s\n", __func__);
while (s->is_thread_running) {
int ctx_id = 0, api_id = 0;
indata_buf = elem->data_buf;
- TRACE("api_id: %d ctx_id: %d\n", api_id, ctx_id);
+ LOG_TRACE("api_id: %d ctx_id: %d\n", api_id, ctx_id);
qemu_mutex_lock(&s->context_mutex);
CONTEXT(s, ctx_id)->occupied_thread = true;
ret = codec_func_handler[api_id](s, ctx_id, indata_buf);
if (!ret) {
- ERR("fail api %d for context %d\n", api_id, ctx_id);
+ LOG_SEVERE("fail api %d for context %d\n", api_id, ctx_id);
g_free(elem->param_buf);
continue;
}
- TRACE("release a buffer of CodecParam\n");
+ LOG_TRACE("release a buffer of CodecParam\n");
g_free(elem->param_buf);
elem->param_buf = NULL;
if (elem->data_buf) {
if (elem->data_buf->opaque &&
!(s->memory_monopolizing & (1 << api_id))) {
- TRACE("release inbuf\n");
+ LOG_TRACE("release inbuf\n");
g_free(elem->data_buf->opaque);
elem->data_buf->opaque = NULL;
}
- TRACE("release a buffer indata_buf\n");
+ LOG_TRACE("release a buffer indata_buf\n");
g_free(elem->data_buf);
elem->data_buf = NULL;
}
- TRACE("release an element of CodecDataStg\n");
+ LOG_TRACE("release an element of CodecDataStg\n");
g_free(elem);
qemu_mutex_lock(&s->context_mutex);
if (CONTEXT(s, ctx_id)->requested_close) {
- INFO("make worker thread to handle deinit\n");
+ LOG_INFO("make worker thread to handle deinit\n");
// codec_deinit(s, ctx_id, NULL);
brillcodec_release_context(s, ctx_id);
CONTEXT(s, ctx_id)->requested_close = false;
}
qemu_mutex_unlock(&s->context_mutex);
- TRACE("switch context to raise interrupt.\n");
+ LOG_TRACE("switch context to raise interrupt.\n");
qemu_bh_schedule(s->codec_bh);
qemu_mutex_lock(&s->context_mutex);
maru_brill_codec_thread_exit(s);
- TRACE("leave: %s\n", __func__);
+ LOG_TRACE("leave: %s\n", __func__);
return NULL;
}
DeviceMemEntry *wq_elem = NULL, *wnext = NULL;
CodecDataStg *rq_elem = NULL, *rnext = NULL;
- TRACE("enter: %s\n", __func__);
+ LOG_TRACE("enter: %s\n", __func__);
- TRACE("release %d of context\n", ctx_id);
+ LOG_TRACE("release %d of context\n", ctx_id);
qemu_mutex_lock(&s->threadpool.mutex);
if (CONTEXT(s, ctx_id)->opened_context) {
if (rq_elem && rq_elem->data_buf &&
(rq_elem->data_buf->ctx_id == ctx_id)) {
- TRACE("remove unused node from codec_rq. ctx_id: %d\n", ctx_id);
+ LOG_TRACE("remove unused node from codec_rq. ctx_id: %d\n", ctx_id);
qemu_mutex_lock(&s->context_queue_mutex);
QTAILQ_REMOVE(&codec_rq, rq_elem, node);
qemu_mutex_unlock(&s->context_queue_mutex);
if (rq_elem && rq_elem->data_buf) {
- TRACE("release rq_buffer: %p\n", rq_elem->data_buf);
+ LOG_TRACE("release rq_buffer: %p\n", rq_elem->data_buf);
g_free(rq_elem->data_buf);
}
- TRACE("release rq_elem: %p\n", rq_elem);
+ LOG_TRACE("release rq_elem: %p\n", rq_elem);
g_free(rq_elem);
} else {
- TRACE("no elem of %d context in the codec_rq.\n", ctx_id);
+ LOG_TRACE("no elem of %d context in the codec_rq.\n", ctx_id);
}
}
QTAILQ_FOREACH_SAFE(wq_elem, &codec_wq, node, wnext) {
if (wq_elem && wq_elem->ctx_id == ctx_id) {
- TRACE("remove unused node from codec_wq. ctx_id: %d\n", ctx_id);
+ LOG_TRACE("remove unused node from codec_wq. ctx_id: %d\n", ctx_id);
qemu_mutex_lock(&s->context_queue_mutex);
QTAILQ_REMOVE(&codec_wq, wq_elem, node);
qemu_mutex_unlock(&s->context_queue_mutex);
if (wq_elem && wq_elem->opaque) {
- TRACE("release wq_buffer: %p\n", wq_elem->opaque);
+ LOG_TRACE("release wq_buffer: %p\n", wq_elem->opaque);
g_free(wq_elem->opaque);
wq_elem->opaque = NULL;
}
- TRACE("release wq_elem: %p\n", wq_elem);
+ LOG_TRACE("release wq_elem: %p\n", wq_elem);
g_free(wq_elem);
} else {
- TRACE("no elem of %d context in the codec_wq.\n", ctx_id);
+ LOG_TRACE("no elem of %d context in the codec_wq.\n", ctx_id);
}
}
- TRACE("leave: %s\n", __func__);
+ LOG_TRACE("leave: %s\n", __func__);
}
struct codec_element {
AVCodec *codec = NULL;
/* register avcodec */
- TRACE("register avcodec\n");
+ LOG_TRACE("register avcodec\n");
av_register_all();
codec = av_codec_next(NULL);
if (!codec) {
- ERR("failed to get codec info.\n");
+ LOG_SEVERE("failed to get codec info.\n");
return -1;
}
}
}
} else {
- ERR("unknown media type: %d\n", codec->type);
+ LOG_SEVERE("unknown media type: %d\n", codec->type);
}
memset(element, 0x00, sizeof(struct codec_element));
g_strlcpy(element->long_name, codec->long_name, sizeof(element->long_name));
memcpy(element->pix_fmts, codec_fmts, sizeof(codec_fmts));
- TRACE("register %s %s\n", codec->name, codec->decode ? "decoder" : "encoder");
+ LOG_TRACE("register %s %s\n", codec->name, codec->decode ? "decoder" : "encoder");
++element;
{
int ctx_id;
- TRACE("enter: %s\n", __func__);
+ LOG_TRACE("enter: %s\n", __func__);
// requires mutex_lock? its function is protected by critical section.
qemu_mutex_lock(&s->threadpool.mutex);
for (ctx_id = 1; ctx_id < CODEC_CONTEXT_MAX; ctx_id++) {
if (CONTEXT(s, ctx_id)->occupied_context == false) {
- TRACE("get %d of codec context successfully.\n", ctx_id);
+ LOG_TRACE("get %d of codec context successfully.\n", ctx_id);
CONTEXT(s, ctx_id)->occupied_context = true;
break;
}
qemu_mutex_unlock(&s->threadpool.mutex);
if (ctx_id == CODEC_CONTEXT_MAX) {
- ERR("failed to get available codec context. ");
- ERR("try to run codec again.\n");
+ LOG_SEVERE("failed to get available codec context. ");
+ LOG_SEVERE("try to run codec again.\n");
ctx_id = -1;
}
- TRACE("leave: %s\n", __func__);
+ LOG_TRACE("leave: %s\n", __func__);
return ctx_id;
}
AVCodecContext *avctx = NULL;
bool ret = true;
- TRACE("enter: %s\n", __func__);
+ LOG_TRACE("enter: %s\n", __func__);
avctx = CONTEXT(s, ctx_id)->avctx;
if (!avctx) {
- ERR("%d of AVCodecContext is NULL.\n", ctx_id);
+ LOG_SEVERE("%d of AVCodecContext is NULL.\n", ctx_id);
ret = false;
} else if (!avctx->codec) {
- ERR("%d of AVCodec is NULL.\n", ctx_id);
+ LOG_SEVERE("%d of AVCodec is NULL.\n", ctx_id);
ret = false;
} else {
- TRACE("flush %d context of buffers.\n", ctx_id);
+ LOG_TRACE("flush %d context of buffers.\n", ctx_id);
AVCodecParserContext *pctx = NULL;
uint8_t *poutbuf = NULL;
int poutbuf_size = 0;
if (pctx) {
res = av_parser_parse2(pctx, avctx, &poutbuf, &poutbuf_size,
p_inbuf, p_inbuf_size, -1, -1, -1);
- INFO("before flush buffers, using parser. res: %d\n", res);
+ LOG_INFO("before flush buffers, using parser. res: %d\n", res);
}
avcodec_flush_buffers(avctx);
brillcodec_push_writequeue(s, NULL, 0, ctx_id, NULL);
- TRACE("leave: %s\n", __func__);
+ LOG_TRACE("leave: %s\n", __func__);
return ret;
}
int pict_size = avpicture_get_size(pix_fmt, frame->width, frame->height);
if (pict_size < 0) {
// cannot enter here...
- ERR("Invalid picture size\n");
+ LOG_SEVERE("Invalid picture size\n");
return;
}
avpicture_layout((AVPicture *)frame, pix_fmt,
parser_buf += ret;
}
- TRACE("after parsing ret: %d parser_outbuf_size %d parser_buf_size %d pts %lld\n",
+ LOG_TRACE("after parsing ret: %d parser_outbuf_size %d parser_buf_size %d pts %lld\n",
ret, parser_outbuf_size, parser_buf_size, pctx->pts);
/* if there is no output, we must break and wait for more data.
*/
if (parser_outbuf_size == 0) {
if (parser_buf_size > 0) {
- TRACE("parsing data have been left\n");
+ LOG_TRACE("parsing data have been left\n");
continue;
} else {
- TRACE("finish parsing data\n");
+ LOG_TRACE("finish parsing data\n");
break;
}
}
packet->data = parser_outbuf;
packet->size = parser_outbuf_size;
} else {
- TRACE("not using parser %s\n", avctx->codec->name);
+ LOG_TRACE("not using parser %s\n", avctx->codec->name);
}
// begin video decode profile
BEGIN_VIDEO_DECODE_PROFILE();
len = avcodec_decode_video2(avctx, picture, (int *)got_picture, packet);
- TRACE("decode_video. len %d, got_picture %d\n", len, *got_picture);
+ LOG_TRACE("decode_video. len %d, got_picture %d\n", len, *got_picture);
// end video decode profile
END_VIDEO_DECODE_PROFILE();
if (!pctx) {
if (len == 0 && (*got_picture) == 0) {
- ERR("decoding video didn't return any data! ctx_id %d len %d\n", ctx_id, len);
+ LOG_SEVERE("decoding video didn't return any data! ctx_id %d len %d\n", ctx_id, len);
break;
} else if (len < 0) {
- ERR("decoding video error! ctx_id %d len %d\n", ctx_id, len);
+ LOG_SEVERE("decoding video error! ctx_id %d len %d\n", ctx_id, len);
break;
}
parser_buf_size -= len;
parser_buf += len;
} else {
if (len == 0) {
- ERR("decoding video didn't return any data! ctx_id %d len %d\n", ctx_id, len);
+ LOG_SEVERE("decoding video didn't return any data! ctx_id %d len %d\n", ctx_id, len);
*got_picture = 0;
break;
} else if (len < 0) {
- ERR("decoding video error! trying next ctx_id %d len %d\n", ctx_id, len);
+ LOG_SEVERE("decoding video error! trying next ctx_id %d len %d\n", ctx_id, len);
break;
}
}
uint32_t got_picture = 0;
int32_t len = -1;
- TRACE("enter: %s\n", __func__);
+ LOG_TRACE("enter: %s\n", __func__);
elem = (DeviceMemEntry *)data_buf;
if (!elem || !elem->opaque) {
- TRACE("decode_video. no input buffer\n");
+ LOG_TRACE("decode_video. no input buffer\n");
} else {
decode_input = elem->opaque;
}
pctx = CONTEXT(s, ctx_id)->parser_ctx;
if (!avctx || !avctx->codec || !frame) {
- ERR("critical error !!!\n");
+ LOG_SEVERE("critical error !!!\n");
assert(0);
}
- TRACE("decode_video. bitrate %d resolution(%dx%d)\n",
+ LOG_TRACE("decode_video. bitrate %d resolution(%dx%d)\n",
avctx->bit_rate, avctx->width, avctx->height);
len = parse_and_decode_video(avctx, frame, pctx, ctx_id,
brillcodec_push_writequeue(s, dc, 0, ctx_id, copy_video_decode_data);
- TRACE("leave: %s\n", __func__);
+ LOG_TRACE("leave: %s\n", __func__);
return true;
}
{
DataContainer *dc = g_malloc0(sizeof(DataContainer));
- TRACE("enter: %s\n", __func__);
+ LOG_TRACE("enter: %s\n", __func__);
- TRACE("copy decoded image of %d context.\n", ctx_id);
+ LOG_TRACE("copy decoded image of %d context.\n", ctx_id);
dc->avctx = CONTEXT(s, ctx_id)->avctx;
dc->frame = CONTEXT(s, ctx_id)->frame;
brillcodec_push_writequeue(s, dc, 0, ctx_id, copy_video_decode_data);
- TRACE("leave: %s\n", __func__);
+ LOG_TRACE("leave: %s\n", __func__);
return true;
}
struct video_encode_input empty_input = { 0, };
struct video_encode_input *encode_input = &empty_input;
- TRACE("enter: %s\n", __func__);
+ LOG_TRACE("enter: %s\n", __func__);
elem = (DeviceMemEntry *)data_buf;
if (!elem || !elem->opaque) {
- TRACE("encode_video. no input buffer\n");
+ LOG_TRACE("encode_video. no input buffer\n");
} else {
encode_input = elem->opaque;
}
pict = CONTEXT(s, ctx_id)->frame;
if(!avctx || !avctx->codec) {
- ERR("critical error !!!\n");
+ LOG_SEVERE("critical error !!!\n");
assert(0);
}
- TRACE("pixel format: %d inbuf: %p, picture data: %p\n",
+ LOG_TRACE("pixel format: %d inbuf: %p, picture data: %p\n",
avctx->pix_fmt, inbuf, pict->data[0]);
ret = avpicture_fill((AVPicture *)pict, &encode_input->inbuf, avctx->pix_fmt,
avctx->width, avctx->height);
if (ret < 0) {
- ERR("after avpicture_fill, ret:%d\n", ret);
+ LOG_SEVERE("after avpicture_fill, ret:%d\n", ret);
} else {
if (avctx->time_base.num == 0) {
pict->pts = AV_NOPTS_VALUE;
{1, (G_USEC_PER_SEC * G_GINT64_CONSTANT(1000))};
pict->pts = av_rescale_q(encode_input->in_timestamp, bq, avctx->time_base);
}
- TRACE("encode video. ticks_per_frame:%d, pts:%lld\n",
+ LOG_TRACE("encode video. ticks_per_frame:%d, pts:%lld\n",
avctx->ticks_per_frame, pict->pts);
outbuf_size =
ret = avcodec_encode_video2(avctx, avpkt, pict, &got_frame);
- TRACE("encode video. ret %d got_frame %d outbuf_size %d\n", ret, got_frame, avpkt->size);
+ LOG_TRACE("encode video. ret %d got_frame %d outbuf_size %d\n", ret, got_frame, avpkt->size);
if (avctx->coded_frame) {
- TRACE("encode video. keyframe %d\n", avctx->coded_frame->key_frame);
+ LOG_TRACE("encode video. keyframe %d\n", avctx->coded_frame->key_frame);
}
}
brillcodec_push_writequeue(s, dc, 0, ctx_id, copy_video_encode_data);
- TRACE("leave: %s\n", __func__);
+ LOG_TRACE("leave: %s\n", __func__);
return true;
}
}
if (codec_type != AVMEDIA_TYPE_AUDIO) {
- ERR("this codec_type is invalid %d\n", codec_type);
+ LOG_SEVERE("this codec_type is invalid %d\n", codec_type);
return audio_sample_fmt;
}
audio_sample_fmt = AV_SAMPLE_FMT_FLT;
}
} else {
- INFO("cannot handle %s codec\n", codec->name);
+ LOG_INFO("cannot handle %s codec\n", codec->name);
}
- TRACE("convert audio sample_fmt %d\n", audio_sample_fmt);
+ LOG_TRACE("convert audio sample_fmt %d\n", audio_sample_fmt);
return audio_sample_fmt;
}
int result = 0;
if (!avctx) {
- ERR("fill_audio. AVCodecContext is NULL!!\n");
+ LOG_SEVERE("fill_audio. AVCodecContext is NULL!!\n");
return -1;
}
if (!frame) {
- ERR("fill_audio. AVFrame is NULL!!\n");
+ LOG_SEVERE("fill_audio. AVFrame is NULL!!\n");
return -1;
}
result =
avcodec_fill_audio_frame(frame, avctx->channels, audio_sample_fmt, (const uint8_t *)samples, samples_size, 0);
- TRACE("fill audio in_frame. ret: %d in_frame->ch_layout %lld\n", result, frame->channel_layout);
+ LOG_TRACE("fill audio in_frame. ret: %d in_frame->ch_layout %lld\n", result, frame->channel_layout);
return result;
}
avr = avresample_alloc_context();
if (!avr) {
- ERR("failed to allocate avresample context\n");
+ LOG_SEVERE("failed to allocate avresample context\n");
return NULL;
}
- TRACE("channel_layout %lld sample_rate %d in_sample_fmt %d resample_sample_fmt %d\n",
+ LOG_TRACE("channel_layout %lld sample_rate %d in_sample_fmt %d resample_sample_fmt %d\n",
avctx->channel_layout, avctx->sample_rate, avctx->sample_fmt, resample_sample_fmt);
av_opt_set_int(avr, "in_channel_layout", avctx->channel_layout, 0);
av_opt_set_int(avr, "out_sample_fmt", resample_sample_fmt, 0);
av_opt_set_int(avr, "out_sample_rate", avctx->sample_rate, 0);
- TRACE("open avresample context\n");
+ LOG_TRACE("open avresample context\n");
if (avresample_open(avr) < 0) {
- ERR("failed to open avresample context\n");
+ LOG_SEVERE("failed to open avresample context\n");
avresample_free(&avr);
return NULL;
}
#else
resample_frame = av_frame_alloc();
#endif
- TRACE("resample audio. nb_samples %d sample_fmt %d\n", resample_nb_samples, resample_sample_fmt);
+ LOG_TRACE("resample audio. nb_samples %d sample_fmt %d\n", resample_nb_samples, resample_sample_fmt);
*resample_buffer_size = av_samples_get_buffer_size(NULL, avctx->channels, resample_nb_samples, resample_sample_fmt, 0);
if (*resample_buffer_size < 0) {
- ERR("failed to get size of resample buffer %d\n", *resample_buffer_size);
+ LOG_SEVERE("failed to get size of resample buffer %d\n", *resample_buffer_size);
avresample_close(avr);
avresample_free(&avr);
return NULL;
resample_buffer = av_mallocz(*resample_buffer_size);
if (!resample_buffer) {
- ERR("failed to allocate resample buffer\n");
+ LOG_SEVERE("failed to allocate resample buffer\n");
avresample_close(avr);
avresample_free(&avr);
return NULL;
sample_frame->data, sample_buffer_size,
sample_frame->nb_samples);
- TRACE("resample_audio buffer_size %d\n", buffer_size);
+ LOG_TRACE("resample_audio buffer_size %d\n", buffer_size);
avresample_close(avr);
avresample_free(&avr);
struct audio_decode_input *decode_input = &empty_input;
int len = -1, got_frame = 0;
- TRACE("enter: %s\n", __func__);
+ LOG_TRACE("enter: %s\n", __func__);
elem = (DeviceMemEntry *)data_buf;
if (!elem || !elem->opaque) {
- TRACE("decode_audio. no input buffer\n");
+ LOG_TRACE("decode_audio. no input buffer\n");
} else {
decode_input = elem->opaque;
}
frame = CONTEXT(s, ctx_id)->frame;
if(!avctx || !avctx->codec || !frame) {
- ERR("critical error !!!\n");
+ LOG_SEVERE("critical error !!!\n");
assert(0);
}
len = avcodec_decode_audio4(avctx, frame, &got_frame, &avpkt);
- TRACE("decode_audio. len %d, channel_layout %lld, got_frame %d\n",
+ LOG_TRACE("decode_audio. len %d, channel_layout %lld, got_frame %d\n",
len, avctx->channel_layout, got_frame);
AVFrame *resample_frame = NULL;
if (avctx->channel_layout == 0) {
avctx->channel_layout = av_get_default_channel_layout(avctx->channels);
- TRACE("decode_audio. channel_layout %lld channels %d\n",
+ LOG_TRACE("decode_audio. channel_layout %lld channels %d\n",
avctx->channel_layout, avctx->channels);
}
resample_frame = resample_audio(avctx, frame, frame->linesize[0],
avctx->sample_fmt, NULL, &resample_buf_size,
out_sample_fmt);
if (!resample_frame) {
- ERR("failed to resample decoded audio buffer\n");
+ LOG_SEVERE("failed to resample decoded audio buffer\n");
len = -1;
got_frame = 0;
}
brillcodec_push_writequeue(s, dc, 0, ctx_id, &copy_audio_decode_data);
- TRACE("leave: %s\n", __func__);
+ LOG_TRACE("leave: %s\n", __func__);
return true;
}
struct audio_encode_input empty_input = { 0, };
struct audio_encode_input *encode_input = &empty_input;
- TRACE("enter: %s\n", __func__);
+ LOG_TRACE("enter: %s\n", __func__);
/*
* copy raw audio data from gstreamer encoder plugin
*/
elem = (DeviceMemEntry *)data_buf;
if (!elem || !elem->opaque) {
- TRACE("encode_audio. no input buffer\n");
+ LOG_TRACE("encode_audio. no input buffer\n");
} else {
encode_input = elem->opaque;
}
in_frame = CONTEXT(s, ctx_id)->frame;
if (!avctx || !avctx->codec || !in_frame) {
- ERR("critical error !!!\n");
+ LOG_SEVERE("critical error !!!\n");
assert(0);
}
int ret = 0;
bytes_per_sample = av_get_bytes_per_sample(audio_in_sample_fmt);
- TRACE("bytes per sample %d, sample format %d\n", bytes_per_sample, audio_in_sample_fmt);
+ LOG_TRACE("bytes per sample %d, sample format %d\n", bytes_per_sample, audio_in_sample_fmt);
nb_samples = encode_input->inbuf_size / (bytes_per_sample * avctx->channels);
- TRACE("nb_samples %d\n", nb_samples);
+ LOG_TRACE("nb_samples %d\n", nb_samples);
ret = fill_audio_into_frame(avctx, in_frame,
&encode_input->inbuf, encode_input->inbuf_size,
nb_samples, audio_in_sample_fmt);
if (ret < 0) {
- ERR("failed to fill audio into frame\n");
+ LOG_SEVERE("failed to fill audio into frame\n");
} else {
resample_sample_fmt =
convert_audio_sample_fmt(avctx->codec, avctx->codec_type, 1);
if (resample_frame) {
len = avcodec_encode_audio2(avctx, avpkt, (const AVFrame *)resample_frame, &got_frame);
- TRACE("encode audio. len %d got_frame %d avpkt->size %d frame_number %d\n",
+ LOG_TRACE("encode audio. len %d got_frame %d avpkt->size %d frame_number %d\n",
len, got_frame, avpkt->size, avctx->frame_number);
}
}
brillcodec_push_writequeue(s, dc, 0, ctx_id, &copy_audio_encode_data);
- TRACE("[%s] leave:\n", __func__);
+ LOG_TRACE("[%s] leave:\n", __func__);
return true;
}
avctx->ticks_per_frame = video->ticks_per_frame;
}
- INFO("codec_init. video, resolution: %dx%d, framerate: %d/%d "
+ LOG_INFO("codec_init. video, resolution: %dx%d, framerate: %d/%d "
"pixel_fmt: %d sample_aspect_ratio: %d/%d bpp %d\n",
avctx->width, avctx->height, avctx->time_base.num,
avctx->time_base.den, avctx->pix_fmt, avctx->sample_aspect_ratio.num,
avctx->sample_fmt = audio->sample_fmt;
}
- INFO("codec_init. audio, channel %d sample_rate %d sample_fmt %d ch_layout %lld\n",
+ LOG_INFO("codec_init. audio, channel %d sample_rate %d sample_fmt %d ch_layout %lld\n",
avctx->channels, avctx->sample_rate, avctx->sample_fmt, avctx->channel_layout);
}
goto end;
}
- INFO("HW_ACCEL is enabled with pix_fmt [%s]\n", av_get_pix_fmt_name(pi_fmt[i]));
+ LOG_INFO("HW_ACCEL is enabled with pix_fmt [%s]\n", av_get_pix_fmt_name(pi_fmt[i]));
context->is_hwaccel = true;
return pi_fmt[i];
end:
- INFO("HW_ACCEL is disabled\n");
+ LOG_INFO("HW_ACCEL is disabled\n");
context->is_hwaccel = false;
return avcodec_default_get_format(avctx, pi_fmt);
}
// allocate avcontext and avframe struct.
static AVCodecContext *maru_brill_codec_alloc_context(MaruBrillCodecState *s, int ctx_id)
{
- TRACE("enter: %s\n", __func__);
+ LOG_TRACE("enter: %s\n", __func__);
- TRACE("allocate %d of context and frame.\n", ctx_id);
+ LOG_TRACE("allocate %d of context and frame.\n", ctx_id);
CONTEXT(s, ctx_id)->avctx = avcodec_alloc_context3(NULL);
CONTEXT(s, ctx_id)->opened_context = false;
CONTEXT(s, ctx_id)->state = s;
- TRACE("leave: %s\n", __func__);
+ LOG_TRACE("leave: %s\n", __func__);
return avctx;
}
memcpy(codec_name, mem_buf + size, sizeof(codec_name));
size += sizeof(codec_name);
- TRACE("type: %d, name: %s\n", encode, codec_name);
+ LOG_TRACE("type: %d, name: %s\n", encode, codec_name);
if (encode) {
codec = avcodec_find_encoder_by_name (codec_name);
} else {
codec = avcodec_find_decoder_by_name (codec_name);
}
- INFO("%s!! find %s %s\n", codec ? "success" : "failure",
+ LOG_INFO("%s!! find %s %s\n", codec ? "success" : "failure",
codec_name, encode ? "encoder" : "decoder");
return codec;
memcpy(&avctx->extradata_size,
mem_buf + size, sizeof(avctx->extradata_size));
size += sizeof(avctx->extradata_size);
- INFO("extradata size: %d.\n", avctx->extradata_size);
+ LOG_INFO("extradata size: %d.\n", avctx->extradata_size);
if (avctx->extradata_size > 0) {
avctx->extradata =
memcpy(avctx->extradata, mem_buf + size, avctx->extradata_size);
}
} else {
- TRACE("no extra data.\n");
+ LOG_TRACE("no extra data.\n");
avctx->extradata =
av_mallocz(ROUND_UP_X(FF_INPUT_BUFFER_PADDING_SIZE, 4));
}
if (avctx->codec->type == AVMEDIA_TYPE_AUDIO) {
int osize = av_get_bytes_per_sample(avctx->sample_fmt);
- INFO("avcodec_open. sample_fmt %d, bytes_per_sample %d\n", avctx->sample_fmt, osize);
+ LOG_INFO("avcodec_open. sample_fmt %d, bytes_per_sample %d\n", avctx->sample_fmt, osize);
if ((avctx->codec_id == AV_CODEC_ID_AAC) && avctx->codec->encode2) {
osize = av_get_bytes_per_sample(AV_SAMPLE_FMT_S16);
AVCodecParserContext *parser = NULL;
if (!avctx) {
- ERR("context is NULL\n");
+ LOG_SEVERE("context is NULL\n");
return NULL;
}
switch (avctx->codec_id) {
case AV_CODEC_ID_MPEG4:
case AV_CODEC_ID_VC1:
- TRACE("not using parser\n");
+ LOG_TRACE("not using parser\n");
break;
case AV_CODEC_ID_H264:
if (avctx->extradata_size == 0) {
- TRACE("H.264 with no extradata, creating parser.\n");
+ LOG_TRACE("H.264 with no extradata, creating parser.\n");
parser = av_parser_init (avctx->codec_id);
}
break;
default:
parser = av_parser_init(avctx->codec_id);
if (parser) {
- INFO("using parser: %s\n", avctx->codec->name);
+ LOG_INFO("using parser: %s\n", avctx->codec->name);
}
break;
}
uint8_t *tempbuf = NULL;
int tempbuf_size = 0;
- TRACE("enter: %s\n", __func__);
+ LOG_TRACE("enter: %s\n", __func__);
elem = (DeviceMemEntry *)data_buf;
// allocate AVCodecContext
avctx = maru_brill_codec_alloc_context(s, ctx_id);
if (!avctx) {
- ERR("[%d] failed to allocate context.\n", __LINE__);
+ LOG_SEVERE("[%d] failed to allocate context.\n", __LINE__);
ret = -1;
} else {
codec = maru_brill_codec_find_avcodec(elem->opaque);
// in case of aac encoder, sample format is float
if (!strcmp(codec->name, "aac") && codec->encode2) {
- TRACE("convert sample format into SAMPLE_FMT_FLTP\n");
+ LOG_TRACE("convert sample format into SAMPLE_FMT_FLTP\n");
avctx->sample_fmt = AV_SAMPLE_FMT_FLTP;
avctx->strict_std_compliance = FF_COMPLIANCE_EXPERIMENTAL;
- INFO("aac encoder!! channels %d channel_layout %lld\n", avctx->channels, avctx->channel_layout);
+ LOG_INFO("aac encoder!! channels %d channel_layout %lld\n", avctx->channels, avctx->channel_layout);
avctx->channel_layout = av_get_default_channel_layout(avctx->channels);
}
- TRACE("audio sample format %d\n", avctx->sample_fmt);
- TRACE("strict_std_compliance %d\n", avctx->strict_std_compliance);
+ LOG_TRACE("audio sample format %d\n", avctx->sample_fmt);
+ LOG_TRACE("strict_std_compliance %d\n", avctx->strict_std_compliance);
ret = avcodec_open2(avctx, codec, NULL);
- INFO("avcodec_open. ret 0x%x ctx_id %d\n", ret, ctx_id);
-
- INFO("channels %d sample_rate %d sample_fmt %d "
+ LOG_INFO("avcodec_open. ret 0x%x ctx_id %d\n", ret, ctx_id);
+ LOG_INFO("channels %d sample_rate %d sample_fmt %d "
"channel_layout %lld frame_size %d\n",
avctx->channels, avctx->sample_rate, avctx->sample_fmt,
avctx->channel_layout, avctx->frame_size);
CONTEXT(s, ctx_id)->parser_ctx =
maru_brill_codec_parser_init(avctx);
} else {
- ERR("failed to find codec. ctx_id: %d\n", ctx_id);
+ LOG_SEVERE("failed to find codec. ctx_id: %d\n", ctx_id);
ret = -1;
}
}
tempbuf = g_malloc(tempbuf_size);
if (!tempbuf) {
- ERR("failed to allocate a buffer\n");
+ LOG_SEVERE("failed to allocate a buffer\n");
tempbuf_size = 0;
} else {
memcpy(tempbuf, &ret, sizeof(ret));
size = sizeof(ret);
if (ret < 0) {
- ERR("failed to open codec contex.\n");
+ LOG_SEVERE("failed to open codec context.\n");
} else {
size += write_codec_init_data(avctx, tempbuf + size);
- TRACE("codec_init. copyback!! size %d\n", size);
+ LOG_TRACE("codec_init. copyback!! size %d\n", size);
{
memcpy(tempbuf + size, &avctx->extradata_size, sizeof(avctx->extradata_size));
size += sizeof(avctx->extradata_size);
- INFO("codec_init. extradata_size: %d\n", avctx->extradata_size);
+ LOG_INFO("codec_init. extradata_size: %d\n", avctx->extradata_size);
if (avctx->extradata) {
memcpy(tempbuf + size, avctx->extradata, avctx->extradata_size);
size += avctx->extradata_size;
brillcodec_push_writequeue(s, tempbuf, tempbuf_size, ctx_id, NULL);
- TRACE("leave: %s\n", __func__);
+ LOG_TRACE("leave: %s\n", __func__);
return true;
}
AVFrame *frame = NULL;
AVCodecParserContext *parserctx = NULL;
- TRACE("enter: %s\n", __func__);
+ LOG_TRACE("enter: %s\n", __func__);
avctx = CONTEXT(s, ctx_id)->avctx;
frame = CONTEXT(s, ctx_id)->frame;
parserctx = CONTEXT(s, ctx_id)->parser_ctx;
if (!avctx || !frame) {
- TRACE("%d of AVCodecContext or AVFrame is NULL. "
+ LOG_TRACE("%d of AVCodecContext or AVFrame is NULL. "
" Those resources have been released before.\n", ctx_id);
return false;
}
- INFO("close avcontext of %d\n", ctx_id);
+ LOG_INFO("close avcontext of %d\n", ctx_id);
avcodec_close(avctx);
if (CONTEXT(s, ctx_id)->is_hwaccel) {
}
if (avctx->extradata) {
- TRACE("free context extradata\n");
+ LOG_TRACE("free context extradata\n");
av_free(avctx->extradata);
}
if (frame) {
- TRACE("free frame\n");
+ LOG_TRACE("free frame\n");
#if LIBAVUTIL_VERSION_CHECK
avcodec_free_frame(&frame);
#else
}
if (avctx) {
- TRACE("free codec context\n");
+ LOG_TRACE("free codec context\n");
av_free(avctx);
}
if (parserctx) {
- INFO("close parser context\n");
+ LOG_INFO("close parser context\n");
av_parser_close(parserctx);
}
// reset profile resource
RESET_CODEC_PROFILE();
- TRACE("leave: %s\n", __func__);
+ LOG_TRACE("leave: %s\n", __func__);
return true;
}
*
*/
-#if 0
-#ifndef _WIN32_WINNT
-#define _WIN32_WINNT 0x600
-#else
-# if _WIN32_WINNT < 0x600
-/* dxva2 needs Vista support */
-# undef _WIN32_WINNT
-# define _WIN32_WINNT 0x600
-# endif
-#endif
-#endif
-
#define DXVA2API_USE_BITFIELDS
#define COBJMACROS
#endif /* __MINGW32__ */
#include "maru_brillcodec_plugin.h"
-#include "debug_ch.h"
+#include "util/new_debug_ch.h"
/* define debug channel */
-MULTI_DEBUG_CHANNEL(qemu, dxva2);
+DECLARE_DEBUG_CHANNEL(dxva2);
MS_GUID(IID_IDirectXVideoDecoderService, 0xfc51a551, 0xd5e7, 0x11d9, 0xaf,0x55,0x00,0x05,0x4e,0x43,0xff,0x02);
LPDIRECT3D9 (WINAPI *Create9)(UINT SDKVersion);
Create9 = (void *)GetProcAddress(dxva_dev->hd3d9_dll, "Direct3DCreate9");
if (!Create9) {
- ERR("Cannot locate reference to Direct3DCreate9 ABI in DLL\n");
+ LOG_SEVERE("Cannot locate reference to Direct3DCreate9 ABI in DLL\n");
return -1;
}
LPDIRECT3D9 d3dobj;
d3dobj = Create9(D3D_SDK_VERSION);
if (!d3dobj) {
- ERR("Direct3DCreate9 failed\n");
+ LOG_SEVERE("Direct3DCreate9 failed\n");
return -1;
}
dxva_dev->d3dobj = d3dobj;
D3DCREATE_SOFTWARE_VERTEXPROCESSING |
D3DCREATE_MULTITHREADED,
&d3dpp, &d3ddev))) {
- ERR("IDirect3D9_CreateDevice failed\n");
+ LOG_SEVERE("IDirect3D9_CreateDevice failed\n");
return -1;
}
dxva_dev->d3ddev = d3ddev;
"DXVA2CreateDirect3DDeviceManager9");
if (!CreateDeviceManager9) {
- ERR("cannot load function\n");
+ LOG_SEVERE("cannot load function\n");
return -1;
}
UINT token;
IDirect3DDeviceManager9 *devmng;
if (FAILED(CreateDeviceManager9(&token, &devmng))) {
- ERR("OurDirect3DCreateDeviceManager9 failed\n");
+ LOG_SEVERE("OurDirect3DCreateDeviceManager9 failed\n");
return -1;
}
- TRACE("OurDirect3DCreateDeviceManager9 Success!\n");
+ LOG_TRACE("OurDirect3DCreateDeviceManager9 Success!\n");
dxva_dev->devmng = devmng;
- TRACE("obtained IDirect3DDeviceManager9\n");
+ LOG_TRACE("obtained IDirect3DDeviceManager9\n");
HRESULT hr = IDirect3DDeviceManager9_ResetDevice(devmng, dxva_dev->d3ddev, token);
if (FAILED(hr)) {
- ERR("IDirect3DDeviceManager9_ResetDevice failed: %08x\n", (unsigned)hr);
+ LOG_SEVERE("IDirect3DDeviceManager9_ResetDevice failed: %08x\n", (unsigned)hr);
return -1;
}
(void *)GetProcAddress(dxva_dev->hdxva2_dll, "DXVA2CreateVideoService");
if (!CreateVideoService) {
- ERR("cannot load function\n");
+ LOG_SEVERE("cannot load function\n");
return -1;
}
- TRACE("DXVA2CreateVideoService Success!\n");
+ LOG_TRACE("DXVA2CreateVideoService Success!\n");
HRESULT hr;
HANDLE hd3ddev;
hr = IDirect3DDeviceManager9_OpenDeviceHandle(dxva_dev->devmng, &hd3ddev);
if (FAILED(hr)) {
- ERR("OpenDeviceHandle failed\n");
+ LOG_SEVERE("OpenDeviceHandle failed\n");
return -1;
}
dxva_dev->hd3ddev = hd3ddev;
hr = IDirect3DDeviceManager9_GetVideoService(dxva_dev->devmng, hd3ddev,
&IID_IDirectXVideoDecoderService, &pv);
if (FAILED(hr)) {
- ERR("GetVideoService failed\n");
+ LOG_SEVERE("GetVideoService failed\n");
return -1;
}
dxva_dev->vs = pv;
if (FAILED(IDirectXVideoDecoderService_GetDecoderDeviceGuids(dxva_dev->vs,
&decdev_count,
&guid_decdev_list))) {
- ERR("IDirectXVideoDecoderService_GetDecoderDeviceGuids failed\n");
+ LOG_SEVERE("IDirectXVideoDecoderService_GetDecoderDeviceGuids failed\n");
return -1;
}
- INFO("IDirectXVideoDecoderService_GetDecoderDeviceGuids success. count=%d\n", decdev_count);
+ LOG_INFO("IDirectXVideoDecoderService_GetDecoderDeviceGuids success. count=%d\n", decdev_count);
for (i = 0; i < decdev_count; i++) {
const GUID *g = &guid_decdev_list[i];
const dxva2_mode_t *mode = Dxva2FindMode(g);
if (mode) {
- INFO("- '%s' is supported by hardware\n", mode->name);
+ LOG_INFO("- '%s' is supported by hardware\n", mode->name);
} else {
- WARN("- Unknown GUID = %08X-%04x-%04x-XXXX\n",
+ LOG_WARNING("- Unknown GUID = %08X-%04x-%04x-XXXX\n",
 (unsigned)g->Data1, g->Data2, g->Data3);
continue;
/* */
- INFO("Trying to use '%s' as input\n", mode->name);
+ LOG_INFO("Trying to use '%s' as input\n", mode->name);
UINT render_fmt_count = 0;
D3DFORMAT *render_fmt_list = NULL;
if (FAILED(IDirectXVideoDecoderService_GetDecoderRenderTargets(dxva_dev->vs, mode->guid,
&render_fmt_count,
&render_fmt_list))) {
- ERR("IDirectXVideoDecoderService_GetDecoderRenderTargets failed\n");
+ LOG_SEVERE("IDirectXVideoDecoderService_GetDecoderRenderTargets failed\n");
continue;
}
for (j = 0; j < render_fmt_count; j++) {
const D3DFORMAT f = render_fmt_list[j];
const d3d_format_t *format = D3dFindFormat(f);
- INFO("HOST supported format %d (%4.4s)\n", f, (const char*)&f);
+ LOG_INFO("HOST supported format %d (%4.4s)\n", f, (const char*)&f);
if (format) {
- INFO("%s is supported for output\n", format->name);
+ LOG_INFO("%s is supported for output\n", format->name);
} else {
- INFO("%d is supported for output (%4.4s)\n", f, (const char*)&f);
+ LOG_INFO("%d is supported for output (%4.4s)\n", f, (const char*)&f);
}
}
continue;
/* We have our solution */
- INFO("Using '%s' to decode to '%s'\n", mode->name, format->name);
+ LOG_INFO("Using '%s' to decode to '%s'\n", mode->name, format->name);
*guid_decdev = *mode->guid;
*render_fmt = format->format;
CoTaskMemFree(render_fmt_list);
codec_id = dec_ctx->codec_id;
- TRACE("DxCreateVideoDecoder id %d %dx%d\n",
+ LOG_TRACE("DxCreateVideoDecoder id %d %dx%d\n",
codec_id, width, height);
/* Allocates all surfaces needed for the decoder */
DXVA2_VideoDecoderRenderTarget,
dxva_ctx->hw_surface,
NULL))) {
- ERR("IDirectXVideoAccelerationService_CreateSurface failed\n");
+ LOG_SEVERE("IDirectXVideoAccelerationService_CreateSurface failed\n");
dxva_ctx->surface_count = 0;
return -1;
}
dxva_ctx->surface[i].is_occupied = false;
dxva_ctx->surface[i].dxva_ctx = dxva_ctx;
}
- TRACE("IDirectXVideoAccelerationService_CreateSurface succeed with %d surfaces (%dx%d)\n",
+ LOG_TRACE("IDirectXVideoAccelerationService_CreateSurface succeed with %d surfaces (%dx%d)\n",
dxva_ctx->surface_count, width, height);
/* */
NULL,
&cfg_count,
&cfg_list))) {
- ERR("IDirectXVideoDecoderService_GetDecoderConfigurations failed\n");
+ LOG_SEVERE("IDirectXVideoDecoderService_GetDecoderConfigurations failed\n");
return -1;
}
- TRACE("we got %d decoder configurations\n", cfg_count);
+ LOG_TRACE("we got %d decoder configurations\n", cfg_count);
/* Select the best decoder configuration */
int cfg_score = 0;
const DXVA2_ConfigPictureDecode *cfg = &cfg_list[i];
/* */
- TRACE("configuration[%d] ConfigBitstreamRaw %d\n",
+ LOG_TRACE("configuration[%d] ConfigBitstreamRaw %d\n",
i, cfg->ConfigBitstreamRaw);
/* */
}
CoTaskMemFree(cfg_list);
if (cfg_score <= 0) {
- ERR("Failed to find a supported decoder configuration\n");
+ LOG_SEVERE("Failed to find a supported decoder configuration\n");
return -1;
}
dxva_ctx->hw_surface,
dxva_ctx->surface_count,
&decoder))) {
- ERR("IDirectXVideoDecoderService_CreateVideoDecoder failed\n");
+ LOG_SEVERE("IDirectXVideoDecoderService_CreateVideoDecoder failed\n");
return -1;
}
dxva_ctx->decoder = decoder;
- TRACE("IDirectXVideoDecoderService_CreateVideoDecoder succeed\n");
+ LOG_TRACE("IDirectXVideoDecoderService_CreateVideoDecoder succeed\n");
return 0;
}
#if 0
static int DxResetVideoDecoder(void)
{
- ERR("DxResetVideoDecoder unimplemented\n");
+ LOG_SEVERE("DxResetVideoDecoder unimplemented\n");
return -1;
}
#endif
if (dxva_dev->hd3d9_dll == NULL || dxva_dev->hdxva2_dll == NULL) {
dxva_dev->hd3d9_dll = LoadLibrary(TEXT("D3D9.DLL"));
if (!dxva_dev->hd3d9_dll) {
- ERR("cannot load d3d9.dll\n");
+ LOG_SEVERE("cannot load d3d9.dll\n");
goto error;
}
dxva_dev->hdxva2_dll = LoadLibrary(TEXT("DXVA2.DLL"));
if (!dxva_dev->hdxva2_dll) {
- ERR("cannot load dxva2.dll\n");
+ LOG_SEVERE("cannot load dxva2.dll\n");
goto error;
}
- TRACE("DLLs loaded\n");
+ LOG_TRACE("DLLs loaded\n");
if (D3dCreateDevice() < 0) {
- ERR("Failed to create Direct3D device\n");
+ LOG_SEVERE("Failed to create Direct3D device\n");
goto error;
}
- TRACE("D3dCreateDevice succeed\n");
+ LOG_TRACE("D3dCreateDevice succeed\n");
if (D3dCreateDeviceManager() < 0) {
- ERR("D3dCreateDeviceManager failed\n");
+ LOG_SEVERE("D3dCreateDeviceManager failed\n");
goto error;
}
if (DxCreateVideoService() < 0) {
- ERR("DxCreateVideoService failed\n");
+ LOG_SEVERE("DxCreateVideoService failed\n");
goto error;
}
}
DXVAPluginContext *dxva_ctx = g_malloc0(sizeof(DXVAPluginContext));
if (DxFindVideoServiceConversion(dxva_ctx, dec_ctx->codec_id)) {
- ERR("DxFindVideoServiceConversion failed\n");
+ LOG_SEVERE("DxFindVideoServiceConversion failed\n");
return NULL;
}
dxva_ctx->thread_count = dec_ctx->thread_count;
if (DxCreateVideoDecoder(dxva_ctx, dec_ctx) < -1) {
- ERR("DxCreateVideoDecoder failed\n");
+ LOG_SEVERE("DxCreateVideoDecoder failed\n");
return NULL;
}
if (DxResetVideoDecoder())
return -1;
} else if (FAILED(hr)) {
- ERR("IDirect3DDeviceManager9_TestDevice %u\n", (unsigned)hr);
+ LOG_SEVERE("IDirect3DDeviceManager9_TestDevice %u\n", (unsigned)hr);
return -1;
}
#endif
#else
frame->buf[0] = av_buffer_create(frame->data[0], 0, dxva_release_surface, frame->opaque, 0);
if (!frame->buf[0]) {
- ERR("failed to create AVBufferRef\n");
+ LOG_SEVERE("failed to create AVBufferRef\n");
}
#endif
/* */
D3DLOCKED_RECT lock;
if (FAILED(IDirect3DSurface9_LockRect(d3d, &lock, NULL, D3DLOCK_READONLY))) {
- ERR("Failed to lock surface\n");
+ LOG_SEVERE("Failed to lock surface\n");
return;
}
av_image_fill_pointers(data, PIX_FMT_YUV420P, frame->height, dst, linesizes);
copy_nv12(data, linesizes, plane, pitch, frame->width, frame->height);
} else {
- ERR("Not supported format.(%x)\n", dxva_ctx->render_fmt);
+ LOG_SEVERE("Not supported format.(%x)\n", dxva_ctx->render_fmt);
IDirect3DSurface9_UnlockRect(d3d);
return;
}