|
#include <QtDebug> |
|
|
|
extern "C" |
|
{ |
|
#include <libavformat/avformat.h> |
|
#include <libavcodec/avcodec.h> |
|
#include <libswscale/swscale.h> |
|
} |
|
|
|
#if HAVE_VDPAU |
|
extern "C" |
|
{ |
|
#include <libavcodec/vdpau.h> |
|
} |
|
|
|
#include <vdpau/vdpau.h> |
|
#include <vdpau/vdpau_x11.h> |
|
#include "vdpau_funcs.h" |
|
#endif |
|
|
|
#include <QMutexLocker> |
|
#include <QVector> |
|
#include <QMutex> |
|
#include <QHash> |
|
|
|
#include <cinttypes> |
|
#include <mutex> |
|
|
|
#include "avdecoder.hpp" |
|
|
|
struct Packet |
|
{ |
|
explicit Packet(AVFormatContext* ctxt = nullptr) |
|
{ |
|
av_init_packet(&packet); |
|
packet.data = nullptr; |
|
packet.size = 0; |
|
if(ctxt) |
|
reset(ctxt); |
|
} |
|
|
|
Packet(Packet&& other) |
|
:packet(std::move(other.packet)) |
|
{ |
|
other.packet.data = nullptr; |
|
other.packet.size = 0; |
|
} |
|
|
|
~Packet() |
|
{ |
|
if (packet.data) |
|
av_free_packet(&packet); |
|
} |
|
|
|
bool reset(AVFormatContext* ctxt) |
|
{ |
|
if(packet.data) |
|
av_free_packet(&packet); |
|
if(av_read_frame(ctxt, &packet) < 0) |
|
{ |
|
packet.data = nullptr; |
|
packet.size = 0; |
|
return false; |
|
} |
|
return true; |
|
} |
|
|
|
void reset() |
|
{ |
|
if(packet.data) |
|
av_free_packet(&packet); |
|
packet.data = nullptr; |
|
packet.size = 0; |
|
} |
|
|
|
AVPacket packet; |
|
}; |
|
|
|
// Internal (pimpl) state for AVDecoder.
struct AVDecoder_private
{
    bool isOpen;       // set only after open() fully succeeded
    QString openPath;  // last successfully opened path; rewind() re-opens it if seeking fails

    QSharedPointer<AVFormatContext> lavfCtx;  // demuxer context
    QSharedPointer<AVCodecContext> lavcCtx;   // video decoder context
    SwsContext* swsCtx;                       // cached software scaler for RGB24 conversion
    QSharedPointer<quint8> extradata;         // padded copy of the stream's codec extradata
    AVStream *vstream;                        // selected video stream (owned by lavfCtx)
    QSharedPointer<AVFrame> frame;            // reusable decode target

    Packet curPacket;  // packet currently being consumed by decodeFrame()
    qint64 offset;     // bytes of curPacket already fed to the decoder

    quint64 frameNum;  // frames emitted since open()/rewind()

#if HAVE_VDPAU
    QSharedPointer<VDPAUFuncs> vdp;  // resolved VDPAU entry points

    QSharedPointer<AVVDPAUContext> hwctx;  // hwaccel context handed to libavcodec

    // Guards videoSurfaces; locked in GetBuffer()/ReleaseBuffer().
    QMutex vidSurfLock;
    QList<QSharedPointer<AVVdpFrame> > videoSurfaces;  // pool of VDPAU video surfaces

    VdpDecoder decoder;  // VDPAU decoder handle; VDP_INVALID_HANDLE when unset
#endif
};
|
|
|
// Serializes AVDecoder::open() across all decoder instances. Recursive,
// presumably so a thread already holding it may re-enter open() — verify.
QMutex openLock(QMutex::Recursive);
|
|
|
static int ff_lockmgr(void **mutex, enum AVLockOp op) |
|
{ |
|
QMutex **m = (QMutex**)mutex; |
|
|
|
switch(op) |
|
{ |
|
case AV_LOCK_CREATE: |
|
*m = new QMutex(); |
|
break; |
|
case AV_LOCK_OBTAIN: |
|
(*m)->lock(); |
|
break; |
|
case AV_LOCK_RELEASE: |
|
(*m)->unlock(); |
|
break; |
|
case AV_LOCK_DESTROY: |
|
delete (*m); |
|
break; |
|
} |
|
|
|
return 0; |
|
} |
|
|
|
// FFmpeg log callback: forward to the default logger, then flush both
// standard streams so pending log output is not lost (e.g. on a crash).
static void ff_log(void *ptr, int level, const char *fmt, va_list vl)
{
    av_log_default_callback(ptr, level, fmt, vl);
    fflush(stderr);
    fflush(stdout);
}
|
|
|
static std::once_flag avFormatInitFlag;

// Process-wide FFmpeg initialization. Safe to call from any thread and
// any number of times; the body runs exactly once.
void ff_init_av()
{
    std::call_once(avFormatInitFlag, []()
    {
        // Install our lock manager and log callback before registering
        // the demuxers/decoders.
        av_lockmgr_register(&ff_lockmgr);
        av_log_set_callback(&ff_log);

        av_register_all();
        avcodec_register_all();
    });
}
|
|
|
// Construct a decoder: sets up the private state, performs one-time
// global FFmpeg initialization, and allocates the reusable decode frame.
AVDecoder::AVDecoder(QObject *parent)
    :QObject(parent)
{
    p = new AVDecoder_private;
    p->isOpen = false;
    p->swsCtx = nullptr;

    ff_init_av();

#if HAVE_VDPAU
    p->decoder = VDP_INVALID_HANDLE;
    p->vdp = VDPAUFuncs::get();
#endif

    // Shared decode target; released with av_frame_free() when the last
    // owner drops it.
    p->frame = QSharedPointer<AVFrame>(av_frame_alloc(), [](AVFrame *f)
    {
        av_frame_free(&f);
    });
}
|
|
|
// Release resources in AVDecoder_private that are not managed by shared
// pointers: the sws context and the VDPAU decoder/surface pool.
static void freePrivData(AVDecoder_private *p)
{
    if(p->swsCtx)
    {
        sws_freeContext(p->swsCtx);
        // Must be cleared: decodeFrame() passes swsCtx back into
        // sws_getCachedContext(), which would dereference the freed
        // context after open() calls this function (use-after-free).
        p->swsCtx = nullptr;
    }

#if HAVE_VDPAU
    if(p->decoder != VDP_INVALID_HANDLE)
    {
        p->vdp->vdpDecoderDestroy(p->decoder);
        p->decoder = VDP_INVALID_HANDLE;
    }

    // Dropping the pool destroys each surface via its deleter.
    p->videoSurfaces.clear();
#endif
}
|
|
|
AVDecoder::~AVDecoder()
{
    // Release the decoder context before the demuxer context, then the
    // remaining raw resources (sws context, VDPAU handles), then the
    // private data itself.
    p->lavcCtx.clear();
    p->lavfCtx.clear();
    freePrivData(p);
    delete p;
}
|
|
|
#if HAVE_VDPAU |
|
// Map a libavcodec codec id to the VDPAU decoder profile and chroma type
// to use for it. Codecs without a mapping get profile 0 and chroma 0.
//
// NOTE(review): VDP_DECODER_PROFILE_MPEG1 is defined as 0 in vdpau.h, so
// a caller treating profile == 0 as "unsupported" (see the !profile check
// in GetFormat) will also reject MPEG1 — confirm whether that is intended;
// an explicit bool return would be unambiguous.
void ReadVdpFormatOf(AVCodecID codec, VdpDecoderProfile &vdp_decoder_profile, VdpChromaType &vdp_chroma_type)
{
    switch (codec)
    {
        case AV_CODEC_ID_MPEG1VIDEO:
            vdp_decoder_profile = VDP_DECODER_PROFILE_MPEG1;
            vdp_chroma_type = VDP_CHROMA_TYPE_420;
            break;
        case AV_CODEC_ID_MPEG2VIDEO:
            vdp_decoder_profile = VDP_DECODER_PROFILE_MPEG2_MAIN;
            vdp_chroma_type = VDP_CHROMA_TYPE_420;
            break;
        case AV_CODEC_ID_H264:
            vdp_decoder_profile = VDP_DECODER_PROFILE_H264_HIGH;
            vdp_chroma_type = VDP_CHROMA_TYPE_420;
            break;
        case AV_CODEC_ID_WMV3:
            vdp_decoder_profile = VDP_DECODER_PROFILE_VC1_MAIN;
            vdp_chroma_type = VDP_CHROMA_TYPE_420;
            break;
        case AV_CODEC_ID_VC1:
            vdp_decoder_profile = VDP_DECODER_PROFILE_VC1_ADVANCED;
            vdp_chroma_type = VDP_CHROMA_TYPE_420;
            break;
        case AV_CODEC_ID_MPEG4:
            vdp_decoder_profile = VDP_DECODER_PROFILE_MPEG4_PART2_ASP;
            vdp_chroma_type = VDP_CHROMA_TYPE_420;
            break;
        default:
            // Unsupported codec sentinel (but see the NOTE above).
            vdp_decoder_profile = 0;
            vdp_chroma_type = 0;
            break;
    }
}
|
|
|
static void ReleaseBuffer(void *opaque, quint8 *data) |
|
{ |
|
AVDecoder_private *p = (AVDecoder_private*)opaque; |
|
|
|
QMutexLocker lock(&p->vidSurfLock); |
|
|
|
VdpVideoSurface surf = (VdpVideoSurface)(quintptr)data; |
|
|
|
for(QSharedPointer<AVVdpFrame> f: p->videoSurfaces) |
|
{ |
|
if(f->getSurf() == surf) |
|
{ |
|
f->setFFState(0); |
|
return; |
|
} |
|
} |
|
|
|
qDebug() << "Tried to release unknown surface!"; |
|
} |
|
|
|
// libavcodec get_buffer2 callback for VDPAU frames: hand the decoder a
// video surface from the pool (creating a new one when none is free) and
// wrap it in an AVBufferRef so ReleaseBuffer() recycles it afterwards.
// Returns 0 on success, -1 on failure.
static int GetBuffer(AVCodecContext *avctx, AVFrame *pic, int flags)
{
    Q_UNUSED(flags);

    AVDecoder_private *p = (AVDecoder_private*)avctx->opaque;
    QSharedPointer<VDPAUFuncs> vdp = p->vdp;

    QMutexLocker lock(&p->vidSurfLock);

    // Find a surface that neither our side nor FFmpeg currently uses.
    // Const reference avoids a QSharedPointer refcount bump per element.
    QSharedPointer<AVVdpFrame> videoFrame;
    for(const QSharedPointer<AVVdpFrame> &f: p->videoSurfaces)
    {
        if(f->getState() == 0 && f->getFFState() == 0)
        {
            videoFrame = f;
            break;
        }
    }

    if(videoFrame.isNull())
    {
        // No free surface: create one sized for the coded dimensions.
        VdpDecoderProfile profile;
        VdpChromaType chroma;
        ReadVdpFormatOf(avctx->codec_id, profile, chroma);

        VdpVideoSurface surf;

        VdpStatus err = p->vdp->vdpVideoSurfaceCreate(p->vdp->getDevice(),
                                                      chroma,
                                                      avctx->coded_width,
                                                      avctx->coded_height,
                                                      &surf);

        if(err != VDP_STATUS_OK)
        {
            qDebug() << "vdpVideoSurfaceCreate failed:" << p->vdp->vdpGetErrorString(err);
            return -1;
        }

        // The deleter captures vdp by value so the function table stays
        // alive at least as long as the surface it has to destroy.
        videoFrame = QSharedPointer<AVVdpFrame>(new AVVdpFrame(surf), [vdp](AVVdpFrame *f)
        {
            vdp->vdpVideoSurfaceDestroy(f->getSurf());
            delete f;
        });

        p->videoSurfaces << videoFrame;
    }

    videoFrame->setFFState(1);

    // No CPU-addressable planes: the surface handle itself is stored in
    // data[0] and data[3]; decodeFrame() reads it back from data[3].
    pic->data[1] = pic->data[2] = NULL;
    pic->data[0] = (quint8*)(quintptr)videoFrame->getSurf();
    pic->data[3] = (quint8*)(quintptr)videoFrame->getSurf();
    pic->linesize[0] = pic->linesize[1] = pic->linesize[2] = 0;

    // Zero-sized buffer: exists only so ReleaseBuffer() is invoked with
    // the surface handle when the frame is unreferenced.
    AVBufferRef *buf = av_buffer_create(pic->data[3], 0, &ReleaseBuffer, (void*)p, 0);

    if(!buf)
    {
        qDebug() << "Error creating AVBuffer";
        return -1;
    }

    pic->buf[0] = buf;
    pic->reordered_opaque = avctx->reordered_opaque;

    return 0;
}
|
|
|
// libavcodec get_format callback: choose VDPAU hardware decoding when the
// driver supports this codec at the coded size, otherwise defer to
// avcodec_default_get_format() (software decoding).
static AVPixelFormat GetFormat(AVCodecContext *avctx, const AVPixelFormat *fmt)
{
    // have_vdpau is defined elsewhere in the project; without VDPAU or
    // without known dimensions a hardware decoder cannot be set up.
    if(!have_vdpau || avctx->coded_width == 0 || avctx->coded_height == 0)
        return avcodec_default_get_format(avctx, fmt);

    AVDecoder_private *p = (AVDecoder_private*)avctx->opaque;

    // fmt is an AV_PIX_FMT_NONE-terminated list of candidate formats.
    const AVPixelFormat *cur = fmt - 1;

    while(*(++cur) != AV_PIX_FMT_NONE)
    {
        if(*cur != AV_PIX_FMT_VDPAU)
            continue;

        VdpDecoderProfile profile;
        VdpChromaType chroma;

        ReadVdpFormatOf(avctx->codec_id, profile, chroma);

        // NOTE(review): VDP_DECODER_PROFILE_MPEG1 is 0, so this check also
        // rejects MPEG1 streams — confirm whether that is intended.
        if(!profile)
            continue;

        VdpBool is_supported = 0;
        quint32 max_level, max_macroblocks, max_width, max_height;

        VdpStatus err = p->vdp->vdpDecoderQueryCapabilities(p->vdp->getDevice(), profile, &is_supported, &max_level, &max_macroblocks, &max_width, &max_height);
        if(err != VDP_STATUS_OK)
        {
            qDebug() << "vdpDecoderQueryCapabilities failed:" << p->vdp->vdpGetErrorString(err);
            continue;
        }

        // NOTE(review): is_supported is queried but never checked — verify
        // whether unsupported profiles should be skipped here.
        if(max_width < (quint32)avctx->coded_width || max_height < (quint32)avctx->coded_height)
        {
            qDebug() << "Frame size too big for VDPAU";
            continue;
        }

        // Replace any decoder left over from a previous negotiation.
        if(p->decoder != VDP_INVALID_HANDLE)
        {
            p->vdp->vdpDecoderDestroy(p->decoder);
            p->decoder = VDP_INVALID_HANDLE;
        }

        // 16 is the max_references value passed to the driver.
        err = p->vdp->vdpDecoderCreate(p->vdp->getDevice(), profile, avctx->coded_width, avctx->coded_height, 16, &p->decoder);
        if(err != VDP_STATUS_OK)
        {
            qDebug() << "Creating test decoder failed:" << p->vdp->vdpGetErrorString(err);
            continue;
        }

        // Hand libavcodec the hwaccel context with our render callback.
        p->hwctx = QSharedPointer<AVVDPAUContext>(av_vdpau_alloc_context(), [](AVVDPAUContext *c)
        {
            av_free(c);
        });

        p->hwctx->render = p->vdp->vdpDecoderRender;
        p->hwctx->decoder = p->decoder;

        avctx->get_buffer2 = &GetBuffer;
        avctx->hwaccel_context = (void*)p->hwctx.data();

        // Restrict to one slice-decoding thread for the hardware path.
        // NOTE(review): this writes p->lavcCtx rather than avctx — they
        // are presumably the same context here; verify.
        p->lavcCtx->thread_count = 1;
        p->lavcCtx->thread_type = FF_THREAD_SLICE;

        return AV_PIX_FMT_VDPAU;
    }

    return avcodec_default_get_format(avctx, fmt);
}
|
#endif |
|
|
|
bool AVDecoder::open(const QString &path) |
|
{ |
|
QMutexLocker locker(&openLock); |
|
|
|
AVFormatContext *lavfCtx = nullptr; |
|
|
|
p->isOpen = false; |
|
p->lavcCtx.reset(); |
|
p->lavfCtx.reset(); |
|
|
|
freePrivData(p); |
|
|
|
#if HAVE_VDPAU |
|
if(p->vdp.isNull()) |
|
{ |
|
qDebug() << "VDPAU funcs not loaded!"; |
|
return false; |
|
} |
|
#endif |
|
|
|
if(path.isEmpty()) |
|
{ |
|
qDebug() << "Tried to open with empty path"; |
|
return false; |
|
} |
|
|
|
if(avformat_open_input(&lavfCtx, path.toUtf8().constData(), nullptr, nullptr) != 0) |
|
{ |
|
qDebug() << "avformat_open_input failed"; |
|
return false; |
|
} |
|
|
|
p->lavfCtx = QSharedPointer<AVFormatContext>(lavfCtx, &avformat_free_context); |
|
|
|
if(avformat_find_stream_info(lavfCtx, nullptr) < 0) |
|
{ |
|
qDebug() << "avformat_find_stream_info failed"; |
|
return false; |
|
} |
|
|
|
p->vstream = nullptr; |
|
|
|
for(unsigned int i = 0; i < lavfCtx->nb_streams; ++i) |
|
{ |
|
if(lavfCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO) |
|
{ |
|
p->vstream = lavfCtx->streams[i]; |
|
break; |
|
} |
|
} |
|
|
|
if(!p->vstream) |
|
{ |
|
p->lavfCtx.reset(); |
|
qDebug() << "No video stream found"; |
|
return false; |
|
} |
|
|
|
AVCodec *codec = avcodec_find_decoder(p->vstream->codec->codec_id); |
|
|
|
if(!codec) |
|
{ |
|
p->lavfCtx.reset(); |
|
qDebug() << "No codec found"; |
|
return false; |
|
} |
|
|
|
p->lavcCtx = QSharedPointer<AVCodecContext>(avcodec_alloc_context3(codec), [](AVCodecContext* c) |
|
{ |
|
avcodec_close(c); |
|
av_free(c); |
|
}); |
|
|
|
p->extradata = QSharedPointer<quint8>((quint8*)av_mallocz(p->vstream->codec->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE), &av_free); |
|
memcpy(p->extradata.data(), p->vstream->codec->extradata, p->vstream->codec->extradata_size); |
|
|
|
p->lavcCtx->extradata = p->extradata.data(); |
|
p->lavcCtx->extradata_size = p->vstream->codec->extradata_size; |
|
|
|
p->lavcCtx->workaround_bugs = FF_BUG_AUTODETECT; |
|
|
|
#if HAVE_VDPAU |
|
p->lavcCtx->opaque = (void*)p; |
|
p->lavcCtx->get_format = &GetFormat; |
|
#endif |
|
|
|
if(avcodec_open2(p->lavcCtx.data(), codec, nullptr) < 0) |
|
{ |
|
p->lavcCtx.reset(); |
|
p->lavfCtx.reset(); |
|
qDebug() << "avcodec_open2 failed"; |
|
return false; |
|
} |
|
|
|
qDebug() << "Successfully opened" << path; |
|
|
|
p->offset = 0; |
|
p->curPacket.reset(); |
|
p->isOpen = true; |
|
p->openPath = path; |
|
p->frameNum = 0; |
|
return true; |
|
} |
|
|
|
// Seek back to the beginning of the stream: try a backward seek, then an
// any-frame seek, and finally fall back to fully re-opening the file.
// Emits rewindDone() when the decoder is open (no-op otherwise).
void AVDecoder::rewind()
{
    if(!p->isOpen)
        return;

    int res = av_seek_frame(p->lavfCtx.data(), p->vstream->index, 0, AVSEEK_FLAG_BACKWARD);

    if(res < 0)
        res = av_seek_frame(p->lavfCtx.data(), p->vstream->index, 0, AVSEEK_FLAG_ANY);

    if(res < 0)
        open(p->openPath);
    else
    {
        avcodec_flush_buffers(p->lavcCtx.data());

#if HAVE_VDPAU
        // Recreate the VDPAU decoder with its current parameters,
        // presumably to reset the driver-side reference state after the
        // seek — TODO confirm this is required.
        if(p->decoder != VDP_INVALID_HANDLE)
        {
            VdpDecoderProfile profile;
            quint32 width, height;
            VdpStatus err = p->vdp->vdpDecoderGetParameters(p->decoder, &profile, &width, &height);

            if(err != VDP_STATUS_OK)
            {
                qDebug() << "Failed getting decoder parameters:" << p->vdp->vdpGetErrorString(err);
            }
            else
            {
                p->vdp->vdpDecoderDestroy(p->decoder);
                p->decoder = VDP_INVALID_HANDLE;

                err = p->vdp->vdpDecoderCreate(p->vdp->getDevice(), profile, width, height, 16, &p->decoder);
                if(err != VDP_STATUS_OK)
                {
                    qDebug() << "Failed creating new decoder:" << p->vdp->vdpGetErrorString(err);
                }
            }
        }
#endif

        // Discard any partially consumed packet.
        p->curPacket.reset();
        p->offset = 0;
    }

    emit rewindDone();
}
|
|
|
// Decode and emit the next video frame.
//
// Feeds the decoder from curPacket (refilling from the demuxer when the
// packet is consumed), then emits either decodedFrameVA() (VDPAU surface)
// or decodedFrame() (RGB24 bytes). Emits decodeDone() at end of stream
// and decodeError() on failure. Returns false on error.
bool AVDecoder::decodeFrame()
{
    if(!p->isOpen)
    {
        qDebug() << "Tried to decodeFrame without open decoder";
        return false;
    }

    bool ok = true;

    // Current packet fully consumed: read the next packet that belongs to
    // the selected video stream. ok becomes false at end of stream.
    if(p->offset >= p->curPacket.packet.size)
    {
        p->offset = 0;
        ok = p->curPacket.reset(p->lavfCtx.data());

        while(ok && p->curPacket.packet.stream_index != p->vstream->index)
        {
            ok = p->curPacket.reset(p->lavfCtx.data());
        }
    }

    AVPacket packetToSend;
    av_init_packet(&packetToSend);

    if(ok)
    {
        // Send the not-yet-consumed tail of the current packet.
        packetToSend.data = p->curPacket.packet.data + p->offset;
        packetToSend.size = p->curPacket.packet.size - p->offset;
    }
    else
    {
        // End of stream: an empty packet flushes delayed frames.
        packetToSend.data = nullptr;
        packetToSend.size = 0;
    }

    int gotFrame = 0;
    int procLen = avcodec_decode_video2(p->lavcCtx.data(), p->frame.data(), &gotFrame, &packetToSend);

    if(procLen < 0)
    {
        qDebug() << "Error decoding frame";
        emit decodeError("avcodec_decode_video2 failed");
        return false;
    }

    // Advance past the bytes the decoder consumed.
    p->offset += procLen;

    if(!ok && !gotFrame)
    {
        // Flushed and nothing left: end of stream.
        emit decodeDone();
    }
#if HAVE_VDPAU
    else if(gotFrame && p->frame.data()->format == AV_PIX_FMT_VDPAU)
    {
        // Hardware path: the surface handle was stored in data[3] by
        // GetBuffer(); find the matching pool entry.
        VdpVideoSurface surf = (VdpVideoSurface)(quintptr)p->frame->data[3];

        QSharedPointer<AVVdpFrame> res;
        for(QSharedPointer<AVVdpFrame> f: p->videoSurfaces)
        {
            if(f->getSurf() == surf)
            {
                res = f;
                break;
            }
        }

        if(res.isNull())
        {
            qDebug() << "Decoded surface in unknown!";
            return false;
        }

        // Mark the surface busy on our side until the receiver is done.
        res->setState(1);

        emit sendingDecodedFrame();
        emit decodedFrameVA(QSharedPointer<AVVdpFrameHolder>(new AVVdpFrameHolder(res)),
                            p->lavcCtx->coded_width,
                            p->lavcCtx->coded_height,
                            p->lavcCtx->width,
                            p->lavcCtx->height);

        p->frameNum += 1;
    }
#endif
    else if(gotFrame)
    {
        // Software path: convert the decoded frame to RGB24.
        p->swsCtx = sws_getCachedContext(p->swsCtx,
                                         p->frame->width,
                                         p->frame->height,
                                         (AVPixelFormat)p->frame->format,
                                         p->frame->width,
                                         p->frame->height,
                                         AV_PIX_FMT_RGB24,
                                         SWS_BILINEAR,
                                         nullptr,
                                         nullptr,
                                         nullptr);

        if(!p->swsCtx)
        {
            qDebug() << "Failed getting sws context!";
            emit decodeError("sws_getCachedContext failed");
            return false;
        }

        // Tightly packed RGB24 output buffer (3 bytes per pixel).
        QByteArray dst(p->frame->width * p->frame->height * 3, '\0');
        char *datas[] = { dst.data(), nullptr };
        int strides[] = { p->frame->width * 3, 0 };

        int res = sws_scale(p->swsCtx,
                            p->frame->data,
                            p->frame->linesize,
                            0,
                            p->frame->height,
                            (uint8_t**)datas,
                            strides);

        // sws_scale returns the number of output rows written.
        if(res != p->frame->height)
        {
            qDebug() << "Scaling frame failed";
            emit decodeError("sws_scale failed");
            return false;
        }

        emit sendingDecodedFrame();
        emit decodedFrame(dst, p->frame->width, p->frame->height);

        p->frameNum += 1;
    }
    else
    {
        // Input consumed but no frame ready yet (decoder delay): retry.
        // NOTE(review): unbounded recursion — a long run of packets that
        // produce no frame grows the stack; consider a loop instead.
        return decodeFrame();
    }

    return true;
}