如果想要用 OpenCV 接收 IP Cam 的串流來源,其中一種方法是透過 FFmpeg 的 Library 實現。
首先電腦必須安裝 FFmpeg。
以下是 Sample Code:
#include "stdafx.h"
#include <iostream>
#include <opencv2/highgui/highgui.hpp>
extern "C" {
#include <libavcodec/avcodec.h>
#include <libavdevice/avdevice.h>
#include <libavformat/avformat.h>
#include <libavutil/opt.h>
#include <libswscale/swscale.h>
}
#pragma comment(lib, "avcodec.lib")
#pragma comment(lib, "avdevice.lib")
#pragma comment(lib, "avfilter.lib")
#pragma comment(lib, "avformat.lib")
#pragma comment(lib, "avutil.lib")
#pragma comment(lib, "swscale.lib")
int _tmain(int argc, _TCHAR* argv[])
{
av_register_all();//註冊muxers/demuxers
avdevice_register_all();
avcodec_register_all();//註冊codec與硬體加速器
avformat_network_init();
const char *filenameSrc = "rtsp://admin:pass@192.168.10.211/axis-media/media.amp"; //Axis
AVCodecContext *pCodecCtx;
AVFormatContext *pFormatCtx;
AVCodec *pCodec;
AVFrame *pFrame;
AVFrame *pFrameRGB;
pFormatCtx = avformat_alloc_context();
if ( pFormatCtx == NULL )
return -8;
//打開檔案,建立AVFormatContext以供後續decode使用
if ( avformat_open_input( &pFormatCtx, filenameSrc, NULL, NULL ) != 0 ) {
std::cout << "Open File Error 12" << std::endl;
return -12;
}
//判斷檔案內是否有stream相關資訊
if ( av_find_stream_info( pFormatCtx ) < 0 ) {
std::cout << "Get Stream Information Error 13" << std::endl;
av_close_input_file( pFormatCtx );
pFormatCtx = NULL;
return -13;
}
av_dump_format( pFormatCtx, 0, filenameSrc, 0 );
int video_stream_index = -1;
//顯示此媒體有幾個stream
for ( int i = 0; i < pFormatCtx->nb_streams; i++ ) {
//判斷此stream是否屬於video
if ( pFormatCtx->streams[i]->codec->coder_type == AVMEDIA_TYPE_VIDEO ) {
video_stream_index = i;
break;
}
}
//沒有找到可用的video stream
if ( video_stream_index < 0 ) {
std::cout << "Video stream was not found Error 14" << std::endl;
av_close_input_file( pFormatCtx );
pFormatCtx = NULL;
return -14;
}
//根據codec id找到對應的codec
pCodecCtx = pFormatCtx->streams[video_stream_index]->codec;
pCodec = avcodec_find_decoder( pCodecCtx->codec_id );
if ( pCodec == NULL ) {
std::cout << "codec not found Error 15" << std::endl;
return -15;
}
if (avcodec_open2( pCodecCtx, pCodec, NULL ) < 0) {
std::cout << "Open Codec Error 16" << std::endl;
return -16;
}
//配置一個AVFrame,準備用來放置待會解開的影像資料
pFrame = avcodec_alloc_frame();
pFrameRGB = avcodec_alloc_frame();
AVPixelFormat pFormat = AV_PIX_FMT_RGB24 ;
int numBytes = avpicture_get_size( pFormat, pCodecCtx->width, pCodecCtx->height );
uint8_t *buffer = (uint8_t *)av_malloc( numBytes * sizeof(uint8_t) );
avpicture_fill( (AVPicture *)pFrameRGB, buffer, pFormat, pCodecCtx->width, pCodecCtx->height );
int y_size = pCodecCtx->width * pCodecCtx->height;
AVPacket *packet = (AVPacket *)malloc(sizeof(AVPacket));
av_new_packet( packet, y_size );
int res;
int frameFinished;
while ( res = av_read_frame( pFormatCtx, packet ) >= 0 )
{
if ( packet->stream_index == video_stream_index ) {
avcodec_decode_video2( pCodecCtx, pFrame, &frameFinished, packet );
if (frameFinished) {
struct SwsContext *img_convert_ctx;
img_convert_ctx = sws_getCachedContext( NULL,
pCodecCtx->width,
pCodecCtx->height,
pCodecCtx->pix_fmt,
pCodecCtx->width,
pCodecCtx->height,
AV_PIX_FMT_BGR24,
SWS_BICUBIC,
NULL,
NULL,
NULL );
sws_scale( img_convert_ctx,
((AVPicture*)pFrame)->data,
((AVPicture*)pFrame)->linesize,
0,
pCodecCtx->height,
((AVPicture *)pFrameRGB)->data,
((AVPicture *)pFrameRGB)->linesize );
cv::Mat img( pFrame->height, pFrame->width, CV_8UC3, pFrameRGB->data[0] );
cv::imshow( "Display" , img );
cv::waitKey( 30 );
av_free_packet( packet );
sws_freeContext( img_convert_ctx );
}
}
}
av_free_packet( packet );
avcodec_close( pCodecCtx );
av_free( pFrame );
av_free( pFrameRGB );
avformat_close_input( &pFormatCtx );
return ( EXIT_SUCCESS );
}