Skip to content

Instantly share code, notes, and snippets.

Embed
What would you like to do?
diff --git a/configure b/configure
index 080d93a129..86bdb73dc7 100755
--- a/configure
+++ b/configure
@@ -2681,6 +2681,9 @@ bink_decoder_select="blockdsp hpeldsp"
binkaudio_dct_decoder_select="mdct rdft dct sinewin wma_freqs"
binkaudio_rdft_decoder_select="mdct rdft sinewin wma_freqs"
cavs_decoder_select="blockdsp golomb h264chroma idctdsp qpeldsp videodsp"
+ccaption_raw_608_decoder_select="ccaption_decoder"
clearvideo_decoder_select="idctdsp"
cllc_decoder_select="bswapdsp"
comfortnoise_encoder_select="lpc"
diff --git a/doc/decoders.texi b/doc/decoders.texi
index 0c5a39bc9c..2f98138b3b 100644
--- a/doc/decoders.texi
+++ b/doc/decoders.texi
@@ -252,6 +252,68 @@ Enabled by default.
@end table
+@section cc_dec_raw_608
+
+Closed Caption decoder.
+Decodes "raw 608 byte pairs" closed captions.
+
+@subsection Options
+
+@table @option
+@item real_time
+Specifies if the decoder should output "the current screen"
+after each new character appears (in general, with closed
+captions, one new character can appear per frame).
+Default is false, decoder outputs "the current screen"
+only when a line feed occurs.
+
+@item rollup_override_line_count
+Specifies the number of lines to output (if the closed
+caption is using rollup, which is common).
+Typically the stream itself designates how many lines of
+closed caption material to display on the screen.
+This is typically more than one and means some lines are
+repeated in the output, since output occurs with each carriage
+return.
+Default is "0" (starts with rollup and line count 2,
+then changes to whatever the closed caption stream
+later specifies).
+If you want a line output (no duplication), set this value to "1".
+
+@end table
+
+
+@section cc_dec
+
+Closed Caption decoder.
+
+Decodes "608 over 708" closed captions.
+
+@subsection Options
+
+@table @option
+@item real_time
+Specifies if the decoder should output "the current screen"
+after each new character appears (in general, with closed
+captions, one new character can appear per frame).
+Default is false, decoder outputs "the current screen"
+only when a line feed occurs.
+
+@item rollup_override_line_count
+Specifies the number of lines to output (if the closed
+caption is using rollup, which is common).
+Typically the stream itself designates how many lines of
+closed caption material to display on the screen.
+This is typically more than one and means some lines are
+repeated in the output, since output occurs with each carriage
+return.
+Default is "0" (starts with rollup and line count 2,
+then changes to whatever the closed caption stream
+later specifies).
+If you want a line output (no duplication), set this value to "1".
+
+@end table
+
@section dvbsub
@subsection Options
diff --git a/doc/indevs.texi b/doc/indevs.texi
index 6f5afaf344..71706133f5 100644
--- a/doc/indevs.texi
+++ b/doc/indevs.texi
@@ -452,8 +452,12 @@ The input name should be in the format:
@var{TYPE}=@var{NAME}[:@var{TYPE}=@var{NAME}]
@end example
-where @var{TYPE} can be either @var{audio} or @var{video},
-and @var{NAME} is the device's name or alternative name..
+where @var{TYPE} can be either @var{audio}, @var{video}, or @var{closed_caption},
+and @var{NAME} is the device's name or alternative name.
+
+@var{closed_caption} devices must advertise format VBI and have an intermediate
+filter available to convert from VBI to raw EIA 608 closed caption
+format byte pairs.
@subsection Options
@@ -615,6 +619,13 @@ Open video device @var{Camera} and audio device @var{Microphone}:
$ ffmpeg -f dshow -i video="Camera":audio="Microphone"
@end example
+@item
+Open video device @var{Camera}, closed caption device @var{Camera},
+and audio device @var{Microphone}:
+@example
+$ ffmpeg -f dshow -i video="Camera":audio="Microphone":closed_caption="Camera"
+@end example
+
@item
Print the list of supported options in selected device and exit:
@example
diff --git a/libavcodec/Makefile b/libavcodec/Makefile
index 28076c2c83..006eb40107 100644
--- a/libavcodec/Makefile
+++ b/libavcodec/Makefile
@@ -246,6 +246,7 @@ OBJS-$(CONFIG_C93_DECODER) += c93.o
OBJS-$(CONFIG_CAVS_DECODER) += cavs.o cavsdec.o cavsdsp.o \
cavsdata.o
OBJS-$(CONFIG_CCAPTION_DECODER) += ccaption_dec.o ass.o
+OBJS-$(CONFIG_CCAPTION_RAW_608_DECODER) += ccaption_dec.o ass.o
OBJS-$(CONFIG_CDGRAPHICS_DECODER) += cdgraphics.o
OBJS-$(CONFIG_CDTOONS_DECODER) += cdtoons.o
OBJS-$(CONFIG_CDXL_DECODER) += cdxl.o
@@ -463,7 +464,6 @@ OBJS-$(CONFIG_MP3ON4FLOAT_DECODER) += mpegaudiodec_float.o mpeg4audio.o
OBJS-$(CONFIG_MPC7_DECODER) += mpc7.o mpc.o
OBJS-$(CONFIG_MPC8_DECODER) += mpc8.o mpc.o
OBJS-$(CONFIG_MPEGVIDEO_DECODER) += mpeg12dec.o mpeg12.o mpeg12data.o
-OBJS-$(CONFIG_MPEG1VIDEO_DECODER) += mpeg12dec.o mpeg12.o mpeg12data.o
OBJS-$(CONFIG_MPEG1VIDEO_ENCODER) += mpeg12enc.o mpeg12.o
OBJS-$(CONFIG_MPEG1_CUVID_DECODER) += cuviddec.o
OBJS-$(CONFIG_MPEG1_V4L2M2M_DECODER) += v4l2_m2m_dec.o
diff --git a/libavcodec/allcodecs.c b/libavcodec/allcodecs.c
index 54d40ebdbc..b78f58cb4c 100644
--- a/libavcodec/allcodecs.c
+++ b/libavcodec/allcodecs.c
@@ -645,6 +645,7 @@ extern AVCodec ff_ssa_decoder;
extern AVCodec ff_ass_encoder;
extern AVCodec ff_ass_decoder;
extern AVCodec ff_ccaption_decoder;
+extern AVCodec ff_ccaption_raw_608_decoder;
extern AVCodec ff_dvbsub_encoder;
extern AVCodec ff_dvbsub_decoder;
extern AVCodec ff_dvdsub_encoder;
diff --git a/libavcodec/ccaption_dec.c b/libavcodec/ccaption_dec.c
index bf3563a0bc..e54d01484c 100644
--- a/libavcodec/ccaption_dec.c
+++ b/libavcodec/ccaption_dec.c
@@ -238,6 +238,7 @@ typedef struct CCaptionSubContext {
AVBPrint buffer;
int buffer_changed;
int rollup;
+ int rollup_override_line_count;
enum cc_mode mode;
int64_t start_time;
/* visible screen time */
@@ -261,7 +262,7 @@ static av_cold int init_decoder(AVCodecContext *avctx)
av_bprint_init(&ctx->buffer, 0, AV_BPRINT_SIZE_UNLIMITED);
/* taking by default roll up to 2 */
ctx->mode = CCMODE_ROLLUP;
- ctx->rollup = 2;
+ ctx->rollup = ctx->rollup_override_line_count ? ctx->rollup_override_line_count : 2;
ctx->cursor_row = 10;
ret = ff_ass_subtitle_header(avctx, "Monospace",
ASS_DEFAULT_FONT_SIZE,
@@ -341,43 +342,53 @@ static void write_char(CCaptionSubContext *ctx, struct Screen *screen, char ch)
}
/**
- * This function after validating parity bit, also remove it from data pair.
- * The first byte doesn't pass parity, we replace it with a solid blank
- * and process the pair.
- * If the second byte doesn't pass parity, it returns INVALIDDATA
- * user can ignore the whole pair and pass the other pair.
+ * This function accepts a byte pair [EIA 608 first byte, EIA 608 second byte]
+ * checks both for parity and strips parity on success.
+ * If the first byte doesn't pass parity, replace it with a solid blank
+ * and process the pair anyway.
+ * Returns failure for parity failure or "no data" (padding bytes).
+ */
+static int validate_eia_608_byte_pair(uint8_t *cc_data_pair) {
+ if (!av_parity(cc_data_pair[1])) {
+ return AVERROR_INVALIDDATA;
+ }
+ if (!av_parity(cc_data_pair[0])) {
+ cc_data_pair[0]=0x7F; // solid blank
+ }
+ if ((cc_data_pair[0] & 0x7F) == 0 && (cc_data_pair[1] & 0x7F) == 0) {
+ return AVERROR_INVALIDDATA; // padding bytes
+ }
+ /* remove parity bit */
+ cc_data_pair[0] &= 0x7F;
+ cc_data_pair[1] &= 0x7F;
+ return 0;
+}
+
+/**
+ * This function accepts "cc_data_pair" = [708 header byte, EIA 608 first byte, EIA 608 second byte]
+ * After validating the parity bits, it also strips them from the 608 data pair.
*/
static int validate_cc_data_pair(uint8_t *cc_data_pair)
{
+ int ret;
uint8_t cc_valid = (*cc_data_pair & 4) >>2;
uint8_t cc_type = *cc_data_pair & 3;
if (!cc_valid)
return AVERROR_INVALIDDATA;
- // if EIA-608 data then verify parity.
- if (cc_type==0 || cc_type==1) {
- if (!av_parity(cc_data_pair[2])) {
- return AVERROR_INVALIDDATA;
- }
- if (!av_parity(cc_data_pair[1])) {
- cc_data_pair[1]=0x7F;
- }
- }
-
- //Skip non-data
- if ((cc_data_pair[0] == 0xFA || cc_data_pair[0] == 0xFC || cc_data_pair[0] == 0xFD)
- && (cc_data_pair[1] & 0x7F) == 0 && (cc_data_pair[2] & 0x7F) == 0)
- return AVERROR_PATCHWELCOME;
-
- //skip 708 data
+ // skip 708 data, we only support "608 over 708" not native 708
if (cc_type == 3 || cc_type == 2)
+ {
return AVERROR_PATCHWELCOME;
+ }
- /* remove parity bit */
- cc_data_pair[1] &= 0x7F;
- cc_data_pair[2] &= 0x7F;
-
+ // Must be EIA-608 data, verify parity.
+ if (cc_type==0 || cc_type==1) {
+ if ((ret = validate_eia_608_byte_pair(cc_data_pair + 1))) {
+ return ret;
+ }
+ }
return 0;
}
@@ -687,7 +698,8 @@ static void process_cc608(CCaptionSubContext *ctx, int64_t pts, uint8_t hi, uint
case 0x25:
case 0x26:
case 0x27:
- ctx->rollup = lo - 0x23;
+ ctx->rollup = ctx->rollup_override_line_count ? ctx->rollup_override_line_count : (lo - 0x23);
+ av_log(ctx, AV_LOG_DEBUG, "setting rollup to %d\n", ctx->rollup);
ctx->mode = CCMODE_ROLLUP;
break;
case 0x29:
@@ -756,6 +768,8 @@ static int decode(AVCodecContext *avctx, void *data, int *got_sub, AVPacket *avp
int len = avpkt->size;
int ret = 0;
int i;
+ int stride = 3; /* default: "608 over 708" triplets [header, hi, lo]; raw 608 overrides to 2 */
+ int raw_608 = avctx->codec_id == AV_CODEC_ID_EIA_608_RAW_BYTE_PAIRS;
av_fast_padded_malloc(&ctx->pktbuf, &ctx->pktbuf_size, len);
if (!ctx->pktbuf) {
@@ -764,16 +778,27 @@ static int decode(AVCodecContext *avctx, void *data, int *got_sub, AVPacket *avp
}
memcpy(ctx->pktbuf, avpkt->data, len);
bptr = ctx->pktbuf;
-
- for (i = 0; i < len; i += 3) {
- uint8_t cc_type = *(bptr + i) & 3;
- if (validate_cc_data_pair(bptr + i))
- continue;
- /* ignoring data field 1 */
- if(cc_type == 1)
- continue;
- else
- process_cc608(ctx, start_time, *(bptr + i + 1) & 0x7f, *(bptr + i + 2) & 0x7f);
+ if (raw_608) {
+ stride = 2; // expect 2 byte "per packet"
+ }
+ for (i = 0; i < len; i += stride) {
+ if (raw_608) {
+ if (validate_eia_608_byte_pair(bptr + i)) {
+ continue;
+ }
+ process_cc608(ctx, start_time, *(bptr + i), *(bptr + i + 1));
+ } else {
+ // look for 608 over 708 bytes
+ uint8_t cc_type = *(bptr + i) & 3;
+ if (validate_cc_data_pair(bptr + i))
+ continue;
+ /* ignore NTSC_CC_FIELD_2 (cc_type 1) for now */
+ if (cc_type == 1)
+ continue;
+ else {
+ process_cc608(ctx, start_time, *(bptr + i + 1), *(bptr + i + 2));
+ }
+ }
if (!ctx->buffer_changed)
continue;
@@ -781,7 +806,7 @@ static int decode(AVCodecContext *avctx, void *data, int *got_sub, AVPacket *avp
if (*ctx->buffer.str || ctx->real_time)
{
- ff_dlog(ctx, "cdp writing data (%s)\n",ctx->buffer.str);
+ ff_dlog(ctx, "writing data (%s)\n",ctx->buffer.str);
ret = ff_ass_add_rect(sub, ctx->buffer.str, ctx->readorder++, 0, NULL, NULL);
if (ret < 0)
return ret;
@@ -819,6 +844,7 @@ static int decode(AVCodecContext *avctx, void *data, int *got_sub, AVPacket *avp
#define SD AV_OPT_FLAG_SUBTITLE_PARAM | AV_OPT_FLAG_DECODING_PARAM
static const AVOption options[] = {
{ "real_time", "emit subtitle events as they are decoded for real-time display", OFFSET(real_time), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, SD },
+ { "rollup_override_line_count", "hard code number of rollup lines [overrides any count specified by the captions themselves]", OFFSET(rollup_override_line_count), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 4, SD },
{NULL}
};
@@ -829,9 +855,16 @@ static const AVClass ccaption_dec_class = {
.version = LIBAVUTIL_VERSION_INT,
};
+static const AVClass ccaption_raw_608_dec_class = {
+ .class_name = "Closed caption Decoder Raw 608",
+ .item_name = av_default_item_name,
+ .option = options,
+ .version = LIBAVUTIL_VERSION_INT,
+};
+
AVCodec ff_ccaption_decoder = {
.name = "cc_dec",
- .long_name = NULL_IF_CONFIG_SMALL("Closed Caption (EIA-608 / CEA-708)"),
+ .long_name = NULL_IF_CONFIG_SMALL("Closed Caption (EIA-608 over CEA-708)"),
.type = AVMEDIA_TYPE_SUBTITLE,
.id = AV_CODEC_ID_EIA_608,
.priv_data_size = sizeof(CCaptionSubContext),
@@ -841,3 +874,16 @@ AVCodec ff_ccaption_decoder = {
.decode = decode,
.priv_class = &ccaption_dec_class,
};
+
+AVCodec ff_ccaption_raw_608_decoder = {
+ .name = "cc_raw_608_dec",
+ .long_name = NULL_IF_CONFIG_SMALL("Closed Caption (EIA-608 raw byte pairs)"),
+ .type = AVMEDIA_TYPE_SUBTITLE,
+ .id = AV_CODEC_ID_EIA_608_RAW_BYTE_PAIRS,
+ .priv_data_size = sizeof(CCaptionSubContext),
+ .init = init_decoder,
+ .close = close_decoder,
+ .flush = flush_decoder,
+ .decode = decode,
+ .priv_class = &ccaption_raw_608_dec_class,
+};
diff --git a/libavcodec/codec_desc.c b/libavcodec/codec_desc.c
index 93433b5a27..c706a5ba08 100644
--- a/libavcodec/codec_desc.c
+++ b/libavcodec/codec_desc.c
@@ -3173,6 +3173,13 @@ static const AVCodecDescriptor codec_descriptors[] = {
.long_name = NULL_IF_CONFIG_SMALL("MicroDVD subtitle"),
.props = AV_CODEC_PROP_TEXT_SUB,
},
+ {
+ .id = AV_CODEC_ID_EIA_608_RAW_BYTE_PAIRS,
+ .type = AVMEDIA_TYPE_SUBTITLE,
+ .name = "eia_608_raw_byte_pairs",
+ .long_name = NULL_IF_CONFIG_SMALL("EIA-608 closed captions raw byte pairs"),
+ .props = AV_CODEC_PROP_TEXT_SUB,
+ },
{
.id = AV_CODEC_ID_EIA_608,
.type = AVMEDIA_TYPE_SUBTITLE,
diff --git a/libavcodec/codec_id.h b/libavcodec/codec_id.h
index e7d6e059db..805e18758b 100644
--- a/libavcodec/codec_id.h
+++ b/libavcodec/codec_id.h
@@ -513,6 +513,7 @@ enum AVCodecID {
AV_CODEC_ID_MICRODVD = 0x17800,
AV_CODEC_ID_EIA_608,
+ AV_CODEC_ID_EIA_608_RAW_BYTE_PAIRS,
AV_CODEC_ID_JACOSUB,
AV_CODEC_ID_SAMI,
AV_CODEC_ID_REALTEXT,
diff --git a/libavcodec/version.h b/libavcodec/version.h
index 3de16c884c..fc3aababc8 100644
--- a/libavcodec/version.h
+++ b/libavcodec/version.h
@@ -28,8 +28,8 @@
#include "libavutil/version.h"
#define LIBAVCODEC_VERSION_MAJOR 58
-#define LIBAVCODEC_VERSION_MINOR 82
-#define LIBAVCODEC_VERSION_MICRO 100
+#define LIBAVCODEC_VERSION_MINOR 83
+#define LIBAVCODEC_VERSION_MICRO 0
#define LIBAVCODEC_VERSION_INT AV_VERSION_INT(LIBAVCODEC_VERSION_MAJOR, \
LIBAVCODEC_VERSION_MINOR, \
diff --git a/libavdevice/dshow.c b/libavdevice/dshow.c
index d7f5bd7069..61b58b2047 100644
--- a/libavdevice/dshow.c
+++ b/libavdevice/dshow.c
@@ -90,24 +90,34 @@ dshow_read_close(AVFormatContext *s)
libAVPin_Release(ctx->capture_pin[VideoDevice]);
if (ctx->capture_pin[AudioDevice])
libAVPin_Release(ctx->capture_pin[AudioDevice]);
+ if (ctx->capture_pin[ClosedCaptionDevice])
+ libAVPin_Release(ctx->capture_pin[ClosedCaptionDevice]);
if (ctx->capture_filter[VideoDevice])
libAVFilter_Release(ctx->capture_filter[VideoDevice]);
if (ctx->capture_filter[AudioDevice])
libAVFilter_Release(ctx->capture_filter[AudioDevice]);
+ if (ctx->capture_filter[ClosedCaptionDevice])
+ libAVFilter_Release(ctx->capture_filter[ClosedCaptionDevice]);
if (ctx->device_pin[VideoDevice])
IPin_Release(ctx->device_pin[VideoDevice]);
if (ctx->device_pin[AudioDevice])
IPin_Release(ctx->device_pin[AudioDevice]);
+ if (ctx->device_pin[ClosedCaptionDevice])
+ IPin_Release(ctx->device_pin[ClosedCaptionDevice]);
if (ctx->device_filter[VideoDevice])
IBaseFilter_Release(ctx->device_filter[VideoDevice]);
if (ctx->device_filter[AudioDevice])
IBaseFilter_Release(ctx->device_filter[AudioDevice]);
+ if (ctx->device_filter[ClosedCaptionDevice])
+ IBaseFilter_Release(ctx->device_filter[ClosedCaptionDevice]);
av_freep(&ctx->device_name[0]);
av_freep(&ctx->device_name[1]);
+ av_freep(&ctx->device_name[2]);
av_freep(&ctx->device_unique_name[0]);
av_freep(&ctx->device_unique_name[1]);
+ av_freep(&ctx->device_unique_name[2]);
if(ctx->mutex)
CloseHandle(ctx->mutex);
@@ -219,8 +229,8 @@ dshow_cycle_devices(AVFormatContext *avctx, ICreateDevEnum *devenum,
const GUID *device_guid[2] = { &CLSID_VideoInputDeviceCategory,
&CLSID_AudioInputDeviceCategory };
- const char *devtypename = (devtype == VideoDevice) ? "video" : "audio only";
- const char *sourcetypename = (sourcetype == VideoSourceDevice) ? "video" : "audio";
+ const char *devtypename = (devtype == VideoDevice) ? "video" : (devtype == AudioDevice) ? "audio" : "VBI";
+ const char *sourcetypename = (sourcetype == VideoSourceDevice) ? "video" : "audio only";
r = ICreateDevEnum_CreateClassEnumerator(devenum, device_guid[sourcetype],
(IEnumMoniker **) &classenum, 0);
@@ -569,9 +579,9 @@ dshow_cycle_pins(AVFormatContext *avctx, enum dshowDeviceType devtype,
IPin *pin;
int r;
- const GUID *mediatype[2] = { &MEDIATYPE_Video, &MEDIATYPE_Audio };
- const char *devtypename = (devtype == VideoDevice) ? "video" : "audio only";
- const char *sourcetypename = (sourcetype == VideoSourceDevice) ? "video" : "audio";
+ const GUID *mediatype[3] = { &MEDIATYPE_Video, &MEDIATYPE_Audio, &MEDIATYPE_VBI };
+ const char *devtypename = (devtype == VideoDevice) ? "video" : (devtype == AudioDevice) ? "audio" : "VBI";
+ const char *sourcetypename = (sourcetype == VideoSourceDevice) ? "video" : "audio only";
int set_format = (devtype == VideoDevice && (ctx->framerate ||
(ctx->requested_width && ctx->requested_height) ||
@@ -617,7 +627,7 @@ dshow_cycle_pins(AVFormatContext *avctx, enum dshowDeviceType devtype,
if (IKsPropertySet_Get(p, &AMPROPSETID_Pin, AMPROPERTY_PIN_CATEGORY,
NULL, 0, &category, sizeof(GUID), &r2) != S_OK)
goto next;
- if (!IsEqualGUID(&category, &PIN_CATEGORY_CAPTURE))
+ if (!IsEqualGUID(&category, &PIN_CATEGORY_CAPTURE) && !IsEqualGUID(&category, &PIN_CATEGORY_VBI))
goto next;
name_buf = dup_wchar_to_utf8(info.achName);
@@ -739,10 +749,9 @@ dshow_open_device(AVFormatContext *avctx, ICreateDevEnum *devenum,
IStream *ifile_stream = NULL;
IStream *ofile_stream = NULL;
IPersistStream *pers_stream = NULL;
- enum dshowDeviceType otherDevType = (devtype == VideoDevice) ? AudioDevice : VideoDevice;
-
- const wchar_t *filter_name[2] = { L"Audio capture filter", L"Video capture filter" };
+ enum dshowDeviceType all_device_types[3] = {VideoDevice, AudioDevice, ClosedCaptionDevice};
+ const wchar_t *filter_name[3] = { L"Audio ffmpeg capture filter", L"Video ffmpeg capture filter", L"VBI ffmpeg capture filter" };
if ( ((ctx->audio_filter_load_file) && (strlen(ctx->audio_filter_load_file)>0) && (sourcetype == AudioSourceDevice)) ||
((ctx->video_filter_load_file) && (strlen(ctx->video_filter_load_file)>0) && (sourcetype == VideoSourceDevice)) ) {
@@ -778,17 +787,24 @@ dshow_open_device(AVFormatContext *avctx, ICreateDevEnum *devenum,
goto error;
}
}
- if (ctx->device_filter[otherDevType]) {
+ for(int i = 0; i < sizeof(all_device_types) / sizeof(all_device_types[0]); i++) {
+ enum dshowDeviceType candidate = all_device_types[i];
+ if (candidate == devtype) {
+ continue; // ourself hasn't added one yet, skip
+ }
+ if (ctx->device_filter[candidate]) {
// avoid adding add two instances of the same device to the graph, one for video, one for audio
// a few devices don't support this (could also do this check earlier to avoid double crossbars, etc. but they seem OK)
- if (strcmp(device_filter_unique_name, ctx->device_unique_name[otherDevType]) == 0) {
+ if (strcmp(device_filter_unique_name, ctx->device_unique_name[candidate]) == 0) {
av_log(avctx, AV_LOG_DEBUG, "reusing previous graph capture filter... %s\n", device_filter_unique_name);
IBaseFilter_Release(device_filter);
- device_filter = ctx->device_filter[otherDevType];
- IBaseFilter_AddRef(ctx->device_filter[otherDevType]);
+ device_filter = ctx->device_filter[candidate];
+ IBaseFilter_AddRef(ctx->device_filter[candidate]);
} else {
- av_log(avctx, AV_LOG_DEBUG, "not reusing previous graph capture filter %s != %s\n", device_filter_unique_name, ctx->device_unique_name[otherDevType]);
+ av_log(avctx, AV_LOG_DEBUG, "not reusing previous graph capture filter %s != %s\n", device_filter_unique_name, ctx->device_unique_name[candidate]);
}
+ break;
+ }
}
ctx->device_filter [devtype] = device_filter;
@@ -1011,7 +1027,7 @@ dshow_add_device(AVFormatContext *avctx,
}
}
}
- } else {
+ } else if (devtype == AudioDevice) {
WAVEFORMATEX *fx = NULL;
if (IsEqualGUID(&type.formattype, &FORMAT_WaveFormatEx)) {
@@ -1027,6 +1043,10 @@ dshow_add_device(AVFormatContext *avctx,
par->codec_id = waveform_codec_id(par->format);
par->sample_rate = fx->nSamplesPerSec;
par->channels = fx->nChannels;
+ } else {
+ // closed captions
+ par->codec_type = AVMEDIA_TYPE_SUBTITLE;
+ par->codec_id = AV_CODEC_ID_EIA_608_RAW_BYTE_PAIRS;
}
avpriv_set_pts_info(st, 64, 1, 10000000);
@@ -1052,24 +1072,29 @@ static int parse_device_name(AVFormatContext *avctx)
char *token = strtok(NULL, ":");
tmp = NULL;
- if (!strcmp(type, "video")) {
+ if (!strcmp(type, "video")) {
device_name[0] = token;
} else if (!strcmp(type, "audio")) {
device_name[1] = token;
+ } else if (!strcmp(type, "closed_caption")) {
+ device_name[2] = token;
} else {
device_name[0] = NULL;
device_name[1] = NULL;
+ device_name[2] = NULL;
break;
}
}
- if (!device_name[0] && !device_name[1]) {
+ if (!device_name[0] && !device_name[1] && !device_name[2]) {
ret = 0;
} else {
if (device_name[0])
device_name[0] = av_strdup(device_name[0]);
if (device_name[1])
device_name[1] = av_strdup(device_name[1]);
+ if (device_name[2])
+ device_name[2] = av_strdup(device_name[2]);
}
av_free(name);
@@ -1129,9 +1154,9 @@ static int dshow_read_header(AVFormatContext *avctx)
}
if (ctx->list_devices) {
- av_log(avctx, AV_LOG_INFO, "DirectShow video devices (some may be both video and audio devices)\n");
+ av_log(avctx, AV_LOG_INFO, "DirectShow video devices (some may be both video and audio or VBI devices)\n"); // VBI seems to only be on video devices
dshow_cycle_devices(avctx, devenum, VideoDevice, VideoSourceDevice, NULL, NULL);
- av_log(avctx, AV_LOG_INFO, "DirectShow audio devices\n");
+ av_log(avctx, AV_LOG_INFO, "DirectShow audio [only] devices\n");
dshow_cycle_devices(avctx, devenum, AudioDevice, AudioSourceDevice, NULL, NULL);
ret = AVERROR_EXIT;
goto error;
@@ -1151,6 +1176,11 @@ static int dshow_read_header(AVFormatContext *avctx)
}
}
}
+ if (ctx->device_name[ClosedCaptionDevice])
+ if ((r = dshow_list_device_options(avctx, devenum, ClosedCaptionDevice, VideoSourceDevice))) {
+ ret = r;
+ goto error;
+ }
}
if (ctx->device_name[VideoDevice]) {
if ((r = dshow_open_device(avctx, devenum, VideoDevice, VideoSourceDevice)) < 0 ||
@@ -1171,6 +1201,13 @@ static int dshow_read_header(AVFormatContext *avctx)
}
}
}
+ if (ctx->device_name[ClosedCaptionDevice]) {
+ if ((r = dshow_open_device(avctx, devenum, ClosedCaptionDevice, VideoSourceDevice)) < 0 ||
+ (r = dshow_add_device(avctx, ClosedCaptionDevice)) < 0) {
+ ret = r;
+ goto error;
+ }
+ }
if (ctx->list_options) {
/* allow it to list crossbar options in dshow_open_device */
ret = AVERROR_EXIT;
@@ -1285,7 +1322,9 @@ static int dshow_read_packet(AVFormatContext *s, AVPacket *pkt)
}
}
}
-
+ if (pkt) {
+ av_log(ctx, AV_LOG_DEBUG, "dshow passing to ffmpeg pipeline packet of %8d bytes\n", pkt->size);
+ }
return ctx->eof ? AVERROR(EIO) : pkt->size;
}
diff --git a/libavdevice/dshow_capture.h b/libavdevice/dshow_capture.h
index 475d62ba99..d4aa5f809e 100644
--- a/libavdevice/dshow_capture.h
+++ b/libavdevice/dshow_capture.h
@@ -61,8 +61,10 @@ struct GUIDoffset {
enum dshowDeviceType {
VideoDevice = 0,
AudioDevice = 1,
+ ClosedCaptionDevice = 2,
};
+// We sometimes want "audio from a video source device" so differentiate this way:
enum dshowSourceFilterType {
VideoSourceDevice = 0,
AudioSourceDevice = 1,
@@ -288,8 +290,8 @@ struct dshow_ctx {
IGraphBuilder *graph;
- char *device_name[2];
- char *device_unique_name[2];
+ char *device_name[3];
+ char *device_unique_name[3];
int video_device_number;
int audio_device_number;
@@ -312,10 +314,10 @@ struct dshow_ctx {
char *video_filter_load_file;
char *video_filter_save_file;
- IBaseFilter *device_filter[2];
- IPin *device_pin[2];
- libAVFilter *capture_filter[2];
- libAVPin *capture_pin[2];
+ IBaseFilter *device_filter[3];
+ IPin *device_pin[3];
+ libAVFilter *capture_filter[3];
+ libAVPin *capture_pin[3];
HANDLE mutex;
HANDLE event[2]; /* event[0] is set by DirectShow
@@ -324,7 +326,7 @@ struct dshow_ctx {
int eof;
- int64_t curbufsize[2];
+ int64_t curbufsize[3];
unsigned int video_frame_num;
IMediaControl *control;
diff --git a/libavdevice/dshow_pin.c b/libavdevice/dshow_pin.c
index 53b1c9150d..f1bf93935a 100644
--- a/libavdevice/dshow_pin.c
+++ b/libavdevice/dshow_pin.c
@@ -52,9 +52,15 @@ libAVPin_ReceiveConnection(libAVPin *this, IPin *pin,
if (devtype == VideoDevice) {
if (!IsEqualGUID(&type->majortype, &MEDIATYPE_Video))
return VFW_E_TYPE_NOT_ACCEPTED;
- } else {
+ } else if (devtype == AudioDevice) {
if (!IsEqualGUID(&type->majortype, &MEDIATYPE_Audio))
return VFW_E_TYPE_NOT_ACCEPTED;
+ } else {
+ if (IsEqualGUID(&type->majortype, &MEDIATYPE_AUXLine21Data) && IsEqualGUID(&type->subtype, &MEDIASUBTYPE_Line21_BytePair )) {
+ dshowdebug("accepting VBI RAW 608 input\n");
+ } else {
+ return VFW_E_TYPE_NOT_ACCEPTED;
+ }
}
IPin_AddRef(pin);
@@ -322,7 +328,7 @@ libAVMemInputPin_Receive(libAVMemInputPin *this, IMediaSample *sample)
int64_t curtime;
int64_t orig_curtime;
int64_t graphtime;
- const char *devtypename = (devtype == VideoDevice) ? "video" : "audio";
+ const char *devtypename = (devtype == VideoDevice) ? "video" : (devtype == AudioDevice) ? "audio" : "VBI";
IReferenceClock *clock = pin->filter->clock;
int64_t dummy;
struct dshow_ctx *ctx;
@@ -359,7 +365,7 @@ libAVMemInputPin_Receive(libAVMemInputPin *this, IMediaSample *sample)
ctx = s->priv_data;
index = pin->filter->stream_index;
- av_log(NULL, AV_LOG_VERBOSE, "dshow passing through packet of type %s size %8d "
+ av_log(NULL, AV_LOG_VERBOSE, "dshow captured packet of type %s size %8d "
"timestamp %"PRId64" orig timestamp %"PRId64" graph timestamp %"PRId64" diff %"PRId64" %s\n",
devtypename, buf_size, curtime, orig_curtime, graphtime, graphtime - orig_curtime, ctx->device_name[devtype]);
pin->filter->callback(priv_data, index, buf, buf_size, curtime, devtype);
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment
You can’t perform that action at this time.