-
-
Save anonymous/507fc57c9f934c1b64a3 to your computer and use it in GitHub Desktop.
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
diff --git a/Changelog b/Changelog | |
index 20dadcf..7d672fb 100644 | |
--- a/Changelog | |
+++ b/Changelog | |
@@ -4,6 +4,8 @@ releases are sorted from youngest to oldest. | |
version <next>: | |
- DXVA2-accelerated HEVC Main10 decoding | |
- fieldhint filter | |
+- loop video filter and aloop audio filter | |
+- Bob Weaver deinterlacing filter | |
version 3.0: | |
diff --git a/configure b/configure | |
index fb81a19..f2f7c3d 100755 | |
--- a/configure | |
+++ b/configure | |
@@ -579,15 +579,8 @@ enable(){ | |
set_all yes $* | |
} | |
-check_requested() { | |
- for var; do | |
- eval test "x\$${var#!}_requested" = "xyes" && die "${var%%_*} cannot be enabled" | |
- done | |
-} | |
- | |
disable(){ | |
set_all no $* | |
- check_requested $* | |
} | |
enable_weak(){ | |
@@ -638,32 +631,6 @@ enable_deep_weak(){ | |
done | |
} | |
-do_enable_deep_force(){ | |
- for var; do | |
- enabled $var && continue | |
- eval sel="\$${var}_deps\ \$${var}_deps_any\ \$${var}_select\ \$${var}_suggest\ \$${var}_select_any" | |
- pushvar var | |
- enable_deep_force $sel | |
- popvar var | |
- done | |
-} | |
- | |
-enable_deep_force(){ | |
- do_enable_deep_force $* | |
- for var; do | |
- is_in $var $ALL_COMPONENTS $COMPONENT_LIST $LIBRARY_LIST && enable $var | |
- done | |
-} | |
- | |
-request(){ | |
- disable $* # force the refresh of the dependencies | |
- for var; do | |
- enable ${var}_requested | |
- done | |
- enable_deep_force $* | |
- enable $* | |
-} | |
- | |
enabled(){ | |
test "${1#!}" = "$1" && op='=' || op=!= | |
eval test "x\$${1#!}" $op "xyes" | |
@@ -723,7 +690,7 @@ do_check_deps(){ | |
append allopts $cfg | |
eval dep_all="\$${cfg}_deps" | |
- eval dep_any="\$${cfg}_deps_any\ \$${cfg}_select_any" | |
+ eval dep_any="\$${cfg}_deps_any" | |
eval dep_sel="\$${cfg}_select" | |
eval dep_sgs="\$${cfg}_suggest" | |
eval dep_ifa="\$${cfg}_if" | |
@@ -1765,6 +1732,7 @@ BUILTIN_LIST=" | |
mm_empty | |
rdtsc | |
sarestart | |
+ sem_timedwait | |
sync_val_compare_and_swap | |
" | |
HAVE_LIST_CMDLINE=" | |
@@ -1791,6 +1759,7 @@ HEADERS_LIST=" | |
dev_ic_bt8xx_h | |
dev_video_bktr_ioctl_bt848_h | |
dev_video_meteor_ioctl_meteor_h | |
+ dispatch_dispatch_h | |
direct_h | |
dirent_h | |
dlfcn_h | |
@@ -2790,7 +2759,7 @@ gdigrab_indev_deps="CreateDIBSection" | |
gdigrab_indev_extralibs="-lgdi32" | |
gdigrab_indev_select="bmp_decoder" | |
iec61883_indev_deps="libiec61883" | |
-jack_indev_deps="jack_jack_h sem_timedwait" | |
+jack_indev_deps="jack_jack_h" | |
lavfi_indev_deps="avfilter" | |
libcdio_indev_deps="libcdio" | |
libdc1394_indev_deps="libdc1394" | |
@@ -2860,7 +2829,7 @@ tls_schannel_protocol_deps="schannel" | |
tls_schannel_protocol_select="tcp_protocol" | |
tls_securetransport_protocol_deps="securetransport" | |
tls_securetransport_protocol_select="tcp_protocol" | |
-tls_protocol_select_any="tls_schannel_protocol tls_securetransport_protocol tls_gnutls_protocol tls_openssl_protocol" | |
+tls_protocol_deps_any="tls_schannel_protocol tls_securetransport_protocol tls_gnutls_protocol tls_openssl_protocol" | |
udp_protocol_select="network" | |
udplite_protocol_select="network" | |
unix_protocol_deps="sys_un_h" | |
@@ -3047,7 +3016,10 @@ cpu="generic" | |
intrinsics="none" | |
# configurable options | |
+enable $PROGRAM_LIST | |
enable $DOCUMENT_LIST | |
+enable $EXAMPLE_LIST | |
+enable $(filter_out avresample $LIBRARY_LIST) | |
enable stripping | |
enable asm | |
@@ -3168,6 +3140,12 @@ ALL_COMPONENTS=" | |
$PROTOCOL_LIST | |
" | |
+for n in $COMPONENT_LIST; do | |
+ v=$(toupper ${n%s})_LIST | |
+ eval enable \$$v | |
+ eval ${n}_if_any="\$$v" | |
+done | |
+ | |
enable $ARCH_EXT_LIST | |
die_unknown(){ | |
@@ -3234,11 +3212,10 @@ for opt do | |
disable $PROGRAM_LIST | |
;; | |
--disable-everything) | |
- map 'eval disable \${$(toupper ${v%s})_LIST}' $COMPONENT_LIST | |
- enable_deep_force $(filter_out avresample $LIBRARY_LIST) $PROGRAM_LIST | |
+ map 'eval unset \${$(toupper ${v%s})_LIST}' $COMPONENT_LIST | |
;; | |
--disable-all) | |
- map 'eval disable \${$(toupper ${v%s})_LIST}' $COMPONENT_LIST | |
+ map 'eval unset \${$(toupper ${v%s})_LIST}' $COMPONENT_LIST | |
disable $LIBRARY_LIST $PROGRAM_LIST doc | |
enable avutil | |
;; | |
@@ -3255,7 +3232,6 @@ for opt do | |
is_in "${thing}s" $COMPONENT_LIST || die_unknown "$opt" | |
eval list=\$$(toupper $thing)_LIST | |
name=$(echo "${optval}" | sed "s/,/_${thing}|/g")_${thing} | |
- test $action = enable && action="request" | |
list=$(filter "$name" $list) | |
[ "$list" = "" ] && warn "Option $opt did not match anything" | |
$action $list | |
@@ -3263,10 +3239,9 @@ for opt do | |
--enable-?*|--disable-?*) | |
eval $(echo "$opt" | sed 's/--/action=/;s/-/ option=/;s/-/_/g') | |
if is_in $option $COMPONENT_LIST; then | |
- test $action = enable && action="enable_deep_force" | |
+ test $action = disable && action=unset | |
eval $action \$$(toupper ${option%s})_LIST | |
elif is_in $option $CMDLINE_SELECT; then | |
- test $action = enable && action="request" | |
$action $option | |
else | |
die_unknown $opt | |
@@ -3303,15 +3278,11 @@ done | |
disabled logging && logfile=/dev/null | |
-# Enable the default components if not disabled explicitly | |
- | |
-enable_weak $(filter_out avresample $LIBRARY_LIST) $PROGRAM_LIST | |
- | |
# Disable all the library-specific components if the library itself | |
# is disabled, see AVCODEC_LIST and following _LIST variables. | |
disable_components(){ | |
- disabled ${1} && disable_weak $( | |
+ disabled ${1} && disable $( | |
eval components="\$$(toupper ${1})_COMPONENTS" | |
map 'eval echo \${$(toupper ${v%s})_LIST}' $components | |
) | |
@@ -3320,25 +3291,6 @@ disable_components(){ | |
map 'disable_components $v' $LIBRARY_LIST | |
echo "# $0 $FFMPEG_CONFIGURATION" > $logfile | |
- | |
-# Mark components that had not been enabled/disabled explicitly | |
-# as enabled | |
- | |
-for n in $COMPONENT_LIST; do | |
- v=$(toupper ${n%s})_LIST | |
- eval enable_weak \$$v | |
- eval ${n}_if_any="\$$v" | |
-done | |
- | |
-# Make so that disabled libraries are enabled if a component | |
-# of them is requested | |
- | |
-for n in $LIBRARY_LIST; do | |
- v=$(toupper ${n})_COMPONENTS | |
- eval ${n}_if_any="\$$v" | |
-done | |
- | |
-echo "# $0 $FFMPEG_CONFIGURATION" > $logfile | |
set >> $logfile | |
test -n "$valgrind" && toolchain="valgrind-memcheck" | |
@@ -5353,6 +5305,7 @@ check_func mprotect | |
check_func_headers time.h nanosleep || { check_func_headers time.h nanosleep -lrt && add_extralibs -lrt && LIBRT="-lrt"; } | |
check_func sched_getaffinity | |
check_func setrlimit | |
+check_func sem_timedwait | |
check_struct "sys/stat.h" "struct stat" st_mtim.tv_nsec -D_BSD_SOURCE | |
check_func strerror_r | |
check_func sysconf | |
@@ -5380,6 +5333,7 @@ check_func_headers glob.h glob | |
enabled xlib && | |
check_func_headers "X11/Xlib.h X11/extensions/Xvlib.h" XvGetPortAttribute -lXv -lX11 -lXext | |
+check_header dispatch/dispatch.h | |
check_header direct.h | |
check_header dirent.h | |
check_header dlfcn.h | |
@@ -5752,8 +5706,12 @@ check_header soundcard.h | |
enabled_any alsa_indev alsa_outdev && | |
check_lib2 alsa/asoundlib.h snd_pcm_htimestamp -lasound | |
-enabled jack_indev && check_lib2 jack/jack.h jack_client_open -ljack && check_func sem_timedwait && | |
- check_func jack_port_get_latency_range -ljack | |
+if enabled jack_indev; then | |
+ { check_lib2 jack/jack.h jack_client_open -ljack && | |
+ check_func jack_port_get_latency_range -ljack && | |
+ enabled_any sem_timedwait dispatch_dispatch_h; } || | |
+ disable jack_indev | |
+fi | |
enabled_any sndio_indev sndio_outdev && check_lib2 sndio.h sio_open -lsndio | |
diff --git a/doc/APIchanges b/doc/APIchanges | |
index fe6fff5..1194709 100644 | |
--- a/doc/APIchanges | |
+++ b/doc/APIchanges | |
@@ -15,6 +15,9 @@ libavutil: 2015-08-28 | |
API changes, most recent first: | |
+2016-xx-xx - lavu 55.18.100 | |
+ xxxxxxx audio_fifo.h - Add av_audio_fifo_peek_at(). | |
+ | |
2016-xx-xx - lavu 55.18.0 | |
xxxxxxx buffer.h - Add av_buffer_pool_init2(). | |
xxxxxxx hwcontext.h - Add a new installed header hwcontext.h with a new API | |
diff --git a/doc/filters.texi b/doc/filters.texi | |
index f30b926..c533ef7 100644 | |
--- a/doc/filters.texi | |
+++ b/doc/filters.texi | |
@@ -4256,6 +4256,59 @@ tblend=all_mode=difference128 | |
@end example | |
@end itemize | |
+@section bwdif | |
+ | |
+Deinterlace the input video ("bwdif" stands for "Bob Weaver | |
+Deinterlacing Filter"). | |
+ | |
+Motion adaptive deinterlacing based on yadif with the use of w3fdif and cubic | |
+interpolation algorithms. | |
+It accepts the following parameters: | |
+ | |
+@table @option | |
+@item mode | |
+The interlacing mode to adopt. It accepts one of the following values: | |
+ | |
+@table @option | |
+@item 0, send_frame | |
+Output one frame for each frame. | |
+@item 1, send_field | |
+Output one frame for each field. | |
+@end table | |
+ | |
+The default value is @code{send_field}. | |
+ | |
+@item parity | |
+The picture field parity assumed for the input interlaced video. It accepts one | |
+of the following values: | |
+ | |
+@table @option | |
+@item 0, tff | |
+Assume the top field is first. | |
+@item 1, bff | |
+Assume the bottom field is first. | |
+@item -1, auto | |
+Enable automatic detection of field parity. | |
+@end table | |
+ | |
+The default value is @code{auto}. | |
+If the interlacing is unknown or the decoder does not export this information, | |
+top field first will be assumed. | |
+ | |
+@item deint | |
+Specify which frames to deinterlace. It accepts one of the following | |
+values: | |
+ | |
+@table @option | |
+@item 0, all | |
+Deinterlace all frames. | |
+@item 1, interlaced | |
+Only deinterlace frames marked as interlaced. | |
+@end table | |
+ | |
+The default value is @code{all}. | |
+@end table | |
+ | |
@section boxblur | |
Apply a boxblur algorithm to the input video. | |
@@ -8185,6 +8238,25 @@ The formula that generates the correction is: | |
where @var{r_0} is halve of the image diagonal and @var{r_src} and @var{r_tgt} are the | |
distances from the focal point in the source and target images, respectively. | |
+@section loop, aloop | |
+ | |
+Loop video frames or audio samples. | |
+ | |
+These filters accept the following options: | |
+ | |
+@table @option | |
+@item loop | |
+Set the number of loops. | |
+ | |
+@item size | |
+Set maximal size in number of frames for @code{loop} filter or maximal number | |
+of samples in case of @code{aloop} filter. | |
+ | |
+@item start | |
+Set first frame of loop for @code{loop} filter or first sample of loop in case | |
+of @code{aloop} filter. | |
+@end table | |
+ | |
@anchor{lut3d} | |
@section lut3d | |
diff --git a/doc/formats.texi b/doc/formats.texi | |
index 617cda5..f79ebe2 100644 | |
--- a/doc/formats.texi | |
+++ b/doc/formats.texi | |
@@ -147,7 +147,7 @@ a packet for each stream, regardless of the maximum timestamp | |
difference between the buffered packets. | |
@item use_wallclock_as_timestamps @var{integer} (@emph{input}) | |
-Use wallclock as timestamps. | |
+Use wallclock as timestamps if set to 1. Default is 0. | |
@item avoid_negative_ts @var{integer} (@emph{output}) | |
diff --git a/ffmpeg.c b/ffmpeg.c | |
index a5ec3c3..c20f75a 100644 | |
--- a/ffmpeg.c | |
+++ b/ffmpeg.c | |
@@ -2123,8 +2123,12 @@ static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output) | |
ist->hwaccel_retrieved_pix_fmt = decoded_frame->format; | |
best_effort_timestamp= av_frame_get_best_effort_timestamp(decoded_frame); | |
- if(best_effort_timestamp != AV_NOPTS_VALUE) | |
- ist->next_pts = ist->pts = av_rescale_q(decoded_frame->pts = best_effort_timestamp, ist->st->time_base, AV_TIME_BASE_Q); | |
+ if(best_effort_timestamp != AV_NOPTS_VALUE) { | |
+ int64_t ts = av_rescale_q(decoded_frame->pts = best_effort_timestamp, ist->st->time_base, AV_TIME_BASE_Q); | |
+ | |
+ if (ts != AV_NOPTS_VALUE) | |
+ ist->next_pts = ist->pts = ts; | |
+ } | |
if (debug_ts) { | |
av_log(NULL, AV_LOG_INFO, "decoder -> ist_index:%d type:video " | |
diff --git a/libavdevice/jack.c b/libavdevice/jack.c | |
index 9ecbf9e..b5b8666 100644 | |
--- a/libavdevice/jack.c | |
+++ b/libavdevice/jack.c | |
@@ -35,6 +35,18 @@ | |
#include "timefilter.h" | |
#include "avdevice.h" | |
+#if HAVE_DISPATCH_DISPATCH_H | |
+ | |
+#include <dispatch/dispatch.h> | |
+#define sem_t dispatch_semaphore_t | |
+#define sem_init(psem,x,val) *psem = dispatch_semaphore_create(val) | |
+#define sem_post(psem) dispatch_semaphore_signal(*psem) | |
+#define sem_wait(psem) dispatch_semaphore_wait(*psem, DISPATCH_TIME_FOREVER) | |
+#define sem_timedwait(psem, val) dispatch_semaphore_wait(*psem, dispatch_walltime(val, 0)) | |
+#define sem_destroy(psem) dispatch_release(*psem) | |
+ | |
+#endif /* HAVE_DISPATCH_DISPATCH_H */ | |
+ | |
/** | |
* Size of the internal FIFO buffers as a number of audio packets | |
*/ | |
diff --git a/libavfilter/Makefile b/libavfilter/Makefile | |
index 9120ecc..10c2e0b 100644 | |
--- a/libavfilter/Makefile | |
+++ b/libavfilter/Makefile | |
@@ -38,6 +38,7 @@ OBJS-$(CONFIG_AGATE_FILTER) += af_agate.o | |
OBJS-$(CONFIG_AINTERLEAVE_FILTER) += f_interleave.o | |
OBJS-$(CONFIG_ALIMITER_FILTER) += af_alimiter.o | |
OBJS-$(CONFIG_ALLPASS_FILTER) += af_biquads.o | |
+OBJS-$(CONFIG_ALOOP_FILTER) += f_loop.o | |
OBJS-$(CONFIG_AMERGE_FILTER) += af_amerge.o | |
OBJS-$(CONFIG_AMETADATA_FILTER) += f_metadata.o | |
OBJS-$(CONFIG_AMIX_FILTER) += af_amix.o | |
@@ -118,6 +119,7 @@ OBJS-$(CONFIG_BLACKDETECT_FILTER) += vf_blackdetect.o | |
OBJS-$(CONFIG_BLACKFRAME_FILTER) += vf_blackframe.o | |
OBJS-$(CONFIG_BLEND_FILTER) += vf_blend.o dualinput.o framesync.o | |
OBJS-$(CONFIG_BOXBLUR_FILTER) += vf_boxblur.o | |
+OBJS-$(CONFIG_BWDIF_FILTER) += vf_bwdif.o | |
OBJS-$(CONFIG_CHROMAKEY_FILTER) += vf_chromakey.o | |
OBJS-$(CONFIG_CODECVIEW_FILTER) += vf_codecview.o | |
OBJS-$(CONFIG_COLORBALANCE_FILTER) += vf_colorbalance.o | |
@@ -181,6 +183,7 @@ OBJS-$(CONFIG_INTERLACE_FILTER) += vf_interlace.o | |
OBJS-$(CONFIG_INTERLEAVE_FILTER) += f_interleave.o | |
OBJS-$(CONFIG_KERNDEINT_FILTER) += vf_kerndeint.o | |
OBJS-$(CONFIG_LENSCORRECTION_FILTER) += vf_lenscorrection.o | |
+OBJS-$(CONFIG_LOOP_FILTER) += f_loop.o | |
OBJS-$(CONFIG_LUT3D_FILTER) += vf_lut3d.o | |
OBJS-$(CONFIG_LUT_FILTER) += vf_lut.o | |
OBJS-$(CONFIG_LUTRGB_FILTER) += vf_lut.o | |
diff --git a/libavfilter/allfilters.c b/libavfilter/allfilters.c | |
index 0fe72d6..ed52649 100644 | |
--- a/libavfilter/allfilters.c | |
+++ b/libavfilter/allfilters.c | |
@@ -58,6 +58,7 @@ void avfilter_register_all(void) | |
REGISTER_FILTER(AINTERLEAVE, ainterleave, af); | |
REGISTER_FILTER(ALIMITER, alimiter, af); | |
REGISTER_FILTER(ALLPASS, allpass, af); | |
+ REGISTER_FILTER(ALOOP, aloop, af); | |
REGISTER_FILTER(AMERGE, amerge, af); | |
REGISTER_FILTER(AMETADATA, ametadata, af); | |
REGISTER_FILTER(AMIX, amix, af); | |
@@ -139,6 +140,7 @@ void avfilter_register_all(void) | |
REGISTER_FILTER(BLACKFRAME, blackframe, vf); | |
REGISTER_FILTER(BLEND, blend, vf); | |
REGISTER_FILTER(BOXBLUR, boxblur, vf); | |
+ REGISTER_FILTER(BWDIF, bwdif, vf); | |
REGISTER_FILTER(CHROMAKEY, chromakey, vf); | |
REGISTER_FILTER(CODECVIEW, codecview, vf); | |
REGISTER_FILTER(COLORBALANCE, colorbalance, vf); | |
@@ -202,6 +204,7 @@ void avfilter_register_all(void) | |
REGISTER_FILTER(INTERLEAVE, interleave, vf); | |
REGISTER_FILTER(KERNDEINT, kerndeint, vf); | |
REGISTER_FILTER(LENSCORRECTION, lenscorrection, vf); | |
+ REGISTER_FILTER(LOOP, loop, vf); | |
REGISTER_FILTER(LUT3D, lut3d, vf); | |
REGISTER_FILTER(LUT, lut, vf); | |
REGISTER_FILTER(LUTRGB, lutrgb, vf); | |
diff --git a/libavfilter/f_loop.c b/libavfilter/f_loop.c | |
new file mode 100644 | |
index 0000000..d8eb692 | |
--- /dev/null | |
+++ b/libavfilter/f_loop.c | |
@@ -0,0 +1,381 @@ | |
+/* | |
+ * Copyright (c) 2016 Paul B Mahol | |
+ * | |
+ * This file is part of FFmpeg. | |
+ * | |
+ * FFmpeg is free software; you can redistribute it and/or | |
+ * modify it under the terms of the GNU Lesser General Public | |
+ * License as published by the Free Software Foundation; either | |
+ * version 2.1 of the License, or (at your option) any later version. | |
+ * | |
+ * FFmpeg is distributed in the hope that it will be useful, | |
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of | |
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | |
+ * Lesser General Public License for more details. | |
+ * | |
+ * You should have received a copy of the GNU Lesser General Public | |
+ * License along with FFmpeg; if not, write to the Free Software | |
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA | |
+ */ | |
+ | |
+#include "libavutil/audio_fifo.h" | |
+#include "libavutil/avassert.h" | |
+#include "libavutil/fifo.h" | |
+#include "libavutil/internal.h" | |
+#include "libavutil/opt.h" | |
+#include "avfilter.h" | |
+#include "audio.h" | |
+#include "formats.h" | |
+#include "internal.h" | |
+#include "video.h" | |
+ | |
+typedef struct LoopContext { | |
+ const AVClass *class; | |
+ | |
+ AVAudioFifo *fifo; | |
+ AVAudioFifo *left; | |
+ AVFrame **frames; | |
+ int nb_frames; | |
+ int current_frame; | |
+ int64_t start_pts; | |
+ int64_t duration; | |
+ int64_t current_sample; | |
+ int64_t nb_samples; | |
+ int64_t ignored_samples; | |
+ | |
+ int loop; | |
+ int64_t size; | |
+ int64_t start; | |
+ int64_t pts; | |
+} LoopContext; | |
+ | |
+#define AFLAGS AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM | |
+#define VFLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM | |
+#define OFFSET(x) offsetof(LoopContext, x) | |
+ | |
+#if CONFIG_ALOOP_FILTER | |
+ | |
+static int aconfig_input(AVFilterLink *inlink) | |
+{ | |
+ AVFilterContext *ctx = inlink->dst; | |
+ LoopContext *s = ctx->priv; | |
+ | |
+ s->fifo = av_audio_fifo_alloc(inlink->format, inlink->channels, 8192); | |
+ s->left = av_audio_fifo_alloc(inlink->format, inlink->channels, 8192); | |
+ if (!s->fifo || !s->left) | |
+ return AVERROR(ENOMEM); | |
+ | |
+ return 0; | |
+} | |
+ | |
+static av_cold void auninit(AVFilterContext *ctx) | |
+{ | |
+ LoopContext *s = ctx->priv; | |
+ | |
+ av_audio_fifo_free(s->fifo); | |
+ av_audio_fifo_free(s->left); | |
+} | |
+ | |
+static int push_samples(AVFilterContext *ctx, int nb_samples) | |
+{ | |
+    AVFilterLink *outlink = ctx->outputs[0]; | |
+    LoopContext *s = ctx->priv; | |
+    AVFrame *out; | |
+    /* Initialize ret: if s->loop == 0 on entry the while body never runs
+     * and we would otherwise return an uninitialized value (UB). */ | |
+    int ret = 0, i = 0; | |
+ | |
+    while (s->loop != 0 && i < nb_samples) { | |
+        out = ff_get_audio_buffer(outlink, FFMIN(nb_samples, s->nb_samples - s->current_sample)); | |
+        if (!out) | |
+            return AVERROR(ENOMEM); | |
+        ret = av_audio_fifo_peek_at(s->fifo, (void **)out->extended_data, out->nb_samples, s->current_sample); | |
+        if (ret < 0) | |
+            return ret; | |
+        out->pts = s->pts; | |
+        out->nb_samples = ret; | |
+        s->pts += out->nb_samples; | |
+        i += out->nb_samples; | |
+        s->current_sample += out->nb_samples; | |
+ | |
+        ret = ff_filter_frame(outlink, out); | |
+        if (ret < 0) | |
+            return ret; | |
+ | |
+        if (s->current_sample >= s->nb_samples) { | |
+            s->current_sample = 0; | |
+ | |
+            if (s->loop > 0) | |
+                s->loop--; | |
+        } | |
+    } | |
+ | |
+    return ret; | |
+} | |
+ | |
+static int afilter_frame(AVFilterLink *inlink, AVFrame *frame) | |
+{ | |
+ AVFilterContext *ctx = inlink->dst; | |
+ AVFilterLink *outlink = ctx->outputs[0]; | |
+ LoopContext *s = ctx->priv; | |
+ int ret = 0; | |
+ | |
+ if (s->ignored_samples + frame->nb_samples > s->start && s->size > 0 && s->loop != 0) { | |
+ if (s->nb_samples < s->size) { | |
+ int written = FFMIN(frame->nb_samples, s->size - s->nb_samples); | |
+ int drain = 0; | |
+ | |
+ ret = av_audio_fifo_write(s->fifo, (void **)frame->extended_data, written); | |
+ if (ret < 0) | |
+ return ret; | |
+ if (!s->nb_samples) { | |
+ drain = FFMAX(0, s->start - s->ignored_samples); | |
+ s->pts = frame->pts; | |
+ av_audio_fifo_drain(s->fifo, drain); | |
+ s->pts += s->start - s->ignored_samples; | |
+ } | |
+ s->nb_samples += ret - drain; | |
+ drain = frame->nb_samples - written; | |
+ if (s->nb_samples == s->size && drain > 0) { | |
+ int ret2; | |
+ | |
+ ret2 = av_audio_fifo_write(s->left, (void **)frame->extended_data, frame->nb_samples); | |
+ if (ret2 < 0) | |
+ return ret2; | |
+ av_audio_fifo_drain(s->left, drain); | |
+ } | |
+ frame->nb_samples = ret; | |
+ s->pts += ret; | |
+ ret = ff_filter_frame(outlink, frame); | |
+ } else { | |
+ int nb_samples = frame->nb_samples; | |
+ | |
+ av_frame_free(&frame); | |
+ ret = push_samples(ctx, nb_samples); | |
+ } | |
+ } else { | |
+ s->ignored_samples += frame->nb_samples; | |
+ frame->pts = s->pts; | |
+ s->pts += frame->nb_samples; | |
+ ret = ff_filter_frame(outlink, frame); | |
+ } | |
+ | |
+ return ret; | |
+} | |
+ | |
+static int arequest_frame(AVFilterLink *outlink) | |
+{ | |
+ AVFilterContext *ctx = outlink->src; | |
+ LoopContext *s = ctx->priv; | |
+ int ret = 0; | |
+ | |
+ if ((!s->size) || | |
+ (s->nb_samples < s->size) || | |
+ (s->nb_samples >= s->size && s->loop == 0)) { | |
+ int nb_samples = av_audio_fifo_size(s->left); | |
+ | |
+ if (s->loop == 0 && nb_samples > 0) { | |
+ AVFrame *out; | |
+ | |
+ out = ff_get_audio_buffer(outlink, nb_samples); | |
+ if (!out) | |
+ return AVERROR(ENOMEM); | |
+ av_audio_fifo_read(s->left, (void **)out->extended_data, nb_samples); | |
+ out->pts = s->pts; | |
+ s->pts += nb_samples; | |
+ ret = ff_filter_frame(outlink, out); | |
+ if (ret < 0) | |
+ return ret; | |
+ } | |
+ ret = ff_request_frame(ctx->inputs[0]); | |
+ } else { | |
+ ret = push_samples(ctx, 1024); | |
+ } | |
+ | |
+ if (ret == AVERROR_EOF && s->nb_samples > 0 && s->loop != 0) { | |
+ ret = push_samples(ctx, outlink->sample_rate); | |
+ } | |
+ | |
+ return ret; | |
+} | |
+ | |
+static const AVOption aloop_options[] = { | |
+ { "loop", "number of loops", OFFSET(loop), AV_OPT_TYPE_INT, {.i64 = 0 }, -1, INT_MAX, AFLAGS }, | |
+ { "size", "max number of samples to loop", OFFSET(size), AV_OPT_TYPE_INT64, {.i64 = 0 }, 0, INT32_MAX, AFLAGS }, | |
+ { "start", "set the loop start sample", OFFSET(start), AV_OPT_TYPE_INT64, {.i64 = 0 }, 0, INT64_MAX, AFLAGS }, | |
+ { NULL } | |
+}; | |
+ | |
+AVFILTER_DEFINE_CLASS(aloop); | |
+ | |
+static const AVFilterPad ainputs[] = { | |
+ { | |
+ .name = "default", | |
+ .type = AVMEDIA_TYPE_AUDIO, | |
+ .filter_frame = afilter_frame, | |
+ .config_props = aconfig_input, | |
+ }, | |
+ { NULL } | |
+}; | |
+ | |
+static const AVFilterPad aoutputs[] = { | |
+ { | |
+ .name = "default", | |
+ .type = AVMEDIA_TYPE_AUDIO, | |
+ .request_frame = arequest_frame, | |
+ }, | |
+ { NULL } | |
+}; | |
+ | |
+AVFilter ff_af_aloop = { | |
+ .name = "aloop", | |
+ .description = NULL_IF_CONFIG_SMALL("Loop audio samples."), | |
+ .priv_size = sizeof(LoopContext), | |
+ .priv_class = &aloop_class, | |
+ .uninit = auninit, | |
+ .query_formats = ff_query_formats_all, | |
+ .inputs = ainputs, | |
+ .outputs = aoutputs, | |
+}; | |
+#endif /* CONFIG_ALOOP_FILTER */ | |
+ | |
+#if CONFIG_LOOP_FILTER | |
+ | |
+static av_cold int init(AVFilterContext *ctx) | |
+{ | |
+ LoopContext *s = ctx->priv; | |
+ | |
+ s->frames = av_calloc(s->size, sizeof(*s->frames)); | |
+ if (!s->frames) | |
+ return AVERROR(ENOMEM); | |
+ | |
+ return 0; | |
+} | |
+ | |
+static av_cold void uninit(AVFilterContext *ctx) | |
+{ | |
+ LoopContext *s = ctx->priv; | |
+ int i; | |
+ | |
+ for (i = 0; i < s->nb_frames; i++) | |
+ av_frame_free(&s->frames[i]); | |
+ | |
+ av_freep(&s->frames); | |
+ s->nb_frames = 0; | |
+} | |
+ | |
+static int push_frame(AVFilterContext *ctx) | |
+{ | |
+ AVFilterLink *outlink = ctx->outputs[0]; | |
+ LoopContext *s = ctx->priv; | |
+ int64_t pts; | |
+ int ret; | |
+ | |
+ AVFrame *out = av_frame_clone(s->frames[s->current_frame]); | |
+ | |
+ if (!out) | |
+ return AVERROR(ENOMEM); | |
+ out->pts += s->duration - s->start_pts; | |
+ pts = out->pts + av_frame_get_pkt_duration(out); | |
+ ret = ff_filter_frame(outlink, out); | |
+ s->current_frame++; | |
+ | |
+ if (s->current_frame >= s->nb_frames) { | |
+ s->duration = pts; | |
+ s->current_frame = 0; | |
+ | |
+ if (s->loop > 0) | |
+ s->loop--; | |
+ } | |
+ | |
+ return ret; | |
+} | |
+ | |
+static int filter_frame(AVFilterLink *inlink, AVFrame *frame) | |
+{ | |
+ AVFilterContext *ctx = inlink->dst; | |
+ AVFilterLink *outlink = ctx->outputs[0]; | |
+ LoopContext *s = ctx->priv; | |
+ int ret = 0; | |
+ | |
+ if (inlink->frame_count >= s->start && s->size > 0 && s->loop != 0) { | |
+ if (s->nb_frames < s->size) { | |
+ if (!s->nb_frames) | |
+ s->start_pts = frame->pts; | |
+ s->frames[s->nb_frames] = av_frame_clone(frame); | |
+ if (!s->frames[s->nb_frames]) { | |
+ av_frame_free(&frame); | |
+ return AVERROR(ENOMEM); | |
+ } | |
+ s->nb_frames++; | |
+ s->duration = frame->pts + av_frame_get_pkt_duration(frame); | |
+ ret = ff_filter_frame(outlink, frame); | |
+ } else { | |
+ av_frame_free(&frame); | |
+ ret = push_frame(ctx); | |
+ } | |
+ } else { | |
+ frame->pts += s->duration; | |
+ ret = ff_filter_frame(outlink, frame); | |
+ } | |
+ | |
+ return ret; | |
+} | |
+ | |
+static int request_frame(AVFilterLink *outlink) | |
+{ | |
+ AVFilterContext *ctx = outlink->src; | |
+ LoopContext *s = ctx->priv; | |
+ int ret = 0; | |
+ | |
+ if ((!s->size) || | |
+ (s->nb_frames < s->size) || | |
+ (s->nb_frames >= s->size && s->loop == 0)) { | |
+ ret = ff_request_frame(ctx->inputs[0]); | |
+ } else { | |
+ ret = push_frame(ctx); | |
+ } | |
+ | |
+ if (ret == AVERROR_EOF && s->nb_frames > 0 && s->loop != 0) { | |
+ ret = push_frame(ctx); | |
+ } | |
+ | |
+ return ret; | |
+} | |
+ | |
+static const AVOption loop_options[] = { | |
+ { "loop", "number of loops", OFFSET(loop), AV_OPT_TYPE_INT, {.i64 = 0 }, -1, INT_MAX, VFLAGS }, | |
+ { "size", "max number of frames to loop", OFFSET(size), AV_OPT_TYPE_INT64, {.i64 = 0 }, 0, INT16_MAX, VFLAGS }, | |
+ { "start", "set the loop start frame", OFFSET(start), AV_OPT_TYPE_INT64, {.i64 = 0 }, 0, INT64_MAX, VFLAGS }, | |
+ { NULL } | |
+}; | |
+ | |
+AVFILTER_DEFINE_CLASS(loop); | |
+ | |
+static const AVFilterPad inputs[] = { | |
+ { | |
+ .name = "default", | |
+ .type = AVMEDIA_TYPE_VIDEO, | |
+ .filter_frame = filter_frame, | |
+ }, | |
+ { NULL } | |
+}; | |
+ | |
+static const AVFilterPad outputs[] = { | |
+ { | |
+ .name = "default", | |
+ .type = AVMEDIA_TYPE_VIDEO, | |
+ .request_frame = request_frame, | |
+ }, | |
+ { NULL } | |
+}; | |
+ | |
+AVFilter ff_vf_loop = { | |
+ .name = "loop", | |
+ .description = NULL_IF_CONFIG_SMALL("Loop video frames."), | |
+ .priv_size = sizeof(LoopContext), | |
+ .priv_class = &loop_class, | |
+ .init = init, | |
+ .uninit = uninit, | |
+ .inputs = inputs, | |
+ .outputs = outputs, | |
+}; | |
+#endif /* CONFIG_LOOP_FILTER */ | |
diff --git a/libavfilter/version.h b/libavfilter/version.h | |
index fe0539c..4a462e7 100644 | |
--- a/libavfilter/version.h | |
+++ b/libavfilter/version.h | |
@@ -30,7 +30,7 @@ | |
#include "libavutil/version.h" | |
#define LIBAVFILTER_VERSION_MAJOR 6 | |
-#define LIBAVFILTER_VERSION_MINOR 32 | |
+#define LIBAVFILTER_VERSION_MINOR 34 | |
#define LIBAVFILTER_VERSION_MICRO 100 | |
#define LIBAVFILTER_VERSION_INT AV_VERSION_INT(LIBAVFILTER_VERSION_MAJOR, \ | |
diff --git a/libavfilter/vf_bwdif.c b/libavfilter/vf_bwdif.c | |
new file mode 100644 | |
index 0000000..7985054 | |
--- /dev/null | |
+++ b/libavfilter/vf_bwdif.c | |
@@ -0,0 +1,625 @@ | |
+/* | |
+ * BobWeaver Deinterlacing Filter | |
+ * Copyright (C) 2016 Thomas Mundt <loudmax@yahoo.de> | |
+ * | |
+ * Based on YADIF (Yet Another Deinterlacing Filter) | |
+ * Copyright (C) 2006-2011 Michael Niedermayer <michaelni@gmx.at> | |
+ * 2010 James Darnley <james.darnley@gmail.com> | |
+ * | |
+ * With use of Weston 3 Field Deinterlacing Filter algorithm | |
+ * Copyright (C) 2012 British Broadcasting Corporation, All Rights Reserved | |
+ * Author of de-interlace algorithm: Jim Easterbrook for BBC R&D | |
+ * Based on the process described by Martin Weston for BBC R&D | |
+ * | |
+ * This file is part of FFmpeg. | |
+ * | |
+ * FFmpeg is free software; you can redistribute it and/or | |
+ * modify it under the terms of the GNU Lesser General Public | |
+ * License as published by the Free Software Foundation; either | |
+ * version 2.1 of the License, or (at your option) any later version. | |
+ * | |
+ * FFmpeg is distributed in the hope that it will be useful, | |
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of | |
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | |
+ * Lesser General Public License for more details. | |
+ * | |
+ * You should have received a copy of the GNU Lesser General Public | |
+ * License along with FFmpeg; if not, write to the Free Software | |
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA | |
+ */ | |
+ | |
+#include "libavutil/avassert.h" | |
+#include "libavutil/common.h" | |
+#include "libavutil/opt.h" | |
+#include "libavutil/pixdesc.h" | |
+#include "libavutil/imgutils.h" | |
+#include "avfilter.h" | |
+#include "formats.h" | |
+#include "internal.h" | |
+#include "video.h" | |
+ | |
+/* | |
+ * Filter coefficients coef_lf and coef_hf taken from BBC PH-2071 (Weston 3 Field Deinterlacer). | |
+ * Used when there is spatial and temporal interpolation. | |
+ * Filter coefficients coef_sp are used when there is spatial interpolation only. | |
+ * Adjusted for matching visual sharpness impression of spatial and temporal interpolation. | |
+ */ | |
+static const uint16_t coef_lf[2] = { 4309, 213 }; | |
+static const uint16_t coef_hf[3] = { 5570, 3801, 1016 }; | |
+static const uint16_t coef_sp[2] = { 5077, 981 }; | |
+ | |
+enum BWDIFMode { | |
+ BWDIF_MODE_SEND_FRAME = 0, ///< send 1 frame for each frame | |
+ BWDIF_MODE_SEND_FIELD = 1, ///< send 1 frame for each field | |
+}; | |
+ | |
+enum BWDIFParity { | |
+ BWDIF_PARITY_TFF = 0, ///< top field first | |
+ BWDIF_PARITY_BFF = 1, ///< bottom field first | |
+ BWDIF_PARITY_AUTO = -1, ///< auto detection | |
+}; | |
+ | |
+enum BWDIFDeint { | |
+ BWDIF_DEINT_ALL = 0, ///< deinterlace all frames | |
+ BWDIF_DEINT_INTERLACED = 1, ///< only deinterlace frames marked as interlaced | |
+}; | |
+ | |
+typedef struct BWDIFContext { | |
+ const AVClass *class; | |
+ | |
+ int mode; ///< BWDIFMode | |
+ int parity; ///< BWDIFParity | |
+ int deint; ///< BWDIFDeint | |
+ | |
+ int frame_pending; | |
+ | |
+ AVFrame *cur; | |
+ AVFrame *next; | |
+ AVFrame *prev; | |
+ AVFrame *out; | |
+ | |
+ void (*filter_intra)(void *dst1, void *cur1, int w, int prefs, int mrefs, | |
+ int prefs3, int mrefs3, int parity, int clip_max); | |
+ void (*filter_line)(void *dst, void *prev, void *cur, void *next, | |
+ int w, int prefs, int mrefs, int prefs2, int mrefs2, | |
+ int prefs3, int mrefs3, int prefs4, int mrefs4, | |
+ int parity, int clip_max); | |
+ void (*filter_edge)(void *dst, void *prev, void *cur, void *next, | |
+ int w, int prefs, int mrefs, int prefs2, int mrefs2, | |
+ int parity, int clip_max, int spat); | |
+ | |
+ const AVPixFmtDescriptor *csp; | |
+ int inter_field; | |
+ int eof; | |
+} BWDIFContext; | |
+ | |
+typedef struct ThreadData { | |
+ AVFrame *frame; | |
+ int plane; | |
+ int w, h; | |
+ int parity; | |
+ int tff; | |
+} ThreadData; | |
+ | |
+#define FILTER_INTRA() \ | |
+ for (x = 0; x < w; x++) { \ | |
+ interpol = (coef_sp[0] * (cur[mrefs] + cur[prefs]) - coef_sp[1] * (cur[mrefs3] + cur[prefs3])) >> 13; \ | |
+ dst[0] = av_clip(interpol, 0, clip_max); \ | |
+ \ | |
+ dst++; \ | |
+ cur++; \ | |
+ } | |
+ | |
+#define FILTER1() \ | |
+ for (x = 0; x < w; x++) { \ | |
+ int c = cur[mrefs]; \ | |
+ int d = (prev2[0] + next2[0]) >> 1; \ | |
+ int e = cur[prefs]; \ | |
+ int temporal_diff0 = FFABS(prev2[0] - next2[0]); \ | |
+ int temporal_diff1 =(FFABS(prev[mrefs] - c) + FFABS(prev[prefs] - e)) >> 1; \ | |
+ int temporal_diff2 =(FFABS(next[mrefs] - c) + FFABS(next[prefs] - e)) >> 1; \ | |
+ int diff = FFMAX3(temporal_diff0 >> 1, temporal_diff1, temporal_diff2); \ | |
+ \ | |
+ if (!diff) { \ | |
+ dst[0] = d; \ | |
+ } else { | |
+ | |
+#define SPAT_CHECK() \ | |
+ int b = ((prev2[mrefs2] + next2[mrefs2]) >> 1) - c; \ | |
+ int f = ((prev2[prefs2] + next2[prefs2]) >> 1) - e; \ | |
+ int dc = d - c; \ | |
+ int de = d - e; \ | |
+ int max = FFMAX3(de, dc, FFMIN(b, f)); \ | |
+ int min = FFMIN3(de, dc, FFMAX(b, f)); \ | |
+ diff = FFMAX3(diff, min, -max); | |
+ | |
+#define FILTER_LINE() \ | |
+ SPAT_CHECK() \ | |
+ if (FFABS(c - e) > temporal_diff0) { \ | |
+ interpol = (((coef_hf[0] * (prev2[0] + next2[0]) \ | |
+ - coef_hf[1] * (prev2[mrefs2] + next2[mrefs2] + prev2[prefs2] + next2[prefs2]) \ | |
+ + coef_hf[2] * (prev2[mrefs4] + next2[mrefs4] + prev2[prefs4] + next2[prefs4])) >> 2) \ | |
+ + coef_lf[0] * (c + e) - coef_lf[1] * (cur[mrefs3] + cur[prefs3])) >> 13; \ | |
+ } else { \ | |
+ interpol = (coef_sp[0] * (c + e) - coef_sp[1] * (cur[mrefs3] + cur[prefs3])) >> 13; \ | |
+ } | |
+ | |
+#define FILTER_EDGE() \ | |
+ if (spat) { \ | |
+ SPAT_CHECK() \ | |
+ } \ | |
+ interpol = (c + e) >> 1; | |
+ | |
+#define FILTER2() \ | |
+ if (interpol > d + diff) \ | |
+ interpol = d + diff; \ | |
+ else if (interpol < d - diff) \ | |
+ interpol = d - diff; \ | |
+ \ | |
+ dst[0] = av_clip(interpol, 0, clip_max); \ | |
+ } \ | |
+ \ | |
+ dst++; \ | |
+ cur++; \ | |
+ prev++; \ | |
+ next++; \ | |
+ prev2++; \ | |
+ next2++; \ | |
+ } | |
+ | |
+static void filter_intra(void *dst1, void *cur1, int w, int prefs, int mrefs, | |
+ int prefs3, int mrefs3, int parity, int clip_max) | |
+{ | |
+ uint8_t *dst = dst1; | |
+ uint8_t *cur = cur1; | |
+ int interpol, x; | |
+ | |
+ FILTER_INTRA() | |
+} | |
+ | |
+static void filter_line(void *dst1, void *prev1, void *cur1, void *next1, | |
+ int w, int prefs, int mrefs, int prefs2, int mrefs2, | |
+ int prefs3, int mrefs3, int prefs4, int mrefs4, | |
+ int parity, int clip_max) | |
+{ | |
+ uint8_t *dst = dst1; | |
+ uint8_t *prev = prev1; | |
+ uint8_t *cur = cur1; | |
+ uint8_t *next = next1; | |
+ uint8_t *prev2 = parity ? prev : cur ; | |
+ uint8_t *next2 = parity ? cur : next; | |
+ int interpol, x; | |
+ | |
+ FILTER1() | |
+ FILTER_LINE() | |
+ FILTER2() | |
+} | |
+ | |
+static void filter_edge(void *dst1, void *prev1, void *cur1, void *next1, | |
+ int w, int prefs, int mrefs, int prefs2, int mrefs2, | |
+ int parity, int clip_max, int spat) | |
+{ | |
+ uint8_t *dst = dst1; | |
+ uint8_t *prev = prev1; | |
+ uint8_t *cur = cur1; | |
+ uint8_t *next = next1; | |
+ uint8_t *prev2 = parity ? prev : cur ; | |
+ uint8_t *next2 = parity ? cur : next; | |
+ int interpol, x; | |
+ | |
+ FILTER1() | |
+ FILTER_EDGE() | |
+ FILTER2() | |
+} | |
+ | |
+static void filter_intra_16bit(void *dst1, void *cur1, int w, int prefs, int mrefs, | |
+ int prefs3, int mrefs3, int parity, int clip_max) | |
+{ | |
+ uint16_t *dst = dst1; | |
+ uint16_t *cur = cur1; | |
+ int interpol, x; | |
+ | |
+ FILTER_INTRA() | |
+} | |
+ | |
+static void filter_line_16bit(void *dst1, void *prev1, void *cur1, void *next1, | |
+ int w, int prefs, int mrefs, int prefs2, int mrefs2, | |
+ int prefs3, int mrefs3, int prefs4, int mrefs4, | |
+ int parity, int clip_max) | |
+{ | |
+ uint16_t *dst = dst1; | |
+ uint16_t *prev = prev1; | |
+ uint16_t *cur = cur1; | |
+ uint16_t *next = next1; | |
+ uint16_t *prev2 = parity ? prev : cur ; | |
+ uint16_t *next2 = parity ? cur : next; | |
+ int interpol, x; | |
+ | |
+ FILTER1() | |
+ FILTER_LINE() | |
+ FILTER2() | |
+} | |
+ | |
+static void filter_edge_16bit(void *dst1, void *prev1, void *cur1, void *next1, | |
+ int w, int prefs, int mrefs, int prefs2, int mrefs2, | |
+ int parity, int clip_max, int spat) | |
+{ | |
+ uint16_t *dst = dst1; | |
+ uint16_t *prev = prev1; | |
+ uint16_t *cur = cur1; | |
+ uint16_t *next = next1; | |
+ uint16_t *prev2 = parity ? prev : cur ; | |
+ uint16_t *next2 = parity ? cur : next; | |
+ int interpol, x; | |
+ | |
+ FILTER1() | |
+ FILTER_EDGE() | |
+ FILTER2() | |
+} | |
+ | |
+static int filter_slice(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs) | |
+{ | |
+ BWDIFContext *s = ctx->priv; | |
+ ThreadData *td = arg; | |
+ int linesize = s->cur->linesize[td->plane]; | |
+ int clip_max = (1 << (s->csp->comp[td->plane].depth)) - 1; | |
+ int df = (s->csp->comp[td->plane].depth + 7) / 8; | |
+ int refs = linesize / df; | |
+ int slice_start = (td->h * jobnr ) / nb_jobs; | |
+ int slice_end = (td->h * (jobnr+1)) / nb_jobs; | |
+ int y; | |
+ | |
+ for (y = slice_start; y < slice_end; y++) { | |
+ if ((y ^ td->parity) & 1) { | |
+ uint8_t *prev = &s->prev->data[td->plane][y * linesize]; | |
+ uint8_t *cur = &s->cur ->data[td->plane][y * linesize]; | |
+ uint8_t *next = &s->next->data[td->plane][y * linesize]; | |
+ uint8_t *dst = &td->frame->data[td->plane][y * td->frame->linesize[td->plane]]; | |
+ if (!s->inter_field) { | |
+ s->filter_intra(dst, cur, td->w, (y + df) < td->h ? refs : -refs, | |
+ y > (df - 1) ? -refs : refs, | |
+ (y + 3*df) < td->h ? 3 * refs : -refs, | |
+ y > (3*df - 1) ? -3 * refs : refs, | |
+ td->parity ^ td->tff, clip_max); | |
+ } else if ((y < 4) || ((y + 5) > td->h)) { | |
+ s->filter_edge(dst, prev, cur, next, td->w, | |
+ (y + df) < td->h ? refs : -refs, | |
+ y > (df - 1) ? -refs : refs, | |
+ refs << 1, -(refs << 1), | |
+ td->parity ^ td->tff, clip_max, | |
+ (y < 2) || ((y + 3) > td->h) ? 0 : 1); | |
+ } else { | |
+ s->filter_line(dst, prev, cur, next, td->w, | |
+ refs, -refs, refs << 1, -(refs << 1), | |
+ 3 * refs, -3 * refs, refs << 2, -(refs << 2), | |
+ td->parity ^ td->tff, clip_max); | |
+ } | |
+ } else { | |
+ memcpy(&td->frame->data[td->plane][y * td->frame->linesize[td->plane]], | |
+ &s->cur->data[td->plane][y * linesize], td->w * df); | |
+ } | |
+ } | |
+ return 0; | |
+} | |
+ | |
+static void filter(AVFilterContext *ctx, AVFrame *dstpic, | |
+ int parity, int tff) | |
+{ | |
+ BWDIFContext *bwdif = ctx->priv; | |
+ ThreadData td = { .frame = dstpic, .parity = parity, .tff = tff }; | |
+ int i; | |
+ | |
+ for (i = 0; i < bwdif->csp->nb_components; i++) { | |
+ int w = dstpic->width; | |
+ int h = dstpic->height; | |
+ | |
+ if (i == 1 || i == 2) { | |
+ w = AV_CEIL_RSHIFT(w, bwdif->csp->log2_chroma_w); | |
+ h = AV_CEIL_RSHIFT(h, bwdif->csp->log2_chroma_h); | |
+ } | |
+ | |
+ td.w = w; | |
+ td.h = h; | |
+ td.plane = i; | |
+ | |
+ ctx->internal->execute(ctx, filter_slice, &td, NULL, FFMIN(h, ctx->graph->nb_threads)); | |
+ } | |
+ if (!bwdif->inter_field) { | |
+ bwdif->inter_field = 1; | |
+ } | |
+ | |
+ emms_c(); | |
+} | |
+ | |
+static int return_frame(AVFilterContext *ctx, int is_second) | |
+{ | |
+ BWDIFContext *bwdif = ctx->priv; | |
+ AVFilterLink *link = ctx->outputs[0]; | |
+ int tff, ret; | |
+ | |
+ if (bwdif->parity == -1) { | |
+ tff = bwdif->cur->interlaced_frame ? | |
+ bwdif->cur->top_field_first : 1; | |
+ } else { | |
+ tff = bwdif->parity ^ 1; | |
+ } | |
+ | |
+ if (is_second) { | |
+ bwdif->out = ff_get_video_buffer(link, link->w, link->h); | |
+ if (!bwdif->out) | |
+ return AVERROR(ENOMEM); | |
+ | |
+ av_frame_copy_props(bwdif->out, bwdif->cur); | |
+ bwdif->out->interlaced_frame = 0; | |
+ if (bwdif->inter_field < 0) | |
+ bwdif->inter_field = 0; | |
+ } | |
+ | |
+ filter(ctx, bwdif->out, tff ^ !is_second, tff); | |
+ | |
+ if (is_second) { | |
+ int64_t cur_pts = bwdif->cur->pts; | |
+ int64_t next_pts = bwdif->next->pts; | |
+ | |
+ if (next_pts != AV_NOPTS_VALUE && cur_pts != AV_NOPTS_VALUE) { | |
+ bwdif->out->pts = cur_pts + next_pts; | |
+ } else { | |
+ bwdif->out->pts = AV_NOPTS_VALUE; | |
+ } | |
+ } | |
+ ret = ff_filter_frame(ctx->outputs[0], bwdif->out); | |
+ | |
+ bwdif->frame_pending = (bwdif->mode&1) && !is_second; | |
+ return ret; | |
+} | |
+ | |
+static int checkstride(BWDIFContext *bwdif, const AVFrame *a, const AVFrame *b) | |
+{ | |
+ int i; | |
+ for (i = 0; i < bwdif->csp->nb_components; i++) | |
+ if (a->linesize[i] != b->linesize[i]) | |
+ return 1; | |
+ return 0; | |
+} | |
+ | |
+static void fixstride(AVFilterLink *link, AVFrame *f) | |
+{ | |
+ AVFrame *dst = ff_default_get_video_buffer(link, f->width, f->height); | |
+ if(!dst) | |
+ return; | |
+ av_frame_copy_props(dst, f); | |
+ av_image_copy(dst->data, dst->linesize, | |
+ (const uint8_t **)f->data, f->linesize, | |
+ dst->format, dst->width, dst->height); | |
+ av_frame_unref(f); | |
+ av_frame_move_ref(f, dst); | |
+ av_frame_free(&dst); | |
+} | |
+ | |
+static int filter_frame(AVFilterLink *link, AVFrame *frame) | |
+{ | |
+ AVFilterContext *ctx = link->dst; | |
+ BWDIFContext *bwdif = ctx->priv; | |
+ | |
+ av_assert0(frame); | |
+ | |
+ if (bwdif->frame_pending) | |
+ return_frame(ctx, 1); | |
+ | |
+ if (bwdif->prev) | |
+ av_frame_free(&bwdif->prev); | |
+ bwdif->prev = bwdif->cur; | |
+ bwdif->cur = bwdif->next; | |
+ bwdif->next = frame; | |
+ | |
+ if (!bwdif->cur) { | |
+ bwdif->cur = av_frame_clone(bwdif->next); | |
+ if (!bwdif->cur) | |
+ return AVERROR(ENOMEM); | |
+ bwdif->inter_field = 0; | |
+ } | |
+ | |
+ if (checkstride(bwdif, bwdif->next, bwdif->cur)) { | |
+ av_log(ctx, AV_LOG_VERBOSE, "Reallocating frame due to differing stride\n"); | |
+ fixstride(link, bwdif->next); | |
+ } | |
+ if (checkstride(bwdif, bwdif->next, bwdif->cur)) | |
+ fixstride(link, bwdif->cur); | |
+ if (bwdif->prev && checkstride(bwdif, bwdif->next, bwdif->prev)) | |
+ fixstride(link, bwdif->prev); | |
+ if (checkstride(bwdif, bwdif->next, bwdif->cur) || (bwdif->prev && checkstride(bwdif, bwdif->next, bwdif->prev))) { | |
+ av_log(ctx, AV_LOG_ERROR, "Failed to reallocate frame\n"); | |
+ return -1; | |
+ } | |
+ | |
+ if (!bwdif->prev) | |
+ return 0; | |
+ | |
+ if ((bwdif->deint && !bwdif->cur->interlaced_frame) || | |
+ ctx->is_disabled || | |
+ (bwdif->deint && !bwdif->prev->interlaced_frame && bwdif->prev->repeat_pict) || | |
+ (bwdif->deint && !bwdif->next->interlaced_frame && bwdif->next->repeat_pict) | |
+ ) { | |
+ bwdif->out = av_frame_clone(bwdif->cur); | |
+ if (!bwdif->out) | |
+ return AVERROR(ENOMEM); | |
+ | |
+ av_frame_free(&bwdif->prev); | |
+ if (bwdif->out->pts != AV_NOPTS_VALUE) | |
+ bwdif->out->pts *= 2; | |
+ return ff_filter_frame(ctx->outputs[0], bwdif->out); | |
+ } | |
+ | |
+ bwdif->out = ff_get_video_buffer(ctx->outputs[0], link->w, link->h); | |
+ if (!bwdif->out) | |
+ return AVERROR(ENOMEM); | |
+ | |
+ av_frame_copy_props(bwdif->out, bwdif->cur); | |
+ bwdif->out->interlaced_frame = 0; | |
+ | |
+ if (bwdif->out->pts != AV_NOPTS_VALUE) | |
+ bwdif->out->pts *= 2; | |
+ | |
+ return return_frame(ctx, 0); | |
+} | |
+ | |
+static int request_frame(AVFilterLink *link) | |
+{ | |
+ AVFilterContext *ctx = link->src; | |
+ BWDIFContext *bwdif = ctx->priv; | |
+ int ret; | |
+ | |
+ if (bwdif->frame_pending) { | |
+ return_frame(ctx, 1); | |
+ return 0; | |
+ } | |
+ | |
+ if (bwdif->eof) | |
+ return AVERROR_EOF; | |
+ | |
+ ret = ff_request_frame(link->src->inputs[0]); | |
+ | |
+ if (ret == AVERROR_EOF && bwdif->cur) { | |
+ AVFrame *next = av_frame_clone(bwdif->next); | |
+ | |
+ if (!next) | |
+ return AVERROR(ENOMEM); | |
+ | |
+ bwdif->inter_field = -1; | |
+ next->pts = bwdif->next->pts * 2 - bwdif->cur->pts; | |
+ | |
+ filter_frame(link->src->inputs[0], next); | |
+ bwdif->eof = 1; | |
+ } else if (ret < 0) { | |
+ return ret; | |
+ } | |
+ | |
+ return 0; | |
+} | |
+ | |
+static av_cold void uninit(AVFilterContext *ctx) | |
+{ | |
+ BWDIFContext *bwdif = ctx->priv; | |
+ | |
+ av_frame_free(&bwdif->prev); | |
+ av_frame_free(&bwdif->cur ); | |
+ av_frame_free(&bwdif->next); | |
+} | |
+ | |
+static int query_formats(AVFilterContext *ctx) | |
+{ | |
+ static const enum AVPixelFormat pix_fmts[] = { | |
+ AV_PIX_FMT_YUV410P, AV_PIX_FMT_YUV411P, AV_PIX_FMT_YUV420P, | |
+ AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUV440P, AV_PIX_FMT_YUV444P, | |
+ AV_PIX_FMT_YUVJ411P, AV_PIX_FMT_YUVJ420P, | |
+ AV_PIX_FMT_YUVJ422P, AV_PIX_FMT_YUVJ440P, AV_PIX_FMT_YUVJ444P, | |
+ AV_PIX_FMT_YUV420P9, AV_PIX_FMT_YUV422P9, AV_PIX_FMT_YUV444P9, | |
+ AV_PIX_FMT_YUV420P10, AV_PIX_FMT_YUV422P10, AV_PIX_FMT_YUV444P10, | |
+ AV_PIX_FMT_YUV420P12, AV_PIX_FMT_YUV422P12, AV_PIX_FMT_YUV444P12, | |
+ AV_PIX_FMT_YUV420P14, AV_PIX_FMT_YUV422P14, AV_PIX_FMT_YUV444P14, | |
+ AV_PIX_FMT_YUV420P16, AV_PIX_FMT_YUV422P16, AV_PIX_FMT_YUV444P16, | |
+ AV_PIX_FMT_YUVA420P, AV_PIX_FMT_YUVA422P, AV_PIX_FMT_YUVA444P, | |
+ AV_PIX_FMT_YUVA420P9, AV_PIX_FMT_YUVA422P9, AV_PIX_FMT_YUVA444P9, | |
+ AV_PIX_FMT_YUVA420P10, AV_PIX_FMT_YUVA422P10, AV_PIX_FMT_YUVA444P10, | |
+ AV_PIX_FMT_YUVA420P16, AV_PIX_FMT_YUVA422P16, AV_PIX_FMT_YUVA444P16, | |
+ AV_PIX_FMT_GBRP, AV_PIX_FMT_GBRP9, AV_PIX_FMT_GBRP10, | |
+ AV_PIX_FMT_GBRP12, AV_PIX_FMT_GBRP14, AV_PIX_FMT_GBRP16, | |
+ AV_PIX_FMT_GBRAP, AV_PIX_FMT_GBRAP16, | |
+ AV_PIX_FMT_GRAY8, AV_PIX_FMT_GRAY16, | |
+ AV_PIX_FMT_NONE | |
+ }; | |
+ | |
+ AVFilterFormats *fmts_list = ff_make_format_list(pix_fmts); | |
+ if (!fmts_list) | |
+ return AVERROR(ENOMEM); | |
+ | |
+ return ff_set_common_formats(ctx, fmts_list); | |
+} | |
+ | |
+static int config_props(AVFilterLink *link) | |
+{ | |
+ AVFilterContext *ctx = link->src; | |
+ BWDIFContext *s = link->src->priv; | |
+ | |
+ link->time_base.num = link->src->inputs[0]->time_base.num; | |
+ link->time_base.den = link->src->inputs[0]->time_base.den * 2; | |
+ link->w = link->src->inputs[0]->w; | |
+ link->h = link->src->inputs[0]->h; | |
+ | |
+ if(s->mode&1) | |
+ link->frame_rate = av_mul_q(link->src->inputs[0]->frame_rate, (AVRational){2,1}); | |
+ | |
+ if (link->w < 3 || link->h < 3) { | |
+ av_log(ctx, AV_LOG_ERROR, "Video of less than 3 columns or lines is not supported\n"); | |
+ return AVERROR(EINVAL); | |
+ } | |
+ | |
+ s->csp = av_pix_fmt_desc_get(link->format); | |
+ if (s->csp->comp[0].depth > 8) { | |
+ s->filter_intra = filter_intra_16bit; | |
+ s->filter_line = filter_line_16bit; | |
+ s->filter_edge = filter_edge_16bit; | |
+ } else { | |
+ s->filter_intra = filter_intra; | |
+ s->filter_line = filter_line; | |
+ s->filter_edge = filter_edge; | |
+ } | |
+ | |
+ return 0; | |
+} | |
+ | |
+ | |
+#define OFFSET(x) offsetof(BWDIFContext, x) | |
+#define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM | |
+ | |
+#define CONST(name, help, val, unit) { name, help, 0, AV_OPT_TYPE_CONST, {.i64=val}, INT_MIN, INT_MAX, FLAGS, unit } | |
+ | |
+static const AVOption bwdif_options[] = { | |
+ { "mode", "specify the interlacing mode", OFFSET(mode), AV_OPT_TYPE_INT, {.i64=BWDIF_MODE_SEND_FIELD}, 0, 1, FLAGS, "mode"}, | |
+ CONST("send_frame", "send one frame for each frame", BWDIF_MODE_SEND_FRAME, "mode"), | |
+ CONST("send_field", "send one frame for each field", BWDIF_MODE_SEND_FIELD, "mode"), | |
+ | |
+ { "parity", "specify the assumed picture field parity", OFFSET(parity), AV_OPT_TYPE_INT, {.i64=BWDIF_PARITY_AUTO}, -1, 1, FLAGS, "parity" }, | |
+ CONST("tff", "assume top field first", BWDIF_PARITY_TFF, "parity"), | |
+ CONST("bff", "assume bottom field first", BWDIF_PARITY_BFF, "parity"), | |
+ CONST("auto", "auto detect parity", BWDIF_PARITY_AUTO, "parity"), | |
+ | |
+ { "deint", "specify which frames to deinterlace", OFFSET(deint), AV_OPT_TYPE_INT, {.i64=BWDIF_DEINT_INTERLACED}, 0, 1, FLAGS, "deint" }, | |
+ CONST("all", "deinterlace all frames", BWDIF_DEINT_ALL, "deint"), | |
+ CONST("interlaced", "only deinterlace frames marked as interlaced", BWDIF_DEINT_INTERLACED, "deint"), | |
+ | |
+ { NULL } | |
+}; | |
+ | |
+AVFILTER_DEFINE_CLASS(bwdif); | |
+ | |
+static const AVFilterPad avfilter_vf_bwdif_inputs[] = { | |
+ { | |
+ .name = "default", | |
+ .type = AVMEDIA_TYPE_VIDEO, | |
+ .filter_frame = filter_frame, | |
+ }, | |
+ { NULL } | |
+}; | |
+ | |
+static const AVFilterPad avfilter_vf_bwdif_outputs[] = { | |
+ { | |
+ .name = "default", | |
+ .type = AVMEDIA_TYPE_VIDEO, | |
+ .request_frame = request_frame, | |
+ .config_props = config_props, | |
+ }, | |
+ { NULL } | |
+}; | |
+ | |
+AVFilter ff_vf_bwdif = { | |
+ .name = "bwdif", | |
+ .description = NULL_IF_CONFIG_SMALL("Deinterlace the input image."), | |
+ .priv_size = sizeof(BWDIFContext), | |
+ .priv_class = &bwdif_class, | |
+ .uninit = uninit, | |
+ .query_formats = query_formats, | |
+ .inputs = avfilter_vf_bwdif_inputs, | |
+ .outputs = avfilter_vf_bwdif_outputs, | |
+ .flags = AVFILTER_FLAG_SUPPORT_TIMELINE_INTERNAL | AVFILTER_FLAG_SLICE_THREADS, | |
+}; | |
diff --git a/libavformat/Makefile b/libavformat/Makefile | |
index 35a383d..001b3f1 100644 | |
--- a/libavformat/Makefile | |
+++ b/libavformat/Makefile | |
@@ -98,7 +98,7 @@ OBJS-$(CONFIG_AST_MUXER) += ast.o astenc.o | |
OBJS-$(CONFIG_AU_DEMUXER) += au.o pcm.o | |
OBJS-$(CONFIG_AU_MUXER) += au.o rawenc.o | |
OBJS-$(CONFIG_AVI_DEMUXER) += avidec.o isom.o | |
-OBJS-$(CONFIG_AVI_MUXER) += avienc.o mpegtsenc.o avlanguage.o | |
+OBJS-$(CONFIG_AVI_MUXER) += avienc.o mpegtsenc.o avlanguage.o rawutils.o | |
OBJS-$(CONFIG_AVISYNTH) += avisynth.o | |
OBJS-$(CONFIG_AVM2_MUXER) += swfenc.o swf.o | |
OBJS-$(CONFIG_AVR_DEMUXER) += avr.o pcm.o | |
diff --git a/libavformat/avienc.c b/libavformat/avienc.c | |
index 649961d..09ec63b 100644 | |
--- a/libavformat/avienc.c | |
+++ b/libavformat/avienc.c | |
@@ -79,7 +79,7 @@ typedef struct AVIStream { | |
AVIIndex indexes; | |
} AVIStream; | |
-static int avi_write_packet(AVFormatContext *s, AVPacket *pkt); | |
+static int avi_write_packet_internal(AVFormatContext *s, AVPacket *pkt); | |
static inline AVIIentry *avi_get_ientry(const AVIIndex *idx, int ent_id) | |
{ | |
@@ -637,7 +637,7 @@ static int write_skip_frames(AVFormatContext *s, int stream_index, int64_t dts) | |
empty_packet.size = 0; | |
empty_packet.data = NULL; | |
empty_packet.stream_index = stream_index; | |
- avi_write_packet(s, &empty_packet); | |
+ avi_write_packet_internal(s, &empty_packet); | |
ff_dlog(s, "dup dts:%s packet_count:%d\n", av_ts2str(dts), avist->packet_count); | |
} | |
@@ -646,13 +646,7 @@ static int write_skip_frames(AVFormatContext *s, int stream_index, int64_t dts) | |
static int avi_write_packet(AVFormatContext *s, AVPacket *pkt) | |
{ | |
- unsigned char tag[5]; | |
- unsigned int flags = 0; | |
const int stream_index = pkt->stream_index; | |
- int size = pkt->size; | |
- AVIContext *avi = s->priv_data; | |
- AVIOContext *pb = s->pb; | |
- AVIStream *avist = s->streams[stream_index]->priv_data; | |
AVCodecContext *enc = s->streams[stream_index]->codec; | |
int ret; | |
@@ -665,6 +659,35 @@ static int avi_write_packet(AVFormatContext *s, AVPacket *pkt) | |
if ((ret = write_skip_frames(s, stream_index, pkt->dts)) < 0) | |
return ret; | |
+ if (enc->codec_id == AV_CODEC_ID_RAWVIDEO && enc->codec_tag == 0) { | |
+ int64_t bpc = enc->bits_per_coded_sample != 15 ? enc->bits_per_coded_sample : 16; | |
+ int expected_stride = ((enc->width * bpc + 31) >> 5)*4; | |
+ | |
+ ret = ff_reshuffle_raw_rgb(s, &pkt, enc, expected_stride); | |
+ if (ret < 0) | |
+ return ret; | |
+ if (ret) { | |
+ ret = avi_write_packet_internal(s, pkt); | |
+ av_packet_free(&pkt); | |
+ return ret; | |
+ } | |
+ } | |
+ | |
+ return avi_write_packet_internal(s, pkt); | |
+} | |
+ | |
+static int avi_write_packet_internal(AVFormatContext *s, AVPacket *pkt) | |
+{ | |
+ unsigned char tag[5]; | |
+ unsigned int flags = 0; | |
+ const int stream_index = pkt->stream_index; | |
+ int size = pkt->size; | |
+ AVIContext *avi = s->priv_data; | |
+ AVIOContext *pb = s->pb; | |
+ AVIStream *avist = s->streams[stream_index]->priv_data; | |
+ AVCodecContext *enc = s->streams[stream_index]->codec; | |
+ int ret; | |
+ | |
if (pkt->dts != AV_NOPTS_VALUE) | |
avist->last_dts = pkt->dts + pkt->duration; | |
diff --git a/libavformat/internal.h b/libavformat/internal.h | |
index fee823d..93be632 100644 | |
--- a/libavformat/internal.h | |
+++ b/libavformat/internal.h | |
@@ -560,4 +560,12 @@ void ff_format_io_close(AVFormatContext *s, AVIOContext **pb); | |
*/ | |
int ff_parse_creation_time_metadata(AVFormatContext *s, int64_t *timestamp, int return_seconds); | |
+/** | |
+ * Reshuffles the lines to use the user specified stride. | |
+ * | |
+ * @param ppkt input and output packet | |
+ * @return negative error code or 0 or 1, 1 indicates that ppkt needs to be freed | |
+ */ | |
+int ff_reshuffle_raw_rgb(AVFormatContext *s, AVPacket **ppkt, AVCodecContext *enc, int expected_stride); | |
+ | |
#endif /* AVFORMAT_INTERNAL_H */ | |
diff --git a/libavformat/options_table.h b/libavformat/options_table.h | |
index 8926fe5..74923d8 100644 | |
--- a/libavformat/options_table.h | |
+++ b/libavformat/options_table.h | |
@@ -78,7 +78,7 @@ static const AVOption avformat_options[] = { | |
{"careful", "consider things that violate the spec, are fast to check and have not been seen in the wild as errors", 0, AV_OPT_TYPE_CONST, {.i64 = AV_EF_CAREFUL }, INT_MIN, INT_MAX, D, "err_detect"}, | |
{"compliant", "consider all spec non compliancies as errors", 0, AV_OPT_TYPE_CONST, {.i64 = AV_EF_COMPLIANT }, INT_MIN, INT_MAX, D, "err_detect"}, | |
{"aggressive", "consider things that a sane encoder shouldn't do as an error", 0, AV_OPT_TYPE_CONST, {.i64 = AV_EF_AGGRESSIVE }, INT_MIN, INT_MAX, D, "err_detect"}, | |
-{"use_wallclock_as_timestamps", "use wallclock as timestamps", OFFSET(use_wallclock_as_timestamps), AV_OPT_TYPE_INT, {.i64 = 0}, 0, INT_MAX-1, D}, | |
+{"use_wallclock_as_timestamps", "use wallclock as timestamps", OFFSET(use_wallclock_as_timestamps), AV_OPT_TYPE_BOOL, {.i64 = 0}, 0, 1, D}, | |
{"skip_initial_bytes", "set number of bytes to skip before reading header and frames", OFFSET(skip_initial_bytes), AV_OPT_TYPE_INT64, {.i64 = 0}, 0, INT64_MAX-1, D}, | |
{"correct_ts_overflow", "correct single timestamp overflows", OFFSET(correct_ts_overflow), AV_OPT_TYPE_BOOL, {.i64 = 1}, 0, 1, D}, | |
{"flush_packets", "enable flushing of the I/O context after each packet", OFFSET(flush_packets), AV_OPT_TYPE_BOOL, {.i64 = 1}, 0, 1, E}, | |
diff --git a/libavformat/rawutils.c b/libavformat/rawutils.c | |
new file mode 100644 | |
index 0000000..1e6148d | |
--- /dev/null | |
+++ b/libavformat/rawutils.c | |
@@ -0,0 +1,66 @@ | |
+/* | |
+ * Raw video utils | |
+ * Copyright (c) 2016 Michael Niedermayer | |
+ * | |
+ * This file is part of FFmpeg. | |
+ * | |
+ * FFmpeg is free software; you can redistribute it and/or | |
+ * modify it under the terms of the GNU Lesser General Public | |
+ * License as published by the Free Software Foundation; either | |
+ * version 2.1 of the License, or (at your option) any later version. | |
+ * | |
+ * FFmpeg is distributed in the hope that it will be useful, | |
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of | |
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | |
+ * Lesser General Public License for more details. | |
+ * | |
+ * You should have received a copy of the GNU Lesser General Public | |
+ * License along with FFmpeg; if not, write to the Free Software | |
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA | |
+ */ | |
+ | |
+#include "avformat.h" | |
+#include "internal.h" | |
+ | |
+int ff_reshuffle_raw_rgb(AVFormatContext *s, AVPacket **ppkt, AVCodecContext *enc, int expected_stride) | |
+{ | |
+ int ret; | |
+ AVPacket *pkt = *ppkt; | |
+ int64_t bpc = enc->bits_per_coded_sample != 15 ? enc->bits_per_coded_sample : 16; | |
+ int min_stride = (enc->width * bpc + 7) >> 3; | |
+ int with_pal_size = min_stride * enc->height + 1024; | |
+ int size = bpc == 8 && pkt->size == with_pal_size ? min_stride * enc->height : pkt->size; | |
+ int stride = size / enc->height; | |
+ int padding = expected_stride - FFMIN(expected_stride, stride); | |
+ int y; | |
+ AVPacket *new_pkt; | |
+ | |
+ if (pkt->size == expected_stride * enc->height) | |
+ return 0; | |
+ if (size != stride * enc->height) | |
+ return 0; | |
+ | |
+ new_pkt = av_packet_alloc(); | |
+ if (!new_pkt) | |
+ return AVERROR(ENOMEM); | |
+ | |
+ ret = av_new_packet(new_pkt, expected_stride * enc->height); | |
+ if (ret < 0) | |
+ goto fail; | |
+ | |
+ ret = av_packet_copy_props(new_pkt, pkt); | |
+ if (ret < 0) | |
+ goto fail; | |
+ | |
+ for (y = 0; y<enc->height; y++) { | |
+ memcpy(new_pkt->data + y*expected_stride, pkt->data + y*stride, FFMIN(expected_stride, stride)); | |
+ memset(new_pkt->data + y*expected_stride + expected_stride - padding, 0, padding); | |
+ } | |
+ | |
+ *ppkt = new_pkt; | |
+ return 1; | |
+fail: | |
+ av_packet_free(&new_pkt); | |
+ | |
+ return ret; | |
+} | |
diff --git a/libavformat/version.h b/libavformat/version.h | |
index 024ab91..62050a2 100644 | |
--- a/libavformat/version.h | |
+++ b/libavformat/version.h | |
@@ -31,7 +31,7 @@ | |
#define LIBAVFORMAT_VERSION_MAJOR 57 | |
#define LIBAVFORMAT_VERSION_MINOR 25 | |
-#define LIBAVFORMAT_VERSION_MICRO 100 | |
+#define LIBAVFORMAT_VERSION_MICRO 101 | |
#define LIBAVFORMAT_VERSION_INT AV_VERSION_INT(LIBAVFORMAT_VERSION_MAJOR, \ | |
LIBAVFORMAT_VERSION_MINOR, \ | |
diff --git a/libavutil/audio_fifo.c b/libavutil/audio_fifo.c | |
index d5298cc..e4d38e0 100644 | |
--- a/libavutil/audio_fifo.c | |
+++ b/libavutil/audio_fifo.c | |
@@ -155,6 +155,30 @@ int av_audio_fifo_peek(AVAudioFifo *af, void **data, int nb_samples) | |
return nb_samples; | |
} | |
+int av_audio_fifo_peek_at(AVAudioFifo *af, void **data, int nb_samples, int offset) | |
+{ | |
+ int i, ret, size; | |
+ | |
+ if (offset < 0 || offset >= af->nb_samples) | |
+ return AVERROR(EINVAL); | |
+ if (nb_samples < 0) | |
+ return AVERROR(EINVAL); | |
+ nb_samples = FFMIN(nb_samples, af->nb_samples); | |
+ if (!nb_samples) | |
+ return 0; | |
+ if (offset > af->nb_samples - nb_samples) | |
+ return AVERROR(EINVAL); | |
+ | |
+ offset *= af->sample_size; | |
+ size = nb_samples * af->sample_size; | |
+ for (i = 0; i < af->nb_buffers; i++) { | |
+ if ((ret = av_fifo_generic_peek_at(af->buf[i], data[i], offset, size, NULL)) < 0) | |
+ return AVERROR_BUG; | |
+ } | |
+ | |
+ return nb_samples; | |
+} | |
+ | |
int av_audio_fifo_read(AVAudioFifo *af, void **data, int nb_samples) | |
{ | |
int i, ret, size; | |
diff --git a/libavutil/audio_fifo.h b/libavutil/audio_fifo.h | |
index 24f91da..d8a9194 100644 | |
--- a/libavutil/audio_fifo.h | |
+++ b/libavutil/audio_fifo.h | |
@@ -111,6 +111,23 @@ int av_audio_fifo_write(AVAudioFifo *af, void **data, int nb_samples); | |
int av_audio_fifo_peek(AVAudioFifo *af, void **data, int nb_samples); | |
/** | |
+ * Peek data from an AVAudioFifo. | |
+ * | |
+ * @see enum AVSampleFormat | |
+ * The documentation for AVSampleFormat describes the data layout. | |
+ * | |
+ * @param af AVAudioFifo to read from | |
+ * @param data audio data plane pointers | |
+ * @param nb_samples number of samples to peek | |
+ * @param offset offset from current read position | |
+ * @return number of samples actually peek, or negative AVERROR code | |
+ * on failure. The number of samples actually peek will not | |
+ * be greater than nb_samples, and will only be less than | |
+ * nb_samples if av_audio_fifo_size is less than nb_samples. | |
+ */ | |
+int av_audio_fifo_peek_at(AVAudioFifo *af, void **data, int nb_samples, int offset); | |
+ | |
+/** | |
* Read data from an AVAudioFifo. | |
* | |
* @see enum AVSampleFormat | |
diff --git a/tests/ref/vsynth/vsynth3-bpp1 b/tests/ref/vsynth/vsynth3-bpp1 | |
index 5a65728..39f27f3 100644 | |
--- a/tests/ref/vsynth/vsynth3-bpp1 | |
+++ b/tests/ref/vsynth/vsynth3-bpp1 | |
@@ -1,4 +1,4 @@ | |
-98852649c5201df7d85d0e9b5a5b9f15 *tests/data/fate/vsynth3-bpp1.avi | |
-15352 tests/data/fate/vsynth3-bpp1.avi | |
+d5689d1f5c2d4c28a345d5964a6161a8 *tests/data/fate/vsynth3-bpp1.avi | |
+20452 tests/data/fate/vsynth3-bpp1.avi | |
0b1ea21b69d384564dd3a978065443b2 *tests/data/fate/vsynth3-bpp1.out.rawvideo | |
stddev: 97.64 PSNR: 8.34 MAXDIFF: 248 bytes: 86700/ 86700 | |
diff --git a/tests/ref/vsynth/vsynth3-rgb b/tests/ref/vsynth/vsynth3-rgb | |
index c0a8563..f67d285 100644 | |
--- a/tests/ref/vsynth/vsynth3-rgb | |
+++ b/tests/ref/vsynth/vsynth3-rgb | |
@@ -1,4 +1,4 @@ | |
-a2cb86007b8945e2d1399b56585b983a *tests/data/fate/vsynth3-rgb.avi | |
-180252 tests/data/fate/vsynth3-rgb.avi | |
+000bd5f3251bfd6a2a2b590b2d16fe0b *tests/data/fate/vsynth3-rgb.avi | |
+183652 tests/data/fate/vsynth3-rgb.avi | |
693aff10c094f8bd31693f74cf79d2b2 *tests/data/fate/vsynth3-rgb.out.rawvideo | |
stddev: 3.67 PSNR: 36.82 MAXDIFF: 43 bytes: 86700/ 86700 |
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment