Skip to content

Instantly share code, notes, and snippets.

@Wollw
Last active January 1, 2016 17:09
Show Gist options
  • Save Wollw/8174884 to your computer and use it in GitHub Desktop.
thrown together sorting filter for ffmpeg
/*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
/**
* @file
* sort video filter
*/
#include <stdlib.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>
#include <errno.h>
#include "libavutil/common.h"
#include "libavutil/imgutils.h"
#include "libavutil/opt.h"
#include "libavutil/pixdesc.h"
#include "libavutil/avassert.h"
#include "libavutil/avstring.h"
#include "libavutil/internal.h"
#include "libavutil/intreadwrite.h"
#include "libavutil/eval.h"
#include "avfilter.h"
#include "drawutils.h"
#include "formats.h"
#include "internal.h"
#include "video.h"
/**
 * Per-filter-instance state, populated from the "sort" AVOptions.
 */
typedef struct {
const AVClass *class;   ///< must be first: required by the AVOption system
double weightY;         ///< weight applied to the (averaged) luma sample in the sort key
double weightU;         ///< weight applied to the U chroma sample in the sort key
double weightV;         ///< weight applied to the V chroma sample in the sort key
int64_t averageY;       ///< non-zero: flatten the moved 2x2 luma block to its average instead of swapping
} SortContext;
/**
 * Parameter bundle handed to each slice-threading job via
 * ctx->internal->execute() (see filter_frame / filter_slice).
 */
typedef struct ThreadData {
AVFilterLink *in, *out; ///< input/output links (dimensions come from `in`)
AVFrame *frame;         ///< writable frame that is sorted in place
SortContext *sortctx;   ///< filter options (weights, averaging flag)
const AVPixFmtDescriptor *desc; ///< descriptor of the input pixel format (chroma subsampling)
} ThreadData;
/**
 * Slice-threaded worker: selection-sorts the pixels of every chroma row
 * assigned to this job (rows are interleaved across jobs: job, job+nb_jobs,
 * ...). The sort key for a chroma column is
 *   weightU*U + weightV*V + weightY*avg(2x2 luma block).
 * Chroma samples are swapped; the corresponding 2x2 luma blocks are either
 * swapped or (with the "avg" option) flattened to their average.
 *
 * Assumes 8-bit planar YUV 4:2:0 input (each chroma sample covers a 2x2
 * luma block).
 *
 * @return 0 (the frame is modified in place)
 */
static int filter_slice(AVFilterContext *ctx, void *arg, int job, int nb_jobs)
{
    ThreadData *td = arg;
    AVFilterLink *link = td->in;
    AVFrame *frame = td->frame;
    SortContext *s = td->sortctx;
    const AVPixFmtDescriptor *desc = td->desc;
    /* Strides must come from the frame, not the visible width:
     * linesize[] may include alignment padding, so stepping by the
     * width misreads and corrupts rows on padded frames. */
    const int strideY  = frame->linesize[0];
    const int strideUV = frame->linesize[1];
    const int heightUV = FF_CEIL_RSHIFT(link->h, desc->log2_chroma_h);
    const int widthUV  = FF_CEIL_RSHIFT(link->w, desc->log2_chroma_w);
    uint8_t *dataY, *dataU, *dataV;
    int i;

    /* Nothing to do for non-planar data (guard kept from the original
     * per-plane validity check). */
    if (!frame->data[1] || !frame->data[2] || !strideUV)
        return 0;

    dataY = frame->data[0] + strideY  * 2 * job;
    dataU = frame->data[1] + strideUV * job;
    dataV = frame->data[2] + strideUV * job;

    for (i = job; i < heightUV; i += nb_jobs) {
        /* With an odd frame height the last chroma row has no second luma
         * row beneath it; fold the "row below" accesses back onto the same
         * row instead of reading past the buffer. */
        const int below = (2 * i + 1 < link->h) ? strideY : 0;

        for (int x = 0; x < widthUV; x++) {
            int min = x;
            int temp;

            /* Selection sort: locate the remaining column with the
             * smallest weighted key. */
            for (int y = x; y < widthUV; y++) {
                int avgYy   = (dataY[y*2]       + dataY[y*2+1] +
                               dataY[y*2+below] + dataY[y*2+below+1]) / 4;
                int avgYmin = (dataY[min*2]       + dataY[min*2+1] +
                               dataY[min*2+below] + dataY[min*2+below+1]) / 4;
                if (dataU[min] * s->weightU + dataV[min] * s->weightV + avgYmin * s->weightY >
                    dataU[y]   * s->weightU + dataV[y]   * s->weightV + avgYy   * s->weightY)
                    min = y;
            }

            /* Swap the chroma samples. */
            temp = dataU[x]; dataU[x] = dataU[min]; dataU[min] = temp;
            temp = dataV[x]; dataV[x] = dataV[min]; dataV[min] = temp;

            if (s->averageY) {
                /* "avg" mode: the luma block moving into position x is
                 * flattened to the average of the block that was at min;
                 * the block formerly at x moves to min untouched. */
                temp = (dataY[min*2]       + dataY[min*2+1] +
                        dataY[min*2+below] + dataY[min*2+below+1]) / 4;
                dataY[min*2]   = dataY[x*2];
                dataY[x*2]     = temp;
                dataY[min*2+1] = dataY[x*2+1];
                dataY[x*2+1]   = temp;
                if (below) {
                    dataY[min*2+below]   = dataY[x*2+below];
                    dataY[x*2+below]     = temp;
                    dataY[min*2+below+1] = dataY[x*2+below+1];
                    dataY[x*2+below+1]   = temp;
                }
            } else {
                /* Plain swap of the two 2x2 luma blocks. */
                temp = dataY[x*2];   dataY[x*2]   = dataY[min*2];   dataY[min*2]   = temp;
                temp = dataY[x*2+1]; dataY[x*2+1] = dataY[min*2+1]; dataY[min*2+1] = temp;
                if (below) {
                    temp = dataY[x*2+below];
                    dataY[x*2+below]   = dataY[min*2+below];
                    dataY[min*2+below] = temp;
                    temp = dataY[x*2+below+1];
                    dataY[x*2+below+1]   = dataY[min*2+below+1];
                    dataY[min*2+below+1] = temp;
                }
            }
        }

        /* Advance to this job's next interleaved row pair. */
        dataY += strideY  * 2 * nb_jobs;
        dataU += strideUV * nb_jobs;
        dataV += strideUV * nb_jobs;
    }
    return 0;
}
/**
 * Input-pad callback: sort the frame in place across slice threads and
 * forward it downstream.
 *
 * @param link  input link the frame arrived on
 * @param frame frame to filter (ownership is taken; passed on or freed)
 * @return 0 on success, a negative AVERROR code on failure
 */
static int filter_frame(AVFilterLink *link, AVFrame *frame)
{
    AVFilterContext *ctx = link->dst;
    AVFilterLink *outlink = ctx->outputs[0];
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(link->format);
    SortContext *s = ctx->priv;
    ThreadData td;
    int ret;

    /* The input pad sets .needs_writable, so the frame should already be
     * writable; if it somehow is not, copy it instead of aborting the
     * whole process — a library filter must never printf()/exit(). */
    if (!av_frame_is_writable(frame)) {
        ret = av_frame_make_writable(frame);
        if (ret < 0) {
            av_frame_free(&frame);
            return ret;
        }
    }

    td.in      = link;
    td.out     = outlink;
    td.frame   = frame;
    td.sortctx = s;
    td.desc    = desc;

    /* Rows are interleaved across jobs, so never launch more jobs than
     * there are rows. */
    ctx->internal->execute(ctx, filter_slice, &td, NULL,
                           FFMIN(outlink->h, ctx->graph->nb_threads));

    return ff_filter_frame(outlink, frame);
}
#define OFFSET(x) offsetof(SortContext, x)
#define FLAGS AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_FILTERING_PARAM
static const AVOption sort_options[] = {
{ "y", "weight for y", OFFSET(weightY), AV_OPT_TYPE_DOUBLE, {.dbl = 1.0}, -1, 1, FLAGS},
{ "u", "weight for u", OFFSET(weightU), AV_OPT_TYPE_DOUBLE, {.dbl = 1.0}, -1, 1, FLAGS},
{ "v", "weight for v", OFFSET(weightV), AV_OPT_TYPE_DOUBLE, {.dbl = 1.0}, -1, 1, FLAGS},
{ "avg", "flag for averaging Y values", OFFSET(averageY), AV_OPT_TYPE_INT, {.i64 = 0}, 0, 1, FLAGS},
{ NULL }
};
AVFILTER_DEFINE_CLASS(sort);
/* Single video input; the frame is modified in place, so request a
 * writable frame from the framework. */
static const AVFilterPad avfilter_vf_sort_inputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_VIDEO,
.filter_frame = filter_frame,
.needs_writable = 1
},
{ NULL }
};
/* Single video output; frames are passed through unchanged in size/format. */
static const AVFilterPad avfilter_vf_sort_outputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_VIDEO,
},
{ NULL }
};
AVFilter ff_vf_sort = {
.name = "sort",
.description = NULL_IF_CONFIG_SMALL("Sort the colors of each row."),
.priv_size = sizeof(SortContext),
.priv_class = &sort_class,
.inputs = avfilter_vf_sort_inputs,
.outputs = avfilter_vf_sort_outputs,
.flags = AVFILTER_FLAG_SLICE_THREADS,
};
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment