@filipnavara
Created December 29, 2020 18:04
using System;
using System.Diagnostics;
using System.IO;
namespace bunzip
{
public class BZip2InputStream : Stream
{
bunzip_data bunzip;
public BZip2InputStream(Stream stream)
{
if (start_bunzip(out bunzip, stream, null, 0) != 0)
throw new InvalidDataException("Stream does not contain valid bzip2 data.");
}
public override bool CanRead => true;
public override bool CanSeek => false;
public override bool CanWrite => false;
public override long Length => throw new NotSupportedException();
public override long Position { get => throw new NotSupportedException(); set => throw new NotSupportedException(); }
public override void Flush()
{
// No-op: the stream is read-only, so there is nothing to flush.
}
public override int Read(byte[] buffer, int offset, int count)
{
int i = write_bunzip_data(bunzip, bunzip.bwdata[0], buffer.AsSpan(offset, count));
if (i == RETVAL_LAST_BLOCK)
{
// Final block reached: verify the file-level CRC before reporting end of stream.
if (bunzip.bwdata[0].headerCRC == bunzip.totalCRC) i = 0;
else i = RETVAL_DATA_ERROR;
}
if (i < 0) throw new InvalidDataException("Invalid or corrupted bzip2 data.");
return i;
}
public override long Seek(long offset, SeekOrigin origin)
{
throw new NotSupportedException();
}
public override void SetLength(long value)
{
throw new NotSupportedException();
}
public override void Write(byte[] buffer, int offset, int count)
{
throw new NotSupportedException();
}
/* bzcat.c - bzip2 decompression
*
* Copyright 2003, 2007 Rob Landley <rob@landley.net>
*
* Based on a close reading (but not the actual code) of the original bzip2
* decompression code by Julian R Seward (jseward@acm.org), which also
* acknowledges contributions by Mike Burrows, David Wheeler, Peter Fenwick,
* Alistair Moffat, Radford Neal, Ian H. Witten, Robert Sedgewick, and
* Jon L. Bentley.
*
* No standard.
USE_BZCAT(NEWTOY(bzcat, NULL, TOYFLAG_USR|TOYFLAG_BIN))
USE_BUNZIP2(NEWTOY(bunzip2, "cftkv", TOYFLAG_USR|TOYFLAG_BIN))
config BUNZIP2
bool "bunzip2"
default y
help
usage: bunzip2 [-cftkv] [FILE...]
Decompress listed files (file.bz becomes file) deleting archive file(s).
Read from stdin if no files listed.
-c Force output to stdout
-f Force decompression (if FILE doesn't end in .bz, replace original)
-k Keep input files (-c and -t imply this)
-t Test integrity
-v Verbose
config BZCAT
bool "bzcat"
default y
help
usage: bzcat [FILE...]
Decompress listed files to stdout. Use stdin if no files listed.
*/
const int THREADS = 1;
// Constants for huffman coding
const int MAX_GROUPS = 6;
const int GROUP_SIZE = 50; /* 64 would have been more efficient */
const int MAX_HUFCODE_BITS = 20; /* Longest huffman code allowed */
const int MAX_SYMBOLS = 258; /* 256 literals + RUNA + RUNB */
//const int SYMBOL_RUNA = 0;
const int SYMBOL_RUNB = 1;
// Other housekeeping constants
const int IOBUF_SIZE = 4096;
// Status return values
const int RETVAL_LAST_BLOCK = (-100);
const int RETVAL_NOT_BZIP_DATA = (-1);
const int RETVAL_DATA_ERROR = (-2);
const int RETVAL_OBSOLETE_INPUT = (-3);
// This is what we know about each huffman coding group
class group_data
{
public uint[] limit = new uint[MAX_HUFCODE_BITS + 2]; // one entry larger than the C original, which offsets the pointer by -1
public uint[] @base = new uint[MAX_HUFCODE_BITS + 1]; // one entry larger than the C original, which offsets the pointer by -1
public uint[] permute = new uint[MAX_SYMBOLS];
public byte minLen;
public byte maxLen;
};
// Data for burrows wheeler transform
class bwdata
{
public uint origPtr;
public uint[] byteCount = new uint[256];
// State saved when interrupting output
public uint writePos;
public int writeRun, writeCount, writeCurrent, writePrevious;
public int writeCopies;
public byte writeOutByte;
public uint dataCRC, headerCRC;
public uint[] dbuf;
};
// Structure holding all the housekeeping data, including IO buffers and
// memory that persists between calls to bunzip
class bunzip_data
{
// Input stream, input buffer, input bit buffer
public Stream in_fd;
public int inbufCount, inbufPos;
public byte[] inbuf = new byte[IOBUF_SIZE];
public byte inbufBitCount;
public uint inbufBits;
// Output buffer
public uint totalCRC;
// First pass decompression data (Huffman and MTF decoding)
public byte[] selectors = new byte[32768]; // nSelectors=15 bits
public group_data[] groups = new group_data[MAX_GROUPS]; // huffman coding tables
public uint symTotal, groupCount, nSelectors;
public byte[] symToByte = new byte[256];
public byte[] mtfSymbol = new byte[256];
// The CRC values stored in the block header and calculated from the data
public uint[] crc32Table = new uint[256];
// Second pass decompression data (burrows-wheeler transform)
public uint dbufSize;
public bwdata[] bwdata = new bwdata[THREADS];
public bunzip_data()
{
for (int i = 0; i < MAX_GROUPS; i++)
groups[i] = new group_data();
}
};
// Return the next nnn bits of input. All reads from the compressed input
// are done through this function. All reads are big endian.
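// Illustration (derived from the code below): if the bit buffer holds the single
// byte 0xAB (inbufBitCount == 8, binary 10101011) and 3 bits are requested, the
// result is (0xAB >> 5) & 0b111 == 0b101, i.e. the top three bits of the buffered
// input, and 5 bits remain buffered for the next call.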
static uint get_bits(bunzip_data bd, byte bits_wanted)
{
uint bits = 0;
// If we need to get more data from the byte buffer, do so. (Loop getting
// one byte at a time to enforce endianness and avoid unaligned access.)
while (bd.inbufBitCount < bits_wanted)
{
// If we need to read more data from file into byte buffer, do so
if (bd.inbufPos == bd.inbufCount)
{
bd.inbufCount = bd.in_fd.Read(bd.inbuf);
if (bd.inbufCount == 0)
throw new EndOfStreamException();
bd.inbufPos = 0;
}
// Avoid 32-bit overflow (dump bit buffer to top of output)
if (bd.inbufBitCount >= 24)
{
bits = bd.inbufBits & (uint)((1 << bd.inbufBitCount) - 1);
bits_wanted -= bd.inbufBitCount;
bits <<= bits_wanted;
bd.inbufBitCount = 0;
}
// Grab next 8 bits of input from buffer.
bd.inbufBits = (bd.inbufBits << 8) | bd.inbuf[bd.inbufPos++];
bd.inbufBitCount += 8;
}
// Calculate result
bd.inbufBitCount -= bits_wanted;
bits |= (bd.inbufBits >> bd.inbufBitCount) & (uint)((1 << bits_wanted) - 1);
return bits;
}
/* Read block header at start of a new compressed data block. Consists of:
*
* 48 bits : Block signature, either pi (data block) or e (EOF block).
* 32 bits : bw->headerCRC
* 1 bit : obsolete feature flag.
* 24 bits : origPtr (Burrows-wheeler unwind index, only 20 bits ever used)
* 16 bits : Mapping table index.
*[16 bits]: symToByte[symTotal] (Mapping table. For each bit set in mapping
* table index above, read another 16 bits of mapping table data.
* If the corresponding bit is unset, all bits in that mapping table
* section are 0.)
* 3 bits : groupCount (how many huffman tables used to encode, anywhere
* from 2 to MAX_GROUPS)
* variable: hufGroup[groupCount] (MTF encoded huffman table data.)
*/
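// Mapping-table illustration: if a block only ever uses bytes 0x61..0x64
// ('a'..'d'), which all fall in 16-byte group 6 (0x60..0x6F), then only bit
// (15 - 6) of the 16-bit index is set and a single further 16-bit word follows,
// with bits set for positions 1..4 within that group.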
static int read_block_header(bunzip_data bd, bwdata bw)
{
group_data hufGroup;
uint hh, ii, jj, kk;
uint symCount;
uint[] @base;
uint[] limit;
byte uc;
// Read in header signature and CRC (which is stored big endian)
ii = get_bits(bd, 24);
jj = get_bits(bd, 24);
bw.headerCRC = get_bits(bd, 32);
// Is this the EOF block with CRC for whole file? (Constant is "e")
if (ii == 0x177245 && jj == 0x385090) return RETVAL_LAST_BLOCK;
// Is this a valid data block? (Constant is "pi".)
if (ii != 0x314159 || jj != 0x265359) return RETVAL_NOT_BZIP_DATA;
// We can add support for blockRandomised if anybody complains.
if (get_bits(bd, 1) > 0) return RETVAL_OBSOLETE_INPUT;
if ((bw.origPtr = get_bits(bd, 24)) > bd.dbufSize) return RETVAL_DATA_ERROR;
// mapping table: if some byte values are never used (encoding things
// like ascii text), the compression code removes the gaps to have fewer
// symbols to deal with, and writes a sparse bitfield indicating which
// values were present. We make a translation table to convert the symbols
// back to the corresponding bytes.
hh = get_bits(bd, 16);
bd.symTotal = 0;
for (ii = 0; ii < 16; ii++)
{
if ((hh & (uint)(1 << (int)(15 - ii))) > 0)
{
kk = get_bits(bd, 16);
for (jj = 0; jj < 16; jj++)
if ((kk & (uint)(1 << (int)(15 - jj))) > 0)
bd.symToByte[bd.symTotal++] = (byte)((16 * ii) + jj);
}
}
// How many different huffman coding groups does this block use?
bd.groupCount = get_bits(bd, 3);
if (bd.groupCount < 2 || bd.groupCount > MAX_GROUPS) return RETVAL_DATA_ERROR;
// nSelectors: Every GROUP_SIZE many symbols we switch huffman coding
// tables. Each group has a selector, which is an index into the huffman
// coding table arrays.
//
// Read in the group selector array, which is stored as MTF encoded
// bit runs. (MTF = Move To Front. Every time a symbol occurs it's moved
// to the front of the table, so it has a shorter encoding next time.)
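// MTF illustration: with the table [0,1,2,3], decoding index 2 yields selector 2
// and the table becomes [2,0,1,3]; a repeat of the same selector would then be
// index 0, the cheapest encoding (a single 0 bit).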
if ((bd.nSelectors = get_bits(bd, 15)) == 0) return RETVAL_DATA_ERROR;
for (ii = 0; ii < bd.groupCount; ii++) bd.mtfSymbol[ii] = (byte)ii;
for (ii = 0; ii < bd.nSelectors; ii++)
{
// Get next value
for (jj = 0; get_bits(bd, 1) > 0; jj++)
if (jj >= bd.groupCount) return RETVAL_DATA_ERROR;
// Decode MTF to get the next selector, and move it to the front.
uc = bd.mtfSymbol[jj];
Buffer.BlockCopy(bd.mtfSymbol, 0, bd.mtfSymbol, 1, (int)jj);
//memmove(bd->mtfSymbol + 1, bd->mtfSymbol, jj);
bd.mtfSymbol[0] = bd.selectors[ii] = uc;
}
// Read the huffman coding tables for each group, which code for symTotal
// literal symbols, plus two run symbols (RUNA, RUNB)
symCount = bd.symTotal + 2;
for (jj = 0; jj < bd.groupCount; jj++)
{
byte[] length = new byte[MAX_SYMBOLS];
uint[] temp = new uint[MAX_HUFCODE_BITS + 1];
byte minLen, maxLen;
uint pp;
// Read lengths
hh = get_bits(bd, 5);
for (ii = 0; ii < symCount; ii++)
{
for (; ; )
{
// !hh || hh > MAX_HUFCODE_BITS in one test.
if (MAX_HUFCODE_BITS - 1 < (uint)hh - 1) return RETVAL_DATA_ERROR;
// Grab 2 bits instead of 1 (slightly smaller/faster). Stop if
// first bit is 0, otherwise second bit says whether to
// increment or decrement.
kk = get_bits(bd, 2);
if ((kk & 2) > 0) hh += 1 - ((kk & 1) << 1);
else
{
bd.inbufBitCount++;
break;
}
}
length[ii] = (byte)hh;
}
// Find largest and smallest lengths in this group
minLen = maxLen = length[0];
for (ii = 1; ii < symCount; ii++)
{
if (length[ii] > maxLen) maxLen = length[ii];
else if (length[ii] < minLen) minLen = length[ii];
}
/* Calculate permute[], base[], and limit[] tables from length[].
*
* permute[] is the lookup table for converting huffman coded symbols
* into decoded symbols. It contains symbol values sorted by length.
*
* base[] is the amount to subtract from the value of a huffman symbol
* of a given length when using permute[].
*
* limit[] indicates the largest numerical value a symbol with a given
* number of bits can have. It lets us know when to stop reading.
*
* To use these, keep reading bits until value <= limit[bitcount] or
* you've read over 20 bits (error). Then the decoded symbol
* equals permute[hufcode_value - base[hufcode_bitcount]].
*/
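// Worked example (illustrative only): four symbols with code lengths {1,2,3,3}
// get the canonical codes 0, 10, 110 and 111. The loops below then produce
// permute = {0,1,2,3}, limit[1..3] = {0,2,7} and base[1..3] = {0,1,4}.
// Decoding the bits 110: 1 > limit[1], 11 = 3 > limit[2], 110 = 6 <= limit[3],
// so the symbol is permute[6 - base[3]] = permute[2] = 2.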
hufGroup = bd.groups[jj];
hufGroup.minLen = minLen;
hufGroup.maxLen = maxLen;
// Note that minLen can't be smaller than 1, so we adjust the base
// and limit array pointers so we're not always wasting the first
// entry. We do this again when using them (during symbol decoding).
@base = hufGroup.@base; // - 1
limit = hufGroup.limit; // - 1
// zero temp[] and limit[], and calculate permute[]
pp = 0;
for (ii = minLen; ii <= maxLen; ii++)
{
temp[ii] = limit[ii] = 0;
for (hh = 0; hh < symCount; hh++)
if (length[hh] == ii) hufGroup.permute[pp++] = hh;
}
// Count symbols coded for at each bit length
for (ii = 0; ii < symCount; ii++) temp[length[ii]]++;
/* Calculate limit[] (the largest symbol-coding value at each bit
* length, which is (previous limit<<1)+symbols at this level), and
* base[] (number of symbols to ignore at each bit length, which is
* limit minus the cumulative count of symbols coded for already). */
pp = hh = 0;
for (ii = minLen; ii < maxLen; ii++)
{
pp += temp[ii];
limit[ii] = pp - 1;
pp <<= 1;
@base[ii + 1] = pp - (hh += temp[ii]);
}
limit[maxLen] = pp + temp[maxLen] - 1;
limit[maxLen + 1] = /*INT_MAX*/int.MaxValue;
@base[minLen] = 0;
}
return 0;
}
/* First pass, read block's symbols into dbuf[dbufCount].
*
* This undoes three types of compression: huffman coding, run length encoding,
* and move to front encoding. We have to undo all those to know when we've
* read enough input.
*/
static int read_huffman_data(bunzip_data bd, bwdata bw)
{
group_data hufGroup;
uint ii, jj, kk;
int runPos, dbufCount, symCount, selector;
uint nextSym;
uint[] byteCount;
uint[] @base;
uint[] limit;
uint hh;
uint[] dbuf = bw.dbuf;
byte uc;
// We've finished reading and digesting the block header. Now read this
// block's huffman coded symbols from the file and undo the huffman coding
// and run length encoding, saving the result into dbuf[dbufCount++] = uc
// Initialize symbol occurrence counters and symbol mtf table
byteCount = bw.byteCount;
for (ii = 0; ii < 256; ii++)
{
byteCount[ii] = 0;
bd.mtfSymbol[ii] = (byte)ii;
}
// Loop through compressed symbols. This is the first "tight inner loop"
// that needs to be micro-optimized for speed. (This one fills out dbuf[]
// linearly, staying in cache more, so isn't as limited by DRAM access.)
runPos = dbufCount = symCount = selector = 0;
// Initialize these up front to satisfy C#'s definite-assignment rules; the
// compiler can't prove they are always set before use.
@base = limit = null;
hufGroup = null;
hh = 0;
for (; ; )
{
// Have we reached the end of this huffman group?
if ((symCount--) == 0)
{
// Determine which huffman coding group to use.
symCount = GROUP_SIZE - 1;
if (selector >= bd.nSelectors) return RETVAL_DATA_ERROR;
hufGroup = bd.groups[bd.selectors[selector++]];
@base = hufGroup.@base; // - 1
limit = hufGroup.limit; // - 1
}
// Read next huffman-coded symbol (into jj).
ii = hufGroup.minLen;
jj = get_bits(bd, (byte)ii);
while (jj > limit[ii])
{
// if (ii > hufGroup->maxLen) return RETVAL_DATA_ERROR;
ii++;
// Unroll get_bits() to avoid a function call when the data's in
// the buffer already.
kk = bd.inbufBitCount > 0 ? (bd.inbufBits >> --bd.inbufBitCount) & 1 : get_bits(bd, 1);
jj = (jj << 1) | kk;
}
// Huffman decode jj into nextSym (with bounds checking)
jj -= @base[ii];
if (ii > hufGroup.maxLen || (uint)jj >= MAX_SYMBOLS)
return RETVAL_DATA_ERROR;
nextSym = hufGroup.permute[jj];
// If this is a repeated run, loop collecting data
if ((uint)nextSym <= SYMBOL_RUNB)
{
// If this is the start of a new run, zero out counter
if (runPos == 0)
{
runPos = 1;
hh = 0;
}
/* Neat trick that saves 1 symbol: instead of or-ing 0 or 1 at
each bit position, add 1 or 2 instead. For example,
1011 is 1<<0 + 1<<1 + 2<<2. 1010 is 2<<0 + 2<<1 + 1<<2.
You can make any bit pattern that way using 1 less symbol than
the basic or 0/1 method (except all bits 0, which would use no
symbols, but a run of length 0 doesn't mean anything in this
context). Thus space is saved. */
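// Example: the symbol sequence RUNA, RUNB encodes 1*(1<<0) + 2*(1<<1) = 5, so
// five copies of the current MTF-front literal are emitted when the run ends.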
hh += (uint)(runPos << (int)nextSym); // +runPos if RUNA; +2*runPos if RUNB
runPos <<= 1;
continue;
}
/* When we hit the first non-run symbol after a run, we now know
how many times to repeat the last literal, so append that many
copies to our buffer of decoded symbols (dbuf) now. (The last
literal used is the one at the head of the mtfSymbol array.) */
if (runPos > 0)
{
runPos = 0;
// Check for integer overflow
if (hh > bd.dbufSize || dbufCount + hh > bd.dbufSize)
return RETVAL_DATA_ERROR;
uc = bd.symToByte[bd.mtfSymbol[0]];
byteCount[uc] += hh;
while (hh-- > 0) dbuf[dbufCount++] = uc;
}
// Is this the terminating symbol?
if (nextSym > bd.symTotal) break;
/* At this point, the symbol we just decoded indicates a new literal
character. Subtract one to get the position in the MTF array
at which this literal is currently to be found. (Note that the
result can't be -1 or 0, because 0 and 1 are RUNA and RUNB.
Another instance of the first symbol in the mtf array, position 0,
would have been handled as part of a run.) */
if (dbufCount >= bd.dbufSize) return RETVAL_DATA_ERROR;
ii = nextSym - 1;
uc = bd.mtfSymbol[ii];
// On my laptop, unrolling this memmove() into a loop shaves 3.5% off
// the total running time.
while (ii-- > 0) bd.mtfSymbol[ii + 1] = bd.mtfSymbol[ii];
bd.mtfSymbol[0] = uc;
uc = bd.symToByte[uc];
// We have our literal byte. Save it into dbuf.
byteCount[uc]++;
dbuf[dbufCount++] = (uint)uc;
}
// Now we know what dbufCount is, do a better sanity check on origPtr.
if (bw.origPtr >= (bw.writeCount = dbufCount)) return RETVAL_DATA_ERROR;
return 0;
}
static void burrows_wheeler_prep(bunzip_data bd, bwdata bw)
{
uint ii, jj;
uint[] dbuf = bw.dbuf;
uint[] byteCount = bw.byteCount;
// Turn byteCount into cumulative occurrence counts of 0 to n-1.
jj = 0;
for (ii = 0; ii < 256; ii++)
{
uint kk = jj + byteCount[ii];
byteCount[ii] = jj;
jj = kk;
}
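// For example, byte counts {'a': 3, 'b': 2, everything else: 0} become the
// starting offsets {'a': 0, 'b': 3, 'c': 5, ...}, i.e. an exclusive prefix sum.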
// Use occurrence counts to quickly figure out what order dbuf would be in
// if we sorted it.
for (ii = 0; ii < bw.writeCount; ii++)
{
byte uc = (byte)dbuf[ii];
dbuf[byteCount[uc]] |= (ii << 8);
byteCount[uc]++;
}
// blockRandomised support would go here.
// Using ii as position, jj as previous character, hh as current character,
// and uc as run count.
bw.dataCRC = 0xffffffffu;
/* Decode first byte by hand to initialize "previous" byte. Note that it
doesn't get output, and if the first three characters are identical
it doesn't qualify as a run (hence uc=255, which will either wrap
to 1 or get reset). */
if (bw.writeCount > 0)
{
bw.writePos = dbuf[bw.origPtr];
bw.writeCurrent = (byte)bw.writePos;
bw.writePos >>= 8;
bw.writeRun = -1;
}
}
// Decompress a block of text to intermediate buffer
static int read_bunzip_data(bunzip_data bd)
{
int rc = read_block_header(bd, bd.bwdata[0]);
if (rc == 0)
rc = read_huffman_data(bd, bd.bwdata[0]);
// First thing that can be done by a background thread.
burrows_wheeler_prep(bd, bd.bwdata[0]);
return rc;
}
// Undo burrows-wheeler transform on intermediate buffer to produce output.
// Writes up to outbuf.Length bytes of decompressed data into outbuf and returns
// the number of bytes written. Notice all errors are negative #'s.
//
// Burrows-wheeler transform is described at:
// http://dogma.net/markn/articles/bwt/bwt.htm
// http://marknelson.us/1996/09/01/bwt/
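// Each dbuf entry packs the byte to output in its low 8 bits and the index of
// the next entry to visit in the upper 24 bits (filled in by
// burrows_wheeler_prep), so repeatedly doing pos = dbuf[pos] below walks the
// block in output order.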
static int write_bunzip_data(bunzip_data bd, bwdata bw, Span<byte> outbuf)
{
uint[] dbuf = bw.dbuf;
int count, current, run, copies, outbyte, previous;
int outbufPos = 0;
uint pos;
for (; ; )
{
// If last read was short due to end of file, return last block now
if (bw.writeCount < 0) return bw.writeCount;
// If we need to refill dbuf, do it.
if (bw.writeCount == 0)
{
int i = read_bunzip_data(bd);
if (i < 0)
{
if (i == RETVAL_LAST_BLOCK)
{
bw.writeCount = i;
return outbufPos;
}
else return i;
}
}
// loop generating output
count = bw.writeCount;
pos = bw.writePos;
current = bw.writeCurrent;
previous = bw.writePrevious;
run = bw.writeRun;
copies = bw.writeCopies;
outbyte = bw.writeOutByte;
if (copies > 0)
{
while (copies > 0)
{
if (outbufPos >= outbuf.Length) goto dataus_interruptus;
outbuf[outbufPos++] = (byte)outbyte;
bw.dataCRC = (bw.dataCRC << 8) ^ bd.crc32Table[(bw.dataCRC >> 24) ^ outbyte];
copies--;
}
if (current != previous) run = 0;
}
while (count > 0)
{
// If somebody (like tar) wants a certain number of bytes of
// data from memory instead of written to a file, humor them.
if (outbufPos >= outbuf.Length) goto dataus_interruptus;
count--;
// Follow sequence vector to undo Burrows-Wheeler transform.
previous = current;
pos = dbuf[pos];
current = (int)(pos & 0xff);
pos >>= 8;
// Whenever we see 3 consecutive copies of the same byte,
// the 4th is a repeat count
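// (This undoes bzip2's initial run-length encoding: for example the post-BWT
// bytes 'A','A','A','A',3 expand to seven 'A's in the output.)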
if (run++ == 3)
{
copies = current;
outbyte = previous;
current = -1;
}
else
{
copies = 1;
outbyte = current;
}
// Output bytes to buffer, flushing to file if necessary
while (copies > 0)
{
if (outbufPos >= outbuf.Length)
{
// FIXME: Break out of this state
//flush_bunzip_outbuf(bd, out_fd);
goto dataus_interruptus;
}
outbuf[outbufPos++] = (byte)outbyte;
bw.dataCRC = (bw.dataCRC << 8) ^ bd.crc32Table[(bw.dataCRC >> 24) ^ outbyte];
copies--;
}
if (current != previous) run = 0;
}
// decompression of this block completed successfully
bw.dataCRC = ~(bw.dataCRC);
bd.totalCRC = ((bd.totalCRC << 1) | (bd.totalCRC >> 31)) ^ bw.dataCRC;
// if this block had a crc error, force file level crc error.
if (bw.dataCRC != bw.headerCRC)
{
bd.totalCRC = bw.headerCRC + 1;
return RETVAL_LAST_BLOCK;
}
dataus_interruptus:
bw.writeCount = count;
bw.writeCopies = copies;
bw.writeOutByte = (byte)outbyte;
bw.writePos = pos;
bw.writeCurrent = current;
bw.writeRun = run;
bw.writePrevious = previous;
// If we got enough data, checkpoint loop state and return
if (outbufPos == outbuf.Length)
{
return outbufPos;
}
}
}
static void crc_init(uint[] crc_table, bool little_endian)
{
uint i;
// Init the CRC32 table (big endian)
for (i = 0; i < 256; i++)
{
uint j, c = little_endian ? i : i << 24;
for (j = 8; j > 0; j--)
if (little_endian) c = (c & 1) > 0 ? (c >> 1) ^ 0xEDB88320 : c >> 1;
else c = (c & 0x80000000) > 0 ? (c << 1) ^ 0x04c11db7 : (c << 1);
crc_table[i] = c;
}
}
// Allocate the structure, read file header. If len is zero, src_fd is the
// stream to read from; otherwise inbuf contains len bytes of compressed data.
static int start_bunzip(out bunzip_data bd, Stream src_fd, byte[] inbuf, int len)
{
uint i;
bd = new bunzip_data();
if (len > 0)
{
bd.inbuf = inbuf;
bd.inbufCount = len;
bd.in_fd = null;
}
else
{
bd.inbuf = new byte[IOBUF_SIZE];
bd.in_fd = src_fd;
}
crc_init(bd.crc32Table, false);
// Ensure that file starts with "BZh".
var BZh = new byte[] { (byte)'B', (byte)'Z', (byte)'h' };
for (i = 0; i < 3; i++) if (get_bits(bd, 8) != BZh[i]) return RETVAL_NOT_BZIP_DATA;
// Next byte ascii '1'-'9', indicates block size in units of 100k of
// uncompressed data. Allocate intermediate buffer for block.
i = get_bits(bd, 8);
if (i < '1' || i > '9') return RETVAL_NOT_BZIP_DATA;
bd.dbufSize = 100000 * (i - '0') * THREADS;
bd.bwdata = new bwdata[THREADS];
for (i = 0; i < THREADS; i++)
bd.bwdata[i] = new bwdata { dbuf = new uint[bd.dbufSize] };
return 0;
}
}
class Program
{
static void Main(string[] args)
{
using (var inputStream = new BZip2InputStream(File.OpenRead("input.bz2")))
using (var outputStream = File.OpenWrite("output.bin"))
inputStream.CopyTo(outputStream);
}
}
}