Skip to content

Instantly share code, notes, and snippets.

@shuffle2
Last active August 29, 2015 06:28
Show Gist options
  • Star 0 You must be signed in to star a gist
  • Fork 0 You must be signed in to fork a gist
  • Save shuffle2/b1282635af092a1a1a57 to your computer and use it in GitHub Desktop.
Save shuffle2/b1282635af092a1a1a57 to your computer and use it in GitHub Desktop.
Easily dump files from a JFFS2 image without jumping through the MTD hoops.
from construct import *
import zlib
from array import array
import struct
import binascii
import os
import sys
# Shorthand aliases for the big-endian integer field types used by the
# node layouts below (construct 2.x legacy API). NOTE(review): JFFS2 images
# are usually little-endian; this tool evidently targets a big-endian image.
u8 = UBInt8
u16 = UBInt16
u32 = UBInt32

# Common 12-byte header shared by every on-flash JFFS2 node.
jffs2_unknown_node_t = Struct('jffs2_unknown_node',
    u16('magic'),     # JFFS2_MAGIC (0x1985) for valid nodes
    u16('nodetype'),  # compat-class bits | accurate bit | node type number
    u32('totlen'),    # total node length on flash, including this header
    u32('hdr_crc'),   # CRC over the first 8 bytes (checked in the handlers)
)
# Directory-entry node: maps a name to an inode number under a parent dir.
jffs2_raw_dirent_t = Struct('jffs2_raw_dirent',
    Embed(jffs2_unknown_node_t), # 12 + 5 * 4
    u32('pino'),     # parent directory's inode number (used to build paths)
    u32('version'),  # monotonically increasing per-entry version
    u32('ino'),      # inode number this entry refers to
    u32('mctime'),
    u8('nsize'),     # length of the name field, in bytes
    u8('dtype'),     # file type, same encoding as the kernel DT_* values
    Array(2, u8('unused')),
    u32('node_crc'), # CRC over the fixed-size part (12 + 5*4 bytes)
    u32('name_crc'), # CRC over the raw name bytes
    String('name', length = lambda c: c.nsize, encoding = 'utf-8'),
)
# Inode data node: one (possibly compressed) chunk of a file's contents.
jffs2_raw_inode_t = Struct('jffs2_raw_inode',
    Embed(jffs2_unknown_node_t), # 12 + 12 * 4
    u32('ino'),      # inode number this chunk belongs to
    u32('version'),  # newer versions supersede older ones for overlapping ranges
    u32('mode'),
    u16('uid'),
    u16('gid'),
    u32('isize'),    # total file size at the time this node was written
    u32('atime'),
    u32('mtime'),
    u32('ctime'),
    u32('offset'),   # byte offset of this chunk within the file
    u32('csize'),    # compressed (on-flash) payload size
    u32('dsize'),    # decompressed payload size
    u8('compr'),     # compression type, one of the JFFS2_COMPR_* values
    u8('usercompr'),
    u16('flags'),
    u32('data_crc'), # CRC over the compressed payload
    u32('node_crc'), # CRC over the fixed-size part (12 + 12*4 bytes)
    Bytes('data', length = lambda c: c.csize),
)
def crc32_le(buf):
    """Return the CRC-32 variant JFFS2 stores in its nodes.

    This is the reflected (little-endian, poly 0xEDB88320) CRC with a
    ZERO initial register and NO final inversion, unlike the standard
    zlib/PKZIP CRC-32 which starts from 0xFFFFFFFF and inverts the result.

    Seeding zlib.crc32 with 0xFFFFFFFF cancels its initial inversion, and
    XOR-ing the result cancels the final one, so this yields the exact same
    value as the original bit-by-bit Python loop but at C speed -- this was
    the hot spot of the whole-image scan.
    """
    return zlib.crc32(buf, 0xffffffff) ^ 0xffffffff
def crc32(buf):
    """Alias used by the node handlers; JFFS2 CRCs are the LE variant."""
    return crc32_le(buf)
def rtime_decompress(buf, destlen):
    """Decompress a JFFS2 RTIME-compressed chunk.

    RTIME encodes the data as (value, repeat) byte pairs: emit `value`,
    then copy `repeat` bytes starting from the last output position where
    `value` was previously seen (a tiny back-reference scheme).

    buf     -- compressed bytes
    destlen -- expected decompressed length (loop stops once reached;
               like the kernel code, a run may overshoot it slightly)
    Returns the decompressed bytes.

    Fix vs. the original: positions were kept in array('h') -- a SIGNED
    16-bit array -- which raises OverflowError for chunks longer than
    32767 bytes.  A plain list has no such limit.  struct.unpack per pair
    is also replaced by direct byte indexing.
    """
    positions = [0] * 256  # last output index + 1 where each byte value occurred
    out = bytearray()
    pos = 0
    while len(out) < destlen:
        value = buf[pos]
        repeat = buf[pos + 1]
        pos += 2
        out.append(value)
        backoffs = positions[value]
        positions[value] = len(out)
        if repeat:
            if backoffs + repeat >= len(out):
                # Source and destination overlap: must copy byte by byte so
                # freshly written bytes are available to later copies.
                for _ in range(repeat):
                    out.append(out[backoffs])
                    backoffs += 1
            else:
                # Disjoint ranges: a bulk slice copy is safe.
                out.extend(out[backoffs:backoffs + repeat])
    return bytes(out)
# Read the whole flash image into memory up front (nodes are located by a
# linear scan over `f`).  Use a context manager so the handle is not leaked.
with open(sys.argv[1], 'rb') as _img:
    f = _img.read()
# --- On-flash constants, mirroring the Linux kernel's jffs2.h ---
JFFS2_MAGIC = 0x1985                    # magic bitmask at the start of every node
JFFS2_COMPAT_MASK = 0xc000              # top two nodetype bits: compatibility class
JFFS2_NODE_ACCURATE = 0x2000            # set while the node is still valid;
                                        # cleared in place when obsoleted
JFFS2_FEATURE_INCOMPAT = 0xc000
JFFS2_FEATURE_ROCOMPAT = 0x8000
JFFS2_FEATURE_RWCOMPAT_COPY = 0x4000
JFFS2_FEATURE_RWCOMPAT_DELETE = 0x0000
# Full nodetype values: compat class | accurate bit | type number (low nibble).
JFFS2_NODETYPE_DIRENT = (JFFS2_FEATURE_INCOMPAT | JFFS2_NODE_ACCURATE | 1)
JFFS2_NODETYPE_INODE = (JFFS2_FEATURE_INCOMPAT | JFFS2_NODE_ACCURATE | 2)
JFFS2_NODETYPE_CLEANMARKER = (JFFS2_FEATURE_RWCOMPAT_DELETE | JFFS2_NODE_ACCURATE | 3)
JFFS2_NODETYPE_PADDING = (JFFS2_FEATURE_RWCOMPAT_DELETE | JFFS2_NODE_ACCURATE | 4)
JFFS2_NODETYPE_SUMMARY = (JFFS2_FEATURE_RWCOMPAT_DELETE | JFFS2_NODE_ACCURATE | 6)
JFFS2_NODETYPE_XATTR = (JFFS2_FEATURE_INCOMPAT | JFFS2_NODE_ACCURATE | 8)
JFFS2_NODETYPE_XREF = (JFFS2_FEATURE_INCOMPAT | JFFS2_NODE_ACCURATE | 9)
# Compression types stored in jffs2_raw_inode.compr.
JFFS2_COMPR_NONE = 0x00
JFFS2_COMPR_ZERO = 0x01
JFFS2_COMPR_RTIME = 0x02
JFFS2_COMPR_RUBINMIPS = 0x03
JFFS2_COMPR_COPY = 0x04
JFFS2_COMPR_DYNRUBIN = 0x05
JFFS2_COMPR_ZLIB = 0x06
JFFS2_COMPR_LZO = 0x07
# Human-readable names for log output (padded for column alignment).
compr_names = {
    JFFS2_COMPR_NONE : 'NONE ',
    JFFS2_COMPR_ZERO : 'ZERO ',
    JFFS2_COMPR_RTIME : 'RTIME ',
    JFFS2_COMPR_RUBINMIPS: 'RUBINMIPS',
    JFFS2_COMPR_COPY : 'COPY ',
    JFFS2_COMPR_DYNRUBIN : 'DYNRUBIN ',
    JFFS2_COMPR_ZLIB : 'ZLIB ',
    JFFS2_COMPR_LZO : 'LZO ',
}
# Node names keyed by the low nibble of nodetype (the scan loop's dispatch key).
nt_names = {
    JFFS2_NODETYPE_DIRENT & 0xf:'JFFS2_NODETYPE_DIRENT ',
    JFFS2_NODETYPE_INODE & 0xf:'JFFS2_NODETYPE_INODE ',
    JFFS2_NODETYPE_CLEANMARKER& 0xf:'JFFS2_NODETYPE_CLEANMARKER',
    JFFS2_NODETYPE_PADDING & 0xf:'JFFS2_NODETYPE_PADDING ',
    JFFS2_NODETYPE_SUMMARY & 0xf:'JFFS2_NODETYPE_SUMMARY ',
    JFFS2_NODETYPE_XATTR & 0xf:'JFFS2_NODETYPE_XATTR ',
    JFFS2_NODETYPE_XREF & 0xf:'JFFS2_NODETYPE_XREF ',
}
# dirent dtype values -> names (matches the DT_* encoding used by readdir).
dt_names = {
    0 : 'UNKNOWN',
    1 : 'FIFO ',
    2 : 'CHR ',
    4 : 'DIR ',
    6 : 'BLK ',
    8 : 'REG ',
    10 : 'LNK ',
    12 : 'SOCK ',
    14 : 'WHT ',
}
# Global tables filled during the scan:
dirents = {}  # ino -> parsed dirent node (asserted unique per ino)
inodes = {}   # ino -> list of parsed inode data nodes (every version seen)
def do_dirent(buf):
    """Parse a directory-entry node, verify its CRCs, and record it globally."""
    de = jffs2_raw_dirent_t.parse(buf)
    print('ino %08x pino %08x ver %08x <%s> %s' % (de.ino, de.pino, de.version, dt_names[de.dtype], de.name))
    # All three stored checksums must match: header, fixed part, and name.
    assert crc32(buf[:8]) == de.hdr_crc
    assert crc32(buf[:12 + 5 * 4]) == de.node_crc
    assert crc32(bytes(de.name, 'utf-8')) == de.name_crc
    # Exactly one dirent per inode number is expected in this image.
    assert de.ino not in dirents
    dirents[de.ino] = de
def do_inode(buf):
    """Parse an inode data node, decompress its payload, verify CRCs, and
    record it (keyed by inode number) for the reconstruction pass."""
    n = jffs2_raw_inode_t.parse(buf)
    data = None
    if n.compr == JFFS2_COMPR_NONE:
        data = n.data
    elif n.compr == JFFS2_COMPR_ZLIB:
        data = zlib.decompress(n.data)
    elif n.compr == JFFS2_COMPR_RTIME:
        data = rtime_decompress(n.data, n.dsize)
    else:
        # Other compressors (rubin, lzo, ...) are not implemented here.
        print('unknown compr %02x' % (n.compr))
        raise
    #print('inode %08x ver %08x off %08x len %08x isize %08x' % (n.ino, n.version, n.offset, len(data), n.isize))
    assert len(data) == n.dsize
    hdr_crc = crc32(buf[:8])
    if (n.nodetype & JFFS2_NODE_ACCURATE) == 0:
        # When a node is obsoleted, the ACCURATE bit is cleared in place on
        # flash, but the stored CRCs were computed with the bit still set.
        # HAX: replace the value so other crcs are OK as well
        # NOTE(review): writing 0xe0 at offset 2 assumes the high nodetype
        # byte was originally 0xe0 (INCOMPAT | ACCURATE) -- TODO confirm.
        buf = bytearray(buf)
        del buf[2:3]
        buf.insert(2, 0xe0)
        buf = bytes(buf)
        # Recompute the header CRC over the header with the bit restored.
        hdr_crc = crc32(struct.pack('>HHL', n.magic, n.nodetype | JFFS2_NODE_ACCURATE, n.totlen))
    assert hdr_crc == n.hdr_crc
    assert crc32(buf[:12 + 12 * 4]) == n.node_crc
    assert crc32(n.data) == n.data_crc
    #print(binascii.hexlify(data))
    # Normalize the node to uncompressed form so the dump pass can use
    # n.data directly.
    n.compr = JFFS2_COMPR_NONE
    n.csize = n.dsize
    n.data = data
    # Keep every version of every chunk; selection happens later.
    if n.ino in inodes:
        inodes[n.ino].append(n)
    else:
        inodes[n.ino] = [n]
def do_cleanmarker(buf):
    """Clean markers carry nothing we need; ignore them."""
    pass

def do_padding(buf):
    """Unsupported node type: abort the scan if one is encountered."""
    raise

def do_summary(buf):
    """Unsupported node type: abort the scan if one is encountered."""
    raise

def do_xattr(buf):
    """Unsupported node type: abort the scan if one is encountered."""
    raise

def do_xref(buf):
    """Unsupported node type: abort the scan if one is encountered."""
    raise
print('parsing nodes...get coffee')
# Brute-force scan: try every byte offset until a header magic matches, then
# dispatch on the low nibble of nodetype and jump ahead by the node's totlen.
pos = 0
while pos <= len(f) - jffs2_unknown_node_t.sizeof():
    n = jffs2_unknown_node_t.parse(f[pos:pos+jffs2_unknown_node_t.sizeof()])
    if n.magic != JFFS2_MAGIC:
        # Not a node header here; slide forward one byte and retry.
        pos += 1
        continue
    nt = n.nodetype & 0xf
    print('node @ %08x : %s(%04x)' % (pos, nt_names[nt], n.nodetype))
    # Dispatch table keyed by node type number; an unknown nibble raises
    # KeyError, which deliberately stops the scan.
    {1:do_dirent,
     2:do_inode,
     3:do_cleanmarker,
     4:do_padding,
     6:do_summary,
     8:do_xattr,
     9:do_xref}[nt](f[pos:pos+n.totlen])
    pos += n.totlen
# The triple-quoted block below is disabled code: remove the leading ''' to
# print the maximum dirent/inode versions seen (useful when crafting new
# nodes) and exit without dumping.
''' Helpful to find good number to start creating new inodes at
de_ver = 0
for de in dirents.values():
de_ver = max(de_ver, de.version)
ino_ver = 0
for inos in inodes.values():
for ino in inos:
ino_ver = max(ino_ver, ino.version)
print('max versions: inode %08x dirent %08x' % (ino_ver, de_ver))
exit(0)
#'''
# now do something useful
# Reconstruct each file: walk parent links to build its path, then fill the
# file's byte range using the newest inode chunks that fit.
for di, de in sorted(dirents.items()):
    # Build the full path by following pino links up to the root.
    path = [de.name]
    prev = de
    while prev.pino in dirents:
        prev = dirents[prev.pino]
        path.insert(0, prev.name)
    path = '/'.join(path)
    print('dirent %i <%s> : %s' % (di, dt_names[de.dtype], path))
    # this is very simple way...more tricky stuff could be used to recover old files/chunks
    # gets most recent filesize and just fills buffer with most recent inodes which happen to fit
    chunks = {}   # file offset -> chunk bytes
    ranges = []   # (start, end) byte ranges already claimed by newer chunks
    def is_new_range(start, end):
        # True iff [start, end] does not touch any already-claimed range.
        # NOTE(review): comparisons treat r[1] as exclusive while end is
        # inclusive -- adjacent ranges are still accepted, overlaps are not.
        for r in ranges:
            if start >= r[0] and start < r[1]:
                return False
            elif end >= r[0] and end < r[1]:
                return False
            elif start <= r[0] and end >= r[1]:
                return False
        return True
    fsize = None
    cursize = 0
    # Newest version first; the first node seen defines the file size.
    # NOTE(review): assumes every dirent ino has at least one inode node
    # (inodes[di] would raise KeyError otherwise) -- TODO confirm.
    for din in reversed(sorted(inodes[di], key = lambda x: x.version)):
        if fsize is None:
            fsize = din.isize
        assert cursize <= fsize
        if cursize == fsize:
            break
        start, end = din.offset, din.offset + din.dsize - 1
        if is_new_range(start, end):
            print('add range: %i %i ver %i' % (start, end, din.version))
            chunks[start] = din.data
            cursize += din.dsize
            ranges.append((start, end))
        else:
            # An overlapping newer chunk already covers this range.
            print('skip range: %i %i ver %i' % (start, end, din.version))
    assert cursize == fsize
    fpath = './dump/' + path
    if de.dtype not in (8, 10): # reg, lnk
        # Directories and other non-file types just become directories.
        os.makedirs(fpath, exist_ok = True)
    else:
        # Write the chunks back in ascending file-offset order.
        with open(fpath, 'wb') as fout:
            for o, d in sorted(chunks.items()):
                fout.write(d)
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment