@pedrocr
Created November 18, 2020 00:29
Expanded ARW multiversion
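// Macro-expanded view of rawloader's Sony ARW decoder with #[multiversion]
// applied to decode_arw1; the derive and format! expansions appear inline as well.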
mod arw {
use std::f32::NAN;
use std::cmp;
use crate::decoders::*;
use crate::decoders::tiff::*;
use crate::decoders::basics::*;
use multiversion::multiversion;
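// Sony ARW/SRF decoder: borrows the raw file buffer, the parsed TIFF
// structure, and a handle back to the RawLoader used for camera lookups.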
pub struct ArwDecoder<'a> {
buffer: &'a [u8],
rawloader: &'a RawLoader,
tiff: TiffIFD<'a>,
}
#[automatically_derived]
#[allow(unused_qualifications)]
impl<'a> ::core::fmt::Debug for ArwDecoder<'a> {
fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result {
match *self {
ArwDecoder {
buffer: ref __self_0_0,
rawloader: ref __self_0_1,
tiff: ref __self_0_2,
} => {
let mut debug_trait_builder = f.debug_struct("ArwDecoder");
let _ = debug_trait_builder.field("buffer", &&(*__self_0_0));
let _ = debug_trait_builder.field("rawloader", &&(*__self_0_1));
let _ = debug_trait_builder.field("tiff", &&(*__self_0_2));
debug_trait_builder.finish()
}
}
}
}
#[automatically_derived]
#[allow(unused_qualifications)]
impl<'a> ::core::clone::Clone for ArwDecoder<'a> {
#[inline]
fn clone(&self) -> ArwDecoder<'a> {
match *self {
ArwDecoder {
buffer: ref __self_0_0,
rawloader: ref __self_0_1,
tiff: ref __self_0_2,
} => ArwDecoder {
buffer: ::core::clone::Clone::clone(&(*__self_0_0)),
rawloader: ::core::clone::Clone::clone(&(*__self_0_1)),
tiff: ::core::clone::Clone::clone(&(*__self_0_2)),
},
}
}
}
impl<'a> ArwDecoder<'a> {
pub fn new(buf: &'a [u8], tiff: TiffIFD<'a>, rawloader: &'a RawLoader) -> ArwDecoder<'a> {
ArwDecoder {
buffer: buf,
tiff: tiff,
rawloader: rawloader,
}
}
}
impl<'a> Decoder for ArwDecoder<'a> {
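// Main entry point: locate the IFD holding the strip data, read the basic
// geometry tags, and dispatch on the Compression tag. Files without strip
// offsets are the DSLR-A100 and SRF special cases handled further below.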
fn image(&self, dummy: bool) -> Result<RawImage, String> {
let camera = self.rawloader.check_supported(&self.tiff)?;
let data = self.tiff.find_ifds_with_tag(Tag::StripOffsets);
if data.len() == 0 {
if camera.model == "DSLR-A100" {
return self.image_a100(camera, dummy);
} else {
return self.image_srf(camera, dummy);
}
}
let raw = data[0];
let width = raw
.find_entry(Tag::ImageWidth)
.ok_or(
{
let res = ::alloc::fmt::format(::core::fmt::Arguments::new_v1(
&["Couldn\'t find tag "],
&match (&"Tag::ImageWidth",) {
(arg0,) => [::core::fmt::ArgumentV1::new(
arg0,
::core::fmt::Display::fmt,
)],
},
));
res
}
.to_string(),
)?
.get_usize(0);
let mut height = raw
.find_entry(Tag::ImageLength)
.ok_or(
{
let res = ::alloc::fmt::format(::core::fmt::Arguments::new_v1(
&["Couldn\'t find tag "],
&match (&"Tag::ImageLength",) {
(arg0,) => [::core::fmt::ArgumentV1::new(
arg0,
::core::fmt::Display::fmt,
)],
},
));
res
}
.to_string(),
)?
.get_usize(0);
let offset = raw
.find_entry(Tag::StripOffsets)
.ok_or(
{
let res = ::alloc::fmt::format(::core::fmt::Arguments::new_v1(
&["Couldn\'t find tag "],
&match (&"Tag::StripOffsets",) {
(arg0,) => [::core::fmt::ArgumentV1::new(
arg0,
::core::fmt::Display::fmt,
)],
},
));
res
}
.to_string(),
)?
.get_usize(0);
let count = raw
.find_entry(Tag::StripByteCounts)
.ok_or(
{
let res = ::alloc::fmt::format(::core::fmt::Arguments::new_v1(
&["Couldn\'t find tag "],
&match (&"Tag::StripByteCounts",) {
(arg0,) => [::core::fmt::ArgumentV1::new(
arg0,
::core::fmt::Display::fmt,
)],
},
));
res
}
.to_string(),
)?
.get_usize(0);
let compression = raw
.find_entry(Tag::Compression)
.ok_or(
{
let res = ::alloc::fmt::format(::core::fmt::Arguments::new_v1(
&["Couldn\'t find tag "],
&match (&"Tag::Compression",) {
(arg0,) => [::core::fmt::ArgumentV1::new(
arg0,
::core::fmt::Display::fmt,
)],
},
));
res
}
.to_string(),
)?
.get_u32(0);
let bps = if camera.bps != 0 {
camera.bps
} else {
raw.find_entry(Tag::BitsPerSample)
.ok_or(
{
let res = ::alloc::fmt::format(::core::fmt::Arguments::new_v1(
&["Couldn\'t find tag "],
&match (&"Tag::BitsPerSample",) {
(arg0,) => [::core::fmt::ArgumentV1::new(
arg0,
::core::fmt::Display::fmt,
)],
},
));
res
}
.to_string(),
)?
.get_usize(0)
};
let mut white = camera.whitelevels[0];
let mut black = camera.blacklevels[0];
let src = &self.buffer[offset..];
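// Compression 1 is uncompressed data (14-bit big-endian on the DSC-R1,
// 16-bit little-endian otherwise); 32767 is Sony's compressed format:
// ARW1 when the byte count doesn't match the geometry, otherwise ARW2
// (8 bps) or plain 12-bit little-endian packing (12 bps).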
let image = match compression {
1 => {
if camera.model == "DSC-R1" {
decode_14be_unpacked(src, width, height, dummy)
} else {
decode_16le(src, width, height, dummy)
}
}
32767 => {
if (width * height * bps) != count * 8 {
height += 8;
ArwDecoder::decode_arw1(src, width, height, dummy)
} else {
match bps {
8 => {
let curve = ArwDecoder::get_curve(raw)?;
ArwDecoder::decode_arw2(src, width, height, &curve, dummy)
}
12 => {
white >>= 2;
black >>= 2;
decode_12le(src, width, height, dummy)
}
_ => {
return Err({
let res = ::alloc::fmt::format(::core::fmt::Arguments::new_v1(
&["ARW2: Don\'t know how to decode images with ", " bps"],
&match (&bps,) {
(arg0,) => [::core::fmt::ArgumentV1::new(
arg0,
::core::fmt::Display::fmt,
)],
},
));
res
})
}
}
}
}
_ => {
return Err({
let res = ::alloc::fmt::format(::core::fmt::Arguments::new_v1(
&["ARW: Don\'t know how to decode type "],
&match (&compression,) {
(arg0,) => [::core::fmt::ArgumentV1::new(
arg0,
::core::fmt::Display::fmt,
)],
},
));
res
}
.to_string())
}
};
ok_image_with_black_white(camera, width, height, self.get_wb()?, black, white, image)
}
}
impl<'a> ArwDecoder<'a> {
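// DSLR-A100 files: the raw data sits behind the SubIFDs tag with fixed
// 3881x2608 dimensions, and the white balance is stored in the
// DNGPrivateArea blob under a big-endian "WBG" (0x574247) record.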
fn image_a100(&self, camera: Camera, dummy: bool) -> Result<RawImage, String> {
let data = self.tiff.find_ifds_with_tag(Tag::SubIFDs);
if data.len() == 0 {
return Err("ARW: Couldn't find the data IFD!".to_string());
}
let raw = data[0];
let width = 3881;
let height = 2608;
let offset = raw
.find_entry(Tag::SubIFDs)
.ok_or(
{
let res = ::alloc::fmt::format(::core::fmt::Arguments::new_v1(
&["Couldn\'t find tag "],
&match (&"Tag::SubIFDs",) {
(arg0,) => [::core::fmt::ArgumentV1::new(
arg0,
::core::fmt::Display::fmt,
)],
},
));
res
}
.to_string(),
)?
.get_usize(0);
let src = &self.buffer[offset..];
let image = ArwDecoder::decode_arw1(src, width, height, dummy);
let priv_offset = self
.tiff
.find_entry(Tag::DNGPrivateArea)
.ok_or(
{
let res = ::alloc::fmt::format(::core::fmt::Arguments::new_v1(
&["Couldn\'t find tag "],
&match (&"Tag::DNGPrivateArea",) {
(arg0,) => [::core::fmt::ArgumentV1::new(
arg0,
::core::fmt::Display::fmt,
)],
},
));
res
}
.to_string(),
)?
.get_force_u32(0) as usize;
let buf = &self.buffer[priv_offset..];
let mut currpos: usize = 8;
let mut wb_coeffs: [f32; 4] = [0.0, 0.0, 0.0, NAN];
while currpos + 20 < buf.len() {
let tag: u32 = BEu32(buf, currpos);
let len: usize = LEu32(buf, currpos + 4) as usize;
if tag == 0x574247 {
wb_coeffs[0] = LEu16(buf, currpos + 12) as f32;
wb_coeffs[1] = LEu16(buf, currpos + 14) as f32;
wb_coeffs[2] = LEu16(buf, currpos + 18) as f32;
break;
}
currpos += len + 8;
}
ok_image(camera, width, height, wb_coeffs, image)
}
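// SRF files keep the image data encrypted: a 40-byte header at a fixed
// offset is decrypted with a key read from the file itself, yielding a
// second key that decrypts the 16-bit big-endian image data.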
fn image_srf(&self, camera: Camera, dummy: bool) -> Result<RawImage, String> {
let data = self.tiff.find_ifds_with_tag(Tag::ImageWidth);
if data.len() == 0 {
return Err("ARW: Couldn't find the data IFD!".to_string());
}
let raw = data[0];
let width = raw
.find_entry(Tag::ImageWidth)
.ok_or(
{
let res = ::alloc::fmt::format(::core::fmt::Arguments::new_v1(
&["Couldn\'t find tag "],
&match (&"Tag::ImageWidth",) {
(arg0,) => [::core::fmt::ArgumentV1::new(
arg0,
::core::fmt::Display::fmt,
)],
},
));
res
}
.to_string(),
)?
.get_usize(0);
let height = raw
.find_entry(Tag::ImageLength)
.ok_or(
{
let res = ::alloc::fmt::format(::core::fmt::Arguments::new_v1(
&["Couldn\'t find tag "],
&match (&"Tag::ImageLength",) {
(arg0,) => [::core::fmt::ArgumentV1::new(
arg0,
::core::fmt::Display::fmt,
)],
},
));
res
}
.to_string(),
)?
.get_usize(0);
let image = if dummy {
<[_]>::into_vec(box [0])
} else {
let len = width * height * 2;
let off: usize = 862144;
let key_off: usize = 200896;
let head_off: usize = 164600;
let offset = (self.buffer[key_off] as usize) * 4;
let first_key = BEu32(self.buffer, key_off + offset);
let head = ArwDecoder::sony_decrypt(self.buffer, head_off, 40, first_key);
let second_key = LEu32(&head, 22);
let image_data = ArwDecoder::sony_decrypt(self.buffer, off, len, second_key);
decode_16be(&image_data, width, height, dummy)
};
ok_image(camera, width, height, [NAN, NAN, NAN, NAN], image)
}
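// Everything from here to decode_arw1() is the expansion of the
// #[multiversion] attribute: a clone of the decoder compiled with the
// AVX/AVX2/BMI/FMA/LZCNT/XSAVE target features enabled, an identical
// default clone, and a run-time dispatcher that picks between them.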
#[inline(always)]
#[doc(hidden)]
#[cfg(any(target_arch = "x86_64"))]
pub(crate) fn __decode_arw1_avx_avx2_bmi1_bmi2_fma_lzcnt_xsave_static_dispatch(
buf: &[u8],
width: usize,
height: usize,
dummy: bool,
) -> Vec<u16> {
unsafe {
decode_arw1_avx_avx2_bmi1_bmi2_fma_lzcnt_xsave_version(buf, width, height, dummy)
}
}
#[cfg(any(target_arch = "x86_64"))]
#[target_feature(enable = "avx")]
#[target_feature(enable = "avx2")]
#[target_feature(enable = "bmi1")]
#[target_feature(enable = "bmi2")]
#[target_feature(enable = "fma")]
#[target_feature(enable = "lzcnt")]
#[target_feature(enable = "xsave")]
#[inline]
#[doc(hidden)]
pub(crate) unsafe fn decode_arw1_avx_avx2_bmi1_bmi2_fma_lzcnt_xsave_version(
buf: &[u8],
width: usize,
height: usize,
dummy: bool,
) -> Vec<u16> {
__safe_inner_decode_arw1_avx_avx2_bmi1_bmi2_fma_lzcnt_xsave_version(
buf, width, height, dummy,
)
}
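// ARW1 bitstream decode: columns are walked right to left; within each
// column the even rows are filled first, then the odd rows. Each sample is
// a variable-length signed delta added to a running sum.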
#[cfg(any(target_arch = "x86_64"))]
#[inline(always)]
fn __safe_inner_decode_arw1_avx_avx2_bmi1_bmi2_fma_lzcnt_xsave_version(
buf: &[u8],
width: usize,
height: usize,
dummy: bool,
) -> Vec<u16> {
let mut out: Vec<u16> = {
let out = {
if width * height > 500000000 || width > 50000 || height > 50000 {
{
::std::rt::begin_panic("rawloader: surely there's no such thing as a >500MP or >50000 px wide/tall image!")
};
}
if dummy {
<[_]>::into_vec(box [0])
} else {
::alloc::vec::from_elem(0, width * height)
}
};
if dummy {
return out;
}
out
};
let mut pump = BitPumpMSB::new(buf);
let mut sum: i32 = 0;
for x in 0..width {
let col = width - 1 - x;
let mut row = 0;
while row <= height {
if row == height {
row = 1;
}
let mut len: u32 = 4 - pump.get_bits(2);
if len == 3 && pump.get_bits(1) != 0 {
len = 0;
} else if len == 4 {
let zeros = pump.peek_bits(13).leading_zeros() - 19;
len += zeros;
pump.get_bits(cmp::min(13, zeros + 1));
}
let diff: i32 = pump.get_ibits(len);
sum += diff;
if len > 0 && (diff & (1 << (len - 1))) == 0 {
sum -= (1 << len) - 1;
}
out[row * width + col] = sum as u16;
row += 2
}
}
out
}
#[inline(always)]
#[doc(hidden)]
#[cfg(not(any()))]
pub(crate) fn decode_arw1_default_version(
buf: &[u8],
width: usize,
height: usize,
dummy: bool,
) -> Vec<u16> {
let mut out: Vec<u16> = {
let out = {
if width * height > 500000000 || width > 50000 || height > 50000 {
{
::std::rt::begin_panic("rawloader: surely there's no such thing as a >500MP or >50000 px wide/tall image!")
};
}
if dummy {
<[_]>::into_vec(box [0])
} else {
::alloc::vec::from_elem(0, width * height)
}
};
if dummy {
return out;
}
out
};
let mut pump = BitPumpMSB::new(buf);
let mut sum: i32 = 0;
for x in 0..width {
let col = width - 1 - x;
let mut row = 0;
while row <= height {
if row == height {
row = 1;
}
let mut len: u32 = 4 - pump.get_bits(2);
if len == 3 && pump.get_bits(1) != 0 {
len = 0;
} else if len == 4 {
let zeros = pump.peek_bits(13).leading_zeros() - 19;
len += zeros;
pump.get_bits(cmp::min(13, zeros + 1));
}
let diff: i32 = pump.get_ibits(len);
sum += diff;
if len > 0 && (diff & (1 << (len - 1))) == 0 {
sum -= (1 << len) - 1;
}
out[row * width + col] = sum as u16;
row += 2
}
}
out
}
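// Run-time dispatcher: the first call lands in __resolver_fn, which probes
// the CPU features, stores the chosen function pointer in __DISPATCHED_FN,
// and calls it; later calls load the cached pointer and jump straight
// through it.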
pub(crate) fn decode_arw1(
buf: &[u8],
width: usize,
height: usize,
dummy: bool,
) -> Vec<u16> {
use core::sync::atomic::{AtomicPtr, Ordering};
#[cold]
fn __resolver_fn(buf: &[u8], width: usize, height: usize, dummy: bool) -> Vec<u16> {
fn __get_fn() -> fn(&[u8], usize, usize, bool) -> Vec<u16> {
#[cfg(any(target_arch = "x86_64"))]
{
if {
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
{
::std::detect::__is_feature_detected::avx()
}
} && {
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
{
::std::detect::__is_feature_detected::avx2()
}
} && {
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
{
::std::detect::__is_feature_detected::bmi1()
}
} && {
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
{
::std::detect::__is_feature_detected::bmi2()
}
} && {
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
{
::std::detect::__is_feature_detected::fma()
}
} && {
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
{
::std::detect::__is_feature_detected::lzcnt()
}
} && {
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
{
::std::detect::__is_feature_detected::xsave()
}
} {
return __decode_arw1_avx_avx2_bmi1_bmi2_fma_lzcnt_xsave_static_dispatch;
}
}
decode_arw1_default_version
}
let __current_fn = __get_fn();
__DISPATCHED_FN.store(__current_fn as *mut (), Ordering::Relaxed);
__current_fn(buf, width, height, dummy)
}
static __DISPATCHED_FN: AtomicPtr<()> = AtomicPtr::new(__resolver_fn as *mut ());
let __current_ptr = __DISPATCHED_FN.load(Ordering::Relaxed);
unsafe {
let __current_fn = core::mem::transmute::<
*mut (),
fn(&[u8], usize, usize, bool) -> Vec<u16>,
>(__current_ptr);
__current_fn(buf, width, height, dummy)
}
}
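// ARW2 decode: each 16-byte block encodes 16 pixels as an 11-bit max, an
// 11-bit min, 4-bit positions of the max and min, and 7-bit deltas scaled
// by delta_shift; two such blocks are interleaved into 32 output pixels,
// which are pushed through the tone curve with dithering. Rows are decoded
// in parallel via decode_threaded.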
pub(crate) fn decode_arw2(
buf: &[u8],
width: usize,
height: usize,
curve: &LookupTable,
dummy: bool,
) -> Vec<u16> {
decode_threaded(
width,
height,
dummy,
&(|out: &mut [u16], row| {
let mut pump = BitPumpLSB::new(&buf[(row * width)..]);
let mut random = pump.peek_bits(16);
for out in out.chunks_exact_mut(32) {
for j in 0..2 {
let max = pump.get_bits(11);
let min = pump.get_bits(11);
let delta = max - min;
let delta_shift: u32 =
cmp::max(0, (32 - (delta.leading_zeros() as i32)) - 7) as u32;
let imax = pump.get_bits(4) as usize;
let imin = pump.get_bits(4) as usize;
for i in 0..16 {
let val = if i == imax {
max
} else if i == imin {
min
} else {
cmp::min(0x7ff, (pump.get_bits(7) << delta_shift) + min)
};
out[j + (i * 2)] = curve.dither((val << 1) as u16, &mut random);
}
}
}
}),
)
}
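// White balance lives inside an encrypted Sony maker-note block: follow
// DNGPrivateArea to SonyOffset/SonyLength/SonyKey, decrypt that region,
// and read the GRBG or RGGB level tags from the resulting TIFF IFD.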
fn get_wb(&self) -> Result<[f32; 4], String> {
let priv_offset = self
.tiff
.find_entry(Tag::DNGPrivateArea)
.ok_or(
{
let res = ::alloc::fmt::format(::core::fmt::Arguments::new_v1(
&["Couldn\'t find tag "],
&match (&"Tag::DNGPrivateArea",) {
(arg0,) => [::core::fmt::ArgumentV1::new(
arg0,
::core::fmt::Display::fmt,
)],
},
));
res
}
.to_string(),
)?
.get_force_u32(0) as usize;
let priv_tiff = TiffIFD::new(self.buffer, priv_offset, 0, 0, 0, LITTLE_ENDIAN)?;
let sony_offset = priv_tiff
.find_entry(Tag::SonyOffset)
.ok_or(
{
let res = ::alloc::fmt::format(::core::fmt::Arguments::new_v1(
&["Couldn\'t find tag "],
&match (&"Tag::SonyOffset",) {
(arg0,) => [::core::fmt::ArgumentV1::new(
arg0,
::core::fmt::Display::fmt,
)],
},
));
res
}
.to_string(),
)?
.get_usize(0);
let sony_length = priv_tiff
.find_entry(Tag::SonyLength)
.ok_or(
{
let res = ::alloc::fmt::format(::core::fmt::Arguments::new_v1(
&["Couldn\'t find tag "],
&match (&"Tag::SonyLength",) {
(arg0,) => [::core::fmt::ArgumentV1::new(
arg0,
::core::fmt::Display::fmt,
)],
},
));
res
}
.to_string(),
)?
.get_usize(0);
let sony_key = priv_tiff
.find_entry(Tag::SonyKey)
.ok_or(
{
let res = ::alloc::fmt::format(::core::fmt::Arguments::new_v1(
&["Couldn\'t find tag "],
&match (&"Tag::SonyKey",) {
(arg0,) => [::core::fmt::ArgumentV1::new(
arg0,
::core::fmt::Display::fmt,
)],
},
));
res
}
.to_string(),
)?
.get_u32(0);
let decrypted_buf =
ArwDecoder::sony_decrypt(self.buffer, sony_offset, sony_length, sony_key);
let decrypted_tiff =
TiffIFD::new(&decrypted_buf, 0, sony_offset, 0, 0, LITTLE_ENDIAN).unwrap();
let grgb_levels = decrypted_tiff.find_entry(Tag::SonyGRBG);
let rggb_levels = decrypted_tiff.find_entry(Tag::SonyRGGB);
if grgb_levels.is_some() {
let levels = grgb_levels.unwrap();
Ok([
levels.get_u32(1) as f32,
levels.get_u32(0) as f32,
levels.get_u32(2) as f32,
NAN,
])
} else if rggb_levels.is_some() {
let levels = rggb_levels.unwrap();
Ok([
levels.get_u32(0) as f32,
levels.get_u32(1) as f32,
levels.get_u32(3) as f32,
NAN,
])
} else {
Err("ARW: Couldn't find GRGB or RGGB levels".to_string())
}
}
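// The SonyCurve tag holds four knee points (the first entry is 0 and the
// last is pinned to 4095); they are expanded into a full lookup table below.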
fn get_curve(raw: &TiffIFD) -> Result<LookupTable, String> {
let centry = raw.find_entry(Tag::SonyCurve).ok_or(
{
let res = ::alloc::fmt::format(::core::fmt::Arguments::new_v1(
&["Couldn\'t find tag "],
&match (&"Tag::SonyCurve",) {
(arg0,) => [::core::fmt::ArgumentV1::new(
arg0,
::core::fmt::Display::fmt,
)],
},
));
res
}
.to_string(),
)?;
let mut curve: [usize; 6] = [0, 0, 0, 0, 0, 4095];
for i in 0..4 {
curve[i + 1] = ((centry.get_u32(i) >> 2) & 0xfff) as usize;
}
Ok(Self::calculate_curve(curve))
}
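// Build the lookup table: between consecutive knee points the output step
// doubles (1 << i), giving a piecewise-linear tone curve.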
pub(crate) fn calculate_curve(curve: [usize; 6]) -> LookupTable {
let mut out = ::alloc::vec::from_elem(0 as u16, curve[5] + 1);
for i in 0..5 {
for j in (curve[i] + 1)..(curve[i + 1] + 1) {
out[j] = out[(j - 1)] + (1 << i);
}
}
LookupTable::new(&out)
}
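// Sony's decryption: seed a 128-word pad from the key with a simple LCG
// (multiplier 48828125), extend it with a shift-register recurrence,
// reinterpret the words as big-endian, then XOR successive little-endian
// 32-bit words of the input against a rolling combination of pad entries.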
pub(crate) fn sony_decrypt(buf: &[u8], offset: usize, length: usize, key: u32) -> Vec<u8> {
let mut pad: [u32; 128] = [0 as u32; 128];
let mut mkey = key;
for p in 0..4 {
mkey = mkey.wrapping_mul(48828125).wrapping_add(1);
pad[p] = mkey;
}
pad[3] = pad[3] << 1 | (pad[0] ^ pad[2]) >> 31;
for p in 4..127 {
pad[p] = (pad[p - 4] ^ pad[p - 2]) << 1 | (pad[p - 3] ^ pad[p - 1]) >> 31;
}
for p in 0..127 {
pad[p] = u32::from_be(pad[p]);
}
let mut out = Vec::with_capacity(length + 4);
for i in 0..(length / 4 + 1) {
let p = i + 127;
pad[p & 127] = pad[(p + 1) & 127] ^ pad[(p + 1 + 64) & 127];
let output = LEu32(buf, offset + i * 4) ^ pad[p & 127];
out.push(((output >> 0) & 0xff) as u8);
out.push(((output >> 8) & 0xff) as u8);
out.push(((output >> 16) & 0xff) as u8);
out.push(((output >> 24) & 0xff) as u8);
}
out
}
}
}