
@bgK
Created November 30, 2012 19:46
diff --git a/engines/myst3/inventory.cpp b/engines/myst3/inventory.cpp
index 49e563b..254c680 100644
--- a/engines/myst3/inventory.cpp
+++ b/engines/myst3/inventory.cpp
@@ -284,6 +284,7 @@ DragItem::DragItem(Myst3Engine *vm, uint id):
_movieStream = movieDesc->getData();
_bink.setDefaultHighColorFormat(Graphics::PixelFormat(4, 8, 8, 8, 8, 0, 8, 16, 24));
_bink.loadStream(_movieStream);
+ _bink.start();
const Graphics::Surface *frame = _bink.decodeNextFrame();
_texture = _vm->_gfx->createTexture(frame);
diff --git a/engines/myst3/menu.cpp b/engines/myst3/menu.cpp
index f8c24f1..8304c7a 100644
--- a/engines/myst3/menu.cpp
+++ b/engines/myst3/menu.cpp
@@ -177,6 +177,7 @@ Dialog::Dialog(Myst3Engine *vm, uint id):
_movieStream = movieDesc->getData();
_bink.setDefaultHighColorFormat(Graphics::PixelFormat(4, 8, 8, 8, 8, 0, 8, 16, 24));
_bink.loadStream(_movieStream);
+ _bink.start();
const Graphics::Surface *frame = _bink.decodeNextFrame();
_texture = _vm->_gfx->createTexture(frame);
diff --git a/engines/myst3/movie.cpp b/engines/myst3/movie.cpp
index b4bf87f..b4aaaa3 100644
--- a/engines/myst3/movie.cpp
+++ b/engines/myst3/movie.cpp
@@ -63,6 +63,7 @@ Movie::Movie(Myst3Engine *vm, uint16 id) :
uint language = ConfMan.getInt("audio_language");
_bink.setAudioTrack(language);
_bink.loadStream(binkStream);
+ _bink.start();
if (ConfMan.getBool("subtitles"))
_subtitles = Subtitles::create(_vm, id);
diff --git a/engines/myst3/puzzles.cpp b/engines/myst3/puzzles.cpp
index de4aaa8..b9c67be 100644
--- a/engines/myst3/puzzles.cpp
+++ b/engines/myst3/puzzles.cpp
@@ -1517,6 +1517,7 @@ void Puzzles::projectorLoadBitmap(uint16 bitmap) {
Video::BinkDecoder bink;
bink.setDefaultHighColorFormat(Graphics::PixelFormat(4, 8, 8, 8, 8, 0, 8, 16, 24));
bink.loadStream(movieStream);
+ bink.start();
for (uint i = 0; i < 1024; i += 256)
for (uint j = 0; j < 1024; j += 256) {
@@ -1542,6 +1543,7 @@ void Puzzles::projectorAddSpotItem(uint16 bitmap, uint16 x, uint16 y) {
Video::BinkDecoder bink;
bink.setDefaultHighColorFormat(Graphics::PixelFormat(4, 8, 8, 8, 8, 0, 8, 16, 24));
bink.loadStream(movieStream);
+ bink.start();
const Graphics::Surface *frame = bink.decodeNextFrame();
copySurfaceRect(_vm->_projectorBackground, Common::Point(x, y), frame);
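
The four engine-side hunks above (inventory.cpp, menu.cpp, movie.cpp, puzzles.cpp) all make the same change: an explicit start() call is added between loadStream() and the first decodeNextFrame(). A minimal sketch of the resulting call order, assuming an already-opened movieStream (names are placeholders, not code from the patch):

	Video::BinkDecoder bink;
	bink.setDefaultHighColorFormat(Graphics::PixelFormat(4, 8, 8, 8, 8, 0, 8, 16, 24));
	bink.loadStream(movieStream);  // the decoder takes ownership of the stream
	bink.start();                  // required before decodeNextFrame() returns frames
	const Graphics::Surface *frame = bink.decodeNextFrame();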
diff --git a/graphics/yuva_to_rgba.cpp b/graphics/yuva_to_rgba.cpp
index d47ad1e..a2d09d2 100644
--- a/graphics/yuva_to_rgba.cpp
+++ b/graphics/yuva_to_rgba.cpp
@@ -83,143 +83,138 @@
// BASIS, AND BROWN UNIVERSITY HAS NO OBLIGATION TO PROVIDE MAINTENANCE,
// SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
-#include "common/scummsys.h"
-#include "common/singleton.h"
-#include "common/textconsole.h"
-
#include "graphics/surface.h"
+#include "graphics/yuva_to_rgba.h"
+
+namespace Common {
+DECLARE_SINGLETON(Graphics::YUVAToRGBAManager);
+}
namespace Graphics {
class YUVAToRGBALookup {
public:
- YUVAToRGBALookup(Graphics::PixelFormat format);
- ~YUVAToRGBALookup();
-
- int16 *_colorTab;
- uint32 *_rgbToPix;
- uint32 *_alphaToPix;
-};
+ YUVAToRGBALookup(Graphics::PixelFormat format, YUVAToRGBAManager::LuminanceScale scale);
-YUVAToRGBALookup::YUVAToRGBALookup(Graphics::PixelFormat format) {
- _colorTab = new int16[4 * 256]; // 2048 bytes
+ Graphics::PixelFormat getFormat() const { return _format; }
+ YUVAToRGBAManager::LuminanceScale getScale() const { return _scale; }
+ const uint32 *getRGBToPix() const { return _rgbToPix; }
+ const uint32 *getAlphaToPix() const { return _alphaToPix; }
- int16 *Cr_r_tab = &_colorTab[0 * 256];
- int16 *Cr_g_tab = &_colorTab[1 * 256];
- int16 *Cb_g_tab = &_colorTab[2 * 256];
- int16 *Cb_b_tab = &_colorTab[3 * 256];
+private:
+ Graphics::PixelFormat _format;
+ YUVAToRGBAManager::LuminanceScale _scale;
+ uint32 _rgbToPix[3 * 768]; // 9216 bytes
+ uint32 _alphaToPix[256]; // 1024 bytes
+};
- _rgbToPix = new uint32[3 * 768]; // 9216 bytes
+YUVAToRGBALookup::YUVAToRGBALookup(Graphics::PixelFormat format, YUVAToRGBAManager::LuminanceScale scale) {
+ _format = format;
+ _scale = scale;
uint32 *r_2_pix_alloc = &_rgbToPix[0 * 768];
uint32 *g_2_pix_alloc = &_rgbToPix[1 * 768];
uint32 *b_2_pix_alloc = &_rgbToPix[2 * 768];
- _alphaToPix = new uint32[256]; // 958 bytes
-
- int16 CR, CB;
- int i;
-
- // Generate the tables for the display surface
+ if (scale == YUVAToRGBAManager::kScaleFull) {
+ // Set up entries 0-255 in rgb-to-pixel value tables.
+ for (int i = 0; i < 256; i++) {
+ r_2_pix_alloc[i + 256] = format.ARGBToColor(0, i, 0, 0);
+ g_2_pix_alloc[i + 256] = format.ARGBToColor(0, 0, i, 0);
+ b_2_pix_alloc[i + 256] = format.ARGBToColor(0, 0, 0, i);
+ }
- for (i = 0; i < 256; i++) {
- // Gamma correction (luminescence table) and chroma correction
- // would be done here. See the Berkeley mpeg_play sources.
+ // Spread out the values we have to the rest of the array so that we do
+ // not need to check for overflow.
+ for (int i = 0; i < 256; i++) {
+ r_2_pix_alloc[i] = r_2_pix_alloc[256];
+ r_2_pix_alloc[i + 512] = r_2_pix_alloc[511];
+ g_2_pix_alloc[i] = g_2_pix_alloc[256];
+ g_2_pix_alloc[i + 512] = g_2_pix_alloc[511];
+ b_2_pix_alloc[i] = b_2_pix_alloc[256];
+ b_2_pix_alloc[i + 512] = b_2_pix_alloc[511];
+ }
+ } else {
+ // Set up entries 16-235 in rgb-to-pixel value tables
+ for (int i = 16; i < 236; i++) {
+ int scaledValue = (i - 16) * 255 / 219;
+ r_2_pix_alloc[i + 256] = format.ARGBToColor(0, scaledValue, 0, 0);
+ g_2_pix_alloc[i + 256] = format.ARGBToColor(0, 0, scaledValue, 0);
+ b_2_pix_alloc[i + 256] = format.ARGBToColor(0, 0, 0, scaledValue);
+ }
- CR = CB = (i - 128);
- Cr_r_tab[i] = (int16) ( (0.419 / 0.299) * CR) + 0 * 768 + 256;
- Cr_g_tab[i] = (int16) (-(0.299 / 0.419) * CR) + 1 * 768 + 256;
- Cb_g_tab[i] = (int16) (-(0.114 / 0.331) * CB);
- Cb_b_tab[i] = (int16) ( (0.587 / 0.331) * CB) + 2 * 768 + 256;
- }
+ // Spread out the values we have to the rest of the array so that we do
+ // not need to check for overflow. We have to do it here in two steps.
+ for (int i = 0; i < 256 + 16; i++) {
+ r_2_pix_alloc[i] = r_2_pix_alloc[256 + 16];
+ g_2_pix_alloc[i] = g_2_pix_alloc[256 + 16];
+ b_2_pix_alloc[i] = b_2_pix_alloc[256 + 16];
+ }
- // Set up entries 0-255 in rgb-to-pixel value tables.
- for (i = 0; i < 256; i++) {
- r_2_pix_alloc[i + 256] = format.ARGBToColor(0, i, 0, 0);
- g_2_pix_alloc[i + 256] = format.ARGBToColor(0, 0, i, 0);
- b_2_pix_alloc[i + 256] = format.ARGBToColor(0, 0, 0, i);
+ for (int i = 256 + 236; i < 768; i++) {
+ r_2_pix_alloc[i] = r_2_pix_alloc[256 + 236 - 1];
+ g_2_pix_alloc[i] = g_2_pix_alloc[256 + 236 - 1];
+ b_2_pix_alloc[i] = b_2_pix_alloc[256 + 236 - 1];
+ }
}
// Set up entries 0-255 in alpha-to-pixel value table.
- for (i = 0; i < 256; i++) {
+ for (int i = 0; i < 256; i++) {
_alphaToPix[i] = format.ARGBToColor(i, 0, 0, 0);
}
-
- // Spread out the values we have to the rest of the array so that we do
- // not need to check for overflow.
- for (i = 0; i < 256; i++) {
- r_2_pix_alloc[i] = r_2_pix_alloc[256];
- r_2_pix_alloc[i + 512] = r_2_pix_alloc[511];
- g_2_pix_alloc[i] = g_2_pix_alloc[256];
- g_2_pix_alloc[i + 512] = g_2_pix_alloc[511];
- b_2_pix_alloc[i] = b_2_pix_alloc[256];
- b_2_pix_alloc[i + 512] = b_2_pix_alloc[511];
- }
}
-YUVAToRGBALookup::~YUVAToRGBALookup() {
- delete[] _rgbToPix;
- delete[] _colorTab;
- delete[] _alphaToPix;
-}
+YUVAToRGBAManager::YUVAToRGBAManager() {
+ _lookup = 0;
-class YUVAToRGBAManager : public Common::Singleton<YUVAToRGBAManager> {
-public:
- const YUVAToRGBALookup *getLookup(Graphics::PixelFormat format);
+ int16 *Cr_r_tab = &_colorTab[0 * 256];
+ int16 *Cr_g_tab = &_colorTab[1 * 256];
+ int16 *Cb_g_tab = &_colorTab[2 * 256];
+ int16 *Cb_b_tab = &_colorTab[3 * 256];
-private:
- friend class Common::Singleton<SingletonBaseType>;
- YUVAToRGBAManager();
- ~YUVAToRGBAManager();
+ // Generate the tables for the display surface
- Graphics::PixelFormat _lastFormat;
- YUVAToRGBALookup *_lookup;
-};
+ for (int i = 0; i < 256; i++) {
+ // Gamma correction (luminescence table) and chroma correction
+ // would be done here. See the Berkeley mpeg_play sources.
-YUVAToRGBAManager::YUVAToRGBAManager() {
- _lookup = 0;
+ int16 CR = (i - 128), CB = CR;
+ Cr_r_tab[i] = (int16) ( (0.419 / 0.299) * CR) + 0 * 768 + 256;
+ Cr_g_tab[i] = (int16) (-(0.299 / 0.419) * CR) + 1 * 768 + 256;
+ Cb_g_tab[i] = (int16) (-(0.114 / 0.331) * CB);
+ Cb_b_tab[i] = (int16) ( (0.587 / 0.331) * CB) + 2 * 768 + 256;
+ }
}
YUVAToRGBAManager::~YUVAToRGBAManager() {
delete _lookup;
}
-const YUVAToRGBALookup *YUVAToRGBAManager::getLookup(Graphics::PixelFormat format) {
- if (_lastFormat == format)
+const YUVAToRGBALookup *YUVAToRGBAManager::getLookup(Graphics::PixelFormat format, YUVAToRGBAManager::LuminanceScale scale) {
+ if (_lookup && _lookup->getFormat() == format && _lookup->getScale() == scale)
return _lookup;
delete _lookup;
- _lookup = new YUVAToRGBALookup(format);
- _lastFormat = format;
+ _lookup = new YUVAToRGBALookup(format, scale);
return _lookup;
}
-} // End of namespace Graphics
-
-namespace Common {
-DECLARE_SINGLETON(Graphics::YUVAToRGBAManager);
-}
-
-#define YUVAToRGBAMan (Graphics::YUVAToRGBAManager::instance())
-
-namespace Graphics {
-
#define PUT_PIXELA(s, a, d) \
L = &rgbToPix[(s)]; \
*((PixelInt *)(d)) = (L[cr_r] | L[crb_g] | L[cb_b] | aToPix[a])
template<typename PixelInt>
-void convertYUVA420ToRGBA(byte *dstPtr, int dstPitch, const YUVAToRGBALookup *lookup, const byte *ySrc, const byte *uSrc, const byte *vSrc, const byte *aSrc, int yWidth, int yHeight, int yPitch, int uvPitch) {
+void convertYUVA420ToRGBA(byte *dstPtr, int dstPitch, const YUVAToRGBALookup *lookup, int16 *colorTab, const byte *ySrc, const byte *uSrc, const byte *vSrc, const byte *aSrc, int yWidth, int yHeight, int yPitch, int uvPitch) {
int halfHeight = yHeight >> 1;
int halfWidth = yWidth >> 1;
// Keep the tables in pointers here to avoid a dereference on each pixel
- const int16 *Cr_r_tab = lookup->_colorTab;
+ const int16 *Cr_r_tab = colorTab;
const int16 *Cr_g_tab = Cr_r_tab + 256;
const int16 *Cb_g_tab = Cr_g_tab + 256;
const int16 *Cb_b_tab = Cb_g_tab + 256;
- const uint32 *rgbToPix = lookup->_rgbToPix;
- const uint32 *aToPix = lookup->_alphaToPix;
+ const uint32 *rgbToPix = lookup->getRGBToPix();
+ const uint32 *aToPix = lookup->getAlphaToPix();
for (int h = 0; h < halfHeight; h++) {
for (int w = 0; w < halfWidth; w++) {
@@ -241,7 +236,6 @@ void convertYUVA420ToRGBA(byte *dstPtr, int dstPitch, const YUVAToRGBALookup *lo
ySrc++;
aSrc++;
dstPtr += sizeof(PixelInt);
-
}
dstPtr += dstPitch;
@@ -252,23 +246,21 @@ void convertYUVA420ToRGBA(byte *dstPtr, int dstPitch, const YUVAToRGBALookup *lo
}
}
-void convertYUVA420ToRGBA(Graphics::Surface *dst, const byte *ySrc, const byte *uSrc, const byte *vSrc, const byte *aSrc, int yWidth, int yHeight, int yPitch, int uvPitch) {
+void YUVAToRGBAManager::convert420(Graphics::Surface *dst, YUVAToRGBAManager::LuminanceScale scale, const byte *ySrc, const byte *uSrc, const byte *vSrc, const byte *aSrc, int yWidth, int yHeight, int yPitch, int uvPitch) {
// Sanity checks
assert(dst && dst->pixels);
assert(dst->format.bytesPerPixel == 2 || dst->format.bytesPerPixel == 4);
- assert(ySrc && uSrc && vSrc && aSrc);
+ assert(ySrc && uSrc && vSrc);
assert((yWidth & 1) == 0);
+ assert((yHeight & 1) == 0);
- if (yHeight & 1) // Odd height, the last line won't be converted
- warning("Decoding YUV420 data with an odd height %d", yHeight);
-
- const YUVAToRGBALookup *lookup = YUVAToRGBAMan.getLookup(dst->format);
+ const YUVAToRGBALookup *lookup = getLookup(dst->format, scale);
// Use a templated function to avoid an if check on every pixel
if (dst->format.bytesPerPixel == 2)
- convertYUVA420ToRGBA<uint16>((byte *)dst->pixels, dst->pitch, lookup, ySrc, uSrc, vSrc, aSrc, yWidth, yHeight, yPitch, uvPitch);
+ convertYUVA420ToRGBA<uint16>((byte *)dst->pixels, dst->pitch, lookup, _colorTab, ySrc, uSrc, vSrc, aSrc, yWidth, yHeight, yPitch, uvPitch);
else
- convertYUVA420ToRGBA<uint32>((byte *)dst->pixels, dst->pitch, lookup, ySrc, uSrc, vSrc, aSrc, yWidth, yHeight, yPitch, uvPitch);
+ convertYUVA420ToRGBA<uint32>((byte *)dst->pixels, dst->pitch, lookup, _colorTab, ySrc, uSrc, vSrc, aSrc, yWidth, yHeight, yPitch, uvPitch);
}
} // End of namespace Graphics
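
The rewritten lookup constructor above distinguishes two luminance ranges: kScaleFull uses the full [0, 255] range directly, while kScaleITU treats the input as ITU-R BT.601 "studio" range [16, 235], rescales it to [0, 255], and clamps anything outside that range by duplicating the boundary table entries. Restated as a small standalone helper (not part of the patch), the rescaling is just a linear mapping:

	// Expand an ITU-R BT.601 luma value from [16, 235] to [0, 255], clamping.
	static inline int ituLumaToFullRange(int y) {
		if (y < 16)
			return 0;
		if (y > 235)
			return 255;
		return (y - 16) * 255 / 219; // same formula as the kScaleITU loop above
	}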
diff --git a/graphics/yuva_to_rgba.h b/graphics/yuva_to_rgba.h
index abc010e..e4e495e 100644
--- a/graphics/yuva_to_rgba.h
+++ b/graphics/yuva_to_rgba.h
@@ -23,6 +23,7 @@
/**
* @file
* YUV to RGB conversion used in engines:
+ * - mohawk
* - scumm (he)
* - sword25
*/
@@ -31,27 +32,50 @@
#define GRAPHICS_YUVA_TO_RGBA_H
#include "common/scummsys.h"
+#include "common/singleton.h"
#include "graphics/surface.h"
namespace Graphics {
-struct Surface;
+class YUVAToRGBALookup;
-/**
- * Convert a YUVA420 image to an RGBA surface
- *
- * @param dst the destination surface
- * @param ySrc the source of the y component
- * @param uSrc the source of the u component
- * @param vSrc the source of the v component
- * @param aSrc the source of the a component
- * @param yWidth the width of the y surface (must be divisible by 2)
- * @param yHeight the height of the y surface (must be divisible by 2)
- * @param yPitch the pitch of the y surface
- * @param uvPitch the pitch of the u and v surfaces
- */
-void convertYUVA420ToRGBA(Graphics::Surface *dst, const byte *ySrc, const byte *uSrc, const byte *vSrc, const byte *aSrc, int yWidth, int yHeight, int yPitch, int uvPitch);
+class YUVAToRGBAManager : public Common::Singleton<YUVAToRGBAManager> {
+public:
+ /** The scale of the luminance values */
+ enum LuminanceScale {
+ kScaleFull, /** Luminance values range from [0, 255] */
+ kScaleITU /** Luminance values range from [16, 235], the range from ITU-R BT.601 */
+ };
+
+ /**
+ * Convert a YUV420 image to an RGB surface
+ *
+ * @param dst the destination surface
+ * @param scale the scale of the luminance values
+ * @param ySrc the source of the y component
+ * @param uSrc the source of the u component
+ * @param vSrc the source of the v component
+ * @param aSrc the source of the a component
+ * @param yWidth the width of the y surface (must be divisible by 2)
+ * @param yHeight the height of the y surface (must be divisible by 2)
+ * @param yPitch the pitch of the y surface
+ * @param uvPitch the pitch of the u and v surfaces
+ */
+ void convert420(Graphics::Surface *dst, LuminanceScale scale, const byte *ySrc, const byte *uSrc, const byte *vSrc, const byte *aSrc, int yWidth, int yHeight, int yPitch, int uvPitch);
+
+private:
+ friend class Common::Singleton<SingletonBaseType>;
+ YUVAToRGBAManager();
+ ~YUVAToRGBAManager();
+
+ const YUVAToRGBALookup *getLookup(Graphics::PixelFormat format, LuminanceScale scale);
+
+ YUVAToRGBALookup *_lookup;
+ int16 _colorTab[4 * 256]; // 2048 bytes
+};
} // End of namespace Graphics
+#define YUVAToRGBAMan (::Graphics::YUVAToRGBAManager::instance())
+
#endif
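
With the new header, callers no longer use the free convertYUVA420ToRGBA() function; they go through the YUVAToRGBAMan singleton instead. A hedged usage sketch, assuming Y/U/V/A planes already produced by a decoder (width, height, the plane pointers and pitches are placeholders):

	#include "graphics/yuva_to_rgba.h"

	Graphics::Surface dst;
	dst.create(width, height, Graphics::PixelFormat(4, 8, 8, 8, 8, 0, 8, 16, 24));

	// width and height must be even; dst must be 2 or 4 bytes per pixel.
	YUVAToRGBAMan.convert420(&dst, Graphics::YUVAToRGBAManager::kScaleITU,
	                         yPlane, uPlane, vPlane, aPlane,
	                         width, height, yPitch, uvPitch);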
diff --git a/video/bink_decoder.cpp b/video/bink_decoder.cpp
index db7e5ea..f6a63ee 100644
--- a/video/bink_decoder.cpp
+++ b/video/bink_decoder.cpp
@@ -40,7 +40,7 @@
#include "common/dct.h"
#include "common/system.h"
-#include "graphics/yuv_to_rgb.h"
+#include "graphics/yuva_to_rgba.h"
#include "graphics/surface.h"
#include "video/binkdata.h"
@@ -63,7 +63,7 @@ namespace Video {
BinkDecoder::BinkDecoder() {
_bink = 0;
- _selectedAudioTrack = -1; // ResidualVM-specific
+ _selectedAudioTrack = 0; // ResidualVM-specific
}
BinkDecoder::~BinkDecoder() {
@@ -122,8 +122,7 @@ bool BinkDecoder::loadStream(Common::SeekableReadStream *stream) {
_audioTracks.push_back(track);
- bool doPlay = true;//(_selectedAudioTrack == -1 && (int32)i == _selectedAudioTrack); // ResidualVM-specific
- initAudioTrack(_audioTracks[i], doPlay);
+ initAudioTrack(_audioTracks[i]);
}
_bink->skip(4 * audioTrackCount);
@@ -187,16 +186,19 @@ void BinkDecoder::readNextPacket() {
uint32 audioPacketStart = _bink->pos();
uint32 audioPacketEnd = _bink->pos() + audioPacketLength;
- // Number of samples in bytes
- audio.sampleCount = _bink->readUint32LE() / (2 * audio.channels);
+ // ResidualVM specific
+ if (i == _selectedAudioTrack) {
+ // Number of samples in bytes
+ audio.sampleCount = _bink->readUint32LE() / (2 * audio.channels);
- audio.bits = new Common::BitStream32LELSB(new Common::SeekableSubReadStream(_bink,
- audioPacketStart + 4, audioPacketEnd), true);
+ audio.bits = new Common::BitStream32LELSB(new Common::SeekableSubReadStream(_bink,
+ audioPacketStart + 4, audioPacketEnd), true);
- audioTrack->decodePacket();
+ audioTrack->decodePacket();
- delete audio.bits;
- audio.bits = 0;
+ delete audio.bits;
+ audio.bits = 0;
+ }
_bink->seek(audioPacketEnd);
@@ -326,15 +328,10 @@ BinkDecoder::BinkVideoTrack::~BinkVideoTrack() {
// ResidualVM-specific function
void BinkDecoder::setAudioTrack(uint32 track) {
_selectedAudioTrack = track;
- warning("Selecting audio track for Bink currently broken");
}
// ResidualVM-specific function
bool BinkDecoder::seek(const Audio::Timestamp &time) {
- // TODO: Don't seek if we actually just want the next frame
- // Fast path
- // if ((int32)frame == getCurFrame() + 1)
- // return true;
VideoDecoder::seek(time);
uint32 frame = getCurFrame();
@@ -431,11 +428,10 @@ void BinkDecoder::BinkVideoTrack::decodePacket(VideoFrame &frame) {
}
// Convert the YUV data we have to our format
- // We're ignoring alpha for now
// The width used here is the surface-width, and not the video-width
// to allow for odd-sized videos.
- assert(_curPlanes[0] && _curPlanes[1] && _curPlanes[2]);
- YUVToRGBMan.convert420(&_surface, Graphics::YUVToRGBManager::kScaleITU, _curPlanes[0], _curPlanes[1], _curPlanes[2],
+ assert(_curPlanes[0] && _curPlanes[1] && _curPlanes[2] && _curPlanes[3]);
+ YUVAToRGBAMan.convert420(&_surface, Graphics::YUVAToRGBAManager::kScaleITU, _curPlanes[0], _curPlanes[1], _curPlanes[2], _curPlanes[3],
_surfaceWidth, _surfaceHeight, _surfaceWidth, _surfaceWidth >> 1);
// And swap the planes with the reference planes
@@ -1611,7 +1607,7 @@ float BinkDecoder::BinkAudioTrack::getFloat() {
return f;
}
-void BinkDecoder::initAudioTrack(AudioInfo &audio, bool doPlay) {
+void BinkDecoder::initAudioTrack(AudioInfo &audio) {
audio.sampleCount = 0;
audio.bits = 0;
@@ -1677,8 +1673,7 @@ void BinkDecoder::initAudioTrack(AudioInfo &audio, bool doPlay) {
else if (audio.codec == kAudioCodecDCT)
audio.dct = new Common::DCT(frameLenBits, Common::DCT::DCT_III);
- if (doPlay) // ResidualVM-specific
- addTrack(new BinkAudioTrack(audio));
+ addTrack(new BinkAudioTrack(audio));
}
} // End of namespace Video
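
The decoder now defaults _selectedAudioTrack to 0 and, in readNextPacket(), decodes only the packet belonging to that track; packets of the other language tracks are skipped with _bink->seek(audioPacketEnd). The engine picks the language up front, as the movie.cpp hunk above shows:

	// Select the audio language before loading the stream; only this track is decoded.
	Video::BinkDecoder bink;
	bink.setAudioTrack(ConfMan.getInt("audio_language"));
	bink.loadStream(binkStream);
	bink.start();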
diff --git a/video/bink_decoder.h b/video/bink_decoder.h
index 7214517..cf5de7d 100644
--- a/video/bink_decoder.h
+++ b/video/bink_decoder.h
@@ -360,9 +360,9 @@ private:
Common::Array<AudioInfo> _audioTracks; ///< All audio tracks.
Common::Array<VideoFrame> _frames; ///< All video frames.
- void initAudioTrack(AudioInfo &audio, bool doPlay); // ResidualVM-specific
+ void initAudioTrack(AudioInfo &audio);
// ResidualVM-specific:
- int32 _selectedAudioTrack;
+ uint32 _selectedAudioTrack;
};
} // End of namespace Video
diff --git a/video/video_decoder.cpp b/video/video_decoder.cpp
index 826880b..ef030c6 100644
--- a/video/video_decoder.cpp
+++ b/video/video_decoder.cpp
@@ -166,8 +166,8 @@ Graphics::PixelFormat VideoDecoder::getPixelFormat() const {
const Graphics::Surface *VideoDecoder::decodeNextFrame() {
_needsUpdate = false;
- readNextPacket();
VideoTrack *track = findNextVideoTrack();
+ readNextPacket();
if (!track)
return 0;
@@ -325,6 +325,11 @@ bool VideoDecoder::seek(const Audio::Timestamp &time) {
}
bool VideoDecoder::seekToFrame(uint frame) {
+ // Fast path
+ if ((int32)frame == getCurFrame() + 1) {
+ return true;
+ }
+
VideoTrack *track = 0;
for (TrackList::iterator it = _tracks.begin(); it != _tracks.end(); it++) {
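
Finally, VideoDecoder::seekToFrame() gains the fast path that the removed comment in the Bink-specific seek() only sketched: seeking to the frame right after the current one is treated as a no-op, so callers stepping through a video one frame at a time no longer pay for a real seek. A hedged sketch of the pattern that benefits (decoder and frameCount are placeholders):

	for (uint frame = 1; frame < frameCount; frame++) {
		decoder.seekToFrame(frame); // no-op when frame == getCurFrame() + 1
		const Graphics::Surface *s = decoder.decodeNextFrame();
		// ... upload or blit s ...
	}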