@jfiedler
Created September 3, 2014 07:42
package org.mapdb;

import static org.junit.Assert.assertEquals;

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import java.io.Serializable;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.concurrent.BlockingQueue;

import org.junit.After;
import org.junit.Before;
import org.junit.Test;

/**
 * JUnit test that demonstrates unexpected DB file growth when queues are used with custom serializers. One test shows
 * the expected file growth behavior for plain strings (and succeeds), while the second fails with the same payload
 * (but using a dummy object and a custom serializer).
 *
 * @author Jan Fiedler
 */
public class _MapDB_FileGrowthTest
{
    public Path tempFile;

    @Before
    public void setUp()
        throws Exception
    {
        tempFile = Files.createTempFile( "mapdb-db-test-file-", null );
    }

    @After
    public void tearDown()
        throws Exception
    {
        Files.delete( tempFile );
    }
    /**
     * Test object that should be serialized.
     */
    static class TestObject
    {
        final String payload;

        TestObject( String payload )
        {
            this.payload = payload;
        }

        public String getPayload()
        {
            return payload;
        }
    }
    /**
     * Custom serializer that takes care of our transport records.
     */
    static class TestObjectSerializer
        implements Serializer<TestObject>, Serializable
    {
        @Override
        public void serialize( DataOutput out, TestObject value )
            throws IOException
        {
            out.writeUTF( value.getPayload() );
        }

        @Override
        public TestObject deserialize( DataInput in, int available )
            throws IOException
        {
            return new TestObject( in.readUTF() );
        }

        @Override
        public int fixedSize()
        {
            return -1;
        }
    }
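
    // Sanity-check sketch (illustrative addition, not strictly needed to reproduce the issue):
    // verify that the custom serializer round-trips on plain java.io streams, independent of
    // MapDB, to rule out the serializer itself as the cause of the file growth.
    @Test
    public void testSerializerRoundTrip()
        throws Exception
    {
        TestObjectSerializer serializer = new TestObjectSerializer();
        java.io.ByteArrayOutputStream bytes = new java.io.ByteArrayOutputStream();

        // serialize a single record into an in-memory buffer
        serializer.serialize( new java.io.DataOutputStream( bytes ), new TestObject( "42" ) );

        // read it back and verify the payload survived the round trip
        java.io.DataInputStream in =
            new java.io.DataInputStream( new java.io.ByteArrayInputStream( bytes.toByteArray() ) );
        TestObject copy = serializer.deserialize( in, bytes.size() );
        assertEquals( "42", copy.getPayload() );
    }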
    @Test
    public void testDBFileGrowthForPlainStrings()
        throws Exception
    {
        // create a db backed by the temp file, with transactions disabled
        DB db = DBMaker.newFileDB( tempFile.toFile() ).transactionDisable().make();

        // create a queue within the db
        BlockingQueue<String> queue = db.getQueue( "test-queue" );

        // put some stuff into the queue
        final int maxBacklog = 20000;
        for ( int i = 0; i < maxBacklog; i++ )
        {
            queue.offer( String.valueOf( i ) );
        }

        // the DB file should now have reached its maximum size
        long max = Files.size( tempFile );

        // now consume the full queue
        while ( queue.poll() != null )
        {
            // nothing to do
        }

        // expect the DB size to be unchanged (MapDB does not shrink files)
        assertEquals( max, Files.size( tempFile ) );

        // now run a second put loop with fewer elements than the first run
        for ( int i = 0; i < maxBacklog / 2; i++ )
        {
            queue.offer( String.valueOf( i ) );
        }

        // expect that the DB did not grow, since the first run should have left enough free space
        assertEquals( max, Files.size( tempFile ) );
    }
    @Test
    public void testDBFileGrowthWithSerializer()
        throws Exception
    {
        // create a db backed by the temp file, with transactions disabled
        DB db = DBMaker.newFileDB( tempFile.toFile() ).transactionDisable().make();

        // create a queue within the db, using the custom serializer
        Serializer<TestObject> serializer = new TestObjectSerializer();
        BlockingQueue<TestObject> queue = db.createQueue( "test-queue", serializer, false );

        // put some stuff into the queue
        final int maxBacklog = 20000;
        for ( int i = 0; i < maxBacklog; i++ )
        {
            queue.offer( new TestObject( String.valueOf( i ) ) );
        }

        // the DB file should now have reached its maximum size
        long max = Files.size( tempFile );
        System.out.println( "DB file size: " + max );

        // now consume the full queue
        while ( queue.poll() != null )
        {
            // nothing to do
        }

        // expect the DB size to be unchanged (MapDB does not shrink files)
        assertEquals( max, Files.size( tempFile ) );

        // now run a second put loop with fewer elements than the first run
        for ( int i = 0; i < maxBacklog / 2; i++ )
        {
            queue.offer( new TestObject( String.valueOf( i ) ) );
        }

        // expect that the DB did not grow, since the first run should have left enough free space
        assertEquals( max, Files.size( tempFile ) );
    }
}
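
The assertions above only check that space released by the drained queue is reused in place; MapDB never shrinks the store file on its own. If the goal were merely to keep the file small rather than to demonstrate the growth, the store could be compacted explicitly after draining. A minimal sketch, assuming the same MapDB 1.x API used in the tests above:

// hypothetical cleanup step, not part of the repro
while ( queue.poll() != null )
{
    // drain the queue completely
}
db.compact(); // rewrites the store and drops unused space
db.close();
System.out.println( "DB file size after compact: " + Files.size( tempFile ) );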