ColumnPaginationFilterTest.java

import static org.apache.hadoop.hbase.util.Bytes.toBytes;
 
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.filter.ColumnCountGetFilter;
import org.apache.hadoop.hbase.filter.ColumnPaginationFilter;
import org.apache.hadoop.hbase.filter.Filter;
import org.apache.hadoop.hbase.util.Bytes;
 
// Example code demonstrating a possible bug: the max-versions limit configured for a
// table or scanner is not applied to discard older cell versions before the
// ColumnPaginationFilter or ColumnCountGetFilter counts columns.
//
// This test creates a table containing a single row with two columns in the same
// column family. Two versions of the first column and one version of the second
// column are added to the row. The filter's column limit is set to 2. Debugging the
// filter code shows that it accepts both versions of the first column, then rejects
// the second column because the column limit has been reached. However, only the
// latest version of the first column is ultimately returned by the Get or Scan,
// since max versions is set to 1.
//
// Expected output:
//   Get result
//   row / fam / q1 / q1-v2
//   row / fam / q2 / q2-v1
//   Scan result
//   row / fam / q1 / q1-v2
//   row / fam / q2 / q2-v1
//
// Actual output:
//   Get result
//   row / fam / q1 / q1-v2
//   Scan result
//   row / fam / q1 / q1-v2
public class ColumnPaginationFilterTest {

    public static void main(String[] args) throws Exception {
 
        Configuration conf = new Configuration();
        HBaseTestingUtility testingUtil = new HBaseTestingUtility(conf);
        testingUtil.startMiniCluster();
 
        // Create a table whose single column family retains only one version.
        HColumnDescriptor columnDesc = new HColumnDescriptor("fam");
        columnDesc.setMaxVersions(1);
        HTableDescriptor tableDesc = new HTableDescriptor("table");
        tableDesc.addFamily(columnDesc);
        HBaseAdmin admin = new HBaseAdmin(conf);
        admin.createTable(tableDesc);
        admin.close();
 
        // Write two versions of q1 and a single version of q2 to one row.
        HTable table = new HTable(conf, "table");
        table.put(new Put(toBytes("row")).add(toBytes("fam"), toBytes("q1"), toBytes("q1-v1")));
        table.put(new Put(toBytes("row")).add(toBytes("fam"), toBytes("q1"), toBytes("q1-v2")));
        table.put(new Put(toBytes("row")).add(toBytes("fam"), toBytes("q2"), toBytes("q2-v1")));
 
        // ColumnCountGetFilter(2) exhibits the same behavior.
        Filter filter = new ColumnPaginationFilter(2, 0);
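        // For reference, the count-based variant noted above can be swapped in to
        // reproduce the same behavior (not exercised in this run):
        // Filter filter = new ColumnCountGetFilter(2);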
 
        Get get = new Get(toBytes("row"));
        get.setMaxVersions(1);
        get.setFilter(filter);
 
        Result getResult = table.get(get);
 
System.out.println("Get result");
for (KeyValue kv : getResult.raw()) {
System.out.println(Bytes.toString(kv.getRow()) + " / "
+ Bytes.toString(kv.getFamily()) + " / "
+ Bytes.toString(kv.getQualifier()) + " / "
+ Bytes.toString(kv.getValue()));
}
 
        Scan scan = new Scan();
        scan.setMaxVersions(1);
        scan.setFilter(filter);
 
        ResultScanner scanner = table.getScanner(scan);
 
System.out.println("Scan result");
for (Result scanResult : scanner) {
for (KeyValue kv : scanResult.raw()) {
System.out.println(Bytes.toString(kv.getRow()) + " / "
+ Bytes.toString(kv.getFamily()) + " / "
+ Bytes.toString(kv.getQualifier()) + " / "
+ Bytes.toString(kv.getValue()));
}
}
 
        scanner.close();
        table.close();
        testingUtil.shutdownMiniCluster();
    }
}
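
A possible client-side workaround, sketched here rather than taken from the HBase
codebase: fetch the row's columns without the pagination filter (max versions already
trims each column to its newest cell on the server) and apply the limit/offset over
the returned KeyValues in the client. The class and method names below
(ClientColumnPagination, paginateColumns) are illustrative, not part of the HBase API.

import java.util.Arrays;

import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.client.Result;

public class ClientColumnPagination {

    // Returns up to 'limit' KeyValues starting at 'offset', counted over the
    // version-trimmed cells the server actually returned for the row.
    public static KeyValue[] paginateColumns(Result result, int limit, int offset) {
        KeyValue[] kvs = result.raw();
        int from = Math.min(offset, kvs.length);
        int to = Math.min(offset + limit, kvs.length);
        return Arrays.copyOfRange(kvs, from, to);
    }
}

This only works when the whole row is small enough to fetch in one round trip; it
trades the server-side filtering the pagination filters are meant to provide for
correct counting over version-trimmed cells.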
