Skip to content

Instantly share code, notes, and snippets.

@shrijeet
Created October 18, 2012 17:30
Show Gist options
  • Star 0 You must be signed in to star a gist
  • Fork 0 You must be signed in to fork a gist
  • Save shrijeet/3913529 to your computer and use it in GitHub Desktop.
Save shrijeet/3913529 to your computer and use it in GitHub Desktop.
A program to rename HBase table (works for 0.92.x)
package hbase.experiments;
import java.io.FileNotFoundException;
import java.io.IOException;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.FSTableDescriptors;
import org.apache.hadoop.hbase.util.FSUtils;
import org.apache.hadoop.hbase.util.Writables;
public class RenameTable {

  static final String programName = "RenameTable";
  static final Log LOG = LogFactory.getLog(RenameTable.class);

  /** Prints command-line usage to stdout. */
  static void printUsage() {
    System.out.println("Usage: " + programName + " <OLD_NAME> <NEW_NAME>");
  }

  /**
   * Verifies that the passed {@code dir} exists and is a directory.
   *
   * @throws IOException if the path does not exist or is not a directory
   */
  static void isDirExists(FileSystem fs, Path dir) throws IOException {
    if (!fs.exists(dir)) {
      throw new IOException("Does not exist " + dir.toString());
    } else if (!fs.getFileStatus(dir).isDir()) {
      throw new IOException("Not a directory " + dir.toString());
    }
  }

  /** Returns true if the region described by {@code hri} belongs to {@code tableName}. */
  static boolean isTableRegion(byte [] tableName, HRegionInfo hri) {
    return Bytes.equals(Bytes.toBytes(hri.getTableNameAsString()), tableName);
  }

  /**
   * Builds a new HRegionInfo for {@code tableName} that covers the same key
   * range (and split flag) as {@code oldHRI}.
   */
  static HRegionInfo createHRI(byte [] tableName, HRegionInfo oldHRI) {
    return new HRegionInfo(tableName, oldHRI.getStartKey(), oldHRI.getEndKey(), oldHRI.isSplit());
  }

  /** Reads the on-disk table descriptor for {@code tableName} from the HBase root dir. */
  static HTableDescriptor getHTableDescriptor(byte [] tableName, FileSystem fs, Configuration conf)
  throws FileNotFoundException,
      IOException {
    return FSTableDescriptors.getTableDescriptor(fs, new Path(conf.get(HConstants.HBASE_DIR)), tableName);
  }

  /**
   * Renames an HBase table (works for 0.92.x) by moving region directories in
   * the filesystem and rewriting the corresponding rows in .META.
   * Expects two arguments: OLD_NAME and NEW_NAME.
   */
  public static void main(String [] args) throws IOException {
    if (args.length != 2) {
      printUsage();
      System.exit(1);
    }
    String oldTableName = args[0];
    String newTableName = args[1];
    /*
     * Set hadoop filesystem configuration using the hbase.rootdir. Otherwise,
     * we'll always use localhost though the hbase.rootdir might be pointing at
     * hdfs location.
     */
    Configuration conf = HBaseConfiguration.create();
    conf.set("fs.default.name", conf.get(HConstants.HBASE_DIR));
    FileSystem fs = FileSystem.get(conf);
    /*
     * If new table directory does not exist, create it. Keep going if already
     * exists because maybe we are rerunning script because it failed first
     * time. Otherwise we are overwriting a pre-existing table.
     */
    Path rootdir = FSUtils.getRootDir(conf);
    Path oldTableDir = fs.makeQualified(new Path(rootdir, new Path(oldTableName)));
    isDirExists(fs, oldTableDir);
    Path newTableDir = fs.makeQualified(new Path(rootdir, newTableName));
    if (!fs.exists(newTableDir)) {
      // FIX: check the mkdirs return value instead of silently continuing.
      if (!fs.mkdirs(newTableDir)) {
        throw new IOException("Failed to create directory " + newTableDir.toString());
      }
    }
    /* Get hold of oldHTableDescriptor and create a new one */
    HTableDescriptor oldHTableDescriptor = getHTableDescriptor(Bytes.toBytes(oldTableName), fs,
        conf);
    HTableDescriptor newHTableDescriptor = new HTableDescriptor(Bytes.toBytes(newTableName));
    for (HColumnDescriptor family : oldHTableDescriptor.getColumnFamilies()) {
      newHTableDescriptor.addFamily(family);
    }
    /*
     * Run through the meta table moving region mentions from old to new table
     * name.
     */
    HTable metaTable = new HTable(conf, HConstants.META_TABLE_NAME);
    try {
      ResultScanner scanner = metaTable.getScanner(new Scan());
      try {
        Result result;
        while ((result = scanner.next()) != null) {
          String rowId = Bytes.toString(result.getRow());
          HRegionInfo oldHRI = Writables.getHRegionInfo(result.getValue(HConstants.CATALOG_FAMILY,
              HConstants.REGIONINFO_QUALIFIER));
          if (oldHRI == null) {
            throw new IOException("HRegionInfo is null for " + rowId);
          }
          if (!isTableRegion(Bytes.toBytes(oldTableName), oldHRI)) {
            continue;
          }
          System.out.println(oldHRI.toString());
          Path oldRDir = new Path(oldTableDir, new Path(oldHRI.getEncodedName()));
          if (!fs.exists(oldRDir)) {
            LOG.warn(oldRDir.toString() + " does not exist -- region " + oldHRI.getRegionNameAsString());
          }
          /* Now make a new HRegionInfo to add to .META. for the new region. */
          HRegionInfo newHRI = createHRI(Bytes.toBytes(newTableName), oldHRI);
          System.out.println(newHRI);
          Path newRDir = new Path(newTableDir, new Path(newHRI.getEncodedName()));
          LOG.info("Renaming " + oldRDir.toString() + " as " + newRDir.toString());
          // FIX: fs.rename returns false on failure; the original ignored it and
          // could leave .META. pointing at a region dir that was never moved.
          // Stay tolerant of a missing source (rerun scenario) but surface it.
          if (!fs.rename(oldRDir, newRDir)) {
            LOG.warn("Rename of " + oldRDir.toString() + " to " + newRDir.toString()
                + " failed (source may be missing or target already present)");
          }
          /* Removing old region from meta */
          LOG.info("Removing " + rowId + " from .META.");
          Delete d = new Delete(result.getRow());
          metaTable.delete(d);
          /* Create 'new' region */
          HRegion newR = new HRegion(newTableDir, null, fs, conf, newHRI, newHTableDescriptor, null);
          LOG.info("Adding to meta: " + newR.toString());
          byte [] newRbytes = Writables.getBytes(newR.getRegionInfo());
          Put p = new Put(newR.getRegionName());
          p.add(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER, newRbytes);
          metaTable.put(p);
          /*
           * Finally update the .regioninfo under new region location so it has new
           * name.
           */
          Path regioninfofile = new Path(newR.getRegionDir(), HRegion.REGIONINFO_FILE);
          fs.delete(regioninfofile, true);
          FSDataOutputStream out = fs.create(regioninfofile);
          // FIX: close the stream even if write() throws, so the FS handle
          // is not leaked and the file is not left half-open.
          try {
            newR.getRegionInfo().write(out);
          } finally {
            out.close();
          }
        }
      } finally {
        // FIX: the original leaked the scanner (and its server-side lease)
        // on any exception in the loop.
        scanner.close();
      }
    } finally {
      // FIX: the original never closed the .META. HTable at all.
      metaTable.close();
    }
    FSTableDescriptors.createTableDescriptor(newHTableDescriptor, conf);
    // FIX: use the explicit recursive delete; the single-arg form is deprecated.
    if (!fs.delete(oldTableDir, true)) {
      LOG.warn("Failed to delete old table directory " + oldTableDir.toString());
    }
    LOG.info("DONE");
  }
}
@nfergu
Copy link

nfergu commented May 30, 2014

Tried this on 0.94 and it seems to have worked. Just be aware that you'll need to run "hbase hbck" afterwards.

This was for a test environment. I don't think I'd be brave enough to run this in production!

Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment