@dilip
Created April 19, 2012 15:39
SequenceFileKeyInputFormat and SequenceFileKeyRecordReader for enabling Hive to access data stored in a SequenceFile's key: the record reader swaps key and value so that the file's key is surfaced as the record value that Hive reads.
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.mycompany;
import java.io.IOException;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.SequenceFile;
import org.apache.hadoop.io.MapFile;
import org.apache.hadoop.mapred.FileInputFormat;
import org.apache.hadoop.mapred.FileSplit;
import org.apache.hadoop.mapred.InputSplit;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.RecordReader;
import org.apache.hadoop.mapred.Reporter;
public class SequenceFileKeyInputFormat<K, V> extends FileInputFormat<K, V> {

  public SequenceFileKeyInputFormat() {
    setMinSplitSize(SequenceFile.SYNC_INTERVAL);
  }

  @Override
  protected FileStatus[] listStatus(JobConf job) throws IOException {
    FileStatus[] files = super.listStatus(job);
    for (int i = 0; i < files.length; i++) {
      FileStatus file = files[i];
      if (file.isDir()) { // it's a MapFile
        Path dataFile = new Path(file.getPath(), MapFile.DATA_FILE_NAME);
        FileSystem fs = file.getPath().getFileSystem(job);
        // use the data file
        files[i] = fs.getFileStatus(dataFile);
      }
    }
    return files;
  }

  public RecordReader<K, V> getRecordReader(InputSplit split,
                                            JobConf job, Reporter reporter)
      throws IOException {
    reporter.setStatus(split.toString());
    // The record reader's type parameters are swapped so that the
    // SequenceFile's key is returned as the record value.
    return new SequenceFileKeyRecordReader<V, K>(job, (FileSplit) split);
  }
}
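
For context, this input format is a drop-in for the old org.apache.hadoop.mapred API; in Hive, its fully qualified class name would be supplied in a table's STORED AS INPUTFORMAT clause. The driver below is an illustrative sketch and not part of the gist: it assumes Text/Text records and runs an identity, map-only job purely to surface the SequenceFile keys as output values.

// Illustrative driver (not part of the gist): map-only job over the keys
// surfaced by SequenceFileKeyInputFormat. Class and job names are made up.
package com.mycompany;

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.FileInputFormat;
import org.apache.hadoop.mapred.FileOutputFormat;
import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.TextOutputFormat;
import org.apache.hadoop.mapred.lib.IdentityMapper;

public class SequenceFileKeyDriver {
  public static void main(String[] args) throws Exception {
    JobConf job = new JobConf(SequenceFileKeyDriver.class);
    job.setJobName("read-sequence-file-keys");

    // The record reader swaps key and value, so the SequenceFile's key
    // arrives as the map input *value* (and the file's value as the key).
    job.setInputFormat(SequenceFileKeyInputFormat.class);
    job.setOutputFormat(TextOutputFormat.class);

    job.setMapperClass(IdentityMapper.class);
    job.setNumReduceTasks(0);

    // Assumed Text/Text records; adjust to the actual Writable types.
    job.setOutputKeyClass(Text.class);
    job.setOutputValueClass(Text.class);

    FileInputFormat.setInputPaths(job, new Path(args[0]));
    FileOutputFormat.setOutputPath(job, new Path(args[1]));

    JobClient.runJob(job);
  }
}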
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.mycompany;
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.*;
import org.apache.hadoop.mapred.FileSplit;
import org.apache.hadoop.mapred.RecordReader;
import org.apache.hadoop.util.ReflectionUtils;
public class SequenceFileKeyRecordReader<K, V> implements RecordReader<V, K> {

  private SequenceFile.Reader in;
  private long start;
  private long end;
  private boolean more = true;
  protected Configuration conf;

  public SequenceFileKeyRecordReader(Configuration conf, FileSplit split)
      throws IOException {
    Path path = split.getPath();
    FileSystem fs = path.getFileSystem(conf);
    this.in = new SequenceFile.Reader(fs, path, conf);
    this.end = split.getStart() + split.getLength();
    this.conf = conf;
    if (split.getStart() > in.getPosition())
      in.sync(split.getStart()); // sync to start
    this.start = in.getPosition();
    more = start < end;
  }

  /** The class of key that must be passed to {@link #next(Object, Object)};
   *  note that this is the underlying SequenceFile's value class. */
  public Class getKeyClass() { return in.getValueClass(); }

  /** The class of value that must be passed to {@link #next(Object, Object)};
   *  note that this is the underlying SequenceFile's key class. */
  public Class getValueClass() { return in.getKeyClass(); }

  @SuppressWarnings("unchecked")
  public V createKey() {
    return (V) ReflectionUtils.newInstance(getKeyClass(), conf);
  }

  @SuppressWarnings("unchecked")
  public K createValue() {
    return (K) ReflectionUtils.newInstance(getValueClass(), conf);
  }

  /**
   * Reads the next record with key and value swapped: the SequenceFile's
   * key is stored into {@code value} and the SequenceFile's value into
   * {@code key}.
   */
  public synchronized boolean next(V key, K value) throws IOException {
    if (!more) return false;
    long pos = in.getPosition();
    boolean remaining = (in.next(value) != null);
    if (remaining) {
      getCurrentValue(key);
    }
    if (pos >= end && in.syncSeen()) {
      more = false;
    } else {
      more = remaining;
    }
    return more;
  }

  protected synchronized boolean next(K key) throws IOException {
    if (!more) return false;
    long pos = in.getPosition();
    boolean remaining = (in.next(key) != null);
    if (pos >= end && in.syncSeen()) {
      more = false;
    } else {
      more = remaining;
    }
    return more;
  }

  protected synchronized void getCurrentValue(V value) throws IOException {
    in.getCurrentValue(value);
  }

  /**
   * Return the progress within the input split.
   * @return 0.0 to 1.0 of the input byte range
   */
  public float getProgress() throws IOException {
    if (end == start) {
      return 0.0f;
    } else {
      return Math.min(1.0f, (in.getPosition() - start) / (float) (end - start));
    }
  }

  public synchronized long getPos() throws IOException {
    return in.getPosition();
  }

  protected synchronized void seek(long pos) throws IOException {
    in.seek(pos);
  }

  public synchronized void close() throws IOException { in.close(); }
}
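
A quick way to see the key/value swap in action is to drive the record reader directly. The smoke test below is an illustrative sketch, not part of the gist: it writes a few Text/Text pairs to an assumed scratch path on the local filesystem, reads them back through SequenceFileKeyRecordReader over a single split, and prints each record; the SequenceFile's key comes back as the record value.

// Illustrative smoke test (not part of the gist): demonstrates that each
// record's *value* is the SequenceFile's key. Path and data are made up.
package com.mycompany;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.SequenceFile;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.FileSplit;

public class SequenceFileKeyRecordReaderDemo {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.getLocal(conf);
    Path file = new Path("/tmp/keys-demo.seq"); // assumed scratch location

    // Write three Text/Text records.
    SequenceFile.Writer writer =
        SequenceFile.createWriter(fs, conf, file, Text.class, Text.class);
    try {
      writer.append(new Text("key-1"), new Text("value-1"));
      writer.append(new Text("key-2"), new Text("value-2"));
      writer.append(new Text("key-3"), new Text("value-3"));
    } finally {
      writer.close();
    }

    // Read the whole file back as a single split.
    long len = fs.getFileStatus(file).getLen();
    FileSplit split = new FileSplit(file, 0, len, (String[]) null);
    SequenceFileKeyRecordReader<Text, Text> reader =
        new SequenceFileKeyRecordReader<Text, Text>(conf, split);

    Text recordKey = reader.createKey();     // receives the file's value
    Text recordValue = reader.createValue(); // receives the file's key
    while (reader.next(recordKey, recordValue)) {
      // Prints "value-N<TAB>key-N", confirming the swap.
      System.out.println(recordKey + "\t" + recordValue);
    }
    reader.close();
  }
}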