MapReduce word count example (output sorted by frequency)
import java.io.BufferedReader;
import java.io.FileReader;
import java.io.IOException;
import java.net.URI;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Comparator;
import java.util.HashSet;
import java.util.LinkedHashMap;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.StringTokenizer;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Counter;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.util.GenericOptionsParser;
import org.apache.hadoop.util.StringUtils;
public class WordCount1 {

  public static class TokenizerMapper
       extends Mapper<Object, Text, Text, IntWritable> {

    static enum CountersEnum { INPUT_WORDS }

    private final static IntWritable one = new IntWritable(1);
    private Text word = new Text();

    private boolean caseSensitive;
    private Set<String> patternsToSkip = new HashSet<String>();

    private Configuration conf;
    private BufferedReader fis;

    @Override
    public void setup(Context context) throws IOException, InterruptedException {
      conf = context.getConfiguration();
      caseSensitive = conf.getBoolean("wordcount.case.sensitive", true);
      // When wordcount.skip.patterns is set, read the skip-pattern file(s)
      // shipped through the distributed cache.
      if (conf.getBoolean("wordcount.skip.patterns", false)) {
        URI[] patternsURIs = Job.getInstance(conf).getCacheFiles();
        for (URI patternsURI : patternsURIs) {
          Path patternsPath = new Path(patternsURI.getPath());
          String patternsFileName = patternsPath.getName().toString();
          parseSkipFile(patternsFileName);
        }
      }
    }
    private void parseSkipFile(String fileName) {
      try {
        fis = new BufferedReader(new FileReader(fileName));
        String pattern = null;
        while ((pattern = fis.readLine()) != null) {
          patternsToSkip.add(pattern);
        }
      } catch (IOException ioe) {
        System.err.println("Caught exception while parsing the cached file '"
            + fileName + "' : " + StringUtils.stringifyException(ioe));
      }
    }
    @Override
    public void map(Object key, Text value, Context context
                    ) throws IOException, InterruptedException {
      String line = (caseSensitive) ?
          value.toString() : value.toString().toLowerCase();
      // Strip every skip pattern (treated as a regular expression) from the line.
      for (String pattern : patternsToSkip) {
        line = line.replaceAll(pattern, "");
      }
      StringTokenizer itr = new StringTokenizer(line);
      while (itr.hasMoreTokens()) {
        String token = itr.nextToken();
        // Emit every 3-character substring of the token with a count of one.
        for (int i = 0; i < token.length() - 2; i++) {
          word.set(token.substring(i, i + 3));
          context.write(word, one);
        }
        Counter counter = context.getCounter(CountersEnum.class.getName(),
            CountersEnum.INPUT_WORDS.toString());
        counter.increment(1);
      }
    }
  }
  public static class IntSumReducer
       extends Reducer<Text, IntWritable, Text, IntWritable> {

    // Buffer every (key, total) pair so the output can be sorted by count in cleanup().
    private Map<String, Integer> map = new LinkedHashMap<String, Integer>();

    public void reduce(Text key, Iterable<IntWritable> values, Context context
                       ) throws IOException, InterruptedException {
      int sum = 0;
      for (IntWritable val : values) {
        sum += val.get();
      }
      map.put(key.toString(), sum);
    }

    @Override
    public void cleanup(Context context) throws IOException, InterruptedException {
      // Emit the buffered counts in descending order of frequency.
      Map<String, Integer> sortedMap = sortMap(map);
      for (Map.Entry<String, Integer> entry : sortedMap.entrySet()) {
        context.write(new Text(entry.getKey()), new IntWritable(entry.getValue()));
      }
    }

    // Returns a LinkedHashMap whose entries are ordered by count, descending.
    public Map<String, Integer> sortMap(Map<String, Integer> unsortedMap) {
      Map<String, Integer> sorted = new LinkedHashMap<String, Integer>();
      List<Map.Entry<String, Integer>> list =
          new LinkedList<Map.Entry<String, Integer>>(unsortedMap.entrySet());
      // Sort the entries by value in descending order.
      Collections.sort(list, new Comparator<Map.Entry<String, Integer>>() {
        public int compare(Map.Entry<String, Integer> o1, Map.Entry<String, Integer> o2) {
          return o2.getValue().compareTo(o1.getValue());
        }
      });
      for (Map.Entry<String, Integer> entry : list) {
        sorted.put(entry.getKey(), entry.getValue());
      }
      return sorted;
    }
  }
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    GenericOptionsParser optionParser = new GenericOptionsParser(conf, args);
    String[] remainingArgs = optionParser.getRemainingArgs();
    if ((remainingArgs.length != 2) && (remainingArgs.length != 4)) {
      System.err.println("Usage: wordcount <in> <out> [-skip skipPatternFile]");
      System.exit(2);
    }
    Job job = Job.getInstance(conf, "word count");
    job.setJarByClass(WordCount1.class);
    job.setMapperClass(TokenizerMapper.class);
    job.setCombinerClass(IntSumReducer.class);
    job.setReducerClass(IntSumReducer.class);
    job.setOutputKeyClass(Text.class);
    job.setOutputValueClass(IntWritable.class);
    // A single reducer keeps the final output globally sorted by frequency.
    job.setNumReduceTasks(1);
    List<String> otherArgs = new ArrayList<String>();
    for (int i = 0; i < remainingArgs.length; ++i) {
      if ("-skip".equals(remainingArgs[i])) {
        // Ship the skip-pattern file through the distributed cache.
        job.addCacheFile(new Path(remainingArgs[++i]).toUri());
        job.getConfiguration().setBoolean("wordcount.skip.patterns", true);
      } else {
        otherArgs.add(remainingArgs[i]);
      }
    }
    FileInputFormat.addInputPath(job, new Path(otherArgs.get(0)));
    FileOutputFormat.setOutputPath(job, new Path(otherArgs.get(1)));
    System.exit(job.waitForCompletion(true) ? 0 : 1);
  }
}
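One way to build and run the job, as a rough sketch (assuming Hadoop is installed with `hadoop` on the PATH; the jar name wc.jar and the HDFS paths are placeholders, not part of the gist):

javac -classpath "$(hadoop classpath)" WordCount1.java
jar cf wc.jar WordCount1*.class
hadoop jar wc.jar WordCount1 -Dwordcount.case.sensitive=false /user/hadoop/input /user/hadoop/output -skip /user/hadoop/patterns.txt

The -skip argument is optional; when given, each line of the pattern file is treated as a regular expression to strip from the input before tokenizing. Generic options such as -D are consumed by GenericOptionsParser before the remaining arguments are interpreted as input and output paths.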