#!/usr/bin/env python
# mapper.py: word-count mapper for Hadoop Streaming.
import sys

# input comes from STDIN (standard input)
for line in sys.stdin:
    # remove leading and trailing whitespace
    line = line.strip()
    # split the line into words
    words = line.split()
    # increase counters
    for word in words:
        # write the results to STDOUT (standard output);
        # what we output here will be the input for the
        # Reduce step, i.e. the input for reducer.py
        #
        # tab-delimited; the trivial word count is 1
        print('%s\t%s' % (word, 1))
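To see what the mapper emits, pipe a sample line into it locally (this assumes the script above is saved as mapper.py and marked executable):

$ echo "foo foo quux labs foo bar quux" | ./mapper.py
foo	1
foo	1
quux	1
labs	1
foo	1
bar	1
quux	1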
#!/usr/bin/env python
# A generic mapper that hands each input line to your own processing code.
import sys

from my_awesome_algorithm import main_processing

for line in sys.stdin:
    # remove leading and trailing whitespace
    line = line.strip()
    # delegate the cleaned-up line to the custom per-record logic
    main_processing(line)
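The names my_awesome_algorithm and main_processing come from the gist itself, but the module body is not shown. A minimal sketch of what such a module could look like, assuming its only contract is to write tab-delimited key/value pairs to STDOUT for the reducer to consume:

# my_awesome_algorithm.py -- hypothetical body for the module imported above
import sys

def main_processing(line):
    # Placeholder logic: replace with the real per-record computation.
    # Whatever it does, it should emit tab-delimited key/value pairs so
    # that Hadoop Streaming can group them by key before the reduce step.
    for token in line.split():
        sys.stdout.write('%s\t%d\n' % (token, 1))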
#!/usr/bin/env python
# reducer.py: word-count reducer for Hadoop Streaming.
import sys

current_word = None
current_count = 0

# input comes from STDIN (the sorted output of the mappers)
for line in sys.stdin:
    line = line.strip()

    # parse the input we got from mapper.py
    word, count = line.split('\t', 1)
    try:
        count = int(count)
    except ValueError:
        # count was not a number, so silently discard this line
        continue

    # this IF-switch only works because Hadoop sorts map output
    # by key (here: word) before it is passed to the reducer
    if current_word == word:
        current_count += count
    else:
        if current_word:
            print('%s\t%s' % (current_word, current_count))
        current_count = count
        current_word = word

# do not forget to output the last word if needed!
if current_word is not None:
    print('%s\t%s' % (current_word, current_count))
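Because Hadoop Streaming simply pipes records through these scripts, the whole chain can be tested locally by simulating the shuffle with sort, and then submitted as a streaming job. The jar location and HDFS paths below are placeholders that depend on your Hadoop installation:

$ echo "foo foo quux labs foo bar quux" | ./mapper.py | sort -k1,1 | ./reducer.py
bar	1
foo	3
labs	1
quux	2

$ hadoop jar /path/to/hadoop-streaming.jar \
    -files mapper.py,reducer.py \
    -mapper mapper.py \
    -reducer reducer.py \
    -input /user/you/input \
    -output /user/you/output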