Create a gist now

Instantly share code, notes, and snippets.

Use a Dot Matrix Printer to print Tweets as they happen
#!/bin/sh
# Kenneth Finnegan, 2012
# kennethfinnegan.blogspot.com
#
# TwitterMatrixTicker
# Given a username and an ascii printer, checks for new mentions and
# prints them one at a time to the printer.
# Expected usage is either spun off into the background >/dev/null or
# on a detachable screen so you can monitor progress.
# What user do we want to monitor for tweets at (mentions of @$USEROFINTEREST)?
USEROFINTEREST="kwf"
# File paths. The staging is needed since tweets come in
# the opposite order from what I wanted; tac (cat backwards) fixes that.
# LATESTTWEET persists the last-seen tweet id across runs (per user);
# STAGING_FILE is per-process (PID suffix) so concurrent runs don't collide;
# OUTPUT_FILE is the raw device node for the USB line printer.
LATESTTWEET="/tmp/twittermatrixticker.latest.$USEROFINTEREST"
STAGING_FILE="/tmp/twittermatrixticker.staging.$$"
OUTPUT_FILE="/dev/usb/lp0"
# Twitter API requests are made progressively slower as no traffic
# is seen. INTERVAL is the number of seconds between tweets, so API
# requests are really made at INTERVAL * REQUEST_LIMIT seconds.
# INTERVAL adapts between INTERVAL_LOW and INTERVAL_HIGH in the main loop;
# LATESTUSER is scratch state used while parsing one API response.
LATESTUSER=""
INTERVAL="10"
INTERVAL_LOW="10"
INTERVAL_HIGH="150"
REQUEST_LIMIT="5"
# Check if this is the first time this user's timeline has been
# monitored; if so, preload the state file with the current highwater
# mark so we only print tweets that arrive from now on.
if [ ! -f "$LATESTTWEET" ]; then
  echo "Generating new state file for @$USEROFINTEREST"
  # -o prints only the matched "max_id_str":"..." pair (the same
  # extraction the main loop uses); without it grep emits the whole
  # JSON line and awk field 4 is not the id. Field 4 of the match,
  # split on double quotes, is the id string itself.
  curl -s "http://search.twitter.com/search.json?q=@$USEROFINTEREST&rpp=$REQUEST_LIMIT&include_entries=true&result_type=recent" |
    grep -o -e "\"max_id_str\":\"[^\"]*\"" |
    awk -F'"' '{print $4}' >"$LATESTTWEET"
else
  echo "The last tweet displayed was $(cat "$LATESTTWEET")"
fi
# Loop forever checking for tweets, printing them, then sleeping.
while true; do
  echo "TwitterMatrixTicker with $INTERVAL second interval"
  touch "$STAGING_FILE"
  # Form the twitter request. The sed stages flatten escaped quotes,
  # backticks, and backslashes to '#' so tweet text can't confuse the
  # quote-delimited parsing (or the shell) below. tee keeps a copy of
  # the sanitized response around for debugging.
  curl -s "http://search.twitter.com/search.json?q=@$USEROFINTEREST&rpp=$REQUEST_LIMIT&include_entries=true&result_type=recent&since_id=$(cat "$LATESTTWEET")" |
    sed 's/\\\"/#/g' |
    sed 's/\`/#/g' |
    sed 's/\\/#/g' |
    tee "/tmp/twittermatrixticker.debug.$$" |
    # Parse out the user names, their tweets, and the latest id number.
    grep -o -e "\"text\":\"[^\"]*\"" \
      -e "\"from_user\":\"[^\"]*\"" \
      -e "\"max_id_str\":\"[^\"]*\"" |
    # Loop through the set of found items and handle them.
    # NOTE: this loop runs in a pipeline subshell, so variables set here
    # (LATESTUSER, FIELD, VALUE) vanish after 'done'; all durable state
    # is passed out through $STAGING_FILE and $LATESTTWEET instead.
    while read -r LINE; do
      # Field 2 is the key name, field 4 the value, when split on quotes.
      FIELD="$(echo "$LINE" | awk -F'"' '{print $2}')"
      VALUE="$(echo "$LINE" | awk -F'"' '{print $4}')"
      if [ "$FIELD" = "from_user" ]; then
        # We know who sent the next tweet we see; save this
        LATESTUSER="$VALUE"
      elif [ "$FIELD" = "text" ]; then
        # We've found a tweet; stage it with the corresponding username
        echo "$LATESTUSER: $VALUE" >>"$STAGING_FILE"
        echo "Found tweet"
      elif [ "$FIELD" = "max_id_str" ]; then
        # Save the highwater mark from this request so we can pick up
        # where we left off later.
        echo "$VALUE" >"$LATESTTWEET"
        echo "The latest tweet is now $VALUE"
      fi
    done
  # Count how many tweets we ended up with, print them, then update
  # the desired interval rate and sleep out the remainder of this
  # interval if we didn't happen to get a complete REQUEST_LIMIT of tweets.
  TWEETSFOUND="$(wc -l <"$STAGING_FILE")"
  echo "Tweets found: $TWEETSFOUND"
  # Reverse tweets into chronological order, and print them one by one.
  tac "$STAGING_FILE" |
    while read -r LINE; do
      # printf instead of echo: a staged line could start with '-n'.
      printf '%s\n' "$LINE" >"$OUTPUT_FILE"
      # Spread out printings so I can feel more popular
      # and it seems less bursty
      sleep "$INTERVAL"
    done
  rm "$STAGING_FILE"
  # Check to see if there were new tweets in this request.
  # If not, slow down requests, since this user doesn't get much traffic.
  # -eq compares numerically, which also tolerates the whitespace some
  # wc implementations pad onto their output.
  if [ "$TWEETSFOUND" -eq 0 ]; then
    echo "Slow it down"
    INTERVAL="$((INTERVAL + 1))"
    if [ "$INTERVAL" -gt "$INTERVAL_HIGH" ]; then
      INTERVAL="$INTERVAL_HIGH"
    fi
  else # Found some, speed up the requests
    echo "Speed it up"
    # Decay halfway back toward the floor interval.
    INTERVAL="$((INTERVAL / 2 + INTERVAL_LOW / 2))"
  fi
  # We already slept INTERVAL once per tweet printed; sleep out the rest
  # of the request window before polling again.
  TIME_LEFT="$(( (REQUEST_LIMIT - TWEETSFOUND) * INTERVAL ))"
  sleep "$TIME_LEFT"
done
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment