Skip to content

Instantly share code, notes, and snippets.

@punchagan punchagan/install.sh
Last active Aug 29, 2015

Embed
What would you like to do?
A columned LEDStrip module for RPi
set -e
# Defaults to install where install.sh is located
INSTALL_DIR="$( cd "$(dirname "$0")" ; pwd -P )"
BUILD_DIR="${INSTALL_DIR}/BUILD"
mkdir -p "$BUILD_DIR"
pushd "$BUILD_DIR"
# Install dependencies that can be obtained from apt repos
# -y added for consistency with the installs below; without it the script
# stalls on a prompt when run unattended (and aborts under set -e on "no").
sudo apt-get install -y python-dev
## ffmpeg can either be ffmpeg or libav-tools depending on distro
sudo apt-get install -y ffmpeg || true
sudo apt-get install -y libav-tools || true
sudo apt-get install -y lame flac faad vorbis-tools
sudo apt-get install -y python-alsaaudio
sudo apt-get install -y python-numpy
sudo apt-get install -y python-virtualenv
# Create a virtualenv
VENV=./venv
virtualenv $VENV --system-site-packages
# Source it
source $VENV/bin/activate
# Install mutagen
pip install mutagen
# Install decoder
wget -c http://www.brailleweb.com/downloads/decoder-1.5XB-Unix.zip
unzip -o decoder-1.5XB-Unix.zip
cp decoder-1.5XB-Unix/{codecs.pdc,*.py} $VENV/lib/python2.7/site-packages/
rm -rf decoder-1.5XB-Unix/
# Check to see if we have git
git --version > /dev/null
# Install py-spidev
pip install -e git+https://github.com/doceme/py-spidev.git#egg=spidev
# Install RPi-LED code
pip install -e git+git@github.com:adammhaile/RPi-LPD8806.git#egg=raspledstrip
# FIXME: We need to change the update method on LPD8806 to make it faster. Add
# it as a patch.
# Restore the caller's working directory (balances the pushd above).
popd
from raspledstrip.ledstrip import LEDStrip
from raspledstrip.color import wheel_color
class ColumnedLEDStrip(LEDStrip):
def __init__(self, leds=32, columns=3, gap_leds=4):
LEDStrip.__init__(self, leds, True)
self.driver.spi.max_speed_hz = 12000000
print 'Changed spi freq to %d' % self.driver.spi.max_speed_hz
self.columns = columns
self._column_data = [0] * columns
self._gap_leds = gap_leds # + 1
self._column_leds = (leds - (self._gap_leds * (columns - 1)))/columns
self._color = 0.0
def _normalize_height(self, height, h_min=2, h_range=13):
# fixme: h_min = 9, h_range = 1, in the example code.
height = (height - h_min) / float(h_range)
if height < 0.05:
height = 0.05
elif height > 1.0:
height = 1.0
return height
def _get_color(self):
color = wheel_color(int(self._color))
self._color = self._color + 1 if self._color <= 383.9 else 0.0
return color
def _display_column(self, column_number, height, color, decay):
"""Display the data for a specific column."""
height = self._normalize_height(height)
if height < self._column_data[column_number]:
height = self._column_data[column_number] * decay
self._column_data[column_number] = height
if column_number % 2 == 0:
start = column_number * (self._gap_leds + self._column_leds)
end = int(self._column_leds * height) + start
else:
end = column_number * (self._gap_leds + self._column_leds) + self._column_leds - 1
start = end - int(self._column_leds * height)
if start != end:
self.fill(color, start, end)
def display_data(self, data, color=None, decay=0.5):
"""Data is a list of heights.
The number of columns should be equal to the number of columns! We
could improve this, based on how we use it.
"""
# FIXME: Whatever the f$#@ this is! Why are we ignoring the color arg?!
color = self._get_color()
self.fillOff()
for column, height in enumerate(data):
self._display_column(column, height, color, decay)
self.update()
if __name__ == '__main__':
import time
import random
led = ColumnedLEDStrip()
led.all_off()
for _ in xrange(100000):
data = [1, 1, 1] # [random.random() for _ in range(led.columns)]
led.display_data(data)
print data
time.sleep(0.01)
import logging
import alsaaudio as aa
import decoder
import numpy as np

# Number of audio frames read (and FFT'd) per chunk.
CHUNK_SIZE = 2048
def read_musicfile_in_chunks(path, chunk_size=CHUNK_SIZE, play_audio=True):
    """Read the music file at the given path, in chunks of the given size.

    Yields (chunk, sample_rate) pairs.  When play_audio is true, each chunk
    is also written to the default ALSA playback device before it is yielded.

    path: filename in any format the `decoder` module can open.
    chunk_size: number of frames to read per chunk.
    """
    musicfile = decoder.open(path)
    sample_rate = musicfile.getframerate()
    num_channels = musicfile.getnchannels()
    if play_audio:
        output = aa.PCM(aa.PCM_PLAYBACK, aa.PCM_NORMAL)
        output.setchannels(num_channels)
        output.setrate(sample_rate)
        output.setformat(aa.PCM_FORMAT_S16_LE)
        # Bug fix: honor the chunk_size argument.  The body previously used
        # the module-level CHUNK_SIZE here and below, so the parameter was
        # silently ignored.
        output.setperiodsize(chunk_size)
    # fixme: we could do the writing to audio in a thread ... ?
    while True:
        chunk = musicfile.readframes(chunk_size)
        if len(chunk) == 0:
            break
        if play_audio:
            output.write(chunk)
        yield chunk, sample_rate
def calculate_column_frequency(min_frequency, max_frequency, columns):
    """Split the given frequency range in 'column' number of ranges.

    The function splits up [min_frequency, max_frequency] into smaller
    ranges, each spanning an equal number of octaves.

    Returns a list of (low, high) pairs in Hz, one per column.
    """
    logging.debug('Calculating frequencies for %d columns.', columns)
    # float() guards against silent truncation under Python 2 integer
    # division (e.g. 12000/400 is exact, but 20000/300 would floor).
    octaves = np.log2(float(max_frequency) / min_frequency)
    logging.debug('Octaves in selected frequency range ... %s', octaves)
    octaves_per_column = octaves / columns
    frequency_limits = [
        min_frequency * 2 ** (octaves_per_column * n) for n in range(columns + 1)
    ]
    # Materialize the pairing so callers can take len() and index the result
    # (zip returns a one-shot iterator under Python 3).
    return list(zip(frequency_limits[:-1], frequency_limits[1:]))
def piff(val, sample_rate, chunk_size=None):
    """Return the power array index corresponding to a particular frequency.

    val: frequency in Hz.
    sample_rate: audio sample rate in Hz.
    chunk_size: samples per FFT chunk; defaults to the module-wide
        CHUNK_SIZE when not given (kept for backward compatibility).
    """
    if chunk_size is None:
        chunk_size = CHUNK_SIZE
    return int(chunk_size * val / sample_rate)
def calculate_levels(data, sample_rate, frequency_limits):
    """Calculate a perceived-loudness level for each frequency band.

    data: raw 16-bit interleaved stereo PCM bytes for one chunk.
    sample_rate: sample rate of the audio, in Hz.
    frequency_limits: sequence of (low, high) Hz pairs, one per column.
    Returns a list with one log10-power level per band.

    Initial FFT code inspired from the code posted here:
    http://www.raspberrypi.org/phpBB3/viewtopic.php?t=35838&p=454041
    Optimizations from work by Scott Driscoll:
    http://www.instructables.com/id/Raspberry-Pi-Spectrum-Analyzer-with-RGB-LED-Strip-/
    """
    # create a numpy array. This won't work with a mono file, stereo only.
    data_stereo = np.frombuffer(data, dtype=np.int16)
    data = data_stereo[::2]  # pull out the even values, just using left channel
    # Bug fix: derive band indices from the actual chunk length instead of
    # the module-wide CHUNK_SIZE — the final chunk from readframes() can be
    # shorter, which previously produced wrong slice bounds.
    chunk_size = len(data)

    # if you take an FFT of a chunk of audio, the edges will look like
    # super high frequency cutoffs. Applying a window tapers the edges
    # of each end of the chunk down to zero.
    window = np.hanning(len(data))
    data = data * window

    # Apply FFT - real data.
    # We drop the last element in array to make its length chunk_size / 2.
    fourier = np.fft.rfft(data)[:-1]
    # Calculate the power spectrum
    power = np.abs(fourier) ** 2

    def _band_index(freq):
        # Power-array index for a frequency, relative to this chunk's size.
        return int(chunk_size * freq / sample_rate)

    # take the log10 of the per-band power sum to approximate how human
    # ears perceive sound levels.
    matrix = [
        np.log10(np.sum(power[_band_index(low):_band_index(high)]))
        for low, high in frequency_limits
    ]
    return matrix
if __name__ == '__main__':
frequency_limits = calculate_column_frequency(400, 12000)
for chunk, sample_rate in read_musicfile_in_chunks('sample1.mp3', play_audio=True):
data = calculate_levels(chunk, sample_rate, frequency_limits)
print data
# A simple run.py for the demo
import time
import sys
from leds import ColumnedLEDStrip
from music import calculate_levels, read_musicfile_in_chunks, calculate_column_frequency
from shairplay import initialize_shairplay, shutdown_shairplay, RaopCallbacks
# Music file to use, from the command line, with a default fallback.
if len(sys.argv) > 1:
    path = sys.argv[1]
else:
    path = 'sample.mp3'

# Display geometry: 10 columns spread over 80 LEDs with no gap LEDs.
columns = 10
gap_leds = 0
total_leds = 80
led = ColumnedLEDStrip(leds=total_leds, columns=columns, gap_leds=gap_leds)
led.all_off()
time.sleep(0.1)

# Split the audible range (20 Hz - 20 kHz) into equal-octave bands,
# one per column.
frequency_limits = calculate_column_frequency(20, 20000, columns)

# Local-file playback path kept for reference; this demo streams audio
# via shairplay instead.
# for chunk, sample_rate in read_musicfile_in_chunks(path, play_audio=True):
#     data = calculate_levels(chunk, sample_rate, frequency_limits)
#     led.display_data(data)
class SampleCallbacks(RaopCallbacks):
def audio_init(self, bits, channels, samplerate):
print "Initializing", bits, channels, samplerate
def audio_process(self, session, buffer):
print "Processing", + len(buffer), "bytes of audio"
data = calculate_levels(buffer, 44100, frequency_limits)
led.display_data(data)
def audio_destroy(self, session):
print "Destroying"
def audio_set_volume(self, session, volume):
print "Set volume to", volume
def audio_set_metadata(self, session, metadata):
print "Got", len(metadata), "bytes of metadata"
def audio_set_coverart(self, session, coverart):
print "Got", len(coverart), "bytes of coverart"
path = "/home/pi/spectrum-analyzer/shairplay/src/lib/.libs/"
initialize_shairplay(path, SampleCallbacks)
# Idle until interrupted.  The original busy-waited (`while True: pass`),
# pinning a CPU core, and its try only wrapped the loop body, so a
# KeyboardInterrupt delivered between iterations escaped uncaught.
# Wrapping the whole loop and sleeping fixes both.
try:
    while True:
        time.sleep(1)
except KeyboardInterrupt:
    shutdown_shairplay()
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment
You can’t perform that action at this time.