@moui72
Last active March 25, 2019 20:26
import webrtcvad
import wave
import collections
import contextlib
import sys
import glob
from os import walk
from os.path import join, basename, getctime, expanduser
from json import dump, dumps
import csv
import time
import re
from itertools import takewhile
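
# Note: webrtcvad is the py-webrtcvad package (pip install webrtcvad); the
# remaining imports are from the standard library.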


class Frame(object):
    """Represents a "frame" of audio data."""

    def __init__(self, bytes, timestamp, duration):
        self.bytes = bytes
        self.timestamp = timestamp
        self.duration = duration
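
# Frame.timestamp and Frame.duration are expressed in seconds; frame_generator
# below derives them from the byte length of each chunk.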


def frame_generator(frame_duration_ms, audio, sample_rate):
    """Generates audio frames from PCM audio data.

    Takes the desired frame duration in milliseconds, the PCM data, and
    the sample rate.

    Yields Frames of the requested duration.
    """
    n = int(sample_rate * (frame_duration_ms / 1000.0) * 2)
    offset = 0
    timestamp = 0.0
    duration = (float(n) / sample_rate) / 2.0
    while offset + n < len(audio):
        yield Frame(audio[offset:offset + n], timestamp, duration)
        timestamp += duration
        offset += n
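
# For example: with 16 kHz, 16-bit (2-byte) mono audio, a 30 ms frame is
# int(16000 * 0.030 * 2) = 960 bytes, so one second of audio yields roughly
# 33 such frames.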


def read_wave(path):
    """Reads a .wav file.

    Takes the path and returns (PCM audio data, sample rate, duration in
    seconds). The file must be 16-bit mono at 8, 16, 32, or 48 kHz.
    """
    with contextlib.closing(wave.open(path, 'rb')) as wf:
        sample_rate = wf.getframerate()
        sample_width = wf.getsampwidth()
        pcm_data = wf.readframes(wf.getnframes())
        num_channels = wf.getnchannels()
        assert num_channels == 1
        assert sample_width == 2
        assert sample_rate in (8000, 16000, 32000, 48000)
        return pcm_data, sample_rate, wf.getnframes() / float(sample_rate)
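
# For instance, a 2-second 16 kHz, 16-bit mono recording comes back as
# (64000 bytes of PCM data, 16000, 2.0), since 16000 samples/s * 2 bytes * 2 s
# = 64000 bytes.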


def vad_collector(sample_rate, frame_duration_ms,
                  padding_duration_ms, vad, frames):
    """Filters out non-voiced audio frames.

    Given a webrtcvad.Vad and a source of audio frames, yields only
    the voiced audio.

    Uses a padded, sliding window algorithm over the audio frames.
    When more than 90% of the frames in the window are voiced (as
    reported by the VAD), the collector triggers and begins collecting
    audio frames. Then the collector waits until 90% of the frames in
    the window are unvoiced to detrigger.

    The window is padded at the front and back to provide a small
    amount of silence or the beginnings/endings of speech around the
    voiced frames.

    Arguments:

    sample_rate - The audio sample rate, in Hz.
    frame_duration_ms - The frame duration in milliseconds.
    padding_duration_ms - The amount to pad the window, in milliseconds.
    vad - An instance of webrtcvad.Vad.
    frames - a source of audio frames (sequence or generator).

    Returns: A generator that yields lists of voiced Frames, one list per
    detected speech segment.
    """
    num_padding_frames = int(padding_duration_ms / frame_duration_ms)
    # We use a deque for our sliding window/ring buffer.
    ring_buffer = collections.deque(maxlen=num_padding_frames)
    # We have two states: TRIGGERED and NOTTRIGGERED. We start in the
    # NOTTRIGGERED state.
    triggered = False
    voiced_frames = []
    for frame in frames:
        is_speech = vad.is_speech(frame.bytes, sample_rate)
        if not triggered:
            ring_buffer.append((frame, is_speech))
            num_voiced = len([f for f, speech in ring_buffer if speech])
            # If we're NOTTRIGGERED and more than 90% of the frames in
            # the ring buffer are voiced frames, then enter the
            # TRIGGERED state.
            if num_voiced > 0.9 * ring_buffer.maxlen:
                triggered = True
                # We want to collect all the audio we see from now until
                # we are NOTTRIGGERED, but we have to start with the
                # audio that's already in the ring buffer.
                for f, s in ring_buffer:
                    voiced_frames.append(f)
                ring_buffer.clear()
        else:
            # We're in the TRIGGERED state, so collect the audio data
            # and add it to the ring buffer.
            voiced_frames.append(frame)
            ring_buffer.append((frame, is_speech))
            num_unvoiced = len([f for f, speech in ring_buffer if not speech])
            # If more than 90% of the frames in the ring buffer are
            # unvoiced, then enter NOTTRIGGERED and yield whatever
            # audio we've collected.
            if num_unvoiced > 0.9 * ring_buffer.maxlen:
                triggered = False
                yield [f for f in voiced_frames]
                ring_buffer.clear()
                voiced_frames = []
    # If we have any leftover voiced audio when we run out of input,
    # yield it.
    if voiced_frames:
        yield [f for f in voiced_frames]
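
# Worked example with the parameters used in main() below
# (frame_duration_ms=20, padding_duration_ms=300): the ring buffer holds
# int(300 / 20) = 15 frames, and 0.9 * 15 = 13.5, so the collector triggers
# once at least 14 of the 15 buffered frames are voiced and detriggers once at
# least 14 of them are unvoiced. (Note that main() generates 30 ms frames but
# passes 20 ms here, so the 15-frame window actually spans about 450 ms of
# audio.)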


def collapse(table):
    """Collapses per-recording rows into one row per base filename.

    Recordings whose filenames share the prefix before "_" (e.g. name_r1,
    name_r2) are merged; the trailing silence of the "r1" recording and the
    leading silence of the other recording(s) are summed into an "irt" value.
    """
    byFn = {}
    for row in table:
        splitfn = row["Filename"].split("_")
        if len(splitfn) < 2:
            # filename does not match the expected pattern; report and skip it
            print(row["Filename"])
            continue
        if splitfn[0] not in byFn:
            byFn[splitfn[0]] = {
                k: v for (k, v) in row.items() if k not in (
                    "Leading",
                    "Trailing"
                )
            }
            byFn[splitfn[0]]["irt"] = 0
            byFn[splitfn[0]]["Filename"] = splitfn[0]
        if "r1" in splitfn[1]:
            byFn[splitfn[0]]["irt"] += row["Trailing"]
        else:
            byFn[splitfn[0]]["irt"] += row["Leading"]
    return byFn
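
# Illustration (hypothetical filenames): rows for "123_r1.wav" and "123_r2.wav"
# collapse into a single entry keyed "123" whose "irt" is the Trailing value of
# the r1 row plus the Leading value of the r2 row.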


def main(args):
    try:
        fdir = args[0]
    except IndexError:
        fdir = "~/dissertation/recordings/16k/"
    main_start = time.time()
    fns = []
    rows = []
    looptimes = []
    fails = []
    # Vad takes an integer parameter 0-3 indicating how aggressively
    # non-speech is filtered (0 is least aggressive, 3 is most aggressive)
    vad = webrtcvad.Vad(1)
    # build a list of recordings
    for root, dirs, files in walk(expanduser(fdir)):
        if "prac" in dirs:
            # eliminate practice items
            dirs.remove("prac")
        for name in files:
            if "wav" in name and "P" not in name:
                # We only want .wav files
                # Items prefaced with "P" are also practice
                fns.append(join(root, name))
    for fn in fns:
        # iterate through sound files and find speech blocks
        start = time.time()
        audio, sample_rate, length = read_wave(fn)
        bfn = basename(fn)
        # convert audio into chunks of a given length (in ms)
        frames = list(frame_generator(30, audio, sample_rate))
        # find the segments that are speech
        segments = list(vad_collector(sample_rate, 20, 300, vad, frames))
        # if no speech segments were found, record a failure and skip the file
        if len(segments) < 1:
            fails.append(fn)
            continue
        # extract participant and item from filename
        p, item = bfn.split(".", 1)
        item = "".join(takewhile(str.isdigit, item))
        rows.append({
            "Filename": bfn,
            # assume the leading silence ends when the first voiced segment
            # begins; also convert to ms
            "Leading": int(segments[0][0].timestamp * 1000),
            # assume the trailing silence starts when the last voiced segment
            # ends; also convert to ms
            "Trailing": int((length - segments[-1][-1].timestamp) * 1000),
            "Length": int(length * 1000),  # full file duration in ms
            "isFiller": "F" in bfn,
            "Condition_Q": "Q" in bfn,
            "Condition_GP": "Y" in bfn,
            "Participant": p,
            "Item": item
        })
        looptimes.append(time.time() - start)
    total_runtime = time.time() - main_start
    dts = time.strftime("%Y-%m-%d_%H%M%S")
    try:
        outf = args[1]
    except IndexError:
        outf = f"output-{dts}.csv"
    keys = rows[0].keys()
    with open(outf, "w") as outcsv:
        writer = csv.DictWriter(outcsv, fieldnames=keys)
        writer.writeheader()
        for row in rows:
            writer.writerow(row)
    ckeys = [field for field in keys if field not in (
        "Leading",
        "Trailing"
    )]
    ckeys.append("irt")
    with open("irt-" + outf, "w") as outirt:
        writer = csv.DictWriter(outirt, fieldnames=ckeys)
        writer.writeheader()
        for row in collapse(rows).values():
            writer.writerow(row)
    print(f"Processed {len(fns)} files in {total_runtime * 1000:.2f} ms "
          f"with {len(fails)} failures. Longest iteration was "
          f"{max(looptimes) * 1000:.2f} ms, shortest was "
          f"{min(looptimes) * 1000:.2f} ms, and the average was "
          f"{sum(looptimes) / len(looptimes) * 1000:.2f} ms.")


if __name__ == '__main__':
    main(sys.argv[1:])
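
# Usage sketch (assuming the gist is saved as, e.g., vad_silences.py):
#
#     python vad_silences.py ~/dissertation/recordings/16k/ silences.csv
#
# This walks the given directory for non-practice .wav recordings and writes
# two files: silences.csv (per-recording leading/trailing silence, in ms) and
# irt-silences.csv (one row per collapsed recording with the summed "irt").
# Both arguments are optional; the directory defaults to
# ~/dissertation/recordings/16k/ and the output name defaults to a timestamped
# output-<date>.csv.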