@moui72
Created March 28, 2019 20:04
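"""Locate voiced regions in a directory of mono WAV recordings using webrtcvad.

For each recording, the script band-pass filters the audio, splits it into
frames, runs a WebRTC VAD over the frames, and writes the leading and trailing
silence (in ms) per file to a CSV. Rows sharing a filename prefix are then
collapsed into a second CSV whose "irt" column sums the r1 recording's
trailing silence with its counterpart's leading silence; run details are
dumped to a JSON log.
"""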
import webrtcvad
import wave
import collections
import contextlib
import sys
import glob
from os import walk
from os.path import join, basename, getctime
from json import dump, dumps
import csv
import time
import re
from pydub import AudioSegment
from pydub import scipy_effects
from itertools import takewhile


class Frame(object):
    """Represents a "frame" of audio data."""

    def __init__(self, bytes, timestamp, duration):
        self.bytes = bytes
        self.timestamp = timestamp
        self.duration = duration


def band_pass(file, low=100, high=3200):
    bpstart = time.time()
    audio = AudioSegment.from_wav(file)
    audio = audio.band_pass_filter(low, high)
    bpend = time.time() - bpstart
    print(f"Band pass of {basename(file)} took {bpend * 1000:<.2f}ms.")
    return audio.raw_data
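
# Hedged usage sketch for band_pass() (main() below filters in place via pydub
# rather than calling this helper); "some_recording.wav" is a placeholder path:
#
#     pcm = band_pass("some_recording.wav", low=100, high=3200)
#     # pcm is raw PCM bytes, ready to hand to frame_generator()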


def frame_generator(frame_duration_ms, audio, sample_rate):
    """Generates audio frames from PCM audio data.
    Takes the desired frame duration in milliseconds, the PCM data, and
    the sample rate.
    Yields Frames of the requested duration.
    """
    n = int(sample_rate * (frame_duration_ms / 1000.0) * 2)
    offset = 0
    timestamp = 0.0
    duration = (float(n) / sample_rate) / 2.0
    while offset + n < len(audio):
        yield Frame(audio[offset:offset + n], timestamp, duration)
        timestamp += duration
        offset += n
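
# Worked example of the frame math above for 16 kHz, 16-bit mono audio (the
# "* 2" and the "/ 2.0" account for the two bytes per sample): a 30 ms frame is
#     n = 16000 * 0.030 * 2 = 960 bytes  ->  480 samples, duration 0.030 s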


def read_wave(path):
    """Reads a .wav file.
    Takes the path, and returns (PCM audio data, sample rate, duration in s).
    """
    with contextlib.closing(wave.open(path, 'rb')) as wf:
        sample_rate = wf.getframerate()
        sample_width = wf.getsampwidth()
        pcm_data = wf.readframes(wf.getnframes())
        num_channels = wf.getnchannels()
        assert num_channels == 1
        assert sample_width == 2
        assert sample_rate in (8000, 16000, 32000, 48000)
        return pcm_data, sample_rate, wf.getnframes() / float(sample_rate)


def vad_collector(sample_rate, frame_duration_ms,
                  padding_duration_ms, vad, frames):
    """Filters out non-voiced audio frames.
    Given a webrtcvad.Vad and a source of audio frames, yields only
    the voiced audio.
    Uses a padded, sliding window algorithm over the audio frames.
    When more than 90% of the frames in the window are voiced (as
    reported by the VAD), the collector triggers and begins yielding
    audio frames. Then the collector waits until 90% of the frames in
    the window are unvoiced to detrigger.
    The window is padded at the front and back to provide a small
    amount of silence or the beginnings/endings of speech around the
    voiced frames.
    Arguments:
    sample_rate - The audio sample rate, in Hz.
    frame_duration_ms - The frame duration in milliseconds.
    padding_duration_ms - The amount to pad the window, in milliseconds.
    vad - An instance of webrtcvad.Vad.
    frames - a source of audio frames (sequence or generator).
    Returns: A generator that yields PCM audio data.
    """
    num_padding_frames = int(padding_duration_ms / frame_duration_ms)
    # We use a deque for our sliding window/ring buffer.
    ring_buffer = collections.deque(maxlen=num_padding_frames)
    # We have two states: TRIGGERED and NOTTRIGGERED. We start in the
    # NOTTRIGGERED state.
    triggered = False
    voiced_frames = []
    for frame in frames:
        is_speech = vad.is_speech(frame.bytes, sample_rate)
        if not triggered:
            ring_buffer.append((frame, is_speech))
            num_voiced = len([f for f, speech in ring_buffer if speech])
            # If we're NOTTRIGGERED and more than 90% of the frames in
            # the ring buffer are voiced frames, then enter the
            # TRIGGERED state.
            if num_voiced > 0.9 * ring_buffer.maxlen:
                triggered = True
                # We want to yield all the audio we see from now until
                # we are NOTTRIGGERED, but we have to start with the
                # audio that's already in the ring buffer.
                for f, s in ring_buffer:
                    voiced_frames.append(f)
                ring_buffer.clear()
        else:
            # We're in the TRIGGERED state, so collect the audio data
            # and add it to the ring buffer.
            voiced_frames.append(frame)
            ring_buffer.append((frame, is_speech))
            num_unvoiced = len([f for f, speech in ring_buffer if not speech])
            # If more than 90% of the frames in the ring buffer are
            # unvoiced, then enter NOTTRIGGERED and yield whatever
            # audio we've collected.
            if num_unvoiced > 0.9 * ring_buffer.maxlen:
                triggered = False
                yield [f for f in voiced_frames]
                ring_buffer.clear()
                voiced_frames = []
    # If we have any leftover voiced audio when we run out of input,
    # yield it.
    if voiced_frames:
        yield [f for f in voiced_frames]
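
# Minimal sketch of how the pieces above fit together to measure leading and
# trailing silence in one file ("example.wav" is a placeholder; main() below
# does the same thing via pydub instead of read_wave, with its own parameters):
#
#     vad = webrtcvad.Vad(2)
#     pcm, sample_rate, length_s = read_wave("example.wav")
#     frames = list(frame_generator(30, pcm, sample_rate))
#     segments = list(vad_collector(sample_rate, 30, 300, vad, frames))
#     if segments:
#         leading_ms = int(segments[0][0].timestamp * 1000)
#         last = segments[-1][-1]
#         trailing_ms = int((length_s - (last.timestamp + last.duration)) * 1000)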


def collapse(table):
    byFn = {}
    for row in table:
        splitfn = row["Filename"].split("_")
        if len(splitfn) < 2:
            print(row["Filename"])
            continue
        if len(byFn.keys()) < 1 or splitfn[0] not in byFn:
            byFn[splitfn[0]] = {
                k: v for (k, v) in row.items() if k not in (
                    "Leading",
                    "Trailing",
                    "Length",
                    "Filename"
                )
            }
            byFn[splitfn[0]]["irt"] = 0
            byFn[splitfn[0]]["File_pair"] = splitfn[0]
        if "r1" in splitfn[1]:
            if row["Trailing"] != "NA":
                byFn[splitfn[0]]["irt"] += row["Trailing"]
        else:
            if row["Leading"] != "NA":
                byFn[splitfn[0]]["irt"] += row["Leading"]
    return byFn
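
# Hedged illustration of collapse() with made-up numbers: given rows for a
# hypothetical pair "103.21F_r1.wav" (Trailing=350) and its counterpart
# "103.21F_r2.wav" (Leading=420), the collapsed entry keyed "103.21F" gets
# irt = 350 + 420 = 770, i.e. the r1 trailing silence plus the counterpart's
# leading silence, both in ms.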


def main(args):
    dts = time.strftime("%Y-%m-%d_%H%M%S")
    try:
        fdir = args[0]
    except IndexError:
        fdir = "~/dissertation/recordings/16k/"
    try:
        agg = int(args[1])
    except IndexError:
        agg = 2
    dirs = fdir.split("/")
    quality = "44.1k"
    subj = "all"
    for d in dirs:
        if "k" in d:
            quality = d
    if "k" not in dirs[-1] and len(dirs[-1]) > 0:
        subj = dirs[-1]
    outf = f"{subj}-{quality}_agg-{agg}_{dts}.csv"
    main_start = time.time()
    fns = []
    rows = []
    looptimes = []
    fails = []
    multiple_segs = []
    # vad takes an integer parameter 0-3 to indicate how aggressively to
    # ignore non-voice (0 is least aggressive, 3 is most)
    vad = webrtcvad.Vad(agg)
    for root, dirs, files in walk(fdir):
        # build a list of recordings
        if "prac" in dirs:
            # eliminate practice items
            dirs.remove("prac")
        for name in files:
            if "wav" in name and "P" not in name:
                # We only want .wav files
                # Items prefaced with "P" are also practice
                fns.append(join(root, name))
    print(f"Preparation took {time.time()-main_start:>.4f}s")
    proc = 0
    # guard against a zero step when there are fewer than five files
    tenth = max(1, round(len(fns) / 10))
    for fn in fns:
        proc += 1
        if proc % tenth == 0:
            print(f"Elapsed after {proc} files: {time.time()-main_start:>.4f}s")
        # iterate through sound files and find speech blocks
        start = time.time()
        audioseg = AudioSegment.from_file(fn)
        audio = audioseg.band_pass_filter(220, 3200, 8).raw_data
        sample_rate = audioseg.frame_rate
        length = len(audioseg) / 1000.0
        bfn = basename(fn)
        # convert audio into chunks of a given length (in ms)
        frames = list(frame_generator(30, audio, sample_rate))
        # find the segments that are speech (segments is a list of
        # lists of frames that are voiced)
        segments = list(vad_collector(sample_rate, 10, 100, vad, frames))
        # extract participant and item from filename
        p, item = bfn.split(".", 1)
        item = "".join(takewhile(str.isdigit, item))
        # if we found no voicing, set Leading and Trailing to "NA"
        if len(segments) < 1:
            fails.append(fn)
            rows.append({
                "Filename": bfn,
                "Leading": "NA",
                "Trailing": "NA",
                "Length": len(audioseg),  # full file duration
                "isFiller": "F" in bfn,
                "Condition_Q": "Q" in bfn,
                "Condition_GP": "Y" in bfn,
                "Participant": p,
                "Item": item
            })
            continue
        if len(segments) > 1:
            multiple_segs.append({
                "file": fn,
                "ends": [t[-1].timestamp + t[-1].duration for t in segments]
            })
            last_end = segments[-1][-1].timestamp + segments[-1][-1].duration
            if length - last_end < 0.1 and "r1" in bfn:
                # sketchy trailing value (< 100 ms) for an r1;
                # see if the previous segment's end makes more sense
                prev_end = (segments[-2][-1].timestamp
                            + segments[-2][-1].duration)
                if 0.1 < length - prev_end < 1:
                    segments[-1] = segments[-2]
        rows.append({
            "Filename": bfn,
            # segments is a list of lists of frames that are voiced
            # assume the leading silence ends when the first voiced segment
            # begins; also convert to ms
            "Leading": int(segments[0][0].timestamp * 1000),
            # assume the trailing silence begins when the last voiced segment
            # ends; also, convert to ms
            "Trailing": int((length - (segments[-1][-1].timestamp
                                       + segments[-1][-1].duration)) * 1000),
            "Length": len(audioseg),  # full file duration in ms
            "isFiller": "F" in bfn,
            "Condition_Q": "Q" in bfn,
            "Condition_GP": "Y" in bfn,
            "Participant": p,
            "Item": item
        })
        looptimes.append(time.time() - start)
    total_runtime = time.time() - main_start
    keys = rows[0].keys()
    with open(outf, "w") as outcsv:
        writer = csv.DictWriter(outcsv, fieldnames=keys)
        writer.writeheader()
        for row in rows:
            writer.writerow(row)
    shortest = 200
    longest = 15000
    crows = collapse(rows)
    ckeys = list(crows.values())[0].keys()
    lo = [r["irt"] for r in crows.values() if r["irt"] < shortest]
    hi = [r["irt"] for r in crows.values() if r["irt"] > longest]
    with open("irt-" + outf, "w") as outirt:
        writer = csv.DictWriter(outirt, fieldnames=ckeys)
        writer.writeheader()
        for row in crows.values():
            writer.writerow(row)
print(f"Processed {len(fns)} files in {(total_runtime):>.2f} s " +
f"with {len(fails)} failures. Longest iteration was " +
f"{max(looptimes):>.4f} ms, shortest was {min(looptimes):>.4f} ms " +
f"and average was {sum(looptimes)/len(looptimes):>.4f} ms")
print(
f"Collapsing took {time.time()-main_start-total_runtime:.4f} s. There were {len(lo)} IRTs below {shortest}ms and {len(hi)} IRTs above {longest/1000}s out of {len(crows)} total IRTs calculated.")
print(f"Total runtime was {time.time() - main_start:.2f} s.")
    log = {
        "times": looptimes,
        "failed": fails,
        "multiple voiced segments": multiple_segs,
        "total runtime": round(time.time() - main_start, 4)
    }
    with open('log-' + dts + '.json', 'w') as outjson:
        dump(log, outjson, indent=2)


if __name__ == '__main__':
    main(sys.argv[1:])
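
# Hedged command-line sketch (the script filename is a placeholder; the two
# optional arguments are the recordings directory, which defaults to
# ~/dissertation/recordings/16k/, and the VAD aggressiveness 0-3, default 2):
#
#     python vad_silences.py ~/dissertation/recordings/16k/ 2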