Created
November 20, 2024 04:23
-
-
Save paulsowman/8f7bf5bcda9b3657b5a0a8502f81e560 to your computer and use it in GitHub Desktop.
Audio Envelope Processing for MEG/EEG Analysis. Script for processing audio signals and adding them as additional channels to MEG/EEG data using MNE-Python. Extracts amplitude envelopes from raw audio and specific frequency bands (delta, theta, gamma), handles temporal alignment with MEG/EEG recordings, and provides flexible options for edge cases.
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
#This is a sketch, none of the paths etc. are real | |
from enum import Enum
from pathlib import Path

import mne
import numpy as np
import pandas as pd
from scipy import signal
from scipy.io import wavfile
class PaddingMethod(Enum): | |
ZERO = 'zero' | |
HOLD = 'hold' | |
CROP = 'crop' | |
def extract_band_envelopes(audio, fs, bands): | |
""" | |
Extract envelopes for specified frequency bands. | |
Parameters | |
---------- | |
audio : array-like | |
Raw audio signal | |
fs : float | |
Sampling frequency | |
bands : dict | |
Dictionary of frequency bands with format {name: (low_freq, high_freq)} | |
Returns | |
------- | |
dict | |
Dictionary containing envelope for each band | |
""" | |
envelopes = {} | |
# Extract envelope of raw signal | |
analytic_signal = signal.hilbert(audio) | |
envelopes['raw'] = np.abs(analytic_signal) | |
# Extract envelopes for each band | |
for band_name, (low_freq, high_freq) in bands.items(): | |
nyq = 0.5 * fs | |
b, a = signal.butter(4, [low_freq/nyq, high_freq/nyq], btype='band') | |
filtered = signal.filtfilt(b, a, audio) | |
analytic_signal = signal.hilbert(filtered) | |
envelopes[band_name] = np.abs(analytic_signal) | |
return envelopes | |
def align_envelope(envelope, n_samples, offset_samples, target_fs, padding_method=PaddingMethod.ZERO): | |
""" | |
Align envelope data with MEG/EEG data handling edge cases. | |
Parameters | |
---------- | |
envelope : array-like | |
Downsampled envelope data | |
n_samples : int | |
Number of samples in MEG/EEG data | |
offset_samples : int | |
Offset in samples | |
target_fs : float | |
Target sampling frequency | |
padding_method : PaddingMethod | |
Method to handle edge cases: 'zero', 'hold', or 'crop' | |
Returns | |
------- | |
array-like | |
Aligned envelope data | |
""" | |
if padding_method == PaddingMethod.CROP: | |
# Determine overlap region | |
if offset_samples < 0: # Audio starts before MEG/EEG | |
start_idx = abs(offset_samples) | |
end_idx = min(len(envelope), start_idx + n_samples) | |
return envelope[start_idx:end_idx] | |
else: # Audio starts after MEG/EEG | |
start_idx = 0 | |
end_idx = min(len(envelope), n_samples - offset_samples) | |
result = np.zeros(n_samples) | |
result[offset_samples:offset_samples + end_idx] = envelope[start_idx:end_idx] | |
return result[:n_samples] | |
else: # ZERO or HOLD padding | |
result = np.zeros(n_samples) | |
if offset_samples < 0: # Audio starts before MEG/EEG | |
start_idx = abs(offset_samples) | |
end_idx = min(len(envelope), start_idx + n_samples) | |
result[:end_idx-start_idx] = envelope[start_idx:end_idx] | |
# Handle remainder if audio is shorter than MEG/EEG | |
if end_idx-start_idx < n_samples and padding_method == PaddingMethod.HOLD: | |
result[end_idx-start_idx:] = envelope[end_idx-1] | |
else: # Audio starts after MEG/EEG | |
available_samples = min(len(envelope), n_samples - offset_samples) | |
result[offset_samples:offset_samples + available_samples] = envelope[:available_samples] | |
# Handle remainder if audio is shorter than remaining MEG/EEG | |
if (offset_samples + available_samples < n_samples and | |
padding_method == PaddingMethod.HOLD and | |
len(envelope) > 0): | |
result[offset_samples + available_samples:] = envelope[-1] | |
return result | |
def process_audio_for_mne(audio_path, raw, offset_info, target_fs, audio_type='participant', | |
padding_method=PaddingMethod.ZERO): | |
""" | |
Process audio and prepare it for adding to MNE Raw object. | |
Parameters | |
---------- | |
audio_path : str or Path | |
Path to audio file | |
raw : mne.io.Raw | |
MNE Raw object to add channels to | |
offset_info : float | |
Time offset in seconds | |
target_fs : float | |
Target sampling frequency (should match MEG/EEG data) | |
audio_type : str | |
'participant' or 'interviewer' to label channels | |
padding_method : PaddingMethod | |
Method to handle edge cases: 'zero', 'hold', or 'crop' | |
Returns | |
------- | |
dict | |
Dictionary of processed envelopes ready for MNE | |
""" | |
bands = { | |
'delta': (1, 4), | |
'theta': (4, 8), | |
'gamma': (30, 50) | |
} | |
# Load audio | |
audio, original_fs = signal.read(audio_path) | |
# Extract all envelopes | |
envelopes = extract_band_envelopes(audio, original_fs, bands) | |
# Process each envelope for MNE | |
processed_envelopes = {} | |
n_samples = len(raw.times) | |
offset_samples = int(offset_info * target_fs) | |
for name, envelope in envelopes.items(): | |
# Anti-aliasing filter | |
nyq = 0.5 * target_fs | |
b, a = signal.butter(4, 0.8 * nyq, fs=original_fs) | |
envelope_filtered = signal.filtfilt(b, a, envelope) | |
# Downsample | |
downsample_factor = int(original_fs / target_fs) | |
envelope_downsampled = envelope_filtered[::downsample_factor] | |
# Align and handle edge cases | |
final_envelope = align_envelope( | |
envelope_downsampled, | |
n_samples, | |
offset_samples, | |
target_fs, | |
padding_method | |
) | |
processed_envelopes[f'{audio_type}_{name}'] = final_envelope | |
return processed_envelopes | |
def add_audio_envelopes_to_raw(raw, audio_paths, offset_file, padding_method=PaddingMethod.ZERO): | |
""" | |
Add audio envelopes as misc channels to MNE Raw object. | |
Parameters | |
---------- | |
raw : mne.io.Raw | |
MNE Raw object | |
audio_paths : dict | |
Dictionary with paths to audio files {'participant': path, 'interviewer': path} | |
offset_file : str or Path | |
Path to CSV file containing offset information | |
padding_method : PaddingMethod | |
Method to handle edge cases: 'zero', 'hold', or 'crop' | |
""" | |
# Load offset information | |
offset_df = pd.read_csv(offset_file) | |
offset = float(offset_df['offset'].iloc[0]) | |
# Get sampling frequency from raw object | |
target_fs = raw.info['sfreq'] | |
# Process both audio sources | |
all_envelopes = {} | |
for audio_type, path in audio_paths.items(): | |
envelopes = process_audio_for_mne( | |
path, raw, offset, target_fs, | |
audio_type, padding_method | |
) | |
all_envelopes.update(envelopes) | |
# Add channels to raw object | |
info = mne.create_info( | |
ch_names=list(all_envelopes.keys()), | |
sfreq=target_fs, | |
ch_types=['misc'] * len(all_envelopes) | |
) | |
# Create RawArray with envelope data | |
envelope_data = np.vstack(list(all_envelopes.values())) | |
raw_envelopes = mne.io.RawArray(envelope_data, info) | |
# Add to original raw object | |
raw.add_channels([raw_envelopes], force_update_info=True) | |
# Example usage | |
if __name__ == "__main__": | |
# Load your MEG/EEG data | |
raw = mne.io.read_raw_fif('your_meg_file.fif') | |
# Define paths | |
audio_paths = { | |
'participant': 'participant_audio.wav', | |
'interviewer': 'interviewer_audio.wav' | |
} | |
offset_file = 'offset.csv' | |
# Add envelopes to raw object with specified padding method | |
add_audio_envelopes_to_raw(raw, audio_paths, offset_file, | |
padding_method=PaddingMethod.HOLD) | |
# Save modified raw object if needed | |
raw.save('raw_with_audio_envelopes.fif', overwrite=True) |
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment