@kingjr
Last active September 12, 2017 14:44
for_claire
%matplotlib inline
import mne
import matplotlib.pyplot as plt # library for plotting
import numpy as np # library for matrix operations
# Generate random, fake MEG data
n_trials, n_channels, n_times, sfreq = 100, 32, 50, 500.
conditions = np.random.randint(0, 2, n_trials)
info = mne.create_info(n_channels, sfreq)
data = np.random.randn(n_trials, n_channels, n_times)
data[conditions == 1, :10, n_times // 2:] += 1.  # add signal in some channels/times
epochs = mne.EpochsArray(data, info)
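# (Sanity-check sketch added for illustration; not in the original gist.)
# Averaging each condition and taking the difference should show the injected
# effect (~ +1) in the first 10 channels after the middle of the epoch.
evoked_diff = data[conditions == 1].mean(axis=0) - data[conditions == 0].mean(axis=0)
plt.matshow(evoked_diff, aspect='auto')
plt.xlabel('Time (samples)')
plt.ylabel('Channel')
plt.colorbar()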
# setup logistic regression classifier
from sklearn.linear_model import LogisticRegression
from sklearn.preprocessing import StandardScaler
from sklearn.pipeline import make_pipeline
from mne.decoding import SlidingEstimator, cross_val_multiscore
clf = make_pipeline(
    StandardScaler(),  # Z-score data, because gradiometers and magnetometers have different scales
    LogisticRegression(),
)
# This will apply the logistic regression to each time sample separately
sliding = SlidingEstimator(clf, scoring='roc_auc')
# If you want to get the overall score across trials you can do:
scores = cross_val_multiscore(
    sliding,              # the estimator fitted and scored at each cross-validation fold
    X=epochs.get_data(),  # the data to decode
    y=conditions,         # the condition you want to classify
    cv=5,                 # the number of cross-validation splits
)
# average scores across CV splits
scores = np.mean(scores, axis=0)
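# (Illustrative sketch, not part of the original gist.) cross_val_multiscore
# with a SlidingEstimator is roughly equivalent to cross-validating one
# classifier per time sample by hand; this loop should give values close to
# `scores` above (up to the exact fold splitting).
from sklearn.model_selection import cross_val_score
X_all = epochs.get_data()
manual_scores = np.array([
    cross_val_score(clf, X_all[..., t], conditions, scoring='roc_auc', cv=5).mean()
    for t in range(n_times)
])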
# Plot
plt.plot(epochs.times, scores)
plt.axhline(0.5, color='k')  # chance level
plt.ylabel('AUC')
plt.xlabel('Time (s)')  # epochs.times is in seconds
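# (Added for illustration, not in the original gist.) Mark where the simulated
# effect starts, to compare the rise in AUC against it.
plt.axvline(epochs.times[n_times // 2], color='gray', linestyle='--')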
# If you want to train on one set of trials (here the first half) and
# generalize to the other set (the second half) with single-trial
# predictions:
training = np.arange(n_trials // 2)
testing = np.arange(n_trials // 2, n_trials)
sliding.fit(X=epochs[training].get_data(),
            y=conditions[training])
y_pred = sliding.predict_proba(X=epochs[testing].get_data())
# predict_proba returns one probability per class; with only two classes we
# can keep a single column (here, the probability of class 0)
y_pred = y_pred[..., 0]
# Plot single-trial predictions, sorted by category
plt.matshow(y_pred[np.argsort(conditions[testing])],
            cmap='RdBu_r', vmin=0., vmax=1.)
plt.xlabel('Time (samples)')
plt.ylabel('Trials')
plt.colorbar()
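# (Sketch added for illustration, not in the original gist.) The single-trial
# probabilities can be summarized into a per-time-point AUC on the held-out
# half, using scikit-learn's roc_auc_score. Since y_pred holds the probability
# of class 0, score against 1 - y_pred.
from sklearn.metrics import roc_auc_score
auc_test = np.array([
    roc_auc_score(conditions[testing], 1. - y_pred[:, t])
    for t in range(n_times)
])
plt.figure()
plt.plot(epochs.times, auc_test)
plt.axhline(0.5, color='k')  # chance level
plt.ylabel('AUC (train: first half, test: second half)')
plt.xlabel('Time (s)')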