View clean_forrest_stim.py
# Strip the 'stimuli/' path prefix from stim_file entries in BIDS events files.
from bids.grabbids import BIDSLayout
import pandas as pd
import os.path as op
# Index the BIDS dataset rooted one directory above this script.
layout = BIDSLayout('../')
# Collect the events.tsv file paths for the object-categories task.
event_files = layout.get(task='objectcategories', type='events', return_type='file')
for f in event_files:
# NOTE(review): loop-body indentation was lost in this paste; the two lines
# below presumably belong inside the loop. The fragment also ends before the
# modified frame is written back out (e.g. events.to_csv) — confirm against
# the full file.
events = pd.read_csv(f, delimiter='\t')
events['stim_file'] = events.stim_file.str.replace('stimuli/', '')
View permutation.py
# One iteration of a label-permutation test, written to be fanned out in
# parallel (joblib is imported here; the Parallel(...) call is not visible
# in this fragment).
import numpy as np
from tools import ProgressBar
from joblib import Parallel, delayed
import pandas as pd
def permutation_parallel(X, y, cla, feat_names, region, i):
# NOTE(review): body indentation was lost in this paste and the function is
# truncated — no return statement is visible.
# Shuffle the labels to draw one sample from the null distribution.
newY = np.random.permutation(y)
# Refit the classifier on the permuted labels. Reading theta_ below implies
# `cla` is a Gaussian-Naive-Bayes-like estimator exposing per-class feature
# means — TODO confirm against the full file.
cla_fits = cla.fit(X, newY)
# Per-feature log-ratio of the two class means under the permuted labels.
fit_w = np.log(cla_fits.theta_[1] / cla_fits.theta_[0])
View plot_subset.py
from nilearn import plotting as niplt
import nibabel as nib
import seaborn as sns
import numpy as np
def plot_subset(nifti, layers, colors = None, **kwargs):
# Plot selected layers of a NIfTI image; `nifti` may be a path or an
# in-memory nib.Nifti1Image. NOTE(review): body indentation was lost in this
# paste and the function is truncated mid-definition.
if not isinstance(nifti, nib.Nifti1Image):
nifti = nib.load(nifti)
# NOTE(review): get_data() is deprecated in newer nibabel (use get_fdata());
# left unchanged because only a fragment of the function is visible.
data = nifti.get_data()
View generate_kt_stimuli.py
"""" This script generates stimuli for the keep track task.
Counterbalancing rules it tries to implement:
- Each category is used as a target equal number of times (only possible when number of targets is divisible by number of categories)
- Each word used equally often as a target, distractor, and final word
- Last word in the trial is always a distractor
- Target words and final words do not repeat across adjacent trials
- Distractors can repeat across trials
Number of targets per category:
View Boostrapping and Permuation testing
##############################################################
##### Bootstrap of group difference #####
##############################################################
### First, we generate two samples (N = 25) with a difference of 0.5
### Second, test using parametric t.test
### Then we define a function to test the difference between the two samples
### using indices that boot will provide
## Then we run boot. Importantly, define the strata of your group labels
View load_csvs.R
# Read every file in INPUT_PATH and accumulate them into one data frame
# ("dataset"). NOTE(review): fragment is truncated mid-function.
loadAllCSVs <- function (INPUT_PATH) {
# Get list of files
file_list <- list.files(INPUT_PATH)
# Iterate through files and add them to "dataset"
for (file in file_list){
# NOTE(review): sep="" assumes INPUT_PATH already ends with a path
# separator — confirm what callers pass.
file <- paste(INPUT_PATH,file,sep="")
# if the merged dataset doesn't exist, create it
if (!exists("dataset")){
dataset <- read.table(file, header=TRUE, sep=",")
View load_all_csv.R
# Loads all CSV files into a data set in given INPUT_PATH
# NOTE(review): this appears to be a duplicate of load_csvs.R; the fragment
# is truncated mid-function.
loadAllCSVs <- function (INPUT_PATH) {
# Get list of files
file_list <- list.files(INPUT_PATH)
# Iterate through files and add them to "dataset"
for (file in file_list){
# NOTE(review): sep="" assumes INPUT_PATH already ends with a path
# separator — confirm what callers pass.
file <- paste(INPUT_PATH,file,sep="")
# if the merged dataset doesn't exist, create it
if (!exists("dataset")){
View dcor_matrix.py
def dcor_matrix(data):
"""Creates a correlation matrix using the dcor (distance correlation) function based on the R (energy) implementation
Assumes that variables are columns
Input:
data - x by y np array where y is the number of variables
Output:
Returns a correlation matrix as a np array of shape y by y
"""
# NOTE(review): indentation was lost in this paste and the fragment is
# truncated immediately after this import; the dcor computation itself is
# not visible here.
import numpy as np
View td_analysis
# Contains basic temporal discounting analysis functions.
# Does not implement multilevel analysis (will be implemented later)
# Typical workflow:
# Input: TD Trials in trialData with the following columns:
# later value
# delay
# later_choice - 1 if subject chose "later" option, 0 if not
# condition - factor indicating condition
# sub - subject number (factor)
# 1. Get Discounted Value at each delay for each subject