Last active
September 1, 2015 00:37
-
-
Save mwaskom/e7ab605315bd82906d98 to your computer and use it in GitHub Desktop.
Organize data after downloading from nims
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
"""Unpack nims data into the lyman directory structure. | |
Should be run from the root data directory. | |
""" | |
from __future__ import print_function | |
import os | |
import sys | |
import os.path as op | |
import shutil | |
from glob import glob | |
from subprocess import check_output | |
import argparse | |
import numpy as np | |
import nibabel as nib | |
def main(arglist):
    """Unpack nims data for one subject into the lyman directory layout.

    Creates the subject directory tree, finds task/rest functional runs,
    fieldmap acquisitions, and T1w anatomies inside an (optionally
    untarred) nims download, interactively assigns run numbers, then
    moves, links, and converts the files into place.
    """
    args = parse_args(arglist)
    subj = args.subject

    # Create the data directories (no-op for any that already exist)
    if not op.exists(subj):
        os.mkdir(subj)
    for subdir in ["behav", "nifti", "anat", "bold", "fieldmap", "mri"]:
        path = op.join(subj, subdir)
        if not op.exists(path):
            os.mkdir(path)

    # Find an untarred nims directory
    if args.nimsdir is None:
        if args.tarfile is None:
            sys.exit("Must specify either `-nimsdir` or `-tarfile`")
        nimsdir = untar(args.tarfile)
    else:
        nimsdir = args.nimsdir

    # Find the path to the dataset
    nims_glob = op.join(nimsdir, "awagner", "sticks", "*")

    # Scan "b" sessions start at later run numbers, so suggest
    # accordingly at the interactive prompts.
    scan_b = args.whichscan.lower() == "b"

    # Process the task data (517 timepoints expected per run)
    task_files = glob(op.join(nims_glob, "*task_pe0/*.nii.gz"))
    task_files.sort(key=acquisition_sort)
    print("\nFound {:d} files matching task template".format(len(task_files)))
    task_list = process_func_runs(subj, "task", task_files,
                                  7 if scan_b else 1, 517)

    # Process the task fieldmap data (2 frames expected)
    tmap_files = glob(op.join(nims_glob, "*task_pe1/*.nii.gz"))
    tmap_files.sort(key=acquisition_sort)
    print(("\nFound {:d} files matching task fieldmap template"
           .format(len(tmap_files))))
    tmap_list = process_func_runs(subj, "task", tmap_files,
                                  7 if scan_b else 1, 2, True)

    # Process the rest data (512 timepoints expected per run)
    rest_files = glob(op.join(nims_glob, "*rest_pe0/*.nii.gz"))
    rest_files.sort(key=acquisition_sort)
    print("\nFound {:d} files matching rest template".format(len(rest_files)))
    rest_list = process_func_runs(subj, "rest", rest_files,
                                  5 if scan_b else 1, 512)

    # Process the rest fieldmap data (2 frames expected)
    rmap_files = glob(op.join(nims_glob, "*rest_pe1/*.nii.gz"))
    rmap_files.sort(key=acquisition_sort)
    print(("\nFound {:d} files matching rest fieldmap template"
           .format(len(rmap_files))))
    rmap_list = process_func_runs(subj, "rest", rmap_files,
                                  5 if scan_b else 1, 2, True)

    # Process the structurals
    anat_files = glob(op.join(nims_glob, "*_T1w*/*.nii.gz"))
    anat_files.sort(key=acquisition_sort)
    print(("\nFound {:d} files matching anatomy template"
           .format(len(anat_files))))
    anat_list = process_anat_runs(subj, anat_files, 2 if scan_b else 1)

    # Move the original files into the subject's nifti directory
    full_list = task_list + tmap_list + rest_list + rmap_list + anat_list
    move_orig_files(full_list)

    # Link the full functional (and anatomical) runs
    link_func_runs(task_list + rest_list + anat_list)

    # Create the fieldmap images
    create_fieldmaps(task_list + rest_list, tmap_list + rmap_list)

    # Convert the anatomy files to mgz for Freesurfer
    convert_anat(subj, anat_list)

    # Clean up the unpacked download unless asked not to
    if not args.dontrm:
        if args.tarfile is not None:
            os.remove(op.basename(args.tarfile))
        shutil.rmtree(nimsdir)
def acquisition_sort(fname):
    """Sort key: the leading acquisition number of a file's series directory."""
    series_dir = fname.split("/")[-2]
    return int(series_dir.partition("_")[0])
def process_func_runs(subj, kind, files, suggest, ntp_needed, fieldmap=False,
                      expected_dims=(110, 110, 64)):
    """Validate functional runs and collect their move/link path pairs.

    Parameters
    ----------
    subj : str
        Subject directory name.
    kind : str
        Basename for the link files (e.g. "task" or "rest").
    files : list of str
        Paths to nifti files, in acquisition order.
    suggest : int
        First run number to suggest at the interactive prompt.
    ntp_needed : int
        Expected number of timepoints; mismatching runs are reported
        and excluded.
    fieldmap : bool
        If True, link into the "fieldmap" subdirectory instead of "bold".
    expected_dims : tuple of int
        Required volume dimensions; mismatching runs are excluded.

    Returns
    -------
    list of ((src, perm_name), (link_base, link_name)) tuples.
    """
    assoc_list = []
    for fname in files:
        series = fname.split("/")[-2]
        size = image_size(fname)
        ntp_found = size[-1]
        vol_dims = size[:-1]
        if ntp_needed != ntp_found:
            print(series + " does not have the expected number of TRs")
            print(" (expected {:d}; read {:d})".format(ntp_needed, ntp_found))
            continue
        elif vol_dims != expected_dims:
            print(series + " does not have the expected volume size")
            print(" (expected {}; read {})".format(expected_dims, vol_dims))
            continue
        run_number = request_run_number(fname, suggest)
        if not run_number:
            # User entered "s" to skip this series: don't stage it.
            # (Previously a skipped run still got appended and was
            # formatted as run 00 because False formats as 0.)
            continue
        suggest = run_number + 1
        # Make tuples that will be used to move and link files
        perm_name = op.join(subj, "nifti", op.basename(fname))
        link_base = op.join("..", "nifti", op.basename(perm_name))
        subdir = "fieldmap" if fieldmap else "bold"
        link_name = op.join(subj, subdir,
                            "{}_{:02d}.nii.gz".format(kind, run_number))
        assoc_list.append(((fname, perm_name), (link_base, link_name)))
    return assoc_list
def process_anat_runs(subj, files, suggest):
    """Collect move/link path pairs for the anatomical (T1w) runs.

    Prompts for a run number per file (starting the suggestion at
    ``suggest``) and returns a list of
    ((src, perm_name), (link_base, link_name)) tuples.
    """
    assoc_list = []
    for fname in files:
        run_number = request_run_number(fname, suggest)
        if not run_number:
            # User entered "s" to skip this series: don't stage it.
            # (Previously a skipped run still got appended as t1w_00.)
            continue
        suggest = run_number + 1
        perm_name = op.join(subj, "nifti", op.basename(fname))
        link_base = op.join("..", "nifti", op.basename(perm_name))
        link_name = op.join(subj, "anat",
                            "t1w_{:02d}.nii.gz".format(run_number))
        assoc_list.append(((fname, perm_name), (link_base, link_name)))
    return assoc_list
def request_run_number(fname, suggested):
    """Interactively ask which run number a series should be assigned.

    Returns the entered integer, ``suggested`` on empty input, or
    ``False`` when the user types "s" to skip the series.
    """
    # raw_input only exists on Python 2; fall back to input on Python 3
    # (where input no longer evaluates the entered text).
    try:
        prompt = raw_input
    except NameError:
        prompt = input
    series = fname.split("/")[-2]
    actual = prompt(("Run number for {} or 's' to skip [{}]: "
                     .format(series, suggested)))
    if not actual:
        return suggested
    if actual == "s":
        return False
    return int(actual)
def image_size(fname):
    """Return the full voxel/timepoint shape of a nifti image on disk."""
    img = nib.load(fname)
    return img.shape
def untar(tarfile):
    """Move a nims tar archive into the working directory and unpack it.

    Returns the absolute path to the resulting ``nims`` directory.
    """
    # Move the tar archive locally; shutil.move (unlike os.rename)
    # also works when the source lives on a different filesystem.
    local = op.abspath(op.basename(tarfile))
    shutil.move(tarfile, local)
    # Untar the data
    print("Unpacking {} archive".format(op.basename(local)))
    check_output(["tar", "xf", local])
    return op.abspath("./nims")
def move_orig_files(data_list):
    """Move each original nifti file into its permanent location."""
    print("\nMoving original data")
    for (src, dest), _ in data_list:
        print("Writing {}".format(dest))
        shutil.move(src, dest)
def link_func_runs(data_list):
    """Create relative symlinks pointing at the permanent nifti files."""
    print("\nLinking functional runs")
    for _, (target, link_path) in data_list:
        print("Writing {}".format(link_path))
        os.symlink(target, link_path)
def create_fieldmaps(pe0_list, pe1_list):
    """Build two-frame "fieldmap" images from opposed phase-encode pairs.

    For each paired (pe0, pe1) run, takes the second frame of each
    acquisition and stacks them into a 4D image written to the fieldmap
    link path recorded for the pe1 run.
    """
    print("\nMaking fieldmap images")
    for pe0_info, pe1_info in zip(pe0_list, pe1_list):
        (_, pe0_nii), _ = pe0_info
        (_, pe1_nii), (_, fieldmap_name) = pe1_info
        # Load the functionals with each direction of phase encoding
        pe0_img = nib.load(pe0_nii)
        pe1_img = nib.load(pe1_nii)
        # Extract the second frame of each image; size the output from
        # the image itself instead of hardcoding the volume dimensions.
        vol_shape = tuple(pe0_img.shape[:3])
        fieldmap_data = np.zeros(vol_shape + (2,))
        fieldmap_data[..., 0] = pe0_img.dataobj[..., 1]
        fieldmap_data[..., 1] = pe1_img.dataobj[..., 1]
        # Write out the "fieldmap" file
        print("Writing {}".format(fieldmap_name))
        # .affine/.header replace get_affine()/get_header(), which were
        # deprecated and later removed from nibabel.
        fieldmap_img = nib.Nifti1Image(fieldmap_data,
                                       pe0_img.affine,
                                       pe0_img.header)
        fieldmap_img.to_filename(fieldmap_name)
def convert_anat(subj, anat_list): | |
for _, (anat_path, link_name) in anat_list: | |
img_num = int(link_name.strip(".nii.gz")[-1]) | |
out_path = op.join(subj, "mri", "{:03d}.mgz".format(img_num)) | |
cmd = ["mri_convert", link_name, out_path] | |
check_output(cmd) | |
def parse_args(arglist):
    """Build this script's command-line interface and parse ``arglist``."""
    parser = argparse.ArgumentParser()
    parser.add_argument("-subject", required=True)
    # Optional pointers to the downloaded data (one of the two is needed)
    for flag in ("-nimsdir", "-tarfile"):
        parser.add_argument(flag)
    parser.add_argument("-whichscan", default="a")
    parser.add_argument("-dontrm", action="store_true")
    return parser.parse_args(arglist)
if __name__ == "__main__": | |
main(sys.argv[1:]) |
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment