"""
==================================================
Using Camino and CMTK for structural connectivity analysis
==================================================
Introduction
============
This script, connectivity_tutorial.py, demonstrates how to perform structural connectivity mapping
using Nipype for pipelining, FreeSurfer for reconstruction and parcellation, Camino for tensor
fitting and tractography, and the Connectome Mapping Toolkit (CMTK) for connectivity analysis::

    python connectivity_tutorial.py
We perform this analysis using the FSL course data, which can be acquired from here:
http://www.fmrib.ox.ac.uk/fslcourse/fsl_course_data2.tar.gz
This pipeline also requires the Freesurfer directory for 'subj1' from the FSL course data.
To save time, this data can be downloaded from here:
http://dl.dropbox.com/u/315714/subj1.zip?dl=1
Along with Camino, Camino-Trackvis, FSL and FreeSurfer, you must also have the
Connectome File Format library (cfflib) and the Connectome Mapper (cmp) installed.
These are written by Stephan Gerhard and can be obtained from:
http://www.cmtk.org/
Import necessary modules from nipype.
"""
import nipype.interfaces.io as nio # Data i/o
import nipype.interfaces.utility as util # utility
import nipype.pipeline.engine as pe # pypeline engine
import nipype.interfaces.camino as camino
import nipype.interfaces.fsl as fsl
import nipype.interfaces.camino2trackvis as cam2trk
import nipype.interfaces.freesurfer as fs # freesurfer
import nipype.interfaces.matlab as mlab # how to run matlab
import nipype.interfaces.cmtk as cmtk
import nipype.algorithms.misc as misc
import inspect
import nibabel as nb
import os # system functions
import cmp # connectome mapper
"""
We use the following functions to scrape the voxel and data dimensions of the input images. This allows the
pipeline to be flexible enough to accept and process images of varying size. The SPM Face tutorial
(spm_face_tutorial.py) also implements this inference of voxel size from the data.
"""
def get_vox_dims(volume):
    import nibabel as nb
    if isinstance(volume, list):
        volume = volume[0]
    nii = nb.load(volume)
    hdr = nii.get_header()
    voxdims = hdr.get_zooms()
    return [float(voxdims[0]), float(voxdims[1]), float(voxdims[2])]
def get_data_dims(volume):
    import nibabel as nb
    if isinstance(volume, list):
        volume = volume[0]
    nii = nb.load(volume)
    hdr = nii.get_header()
    datadims = hdr.get_data_shape()
    return [int(datadims[0]), int(datadims[1]), int(datadims[2])]
def get_affine(volume):
    import nibabel as nb
    nii = nb.load(volume)
    return nii.get_affine()
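"""
For illustration, these helpers can be exercised directly on any NIfTI file.
The file name below is hypothetical, so the lines are left commented out:
"""
#print(get_vox_dims('dwi.nii'))  # e.g. [2.0, 2.0, 2.0] for 2 mm isotropic voxels
#print(get_data_dims('dwi.nii')) # e.g. [128, 128, 64]
#print(get_affine('dwi.nii'))    # the 4x4 voxel-to-world affine as a numpy array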
def get_nsubs(group_list):
    # Count the total number of subjects across all groups.
    nsubs = 0
    for grp in group_list.keys():
        nsubs += len(group_list[grp])
    return nsubs
def make_inlist(n, from_node):
    # Build the connection pairs needed to feed the numbered inputs of a Merge(n) node.
    connections = list()
    for i in range(1, n + 1):
        inlist = (from_node, str('in{num}'.format(num=i)))
        connections.append(inlist)
    return inlist, connections
fsl.FSLCommand.set_default_output_type('NIFTI')
def select_aparc(list_of_files):
    # Return the aparc+aseg.mgz file from a list of recon-all outputs.
    for in_file in list_of_files:
        if 'aparc+aseg.mgz' in in_file:
            idx = list_of_files.index(in_file)
    return list_of_files[idx]
def select_aparc_annot(list_of_files):
    # Return the .aparc.annot file from a list of recon-all outputs.
    for in_file in list_of_files:
        if '.aparc.annot' in in_file:
            idx = list_of_files.index(in_file)
    return list_of_files[idx]
"""
This needs to point to the FreeSurfer subjects directory (recon-all must have been run on subj1 from the FSL course data).
"""
fs_dir = os.path.abspath('/usr/local/freesurfer')
subjects_dir = os.path.abspath('freesurfer')
"""
This needs to point to the fdt folder you can find after extracting
http://www.fmrib.ox.ac.uk/fslcourse/fsl_course_data2.tar.gz
"""
data_dir = os.path.abspath('exdata/')
fs.FSCommand.set_default_subjects_dir(subjects_dir)
group_list = {}
group_list['controls']=['subj1','guca88']
group_list['coma']=['BT_VS','DF_VS']
ngroups = len(group_list.keys())
nsubs = get_nsubs(group_list)
subject_list = ['subj1','guca88','BT_VS','DF_VS']
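# With the groups defined above, ngroups is 2 and nsubs is 4:
# get_nsubs simply sums the lengths of the per-group subject lists.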
"""
We use the subj_infosource node to loop through the subject list and define the input files.
For our purposes, these are the diffusion-weighted MR image, b vectors, and b values.
"""
group_infosource = pe.Node(interface=util.IdentityInterface(fields=['group_id','subject_list']), name="group_infosource")
group_infosource.iterables = ('group_id', group_list)
group_infosource.inputs.group_id = group_list.keys()
group_infosource.inputs.subject_list = group_list.values()
subj_infosource = pe.Node(interface=util.IdentityInterface(fields=['subject_id','subject_list']), name="subj_infosource")
subj_infosource.iterables = ('subject_id', subject_list)
subj_infosource.inputs.subject_list = subject_list
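"""
Setting ``iterables`` on a node makes the engine clone everything downstream
of it once per value, so the mapping workflow defined later will be executed
once per subject. A minimal sketch of the mechanism (hypothetical node name,
not part of this pipeline, hence commented out):
"""
#demo = pe.Node(interface=util.IdentityInterface(fields=['subject_id']), name='demo')
#demo.iterables = ('subject_id', subject_list)
# any node connected to demo would now be expanded into one copy per subject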
info = dict(dwi=[['subject_id', 'dwi']],
bvecs=[['subject_id','bvecs']],
bvals=[['subject_id','bvals']])
"""
Use the datasource node to perform the actual data grabbing.
Templates for the associated images are used to obtain the correct images.
"""
datasource = pe.Node(interface=nio.DataGrabber(infields=['subject_id'],
outfields=info.keys()),
name = 'datasource')
datasource.inputs.template = "%s/%s"
datasource.inputs.base_directory = data_dir
datasource.inputs.field_template = dict(dwi='%s/%s.nii')
datasource.inputs.template_args = info
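"""
To make the template mechanics concrete: for subject 'subj1', the grabber
fills the templates as follows (dwi uses the field template, bvecs and bvals
the generic one), relative to the base directory:
"""
# dwi   -> exdata/subj1/dwi.nii
# bvecs -> exdata/subj1/bvecs
# bvals -> exdata/subj1/bvals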
"""
FreeSurferSource nodes are used to retrieve various image
files that are automatically generated by the recon-all process.
Here we use three: one for the whole set of outputs, and two configured to return files for the left and right hemispheres, respectively.
"""
FreeSurferSource = pe.Node(interface=nio.FreeSurferSource(), name='fssource')
FreeSurferSource.inputs.subjects_dir = subjects_dir
FreeSurferSourceLH = pe.Node(interface=nio.FreeSurferSource(), name='fssourceLH')
FreeSurferSourceLH.inputs.subjects_dir = subjects_dir
FreeSurferSourceLH.inputs.hemi = 'lh'
FreeSurferSourceRH = pe.Node(interface=nio.FreeSurferSource(), name='fssourceRH')
FreeSurferSourceRH.inputs.subjects_dir = subjects_dir
FreeSurferSourceRH.inputs.hemi = 'rh'
"""
Since the data come from the FSL course, the b vectors and b values must be converted into a scheme file
for use in Camino.
"""
fsl2scheme = pe.Node(interface=camino.FSL2Scheme(), name="fsl2scheme")
fsl2scheme.inputs.usegradmod = True
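"""
The command line that a nipype node will run can be inspected before
execution. As a sanity check (hypothetical file names, hence commented out):
"""
#check = camino.FSL2Scheme(bvec_file='bvecs', bval_file='bvals', usegradmod=True)
#print(check.cmdline) # roughly: fsl2scheme -bvecfile bvecs -bvalfile bvals -usegradmod ...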
"""
Use FSL's Brain Extraction to create a mask from the b0 image
"""
b0Strip = pe.Node(interface=fsl.BET(mask = True), name = 'bet_b0')
"""
Use FSL's FLIRT function to coregister the b0 mask and the structural image.
A convert_xfm node is then used to obtain the inverse matrix.
FLIRT is used once again to apply the inverse transformation to the parcellated brain image.
"""
coregister = pe.Node(interface=fsl.FLIRT(dof=6), name = 'coregister')
coregister.inputs.cost = ('corratio')
convertxfm = pe.Node(interface=fsl.ConvertXFM(), name = 'convertxfm')
convertxfm.inputs.invert_xfm = True
inverse = pe.Node(interface=fsl.FLIRT(), name = 'inverse')
inverse.inputs.interp = ('nearestneighbour')
inverse_AparcAseg = inverse.clone('inverse_AparcAseg')
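"""
ConvertXFM with ``invert_xfm`` enabled wraps FSL's ``convert_xfm -inverse``.
A standalone sketch with a hypothetical matrix file name, commented out:
"""
#inv = fsl.ConvertXFM(in_file='b0_to_struct.mat', invert_xfm=True)
#print(inv.cmdline) # roughly: convert_xfm -omat <out> -inverse b0_to_struct.mat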
"""
A number of conversion operations are required to obtain NIFTI files from the FreeSurferSource for each subject.
Nodes are used to convert the following:
- Original structural image to NIFTI
- Parcellated white matter image to NIFTI
- Parcellated whole-brain image to NIFTI
- Left and Right hemisphere surfaces to GIFTI (for visualization in ConnectomeViewer)
"""
mri_convert_Brain = pe.Node(interface=fs.MRIConvert(), name='mri_convert_Brain')
mri_convert_Brain.inputs.out_type = 'nii'
mri_convert_WMParc = mri_convert_Brain.clone('mri_convert_WMParc')
mri_convert_AparcAseg = mri_convert_Brain.clone('mri_convert_AparcAseg')
mris_convertLH = pe.Node(interface=fs.MRIsConvert(), name='mris_convertLH')
mris_convertLH.inputs.out_datatype = 'gii'
mris_convertRH = mris_convertLH.clone('mris_convertRH')
mris_convertLHlabels = mris_convertLH.clone('mris_convertLHlabels')
mris_convertRHlabels = mris_convertLH.clone('mris_convertRHlabels')
"""
An inputnode is used to pass the data obtained by the data grabber to the actual processing functions.
"""
inputnode = pe.Node(interface=util.IdentityInterface(fields=["dwi", "bvecs", "bvals", "subject_id"]), name="inputnode")
"""
In this section we create the nodes necessary for diffusion analysis.
First, the diffusion image is converted to voxel order.
"""
image2voxel = pe.Node(interface=camino.Image2Voxel(), name="image2voxel")
"""
Second, diffusion tensors are fit to the voxel-order data.
"""
dtifit = pe.Node(interface=camino.DTIFit(),name='dtifit')
"""
Next, a lookup table is generated from the schemefile and the
signal-to-noise ratio (SNR) of the unweighted (q=0) data.
"""
dtlutgen = pe.Node(interface=camino.DTLUTGen(), name="dtlutgen")
dtlutgen.inputs.snr = 16.0
dtlutgen.inputs.inversion = 1
"""
In this tutorial we implement probabilistic tractography using the PICo algorithm.
PICo tractography requires an estimate of the fibre direction and a model of its uncertainty in each voxel;
this is produced using the following node.
"""
picopdfs = pe.Node(interface=camino.PicoPDFs(), name="picopdfs")
picopdfs.inputs.inputmodel = 'dt'
"""
Finally, tractography is performed. In this tutorial, we will use only 1 iteration for time-saving purposes.
"""
track = pe.Node(interface=camino.TrackPICo(), name="track")
track.inputs.iterations = 1
"""
Connectivity mapping in Camino can be very memory intensive. To deal with this, we add a "shredding" node,
which reduces the number of tracts in the file: starting at tract `offset`, it repeatedly keeps `bunchsize`
tracts and then skips `space` tracts until the input is exhausted.
"""
tractshred = pe.Node(interface=camino.TractShredder(), name='tractshred')
tractshred.inputs.offset = 0
tractshred.inputs.bunchsize = 2
tractshred.inputs.space = 1
"""
Here we create a connectivity mapping node using Camino.
For visualization, it can be beneficial to set a threshold for the minimum number of fiber connections
that are required for an edge to be drawn on the graph.
"""
conmap = pe.Node(interface=camino.Conmap(), name='conmap')
conmap.inputs.threshold = 100
"""
Currently, the best program for visualizing tracts is TrackVis. For this reason, a node is included to
convert the raw tract data to .trk format. Solely for testing purposes, another node is added to perform the reverse.
"""
camino2trackvis = pe.Node(interface=cam2trk.Camino2Trackvis(), name="camino2trk")
camino2trackvis.inputs.min_length = 30
camino2trackvis.inputs.voxel_order = 'LAS'
trk2camino = pe.Node(interface=cam2trk.Trackvis2Camino(), name="trk2camino")
"""
Tracts can also be converted to VTK and OOGL formats, for use in programs such as GeomView and Paraview,
using the following two nodes.
"""
vtkstreamlines = pe.Node(interface=camino.VtkStreamlines(), name="vtkstreamlines")
procstreamlines = pe.Node(interface=camino.ProcStreamlines(), name="procstreamlines")
procstreamlines.inputs.outputtracts = 'oogl'
"""
We can also produce a variety of scalar values from our fitted tensors. The following nodes generate the
fractional anisotropy and diffusivity trace maps and their associated headers.
"""
fa = pe.Node(interface=camino.FA(),name='fa')
#md = pe.Node(interface=camino.MD(),name='md')
trace = pe.Node(interface=camino.TrD(),name='trace')
dteig = pe.Node(interface=camino.DTEig(), name='dteig')
analyzeheader_fa = pe.Node(interface=camino.AnalyzeHeader(),name='analyzeheader_fa')
analyzeheader_fa.inputs.datatype = 'double'
analyzeheader_trace = analyzeheader_fa.clone('analyzeheader_trace')
fa2nii = pe.Node(interface=misc.CreateNifti(),name='fa2nii')
trace2nii = fa2nii.clone("trace2nii")
"""
This section adds the new Connectome Mapping Toolkit nodes.
These interfaces are experimental and may not function properly, or at all.
For this reason, the nodes have not been connected.
"""
roigen = pe.Node(interface=cmtk.ROIGen(), name="ROIGen")
cmp_config = cmp.configuration.PipelineConfiguration(parcellation_scheme = "NativeFreesurfer")
cmp_config.parcellation_scheme = "NativeFreesurfer"
roigen.inputs.LUT_file = cmp_config.get_freeview_lut("NativeFreesurfer")['freesurferaparc']
creatematrix = pe.Node(interface=cmtk.CreateMatrix(), name="CreateMatrix")
creatematrix.inputs.resolution_network_file = cmp_config.parcellation['freesurferaparc']['node_information_graphml']
CFFConverter = pe.Node(interface=cmtk.CFFConverter(), name="CFFConverter")
"""
Here we define a few nodes using the Nipype Merge utility.
These are useful for passing lists of the files we want packaged in our CFF file.
"""
giftiSurfaces = pe.Node(interface=util.Merge(2), name="GiftiSurfaces")
giftiLabels = pe.Node(interface=util.Merge(2), name="GiftiLabels")
niftiVolumes = pe.Node(interface=util.Merge(5), name="NiftiVolumes")
fiberDataArrays = pe.Node(interface=util.Merge(4), name="FiberDataArrays")
tractFiles = pe.Node(interface=util.Merge(1), name="TractFiles")
gpickledNetworks = pe.Node(interface=util.Merge(1), name="NetworkFiles")
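"""
Merge(n) simply collects its numbered inputs into a single list output. For
example (hypothetical file names, hence commented out):
"""
#m = util.Merge(2)
#m.inputs.in1 = 'lh.pial.gii'
#m.inputs.in2 = 'rh.pial.gii'
#print(m.run().outputs.out) # ['lh.pial.gii', 'rh.pial.gii']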
"""
Having created all of our nodes, we can now define the workflow and start making connections.
"""
mapping = pe.Workflow(name='mapping')
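"""
Each connection below has the form
(source_node, destination_node, [(source_output, destination_input)]).
Nipype also accepts an equivalent single-edge form, e.g. (commented out here
because the list form below already makes this connection):
"""
#mapping.connect(inputnode, 'subject_id', FreeSurferSource, 'subject_id')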
"""
First, we connect the input node to the early conversion functions.
"""
mapping.connect([(inputnode, FreeSurferSource,[("subject_id","subject_id")])])
mapping.connect([(inputnode, FreeSurferSourceLH,[("subject_id","subject_id")])])
mapping.connect([(inputnode, FreeSurferSourceRH,[("subject_id","subject_id")])])
mapping.connect([(inputnode, image2voxel, [("dwi", "in_file")]),
(inputnode, fsl2scheme, [("bvecs", "bvec_file"),
("bvals", "bval_file")]),
(image2voxel, dtifit,[['voxel_order','in_file']]),
(fsl2scheme, dtifit,[['scheme','scheme_file']])
])
mapping.connect([(FreeSurferSource, mri_convert_WMParc,[('wmparc','in_file')])])
mapping.connect([(FreeSurferSource, mri_convert_Brain,[('brain','in_file')])])
mapping.connect([(FreeSurferSourceLH, mris_convertLH,[('pial','in_file')])])
mapping.connect([(FreeSurferSourceRH, mris_convertRH,[('pial','in_file')])])
mapping.connect([(FreeSurferSourceLH, mris_convertLHlabels,[('pial','in_file')])])
mapping.connect([(FreeSurferSourceRH, mris_convertRHlabels,[('pial','in_file')])])
mapping.connect([(FreeSurferSourceLH, mris_convertLHlabels, [(('annot', select_aparc_annot), 'annot_file')])])
mapping.connect([(FreeSurferSourceRH, mris_convertRHlabels, [(('annot', select_aparc_annot), 'annot_file')])])
"""
This section coregisters the diffusion-weighted and parcellated white-matter / whole brain images.
"""
mapping.connect([(inputnode, b0Strip,[('dwi','in_file')])])
mapping.connect([(b0Strip, coregister,[('out_file','in_file')])])
mapping.connect([(mri_convert_Brain, coregister,[('out_file','reference')])])
mapping.connect([(coregister, convertxfm,[('out_matrix_file','in_file')])])
mapping.connect([(b0Strip, inverse,[('out_file','reference')])])
mapping.connect([(convertxfm, inverse,[('out_file','in_matrix_file')])])
mapping.connect([(mri_convert_WMParc, inverse,[('out_file','in_file')])])
mapping.connect([(inverse, conmap,[('out_file','roi_file')])])
#mapping.connect([(ApplyVolTransform_WMParc, conmap,[('transformed_file','roi_file')])])
"""
The tractography pipeline consists of the following nodes.
"""
mapping.connect([(b0Strip, track,[("mask_file","seed_file")])])
mapping.connect([(fsl2scheme, dtlutgen,[("scheme","scheme_file")])])
mapping.connect([(dtlutgen, picopdfs,[("dtLUT","luts")])])
mapping.connect([(dtifit, picopdfs,[("tensor_fitted","in_file")])])
mapping.connect([(picopdfs, track,[("pdfs","in_file")])])
"""
The tractography is passed to the shredder in preparation for connectivity mapping.
"""
mapping.connect([(track, tractshred,[("tracked","in_file")])])
mapping.connect([(tractshred, conmap,[("shredded","in_file")])])
"""
Connecting the fractional anisotropy and trace nodes is simple, as they obtain their input from the
tensor fitting. This is also where our voxel- and data-grabbing functions come in. We pass these functions,
along with the original DWI image from the input node, to the header-generating nodes. In a connection
tuple such as (('dwi', get_vox_dims), 'voxel_dims'), the function is applied to the source value before it
is set on the destination input. This ensures that the output files will be correct and readable.
"""
mapping.connect([(dtifit, fa,[("tensor_fitted","in_file")])])
mapping.connect([(fa, analyzeheader_fa,[("fa","in_file")])])
mapping.connect([(inputnode, analyzeheader_fa,[(('dwi', get_vox_dims), 'voxel_dims'),
(('dwi', get_data_dims), 'data_dims')])])
mapping.connect([(fa, fa2nii,[('fa','data_file')])])
mapping.connect([(inputnode, fa2nii,[(('dwi', get_affine), 'affine')])])
mapping.connect([(analyzeheader_fa, fa2nii,[('header', 'header_file')])])
mapping.connect([(dtifit, trace,[("tensor_fitted","in_file")])])
mapping.connect([(trace, analyzeheader_trace,[("trace","in_file")])])
mapping.connect([(inputnode, analyzeheader_trace,[(('dwi', get_vox_dims), 'voxel_dims'),
(('dwi', get_data_dims), 'data_dims')])])
mapping.connect([(trace, trace2nii,[('trace','data_file')])])
mapping.connect([(inputnode, trace2nii,[(('dwi', get_affine), 'affine')])])
mapping.connect([(analyzeheader_trace, trace2nii,[('header', 'header_file')])])
mapping.connect([(dtifit, dteig,[("tensor_fitted","in_file")])])
"""
The output tracts are converted to trackvis format (and back). Here we also use the voxel- and data-grabbing
functions defined at the beginning of the pipeline.
"""
mapping.connect([(track, camino2trackvis, [('tracked','in_file')]),
(track, vtkstreamlines,[['tracked','in_file']]),
(camino2trackvis, trk2camino,[['trackvis','in_file']])
])
mapping.connect([(inputnode, camino2trackvis,[(('dwi', get_vox_dims), 'voxel_dims'),
(('dwi', get_data_dims), 'data_dims')])])
#These lines are commented out because the Camino mean diffusivity function appears to be broken.
#mapping.connect([(dtifit, md,[("tensor_fitted","in_file")])])
#mapping.connect([(md, analyzeheader2,[("md","in_file")])])
"""
Here the CMTK connectivity mapping nodes are connected.
"""
mapping.connect([(FreeSurferSource, mri_convert_AparcAseg, [(('aparc_aseg', select_aparc), 'in_file')])])
mapping.connect([(b0Strip, inverse_AparcAseg,[('out_file','reference')])])
mapping.connect([(convertxfm, inverse_AparcAseg,[('out_file','in_matrix_file')])])
mapping.connect([(mri_convert_AparcAseg, inverse_AparcAseg,[('out_file','in_file')])])
mapping.connect([(inverse_AparcAseg, roigen,[("out_file","aparc_aseg_file")])])
mapping.connect([(roigen, creatematrix,[("roi_file","roi_file")])])
mapping.connect([(roigen, creatematrix,[("dict_file","dict_file")])])
mapping.connect([(camino2trackvis, creatematrix,[("trackvis","tract_file")])])
mapping.connect([(inputnode, creatematrix,[("subject_id","out_matrix_file")])])
mapping.connect([(creatematrix, gpickledNetworks,[("matrix_file","in1")])])
mapping.connect([(mris_convertLH, giftiSurfaces,[("converted","in1")])])
mapping.connect([(mris_convertRH, giftiSurfaces,[("converted","in2")])])
mapping.connect([(mris_convertLHlabels, giftiLabels,[("converted","in1")])])
mapping.connect([(mris_convertRHlabels, giftiLabels,[("converted","in2")])])
mapping.connect([(roigen, niftiVolumes,[("roi_file","in1")])])
mapping.connect([(inputnode, niftiVolumes,[("dwi","in2")])])
mapping.connect([(mri_convert_Brain, niftiVolumes,[("out_file","in3")])])
mapping.connect([(fa2nii, niftiVolumes,[("nifti_file","in4")])])
mapping.connect([(trace2nii, niftiVolumes,[("nifti_file","in5")])])
mapping.connect([(creatematrix, fiberDataArrays,[("endpoint_file","in1")])])
mapping.connect([(creatematrix, fiberDataArrays,[("endpoint_file_mm","in2")])])
mapping.connect([(creatematrix, fiberDataArrays,[("fiber_length_file","in3")])])
mapping.connect([(creatematrix, fiberDataArrays,[("fiber_label_file","in4")])])
"""
This block connects a number of the files to the CFF converter. We pass lists of the surfaces
and volumes that are to be included, as well as the tracts and the network itself.
"""
CFFConverter.inputs.script_files = os.path.abspath(inspect.getfile(inspect.currentframe()))
mapping.connect([(giftiSurfaces, CFFConverter,[("out","gifti_surfaces")])])
mapping.connect([(giftiLabels, CFFConverter,[("out","gifti_labels")])])
mapping.connect([(gpickledNetworks, CFFConverter,[("out","gpickled_networks")])])
mapping.connect([(niftiVolumes, CFFConverter,[("out","nifti_volumes")])])
mapping.connect([(fiberDataArrays, CFFConverter,[("out","data_files")])])
mapping.connect([(camino2trackvis, CFFConverter,[("trackvis","tract_files")])])
mapping.connect([(inputnode, CFFConverter,[("subject_id","title")])])
mapping.connect([(inputnode, CFFConverter,[("subject_id","out_file")])])
"""
Finally, we create a higher-level workflow to connect our mapping workflow with the info and datagrabbing nodes
declared at the beginning. The tutorial is now extensible to any arbitrary number of subjects; simply add
their names to the subject list and their data to the proper folders.
"""
connectivity = pe.Workflow(name="connectivity")
connectivity.base_dir = os.path.abspath('groupcon')
connectivity.connect([
(subj_infosource,datasource,[('subject_id', 'subject_id')]),
(datasource,mapping,[('dwi','inputnode.dwi'),
('bvals','inputnode.bvals'),
('bvecs','inputnode.bvecs')
]),
(subj_infosource,mapping,[('subject_id','inputnode.subject_id')]),
])
"""
Store the output
----------------
Create a datasink node to store the connectome files (this block is currently disabled).
"""
"""
datasink = pe.Node(interface=nio.DataSink(), name="datasink")
datasink.inputs.base_directory = os.path.abspath('groupcon/CFFout')
datasink.inputs.substitutions = []
def getsubs(subject_id):
    subs = [('_subject_id_%s/' % subject_id, '')]
    return subs
# store relevant outputs from various stages of the 1st level analysis
connectivity.connect([(subj_infosource, datasink,[('subject_id','container'),
(('subject_id', getsubs), 'substitutions')
]),
(mapping, datasink,[('CFFConverter.connectome_file','@connectome'),
])
])
"""
"""
Run the analysis pipeline and also create a dot file (and an EPS rendering, if graphviz is available)
that visually represents the workflow.
"""
if __name__ == '__main__':
    connectivity.run()
    connectivity.write_graph(format='eps', graph2use='flat')
"""
Level 2 pipeline
----------------
Create a second-level workflow to merge the individual connectome files.
"""
l2flow = pe.Workflow(name='l2out')
l2flow.base_dir = os.path.abspath('groupcon')
"""
Set up a dummy node to iterate over the subjects.
"""
l2inputnode = pe.Node(interface=util.IdentityInterface(fields=['subject_id']), name='inputnode')
l2inputnode.iterables = [('subject_id', subject_list)]
"""
Use a datagrabber node to collect the connectome files.
"""
l2source = pe.Node(interface=nio.DataGrabber(outfields=['cff']),
name='l2source')
l2source.inputs.template = '%s'
l2source.inputs.base_directory = os.path.abspath('groupcon/CFFout')
l2source.inputs.field_template = dict(cff='_subject_%s/connectome.cff')
l2source.inputs.template_args = dict(cff=[subject_list])
l2flow.connect(l2inputnode, 'subject_id', l2source, 'subject_id')
def ordersubjects(files, subj_list):
    # Reorder the grabbed files to match the order of the subject list.
    outlist = []
    for s in subj_list:
        for f in files:
            if '/%s/' % s in f:
                outlist.append(f)
                continue
    print outlist
    return outlist
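# For example (hypothetical paths):
# ordersubjects(['/out/subj2/c.cff', '/out/subj1/c.cff'], ['subj1', 'subj2'])
# would return ['/out/subj1/c.cff', '/out/subj2/c.cff']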
mergenode = pe.Node(interface=util.Merge(1),name='mergenode')
l2flow.connect(l2source,('cff', ordersubjects, subject_list), mergenode, 'in1')
MergeCNetworksSubj = pe.Node(interface=cmtk.MergeCNetworks(), name="MergeCNetworksSubj")
MergeCNetworksSubj.inputs.out_file = '{a} vs {b}'.format(a=group_list.keys()[0], b=group_list.keys()[1])
def list2tuple(listoflist):
    return [tuple(x) for x in listoflist]
l2flow.connect(mergenode, ('out', list2tuple), MergeCNetworksSubj, 'in_files')
if __name__ == '__main__':
    # l2flow.run()
    l2flow.write_graph(graph2use='flat')
#groupcon = pe.Workflow(name="groupcon")
#groupcon.base_dir = os.path.abspath('groupcon')
#merge_subject_list = pe.Node(interface=util.Merge(nsubs), name="merge_subject_list")
#merge_group_list = pe.Node(interface=util.Merge(ngroups), name="merge_group_list")
#MergeCNetworksGroup = pe.Node(interface=cmtk.MergeCNetworks(), name="MergeCNetworksGroup")
#subj_inlist, subj_connections = make_inlist(nsubs, 'mapping.CFFConverter.connectome_file')
#group_inlist, group_connections = make_inlist(ngroups, 'connectome_file')
#groupcon.connect([(connectivity,merge_subject_list,subj_connections)])
#groupcon.connect([(merge_subject_list,MergeCNetworksSubj,[('out','in_files')])])
#groupcon.connect([(MergeCNetworksSubj,merge_group_list,group_connections)])
#groupcon.connect([(merge_group_list,MergeCNetworksGroup,[('out','in_files')])])