Commit 42908403 by Junhao WEN

NODDI pipeline

First version of NODDI preprocessing and processing pipeline
parent 350f0b5c
......@@ -126,6 +126,8 @@ def execute():
from clinica.pipelines.t1_volume_existing_template.t1_volume_existing_template_cli import T1VolumeExistingTemplateCLI #noqa
from clinica.pipelines.t1_volume_parcellation.t1_volume_parcellation_cli import T1VolumeParcellationCLI
from clinica.pipelines.dwi_preprocessing_using_t1.dwi_preprocessing_using_t1_cli import DWIPreprocessingUsingT1CLI # noqa
from clinica.pipelines.dwi_preprocessing_noddi.dwi_preprocessing_noddi_cli import DwiPreprocessingNoddiCLI # noqa
from clinica.pipelines.dwi_processing_noddi.dwi_processing_noddi_cli import DwiProcessingNoddiCLI # noqa
from clinica.pipelines.dwi_preprocessing_using_phasediff_fieldmap.dwi_preprocessing_using_phasediff_fieldmap_cli import DWIPreprocessingUsingPhaseDiffFieldmapCLI # noqa
from clinica.pipelines.dwi_processing_dti.dwi_processing_dti_cli import DWIProcessingDTICLI # noqa
from clinica.pipelines.fmri_preprocessing.fmri_preprocessing_cli import fMRIPreprocessingCLI # noqa
......@@ -145,6 +147,8 @@ def execute():
T1VolumeExistingTemplateCLI(),
T1VolumeParcellationCLI(),
DWIPreprocessingUsingT1CLI(),
DwiPreprocessingNoddiCLI(),
DwiProcessingNoddiCLI(),
DWIPreprocessingUsingPhaseDiffFieldmapCLI(),
DWIProcessingDTICLI(),
fMRIPreprocessingCLI(),
......
function noddiprocessing(output_dir, noddi_img, brain_mask, roi_mask, bval, bvec, prefix, bStep, noddi_toolbox_dir, num_cores)
%NODDIPROCESSING Fit the NODDI model to multi-shell diffusion data.
%   Based on: Zhang et al., "NODDI: Practical in vivo neurite orientation
%   dispersion and density imaging of the human brain", NeuroImage, 2012.
%
%   Inputs:
%     output_dir        - directory where the fitting results are stored.
%     noddi_img         - path to the preprocessed NODDI nii (uncompressed).
%     brain_mask        - mask including only the voxels inside the brain.
%     roi_mask          - mask restricting the fit to an ROI; pass brain_mask
%                         here to fit all brain voxels.
%     bval              - path to the bval file.
%     bvec              - path to the rotated bvec from the preprocessing step.
%     prefix            - subject_id (BIDS).
%     bStep             - rounded bval wanted for each shell, as a
%                         comma-separated string (e.g. '0,300,700,2200').
%     noddi_toolbox_dir - path to the NODDI MATLAB toolbox.
%     num_cores         - optional; defaults to 1 when not given.
%
%   Note: this pipeline works only for non-compressed images (nii, not
%   nii.gz); please convert compressed images before calling this function.

% Default num_cores to 1 when it is not supplied.
% (The function takes 10 arguments; the previous test `nargin < 11` was
% always true and silently reset num_cores to 1 even when it was given.)
if (nargin < 10)
    num_cores = 1;
end
% make sure num_cores is numeric (it may arrive as a string from a wrapper)
if ischar(num_cores)
    num_cores = str2double(num_cores);
end

%% add this script's folder (for roundbval.py and python.m) and the NODDI toolbox to the path
addpath(fileparts(which(mfilename())));
addpath(noddi_toolbox_dir);

%% work inside the output directory; all result files are written there
cd(output_dir);

%% Convert the raw DWI volume into the required format with CreateROI
CreateROI(noddi_img, roi_mask, strcat(prefix, '_fitted_original.mat'));

%% Round the bvals with the AMICO-style fsl2scheme helper script
python(strcat(fileparts(which(mfilename())),'/roundbval.py'), bval, strcat(prefix,'_rounded.bval'), bStep)

%% Convert the FSL bval/bvec files into the required format with FSL2Protocol
protocol = FSL2Protocol(strcat(prefix,'_rounded.bval'), bvec);
% save the protocol for plotting use
save(strcat(prefix,'_protocol.mat'), 'protocol');
% For Camino users, a Scheme file can be converted with SchemeToProtocol:
% protocol = SchemeToProtocol('NODDI_protocol.scheme');

%% Create the NODDI model with the MakeModel function
noddi = MakeModel('WatsonSHStickTortIsoV_B0');
save(strcat(prefix,'_model.mat'), 'noddi');

%% Fit the NODDI model, on one core or in parallel; parallel fitting
%% requires the local Matlab parallel preferences to be set up
tic;
if num_cores == 1
    batch_fitting_single(strcat(prefix, '_fitted_original.mat'), protocol, noddi, strcat(prefix, '_fitted_params.mat'));
else
    batch_fitting(strcat(prefix, '_fitted_original.mat'), protocol, noddi, strcat(prefix, '_fitted_params.mat'), num_cores); % with python wrapper, it seems to be a bug run parpool
end
t = toc;
fprintf('Fitting NODDI model on this subject takes %f hours ...\n', t./3600);

%% Convert the estimated NODDI parameters into volumetric parameter maps
SaveParamsAsNIfTI(strcat(prefix, '_fitted_params.mat'), strcat(prefix, '_fitted_original.mat'), brain_mask, prefix)
function [result status] = python(varargin)
%python Execute python command and return the result.
%   python(pythonFILE) calls python script specified by the file pythonFILE
%   using appropriate python executable.
%
%   python(pythonFILE,ARG1,ARG2,...) passes the arguments ARG1,ARG2,...
%   to the python script file pythonFILE, and calls it by using appropriate
%   python executable.
%
%   RESULT=python(...) outputs the result of attempted python call. If the
%   exit status of python is not zero, an error will be returned.
%
%   [RESULT,STATUS] = python(...) outputs the result of the python call, and
%   also saves its exit status into variable STATUS.
%
%   NOTE(review): this helper is adapted from the MathWorks perl.m wrapper;
%   the original download note pointing to http://www.cpan.org applied to
%   perl, not python.
%
%   See also SYSTEM, JAVA, MEX.

%   Copyright 1990-2007 The MathWorks, Inc.
%   $Revision: 1.1.4.11 $

cmdString = '';

% Add input to arguments to operating system command to be executed.
% (If an argument refers to a file on the MATLAB path, use full file path.)
for i = 1:nargin
thisArg = varargin{i};
if ~ischar(thisArg)
error(message('MATLAB:python:InputsMustBeStrings'));
end
if i==1
if exist(thisArg, 'file')==2
% This is a valid file on the MATLAB path
if isempty(dir(thisArg))
% Not complete file specification
% - file is not in current directory
% - OR filename specified without extension
% ==> get full file path
thisArg = which(thisArg);
end
else
% First input argument is pythonFile - it must be a valid file
error(message('MATLAB:python:FileNotFound', thisArg));
end
end
% Wrap thisArg in double quotes if it contains spaces
if isempty(thisArg) || any(thisArg == ' ')
thisArg = ['"', thisArg, '"']; %#ok<AGROW>
end
% Add argument to command string
cmdString = [cmdString, ' ', thisArg]; %#ok<AGROW>
end
% Execute python script
if isempty(cmdString)
error(message('MATLAB:python:NopythonCommand'));
elseif ispc
% PC: prepend the MATLAB-shipped python directory to PATH, then run
pythonCmd = fullfile(matlabroot, 'sys\python\win32\bin\');
cmdString = ['python' cmdString];
pythonCmd = ['set PATH=',pythonCmd, ';%PATH%&' cmdString];
[status, result] = dos(pythonCmd);
else
% UNIX: rely on a `python` executable being available on the system PATH
[status ignore] = unix('which python'); %#ok
if (status == 0)
cmdString = ['python', cmdString];
[status, result] = unix(cmdString);
else
error(message('MATLAB:python:NoExecutable'));
end
end
% Check for errors in shell command; raise only when the caller did not
% ask for the exit status as a second output
if nargout < 2 && status~=0
error(message('MATLAB:python:ExecutionError', result, cmdString));
end
import sys
import numpy as np
import os.path
def roundbval(bvalsFilename, newbvalsFilename, bStep):
    """Round the b-values in a .bval file.

    B-values are rounded to a threshold or forced to the nearest shell
    (bStep parameter) and written to a new bval file.

    Parameters
    ----------
    :param str bvalsFilename: The path to the input bval file.
    :param str newbvalsFilename: The path to the output (rounded) bval file.
    :param bStep: If a scalar, round b-values to the nearest integer multiple
        of bStep.  If a sequence, it is treated as an array of shells in
        increasing order and b-values are forced to the nearest shell value.
        A comma-separated string, as passed by the MATLAB wrapper
        (e.g. '0,300,700,2200'), is also accepted.
    :return: newbvalsFilename
    """
    if not os.path.exists(bvalsFilename):
        raise RuntimeError('bvals file not exist:' + bvalsFilename)
    # load the b-values
    bvals = np.loadtxt(bvalsFilename)
    # The MATLAB wrapper passes bStep as a comma-separated string; parse it
    # explicitly rather than with eval() (eval on external input is unsafe).
    if isinstance(bStep, str):
        bStep = [float(x) for x in bStep.split(',')]
    bStep = np.array(bStep, dtype=float)
    if bStep.size == 1 and bStep > 1.0:
        # scalar threshold: round to the nearest integer multiple of bStep
        print("-> Rounding b-values to nearest multiple of %s" % np.array_str(bStep))
        bvals = np.round(bvals / bStep) * bStep
    elif bStep.size > 1:
        # list of shells: force each b-value to the nearest shell
        print("-> Setting b-values to the closest shell in %s" % np.array_str(bStep))
        for i in range(0, bvals.size):
            diff = min(abs(bvals[i] - bStep))
            ind = np.argmin(abs(bvals[i] - bStep))
            # For non-zero shells, warn if the actual b-value is off by more
            # than 5%. For the b=0 shell, warn above 100 (possible data error).
            if (bStep[ind] == 0.0 and diff > 100) or (bStep[ind] > 0.0 and diff > bStep[ind] / 20.0):
                print("   Warning: measurement %d has b-value %d, being forced to %d" % (i, bvals[i], bStep[ind]))
            bvals[i] = bStep[ind]
    # write the rounded b-values on a single space-separated line
    np.savetxt(newbvalsFilename, bvals.T, fmt="%.06f", newline=' ')
    print("-> Writing new rounded bval file to [ %s ]" % newbvalsFilename)
    return newbvalsFilename
if __name__ == '__main__':
    # Command-line entry point: roundbval.py <bvals_in> <bvals_out> <bStep>
    in_bval = sys.argv[1]
    out_bval = sys.argv[2]
    shells = sys.argv[3]
    roundbval(in_bval, out_bval, shells)
\ No newline at end of file
% Manual test harnesses for noddiprocessing, with hard-coded local paths.
% Only the "CN" section below is active; the PT and PS sections are kept
% commented out for reference.

% % PT
% output_dir='/aramis/home/wen/test/NODDI_fit_notbash/PT/matlab';
% noddi_img='/aramis/home/wen/test/noddi_preprocessing_test/subjects/sub-PREVDEMALS0010001BE/ses-M0/noddi/preprocessing/eddy-current-corretion/sub-PREVDEMALS0010001BE_ses-M0_eddy.nii';
% brain_mask='/aramis/home/wen/test/noddi_preprocessing_test/subjects/sub-PREVDEMALS0010001BE/ses-M0/noddi/preprocessing/sub-PREVDEMALS0010001BE_ses-M0_unwarp_B0_topup_mask.nii';
% bval='/aramis/home/wen/test/noddi_preprocessing_test/subjects/sub-PREVDEMALS0010001BE/ses-M0/noddi/preprocessing/original/sub-PREVDEMALS0010001BE_ses-M0.bval';
% bvec='/aramis/home/wen/test/noddi_preprocessing_test/subjects/sub-PREVDEMALS0010001BE/ses-M0/noddi/preprocessing/original/sub-PREVDEMALS0010001BE_ses-M0.bvec';
% prefix='matlab';
% noddi_model='WatsonSHStickTortIsoV_B0';
% num_cores=4;
% bStep ='0,300,700,2200';
% noddiprocessing(output_dir,noddi_img, brain_mask, bval, bvec, prefix, noddi_model, num_cores, bStep)
%% CN test with just one slice
output_dir='/Users/junhao.wen/test/NODDI';
noddi_img='/Users/junhao.wen/Hao/Dataset/NODDI_example_dataset/PREVDEMALS_03CN_preprocessed_data/sub-PREVDEMALS0010003PB_ses-M0_eddy.nii';
brain_mask='/Users/junhao.wen/Hao/Dataset/NODDI_example_dataset/PREVDEMALS_03CN_preprocessed_data/sub-PREVDEMALS0010003PB_ses-M0_unwarp_B0_topup_mask.nii';
roi_mask='/Users/junhao.wen/Hao/Dataset/NODDI_example_dataset/PREVDEMALS_03CN_preprocessed_data/sub-PREVDEMALS0010003PB_test_roi_mask.nii';
bval='/Users/junhao.wen/Hao/Dataset/NODDI_example_dataset/PREVDEMALS_03CN_preprocessed_data/sub-PREVDEMALS0010003PB_ses-M0.bval';
bvec='/Users/junhao.wen/Hao/Dataset/NODDI_example_dataset/PREVDEMALS_03CN_preprocessed_data/sub-PREVDEMALS0010003PB_ses-M0.bvec';
prefix='sub-PREVDEMALS0010003PB_ses-M0';
num_cores=4;
bStep ='0,300,700,2200';
% NOTE(review): noddiprocessing declares 10 parameters
% (..., bStep, noddi_toolbox_dir, num_cores); this call passes only 9 and
% puts num_cores where noddi_toolbox_dir is expected -- confirm/fix.
noddiprocessing(output_dir,noddi_img, brain_mask, roi_mask, bval, bvec, prefix, bStep, num_cores)
%%
% %% PS
% output_dir='/aramis/home/wen/test/NODDI_fit_notbash/PS/matlab';
% noddi_img='/aramis/home/wen/test/noddi_preprocessing_test/subjects/sub-PREVDEMALS0010007RC/ses-M0/noddi/preprocessing/original/sub-PREVDEMALS0010007RC_ses-M0.nii';
% brain_mask='/aramis/home/wen/test/noddi_preprocessing_test/subjects/sub-PREVDEMALS0010007RC/ses-M0/noddi/preprocessing/sub-PREVDEMALS0010007RC_ses-M0_unwarp_B0_topup_mask.nii';
% bval='/aramis/home/wen/test/noddi_preprocessing_test/subjects/sub-PREVDEMALS0010007RC/ses-M0/noddi/preprocessing/original/sub-PREVDEMALS0010007RC_ses-M0.bval';
% bvec='/aramis/home/wen/test/noddi_preprocessing_test/subjects/sub-PREVDEMALS0010007RC/ses-M0/noddi/preprocessing/original/sub-PREVDEMALS0010007RC_ses-M0.bvec';
% prefix='matlab';
% noddi_model='WatsonSHStickTortIsoV_B0';
% num_cores=4;
% bStep ='0,300,700,2200';
% noddiprocessing(output_dir,noddi_img, brain_mask, bval, bvec, prefix, noddi_model, num_cores, bStep)
% %cd(output_dir);
% %SaveParamsAsNIfTI('FittedParams.mat', 'NODDI_roi.mat', brain_mask, prefix)
% %a=load('FittedParams.mat')
% %cd('/aramis/dataARAMIS/users/junhao.wen/PhD/PREVDEMALS/Noddi/data/NODDI_matlabtoolbox_data');
% %b=load('FittedParams.mat')
% %fprint('shit')
# `dwi_preprocessing_noddi` - <VERY_SHORT_DESCRIPTION>
<SHORT_DESCRIPTION>
## Dependencies
<!-- If you installed the docker image of Clinica, nothing is required. -->
If you only installed the core of Clinica, this pipeline needs the installation of **<software_package>** on your computer. You can find how to install this software on the [installation](../#installing-clinica-from-source) page.
## Running the pipeline
The pipeline can be run with the following command line:
```
clinica run dwi_preprocessing_noddi bids_directory caps_directory
```
where:
- `bids_directory` is the input folder containing the dataset in a [BIDS](../BIDS) hierarchy.
- `caps_directory` is the output folder containing the results in a [CAPS](../CAPS) hierarchy.
- `<ARG_1>` <ARG_1_DESCRIPTION>
- `<ARG_2>` <ARG_2_DESCRIPTION>
If you want to run the pipeline on a subset of your BIDS dataset, you can use the `-tsv` flag to specify in a TSV file the participants belonging to your subset.
!!! note
If you have
## Outputs
Results are stored in the following folder of the [CAPS hierarchy](docs/CAPS): `subjects/sub-<participant_label>/ses-<session_label>/<some_folder>`.
The main output files are:
- `<source_file>_labelname-<label>_mainoutput1`: description of main output 1.
- `<source_file>_labelname-<label>_mainoutput2`: description of main output 2.
The full list of output files can be found in the [ClinicA Processed Structure (CAPS) Specification](https://docs.google.com/document/d/14mjXbqRceHK0fD0BIONniLK713zY7DbQHJEV7kxqsd8/edit#heading=h.f4ddnk971gkn).
<!--## Visualization of the results-->
<!--!!! note-->
<!-- The visualization command is not available for the moment. Please come back later, this section will be updated ASAP.-->
## Describing this pipeline in your paper
<!--You can have a single version for your pipeline-->
!!! cite "Example of paragraph (short version):"
These results have been obtained using the `dwi_preprocessing_noddi` pipeline of Clinica. This pipeline is a ...
!!! cite "Example of paragraph (long version):"
These results have been obtained using the `dwi_preprocessing_noddi` pipeline of Clinica. More precisely,...
!!! tip
Easily access the papers cited on this page on [Zotero](https://www.zotero.org/groups/1517933/aramis_clinica/items/collectionKey/2DHP3WXH).
# coding: utf8
__author__ = "Junhao Wen"
__copyright__ = "Copyright 2016-2018, The Aramis Lab Team"
__credits__ = ["Junhao Wen"]
__license__ = "See LICENSE.txt file"
__version__ = "0.1.0"
__maintainer__ = "Junhao Wen"
__email__ = "Junhao.Wen@inria.fr"
__status__ = "Development"
import clinica.engine as ce
class DwiPreprocessingNoddiCLI(ce.CmdParser):
    """Command-line interface for the multi-shell (NODDI) DWI preprocessing pipeline."""

    def __init__(self):
        super(DwiPreprocessingNoddiCLI, self).__init__()

    def define_name(self):
        """Define the sub-command name to run this pipeline."""
        self._name = 'dwi-preprocessing-multi-shell'

    def define_description(self):
        """Define a description of this pipeline."""
        self._description = 'Preprocessing of raw DWI datasets with multi-shell acquisitions and phase encoding directions: http://clinica.run/doc/Pipelines/DWI_Preprocessing/'

    def define_options(self):
        """Define the sub-command arguments."""
        from clinica.engine.cmdparser import PIPELINE_CATEGORIES
        # Compulsory arguments
        clinica_comp = self._args.add_argument_group(PIPELINE_CATEGORIES['CLINICA_COMPULSORY'])
        clinica_comp.add_argument("bids_directory",
                                  help='Path to the BIDS directory.')
        clinica_comp.add_argument("caps_directory",
                                  help='Path to the CAPS directory.')
        clinica_comp.add_argument("echo_spacing", type=float,
                                  help='The echo spacing such that EffectiveEchoSpacing=echo_spacing/acceleration_factor (see BIDS specifications and https://fsl.fmrib.ox.ac.uk/fsl/fslwiki/topup/Faq for details).')
        clinica_comp.add_argument("acceleration_factor", type=int,
                                  help='Acceleration factor such that EffectiveEchoSpacing=echo_spacing/acceleration_factor (see BIDS specifications and https://fsl.fmrib.ox.ac.uk/fsl/fslwiki/topup/Faq for details).')
        clinica_comp.add_argument("phase_encoding_direction", type=str,
                                  help='Phase encoding direction using FSL convention (e.g. y- or y). Be careful, this is currently not the BIDS convention (i.e. j- or j).')
        clinica_comp.add_argument("phase_encoding_direction_alternative", type=str,
                                  help='The opposite phase encoding direction (e.g. if phase_encoding_direction is y- then phase_encoding_direction_alternative will be y).')
        clinica_comp.add_argument("epi_factor", type=int,
                                  help='EPI factor (e.g. 128) used for the computation of the TotalReadoutTime (see https://fsl.fmrib.ox.ac.uk/fsl/fslwiki/eddy/Faq for details)')
        # Optional arguments
        clinica_opt = self._args.add_argument_group(PIPELINE_CATEGORIES['CLINICA_OPTIONAL'])
        clinica_opt.add_argument("-wd", "--working_directory",
                                 help='Temporary directory to store pipeline intermediate results')
        clinica_opt.add_argument("-np", "--n_procs", type=int,
                                 help='Number of cores used to run in parallel')
        clinica_opt.add_argument("-tsv", "--subjects_sessions_tsv",
                                 help='TSV file containing a list of subjects with their sessions.')

    def run_command(self, args):
        """Build and run the pipeline from the parsed command-line arguments."""
        from tempfile import mkdtemp
        from dwi_preprocessing_noddi_pipeline import DwiPreprocessingNoddi
        import os
        import errno
        from clinica.iotools.utils.data_handling import create_subs_sess_list
        from nipype import config
        # Keep nipype working-directory names short and stable
        cfg = dict(execution={'parameterize_dirs': False})
        config.update_config(cfg)
        # When no subject/session list is given, build one covering the whole BIDS dataset
        if args.subjects_sessions_tsv is None:
            try:
                temp_dir = mkdtemp()
            except OSError as exception:
                if exception.errno != errno.EEXIST:
                    raise
            create_subs_sess_list(args.bids_directory, temp_dir)
            args.subjects_sessions_tsv = os.path.join(temp_dir, 'subjects_sessions_list.tsv')
        # Run the DWIPreprocessingNoddi Pipeline from command line
        pipeline = DwiPreprocessingNoddi(
            bids_directory=self.absolute_path(args.bids_directory),
            caps_directory=self.absolute_path(args.caps_directory),
            tsv_file=self.absolute_path(args.subjects_sessions_tsv))
        pipeline.parameters = {
            # pass these args by using self.parameters in a dictionary
            'epi_param': dict([('echospacing', args.echo_spacing), ('acc_factor', args.acceleration_factor), ('enc_dir', args.phase_encoding_direction), ('epi_factor', args.epi_factor)]),
            'alt_epi_params': dict([('echospacing', args.echo_spacing), ('acc_factor', args.acceleration_factor), ('enc_dir_alt', args.phase_encoding_direction_alternative), ('epi_factor', args.epi_factor)])
        }
        if args.working_directory is None:
            args.working_directory = mkdtemp()
        pipeline.base_dir = self.absolute_path(args.working_directory)
        if args.n_procs:
            pipeline.run(plugin='MultiProc',
                         plugin_args={'n_procs': args.n_procs})
        elif getattr(args, 'slurm', False):
            # Bug fix: `--slurm` is never declared in define_options, so the
            # previous `args.slurm` raised AttributeError whenever -np was
            # not given; guard with getattr so plain runs work.
            pipeline.run(plugin='SLURM')
        else:
            pipeline.run()
\ No newline at end of file
# coding: utf8
__author__ = "Junhao Wen"
__copyright__ = "Copyright 2016-2018, The Aramis Lab Team"
__credits__ = ["Junhao Wen"]
__license__ = "See LICENSE.txt file"
__version__ = "0.1.0"
__maintainer__ = "Junhao Wen"
__email__ = "Junhao.Wen@inria.fr"
__status__ = "Development"
# command line tool.
import clinica.pipelines.engine as cpe
class DwiPreprocessingNoddi(cpe.Pipeline):
    """Preprocessing of multi-shell DWI data for NODDI fitting.

    For each subject/session, grabs a pair of DWI acquisitions with opposite
    phase encoding directions (AP/PA) plus their bvec/bval files, runs the
    two-phase-encoding preprocessing workflow
    (`dwi_preprocessing_noddi_utils.noddi_preprocessing_twoped`), and writes
    the corrected DWI, rotated bvec, merged bval and b0 mask to the CAPS
    folder.

    Args:
        input_dir: A BIDS directory.
        output_dir: An empty output directory where CAPS structured data will be written.
        subjects_sessions_list: The Subjects-Sessions list file (in .tsv format).

    Returns:
        A clinica pipeline object containing the dwi_preprocessing_noddi pipeline.

    Example:
        >>> pipeline = DwiPreprocessingNoddi('~/MYDATASET_BIDS', '~/MYDATASET_CAPS')
        >>> pipeline.parameters = {
        >>>     # ...
        >>> }
        >>> pipeline.base_dir = '/tmp/'
        >>> pipeline.run()
    """

    def check_custom_dependencies(self):
        """Check dependencies that can not be listed in the `info.json` file."""
        pass

    def get_input_fields(self):
        """Specify the list of possible inputs of this pipeline.

        Returns:
            A list of (string) input fields name.
        """
        # ap/pa: the two acquisitions with opposite phase encoding directions
        return ['bids_ap_dwi', 'bids_ap_dwi_bvec', 'bids_ap_dwi_bval', 'bids_pa_dwi', 'bids_pa_dwi_bvec', 'bids_pa_dwi_bval',
                'epi_param', 'epi_param_alt']

    def get_output_fields(self):
        """Specify the list of possible outputs of this pipeline.

        Returns:
            A list of (string) output fields name.
        """
        return ['preproc_dwi', 'preproc_bvec', 'preproc_bval', 'b0_mask']

    def build_input_node(self):
        """Build and connect an input node to the pipeline.

        Grabs the AP/PA DWI files for the subjects/sessions listed in the
        TSV file, then iterates over them through a synchronized identity
        node so that each subject/session is processed independently.
        """
        import nipype.interfaces.utility as nutil
        import nipype.pipeline.engine as npe
        import dwi_preprocessing_noddi_utils as utils
        # One flat list per input field, all in the same subject order
        bids_ap_dwi, bids_ap_dwi_bvec, bids_ap_dwi_bval, bids_pa_dwi, bids_pa_dwi_bvec, bids_pa_dwi_bval = utils.grab_noddi_bids_files(self.bids_directory, self.tsv_file)
        # Zip the per-field lists into per-subject tuples for the iterables below
        list_tuple = utils.create_list_tuple(bids_ap_dwi, bids_ap_dwi_bvec, bids_ap_dwi_bval, bids_pa_dwi, bids_pa_dwi_bvec, bids_pa_dwi_bval)
        # synchronize=True makes the six iterable fields advance together
        # (one tuple per subject) instead of taking their cross-product
        parallelsubjects = npe.Node(name="noddi_preprocessing_parallelization", interface=nutil.IdentityInterface(
            fields=['bids_ap_dwi', 'bids_ap_dwi_bvec', 'bids_ap_dwi_bval', 'bids_pa_dwi', 'bids_pa_dwi_bvec',
                    'bids_pa_dwi_bval'], mandatory_inputs=True), synchronize=True, iterables=[('bids_ap_dwi', 'bids_ap_dwi_bvec', 'bids_ap_dwi_bval', 'bids_pa_dwi',
                                                                                              'bids_pa_dwi_bvec', 'bids_pa_dwi_bval'), list_tuple])
        parallelsubjects.inputs.bids_ap_dwi = bids_ap_dwi
        parallelsubjects.inputs.bids_ap_dwi_bvec = bids_ap_dwi_bvec
        parallelsubjects.inputs.bids_ap_dwi_bval = bids_ap_dwi_bval
        parallelsubjects.inputs.bids_pa_dwi = bids_pa_dwi
        parallelsubjects.inputs.bids_pa_dwi_bvec = bids_pa_dwi_bvec
        parallelsubjects.inputs.bids_pa_dwi_bval = bids_pa_dwi_bval
        self.connect(
            [(parallelsubjects, self.input_node, [('bids_ap_dwi', 'bids_ap_dwi')]),
             (parallelsubjects, self.input_node, [('bids_ap_dwi_bvec', 'bids_ap_dwi_bvec')]),
             (parallelsubjects, self.input_node, [('bids_ap_dwi_bval', 'bids_ap_dwi_bval')]),
             (parallelsubjects, self.input_node, [('bids_pa_dwi', 'bids_pa_dwi')]),
             (parallelsubjects, self.input_node, [('bids_pa_dwi_bvec', 'bids_pa_dwi_bvec')]),
             (parallelsubjects, self.input_node, [('bids_pa_dwi_bval', 'bids_pa_dwi_bval')]),
             ])

    def build_output_node(self):
        """Build and connect an output node to the pipeline.

        Derives the CAPS container path and filename substitutions from the
        AP DWI filename, then sinks the preprocessed outputs there.
        """
        import nipype.interfaces.utility as nutil
        import nipype.pipeline.engine as npe
        import nipype.interfaces.io as nio
        from os.path import join
        import dwi_preprocessing_noddi_utils as utils
        # Find container path from DWI filename
        # =====================================
        get_identifiers = npe.MapNode(nutil.Function(
            input_names=['in_file', 'caps_directory'], output_names=['base_directory', 'subst_tuple_list'],
            function=utils.get_subid_sesid), name='get_subid_sesid', iterfield=['in_file'])
        get_identifiers.inputs.caps_directory = self.caps_directory
        # datasink: one sink per subject (MapNode over the per-subject fields)
        datasink = npe.MapNode(nio.DataSink(infields=['@preproc_bval', '@preproc_dwi', '@preproc_bvec', '@b0_mask']),
                               name='datasinker',
                               iterfield=['base_directory', 'substitutions', '@preproc_bval', '@preproc_dwi', '@preproc_bvec', '@b0_mask'])
        self.connect([
            # feed the datasink its destination and renaming rules
            (self.input_node, get_identifiers, [('bids_ap_dwi', 'in_file')]),
            (get_identifiers, datasink, [('base_directory', 'base_directory')]),
            (get_identifiers, datasink, [('subst_tuple_list', 'substitutions')]),
            # preprocessed output files
            (self.output_node, datasink, [('preproc_bval', '@preproc_bval')]),
            (self.output_node, datasink, [('preproc_dwi', '@preproc_dwi')]),
            (self.output_node, datasink, [('preproc_bvec', '@preproc_bvec')]),
            (self.output_node, datasink, [('b0_mask', '@b0_mask')])
        ])

    def build_core_nodes(self):
        """Build and connect the core nodes of the pipeline.

        Wraps the per-subject two-phase-encoding preprocessing workflow and
        wires the AP files as the main input and the PA files as the
        alternative (opposite-direction) input.
        """
        import dwi_preprocessing_noddi_utils as utils
        one_subj_noddi = utils.noddi_preprocessing_twoped(self.caps_directory, epi_params=self.parameters['epi_param'],
                                                          alt_epi_params=self.parameters['alt_epi_params'])
        # Connection
        # ==========
        self.connect([
            (self.input_node, one_subj_noddi, [('bids_ap_dwi', 'inputnode.in_file')]),
            (self.input_node, one_subj_noddi, [('bids_ap_dwi_bvec', 'inputnode.in_bvec')]),
            (self.input_node, one_subj_noddi, [('bids_ap_dwi_bval', 'inputnode.in_bval')]),
            (self.input_node, one_subj_noddi, [('bids_pa_dwi', 'inputnode.alt_file')]),
            (self.input_node, one_subj_noddi, [('bids_pa_dwi_bvec', 'inputnode.alt_bvec')]),
            (self.input_node, one_subj_noddi, [('bids_pa_dwi_bval', 'inputnode.alt_bval')]),
            # outputs
            (one_subj_noddi, self.output_node, [('outputnode.ecc_out_file', 'preproc_dwi')]),  # noqa
            (one_subj_noddi, self.output_node, [('outputnode.out_bvec', 'preproc_bvec')]),  # noqa
            (one_subj_noddi, self.output_node, [('outputnode.original_merged_bval', 'preproc_bval')]),  # noqa
            (one_subj_noddi, self.output_node, [('outputnode.out_mask', 'b0_mask')])  # noqa
        ])
\ No newline at end of file
# coding: utf8
"""dwi_preprocessing_noddi - Clinica Visualizer.
This file has been generated automatically by the `clinica generate template`
command line tool. See here for more details:
http://clinica.run/doc/InteractingWithClinica/
"""
\ No newline at end of file
{
"id": "aramislab/dwi_preprocessing_noddi",
"author": "John Doe",
"version": "0.1.0",
"dependencies": [
{
"type": "binary",
"name": "clinica",
"version": ">=0.1.0"
}
]
}
\ No newline at end of file
# `dwi_processing_noddi` - <VERY_SHORT_DESCRIPTION>
<SHORT_DESCRIPTION>
## Dependencies
<!-- If you installed the docker image of Clinica, nothing is required. -->
If you only installed the core of Clinica, this pipeline needs the installation of **<software_package>** on your computer. You can find how to install this software on the [installation](../#installing-clinica-from-source) page.
## Running the pipeline
The pipeline can be run with the following command line:
```
clinica run dwi_processing_noddi bids_directory caps_directory
```
where:
- `bids_directory` is the input folder containing the dataset in a [BIDS](../BIDS) hierarchy.
- `caps_directory` is the output folder containing the results in a [CAPS](../CAPS) hierarchy.
- `<ARG_1>` <ARG_1_DESCRIPTION>
- `<ARG_2>` <ARG_2_DESCRIPTION>
If you want to run the pipeline on a subset of your BIDS dataset, you can use the `-tsv` flag to specify in a TSV file the participants belonging to your subset.
!!! note
If you have
## Outputs
Results are stored in the following folder of the [CAPS hierarchy](docs/CAPS): `subjects/sub-<participant_label>/ses-<session_label>/<some_folder>`.
The main output files are:
- `<source_file>_labelname-<label>_mainoutput1`: description of main output 1.
- `<source_file>_labelname-<label>_mainoutput2`: description of main output 2.
The full list of output files can be found in the [ClinicA Processed Structure (CAPS) Specification](https://docs.google.com/document/d/14mjXbqRceHK0fD0BIONniLK713zY7DbQHJEV7kxqsd8/edit#heading=h.f4ddnk971gkn).
<!--## Visualization of the results-->
<!--!!! note-->
<!-- The visualization command is not available for the moment. Please come back later, this section will be updated ASAP.-->
## Describing this pipeline in your paper
<!--You can have a single version for your pipeline-->
!!! cite "Example of paragraph (short version):"
These results have been obtained using the `dwi_processing_noddi` pipeline of Clinica. This pipeline is a ...
!!! cite "Example of paragraph (long version):"
These results have been obtained using the `dwi_processing_noddi` pipeline of Clinica. More precisely,...
!!! tip
Easily access the papers cited on this page on [Zotero](https://www.zotero.org/groups/1517933/aramis_clinica/items/collectionKey/2DHP3WXH).