Commit abeffdab by simonabottani

Add T1 Volume Parcellation Pipeline

parent ba16fba0
Pipeline #823 failed with stages
in 13 seconds
......@@ -124,6 +124,7 @@ def execute():
from clinica.pipelines.t1_volume_dartel2mni.t1_volume_dartel2mni_cli import T1VolumeDartel2MNICLI # noqa
from clinica.pipelines.t1_volume_new_template.t1_volume_new_template_cli import T1VolumeNewTemplateCLI # noqa
from clinica.pipelines.t1_volume_existing_template.t1_volume_existing_template_cli import T1VolumeExistingTemplateCLI #noqa
from clinica.pipelines.t1_volume_parcellation.t1_volume_parcellation_cli import T1VolumeParcellationCLI
from clinica.pipelines.dwi_preprocessing_using_t1.dwi_preprocessing_using_t1_cli import DWIPreprocessingUsingT1CLI # noqa
from clinica.pipelines.dwi_preprocessing_using_phasediff_fieldmap.dwi_preprocessing_using_phasediff_fieldmap_cli import DWIPreprocessingUsingPhaseDiffFieldmapCLI # noqa
from clinica.pipelines.dwi_processing_dti.dwi_processing_dti_cli import DWIProcessingDTICLI # noqa
......@@ -141,6 +142,7 @@ def execute():
T1VolumeDartel2MNICLI(),
T1VolumeNewTemplateCLI(),
T1VolumeExistingTemplateCLI(),
T1VolumeParcellationCLI(),
DWIPreprocessingUsingT1CLI(),
DWIPreprocessingUsingPhaseDiffFieldmapCLI(),
DWIProcessingDTICLI(),
......
......@@ -8,7 +8,7 @@ command line tool. See here for more details: https://gitlab.icm-institute.org/a
__author__ = "Jorge Samper Gonzalez"
__copyright__ = "Copyright 2016, The Aramis Lab Team"
__copyright__ = "Copyright 2016-2018, The Aramis Lab Team"
__credits__ = ["Jorge Samper Gonzalez"]
__license__ = "See LICENSE.txt file"
__version__ = "0.1.0"
......
......@@ -9,7 +9,7 @@ command line tool. See here for more details: https://gitlab.icm-institute.org/a
import clinica.engine as ce
__author__ = "Jorge Samper Gonzalez"
__copyright__ = "Copyright 2016, The Aramis Lab Team"
__copyright__ = "Copyright 2016-2018, The Aramis Lab Team"
__credits__ = ["Jorge Samper Gonzalez"]
__license__ = "See LICENSE.txt file"
__version__ = "0.1.0"
......
......@@ -12,7 +12,7 @@ from nipype.interfaces.base import TraitedSpec, File, traits
from nipype.utils.filemanip import split_filename
__author__ = "Jorge Samper Gonzalez"
__copyright__ = "Copyright 2016, The Aramis Lab Team"
__copyright__ = "Copyright 2016-2018, The Aramis Lab Team"
__credits__ = ["Jorge Samper Gonzalez"]
__license__ = "See LICENSE.txt file"
__version__ = "0.1.0"
......
......@@ -9,7 +9,7 @@ command line tool. See here for more details: https://gitlab.icm-institute.org/a
import clinica.engine as ce
__author__ = "Jorge Samper Gonzalez"
__copyright__ = "Copyright 2016, The Aramis Lab Team"
__copyright__ = "Copyright 2016-2018, The Aramis Lab Team"
__credits__ = ["Jorge Samper Gonzalez"]
__license__ = "See LICENSE.txt file"
__version__ = "0.1.0"
......
......@@ -9,7 +9,7 @@ command line tool. See here for more details: https://gitlab.icm-institute.org/a
import clinica.pipelines.engine as cpe
__author__ = "Jorge Samper Gonzalez"
__copyright__ = "Copyright 2016, The Aramis Lab Team"
__copyright__ = "Copyright 2016-2018, The Aramis Lab Team"
__credits__ = ["Jorge Samper Gonzalez"]
__license__ = "See LICENSE.txt file"
__version__ = "0.1.0"
......
# `t1-volume-parcellation` - <VERY_SHORT_DESCRIPTION>
<SHORT_DESCRIPTION>
#### Contents
- [Dependencies](#dependencies)
- [Running the pipeline (command line)](#running-the-pipeline-command-line)
- [Outputs](#outputs)
- [Visualization of the results](#visualization-of-the-results)
- [Describing this pipeline in your paper](#describing-this-pipeline-in-your-paper)
- [Appendix](#appendix)
## Dependencies
If you installed the docker image of Clinica, nothing is required.
If you only installed the core of Clinica, this pipeline needs the installation of **<software_package>** on your computer. You can find how to install this software on the [installation](docs/BeforeYouInstall) page.
## Running the pipeline (command line)
The pipeline can be run with the following command line:
```
clinica run t1-volume-parcellation caps_directory group_id
```
where:
- `caps_directory` is the output folder containing the results in a [CAPS](docs/CAPS) hierarchy
- `group_id` is the name of the group (as used during the T1 volume processing)
- `-tsv` / `--subjects_sessions_tsv` (optional) is a TSV file containing the subjects with their sessions
- `-atlases` / `--atlases` (optional) is the list of atlases used to compute the mean GM concentration in each region
## Outputs
Results are stored in the following folder of the [CAPS hierarchy](docs/CAPS): `subjects/sub-<participant_label>/ses-<session_label>/<some_folder>`.
The main output files are:
- `<source_file>_main_output_1`: description main output 1.
- `<source_file>_main_output_2`: description main output 2.
The full list of output files can be found in the [ClinicA Processed Structure (CAPS) Specification](https://docs.google.com/document/d/14mjXbqRceHK0fD0BIONniLK713zY7DbQHJEV7kxqsd8/edit#heading=h.f4ddnk971gkn).
## Visualization of the results
After the execution of the pipeline, you can check the outputs of a subject by running the command:
> **Notes:**
>
> _The visualization command is not available for the moment. Please come back later, this section will be updated ASAP._
## Describing this pipeline in your paper
> **Example of paragraph:**
>
>_These results have been obtained using the my-pipeline pipeline of Clinica. More precisely ..._
## Appendix
Further information can be found on [this supplementary page](docs/Pipelines/<My_Pipeline_Appendix>).
\ No newline at end of file
{
"id": "aramislab/t1-volume-parcellation",
"author": "John DOE",
"version": "0.1.0",
"dependencies": [
{
"type": "binary",
"name": "clinica",
"version": ">=0.1.0"
}
]
}
\ No newline at end of file
# coding: utf8
"""spm_parcellation - Clinica Command Line Interface.
This file has been generated automatically by the `clinica generate template`
command line tool. See here for more details:
https://gitlab.icm-institute.org/aramislab/clinica/wikis/docs/InteractingWithClinica.
"""
__author__ = "Simona Bottani"
__copyright__ = "Copyright 2016-2018, The Aramis Lab Team"
__credits__ = ["Simona Bottani"]
__license__ = "See LICENSE.txt file"
__version__ = "0.1.0"
__maintainer__ = "Simona Bottani"
__email__ = "simona.bottani@icm-institute.org"
__status__ = "Development"
import clinica.engine as ce
class T1VolumeParcellationCLI(ce.CmdParser):
def define_name(self):
"""Define the sub-command name to run this pipeline.
"""
self._name = 't1-volume-parcellation'
def define_options(self):
"""Define the sub-command arguments
"""
#self._args.add_argument("bids_directory",
# help='Path to the BIDS directory.')
self._args.add_argument("caps_directory",
help='Path to the CAPS directory.')
self._args.add_argument("group_id",
help = 'Current group name')
self._args.add_argument("-tsv", "--subjects_sessions_tsv",
help='TSV file containing the subjects with their sessions.')
#self._args.add_argument("-im_type", "--image_type", type = str, default = 'T1',
# choices =['T1', 'pet'],
# help = 'image type. Possible values are T1 and pet')
self._args.add_argument("-pet", "--pet_type", type = str, default = 'fdg',
choices = ['fdg', 'av45'],
help = 'PET image type. Possible values are fdg and av45.')
self._args.add_argument("-m", "--modulate", default='on',
choices=['on', 'off'],
help='A boolean. Modulate output images - no modulation preserves concentrations')
self._args.add_argument("-atlases", "--atlases", nargs='+', type=str,
default=['AAL2', 'LPBA40', 'Neuromorphometrics', 'AICHA', 'Hammers'],
choices=['AAL2', 'LPBA40', 'Neuromorphometrics', 'AICHA', 'Hammers'],
help='A list of atlases to use to calculate the mean GM concentration at each region')
self._args.add_argument("-wd", "--working_directory",
help='Temporary directory to store pipeline intermediate results')
self._args.add_argument("-np", "--n_procs", type=int,
help='Number of cores used to run in parallel')
def run_pipeline(self, args):
"""
"""
from tempfile import mkdtemp
from t1_volume_parcellation_pipeline import T1VolumeParcellation
# Most of the time, you will want to instantiate your pipeline with a
# BIDS and CAPS directory as inputs:
pipeline = T1VolumeParcellation(
bids_directory='./4',
caps_directory=self.absolute_path(args.caps_directory),
tsv_file=self.absolute_path(args.subjects_sessions_tsv))
#pipeline = spm_parcellation()
pipeline.parameters = {
# Add your own pipeline parameters here to use them inside your
# pipeline. See the file `spm_parcellation_pipeline.py` to
# see an example of use.
'group_id': args.group_id,
'pet_type': args.pet_type,
'modulate': args.modulate,
'atlases': args.atlases,
'wd': self.absolute_path(args.working_directory),
'n_procs': args.n_procs
}
if args.working_directory is None:
args.working_directory = mkdtemp()
pipeline.base_dir = self.absolute_path(args.working_directory)
if args.n_procs:
pipeline.run(plugin='MultiProc', plugin_args={'n_procs': args.n_procs})
else:
pipeline.run()
# coding: utf8
"""spm_parcellation - Clinica Pipeline.
This file has been generated automatically by the `clinica generate template`
command line tool. See here for more details:
https://gitlab.icm-institute.org/aramislab/clinica/wikis/docs/InteractingWithClinica.
"""
import clinica.pipelines.engine as cpe
__author__ = "Simona Bottani"
__copyright__ = "Copyright 2016-2018, The Aramis Lab Team"
__credits__ = ["Simona Bottani"]
__license__ = "See LICENSE.txt file"
__version__ = "0.1.0"
__maintainer__ = "Simona Bottani"
__email__ = "simona.bottani@icm-institute.org"
__status__ = "Development"
class T1VolumeParcellation(cpe.Pipeline):
    """Compute regional statistics on parcellated T1 gray-matter maps.

    Grabs gray-matter concentration maps produced by the SPM/DARTEL T1 volume
    pipelines from a CAPS folder (for a given group and modulation setting)
    and, for each requested atlas, writes per-region statistics to .tsv files
    back into the CAPS hierarchy.

    Args:
        input_dir: A BIDS directory.
        output_dir: An empty output directory where CAPS structured data will be written.
        subjects_sessions_list: The Subjects-Sessions list file (in .tsv format).

    Returns:
        A clinica pipeline object containing the t1-volume-parcellation pipeline.

    Example:
        >>> from t1_volume_parcellation_pipeline import T1VolumeParcellation
        >>> pipeline = T1VolumeParcellation('~/MYDATASET_BIDS', '~/MYDATASET_CAPS')
        >>> pipeline.parameters = {
        >>>     # ...
        >>> }
        >>> pipeline.base_dir = '/tmp/'
        >>> pipeline.run()
    """

    def check_custom_dependencies(self):
        """Check dependencies that cannot be listed in the `info.json` file."""
        pass

    def get_input_fields(self):
        """Specify the list of possible inputs of this pipeline.

        Returns:
            A list of (string) input fields name.
        """
        return ['file_list', 'atlas_list']

    def get_output_fields(self):
        """Specify the list of possible outputs of this pipeline.

        Returns:
            A list of (string) output fields name.
        """
        # No output fields are declared: outputs are written to the CAPS
        # folder through the DataSink created in `build_core_nodes`.
        pass

    def build_input_node(self):
        """Build and connect an input node to the pipeline.

        Reads the CLI arguments transmitted through `self.parameters`, grabs
        the matching gray-matter maps from the CAPS folder and feeds both to
        `self.input_node`.
        """
        from clinica.utils.stream import cprint
        import nipype.interfaces.utility as nutil
        import nipype.pipeline.engine as npe
        from clinica.lib.pycaps.caps_layout import CAPSLayout

        # Expose the CLI arguments to the core nodes through an
        # IdentityInterface node.
        read_parameters_node = npe.Node(name="LoadingCLIArguments",
                                        interface=nutil.IdentityInterface(
                                            fields=self.get_input_fields(),
                                            mandatory_inputs=True))
        read_parameters_node.inputs.atlas_list = self.parameters['atlases']

        caps_layout = CAPSLayout(self.caps_directory)
        cprint('------- INPUT FILES FOR EACH SUBJECTS -------')
        # Strip the 'sub-'/'ses-' prefixes and build alternation regexes so a
        # single layout query matches every requested subject/session.
        subjects_regex = '|'.join(sub[4:] for sub in self.subjects)
        unique_session = set(self.sessions)
        sessions_regex = '|'.join(ses[4:] for ses in unique_session)
        cprint(' * grabbing all files from CAPS folder')
        caps_file = caps_layout.get(return_type='file',
                                    subject=subjects_regex,
                                    session=sessions_regex,
                                    group_id=self.parameters['group_id'],
                                    modulation=self.parameters['modulate'])
        read_parameters_node.inputs.file_list = caps_file

        self.connect([
            (read_parameters_node, self.input_node, [('file_list', 'file_list')]),
            (read_parameters_node, self.input_node, [('atlas_list', 'atlas_list')])
        ])

    def build_output_node(self):
        """Build and connect an output node to the pipeline."""
        # Outputs are written directly by the DataSink in `build_core_nodes`,
        # so no dedicated output node is needed.
        pass

    def build_core_nodes(self):
        """Build and connect the core nodes of the pipeline.

        One MapNode computes the atlas statistics for each input file; a
        DataSink relocates the resulting .tsv files into the CAPS hierarchy.
        """
        import nipype.interfaces.utility as nutil
        import nipype.pipeline.engine as npe
        import t1_volume_parcellation_utils as spu
        import nipype.interfaces.io as nio

        # One atlas_statistics call per input gray-matter map.
        atlas_stats_node = npe.MapNode(nutil.Function(input_names=['file_list',
                                                                   'atlas_list'],
                                                      output_names=['atlas_statistics'],
                                                      function=spu.atlas_statistics),
                                       name='atlas_stats_node',
                                       iterfield=['file_list'])
        outputnode = npe.Node(nutil.IdentityInterface(fields=['atlas_statistics']),
                              name='outputnode',
                              mandatory_inputs=True)
        datasink = npe.Node(nio.DataSink(),
                            name='datasink')
        datasink.inputs.base_directory = self.caps_directory
        datasink.inputs.parameterization = True
        # Rewrite the sink path so each .tsv lands under
        # subjects/sub-*/ses-*/t1/spm/dartel/group-<id>/atlas_statistics/.
        datasink.inputs.regexp_substitutions = [
            (r'(.*)(atlas_statistics)/.*/(sub-(.*)_ses-(.*)_T1.*)$',
             r'\1/subjects/sub-\4/ses-\5/t1/spm/dartel/group-' + self.parameters['group_id'] + r'/\2/\3')]

        # Connection
        # ==========
        self.connect([
            # STEP 1
            (self.input_node, atlas_stats_node, [('file_list', 'file_list')]),
            (self.input_node, atlas_stats_node, [('atlas_list', 'atlas_list')]),
            (atlas_stats_node, outputnode, [('atlas_statistics', 'atlas_statistics')]),
            (outputnode, datasink, [('atlas_statistics', 'atlas_statistics')])
        ])
# coding: utf8
"""spm_parcellation - Clinica Utilities.
This file has been generated automatically by the `clinica generate template`
command line tool. See here for more details:
https://gitlab.icm-institute.org/aramislab/clinica/wikis/docs/InteractingWithClinica.
"""
__author__ = "Simona Bottani"
__copyright__ = "Copyright 2016-2018, The Aramis Lab Team"
__credits__ = ["Simona Bottani"]
__license__ = "See LICENSE.txt file"
__version__ = "0.1.0"
__maintainer__ = "Simona Bottani"
__email__ = "simona.bottani@icm-institute.org"
__status__ = "Development"
def atlas_statistics(file_list, atlas_list):
    """Compute per-region statistics of one gray-matter image for each atlas.

    For each atlas name provided, the mean over each region of the atlas is
    computed on the input image and saved to a .tsv file in the current
    working directory (nipype later relocates it through a DataSink).

    Args:
        file_list: Path to a single Nifti image. Despite the plural name,
            this receives one file at a time — the function is mapped over a
            list of files by a nipype MapNode (iterfield=['file_list']).
        atlas_list: List of atlas names to apply; names must match an
            `AtlasAbstract` subclass.

    Returns:
        List of absolute paths to the generated .tsv statistics files
        (one per recognized atlas name).
    """
    from os.path import abspath
    from nipype.utils.filemanip import split_filename
    from clinica.utils.atlas import AtlasAbstract
    from clinica.utils.statistics import statistics_on_atlas
    from clinica.utils.stream import cprint

    _, base, _ = split_filename(file_list)
    atlas_classes = AtlasAbstract.__subclasses__()

    atlas_statistics_list = []
    for atlas in atlas_list:
        for atlas_class in atlas_classes:
            # Match the requested atlas name against the registered
            # AtlasAbstract subclasses; unknown names are silently skipped.
            if atlas_class.get_name_atlas() == atlas:
                out_atlas_statistics = abspath(
                    base + '_space-' + atlas + '_map-graymatter_statistics.tsv')
                cprint(out_atlas_statistics)
                statistics_on_atlas(file_list, atlas_class(), out_atlas_statistics)
                atlas_statistics_list.append(out_atlas_statistics)
                break
    return atlas_statistics_list
"""t1_volume_parcellation - Clinica Visualizer.
This file has been generated automatically by the `clinica generate template`
command line tool. See here for more details: https://gitlab.icm-institute.org/aramislab/clinica/wikis/docs/InteractingWithClinica.
"""
\ No newline at end of file
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment