Commit 53557904 by Mauricio Diaz

Merge branch 'master' of gitlab.icm-institute.org:aramislab/clinica

parents ba3be5cc ab6c27e0
......@@ -125,11 +125,11 @@ def execute():
from clinica.pipelines.t1_volume_new_template.t1_volume_new_template_cli import T1VolumeNewTemplateCLI # noqa
from clinica.pipelines.t1_volume_existing_template.t1_volume_existing_template_cli import T1VolumeExistingTemplateCLI # noqa
from clinica.pipelines.t1_volume_parcellation.t1_volume_parcellation_cli import T1VolumeParcellationCLI
from clinica.pipelines.dwi_preprocessing_using_t1.dwi_preprocessing_using_t1_cli import DWIPreprocessingUsingT1CLI # noqa
from clinica.pipelines.dwi_preprocessing_noddi.dwi_preprocessing_noddi_cli import DwiPreprocessingNoddiCLI # noqa
from clinica.pipelines.dwi_processing_noddi.dwi_processing_noddi_cli import DwiProcessingNoddiCLI # noqa
from clinica.pipelines.dwi_preprocessing_using_phasediff_fieldmap.dwi_preprocessing_using_phasediff_fieldmap_cli import DWIPreprocessingUsingPhaseDiffFieldmapCLI # noqa
from clinica.pipelines.dwi_preprocessing_using_t1.dwi_preprocessing_using_t1_cli import DWIPreprocessingUsingT1CLI # noqa
from clinica.pipelines.dwi_processing_dti.dwi_processing_dti_cli import DWIProcessingDTICLI # noqa
from clinica.pipelines.dwi_processing_noddi.dwi_processing_noddi_cli import DwiProcessingNoddiCLI # noqa
from clinica.pipelines.fmri_preprocessing.fmri_preprocessing_cli import fMRIPreprocessingCLI # noqa
from clinica.pipelines.pet_volume.pet_volume_cli import PETVolumeCLI # noqa
from clinica.pipelines.pet_surface.pet_surface_cli import PetSurfaceCLI # noqa
......
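The classes imported above all follow Clinica's CmdParser convention (their _name is set in the generated CLI template shown at the end of this diff). How execute() actually wires them into the command line is not visible here; a purely illustrative registry sketch, with command-name strings that are assumptions rather than Clinica's real ones, could look like:

# Hypothetical sketch only: map command names to the CLI classes imported above
# (assumes the imports in this hunk have already run).
PIPELINE_CLIS = {
    'dwi-preprocessing-using-t1': DWIPreprocessingUsingT1CLI,
    'dwi-preprocessing-multi-shell': DwiPreprocessingNoddiCLI,
    'dwi-processing-dti': DWIProcessingDTICLI,
}

def get_cli(command_name):
    """Instantiate the CLI registered under command_name (names above are illustrative)."""
    return PIPELINE_CLIS[command_name]()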
......@@ -26,7 +26,7 @@ class DwiPreprocessingNoddiCLI(ce.CmdParser):
def define_description(self):
"""Define a description of this pipeline.
"""
self._description = 'Preprocessing of raw DWI datasets with multi-shell acquisitions and phase encoding directions: http://clinica.run/doc/Pipelines/DWI_Preprocessing/'
self._description = 'Preprocessing of raw DWI datasets with multi-shell acquisitions and opposite phase encoding directions: http://clinica.run/doc/Pipelines/DWI_Preprocessing/'
def define_options(self):
"""Define the sub-command arguments
......@@ -80,7 +80,6 @@ class DwiPreprocessingNoddiCLI(ce.CmdParser):
create_subs_sess_list(args.bids_directory, temp_dir)
args.subjects_sessions_tsv = os.path.join(temp_dir, 'subjects_sessions_list.tsv')
#Run the DWIPreprocessingNoddi Pipeline from command line
pipeline = DwiPreprocessingNoddi(
bids_directory=self.absolute_path(args.bids_directory),
caps_directory=self.absolute_path(args.caps_directory),
......@@ -102,4 +101,4 @@ class DwiPreprocessingNoddiCLI(ce.CmdParser):
elif args.slurm:
pipeline.run(plugin='SLURM')
else:
pipeline.run()
\ No newline at end of file
pipeline.run()
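The first hunk of this file builds a subjects/sessions list on the fly when none is given: create_subs_sess_list() writes subjects_sessions_list.tsv into a temporary directory and args.subjects_sessions_tsv is pointed at it. Assuming the usual Clinica layout for that file (two tab-separated columns, participant_id and session_id; the header is an assumption here, not shown in the diff), a minimal equivalent writer would be:

# Hypothetical sketch of what create_subs_sess_list() is assumed to produce:
# one row per (participant, session) pair found in the BIDS directory.
import csv
import os

def write_subjects_sessions_tsv(pairs, temp_dir):
    out_path = os.path.join(temp_dir, 'subjects_sessions_list.tsv')
    with open(out_path, 'w') as tsv:
        writer = csv.writer(tsv, delimiter='\t')
        writer.writerow(['participant_id', 'session_id'])  # assumed header
        for participant_id, session_id in pairs:
            writer.writerow([participant_id, session_id])
    return out_path

# e.g. write_subjects_sessions_tsv([('sub-CLNC01', 'ses-M00')], '/tmp')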
{
"id": "aramislab/dwi_preprocessing_noddi",
"id": "aramislab/dwi_preprocessing_multi_shell",
"author": "John Doe",
"version": "0.1.0",
"dependencies": [
......@@ -9,4 +9,4 @@
"version": ">=0.1.0"
}
]
}
\ No newline at end of file
}
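For context, the info.json renamed here follows the pipeline-descriptor layout visible across the two JSON hunks in this commit (id, author, version, and a dependencies list). Pieced together from those hunks, with the elided middle filled in by assumption, the full descriptor presumably looks roughly like this (written as a Python dict to stay in one language):

# Hypothetical reconstruction of the descriptor; field values in the hidden
# middle are assumptions based on the visible hunks.
import json

descriptor = {
    "id": "aramislab/dwi_preprocessing_multi_shell",
    "author": "John Doe",
    "version": "0.1.0",
    "dependencies": [
        {"type": "binary", "name": "clinica", "version": ">=0.1.0"},
    ],
}
print(json.dumps(descriptor, indent=4))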
......@@ -81,7 +81,5 @@ class DwiProcessingNoddiCLI(ce.CmdParser):
pipeline.base_dir = self.absolute_path(args.working_directory)
if args.n_procs:
pipeline.run(plugin='MultiProc', plugin_args={'n_procs': args.n_procs})
elif args.slurm:
pipeline.run(plugin='SLURM')
else:
pipeline.run()
\ No newline at end of file
pipeline.run()
......@@ -433,23 +433,8 @@ class CAPSTSVBasedInput(CAPSInput):
"""
#import pandas as pd
pass
#if self._images is not None:
# return self._images
#print self._group_id
#print self._atlas
#print self._image_type
#if self._image_type == 'T1':
# self._images = str('group-' + self._group_id + '_T1w_space-' + self._atlas + '_map-graymatter')
### to implement for PET
#return self._images
def get_x(self):
"""
......
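The commented-out body removed in this hunk sketches what CAPSTSVBasedInput.get_images() was meant to return for T1 inputs (a group/atlas-specific gray-matter map name); the live method still just passes. A runnable version of that sketched intent, offered only as a hypothesis since the real implementation remains to be written (PET was only marked "to implement" in the removed comments):

# Hypothetical sketch based on the removed comments; not the committed behaviour.
def build_group_image_name(group_id, atlas, image_type='T1'):
    """Return the CAPS map name the removed comments were aiming at."""
    if image_type == 'T1':
        return 'group-%s_T1w_space-%s_map-graymatter' % (group_id, atlas)
    # PET support was only a TODO in the removed comments.
    raise NotImplementedError('Only T1 is sketched here.')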
......@@ -37,7 +37,7 @@ def load_data(images, caps_directory, subjects, sessions, dataset):
participant_id = subjects
print len(participant_id)
session_id = sessions
for i in xrange(len(participant_id)):
......
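The context lines in this load_data hunk are Python 2 only (print statement, xrange). A Python 3 compatible sketch of the same lines, assuming subjects and sessions remain the parallel lists passed into load_data():

# Python 3 equivalent of the Python 2 context lines above (sketch only).
participant_id = subjects
print(len(participant_id))
session_id = sessions
for i in range(len(participant_id)):
    pass  # per-subject body unchanged from the original hunk (not shown here)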
......@@ -92,6 +92,11 @@ class KFoldCV(base.MLValidation):
mean_results = pd.DataFrame(all_results.apply(np.nanmean).to_dict(), columns=all_results.columns, index=[0, ])
mean_results.to_csv(path.join(output_dir, 'mean_results.tsv'),
index=False, sep='\t', encoding='utf-8')
print "Mean results of the classification:"
print "Balanced accuracy: %s" %(mean_results_df['balanced_accuracy'].to_string(index = False))
print "specificity: %s" % (mean_results_df['specificity'].to_string(index=False))
print "sensitivity: %s" % (mean_results_df['sensitivity'].to_string(index=False))
print "auc: %s" % (mean_results_df['auc'].to_string(index=False))
class RepeatedKFoldCV(base.MLValidation):
......@@ -312,6 +317,11 @@ class RepeatedHoldOut(base.MLValidation):
columns=all_results_df.columns, index=[0, ])
mean_results_df.to_csv(path.join(output_dir, 'mean_results.tsv'),
index=False, sep='\t', encoding='utf-8')
print "Mean results of the classification:"
print "Balanced accuracy: %s" %(mean_results_df['balanced_accuracy'].to_string(index = False))
print "specificity: %s" % (mean_results_df['specificity'].to_string(index=False))
print "sensitivity: %s" % (mean_results_df['sensitivity'].to_string(index=False))
print "auc: %s" % (mean_results_df['auc'].to_string(index=False))
self.compute_error_variance()
self.compute_accuracy_variance()
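The same five-line reporting block is added to both KFoldCV.save_results and RepeatedHoldOut.save_results. Two caveats are worth noting: the prints use Python 2 statement syntax, and in the KFoldCV hunk they reference mean_results_df while the surrounding context lines build a frame named mean_results, which as written would raise a NameError. A hedged Python 3 sketch of the same report, assuming the DataFrame in scope is the one-row frame saved to mean_results.tsv just above:

# Sketch only: same report as the added print statements, in Python 3 syntax.
# `mean_results_df` is assumed to be the one-row DataFrame written above
# (named `mean_results` in the KFoldCV hunk).
print("Mean results of the classification:")
for metric in ('balanced_accuracy', 'specificity', 'sensitivity', 'auc'):
    print("%s: %s" % (metric, mean_results_df[metric].to_string(index=False)))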
......@@ -427,6 +437,7 @@ class LearningCurveRepeatedHoldOut(base.MLValidation):
return self._classifier, self._best_params, self._split_results
def save_results(self, output_dir):
from clinica.utils.stream import cprint
if self._split_results is None:
raise Exception("No results to save. Method validate() must be run before save_results().")
......@@ -470,6 +481,7 @@ class LearningCurveRepeatedHoldOut(base.MLValidation):
mean_results_df = pd.DataFrame(iteration_results_df.apply(np.nanmean).to_dict(),
columns=iteration_results_df.columns, index=[0, ])
mean_results_df.to_csv(path.join(iteration_dir, 'mean_results.tsv'),
index=False, sep='\t', encoding='utf-8')
all_results_list.append(mean_results_df)
......
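This hunk adds a cprint import inside LearningCurveRepeatedHoldOut.save_results, but the call site falls outside the visible context. A plausible, assumed use, consistent with cprint being Clinica's helper for user-facing messages:

# Assumed usage only; the actual cprint call site is not visible in this hunk.
from clinica.utils.stream import cprint
iteration_dir = '/path/to/output_dir/iteration-0'  # placeholder path
cprint('Mean results of the iteration saved in %s' % iteration_dir)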
......@@ -7,10 +7,6 @@
"type": "binary",
"name": "clinica",
"version": ">=0.1.0"
},
{
"type": "software",
"name": "matlab"
}
]
}
\ No newline at end of file
}
......@@ -129,7 +129,7 @@ class StatisticsSurfaceCLI(ce.CmdParser):
if args.custom_file is None:
cprint('No feature type selected : using cortical thickness as default value')
args.custom_file = '@subject/@session/t1/freesurfer_cross_sectional/@subject_@session/surf/@hemi.thickness.fwhm@fwhm.fsaverage.mgh'
args.feature_label = 'cortical_thickness'
args.feature_label = 'ct'
else:
cprint('Using custom features.')
if args.feature_label is None:
......
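The default custom_file string uses Clinica's @-placeholders (@subject, @session, @hemi, @fwhm), which are substituted per subject and hemisphere when the surface files are gathered. The substitution code itself is not part of this diff; a minimal sketch of the idea, with the replacement mechanics assumed:

# Hypothetical sketch of how the @-placeholders in custom_file might be expanded.
def expand_custom_file(template, subject, session, hemi, fwhm):
    return (template.replace('@subject', subject)
                    .replace('@session', session)
                    .replace('@hemi', hemi)
                    .replace('@fwhm', str(fwhm)))

# e.g. expand_custom_file(
#     '@subject/@session/t1/freesurfer_cross_sectional/@subject_@session/surf/'
#     '@hemi.thickness.fwhm@fwhm.fsaverage.mgh',
#     'sub-CLNC01', 'ses-M00', 'lh', 20)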
......@@ -18,6 +18,11 @@ class {{ pipeline.class_name }}CLI(ce.CmdParser):
self._name = '{{ pipeline.command_name }}'
def define_description(self):
"""Define a description of this pipeline.
"""
# self._description = 'Brief description: https://gitlab.icm-institute.org/aramislab/clinica/wikis/docs/Pipelines/{{ pipeline.class_name }}'
def define_options(self):
"""Define the sub-command arguments
"""
......