Commit 75f5dd6a by Mauricio Diaz

Merge branch master into dev

parents d6123fa2 350f0b5c
Pipeline #959 passed with stages
in 4 minutes 47 seconds
# Python and system files
build/
dist/
examples/external-data
......@@ -10,10 +11,18 @@ Clinica.egg-info
*.lib
*.sys
*.so
.DS_Store
# Editor's files
.idea
.vscode
# Code coverage and crash log files
.coverage
crash*
.DS_Store
# Clinica specific files
clinica/lib/weighted_prototypes_lib/cpp_code/bin/CMakeCache.txt
clinica/lib/weighted_prototypes_lib/cpp_code/bin/CMakeFiles
clinica/lib/weighted_prototypes_lib/cpp_code/bin/ITKIOFactoryRegistration
......@@ -31,3 +40,9 @@ clinica/lib/weighted_prototypes_lib/community_latest/hierarchy
clinica/lib/weighted_prototypes_lib/community_latest/main_community.o
clinica/lib/weighted_prototypes_lib/community_latest/main_convert.o
clinica/lib/weighted_prototypes_lib/community_latest/main_hierarchy.o
# Pytest files
.pytest_cache/
test/data
# TODO:
# - [ ] Test dataset
# - [ ] Doc generation
# - [ ] Automatize pipeline CL tests
# - [ ] Automatize pipeline CL options tests
# Continuous Integration Script
# Clinica - ARAMIS
#
# Pipeline stage order: build -> install -> test -> deploy -> package.
stages:
- build
- install
- test
- deploy
- package
# Earlier commented-out copy of the variables block below, kept for reference.
#variables:
# BIDS_TEST: "/localdrive10TB/data/gitlab-runner/BIDS_TEST"
# CAPS_TEST: "/localdrive10TB/data/gitlab-runner/CAPS_TEST"
# WD_TEST: "/localdrive10TB/data/gitlab-runner/WD_TEST"
# ENVIRONMENT_TEST: "/export/applications/clinica/Ubuntu14.04/dot_path"
# Global variables: test dataset locations on the runner, the runner's
# environment file, and a pip cache inside the project dir so the `cache`
# block below can persist it between pipelines.
variables:
BIDS_TEST: "/localdrive10TB/data/gitlab-runner/BIDS_TEST"
CAPS_TEST: "/localdrive10TB/data/gitlab-runner/CAPS_TEST"
WD_TEST: "/localdrive10TB/data/gitlab-runner/WD_TEST"
ENVIRONMENT_TEST: "/export/applications/clinica/Ubuntu14.04/dot_path"
PIP_CACHE_DIR: "$CI_PROJECT_DIR/.cache"
# Cache the pip download directory between pipeline runs.
cache:
paths:
- .cache/
# Runs before every job: log shell/user context, then source the runner's
# environment file.
before_script:
- echo $SHELL
- pwd
- whoami
- cat $ENVIRONMENT_TEST
- source $ENVIRONMENT_TEST
# NOTE(review): `after_script:` has no commands (its only entry is commented
# out) and `job_pip_install:` has no body — both look like unresolved merge
# leftovers; GitLab CI rejects a job with no script. Confirm and clean up.
after_script:
# - conda env remove -y --name clinica_env
job_pip_install:
# Test the basic Clinica installation
# Build job (macOS runner).
# NOTE(review): this script mixes a pip-based install with the conda-env
# (re)creation snippet — it looks like two jobs were merged into one;
# confirm which installation path is intended.
build:mac:
stage: build
tags:
- macOS
script:
# Install Clinica's requirements and the package itself, then enable
# argcomplete for the `clinica` entry point.
- pip install -r requirements.txt
- pip install --no-deps --ignore-installed .
- eval "$(register-python-argcomplete clinica)"
# If the environment already exists or if it's changed
# (by editing the environment.yml file), the environment
# is recreated.
- >
if conda env list|grep -w "clinica_env"; then
if git diff HEAD~ --name-only|grep environment.yml; then
echo "REcreate environment clinica_env"
conda env create --force --file environment.yml
fi;
else
echo "Create environment clinica_env"
conda env create --force --file environment.yml
fi;
# TODO: Uncomment when unit tests are set
# job_py_test:
# stage: test
# script:
# - pytest .
# Build job (Linux runner): create the conda environment, or recreate it
# when environment.yml changed since the previous commit.
build:linux:
stage: build
tags:
- linux
script:
# If the environment already exists or if it's changed
# (by editing the environment.yml file), the environment
# is recreated.
- >
if conda env list|grep -w "clinica_env"; then
if git diff HEAD~ --name-only|grep environment.yml; then
echo "REcreate environment clinica_env"
conda env create --force --file environment.yml
fi;
else
echo "Create environment clinica_env"
conda env create --force --file environment.yml
fi;
# Install job (macOS): install Clinica into the conda env built in the
# previous stage and smoke-test the command-line entry point.
launch:mac:
# Install python dependencies for Clinica
stage: install
tags:
- install
- macOS
script:
# Activate the env, install the package, and verify `clinica --help` runs.
- source activate clinica_env
- pip install --ignore-installed .
- eval "$(register-python-argcomplete clinica)"
- clinica --help
- source deactivate
job_cli_test:
stage: test
# Install job (Linux): same smoke test as launch:mac, on the Linux runner.
launch:linux:
# Install python dependencies for Clinica
stage: install
tags:
- install
- linux
script:
# Activate the env, install the package, and verify `clinica --help` runs.
- source activate clinica_env
- pip install --ignore-installed .
- eval "$(register-python-argcomplete clinica)"
- clinica --help
- source deactivate
# TODO: Uncomment when computing power will be sufficient and CI policy is set
# job_pipelines_test:
# stage: test
# script:
# - clinica run fmri-preprocessing -wd $WD_TEST -np 2 $BIDS_TEST $CAPS_TEST
# after_script:
# - rm -rf $WD_TEST/*
# - rm -rf $CAPS_TEST/*
# TODO: Uncomment when everyone has switched to develop branch
# job_push_on_master:
# stage: deploy
# script:
# - git push origin develop:master
\ No newline at end of file
# Unit-test job (macOS): run the pytest suite against the CI dataset
# shared on the runner's /Volumes/builds mount.
unit:mac:
stage: test
tags:
- macOS
script:
- source activate clinica_env
# Load the third-party neuroimaging tools the pipelines shell out to.
- module load clinica/matlab
- module load clinica/itk
- module load clinica/ants
- module load clinica/fsl
- module load clinica/PETPVC
- module load clinica/dcm2niix
- module load clinica/freesurfer/5.3.0
- module load clinica/mrtrix3
- module load clinica/spm12
# Symlink the CI data into test/data (the path pytest expects), then run.
- cd test
- ln -s /Volumes/builds/Clinica/data_ci ./data
- pytest -v
- source deactivate
# Package job (tags only): build a source tarball of the repository and
# expose it as a pipeline artifact named after the tag.
pkg:
stage: package
tags:
- macOS
script:
# BUG FIX: the original command was
#   tar -cvzf --exclude='.gitignore' ... -f clinica-$CI_COMMIT_REF_NAME.tar.gz ./*
# With `-cvzf`, tar consumes the NEXT argument as the archive name, so
# `--exclude='.gitignore'` became the output file and the later `-f`
# conflicted with it. Excludes now come first and the archive name
# immediately follows `-cvzf`.
- tar --exclude='.gitignore' --exclude='.git' --exclude='.DS_Store' --exclude='.gitmodules' --exclude='.idea' --exclude='.vscode' --exclude='.gitlab-ci.yml' -cvzf clinica-$CI_COMMIT_REF_NAME.tar.gz ./*
artifacts:
name: "$CI_COMMIT_REF_NAME"
paths:
- clinica-$CI_COMMIT_REF_NAME.tar.gz
only:
- tags
# Clinica
[![pipeline status](https://gitlab.icm-institute.org/aramislab/clinica/badges/master/pipeline.svg)](https://gitlab.icm-institute.org/aramislab/clinica/commits/master)
Welcome to the [**Clinica**](http://clinica.run) software! All the information can be found on the [wiki](http://clinica.run/doc) section!
[![pipeline status](https://gitlab.icm-institute.org/aramislab/clinica/badges/master/pipeline.svg)](https://gitlab.icm-institute.org/aramislab/clinica/commits/master)
[![conda install](https://anaconda.org/aramislab/clinica/badges/installer/conda.svg)](http://clinica.run/doc/Installation/)
[![platform](https://anaconda.org/aramislab/clinica/badges/platforms.svg)](http://clinica.run/doc/Installation/)
[![license](https://anaconda.org/aramislab/clinica/badges/license.svg)](http://clinica.run/doc/Installation/)
0.1.1
\ No newline at end of file
0.1.2
......@@ -119,16 +119,18 @@ def execute():
from clinica.engine import CmdParser
from clinica.pipelines.t1_freesurfer_cross_sectional.t1_freesurfer_cross_sectional_cli import T1FreeSurferCrossSectionalCLI # noqa
from clinica.pipelines.t1_spm_segmentation.t1_spm_segmentation_cli import T1SPMSegmentationCLI # noqa
from clinica.pipelines.t1_spm_dartel.t1_spm_dartel_cli import T1SPMDartelCLI # noqa
from clinica.pipelines.t1_spm_dartel2mni.t1_spm_dartel2mni_cli import T1SPMDartel2MNICLI # noqa
from clinica.pipelines.t1_spm_full_prep.t1_spm_full_prep_cli import T1SPMFullPrepCLI # noqa
from clinica.pipelines.t1_spm_dartel_existing_template.t1_spm_dartel_existing_template_cli import T1SPMDartelExistingTemplateCLI #noqa
from clinica.pipelines.t1_volume_tissue_segmentation.t1_volume_tissue_segmentation_cli import T1VolumeTissueSegmentationCLI # noqa
from clinica.pipelines.t1_volume_create_dartel.t1_volume_create_dartel_cli import T1VolumeCreateDartelCLI # noqa
from clinica.pipelines.t1_volume_dartel2mni.t1_volume_dartel2mni_cli import T1VolumeDartel2MNICLI # noqa
from clinica.pipelines.t1_volume_new_template.t1_volume_new_template_cli import T1VolumeNewTemplateCLI # noqa
from clinica.pipelines.t1_volume_existing_template.t1_volume_existing_template_cli import T1VolumeExistingTemplateCLI #noqa
from clinica.pipelines.t1_volume_parcellation.t1_volume_parcellation_cli import T1VolumeParcellationCLI
from clinica.pipelines.dwi_preprocessing_using_t1.dwi_preprocessing_using_t1_cli import DWIPreprocessingUsingT1CLI # noqa
from clinica.pipelines.dwi_preprocessing_using_phasediff_fieldmap.dwi_preprocessing_using_phasediff_fieldmap_cli import DWIPreprocessingUsingPhaseDiffFieldmapCLI # noqa
from clinica.pipelines.dwi_processing_dti.dwi_processing_dti_cli import DWIProcessingDTICLI # noqa
from clinica.pipelines.fmri_preprocessing.fmri_preprocessing_cli import fMRIPreprocessingCLI # noqa
from clinica.pipelines.pet_preprocess_volume.pet_preprocess_volume_cli import PETPreprocessVolumeCLI # noqa
from clinica.pipelines.pet_volume.pet_volume_cli import PETVolumeCLI # noqa
from clinica.pipelines.pet_surface.pet_surface_cli import PetSurfaceCLI # noqa
from clinica.pipelines.statistics_surface.statistics_surface_cli import StatisticsSurfaceCLI # noqa
......@@ -136,17 +138,19 @@ def execute():
extra_dir="pipelines").load()
pipelines += [
T1FreeSurferCrossSectionalCLI(),
T1SPMSegmentationCLI(),
T1SPMDartelCLI(),
T1SPMDartel2MNICLI(),
T1SPMFullPrepCLI(),
T1SPMDartelExistingTemplateCLI(),
T1VolumeTissueSegmentationCLI(),
T1VolumeCreateDartelCLI(),
T1VolumeDartel2MNICLI(),
T1VolumeNewTemplateCLI(),
T1VolumeExistingTemplateCLI(),
T1VolumeParcellationCLI(),
DWIPreprocessingUsingT1CLI(),
DWIPreprocessingUsingPhaseDiffFieldmapCLI(),
DWIProcessingDTICLI(),
fMRIPreprocessingCLI(),
PETPreprocessVolumeCLI(),
StatisticsSurfaceCLI()
PETVolumeCLI(),
PetSurfaceCLI(),
StatisticsSurfaceCLI(),
]
run_parser = sub_parser.add_parser(
......
......@@ -411,7 +411,12 @@ def dcm_to_nii(input_path, output_path, bids_name):
if not os.path.exists(output_path):
os.mkdir(output_path)
os.system('dcm2niix -b n -z y -o ' + output_path + ' -f ' + bids_name + ' ' + input_path)
if 'bold' in bids_name:
#generation of the json file
os.system('dcm2niix -b y -o ' + output_path + ' -f ' + bids_name + ' ' + input_path)
else:
os.system('dcm2niix -b n -z y -o ' + output_path + ' -f ' + bids_name + ' ' + input_path)
# If dcm2niix didn't work use dcm2nii
if not os.path.exists(path.join(output_path, bids_name + '.nii.gz')):
......@@ -428,7 +433,7 @@ def get_bids_subjs_list(bids_path):
Given a BIDS compliant dataset, returns the list of all the subjects available
Args:
Args:
bids_path: path to the BIDS folder
"""
......
......@@ -13,6 +13,7 @@ __email__ = "jorge.samper-gonzalez@inria.fr"
__status__ = "Development"
def convert_adni_fmri(source_dir, csv_dir, dest_dir, subjs_list=None):
"""
......@@ -35,9 +36,16 @@ def convert_adni_fmri(source_dir, csv_dir, dest_dir, subjs_list=None):
subjs_list = list(adni_merge.PTID.unique())
cprint('Calculating paths of fMRI images. Output will be stored in ' + path.join(dest_dir, 'conversion_info') + '.')
images = compute_fmri_path(source_dir, csv_dir, dest_dir, subjs_list)
if path.isfile(path.join(dest_dir, 'conversion_info', 'fmri_paths.tsv')):
images = pd.io.parsers.read_csv(path.join(dest_dir, 'conversion_info', 'fmri_paths.tsv'), sep = '\t')
else:
images = compute_fmri_path(source_dir, csv_dir, dest_dir, subjs_list)
cprint('Paths of fMRI images found. Exporting images into BIDS ...')
fmri_paths_to_bids(images, dest_dir)
fmri_paths_to_bids(dest_dir, images)
cprint('fMRI conversion done.')
......@@ -196,6 +204,7 @@ def fmri_paths_to_bids(dest_dir, fmri_paths, mod_to_update=False):
import shutil
from glob import glob
import clinica.iotools.bids_utils as bids
from clinica.utils.stream import cprint
subjs_list = fmri_paths['Subject_ID'].drop_duplicates().values
......@@ -224,7 +233,10 @@ def fmri_paths_to_bids(dest_dir, fmri_paths, mod_to_update=False):
os.remove(existing_fmri[0])
if not os.path.exists(ses_path):
os.mkdir(ses_path)
if not os.path.exists(path.join(dest_dir, bids_id)):
os.mkdir(path.join(dest_dir, bids_id))
os.mkdir(path.join(dest_dir, bids_id, bids_ses_id))
#os.mkdir(ses_path)
fmri_info = fmri_paths[(fmri_paths['Subject_ID'] == subjs_list[i]) & (fmri_paths['VISCODE'] == ses)]
......@@ -235,7 +247,13 @@ def fmri_paths_to_bids(dest_dir, fmri_paths, mod_to_update=False):
fmri_path = fmri_info['Path'].values[0]
dcm_to_convert = adni_utils.check_two_dcm_folder(fmri_path, dest_dir,
fmri_info['IMAGEUID'].values[0])
bids.convert_fmri(dcm_to_convert, path.join(ses_path, 'func'), bids_file_name)
cprint(os.path.join(ses_path, 'func', bids_file_name +'_task-rest_bold.nii.gz'))
if not os.path.isfile(os.path.join(ses_path, 'func', bids_file_name +'_task-rest_bold.nii.gz')):
bids.convert_fmri(dcm_to_convert, path.join(ses_path, 'func'), bids_file_name)
else:
cprint("Images already converted")
# Delete the temporary folder used for copying fmri with 2 subjects inside the DICOM folder
if os.path.exists(path.join(dest_dir, 'tmp_dcm_folder')):
......@@ -293,3 +311,141 @@ def select_image_qc(id_list, mri_qc_subj):
selected_image = min(int_ids)
return int(selected_image)
def compute_fmri_path_refactoring(source_dir, clinical_dir, dest_dir, subjs_list):
    """Compute the paths to the ADNI fMRI images to convert into BIDS.

    The images to convert are chosen as follows:
        - extract the list of subjects from MAYOADIRL_MRI_FMRI_09_15_16.csv
        - keep only scans acquired on a Philips machine (field Scanner of
          IDA_MR_Metadata_Listing.csv)
        - discard scans with series_quality = 4 (4 means the scan is not
          usable) in MAYOADIRL_MRI_IMAGEQC_12_08_15.csv

    When multiple scans exist for the same subject/session/date:
        - if exactly one scan has series_selected == 1 in the QC file, keep it
        - otherwise keep the scan with the best quality (select_image_qc)

    Args:
        source_dir: path to the ADNI image folder
        clinical_dir: path to the directory with all the clinical data of ADNI
        dest_dir: path to the output folder
        subjs_list: list of subject identifiers (ADNI PTIDs)

    Returns:
        pandas DataFrame with one row per selected fMRI image; the table is
        also written to <dest_dir>/conversion_info/fmri_paths.tsv.
    """
    import os
    from os import path
    from os import walk
    import pandas as pd
    import logging
    from clinica.iotools.converters.adni_to_bids import adni_utils
    from clinica.utils.stream import cprint

    fmri_col = ['Subject_ID', 'VISCODE', 'Visit', 'IMAGEUID', 'Sequence', 'Scan Date', 'LONIUID', 'Scanner',
                'MagStregth', 'Path']
    fmri_df = pd.DataFrame(columns=fmri_col)

    # Load the requested clinical data.
    mayo_mri_fmri_path = path.join(clinical_dir, 'MAYOADIRL_MRI_FMRI_09_15_16.csv')
    mayo_mri_imageqc_path = path.join(clinical_dir, 'MAYOADIRL_MRI_IMAGEQC_12_08_15.csv')
    # BUG FIX: the IDA metadata load was commented out while the loop below
    # still reads `ida_mr_metadata`, which raised a NameError on first use.
    ida_mr_metadata_path = path.join(clinical_dir, 'IDA_MR_Metadata_Listing.csv')
    mayo_mri_fmri = pd.io.parsers.read_csv(mayo_mri_fmri_path, sep=',')
    ida_mr_metadata = pd.io.parsers.read_csv(ida_mr_metadata_path, sep=',')
    mayo_mri_imageqc = pd.io.parsers.read_csv(mayo_mri_imageqc_path, sep=',')

    for subj in subjs_list:
        # RID is the numeric part of the PTID (its last 4 characters).
        fmri_subjs_info = mayo_mri_fmri[(mayo_mri_fmri.RID == int(subj[-4:]))]
        # Extract the visits available, removing duplicates.
        visits_list = list(set(fmri_subjs_info['VISCODE2'].tolist()))

        if len(visits_list) != 0:
            for viscode in visits_list:
                visit = ''
                image_path = ''
                fmri_subj = fmri_subjs_info[fmri_subjs_info['VISCODE2'] == viscode]
                if not fmri_subj.empty:
                    # If there are multiple scans for the same session and
                    # subject, keep the one flagged 'series_selected' in the
                    # QC file, or fall back to the best-quality scan.
                    if len(fmri_subj) > 1:
                        fmri_imageuid = fmri_subj['IMAGEUID'].tolist()
                        loni_uid_list = ['I' + str(imageuid) for imageuid in fmri_imageuid]
                        images_qc = mayo_mri_imageqc[mayo_mri_imageqc.loni_image.isin(loni_uid_list)]
                        series_selected_values = images_qc['series_selected'].tolist()
                        sum_series_selected = sum(series_selected_values)
                        if sum_series_selected == 1:
                            imageuid_to_select = images_qc[images_qc['series_selected'] > 0]['loni_image'].iloc[
                                0].replace('I', '')
                        else:
                            imageuid_to_select = select_image_qc(fmri_imageuid, images_qc)
                        fmri_subj = fmri_subj[fmri_subj['IMAGEUID'] == int(imageuid_to_select)].iloc[0]
                    else:
                        fmri_subj = fmri_subj.iloc[0]

                    fmri_imageuid = fmri_subj['IMAGEUID']

                    # Discard scans made with a non-Philips scanner and scans
                    # with a bad quality.
                    fmri_metadata = ida_mr_metadata[ida_mr_metadata['IMAGEUID'] == fmri_imageuid]
                    if not fmri_metadata.empty:
                        fmri_metadata = fmri_metadata.iloc[0]
                        if not 'Philips' in fmri_metadata['Scanner']:
                            cprint('No Philips scanner for ' + subj + ' visit ' + viscode + '. Skipped.')
                            continue
                        elif 4 in mayo_mri_imageqc[mayo_mri_imageqc['loni_image'] == 'I' + str(fmri_imageuid)][
                                'series_quality'].values:
                            cprint('Bad scan quality for ' + subj + ' visit ' + viscode + '. Skipped.')
                            continue

                        scan_date = fmri_subj.SCANDATE
                        sequence = adni_utils.replace_sequence_chars(fmri_subj.SERDESC)
                        scanner = fmri_metadata['Scanner']
                        loni_uid = fmri_metadata['LONIUID']
                        visit = fmri_metadata['Visit']
                        mag_strenght = fmri_metadata['MagStrength']

                        # Calculate the path: look for the S<loni_uid> folder
                        # below <source_dir>/<subj>/<sequence>.
                        seq_path = path.join(source_dir, str(subj), sequence)
                        for (dirpath, dirnames, filenames) in walk(seq_path):
                            found = False
                            for d in dirnames:
                                if d == 'S' + str(loni_uid):
                                    image_path = path.join(dirpath, d)
                                    # Check if the path exists.
                                    if not os.path.isdir(image_path):
                                        cprint('Path not existing for subject ' + subj + ' visit ' + visit)
                                    found = True
                                    break
                            if found:
                                break

                        # The session scmri corresponds to the baseline.
                        if viscode == 'scmri':
                            viscode = 'bl'
                    else:
                        cprint('Missing visit, sequence, scan date and loniuid for subject ' + subj + ' visit ' + visit)
                        continue

                    row_to_append = pd.DataFrame(
                        [[subj, str(viscode), visit, str(fmri_imageuid), sequence, scan_date, str(loni_uid),
                          scanner, mag_strenght, image_path]], columns=fmri_col)
                    fmri_df = fmri_df.append(row_to_append, ignore_index=True)
                else:
                    # BUG FIX: logging.info was called print-style with extra
                    # positional args and no %s placeholders, which raises a
                    # TypeError when the log record is formatted.
                    logging.info('Missing fMRI for %s visit %s', subj, visit)

    fmri_df.to_csv(path.join(dest_dir, 'conversion_info', 'fmri_paths.tsv'), sep='\t', index=False)
    return fmri_df
......@@ -55,6 +55,7 @@ class AdniToBids(Converter):
from clinica.utils.stream import cprint
import clinica.iotools.converters.adni_to_bids.adni_utils as adni_utils
clinic_specs_path = path.join(os.path.dirname(os.path.dirname(os.path.dirname(__file__))), 'data',
'clinical_specifications_adni.xlsx')
try:
......
# coding: utf-8
"""
Utilities for adni_to_bids converter
......@@ -14,6 +15,7 @@ __email__ = "jorge.samper-gonzalez@inria.fr"
__status__ = "Development"
def replace_sequence_chars(sequence_name):
"""
Replace some special characters in the sequence name given in input
......@@ -300,6 +302,7 @@ def write_adni_sessions_tsv(sessions_dict, fields_bids, bids_subjs_paths):
from os import path
import os
import pandas as pd
import numpy as np
columns_order = remove_fields_duplicated(fields_bids)
......@@ -320,9 +323,26 @@ def write_adni_sessions_tsv(sessions_dict, fields_bids, bids_subjs_paths):
sessions_df = sessions_df.append(pd.DataFrame(sessions_dict[bids_id][ses], index=['i', ]))
sessions_df = sessions_df[columns_order]
sessions_df[['adas_Q1','adas_Q2','adas_Q3','adas_Q4','adas_Q5','adas_Q6','adas_Q7','adas_Q8','adas_Q9','adas_Q10','adas_Q11','adas_Q12','adas_Q13']] = sessions_df[['adas_Q1','adas_Q2','adas_Q3','adas_Q4','adas_Q5','adas_Q6','adas_Q7','adas_Q8','adas_Q9','adas_Q10','adas_Q11','adas_Q12','adas_Q13']].apply(pd.to_numeric)
sessions_df['adas_memory'] = sessions_df['adas_Q1'] + sessions_df['adas_Q4'] + sessions_df['adas_Q7'] + sessions_df['adas_Q8'] + sessions_df['adas_Q9'] #/ 45
sessions_df['adas_language'] = sessions_df['adas_Q2'] + sessions_df['adas_Q5'] + sessions_df['adas_Q10'] + sessions_df['adas_Q11'] + sessions_df['adas_Q12'] #/ 25
sessions_df['adas_praxis'] = sessions_df['adas_Q3'] + sessions_df['adas_Q6'] #/ 10
sessions_df['adas_concentration'] = sessions_df['adas_Q13'] #/ 5
list_diagnosis_nan = np.where(pd.isnull(sessions_df['diagnosis']))
diagnosis_change = {1: 'CN', 2: 'MCI', 3: 'AD'}
for j in list_diagnosis_nan[0]:
if not is_nan(sessions_df['adni_diagnosis_change'][j]) and int(sessions_df['adni_diagnosis_change'][j]) < 4:
sessions_df['diagnosis'][j] = diagnosis_change[int(sessions_df['adni_diagnosis_change'][j])]
sessions_df.to_csv(path.join(sp, bids_id + '_sessions.tsv'), sep='\t', index=False, encoding='utf-8')
def update_sessions_dict(sessions_dict, subj_bids, visit_id, field_value, bids_field_name):
"""
Update the sessions dictionary for the bids subject specified by subj_bids created by the method
......@@ -348,6 +368,7 @@ def update_sessions_dict(sessions_dict, subj_bids, visit_id, field_value, bids_f
if bids_field_name == 'diagnosis':
field_value = convert_diagnosis_code(field_value)
# If the dictionary already contain the subject add or update information regarding a specific session,
# otherwise create the entry
if sessions_dict.has_key(subj_bids):
......@@ -452,7 +473,7 @@ def create_adni_sessions_dict(bids_ids, clinic_specs_path, clinical_data_dir, bi
if not pd.isnull(sessions_fields[i]):
# Extract only the fields related to the current file opened
if location in field_location[i]:
if location == 'ADAS_ADNIGO2.csv' or location == 'DXSUM_PDXCONV_ADNIALL.csv' or location == 'CDR.csv':
if location == 'ADAS_ADNIGO2.csv' or location == 'DXSUM_PDXCONV_ADNIALL.csv' or location == 'CDR.csv' or location == 'NEUROBAT.csv':
if type(row['VISCODE2']) == float:
continue
visit_id = row['VISCODE2']
......@@ -470,6 +491,7 @@ def create_adni_sessions_dict(bids_ids, clinic_specs_path, clinical_data_dir, bi
else:
continue
# Write the sessions dictionary created in several tsv files
write_adni_sessions_tsv(sessions_dict, fields_bids, bids_subjs_paths)
......@@ -541,4 +563,6 @@ def create_adni_scans_files(clinic_specs_path, bids_subjs_paths, bids_ids):
scans_df['filename'] = pd.Series(path.join(mod_name, file_name))
scans_df.to_csv(scans_tsv, header=False, sep='\t', index=False, encoding='utf-8')
scans_df = pd.DataFrame(columns=(fields_bids))
\ No newline at end of file
scans_df = pd.DataFrame(columns=(fields_bids))
......@@ -14,15 +14,21 @@ class CmdParserSubjectsSessions(ce.CmdParser):
def define_options(self):
    """Define the command-line arguments of the subjects/sessions sub-command.

    BUG FIX (unresolved merge): this method contained BOTH the old interface
    (positional `out_directory` plus `-on/--output_name`) and its replacement
    (positional `out_tsv`), so argparse demanded two output positionals.
    Only the new `out_tsv` interface is kept — it is the one `run_command`
    reads (`args.out_tsv`).
    """
    self._args.add_argument("bids_directory",
                            help='Path to the BIDS dataset directory.')
    self._args.add_argument("out_tsv",
                            help='Output TSV file containing the participants with their sessions.')  # noqa
def run_command(self, args):
    """Create the participants/sessions TSV list for a BIDS dataset.

    Ensures the directory of `args.out_tsv` exists (creating it if needed),
    then delegates to `data_handling.create_subs_sess_list`.

    BUG FIX (unresolved merge): the stale call
    `dt.create_subs_sess_list(args.bids_directory, args.out_directory, args.output_name)`
    was left next to its replacement; `out_directory`/`output_name` are no
    longer defined on `args`, so the old call is removed.
    """
    import os
    import errno
    from clinica.iotools.utils import data_handling as dt

    output_directory = os.path.dirname(args.out_tsv)
    # Guard the empty-dirname case (out_tsv given as a bare filename):
    # os.makedirs('') would raise.
    if output_directory and not os.path.exists(output_directory):
        try:
            os.makedirs(output_directory)
        except OSError as exc:  # guard against race condition
            if exc.errno != errno.EEXIST:
                raise
    dt.create_subs_sess_list(args.bids_directory, output_directory, os.path.basename(args.out_tsv))
class CmdParserMergeTsv(ce.CmdParser):
......
{
"name": "caps",
"root": ".",
"entities": [
{
"name": "subject",
......@@ -13,6 +15,21 @@
{
"name": "dwi_preprocessing_file",
"pattern":".*\/subjects\/sub-[a-zA-Z0-9]+\/ses-[a-zA-Z0-9]+\/dwi\/preprocessing\/(.*)"
},
{
"name": "freesurfer_file",
"pattern":".*\/subjects\/sub-[a-zA-Z0-9]+\/ses-[a-zA-Z0-9]+\/t1\/freesurfer_cross_sectional\/sub-[a-zA-Z0-9]+_ses-[a-zA-Z0-9]+\/.*\/(.*)"
},
{
"name": "group_id",
"mandatory" : false,
"pattern":".*\/subjects\/sub-[a-zA-Z0-9]+\/ses-[a-zA-Z0-9]+\/.*\/group-(.*)\/.*"
},
{
"name": "modulation",
"pattern":".*\/.*T1w_segm-graymatter_space-Ixi549Space_modulated-on_probability.nii.*"
}
]
}
# Dwi Preprocessing Phase Difference Fieldmap3
\ No newline at end of file
"""Dwi Preprocessing Phase Difference Fieldmap3 - Clinica Command Line Interface.
This file has been generated automatically by the `clinica generate template`
command line tool. See here for more details: https://gitlab.icm-institute.org/aramis/clinica/wikis/docs/InteractingWithClinica.
"""
import clinica.engine as ce
class DwiPreprocessingPhaseDifferenceFieldmap3CLI(ce.CmdParser):
def define_name(self):
"""Define the sub-command name to run this pipelines.
"""