Skip to content
Open

Hw4 #21

Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
10 changes: 10 additions & 0 deletions example_solution/scripts/README.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,10 @@
# Example HW4 solution

On HPC:

1. Run the `mk_skel.sh martin` script
2. Copy the input data to `/scratch/$USER/martin/data_in`
3. Copy the contents of the `scripts` directory here to `/scratch/$USER/martin/scripts`
4. `cd /scratch/$USER/martin/scripts`
5. `./run_all.sh`

2 changes: 2 additions & 0 deletions example_solution/scripts/inputs.txt
Original file line number Diff line number Diff line change
@@ -0,0 +1,2 @@
100206_3T_DWI_dir95_LR 100206_3T_DWI_dir95_LR.bval 100206_3T_DWI_dir95_LR.bvec -1 0 0 .1115
100206_3T_DWI_dir95_RL 100206_3T_DWI_dir95_RL.bval 100206_3T_DWI_dir95_RL.bvec 1 0 0 .1115
14 changes: 14 additions & 0 deletions example_solution/scripts/run_all.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,14 @@
#!/bin/bash
#Run everything
#This script submits the SLURM pipeline and chains the stages with
#job dependencies: each stage starts only after its predecessor exits 0.
#There is a possibility afterok won't work if a stage exits non-zero;
#you can try afterany to ignore the job exit status.

#--parsable makes sbatch print just the job ID. Without it sbatch prints
#"Submitted batch job <id>", which is not a valid value for --dependency.
j_topup=$( sbatch --parsable sbatch_topup.sh )
j_eddy=$( sbatch --parsable --dependency=afterok:$j_topup sbatch_eddy_mp.sh )
j_dtifit=$( sbatch --parsable --dependency=afterok:$j_eddy sbatch_dtifit.sh )
#dtifit and the bedpost preparation both depend only on eddy, so they run in parallel
j_bedpostprep=$( sbatch --parsable --dependency=afterok:$j_eddy sbatch_bedpost_pre.sh )
j_bedpost=$( sbatch --parsable --dependency=afterok:$j_bedpostprep sbatch_bedpost.sh )
sbatch --dependency=afterok:$j_bedpost sbatch_bedpost_post.sh

4 changes: 4 additions & 0 deletions example_solution/scripts/run_bedpost.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,4 @@
#!/bin/bash
#Run bedpostX on one slice inside the container.
#Usage: run_bedpost.sh <slice_index>   (typically $SLURM_ARRAY_TASK_ID)
#Expects the prepared bedpostX directory under /bind/data_out.
cd /bind/data_out || exit 1   #abort rather than run in the wrong directory
bedpostx_single_slice.sh bedpostX "$1" --nf=2 --fudge=1 --bi=1000 --nj=1250 --se=25 --model=2 --cnonlinear --rician

4 changes: 4 additions & 0 deletions example_solution/scripts/run_bedpost_post.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,4 @@
#!/bin/bash
#Merge the per-slice bedpostX results into the final output
#(run after all array tasks of the bedpost stage have finished).
cd /bind/data_out || exit 1   #abort rather than run in the wrong directory
bedpostx_postproc.sh bedpostX.bedpostX

11 changes: 11 additions & 0 deletions example_solution/scripts/run_bedpost_pre.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,11 @@
#!/bin/bash
#Prepare for bedpost: stage the eddy-corrected data, mask, bvals and
#rotated bvecs into the layout bedpostx_preproc.sh expects.
cd /bind/data_out || exit 1   #abort rather than run in the wrong directory
mkdir -p bedpostX             #-p: safe to re-run
imcp corrected bedpostX/data
imcp brain_mask bedpostX/nodif_brain_mask
cp bvals bedpostX/bvals
#use the bvecs eddy rotated to match the motion-corrected data
cp corrected.eddy_rotated_bvecs bedpostX/bvecs
bedpostx_preproc.sh bedpostX
#make the monitor directory the slice jobs expect for their logs
mkdir -p bedpostX.bedpostX/logs/monitor
10 changes: 10 additions & 0 deletions example_solution/scripts/run_dtifit.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,10 @@
#!/bin/bash

#Fit a tensor model with dtifit.
#Assume the preprocessed (eddy-corrected) inputs are in /bind/data_out.
#Outputs are written with the "dti" prefix in the same directory.

cd /bind/data_out || exit 1   #abort rather than run in the wrong directory
dtifit -k corrected -m brain_mask -r corrected.eddy_rotated_bvecs -b bvals -o dti



20 changes: 20 additions & 0 deletions example_solution/scripts/run_eddy.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,20 @@
#!/bin/bash
#Run eddy (GPU build) for eddy-current and motion correction.
#Assume the preprocessed inputs are in /bind/data_out
#and original source files are in /bind/data_in.
#Multiband factor=3, top slice removed
#(--mb_offs=1 accounts for the removed slice — confirm sign against the eddy docs)

cd /bind/data_out || exit 1   #abort rather than run in the wrong directory
eddy_cuda --mask=brain_mask \
    --acqp=acqparams.txt \
    --index=index.txt \
    --imain=merged_dwi_cropped \
    --bvecs=bvecs \
    --bvals=bvals \
    --out=corrected \
    --mb=3 \
    --mb_offs=1 \
    --ol_type=gw \
    --cnr_maps \
    --repol \
    --topup=topup
20 changes: 20 additions & 0 deletions example_solution/scripts/run_eddy_mp.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,20 @@
#!/bin/bash
#Run eddy (CPU/OpenMP build) for eddy-current and motion correction.
#Assume the preprocessed inputs are in /bind/data_out
#and original source files are in /bind/data_in.
#Multiband factor=3, top slice removed
#(--mb_offs=1 accounts for the removed slice — confirm sign against the eddy docs)

cd /bind/data_out || exit 1   #abort rather than run in the wrong directory
eddy_openmp --mask=brain_mask \
    --acqp=acqparams.txt \
    --index=index.txt \
    --imain=merged_dwi_cropped \
    --bvecs=bvecs \
    --bvals=bvals \
    --out=corrected \
    --mb=3 \
    --mb_offs=1 \
    --ol_type=gw \
    --cnr_maps \
    --repol \
    --topup=topup
86 changes: 86 additions & 0 deletions example_solution/scripts/run_topup.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,86 @@
#!/bin/bash
#prep the data and run topup
#Usage:
#run_topup.sh inputs.txt
#Where inputs.txt contains lines of the following form, one per blip
#dwi_file bval_file bvec_file x y z t
#x y z t are lines as in the topup acquisition parameter file

inputs=$1

#container bind paths: read-only input data, read-write output data
INDIR=/bind/data_in
OUTDIR=/bind/data_out

#clean up leftovers from a previous run
#-f: do not fail/complain when the files do not exist yet (first run)
rm -f ${OUTDIR}/index.txt
rm -f ${OUTDIR}/acqparams.txt

#######################################
# Extract the b0 (b < 51) volumes of a DWI series into one merged image,
# appending one line to acqparams.txt per b0 found and one line to
# index.txt per volume (mapping each volume to its acqparams row).
# Arguments:
#   $1 - bval file, one value per volume
#   $2 - 4D DWI image
#   $3 - output prefix for the merged b0 image
#   $4 - starting row number into acqparams.txt for this blip
#   $5 - the acqparams line, passed escaped (e.g. "\${acq}") and eval'd here
# Outputs: appends to ${OUTDIR}/acqparams.txt and ${OUTDIR}/index.txt,
#          writes the merged b0 image at ${prefix}
#######################################
extract_b0 ()
{
#extract low b0 values as separate volumes
#extract_b0 file.bvals dwi.nii.gz prefix offset
#assumes series starts with b0
local bvals=( $( cat $1 ))
local b0in=$2
local prefix=$3
#PID, used to make the temporary per-volume file names unique
local rr=$$
local i=0
local index_i=$4
#caller passes "\${acq}" escaped; eval expands it to the caller's acq value
eval local acq="$5"
while [ $i -lt ${#bvals[@]} ]; do
if [ ${bvals[$i]} -lt 51 ]; then
#extract this b0
#the printf emits three words: the %03d-numbered temp name, start volume $i, count 1
fslroi $b0in `printf "${prefix}.${rr}.%03d $i 1" $i`
#write the corresponding acquisition parameter line
echo "$acq" >> ${OUTDIR}/acqparams.txt
let index_i=index_i+1
fi
let i=i+1
#every volume (b0 or not) gets an index entry pointing at the latest acqparams row
echo $(( index_i )) >> ${OUTDIR}/index.txt

done

#merge the temporary single-volume b0 files, then remove them
fslmerge -t ${prefix} ${prefix}.${rr}.*
rm ${prefix}.${rr}.*
}

#commands to merge everything; built up as strings, one input file appended
#per line of the inputs file, then executed with eval below
cmd_merge_dwi="fslmerge -t ${OUTDIR}/merged_dwi "
cmd_merge_bval="paste -d ' ' "
cmd_merge_bvec="paste -d ' ' "


offset=0
while read line; do
a=( $( echo $line ) )
echo ${INDIR}/${a[0]}
cmd_merge_dwi="$cmd_merge_dwi ${INDIR}/${a[0]}"
cmd_merge_bval="$cmd_merge_bval ${INDIR}/${a[1]}"
cmd_merge_bvec="$cmd_merge_bvec ${INDIR}/${a[2]}"
#topup acquisition-parameter line for this blip: phase-encode vector + readout time
acq="${a[3]} ${a[4]} ${a[5]} ${a[6]}"
prefix=$(printf "${OUTDIR}/blip_%03d" $offset)
#acq is passed escaped ("\${acq}") because extract_b0 evals its 5th argument
extract_b0 ${INDIR}/${a[1]} ${INDIR}/${a[0]} $prefix $offset "\${acq}"
let offset=offset+$(fslnvols $prefix)
done <<< "$(cat $inputs)"

#run the accumulated merge commands
#eval (not backticks): backticks would execute the command's OUTPUT as a command
eval "$cmd_merge_dwi"
eval "$cmd_merge_bvec" > ${OUTDIR}/bvecs
eval "$cmd_merge_bval" > ${OUTDIR}/bvals

#merge all extracted b0 volumes into one series for topup
fslmerge -t ${OUTDIR}/b0_all ${OUTDIR}/blip_*

#remove top slice from HCP (keep z slices 0-109)
fslroi ${OUTDIR}/b0_all ${OUTDIR}/b0_all_cropped 0 -1 0 -1 0 110 0 -1
fslroi ${OUTDIR}/merged_dwi ${OUTDIR}/merged_dwi_cropped 0 -1 0 -1 0 110 0 -1

#estimate the susceptibility field with topup
cd $OUTDIR
topup --imain=b0_all_cropped --datain=acqparams.txt --config=b02b0.cnf --out=topup --iout=corrected_b0

#create brain mask from the distortion-corrected b0 (-m writes brain_mask)
bet corrected_b0 brain -m



43 changes: 43 additions & 0 deletions example_solution/scripts/sbatch_bedpost.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,43 @@
#!/bin/bash
#SLURM array job: runs run_bedpost.sh once per slice inside the container.
#SBATCH --mail-type=ALL # Mail events (NONE, BEGIN, END, FAIL, ALL)
#SBATCH --mail-user=First.Last@uconn.edu # Your email address
#SBATCH --nodes=1 # OpenMP requires a single node
#SBATCH --mem=2048mb # Memory limit
#SBATCH --time=08:00:00 # Time limit hh:mm:ss
#SBATCH -e error_%A_%a.log # Standard error
#SBATCH -o output_%A_%a.log # Standard output
#SBATCH --job-name=bedpostx # Descriptive job name
#SBATCH --partition=serial # Use a serial partition 24 cores/7days
#SBATCH --array=0-109
#NOTE(review): array index is the slice number; 0-109 presumably matches the
#110 z-slices kept after cropping in run_topup.sh — confirm

export OMP_NUM_THREADS=1 #<= cpus-per-task
export ITK_GLOBAL_DEFAULT_NUMBER_OF_THREADS=1 #<= cpus-per-task
##### END OF JOB DEFINITION #####

#Define user paths
NETID=$USER
PROJECT=martin

#these are exported so the container wrapper can bind/use them
export DIR_BASE=/scratch/${NETID}/${PROJECT}
export DIR_RESOURCES=${DIR_BASE}/resources #ro
export DIR_DATA=${DIR_BASE}/data #rw data
export DIR_DATAIN=${DIR_BASE}/data_in #ro data
export DIR_DATAOUT=${DIR_BASE}/data_out #rw data
export SUBJECTS_DIR=${DIR_BASE}/freesurfer #rw for Freesurfer
export DIR_WORK=/work #rw /work on HPC is 40Gb local storage
export DIR_SCRATCH=${DIR_BASE}/scratch #rw shared storage
export DIR_SCRIPTS=${DIR_BASE}/scripts #ro, prepended to PATH


# Load modules
module load matlab/2017a #matlab binaries are bound
module load singularity/2.3.1-gcc #required to run the container

#set the matlab license path to the path inside the container
export LM_LICENSE_FILE=/bind/matlablicense/uits.lic

#finally call the container with any arguments for the job
#wrapper will bind the appropriate paths
#environment variables are passed to the container

#the array task id selects which slice this task processes
./burc_wrapper.sh run_bedpost.sh $SLURM_ARRAY_TASK_ID
43 changes: 43 additions & 0 deletions example_solution/scripts/sbatch_bedpost_post.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,43 @@
#!/bin/bash
#SLURM job: runs run_bedpost_post.sh (merge per-slice bedpostX results) in the container.
#SBATCH --mail-type=ALL # Mail events (NONE, BEGIN, END, FAIL, ALL)
#SBATCH --mail-user=First.Last@uconn.edu # Your email address
#SBATCH --nodes=1 # OpenMP requires a single node
#SBATCH --ntasks=1 # Run a single serial task
#SBATCH --cpus-per-task=1 # Number of cores to use
#SBATCH --time=00:20:00 # Time limit hh:mm:ss
#SBATCH -e error_%A_%a.log # Standard error
#SBATCH -o output_%A_%a.log # Standard output
#SBATCH --job-name=bedpostpost # Descriptive job name (was "bedpostpre": copy-paste leftover)
#SBATCH --partition=serial # Use a serial partition 24 cores/7days

export OMP_NUM_THREADS=1 #<= cpus-per-task
export ITK_GLOBAL_DEFAULT_NUMBER_OF_THREADS=1 #<= cpus-per-task
##### END OF JOB DEFINITION #####

#Define user paths
NETID=$USER
PROJECT=martin

#these are exported so the container wrapper can bind/use them
export DIR_BASE=/scratch/${NETID}/${PROJECT}
export DIR_RESOURCES=${DIR_BASE}/resources #ro
export DIR_DATA=${DIR_BASE}/data #rw data
export DIR_DATAIN=${DIR_BASE}/data_in #ro data
export DIR_DATAOUT=${DIR_BASE}/data_out #rw data
export SUBJECTS_DIR=${DIR_BASE}/freesurfer #rw for Freesurfer
export DIR_WORK=/work #rw /work on HPC is 40Gb local storage
export DIR_SCRATCH=${DIR_BASE}/scratch #rw shared storage
export DIR_SCRIPTS=${DIR_BASE}/scripts #ro, prepended to PATH


# Load modules
module load matlab/2017a #matlab binaries are bound
module load singularity/2.3.1-gcc #required to run the container

#set the matlab license path to the path inside the container
export LM_LICENSE_FILE=/bind/matlablicense/uits.lic

#finally call the container with any arguments for the job
#wrapper will bind the appropriate paths
#environment variables are passed to the container

./burc_wrapper.sh run_bedpost_post.sh
43 changes: 43 additions & 0 deletions example_solution/scripts/sbatch_bedpost_pre.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,43 @@
#!/bin/bash
#SLURM job: runs run_bedpost_pre.sh (stage data for bedpostX) in the container.
#SBATCH --mail-type=ALL # Mail events (NONE, BEGIN, END, FAIL, ALL)
#SBATCH --mail-user=First.Last@uconn.edu # Your email address
#SBATCH --nodes=1 # OpenMP requires a single node
#SBATCH --ntasks=1 # Run a single serial task
#SBATCH --cpus-per-task=1 # Number of cores to use
#SBATCH --time=00:20:00 # Time limit hh:mm:ss
#SBATCH -e error_%A_%a.log # Standard error
#SBATCH -o output_%A_%a.log # Standard output
#SBATCH --job-name=bedpostpre # Descriptive job name
#SBATCH --partition=serial # Use a serial partition 24 cores/7days

export OMP_NUM_THREADS=1 #<= cpus-per-task
export ITK_GLOBAL_DEFAULT_NUMBER_OF_THREADS=1 #<= cpus-per-task
##### END OF JOB DEFINITION #####

#Define user paths
NETID=$USER
PROJECT=martin

#these are exported so the container wrapper can bind/use them
export DIR_BASE=/scratch/${NETID}/${PROJECT}
export DIR_RESOURCES=${DIR_BASE}/resources #ro
export DIR_DATA=${DIR_BASE}/data #rw data
export DIR_DATAIN=${DIR_BASE}/data_in #ro data
export DIR_DATAOUT=${DIR_BASE}/data_out #rw data
export SUBJECTS_DIR=${DIR_BASE}/freesurfer #rw for Freesurfer
export DIR_WORK=/work #rw /work on HPC is 40Gb local storage
export DIR_SCRATCH=${DIR_BASE}/scratch #rw shared storage
export DIR_SCRIPTS=${DIR_BASE}/scripts #ro, prepended to PATH


# Load modules
module load matlab/2017a #matlab binaries are bound
module load singularity/2.3.1-gcc #required to run the container

#set the matlab license path to the path inside the container
export LM_LICENSE_FILE=/bind/matlablicense/uits.lic

#finally call the container with any arguments for the job
#wrapper will bind the appropriate paths
#environment variables are passed to the container

./burc_wrapper.sh run_bedpost_pre.sh
43 changes: 43 additions & 0 deletions example_solution/scripts/sbatch_dtifit.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,43 @@
#!/bin/bash
#SLURM job: runs run_dtifit.sh (tensor fit on corrected data) in the container.
#SBATCH --mail-type=ALL # Mail events (NONE, BEGIN, END, FAIL, ALL)
#SBATCH --mail-user=First.Last@uconn.edu # Your email address
#SBATCH --nodes=1 # OpenMP requires a single node
#SBATCH --ntasks=1 # Run a single serial task
#SBATCH --cpus-per-task=1 # Number of cores to use
#SBATCH --mem=2048mb # Memory limit
#SBATCH --time=00:10:00 # Time limit hh:mm:ss
#SBATCH -e error_%A_%a.log # Standard error
#SBATCH -o output_%A_%a.log # Standard output
#SBATCH --job-name=dtifit # Descriptive job name
#SBATCH --partition=serial # Use a serial partition 24 cores/7days
export OMP_NUM_THREADS=1 #<= cpus-per-task
export ITK_GLOBAL_DEFAULT_NUMBER_OF_THREADS=1 #<= cpus-per-task
##### END OF JOB DEFINITION #####

#Define user paths
NETID=$USER
PROJECT=martin

#these are exported so the container wrapper can bind/use them
export DIR_BASE=/scratch/${NETID}/${PROJECT}
export DIR_RESOURCES=${DIR_BASE}/resources #ro
export DIR_DATA=${DIR_BASE}/data #rw data
export DIR_DATAIN=${DIR_BASE}/data_in #ro data
export DIR_DATAOUT=${DIR_BASE}/data_out #rw data
export SUBJECTS_DIR=${DIR_BASE}/freesurfer #rw for Freesurfer
export DIR_WORK=/work #rw /work on HPC is 40Gb local storage
export DIR_SCRATCH=${DIR_BASE}/scratch #rw shared storage
export DIR_SCRIPTS=${DIR_BASE}/scripts #ro, prepended to PATH


# Load modules
module load matlab/2017a #matlab binaries are bound
module load singularity/2.3.1-gcc #required to run the container

#set the matlab license path to the path inside the container
export LM_LICENSE_FILE=/bind/matlablicense/uits.lic

#finally call the container with any arguments for the job
#wrapper will bind the appropriate paths
#environment variables are passed to the container

./burc_wrapper.sh run_dtifit.sh
Loading