Much of this work has been adapted from the Camino guide for DTI reconstruction: http://camino.cs.ucl.ac.uk/index.php?n=Tutorials.DTI. We will show you a few of the steps that have been implemented in rcamino: camino_pointset2scheme, camino_modelfit, camino_fa, camino_md, and camino_dteig.
The data used in this tutorial is located at http://cmic.cs.ucl.ac.uk/camino//uploads/Tutorials/example_dwi.zip. It contains 3 files:
4Ddwi_b1000.nii.gz - a 4D image of the DWI data
brain_mask.nii.gz - a brain mask of the DTI data
grad_dirs.txt - a text file with the b-vectors as its 3 columns
First, we download the data into a temporary directory, then unzip it:
tdir = tempdir()
tfile = file.path(tdir, "example_dwi.zip")
download.file("http://cmic.cs.ucl.ac.uk/camino//uploads/Tutorials/example_dwi.zip",
destfile = tfile)
files = unzip(zipfile = tfile, exdir = tdir, overwrite = TRUE)
While FSL's dtifit requires the b-values and b-vectors as separate files, Camino combines the gradient directions and b-value into a single scheme file. This data has b-values of 1000 s/mm^2 wherever the b-vector is nonzero. This is very important: you must know where your b-values and b-vectors are, and what units they are in, when doing your analyses. Camino specifies b-values in SI units (s/m^2), so 1000 s/mm^2 is passed as 1e9:
library(rcamino)
b_data_file = grep("[.]txt$", files, value = TRUE)
scheme_file = camino_pointset2scheme(infile = b_data_file,
bvalue = 1e9)
/tmp/Rtmpu8nUfi/Rinst64164244e0c8/rcamino/camino/bin/pointset2scheme -inputfile '/tmp/Rtmp3VqWMB/grad_dirs.txt' -bvalue 1000000000 -outputfile /tmp/Rtmp3VqWMB/file66153ff883d9.scheme
Here we ensure that the number of b-values/b-vectors is the same as the number of time points in the 4D image.
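The check is sketched below (the variable name img_fname and the use of dim are ours; we assume the DWI file is the one whose name contains "dwi"):
library(neurobase)
# find and read the 4D DWI image
img_fname = grep("dwi", files, value = TRUE)
img = readnii(img_fname)
# the number of volumes should equal the number of gradient directions
dim(img)[4]
nrow(read.table(b_data_file))
# free the memory used by the 4D image
rm(list = "img"); gc()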
Loading required package: oro.nifti
oro.nifti 0.9.7
[1] 33
[1] 33
used (Mb) gc trigger (Mb) max used (Mb)
Ncells 520671 27.9 940480 50.3 750400 40.1
Vcells 693962 5.3 115850709 883.9 143603106 1095.7
In a full pipeline, you would first correct the DWI data for eddy current distortion (for example with FSL's eddy_correct), saving the result to a temporary file (outfile) but also returning it as a nifti object (ret) by setting retimg = TRUE. The first volume would be used as the reference, as is the default in FSL; note that FSL is zero-indexed, so the first volume is the zero-th index. Camino reads data in its own voxel-ordered format, so we convert the 4D NIfTI image with camino_image2voxel.
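The conversion chunk itself is not shown here; a minimal sketch, assuming camino_image2voxel accepts infile and outputdatatype arguments and returns the path to the converted file (the name float_fname is ours, and is what camino_modelfit takes below):
# convert the 4D NIfTI image to Camino's voxel-ordered float format
float_fname = camino_image2voxel(infile = img_fname,
                                 outputdatatype = "float")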
/tmp/Rtmpu8nUfi/Rinst64164244e0c8/rcamino/camino/bin/image2voxel -inputfile '/tmp/Rtmp3VqWMB/4Ddwi_b1000.nii.gz' -outputfile '/tmp/Rtmp3VqWMB/file6615c832a64.Bfloat' -outputdatatype float
Note that from here on we use the converted Camino-format file (float_fname); had you run eddy current correction, you would convert either the filename of its output or the eddy-current-corrected nifti object in the same way. We then fit the diffusion tensor model with camino_modelfit, supplying the scheme file and the brain mask:
mask_fname = grep("mask", files, value = TRUE)
model_fname = camino_modelfit(
  infile = float_fname,
  scheme = scheme_file,
  mask = mask_fname,
  outputdatatype = "double"
)
/tmp/Rtmpu8nUfi/Rinst64164244e0c8/rcamino/camino/bin/modelfit -inputfile '/tmp/Rtmp3VqWMB/file6615c832a64.Bfloat' -outputfile '/tmp/Rtmp3VqWMB/file66152c611ee5.Bdouble' -inputdatatype float -schemefile /tmp/Rtmp3VqWMB/file66153ff883d9.scheme -bgmask /tmp/Rtmp3VqWMB/brain_mask.nii.gz -maskdatatype float -model dt
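From the tensor fit we can compute a fractional anisotropy (FA) map and convert it back to a NIfTI image, using the original 4D image as the header. The FA chunk that produced the command below is not shown; a sketch, assuming camino_fa takes the model file as its infile argument and returns the path to the FA output:
# compute FA from the fitted diffusion tensors
fa_fname = camino_fa(infile = model_fname)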
cat '/tmp/Rtmp3VqWMB/file66152c611ee5.Bdouble' | /tmp/Rtmpu8nUfi/Rinst64164244e0c8/rcamino/camino/bin/fa -inputmodel dt -outputdatatype double > '/tmp/Rtmp3VqWMB/file661556b497f2.Bdouble'
library(neurobase)
fa_img_name = camino_voxel2image(infile = fa_fname,
                                 header = img_fname,
                                 gzip = TRUE,
                                 components = 1)
/tmp/Rtmpu8nUfi/Rinst64164244e0c8/rcamino/camino/bin/voxel2image -inputfile /tmp/Rtmp3VqWMB/file661556b497f2.Bdouble -header /tmp/Rtmp3VqWMB/4Ddwi_b1000.nii.gz -outputroot /tmp/Rtmp3VqWMB/file6615477b856a_ -components 1 -gzip
We can chain Camino commands together using the magrittr pipe operator (%>%):
library(magrittr)
fa_img2 = model_fname %>%
  camino_fa() %>%
  camino_voxel2image(header = img_fname, gzip = TRUE, components = 1) %>%
  readnii
cat '/tmp/Rtmp3VqWMB/file66152c611ee5.Bdouble' | /tmp/Rtmpu8nUfi/Rinst64164244e0c8/rcamino/camino/bin/fa -inputmodel dt -outputdatatype double > '/tmp/Rtmp3VqWMB/file6615561f9e47.Bdouble'
/tmp/Rtmpu8nUfi/Rinst64164244e0c8/rcamino/camino/bin/voxel2image -inputfile /tmp/Rtmp3VqWMB/file6615561f9e47.Bdouble -header /tmp/Rtmp3VqWMB/4Ddwi_b1000.nii.gz -outputroot /tmp/Rtmp3VqWMB/file66152a8795a9_ -components 1 -gzip
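The comparison that produced the TRUE below is not shown; one plausible check, assuming the step-by-step FA image is read back in from fa_img_name, is:
# read the FA image created step by step and compare it to the piped result
fa_img = readnii(fa_img_name)
all.equal(fa_img, fa_img2)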
[1] TRUE
Similar to getting FA maps, we can get mean diffusivity (MD) maps, read them into R, and visualize them using ortho2:
md_img = model_fname %>%
  camino_md() %>%
  camino_voxel2image(header = img_fname, gzip = TRUE, components = 1) %>%
  readnii
cat '/tmp/Rtmp3VqWMB/file66152c611ee5.Bdouble' | /tmp/Rtmpu8nUfi/Rinst64164244e0c8/rcamino/camino/bin/md -inputmodel dt -outputdatatype double > '/tmp/Rtmp3VqWMB/file66156da31a3d.Bdouble'
/tmp/Rtmpu8nUfi/Rinst64164244e0c8/rcamino/camino/bin/voxel2image -inputfile /tmp/Rtmp3VqWMB/file66156da31a3d.Bdouble -header /tmp/Rtmp3VqWMB/4Ddwi_b1000.nii.gz -outputroot /tmp/Rtmp3VqWMB/file66153fab13bc_ -components 1 -gzip
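The plotting call itself is not shown here; with neurobase loaded, the MD map can be displayed with:
# orthographic view of the mean diffusivity map
ortho2(md_img)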
Using camino_dt2nii, we can export the diffusion tensors to NIfTI files. We see that the result is the filenames of the NIfTI files, and that they all exist (otherwise there would be an error).
nifti_dt = camino_dt2nii(
  infile = model_fname,
  inputmodel = "dt",
  header = img_fname,
  gzip = TRUE
)
/tmp/Rtmpu8nUfi/Rinst64164244e0c8/rcamino/camino/bin/dt2nii -inputfile /tmp/Rtmp3VqWMB/file66152c611ee5.Bdouble -header /tmp/Rtmp3VqWMB/4Ddwi_b1000.nii.gz -inputmodel dt -outputroot /tmp/Rtmp3VqWMB/file66156ade22df_ -gzip
[1] "/tmp/Rtmp3VqWMB/file66156ade22df_exitcode.nii.gz"
[2] "/tmp/Rtmp3VqWMB/file66156ade22df_lns0.nii.gz"
[3] "/tmp/Rtmp3VqWMB/file66156ade22df_dt.nii.gz"
We can read these DT images into R again using readnii, but we must set drop_dim = FALSE for diffusion tensor images because the pixel dimensions are zero and readnii otherwise assumes you want to drop these "empty" dimensions.
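A minimal sketch of that step (selecting the tensor component file by the "_dt" pattern in the filenames listed above is our choice):
# read the tensor image without dropping dimensions whose pixel dimension is zero
dt_fname = grep("_dt[.]nii", nifti_dt, value = TRUE)
dt_img = readnii(dt_fname, drop_dim = FALSE)
dim(dt_img)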