This function wraps tapas_data() to run in parallel. It creates the training vectors for all subjects from a probability map, a gold standard mask (normally a manual segmentation), and a brain mask. For each threshold in the grid provided, the function applies the threshold to the probability map and calculates the Sørensen–Dice coefficient (DSC) between the resulting automatic segmentation and the gold standard image, along with the volume of the segmentation at that threshold.
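To make the per-threshold computation concrete, the sketch below reimplements the two quantities produced for one subject at one threshold. This is only an illustration, not the code tapas_data() uses internally; prob_map, gold, and voxel_volume are assumed inputs (images on the same grid and the volume of a single voxel).

# Illustrative sketch (not the rtapas internals): DSC and segmented volume
# for one probability map at one threshold.
# `prob_map` and `gold` are assumed arrays (or nifti images) on the same grid;
# `voxel_volume` is the volume of one voxel (e.g., in mm^3).
dsc_and_volume <- function(prob_map, gold, threshold, voxel_volume = 1) {
  seg <- prob_map >= threshold                # automatic segmentation
  dsc <- 2 * sum(seg & (gold == 1)) /
    (sum(seg) + sum(gold == 1))               # Sorensen-Dice coefficient
  volume <- sum(seg) * voxel_volume           # segmented volume at this threshold
  c(threshold = threshold, dsc = dsc, volume = volume)
}
# Applied over the full grid, as tapas_data()/tapas_data_par() do per subject:
# t(sapply(seq(0, 1, by = 0.01), function(tau) dsc_and_volume(pmap1, gs1, tau)))
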
tapas_data_par(cores = 1, thresholds = seq(from = 0, to = 1, by = 0.01),
  pmap, gold_standard, mask, k = 0, subject_id = NULL,
  ret = FALSE, outfile = NULL, verbose = TRUE)
| Argument | Description |
|---|---|
| cores | The number of cores to use. This argument controls at most how many child processes will be run simultaneously. The default is set to 1. |
| thresholds | A vector of thresholds to apply to the probability map. The default is seq(from = 0, to = 1, by = 0.01). Threshold values must be between 0 and 1. |
| pmap | A vector of probability map file paths or a list of probability map objects of class nifti, one per subject. |
| gold_standard | A vector of gold standard (normally manual segmentation) file paths or a list of gold standard objects of class nifti, one per subject. The gold standard is compared with each thresholded probability map using the Sørensen–Dice coefficient (DSC). |
| mask | A vector of brain mask file paths or a list of brain mask objects of class nifti, one per subject. |
| k | The minimum number of voxels for a cluster/component. Segmentation clusters of size less than k are removed from the mask, volume estimation, and the Sørensen–Dice coefficient (DSC) calculation. |
| subject_id | A vector of subject IDs of class character, one per subject. Set to NULL by default. |
| ret | A logical argument, set to FALSE by default. When TRUE the subject-level tibble objects are returned as a list in the local R environment; when FALSE, outfile must be specified so the subject data are saved. |
| outfile | Set to NULL by default, in which case nothing is written to disk. To save each subject-level tibble, provide a vector of file paths (with .rds or .RData extensions included), one per subject. |
| verbose | A logical argument to print messages. Set to TRUE by default. |
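Because ret and outfile work together, the sketch below shows the disk-saving pattern: ret = FALSE with one output path per subject. The file paths are hypothetical examples, and the input lists (train_probability_maps, train_gold_standard_masks, train_brain_masks, train_ids) are the ones constructed in the Examples section below.

# Hypothetical saving workflow: write each subject-level tibble to disk
# instead of returning it locally. One output path per subject
# (the paths below are examples only; any writable location works).
out_paths = file.path(tempdir(), paste0("tapas_data_subject_", 1:10, ".rds"))
tapas_data_par(cores = 1,
               pmap = train_probability_maps,
               gold_standard = train_gold_standard_masks,
               mask = train_brain_masks,
               subject_id = train_ids,
               ret = FALSE,
               outfile = out_paths,
               verbose = TRUE)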
A list containing, for each subject, the subject-level tibble object returned from tapas_data(). ret must be TRUE to return the objects locally. To save the objects instead, a vector of outfile file paths must be provided.
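When ret = TRUE, the list of subject-level tibbles is usually stacked into a single data set before later training steps. A minimal sketch, assuming data_list is the list returned by tapas_data_par() and that the subject-level tibbles share the same columns (dplyr::bind_rows() is used here only for convenience):

# Stack the subject-level tibbles returned with ret = TRUE into one tibble.
# `data_list` is an assumed name for the list returned by tapas_data_par().
train_data = dplyr::bind_rows(data_list)
# Base R equivalent:
# train_data = do.call(rbind, data_list)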
if (FALSE) {
# The gold standard masks, probability maps, and brain mask are provided in the
# rtapas package as arrays. Below we will convert them to nifti objects.
library(rtapas)
library(oro.nifti)
# Create a list of the gold standard manual segmentations
train_gold_standard_masks = list(gs1 = gs1,
gs2 = gs2,
gs3 = gs3,
gs4 = gs4,
gs5 = gs5,
gs6 = gs6,
gs7 = gs7,
gs8 = gs8,
gs9 = gs9,
gs10 = gs10)
# Convert the gold standard masks to nifti objects
train_gold_standard_masks = lapply(train_gold_standard_masks, oro.nifti::nifti)
# Make a list of the training probability maps
train_probability_maps = list(pmap1 = pmap1,
pmap2 = pmap2,
pmap3 = pmap3,
pmap4 = pmap4,
pmap5 = pmap5,
pmap6 = pmap6,
pmap7 = pmap7,
pmap8 = pmap8,
pmap9 = pmap9,
pmap10 = pmap10)
# Convert the probability maps to nifti objects
train_probability_maps = lapply(train_probability_maps, oro.nifti::nifti)
# Make a list of the brain masks
train_brain_masks = list(brain_mask1 = brain_mask,
brain_mask2 = brain_mask,
brain_mask3 = brain_mask,
brain_mask4 = brain_mask,
brain_mask5 = brain_mask,
brain_mask6 = brain_mask,
brain_mask7 = brain_mask,
brain_mask8 = brain_mask,
brain_mask9 = brain_mask,
brain_mask10 = brain_mask)
# Convert the brain masks to nifti objects
train_brain_masks = lapply(train_brain_masks, oro.nifti::nifti)
# Specify training IDs
train_ids = paste0('subject_', 1:length(train_gold_standard_masks))
# The call below uses 2 cores. Be sure your machine has at least 2 cores available, or set cores = 1.
# Run tapas_data_par function
data = tapas_data_par(cores = 2,
thresholds = seq(from = 0, to = 1, by = 0.01),
pmap = train_probability_maps,
gold_standard = train_gold_standard_masks,
mask = train_brain_masks,
k = 0,
subject_id = train_ids,
ret = TRUE,
outfile = NULL,
verbose = TRUE)
}
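If the example above is run (it is wrapped in if (FALSE), so it does not execute automatically), the structure of the returned object can be checked directly. Each element should be one subject's tibble, and with the default grid each tibble is expected to contain one row per threshold; this is a quick sanity check, not an assertion about exact column names.

# Quick checks on the example output (assumes the example above was run):
length(data)              # one element per training subject (10 here)
str(data, max.level = 1)  # list structure
data[[1]]                 # subject-level tibble for the first subject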