Example 05: Train a network for regression (tomography)
This script trains an MS-D network for regression (i.e. denoising/artifact removal) of tomographic images. Run generatedata.py first to generate the required training data.
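The training and validation images are read as TIFF files from tomo_train/lowqual, tomo_train/highqual, tomo_val/lowqual and tomo_val/highqual, which generatedata.py is expected to produce.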
# Import code
import msdnet
import glob
# Define dilations in [1,10] as in paper.
dilations = msdnet.dilations.IncrementDilations(10)
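# (Illustration only: assuming IncrementDilations(10) cycles through
# dilations 1,2,...,10 and then repeats, layer i of the 100-layer network
# created below would use dilation (i % 10) + 1, i.e. the pattern
# [(i % 10) + 1 for i in range(100)] -> [1, 2, ..., 10, 1, 2, ...].)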
# Create main network object for regression, with 100 layers,
# [1,10] dilations, 5 input channels (5 slices), 1 output channel, using
# the GPU (set gpu=False to use CPU)
n = msdnet.network.MSDNet(100, dilations, 5, 1, gpu=True)
# Initialize network parameters
n.initialize()
# Define training data
# First, create lists of input files (low quality) and target files (high quality)
flsin = sorted(glob.glob('tomo_train/lowqual/*.tiff'))
flstg = sorted(glob.glob('tomo_train/highqual/*.tiff'))
# Create list of datapoints (i.e. input/target pairs)
dats = []
for i in range(len(flsin)):
    # Create datapoint with file names
    d = msdnet.data.ImageFileDataPoint(flsin[i], flstg[i])
    # Add datapoint to list
    dats.append(d)
# Note: The above can also be achieved using a utility function for such 'simple' cases:
# dats = msdnet.utils.load_simple_data('tomo_train/lowqual/*.tiff', 'tomo_train/highqual/*.tiff', augment=False)
# Convert input slices to input slabs (i.e. multiple slices as input: here, 2
# neighboring slices on each side of each slice, giving the 5 input channels
# defined above)
dats = msdnet.data.convert_to_slabs(dats, 2, flip=True)
# Augment data by rotating and flipping
dats_augm = [msdnet.data.RotateAndFlipDataPoint(d) for d in dats]
# Normalize input and output of network to zero mean and unit variance using
# training data images
n.normalizeinout(dats)
# Use image batches of a single (augmented) image
bprov = msdnet.data.BatchProvider(dats_augm, 1)
# Define validation data (not using augmentation)
flsin = sorted(glob.glob('tomo_val/lowqual/*.tiff'))
flstg = sorted(glob.glob('tomo_val/highqual/*.tiff'))
datsv = []
for i in range(len(flsin)):
    d = msdnet.data.ImageFileDataPoint(flsin[i], flstg[i])
    datsv.append(d)
# Note: The above can also be achieved using a utility function for such 'simple' cases:
# datsv = msdnet.utils.load_simple_data('tomo_val/lowqual/*.tiff', 'tomo_val/highqual/*.tiff', augment=False)
# Convert input slices to input slabs (i.e. multiple slices as input)
datsv = msdnet.data.convert_to_slabs(datsv, 2, flip=False)
# Validate with Mean-Squared Error
val = msdnet.validate.MSEValidation(datsv)
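# (MSE here: the mean of (network output - target)^2 over the validation
# pixels; lower values indicate a better fit.)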
# Use the ADAM training algorithm
t = msdnet.train.AdamAlgorithm(n)
# Log error metrics to console
consolelog = msdnet.loggers.ConsoleLogger()
# Log error metrics to file
filelog = msdnet.loggers.FileLogger('log_tomo_regr.txt')
# Log typical, worst, and best images to image files (input channel 2,
# the central slice of each 5-slice input slab, is used for the input images)
imagelog = msdnet.loggers.ImageLogger('log_tomo_regr', onlyifbetter=True, chan_in=2)
# Train the network until the program is stopped manually.
# Network parameters are saved in tomo_regr_params.h5.
# Validation is run after every len(datsv) (=256)
# training steps.
msdnet.train.train(n, t, val, bprov, 'tomo_regr_params.h5', loggers=[consolelog, filelog, imagelog], val_every=len(datsv))
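After training has been stopped, the saved parameter file can be used to apply the network to new images. The lines below are only a minimal sketch, kept separate from the training script above: they assume the MSDNet.from_file and forward calls used in the companion 'apply' examples and the tifffile package, simply reuse the validation slices as input (substitute your own test data in practice), and use an illustrative output file name.

import msdnet
import glob
import tifffile

# Load the trained network from the saved parameter file
n = msdnet.network.MSDNet.from_file('tomo_regr_params.h5', gpu=True)
# Create datapoints with only input images (here: the validation slices)
flsin = sorted(glob.glob('tomo_val/lowqual/*.tiff'))
dats = [msdnet.data.ImageFileDataPoint(f) for f in flsin]
# Convert to the same 5-slice input slabs used during training
dats = msdnet.data.convert_to_slabs(dats, 2, flip=False)
for i, d in enumerate(dats):
    # Compute the network output for this slab and save it as a TIFF image
    output = n.forward(d.input)
    tifffile.imsave('tomo_result_{:05d}.tiff'.format(i), output[0])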