From c6d44acf0155ae92cfc912253c86a2e81de9d49c Mon Sep 17 00:00:00 2001
From: "Daniel.Frisinghelli" <daniel.frisinghelli@eurac.edu>
Date: Mon, 31 Aug 2020 10:59:07 +0200
Subject: [PATCH] Added cross-references for documentation.

---
 pysegcnn/core/graphics.py        |  86 ++++++------
 pysegcnn/core/logging.py         |   4 +-
 pysegcnn/core/predict.py         |  57 ++++----
 pysegcnn/core/trainer.py         | 142 +++++++++++++-------
 pysegcnn/core/transforms.py      | 214 ++++++++++++++++++-----------
 pysegcnn/core/utils.py           | 222 +++++++++++++++----------------
 pysegcnn/main/eval.py            |  14 +-
 pysegcnn/main/train.py           |  19 ++-
 pysegcnn/preprocessing/sparcs.py |   2 +
 9 files changed, 432 insertions(+), 328 deletions(-)

diff --git a/pysegcnn/core/graphics.py b/pysegcnn/core/graphics.py
index a4f693b..7c9755e 100644
--- a/pysegcnn/core/graphics.py
+++ b/pysegcnn/core/graphics.py
@@ -37,15 +37,15 @@ def contrast_stretching(image, alpha=5):
 
     Parameters
     ----------
-    image : `numpy.ndarray`
-        the input image.
+    image : :py:class:`numpy.ndarray`
+        The input image.
     alpha : `int`, optional
-        The level of the percentiles. The default is 5.
+        The level of the percentiles. The default is `5`.
 
     Returns
     -------
-    norm : `numpy.ndarray`
-        the stretched image.
+    norm : :py:class:`numpy.ndarray`
+        The contrast-stretched image.
 
     """
     # compute upper and lower percentiles defining the range of the stretch
@@ -68,14 +68,14 @@ def running_mean(x, w):
 
     Parameters
     ----------
-    x : array_like
+    x : `array_like`
         The sequence to compute a running mean on.
     w : `int`
         The window length of the running mean.
 
     Returns
     -------
-    rm : `numpy.ndarray`
+    rm : :py:class:`numpy.ndarray`
         The running mean of the sequence ``x``.
 
     """
@@ -90,8 +90,8 @@ def plot_sample(x, use_bands, labels, y=None, y_pred=None, figsize=(10, 10),
 
     Parameters
     ----------
-    x : `numpy.ndarray` or `torch.Tensor`, (b, h, w)
-        Array containing the raw data of the tile, shape=(bands, height, width)
+    x : :py:class:`numpy.ndarray` or :py:class:`torch.Tensor`, (b, h, w)
+        Array containing the data of the tile, shape=(bands, height, width).
     use_bands : `list` of `str`
         List describing the order of the bands in ``x``.
     labels : `dict` [`int`, `dict`]
@@ -101,31 +101,31 @@ def plot_sample(x, use_bands, labels, y=None, y_pred=None, figsize=(10, 10),
                 A named color (`str`).
             ``'label'``
                 The name of the class label (`str`).
-    y : `numpy.ndarray` or `torch.Tensor` or `None`, optional
+    y : :py:class:`numpy.ndarray` or :py:class:`torch.Tensor`, optional
         Array containing the ground truth of tile ``x``, shape=(height, width).
-        The default is None.
-    y_pred : `numpy.ndarray` or `torch.Tensor` or `None`, optional
+        The default is `None`, i.e. the ground truth is not plotted.
+    y_pred : :py:class:`numpy.ndarray` or :py:class:`torch.Tensor`, optional
         Array containing the prediction for tile ``x``, shape=(height, width).
-        The default is None.
+        The default is `None`, i.e. the prediction is not plotted.
     figsize : `tuple`, optional
-        The figure size in centimeters. The default is (10, 10).
+        The figure size in centimeters. The default is `(10, 10)`.
     bands : `list` [`str`], optional
-        The bands to build the FCC. The default is ['nir', 'red', 'green'].
+        The bands to build the FCC. The default is `['nir', 'red', 'green']`.
     state : `str` or `None`, optional
         Filename to save the plot to. ``state`` should be an existing model
-        state file ending with '.pt'. The default is None, i.e. plot is not
-        saved to disk.
-    outpath : `str` or `pathlib.Path`, optional
-        Output path. The default is 'pysegcnn/main/_samples'.
+        state file ending with `'.pt'`. The default is `None`, i.e. the plot is
+        not saved to disk.
+    outpath : `str` or :py:class:`pathlib.Path`, optional
+        Output path. The default is `'pysegcnn/main/_samples'`.
     alpha : `int`, optional
         The level of the percentiles to increase constrast in the FCC.
-        The default is 0, i.e. no stretching.
+        The default is `0`, i.e. no stretching is applied.
 
     Returns
     -------
-    fig : `matplotlib.figure.Figure`
+    fig : :py:class:`matplotlib.figure.Figure`
         The figure handle.
-    ax : `numpy.ndarray` [`matplotlib.axes._subplots.AxesSubplot`]
+    ax : :py:class:`numpy.ndarray`
         An array of the axes handles.
 
     """
@@ -199,7 +199,7 @@ def plot_confusion_matrix(cm, labels, normalize=True,
 
     Parameters
     ----------
-    cm : `numpy.ndarray`
+    cm : :py:class:`numpy.ndarray`
         The confusion matrix.
     labels : `dict` [`int`, `dict`]
         The label dictionary. The keys are the values of the class labels
@@ -209,23 +209,23 @@ def plot_confusion_matrix(cm, labels, normalize=True,
             ``'label'``
                 The name of the class label (`str`).
     normalize : `bool`, optional
-        Whether to normalize the confusion matrix. The default is True.
+        Whether to normalize the confusion matrix. The default is `True`.
     figsize : `tuple`, optional
-        The figure size in centimeters. The default is (10, 10).
+        The figure size in centimeters. The default is `(10, 10)`.
     cmap : `str`, optional
-        A colormap in `matplotlib.pyplot.colormaps()`. The default is 'Blues'.
-    state_file : `str` or `None` or `pathlib.Path`, optional
+        A matplotlib colormap. The default is `'Blues'`.
+    state_file : `str` or `None` or :py:class:`pathlib.Path`, optional
         Filename to save the plot to. ``state`` should be an existing model
-        state file ending with '.pt'. The default is None, i.e. plot is not
-        saved to disk.
-    outpath : `str` or `pathlib.Path`, optional
-        Output path. The default is 'pysegcnn/main/_graphics/'.
+        state file ending with `'.pt'`. The default is `None`, i.e. the plot is
+        not saved to disk.
+    outpath : `str` or :py:class:`pathlib.Path`, optional
+        Output path. The default is `'pysegcnn/main/_graphics/'`.
 
     Returns
     -------
-    fig : `matplotlib.figure.Figure`
+    fig : :py:class:`matplotlib.figure.Figure`
         The figure handle.
-    ax : `matplotlib.axes._subplots.AxesSubplot`
+    ax : :py:class:`matplotlib.axes._subplots.AxesSubplot`
         The axes handle.
 
     """
@@ -302,23 +302,23 @@ def plot_loss(state_file, figsize=(10, 10), step=5,
 
     Parameters
     ----------
-    state_file : `str` or `pathlib.Path`
+    state_file : `str` or :py:class:`pathlib.Path`
         The model state file. Model state files are stored in
-        pysegcnn/main/_models.
+        `pysegcnn/main/_models`.
     figsize : `tuple`, optional
-        The figure size in centimeters. The default is (10, 10).
+        The figure size in centimeters. The default is `(10, 10)`.
     step : `int`, optional
-        The step of epochs for the x-axis labels. The default is 5, i.e. label
-        each fifth epoch.
+        The interval between epoch labels on the x-axis. The default is `5`,
+        i.e. label every fifth epoch.
     colors : `list` [`str`], optional
-        A list of four named colors supported by `matplotlib`.
-        The default is ['lightgreen', 'green', 'skyblue', 'steelblue'].
-    outpath : `str` or `pathlib.Path`, optional
-        Output path. The default is 'pysegcnn/main/_graphics/'.
+        A list of four named colors supported by matplotlib.
+        The default is `['lightgreen', 'green', 'skyblue', 'steelblue']`.
+    outpath : `str` or :py:class:`pathlib.Path`, optional
+        Output path. The default is `'pysegcnn/main/_graphics/'`.
 
     Returns
     -------
-    fig : `matplotlib.figure.Figure`
+    fig : :py:class:`matplotlib.figure.Figure`
         The figure handle.
 
     """
diff --git a/pysegcnn/core/logging.py b/pysegcnn/core/logging.py
index 916a4f0..c7c6464 100644
--- a/pysegcnn/core/logging.py
+++ b/pysegcnn/core/logging.py
@@ -20,7 +20,7 @@ import pathlib
 
 # the logging configuration dictionary
 def log_conf(logfile):
-    """Set basic logging configuration passed to `logging.config.dictConfig`.
+    """Set basic logging configuration.
 
     See the logging `docs`_ for a detailed description of the configuration
     dictionary.
@@ -30,7 +30,7 @@ def log_conf(logfile):
 
     Parameters
     ----------
-    logfile : `str` or `pathlib.Path`
+    logfile : `str` or :py:class:`pathlib.Path`
         The file to save the logs to.
 
     Returns
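
A minimal usage sketch of :py:func:`pysegcnn.core.logging.log_conf`, mirroring
the call in pysegcnn/main/eval.py further below; the log file name is an
illustrative assumption:

.. code-block:: python

    from logging.config import dictConfig

    from pysegcnn.core.logging import log_conf

    # configure the loggers to write to a (hypothetical) log file
    dictConfig(log_conf('model_run.log'))
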
diff --git a/pysegcnn/core/predict.py b/pysegcnn/core/predict.py
index ff7455e..2489aeb 100644
--- a/pysegcnn/core/predict.py
+++ b/pysegcnn/core/predict.py
@@ -34,14 +34,13 @@ LOGGER = logging.getLogger(__name__)
 
 
 def _get_scene_tiles(ds, scene_id):
-    """Return the tiles of the scene with id = ``scene_id``.
+    """Return the tiles of the scene with id ``scene_id``.
 
     Parameters
     ----------
-    ds : `pysegcnn.core.split.RandomSubset` or
-    `pysegcnn.core.split.SceneSubset`
-        An instance of `~pysegcnn.core.split.RandomSubset` or
-        `~pysegcnn.core.split.SceneSubset`.
+    ds : :py:class:`pysegcnn.core.split.CustomSubset`
+        An instance of a subclass of
+        :py:class:`pysegcnn.core.split.CustomSubset`.
     scene_id : `str`
         A valid scene identifier.
 
@@ -77,27 +76,27 @@ def predict_samples(ds, model, cm=False, plot=False, **kwargs):
 
     Parameters
     ----------
-    ds : `pysegcnn.core.split.RandomSubset` or
-    `pysegcnn.core.split.SceneSubset`
-        An instance of `~pysegcnn.core.split.RandomSubset` or
-        `~pysegcnn.core.split.SceneSubset`.
-    model : `pysegcnn.core.models.Network`
-        An instance of `~pysegcnn.core.models.Network`.
+    ds : :py:class:`pysegcnn.core.split.RandomSubset` or
+    :py:class:`pysegcnn.core.split.SceneSubset`
+        An instance of :py:class:`pysegcnn.core.split.RandomSubset` or
+        :py:class:`pysegcnn.core.split.SceneSubset`.
+    model : :py:class:`pysegcnn.core.models.Network`
+        An instance of :py:class:`pysegcnn.core.models.Network`.
     cm : `bool`, optional
-        Whether to compute the confusion matrix. The default is False.
+        Whether to compute the confusion matrix. The default is `False`.
     plot : `bool`, optional
         Whether to plot a false color composite, ground truth and model
-        prediction for each sample. The default is False.
+        prediction for each sample. The default is `False`.
     **kwargs
         Additional keyword arguments passed to
-        `pysegcnn.core.graphics.plot_sample`.
+        :py:func:`pysegcnn.core.graphics.plot_sample`.
 
     Raises
     ------
     TypeError
         Raised if ``ds`` is not an instance of
-        `~pysegcnn.core.split.RandomSubset` or
-        `~pysegcnn.core.split.SceneSubset`.
+        :py:class:`pysegcnn.core.split.RandomSubset` or
+        :py:class:`pysegcnn.core.split.SceneSubset`.
 
     Returns
     -------
@@ -109,9 +108,10 @@ def predict_samples(ds, model, cm=False, plot=False, **kwargs):
                 The ground truth
             ``'prediction'``
                 Model prediction
-    conf_mat : `numpy.ndarray`
+    conf_mat : :py:class:`numpy.ndarray`
         The confusion matrix. Note that the confusion matrix ``conf_mat`` is
-        only computed if ``cm`` = True.
+        only computed if ``cm=True``.
+
     """
     # check whether the dataset is a valid subset, i.e.
     # an instance of pysegcnn.core.split.SceneSubset or
@@ -183,26 +183,26 @@ def predict_scenes(ds, model, scene_id=None, cm=False, plot=False, **kwargs):
 
     Parameters
     ----------
-    ds : `pysegcnn.core.split.SceneSubset`
-        An instance of `~pysegcnn.core.split.SceneSubset`.
-    model : `pysegcnn.core.models.Network`
-        An instance of `~pysegcnn.core.models.Network`.
+    ds : :py:class:`pysegcnn.core.split.SceneSubset`
+        An instance of :py:class:`pysegcnn.core.split.SceneSubset`.
+    model : :py:class:`pysegcnn.core.models.Network`
+        An instance of :py:class:`pysegcnn.core.models.Network`.
     scene_id : `str` or `None`
         A valid scene identifier.
     cm : `bool`, optional
-        Whether to compute the confusion matrix. The default is False.
+        Whether to compute the confusion matrix. The default is `False`.
     plot : `bool`, optional
         Whether to plot a false color composite, ground truth and model
-        prediction for each scene. The default is False.
+        prediction for each scene. The default is `False`.
     **kwargs
         Additional keyword arguments passed to
-        `pysegcnn.core.graphics.plot_sample`.
+        :py:func:`pysegcnn.core.graphics.plot_sample`.
 
     Raises
     ------
     TypeError
         Raised if ``ds`` is not an instance of
-        `~pysegcnn.core.split.SceneSubset`.
+        :py:class:`pysegcnn.core.split.SceneSubset`.
 
     Returns
     -------
@@ -214,9 +214,10 @@ def predict_scenes(ds, model, scene_id=None, cm=False, plot=False, **kwargs):
                 The ground truth
             ``'prediction'``
                 Model prediction
-    conf_mat : `numpy.ndarray`
+    conf_mat : :py:class:`numpy.ndarray`
         The confusion matrix. Note that the confusion matrix ``conf_mat`` is
-        only computed if ``cm`` = True.
+        only computed if ``cm=True``.
+
     """
     # check whether the dataset is a valid subset, i.e. an instance of
     # pysegcnn.core.split.SceneSubset
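
A hedged usage sketch of :py:func:`pysegcnn.core.predict.predict_scenes` as
documented above; ``ds`` and ``model`` are assumed to be an existing
SceneSubset and a trained Network instance:

.. code-block:: python

    from pysegcnn.core.predict import predict_scenes

    def evaluate_scenes(ds, model):
        # ds: a SceneSubset, model: a trained Network (see the docstring above)
        # conf_mat is only computed because cm=True
        scenes, conf_mat = predict_scenes(ds, model, scene_id=None, cm=True,
                                          plot=False)
        return scenes, conf_mat
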
diff --git a/pysegcnn/core/trainer.py b/pysegcnn/core/trainer.py
index 24f56fe..1057831 100644
--- a/pysegcnn/core/trainer.py
+++ b/pysegcnn/core/trainer.py
@@ -102,17 +102,17 @@ class DatasetConfig(BaseConfig):
     seed : `int`
         The random seed. Used to split the dataset into training,
         validation and test set. Useful for reproducibility.
-    sort : `bool`, optional
+    sort : `bool`
         Whether to chronologically sort the samples. Useful for time series
         data. The default is `False`.
-    transforms : `list`, optional
+    transforms : `list`
         List of :py:class:`pysegcnn.core.transforms.Augment` instances.
         Each item in ``transforms`` generates a distinct transformed
         version of the dataset. The total dataset is composed of the
         original untransformed dataset together with each transformed
         version of it. If ``transforms=[]``, only the original dataset is
         used. The default is `[]`.
-    pad : `bool`, optional
+    pad : `bool`
         Whether to center pad the input image. Set ``pad=True``, if the
         images are not evenly divisible by the ``tile_size``. The image
         data is padded with a constant padding value of zero. For each
@@ -206,13 +206,13 @@ class SplitConfig(BaseConfig):
     tvratio : `float`
         The ratio of training data to validation data, e.g. ``tvratio=0.8``
         means 80% training, 20% validation.
-    date : `str`, optional
+    date : `str`
         A date. Used if ``split_mode='date'``. The default is  `yyyymmdd`.
-    dateformat : `str`, optional
+    dateformat : `str`
         The format of ``date``. ``dateformat`` is used by
         :py:func:`datetime.datetime.strptime' to parse ``date`` to a
         :py:class:`datetime.datetime` object. The default is `'%Y%m%d'`.
-    drop : `float`, optional
+    drop : `float`
         Whether to drop samples (during training only) with a fraction of
         pixels equal to the constant padding value >= ``drop``. ``drop=0``
         means, do not drop any samples. The default is `0`.
@@ -403,49 +403,49 @@ class ModelConfig(BaseConfig):
         The name of the optimizer to update the model weights.
     loss_name : `str`
         The name of the loss function measuring the model error.
-    skip_connection : `bool`, optional
+    skip_connection : `bool`
         Whether to apply skip connections. The default is `True`.
-    kwargs: `dict`, optional
+    kwargs: `dict`
         The configuration for each convolution in the model. The default is
         `{'kernel_size': 3, 'stride': 1, 'dilation': 1}`.
-    batch_size : `int`, optional
+    batch_size : `int`
         The model batch size. Determines the number of samples to process
         before updating the model weights. The default is `64`.
-    checkpoint : `bool`, optional
+    checkpoint : `bool`
         Whether to resume training from an existing model checkpoint. The
         default is `False`.
-    transfer : `bool`, optional
+    transfer : `bool`
         Whether to use a model for transfer learning on a new dataset. If True,
         the model architecture of ``pretrained_model`` is adjusted to a new
         dataset. The default is `False`.
-    pretrained_model : `str`, optional
+    pretrained_model : `str`
         The name of the pretrained model to use for transfer learning.
         The default is `''`.
-    lr : `float`, optional
+    lr : `float`
         The learning rate used by the gradient descent algorithm.
         The default is `0.001`.
-    early_stop : `bool`, optional
+    early_stop : `bool`
         Whether to apply `Early Stopping`_. The default is `False`.
-    mode : `str`, optional
-        The mode of the early stopping. Depends on the metric measuring
+    mode : `str`
+        The early stopping mode. Depends on the metric measuring
         performance. When using model loss as metric, use ``mode='min'``,
         however, when using accuracy as metric, use ``mode='max'``. For now,
         only ``mode='max'`` is supported. Only used if ``early_stop=True``.
         The default is `'max'`.
-    delta : `float`, optional
+    delta : `float`
         Minimum change in early stopping metric to be considered as an
         improvement. Only used if ``early_stop=True``. The default is `0`.
-    patience : `int`, optional
+    patience : `int`
         The number of epochs to wait for an improvement in the early stopping
         metric. If the model does not improve over more than ``patience``
         epochs, quit training. Only used if ``early_stop=True``. The default is
         `10`.
-    epochs : `int`, optional
+    epochs : `int`
         The maximum number of epochs to train. The default is `50`.
-    nthreads : `int`, optional
+    nthreads : `int`
         The number of cpu threads to use during training. The default is
         :py:func:`torch.get_num_threads()`.
-    save : `bool`, optional
+    save : `bool`
         Whether to save the model state to disk. Model states are saved in
         pysegcnn/main/_models. The default is `True`.
     model_class : :py:class:`pysegcnn.core.models.Network`
@@ -853,31 +853,41 @@ class EvalConfig(BaseConfig):
     test : `bool` or `None`
         Whether to evaluate the model on the training(``test=None``), the
         validation (``test=False``) or the test set (``test=True``).
-    predict_scene : `bool`, optional
+    predict_scene : `bool`
         The model prediction order. If False, the samples (tiles) of a dataset
         are predicted in any order and the scenes are not reconstructed.
         If True, the samples (tiles) are ordered according to the scene they
         belong to and a model prediction for each entire reconstructed scene is
         returned. The default is `False`.
-    plot_samples : `bool`, optional
+    plot_samples : `bool`
         Whether to save a plot of false color composite, ground truth and model
         prediction for each sample (tile). Only used if ``predict_scene=False``
         . The default is `False`.
-    plot_scenes : `bool`, optional
+    plot_scenes : `bool`
         Whether to save a plot of false color composite, ground truth and model
         prediction for each entire scene. Only used if ``predict_scene=True``.
         The default is `False`.
-    plot_bands : `list` [`str`], optional
+    plot_bands : `list` [`str`]
         The bands to build the false color composite. The default is
         `['nir', 'red', 'green']`.
-    cm : `bool`, optional
+    cm : `bool`
         Whether to compute and plot the confusion matrix. The default is `True`
         .
-    figsize : `tuple`, optional
+    figsize : `tuple`
         The figure size in centimeters. The default is `(10, 10)`.
-    alpha : `int`, optional
+    alpha : `int`
         The level of the percentiles for contrast stretching of the false color
         compsite. The default is `0`, i.e. no stretching.
+    base_path : :py:class:`pathlib.Path`
+        Root path to store model output.
+    sample_path : :py:class:`pathlib.Path`
+        Path to store plots of model predictions for single samples.
+    scenes_path : :py:class:`pathlib.Path`
+        Path to store plots of model predictions for entire scenes.
+    perfmc_path : :py:class:`pathlib.Path`
+        Path to store plots of model performance, e.g. confusion matrix.
+    models_path : :py:class:`pathlib.Path`
+        Path to search for model state files, i.e. pretrained models.
 
     """
 
@@ -920,10 +930,6 @@ class EvalConfig(BaseConfig):
         self.models_path = self.base_path.joinpath('_models')
         self.state_file = self.models_path.joinpath(self.state_file)
 
-        # write initialization string to log file
-        LogConfig.init_log('{}: ' + 'Evaluating model: {}.'.format(
-            self.state_file.name))
-
 
 @dataclasses.dataclass
 class LogConfig(BaseConfig):
@@ -931,11 +937,14 @@ class LogConfig(BaseConfig):
 
     Generate the model log file.
 
-    Parameters
+    Attributes
     ----------
     state_file : :py:class:`pathlib.Path`
         Path to a model state file.
-
+    log_path : :py:class:`pathlib.Path`
+        Path to store model logs.
+    log_file : :py:class:`pathlib.Path`
+        Path to the log file of the model ``state_file``.
     """
 
     state_file: pathlib.Path
@@ -991,7 +1000,7 @@ class NetworkTrainer(BaseConfig):
     :py:class:`pysegcnn.core.models.Network` on a dataset of type
     :py:class:`pysegcnn.core.dataset.ImageDataset`.
 
-    Parameters
+    Attributes
     ----------
     model : :py:class:`pysegcnn.core.models.Network`
         The model to train. An instance of
@@ -1003,35 +1012,38 @@ class NetworkTrainer(BaseConfig):
         The loss function to compute the model error. An instance of
         :py:class:`torch.nn.Module`.
     train_dl : :py:class:`torch.utils.data.DataLoader`
-        The training :py:class:`torch.utils.data.DataLoader` instance.
+        The training :py:class:`torch.utils.data.DataLoader` instance built
+        from an instance of :py:class:`pysegcnn.core.split.CustomSubset`.
     valid_dl : :py:class:`torch.utils.data.DataLoader`
-        The validation :py:class:`torch.utils.data.DataLoader` instance.
+        The validation :py:class:`torch.utils.data.DataLoader` instance built
+        from an instance of :py:class:`pysegcnn.core.split.CustomSubset`.
     test_dl : :py:class:`torch.utils.data.DataLoader`
-        The test :py:class:`torch.utils.data.DataLoader` instance.
+        The test :py:class:`torch.utils.data.DataLoader` instance built from an
+        instance of :py:class:`pysegcnn.core.split.CustomSubset`.
     state_file : :py:class:`pathlib.Path`
         Path to save the model state.
-    epochs : `int`, optional
+    epochs : `int`
         The maximum number of epochs to train. The default is `1`.
-    nthreads : `int`, optional
+    nthreads : `int`
         The number of cpu threads to use during training. The default is
         :py:func:`torch.get_num_threads()`.
-    early_stop : `bool`, optional
+    early_stop : `bool`
         Whether to apply `Early Stopping`_. The default is `False`.
-    mode : `str`, optional
-        The mode of the early stopping. Depends on the metric measuring
+    mode : `str`
+        The early stopping mode. Depends on the metric measuring
         performance. When using model loss as metric, use ``mode='min'``,
         however, when using accuracy as metric, use ``mode='max'``. For now,
         only ``mode='max'`` is supported. Only used if ``early_stop=True``.
         The default is `'max'`.
-    delta : `float`, optional
+    delta : `float`
         Minimum change in early stopping metric to be considered as an
         improvement. Only used if ``early_stop=True``. The default is `0`.
-    patience : `int`, optional
+    patience : `int`
         The number of epochs to wait for an improvement in the early stopping
         metric. If the model does not improve over more than ``patience``
         epochs, quit training. Only used if ``early_stop=True``. The default is
         `10`.
-    checkpoint_state : `dict` [`str`, :py:class:`numpy.ndarray`], optional
+    checkpoint_state : `dict` [`str`, :py:class:`numpy.ndarray`]
         A model checkpoint for ``model``. If specified, ``checkpoint_state``
         should be a dictionary with keys:
             ``'ta'``
@@ -1043,9 +1055,25 @@ class NetworkTrainer(BaseConfig):
             ``'vl'``
                 The loss on the validation set (:py:class:`numpy.ndarray`).
         The default is `{}`.
-    save : `bool`, optional
+    save : `bool`
         Whether to save the model state to ``state_file``. The default is
         `True`.
+    device : `str`
+        The device to train the model on, i.e. `cpu` or `cuda`.
+    max_accuracy : `float`
+        Maximum accuracy on the validation dataset.
+    es : `None` or :py:class:`pysegcnn.core.trainer.EarlyStopping`
+        The early stopping instance if ``early_stop=True``, else `None`.
+    training_state : `dict` [`str`, :py:class:`numpy.ndarray`]
+        The training state dictionary with keys:
+            ``'ta'``
+                The accuracy on the training set (:py:class:`numpy.ndarray`).
+            ``'tl'``
+                The loss on the training set (:py:class:`numpy.ndarray`).
+            ``'va'``
+                The accuracy on the validation set (:py:class:`numpy.ndarray`).
+            ``'vl'``
+                The loss on the validation set (:py:class:`numpy.ndarray`).
 
     .. _Early Stopping:
         https://en.wikipedia.org/wiki/Early_stopping
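
The ``train_dl``, ``valid_dl`` and ``test_dl`` attributes documented above are
plain :py:class:`torch.utils.data.DataLoader` instances. A self-contained
sketch with a toy dataset (the tensor shapes and batch size are illustrative,
not taken from this patch):

.. code-block:: python

    import torch
    from torch.utils.data import DataLoader, TensorDataset

    # toy dataset: 8 tiles with 3 bands of 16 x 16 pixels and integer labels
    tiles = torch.randn(8, 3, 16, 16)
    ground_truth = torch.zeros(8, 16, 16, dtype=torch.long)
    train_dl = DataLoader(TensorDataset(tiles, ground_truth), batch_size=4,
                          shuffle=True)

    for inputs, labels in train_dl:
        pass  # forward pass, loss, backward pass, optimizer step
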
@@ -1363,6 +1391,24 @@ class EarlyStopping(object):
     .. _Early Stopping:
         https://en.wikipedia.org/wiki/Early_stopping
 
+    Attributes
+    ----------
+    mode : `str`
+        The early stopping mode.
+    best : `float`
+        Best metric score.
+    min_delta : `float`
+        Minimum change in early stopping metric to be considered as an
+        improvement.
+    patience : `int`
+        The number of epochs to wait for an improvement.
+    is_better : `function`
+        Function indicating whether the metric improved.
+    early_stop : `bool`
+        Whether the early stopping criterion is met.
+    counter : `int`
+        The counter of epochs since the metric last improved.
+
     """
 
     def __init__(self, mode='max', best=0, min_delta=0, patience=10):
@@ -1371,7 +1417,7 @@ class EarlyStopping(object):
         Parameters
         ----------
         mode : `str`, optional
-            The mode of the early stopping. Depends on the metric measuring
+            The early stopping mode. Depends on the metric measuring
             performance. When using model loss as metric, use ``mode='min'``,
             however, when using accuracy as metric, use ``mode='max'``. For
             now, only ``mode='max'`` is supported. Only used if
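
A self-contained sketch of the early stopping criterion summarized by the
attributes above (``mode='max'``: larger metric values are better). This is an
illustration of the logic, not the implementation in pysegcnn.core.trainer:

.. code-block:: python

    def early_stopping_demo(metrics, best=0.0, min_delta=0.0, patience=10):
        # return the epoch at which training would stop
        counter = 0
        for epoch, metric in enumerate(metrics):
            if metric > best + min_delta:
                best, counter = metric, 0  # improvement: reset the counter
            else:
                counter += 1               # no improvement: advance the counter
                if counter >= patience:
                    return epoch           # early stopping criterion met
        return len(metrics) - 1

    # e.g. early_stopping_demo([0.5, 0.6, 0.6, 0.6], patience=2) returns 3
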
diff --git a/pysegcnn/core/transforms.py b/pysegcnn/core/transforms.py
index 0045c5a..4d3c27d 100644
--- a/pysegcnn/core/transforms.py
+++ b/pysegcnn/core/transforms.py
@@ -34,17 +34,14 @@ class Transform(object):
 
         Parameters
         ----------
-        image : `numpy.ndarray`
+        image : :py:class:`numpy.ndarray`
             The image to transform.
 
         Raises
         ------
         NotImplementedError
-            Raised if `~pysegcnn.core.transforms.Transform` is not inherited.
-
-        Returns
-        -------
-        None.
+            Raised if :py:class:`pysegcnn.core.transforms.Transform` is not
+            inherited.
 
         """
         raise NotImplementedError
@@ -54,6 +51,12 @@ class VariantTransform(Transform):
     """Base class for a spatially variant transformation.
 
     Transformation on the ground truth required.
+
+    Attributes
+    ----------
+    invariant : `bool`
+        Whether the transformation is spatially invariant.
+
     """
 
     def __init__(self):
@@ -66,6 +69,12 @@ class InvariantTransform(Transform):
     """Base class for a spatially invariant transformation.
 
     Transformation on the ground truth not required.
+
+    Attributes
+    ----------
+    invariant : `bool`
+        Whether the transformation is spatially invariant.
+
     """
 
     def __init__(self):
@@ -77,18 +86,24 @@ class InvariantTransform(Transform):
 class FlipLr(VariantTransform):
     """Flip an image horizontally.
 
-    Parameters
+    Attributes
     ----------
-    p : `float`, optional
-        The probability to apply the transformation. The default is 0.5.
-
-    Returns
-    -------
-    None.
+    p : `float`
+        The probability to apply the transformation.
+    applied : `bool`
+        Whether the transformation was applied.
 
     """
 
     def __init__(self, p=0.5):
+        """Initialize.
+
+        Parameters
+        ----------
+        p : `float`, optional
+            The probability to apply the transformation. The default is `0.5`.
+
+        """
         super().__init__()
         # the probability to apply the transformation
         self.p = p
@@ -98,12 +113,12 @@ class FlipLr(VariantTransform):
 
         Parameters
         ----------
-        image : `numpy.ndarray`
+        image : :py:class:`numpy.ndarray`
             The image to transform.
 
         Returns
         -------
-        transform : `numpy.ndarray`
+        transform : :py:class:`numpy.ndarray`
             The transformed image.
 
         """
@@ -131,18 +146,24 @@ class FlipLr(VariantTransform):
 class FlipUd(VariantTransform):
     """Flip an image vertically.
 
-    Parameters
+    Attributes
     ----------
-    p : `float`, optional
-        The probability to apply the transformation. The default is 0.5.
-
-    Returns
-    -------
-    None.
+    p : `float`
+        The probability to apply the transformation.
+    applied : `bool`
+        Whether the transformation was applied.
 
     """
 
     def __init__(self, p=0.5):
+        """Initialize.
+
+        Parameters
+        ----------
+        p : `float`, optional
+            The probability to apply the transformation. The default is `0.5`.
+
+        """
         super().__init__()
         # the probability to apply the transformation
         self.p = p
@@ -152,12 +173,12 @@ class FlipUd(VariantTransform):
 
         Parameters
         ----------
-        image : `numpy.ndarray`
+        image : :py:class:`numpy.ndarray`
             The image to transform.
 
         Returns
         -------
-        transform : `numpy.ndarray`
+        transform : :py:class:`numpy.ndarray`
             The transformed image.
 
         """
@@ -183,26 +204,35 @@ class FlipUd(VariantTransform):
 
 
 class Rotate(VariantTransform):
-    """Rotate an image by ``angle``.
+    """Rotate an image in the spatial plane.
 
-    The image is rotated in the spatial plane.
+    .. important::
 
-    If the input array has more then two dimensions, the spatial dimensions are
-    assumed to be the last two dimensions of the array.
+        If the input array has more than two dimensions, the spatial dimensions
+        are assumed to be the last two dimensions of the array.
 
-    Parameters
+    Attributes
     ----------
+    p : `float`
+        The probability to apply the transformation.
+    applied : `bool`
+        Whether the transformation was applied.
     angle : `float`
         The rotation angle in degrees.
-    p : `float`, optional
-        The probability to apply the transformation. The default is 0.5.
 
-    Returns
-    -------
-    None.
     """
 
     def __init__(self, angle, p=0.5):
+        """Initialize.
+
+        Parameters
+        ----------
+        angle : `float`
+            The rotation angle in degrees.
+        p : `float`, optional
+            The probability to apply the transformation. The default is `0.5`.
+
+        """
         super().__init__()
 
         # the rotation angle
@@ -216,13 +246,14 @@ class Rotate(VariantTransform):
 
         Parameters
         ----------
-        image : `numpy.ndarray`
+        image : :py:class:`numpy.ndarray`
             The image to transform.
 
         Returns
         -------
-        transform : `numpy.ndarray`
+        transform : :py:class:`numpy.ndarray`
             The transformed image.
+
         """
         if np.random.random(1) < self.p:
 
@@ -260,35 +291,27 @@ class Rotate(VariantTransform):
 class Noise(InvariantTransform):
     """Add gaussian noise to an image.
 
-    Valid modes are:
+    Supported modes are:
 
-        'add': image = image + noise
-        'speckle' : image = image + image * noise
+        - 'add' : ``image = image + noise``
+        - 'speckle' : ``image = image + image * noise``
 
-    Parameters
+    Attributes
     ----------
+    modes : `list` [`str`]
+        The supported modes.
     mode : `str`
         The mode to add the noise.
-    mean : `float`, optional
-        The mean of the gaussian distribution from which the noise is sampled.
-        The default is 0.
-    var : `float`, optional
-        The variance of the gaussian distribution from which the noise is
-        sampled. The default is 0.05.
-    p : `float`, optional
-        The probability to apply the transformation. The default is 0.5.
-    exclude : `list` [`float`] or `list` [`int`], optional
-        Values for which the noise is not added. Useful for pixels resulting
-        from image padding. The default is [].
-
-    Raises
-    ------
-    ValueError
-        Raised if ``mode`` is not supported.
-
-    Returns
-    -------
-    None.
+    mean : `float`
+        The mean of the gaussian distribution.
+    var : `float`
+        The variance of the gaussian distribution.
+    p : `float`
+        The probability to apply the transformation.
+    applied : `bool`
+        Whether the transformation was applied.
+    exclude : `list` [`float`]
+        Values for which the noise is not added.
 
     """
 
@@ -296,6 +319,30 @@ class Noise(InvariantTransform):
     modes = ['add', 'speckle']
 
     def __init__(self, mode, mean=0, var=0.05, p=0.5, exclude=[]):
+        """Initialize.
+
+        Parameters
+        ----------
+        mode : `str`
+            The mode to add the noise.
+        mean : `float`, optional
+            The mean of the gaussian distribution from which the noise is
+            sampled. The default is `0`.
+        var : `float`, optional
+            The variance of the gaussian distribution from which the noise is
+            sampled. The default is `0.05`.
+        p : `float`, optional
+            The probability to apply the transformation. The default is `0.5`.
+        exclude : `list` [`float`] or `list` [`int`], optional
+            Values for which the noise is not added. Useful for pixels
+            resulting from image padding. The default is `[]`.
+
+        Raises
+        ------
+        ValueError
+            Raised if ``mode`` is not supported.
+
+        """
         super().__init__()
 
         # check which kind of noise to apply
@@ -320,13 +367,13 @@ class Noise(InvariantTransform):
 
         Parameters
         ----------
-        image : `numpy.ndarray`
+        image : :py:class:`numpy.ndarray`
             The image to transform.
 
         Returns
         -------
-        transform : `numpy.ndarray`
-            The transformed image
+        transform : :py:class:`numpy.ndarray`
+            The transformed image.
 
         """
         if np.random.random(1) < self.p:
@@ -372,19 +419,24 @@ class Augment(object):
 
     Container class applying each transformation in ``transforms`` in order.
 
-    Parameters
+    Attributes
     ----------
     transforms : `list` or `tuple`
-        A sequence of instances of `pysegcnn.core.transforms.VariantTransform`
-        or `pysegcnn.core.transforms.InvariantTransform`.
-
-    Returns
-    -------
-    None.
+        The transformations to apply.
 
     """
 
     def __init__(self, transforms):
+        """Initialize.
+
+        Parameters
+        ----------
+        transforms : `list` or `tuple`
+            A list of instances of
+            :py:class:`pysegcnn.core.transforms.VariantTransform`
+            or :py:class:`pysegcnn.core.transforms.InvariantTransform`.
+
+        """
         assert isinstance(transforms, (list, tuple))
         self.transforms = transforms
 
@@ -396,16 +448,16 @@ class Augment(object):
 
         Parameters
         ----------
-        image : `numpy.ndarray`
+        image : :py:class:`numpy.ndarray`
             The input image.
-        gt : `numpy.ndarray`
+        gt : :py:class:`numpy.ndarray`
             The corresponding ground truth of ``image``.
 
         Returns
         -------
-        image : `numpy.ndarray`
+        image : :py:class:`numpy.ndarray`
             The transformed image.
-        gt : `numpy.ndarray`
+        gt : :py:class:`numpy.ndarray`
             The transformed ground truth.
 
         """
@@ -432,17 +484,17 @@ class Augment(object):
         return image, gt
 
     def __repr__(self):
-        """Representation of `~pysegcnn.core.transforms.Augment`.
+        """Representation.
 
         Returns
         -------
-        repr : `str`
+        fs : `str`
             Representation string.
 
         """
-        fstring = self.__class__.__name__ + '('
+        fs = self.__class__.__name__ + '('
         for t in self.transforms:
-            fstring += '\n'
-            fstring += '    {0}'.format(t)
-        fstring += '\n)'
-        return fstring
+            fs += '\n'
+            fs += '    {0}'.format(t)
+        fs += '\n)'
+        return fs
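
A hedged usage sketch of the transformations documented in this module; the
array shapes are illustrative assumptions:

.. code-block:: python

    import numpy as np

    from pysegcnn.core.transforms import Augment, FlipLr, FlipUd, Noise, Rotate

    # apply each transformation in order, each with probability p
    augment = Augment([FlipLr(p=0.5), FlipUd(p=0.5), Rotate(angle=10, p=0.5),
                       Noise(mode='add', mean=0, var=0.05, p=0.5, exclude=[0])])

    image = np.random.random((3, 128, 128))        # (bands, height, width)
    gt = np.random.randint(0, 2, size=(128, 128))  # (height, width)
    image_t, gt_t = augment(image, gt)
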
diff --git a/pysegcnn/core/utils.py b/pysegcnn/core/utils.py
index 84d2b3c..8cd73e8 100644
--- a/pysegcnn/core/utils.py
+++ b/pysegcnn/core/utils.py
@@ -38,76 +38,70 @@ SUFFIXES = ['toa_ref', 'toa_rad', 'toa_brt']
 
 
 def img2np(path, tile_size=None, tile=None, pad=False, cval=0):
-    """Read an image to a `numpy.ndarray`.
+    r"""Read an image to a :py:class:`numpy.ndarray`.
 
     If ``tile_size`` is not `None`, the input image is divided into square
-    tiles of size (``tile_size``, ``tile_size``). If the image is not evenly
-    divisible and ``pad`` = False, a `ValueError` is raised. However, if
-    ``pad`` = True, center padding with constant value ``cval`` is applied.
+    tiles of size ``(tile_size, tile_size)``. If the image is not evenly
+    divisible and ``pad=False``, a ``ValueError`` is raised. However, if
+    ``pad=True``, center padding with constant value ``cval`` is applied.
 
     The tiling works as follows:
 
-        (Padded) Input image:
-
-        ------------------------------------------------
-        |           |           |          |           |
-        |  tile_00  |  tile_01  |    ...   |  tile_0n  |
-        |           |           |          |           |
-        |----------------------------------------------|
-        |           |           |          |           |
-        |  tile_10  |  tile_11  |    ...   |  tile_1n  |
-        |           |           |          |           |
-        |----------------------------------------------|
-        |           |           |          |           |
-        |    ...    |    ...    |    ...   |    ...    |
-        |           |           |          |           |
-        |----------------------------------------------|
-        |           |           |          |           |
-        |  tile_m0  |  tile_m1  |    ...   |  tile_mn  |
-        |           |           |          |           |
-        ------------------------------------------------
-
-    where m = n. Each tile has its id, which starts at 0 in the topleft corner
-    of the input image, i.e. tile_00 has id=0, and increases along the width
-    axis, i.e. tile_0n has id=n, tile_10 has id=n+1, ..., tile_mn has
-    id=(m * n) - 1.
-
-    If ``tile`` is an integer, only the tile with id = ``tile`` is returned.
+        +-----------+-----------+-----------+-----------+
+        |           |           |           |           |
+        |  tile_00  |  tile_01  |    ...    |  tile_0n  |
+        |           |           |           |           |
+        +-----------+-----------+-----------+-----------+
+        |           |           |           |           |
+        |  tile_10  |  tile_11  |    ...    |  tile_1n  |
+        |           |           |           |           |
+        +-----------+-----------+-----------+-----------+
+        |           |           |           |           |
+        |    ...    |    ...    |    ...    |    ...    |
+        |           |           |           |           |
+        +-----------+-----------+-----------+-----------+
+        |           |           |           |           |
+        |  tile_m0  |  tile_m1  |    ...    |  tile_mn  |
+        |           |           |           |           |
+        +-----------+-----------+-----------+-----------+
+
+    where :math:`m = n`. Each tile has its id, which starts at `0` in the
+    topleft corner of the input image, i.e. `tile_00` has :math:`id=0`, and
+    increases along the width axis, i.e. `tile_0n` has :math:`id=n`, `tile_10`
+    has :math:`id=n+1`, ..., `tile_mn` has :math:`id=(m \cdot n) - 1`.
+
+    If ``tile`` is an integer, only the tile with ``id=tile`` is returned.
 
     Parameters
     ----------
-    path : `str` or `None` or `numpy.ndarray`
+    path : `str` or `None` or :py:class:`numpy.ndarray`
         The image to read.
     tile_size : `None` or `int`, optional
-        The size of a tile. The default is None.
+        The size of a tile. The default is `None`.
     tile : `int`, optional
-        The tile id. The default is None.
+        The tile id. The default is `None`.
     pad : `bool`, optional
-        Whether to center pad the input image. The default is False.
+        Whether to center pad the input image. The default is `False`.
     cval : `float`, optional
-        The constant padding value. The default is 0.
+        The constant padding value. The default is `0`.
 
     Raises
     ------
     FileNotFoundError
         Raised if ``path`` is a path that does not exist.
     TypeError
-        Raised if ``path`` is not `str` or `None` or `numpy.ndarray`.
+        Raised if ``path`` is not `str` or `None` or :py:class:`numpy.ndarray`.
 
     Returns
     -------
-    image : `numpy.ndarray`
+    image : :py:class:`numpy.ndarray`
         The image array. The output shape is:
+            - `(tiles, bands, tile_size, tile_size)` if ``tile_size`` is not
+              `None`. If the image only has one band, the shape is
+              `(tiles, tile_size, tile_size)`.
 
-            if ``tile_size`` is not `None`:
-                shape=(tiles, bands, tile_size, tile_size)
-                if the image does only have one band:
-                    shape=(tiles, tile_size, tile_size)
-
-            else:
-                shape=(bands, height, width)
-                if the image does only have one band:
-                    shape=(height, width)
+            - `(bands, height, width)` if ``tile_size=None``. If the image
+              only has one band, the shape is `(height, width)`.
 
     """
     # check the type of path
@@ -252,12 +246,12 @@ def is_divisible(img_size, tile_size, pad=False):
     tile_size : `int`
         The size of the tile.
     pad : `bool`, optional
-        Whether to center pad the input image. The default is False.
+        Whether to center pad the input image. The default is `False`.
 
     Raises
     ------
     ValueError
-        Raised if the image is not evenly divisible and ``pad`` = False.
+        Raised if the image is not evenly divisible and ``pad=False``.
 
     Returns
     -------
@@ -334,7 +328,7 @@ def check_tile_extend(img_size, topleft, tile_size):
     -------
     nrows : `int`
         Number of rows of the tile within the image.
-    ncols : TYPE
+    ncols : `int`
         Number of columns of the tile within the image.
 
     """
@@ -382,7 +376,7 @@ def tile_topleft_corner(img_size, tile_size):
     -------
     indices : `dict`
         The keys of ``indices`` are the tile ids (`int`) and the values are the
-        topleft corners (`tuple` = (y, x)) of the tiles.
+        topleft corners (y, x) of the tiles.
 
     """
     # check if the image is divisible into square tiles of size
@@ -411,14 +405,14 @@ def reconstruct_scene(tiles):
 
     Parameters
     ----------
-    tiles : `torch.Tensor` or `numpy.ndarray`
-        The tiled image, shape=(tiles, bands, tile_size, tile_size) or
-        shape=(tiles, tile_size, tile_size).
+    tiles : :py:class:`torch.Tensor` or :py:class:`numpy.ndarray`
+        The tiled image, shape: `(tiles, bands, tile_size, tile_size)` or
+        `(tiles, tile_size, tile_size)`.
 
     Returns
     -------
-    image : `numpy.ndarray`
-        The reconstructed image.
+    image : :py:class:`numpy.ndarray`
+        The reconstructed image, shape: `(bands, height, width)`.
 
     """
     # convert to numpy array
@@ -453,9 +447,9 @@ def accuracy_function(outputs, labels):
 
     Parameters
     ----------
-    outputs : `torch.Tensor` or array_like
+    outputs : :py:class:`torch.Tensor` or `array_like`
         The model prediction.
-    labels : `torch.Tensor` or array_like
+    labels : :py:class:`torch.Tensor` or `array_like`
         The ground truth.
 
     Returns
@@ -666,13 +660,13 @@ def doy2date(year, doy):
     Parameters
     ----------
     year : `int`
-        The year
+        The year.
     doy : `int`
-        The day of the year
+        The day of the year.
 
     Returns
     -------
-    date : `datetime.datetime`
+    date : :py:class:`datetime.datetime`
         The converted date.
     """
     # convert year/day of year to a datetime object
@@ -689,8 +683,8 @@ def item_in_enum(name, enum):
     ----------
     name : `str`
         Name of the item.
-    enum : `enum.Enum`
-        An instance of `enum.Enum`.
+    enum : :py:class:`enum.Enum`
+        An instance of :py:class:`enum.Enum`.
 
     Raises
     ------
@@ -718,25 +712,25 @@ def destack_tiff(image, outpath=None, overwrite=False, remove=False,
     """Destack a TIFF with more than one band into a TIFF file for each band.
 
     Each band in ``image`` is saved to ``outpath`` as distinct TIFF file.
-    The default filenames are: "filename(``image``) + _B(i).tif", where i is
-    the respective number of each band in ``image``.
+    The default filenames are: `"filename(``image``) + _B(i).tif"`, where `i`
+    is the respective number of each band in ``image``.
 
     Parameters
     ----------
-    image : `str` or `pathlib.Path`
+    image : `str` or :py:class:`pathlib.Path`
         The TIFF to destack.
     outpath : `str`, optional
-        Path to save the output TIFF files. The default is None. If None,
+        Path to save the output TIFF files. The default is `None`. If `None`,
         ``outpath`` is the path to ``image``.
     remove : `bool`, optional
         Whether to remove ``image`` from disk after destacking. The default is
-        False.
+        `False`.
     overwrite : `bool`, optional
-        Whether to overwrite existing TIFF files.
+        Whether to overwrite existing TIFF files. The default is `False`.
     suffix : `str`, optional
         String to append to the filename of ``image``. If specified, the TIFF
-        filenames for each band in ``image`` are, "filename(``image``) +
-        + _B(i)_ + ``suffix``.tif". The default is ''.
+        filenames for each band in ``image`` are `"filename(``image``) +
+        _B(i)_ + ``suffix``.tif"`. The default is `''`.
 
     Raises
     ------
@@ -841,39 +835,33 @@ def standard_eo_structure(source_path, target_path, overwrite=False,
     The directory tree in ``source_path`` is modified to the following
     structure in ``target_path``:
 
-        target_path/
-            scene_id_1/
-                files matching scene_id_1
-            scene_id_2/
-                files matching scene_id_2
-            .
-            .
-            .
-            scene_id_n/
-                files matching scene_id_n
+        - target_path/
+            - scene_id_1/
+                - files matching scene_id_1
+            - scene_id_2/
+                - files matching scene_id_2
+            - ...
+            - scene_id_n/
+                - files matching scene_id_n
 
 
     Parameters
     ----------
-    source_path : `str` or `pathlib.Path`
+    source_path : `str` or :py:class:`pathlib.Path`
         Path to the remote sensing dataset.
-    target_path : `str` or `pathlib.Path`
+    target_path : `str` or :py:class:`pathlib.Path`
         Path to save the restructured dataset.
     overwrite : `bool`, optional
         Whether to overwrite existing files in ``target_path``.
-        The default is True.
+        The default is `False`.
     move : `bool`, optional
         Whether to move the files from ``source_path`` to ``target_path``. If
-        True, files in ``source_path`` are moved to ``target_path``, if False,
-        files in ``source_path`` are copied to ``target_path``. The default is
-        False.
+        `True`, files in ``source_path`` are moved to ``target_path``, if
+        `False`, files in ``source_path`` are copied to ``target_path``. The
+        default is `False`.
     parser : `function`, optional
         The scene identifier parsing function. Depends on the sensor of the
-        dataset. See e.g., `pysegcnn.core.utils.parse_landsat_scene`.
-
-    Returns
-    -------
-    None.
+        dataset. See e.g., :py:func:`pysegcnn.core.utils.parse_landsat_scene`.
 
     """
     # create a directory for each scene
@@ -931,17 +919,18 @@ def extract_archive(inpath, outpath, overwrite=False):
 
     Parameters
     ----------
-    inpath : `str` or `pathlib.Path`
+    inpath : `str` or :py:class:`pathlib.Path`
         Path to an archive.
-    outpath : `str` or `pathlib.Path`
+    outpath : `str` or :py:class:`pathlib.Path`
         Path to save extracted files.
     overwrite : `bool`, optional
-        Whether to overwrite existing extracted files.
+        Whether to overwrite existing extracted files. The default is `False`.
 
     Returns
     -------
-    subdir : str
-        path to the extracted files
+    target : :py:class:`pathlib.Path`
+        Path to the extracted files.
+
     """
     inpath = pathlib.Path(inpath)
 
@@ -984,7 +973,7 @@ def read_landsat_metadata(file):
 
     Parameters
     ----------
-    file : `str` or `pathlib.Path`
+    file : `str` or :py:class:`pathlib.Path`
         Path to a Landsat *_MTL.txt file.
 
     Raises
@@ -997,6 +986,7 @@ def read_landsat_metadata(file):
     metadata : `dict`
         The metadata text file as dictionary, where each line is a (key, value)
         pair.
+
     """
     file = pathlib.Path(file)
     # check if the metadata file exists
@@ -1032,7 +1022,8 @@ def get_radiometric_constants(metadata):
     Parameters
     ----------
     metadata : `dict`
-        The dictionary returned by ``read_landsat_metadata``.
+        The dictionary returned by
+        :py:func:`pysegcnn.core.utils.read_landsat_metadata`.
 
     Returns
     -------
@@ -1040,6 +1031,7 @@ def get_radiometric_constants(metadata):
         Radiometric rescaling factors of the OLI sensor.
     tir : `dict`
         Thermal conversion constants of the TIRS sensor.
+
     """
     # regular expression patterns matching the radiometric rescaling factors
     oli_pattern = re.compile('(RADIANCE|REFLECTANCE)_(MULT|ADD)_BAND_\\d{1,2}')
@@ -1064,41 +1056,41 @@ def landsat_radiometric_calibration(scene, outpath=None, exclude=[],
     Convert the Landsat OLI bands to top of atmosphere radiance or reflectance
     and the TIRS bands to top of atmosphere brightness temperature.
 
-    Conversion is performed following the `equations`_ provided by the USGS.
+    .. important::
+
+        Conversion is performed following the `equations`_ provided by the
+        USGS.
 
     The filename of each band is extended by one of the following suffixes,
     depending on the type of the radiometric calibration:
 
-        'toa_ref': top of atmosphere reflectance
-        'toa_rad': top of atmopshere radiance
-        'toa_brt': top of atmosphere brightness temperature
+        - `'toa_ref'`: top of atmosphere reflectance
+        - `'toa_rad'`: top of atmosphere radiance
+        - `'toa_brt'`: top of atmosphere brightness temperature
 
     Parameters
     ----------
-    scene : `str` or `pathlib.Path`
+    scene : `str` or :py:class:`pathlib.Path`
         Path to a Landsat scene in digital number format.
-    outpath : `str` or `pathlib.Path`, optional
-        Path to save the calibrated images. The default is None, which means
-        saving to ``scene``.
+    outpath : `str` or :py:class:`pathlib.Path`, optional
+        Path to save the calibrated images. The default is `None`, which means
+        saving in the same directory as ``scene``.
     exclude : `list` [`str`], optional
-        Bands to exclude from the radiometric calibration. The default is [].
+        Bands to exclude from the radiometric calibration. The default is `[]`.
     radiance : `bool`, optional
-        Whether to calculate top of atmosphere radiance. The default is False,
-        which means calculating top of atmopshere reflectance.
+        Whether to calculate top of atmosphere radiance. The default is
+        `False`, which means calculating top of atmosphere reflectance.
     overwrite : `bool`, optional
-        Whether to overwrite the calibrated images. The default is False.
+        Whether to overwrite the calibrated images. The default is `False`.
     remove_raw : `bool`, optional
-        Whether to remove the raw digitial number images. The default is True.
+        Whether to remove the raw digital number images. The default is
+        `True`.
 
     Raises
     ------
     FileNotFoundError
         Raised if ``scene`` does not exist.
 
-    Returns
-    -------
-    None.
-
     .. _equations:
         https://www.usgs.gov/land-resources/nli/landsat/using-usgs-landsat-level-1-data-product
 
diff --git a/pysegcnn/main/eval.py b/pysegcnn/main/eval.py
index 9197a88..4dc8849 100644
--- a/pysegcnn/main/eval.py
+++ b/pysegcnn/main/eval.py
@@ -2,10 +2,15 @@
 
 Steps to run a model evaluation:
 
-    (1) Configure the dictionary 'eval_config' in pysegcnn/main/config.py
-    (2) Save pysegcnn/main/config.py
-    (3) In a terminal, navigate to the repository's root directory
-    (4) run "python pysegcnn/main/eval.py"
+    1. Configure the dictionary ``eval_config`` in
+       :py:mod:`pysegcnn.main.config`
+    2. Save :py:mod:`pysegcnn.main.config`
+    3. In a terminal, navigate to the repository's root directory
+    4. Run
+
+    .. code-block:: bash
+
+        python pysegcnn/main/eval.py
 
 
 License
@@ -42,6 +47,7 @@ if __name__ == '__main__':
     # initialize logging
     log = LogConfig(ec.state_file)
     dictConfig(log_conf(log.log_file))
+    log.init_log('{}: ' + 'Evaluating model: {}.'.format(ec.state_file.name))
 
     # load the model state
     model, _, model_state = Network.load(ec.state_file)
diff --git a/pysegcnn/main/train.py b/pysegcnn/main/train.py
index 6d75060..bdc23b5 100644
--- a/pysegcnn/main/train.py
+++ b/pysegcnn/main/train.py
@@ -2,13 +2,18 @@
 
 Steps to launch a model run:
 
-    (1) Configure the model run in pysegcnn/main/config.py
-        (i) configure the dataset      : dictionary 'dataset_config'
-        (j) configure the dataset split: dictionary 'split_config'
-        (k) configure the model        : dictionary 'model_config'
-    (2) Save pysegcnn/main/config.py
-    (3) In a terminal, navigate to the repository's root directory
-    (4) run "python pysegcnn/main/train.py"
+    1. Configure the model run in :py:mod:`pysegcnn.main.config`
+        - configure the dataset: ``dataset_config``
+        - configure the split  : ``split_config``
+        - configure the model  : ``model_config``
+    2. Save :py:mod:`pysegcnn.main.config`
+    3. In a terminal, navigate to the repository's root directory
+    4. Run
+
+    .. code-block:: bash
+
+        python pysegcnn/main/train.py
+
 
 License
 -------
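
The configuration dictionaries referenced above live in pysegcnn/main/config.py,
which is not part of this patch. A hypothetical excerpt of ``model_config``
whose keys mirror the ModelConfig attributes documented earlier; the actual key
names and values in config.py are assumptions here:

.. code-block:: python

    model_config = {
        'batch_size': 64,     # samples per weight update
        'lr': 0.001,          # learning rate
        'early_stop': True,   # apply early stopping
        'mode': 'max',        # early stopping mode
        'delta': 0,           # minimum improvement of the metric
        'patience': 10,       # epochs to wait for an improvement
        'epochs': 50,         # maximum number of epochs to train
        'checkpoint': False,  # resume from an existing checkpoint
        'save': True,         # save the model state to disk
    }
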
diff --git a/pysegcnn/preprocessing/sparcs.py b/pysegcnn/preprocessing/sparcs.py
index 0f12f8a..e298e28 100644
--- a/pysegcnn/preprocessing/sparcs.py
+++ b/pysegcnn/preprocessing/sparcs.py
@@ -2,6 +2,8 @@
 
 After downloading the Sparcs dataset, from the repository's root directory run:
 
+.. code-block:: bash
+
     python pysegcnn/preprocessing/sparcs.py
 
 The above command will print the usage of the script.
-- 
GitLab