diff --git a/pysegcnn/main/eval.py b/pysegcnn/main/eval.py
index 11a41ecd8b809243b1adeb288691bc2fdf59ec8f..2ecfe6055aa079b876a95158e55d1aae420648a5 100644
--- a/pysegcnn/main/eval.py
+++ b/pysegcnn/main/eval.py
@@ -2,9 +2,8 @@
 
 Steps to run a model evaluation:
 
-    1. Configure the dictionary ``eval_config`` in
-    :py:mod:`pysegcnn.main.config.py`
-    2. Save :py:mod:`pysegcnn.main.config.py`
+    1. Configure the model evaluation in :py:mod:`pysegcnn.main.eval_config.py`
+    2. Save :py:mod:`pysegcnn.main.eval_config.py`
     3. In a terminal, navigate to the repository's root directory
     4. Run
 
@@ -29,7 +28,7 @@ License
 
 # locals
 from pysegcnn.core.trainer import NetworkInference
-from pysegcnn.main.config import eval_config
+from pysegcnn.main.eval_config import eval_config
 
 
 if __name__ == '__main__':
diff --git a/pysegcnn/main/eval_config.py b/pysegcnn/main/eval_config.py
new file mode 100644
index 0000000000000000000000000000000000000000..2b5f71ba472c3f5e8c649fb7821e6451aa0685f1
--- /dev/null
+++ b/pysegcnn/main/eval_config.py
@@ -0,0 +1,170 @@
+"""The configuration file to train and evaluate a model.
+
+The configuration is handled by the configuration dictionaries.
+
+Modify the values to suit your needs, but DO NOT modify the keys.
+
+License
+-------
+
+    Copyright (c) 2020 Daniel Frisinghelli
+
+    This source code is licensed under the GNU General Public License v3.
+
+    See the LICENSE file in the repository's root directory.
+
+"""
+
+# !/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+# builtins
+import pathlib
+
+# locals
+from pysegcnn.core.utils import search_files
+
+# path to this file
+HERE = pathlib.Path(__file__).resolve().parent
+
+# path to the datasets on the current machine
+DRIVE_PATH = pathlib.Path('C:/Eurac/Projects/CCISNOW/_Datasets/')
+# DRIVE_PATH = pathlib.Path('/mnt/CEPH_PROJECTS/cci_snow/dfrisinghelli/_Datasets/')  # nopep8
+
+# name and paths to the datasets
+DATASETS = {'Sparcs': DRIVE_PATH.joinpath('Sparcs'),
+            'Alcd': DRIVE_PATH.joinpath('Alcd/60m')
+            }
+
+# name of the target dataset
+TRG_DS = 'Alcd'
+
+# spectral bands to use for training
+BANDS = ['red', 'green', 'blue', 'nir', 'swir1', 'swir2']
+
+# tile size of a single sample
+TILE_SIZE = 128
+
+# the target dataset configuration dictionary
+trg_ds = {
+    'dataset_name': TRG_DS,
+    'root_dir': DATASETS[TRG_DS],
+    'gt_pattern': '(.*)Labels\\.tif',
+    'bands': BANDS,
+    'tile_size': TILE_SIZE,
+    'pad': True,
+    'sort': True,
+    'transforms': [],
+    'merge_labels': {'Cirrus': 'Cloud',
+                     'Not_used': 'No_data'}
+
+}
+
+# the target dataset split configuration dictionary
+trg_ds_split = {
+
+    # 'split_mode': 'tile',
+    'split_mode': 'scene',
+    'k_folds': 1,  # keep k_folds=1 for evaluating models
+    'seed': 0,
+    'shuffle': True,
+    'ttratio': 1,
+    'tvratio': 0.8,
+
+}
+
+# the evaluation configuration
+eval_config = {
+
+    # -------------------------------------------------------------------------
+    # ----------------------------- Evaluation --------------------------------
+    # -------------------------------------------------------------------------
+
+    # these options are only used for evaluating a trained model using
+    # pysegcnn.main.eval.py
+
+    # the model(s) to evaluate
+    'state_files': search_files(HERE, '*.pt'),
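+    # alternatively, list the model state files explicitly; the path below is
+    # a hypothetical example:
+    # 'state_files': [HERE.joinpath('my_model.pt')],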
+
+    # Evaluate on datasets defined at training time ---------------------------
+
+    # implicit=True,  models are evaluated on the training, validation
+    #                 and test datasets defined at training time
+    # implicit=False, models are evaluated on an explicitly defined dataset
+    #                 'ds'
+    'implicit': True,
+    # 'implicit': False,
+
+    # The options 'domain' and 'test' define on which domain (source, target)
+    # and on which set (training, validation, test) to evaluate the model.
+    # NOTE: If the specified set was not available at training time, an error
+    #       is raised.
+
+    # whether to evaluate the model on the labelled source domain or the
+    # (un)labelled target domain
+    # if domain='trg',  target domain
+    # if domain='src',  source domain
+    # 'domain': 'src',
+    'domain': 'trg',
+
+    # the subset to evaluate the model on
+    # test=False (or 0): evaluate on the validation set
+    # test=True  (or 1): evaluate on the test set
+    # test=None        : evaluate on the training set
+    # 'test': True,
+    'test': None,
+    # 'test': False,
+
+    # whether to map the model labels from the model source domain to the
+    # defined 'domain'
+    # For models trained via unsupervised domain adaptation, the classes of the
+    # source domain, i.e. the classes the model is trained with, may differ
+    # from the classes of the target domain. Setting 'map_labels'=True means
+    # mapping the source classes to the target classes. This is only
+    # possible if the target classes are a subset of the source classes.
+    'map_labels': False,
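+    # EXAMPLE (sketch, assuming the Sparcs -> Alcd setup in this file): with
+    #         map_labels=True, a model trained on the Sparcs classes can only
+    #         be evaluated on Alcd if each Alcd class (e.g. 'Cloud') is also
+    #         a Sparcs class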
+
+    # Evaluate on an explicitly defined dataset -------------------------------
+
+    # OPTIONAL: If 'ds' is specified and 'implicit'=False, the model is not
+    #           evaluated on the datasets defined at training time, but on the
+    #           dataset defined by 'trg_ds'.
+
+    # the dataset to evaluate the model on (optional)
+    'ds': trg_ds,
+
+    # the dataset split to use for 'ds'
+    'ds_split': trg_ds_split,
+
+    # Evaluation options ------------------------------------------------------
+
+    # whether to compute and plot the confusion matrix
+    # output path is: pysegcnn/main/_graphics/
+    # 'cm': True,
+    'cm': False,
+
+    # whether to predict each sample or each scene individually
+    # False: each sample is predicted individually and the scenes are not
+    #        reconstructed
+    # True: each scene is first reconstructed and then the whole scene is
+    #       predicted at once
+    # NOTE: this option works only for datasets split by split_mode="scene"
+    'predict_scene': True,
+
+    # whether to save plots of (input, ground truth, prediction) for each scene
+    # in the train/validation/test dataset to disk, applies if
+    # predict_scene=True
+    # output path is: pysegcnn/main/_scenes/
+    'plot_scenes': True,
+
+    # plot_bands defines the bands used to plot a false color composite of
+    # the input scene: red = bands[0], green = bands[1], blue = bands[2]
+    'plot_bands': ['nir', 'red', 'green'],
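+    # EXAMPLE: with plot_bands=['nir', 'red', 'green'], the nir band is shown
+    #          in the red channel, red in the green channel and green in the
+    #          blue channel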
+
+    # size of the figures
+    'figsize': (16, 9),
+
+    # degree of contrast stretching for false color composite
+    'alpha': 5
+
+}
diff --git a/pysegcnn/main/train_config.py b/pysegcnn/main/train_config.py
new file mode 100644
index 0000000000000000000000000000000000000000..b7dc82ab050c6827cfd69625045f41f5a43b5cad
--- /dev/null
+++ b/pysegcnn/main/train_config.py
@@ -0,0 +1,258 @@
+"""The configuration file to train a model on a single domain.
+
+The configuration is handled by the configuration dictionaries.
+
+Modify the values to suit your needs, but DO NOT modify the keys.
+
+The models can be trained with :py:mod:`pysegcnn.main.train_source.py`.
+
+License
+-------
+
+    Copyright (c) 2020 Daniel Frisinghelli
+
+    This source code is licensed under the GNU General Public License v3.
+
+    See the LICENSE file in the repository's root directory.
+
+"""
+
+# !/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+# builtins
+import pathlib
+
+# from pysegcnn.core.transforms import Augment, FlipLr, FlipUd, Noise
+
+# path to this file
+HERE = pathlib.Path(__file__).resolve().parent
+
+# path to the datasets on the current machine
+DRIVE_PATH = pathlib.Path('C:/Eurac/Projects/CCISNOW/_Datasets/')
+# DRIVE_PATH = pathlib.Path('/mnt/CEPH_PROJECTS/cci_snow/dfrisinghelli/_Datasets/')  # nopep8
+
+# name and paths to the datasets
+DATASETS = {'Sparcs': DRIVE_PATH.joinpath('Sparcs'),
+            'Alcd': DRIVE_PATH.joinpath('Alcd/60m')
+            }
+
+# name of the dataset
+DS_NAME = 'Sparcs'
+
+# spectral bands to use for training
+BANDS = ['red', 'green', 'blue', 'nir', 'swir1', 'swir2']
+
+# tile size of a single sample
+TILE_SIZE = 128
+
+# number of folds for cross validation
+K_FOLDS = 1
+
+# the source dataset configuration dictionary
+ds_config = {
+
+    # -------------------------------------------------------------------------
+    # Dataset -----------------------------------------------------------------
+    # -------------------------------------------------------------------------
+
+    # name of the dataset
+    'dataset_name': DS_NAME,
+
+    # path to the dataset
+    'root_dir': DATASETS[DS_NAME],
+
+    # a regex pattern to match the ground truth file naming convention
+    'gt_pattern': '(.*)mask\\.png',
+    # 'gt_pattern': '(.*)class\\.img',
+
+    # define the bands to use to train the segmentation network:
+    # either a list of bands, e.g. ['red', 'green', 'nir', 'swir2', ...]
+    # or [], which corresponds to using all available bands
+    # IMPORTANT: the list of bands should be equal for the source and target
+    #            domains, when using any sort of transfer learning
+    'bands': BANDS,
+
+    # define the size of the network input
+    # if None, the size will default to the size of a scene
+    'tile_size': TILE_SIZE,
+
+    # whether to centrally pad the scenes with a constant value
+    # if True, padding is used if the scenes are not evenly divisible into
+    # tiles of size (tile_size, tile_size)
+    # 'pad': False,
+    'pad': True,
+
+    # whether to sort the dataset in chronological order, useful for time
+    # series data
+    # 'sort': True,
+    'sort': False,
+
+    # whether to artificially increase the training data size using data
+    # augmentation methods
+
+    # Supported data augmentation methods are:
+    #   - FlipLr: horizontally flip an image
+    #   - FlipUd: vertically flip an image
+    #   - Noise:  add Gaussian noise with defined mean and variance to an image
+    #             two modes for adding noise are available:
+    #                 - speckle: image = image + image * noise
+    #                 - add:     image = image + noise
+    #             pixel values equal to 'exclude' (default=0) are not modified
+    #             by adding noise (i.e., the "no data" pixels added by padding)
+    # More details can be found in pysegcnn/core/transforms.py
+
+    # A probability can be assigned to each transformation so that it may or
+    # may not be applied, thus
+    #    - set p=1 to a transformation to always apply it
+    #    - set p=0 to a transformation to never apply it
+    #    - set 0 < p < 1 to apply a transformation with randomness
+
+    # transforms is a list of transformations to apply to the original data
+    # if transforms=[], no transformation is applied and only the original
+    # dataset is used
+    'transforms': [],
+
+    # if you provide lists to transforms, each list represents a distinct
+    # transformation of the original dataset
+    # here is an example applying two sets of transformations:
+    #    1: FlipLr + Noise
+    #    2: FlipLr + Noise + FlipUd
+    # the resulting dataset will have 3 times the size of the original dataset,
+    # i.e. the original dataset + the two transformed versions of it
+
+    # 'transforms': [
+    #     Augment([
+    #         FlipLr(p=0.5),
+    #         Noise(mode='speckle', mean=0, var=0.1, p=0.5, exclude=0)
+    #         ]),
+    #     Augment([
+    #         FlipLr(p=0.5),
+    #         Noise(mode='speckle', mean=0, var=0.1, p=0.5, exclude=0),
+    #         FlipUd(p=0.5)
+    #         ]),
+    #     ],
+
+    # The label mapping dictionary, where each (key, value) pair represents a
+    # distinct label mapping. The keys are the labels to be mapped and the
+    # values are the corresponding labels to be mapped to.
+    # NOTE: Passing an empty dictionary means all labels are preserved as is
+    # 'merge_labels': {}
+    'merge_labels': {'Shadow_over_water': 'Shadow',
+                     'Flooded': 'Land'}
+
+    # EXAMPLE: merge label class 'Shadow over Water' to label class 'Shadow'
+    # 'merge_labels': {'Shadow_over_water': 'Shadow'}
+}
+
+# the source dataset split configuration dictionary
+ds_split_config = {
+
+    # -------------------------------------------------------------------------
+    # Dataset split -----------------------------------------------------------
+    # -------------------------------------------------------------------------
+
+    # the mode to split the dataset:
+    #
+    #    - 'tile':   for each scene, the tiles can be distributed among the
+    #                training, validation and test set
+    #
+    #    - 'scene':  for each scene, all the tiles of the scene are included
+    #                in either the training set, the validation set or the
+    #                test set
+    # 'split_mode': 'tile',
+    'split_mode': 'scene',
+
+    # the number of folds for cross validation
+    #
+    # k_folds = 1 : The model is trained with a single dataset split based on
+    #               'tvratio' and 'ttratio'
+    # k_folds > 1 : The model is trained via cross validation on k_folds splits
+    #               of the dataset
+    'k_folds': K_FOLDS,
+
+    # the random seed for the random number generator
+    # ensures reproducibility of the training, validation and test data split
+    'seed': 0,
+
+    # whether to shuffle the data before splitting
+    'shuffle': True,
+
+    # -------------------------------------------------------------------------
+    # IMPORTANT: these settings only apply if 'k_folds'=1
+    # -------------------------------------------------------------------------
+
+    # (ttratio * 100) % of the dataset will be used for training and
+    # validation
+    # used if 'k_folds'=1
+    'ttratio': 1,
+
+    # (ttratio * tvratio) * 100 % of the dataset will be used for training
+    # (ttratio * (1 - tvratio)) * 100 % will be used for validation
+    # used if 'k_folds'=1
+    'tvratio': 0.8,
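+
+    # EXAMPLE: a worked split, assuming the semantics described above:
+    #          ttratio=1 and tvratio=0.8 yield 80 % of the data for training,
+    #          20 % for validation and 0 % for testing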
+
+}
+
+
+# the model configuration dictionary
+model_config = {
+
+    # -------------------------------------------------------------------------
+    # Network -----------------------------------------------------------------
+    # -------------------------------------------------------------------------
+
+    # define the model
+    'model_name': 'Segnet',
+
+    # -------------------------------------------------------------------------
+    # Optimizer ---------------------------------------------------------------
+    # -------------------------------------------------------------------------
+
+    # define an optimizer to update the network weights
+    'optim_name': 'Adam',
+
+    # optimizer keyword arguments
+    'optim_kwargs': {
+        'lr': 0.001,  # the learning rate
+        'weight_decay': 0,  # the weight decay rate
+        'amsgrad': False  # whether to use AMSGrad variant (for Adam)
+        },
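+
+    # NOTE: a sketch of how these keyword arguments are presumably consumed,
+    #       using the standard torch API:
+    #       torch.optim.Adam(net.parameters(), lr=0.001, weight_decay=0,
+    #                        amsgrad=False)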
+
+    # -------------------------------------------------------------------------
+    # Training configuration --------------------------------------------------
+    # -------------------------------------------------------------------------
+
+    # whether to save the model state to disk
+    # model states are saved in:    pysegcnn/main/_models
+    # model log files are saved in: pysegcnn/main/_logs
+    'save': True,
+
+    # whether to resume training from an existing model checkpoint
+    'checkpoint': False,
+
+    # define the batch size
+    # determines how many samples of the dataset are processed before the
+    # weights of the network are updated (via mini-batch gradient descent)
+    'batch_size': 128,
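+
+    # EXAMPLE: with batch_size=128 and tile_size=128, each weight update is
+    #          computed from 128 tiles of (128 x 128) pixels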
+
+    # the seed for the random number generator initializing the network weights
+    'torch_seed': 0,
+
+    # whether to stop training early if the accuracy (loss) on the validation
+    # set does not increase (decrease) by more than delta over patience epochs
+    # -------------------------------------------------------------------------
+    # The early stopping metric is chosen as:
+    #    - validation set accuracy if mode='max'
+    #    - validation set loss if mode='min'
+    # -------------------------------------------------------------------------
+    'early_stop': True,
+    'mode': 'max',
+    'delta': 0,
+    'patience': 10,
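+
+    # EXAMPLE: with mode='max', delta=0 and patience=10, training stops early
+    #          if the validation set accuracy does not improve over 10
+    #          consecutive epochs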
+
+    # define the number of epochs: the maximum number of iterations over
+    # the whole training dataset
+    'epochs': 100,
+
+}
diff --git a/pysegcnn/main/train_source.py b/pysegcnn/main/train_source.py
index a3097aee79b7618951d56f5d52ddf44128fe2eac..55cbaa8dbc4f772533b93533de50361deabc19ed 100644
--- a/pysegcnn/main/train_source.py
+++ b/pysegcnn/main/train_source.py
@@ -2,17 +2,17 @@
 
 Steps to launch a model run:
 
-    1. Configure the model run in :py:mod:`pysegcnn.main.config.py`
-        - configure the dataset   : ``src_ds_config``
-        - configure the split     : ``src_ds_config`
+    1. Configure the model run in :py:mod:`pysegcnn.main.train_config.py`
+        - configure the dataset   : ``ds_config``
+        - configure the split     : ``ds_split_config``
         - configure the model     : ``model_config``
-    2. Save :py:mod:`pysegcnn.main.config.py`
+    2. Save :py:mod:`pysegcnn.main.train_config.py`
     3. In a terminal, navigate to the repository's root directory
     4. Run
 
     .. code-block:: bash
 
-        python pysegcnn/main/train.py
+        python pysegcnn/main/train_source.py
 
 
 License
@@ -36,16 +36,15 @@ from logging.config import dictConfig
 from pysegcnn.core.trainer import (DatasetConfig, SplitConfig, ModelConfig,
                                    StateConfig, LogConfig,
                                    ClassificationNetworkTrainer)
-from pysegcnn.main.config import (src_ds_config, src_split_config,
-                                  model_config)
+from pysegcnn.main.train_config import ds_config, ds_split_config, model_config
 from pysegcnn.core.logging import log_conf
 
 
 if __name__ == '__main__':
 
     # (i) instanciate the source domain configurations
-    src_dc = DatasetConfig(**src_ds_config)   # source domain dataset
-    src_sc = SplitConfig(**src_split_config)  # source domain dataset split
+    src_dc = DatasetConfig(**ds_config)   # source domain dataset
+    src_sc = SplitConfig(**ds_split_config)  # source domain dataset split
 
     # (ii) instanciate the model configuration
     net_mc = ModelConfig(**model_config)
@@ -97,4 +96,5 @@ if __name__ == '__main__':
             )
 
         # (xii) train the model
+        LogConfig.init_log('Fold {} / {}'.format(fold + 1, len(src_folds)))
         training_state = trainer.train()
diff --git a/pysegcnn/main/train_transfer.py b/pysegcnn/main/train_transfer.py
new file mode 100644
index 0000000000000000000000000000000000000000..ac40daeabc05670131bad6695c5f1dd015dc2380
--- /dev/null
+++ b/pysegcnn/main/train_transfer.py
@@ -0,0 +1,139 @@
+"""Main script to train a model using transfer learning.
+
+Steps to launch a model run:
+
+    1. Configure the model run in
+    :py:mod:`pysegcnn.main.train_transfer_config.py`
+        - configure the dataset(s)   : ``src_ds_config`` and ``trg_ds_config``
+        - configure the split(s)     : ``src_split_config`` and ``trg_split_config``
+        - configure the model        : ``model_config``
+        - configure transfer learning: ``tlda_config``
+    2. Save :py:mod:`pysegcnn.main.train_transfer_config.py`
+    3. In a terminal, navigate to the repository's root directory
+    4. Run
+
+    .. code-block:: bash
+
+        python pysegcnn/main/train_transfer.py
+
+
+License
+-------
+
+    Copyright (c) 2020 Daniel Frisinghelli
+
+    This source code is licensed under the GNU General Public License v3.
+
+    See the LICENSE file in the repository's root directory.
+
+"""
+
+# !/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+# builtins
+from logging.config import dictConfig
+
+# locals
+from pysegcnn.core.trainer import (
+    DatasetConfig, SplitConfig, ModelConfig, TransferLearningConfig,
+    StateConfig, LogConfig, DomainAdaptationTrainer)
+from pysegcnn.main.train_transfer_config import (
+    src_ds_config, src_split_config, trg_ds_config, trg_split_config,
+    model_config, tlda_config)
+from pysegcnn.core.logging import log_conf
+
+
+if __name__ == '__main__':
+
+    # (i) instantiate the source domain configurations
+    src_dc = DatasetConfig(**src_ds_config)   # source domain dataset
+    src_sc = SplitConfig(**src_split_config)  # source domain dataset split
+
+    # (ii) instantiate the target domain configuration
+    trg_dc = DatasetConfig(**trg_ds_config)   # target domain dataset
+    trg_sc = SplitConfig(**trg_split_config)  # target domain dataset split
+
+    # (iii) instantiate the datasets to train the model on
+    src_ds = src_dc.init_dataset()
+    trg_ds = trg_dc.init_dataset()
+
+    # (iv) instantiate the model configuration
+    net_mc = ModelConfig(**model_config)
+
+    # (v) instantiate the transfer learning configuration
+    trn_sf = TransferLearningConfig(**tlda_config)
+
+    # (vi) instantiate the model state configuration
+    net_sc = StateConfig()
+
+    # (vii) instantiate the source training, validation and test dataset folds
+    src_folds = src_sc.train_val_test_split(src_ds)
+
+    # (viii) instantiate the target training, validation and test dataset folds
+    trg_folds = trg_sc.train_val_test_split(trg_ds)
+
+    # (ix) iterate over the different folds
+    for fold, (src_fold, trg_fold) in enumerate(zip(src_folds, trg_folds)):
+
+        # (x) the source dataloaders
+        src_tra_dl, src_val_dl, src_tes_dl = src_sc.dataloaders(
+            *src_fold.values(), batch_size=net_mc.batch_size, shuffle=True,
+            drop_last=False)
+
+        # (xi) the target dataloaders
+        trg_tra_dl, trg_val_dl, trg_tes_dl = trg_sc.dataloaders(
+            *trg_fold.values(), batch_size=net_mc.batch_size, shuffle=True,
+            drop_last=False)
+
+        # (xii) instantiate the model state file
+        state_file = net_sc.init_state(src_dc, src_sc, net_mc,
+                                       trg_dc=trg_dc, trg_sc=trg_sc, tc=trn_sf,
+                                       fold=fold)
+
+        # (xiii) instantiate the logging configuration
+        net_lc = LogConfig(state_file)
+        dictConfig(log_conf(net_lc.log_file))
+
+        # (xiv) instantiate the model
+        if trn_sf.supervised or trn_sf.uda_from_pretrained:
+            # check whether to load a pretrained model for (un)supervised
+            # transfer learning
+            net, optimizer, checkpoint = trn_sf.transfer_model(
+                trn_sf.pretrained_path,
+                nclasses=len(src_ds.labels),
+                optim_kwargs=net_mc.optim_kwargs,
+                freeze=trn_sf.freeze)
+        else:
+            # initialize model from scratch or from an existing model
+            # checkpoint
+            net, optimizer, checkpoint = net_mc.init_model(
+                len(src_ds.use_bands), len(src_ds.labels), state_file)
+
+        # (xv) instantiate the network trainer class
+        trainer = DomainAdaptationTrainer(
+            model=net,
+            optimizer=optimizer,
+            state_file=net.state_file,
+            src_train_dl=src_tra_dl,
+            src_valid_dl=src_val_dl,
+            src_test_dl=src_tes_dl,
+            epochs=net_mc.epochs,
+            nthreads=net_mc.nthreads,
+            early_stop=net_mc.early_stop,
+            mode=net_mc.mode,
+            delta=net_mc.delta,
+            patience=net_mc.patience,
+            checkpoint_state=checkpoint,
+            save=net_mc.save,
+            supervised=trn_sf.supervised,
+            trg_train_dl=trg_tra_dl,
+            trg_valid_dl=trg_val_dl,
+            trg_test_dl=trg_tes_dl,
+            uda_loss_function=trn_sf.uda_loss_function,
+            uda_lambda=trn_sf.uda_lambda,
+            uda_pos=trn_sf.uda_pos)
+
+        # (xvi) train the model
+        LogConfig.init_log('Fold {} / {}'.format(fold + 1, len(src_folds)))
+        training_state = trainer.train()
diff --git a/pysegcnn/main/config.py b/pysegcnn/main/train_transfer_config.py
similarity index 78%
rename from pysegcnn/main/config.py
rename to pysegcnn/main/train_transfer_config.py
index 583bc7d734095d8ee15f393a2f112a6e0fc061d3..533b7be432ac07c12abd8c130ac7e550d747e2a1 100644
--- a/pysegcnn/main/config.py
+++ b/pysegcnn/main/train_transfer_config.py
@@ -243,7 +243,7 @@ model_config = {
     # optimizer keyword arguments
     'optim_kwargs': {
         'lr': 0.001,  # the learning rate
-        'weight_decay': 0.01,  # the weight decay rate
+        'weight_decay': 0,  # the weight decay rate
         'amsgrad': False  # whether to use AMSGrad variant (for Adam)
         },
 
@@ -358,101 +358,4 @@ tlda_config = {
     # whether to freeze the pretrained model weights
     'freeze': True,
 
-    }
-
-# the evaluation configuration
-eval_config = {
-
-    # -------------------------------------------------------------------------
-    # ----------------------------- Evaluation --------------------------------
-    # -------------------------------------------------------------------------
-
-    # these options are only used for evaluating a trained model using
-    # pysegcnn.main.eval.py
-
-    # the model(s) to evaluate
-    'state_files': [''],
-
-    # Evaluate on datasets defined at training time ---------------------------
-
-    # implicit=True,  models are evaluated on the training, validation
-    #                 and test datasets defined at training time
-    # implicit=False, models are evaluated on an explicitly defined dataset
-    #                 'ds'
-    'implicit': True,
-    # 'implicit': False,
-
-    # The options 'domain' and 'test' define on which domain (source, target)
-    # and on which set (training, validation, test) to evaluate the model.
-    # NOTE: If the specified set was not available at training time, an error
-    #       is raised.
-
-    # whether to evaluate the model on the labelled source domain or the
-    # (un)labelled target domain
-    # if domain='trg',  target domain
-    # if domain='src',  source domain
-    # 'domain': 'src',
-    'domain': 'trg',
-
-    # the subset to evaluate the model on
-    # test=False, 0 means evaluating on the validation set
-    # test=True, 1 means evaluating on the test set
-    # test=None means evaluating on the training set
-    # 'test': True,
-    'test': None,
-    # 'test': False,
-
-    # whether to map the model labels from the model source domain to the
-    # defined 'domain'
-    # For models trained via unsupervised domain adaptation, the classes of the
-    # source domain, i.e. the classes the model is trained with, may differ
-    # from the classes of the target domain. Setting 'map_labels'=True, means
-    # mapping the source classes to the target classes. Obviously, this is only
-    # possible if the target classes are a subset of the source classes.
-    'map_labels': False,
-
-    # Evaluate on an explicitly defined dataset -------------------------------
-
-    # OPTIONAL: If 'ds' is specified and 'implicit'=False, the model is not
-    #           evaluated on the datasets defined at training time, but on the
-    #           dataset defined by 'ds'.
-
-    # the dataset to evaluate the model on (optional)
-    'ds': trg_ds_config,
-
-    # the dataset split to use for 'ds'
-    'ds_split': trg_split_config,
-
-    # Evaluation options ------------------------------------------------------
-
-    # whether to compute and plot the confusion matrix
-    # output path is: pysegcnn/main/_graphics/
-    # 'cm': True,
-    'cm': False,
-
-    # whether to predict each sample or each scene individually
-    # False: each sample is predicted individually and the scenes are not
-    #        reconstructed
-    # True: each scene is first reconstructed and then the whole scene is
-    #       predicted at once
-    # NOTE: this option works only for datasets split by split_mode="scene" or
-    #       split_mode="date"
-    'predict_scene': True,
-
-    # whether to save plots of (input, ground truth, prediction) for each scene
-    # in the train/validation/test dataset to disk, applies if
-    # predict_scene=True
-    # output path is: pysegcnn/main/_scenes/
-    'plot_scenes': True,
-
-    # plot_bands defines the bands used to plot a false color composite of
-    # the input scene: red': bands[0], green': bands[1], blue': bands[2]
-    'plot_bands': ['nir', 'red', 'green'],
-
-    # size of the figures
-    'figsize': (16, 9),
-
-    # degree of constrast stretching for false color composite
-    'alpha': 5
-
 }