Skip to content
Snippets Groups Projects
Commit fcc5bc7f authored by Frisinghelli Daniel's avatar Frisinghelli Daniel
Browse files

Default configuration.

parent be0a7d6e
No related branches found
No related tags found
No related merge requests found
......@@ -50,7 +50,7 @@ CHUNKS = {'time': 365}
# -----------------------------------------------------------------------------
# include day of year as predictor
DOY = False
DOY = True
# use digital elevation model instead of model orography
DEM = True
......@@ -85,7 +85,7 @@ STRATIFY = False
# size of the validation set w.r.t. the training set
# e.g., VALID_SIZE = 0.1 means: 90% of CALIB_PERIOD for training
# 10% of CALIB_PERIOD for validation
VALID_SIZE = 0.1
VALID_SIZE = 0.2
# number of folds for training with KFold cross-validation
CV = 5
......@@ -117,18 +117,18 @@ FILTERS = [32, 64, 128, 256]
# BernoulliGammaLoss (NLL of Bernoulli-Gamma distribution)
# BernoulliWeibullLoss (NLL of Bernoulli-Weibull distribution)
# LOSS = L1Loss()
LOSS = MSELoss()
# LOSS = BernoulliGammaLoss(min_amount=1)
# LOSS = MSELoss()
LOSS = BernoulliGammaLoss(min_amount=1)
# LOSS = BernoulliWeibullLoss(min_amount=1)
# stochastic optimization algorithm
OPTIM = torch.optim.SGD
# OPTIM = torch.optim.Adam
OPTIM_PARAMS = {'lr': 1e-1, # learning rate
'weight_decay': 1e-6 # regularization rate
OPTIM_PARAMS = {'lr': 1e-3, # learning rate
'weight_decay': 0 # regularization rate
}
if OPTIM == torch.optim.SGD:
OPTIM_PARAMS['momentum'] = 0.9
OPTIM_PARAMS['momentum'] = 0.99
# learning rate scheduler
# LR_SCHEDULER = torch.optim.lr_scheduler.MultiStepLR
......@@ -148,14 +148,14 @@ BATCH_SIZE = 16
# network training configuration
TRAIN_CONFIG = {
'checkpoint_state': {},
'epochs': 250,
'epochs': 50,
'save': True,
'save_loaders': False,
'early_stop': True,
'patience': 25,
'patience': 10,
'multi_gpu': True,
'classification': False,
'clip_gradients': True
'clip_gradients': False
}
# whether to overwrite existing models
......
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment