Commit df4ba10c authored by Frisinghelli Daniel

Decreased number of epochs to 20.

parent 7b6fa4e7
@@ -4,7 +4,6 @@
# -*- coding: utf-8 -*-
# builtins
import sys
import time
import logging
from datetime import timedelta
@@ -19,7 +18,6 @@ from torch.utils.data import DataLoader
# locals
from pysegcnn.core.utils import search_files
from pysegcnn.core.trainer import NetworkTrainer, LogConfig
-from pysegcnn.core.models import Network
from pysegcnn.core.logging import log_conf
from climax.core.dataset import ERA5Dataset, NetCDFDataset
from climax.core.loss import MSELoss, L1Loss
@@ -36,11 +34,10 @@ LOGGER = logging.getLogger(__name__)
# network training configuration
TRAIN_CONFIG = {
    'checkpoint_state': {},
-    'epochs': 75,
+    'epochs': 20,
    'save': True,
    'save_loaders': False,
    'early_stop': False,
-    'patience': 100,
    'multi_gpu': True,
    'classification': False,
    'clip_gradients': False
@@ -51,7 +48,7 @@ MIN_LR = 1e-4
# learning rate scheduler: increase lr each epoch
LR_SCHEDULER = torch.optim.lr_scheduler.ExponentialLR
-LR_SCHEDULER_PARAMS = {'gamma': 1.15}
+LR_SCHEDULER_PARAMS = {'gamma': 1.6}
if __name__ == '__main__':
......
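Taken together with the comment "# learning rate scheduler: increase lr each epoch", the two value changes shorten the learning-rate sweep: ExponentialLR multiplies the learning rate by gamma after every epoch, so starting from MIN_LR = 1e-4 (an assumption the visible hunks suggest but do not actually show), the old settings (gamma = 1.15 over 75 epochs) would climb to roughly 1e-4 * 1.15^74 ≈ 3.1, while the new settings (gamma = 1.6 over 20 epochs) reach roughly 1e-4 * 1.6^19 ≈ 0.76. The following is a minimal sketch of that schedule only, not the project's NetworkTrainer; the Linear model and Adam optimizer are placeholders added purely to drive the scheduler.

# Minimal sketch of the schedule implied by the new values; the dummy model,
# the Adam optimizer and the MIN_LR starting point are illustrative
# assumptions, not taken from the diff above.
import torch

MIN_LR = 1e-4
EPOCHS = 20   # new value: 'epochs': 20
GAMMA = 1.6   # new value: {'gamma': 1.6}

model = torch.nn.Linear(8, 1)                                # placeholder model
optimizer = torch.optim.Adam(model.parameters(), lr=MIN_LR)  # placeholder optimizer
scheduler = torch.optim.lr_scheduler.ExponentialLR(optimizer, gamma=GAMMA)

for epoch in range(EPOCHS):
    # ... one training epoch would run here ...
    print('epoch {:2d}: lr = {:.6f}'.format(epoch, optimizer.param_groups[0]['lr']))
    scheduler.step()  # lr <- lr * gamma after each epoch

Printed over 20 epochs, the rate grows from 1e-4 to about 7.6e-1; with the previous gamma of 1.15 the same loop needed 75 epochs to reach about 3.1, which matches the shorter run configured in this commit.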