From df4ba10c7d447e2f00c2b0c2c225ab3df385833e Mon Sep 17 00:00:00 2001
From: "Daniel.Frisinghelli" <daniel.frisinghelli@eurac.edu>
Date: Wed, 20 Oct 2021 16:58:16 +0200
Subject: [PATCH] Decreased number of epochs to 20.

---
 climax/main/lr_range_test.py | 7 ++-----
 1 file changed, 2 insertions(+), 5 deletions(-)

diff --git a/climax/main/lr_range_test.py b/climax/main/lr_range_test.py
index e789ac9..f15e5c2 100644
--- a/climax/main/lr_range_test.py
+++ b/climax/main/lr_range_test.py
@@ -4,7 +4,6 @@
 # -*- coding: utf-8 -*-
 
 # builtins
-import sys
 import time
 import logging
 from datetime import timedelta
@@ -19,7 +18,6 @@ from torch.utils.data import DataLoader
 # locals
 from pysegcnn.core.utils import search_files
 from pysegcnn.core.trainer import NetworkTrainer, LogConfig
-from pysegcnn.core.models import Network
 from pysegcnn.core.logging import log_conf
 from climax.core.dataset import ERA5Dataset, NetCDFDataset
 from climax.core.loss import MSELoss, L1Loss
@@ -36,11 +34,10 @@ LOGGER = logging.getLogger(__name__)
 # network training configuration
 TRAIN_CONFIG = {
     'checkpoint_state': {},
-    'epochs': 75,
+    'epochs': 20,
     'save': True,
     'save_loaders': False,
     'early_stop': False,
-    'patience': 100,
     'multi_gpu': True,
     'classification': False,
     'clip_gradients': False
@@ -51,7 +48,7 @@ MIN_LR = 1e-4
 
 # learning rate scheduler: increase lr each epoch
 LR_SCHEDULER = torch.optim.lr_scheduler.ExponentialLR
-LR_SCHEDULER_PARAMS = {'gamma': 1.15}
+LR_SCHEDULER_PARAMS = {'gamma': 1.6}
 
 if __name__ == '__main__':
--
GitLab
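
Note (not part of the patch): the script runs a learning-rate range test,
starting at MIN_LR = 1e-4 and multiplying the learning rate by gamma after
every epoch via torch.optim.lr_scheduler.ExponentialLR. Raising gamma from
1.15 to 1.6 presumably compensates for the shorter run: 1e-4 * 1.15^74 is
roughly 3.1 and 1e-4 * 1.6^19 is roughly 0.76, so both settings sweep about
four orders of magnitude of learning rates, the new one in 20 epochs instead
of 75. The sketch below reproduces that schedule in isolation; the dummy
parameter and the print loop are illustrative stand-ins, not code from the
repository.

    import torch

    MIN_LR = 1e-4   # initial learning rate, as in the script
    GAMMA = 1.6     # per-epoch multiplier set by this patch
    EPOCHS = 20     # number of epochs set by this patch

    # a dummy parameter so the optimizer has something to own
    params = [torch.nn.Parameter(torch.zeros(1))]
    optimizer = torch.optim.SGD(params, lr=MIN_LR)
    scheduler = torch.optim.lr_scheduler.ExponentialLR(optimizer, gamma=GAMMA)

    for epoch in range(EPOCHS):
        # ... train one epoch at the current lr and record the loss ...
        print(f'epoch {epoch:2d}: lr = {scheduler.get_last_lr()[0]:.3e}')
        scheduler.step()  # lr <- lr * gamma after each epoch

In a range test, one would plot the recorded loss against these learning
rates and pick a value below the point where the loss starts to diverge.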