earth_observation_public / Climax

Commit f655da60 authored 3 years ago by Frisinghelli Daniel

Setup for hyperparameter grid search.

parent 970832cd
Showing 3 changed files with 121 additions and 78 deletions:

  Scripts/downscale.sh    +0  −63
  Scripts/grid_search.sh  +86 −0
  climax/main/config.py   +35 −15
Scripts/downscale.sh  (deleted, 100644 → 0)  +0 −63
#!/usr/bin/env bash

# activate conda environment
conda activate climax

# move to project repository
cd ~/git/climax

# loss functions
# LOSS=(L1Loss BernoulliGammaLoss BernoulliWeibullLoss MSELoss)
LOSS=(L1Loss BernoulliGammaLoss MSELoss)

# wet day thresholds to test
# WET_DAY_THRESHOLDS=(0 0.5 1 2 3 5)

# weight decay values to test
LAMBDA=(0 0.000001 0.00001 0.0001 0.001 0.01 1)

# learning rate values to test
LR=(0.1 0.01 0.005 0.001 0.0005)

# iterate over loss functions
for loss in ${LOSS[@]}; do

    # change loss function in configuration
    if [ "$loss" = "L1Loss" ] || [ "$loss" = "MSELoss" ]; then
        sed -i "s/LOSS\s*=.*/LOSS=$loss()/" ./climax/main/config.py
    else
        sed -i "s/LOSS\s*=.*/LOSS=$loss(min_amount=1)/" ./climax/main/config.py
    fi

    # iterate over weight decay values
    for lambda in ${LAMBDA[@]}; do

        # change weight regularization in configuration
        sed -i "s/'weight_decay':.*/'weight_decay': $lambda/" ./climax/main/config.py

        # run downscaling
        # python climax/main/downscale.py
        python climax/main/downscale_train.py
        python climax/main/downscale_infer.py
    done

    # iterate over learning rate values
    for lr in ${LR[@]}; do

        # change learning rate in configuration
        sed -i "s/'lr':.*/'lr': $lr/" ./climax/main/config.py

        # run downscaling
        # python climax/main/downscale.py
        python climax/main/downscale_train.py
        python climax/main/downscale_infer.py
    done

    # for w in ${WET_DAY_THRESHOLDS[@]}; do
    #     # change wet day threshold in configuration
    #     sed -i "s/min_amount\s*=.*/min_amount=$w)/" ./climax/main/config.py
    #     # run downscaling
    #     # python climax/main/downscale.py
    #     # python climax/main/downscale_train.py
    #     # python climax/main/downscale_infer.py
    # done

done
Scripts/grid_search.sh  (new file, 0 → 100644)  +86 −0
#!/usr/bin/env bash

# activate conda environment
conda activate climax

# move to project repository
cd ~/git/climax

# predictands
# PREDICTAND=(pr tasmin tasmax)
PREDICTAND=(pr)

# optimizers
# OPTIM=(torch.optim.Adam torch.optim.SGD)
OPTIM=(torch.optim.Adam)

# learning rate scheduler
LRSCHEDULER=(None torch.optim.lr_scheduler.CyclicLR)

# wet day thresholds to test
# WET_DAY_THRESHOLDS=(0 0.5 1 2 3 5)

# weight decay values to test
LAMBDA=(0 0.000001 0.00001 0.0001 0.001 0.01 1)

# iterate over predictands
for predictand in ${PREDICTAND[@]}; do

    # change predictand in configuration
    sed -i "s/PREDICTAND\s*=.*/PREDICTAND='$predictand'/" ./climax/main/config.py

    # define available loss functions for current predictand
    if [ "$predictand" = "pr" ]; then
        LOSS=(L1Loss BernoulliGammaLoss MSELoss)
    else
        LOSS=(L1Loss MSELoss)
    fi

    # iterate over loss functions
    for loss in ${LOSS[@]}; do

        # change loss function in configuration
        if [ "$loss" = "L1Loss" ] || [ "$loss" = "MSELoss" ]; then
            sed -i "s/LOSS\s*=.*/LOSS=$loss()/" ./climax/main/config.py
        else
            sed -i "s/LOSS\s*=.*/LOSS=$loss(min_amount=1)/" ./climax/main/config.py
        fi

        # iterate over the optimizer
        for optim in ${OPTIM[@]}; do

            # change optimizer in configuration
            sed -i "s/OPTIM\s*=.*/OPTIM=$optim/" ./climax/main/config.py

            # SGD with fixed and cyclic learning rate policy
            if [ "$optim" = "torch.optim.SGD" ]; then
                for scheduler in ${LRSCHEDULER[@]}; do

                    # change learning rate scheduler in configuration
                    sed -i "s/LR_SCHEDULER\s*=.*/LR_SCHEDULER=$scheduler/" ./climax/main/config.py

                    # iterate over weight decay values
                    for lambda in ${LAMBDA[@]}; do

                        # change weight regularization in configuration
                        sed -i "s/'weight_decay':.*/'weight_decay': $lambda/" ./climax/main/config.py

                        # run downscaling
                        # python climax/main/downscale.py
                        python climax/main/downscale_train.py
                        python climax/main/downscale_infer.py
                    done
                done
            else
                # iterate over weight decay values
                for lambda in ${LAMBDA[@]}; do

                    # change weight regularization in configuration
                    sed -i "s/'weight_decay':.*/'weight_decay': $lambda/" ./climax/main/config.py

                    # run downscaling
                    # python climax/main/downscale.py
                    python climax/main/downscale_train.py
                    python climax/main/downscale_infer.py
                done
            fi
        done
    done
done
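For reference, a minimal sketch of how the new script might be launched; the log file name is illustrative, and sourcing the conda shell hook (a default miniconda path is assumed here) is only needed when the script runs in a non-interactive shell where "conda activate" is not yet defined:

    # hypothetical invocation; adjust the conda path to the local installation
    source ~/miniconda3/etc/profile.d/conda.sh
    bash Scripts/grid_search.sh 2>&1 | tee grid_search.log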
climax/main/config.py  +35 −15
@@ -123,29 +123,49 @@ LOSS = MSELoss()
 # LOSS = BernoulliGammaLoss(min_amount=1)
 # LOSS = BernoulliWeibullLoss(min_amount=1)
-# batch size: number of time steps processed by the net in each iteration
-BATCH_SIZE = 16
-# base learning rate: constant or CyclicLR policy
-BASE_LR = 1e-4
-# maximum learning rate for CyclicLR policy
-MAX_LR = 1e-3
 # stochastic optimization algorithm
 # OPTIM = torch.optim.SGD
 OPTIM = torch.optim.Adam
+# batch size: number of time steps processed by the net in each iteration
+BATCH_SIZE = 16
+# maximum learning rate determined from learning rate range test
+if PREDICTAND == 'tasmin':
+    if isinstance(LOSS, L1Loss):
+        MAX_LR = 0.001 if OPTIM is torch.optim.Adam else 0.004
+    if isinstance(LOSS, MSELoss):
+        MAX_LR = 0.001 if OPTIM is torch.optim.Adam else 0.002
+if PREDICTAND == 'tasmax':
+    if isinstance(LOSS, L1Loss):
+        MAX_LR = 0.001
+    if isinstance(LOSS, MSELoss):
+        MAX_LR = 0.001 if OPTIM is torch.optim.Adam else 0.004
+if PREDICTAND == 'pr':
+    if isinstance(LOSS, L1Loss):
+        MAX_LR = 0.001
+    if isinstance(LOSS, MSELoss):
+        MAX_LR = 0.0004
+    if isinstance(LOSS, BernoulliGammaLoss):
+        MAX_LR = 0.0005 if OPTIM is torch.optim.Adam else 0.001
+# base learning rate: MAX_LR / 4 (Smith L. (2017))
+BASE_LR = MAX_LR / 4
 # optimization parameters
 OPTIM_PARAMS = {'lr': BASE_LR, 'weight_decay': 0}
-if OPTIM == torch.optim.SGD:
+if OPTIM is torch.optim.SGD:
     OPTIM_PARAMS['momentum'] = 0.99  # SGD with momentum
 # learning rate scheduler: CyclicLR policy
 LR_SCHEDULER = None
 # LR_SCHEDULER = torch.optim.lr_scheduler.CyclicLR
-LR_SCHEDULER_PARAMS = {'base_lr': BASE_LR, 'max_lr': MAX_LR, 'mode': 'triangular',
-                       'step_size_up': 400, 'cycle_momentum': False,
-                       'base_momentum': 0.9, 'max_momentum': 0.99}
+LR_SCHEDULER_PARAMS = {'base_lr': BASE_LR, 'max_lr': MAX_LR, 'mode': 'triangular',
+                       'cycle_momentum': True if OPTIM is torch.optim.SGD else False,
+                       'base_momentum': 0.9, 'max_momentum': 0.99,
+                       'step_size_up': 400}
 # whether to randomly shuffle time steps or to conserve time series for model
 # training

@@ -157,7 +177,7 @@ NORM = True
 # network training configuration
 TRAIN_CONFIG = {'checkpoint_state': {},
-                'epochs': 50,
+                'epochs': 100,
                 'save': True,
                 'save_loaders': False,
                 'early_stop': True,
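As a worked example of the new learning-rate rule: with PREDICTAND = 'pr', LOSS = BernoulliGammaLoss(min_amount=1) and OPTIM = torch.optim.Adam, the table above gives MAX_LR = 0.0005, hence BASE_LR = 0.0005 / 4 = 0.000125 and OPTIM_PARAMS = {'lr': 0.000125, 'weight_decay': 0}. Below is a hedged sketch of how these settings are presumably consumed downstream; the variable net and the two instantiation lines are illustrative assumptions, not part of this commit:

    # hypothetical usage sketch; 'net' stands for an assumed model instance
    optimizer = OPTIM(net.parameters(), **OPTIM_PARAMS)
    if LR_SCHEDULER is not None:
        # the LR_SCHEDULER_PARAMS keys (base_lr, max_lr, mode, cycle_momentum,
        # base_momentum, max_momentum, step_size_up) match CyclicLR's keyword arguments
        scheduler = LR_SCHEDULER(optimizer, **LR_SCHEDULER_PARAMS)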