Commit 68635fab authored by Frisinghelli Daniel's avatar Frisinghelli Daniel
Refactor.

parent 458843ac
%% Cell type:markdown id:032a6c39-ca0c-44d7-bf26-b4ea0ed87739 tags:
## ERA5-predictors
%% Cell type:code id:1daf69a6-f61b-484d-9b07-101da88f4b28 tags:
``` python
# builtins
import pathlib
# externals
import numpy as np
import pandas as pd
import xarray as xr
from sklearn.model_selection import train_test_split
# locals
from climax.core.dataset import ERA5Dataset
from climax.core.constants import ERA5_VARIABLES
from climax.core.utils import search_files
from climax.main.config import CALIB_PERIOD
from climax.main.io import OBS_PATH
```
%% Cell type:code id:e64afc53-097d-403c-b6ee-e6865b26a245 tags:
``` python
# path to ERA5 reanalysis data
ERA5_PATH = pathlib.Path('/mnt/CEPH_PROJECTS/FACT_CLIMAX/REANALYSIS/ERA5')
```
%% Cell type:code id:d2a127b5-faa0-4cf5-8d7c-bfb747d38b51 tags:
``` python
# list of valid predictor variable names
ERA5_VARIABLES
```
%% Cell type:code id:10c6c7e1-6ff2-4aa3-ace6-3adb7edf2583 tags:
``` python
# define the predictor variables you want to use
ERA5_PREDICTORS = ['geopotential', 'temperature', 'mean_sea_level_pressure'] # use geopotential, temperature and pressure
# you can change this list as you wish, e.g.:
# ERA5_PREDICTORS = ['geopotential', 'temperature'] # use only geopotential and temperature
# ERA5_PREDICTORS = ERA5_VARIABLES # use all ERA5 variables as predictors
# this checks if the variable names are correct
assert all([p in ERA5_VARIABLES for p in ERA5_PREDICTORS])
```
%% Cell type:markdown id:6f500399-4d61-41e8-9013-eaac69b3e49a tags:
### Use the climax package to load ERA5 predictors
%% Cell type:code id:57223fb2-efec-46c9-a9f7-ca71524627ef tags:
``` python
# define which pressure levels you want to use: currently only 500 and 850 are available
PLEVELS = [500, 850]
```
%% Cell type:code id:f5c75041-a40d-45e5-b940-2847fee80926 tags:
``` python
# create the xarray.Dataset of the specified predictor variables
predictors = ERA5Dataset(ERA5_PATH, ERA5_PREDICTORS, plevels=PLEVELS)
predictors = predictors.merge(chunks=-1)
```
%% Cell type:code id:ea5c1c43-fbbf-4db1-926b-1102e8079348 tags:
``` python
# check out the xarray.Dataset: you will see all the variables you specified
predictors
```
%% Cell type:markdown id:14668e55-3b47-44ea-b3f7-596b0e62eec6 tags:
## DEM-predictors
%% Cell type:code id:3c89e1ea-3a2d-45aa-bc0d-6123239d58b3 tags:
``` python
DEM_PATH = pathlib.Path('/mnt/CEPH_PROJECTS/FACT_CLIMAX/DEM/')
```
%% Cell type:code id:2c39941c-2edc-428c-bd8e-70dc43115238 tags:
``` python
# digital elevation model: Copernicus EU-Dem v1.1
dem = search_files(DEM_PATH, '^eu_dem_v11_stt.nc$').pop()
# read elevation and compute slope and aspect
dem = ERA5Dataset.dem_features(dem, {'y': predictors.y, 'x': predictors.x}, add_coord={'time': predictors.time})
```
%% Cell type:code id:f3a04b66-72bb-4b16-a527-080c543cbeab tags:
``` python
dem
```
%% Cell type:markdown id:d55ca9b1-46a9-462a-ba42-fb7a38c823fa tags:
## Merge predictors
%% Cell type:code id:b96ad03c-6065-4609-986a-66a5cb799526 tags:
``` python
Era5_ds = xr.merge([predictors, dem])
```
%% Cell type:markdown id:dd584313-8913-45b5-a379-2724d42bc99f tags:
## Read observations
%% Cell type:code id:e982d6f9-ae6b-435e-917d-44118c6a09b1 tags:
``` python
PREDICTAND = 'pr'
```
%% Cell type:code id:fe45a13d-d95d-4260-a3bc-0e6fd53db732 tags:
``` python
# read in-situ gridded observations
Obs_ds = search_files(OBS_PATH.joinpath(PREDICTAND), 'OBS_{}(.*).nc$'.format(PREDICTAND)).pop()
Obs_ds = xr.open_dataset(Obs_ds)
```
%% Cell type:markdown id:caaf0557-74a9-4c30-9697-bb58d68553aa tags:
## Group predictors by season
%% Cell type:code id:c98896f5-7953-4bb3-af97-fd98740514d1 tags:
``` python
# split into training and validation set
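# note: shuffle=False keeps the chronological order, so the last 10% of
# CALIB_PERIOD is held out as the validation period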
train, valid = train_test_split(CALIB_PERIOD, shuffle=False, test_size=0.1)
```
%% Cell type:code id:1a7295fd-3c57-4ca4-9c47-23a179db4829 tags:
``` python
# training and validation dataset
Era5_train, Obs_train = Era5_ds.sel(time=train), Obs_ds.sel(time=train)
Era5_valid, Obs_valid = Era5_ds.sel(time=valid), Obs_ds.sel(time=valid)
```
%% Cell type:code id:82a88472-7837-4fa6-a591-1f1952ea442b tags:
``` python
# group predictors and predictand by season
season_indices_train = Era5_train.groupby('time.season').groups
season_indices_valid = Era5_valid.groupby('time.season').groups
# group training and validation set by season
Era_season_train = {k: Era5_train.isel(time=v) for k, v in
                    season_indices_train.items()}
Obs_season_train = {k: Obs_train.isel(time=v) for k, v in
                    season_indices_train.items()}
Era_season_valid = {k: Era5_valid.isel(time=v) for k, v in
                    season_indices_valid.items()}
Obs_season_valid = {k: Obs_valid.isel(time=v) for k, v in
                    season_indices_valid.items()}
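# the keys produced by grouping on 'time.season' are 'DJF', 'MAM', 'JJA'
# and 'SON'; e.g., access the winter training data as:
# Era_season_train['DJF'], Obs_season_train['DJF']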
```
%% Cell type:code id:dde5735a-4633-452f-aada-22da3a15696e tags:
``` python
Era_season_train = {k: Era5_train.isel(time=v) for k, v in
                    season_indices_train.items()}
```
...
%% Cell type:markdown id:f15afea1-9ea4-4201-bdd7-32ae377db6a9 tags:
# Evaluate ERA-5 downscaling: minimum and maximum temperatures
%% Cell type:markdown id:7d2c4c86-18e4-44c4-bdf7-0e8249614749 tags:
We used **1981-1991 as the training period** and **1991-2010 as the reference period**. The results shown in this notebook are based on the model predictions for the reference period.
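A minimal sketch of how these two periods could be defined, assuming they are plain daily `pandas.DatetimeIndex` objects like `CALIB_PERIOD` in `climax.main.config` (the exact boundary dates are an assumption):

``` python
import pandas as pd

# hypothetical period definitions, for illustration only
TRAIN_PERIOD = pd.date_range('1981-01-01', '1990-12-31', freq='D')      # training
REFERENCE_PERIOD = pd.date_range('1991-01-01', '2010-12-31', freq='D')  # reference/evaluation
```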
%% Cell type:markdown id:3b06b8f0-090e-4da6-87bf-be19c0dddd7d tags:
**Predictors on pressure levels (500, 850)**:
- Geopotential (z)
- Temperature (t)
- Zonal wind (u)
- Meridional wind (v)
- Specific humidity (q)

**Predictors on surface**:
- Mean sea level pressure (msl)

**Auxiliary predictors**:
- Elevation from Copernicus EU-DEM v1.1 (dem)
- Day of the year (doy)
%% Cell type:markdown id:4186c89c-b55b-4559-a818-8b712baaf44e tags:
Define the predictand and the model to evaluate:
%% Cell type:code id:bb24b6ed-2d0a-44e0-b9a9-abdcb2a8294d tags:
``` python
# define the model parameters
PREDICTAND = 'tasmax'
MODEL = 'USegNet'
PPREDICTORS = 'ztuvq'
PLEVELS = ['500', '850']
SPREDICTORS = 'p'
DEM = 'dem'
DEM_FEATURES = ''
DOY = 'doy'
```
%% Cell type:code id:686a7c1d-e71d-4954-8d79-26b9a84f648e tags:
``` python
# mapping from predictands to variable names
NAMES = {'tasmin': 'minimum temperature', 'tasmax': 'maximum temperature', 'pr': 'precipitation'}
```
%% Cell type:markdown id:4d5d12c5-50fd-4c5c-9240-c3df78e49b44 tags:
### Imports
%% Cell type:code id:1afb0fab-5d2a-4875-9032-29b99c6dec89 tags:
``` python
# builtins
import datetime
import warnings
import calendar
# externals
import xarray as xr
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1.inset_locator import inset_axes
import scipy.stats as stats
from IPython.display import Image
from sklearn.metrics import r2_score
# locals
from climax.main.io import ERA5_PATH, OBS_PATH, TARGET_PATH
from pysegcnn.core.utils import search_files
```
%% Cell type:markdown id:2c2d0b1d-9630-4cfe-ae9d-35b05b94a1a5 tags:
### Model architecture
%% Cell type:code id:f98e1c9f-0581-43ea-a569-fd05fdaf36c1 tags:
``` python
Image("./Figures/architecture.png", width=900, height=400)
```
%% Cell type:markdown id:1db1f750-7d74-48ea-a385-94595a1da724 tags:
### Loss function
%% Cell type:markdown id:eb4ac3ea-213a-4391-8c88-909f6288de63 tags:
For temperature, the network minimizes the mean squared error, which corresponds to the negative log-likelihood of a normal distribution:
%% Cell type:markdown id:83bd9d8f-91b4-45f5-b58e-3551a942661a tags:
$$\mathcal{J}(y_{pred} \mid y_{true}) = \left(y_{pred} - y_{true}\right)^2$$
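Minimizing this loss is equivalent to maximizing the likelihood under a normal distribution with fixed variance $\sigma^2$, since

$$-\log p(y_{true} \mid y_{pred}, \sigma^2) = \frac{\left(y_{pred} - y_{true}\right)^2}{2\sigma^2} + \frac{1}{2}\log\left(2\pi\sigma^2\right),$$

which differs from the squared error only by a positive scale factor and an additive constant.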
%% Cell type:markdown id:f47caa41-9380-4c02-8785-4febcf2cb2d0 tags:
### Load datasets
%% Cell type:code id:e8cb1011-32dd-4e32-9692-4a4aa009869f tags:
``` python
# construct file pattern to match
PATTERN = '_'.join([MODEL, PREDICTAND, PPREDICTORS, *PLEVELS])
PATTERN = '_'.join([PATTERN, SPREDICTORS]) if SPREDICTORS else PATTERN
PATTERN = '_'.join([PATTERN, DEM]) if DEM else PATTERN
PATTERN = '_'.join([PATTERN, DEM_FEATURES]) if DEM_FEATURES else PATTERN
PATTERN = '_'.join([PATTERN, DOY]) if DOY else PATTERN
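# note: with the settings defined above, PATTERN evaluates to
# 'USegNet_tasmax_ztuvq_500_850_p_dem_doy', so the cell below matches a file
# named 'USegNet_tasmax_ztuvq_500_850_p_dem_doy.nc' under TARGET_PATH/tasmax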
```
%% Cell type:code id:020dfe33-ce3c-467f-ad0a-295cc338b1a9 tags:
``` python
# model predictions and observations NetCDF
y_pred = xr.open_dataset(search_files(TARGET_PATH.joinpath(PREDICTAND), '.'.join([PATTERN, 'nc$'])).pop())
if PREDICTAND == 'tas':
    # read both tasmax and tasmin
    tasmax = xr.open_dataset(search_files(OBS_PATH.joinpath('tasmax'), '.nc$').pop())
    tasmin = xr.open_dataset(search_files(OBS_PATH.joinpath('tasmin'), '.nc$').pop())
    y_true = xr.merge([tasmax, tasmin])
else:
    y_true = xr.open_dataset(search_files(OBS_PATH.joinpath(PREDICTAND), '.nc$').pop())
```
%% Cell type:code id:1dc2a386-d63b-4c6a-8e63-00365927559d tags:
``` python
# subset to time period covered by predictions
y_true = y_true.sel(time=y_pred.time)
```
%% Cell type:markdown id:06dcb26f-f8e0-4365-9279-871bc35492f9 tags:
### Load ERA-5 reference dataset
%% Cell type:code id:8f20564f-3f1b-4da6-b625-f2d0c6392249 tags:
``` python
# search ERA-5 reference dataset
y_refe = xr.open_dataset(search_files(ERA5_PATH.joinpath('ERA5', '2m_{}_temperature'.format(PREDICTAND.lstrip('tas'))), '.nc$').pop())
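# note: str.lstrip('tas') strips the leading characters 't', 'a' and 's',
# so 'tasmax' -> 'max' and 'tasmin' -> 'min', i.e. the 2m_max_temperature
# or 2m_min_temperature directory is selected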
```
%% Cell type:code id:f7b2fe84-131d-485a-a6e7-3c47135447e1 tags:
``` python
# convert to °C
y_refe = y_refe - 273.15
```
%% Cell type:code id:6be4c268-df97-4bd0-948a-cc1943a3c0fb tags:
``` python
# subset to time period covered by predictions
y_refe = y_refe.sel(time=y_pred.time).drop_vars('lambert_azimuthal_equal_area')
y_refe = y_refe.rename({'t2m': PREDICTAND})
```
%% Cell type:code id:9c7efab4-0cd2-4f6d-afbd-5e284d2bda9d tags:
``` python
# align datasets and mask missing values in model predictions
y_true, y_pred, y_refe = xr.align(y_true, y_pred, y_refe, join='override')
y_pred = y_pred.where(~np.isnan(y_true), other=np.nan)
y_refe = y_refe.where(~np.isnan(y_true), other=np.nan)
```
%% Cell type:markdown id:ddebdf9f-862c-461e-aa57-cd344d54eee9 tags:
## Model validation: temperature
%% Cell type:markdown id:ab15d557-c7ea-40c0-9977-a3d410fea784 tags:
### Coefficient of determination
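The coefficient of determination is computed on the monthly mean values of all grid points,

$$R^2 = 1 - \frac{\sum_i \left(y_{true, i} - y_{pred, i}\right)^2}{\sum_i \left(y_{true, i} - \bar{y}_{true}\right)^2}.$$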
%% Cell type:code id:bb8223e0-c5cb-477c-8c31-3dfb94b20ce1 tags:
``` python
# group timeseries by month and calculate mean over time and space
y_pred_ac = y_pred.groupby('time.month').mean(dim=('time', 'y', 'x'))
y_true_ac = y_true.groupby('time.month').mean(dim=('time', 'y', 'x'))
```
%% Cell type:code id:619d5dc9-4d36-43a3-b23c-a4ea51229c78 tags:
``` python
# get predicted and observed values over entire time series and grid points
y_pred_values = y_pred[PREDICTAND].groupby('time.month').mean(dim='time').values.flatten()
y_true_values = y_true[PREDICTAND].groupby('time.month').mean(dim='time').values.flatten()
```
%% Cell type:code id:49dff6ce-a629-460b-a43b-d1a0ef447351 tags:
``` python
# apply mask of valid pixels
mask = (~np.isnan(y_pred_values) & ~np.isnan(y_true_values))
y_pred_values = y_pred_values[mask]
y_true_values = y_true_values[mask]
```
%% Cell type:code id:e9e46770-4176-4257-8ea2-7050d3325e98 tags:
``` python
# calculate coefficient of determination
r2 = r2_score(y_true_values, y_pred_values)
r2
```
%% Cell type:code id:703ab604-5193-4032-92eb-80f2cff9fc2c tags:
``` python
# scatter plot of observations vs. predictions
fig, ax = plt.subplots(1, 1, figsize=(10, 10))
# plot only a subset of data: otherwise plot is overloaded ...
# subset = np.random.choice(np.arange(0, len(y_pred_values)), size=int(1e7), replace=False)
# ax.plot(y_true_values[subset], y_pred_values[subset], 'o', alpha=.5, markeredgecolor='grey', markerfacecolor='none', markersize=3);
# plot entire dataset
ax.plot(y_true_values, y_pred_values, 'o', alpha=.5, markeredgecolor='grey', markerfacecolor='none', markersize=3);
# plot 1:1 mapping line
interval = np.arange(-15, 45, 5)
#interval = np.arange(-30, 35, 5)
ax.plot(interval, interval, color='k', lw=2, ls='--')
# add coefficient of determination: calculated on entire dataset!
ax.text(interval[-1] - 0.5, interval[0] + 0.5, s='Coefficient of determination R$^2$ = {:.2f}'.format(r2), ha='right', fontsize=18)
# format axes
ax.set_ylim(interval[0], interval[-1])
ax.set_xlim(interval[0], interval[-1])
ax.set_xticks(interval)
ax.set_xticklabels(interval, fontsize=16)
ax.set_yticks(interval)
ax.set_yticklabels(interval, fontsize=16)
ax.set_xlabel('Observed', fontsize=18)
ax.set_ylabel('Predicted', fontsize=18)
ax.set_title('Monthly mean {} (°C)'.format(NAMES[PREDICTAND]), fontsize=20, pad=10);
# add axis for annual cycle
axins = inset_axes(ax, width="30%", height="40%", loc=2, borderpad=1)
axins.plot(y_pred_ac[PREDICTAND].values, ls='--', color='k', label='Predicted')
axins.plot(y_true_ac[PREDICTAND].values, ls='-', color='k', label='Observed')
axins.legend(frameon=False, fontsize=12, loc='lower center');
axins.yaxis.tick_right()
#axins.set_yticks(np.arange(-10, 11, 2))
#axins.set_yticklabels(np.arange(-10, 11, 2), fontsize=12)
axins.set_yticks(np.arange(0, 22, 2))
axins.set_yticklabels(np.arange(0, 22, 2), fontsize=12)
axins.set_xticks(np.arange(0, 12))
axins.set_xticklabels([calendar.month_name[i + 1] for i in np.arange(0, 12)], rotation=90, fontsize=12)
# axins.set_ylabel('{} / (°C)'.format(NAMES[PREDICTAND].capitalize()), fontsize=14)
# axins.set_xlabel('Month', fontsize=14);
# save figure
fig.savefig('../Notebooks/Figures/{}_r2.png'.format(PREDICTAND), dpi=300, bbox_inches='tight')
```
%% Cell type:markdown id:5e8be24e-8ca2-4582-98c0-b56c6db289d2 tags:
### Bias
%% Cell type:markdown id:a4f7177c-7d09-401f-957b-0e493b9ef5d0 tags:
Calculate yearly average bias over entire reference period:
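For each grid point, the yearly means of predictions and observations are compared,

$$\mathrm{bias} = \overline{y_{pred}} - \overline{y_{true}},$$

where the overbar denotes the yearly mean; the result is then averaged over all years and grid points.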
%% Cell type:code id:746bf95f-a78b-4da8-a063-1fa48e3c5da8 tags:
``` python
# yearly average bias over reference period
y_pred_yearly_avg = y_pred.groupby('time.year').mean(dim='time')
y_refe_yearly_avg = y_refe.groupby('time.year').mean(dim='time')
y_true_yearly_avg = y_true.groupby('time.year').mean(dim='time')
bias_yearly_avg = y_pred_yearly_avg - y_true_yearly_avg
bias_yearly_avg_ref = y_refe_yearly_avg - y_true_yearly_avg
for var in bias_yearly_avg:
    print('(Model) Yearly average bias of {}: {:.2f}°C'.format(var, bias_yearly_avg[var].mean().item()))
    print('(ERA-5) Yearly average bias of {}: {:.2f}°C'.format(var, bias_yearly_avg_ref.mean().to_array().values.item()))
```
%% Cell type:code id:fa14fadb-0f76-4962-80cb-e69cd7f5fac5 tags:
``` python
# mean absolute error over reference period
mae_avg = np.abs(y_pred_yearly_avg - y_true_yearly_avg).mean()
mae_avg_ref = np.abs(y_refe_yearly_avg - y_true_yearly_avg).mean()
for var in mae_avg:
    print('(Model) Yearly average MAE of {}: {:.2f}°C'.format(var, mae_avg[var].item()))
    print('(ERA-5) Yearly average MAE of {}: {:.2f}°C'.format(var, mae_avg_ref.mean().to_array().values.item()))
```
%% Cell type:code id:d2c50119-3cee-43c5-838b-e84639af55db tags:
``` python
# root mean squared error over reference period
rmse_avg = np.sqrt(((y_pred_yearly_avg - y_true_yearly_avg) ** 2).mean())
rmse_avg_ref = np.sqrt(((y_refe_yearly_avg - y_true_yearly_avg) ** 2).mean())
for var in rmse_avg:
    print('(Model) Yearly average RMSE of {}: {:.2f}°C'.format(var, rmse_avg[var].item()))
    print('(ERA-5) Yearly average RMSE of {}: {:.2f}°C'.format(var, rmse_avg_ref.mean().to_array().values.item()))
```
%% Cell type:code id:ed54a50e-41c2-4a7f-9839-05357996f0c4 tags:
``` python
# Pearson's correlation coefficient over reference period
for var in y_pred_yearly_avg:
    correlations = []
    for year in y_pred_yearly_avg.year:
        y_p = y_pred_yearly_avg[var].sel(year=year).values
        y_t = y_true_yearly_avg[var].sel(year=year).values
        r, _ = stats.pearsonr(y_p[~np.isnan(y_p)], y_t[~np.isnan(y_t)])
        correlations.append(r)
    # average the per-year correlations, not only the last one
    print('Yearly average Pearson correlation coefficient for {}: {:.2f}'.format(var, np.asarray(correlations).mean()))
```
%% Cell type:code id:760f86ce-9e04-4938-b24f-d2819fbf622e tags:
``` python
# plot average of observation, prediction, and bias
vmin, vmax = (-15, 15) if PREDICTAND == 'tasmin' else (0, 25)
fig, axes = plt.subplots(len(y_pred_yearly_avg.data_vars), 3, figsize=(24, len(y_pred_yearly_avg.data_vars) * 6),
                         sharex=True, sharey=True)
axes = axes.reshape(len(y_pred_yearly_avg.data_vars), -1)
for i, var in enumerate(y_pred_yearly_avg):
    for ds, ax in zip([y_true_yearly_avg, y_pred_yearly_avg, bias_yearly_avg], axes[i, ...]):
        if ds is bias_yearly_avg:
            ds = ds[var].mean(dim='year')
            im2 = ax.imshow(ds.values, origin='lower', cmap='RdBu_r', vmin=-2, vmax=2)
            ax.text(x=ds.shape[0] - 2, y=2, s='Average: {:.2f}°C'.format(ds.mean().item()), fontsize=14, ha='right')
        else:
            im1 = ax.imshow(ds[var].mean(dim='year').values, origin='lower', cmap='RdYlBu_r', vmin=vmin, vmax=vmax)
# set titles
axes[0, 0].set_title('Observed', fontsize=16, pad=10);
axes[0, 1].set_title('Predicted', fontsize=16, pad=10);
axes[0, 2].set_title('Bias', fontsize=16, pad=10);
# adjust axes
for ax in axes.flat:
    ax.axes.get_xaxis().set_ticklabels([])
    ax.axes.get_xaxis().set_ticks([])
    ax.axes.get_yaxis().set_ticklabels([])
    ax.axes.get_yaxis().set_ticks([])
    ax.axes.axis('tight')
    ax.set_xlabel('')
    ax.set_ylabel('')
# adjust figure
fig.suptitle('Average {}: 1991 - 2010'.format(NAMES[PREDICTAND]), fontsize=20);
fig.subplots_adjust(hspace=0, wspace=0, top=0.85)
# add colorbar for bias
axes = axes.flatten()
cbar_ax_bias = fig.add_axes([axes[-1].get_position().x1 + 0.01, axes[-1].get_position().y0,
                             0.01, axes[-1].get_position().y1 - axes[-1].get_position().y0])
cbar_bias = fig.colorbar(im2, cax=cbar_ax_bias)
cbar_bias.set_label(label='Bias / (°C)', fontsize=16)
cbar_bias.ax.tick_params(labelsize=14)
# add colorbar for predictand
cbar_ax_predictand = fig.add_axes([axes[0].get_position().x0, axes[0].get_position().y0 - 0.1,
                                   axes[-1].get_position().x0 - axes[0].get_position().x0,
                                   0.05])
cbar_predictand = fig.colorbar(im1, cax=cbar_ax_predictand, orientation='horizontal')
cbar_predictand.set_label(label='{} / (°C)'.format(NAMES[PREDICTAND].capitalize()), fontsize=16)
cbar_predictand.ax.tick_params(labelsize=14)
# add metrics: MAE and RMSE
axes[1].text(x=ds.shape[0] - 2, y=2, s='MAE = {:.2f}°C'.format(mae_avg[PREDICTAND].item()), fontsize=14, ha='right')
axes[1].text(x=ds.shape[0] - 2, y=12, s='RMSE = {:.2f}°C'.format(rmse_avg[PREDICTAND].item()), fontsize=14, ha='right')
# save figure
fig.savefig('../Notebooks/Figures/{}_average_bias.png'.format(PREDICTAND), dpi=300, bbox_inches='tight')
```
%% Cell type:markdown id:aa4ef730-5a9d-40dc-a318-2f43a4cf1cd2 tags:
### Seasonal bias
%% Cell type:markdown id:eda455a2-e8ee-4644-bb85-b0cf76acd11a tags:
Calculate seasonal bias:
%% Cell type:code id:8b781300-2639-437d-9875-f57306715c80 tags:
``` python
# group data by season: (DJF, MAM, JJA, SON)
y_true_snl = y_true.groupby('time.season').mean(dim='time')
y_pred_snl = y_pred.groupby('time.season').mean(dim='time')
y_refe_snl = y_refe.groupby('time.season').mean(dim='time')
bias_snl = y_pred_snl - y_true_snl
bias_snl_ref = y_refe_snl - y_true_snl
```
%% Cell type:code id:b148ec7e-f832-40f2-b287-c0351e7e1aad tags:
``` python
# print average bias per season: ERA-5
for var in bias_snl_ref.data_vars:
    for season in bias_snl_ref[PREDICTAND].season:
        print('(ERA-5) Average bias of mean {} for season {}: {:.1f}°C'.format(var, season.values.item(), bias_snl_ref[var].sel(season=season).mean().item()))
```
%% Cell type:code id:e54daae8-ea32-431f-8a34-f92d95e7627e tags:
``` python
# print average bias per season: model
for var in bias_snl.data_vars:
    for season in bias_snl[PREDICTAND].season:
        print('(Model) Average bias of mean {} for season {}: {:.1f}°C'.format(var, season.values.item(), bias_snl[var].sel(season=season).mean().item()))
```
%% Cell type:markdown id:c4232ae9-a557-4d61-8ab3-d7eda6201f98 tags:
Plot seasonal differences, taken from the [xarray documentation](xarray.pydata.org/en/stable/examples/monthly-means.html).
%% Cell type:code id:b39a6cc0-614c-452d-bb85-bc10e5179948 tags:
``` python
# plot seasonal differences
fig, axes = plt.subplots(nrows=2, ncols=3, figsize=(24, 12), sharex=True, sharey=True)
axes = axes.flatten()
# plot annual average bias
ds = bias_yearly_avg[PREDICTAND].mean(dim='year')
im = axes[0].imshow(ds.values, origin='lower', cmap='RdBu_r', vmin=-2, vmax=2)
axes[0].set_title('Annual', fontsize=16);
axes[0].text(x=ds.shape[0] - 2, y=2, s='Average: {:.2f}°C'.format(ds.mean().item()), fontsize=14, ha='right')
# plot seasonal average bias
for ax, season in zip(axes[1:], bias_snl.season):
    ds = bias_snl[PREDICTAND].sel(season=season)
    ax.imshow(ds.values, origin='lower', cmap='RdBu_r', vmin=-2, vmax=2)
    ax.set_title(season.item(), fontsize=16);
    ax.text(x=ds.shape[0] - 2, y=2, s='Average: {:.2f}°C'.format(ds.mean().item()), fontsize=14, ha='right')
# adjust axes
for ax in axes.flat:
    ax.axes.get_xaxis().set_ticklabels([])
    ax.axes.get_xaxis().set_ticks([])
    ax.axes.get_yaxis().set_ticklabels([])
    ax.axes.get_yaxis().set_ticks([])
    ax.axes.axis('tight')
    ax.set_xlabel('')
    ax.set_ylabel('')
# turn off last axis
axes[-1].set_visible(False)
# adjust figure
fig.suptitle('Average bias of {}: 1991 - 2010'.format(NAMES[PREDICTAND]), fontsize=20);
fig.subplots_adjust(hspace=0.1, wspace=0, top=0.925)
# add colorbar
cbar_ax_predictand = fig.add_axes([axes[-1].get_position().x1 + 0.01, axes[-1].get_position().y0,
                                   0.01, axes[0].get_position().y1 - axes[-1].get_position().y0])
cbar_predictand = fig.colorbar(im, cax=cbar_ax_predictand)
cbar_predictand.set_label(label='Bias / (°C)', fontsize=16)
cbar_predictand.ax.tick_params(labelsize=14)
# save figure
fig.savefig('../Notebooks/Figures/{}_average_bias_seasonal.png'.format(PREDICTAND), dpi=300, bbox_inches='tight')
```
%% Cell type:markdown id:41600269-2f8c-4717-8f74-b3dfaef60359 tags:
Calculate the mean annual cycle:
%% Cell type:code id:c9f27c01-4dfc-4d16-8d29-00e69b7794cd tags:
``` python
# group timeseries by month and calculate mean over time and space
y_pred_ac = y_pred.groupby('time.month').mean(dim=('time', 'y', 'x'))
y_true_ac = y_true.groupby('time.month').mean(dim=('time', 'y', 'x'))
```
%% Cell type:code id:bcc63e73-2636-49c1-a66b-241eb5407e2e tags:
``` python
# plot mean annual cycle
fig, ax = plt.subplots(1, 1, figsize=(10, 10))
ax.plot(y_pred_ac[PREDICTAND].values, ls='--', color='k', label='Predicted')
ax.plot(y_true_ac[PREDICTAND].values, ls='-', color='k', label='Observed')
ax.legend(frameon=False, fontsize=14);
ax.set_yticks(np.arange(np.floor(y_true_ac[PREDICTAND].min().item()), np.ceil(y_true_ac[PREDICTAND].max().item()) + 1, 1))
ax.set_yticklabels(np.arange(np.floor(y_true_ac[PREDICTAND].min().item()), np.ceil(y_true_ac[PREDICTAND].max().item()) + 1, 1), fontsize=12)
ax.set_xticks(np.arange(0, 12))
ax.set_xticklabels([calendar.month_name[i + 1] for i in np.arange(0, 12)], rotation=90, fontsize=12)
ax.set_title('Mean annual cycle of {}: 1991 - 2010'.format(NAMES[PREDICTAND]), pad=20, fontsize=16);
ax.set_ylabel('{} / (°C)'.format(NAMES[PREDICTAND].capitalize()), fontsize=14)
ax.set_xlabel('Month', fontsize=14);
# save figure
fig.savefig('../Notebooks/Figures/{}_mean_annual_cycle.png'.format(PREDICTAND), dpi=300, bbox_inches='tight')
```
%% Cell type:markdown id:c70b369d-2d16-42e3-9300-4a18757ad1b2 tags:
### Bias of extreme values
The extremes are measured by an empirical quantile per grid point and year: the 2nd percentile (P2) for minimum temperature and the 98th percentile (P98) for maximum temperature.
%% Cell type:code id:33198267-8eb6-4b84-bb6d-c903b27ccbc5 tags:
``` python
# extreme quantile of interest
quantile = 0.02 if PREDICTAND == 'tasmin' else 0.98
```
%% Cell type:code id:8fd82b83-0824-4663-846c-53fc3afbc7d1 tags:
``` python
# calculate extreme quantile for each year
with warnings.catch_warnings():
    warnings.simplefilter('ignore', category=RuntimeWarning)
    y_pred_ex = y_pred.groupby('time.year').quantile(quantile, dim='time')
    y_true_ex = y_true.groupby('time.year').quantile(quantile, dim='time')
    y_refe_ex = y_refe.groupby('time.year').quantile(quantile, dim='time')
```
%% Cell type:code id:822984f8-cdaa-410b-96e8-e58da39ba40d tags:
``` python
# calculate bias in extreme quantile for each year
bias_ex = y_pred_ex - y_true_ex
bias_ex_ref = y_refe_ex - y_true_ex
```
%% Cell type:code id:efd902ee-1154-4b0c-bfbd-a64ace625e76 tags:
``` python
# bias of extreme quantile: ERA-5
for var in bias_ex_ref:
    print('(ERA-5) Yearly average bias for P{:.0f} of {}: {:.1f}°C'.format(quantile * 100, var, bias_ex_ref[var].mean().item()))
```
%% Cell type:code id:9343d8a4-07a6-4c9c-85f6-f670efe7c6cd tags:
``` python
# bias of extreme quantile: Model
for var in bias_ex:
    print('(Model) Yearly average bias for P{:.0f} of {}: {:.1f}°C'.format(quantile * 100, var, bias_ex[var].mean().item()))
```
%% Cell type:code id:ba38c293-6a6c-4ce4-b5a2-8e468e69cb44 tags:
``` python
# mean absolute error in extreme quantile
mae_ex = np.abs(y_pred_ex - y_true_ex).mean()
mae_ex_ref = np.abs(y_refe_ex - y_true_ex).mean()
```
%% Cell type:code id:d71dd376-f445-4c27-8ffc-7ec9f1583b52 tags:
``` python
# mae of extreme quantile: ERA-5
for var in mae_ex_ref:
    print('(ERA-5) Yearly average MAE for P{:.0f} of {}: {:.1f}°C'.format(quantile * 100, var, mae_ex_ref[var].item()))
```
%% Cell type:code id:440cbb08-5a06-4708-baa5-3ca3bdb0675a tags:
``` python
# mae of extreme quantile: Model
for var in mae_ex:
    print('(Model) Yearly average MAE for P{:.0f} of {}: {:.1f}°C'.format(quantile * 100, var, mae_ex[var].item()))
```
%% Cell type:code id:fd263446-b57f-4f4a-86fd-5b5596aa32ee tags:
``` python
# root mean squared error in extreme quantile
rmse_ex = np.sqrt(((y_pred_ex - y_true_ex) ** 2).mean())
rmse_ex_ref = np.sqrt(((y_refe_ex - y_true_ex) ** 2).mean())
```
%% Cell type:code id:90245a82-992f-4592-8749-d7a21c1f3a21 tags:
``` python
# rmse of extreme quantile: ERA-5
for var in rmse_ex_ref:
    print('(ERA-5) Yearly average RMSE for P{:.0f} of {}: {:.1f}°C'.format(quantile * 100, var, rmse_ex_ref[var].item()))
```
%% Cell type:code id:92591caa-8ed4-4f91-a5ea-32f6b095cdfe tags:
``` python
# rmse of extreme quantile: Model
for var in rmse_ex:
    print('(Model) Yearly average RMSE for P{:.0f} of {}: {:.1f}°C'.format(quantile * 100, var, rmse_ex[var].item()))
```
%% Cell type:code id:cfc196fa-0999-4603-959c-2c82f038c8fa tags:
``` python
# plot extremes of observation, prediction, and bias
vmin, vmax = (-20, 0) if PREDICTAND == 'tasmin' else (10, 40)
fig, axes = plt.subplots(len(y_pred_ex.data_vars), 3, figsize=(24, len(y_pred_ex.data_vars) * 6),
                         sharex=True, sharey=True)
axes = axes.reshape(len(y_pred_ex.data_vars), -1)
for i, var in enumerate(y_pred_ex):
    for ds, ax in zip([y_true_ex, y_pred_ex, bias_ex], axes[i, ...]):
        if ds is bias_ex:
            ds = ds[var].mean(dim='year')
            im2 = ax.imshow(ds.values, origin='lower', cmap='RdBu_r', vmin=-2, vmax=2)
            ax.text(x=ds.shape[0] - 2, y=2, s='Average: {:.2f}°C'.format(ds.mean().item()), fontsize=14, ha='right')
        else:
            im1 = ax.imshow(ds[var].mean(dim='year').values, origin='lower', cmap='Blues_r' if PREDICTAND == 'tasmin' else 'Reds',
                            vmin=vmin, vmax=vmax)
# set titles
axes[0, 0].set_title('Observed', fontsize=16, pad=10);
axes[0, 1].set_title('Predicted', fontsize=16, pad=10);
axes[0, 2].set_title('Bias', fontsize=16, pad=10);
# adjust axes
for ax in axes.flat:
    ax.axes.get_xaxis().set_ticklabels([])
    ax.axes.get_xaxis().set_ticks([])
    ax.axes.get_yaxis().set_ticklabels([])
    ax.axes.get_yaxis().set_ticks([])
    ax.axes.axis('tight')
    ax.set_xlabel('')
    ax.set_ylabel('')
# adjust figure
fig.suptitle('Average P{:.0f} of {}: 1991 - 2010'.format(quantile * 100, NAMES[PREDICTAND]), fontsize=20);
fig.subplots_adjust(hspace=0, wspace=0, top=0.85)
# add colorbar for bias
axes = axes.flatten()
cbar_ax = fig.add_axes([axes[-1].get_position().x1 + 0.01, axes[-1].get_position().y0,
                        0.01, axes[-1].get_position().y1 - axes[-1].get_position().y0])
cbar = fig.colorbar(im2, cax=cbar_ax)
cbar.set_label(label='Bias / (°C)', fontsize=16)
cbar.ax.tick_params(labelsize=14)
# add colorbar for predictand
cbar_ax_predictand = fig.add_axes([axes[0].get_position().x0, axes[0].get_position().y0 - 0.1,
                                   axes[-1].get_position().x0 - axes[0].get_position().x0,
                                   0.05])
cbar_predictand = fig.colorbar(im1, cax=cbar_ax_predictand, orientation='horizontal')
cbar_predictand.set_label(label='{} / (°C)'.format(NAMES[PREDICTAND].capitalize()), fontsize=16)
cbar_predictand.ax.tick_params(labelsize=14)
# add metrics: MAE and RMSE
axes[1].text(x=ds.shape[0] - 2, y=2, s='MAE = {:.2f}°C'.format(mae_ex[PREDICTAND].item()), fontsize=14, ha='right')
axes[1].text(x=ds.shape[0] - 2, y=12, s='RMSE = {:.2f}°C'.format(rmse_ex[PREDICTAND].item()), fontsize=14, ha='right')
# save figure
fig.savefig('../Notebooks/Figures/{}_average_bias_p{:.0f}.png'.format(PREDICTAND, quantile * 100), dpi=300, bbox_inches='tight')
```
%% Cell type:markdown id:b1550618-1314-4d45-a6b7-2f853dce3eb1 tags:
### Compute ERA-5 daily minimum and maximum 2m temperature
%% Cell type:code id:6a1a7bc8-2edf-46f3-9fb9-9f97ed262877 tags:
``` python
# search ERA-5 hourly 2m temperature data
y_refe_list = sorted(search_files(ERA5_PATH.joinpath('Downloads', '2m_temperature'), '.nc$'))
y_refe_list
```
%% Cell type:code id:b6d6e195-85e9-4ac0-9e50-79fd20042665 tags:
``` python
y_refe = xr.open_mfdataset(y_refe_list)
```
%% Cell type:code id:7cbdf926-9f01-49a7-9e61-d864af4a3c38 tags:
``` python
t_max = y_refe.resample(time='D').max(dim='time')
```
%% Cell type:code id:71b38603-5ee3-4fd8-8360-67bdcb2b8f47 tags:
``` python
t_min = y_refe.resample(time='D').min(dim='time')
```
%% Cell type:code id:33c09067-9060-4432-b33a-831b4ec59c66 tags:
``` python
t_max.to_netcdf('./tmax_tmp.nc', engine='h5netcdf')
t_min.to_netcdf('./tmin_tmp.nc', engine='h5netcdf')
```
%% Cell type:code id:a3a8d0fe-3d14-4a10-8314-59a5319551ee tags:
``` python
grid = '/mnt/CEPH_PROJECTS/FACT_CLIMAX/OBS_DATA/grid_1km_laea'
```
%% Cell type:code id:bb0de3d6-b4ef-4228-a2c5-a06b0e540af9 tags:
``` python
import pathlib
target = pathlib.Path('/mnt/CEPH_PROJECTS/FACT_CLIMAX/REANALYSIS/ERA5/2m_{}_temperature/ERA5_2m_{}_temperature_1981_2010.nc')
```
%% Cell type:code id:9083153e-bde5-4c06-88f8-a55ba693fdda tags:
``` python
from climax.core.utils import reproject_cdo
```
%% Cell type:code id:cc973c07-0a24-4b3f-a5c8-5b9fc8347cdc tags:
``` python
for name, ds in zip(['min', 'max'], ['./tmin_tmp.nc', './tmax_tmp.nc']):
    out = pathlib.Path(str(target).format(name, name))
    #out.parent.mkdir(parents=True, exist_ok=True)
    reproject_cdo(grid, ds, out, mode='bilinear', overwrite=True)
```
...