diff --git a/Notebooks/eval_temperature.ipynb b/Notebooks/eval_temperature.ipynb
index 4a2e34430f9119bc1c443f438bbb9b1ac847288a..c319d3972f1341e0303cd558d924634c38263181 100644
--- a/Notebooks/eval_temperature.ipynb
+++ b/Notebooks/eval_temperature.ipynb
@@ -35,8 +35,8 @@
     "PREDICTAND = 'tasmin'\n",
     "MODEL = 'USegNet'\n",
     "PPREDICTORS = 'ztuvq'\n",
-    "PLEVLES = ['500', '850']\n",
-    "SPREDICTORS = ''\n",
+    "PLEVELS = ['500', '850']\n",
+    "SPREDICTORS = 'pt2'\n",
     "DEM = 'dem'\n",
     "DOY = 'doy'"
    ]
@@ -106,7 +106,7 @@
    "outputs": [],
    "source": [
     "# model predictions and observations NetCDF\n",
-    "y_pred = TARGET_PATH.joinpath(PREDICTAND, '_'.join([MODEL, PREDICTAND, PPREDICTORS, PLEVELS, SPREDICTORS, DEM, DOY]) + '.nc')\n",
+    "y_pred = TARGET_PATH.joinpath(PREDICTAND, '_'.join([MODEL, PREDICTAND, PPREDICTORS, *PLEVELS, SPREDICTORS, DEM, DOY]) + '.nc')\n",
     "if PREDICTAND == 'tas':\n",
     "    # read both tasmax and tasmin\n",
     "    tasmax = xr.open_dataset(search_files(OBS_PATH.joinpath('tasmax'), '.nc$').pop())\n",
@@ -175,6 +175,7 @@
     "y_pred_avg = y_pred.mean(dim='time')\n",
     "y_true_avg = y_true.mean(dim='time')\n",
     "bias = y_pred_avg - y_true_avg\n",
+    "# bias = (y_pred - y_true).mean()\n",
     "for var in bias:\n",
     "    print('Overall average bias {}: {:.2f}'.format(var, bias[var].mean().item()))"
    ]
@@ -188,6 +189,7 @@
    "source": [
     "# mean absolute error over reference period\n",
     "mae = np.abs(y_pred_avg - y_true_avg).mean()\n",
+    "# mae = np.abs(y_pred - y_true).mean()\n",
     "for var in mae:\n",
     "    print('Mean absolute error {}: {:.2f}'.format(var, mae[var].item()))"
    ]
@@ -201,6 +203,7 @@
    "source": [
     "# root mean squared error over reference period\n",
     "rmse = ((y_pred_avg - y_true_avg) ** 2).mean()\n",
+    "# rmse = ((y_pred - y_true) ** 2).mean()\n",
     "for var in rmse:\n",
     "    print('Root mean squared error {}: {:.2f}'.format(var, rmse[var].item()))"
    ]
@@ -215,7 +218,9 @@
     "# Pearson's correlation coefficient over reference period\n",
     "for var in y_pred_avg:\n",
     "    y_p = y_pred_avg[var].values[~np.isnan(y_pred_avg[var])]\n",
+    "    # y_p = y_pred[var].values[~np.isnan(y_pred[var])]\n",
     "    y_t = y_true_avg[var].values[~np.isnan(y_true_avg[var])]\n",
+    "    # y_t = y_true[var].values[~np.isnan(y_true[var])]\n",
     "    r, _ = stats.pearsonr(y_p, y_t)\n",
     "    print('Pearson correlation for {}: {:.2f}'.format(var, r))"
    ]
@@ -314,6 +319,19 @@
     "plt.tight_layout()"
    ]
   },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "ec2f6293-384e-41b6-9e79-75bd852df0c0",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# TODO: Annual cylce"
+   ]
+  },
   {
    "cell_type": "markdown",
    "id": "c70b369d-2d16-42e3-9300-4a18757ad1b2",
@@ -322,6 +340,20 @@
     "### Bias of extreme values"
    ]
   },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "4acfc3f2-20ed-498c-ab35-f392ae0e64f9",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# TODO: smooth quantiles"
+   ]
+  },
   {
    "cell_type": "code",
    "execution_count": null,
@@ -330,7 +362,7 @@
    "outputs": [],
    "source": [
     "# percentiles of interest\n",
-    "percentiles = [0.01, 0.02, 0.5, 0.98, 0.99]"
+    "percentiles = [0.01, 0.02, 0.98, 0.99]"
    ]
   },
   {