Skip to content
Snippets Groups Projects
Commit 048d866d authored by Frisinghelli Daniel
Browse files

Stable implementation of Bernoulli-Gamma and Bernoulli-Weibull loss.

parent d4deb150
No related branches found
No related tags found
No related merge requests found
......@@ -48,8 +48,7 @@ class L1Loss(NaNLoss):
return F.l1_loss(y_pred[mask], y_true[mask], reduction=self.reduction)
class BernoulliLoss(NaNLoss):
    """Base class for Bernoulli-mixture losses (e.g. Bernoulli-Gamma).

    Stores the precipitation threshold shared by all subclasses; the
    NaN-aware reduction machinery is inherited from ``NaNLoss``.
    """

    def __init__(self, size_average=None, reduce=None, reduction='mean',
                 min_amount=0):
        super().__init__(size_average, reduce, reduction)

        # minimum amount of precipitation to be classified as precipitation
        self.min_amount = min_amount
class BernoulliGammaLoss(BernoulliLoss):
    """Negative log-likelihood of a Bernoulli-Gamma mixture.

    Dry pixels (y_true < min_amount) contribute through the Bernoulli
    term only; wet pixels contribute through the Gamma density, so each
    element of the loss tensor is filled from exactly one branch. This
    avoids evaluating Gamma log-terms at zero rainfall, which is the
    numerical-stability point of this implementation.
    """

    def __init__(self, size_average=None, reduce=None, reduction='mean',
                 min_amount=0):
        super().__init__(size_average, reduce, reduction, min_amount)

    def forward(self, y_pred, y_true):
        # convert to float32
        # NOTE(review): conversion statements were hidden behind a diff-hunk
        # boundary; restored from the surrounding comment — confirm dtypes.
        y_pred = y_pred.type(torch.float32)
        y_true = y_true.type(torch.float32).squeeze()

        # missing values: exclude NaN observations from the loss
        mask = ~torch.isnan(y_true)
        y_true = y_true[mask]

        # mask values less than 0 (negative rainfall is clipped to zero)
        y_true[y_true < 0] = 0

        # mask for values < min_amount: True marks "dry" pixels handled by
        # the Bernoulli branch; ~mask_p marks "wet" pixels (Gamma branch)
        mask_p = y_true < self.min_amount
        y_gamm = y_true[~mask_p]

        # clip probabilities to (0, 1)
        # channel 0: probability of precipitation
        p_pred = torch.sigmoid(y_pred[:, 0, ...].squeeze()[mask])

        # clip shape and scale to (0, +infinity)
        # channels 1, 2: Gamma shape and scale, only needed on wet pixels
        gshape = torch.exp(y_pred[:, 1, ...].squeeze()[mask][~mask_p])
        gscale = torch.exp(y_pred[:, 2, ...].squeeze()[mask][~mask_p])

        # negative log-likelihood function of Bernoulli-Gamma distribution
        loss = torch.zeros_like(y_true)

        # Bernoulli contribution: log-probability of "no precipitation"
        loss_bern = torch.log(1 - p_pred[mask_p] + self.epsilon)

        # Gamma contribution: log p + Gamma log-density at y_gamm;
        # epsilon keeps every log/lgamma argument strictly positive
        loss_gamm = (torch.log(p_pred[~mask_p] + self.epsilon) +
                     (gshape - 1) * torch.log(y_gamm + self.epsilon) -
                     y_gamm / (gscale + self.epsilon) -
                     gshape * torch.log(gscale + self.epsilon) -
                     torch.lgamma(gshape + self.epsilon))

        # fill loss array: negate log-likelihoods into their branch slots
        loss[torch.where(mask_p)] = - loss_bern
        loss[torch.where(~mask_p)] = - loss_gamm

        return self.reduce(loss)
......@@ -104,14 +110,11 @@ class BernoulliGammaLoss(NaNLoss):
return p * np.exp(shape) * np.exp(scale)
class BernoulliWeibullLoss(BernoulliLoss):
    """Negative log-likelihood of a Bernoulli-Weibull mixture.

    Same branch structure as ``BernoulliGammaLoss``: dry pixels
    (y_true < min_amount) enter through the Bernoulli term, wet pixels
    through the Weibull log-density, each filling disjoint slots of the
    loss tensor. Computed in float64 because the Weibull power term
    overflows more easily than the Gamma terms.
    """

    def __init__(self, size_average=None, reduce=None, reduction='mean',
                 min_amount=0):
        super().__init__(size_average, reduce, reduction, min_amount)

    def forward(self, y_pred, y_true):
        # convert to float64 for numerical headroom in the power term
        y_pred = y_pred.type(torch.float64)
        y_true = y_true.type(torch.float64).squeeze()

        # missing values: exclude NaN observations from the loss
        mask = ~torch.isnan(y_true)
        y_true = y_true[mask]

        # mask values less than 0 (negative rainfall is clipped to zero)
        y_true[y_true < 0] = 0

        # mask for values < min_amount: True marks "dry" pixels handled by
        # the Bernoulli branch; ~mask_p marks "wet" pixels (Weibull branch)
        mask_p = y_true < self.min_amount
        y_weib = y_true[~mask_p]

        # clip probabilities to (0, 1)
        # channel 0: probability of precipitation
        p_pred = torch.sigmoid(y_pred[:, 0, ...].squeeze()[mask])

        # clip scale to (0, +infinity)
        # channel 2: Weibull scale, only needed on wet pixels
        scale = torch.exp(y_pred[:, 2, ...].squeeze()[mask][~mask_p])

        # clip shape to (0, 10)
        # NOTE: in general shape in (0, +infinity), clipping is required for
        # numerical stability
        shape = torch.clamp(
            torch.exp(y_pred[:, 1, ...].squeeze()[mask][~mask_p]), max=10)

        # negative log-likelihood function of Bernoulli-Weibull distribution
        loss = torch.zeros_like(y_true)

        # Bernoulli contribution: log-probability of "no precipitation"
        loss_bern = torch.log(1 - p_pred[mask_p] + self.epsilon)

        # Weibull contribution: log p + Weibull log-density at y_weib;
        # epsilon keeps every log argument strictly positive
        loss_weib = (torch.log(p_pred[~mask_p] + self.epsilon) +
                     torch.log(shape + self.epsilon) -
                     shape * torch.log(scale + self.epsilon) +
                     (shape - 1) * torch.log(y_weib + self.epsilon) -
                     torch.pow(y_weib / (scale + self.epsilon), shape))

        # fill loss array: negate log-likelihoods into their branch slots
        loss[torch.where(mask_p)] = - loss_bern
        loss[torch.where(~mask_p)] = - loss_weib

        return self.reduce(loss)
......
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment