import torch
import numpy as np
from sklearn.metrics import r2_score, explained_variance_score
def masked_mae_loss(y_pred, y_true):
    """Mean absolute error that ignores zero-valued targets.

    Entries where ``y_true`` equals exactly 0 are treated as missing: they
    contribute nothing, and the surviving entries are re-weighted by the
    inverse fraction of observed values so the result remains a mean over
    the observed entries only.
    """
    weight = (y_true != 0).float()
    weight = weight / weight.mean()
    abs_err = torch.abs(y_pred - y_true) * weight
    # Zero out NaNs (e.g. when an entire batch of targets is 0); see
    # https://discuss.pytorch.org/t/how-to-set-nan-in-tensor-to-0/3918/3
    abs_err = torch.where(torch.isnan(abs_err),
                          torch.zeros_like(abs_err), abs_err)
    return abs_err.mean()
def masked_mae_torch(preds, labels, null_val=np.nan):
    """Mean absolute error over entries whose label is not ``null_val``.

    Labels with ``|x| < 1e-4`` are snapped to exactly 0 before masking.

    Args:
        preds: predicted tensor.
        labels: ground-truth tensor (not modified).
        null_val: label value to exclude; NaN by default.

    Returns:
        Scalar tensor with the masked MAE.
    """
    # Threshold on a local copy — the original mutated the caller's
    # `labels` tensor in place via `labels[...] = 0`.
    labels = torch.where(torch.abs(labels) < 1e-4,
                         torch.zeros_like(labels), labels)
    if np.isnan(null_val):
        mask = ~torch.isnan(labels)
    else:
        mask = labels.ne(null_val)
    mask = mask.float()
    # Rescale so masked entries contribute 0 and the rest average correctly.
    mask /= torch.mean(mask)
    mask = torch.where(torch.isnan(mask), torch.zeros_like(mask), mask)
    loss = torch.abs(torch.sub(preds, labels))
    loss = loss * mask
    loss = torch.where(torch.isnan(loss), torch.zeros_like(loss), loss)
    return torch.mean(loss)
def log_cosh_loss(preds, labels):
    """Log-cosh loss, a smooth approximation to the absolute error.

    Uses the identity ``log(cosh(x)) = |x| + log1p(exp(-2|x|)) - log(2)``,
    which stays finite for large residuals where ``torch.cosh`` would
    overflow to inf (cosh(x) overflows float32 around |x| ~ 89).

    Args:
        preds: predicted tensor.
        labels: ground-truth tensor.

    Returns:
        Scalar tensor with the mean log-cosh loss.
    """
    diff = torch.abs(preds - labels)
    loss = diff + torch.log1p(torch.exp(-2.0 * diff)) - np.log(2.0)
    return torch.mean(loss)
def huber_loss(preds, labels, delta=1.0):
    """Huber loss: quadratic for residuals up to ``delta``, linear beyond.

    Equivalent to ``torch.nn.SmoothL1Loss`` when ``delta == 1``.
    """
    err = torch.abs(preds - labels)
    quadratic = 0.5 * err * err
    linear = delta * (err - 0.5 * delta)
    return torch.mean(torch.where(err <= delta, quadratic, linear))
def quantile_loss(preds, labels, delta=0.25):
    """Pinball (quantile) loss at quantile level ``delta``.

    Under-predictions (labels >= preds) are weighted by ``delta``,
    over-predictions by ``1 - delta``.
    """
    under = delta * (labels - preds)
    over = (1.0 - delta) * (preds - labels)
    return torch.mean(torch.where(labels >= preds, under, over))
def masked_mape_torch(preds, labels, null_val=np.nan, eps=0):
    """Mean absolute percentage error over non-null labels.

    Labels with ``|x| < 1e-4`` are snapped to exactly 0 before masking.

    Args:
        preds: predicted tensor.
        labels: ground-truth tensor (not modified).
        null_val: label value to exclude; NaN by default.
        eps: if non-zero (and ``null_val`` is NaN), the denominator is
            smoothed to ``labels + eps`` instead of masking.

    Returns:
        Scalar tensor with the masked MAPE.
    """
    # Threshold on a local copy — the original mutated the caller's
    # `labels` tensor in place via `labels[...] = 0`.
    labels = torch.where(torch.abs(labels) < 1e-4,
                         torch.zeros_like(labels), labels)
    if np.isnan(null_val) and eps != 0:
        loss = torch.abs((preds - labels) / (labels + eps))
        return torch.mean(loss)
    if np.isnan(null_val):
        mask = ~torch.isnan(labels)
    else:
        mask = labels.ne(null_val)
    mask = mask.float()
    # Rescale so masked entries contribute 0 and the rest average correctly.
    mask /= torch.mean(mask)
    mask = torch.where(torch.isnan(mask), torch.zeros_like(mask), mask)
    loss = torch.abs((preds - labels) / labels)
    loss = loss * mask
    loss = torch.where(torch.isnan(loss), torch.zeros_like(loss), loss)
    return torch.mean(loss)
def masked_mse_torch(preds, labels, null_val=np.nan):
    """Mean squared error over entries whose label is not ``null_val``.

    Labels with ``|x| < 1e-4`` are snapped to exactly 0 before masking.

    Args:
        preds: predicted tensor.
        labels: ground-truth tensor (not modified).
        null_val: label value to exclude; NaN by default.

    Returns:
        Scalar tensor with the masked MSE.
    """
    # Threshold on a local copy — the original mutated the caller's
    # `labels` tensor in place via `labels[...] = 0`.
    labels = torch.where(torch.abs(labels) < 1e-4,
                         torch.zeros_like(labels), labels)
    if np.isnan(null_val):
        mask = ~torch.isnan(labels)
    else:
        mask = labels.ne(null_val)
    mask = mask.float()
    # Rescale so masked entries contribute 0 and the rest average correctly.
    mask /= torch.mean(mask)
    mask = torch.where(torch.isnan(mask), torch.zeros_like(mask), mask)
    loss = torch.square(torch.sub(preds, labels))
    loss = loss * mask
    loss = torch.where(torch.isnan(loss), torch.zeros_like(loss), loss)
    return torch.mean(loss)
def masked_rmse_torch(preds, labels, null_val=np.nan):
    """Root of the masked MSE; see :func:`masked_mse_torch`.

    The original also zeroed near-zero labels in place here; that was both
    redundant (``masked_mse_torch`` applies the same thresholding) and a
    surprising mutation of the caller's tensor, so it has been removed.
    """
    return torch.sqrt(masked_mse_torch(preds=preds, labels=labels,
                                       null_val=null_val))
def r2_score_torch(preds, labels):
    """Coefficient of determination (R^2) between two torch tensors.

    Returns a plain float computed by ``sklearn.metrics.r2_score``.
    """
    # detach() so tensors that still require grad can be converted to
    # numpy inside sklearn; cpu()/flatten() give the 1-D host arrays
    # sklearn expects.
    preds = preds.detach().cpu().flatten()
    labels = labels.detach().cpu().flatten()
    return r2_score(labels, preds)
def explained_variance_score_torch(preds, labels):
    """Explained-variance score between two torch tensors.

    Returns a plain float computed by
    ``sklearn.metrics.explained_variance_score``.
    """
    # detach() so tensors that still require grad can be converted to
    # numpy inside sklearn; cpu()/flatten() give the 1-D host arrays
    # sklearn expects.
    preds = preds.detach().cpu().flatten()
    labels = labels.detach().cpu().flatten()
    return explained_variance_score(labels, preds)
def masked_rmse_np(preds, labels, null_val=np.nan):
    """Root of the masked MSE for numpy arrays; see :func:`masked_mse_np`."""
    mse = masked_mse_np(preds=preds, labels=labels, null_val=null_val)
    return np.sqrt(mse)
def masked_mse_np(preds, labels, null_val=np.nan):
    """Mean squared error over entries whose label is not ``null_val``.

    Masked entries contribute 0; the remaining entries are re-weighted by
    the inverse observed fraction so the result stays a mean over them.
    NaNs produced along the way are zeroed out.
    """
    with np.errstate(divide='ignore', invalid='ignore'):
        if np.isnan(null_val):
            observed = ~np.isnan(labels)
        else:
            observed = np.not_equal(labels, null_val)
        weights = observed.astype('float32')
        weights /= np.mean(weights)
        sq_err = np.square(np.subtract(preds, labels)).astype('float32')
        sq_err = np.nan_to_num(sq_err * weights)
        return np.mean(sq_err)
def masked_mae_np(preds, labels, null_val=np.nan):
    """Mean absolute error over entries whose label is not ``null_val``.

    Masked entries contribute 0; the remaining entries are re-weighted by
    the inverse observed fraction so the result stays a mean over them.
    NaNs produced along the way are zeroed out.
    """
    with np.errstate(divide='ignore', invalid='ignore'):
        if np.isnan(null_val):
            observed = ~np.isnan(labels)
        else:
            observed = np.not_equal(labels, null_val)
        weights = observed.astype('float32')
        weights /= np.mean(weights)
        abs_err = np.abs(np.subtract(preds, labels)).astype('float32')
        abs_err = np.nan_to_num(abs_err * weights)
        return np.mean(abs_err)
def masked_mape_np(preds, labels, null_val=np.nan):
    """Mean absolute percentage error over non-null labels.

    Masked entries contribute 0; the remaining entries are re-weighted by
    the inverse observed fraction so the result stays a mean over them.
    NaNs produced along the way are zeroed out.

    NOTE(review): unlike the torch variant, near-zero labels are not
    snapped to 0 here, so a tiny non-null label still enters the
    denominator — preserved as-is to match the original behavior.
    """
    with np.errstate(divide='ignore', invalid='ignore'):
        if np.isnan(null_val):
            observed = ~np.isnan(labels)
        else:
            observed = np.not_equal(labels, null_val)
        weights = observed.astype('float32')
        weights /= np.mean(weights)
        pct_err = np.abs(np.divide(
            np.subtract(preds, labels).astype('float32'), labels))
        pct_err = np.nan_to_num(weights * pct_err)
        return np.mean(pct_err)
def r2_score_np(preds, labels):
    """Coefficient of determination (R^2) for numpy arrays.

    Arrays are flattened to 1-D before delegating to
    ``sklearn.metrics.r2_score``.
    """
    return r2_score(labels.flatten(), preds.flatten())
def explained_variance_score_np(preds, labels):
    """Explained-variance score for numpy arrays.

    Arrays are flattened to 1-D before delegating to
    ``sklearn.metrics.explained_variance_score``.
    """
    return explained_variance_score(labels.flatten(), preds.flatten())