Single Net Quantile Regression #5

Open
TimHahn1981 opened this issue Apr 17, 2020 · 0 comments

Hi Igor,
thanks for your great post on Quantile Regression. I recently came across a paper by two Facebook AI researchers who use a nice, simple approach to quantile regression: during training, the quantile q is drawn at random and fed to the network as an extra input. This way, you need neither multiple outputs (as I had previously done) nor multiple nets (as you have done in your post).
I was wondering what you think about the approach and whether it can be implemented in Keras. In principle, one would only need to inject random q values during training. I tried, but have failed so far (a rough sketch of what I had in mind is at the very end of this post, after the Torch code).
Best,
Tim

Torch code from the authors' repo below (paper: https://arxiv.org/pdf/1811.00908.pdf):

from typing import Union
import torch
import torch.utils.data as Data
import numpy

class FBUncertaintyRegressor():
    def __init__(self, hidden: int = 64, epochs: int = 10, learning_rate: float = 1e-2, weight_decay: float = 1e-2,
                 quantil: Union[float, str] = "all", device: Union[str, torch.device] = 'cpu'):
        super().__init__()
        self.hidden = hidden
        self.epochs = epochs
        self.batch_size = 64
        self.learning_rate = learning_rate
        self.quantil = quantil
        self.weight_decay = weight_decay
        if isinstance(device, str):
            self.device = torch.device(device)
        elif isinstance(device, torch.device):
            self.device = device
        else:
            self.device = torch.device('cpu')
        self.model = None

    def fit(self, data: numpy.ndarray, targets: numpy.ndarray = None) -> 'FBUncertaintyRegressor':
        data = torch.tensor(data, dtype=torch.float32)
        # reshape targets to (n, 1) so the loss broadcasts against the (n, 1) model output
        targets = torch.tensor(targets, dtype=torch.float32).reshape(-1, 1).to(device=self.device)
        ds = Data.TensorDataset(data, targets)
        loader = Data.DataLoader(dataset=ds, batch_size=self.batch_size)

        dim = data.shape[1]
        self.model = torch.nn.Sequential(
            torch.nn.Linear(dim + 1, self.hidden),   # dim + 1: the extra input is tau
            torch.nn.ReLU(),
            torch.nn.BatchNorm1d(self.hidden),
            torch.nn.Dropout(.2),
            torch.nn.Linear(self.hidden, 1)
        ).to(device=self.device)

        opt = torch.optim.Adam(self.model.parameters(), eps=1e-07,
                               lr=self.learning_rate)   # , weight_decay=self.weight_decay)
        loss = QuantileLoss()

        for i in range(self.epochs):
            for batch_x, batch_y in loader:
                batch_x = batch_x.to(device=self.device)
                opt.zero_grad()
                if self.quantil == "all":
                    # a fresh random tau per sample, so a single net learns all quantiles at once
                    taus = torch.rand(batch_x.shape[0], 1).to(device=self.device)
                else:
                    taus = torch.zeros(batch_x.shape[0], 1).fill_(self.quantil).to(device=self.device)
                tau_augs = self.__augment__(batch_x, taus)
                model_out = self.model(tau_augs)
                a = loss(model_out, batch_y, taus)   # compare against batch_y, not the full target tensor
                a.backward()
                opt.step()
            print(i, a.item())
            # taus_ = torch.rand(data.size(0), 1).fill_(self.quantil).to(device=self.device)
            # loss_ = loss(self.model(self.__augment__(data, taus_)), targets, taus_).detach().numpy()
            # print(f"Epoch {i}/{self.epochs}; loss: {loss_}")

        return self

    def predict(self, data: numpy.ndarray, predict_quantil: float = None) -> numpy.ndarray:
        if predict_quantil is None and isinstance(self.quantil, float):
            predict_quantil = self.quantil
        data = torch.tensor(data, dtype=torch.float32).to(device=self.device)
        self.model.eval()   # switch BatchNorm/Dropout to inference behaviour
        return self.model(self.__augment__(data, predict_quantil)).to(device='cpu').detach().numpy()

    def __augment__(self, data: torch.Tensor, tau=None) -> torch.Tensor:
        if tau is None:
            tau = torch.zeros(data.size(0), 1).fill_(0.5).to(device=self.device)
        elif type(tau) == float:
            tau = torch.zeros(data.size(0), 1).fill_(tau).to(device=self.device)
        # append tau as an extra input feature, rescaled from [0, 1] to roughly [-6, 6]
        return torch.cat((data, (tau - 0.5) * 12), 1)

class QuantileLoss(torch.nn.Module):
    def __init__(self):
        super(QuantileLoss, self).__init__()

    def forward(self, yhat: torch.Tensor, y: torch.Tensor, tau: torch.Tensor) -> torch.Tensor:
        # pinball loss: under-predictions are weighted by tau, over-predictions by (1 - tau)
        diff = yhat - y
        mask = (diff.ge(0).float() - tau).detach()
        return (mask * diff).mean()
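
This is roughly how I call the class above, on toy data (a minimal sketch; the shapes and hyperparameters are placeholders, not from the paper):

import numpy

# toy data: 1-d input with heteroscedastic noise
rng = numpy.random.RandomState(0)
X = rng.uniform(-3, 3, size=(1000, 1)).astype("float32")
y = numpy.sin(X[:, 0]) + rng.normal(scale=0.1 + 0.2 * numpy.abs(X[:, 0]), size=1000)

reg = FBUncertaintyRegressor(hidden=64, epochs=10, quantil="all")
reg.fit(X, y)

# a single net, queried at several quantiles after training
lower = reg.predict(X, predict_quantil=0.1)
median = reg.predict(X, predict_quantil=0.5)
upper = reg.predict(X, predict_quantil=0.9)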
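
And for reference, this is the kind of thing I tried in Keras (a rough, untested sketch: the architecture and the (tau - 0.5) * 12 rescaling mirror the Torch code above, but packing tau into y_true so the loss function can see it is my own workaround, not something from the paper):

import numpy
import tensorflow as tf
from tensorflow import keras

def build_model(dim, hidden=64):
    # same net as above: the input is the features plus one (rescaled) tau column
    inp = keras.Input(shape=(dim + 1,))
    h = keras.layers.Dense(hidden, activation="relu")(inp)
    h = keras.layers.BatchNormalization()(h)
    h = keras.layers.Dropout(0.2)(h)
    out = keras.layers.Dense(1)(h)
    return keras.Model(inp, out)

def quantile_loss(y_true_and_tau, y_pred):
    # column 0 holds the target, column 1 the tau used for that sample
    y = y_true_and_tau[:, 0:1]
    tau = y_true_and_tau[:, 1:2]
    diff = y - y_pred
    # pinball loss, same as the Torch QuantileLoss above
    return tf.reduce_mean(tf.maximum(tau * diff, (tau - 1.0) * diff))

# toy data as in the usage snippet above
rng = numpy.random.RandomState(0)
X = rng.uniform(-3, 3, size=(1000, 1)).astype("float32")
y = numpy.sin(X[:, 0]) + rng.normal(scale=0.1 + 0.2 * numpy.abs(X[:, 0]), size=1000)

model = build_model(dim=X.shape[1])
model.compile(optimizer=keras.optimizers.Adam(1e-2), loss=quantile_loss)

for epoch in range(10):
    # fresh random taus every epoch (per-epoch rather than per-batch), appended as an input feature
    tau = numpy.random.rand(len(X), 1).astype("float32")
    x_aug = numpy.hstack([X, (tau - 0.5) * 12.0])
    y_aug = numpy.hstack([y.reshape(-1, 1).astype("float32"), tau])
    model.fit(x_aug, y_aug, batch_size=64, epochs=1, verbose=0)

# querying the 0.9 quantile: append the matching rescaled tau column
tau_p = numpy.full((len(X), 1), 0.9, dtype="float32")
pred_90 = model.predict(numpy.hstack([X, (tau_p - 0.5) * 12.0]))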