
Cannot run the .ipynb file #9

Open

ssaneei opened this issue Aug 3, 2023 · 0 comments
ssaneei commented Aug 3, 2023

Hi!

I wanted to run your code, but fastai has apparently changed some of its APIs, so I started rewriting your code without the fastai dependency. Here is what I have right now; it's still very close to your code and only a few lines differ.
However, the problem is that I can't run it for layer values other than 0, 1, and 2, while your code goes as deep as layer = 40. I wanted to ask whether you think your code can be made runnable with some modifications, and whether you can spot where I went wrong.

Thank you in advance. :)

Here's the code, right now:

```python
import torch
import torch.optim
import torch.nn as nn
import numpy as np
import cv2
import matplotlib.pyplot as plt
import PIL.Image
from torchvision.models import vgg16

class SaveFeatures():  # forward hook that stores the hooked module's output
    def __init__(self, module):
        self.hook = module.register_forward_hook(self.hook_fn)
    def hook_fn(self, module, input, output):
        self.features = torch.tensor(output, requires_grad=True)
    def close(self):
        self.hook.remove()


class FilterVisualizer():
    def __init__(self, size=56, upscaling_steps=12, upscaling_factor=1.2):
        self.size, self.upscaling_steps, self.upscaling_factor = size, upscaling_steps, upscaling_factor
        self.model = vgg16(pretrained=True).eval()
        self.model.requires_grad_(False)

    def visualize(self, layer, filter, lr=0.1, opt_steps=20, blur=None):
        sz = self.size
        img = np.uint8(np.random.uniform(150, 180, (sz, sz, 3)))/255  # generate random image
        activations = SaveFeatures(list(self.model.children())[layer])  # register hook

        
        for _ in range(self.upscaling_steps):
            img_var = torch.tensor(np.transpose(cv2.resize(img, (sz, sz)), (2, 0, 1)), dtype=torch.float32, device='cpu', requires_grad=True)
            optimizer = torch.optim.Adam([img_var], lr=lr, weight_decay=1e-6)
            for n in range(opt_steps):
                optimizer.zero_grad()
                model_output = self.model(img_var.unsqueeze(0))
                loss = -activations.features[0, filter].mean()  # maximise the mean activation of the chosen filter
                loss.backward()
                optimizer.step()
            img = img_var.detach().cpu().numpy()
            img = (img - np.min(img)) / (np.max(img) - np.min(img))
            img = np.clip(np.transpose(img, (1, 2, 0)), 0, 1)
            self.output = img
            sz = int(self.upscaling_factor * sz)
            img = cv2.resize(img, (sz, sz), interpolation=cv2.INTER_CUBIC)
            if blur is not None:
                img = cv2.blur(img, (blur, blur))

        self.save(layer, filter)
        activations.close()

    def save(self, layer, filter):
        plt.imsave("layer_" + str(layer) + "_filter_" + str(filter) + ".jpg", np.clip(self.output, 0, 1))


layer = 0
filter = 265


FV = FilterVisualizer(size=56, upscaling_steps=12, upscaling_factor=1.2)


FV.visualize(layer, filter, blur=5)


img = PIL.Image.open("layer_" + str(layer) + "_filter_" + str(filter) + ".jpg")
plt.figure(figsize=(7, 7))
plt.imshow(img)

```
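
In case it helps with the debugging, here is a small sketch, separate from the code above, that just prints how torchvision's vgg16 is organised. I'm assuming the out-of-range layer values come from list(self.model.children()) only having three entries, whereas the individual conv/ReLU/pool layers live inside model.features; the index values in the comments are only examples.

```python
from torchvision.models import vgg16

# Weights don't matter for inspecting the structure, so no pretrained flag here.
model = vgg16()

# Top level: only three children (features, avgpool, classifier), which is
# presumably why layer values above 2 fail with list(model.children()).
for i, child in enumerate(model.children()):
    print(i, type(child).__name__)

# The individual conv/ReLU/pool layers sit inside model.features
# (indices 0-30 for vgg16), so deeper values like layer = 28 exist there.
for i, layer in enumerate(model.features):
    print(i, layer)
```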
