# transform.py (forked from jiwei0921/DMRA)
import numpy as np
import torch
from PIL import Image


def colormap(n):
    """Build the first n entries of the PASCAL VOC colour map: each class index
    is decoded bit by bit into an (r, g, b) triple."""
    cmap = np.zeros([n, 3]).astype(np.uint8)
    for i in np.arange(n):
        r, g, b = np.zeros(3)
        for j in np.arange(8):
            # Bits 3j, 3j+1 and 3j+2 of the index feed bit (7 - j) of r, g and b.
            r = r + (1 << (7 - j)) * ((i & (1 << (3 * j))) >> (3 * j))
            g = g + (1 << (7 - j)) * ((i & (1 << (3 * j + 1))) >> (3 * j + 1))
            b = b + (1 << (7 - j)) * ((i & (1 << (3 * j + 2))) >> (3 * j + 2))
        cmap[i, :] = np.array([r, g, b])
    return cmap
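
# Usage sketch for colormap(): the first few VOC indices decode to black,
# maroon and green. _demo_colormap is an illustrative helper name.
def _demo_colormap():
    cmap = colormap(256)
    print(cmap[0])   # [  0   0   0]
    print(cmap[1])   # [128   0   0]
    print(cmap[2])   # [  0 128   0]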
class Relabel:
    """Replace every occurrence of one label value with another, e.g. to remap
    an ignore index before training."""

    def __init__(self, olabel, nlabel):
        self.olabel = olabel
        self.nlabel = nlabel

    def __call__(self, tensor):
        assert isinstance(tensor, torch.LongTensor), 'tensor needs to be LongTensor'
        tensor[tensor == self.olabel] = self.nlabel
        return tensor
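
# Usage sketch for Relabel: map the common ignore value 255 to class 0. The
# label values and _demo_relabel are illustrative assumptions.
def _demo_relabel():
    labels = torch.LongTensor([[0, 255], [3, 255]])
    print(Relabel(255, 0)(labels))   # 255 entries become 0, in place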
class ToLabel:
    """Convert a PIL label image into a 1 x H x W LongTensor of class indices."""

    def __call__(self, image):
        return torch.from_numpy(np.array(image)).long().unsqueeze(0)
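
# Usage sketch for ToLabel: read a palette-mode annotation and wrap it as a
# 1 x H x W LongTensor. The file name is a hypothetical placeholder.
def _demo_to_label(mask_path='example_mask.png'):
    mask = Image.open(mask_path)      # one class index per pixel
    label = ToLabel()(mask)
    print(label.shape, label.dtype)   # torch.Size([1, H, W]) torch.int64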
class Colorize:
    """Map a 2-D (H x W) tensor of class indices onto a 3 x H x W ByteTensor
    colour image using the first n entries of the VOC colour map."""

    def __init__(self, n=21):
        self.cmap = colormap(256)
        self.cmap[n] = self.cmap[-1]   # copy the last palette entry into slot n
        self.cmap = torch.from_numpy(self.cmap[:n])

    def __call__(self, gray_image):
        size = gray_image.size()
        color_image = torch.ByteTensor(3, size[0], size[1]).fill_(0)
        # Paint every non-background label with its colour; label 0 stays black.
        for label in range(1, len(self.cmap)):
            mask = (gray_image == label)
            color_image[0][mask] = self.cmap[label][0]
            color_image[1][mask] = self.cmap[label][1]
            color_image[2][mask] = self.cmap[label][2]
        return color_image
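
# Usage sketch for Colorize: colour a tiny label map and view it with PIL.
# _demo_colorize and the toy label map are illustrative assumptions.
def _demo_colorize():
    labels = torch.zeros(4, 4).long()
    labels[1:3, 1:3] = 1                      # a small patch of class 1 (maroon)
    color = Colorize(n=21)(labels)            # 3 x 4 x 4 ByteTensor
    return Image.fromarray(color.permute(1, 2, 0).contiguous().numpy())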