-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathSOM.py
More file actions
69 lines (52 loc) · 2.36 KB
/
SOM.py
File metadata and controls
69 lines (52 loc) · 2.36 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
import numpy as np
import cv2
import sompy
import random
import format_data as fd
from sklearn.neural_network import MLPClassifier
if __name__ == '__main__':
    # --- Load the image and build the chrominance dataset -------------------
    img = cv2.imread('paisagem5.jpg')
    img_LUV = cv2.cvtColor(img, cv2.COLOR_BGR2LUV)
    img_UV = img_LUV[:, :, 1:]  # keep only the U and V channels
    data = img_UV.reshape(img.shape[0] * img.shape[1], 2)

    # Subsample 30% of the pixels to make SOM training tractable.
    # BUG FIX: the original used `int(0.3 * data.size)`; `data.size` counts
    # elements (rows * 2), so it actually sampled 60% of the pixels.
    # `data.shape[0]` is the pixel count, giving the intended 30%.
    n_samples = int(0.3 * data.shape[0])
    reduced_data = np.array(random.sample(data.tolist(), n_samples))
    dataset = reduced_data

    # --- Train a SOM that quantizes the UV color space ----------------------
    som = sompy.SOMFactory.build(dataset, mapsize=[200, 200], mask=None,
                                 mapshape='planar', lattice='rect',
                                 normalization='var', initialization='pca',
                                 neighborhood='gaussian', training='batch',
                                 name='coloring')
    som.train(n_job=1, verbose='info', train_rough_len=None,
              train_finetune_len=None)

    img_grey = img_LUV[:, :, 0]
    # Denormalize the codebook back into raw UV values.
    codebook = som._normalizer.denormalize_by(som.data_raw, som.codebook.matrix)
    # Codebook index (BMU) for every pixel of the FULL image.
    # NOTE: the original also called som.bmu_ind_to_xy(som.project_data(data))
    # here and immediately overwrote the result — that dead call is removed.
    proj = som.project_data(data)
    # Quantized data: each pixel replaced by its SOM codebook color.
    new_data = np.uint8(codebook[proj])

    # --- Train one MLP per chrominance channel (L patches -> U / V) ---------
    # Each training sample is a 3x3 luminance neighborhood around a pixel
    # (presumably — depends on format_data.image_mapping; confirm there).
    train_input = fd.image_mapping(img_LUV[:, :, 0], 3)
    train_target = new_data
    clf_U = MLPClassifier(activation='relu', hidden_layer_sizes=(20, 20,),
                          max_iter=1000, learning_rate_init=0.001, verbose=True)
    clf_V = MLPClassifier(activation='relu', hidden_layer_sizes=(20, 20,),
                          max_iter=1000, learning_rate_init=0.001, verbose=True)
    clf_U.fit(train_input, train_target[:, 0])
    clf_V.fit(train_input, train_target[:, 1])

    # Predict on the training inputs and report (training) accuracy.
    U = np.uint8(clf_U.predict(train_input))
    V = np.uint8(clf_V.predict(train_input))
    accuracy_U = 100 * clf_U.score(train_input, train_target[:, 0])
    accuracy_V = 100 * clf_V.score(train_input, train_target[:, 1])
    print('Accuracy U: %.2lf' % accuracy_U)
    print('Accuracy V: %.2lf' % accuracy_V)

    # --- Reassemble the colorized image and display everything --------------
    new_U = U.reshape(img.shape[0], img.shape[1], 1)
    new_V = V.reshape(img.shape[0], img.shape[1], 1)
    new_img = cv2.merge([img_grey, new_U, new_V])
    result = cv2.cvtColor(np.uint8(new_img), cv2.COLOR_LUV2BGR)
    cv2.imshow('original U', img_UV[:, :, 0])
    cv2.imshow('predict U', np.uint8(new_U))
    cv2.imshow('original V', img_UV[:, :, 1])
    cv2.imshow('predict V', np.uint8(new_V))
    cv2.imshow('original', img)
    cv2.imshow('test', result)
    cv2.waitKey(0)
    cv2.destroyAllWindows()