Commit 6c64c59

second unit test 1 + added NM in main

jitskevanpeer committed Dec 14, 2023
1 parent c1e9a6c commit 6c64c59
Showing 4 changed files with 83 additions and 56 deletions.
20 changes: 10 additions & 10 deletions HDC_library.py
@@ -45,7 +45,7 @@ def lookup_generate(dim, n_keys, mode = 1):
         table[i,:] = row
         prob_array[i] = probability
 
-    return table.astype(np.int8),prob_array
+    return table.astype(np.int8)#,prob_array
 
 # dim is the HDC dimensionality D
 def encode_HDC_RFF(img, position_table, grayscale_table, dim):
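For context, a minimal sketch of what lookup_generate is assumed to do around this hunk; the mode-0 and mode-1 probability rules are assumptions, not shown in the diff:

    import numpy as np

    def lookup_generate_sketch(dim, n_keys, mode=1):
        # assumed: mode 0 gives +1/-1 with p = 0.5 (position LUT),
        # mode 1 ramps the +1 probability linearly over keys (grayscale LUT)
        table = np.empty((n_keys, dim), dtype=np.int8)
        prob_array = np.empty(n_keys)
        for i in range(n_keys):
            probability = 0.5 if mode == 0 else i / (n_keys - 1)
            row = np.where(np.random.rand(dim) < probability, 1, -1)
            table[i, :] = row
            prob_array[i] = probability
        return table.astype(np.int8)  # prob_array is no longer returned, per this commit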
@@ -59,7 +59,7 @@ def encode_HDC_RFF(img, position_table, grayscale_table, dim):
         xor_result = (encoded_input[pixel] ^ position_table[pixel])
         xor_result = (xor_result != 0).astype(int)
         xor_result[xor_result == 0] = -1
-
+
         hv = xor_result
         container[pixel, :] = hv*1
 
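The three xor_result lines bind two bipolar vectors: the int8 XOR is nonzero exactly where the inputs differ, and the zeros are remapped to -1. A tiny worked example:

    import numpy as np

    a = np.array([ 1,  1, -1], dtype=np.int8)   # grayscale bits
    b = np.array([ 1, -1, -1], dtype=np.int8)   # position bits
    xor_result = (a ^ b)                        # [0, -2, 0]: zero where bits agree
    xor_result = (xor_result != 0).astype(int)  # [0, 1, 0]
    xor_result[xor_result == 0] = -1            # [-1, 1, -1]: back to bipolar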
@@ -101,9 +101,9 @@ def train_HDC_RFF(n_class, N_train, Y_train_init, HDC_cont_train, gamma, D_b):
         #Solve the system of equations to get the vector alpha:
         alpha = np.zeros(N_train+1)
         alpha = np.linalg.solve(Beta,L) #alpha here is the whole v vector from the slides
-        print("beta =",Beta)
+        #print("beta =",Beta)
         #print("L =",L)
-        print("alpha =",alpha)
+        #print("alpha =",alpha)
 
         # Get HDC prototype for class cla, still in floating point
         final_HDC_centroid = np.zeros(100)
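Here alpha stacks the bias and the support values of an LS-SVM-style system; Beta and L are built outside this hunk, so the layout below is an assumption. A self-contained sketch of the solve step:

    import numpy as np

    N, gamma = 4, 1.0
    K = np.random.randn(N, N); K = K @ K.T     # toy Gram matrix
    y = np.array([1., 1., -1., -1.])           # class targets
    Beta = np.zeros((N + 1, N + 1))            # assumed layout: [[0, 1^T], [1, K + I/gamma]]
    Beta[0, 1:] = 1.0
    Beta[1:, 0] = 1.0
    Beta[1:, 1:] = K + np.eye(N) / gamma
    L = np.concatenate(([0.0], y))
    alpha = np.linalg.solve(Beta, L)           # alpha = [b, v_1 .. v_N]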
@@ -175,11 +175,11 @@ def evaluate_F_of_x(Nbr_of_trials, HDC_cont_all, LABELS, beta_, bias_, gamma, alpha_sp, n_class, N_train, D_b, lambda_1, lambda_2, B_cnt):
 
         # Do the same encoding steps with the test set
         # put testset equal to training set for unit test 2, we want 100% accuracy
-        #HDC_cont_test_ = HDC_cont_all[N_train:,:]
-        HDC_cont_test_ = HDC_cont_train_*1
+        HDC_cont_test_ = HDC_cont_all[N_train:,:]
+        #HDC_cont_test_ = HDC_cont_train_*1
         HDC_cont_test_cpy = HDC_cont_test_ * 1
-        #bias_test = bias_[N_train:]
-        bias_test = bias_train
+        bias_test = bias_[N_train:]
+        #bias_test = bias_train
         # Apply cyclic accumulation with biases and accumulation speed beta_
         HDC_cont_test_cpy = HDC_cont_test_cpy*beta_ + bias_test
         cyclic_accumulation_test = HDC_cont_test_cpy % (2 ** B_cnt)
@@ -193,8 +193,8 @@
                 elif abs(cyclic_accumulation_test[row,col] - pow(2,B_cnt-1)) <= alpha_sp:
                     cyclic_accumulation_test[row,col] = 0
 
-        #Y_test = LABELS[N_train:] - 1
-        Y_test = Y_train*1
+        Y_test = LABELS[N_train:] - 1
+        #Y_test = Y_train*1
         Y_test = Y_test.astype(int)
 
         # Compute accuracy and sparsity of the test set w.r.t the HDC prototypes
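The visible branch squashes counter values near mid-range (2^(B_cnt-1)) to 0; assuming the elided branches map the rest to +/-1 by sign, a vectorized equivalent of the whole ternarization is:

    import numpy as np

    B_cnt, alpha_sp = 8, 16
    acc = np.random.randint(0, 2**B_cnt, size=(4, 5))    # cyclic counters
    centered = acc - 2**(B_cnt - 1)
    ternary = np.where(np.abs(centered) <= alpha_sp, 0,  # dead zone -> 0
                       np.sign(centered))                # rest -> +/-1
    sparsity = np.mean(ternary == 0)                     # alpha_sp controls sparsity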
Binary file modified __pycache__/HDC_library.cpython-36.pyc
71 changes: 43 additions & 28 deletions main_HDC.py
@@ -59,7 +59,7 @@
 grayscale_table = lookup_generate(D_HDC, maxval, mode = 1) #Input encoding LUT
 position_table = lookup_generate(D_HDC, imgsize_vector, mode = 0) #weight for XOR-ing
 HDC_cont_all = np.zeros((X.shape[0], D_HDC)) #Will contain all "bundled" HDC vectors
-bias_ = # -> INSERT YOUR CODE #generate the random biases once
+bias_ = np.random.uniform(0, 2*np.pi,size=(X.shape[0],D_HDC)) #generate the random biases once
 
 for i in range(X.shape[0]):
     if i%100 == 0:
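An aside on the inserted line: drawing bias_ uniformly from [0, 2*pi) matches the phase term of random Fourier features, phi_i(x) = cos(w_i . x + b_i), which is presumably why the biases are generated once and reused across all simplex evaluations. A hypothetical, self-contained illustration:

    import numpy as np

    rng = np.random.default_rng(0)
    d_in, D = 30, 100                       # input dim, HDC dim (toy values)
    W = rng.standard_normal((D, d_in))      # random projection
    b = rng.uniform(0, 2 * np.pi, size=D)   # uniform phases, as in bias_
    x = rng.standard_normal(d_in)
    phi = np.cos(W @ x + b)                 # one RFF embedding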
@@ -129,7 +129,8 @@
     Accs.append(np.mean(local_avgre))
     Sparsities.append(np.mean(local_sparse))
 ##################################
-
+print("Acc =",Accs)
+print("Sparsity =",Sparsities)
 #Transform lists to numpy array:
 F_of_x = np.array(F_of_x)
 Accs = np.array(Accs)
@@ -148,21 +149,28 @@
 
 #1) sort Accs, Sparsities, F_of_x, Simplex, add best objective to array "objective_"
 
-# -> INSERT YOUR CODE
+sorted_indices = np.argsort(F_of_x) #sort cost functions from smallest to largest
+F_of_x = F_of_x[sorted_indices]
+Accs = Accs[sorted_indices]
+Sparsities = Sparsities[sorted_indices]
+Simplex = Simplex[sorted_indices, :]
+
+best_objective_value = F_of_x[0] #lowest cost
+objective_.append(best_objective_value)
 
 #2) average simplex x_0
 
-# -> INSERT YOUR CODE
+x_0 = np.mean(Simplex[:-1, :], axis=0)
 
 #3) Reflection x_r
 
-# -> INSERT YOUR CODE
+x_r = x_0 + alpha_simp * (x_0 - Simplex[-1,:])
 
 #Evaluate cost of reflected point x_r
 
-# -> INSERT YOUR CODE
-
-if # -> INSERT YOUR CODE:
+F_curr, acc_curr, sparse_curr = evaluate_F_of_x(Nbr_of_trials, HDC_cont_all, LABELS, x_r[2], bias_, x_r[0], x_r[1], n_class, N_train, D_b, lambda_1, lambda_2, B_cnt)
+F_curr = 1 - np.mean(F_curr)
+if F_curr >= best_objective_value and F_curr < F_of_x[-2]:
     F_of_x[-1] = F_curr
     Simplex[-1,:] = x_r
     Accs[-1] = acc_curr
@@ -172,16 +180,13 @@
     rest = True
 
 if rest == True:
-    #4) Expansion x_e
-    if # -> INSERT YOUR CODE:
-
-        # -> INSERT YOUR CODE
+    #4) Expansion x_e
+    if F_curr < best_objective_value:
+        x_e = x_0 + gamma_simp*(x_r - x_0)
+        F_exp, acc_exp, sparse_exp = evaluate_F_of_x(Nbr_of_trials, HDC_cont_all, LABELS, x_e[2], bias_, x_e[0], x_e[1], n_class, N_train, D_b, lambda_1, lambda_2, B_cnt)
+        F_exp = 1 - np.mean(F_exp)
 
-        #Evaluate cost of reflected point x_e
-
-        # -> INSERT YOUR CODE
-
-        if # -> INSERT YOUR CODE:
+        if F_exp < F_curr:
             F_of_x[-1] = F_exp
             Simplex[-1,:] = x_e
             Accs[-1] = acc_exp
@@ -193,25 +198,35 @@
             Sparsities[-1] = sparse_curr
 
     else:
-        #4) Contraction x_c
-        if # -> INSERT YOUR CODE:
-            # -> INSERT YOUR CODE:
-        elif # -> INSERT YOUR CODE::
-            # -> INSERT YOUR CODE:
+        #5) Contraction x_c
+        flag = False
+        if F_curr < F_of_x[-1]:
+            x_c = x_0 + rho_simp * (x_r - x_0) #outside contraction, toward the reflected point
+            F_c, acc_c, sparse_c = evaluate_F_of_x(Nbr_of_trials, HDC_cont_all, LABELS, x_c[2], bias_, x_c[0], x_c[1], n_class, N_train, D_b, lambda_1, lambda_2, B_cnt)
+            F_c = 1 - np.mean(F_c)
+            if F_c < F_curr:
+                flag = True
+        else:
+            x_c = x_0 + rho_simp * (Simplex[-1,:] - x_0) #inside contraction, toward the worst vertex
+            F_c, acc_c, sparse_c = evaluate_F_of_x(Nbr_of_trials, HDC_cont_all, LABELS, x_c[2], bias_, x_c[0], x_c[1], n_class, N_train, D_b, lambda_1, lambda_2, B_cnt)
+            F_c = 1 - np.mean(F_c)
+            if F_c < F_of_x[-1]:
+                flag = True
 
-        #Evaluate cost of contracted point x_e
-
-        # -> INSERT YOUR CODE:
-
-        if # -> INSERT YOUR CODE:
-            F_of_x[-1] = F_c
+        if flag:
+            F_of_x[-1] = F_c #replace worst point with contracted point
             Simplex[-1,:] = x_c
             Accs[-1] = acc_c
             Sparsities[-1] = sparse_c
         else:
-            #4) Shrinking
+            #6) Shrinking
             for rep in range(1, Simplex.shape[0]):
-                # -> INSERT YOUR CODE:
+                #Replace all points except the best (Simplex[0])
+                Simplex[rep,:] = Simplex[0,:] + sigma_simp * (Simplex[rep,:] - Simplex[0,:])
+                F_shrink, acc_shrink, sparse_shrink = evaluate_F_of_x(Nbr_of_trials, HDC_cont_all, LABELS, Simplex[rep,2], bias_, Simplex[rep,0], Simplex[rep,1], n_class, N_train, D_b, lambda_1, lambda_2, B_cnt)
+                F_of_x[rep] = 1 - np.mean(F_shrink)
+                Accs[rep] = np.mean(acc_shrink)
+                Sparsities[rep] = np.mean(sparse_shrink)
 
 
 ##################################
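Taken together, the hunks above fill in one complete Nelder-Mead iteration: sort, reflect, expand, contract, shrink. A condensed, self-contained sketch of the same update, with a generic cost f in place of evaluate_F_of_x and standard coefficients assumed:

    import numpy as np

    def nelder_mead_step(simplex, f, alpha=1.0, gamma=2.0, rho=0.5, sigma=0.5):
        costs = np.array([f(x) for x in simplex])
        order = np.argsort(costs)                    # best .. worst
        simplex, costs = simplex[order], costs[order]
        x_0 = simplex[:-1].mean(axis=0)              # centroid excluding the worst
        x_r = x_0 + alpha * (x_0 - simplex[-1])      # reflection
        f_r = f(x_r)
        if costs[0] <= f_r < costs[-2]:              # accept reflection
            simplex[-1] = x_r
        elif f_r < costs[0]:                         # expansion
            x_e = x_0 + gamma * (x_r - x_0)
            simplex[-1] = x_e if f(x_e) < f_r else x_r
        else:                                        # contraction
            x_t = x_r if f_r < costs[-1] else simplex[-1]
            x_c = x_0 + rho * (x_t - x_0)
            if f(x_c) < min(f_r, costs[-1]):
                simplex[-1] = x_c
            else:                                    # shrink toward the best point
                simplex[1:] = simplex[0] + sigma * (simplex[1:] - simplex[0])
        return simplex

Repeatedly applying this to a random 4x3 simplex with f = lambda p: np.sum(p**2) drives all vertices toward the origin.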
48 changes: 30 additions & 18 deletions playground_testing.py
@@ -9,6 +9,7 @@
 from sklearn.utils import shuffle
 #np.set_printoptions(threshold=np.inf, linewidth=200)
 
+# UNIT TEST 1
 
 def test_matrix_probability(LUT,in_p):
     rows = len(LUT)
@@ -31,23 +32,21 @@ def test_matrix_probability(LUT,in_p):
     plt.show()
 
 
-#LUT,p_in = lookup_generate(1024,256,1)
+#LUT,p_in = lookup_generate(100,256,1)
 #test_matrix_probability(LUT,p_in)
 
 
-dim = 1024
-#position_table = lookup_generate(dim, len(position_table), 1)
-#grayscale_table = lookup_generate(dim, len(position_table), 0)
 mat = spio.loadmat('XOR_test_data.mat', squeeze_me=True)
 
 in1 = mat['in1'] # array
 in2 = mat['in2']
 desired = mat['result']
 
-# TO DO
-def test_XOR(in1,in2,desired,dim):
-    img = np.random.randint(0, 256, size=30)
-    img_hv, calculated = encode_HDC_RFF(img, in1, in2, dim)
+def test_XOR(in1,in2,desired):
+    #img = np.random.randint(0, 256, size=30)
+    #img_hv, calculated = encode_HDC_RFF(img, in1, in2, dim)
+    calculated = (in1 ^ in2)
+    calculated = (calculated != 0).astype(int)
     print("desired =",desired)
     print("calculated =",calculated)
     if (desired == calculated).all():
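The probability and XOR checks above can also run headlessly as assertions; a hedged version, assuming lookup_generate's mode-1 probabilities ramp linearly over the keys:

    import numpy as np

    # bit-probability check, in the spirit of test_matrix_probability
    n_keys, dim = 256, 1024
    p_in = np.linspace(0, 1, n_keys)
    LUT = np.where(np.random.rand(n_keys, dim) < p_in[:, None], 1, -1)
    measured = (LUT == 1).mean(axis=1)             # empirical fraction of +1s per row
    assert np.abs(measured - p_in).max() < 0.06    # loose Monte-Carlo tolerance

    # XOR truth-table check, in the spirit of test_XOR
    a = np.array([ 1,  1, -1, -1], dtype=np.int8)
    b = np.array([ 1, -1,  1, -1], dtype=np.int8)
    calculated = ((a ^ b) != 0).astype(int)
    assert (calculated == np.array([0, 1, 1, 0])).all()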
@@ -57,6 +56,22 @@ def test_XOR(in1,in2,desired):
 
 #test_XOR(in1,in2,desired)
 
+def test_encode_HDC_RFF():
+    #make synthetic test data
+    img = np.array([1, 2, 3])
+    position_table = np.array([[1,1,1],[1,1,1],[-1,-1,-1]])
+    grayscale_table = np.ones((5, 3),dtype=np.int8)
+    grayscale_table[1::2, :] = -1 #alternate rows of 1s and -1s
+    dim = 3
+    #[[1 1 1][1 1 1][-1 -1 -1]] XOR [[-1 -1 -1][1 1 1][-1 -1 -1]] = [[1 1 1][0 0 0][0 0 0]]
+    #we replace the zeros with -1; this test data covers all possible XOR input combinations
+    result,container = encode_HDC_RFF(img, position_table, grayscale_table, dim)
+    print("result =",container)
+
+#test_encode_HDC_RFF()
+
+# UNIT TEST 2
+
 def test_train():
     n_class = 2
     N_train = 360
@@ -65,14 +80,14 @@ def test_train():
     Y_train_init = np.concatenate((np.ones(N_train),np.ones(N_train)*(-1)))
     HDC_cont_train = np.concatenate((np.ones((N_train,100)),np.ones((N_train,100))*(-1)))
     centroids, biases, centroids_q, biases_q = train_HDC_RFF(n_class, 2*N_train, Y_train_init, HDC_cont_train, gamma, D_b)
-    print("centroids =",centroids)
-    print("biases =",biases)
+    #print("centroids =",centroids)
+    #print("biases =",biases)
     Acc = compute_accuracy(HDC_cont_train, Y_train_init, centroids_q, biases_q)
     return Acc
 
 #Acc = test_train()
 #print("Acc =",Acc)
-# If testset = trainset, we should get 100% accuracy
+# If testset = trainset, we should get very high accuracy
 
 
 dataset_path = 'WISCONSIN/data.csv'
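test_train builds two perfectly separated constant-feature classes, so scoring the training points against the learned prototypes should be near-perfect. A minimal sketch of the prototype scoring assumed behind compute_accuracy (a hypothetical reimplementation, not the library code):

    import numpy as np

    def predict_sketch(X, centroids_q, biases_q):
        scores = X @ centroids_q.T + biases_q   # one score per class
        return np.argmax(scores, axis=1)

    centroids_q = np.array([np.ones(100), -np.ones(100)])  # toy prototypes
    biases_q = np.zeros(2)
    X = np.concatenate((np.ones((5, 100)), -np.ones((5, 100))))
    y_idx = np.array([0]*5 + [1]*5)
    acc = np.mean(predict_sketch(X, centroids_q, biases_q) == y_idx)  # 1.0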
@@ -85,7 +100,7 @@
 D_HDC = 100 #HDC hypervector dimension
 portion = 0.6 #We choose 60%-40% split between train and test sets
 Nbr_of_trials = 1 #Test accuracy averaged over Nbr_of_trials runs
-N_tradeof_points = 20 #Number of tradeoff points - use 100
+N_tradeof_points = 40 #Number of tradeoff points - use 100
 N_fine = int(N_tradeof_points*0.4) #Number of tradeoff points in the "fine-grain" region - use 30
 #Initialize the sparsity-accuracy hyperparameter search
 lambda_fine = np.linspace(-0.2, 0.2, N_tradeof_points-N_fine)
@@ -110,14 +125,11 @@
 """
 #3) Generate HDC LUTs and bundle dataset
 """
-grayscale_table, prob_array1 = lookup_generate(D_HDC, maxval, mode = 1) #Input encoding LUT
-position_table, prob_array2 = lookup_generate(D_HDC, imgsize_vector, mode = 0) #weight for XOR-ing
+grayscale_table = lookup_generate(D_HDC, maxval, mode = 1) #Input encoding LUT
+position_table = lookup_generate(D_HDC, imgsize_vector, mode = 0) #weight for XOR-ing
 HDC_cont_all = np.zeros((X.shape[0], D_HDC)) #Will contain all "bundled" HDC vectors
 bias_ = np.random.uniform(0, 2*np.pi,size=(X.shape[0],D_HDC)) #generate the random biases once, [0,2*pi[ uniform
 
-print("X dimension =",X.shape)
-print("position table =", np.shape(position_table))
-print("grayscale table =", np.shape(grayscale_table))
 for i in range(X.shape[0]): # for every patient
     if i%100 == 0:
         print(str(i) + "/" + str(X.shape[0]))
@@ -174,4 +186,4 @@
     F_of_x.append(1 - np.mean(local_avg)) #Append cost F(x)
     Accs.append(np.mean(local_avgre))
     Sparsities.append(np.mean(local_sparse))
-##################################
+##################################
