Commit e4b834c: for debug
FujishigeTemma committed Jul 28, 2023 (1 parent: 2519826)
Showing 58 changed files with 7,179 additions and 1,089 deletions.
3 changes: 3 additions & 0 deletions .gitignore
@@ -102,3 +102,6 @@ venv.bak/

# macos
.DS_Store

# outputs
outputs/
188 changes: 96 additions & 92 deletions analysis_main.py
@@ -13,31 +13,35 @@
@author: daniel
"""

import pdb

# from burst_generator_inhomogeneous_poisson import inhom_poiss
import shelve

import matplotlib.pyplot as plt
import numpy as np
import pylab
from scipy.signal import convolve
from scipy.stats import pearsonr
from sklearn.preprocessing import normalize


def tri_filter(signal, kernel_delta):
    """
    kernel_delta
        width of kernel in datapoints
    """
    kernel = np.append(np.arange(kernel_delta / 2), np.arange(kernel_delta / 2, -1, -1))
    # convolve2d has proven PAINFULLY slow for some reason
    # signal_conv = convolve2d(signal, kernel, 'same')
    new_signal = []
    for x in signal:
        new_signal.append(convolve(x, kernel, "same"))
    signal_conv = np.array(new_signal)
    return signal_conv
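
A minimal usage sketch (the spike array and kernel width below are toy values, not from the repository):

import numpy as np  # assumes tri_filter from above is in scope
spikes = np.zeros((3, 100))            # toy input: 3 cells, 100 time bins
spikes[0, [10, 50, 90]] = 1            # a few spikes in the first cell
smoothed = tri_filter(spikes, kernel_delta=10)
print(smoothed.shape)                  # (3, 100): one smoothed trace per cell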

def correlate_signals(signal1, signal2):
    """Correlates two nxm dimensional signals.
    Correlates sig1n with sig2n along m and then averages all n correlation
    coefficients.
@@ -48,216 +52,216 @@ def correlate_signals(signal1,signal2):
    for idx in range(signal1.shape[0]):
        sig1 = signal1[idx] - pylab.std(signal1[idx])
        sig2 = signal2[idx] - pylab.std(signal2[idx])
        # cor = sum(sig1*sig2)/(len(sig1)*pylab.std(sig1)*pylab.std(sig2))
        cor = sum(sig1 * sig2) / (np.sqrt(sum(sig1**2)) * np.sqrt(sum(sig2**2)))
        corrs.append(cor)

    corrs = np.array(corrs)
    return np.nanmean(corrs)

def avg_dotprod_signals(signal1, signal2):
    """Average dot product of signal1 and signal2 excluding silent cells"""
    non_silent_sigs = np.unique(np.concatenate((np.argwhere(signal1.any(axis=1)), np.argwhere(signal2.any(axis=1)))))
    non_silent_sigs.sort()
    product = signal1[non_silent_sigs] * signal2[non_silent_sigs]
    prod_sum = product.sum(axis=1)
    avg_dot_product = prod_sum.mean()
    return avg_dot_product
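
A toy check (arrays invented for illustration): cells that are silent in both signals are dropped before averaging.

a = np.array([[1.0, 0.0], [0.0, 0.0]])  # second cell silent in both signals
b = np.array([[1.0, 1.0], [0.0, 0.0]])
print(avg_dotprod_signals(a, b))        # 1.0: only the first cell contributes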


def ndp_signals(signal1, signal2):
    # pdb.set_trace()
    dotproduct = (signal1 * signal2).sum()
    # pdb.set_trace()
    normalization = np.sqrt((signal1 * signal1).sum()) * np.sqrt((signal2 * signal2).sum())
    # pdb.set_trace()
    return dotproduct / normalization
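
This is the cosine similarity of the two populations flattened into single vectors; a quick sanity check with toy arrays (illustrative only):

a = np.array([[1.0, 0.0], [0.0, 1.0]])
b = np.array([[1.0, 0.0], [0.0, 0.0]])
print(ndp_signals(a, a))  # 1.0 for identical patterns
print(ndp_signals(a, b))  # 0.707...: 1 / (sqrt(2) * sqrt(1))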


def ndp_signals_tresolved(signal1, signal2, len_bin):
    pdb.set_trace()
    signal1 = np.reshape(signal1[:, 0 : int(len_bin * int(signal1.shape[1] / len_bin))], (signal1.shape[0], int(signal1.shape[1] / len_bin), len_bin))
    signal1 = signal1.sum(axis=2)
    pdb.set_trace()
    signal2 = np.reshape(signal2[:, 0 : int(len_bin * int(signal2.shape[1] / len_bin))], (signal2.shape[0], int(signal2.shape[1] / len_bin), len_bin))
    signal2 = signal2.sum(axis=2)
    pdb.set_trace()
    dotproduct = (signal1 * signal2).sum(axis=0)
    pdb.set_trace()
    normalization = np.sqrt((signal1 * signal1).sum(axis=0)) * np.sqrt((signal2 * signal2).sum(axis=0))

    return dotproduct / normalization


def avg_dotprod_signals_tbinned(signal1, signal2, len_bin=1000):
    """Average dot product of signal1 and signal2"""
    # Normalize every time bin individually
    # // keeps the shape tuple integral (a float shape raises a TypeError),
    # and np.reshape takes no extra positional argument here
    signal1 = np.reshape(signal1[:, 0 : (signal1.shape[1] // len_bin) * len_bin], (signal1.shape[0], signal1.shape[1] // len_bin, len_bin))
    signal1 = signal1[:, 0:5, :]
    signal2 = np.reshape(signal2[:, 0 : (signal2.shape[1] // len_bin) * len_bin], (signal2.shape[0], signal2.shape[1] // len_bin, len_bin))
    signal2 = signal2[:, 0:5, :]

    sig1 = []
    for x in signal1:
        sig1.append(normalize(x, axis=1))
    signal1 = np.array(sig1)

    sig2 = []
    for x in signal2:
        sig2.append(normalize(x, axis=1))
    signal2 = np.array(sig2)

    product = signal1 * signal2
    prod_sum = product.sum(axis=2)

    silent_sigs = np.argwhere(np.logical_and(np.invert(signal1.any(axis=2)), np.invert(signal2.any(axis=2))))

    for x in silent_sigs:
        prod_sum[x[0], x[1]] = np.nan
    avg_dot_product = np.nanmean(prod_sum, axis=0)
    return avg_dot_product
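
A sketch of calling the binned variant (random toy data; assumes the integer-division fix noted in the function):

rng = np.random.default_rng(0)
a = (rng.random((4, 5000)) < 0.1).astype(float)  # toy spike rasters, 4 cells
b = (rng.random((4, 5000)) < 0.1).astype(float)
print(avg_dotprod_signals_tbinned(a, b, len_bin=1000))  # one value per time bin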


def time_stamps_to_signal(time_stamps, dt_signal, t_start, t_stop):
    """Convert an array of timestamps to a signal where 0 is absence and 1 is
    presence of spikes
    """
    # Construct a zero array with size corresponding to desired output signal
    sig = np.zeros((np.shape(time_stamps)[0], int((t_stop - t_start) / dt_signal)))
    # Find the indices where spikes occurred according to time_stamps
    time_idc = []
    for x in time_stamps:
        curr_idc = []
        if np.any(x):
            for y in x:
                curr_idc.append((y - t_start) / dt_signal)
        time_idc.append(curr_idc)

    # Set the spike indices to 1 (plain int replaces the removed np.int alias)
    try:
        for sig_idx, idc in enumerate(time_idc):
            sig[sig_idx, np.array(idc, dtype=int)] = 1
    # A spike exactly at t_stop indexes one past the end; shift those back one bin
    except IndexError:
        for sig_idx, idc in enumerate(time_idc):
            sig[sig_idx, np.array(idc, dtype=int) - 1] = 1

    return sig
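
For example (spike times in the same units as dt_signal; values invented):

stamps = np.array([[12.0, 40.5], [3.3, 77.7]])  # toy timestamps for 2 cells
sig = time_stamps_to_signal(stamps, dt_signal=0.1, t_start=0, t_stop=100)
print(sig.shape)     # (2, 1000)
print(sig[0].sum())  # 2.0: one nonzero entry per timestamp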

def population_similarity_measure_ob(signal1, signal2, len_bin):
    signal1 = np.reshape(signal1[:, 0 : (signal1.shape[1] // len_bin) * len_bin], (signal1.shape[0], signal1.shape[1] // len_bin, len_bin))
    signal1 = signal1.mean(axis=2)
    signal2 = np.reshape(signal2[:, 0 : (signal2.shape[1] // len_bin) * len_bin], (signal2.shape[0], signal2.shape[1] // len_bin, len_bin))
    signal2 = signal2.mean(axis=2)

    # Normalize
    signal1 = normalize(signal1, axis=0)
    signal2 = normalize(signal2, axis=0)

    product = signal1 * signal2
    prod_sum = product.sum(axis=0)
    return prod_sum.mean()

def similarity_measure_leutgeb_BUGGY(signal1, signal2, len_bin):
    """Oriented on the"""
    signal1 = np.reshape(signal1[:, 0 : int((signal1.shape[1] / len_bin) * len_bin)], (signal1.shape[0], signal1.shape[1] / len_bin, len_bin), len_bin)
    signal1 = signal1.sum(axis=2)

    signal2 = np.reshape(signal2[:, 0 : int((signal2.shape[1] / len_bin) * len_bin)], (signal2.shape[0], signal2.shape[1] / len_bin, len_bin), len_bin)
    signal2 = signal2.sum(axis=2)

    corr_vector = []

    for x in range(signal1.shape[1]):
        corr_vector.append(pearsonr(signal1[:, x], signal2[:, x])[0])

    return np.array(corr_vector)

def similarity_measure_leutgeb(signal1, signal2, len_bin):
    pdb.set_trace()
    signal1 = np.reshape(signal1[:, 0 : int(len_bin * int(signal1.shape[1] / len_bin))], (signal1.shape[0], int(signal1.shape[1] / len_bin), len_bin))
    signal1 = signal1.sum(axis=2)
    pdb.set_trace()
    signal2 = np.reshape(signal2[:, 0 : int(len_bin * int(signal2.shape[1] / len_bin))], (signal2.shape[0], int(signal2.shape[1] / len_bin), len_bin))
    signal2 = signal2.sum(axis=2)
    pdb.set_trace()
    corr_vector = []

    for x in range(signal1.shape[1]):
        corr_vector.append(pearsonr(signal1[:, x], signal2[:, x])[0])
    pdb.set_trace()
    return np.array(corr_vector)


def sqrt_diff(signal1, signal2, len_bin):
    signal1 = np.reshape(signal1[:, 0 : (signal1.shape[1] // len_bin) * len_bin], (signal1.shape[0], signal1.shape[1] // len_bin, len_bin))

    signal2 = np.reshape(signal2[:, 0 : (signal2.shape[1] // len_bin) * len_bin], (signal2.shape[0], signal2.shape[1] // len_bin, len_bin))

    subtr = np.sqrt((signal1 - signal2) ** 2)
    subtr_sum = subtr.sum(axis=2).sum(axis=0)
    print(subtr_sum)
    return subtr_sum


def sqrt_diff_norm_TRESOLVED(signal1, signal2, len_bin):
    signal1 = np.reshape(signal1[:, 0 : (signal1.shape[1] // len_bin) * len_bin], (signal1.shape[0], signal1.shape[1] // len_bin, len_bin))

    signal2 = np.reshape(signal2[:, 0 : (signal2.shape[1] // len_bin) * len_bin], (signal2.shape[0], signal2.shape[1] // len_bin, len_bin))
    # Total spikes across both signals, matching the normalizer in sqrt_diff_norm below
    total_spikes = signal1.sum(axis=2).sum(axis=0) + signal2.sum(axis=2).sum(axis=0)
    subtr = np.sqrt((signal1 - signal2) ** 2)
    subtr_sum = subtr.sum(axis=2).sum(axis=0) / total_spikes
    print(subtr_sum)
    return subtr_sum


def coactivity(signal1, signal2):
    coactive = (np.array(signal1 > 0) * np.array(signal2 > 0)).sum()
    total_active = np.logical_or(np.array(signal1 > 0), np.array(signal2 > 0)).sum()
    return coactive / float(total_active)


def overlap(signal1, signal2):
    total_overlap = ((signal1 > 0) == (signal2 > 0)).sum()
    return total_overlap / float(len(signal1))
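
Worked through on small binary vectors (values invented for illustration):

a = np.array([1, 0, 1, 0])  # toy activity vectors
b = np.array([1, 1, 0, 0])
print(coactivity(a, b))  # 1/3: one co-active bin out of three bins active in either
print(overlap(a, b))     # 0.5: the active/silent state matches in two of four bins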


def sqrt_diff_norm(signal1, signal2, len_bin):
    # len_bin is unused here; the whole signals are compared at once
    total_spikes = signal1.sum() + signal2.sum()
    subtr = np.sqrt((signal1 - signal2) ** 2).sum()
    return subtr / total_spikes
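
Since sqrt((a - b)**2) is just |a - b|, this is the summed absolute difference scaled by the total spike count; for example (toy arrays):

a = np.array([[2.0, 0.0]])
b = np.array([[0.0, 1.0]])
print(sqrt_diff_norm(a, b, len_bin=None))  # (2 + 1) / (2 + 1) = 1.0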


def inner_pearsonr_BUGGY(signal1, len_bin):
    signal1 = np.reshape(signal1[:, 0 : int((signal1.shape[1] / len_bin) * len_bin)], (signal1.shape[0], signal1.shape[1] / len_bin, len_bin), len_bin)
    return signal1
    # Unreachable: the early return above short-circuits the rest of this function
    signal1 = signal1.sum(axis=2)

    corr_vector = []

    for x in range(signal1.shape[1]):
        corr_vector.append(pearsonr(signal1[:, 0], signal1[:, x])[0])

    return corr_vector


def inner_pearsonr(signal1, len_bin):
    signal1 = np.reshape(signal1, (signal1.shape[0], signal1.shape[1] // len_bin, len_bin))

    signal1 = signal1.sum(axis=2)

    corr_vector = []

    for x in range(signal1.shape[1]):
        corr_vector.append(pearsonr(signal1[:, 0], signal1[:, x])[0])

    return corr_vector


if __name__ == "__main__":
    # Requires the inhom_poiss import that is commented out at the top of the file
    temporal_patterns = inhom_poiss()
    time_sig = time_stamps_to_signal(temporal_patterns, dt_signal=0.1, t_start=0, t_stop=1000)
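
Because the inhom_poiss import is commented out, running the module as committed raises a NameError; a self-contained smoke test could substitute synthetic timestamps for the Poisson generator (purely illustrative, not the author's setup):

rng = np.random.default_rng(42)
fake_patterns = np.sort(rng.uniform(0, 1000, size=(8, 10)), axis=1)  # stand-in for inhom_poiss()
time_sig = time_stamps_to_signal(fake_patterns, dt_signal=0.1, t_start=0, t_stop=1000)
print(ndp_signals(time_sig, time_sig))  # 1.0: a signal is maximally similar to itself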
4 changes: 1 addition & 3 deletions mechs/gskch.mod
@@ -67,9 +67,7 @@ PROCEDURE state() { :Computes state variable q at current v and dt.
	cai = ncai + lcai + tcai
	rate(cai)
	q = q + (qinf-q) * qexp

}

LOCAL q10
6 changes: 0 additions & 6 deletions mechs/hyperde3.mod
@@ -102,9 +102,6 @@ INITIAL {
	hys = hysinf
	hyhtf = hyhtfinf
	hyhts = hyhtsinf
}

? states
@@ -116,9 +113,6 @@ PROCEDURE states() { :Computes state variables m, h, and n
	hyhtf = hyhtf + hyhtfexp*(hyhtfinf-hyhtf)
	hyhts = hyhts + hyhtsexp*(hyhtsinf-hyhts)

}

LOCAL q10