diff --git a/src/cnnett.py b/lp-src/cnnett.py
similarity index 100%
rename from src/cnnett.py
rename to lp-src/cnnett.py
diff --git a/src/conv_lp.py b/lp-src/conv_lp.py
similarity index 100%
rename from src/conv_lp.py
rename to lp-src/conv_lp.py
diff --git a/src/deepcover_keras.py b/lp-src/deepcover_keras.py
similarity index 100%
rename from src/deepcover_keras.py
rename to lp-src/deepcover_keras.py
diff --git a/src/lp.py b/lp-src/lp.py
similarity index 100%
rename from src/lp.py
rename to lp-src/lp.py
diff --git a/src/nnett.py b/lp-src/nnett.py
similarity index 100%
rename from src/nnett.py
rename to lp-src/nnett.py
diff --git a/src/util.py b/lp-src/util.py
similarity index 100%
rename from src/util.py
rename to lp-src/util.py
diff --git a/src/__pycache__/mask.cpython-37.pyc b/src/__pycache__/mask.cpython-37.pyc
new file mode 100644
index 0000000..4deb65f
Binary files /dev/null and b/src/__pycache__/mask.cpython-37.pyc differ
diff --git a/src/__pycache__/spectra_gen.cpython-37.pyc b/src/__pycache__/spectra_gen.cpython-37.pyc
new file mode 100644
index 0000000..cd93258
Binary files /dev/null and b/src/__pycache__/spectra_gen.cpython-37.pyc differ
diff --git a/src/__pycache__/to_explain.cpython-37.pyc b/src/__pycache__/to_explain.cpython-37.pyc
new file mode 100644
index 0000000..c429488
Binary files /dev/null and b/src/__pycache__/to_explain.cpython-37.pyc differ
diff --git a/src/__pycache__/to_rank.cpython-37.pyc b/src/__pycache__/to_rank.cpython-37.pyc
new file mode 100644
index 0000000..366e334
Binary files /dev/null and b/src/__pycache__/to_rank.cpython-37.pyc differ
diff --git a/src/__pycache__/utils.cpython-37.pyc b/src/__pycache__/utils.cpython-37.pyc
new file mode 100644
index 0000000..1fa7bce
Binary files /dev/null and b/src/__pycache__/utils.cpython-37.pyc differ
diff --git a/src/mask.py b/src/mask.py
new file mode 100644
index 0000000..e2c0250
--- /dev/null
+++ b/src/mask.py
@@ -0,0 +1,19 @@
+import numpy as np
+
+def find_mask(x, p=2/32):
+  sp=x.shape
+  h=int(sp[0]*p)
+  if h<1: h=1
+  tmp_x=x.copy()
+  bg_x=x.copy()
+
+  for iindex, _ in np.ndenumerate(x):
+    i0=iindex[0]
+    i1=iindex[1]
+    region=tmp_x[ np.max([i0-h,0]) : np.min([i0+h, sp[0]]), np.max([i1-h,0]):np.min([i1+h,sp[1]])]
+    v=np.min(region)
+    for j in range(0, (sp[2])):
+      #bg_x[i0][i1][j]=v
+      bg_x[i0][i1][j]=np.mean(region[:,:,j])
+
+  return bg_x
diff --git a/src/sfl.py b/src/sfl.py
new file mode 100644
index 0000000..fdb668a
--- /dev/null
+++ b/src/sfl.py
@@ -0,0 +1,138 @@
+
+from keras.preprocessing import image
+from keras.applications import vgg16
+from keras.applications.vgg16 import VGG16
+from keras.applications import inception_v3, mobilenet, xception
+from keras.models import load_model
+import matplotlib.pyplot as plt
+
+import argparse
+import os
+import numpy as np
+
+from utils import *
+from to_explain import *
+
+def main():
+  parser=argparse.ArgumentParser(description='To explain neural network decisions')
+  parser.add_argument(
+    '--model', dest='model', default='-1', help='the input neural network model (.h5)')
+  parser.add_argument("--inputs", dest="inputs", default="-1",
+                      help="the input test data directory", metavar="DIR")
+  parser.add_argument("--outputs", dest="outputs", default="outs",
+                      help="the output test data directory", metavar="DIR")
+  parser.add_argument("--measures", dest="measures", default=['tarantula', 'zoltar', 'ochiai', 'wong-ii'],
+                      help="the SBFL measures", metavar="zoltar, tarantula ...", nargs='+')
+  parser.add_argument("--measure", dest="measure", default="None",
+                      help="a single SBFL measure", metavar="zoltar, tarantula ...")
measure", metavar="zoltar, tarantula ...") + parser.add_argument("--mnist-dataset", dest="mnist", help="MNIST dataset", action="store_true") + parser.add_argument("--normalized-input", dest="normalized", help="To normalize the input", action="store_true") + parser.add_argument("--cifar10-dataset", dest="cifar10", help="CIFAR-10 dataset", action="store_true") + parser.add_argument("--grayscale", dest="grayscale", help="MNIST dataset", action="store_true") + parser.add_argument("--vgg16-model", dest='vgg16', help="vgg16 model", action="store_true") + parser.add_argument("--inception-v3-model", dest='inception_v3', help="inception v3 model", action="store_true") + parser.add_argument("--xception-model", dest='xception', help="Xception model", action="store_true") + parser.add_argument("--mobilenet-model", dest='mobilenet', help="mobilenet model", action="store_true") + parser.add_argument("--attack", dest='attack', help="to atatck", action="store_true") + parser.add_argument("--text-only", dest='text_only', help="for efficiency", action="store_true") + parser.add_argument("--input-rows", dest="img_rows", default="224", + help="input rows", metavar="INT") + parser.add_argument("--input-cols", dest="img_cols", default="224", + help="input cols", metavar="INT") + parser.add_argument("--input-channels", dest="img_channels", default="3", + help="input channels", metavar="INT") + parser.add_argument("--top-classes", dest="top_classes", default="1", + help="check the top-xx classifications", metavar="INT") + parser.add_argument("--adversarial-ub", dest="adv_ub", default="1.", + help="upper bound on the adversarial percentage (0, 1]", metavar="FLOAT") + parser.add_argument("--adversarial-lb", dest="adv_lb", default="0.", + help="lower bound on the adversarial percentage (0, 1]", metavar="FLOAT") + parser.add_argument("--adversarial-value", dest="adv_value", default="234", + help="adversarial value", metavar="FLOAT") + parser.add_argument("--testgen-factor", dest="testgen_factor", default="0.2", + help="test generation factor (0, 1]", metavar="FLOAT") + parser.add_argument("--testgen-size", dest="testgen_size", default="2000", + help="testgen size ", metavar="INT") + parser.add_argument("--testgen-iterations", dest="testgen_iter", default="1", + help="to control the testgen iteration", metavar="INT") + + args=parser.parse_args() + + img_rows, img_cols, img_channels = int(args.img_rows), int(args.img_cols), int(args.img_channels) + + ## some common used datasets + if args.mnist: + img_rows, img_cols, img_channels = 28, 28, 1 + elif args.cifar10: + img_rows, img_cols, img_channels = 32, 32, 3 + elif args.inception_v3 or args.xception: + img_rows, img_cols, img_channels = 299, 299, 3 + + ## to load the input DNN model + if args.model!='-1': + dnn=load_model(args.model) + elif args.vgg16: + print ('to load VGG16') + dnn=VGG16() + print ('done') + elif args.mobilenet: + dnn=mobilenet.MobileNet() + elif args.inception_v3: + dnn=inception_v3.InceptionV3() + elif args.xception: + dnn=xception.Xception() + else: + raise Exception ('A DNN model needs to be provided...') + + ## to load the input data + fnames=[] + xs=[] + if args.inputs!='-1': + for path, subdirs, files in os.walk(args.inputs): + for name in files: + fname=(os.path.join(path, name)) + if fname.endswith('.jpg') or fname.endswith('.png') or fname.endswith('.JPEG'): + if args.grayscale is True or args.mnist: + x=image.load_img(fname, target_size=(img_rows, img_cols), color_mode = "grayscale") + x=np.expand_dims(x,axis=2) + else: + 
+            x=image.load_img(fname, target_size=(img_rows, img_cols))
+          x=np.expand_dims(x,axis=0)
+          xs.append(x)
+          fnames.append(fname)
+  else:
+    raise Exception ('What do you want me to do?')
+  xs=np.vstack(xs)
+  xs=xs.reshape(xs.shape[0], img_rows, img_cols, img_channels)
+  print ('Total data loaded:', len(xs))
+
+  eobj=explain_objectt(dnn, xs)
+  eobj.outputs=args.outputs
+  eobj.top_classes=int(args.top_classes)
+  eobj.adv_ub=float(args.adv_ub)
+  eobj.adv_lb=float(args.adv_lb)
+  eobj.adv_value=float(args.adv_value)
+  eobj.testgen_factor=float(args.testgen_factor)
+  eobj.testgen_size=int(args.testgen_size)
+  eobj.testgen_iter=int(args.testgen_iter)
+  eobj.vgg16=args.vgg16
+  eobj.mnist=args.mnist
+  eobj.cifar10=args.cifar10
+  eobj.inception_v3=args.inception_v3
+  eobj.xception=args.xception
+  eobj.mobilenet=args.mobilenet
+  eobj.attack=args.attack
+  eobj.text_only=args.text_only
+  eobj.normalized=args.normalized
+  eobj.fnames=fnames
+  measures=[]
+  if not args.measure=='None':
+    measures.append(args.measure)
+  else: measures=args.measures
+  eobj.measures=measures
+
+  to_explain(eobj)
+
+if __name__=="__main__":
+  main()
diff --git a/src/spectra_gen.py b/src/spectra_gen.py
new file mode 100644
index 0000000..d286d6a
--- /dev/null
+++ b/src/spectra_gen.py
@@ -0,0 +1,138 @@
+import numpy as np
+from utils import *
+
+def spectra_sym_gen(eobj, x, y, adv_value=1, testgen_factor=.2, testgen_size=0):
+
+  v_type=type(adv_value)
+  model=eobj.model
+  failing=[]
+  passing=[]
+
+  #inputs=[]
+  sp=x.shape
+  x_flag=np.zeros(sp, dtype=bool)
+  portion=int(sp[0]*testgen_factor)
+  incr=1/6*portion
+  if portion<1: portion=1
+  L0=np.array(np.arange(x.size))
+  L0=np.reshape(L0, sp)
+
+  while (not np.all(x_flag)) or len(passing)+len(failing)<testgen_size:
[...]
+    while ite>1: #ite>0.01:
+      t2=x.copy()
+      #ite=ite-1#ite//2 #ite=(ite+0)/2
+      ite=int(ite-incr)
+      if ite<1: break
+      region=L0[ np.max([i0-ite,0]) : np.min([i0+ite, sp[0]]), np.max([i1-ite,0]):np.min([i1+ite,sp[1]])].flatten()
+
+      L=region #L0[0:portion]
+      if v_type==np.ndarray:
+        np.put(t, L, adv_value.take(L))
+      else:
+        np.put(t, L, adv_value)
+      x_flag.flat[L]=True #np.put(x, L, True)
+      new_y=np.argsort(model.predict(sbfl_preprocess(eobj, np.array([t]))))[0][-eobj.top_classes:]
+      #is_adv=(len(np.intersect1d(y, new_y))==0)
+      #ite-=0.01
+      #L2=L0[0:int(ite/testgen_factor*portion)]
+      #if v_type==np.ndarray:
+      #  np.put(t2, L2, adv_value.take(L2))
+      #else:
+      #  np.put(t2, L2, adv_value)
+      #new_y=np.argsort(model.predict(sbfl_preprocess(eobj, np.array([t2]))))[0][-eobj.top_classes:]
+      ##print (y, new_y)
+      if (len(np.intersect1d(y, new_y))!=0):
+        passing.append(t2)
+        break
+
+    else:
+      passing.append(t)
+
+    ## to find a failing
+    ite=h #testgen_factor
+    while ite<sp[0]:
+      t2=x.copy()
+      ite=int(ite+incr)
+      if ite>sp[0]/2: break
+      region=L0[ np.max([i0-ite,0]) : np.min([i0+ite, sp[0]]), np.max([i1-ite,0]):np.min([i1+ite,sp[1]])].flatten()
+
+      L=region #L0[0:portion]
+      if v_type==np.ndarray:
+        np.put(t, L, adv_value.take(L))
+      else:
+        np.put(t, L, adv_value)
+      x_flag.flat[L]=True #np.put(x, L, True)
+      new_y=np.argsort(model.predict(sbfl_preprocess(eobj, np.array([t]))))[0][-eobj.top_classes:]
+      #t2=x.copy()
+      #ite=(ite+1)/2
+      ##ite+=0.01
+      #L2=L0[0:int(ite/testgen_factor*portion)]
+      #if v_type==np.ndarray:
+      #  np.put(t2, L2, adv_value.take(L2))
+      #else:
+      #  np.put(t2, L2, adv_value)
+      #new_y=np.argsort(model.predict(sbfl_preprocess(eobj, np.array([t2]))))[0][-eobj.top_classes:]
+      if (len(np.intersect1d(y, new_y))==0):
+        failing.append(t2)
+        x_flag.flat[L]=True
+        break
+
+  return np.array(passing), np.array(failing)
+
+def spectra_gen(x, adv_value=1, testgen_factor=0.01, testgen_size=0):
+
+  #print (adv_value, testgen_factor, testgen_size)
+  v_type=type(adv_value)
+
+  inputs=[]
+  sp=x.shape
+  x_flag=np.zeros(sp, dtype=bool)
+  portion=int(x.size*testgen_factor) #int(x.size/sp[2]*testgen_factor)
+
+  while (not np.all(x_flag)) or len(inputs)<testgen_size:
[...]
+  while pos>=0:
+
+    ipos=np.unravel_index(ind[pos], sp)
+    if not im_flag[ipos]:
+      for k in range(0,sp[2]):
+        if type(bg_v)==np.ndarray:
+          im[ipos[0]][ipos[1]][k]=bg_v[ipos[0]][ipos[1]][k]
+        else:
+          im[ipos[0]][ipos[1]][k]=bg_v
+        im_flag[ipos[0]][ipos[1]][k]=True
+      count+=1
+
+    pos-=1
+
+    if count>5000: break
+
+    if count-old_count>=step_incr:
+      old_count=count
+
+      adv_v=model.predict(sbfl_preprocess(eobj, np.array([im])))
+      adv_y=np.argsort(adv_v)[0][-top_classes:]
+      if len(np.intersect1d(y, adv_y))==0:
+        #if np.sort(adv_v)[0][-top_classes:][0]>.5:
+        return im, count, np.sort(adv_v)[0][-top_classes:]
+
+  return x, x.size//sp[2], [None]
diff --git a/src/to_explain.py b/src/to_explain.py
new file mode 100644
index 0000000..e622911
--- /dev/null
+++ b/src/to_explain.py
@@ -0,0 +1,92 @@
+import numpy as np
+from spectra_gen import *
+from to_rank import *
+from utils import *
+from datetime import datetime
+from mask import *
+
+def to_explain(eobj):
+  print ('to explain...')
+  model=eobj.model
+  ## to create the output dir
+  di=eobj.outputs
+  try:
+    os.system('mkdir -p {0}'.format(di))
+    print ('mkdir -p {0}'.format(di))
+  except: pass
+
+  for i in range(0, len(eobj.inputs)):
+    print ('## Input ', i)
+    x=eobj.inputs[i]
+    res=model.predict(sbfl_preprocess(eobj, np.array([x])))
+    y=np.argsort(res)[0][-eobj.top_classes:]
+
+    print (eobj.fnames[i], '>>>>>>>>>>>>', 'Label:', y, 'Output:', res)
+
+    ite=0
+    reasonable_advs=False
+    while ite<eobj.testgen_iter:
[...]
+      if adv_part>=eobj.adv_ub:
+        print ('###### too many advs')
+        continue
+      else:
+        reasonable_advs=True
+        break
+
+    if not reasonable_advs:
+      print ('###### failed to explain')
+      continue
+
+    ## to obtain the ranking for Input i
+    selement=sbfl_elementt(x, 0, adv_xs, adv_ys, model)
+    dii=di+'/{0}'.format(str(datetime.now()).replace(' ', '-'))
+    dii=dii.replace(':', '-')
+    os.system('mkdir -p {0}'.format(dii))
+    for measure in eobj.measures:
+      ranking_i, spectrum=to_rank(selement, measure)
+      selement.y=y
+      diii=dii+'/{0}'.format(measure)
+      os.system('mkdir -p {0}'.format(diii))
+      np.savetxt(diii+'/ranking.txt', ranking_i, fmt='%s')
+
+      # to plot the heatmap
+      spectrum=np.array((spectrum/spectrum.max())*255)
+      gray_img=np.array(spectrum[:,:,0], dtype='uint8')
+      #print (gray_img)
+      heatmap_img=cv2.applyColorMap(gray_img, cv2.COLORMAP_JET)
+      if x.shape[2]==1:
+        x3d=np.repeat(x[:, :, 0][:, :, np.newaxis], 3, axis=2)
+      else: x3d=x
+      fin=cv2.addWeighted(heatmap_img, 0.7, x3d, 0.3, 0)
+      plt.rcParams["axes.grid"]=False
+      plt.imshow(cv2.cvtColor(fin, cv2.COLOR_BGR2RGB))
+      plt.savefig(diii+'/heatmap_{0}.png'.format(measure))
+
+      # to plot the top ranked pixels
+      if not eobj.text_only:
+        top_plot(selement, ranking_i, diii, measure, eobj)
diff --git a/src/to_rank.py b/src/to_rank.py
new file mode 100644
index 0000000..144b827
--- /dev/null
+++ b/src/to_rank.py
@@ -0,0 +1,123 @@
+import numpy as np
+from utils import *
+
+def to_rank(sbfl_element, metric='zoltar'):
+  origin_data=sbfl_element.x
+  sp=origin_data.shape
+  ef=np.zeros(sp, dtype=float)
+  nf=np.zeros(sp, dtype=float)
+  ep=np.zeros(sp, dtype=float)
+  np_=np.zeros(sp, dtype=float)
+
+  xs=np.array(sbfl_element.xs)
+
+  diffs=np.abs(xs-origin_data)
+  #diffs=diffs - (1+0.05 * origin_data)
+  #diffs[diffs>0]=0
+
+  for i in range(0, len(diffs)):
+    is_adv=(sbfl_element.y!=sbfl_element.ys[i])
+    ds_i1=diffs[i].copy()
+    ds_i1[ds_i1>0]=1
+    ds_i2=diffs[i].copy()
+    ds_i2[ds_i2>0]=-1
+    ds_i2[ds_i2==0]=+1
+    ds_i2[ds_i2==-1]=0
+    if is_adv:
+      ef=ef+ds_i1
+      nf=nf+ds_i2
+      #ef=ef+ds_i2
+      #nf=nf+ds_i1
+      #for index, _ in np.ndenumerate(diffs[i]):
+      #  flag=diffs[i][index]>0
+      #  if flag:
+      #    ef[index]+=1
+      #  else:
+      #    nf[index]+=1
+    else:
+      ep=ep+ds_i1
+      np_=np_+ds_i2
+      #ep=ep+ds_i2
+      #np_=np_+ds_i1
+      #for index, _ in np.ndenumerate(diffs[i]):
+      #  flag=diffs[i][index]>0
+      #  if flag:
+      #    ep[index]+=1
+      #  else:
+      #    np_[index]+=1
+
+  ind=None
+  spectrum=None
+  if metric=='random':
+    spectrum=np.random.rand(sp[0], sp[1], sp[2])
+  elif metric=='zoltar':
+    zoltar=np.zeros(sp, dtype=float)
+    for index, x in np.ndenumerate(origin_data):
+      aef=ef[index]
+      anf=nf[index]
+      anp=np_[index]
+      aep=ep[index]
+      if aef==0:
+        zoltar[index]=0
+      else:
+        k=(10000.0*anf*aep)/aef
+        zoltar[index]=(aef*1.0)/(aef+anf+aep+k)
+    spectrum=zoltar
+  elif metric=='wong-ii':
+    wong=np.zeros(sp, dtype=float)
+    for index, x in np.ndenumerate(origin_data):
+      aef=ef[index]
+      anf=nf[index]
+      anp=np_[index]
+      aep=ep[index]
+      wong[index]=aef-aep
+    spectrum=wong
+  elif metric=='ochiai':
+    ochiai=np.zeros(sp, dtype=float)
+    for index, x in np.ndenumerate(origin_data):
+      aef=ef[index]
+      anf=nf[index]
+      anp=np_[index]
+      aep=ep[index]
+      try:
+        ochiai[index]=aef/np.sqrt((aef+anf)*(aef+aep))
+      except: ochiai[index]=0
+    spectrum=ochiai
+  elif metric=='tarantula':
+    tarantula=np.zeros(sp, dtype=float)
+    for index, x in np.ndenumerate(origin_data):
+      aef=ef[index]
+      anf=nf[index]
+      anp=np_[index]
+      aep=ep[index]
+      try: tarantula[index]=(aef/(aef+anf))/(aef/(aef+anf)+anp/(aep+anp))
+      except: tarantula[index]=0
+    spectrum=tarantula
+  else:
+    raise Exception('The measure is not supported: {0}'.format(metric))
+
+  spectrum_flags=np.zeros(sp, dtype=bool)
+  for iindex, _ in np.ndenumerate(spectrum):
+    tot=0
+    for j in range(0, (sp[2])):
+      if not spectrum_flags[iindex[0]][iindex[1]][j]:
+        tot+=spectrum[iindex[0]][iindex[1]][j]
+    for j in range(0, (sp[2])):
+      if not spectrum_flags[iindex[0]][iindex[1]][j]:
+        spectrum_flags[iindex[0]][iindex[1]][j]=True
+        spectrum[iindex[0]][iindex[1]][j]=tot
+
+  # to smooth
+  smooth = np.ones(spectrum.shape)
+  sI = spectrum.shape[0]
+  sJ = spectrum.shape[1]
+  sd = int(sI*(10./224))
+  for si in range(0, spectrum.shape[0]):
+    for sj in range(0, spectrum.shape[1]):
+      for sk in range(0, spectrum.shape[2]):
+        smooth[si][sj][sk] = np.mean(spectrum[np.max([0, si-sd]):np.min([sI, si+sd]), np.max([0,sj-sd]):np.min([sJ, sj+sd]), sk])
+  spectrum = smooth
+
+  ind=np.argsort(spectrum, axis=None)
+
+  return ind, spectrum
diff --git a/src/to_restore.py b/src/to_restore.py
new file mode 100644
index 0000000..3f3fc44
--- /dev/null
+++ b/src/to_restore.py
@@ -0,0 +1,52 @@
+
+import numpy as np
+from utils import *
+from sbfl import *
+
+def to_restore(eobj, ind, origin_data, y, bg_v, init_step, step_incr):
+
+  v_type=type(bg_v)
+
+  top_classes=eobj.top_classes
+  sp=origin_data.shape
+
+  x=origin_data
+  model=eobj.model
+  #y=np.argsort(model.predict(sbfl_preprocess(eobj,np.array([x]))))[0][-top_classes:]
+
+  latest_step=ind.size
+
+  im=np.ones(sp)
+  im=np.multiply(im, bg_v)
+  im_flag=np.zeros(im.shape, dtype=bool)
+
+  pos=ind.size-1
+  old_count=1
+  count=1
+
+  adv_v=0
+
+  while pos>=0:
+
+    ipos=np.unravel_index(ind[pos], sp)
+    if not im_flag[ipos]:
+      for k in range(0,sp[2]):
+        im[ipos[0]][ipos[1]][k]=x[ipos[0]][ipos[1]][k]
+        im_flag[ipos[0]][ipos[1]][k]=True
+      count+=1
+
+    pos-=1
+
+    if count<init_step: continue
+
+    if count-old_count>=step_incr:
+      old_count=count
+
+      adv_v=model.predict(sbfl_preprocess(eobj, np.array([im])))
+      adv_y=np.argsort(adv_v)[0][-top_classes:]
+      if len(np.intersect1d(y, adv_y))!=0:
+        #if np.sort(adv_v)[0][-top_classes:][0]>.5:
+        return im, count, np.sort(adv_v)[0][-top_classes:]
+
+  return x, x.size//sp[2], [None]
diff --git a/src/utils.py b/src/utils.py
new file mode 100644
index 0000000..6515b90
--- /dev/null
+++ b/src/utils.py
@@ -0,0 +1,117 @@
+#import matplotlib.pyplot as plt
+from keras import *
+from keras import backend as K
+import numpy as np
+from PIL import Image
+import copy
+import sys, os
+import cv2
+import matplotlib
+import matplotlib.pyplot as plt
+from keras.preprocessing.image import save_img
+from keras.applications import vgg16
+from keras.applications import inception_v3, mobilenet, xception
+
+class explain_objectt:
+  def __init__(self, model, inputs):
+    self.model=model
+    self.inputs=inputs
+    self.outputs=None
+    self.top_classes=None
+    self.adv_ub=None
+    self.adv_lb=None
+    self.adv_value=None
+    self.testgen_factor=None
+    self.testgen_size=None
+    self.testgen_iter=None
+    self.vgg16=None
+    self.mnist=None
+    self.cifar10=None
+    self.inception_v3=None
+    self.xception=None
+    self.mobilenet=None
+    self.attack=None
+    self.text_only=None
+    self.measures=None
+    self.normalized=None
+    self.fnames=[]
+
+
+class sbfl_elementt:
+  def __init__(self, x, y, xs, ys, model, adv_part=None):
+    self.x=x
+    self.y=y
+    self.xs=xs
+    self.ys=ys
+    self.model=model
+    self.adv_part=adv_part
+
+# Yield successive n-sized
+# chunks from l.
+def divide_chunks(l, n):
+  # looping till length l
+  for i in range(0, len(l), n):
+    yield l[i:i + n]
+
+def arr_to_str(inp):
+  ret=inp[0]
+  for i in range(1, len(inp)):
+    ret+=' '
+    ret+=inp[i]
+  return ret
+
+def sbfl_preprocess(eobj, chunk):
+  x=chunk.copy()
+  if eobj.vgg16 is True:
+    x=vgg16.preprocess_input(x)
+  elif eobj.inception_v3 is True:
+    x=inception_v3.preprocess_input(x)
+  elif eobj.xception is True:
+    x=xception.preprocess_input(x)
+  elif eobj.mobilenet is True:
+    x=mobilenet.preprocess_input(x)
+  elif eobj.normalized is True:
+    x=x/255.
+  elif eobj.mnist is True or eobj.cifar10 is True:
+    x=x/255.
+  return x
+
+def save_an_image(im, title, di='./'):
+  if not di.endswith('/'):
+    di+='/'
+  save_img((di+title+'.jpg'), im)
+
+def top_plot(sbfl_element, ind, di, metric='', eobj=None, bg=128, online=False, online_mark=[255,0,255]):
+  origin_data=sbfl_element.x
+  sp=origin_data.shape
+
+  try:
+    print ('mkdir -p {0}'.format(di))
+    os.system('mkdir -p {0}'.format(di))
+  except: pass
+
+  save_an_image(origin_data, 'origin-{0}'.format(sbfl_element.y), di)
+
+  im_flag=np.zeros(sp, dtype=bool)
+  im_o=np.multiply(np.ones(sp), bg)
+  count=0
+  base=int((ind.size/sp[2])/100)
+  pos=ind.size-1
+  found_exp=False
+  while pos>=0:
+    ipos=np.unravel_index(ind[pos], sp)
+    if not im_flag[ipos]:
+      for k in range(0,sp[2]):
+        im_o[ipos[0]][ipos[1]][k]=origin_data[ipos[0]][ipos[1]][k]
+        im_flag[ipos[0]][ipos[1]][k]=True
+      count+=1
+      if count%base==0:
+        save_an_image(im_o, '{1}-{0}'.format(int(count/base), metric), di)
+        res=sbfl_element.model.predict(sbfl_preprocess(eobj, np.array([im_o])))
+        y=np.argsort(res)[0][-eobj.top_classes:]
+        #print (int(count/base), '>>>', y, sbfl_element.y, y==sbfl_element.y)
+        if y==sbfl_element.y and not found_exp:
+          save_an_image(im_o, 'explanation-found-{1}-{0}'.format(int(count/base), metric), di)
+          found_exp=True
+    pos-=1
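
For reviewers who want to exercise the new explanation pipeline without going through the sfl.py command line, the sketch below mirrors what main() in src/sfl.py does, using only code added in this diff (explain_objectt from utils.py and to_explain from to_explain.py). The model path, the sample image, and the MNIST-sized 28x28x1 input shape are placeholders, not part of the change; it assumes src/ is on PYTHONPATH and the same Keras/OpenCV stack the new modules import.

# minimal driver, mirroring src/sfl.py main(); paths and shapes below are illustrative only
import numpy as np
from keras.models import load_model
from keras.preprocessing import image
from utils import explain_objectt
from to_explain import to_explain

dnn=load_model('mnist.h5')                    # placeholder model file
img=image.load_img('sample.png', target_size=(28, 28), color_mode='grayscale')
x=np.expand_dims(np.expand_dims(img, axis=2), axis=0)   # -> shape (1, 28, 28, 1), as in sfl.py

eobj=explain_objectt(dnn, x)
eobj.outputs='outs'
eobj.top_classes=1
eobj.adv_ub, eobj.adv_lb, eobj.adv_value=1., 0., 234.
eobj.testgen_factor, eobj.testgen_size, eobj.testgen_iter=0.2, 2000, 1
eobj.mnist=True                               # selects the /255. branch in sbfl_preprocess
eobj.vgg16=eobj.cifar10=eobj.inception_v3=eobj.xception=eobj.mobilenet=False
eobj.attack=eobj.text_only=eobj.normalized=False
eobj.fnames=['sample.png']
eobj.measures=['zoltar']                      # any of: tarantula, zoltar, ochiai, wong-ii, random

to_explain(eobj)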