diff --git a/Multi_CAM.py b/Multi_CAM.py index 5dee135..7f5346e 100644 --- a/Multi_CAM.py +++ b/Multi_CAM.py @@ -21,8 +21,8 @@ parser = argparse.ArgumentParser() parser.add_argument('--models', type=str, default='resnet50', - help='resnet50 or vgg16 or vgg19') -parser.add_argument('--target_layer', type=str, default='layer2', + help='resnet50') +parser.add_argument('--target_layer', type=str, default='layer4', help='target_layer') parser.add_argument('--target_class', type=int, default=None, help='target_class') @@ -33,14 +33,8 @@ ########################################################################################################################### model_arch = args.models -if model_arch == 'vgg16': - model = vgg16_bn(pretrained=True).cuda().eval() ##### - target_layer = model.features[int(args.target_layer)] -elif model_arch == 'vgg19': - model = vgg19_bn(pretrained=True).cuda().eval() ##### - target_layer = model.features[int(args.target_layer)] -elif model_arch == 'resnet50': - model = resnet50(pretrained=True).cuda().eval() ##### +if model_arch == 'resnet50': + model = resnet50(pretrained=True) ##### if args.target_layer == 'layer1': target_layer = model.layer1 elif args.target_layer == 'layer2': @@ -49,6 +43,10 @@ target_layer = model.layer3 elif args.target_layer == 'layer4': target_layer = model.layer4 + +if torch.cuda.is_available(): + model = model.cuda() +model.eval() ####################################################################################################################### value = dict() @@ -60,119 +58,109 @@ def backward_hook(module, input, output): target_layer.register_forward_hook(forward_hook) target_layer.register_backward_hook(backward_hook) -Score_CAM_class = ScoreCAM(model,target_layer) - -path_s = os.listdir('./picture') - -for path in path_s: - img_path_long = './picture/{}'.format(path) - img = cv2.imread(img_path_long,1) - img_show = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) - img_show = cv2.resize(img_show,(224,224)) - img = np.float32(cv2.resize(img, (224,224)))/255 - - in_tensor = preprocess_image(img).cuda() - R_CAM, output = model(in_tensor, args.target_layer, [args.target_class]) - - if args.target_class == None: - maxindex = np.argmax(output.data.cpu().numpy()) - else: - maxindex = args.target_class - - print(index2class[maxindex]) - save_path = './results/{}_{}'.format(index2class[maxindex][:10], img_path_long.split('/')[-1]) - - output[:, maxindex].sum().backward(retain_graph=True) - activation = value['activations'] # [1, 2048, 7, 7] - gradient = value['gradients'] # [1, 2048, 7, 7] - gradient_2 = gradient ** 2 - gradient_3 = gradient ** 3 - - gradient_ = torch.mean(gradient, dim=(2, 3), keepdim=True) - grad_cam = activation * gradient_ - grad_cam = torch.sum(grad_cam, dim=(0, 1)) - grad_cam = torch.clamp(grad_cam, min=0) - grad_cam = grad_cam.data.cpu().numpy() - grad_cam = cv2.resize(grad_cam, (224, 224)) - - - alpha_numer = gradient_2 - alpha_denom = 2 * gradient_2 + torch.sum(activation * gradient_3, axis=(2, 3), keepdims=True) # + 1e-2 - alpha = alpha_numer / alpha_denom - w = torch.sum(alpha * torch.clamp(gradient, 0), axis=(2, 3), keepdims=True) - grad_campp = activation * w - grad_campp = torch.sum(grad_campp, dim=(0, 1)) - grad_campp = torch.clamp(grad_campp, min=0) - grad_campp = grad_campp.data.cpu().numpy() - grad_campp = cv2.resize(grad_campp, (224, 224)) - - - score_map, _ = Score_CAM_class(in_tensor, class_idx=maxindex) - score_map = score_map.squeeze() - score_map = score_map.detach().cpu().numpy() - R_CAM = tensor2image(R_CAM) - 
- fig = plt.figure(figsize=(10, 10)) - plt.subplots_adjust(bottom=0.01) - - plt.subplot(2, 5, 1) - plt.imshow(img_show) - plt.title('Original') - plt.axis('off') - - plt.subplot(2, 5, 1 + 5) - plt.imshow(img_show) - plt.axis('off') - - plt.subplot(2, 5, 2) - plt.imshow((grad_cam),cmap='seismic') - plt.imshow(img_show, alpha=.5) - plt.title('Grad CAM', fontsize=15) - plt.axis('off') - - plt.subplot(2, 5, 2 + 5) - plt.imshow(img_show*threshold(grad_cam)[...,np.newaxis]) - plt.title('Grad CAM', fontsize=15) - plt.axis('off') - - plt.subplot(2, 5, 3) - plt.imshow((grad_campp),cmap='seismic') - plt.imshow(img_show, alpha=.5) - plt.title('Grad CAM++', fontsize=15) - plt.axis('off') - - plt.subplot(2, 5, 3 + 5) - plt.imshow(img_show*threshold(grad_campp)[...,np.newaxis]) - plt.title('Grad CAM++', fontsize=15) - plt.axis('off') - - plt.subplot(2, 5, 4) - plt.imshow((score_map),cmap='seismic') - plt.imshow(img_show, alpha=.5) - plt.title('Score_CAM', fontsize=15) - plt.axis('off') - plt.subplot(2, 5, 4 + 5) - plt.imshow(img_show*threshold(score_map)[...,np.newaxis]) - plt.title('Score_CAM', fontsize=15) - plt.axis('off') +# path_s = os.listdir('./picture') +path_s = os.listdir('./sample-imagenet') - plt.subplot(2, 5, 5) - plt.imshow((R_CAM),cmap='seismic') - plt.imshow(img_show, alpha=.5) - plt.title('Relevance_CAM', fontsize=15) +def save_cam(cam, image, save_path): + # save cam + plt.imshow((cam),cmap='seismic') + plt.imshow(image, alpha=.5) plt.axis('off') + plt.tight_layout() + plt.draw() + # plt.show() + plt.savefig(save_path, bbox_inches='tight') + plt.clf() + plt.close() - plt.subplot(2, 5, 5 + 5) - plt.imshow(img_show*threshold(R_CAM)[...,np.newaxis]) - plt.title('Relevance_CAM', fontsize=15) + # save segmentation + plt.imshow(image*threshold(cam)[...,np.newaxis]) plt.axis('off') - plt.tight_layout() plt.draw() - # plt.waitforbuttonpress() - plt.savefig(save_path) + plt.savefig(save_path+'_seg', bbox_inches='tight') plt.clf() plt.close() -print('Done') \ No newline at end of file +for k, path in enumerate(path_s[:200]): + try: + img_path_long = './sample-imagenet/{}'.format(path) + img = cv2.imread(img_path_long,1) + img_show = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) + img_show = cv2.resize(img_show,(224,224)) + img = np.float32(cv2.resize(img, (224,224)))/255 + + in_tensor = preprocess_image(img).cuda() if torch.cuda.is_available() else preprocess_image(img) + XR_CAM, R_CAM, output = model(in_tensor, args.target_layer, [args.target_class]) + + if args.target_class == None: + maxindex = np.argmax(output.data.cpu().numpy()) + else: + maxindex = args.target_class + + print('{}/{} - {}'.format(k, len(path_s[:200]), index2class[maxindex])) + output[:, maxindex].sum().backward(retain_graph=True) + activation = value['activations'] # [1, 2048, 7, 7] + gradient = value['gradients'] # [1, 2048, 7, 7] + gradient_2 = gradient ** 2 + gradient_3 = gradient ** 3 + + # grad-cam + gradient_ = torch.mean(gradient, dim=(2, 3), keepdim=True) + grad_cam = activation * gradient_ + grad_cam = torch.sum(grad_cam, dim=(0, 1)) + grad_cam = torch.clamp(grad_cam, min=0) + grad_cam = grad_cam.data.cpu().numpy() + grad_cam = cv2.resize(grad_cam, (224, 224)) + + # xgrad-cam + w = (gradient*activation) / torch.sum(activation, dim=(2,3), keepdim=True).add(1e-8) + w = torch.sum(w, dim=(2,3), keepdim=True) + xgrad_cam = activation * w + xgrad_cam = torch.sum(xgrad_cam, dim=(0,1)) + xgrad_cam = torch.clamp(xgrad_cam, min=0) + xgrad_cam = xgrad_cam.data.cpu().numpy() + xgrad_cam = cv2.resize(xgrad_cam, (224, 224)) + + # 
grad-cam++
+        alpha_numer = gradient_2
+        alpha_denom = 2 * gradient_2 + torch.sum(activation * gradient_3, axis=(2, 3), keepdims=True)  # + 1e-2
+        alpha = alpha_numer / alpha_denom
+        w = torch.sum(alpha * torch.clamp(gradient, 0), axis=(2, 3), keepdims=True)
+        grad_campp = activation * w
+        grad_campp = torch.sum(grad_campp, dim=(0, 1))
+        grad_campp = torch.clamp(grad_campp, min=0)
+        grad_campp = grad_campp.data.cpu().numpy()
+        grad_campp = cv2.resize(grad_campp, (224, 224))
+
+        # xrelevance-cam
+        XR_CAM = tensor2image(XR_CAM)
+
+        # relevance-cam
+        R_CAM = tensor2image(R_CAM)
+
+        # create the output directory if it does not exist yet
+        save_path_parent_dir = './results-sample-imagenet/{}/{}'.format(img_path_long.split('/')[-1], args.target_layer)
+        if not os.path.exists(save_path_parent_dir):
+            os.makedirs(save_path_parent_dir)
+
+        # save the CAMs
+        save_path_relevance_cam = './results-sample-imagenet/{}/{}/{}'.format(img_path_long.split('/')[-1], args.target_layer, 'RelevanceCAM')
+        save_path_xrelevance_cam = './results-sample-imagenet/{}/{}/{}'.format(img_path_long.split('/')[-1], args.target_layer, 'XRelevanceCAM')
+        save_path_xgrad_cam = './results-sample-imagenet/{}/{}/{}'.format(img_path_long.split('/')[-1], args.target_layer, 'XGradCAM')
+        save_path_grad_cam = './results-sample-imagenet/{}/{}/{}'.format(img_path_long.split('/')[-1], args.target_layer, 'GradCAM')
+        save_path_gradcam_pp = './results-sample-imagenet/{}/{}/{}'.format(img_path_long.split('/')[-1], args.target_layer, 'GradCAM++')
+
+        save_cam(R_CAM, img_show, save_path_relevance_cam)
+        save_cam(XR_CAM, img_show, save_path_xrelevance_cam)
+        save_cam(xgrad_cam, img_show, save_path_xgrad_cam)
+        save_cam(grad_campp, img_show, save_path_gradcam_pp)
+        save_cam(grad_cam, img_show, save_path_grad_cam)
+    except Exception as e:
+        print('skipping {}: {}'.format(path, e))
+
+print('Done')
+
+
+
diff --git a/images/Comparison.jpg b/images/Comparison.jpg
deleted file mode 100644
index 7bdfe56..0000000
Binary files a/images/Comparison.jpg and /dev/null differ
diff --git a/images/R_CAM_pipeline.jpg b/images/R_CAM_pipeline.jpg
deleted file mode 100644
index cfd2289..0000000
Binary files a/images/R_CAM_pipeline.jpg and /dev/null differ
diff --git a/modules/resnet.py b/modules/resnet.py
index e85512e..a8d068c 100644
--- a/modules/resnet.py
+++ b/modules/resnet.py
@@ -1,7 +1,8 @@
 import torch.nn as nn
 import torch.utils.model_zoo as model_zoo
+import numpy as np
 
-from modules.layers import *
+# from layers import *
 import torch
 
 __all__ = ['ResNet', 'resnet18', 'resnet34', 'resnet50', 'resnet101', 'resnet152']
@@ -19,7 +20,7 @@
 def conv3x3(in_planes, out_planes, stride=1):
     """3x3 convolution with padding"""
     return Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
-                     padding=1, bias=False)
+                  padding=1, bias=False)
 
 
 def conv1x1(in_planes, out_planes, stride=1):
@@ -281,58 +282,96 @@ def _make_layer(self, block, planes, blocks, stride=1):
     def CLRP(self, x, maxindex = [None]):
         if maxindex == [None]:
             maxindex = torch.argmax(x, dim=1)
-        R = torch.ones(x.shape).cuda()
+        R = torch.ones(x.shape).cuda() if torch.cuda.is_available() else torch.ones(x.shape)
         R /= -self.num_classes
         for i in range(R.size(0)):
             R[i, maxindex[i]] = 1
         return R
 
-    def forward(self, x, mode='output', target_class = [None]):
-
+    def forward(self, x, mode='output', target_class = [None], xMode=False):  # xMode is currently unused; both CAM variants are always computed
+
+        # stem: generic feature convolutions that run before the residual blocks
         x = self.conv1(x)
         x = self.bn1(x)
         x = self.relu(x)
         x = self.maxpool(x)
 
+        # block-wise convolution stages
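+        # for a 224x224 input, ResNet-50's four stages below yield feature maps
+        # of 56x56/256ch (layer1), 28x28/512ch (layer2), 14x14/1024ch (layer3)
+        # and 7x7/2048ch (layer4), hence the [1, 2048, 7, 7] activations hooked
+        # in Multi_CAM.py when target_layer is layer4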
layer1 = self.layer1(x)
         layer2 = self.layer2(layer1)
         layer3 = self.layer3(layer2)
         layer4 = self.layer4(layer3)
 
+        # global average pooling and flattening before the linear classifier
         x = self.avgpool(layer4)
         x = x.view(x.size(0), -1)
         z = self.fc(x)
 
         if mode == 'output':
             return z
 
+        # start the CLRP propagation at the classifier and walk back toward the input, layer by layer (the inverse of the forward pass above)
         R = self.CLRP(z, target_class)
         R = self.fc.relprop(R, 1)
         R = R.reshape_as(self.avgpool.Y)
         R4 = self.avgpool.relprop(R, 1)
 
-        if mode == 'layer4':
-            r_weight4 = torch.mean(R4, dim=(2, 3), keepdim=True)
-            r_cam4 = layer4 * r_weight4
-            r_cam4 = torch.sum(r_cam4, dim=(1), keepdim=True)
-            return r_cam4, z
+        def generate_multi_relevance_cams(rel, layer_acts):
+            """generate both the xrelevance-cam and the relevance-cam"""
+            xrel_weights = self._compute_weights(rel, layer_acts, True)
+            xrel_cam = layer_acts * xrel_weights
+            xrel_cam = torch.sum(xrel_cam, dim=(1), keepdim=True)
+
+            rel_weights = self._compute_weights(rel, layer_acts, False)
+            rel_cam = layer_acts * rel_weights
+            rel_cam = torch.sum(rel_cam, dim=(1), keepdim=True)
+
+            return xrel_cam, rel_cam
 
+        if mode == 'layer4':
+            xr_cam4, r_cam4 = generate_multi_relevance_cams(R4, layer4)
+            return xr_cam4, r_cam4, z
         elif mode == 'layer3':
             R3 = self.layer4.relprop(R4, 1)
-            r_weight3 = torch.mean(R3, dim=(2, 3), keepdim=True)
-            r_cam3 = layer3 * r_weight3
-            r_cam3 = torch.sum(r_cam3, dim=(1), keepdim=True)
-            return r_cam3, z
+            xr_cam3, r_cam3 = generate_multi_relevance_cams(R3, layer3)
+            return xr_cam3, r_cam3, z
         elif mode == 'layer2':
             R3 = self.layer4.relprop(R4, 1)
             R2 = self.layer3.relprop(R3, 1)
-            r_weight2 = torch.mean(R2, dim=(2, 3), keepdim=True)
-            r_cam2 = layer2 * r_weight2
-            r_cam2 = torch.sum(r_cam2, dim=(1), keepdim=True)
-            return r_cam2, z
-        else:
-            return z
+            xr_cam2, r_cam2 = generate_multi_relevance_cams(R2, layer2)
+            return xr_cam2, r_cam2, z
+        elif mode == 'layer1':
+            R3 = self.layer4.relprop(R4, 1)
+            R2 = self.layer3.relprop(R3, 1)
+            R1 = self.layer2.relprop(R2, 1)
+
+            xr_cam1, r_cam1 = generate_multi_relevance_cams(R1, layer1)
+            return xr_cam1, r_cam1, z
 
+    def _XRelevanceCAM(self, R, activations):  # XRelevanceCAM
+        """XRelevance-CAM weighting: the strongest variant among those tried here,
+        even though the raw maps can look visually noisier than Relevance-CAM's.
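+        The channel weight is the relevance normalized by the channel's total
+        activation (with a 1e-7 stabilizer) and then summed over the spatial
+        dimensions; see _compute_weights for the switch between the two schemes.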
+ """ + try: + R = R.cpu().detach().numpy() + activations = activations.cpu().detach().numpy() + except: + R = R.detach().numpy() + activations = activations.detach().numpy() + weights = R / (np.sum(activations, axis=(2, 3), keepdims=True) + 1e-7) # per channel division operation + + weights = np.sum(weights, axis=(2, 3), keepdims=True) + return torch.tensor(weights, device='cuda' if torch.cuda.is_available() else 'cpu') + + def _compute_weights(self, R, activations, xMode): + # xrelevance + if xMode: + return self._XRelevanceCAM(R, activations) + + # relevance + return torch.mean(R, dim=(2, 3), keepdim=True) def relprop(self, R, alpha, flag = 'inter'): + # this is the function that perform clrp propagation all the way to the first convolution, might not needed if self.long: R = self.fc.relprop(R, alpha) R = R.reshape_as(self.avgpool.Y) @@ -481,4 +520,828 @@ def resnet152(pretrained=False, **kwargs): # kwargs['groups'] = 32 # kwargs['width_per_group'] = 8 # return _resnet('resnext101_32x8d', Bottleneck, [3, 4, 23, 3], -# pretrained, progress, **kwargs) \ No newline at end of file +# pretrained, progress, **kwargs) + +import torch +import torch.nn as nn +import torch.nn.functional as F +from functools import partial +import math + +__all__ = ['forward_hook', 'Clone', 'Add', 'Cat', 'ReLU', 'Dropout', 'BatchNorm2d', 'Linear', 'MaxPool2d', + 'AdaptiveAvgPool2d', 'AvgPool2d', 'Conv2d', 'Sequential', 'safe_divide'] + +def safe_divide(a, b): + return a / (b + b.eq(0).type(b.type()) * 1e-9) * b.ne(0).type(b.type()) + +def forward_hook(self, input, output): + if type(input[0]) in (list, tuple): + self.X = [] + for i in input[0]: + x = i.detach() + x.requires_grad = True + self.X.append(x) + else: + self.X = input[0].detach() + self.X.requires_grad = True + + self.Y = output + +def backward_hook(self, grad_input, grad_output): + self.grad_input = grad_input + self.grad_output = grad_output + +class RelProp(nn.Module): + def __init__(self): + super(RelProp, self).__init__() + # if not self.training: + self.register_forward_hook(forward_hook) + + def gradprop(self, Z, X, S): + C = torch.autograd.grad(Z, X, S, retain_graph=True) + return C + + def relprop(self, R, alpha = 1): + return R + def m_relprop(self, R,pred, alpha = 1): + return R + def RAP_relprop(self, R_p): + return R_p + +# An ordinary implementation of Swish function +class Swish(RelProp): + def forward(self, x): + return x * torch.sigmoid(x) + +# A memory-efficient implementation of Swish function +class SwishImplementation(torch.autograd.Function): + @staticmethod + def forward(ctx, i): + result = i * torch.sigmoid(i) + ctx.save_for_backward(i) + return result + + @staticmethod + def backward(ctx, grad_output): + i = ctx.saved_tensors[0] + sigmoid_i = torch.sigmoid(i) + return grad_output * (sigmoid_i * (1 + i * (1 - sigmoid_i))) + +class MemoryEfficientSwish(RelProp): + def forward(self, x): + return SwishImplementation.apply(x) + + +class RelPropSimple(RelProp): + def relprop(self, R, alpha = 1): + Z = self.forward(self.X) + S = safe_divide(R, Z) + C = self.gradprop(Z, self.X, S) + + if torch.is_tensor(self.X) == False: + outputs = [] + outputs.append(self.X[0] * C[0]) + outputs.append(self.X[1] * C[1]) + else: + outputs = self.X * C[0] + return outputs + + def RAP_relprop(self, R_p): + def backward(R_p): + Z = self.forward(self.X) + Sp = safe_divide(R_p, Z) + + Cp = self.gradprop(Z, self.X, Sp)[0] + if torch.is_tensor(self.X) == False: + Rp = [] + Rp.append(self.X[0] * Cp) + Rp.append(self.X[1] * Cp) + else: + Rp = self.X * (Cp) + 
return Rp
+        if torch.is_tensor(R_p) == False:
+            idx = len(R_p)
+            tmp_R_p = R_p
+            Rp = []
+            for i in range(idx):
+                Rp_tmp = backward(tmp_R_p[i])
+                Rp.append(Rp_tmp)
+        else:
+            Rp = backward(R_p)
+        return Rp
+
+
+class Identity(nn.Identity, RelProp):
+    pass
+
+class ReLU(nn.ReLU, RelProp):
+    pass
+
+class LeakyReLU(nn.LeakyReLU, RelProp):
+    pass
+
+class Dropout(nn.Dropout, RelProp):
+    pass
+
+class MaxPool2d(nn.MaxPool2d, RelPropSimple):
+    pass
+
+
+class AdaptiveAvgPool2d(nn.AdaptiveAvgPool2d, RelProp):
+    def relprop(self, R, alpha=1):
+        px = torch.clamp(self.X, min=0)
+
+        def f(x1):
+            Z1 = F.adaptive_avg_pool2d(x1, self.output_size)
+
+            S1 = safe_divide(R, Z1)
+
+            C1 = x1 * self.gradprop(Z1, x1, S1)[0]
+
+            return C1
+
+        activator_relevances = f(px)
+
+        out = activator_relevances
+
+        return out
+
+class ZeroPad2d(nn.ZeroPad2d, RelPropSimple):
+    def relprop(self, R, alpha=1):
+        Z = self.forward(self.X)
+        S = safe_divide(R, Z)
+        C = self.gradprop(Z, self.X, S)
+        outputs = self.X * C[0]
+        return outputs
+
+class AvgPool2d(nn.AvgPool2d, RelPropSimple):
+    pass
+
+
+class Add(RelPropSimple):
+    def forward(self, inputs):
+        return torch.add(*inputs)
+
+'''TODO: think this part over'''
+class Multiply(RelPropSimple):
+    def forward(self, inputs):
+        return torch.mul(*inputs)
+
+    def relprop(self, R, alpha = 1):
+        x0 = torch.clamp(self.X[0], min=0)
+        x1 = torch.clamp(self.X[1], min=0)
+        x = [x0, x1]
+        Z = self.forward(x)
+        S = safe_divide(R, Z)
+        C = self.gradprop(Z, x, S)
+
+        outputs = []
+        outputs.append(x[0] * C[0])
+        outputs.append(x[1] * C[1])
+
+        return outputs
+
+class Clone(RelProp):
+    def forward(self, input, num):
+        self.__setattr__('num', num)
+        outputs = []
+        for _ in range(num):
+            outputs.append(input)
+
+        return outputs
+
+    def relprop(self, R, alpha = 1):
+        Z = []
+        for _ in range(self.num):
+            Z.append(self.X)
+        S = [safe_divide(r, z) for r, z in zip(R, Z)]
+        C = self.gradprop(Z, self.X, S)[0]
+
+        R = self.X * C
+
+        return R
+
+    def RAP_relprop(self, R_p):
+        def backward(R_p):
+            Z = []
+            for _ in range(self.num):
+                Z.append(self.X)
+
+            Spp = []
+            Spn = []
+
+            for z, rp, rn in zip(Z, R_p):
+                Spp.append(safe_divide(torch.clamp(rp, min=0), z))
+                Spn.append(safe_divide(torch.clamp(rp, max=0), z))
+
+            Cpp = self.gradprop(Z, self.X, Spp)[0]
+            Cpn = self.gradprop(Z, self.X, Spn)[0]
+
+            Rp = self.X * (Cpp * Cpn)
+
+            return Rp
+        if torch.is_tensor(R_p) == False:
+            idx = len(R_p)
+            tmp_R_p = R_p
+            Rp = []
+            for i in range(idx):
+                Rp_tmp = backward(tmp_R_p[i])
+                Rp.append(Rp_tmp)
+        else:
+            Rp = backward(R_p)
+        return Rp
+
+class Cat(RelProp):
+    def forward(self, inputs, dim):
+        self.__setattr__('dim', dim)
+        return torch.cat(inputs, dim)
+
+    def relprop(self, R, alpha = 1):
+        Z = self.forward(self.X, self.dim)
+        S = safe_divide(R, Z)
+        C = self.gradprop(Z, self.X, S)
+
+        outputs = []
+        for x, c in zip(self.X, C):
+            outputs.append(x * c)
+
+        return outputs
+    def RAP_relprop(self, R_p):
+        def backward(R_p):
+            Z = self.forward(self.X, self.dim)
+            Sp = safe_divide(R_p, Z)
+
+            Cp = self.gradprop(Z, self.X, Sp)
+
+            Rp = []
+
+            for x, cp in zip(self.X, Cp):
+                Rp.append(x * (cp))
+
+            return Rp
+        if torch.is_tensor(R_p) == False:
+            idx = len(R_p)
+            tmp_R_p = R_p
+            Rp = []
+            for i in range(idx):
+                Rp_tmp = backward(tmp_R_p[i])
+                Rp.append(Rp_tmp)
+        else:
+            Rp = backward(R_p)
+        return Rp
+
+class Sequential(nn.Sequential):
+    def relprop(self, R, alpha = 1):
+        for m in reversed(self._modules.values()):
+            R = m.relprop(R, alpha)
+        return R
+    def RAP_relprop(self, Rp):
+        for m in reversed(self._modules.values()):
+            Rp = 
m.RAP_relprop(Rp) + return Rp + +class BatchNorm2d(nn.BatchNorm2d, RelProp): + def relprop(self, R, alpha = 1): + X = self.X + beta = 1 - alpha + weight = self.weight.unsqueeze(0).unsqueeze(2).unsqueeze(3) / ( + (self.running_var.unsqueeze(0).unsqueeze(2).unsqueeze(3).pow(2) + self.eps).pow(0.5)) + Z = X * weight + 1e-9 + S = R / Z + Ca = S * weight + R = self.X * (Ca) + return R + def RAP_relprop(self, R_p): + def f(R, w1, x1): + Z1 = x1 * w1 + S1 = safe_divide(R, Z1) * w1 + C1 = x1 * S1 + return C1 + def backward(R_p): + X = self.X + + weight = self.weight.unsqueeze(0).unsqueeze(2).unsqueeze(3) / ( + (self.running_var.unsqueeze(0).unsqueeze(2).unsqueeze(3).pow(2) + self.eps).pow(0.5)) + + if torch.is_tensor(self.bias): + bias = self.bias.unsqueeze(-1).unsqueeze(-1) + bias_p = safe_divide(bias * R_p.ne(0).type(self.bias.type()), + R_p.ne(0).type(self.bias.type()).sum(dim=[2, 3], keepdim=True)) + R_p = R_p - bias_p + + Rp = f(R_p, weight, X) + + if torch.is_tensor(self.bias): + Bp = f(bias_p, weight, X) + + Rp = Rp + Bp + + return Rp + + if torch.is_tensor(R_p) == False: + idx = len(R_p) + tmp_R_p = R_p + Rp = [] + for i in range(idx): + Rp_tmp = backward(tmp_R_p[i]) + Rp.append(Rp_tmp) + else: + Rp = backward(R_p) + return Rp + +class Linear(nn.Linear, RelProp): + def relprop(self, R, alpha = 1): + beta = alpha - 1 + pw = torch.clamp(self.weight, min=0) + nw = torch.clamp(self.weight, max=0) + px = torch.clamp(self.X, min=0) + nx = torch.clamp(self.X, max=0) + + # def f(w1, w2, x1, x2): + # Z1 = F.linear(x1, w1) + # Z2 = F.linear(x2, w2) + # S1 = safe_divide(R, Z1) + # S2 = safe_divide(R, Z2) + # C1 = x1 * self.gradprop(Z1, x1, S1)[0] + # C2 = x2 * self.gradprop(Z2, x2, S2)[0] + # return C1 #+ C2 + + def f(w1, w2, x1, x2): + Z1 = F.linear(x1, w1) + Z2 = F.linear(x2, w2) + Z = Z1 + Z2 + S = safe_divide(R, Z) + C1 = x1 * self.gradprop(Z1, x1, S)[0] + C2 = x2 * self.gradprop(Z2, x2, S)[0] + return C1 + C2 + + + activator_relevances = f(pw, nw, px, nx) + inhibitor_relevances = f(nw, pw, px, nx) + + out = alpha * activator_relevances - beta*inhibitor_relevances + + return out + + def RAP_relprop(self, R_p): + def shift_rel(R, R_val): + R_nonzero = torch.ne(R, 0).type(R.type()) + shift = safe_divide(R_val, torch.sum(R_nonzero, dim=-1, keepdim=True)) * torch.ne(R, 0).type(R.type()) + K = R - shift + return K + def pos_prop(R, Za1, Za2, x1): + R_pos = torch.clamp(R, min=0) + R_neg = torch.clamp(R, max=0) + S1 = safe_divide((R_pos * safe_divide((Za1 + Za2), Za1 + Za2)), Za1) + C1 = x1 * self.gradprop(Za1, x1, S1)[0] + S1n = safe_divide((R_neg * safe_divide((Za1 + Za2), Za1 + Za2)), Za1) + C1n = x1 * self.gradprop(Za1, x1, S1n)[0] + S2 = safe_divide((R_pos * safe_divide((Za2), Za1 + Za2)), Za2) + C2 = x1 * self.gradprop(Za2, x1, S2)[0] + S2n = safe_divide((R_neg * safe_divide((Za2), Za1 + Za2)), Za2) + C2n = x1 * self.gradprop(Za2, x1, S2n)[0] + Cp = C1 + C2 + Cn = C2n + C1n + + C = (Cp + Cn) + C = shift_rel(C, C.sum(dim=-1,keepdim=True)-R.sum(dim=-1,keepdim=True)) + return C + def f(R, w1, w2, x1, x2): + R_nonzero = R.ne(0).type(R.type()) + Za1 = F.linear(x1, w1) * R_nonzero + Za2 = - F.linear(x1, w2) * R_nonzero + + Zb1 = - F.linear(x2, w1) * R_nonzero + Zb2 = F.linear(x2, w2) * R_nonzero + + C1 = pos_prop(R, Za1, Za2, x1) + C2 = pos_prop(R, Zb1, Zb2, x2) + + return C1 + C2 + + def first_prop(pd, px, nx, pw, nw): + Rpp = F.linear(px, pw) * pd + Rpn = F.linear(px, nw) * pd + Rnp = F.linear(nx, pw) * pd + Rnn = F.linear(nx, nw) * pd + Pos = (Rpp + Rnn).sum(dim=-1, keepdim=True) + Neg = (Rpn + 
Rnp).sum(dim=-1, keepdim=True) + + Z1 = F.linear(px, pw) + Z2 = F.linear(px, nw) + Z3 = F.linear(nx, pw) + Z4 = F.linear(nx, nw) + + S1 = safe_divide(Rpp, Z1) + S2 = safe_divide(Rpn, Z2) + S3 = safe_divide(Rnp, Z3) + S4 = safe_divide(Rnn, Z4) + C1 = px * self.gradprop(Z1, px, S1)[0] + C2 = px * self.gradprop(Z2, px, S2)[0] + C3 = nx * self.gradprop(Z3, nx, S3)[0] + C4 = nx * self.gradprop(Z4, nx, S4)[0] + bp = self.bias * pd * safe_divide(Pos, Pos + Neg) + bn = self.bias * pd * safe_divide(Neg, Pos + Neg) + Sb1 = safe_divide(bp, Z1) + Sb2 = safe_divide(bn, Z2) + Cb1 = px * self.gradprop(Z1, px, Sb1)[0] + Cb2 = px * self.gradprop(Z2, px, Sb2)[0] + return C1 + C4 + Cb1 + C2 + C3 + Cb2 + def backward(R_p, px, nx, pw, nw): + # dealing bias + # if torch.is_tensor(self.bias): + # bias_p = self.bias * R_p.ne(0).type(self.bias.type()) + # R_p = R_p - bias_p + + Rp = f(R_p, pw, nw, px, nx) + + # if torch.is_tensor(self.bias): + # Bp = f(bias_p, pw, nw, px, nx) + # + # Rp = Rp + Bp + return Rp + def redistribute(Rp_tmp): + Rp = torch.clamp(Rp_tmp, min=0) + Rn = torch.clamp(Rp_tmp, max=0) + R_tot = (Rp - Rn).sum(dim=-1, keepdim=True) + Rp_tmp3 = safe_divide(Rp, R_tot) * (Rp + Rn).sum(dim=-1, keepdim=True) + Rn_tmp3 = -safe_divide(Rn, R_tot) * (Rp + Rn).sum(dim=-1, keepdim=True) + return Rp_tmp3 + Rn_tmp3 + pw = torch.clamp(self.weight, min=0) + nw = torch.clamp(self.weight, max=0) + X = self.X + px = torch.clamp(X, min=0) + nx = torch.clamp(X, max=0) + if torch.is_tensor(R_p) == True and R_p.max() == 1: ## first propagation + pd = R_p + + Rp_tmp = first_prop(pd, px, nx, pw, nw) + A = redistribute(Rp_tmp) + + return A + else: + Rp = backward(R_p, px, nx, pw, nw) + + + return Rp + + +class Conv2d(nn.Conv2d, RelProp): + def gradprop2(self, DY, weight): + Z = self.forward(self.X) + + output_padding = self.X.size()[2] - ( + (Z.size()[2] - 1) * self.stride[0] - 2 * self.padding[0] + self.kernel_size[0]) + + return F.conv_transpose2d(DY, weight, stride=self.stride, padding=self.padding, output_padding=output_padding) + + def relprop(self, R, alpha = 1): + if self.X.shape[1] == 3: + pw = torch.clamp(self.weight, min=0) + nw = torch.clamp(self.weight, max=0) + X = self.X + L = self.X * 0 + \ + torch.min(torch.min(torch.min(self.X, dim=1, keepdim=True)[0], dim=2, keepdim=True)[0], dim=3, + keepdim=True)[0] + H = self.X * 0 + \ + torch.max(torch.max(torch.max(self.X, dim=1, keepdim=True)[0], dim=2, keepdim=True)[0], dim=3, + keepdim=True)[0] + Za = torch.conv2d(X, self.weight, bias=None, stride=self.stride, padding=self.padding) - \ + torch.conv2d(L, pw, bias=None, stride=self.stride, padding=self.padding) - \ + torch.conv2d(H, nw, bias=None, stride=self.stride, padding=self.padding) + 1e-9 + + S = R / Za + C = X * self.gradprop2(S, self.weight) - L * self.gradprop2(S, pw) - H * self.gradprop2(S, nw) + R = C + else: + beta = alpha - 1 + pw = torch.clamp(self.weight, min=0) + nw = torch.clamp(self.weight, max=0) + px = torch.clamp(self.X, min=0) + nx = torch.clamp(self.X, max=0) + + + # def f(w1, w2, x1, x2): + # Z1 = F.conv2d(x1, w1, bias=self.bias, stride=self.stride, padding=self.padding, groups=self.groups) + # Z2 = F.conv2d(x2, w2, bias=self.bias, stride=self.stride, padding=self.padding, groups=self.groups) + # S1 = safe_divide(R, Z1) + # S2 = safe_divide(R, Z2) + # C1 = x1 * self.gradprop(Z1, x1, S1)[0] + # C2 = x2 * self.gradprop(Z2, x2, S2)[0] + # return C1 + C2 + + def f(w1, w2, x1, x2): + Z1 = F.conv2d(x1, w1, bias=self.bias, stride=self.stride, padding=self.padding, groups=self.groups) + Z2 = 
F.conv2d(x2, w2, bias=self.bias, stride=self.stride, padding=self.padding, groups=self.groups) + Z = Z1 + Z2 + S = safe_divide(R, Z) + C1 = x1 * self.gradprop(Z1, x1, S)[0] + C2 = x2 * self.gradprop(Z2, x2, S)[0] + return C1 + C2 + + + + activator_relevances = f(pw, nw, px, nx) + inhibitor_relevances = f(nw, pw, px, nx) + + R = alpha * activator_relevances - beta * inhibitor_relevances + return R + + def RAP_relprop(self, R_p): + def shift_rel(R, R_val): + R_nonzero = torch.ne(R, 0).type(R.type()) + shift = safe_divide(R_val, torch.sum(R_nonzero, dim=[1,2,3], keepdim=True)) * torch.ne(R, 0).type(R.type()) + K = R - shift + return K + def pos_prop(R, Za1, Za2, x1): + R_pos = torch.clamp(R, min=0) + R_neg = torch.clamp(R, max=0) + S1 = safe_divide((R_pos * safe_divide((Za1 + Za2), Za1 + Za2)), Za1) + C1 = x1 * self.gradprop(Za1, x1, S1)[0] + S1n = safe_divide((R_neg * safe_divide((Za1 + Za2), Za1 + Za2)), Za2) + C1n = x1 * self.gradprop(Za2, x1, S1n)[0] + S2 = safe_divide((R_pos * safe_divide((Za2), Za1 + Za2)), Za2) + C2 = x1 * self.gradprop(Za2, x1, S2)[0] + S2n = safe_divide((R_neg * safe_divide((Za2), Za1 + Za2)), Za2) + C2n = x1 * self.gradprop(Za2, x1, S2n)[0] + Cp = C1 + C2 + Cn = C2n + C1n + C = (Cp + Cn) + C = shift_rel(C, C.sum(dim=[1,2,3], keepdim=True) - R.sum(dim=[1,2,3], keepdim=True)) + return C + + def f(R, w1, w2, x1, x2): + R_nonzero = R.ne(0).type(R.type()) + Za1 = F.conv2d(x1, w1, bias=None, stride=self.stride, padding=self.padding, groups=self.groups) * R_nonzero + Za2 = - F.conv2d(x1, w2, bias=None, stride=self.stride, padding=self.padding, groups=self.groups) * R_nonzero + + Zb1 = - F.conv2d(x2, w1, bias=None, stride=self.stride, padding=self.padding, groups=self.groups) * R_nonzero + Zb2 = F.conv2d(x2, w2, bias=None, stride=self.stride, padding=self.padding, groups=self.groups) * R_nonzero + + C1 = pos_prop(R, Za1, Za2, x1) + C2 = pos_prop(R, Zb1, Zb2, x2) + return C1 #+ C2 + + def backward(R_p, px, nx, pw, nw): + + # if torch.is_tensor(self.bias): + # bias = self.bias.unsqueeze(-1).unsqueeze(-1) + # bias_p = safe_divide(bias * R_p.ne(0).type(self.bias.type()), + # R_p.ne(0).type(self.bias.type()).sum(dim=[2, 3], keepdim=True)) + # R_p = R_p - bias_p + + Rp = f(R_p, pw, nw, px, nx) + + # if torch.is_tensor(self.bias): + # Bp = f(bias_p, pw, nw, px, nx) + # + # Rp = Rp + Bp + return Rp + def final_backward(R_p, pw, nw, X1): + X = X1 + L = X * 0 + \ + torch.min(torch.min(torch.min(X, dim=1, keepdim=True)[0], dim=2, keepdim=True)[0], dim=3, + keepdim=True)[0] + H = X * 0 + \ + torch.max(torch.max(torch.max(X, dim=1, keepdim=True)[0], dim=2, keepdim=True)[0], dim=3, + keepdim=True)[0] + Za = torch.conv2d(X, self.weight, bias=None, stride=self.stride, padding=self.padding, groups=self.groups) - \ + torch.conv2d(L, pw, bias=None, stride=self.stride, padding=self.padding, groups=self.groups) - \ + torch.conv2d(H, nw, bias=None, stride=self.stride, padding=self.padding, groups=self.groups) + + Sp = safe_divide(R_p, Za) + + Rp = X * self.gradprop2(Sp, self.weight) - L * self.gradprop2(Sp, pw) - H * self.gradprop2(Sp, nw) + return Rp + pw = torch.clamp(self.weight, min=0) + nw = torch.clamp(self.weight, max=0) + px = torch.clamp(self.X, min=0) + nx = torch.clamp(self.X, max=0) + + if self.X.shape[1] == 3: + Rp = final_backward(R_p, pw, nw, self.X) + else: + Rp = backward(R_p, px, nx, pw, nw) + + return Rp + +def get_same_padding_conv2d(image_size): + """Chooses static padding if you have specified an image size, and dynamic padding otherwise. 
Static padding is necessary for ONNX exporting of models.
+    Args:
+        image_size (int or tuple): Size of the image.
+    Returns:
+        Conv2dStaticSamePadding, partially applied with the given image_size
+        (only the static variant exists in this module).
+    """
+    return partial(Conv2dStaticSamePadding, image_size=image_size)
+
+# def get_same_padding_depthwise_conv2d(image_size):
+#     """Chooses static padding if you have specified an image size, and dynamic padding otherwise.
+#     Static padding is necessary for ONNX exporting of models.
+#     Args:
+#         image_size (int or tuple): Size of the image.
+#     Returns:
+#         Conv2dDynamicSamePadding or Conv2dStaticSamePadding.
+#     """
+#     return partial(Conv2_depthwise_dStaticSamePadding, image_size=image_size)
+
+class ConvTranspose2d(nn.ConvTranspose2d, RelProp):
+    def relprop(self, R, alpha = 1):
+
+        pw = torch.clamp(self.weight, min=0)
+        nw = torch.clamp(self.weight, max=0)
+        px = torch.clamp(self.X, min=0)
+        nx = torch.clamp(self.X, max=0)
+
+        def f(w1, x1):
+            Z1 = F.conv_transpose2d(x1, w1, bias=None, stride=self.stride, padding=self.padding, output_padding=self.output_padding)
+            S1 = safe_divide(R, Z1)
+            C1 = x1 * self.gradprop(Z1, x1, S1)[0]
+            return C1
+
+        activator_relevances = f(pw, px)
+        R = activator_relevances
+        return R
+
+class Conv2dStaticSamePadding(Conv2d):
+    """2D convolution like TensorFlow's 'SAME' mode, for a given input image size.
+    The padding module is computed in the constructor and then applied in forward.
+    """
+
+    # With the same calculation as Conv2dDynamicSamePadding
+
+    def __init__(self, in_channels, out_channels, kernel_size, stride=1, image_size=None, **kwargs):
+        super().__init__(in_channels, out_channels, kernel_size, stride, **kwargs)
+        self.stride = self.stride if len(self.stride) == 2 else [self.stride[0]] * 2
+
+        # Calculate padding based on image size and save it
+        assert image_size is not None
+        ih, iw = (image_size, image_size) if isinstance(image_size, int) else image_size
+        kh, kw = self.weight.size()[-2:]
+        sh, sw = self.stride
+        oh, ow = math.ceil(ih / sh), math.ceil(iw / sw)
+        pad_h = max((oh - 1) * self.stride[0] + (kh - 1) * self.dilation[0] + 1 - ih, 0)
+        pad_w = max((ow - 1) * self.stride[1] + (kw - 1) * self.dilation[1] + 1 - iw, 0)
+        if pad_h > 0 or pad_w > 0:
+            self.pad_flag = 'zeropad'
+            self.static_padding = ZeroPad2d((pad_w // 2, pad_w - pad_w // 2,
+                                             pad_h // 2, pad_h - pad_h // 2))
+        else:
+            self.pad_flag = 'identity'
+            self.static_padding = Identity()
+
+    def forward(self, x):
+        self.X = x
+        self.padd_output = self.static_padding(self.X)
+        x = F.conv2d(self.padd_output, self.weight, self.bias, self.stride, self.padding, self.dilation, self.groups)
+        return x
+
+    def relprop(self, R, alpha=1):
+        if self.padd_output.shape[1] == 3:
+            pw = torch.clamp(self.weight, min=0)
+            nw = torch.clamp(self.weight, max=0)
+            X = self.padd_output
+            L = self.padd_output * 0 + \
+                torch.min(torch.min(torch.min(self.padd_output, dim=1, keepdim=True)[0], dim=2, keepdim=True)[0], dim=3,
+                          keepdim=True)[0]
+            H = self.padd_output * 0 + \
+                torch.max(torch.max(torch.max(self.padd_output, dim=1, keepdim=True)[0], dim=2, keepdim=True)[0], dim=3,
+                          keepdim=True)[0]
+            Za = torch.conv2d(X, self.weight, bias=None, stride=self.stride, padding=self.padding) - \
+                 torch.conv2d(L, pw, bias=None, stride=self.stride, padding=self.padding) - \
+                 torch.conv2d(H, nw, bias=None, stride=self.stride, padding=self.padding) + 1e-9
+
+            S = R / Za
+            C = X * self.gradprop2(S, self.weight) - L * self.gradprop2(S, pw) - H * self.gradprop2(S, nw)
+            R = C
+        else:
+            beta = alpha - 1
+            pw = torch.clamp(self.weight, 
min=0) + nw = torch.clamp(self.weight, max=0) + px = torch.clamp(self.padd_output, min=0) + nx = torch.clamp(self.padd_output, max=0) + + # def f(w1, w2, x1, x2): + # Z1 = F.conv2d(x1, w1, bias=self.bias, stride=self.stride, padding=self.padding, groups=self.groups) + # Z2 = F.conv2d(x2, w2, bias=self.bias, stride=self.stride, padding=self.padding, groups=self.groups) + # S1 = safe_divide(R, Z1) + # S2 = safe_divide(R, Z2) + # C1 = x1 * self.gradprop(Z1, x1, S1)[0] + # C2 = x2 * self.gradprop(Z2, x2, S2)[0] + # return C1 + C2 + + def f(w1, w2, x1, x2): + Z1 = F.conv2d(x1, w1, bias=self.bias, stride=self.stride, padding=self.padding, groups=self.groups) + Z2 = F.conv2d(x2, w2, bias=self.bias, stride=self.stride, padding=self.padding, groups=self.groups) + Z = Z1 + Z2 + S = safe_divide(R, Z) + C1 = x1 * self.gradprop(Z1, x1, S)[0] + C2 = x2 * self.gradprop(Z2, x2, S)[0] + return C1 + C2 + + activator_relevances = f(pw, nw, px, nx) + inhibitor_relevances = f(nw, pw, px, nx) + + R = alpha * activator_relevances - beta * inhibitor_relevances + R = self.static_padding.relprop(R) + return R + + def RAP_relprop(self, R_p): + def shift_rel(R, R_val): + R_nonzero = torch.ne(R, 0).type(R.type()) + shift = safe_divide(R_val, torch.sum(R_nonzero, dim=[1, 2, 3], keepdim=True)) * torch.ne(R, 0).type( + R.type()) + K = R - shift + return K + + def pos_prop(R, Za1, Za2, x1): + R_pos = torch.clamp(R, min=0) + R_neg = torch.clamp(R, max=0) + S1 = safe_divide((R_pos * safe_divide((Za1 + Za2), Za1 + Za2)), Za1) + C1 = x1 * self.gradprop(Za1, x1, S1)[0] + S1n = safe_divide((R_neg * safe_divide((Za1 + Za2), Za1 + Za2)), Za2) + C1n = x1 * self.gradprop(Za2, x1, S1n)[0] + S2 = safe_divide((R_pos * safe_divide((Za2), Za1 + Za2)), Za2) + C2 = x1 * self.gradprop(Za2, x1, S2)[0] + S2n = safe_divide((R_neg * safe_divide((Za2), Za1 + Za2)), Za2) + C2n = x1 * self.gradprop(Za2, x1, S2n)[0] + Cp = C1 + C2 + Cn = C2n + C1n + C = (Cp + Cn) + C = shift_rel(C, C.sum(dim=[1, 2, 3], keepdim=True) - R.sum(dim=[1, 2, 3], keepdim=True)) + return C + + def f(R, w1, w2, x1, x2): + R_nonzero = R.ne(0).type(R.type()) + Za1 = F.conv2d(x1, w1, bias=None, stride=self.stride, padding=self.padding, groups=self.groups) * R_nonzero + Za2 = - F.conv2d(x1, w2, bias=None, stride=self.stride, padding=self.padding, + groups=self.groups) * R_nonzero + + Zb1 = - F.conv2d(x2, w1, bias=None, stride=self.stride, padding=self.padding, + groups=self.groups) * R_nonzero + Zb2 = F.conv2d(x2, w2, bias=None, stride=self.stride, padding=self.padding, groups=self.groups) * R_nonzero + + C1 = pos_prop(R, Za1, Za2, x1) + C2 = pos_prop(R, Zb1, Zb2, x2) + return C1 # + C2 + + def backward(R_p, px, nx, pw, nw): + + # if torch.is_tensor(self.bias): + # bias = self.bias.unsqueeze(-1).unsqueeze(-1) + # bias_p = safe_divide(bias * R_p.ne(0).type(self.bias.type()), + # R_p.ne(0).type(self.bias.type()).sum(dim=[2, 3], keepdim=True)) + # R_p = R_p - bias_p + + Rp = f(R_p, pw, nw, px, nx) + + # if torch.is_tensor(self.bias): + # Bp = f(bias_p, pw, nw, px, nx) + # + # Rp = Rp + Bp + return Rp + + def final_backward(R_p, pw, nw, X1): + X = X1 + L = X * 0 + \ + torch.min(torch.min(torch.min(X, dim=1, keepdim=True)[0], dim=2, keepdim=True)[0], dim=3, + keepdim=True)[0] + H = X * 0 + \ + torch.max(torch.max(torch.max(X, dim=1, keepdim=True)[0], dim=2, keepdim=True)[0], dim=3, + keepdim=True)[0] + Za = torch.conv2d(X, self.weight, bias=None, stride=self.stride, padding=self.padding, groups=self.groups) - \ + torch.conv2d(L, pw, bias=None, stride=self.stride, 
padding=self.padding, groups=self.groups) - \ + torch.conv2d(H, nw, bias=None, stride=self.stride, padding=self.padding, groups=self.groups) + + Sp = safe_divide(R_p, Za) + + Rp = X * self.gradprop2(Sp, self.weight) - L * self.gradprop2(Sp, pw) - H * self.gradprop2(Sp, nw) + return Rp + + pw = torch.clamp(self.weight, min=0) + nw = torch.clamp(self.weight, max=0) + px = torch.clamp(self.padd_output, min=0) + nx = torch.clamp(self.padd_output, max=0) + + if self.padd_output.shape[1] == 3: + Rp = final_backward(R_p, pw, nw, self.padd_output) + else: + Rp = backward(R_p, px, nx, pw, nw) + + Rp = self.static_padding.relprop(Rp) + return Rp + + + + +if __name__=='__main__': + + convt = ConvTranspose2d(100, 50, kernel_size=3, stride=2, padding=1, output_padding=1, bias=False).cuda() + + rand = torch.rand((1,100,224,224)).cuda() + out = convt(rand) + rel = convt.relprop(out) + + print(out.shape) \ No newline at end of file diff --git a/results/lycaenid, _butterfly.jpg b/results/lycaenid, _butterfly.jpg deleted file mode 100644 index 78f53f8..0000000 Binary files a/results/lycaenid, _butterfly.jpg and /dev/null differ diff --git a/results/night snak_snake2.JPG b/results/night snak_snake2.JPG deleted file mode 100644 index 36481a1..0000000 Binary files a/results/night snak_snake2.JPG and /dev/null differ diff --git a/results/ostrich, S_ostrich.jpg b/results/ostrich, S_ostrich.jpg deleted file mode 100644 index 44d5ce2..0000000 Binary files a/results/ostrich, S_ostrich.jpg and /dev/null differ diff --git a/results/sulphur-cr_pigeon.jpg b/results/sulphur-cr_pigeon.jpg deleted file mode 100644 index bad2298..0000000 Binary files a/results/sulphur-cr_pigeon.jpg and /dev/null differ diff --git a/results/tench, Tin_ILSVRC2012_val_00039905.jpg b/results/tench, Tin_ILSVRC2012_val_00039905.jpg deleted file mode 100644 index 0d189fa..0000000 Binary files a/results/tench, Tin_ILSVRC2012_val_00039905.jpg and /dev/null differ diff --git a/results/tusker_zebra_elephant1.jpg b/results/tusker_zebra_elephant1.jpg deleted file mode 100644 index c94ee52..0000000 Binary files a/results/tusker_zebra_elephant1.jpg and /dev/null differ
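For reference, a minimal standalone sketch (not part of the patch; variable names are illustrative) of the two channel-weighting schemes that _compute_weights switches between. R is the relevance propagated back to the target layer and A that layer's activations, both shaped [N, C, H, W]; the 1e-7 stabilizer mirrors the one in _XRelevanceCAM.

    import torch

    def relevance_cam_weights(R):
        # Relevance-CAM: a channel's weight is its spatially averaged relevance
        return torch.mean(R, dim=(2, 3), keepdim=True)

    def xrelevance_cam_weights(R, A, eps=1e-7):
        # XRelevance-CAM: normalize relevance by the channel's total activation
        # before summing it over space, so strongly firing channels are not
        # over-weighted merely for having large activations
        w = R / (torch.sum(A, dim=(2, 3), keepdim=True) + eps)
        return torch.sum(w, dim=(2, 3), keepdim=True)

    def cam_from(weights, A):
        # weighted channel sum gives a [N, 1, H, W] localization map
        return torch.sum(weights * A, dim=1, keepdim=True)

    R = torch.randn(1, 2048, 7, 7)            # relevance at layer4 of ResNet-50
    A = torch.relu(torch.randn(1, 2048, 7, 7))
    print(cam_from(xrelevance_cam_weights(R, A), A).shape)  # torch.Size([1, 1, 7, 7])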