fix data generate.
add faceswap
change paper name
pminhtam committed Jun 3, 2020
1 parent 53c4cae commit db530ae
Showing 18 changed files with 275 additions and 27 deletions.
18 changes: 10 additions & 8 deletions cnn_visualization/generate_class_specific_samples.py
@@ -43,7 +43,7 @@ def generate(self, iterations=150):
np.ndarray -- Final maximally activated class image
"""
print("bat dau generate xong ... ")
initial_learning_rate = 6
initial_learning_rate = 200
for i in range(1, iterations):
print(i)
# Process image and return variable
@@ -57,9 +57,9 @@ def generate(self, iterations=150):
print(output)
class_loss = -output[0, self.target_class]

if i % 10 == 0 or i == iterations-1:
if i % 1 == 0 or i == iterations-1:
print('Iteration:', str(i), 'Loss',
"{0:.2f}".format(class_loss.data.numpy()))
"{0:.2f}".format(class_loss.cpu().data.numpy()))
# Zero grads
self.model.zero_grad()
# Backward
@@ -68,16 +68,18 @@ def generate(self, iterations=150):
optimizer.step()
# Recreate image
self.created_image = recreate_image(self.processed_image)
if i % 10 == 0 or i == iterations-1:
print(self.created_image.size)
if i % 1 == 0 or i == iterations-1:
# Save image
initial_learning_rate /=2
im_path = 'generated/class_'+str(self.target_class)+'/c_'+str(self.target_class)+'_'+'iter_'+str(i)+'.png'
save_image(self.created_image, im_path)

return self.processed_image

def parse_args():
parser = argparse.ArgumentParser(description="Deepfake detection")
parser.add_argument('--model_path', default="../../model/xception/model_pytorch_4.pt", help='path to model ')
parser.add_argument('--model_path', default="../../../model/xception/model_pytorch_4.pt", help='path to model ')
parser.add_argument('--gpu_id',type=int, default=-1, help='gpu id (-1 for cpu)')
parser.add_argument('--image_size',type=int, default=256, help='input image size')
parser.add_argument('--iterations',type=int, default=256, help='number of iterations')
@@ -171,13 +173,13 @@ def parse_args():
pass


# from pytorch_model.xception import xception
from pytorch_model.xception import xception

# model = xception(pretrained=False)
model = xception(pretrained=False)
device = torch.device("cuda" if torch.cuda.is_available()
else "cpu")
model = model.to(device)
model.load_state_dict(torch.load(args.model_path))
model.load_state_dict(torch.load(args.model_path,map_location=torch.device('cpu')))
print("Load xong ... ")
model.eval()
csig = ClassSpecificImageGeneration(model, target_class,image_size)
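For context on what this script does, below is a minimal sketch of class-specific image generation against plain PyTorch. The function and variable names are illustrative, not the repo's exact `ClassSpecificImageGeneration` class, and it omits the preprocessing/`recreate_image`/saving steps visible in the hunks above.

```python
import torch
from torch.optim import SGD

def generate_class_image(model, target_class, image_size=256, iterations=150, lr=200.0):
    """Optimize a random image so that `model` scores it highly for `target_class`.
    Sketch only: the script above additionally preprocesses, recreates and saves
    the image at every iteration."""
    img = torch.rand(1, 3, image_size, image_size, requires_grad=True)
    for i in range(1, iterations):
        optimizer = SGD([img], lr=lr)          # recreated each step, as in the script
        optimizer.zero_grad()                  # clear the gradient accumulated on the image
        output = model(img)
        class_loss = -output[0, target_class]  # minimizing this maximizes the target logit
        class_loss.backward()
        optimizer.step()
        if i % 10 == 0:
            lr /= 2                            # cf. the `initial_learning_rate /=2` line added above
    return img.detach()

# Loading a CUDA-trained checkpoint on a CPU-only machine, as the parse_args section now does:
# model.load_state_dict(torch.load(args.model_path, map_location=torch.device('cpu')))
```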
@@ -76,7 +76,7 @@ def generate(self, iterations=150, blur_freq=4, blur_rad=1, wd=0.0001, clipping_
optimizer = SGD([self.processed_image],
lr=initial_learning_rate, weight_decay=wd)
# Forward
output = self.model(self.processed_image)
output = self.model(self.processed_image).cpu()
# Target specific class
class_loss = -output[0, self.target_class]
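The `.cpu()` added in this hunk (and on `class_loss` in the first file) matters once the model runs on a GPU: a CUDA tensor has to be moved back to host memory before `.numpy()` can be called on it. A minimal illustration, assuming `class_loss` is such a tensor:

```python
# .numpy() only works on CPU tensors, so detach from the graph and move first.
loss_value = class_loss.detach().cpu().numpy()
```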

29 changes: 22 additions & 7 deletions cnn_visualization/misc_functions.py
@@ -45,6 +45,7 @@ def save_gradient_images(gradient, file_name):
# Normalize
gradient = gradient - gradient.min()
gradient /= gradient.max()
print(gradient.max())
# Save image
path_to_file = os.path.join('../results', file_name + '.jpg')
save_image(gradient, path_to_file)
@@ -108,6 +109,7 @@ def format_np_output(np_arr):
"""
# Phase/Case 1: The np arr only has 2 dimensions
# Result: Add a dimension at the beginning
print("format_np_output :",np_arr.shape)
if len(np_arr.shape) == 2:
np_arr = np.expand_dims(np_arr, axis=0)
# Phase/Case 2: Np arr has only 1 channel (assuming first dim is channel)
@@ -116,12 +118,16 @@ def format_np_output(np_arr):
np_arr = np.repeat(np_arr, 3, axis=0)
# Phase/Case 3: Np arr is of shape 3xWxH
# Result: Convert it to WxHx3 in order to make it saveable by PIL
if np_arr.shape[0] > 3:
np_arr = np_arr[6:9]
if np_arr.shape[0] == 3:
np_arr = np_arr.transpose(1, 2, 0)
# Phase/Case 4: NP arr is normalized between 0-1
# Result: Multiply with 255 and change type to make it saveable by PIL
if np.max(np_arr) <= 1:
np_arr = (np_arr*255).astype(np.uint8)
else:
np_arr = (np_arr * 255).astype(np.uint8)
return np_arr


@@ -134,11 +140,12 @@ def save_image(im, path):
"""
if isinstance(im, (np.ndarray, np.generic)):
im = format_np_output(im)
# print(im)
im = Image.fromarray(im)
im.save(path)


def preprocess_image(pil_im, resize_im=True,image_size=256):
def preprocess_image(pil_im, resize_im=True,image_size=128):
"""
Processes image for CNNs
@@ -210,6 +217,8 @@ def get_positive_negative_saliency(gradient):
returns:
pos_saliency ( )
"""
print(gradient.min())
print(gradient.max())
pos_saliency = (np.maximum(0, gradient) / gradient.max())
neg_saliency = (np.maximum(0, -gradient) / -gradient.min())
return pos_saliency, neg_saliency
@@ -230,9 +239,15 @@ def get_example_params(example_index):
pretrained_model(Pytorch model): Model to use for the operations
"""
# Pick one of the examples
example_list = (('../input_images/snake.jpg', 56),
('../input_images/cat_dog.png', 243),
('../input_images/spider.png', 72))
# example_list = (("../../../data/extract_raw_img_test/real/aansscoqsl.mp4_8.jpg", 0),
# ("../../../data/extract_raw_img_test/real/aansscoqsl.mp4_10.jpg", 0),
# ("../../../data/extract_raw_img_test/real/aansscoqsl.mp4_17.jpg", 0))
# example_list = (("G:/fake_test/1/fake/1/376.png", 0),
# ("G:/fake_test/1/fake/1/377.png", 0),
# ("G:/fake_test/1/fake/1/378.png", 0))
example_list = (("G:/real_test/3/real/59/1214.png", 0),
("G:/real_test/3/real/59/1169.png", 0),
("G:/real_test/3/real/59/1169.png", 0))
img_path = example_list[example_index][0]
target_class = example_list[example_index][1]
file_name_to_export = img_path[img_path.rfind('/')+1:img_path.rfind('.')]
@@ -241,9 +256,9 @@
# Process image
prep_img = preprocess_image(original_image)
# Define model
pretrained_model = models.alexnet(pretrained=True)
# pretrained_model = models.alexnet(pretrained=True)
return (original_image,
prep_img,
target_class,
file_name_to_export,
pretrained_model)
file_name_to_export)
# pretrained_model)
20 changes: 10 additions & 10 deletions paper/README.md
@@ -6,23 +6,23 @@ https://github.com/NVlabs/stylegan2

[paper v1](1812.04948.pdf)

[paper v1](1912.04958.pdf)
[paper v2](stylegan2.pdf)


## starGAN
https://github.com/clovaai/stargan-v2

[paper v1](1711.09020.pdf)
[paper v1](stargan.pdf)

[paper v2](1912.01865.pdf)
[paper v2](stargan2.pdf)




# Fake image detection methods
## Classic ML
### Head pose
[paper](1811.00661.pdf)
[paper](headpose.pdf)

Exposing deep fakes using inconsistent head poses

@@ -33,7 +33,7 @@ …it is possible to distinguish real from fake images.
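As a rough illustration of the head-pose idea (not the paper's exact pipeline): estimate the head pose from 2D facial landmarks with OpenCV's `solvePnP`, once from all landmarks and once from the central face region, and let a simple classifier decide whether the two poses are consistent. Landmark detection itself is assumed to be available.

```python
import numpy as np
import cv2  # assumed available; any landmark detector (e.g. dlib) can supply the 2D points

# Generic 3D face model points: nose tip, chin, eye corners, mouth corners.
MODEL_3D = np.array([(0.0, 0.0, 0.0), (0.0, -330.0, -65.0),
                     (-225.0, 170.0, -135.0), (225.0, 170.0, -135.0),
                     (-150.0, -150.0, -125.0), (150.0, -150.0, -125.0)], dtype=np.float64)

def head_pose(landmarks_2d, img_w, img_h):
    """Estimate the head rotation/translation from six 2D landmarks (6x2 float array)."""
    cam = np.array([[img_w, 0, img_w / 2],
                    [0, img_w, img_h / 2],
                    [0, 0, 1]], dtype=np.float64)   # simple pinhole camera approximation
    ok, rvec, tvec = cv2.solvePnP(MODEL_3D, landmarks_2d, cam, None)
    return rvec, tvec

# The detector compares the pose from the whole face with the pose from the central region;
# a large difference between the two rotation vectors is evidence of a swapped face.
```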


### Visual Artifact
[paper](Exploiting visual artifacts to expose deepfakes and face manipulations-annotated.pdf)
[paper](visual_artifacts.pdf)

Exploiting Visual Artifacts to Expose Deepfakes and Face Manipulations

@@ -45,7 +45,7 @@ Compute the following facial features:
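The feature list itself is elided in this hunk. As a toy illustration of the approach (the features below are placeholders, not the paper's exact feature set): compute a few hand-crafted facial measurements and train a small classic classifier on them.

```python
import numpy as np
from sklearn.linear_model import LogisticRegression  # assumed available, for illustration only

def toy_features(face):
    """face: HxWx3 uint8 crop. Two illustrative features only."""
    h, w, _ = face.shape
    left_eye = face[h // 4: h // 2, : w // 2].mean(axis=(0, 1))
    right_eye = face[h // 4: h // 2, w // 2:].mean(axis=(0, 1))
    eye_color_diff = float(np.abs(left_eye - right_eye).mean())        # eye-color mismatch
    border_sharpness = float(face[:, :5].var() + face[:, -5:].var())   # crude blur proxy at the border
    return [eye_color_diff, border_sharpness]

# X = [toy_features(f) for f in face_crops]; y = 0/1 real/fake labels
# clf = LogisticRegression().fit(X, y)
```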


### frequency domain
[paper](1911.00686.pdf)
[paper](frequency_domain.pdf)

Unmasking DeepFakes with simple Features

@@ -58,7 +58,7 @@ …the algorithm averages the values that lie at the same distance from the center
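That averaging step (azimuthal averaging of the 2D Fourier spectrum) is easy to sketch; the snippet below illustrates the idea and is not the paper's reference code.

```python
import numpy as np

def azimuthal_average(gray):
    """Collapse the 2D power spectrum of a grayscale image into a 1D curve by
    averaging all frequencies that lie at the same distance from the center."""
    psd = np.abs(np.fft.fftshift(np.fft.fft2(gray))) ** 2
    h, w = psd.shape
    y, x = np.indices(psd.shape)
    r = np.hypot(x - w / 2, y - h / 2).astype(int)   # integer radius of every frequency bin
    sums = np.bincount(r.ravel(), weights=psd.ravel())
    counts = np.bincount(r.ravel())
    return sums / np.maximum(counts, 1)

# The resulting 1D spectrum is the feature vector fed to a simple classifier (e.g. an SVM).
```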

## Deep learning
### Mesonet
[paper](1809.00888.pdf)
[paper](mesonet.pdf)

MesoNet: a compact facial video forgery detection network

@@ -68,7 +68,7 @@ …at a high level can lead to real and fake face images being indistinguishable
…is favorable for distinguishing real and fake images.
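A rough PyTorch sketch of a small mesoscopic CNN in the spirit of MesoNet; layer sizes are illustrative, not the published Meso4 architecture.

```python
import torch.nn as nn

class TinyMeso(nn.Module):
    """A few shallow conv blocks followed by a tiny classifier, working at an
    intermediate ('mesoscopic') level rather than on pixel noise or high-level semantics."""
    def __init__(self):
        super().__init__()
        self.features = nn.Sequential(
            nn.Conv2d(3, 8, 3, padding=1), nn.BatchNorm2d(8), nn.ReLU(), nn.MaxPool2d(2),
            nn.Conv2d(8, 16, 3, padding=1), nn.BatchNorm2d(16), nn.ReLU(), nn.MaxPool2d(2),
            nn.Conv2d(16, 16, 3, padding=1), nn.BatchNorm2d(16), nn.ReLU(), nn.AdaptiveAvgPool2d(4),
        )
        self.classifier = nn.Sequential(nn.Flatten(), nn.Dropout(0.5), nn.Linear(16 * 4 * 4, 2))

    def forward(self, x):
        return self.classifier(self.features(x))
```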

### Capsule
[paper](Capsule-forensics Using Capsule Networks to Detect Forged Images and Videos-annotated.pdfs)
[paper](capsule.pdf)

Capsule-forensics: Using capsule networks to detect forged images and videos.

@@ -79,7 +79,7 @@ A capsule network layer is placed behind the VGG-19 network to…
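A heavily simplified sketch of that layout: a frozen VGG-19 front end feeding a small classification head. The capsule layers and dynamic routing from the paper are replaced by a plain head here.

```python
import torch.nn as nn
from torchvision import models

class VGG19Stub(nn.Module):
    """VGG-19 features as the extractor, as in the paper; the capsule part is stubbed out."""
    def __init__(self):
        super().__init__()
        vgg = models.vgg19(pretrained=True)
        self.extractor = nn.Sequential(*list(vgg.features.children())[:18])  # early blocks, 256-channel maps
        for p in self.extractor.parameters():
            p.requires_grad = False   # kept frozen for this sketch
        self.head = nn.Sequential(nn.AdaptiveAvgPool2d(1), nn.Flatten(), nn.Linear(256, 2))

    def forward(self, x):
        return self.head(self.extractor(x))
```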


### xception
[paper](1901.08971-annotated.pdf)
[paper](facefornsic++.pdf)

Faceforensics++: Learning to detect manipulated facial images

@@ -93,6 +93,6 @@ …computation compared with a standard convolution layer.
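That saving comes from factoring a standard convolution into a depthwise step and a 1x1 pointwise step. A minimal PyTorch sketch of one such block (the full Xception backbone used by FaceForensics++ stacks many of these with residual connections):

```python
import torch.nn as nn

class SeparableConv2d(nn.Module):
    """Depthwise conv (one filter per input channel) followed by a 1x1 pointwise conv;
    far fewer multiply-adds than a full in_ch x out_ch x k x k convolution."""
    def __init__(self, in_ch, out_ch, kernel_size=3, padding=1):
        super().__init__()
        self.depthwise = nn.Conv2d(in_ch, in_ch, kernel_size, padding=padding,
                                   groups=in_ch, bias=False)
        self.pointwise = nn.Conv2d(in_ch, out_ch, 1, bias=False)

    def forward(self, x):
        return self.pointwise(self.depthwise(x))
```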

## Fingerprint

[paper](1811.08180.pdf)
[paper](fingerprint.pdf)


File renamed without changes.
File renamed without changes.
File renamed without changes.
File renamed without changes.
File renamed without changes.
File renamed without changes.
File renamed without changes.
File renamed without changes.
File renamed without changes.
File renamed without changes.
File renamed without changes.