Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

fixed indentation rules, followed by pep8 #50

Open
wants to merge 4 commits into
base: master
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 2 additions & 0 deletions .gitignore
Original file line number Diff line number Diff line change
Expand Up @@ -6,6 +6,8 @@ debug*
videos/
build/
dist/
datasets/portrait/
logs/
torch.egg-info/
*/**/__pycache__
torch/version.py
Expand Down
138 changes: 69 additions & 69 deletions src/dataset.py
Original file line number Diff line number Diff line change
Expand Up @@ -5,85 +5,85 @@
import random

class dataset_single(data.Dataset):
    """Dataset over a single image folder, e.g. ``<dataroot>/<phase>A``.

    Loads every file in the folder, applies a deterministic
    resize -> center-crop -> tensor -> normalize pipeline, and optionally
    collapses RGB to a single luminance channel.
    """

    def __init__(self, opts, setname, input_dim):
        """Index the folder ``opts.phase + setname`` and build transforms.

        opts      -- options object; reads dataroot, phase, resize_size, crop_size
        setname   -- folder suffix ('A' or 'B')
        input_dim -- 1 for grayscale output, otherwise 3-channel RGB
        """
        self.dataroot = opts.dataroot
        folder = os.path.join(self.dataroot, opts.phase + setname)
        self.img = [os.path.join(folder, name) for name in os.listdir(folder)]
        self.size = len(self.img)
        self.input_dim = input_dim

        # Deterministic evaluation pipeline: fixed resize, center crop,
        # then map pixel values into [-1, 1].
        self.transforms = Compose([
            Resize((opts.resize_size, opts.resize_size), Image.BICUBIC),
            CenterCrop(opts.crop_size),
            ToTensor(),
            Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]),
        ])
        print('%s: %d images'%(setname, self.size))
        return

    def __getitem__(self, index):
        """Return the transformed image tensor at position ``index``."""
        return self.load_img(self.img[index], self.input_dim)

    def load_img(self, img_name, input_dim):
        """Open ``img_name`` as RGB, transform it, and reduce channels if asked.

        When input_dim == 1 the RGB channels are combined with the ITU-R
        BT.601 luma weights (0.299, 0.587, 0.114) into one channel.
        """
        img = Image.open(img_name).convert('RGB')
        img = self.transforms(img)
        if input_dim == 1:
            img = img[0, ...] * 0.299 + img[1, ...] * 0.587 + img[2, ...] * 0.114
            img = img.unsqueeze(0)
        return img

    def __len__(self):
        """Number of images found in the folder."""
        return self.size

class dataset_unpair(data.Dataset):
    """Unpaired two-domain dataset over ``<dataroot>/<phase>A`` and ``.../<phase>B``.

    Each item is an (image_A, image_B) pair; the larger domain is indexed
    directly while a random sample is drawn from the smaller one, so the
    pairing is unaligned by construction.
    """

    def __init__(self, opts):
        """Index both domain folders and build the transform pipeline.

        opts -- options object; reads dataroot, phase, input_dim_a,
                input_dim_b, resize_size, crop_size, no_flip
        """
        self.dataroot = opts.dataroot

        # Domain A file list
        folder_A = os.path.join(self.dataroot, opts.phase + 'A')
        self.A = [os.path.join(folder_A, name) for name in os.listdir(folder_A)]

        # Domain B file list
        folder_B = os.path.join(self.dataroot, opts.phase + 'B')
        self.B = [os.path.join(folder_B, name) for name in os.listdir(folder_B)]

        self.A_size = len(self.A)
        self.B_size = len(self.B)
        # Length of the dataset is the larger domain; the smaller is resampled.
        self.dataset_size = max(self.A_size, self.B_size)
        self.input_dim_A = opts.input_dim_a
        self.input_dim_B = opts.input_dim_b

        # Training uses random crop (+ optional flip) for augmentation;
        # any other phase uses a deterministic center crop.
        pipeline = [Resize((opts.resize_size, opts.resize_size), Image.BICUBIC)]
        if opts.phase == 'train':
            pipeline.append(RandomCrop(opts.crop_size))
        else:
            pipeline.append(CenterCrop(opts.crop_size))
        if not opts.no_flip:
            pipeline.append(RandomHorizontalFlip())
        pipeline.append(ToTensor())
        pipeline.append(Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]))
        self.transforms = Compose(pipeline)
        print('A: %d, B: %d images'%(self.A_size, self.B_size))
        return

    def __getitem__(self, index):
        """Return one unaligned (A, B) pair of transformed tensors."""
        if self.dataset_size == self.A_size:
            # A is the larger (or equal) domain: index A, sample B at random.
            data_A = self.load_img(self.A[index], self.input_dim_A)
            data_B = self.load_img(self.B[random.randint(0, self.B_size - 1)], self.input_dim_B)
        else:
            # B is the larger domain: sample A at random, index B.
            data_A = self.load_img(self.A[random.randint(0, self.A_size - 1)], self.input_dim_A)
            data_B = self.load_img(self.B[index], self.input_dim_B)
        return data_A, data_B

    def load_img(self, img_name, input_dim):
        """Open ``img_name`` as RGB, transform it, and reduce channels if asked.

        When input_dim == 1 the RGB channels are combined with the ITU-R
        BT.601 luma weights (0.299, 0.587, 0.114) into one channel.
        """
        img = Image.open(img_name).convert('RGB')
        img = self.transforms(img)
        if input_dim == 1:
            img = img[0, ...] * 0.299 + img[1, ...] * 0.587 + img[2, ...] * 0.114
            img = img.unsqueeze(0)
        return img

    def __len__(self):
        """Length of the larger of the two domains."""
        return self.dataset_size
Loading