
Commit

fix bugs and readme
Anpei Chen committed Jun 30, 2022
1 parent ba2c43e commit 17deeed
Showing 6 changed files with 401 additions and 11 deletions.
12 changes: 5 additions & 7 deletions README.md
@@ -88,12 +88,10 @@ We provide two options for training on your own image set:
## Citation
If you find our code or paper helpful, please consider citing:
```
-@misc{TensoRF,
-      title={TensoRF: Tensorial Radiance Fields},
-      author={Anpei Chen and Zexiang Xu and Andreas Geiger and and Jingyi Yu and Hao Su},
-      year={2022},
-      eprint={2203.09517},
-      archivePrefix={arXiv},
-      primaryClass={cs.CV}
+@article{tensorf,
+  title={TensoRF: Tensorial Radiance Fields},
+  author={Chen, Anpei and Xu, Zexiang and Geiger, Andreas and Yu, Jingyi and Su, Hao},
+  journal={arXiv preprint arXiv:2203.09517},
+  year={2022}
}
```
207 changes: 207 additions & 0 deletions extra/auto_run_paramsets.py
@@ -0,0 +1,207 @@
import os
import threading, queue
import numpy as np
import time


def getFolderLocker(logFolder):
    # Acquire a cross-process lock: os.makedirs is atomic, so only one process
    # can create the lock folder; the others spin until it is released.
    while True:
        try:
            os.makedirs(logFolder + "/lockFolder")
            break
        except OSError:
            time.sleep(0.01)

def releaseFolderLocker(logFolder):
    os.removedirs(logFolder + "/lockFolder")

def getStopFolder(logFolder):
    # Creating logFolder/stopFolder by hand signals the launcher to stop.
    return os.path.isdir(logFolder + "/stopFolder")


def get_param_str(key, val):
    # `datafolder` is a module-level global set in __main__ below.
    if key == 'data_name':
        return f'--datadir {datafolder}/{val} '
    else:
        return f'--{key} {val} '

def get_param_list(param_dict):
    # Build the Cartesian product of all parameter choices.
    param_keys = list(param_dict.keys())
    param_modes = len(param_keys)
    param_nums = [len(param_dict[key]) for key in param_keys]

    param_ids = np.zeros(param_nums + [param_modes], dtype=int)
    for i in range(param_modes):
        broad_tuple = np.ones(param_modes, dtype=int).tolist()
        broad_tuple[i] = param_nums[i]
        broad_tuple = tuple(broad_tuple)
        print(broad_tuple)
        param_ids[..., i] = np.arange(param_nums[i]).reshape(broad_tuple)
    param_ids = param_ids.reshape(-1, param_modes)
    # print(param_ids)
    print(len(param_ids))

    params = []
    expnames = []
    for i in range(param_ids.shape[0]):
        one = ""
        name = ""
        param_id = param_ids[i]
        for j in range(param_modes):
            key = param_keys[j]
            val = param_dict[key][param_id[j]]
            if type(key) is tuple:
                # A tuple key couples several parameters that vary together.
                assert len(key) == len(val)
                for k in range(len(key)):
                    one += get_param_str(key[k], val[k])
                    name += f'{val[k]},'
                name = name[:-1] + '-'
            else:
                one += get_param_str(key, val)
                name += f'{val}-'
        params.append(one)
        name = name.replace(' ', '')
        print(name)
        expnames.append(name[:-1])
    # print(params)
    return params, expnames


if __name__ == '__main__':

    # nerf
    expFolder = "nerf/"
    # Parameters to iterate over; use a tuple key to couple multiple parameters.
    datafolder = '/mnt/new_disk_2/anpei/Dataset/nerf_synthetic/'
    param_dict = {
        'data_name': ['ship', 'mic', 'chair', 'lego', 'drums', 'ficus', 'hotdog', 'materials'],
        'data_dim_color': [13, 27, 54]
    }

    # n_iters = 30000
    # for data_name in ['Robot']:  # 'Bike','Lifestyle','Palace','Robot','Spaceship','Steamtrain','Toad','Wineholder'
    #     cmd = f'CUDA_VISIBLE_DEVICES={cuda} python train.py ' \
    #           f'--dataset_name nsvf --datadir /mnt/new_disk_2/anpei/Dataset/TeRF/Synthetic_NSVF/{data_name} ' \
    #           f'--expname {data_name} --batch_size {batch_size} ' \
    #           f'--n_iters {n_iters} ' \
    #           f'--N_voxel_init {128**3} --N_voxel_final {300**3} ' \
    #           f'--N_vis {5} ' \
    #           f'--n_lamb_sigma "[16,16,16]" --n_lamb_sh "[48,48,48]" ' \
    #           f'--upsamp_list "[2000, 3000, 4000, 5500,7000]" --update_AlphaMask_list "[3000,4000]" ' \
    #           f'--shadingMode MLP_Fea --fea2denseAct softplus --view_pe {2} --fea_pe {2} ' \
    #           f'--L1_weight_inital {8e-5} --L1_weight_rest {4e-5} --rm_weight_mask_thre {1e-4} --add_timestamp 0 ' \
    #           f'--render_test 1 '
    #     print(cmd)
    #     os.system(cmd)

    # nsvf
    # expFolder = "nsvf_0227/"
    # datafolder = '/mnt/new_disk_2/anpei/Dataset/TeRF/Synthetic_NSVF/'
    # param_dict = {
    #     'data_name': ['Robot','Steamtrain','Bike','Lifestyle','Palace','Spaceship','Toad','Wineholder'],
    #     'shadingMode': ['SH'],
    #     ('n_lamb_sigma', 'n_lamb_sh'): [("[8,8,8]", "[8,8,8]")],
    #     ('view_pe', 'fea_pe', 'featureC','fea2denseAct','N_voxel_init'): [(2, 2, 128, 'softplus', 128**3)],
    #     ('L1_weight_inital', 'L1_weight_rest', 'rm_weight_mask_thre'): [(4e-5, 4e-5, 1e-4)],
    #     ('n_iters','N_voxel_final'): [(30000, 300**3)],
    #     ('dataset_name','N_vis','render_test'): [("nsvf", 5, 1)],
    #     ('upsamp_list','update_AlphaMask_list'): [("[2000,3000,4000,5500,7000]", "[3000,4000]")]
    # }

    # tankstemple
    # expFolder = "tankstemple_0304/"
    # datafolder = '/mnt/new_disk_2/anpei/Dataset/TeRF/TanksAndTemple/'
    # param_dict = {
    #     'data_name': ['Truck','Barn','Caterpillar','Family','Ignatius'],
    #     'shadingMode': ['MLP_Fea'],
    #     ('n_lamb_sigma', 'n_lamb_sh'): [("[16,16,16]", "[48,48,48]")],
    #     ('view_pe', 'fea_pe','fea2denseAct','N_voxel_init','render_test'): [(2, 2, 'softplus', 128**3, 1)],
    #     ('TV_weight_density','TV_weight_app'): [(0.1, 0.01)],
    #     # ('L1_weight_inital', 'L1_weight_rest', 'rm_weight_mask_thre'): [(4e-5, 4e-5, 1e-4)],
    #     ('n_iters','N_voxel_final'): [(15000, 300**3)],
    #     ('dataset_name','N_vis'): [("tankstemple", 5)],
    #     ('upsamp_list','update_AlphaMask_list'): [("[2000,3000,4000,5500,7000]", "[2000,4000]")]
    # }

    # llff (real iconic)
    # expFolder = "real_iconic/"
    # datafolder = '/mnt/new_disk_2/anpei/Dataset/MVSNeRF/real_iconic/'
    # List = os.listdir(datafolder)
    # param_dict = {
    #     'data_name': List,
    #     ('shadingMode', 'view_pe', 'fea_pe','fea2denseAct', 'nSamples','N_voxel_init'): [('MLP_Fea', 0, 0, 'relu', 512, 128**3)],
    #     ('n_lamb_sigma', 'n_lamb_sh'): [("[16,4,4]", "[48,12,12]")],
    #     ('TV_weight_density', 'TV_weight_app'): [(1.0, 1.0)],
    #     ('n_iters','N_voxel_final'): [(25000, 640**3)],
    #     ('dataset_name','downsample_train','ndc_ray','N_vis','render_path'): [("llff", 4.0, 1, -1, 1)],
    #     ('upsamp_list','update_AlphaMask_list'): [("[2000,3000,4000,5500,7000]", "[2500]")],
    # }

    # llff
    # expFolder = "llff/"
    # datafolder = '/mnt/new_disk_2/anpei/Dataset/MVSNeRF/nerf_llff_data'
    # param_dict = {
    #     'data_name': ['fern', 'flower', 'room', 'leaves', 'horns', 'trex', 'fortress', 'orchids'],
    #     ('n_lamb_sigma', 'n_lamb_sh'): [("[16,4,4]", "[48,12,12]")],
    #     ('shadingMode', 'view_pe', 'fea_pe', 'featureC','fea2denseAct', 'nSamples','N_voxel_init'): [('MLP_Fea', 0, 0, 128, 'relu', 512, 128**3), ('SH', 0, 0, 128, 'relu', 512, 128**3)],
    #     ('TV_weight_density', 'TV_weight_app'): [(1.0, 1.0)],
    #     ('n_iters','N_voxel_final'): [(25000, 640**3)],
    #     ('dataset_name','downsample_train','ndc_ray','N_vis','render_test','render_path'): [("llff", 4.0, 1, -1, 1, 1)],
    #     ('upsamp_list','update_AlphaMask_list'): [("[2000,3000,4000,5500,7000]", "[2500]")],
    # }

    # Set available GPUs; each worker takes one GPU id from the queue.
    gpus_que = queue.Queue(3)
    for i in [1, 2, 3]:
        gpus_que.put(i)

    os.makedirs(f"log/{expFolder}", exist_ok=True)

    def run_program(gpu, expname, param):
        cmd = f'CUDA_VISIBLE_DEVICES={gpu} python train.py ' \
              f'--expname {expname} --basedir ./log/{expFolder} --config configs/lego.txt ' \
              f'{param}' \
              f'> "log/{expFolder}{expname}/{expname}.txt"'
        print(cmd)
        os.system(cmd)
        # Return the GPU to the pool once training finishes.
        gpus_que.put(gpu)

    params, expnames = get_param_list(param_dict)

    logFolder = f"log/{expFolder}"
    os.makedirs(logFolder, exist_ok=True)

    ths = []
    for i in range(len(params)):
        if getStopFolder(logFolder):
            break

        targetFolder = f"log/{expFolder}{expnames[i]}"
        gpu = gpus_que.get()
        getFolderLocker(logFolder)
        if os.path.isdir(targetFolder):
            # Another process already claimed this experiment; skip it.
            releaseFolderLocker(logFolder)
            gpus_que.put(gpu)
            continue
        else:
            os.makedirs(targetFolder, exist_ok=True)
            print("making", targetFolder, "running", expnames[i], params[i])
            releaseFolderLocker(logFolder)

        t = threading.Thread(target=run_program, args=(gpu, expnames[i], params[i]), daemon=True)
        t.start()
        ths.append(t)

    for th in ths:
        th.join()
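
For reference, here is a minimal, hypothetical sketch of how `get_param_list` expands a parameter grid into command-line strings and experiment names; the `param_dict` below and the `/data/nerf_synthetic` path are illustrative, not taken from the repository:

```python
# Hypothetical example: expand a 2x2 grid into CLI strings and experiment names.
datafolder = '/data/nerf_synthetic'  # assumed path; get_param_str reads this global

param_dict = {
    'data_name': ['lego', 'chair'],
    ('view_pe', 'fea_pe'): [(2, 2), (0, 0)],  # tuple key: parameters varied together
}
params, expnames = get_param_list(param_dict)
# params[0]   -> '--datadir /data/nerf_synthetic/lego --view_pe 2 --fea_pe 2 '
# expnames[0] -> 'lego-2,2'
```

Each entry of `params` is then handed to `run_program`, which prefixes it with the fixed `train.py` invocation.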
182 changes: 182 additions & 0 deletions extra/compute_metrics.py
@@ -0,0 +1,182 @@
import os, math
import numpy as np
import scipy.signal
from typing import List, Optional
from PIL import Image
import torch
import configargparse

__LPIPS__ = {}
def init_lpips(net_name, device):
    assert net_name in ['alex', 'vgg']
    import lpips
    print(f'init_lpips: lpips_{net_name}')
    return lpips.LPIPS(net=net_name, version='0.1').eval().to(device)

def rgb_lpips(np_gt, np_im, net_name, device):
    # Cache one LPIPS network per backbone so it is only built once.
    if net_name not in __LPIPS__:
        __LPIPS__[net_name] = init_lpips(net_name, device)
    gt = torch.from_numpy(np_gt).permute([2, 0, 1]).contiguous().to(device)
    im = torch.from_numpy(np_im).permute([2, 0, 1]).contiguous().to(device)
    return __LPIPS__[net_name](gt, im, normalize=True).item()


def findItem(items, target):
    for one in items:
        if one[:len(target)] == target:
            return one
    return None


''' Evaluation metrics (ssim, lpips)
'''
def rgb_ssim(img0, img1, max_val,
             filter_size=11,
             filter_sigma=1.5,
             k1=0.01,
             k2=0.03,
             return_map=False):
    # Modified from https://github.com/google/mipnerf/blob/16e73dfdb52044dcceb47cda5243a686391a6e0f/internal/math.py#L58
    assert len(img0.shape) == 3
    assert img0.shape[-1] == 3
    assert img0.shape == img1.shape

    # Construct a 1D Gaussian blur filter.
    hw = filter_size // 2
    shift = (2 * hw - filter_size + 1) / 2
    f_i = ((np.arange(filter_size) - hw + shift) / filter_sigma)**2
    filt = np.exp(-0.5 * f_i)
    filt /= np.sum(filt)

    # Blur in x and y (faster than the 2D convolution).
    def convolve2d(z, f):
        return scipy.signal.convolve2d(z, f, mode='valid')

    filt_fn = lambda z: np.stack([
        convolve2d(convolve2d(z[..., i], filt[:, None]), filt[None, :])
        for i in range(z.shape[-1])], -1)
    mu0 = filt_fn(img0)
    mu1 = filt_fn(img1)
    mu00 = mu0 * mu0
    mu11 = mu1 * mu1
    mu01 = mu0 * mu1
    sigma00 = filt_fn(img0**2) - mu00
    sigma11 = filt_fn(img1**2) - mu11
    sigma01 = filt_fn(img0 * img1) - mu01

    # Clip the variances and covariances to valid values.
    # Variance must be non-negative:
    sigma00 = np.maximum(0., sigma00)
    sigma11 = np.maximum(0., sigma11)
    sigma01 = np.sign(sigma01) * np.minimum(
        np.sqrt(sigma00 * sigma11), np.abs(sigma01))
    c1 = (k1 * max_val)**2
    c2 = (k2 * max_val)**2
    numer = (2 * mu01 + c1) * (2 * sigma01 + c2)
    denom = (mu00 + mu11 + c1) * (sigma00 + sigma11 + c2)
    ssim_map = numer / denom
    ssim = np.mean(ssim_map)
    return ssim_map if return_map else ssim


if __name__ == '__main__':

    parser = configargparse.ArgumentParser()
    parser.add_argument("--exp", type=str, help="folder of exps")
    parser.add_argument("--paramStr", type=str, help="str of params")
    args = parser.parse_args()

    # Uncomment one of the dataset blocks below: `datanames`, `gtFolder`,
    # and `expFolder` must be defined before the loop runs.
    # datanames = ['drums','hotdog','materials','ficus','lego','mic','ship','chair']
    # gtFolder = "/home/code-base/user_space/codes/nerf/data/nerf_synthetic"
    # expFolder = "/home/code-base/user_space/codes/TensoRF/log/"+args.exp

    # datanames = ['room','fortress', 'flower','orchids','leaves','horns','trex','fern']
    # gtFolder = "/mnt/new_disk_2/anpei/Dataset/MVSNeRF/nerf_llff_data/"
    # expFolder = "/mnt/new_disk_2/anpei/code/TensoRF/log/"+args.exp
    paramStr = args.paramStr
    fileNum = 200

    expitems = os.listdir(expFolder)
    finalFolder = f'{expFolder}/finals/{paramStr}'
    outFile = f'{finalFolder}/{paramStr}_metrics.txt'
    os.makedirs(finalFolder, exist_ok=True)

    expitems.sort(reverse=True)


    with open(outFile, 'w') as f:
        all_psnr = []
        all_ssim = []
        all_alex = []
        all_vgg = []
        for dataname in datanames:

            gtstr = gtFolder + "/" + dataname + "/test/r_%d.png"
            expname = findItem(expitems, f'{paramStr}-{dataname}')
            print("expname: ", expname)
            if expname is None:
                print("no", dataname, "exists")
                continue
            resultstr = expFolder + "/" + expname + "/imgs_test_all/" + dataname + "-" + paramStr + "_%03d.png"
            metric_file = f'{expFolder}/{expname}/imgs_test_all/{paramStr}-{dataname}_mean.txt'
            video_file = f'{expFolder}/{expname}/imgs_test_all/{paramStr}-{dataname}_video.mp4'

            # Reuse metrics that were already computed for this experiment.
            exist_metric = False
            if os.path.isfile(metric_file):
                metrics = np.loadtxt(metric_file)
                print(metrics, metrics.tolist())
                if metrics.size == 4:
                    psnr, ssim, l_a, l_v = metrics.tolist()
                    exist_metric = True
                    os.system(f"cp {video_file} {finalFolder}/")

            if not exist_metric:
                psnrs = []
                ssims = []
                l_alex = []
                l_vgg = []
                for i in range(fileNum):
                    # Composite the RGBA ground truth onto a white background.
                    gt = np.asarray(Image.open(gtstr % i), dtype=np.float32) / 255.0
                    gtmask = gt[..., [3]]
                    gt = gt[..., :3]
                    gt = gt * gtmask + (1 - gtmask)
                    img = np.asarray(Image.open(resultstr % i), dtype=np.float32)[..., :3] / 255.0

                    psnr = -10. * np.log10(np.mean(np.square(img - gt)))
                    ssim = rgb_ssim(img, gt, 1)
                    lpips_alex = rgb_lpips(gt, img, 'alex', 'cuda')
                    lpips_vgg = rgb_lpips(gt, img, 'vgg', 'cuda')

                    print(i, psnr, ssim, lpips_alex, lpips_vgg)
                    psnrs.append(psnr)
                    ssims.append(ssim)
                    l_alex.append(lpips_alex)
                    l_vgg.append(lpips_vgg)
                psnr = np.mean(np.array(psnrs))
                ssim = np.mean(np.array(ssims))
                l_a = np.mean(np.array(l_alex))
                l_v = np.mean(np.array(l_vgg))

            rS = f'{dataname} : psnr {psnr} ssim {ssim} l_a {l_a} l_v {l_v}'
            print(rS)
            f.write(rS + "\n")

            all_psnr.append(psnr)
            all_ssim.append(ssim)
            all_alex.append(l_a)
            all_vgg.append(l_v)

        psnr = np.mean(np.array(all_psnr))
        ssim = np.mean(np.array(all_ssim))
        l_a = np.mean(np.array(all_alex))
        l_v = np.mean(np.array(all_vgg))

        rS = f'mean : psnr {psnr} ssim {ssim} l_a {l_a} l_v {l_v}'
        print(rS)
        f.write(rS + "\n")
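
As a quick smoke test, both metric helpers can be exercised on synthetic arrays; the only assumptions are H x W x 3 float inputs in [0, 1] and an installed `lpips` package:

```python
# Hypothetical sanity check for rgb_ssim / rgb_lpips.
import numpy as np

gt  = np.random.rand(64, 64, 3).astype(np.float32)
img = np.clip(gt + 0.05 * np.random.randn(64, 64, 3).astype(np.float32), 0, 1)

print('ssim :', rgb_ssim(img, gt, max_val=1))       # near 1 for similar images
print('lpips:', rgb_lpips(gt, img, 'alex', 'cpu'))  # lower is better
```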
4 changes: 2 additions & 2 deletions models/tensoRF.py
@@ -193,13 +193,13 @@ def density_L1(self):
    def TV_loss_density(self, reg):
        total = 0
        for idx in range(len(self.density_plane)):
-            total = total + reg(self.density_plane[idx]) * 1e-2 + reg(self.density_line[idx]) * 1e-3
+            total = total + reg(self.density_plane[idx]) * 1e-2  # + reg(self.density_line[idx]) * 1e-3
        return total

    def TV_loss_app(self, reg):
        total = 0
        for idx in range(len(self.app_plane)):
-            total = total + reg(self.app_plane[idx]) * 1e-2 + reg(self.app_line[idx]) * 1e-3
+            total = total + reg(self.app_plane[idx]) * 1e-2  # + reg(self.app_line[idx]) * 1e-3
        return total

    def compute_densityfeature(self, xyz_sampled):
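
This change keeps the TV penalty on the 2D plane factors and comments out the term on the 1D line factors. For intuition only, a simplified stand-in for the `reg` callable might look like the sketch below; the repository's actual `TVLoss` in utils.py may differ in normalization and weighting:

```python
# Minimal 2D total-variation sketch for a plane factor of shape [1, C, H, W].
import torch

def tv_2d(plane: torch.Tensor) -> torch.Tensor:
    h_tv = torch.square(plane[:, :, 1:, :] - plane[:, :, :-1, :]).mean()  # vertical differences
    w_tv = torch.square(plane[:, :, :, 1:] - plane[:, :, :, :-1]).mean()  # horizontal differences
    return h_tv + w_tv
```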
2 changes: 1 addition & 1 deletion train.py
@@ -204,7 +204,7 @@ def reconstruction(args):
            summary_writer.add_scalar('train/reg_tv_density', loss_tv.detach().item(), global_step=iteration)
        if TV_weight_app > 0:
            TV_weight_app *= lr_factor
-            loss_tv = loss_tv + tensorf.TV_loss_app(tvreg)*TV_weight_app
+            loss_tv = tensorf.TV_loss_app(tvreg)*TV_weight_app
            total_loss = total_loss + loss_tv
            summary_writer.add_scalar('train/reg_tv_app', loss_tv.detach().item(), global_step=iteration)
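
The old line reused `loss_tv` from the density branch, so the density TV term was added to `total_loss` a second time and the `train/reg_tv_app` scalar logged the sum of both terms. After the fix, each branch computes and logs its own term. A sketch of the corrected flow, with names taken from the diff context:

```python
# Sketch of the corrected TV-regularization flow in the training loop.
if TV_weight_density > 0:
    loss_tv = tensorf.TV_loss_density(tvreg) * TV_weight_density
    total_loss = total_loss + loss_tv   # density TV counted exactly once
if TV_weight_app > 0:
    loss_tv = tensorf.TV_loss_app(tvreg) * TV_weight_app
    total_loss = total_loss + loss_tv   # appearance TV counted exactly once
```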

5 changes: 4 additions & 1 deletion utils.py
@@ -52,7 +52,8 @@ def visualize_depth(depth, minmax=None, cmap=cv2.COLORMAP_JET):

def N_to_reso(n_voxels, bbox):
    xyz_min, xyz_max = bbox
-    voxel_size = ((xyz_max - xyz_min).prod() / n_voxels).pow(1 / 3)
+    dim = len(xyz_min)
+    voxel_size = ((xyz_max - xyz_min).prod() / n_voxels).pow(1 / dim)
    return ((xyz_max - xyz_min) / voxel_size).long().tolist()

def cal_n_samples(reso, step_ratio=0.5):
@@ -153,6 +154,8 @@ def forward(self,x):
    def _tensor_size(self, t):
        return t.size()[1] * t.size()[2] * t.size()[3]
+
+
import plyfile
import skimage.measure
def convert_sdf_samples_to_ply(
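
The `N_to_reso` fix replaces the hard-coded cube root with the dimensionality of the bounding box, so the voxel-size formula `voxel_size = (volume / n_voxels)^(1/dim)` also holds for non-3D boxes. A small, hypothetical check:

```python
# Hypothetical check: a 2 x 2 x 2 box with a budget of 8 voxels should give
# voxel_size = (8 / 8) ** (1 / 3) = 1.0 and hence a [2, 2, 2] grid.
import torch

bbox = torch.tensor([[0., 0., 0.], [2., 2., 2.]])
print(N_to_reso(8, bbox))  # -> [2, 2, 2]
```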
