import pickle
import numpy as np
import os
import sys
import bpy
import math
import shutil
import json
import time
from mathutils import Vector, Matrix
import argparse
import glob
import colorsys
import bmesh
from mathutils.bvhtree import BVHTree
"""
Blender python script for rendering all visual effects.
"""
context = bpy.context
scene = context.scene
render = scene.render
#########################################################
# Ensure all collections and objects are visible
#########################################################
def ensure_collection_visibility(collection_name):
    if collection_name in bpy.data.collections:
        collection = bpy.data.collections[collection_name]
        collection.hide_viewport = False  # Ensure collection is visible in the viewport
        collection.hide_render = False  # Ensure collection is enabled for rendering
    else:
        print(f"Collection '{collection_name}' not found.")

def enable_render_for_all_objects():
    for obj in bpy.data.objects:
        obj.hide_viewport = False  # Ensure the object is visible in the viewport
        obj.hide_render = False  # Ensure the object is visible in the render
ensure_collection_visibility("Collection") # Ensure default collection is visible and renderable
enable_render_for_all_objects() # Ensure all objects are visible in the render
#########################################################
# Handle duplicate objects (not used)
#########################################################
# def duplicate_hierarchy(obj, parent=None):
#     """Recursively duplicate an object and all its children."""
#     # Duplicate the object (without the data)
#     new_obj = obj.copy()
#     # Link the object data if it exists (for meshes, curves, etc.)
#     if new_obj.data:
#         new_obj.data = obj.data.copy()
#     # If a parent is specified, set the duplicated object's parent
#     if parent:
#         new_obj.parent = parent
#     # Link the new object to the collection
#     bpy.context.collection.objects.link(new_obj)
#     # Recursively duplicate children
#     for child in obj.children:
#         duplicate_hierarchy(child, new_obj)
#     return new_obj

# def create_linked_duplicate(object_name: str) -> None:
#     """Creates a linked duplicate of the given object."""
#     original_obj = bpy.data.objects.get(object_name)
#     if original_obj:
#         new_obj = duplicate_hierarchy(original_obj)
#     else:
#         new_obj = None
#         print(f"Object '{object_name}' not found.")
#     return new_obj
#########################################################
# Argument parser for blender: https://blender.stackexchange.com/questions/6817/how-to-pass-command-line-arguments-to-a-blender-python-script
#########################################################
class ArgumentParserForBlender(argparse.ArgumentParser):
    """
    This class is identical to its superclass, except for the parse_args
    method (see docstring). It resolves the ambiguity generated when calling
    Blender from the CLI with a python script, and both Blender and the script
    have arguments. E.g., the following call will make Blender crash because
    it will try to process the script's -a and -b flags:
    >>> blender --python my_script.py -a 1 -b 2
    To bypass this issue this class uses the fact that Blender will ignore all
    arguments given after a double-dash ('--'). The approach is that all
    arguments before '--' go to Blender, arguments after go to the script.
    The following calls work fine:
    >>> blender --python my_script.py -- -a 1 -b 2
    >>> blender --python my_script.py --
    """

    def _get_argv_after_doubledash(self):
        """
        Given the sys.argv as a list of strings, this method returns the
        sublist right after the '--' element (if present, otherwise returns
        an empty list).
        """
        try:
            idx = sys.argv.index("--")
            return sys.argv[idx+1:]  # the list after '--'
        except ValueError:  # '--' not in the list
            return []

    # overrides superclass
    def parse_args(self):
        """
        This method is expected to behave identically as in the superclass,
        except that the sys.argv list will be pre-processed using
        _get_argv_after_doubledash before. See the docstring of the class for
        usage examples and details.
        """
        return super().parse_args(args=self._get_argv_after_doubledash())
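# Illustrative usage sketch (commented out). The argument name below is a hypothetical
# placeholder, not one defined elsewhere in this script:
# >>> blender --background --python all_rendering.py -- --scene_dir /path/to/scene
# parser = ArgumentParserForBlender()
# parser.add_argument("--scene_dir", type=str, required=True)
# args = parser.parse_args()  # only sees the arguments that come after '--'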
#########################################################
# Blender scene setup
#########################################################
def reset_scene() -> None:
    """Resets the scene to a clean state."""
    # delete every object except cameras
    for obj in bpy.data.objects:
        if obj.type not in {"CAMERA"}:
            bpy.data.objects.remove(obj, do_unlink=True)
    # delete all the materials
    for material in bpy.data.materials:
        bpy.data.materials.remove(material, do_unlink=True)
    # delete all the textures
    for texture in bpy.data.textures:
        bpy.data.textures.remove(texture, do_unlink=True)
    # delete all the images
    for image in bpy.data.images:
        bpy.data.images.remove(image, do_unlink=True)

def setup_blender_env(img_width, img_height):
    reset_scene()
    # Set render engine and parameters
    render.engine = 'CYCLES'
    render.image_settings.file_format = "PNG"
    render.image_settings.color_mode = "RGBA"
    render.resolution_x = img_width
    render.resolution_y = img_height
    render.resolution_percentage = 100
    scene.cycles.device = "GPU"
    scene.cycles.preview_samples = 64
    scene.cycles.samples = 64  # 32 for testing, 256 or 512 for final
    scene.cycles.use_denoising = True
    scene.render.film_transparent = True
    scene.cycles.film_exposure = 2.0
    # Set the device_type (from Zhihao's code; not sure why this needs to be specified)
    preferences = context.preferences
    preferences.addons["cycles"].preferences.compute_device_type = "CUDA"  # or "OPENCL"
    # get_devices() lets Blender detect the GPU devices
    preferences.addons["cycles"].preferences.get_devices()
    print(preferences.addons["cycles"].preferences.compute_device_type)
    for d in preferences.addons["cycles"].preferences.devices:
        d["use"] = 1  # Use all devices, including GPU and CPU
        print(d["name"], d["use"])
#########################################################
# Blender camera setup
#########################################################
def create_camera_list(c2w, K):
    """
    Create a list of camera parameters
    Args:
        c2w: (N, 4, 4) camera to world transform
        K: (3, 3) or (N, 3, 3) camera intrinsic matrix
    """
    cam_list = []
    for i in range(len(c2w)):
        pose = c2w[i].reshape(-1, 4)
        if len(K.shape) == 3:
            cam_list.append({'c2w': pose, 'K': K[i]})
        else:
            cam_list.append({'c2w': pose, 'K': K})
    return cam_list

def setup_camera():
    # Find a camera in the scene
    cam = None
    for obj in bpy.data.objects:
        if obj.type == 'CAMERA':
            cam = obj
            print("found camera")
            break
    # If no camera is found, create a new one
    if cam is None:
        bpy.ops.object.camera_add()
        cam = bpy.context.object
    # Set the camera as the active camera for the scene
    bpy.context.scene.camera = cam
    return cam

class Camera():
    def __init__(self, im_height, im_width, out_dir):
        os.makedirs(out_dir, exist_ok=True)
        self.out_dir = out_dir
        self.w = im_width
        self.h = im_height
        self.camera = setup_camera()

    def set_camera(self, K, c2w):
        self.K = K      # (3, 3)
        self.c2w = c2w  # (3 or 4, 4), camera to world transform
        # original camera model: x: right, y: down, z: forward (OpenCV, COLMAP format)
        # Blender camera model:  x: right, y: up,   z: backward (OpenGL, NeRF format)
        self.camera.data.type = 'PERSP'
        self.camera.data.lens_unit = 'FOV'
        f = K[0, 0]
        rad = 2 * np.arctan(self.w / (2 * f))
        self.camera.data.angle = rad
        self.camera.data.sensor_fit = 'HORIZONTAL'  # 'HORIZONTAL' keeps the horizontal FOV correct (recommended)
        # f = K[1, 1]
        # rad = 2 * np.arctan(self.h / (2 * f))
        # self.camera.data.angle = rad
        # self.camera.data.sensor_fit = 'VERTICAL'  # 'VERTICAL' keeps the vertical FOV correct
        self.pose = self.transform_pose(c2w)
        self.camera.matrix_world = Matrix(self.pose)

    def transform_pose(self, pose):
        '''
        Transform camera-to-world matrix
        Input:  (3 or 4, 4) x: right, y: down, z: forward
        Output: (4, 4)      x: right, y: up,   z: backward
        '''
        pose_bl = np.zeros((4, 4))
        pose_bl[3, 3] = 1
        # camera position remains the same
        pose_bl[:3, 3] = pose[:3, 3]
        R_c2w = pose[:3, :3]
        transform = np.array([
            [1,  0,  0],
            [0, -1,  0],
            [0,  0, -1]
        ])
        R_c2w_bl = R_c2w @ transform
        pose_bl[:3, :3] = R_c2w_bl
        return pose_bl

    def initialize_depth_extractor(self):
        bpy.context.scene.view_layers["ViewLayer"].use_pass_z = True
        bpy.context.view_layer.cycles.use_denoising = True
        bpy.context.view_layer.cycles.denoising_store_passes = True
        bpy.context.scene.use_nodes = True
        nodes = bpy.context.scene.node_tree.nodes
        links = bpy.context.scene.node_tree.links
        render_layers = nodes['Render Layers']
        depth_file_output = nodes.new(type="CompositorNodeOutputFile")
        depth_file_output.name = 'File Output Depth'
        depth_file_output.format.file_format = 'OPEN_EXR'
        links.new(render_layers.outputs[2], depth_file_output.inputs[0])

    def render_single_timestep_rgb_and_depth(self, cam_info, FRAME_INDEX, dir_name_rgb='rgb', dir_name_depth='depth'):
        dir_path_rgb = os.path.join(self.out_dir, dir_name_rgb)
        dir_path_depth = os.path.join(self.out_dir, dir_name_depth)
        os.makedirs(dir_path_rgb, exist_ok=True)
        os.makedirs(dir_path_depth, exist_ok=True)
        self.set_camera(cam_info['K'], cam_info['c2w'])
        # Set paths for both RGB and depth outputs
        depth_output_path = os.path.join(dir_path_depth, '{:0>3d}'.format(FRAME_INDEX))
        rgb_output_path = os.path.join(dir_path_rgb, '{:0>3d}.png'.format(FRAME_INDEX))
        # Assuming your Blender setup has nodes named accordingly
        bpy.context.scene.render.filepath = rgb_output_path
        bpy.data.scenes["Scene"].node_tree.nodes["File Output Depth"].base_path = depth_output_path
        bpy.ops.render.render(use_viewport=True, write_still=True)
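# Illustrative usage sketch (commented out). `c2w_array`, `K_matrix`, and the output
# directory below are hypothetical placeholders, not variables defined in this script:
# cam = Camera(im_height=1080, im_width=1920, out_dir='./output')
# cam.initialize_depth_extractor()  # enable the z-pass and compositor depth output once
# cam_list = create_camera_list(c2w_array, K_matrix)
# for frame_idx, cam_info in enumerate(cam_list):
#     scene.frame_set(frame_idx)
#     cam.render_single_timestep_rgb_and_depth(cam_info, frame_idx)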
#########################################################
# Blender lighting setup
#########################################################
def add_env_lighting(env_map_path: str, strength: float = 1.0):
    """
    Add environment lighting to the scene with controllable strength.
    Args:
        env_map_path (str): Path to the environment map.
        strength (float): Strength of the environment map.
    """
    # Ensure that we are using nodes for the world's material
    world = bpy.context.scene.world
    world.use_nodes = True
    nodes = world.node_tree.nodes
    nodes.clear()
    # Create an environment texture node and load the image
    env = nodes.new('ShaderNodeTexEnvironment')
    env.image = bpy.data.images.load(env_map_path)
    # Create a Background node and set its strength
    background = nodes.new('ShaderNodeBackground')
    background.inputs['Strength'].default_value = strength
    # Create an Output node
    out = nodes.new('ShaderNodeOutputWorld')
    # Link nodes together
    links = world.node_tree.links
    links.new(env.outputs['Color'], background.inputs['Color'])
    links.new(background.outputs['Background'], out.inputs['Surface'])

def add_emitter_lighting(obj: bpy.types.Object, strength: float = 100.0, color=(1, 1, 1)):
    """
    Add an emitter light to the object with controllable strength and color.
    """
    # Create a new material for the object
    mat = bpy.data.materials.new(name='EmitterMaterial')
    obj.data.materials.clear()
    obj.data.materials.append(mat)
    # Set the material to use nodes
    mat.use_nodes = True
    nodes = mat.node_tree.nodes
    nodes.clear()
    # Create an Emission node and set its strength and color
    emission = nodes.new('ShaderNodeEmission')
    emission.inputs['Strength'].default_value = strength
    emission.inputs['Color'].default_value = (*color, 1.0)
    # Create an Output node
    out = nodes.new('ShaderNodeOutputMaterial')
    # Link nodes together
    links = mat.node_tree.links
    links.new(emission.outputs['Emission'], out.inputs['Surface'])

def add_sun_lighting(strength: float = 1.0, direction=(0, 0, 1)):
    """
    Add a sun light to the scene with controllable strength and direction.
    Args:
        strength (float): Strength of the sun light.
        direction (tuple): Direction of the sun light.
    """
    sun_name = 'Sun'
    sun = bpy.data.objects.get(sun_name)
    if sun is None:
        bpy.ops.object.light_add(type='SUN', location=(0, 0, 0))
        sun = bpy.context.object
        sun.name = sun_name
    direction = Vector(direction)
    direction.normalize()
    rotation = direction.to_track_quat('Z', 'Y').to_euler()
    sun.rotation_euler = rotation
    sun.data.energy = strength
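# Illustrative usage sketch (commented out; the HDR path below is a hypothetical placeholder):
# add_env_lighting('/path/to/environment.hdr', strength=1.0)
# add_sun_lighting(strength=3.0, direction=(0.2, -0.3, 1.0))  # direction is normalized internally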
#########################################################
# Object manipulation
#########################################################
def object_meshes(single_obj):
    for obj in [single_obj] + single_obj.children_recursive:
        if isinstance(obj.data, (bpy.types.Mesh)):
            yield obj

def scene_meshes():
    for obj in bpy.context.scene.objects.values():
        if isinstance(obj.data, (bpy.types.Mesh)):
            yield obj

def scene_root_objects():
    for obj in bpy.context.scene.objects.values():
        if not obj.parent:
            yield obj

def scene_bbox(single_obj=None, ignore_matrix=False):
    bpy.ops.object.select_all(action="DESELECT")
    bbox_min = (math.inf,) * 3
    bbox_max = (-math.inf,) * 3
    found = False
    for obj in scene_meshes() if single_obj is None else object_meshes(single_obj):
        found = True
        for coord in obj.bound_box:
            coord = Vector(coord)
            if not ignore_matrix:
                coord = obj.matrix_world @ coord
            bbox_min = tuple(min(x, y) for x, y in zip(bbox_min, coord))
            bbox_max = tuple(max(x, y) for x, y in zip(bbox_max, coord))
    if not found:
        raise RuntimeError("no objects in scene to compute bounding box for")
    return Vector(bbox_min), Vector(bbox_max)

def normalize_scene(single_obj):
    bbox_min, bbox_max = scene_bbox(single_obj)
    scale = 1 / max(bbox_max - bbox_min)
    single_obj.scale = single_obj.scale * scale
    bpy.context.view_layer.update()  # Ensure the scene is fully updated
    bbox_min, bbox_max = scene_bbox(single_obj)
    offset = -(bbox_min + bbox_max) / 2
    single_obj.matrix_world.translation += offset
    bpy.ops.object.select_all(action="DESELECT")

def load_object(object_path: str) -> bpy.types.Object:
    """Loads an object asset into the scene."""
    # import the object
    if object_path.endswith(".glb") or object_path.endswith(".gltf"):
        bpy.ops.import_scene.gltf(filepath=object_path, merge_vertices=True)
    elif object_path.endswith(".fbx"):
        bpy.ops.import_scene.fbx(filepath=object_path, axis_forward='Y', axis_up='Z')
    elif object_path.endswith(".ply"):
        # bpy.ops.import_mesh.ply(filepath=object_path)  # only used for snap blender
        bpy.ops.wm.ply_import(filepath=object_path, forward_axis='Y', up_axis='Z')  # used for blender 4.0 & snap blender
    elif object_path.endswith(".obj"):
        # bpy.ops.import_scene.obj(filepath=object_path, use_split_objects=False, forward_axis='Y', up_axis='Z')  # only used for snap blender
        bpy.ops.wm.obj_import(filepath=object_path, use_split_objects=False, forward_axis='Y', up_axis='Z')  # used for blender 4.0 & snap blender
    ##### This part is used for ChatSim assets #####
    elif object_path.endswith(".blend"):
        blend_path = object_path
        new_obj_name = 'chatsim_' + blend_path.split('/')[-1].split('.')[0]
        model_obj_name = 'Car'  # general name used for all assets in ChatSim
        with bpy.data.libraries.load(blend_path) as (data_from, data_to):
            data_to.objects = data_from.objects
        for obj in data_to.objects:  # the part that actually imports the objects
            if obj.name == model_obj_name:
                bpy.context.collection.objects.link(obj)
        if model_obj_name in bpy.data.objects:  # rename the object to avoid conflicts
            imported_object = bpy.data.objects[model_obj_name]
            imported_object.name = new_obj_name
            print(f"rename {model_obj_name} to {new_obj_name}")
        for slot in imported_object.material_slots:  # rename the materials to avoid conflicts
            material = slot.material
            if material:
                material.name = new_obj_name + "_" + material.name
        return imported_object
    else:
        raise ValueError(f"Unsupported file type: {object_path}")
    new_obj = bpy.context.object
    return new_obj
def merge_meshes(obj):
    """
    Merge all meshes within the object into a single mesh
    Args:
        obj: blender object
    """
    all_object_nodes = [obj] + obj.children_recursive
    mesh_objects = [node for node in all_object_nodes if node.type == 'MESH']
    bpy.ops.object.select_all(action='DESELECT')  # Deselect all objects first
    for mesh_obj in mesh_objects:
        mesh_obj.select_set(True)  # Select each mesh object
        bpy.context.view_layer.objects.active = mesh_obj  # Set as active object
    bpy.ops.object.join()
    bpy.ops.object.select_all(action="DESELECT")

def remove_empty_nodes_v1(obj):
    """
    Remove empty nodes in the object (original version)
    Args:
        obj: blender object
    Returns:
        obj: blender object with empty nodes removed, only keeping the mesh nodes
    """
    all_object_nodes = [obj] + obj.children_recursive
    # find the mesh node first
    current_obj = None
    world_matrix = None
    for obj_node in all_object_nodes:
        if obj_node.type == 'MESH':
            current_obj = obj_node  # after merging, only one mesh node is left
            world_matrix = obj_node.matrix_world.copy()
            break
    # perform removal
    for obj_node in all_object_nodes:
        if obj_node != current_obj:
            bpy.data.objects.remove(obj_node, do_unlink=True)
    # apply the world transform back
    current_obj.matrix_world = world_matrix
    bpy.ops.object.select_all(action="DESELECT")
    return current_obj

def remove_empty_nodes_v2(obj):
    """
    Remove empty nodes in the object while preserving mesh transformations.
    Args:
        obj: Blender object, typically the root of a hierarchy.
    Returns:
        Blender object with empty nodes removed, only keeping the mesh nodes.
    """
    all_object_nodes = [obj] + obj.children_recursive
    mesh_node = None
    # Find the first mesh node
    for obj_node in all_object_nodes:
        if obj_node.type == 'MESH':
            mesh_node = obj_node
            break
    if mesh_node:
        # Clear parent to apply transformation, if any
        mesh_node.matrix_world = mesh_node.matrix_local if mesh_node.parent is None else mesh_node.matrix_world
        mesh_node.parent = None
        # Perform removal of other nodes
        for obj_node in all_object_nodes:
            if obj_node != mesh_node:
                bpy.data.objects.remove(obj_node, do_unlink=True)
    bpy.ops.object.select_all(action="DESELECT")
    mesh_node.select_set(True)
    bpy.context.view_layer.objects.active = mesh_node
    return mesh_node
def transform_object_origin(obj, set_origin_to_bottom=True):
    """
    Transform the object to align with the scene, making the bottom point or center point of the object the origin
    Args:
        obj: blender object
    """
    bbox_min, bbox_max = scene_bbox(obj)
    new_origin = np.zeros(3)
    new_origin[0] = (bbox_max[0] + bbox_min[0]) / 2.
    new_origin[1] = (bbox_max[1] + bbox_min[1]) / 2.
    if set_origin_to_bottom:
        new_origin[2] = bbox_min[2]
    else:
        new_origin[2] = (bbox_max[2] + bbox_min[2]) / 2.
    all_object_nodes = [obj] + obj.children_recursive
    ## move the asset origin to the new origin
    for obj_node in all_object_nodes:
        if obj_node.data:
            me = obj_node.data
            mw = obj_node.matrix_world
            matrix = obj_node.matrix_world
            o = Vector(new_origin)
            o = matrix.inverted() @ o
            me.transform(Matrix.Translation(-o))
            mw.translation = mw @ o
    ## move all transforms to the origin (no need to, since Empty objects have all been removed)
    # for obj_node in all_object_nodes:
    #     obj_node.matrix_world.translation = [0, 0, 0]
    #     obj_node.rotation_quaternion = [1, 0, 0, 0]
    bpy.ops.object.select_all(action="DESELECT")

def rotate_obj(obj, R):
    """
    Apply a rotation matrix to a blender object
    Args:
        obj: blender object
        R: (3, 3) rotation matrix
    """
    R = Matrix(R)
    obj.rotation_mode = 'QUATERNION'
    # Combine the rotations by matrix multiplication
    current_rotation = obj.rotation_quaternion.to_matrix().to_3x3()
    new_rotation_matrix = R @ current_rotation
    # Convert back to a quaternion and apply to the object
    obj.rotation_quaternion = new_rotation_matrix.to_quaternion()

def get_object_center_to_bottom_offset(obj):
    """
    Get the offset from the center to the bottom of the object
    Args:
        obj: blender object
    Returns:
        offset: (3,) offset
    """
    bbox_min, bbox_max = scene_bbox(obj)
    bottom_pos = np.zeros(3)
    bottom_pos[0] = (bbox_max[0] + bbox_min[0]) / 2.
    bottom_pos[1] = (bbox_max[1] + bbox_min[1]) / 2.
    bottom_pos[2] = bbox_min[2]
    offset = np.array(obj.location) - bottom_pos
    return offset
def insert_object(obj_path, pos, rot, scale=1.0, from_3DGS=False):
    """
    Insert an object into the scene
    Args:
        obj_path: path to the object
        pos: (3,) position
        rot: (3, 3) rotation matrix
        scale: scale of the object
    Returns:
        inserted_obj: blender object
    """
    inserted_obj = load_object(obj_path)
    merge_meshes(inserted_obj)
    inserted_obj = remove_empty_nodes_v1(inserted_obj)
    # inserted_obj = remove_empty_nodes_v2(inserted_obj)
    if not from_3DGS and not inserted_obj.name.startswith('chatsim'):
        normalize_scene(inserted_obj)
    transform_object_origin(inserted_obj, set_origin_to_bottom=False)  # set origin to the center for simulation
    inserted_obj.scale *= scale
    rotate_obj(inserted_obj, rot)
    bpy.context.view_layer.update()
    ## the object origin is at the center and pos represents the contact point, so we need to adjust the object position
    ## NOTE: we might have a problem when the rotation about the x or y axis is non-zero
    if True:
        offset = get_object_center_to_bottom_offset(inserted_obj)
        inserted_obj.location = pos + offset
    else:
        inserted_obj.location = pos  # TODO: 3DGS might also adopt the bottom point as the position
    inserted_obj.select_set(True)
    bpy.ops.object.transform_apply(location=False, rotation=False, scale=True)  # apply scale to allow simulation
    if from_3DGS:
        bpy.ops.object.shade_smooth()  # smooth shading for 3DGS objects
    bpy.context.view_layer.update()
    bpy.ops.object.select_all(action="DESELECT")
    return inserted_obj
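# Illustrative usage sketch (commented out; the asset path, position, and rotation below
# are hypothetical placeholders):
# R_identity = np.eye(3)
# new_obj = insert_object('/path/to/asset.glb', pos=np.array([0.0, 0.0, 0.0]), rot=R_identity, scale=0.5)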
def insert_animated_object(obj_path, pos, rot, scale=1.0):
    """
    Insert an animated object into the scene
    Args:
        obj_path: path to the object
        pos: (3,) position
        rot: (3, 3) rotation matrix
        scale: scale of the object
    Returns:
        inserted_obj: blender object
    """
    inserted_obj = load_object(obj_path)
    merge_meshes(inserted_obj)
    if not inserted_obj.name.startswith('chatsim'):
        normalize_scene(inserted_obj)
    inserted_obj.scale *= scale
    rotate_obj(inserted_obj, rot)
    bpy.context.view_layer.update()
    ## the object origin is at the center and pos represents the contact point, so we need to adjust the object position
    ## NOTE: we might have a problem when the rotation about the x or y axis is non-zero
    offset = get_object_center_to_bottom_offset(inserted_obj) if not inserted_obj.name.startswith('chatsim') else np.zeros(3)
    inserted_obj.location = pos + offset
    bpy.context.view_layer.update()
    bpy.ops.object.select_all(action="DESELECT")
    return inserted_obj
def get_geometry_proxy(obj, voxel_size=0.01):
    """
    Create a remeshed proxy of the object using a voxel Remesh modifier
    Args:
        obj: blender object
        voxel_size: voxel size used by the Remesh modifier
    """
    bpy.ops.object.select_all(action="DESELECT")
    obj.select_set(True)
    bpy.context.view_layer.objects.active = obj
    # duplicate the object
    proxy_obj = obj.copy()
    proxy_obj.data = obj.data.copy()
    proxy_obj.name = obj.name + "_proxy"
    bpy.context.collection.objects.link(proxy_obj)
    # apply the remesh modifier (make the proxy active so the modifier is applied to it)
    bpy.context.view_layer.objects.active = proxy_obj
    remesh_mod = proxy_obj.modifiers.new(name="Remesh", type='REMESH')
    remesh_mod.mode = 'VOXEL'
    remesh_mod.voxel_size = voxel_size
    bpy.ops.object.modifier_apply(modifier=remesh_mod.name)
    # Remove any existing rigid body physics from the proxy object
    if proxy_obj.rigid_body:
        bpy.context.view_layer.objects.active = proxy_obj
        bpy.ops.rigidbody.object_remove()
    # Option 1: add a Copy Transforms constraint
    copy_transforms = proxy_obj.constraints.new(type='COPY_TRANSFORMS')
    copy_transforms.target = obj
    # Option 2: parent the proxy object to the original object
    # proxy_obj.parent = obj
    # proxy_obj.matrix_parent_inverse = obj.matrix_world.inverted()
    proxy_obj.hide_render = True  # avoid rendering the proxy object
    bpy.ops.object.select_all(action="DESELECT")
    return proxy_obj
#########################################################
# Shadow catcher setup
#########################################################
def add_meshes_shadow_catcher(mesh_path=None, is_uv_mesh=False):
    """
    Add the entire scene mesh as a shadow catcher to the scene
    Args:
        mesh_path: path to the mesh file
        is_uv_mesh: whether the mesh is a UV-textured mesh
    """
    # add meshes extracted from NeRF/3DGS as shadow catcher
    if mesh_path is None or not os.path.exists(mesh_path):
        raise AssertionError('mesh file does not exist')
    mesh = load_object(mesh_path)
    # mesh.is_shadow_catcher = True  # set True for a transparent shadow catcher
    mesh.visible_diffuse = False  # prevent the mesh from lighting up the scene
    if not is_uv_mesh:
        mesh.visible_glossy = False  # prevent the white material from reflecting light
        white_mat = create_white_material()
        if mesh.data.materials:
            mesh.data.materials[0] = white_mat
        else:
            mesh.data.materials.append(white_mat)
    bpy.ops.object.select_all(action="DESELECT")
    return mesh

def add_planar_shadow_catcher(size=10):
    """
    Add a large planar surface as a shadow catcher to the scene
    Args:
        size: size of the planar surface
    """
    bpy.ops.mesh.primitive_plane_add(size=1)
    mesh = bpy.context.object
    mesh.scale = (size, size, 1)
    mesh.name = "floor_plane"
    mesh.visible_glossy = False  # prevent the white material from reflecting light
    white_mat = create_white_material()
    if mesh.data.materials:
        mesh.data.materials[0] = white_mat
    else:
        mesh.data.materials.append(white_mat)
    bpy.ops.object.select_all(action="DESELECT")
    return mesh
#########################################################
# Rigid body simulation
#########################################################
def add_rigid_body(obj, rb_type='ACTIVE', collision_shape='MESH', mass=1.0, restitution=0.6, collision_margin=0.001):
    """
    Add a rigid body to the object
    Args:
        obj: blender object
        rb_type: rigid body type ('ACTIVE', 'PASSIVE', or 'KINEMATIC')
        collision_shape: collision shape of the object
        mass: mass of the object
        restitution: bounciness of the object
        collision_margin: collision margin of the object
    """
    all_obj_nodes = [obj] + obj.children_recursive
    for obj_node in all_obj_nodes:
        if obj_node.type == 'MESH':
            bpy.context.view_layer.objects.active = obj_node
            bpy.ops.rigidbody.object_add()
            if rb_type == 'KINEMATIC':
                obj_node.rigid_body.type = 'PASSIVE'
                obj_node.rigid_body.kinematic = True
            else:
                obj_node.rigid_body.type = rb_type
            obj_node.rigid_body.collision_shape = collision_shape
            obj_node.rigid_body.restitution = restitution
            obj_node.rigid_body.mass = mass
            obj_node.rigid_body.collision_margin = collision_margin
            # obj_node.rigid_body.friction = 0.8  # TODO: leads to a blender crash (not sure why)
    bpy.ops.object.select_all(action="DESELECT")
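# Illustrative usage sketch (commented out): a dynamic inserted object falling onto a passive
# shadow-catcher mesh. `new_obj` and `scene_mesh` are hypothetical objects returned by
# insert_object / add_meshes_shadow_catcher above:
# add_rigid_body(new_obj, rb_type='ACTIVE', collision_shape='CONVEX_HULL', mass=1.0)
# add_rigid_body(scene_mesh, rb_type='PASSIVE', collision_shape='MESH')
# scene.rigidbody_world.point_cache.frame_end = scene.frame_end  # simulate over the full render range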
#########################################################
# Animation
#########################################################
# def set_linear_trajectory(blender_obj, points, t1=1, t2=-1, f_axis='TRACK_NEGATIVE_Y'):
#     """
#     Set a trajectory of an object from multiple points (use Bezier curve)
#     """
#     curve_data = bpy.data.curves.new('myCurve', type='CURVE')
#     curve_data.dimensions = '3D'
#     curve_data.path_duration = max(t2 - t1, 1)
#     curve_data.use_path = True
#     curve_data.eval_time = 0
#     curve_data.keyframe_insert(data_path="eval_time", frame=t1)
#     curve_data.eval_time = max(t2 - t1, 1)
#     curve_data.keyframe_insert(data_path="eval_time", frame=t2)
#     n_points = len(points)
#     # TODO Test: chatsim objects are centered at the origin, but the points are on the floor, so we need to adjust the offset
#     # offset = get_object_center_to_bottom_offset(blender_obj) if blender_obj.name.startswith('chatsim') else np.zeros(3)
#     spline = curve_data.splines.new(type='BEZIER')
#     spline.use_endpoint_u = True
#     spline.use_endpoint_v = True
#     spline.bezier_points.add(n_points - 1)  # already has one point by default
#     for i, point in enumerate(points):
#         spline.bezier_points[i].co = point
#         spline.bezier_points[i].handle_left_type = spline.bezier_points[i].handle_right_type = 'AUTO'
#     curve_obj = bpy.data.objects.new('MyCurveObject', curve_data)
#     scene.collection.objects.link(curve_obj)
#     follow_path_constraint = blender_obj.constraints.new(type='FOLLOW_PATH')
#     follow_path_constraint.target = curve_obj
#     follow_path_constraint.use_curve_follow = True
#     follow_path_constraint.forward_axis = f_axis
#     follow_path_constraint.up_axis = 'UP_Z'
#     # set the blender object's location and rotation to zero
#     blender_obj.location = (0, 0, 0)
#     blender_obj.rotation_euler = (0, 0, 0)
#     bpy.context.view_layer.update()
def set_linear_trajectory(blender_obj, points, t1=1, t2=-1, f_axis='TRACK_NEGATIVE_Y'):
    """
    Set a trajectory of an object from multiple points (use Poly curve)
    """
    curve_data = bpy.data.curves.new('myCurve', type='CURVE')
    curve_data.dimensions = '3D'
    curve_data.path_duration = max(t2 - t1, 1)
    curve_data.use_path = True
    curve_data.eval_time = 0
    curve_data.keyframe_insert(data_path="eval_time", frame=t1)
    curve_data.eval_time = max(t2 - t1, 1)
    curve_data.keyframe_insert(data_path="eval_time", frame=t2)
    n_points = len(points)
    # TODO Test: chatsim objects are centered at the origin, but the points are on the floor, so we need to adjust the offset
    # offset = get_object_center_to_bottom_offset(blender_obj) if blender_obj.name.startswith('chatsim') else np.zeros(3)
    spline = curve_data.splines.new(type='POLY')
    spline.points.add(n_points - 1)  # already has one point by default
    for i, point in enumerate(points):
        spline.points[i].co = (point[0], point[1], point[2], 1)
        # spline.points[i].co = (point[0]+offset[0], point[1]+offset[1], point[2]+offset[2], 1)
    curve_obj = bpy.data.objects.new('MyCurveObject', curve_data)
    scene.collection.objects.link(curve_obj)
    follow_path_constraint = blender_obj.constraints.new(type='FOLLOW_PATH')
    follow_path_constraint.target = curve_obj
    follow_path_constraint.use_curve_follow = True
    follow_path_constraint.forward_axis = f_axis
    follow_path_constraint.up_axis = 'UP_Z'
    # set the blender object's location and rotation to zero
    blender_obj.location = (0, 0, 0)
    blender_obj.rotation_euler = (0, 0, 0)
    bpy.context.view_layer.update()
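# Illustrative usage sketch (commented out; the waypoints and frame range below are hypothetical):
# waypoints = [(0.0, 0.0, 0.0), (2.0, 0.0, 0.0), (4.0, 1.0, 0.0)]
# set_linear_trajectory(new_obj, waypoints, t1=1, t2=60)  # follow the path from frame 1 to frame 60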
def extend_cyclic_animation_command_line(obj, n_repetitions_after=0, n_repetitions_before=0):
    """
    Extend the cyclic animation of the object
    """
    if obj.animation_data is not None and obj.animation_data.action is not None:
        for fcurves_f in obj.animation_data.action.fcurves:
            new_modifier = fcurves_f.modifiers.new(type='CYCLES')
            new_modifier.cycles_after = n_repetitions_after  # 0 means infinite repetitions after the end of the fcurve
            new_modifier.cycles_before = n_repetitions_before  # 0 means infinite repetitions before the start of the fcurve
            # new_modifier.frame_start = fcurves_f.range()[0]
            # new_modifier.frame_end = fcurves_f.range()[1]

def add_cyclic_animation(obj, n_repetitions_after=0, n_repetitions_before=0):
    """
    Add cyclic animation to the object
    """
    all_obj_nodes = [obj] + obj.children_recursive
    for obj_node in all_obj_nodes:
        extend_cyclic_animation_command_line(obj_node, n_repetitions_after, n_repetitions_before)
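# Illustrative usage sketch (commented out; the asset path is a hypothetical placeholder and
# assumes the imported asset carries its own keyframed animation):
# animated_obj = insert_animated_object('/path/to/animated_asset.glb', pos=np.array([1.0, 0.0, 0.0]), rot=np.eye(3), scale=0.5)
# add_cyclic_animation(animated_obj)  # loop the imported animation over the whole render range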
#########################################################
# Materials
#########################################################
def create_white_material():
    mat = bpy.data.materials.new(name="WhiteMaterial")
    mat.use_nodes = True
    bsdf = mat.node_tree.nodes["Principled BSDF"]
    bsdf.inputs["Base Color"].default_value = (1, 1, 1, 1)
    bsdf.inputs["Metallic"].default_value = 0.0
    # bsdf.inputs["Specular"].default_value = 0.0  # issue: https://github.com/ross-g/io_pdx_mesh/issues/86
    bsdf.inputs[7].default_value = 0.0  # Specular
    bsdf.inputs["Roughness"].default_value = 1.0
    return mat

def get_mesh_nodes(obj):
    """
    Get all mesh nodes in the object
    Args:
        obj: blender object
    Returns:
        mesh_nodes: list of blender mesh objects
    """
    all_object_nodes = [obj] + obj.children_recursive
    mesh_nodes = []
    for obj_node in all_object_nodes:
        if obj_node.type == 'MESH':
            mesh_nodes.append(obj_node)
    return mesh_nodes

def get_material_nodes(mesh_nodes):
    """
    Get the material nodes (BSDF_PRINCIPLED & TEX_IMAGE) of the object
    """
    principled_nodes = []
    base_color_nodes = []
    material_nodes = []
    for mesh in mesh_nodes:
        for material_slot in mesh.material_slots:
            material = material_slot.material
            if material and material.use_nodes:
                for node in material.node_tree.nodes:
                    if node.type == 'BSDF_PRINCIPLED':
                        principled_nodes.append(node)
                    elif node.type == 'TEX_IMAGE':
                        base_color_nodes.append(node)
                material_nodes.append(material)
    return principled_nodes, base_color_nodes, material_nodes

def adjust_principled_bsdf(principled_nodes, specular=0.5, metallic=0.0, roughness=0.5):
    """
    Adjust the specular, metallic and roughness values of the material
    """
    for node in principled_nodes:
        if node.inputs['Specular'].is_linked:
            node.id_data.links.remove(node.inputs['Specular'].links[0])
        if node.inputs['Metallic'].is_linked:
            node.id_data.links.remove(node.inputs['Metallic'].links[0])
        if node.inputs['Roughness'].is_linked:
            node.id_data.links.remove(node.inputs['Roughness'].links[0])
        node.inputs['Specular'].default_value = specular
        node.inputs['Metallic'].default_value = metallic
        node.inputs['Roughness'].default_value = roughness

def make_mirror_material(principled_nodes):
    """