
Commit

added the free point handler
rlav440 committed Sep 9, 2024
1 parent 1ad4c63 commit cc6556e
Showing 13 changed files with 453 additions and 61 deletions.
6 changes: 4 additions & 2 deletions pyCamSet/calibration_targets/shape_by_faces.py
@@ -96,7 +96,7 @@ def __init__(self, face_local_coords, face_transforms, scale_factor=1.):
self.ur_pdata[i] = h_tform(transform=tform, points=points/scale_factor) * scale_factor
return

def draw_meshes(self, face_corners, face_images: list[np.ndarray]):
def draw_meshes(self, face_corners, face_images: list[np.ndarray], return_scene=False):
"""
Draws a mesh given the current face set up
@@ -135,6 +135,8 @@ def draw_meshes(self, face_corners, face_images: list[np.ndarray]):
# a = self.point_data.reshape((-1, 3))
# ls = [str(i) for i in range(a.shape[0])]
# scene.add_point_labels(a, ls)
if return_scene:
return scene
scene.add_axes()
scene.show()

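An illustrative usage sketch of the new return_scene flag (not part of the commit; the face data variables are assumed to be prepared elsewhere): returning the scene lets the caller compose or decorate it before display, since draw_meshes now returns before adding axes.

# Illustrative sketch only, under the assumptions noted above.
from pyCamSet.calibration_targets import FaceToShape

shape = FaceToShape(face_local_coords, face_transforms)                  # inputs assumed prepared elsewhere
scene = shape.draw_meshes(face_corners, face_images, return_scene=True)
scene.add_axes()                                                         # the caller adds axes and shows the scene
scene.show()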
@@ -165,7 +167,7 @@ def draw_net(self, net_images, net_transforms):
r_im = None #crop down to the max and min containing the array.
img = Image.new('L', im.shape, 0) # some shapes might use a sub window of the image
ImageDraw.Draw(img).polygon(bf, outline=1, fill=1)
r_im *= numpy.array(img)
r_im *= np.array(img)
# map the face shape over the image, making a mask and doing a bounds check for all faces
blank_canvas[0] += r_im

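The draw_net hunk above rasterises the face outline with PIL and multiplies the resulting binary mask into the rendered face image; a standalone sketch of that masking step (the array size and polygon coordinates are illustrative assumptions):

# Illustrative sketch only.
import numpy as np
from PIL import Image, ImageDraw

face = np.full((200, 200), 255, dtype=np.uint8)        # a rendered face image (assumed square)
poly = [(10, 10), (190, 20), (180, 180), (20, 170)]    # face outline in pixel coordinates
mask = Image.new('L', face.shape, 0)                   # blank 8-bit mask, as in the hunk above
ImageDraw.Draw(mask).polygon(poly, outline=1, fill=1)
masked_face = face * np.array(mask)                    # zeroes everything outside the polygon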
54 changes: 43 additions & 11 deletions pyCamSet/calibration_targets/target_Ccube.py
@@ -6,19 +6,21 @@
from cv2 import aruco
import cv2
from PIL import Image
from matplotlib import pyplot as plt
from tqdm import tqdm

from pyCamSet.calibration_targets import AbstractTarget, ImageDetection, FaceToShape
from pyCamSet.cameras import Camera
from pyCamSet.utils.general_utils import split_aruco_dictionary, make_4x4h_tform, downsample_valid

TFORMS = [
([-1.209,-1.209, 1.209],[ 0.5,-0.5, 0.5]),
([ 1.209,-1.209, 1.209],[ 0.5, 0.5,-0.5]),
([0. ,0. ,1.571],[ 0.5,-0.5,-0.5]),
([2.221,0. ,2.221],[-0.5, 0.5,-0.5]),
([3.142,0. ,0. ],[-0.5, 0.5, 0.5]),
([-1.571, 0. , 0. ],[-0.5,-0.5, 0.5]),
]
# TFORMS = [
# ([-1.209,-1.209, 1.209],[ 0.5,-0.5, 0.5]),
# ([ 1.209,-1.209, 1.209],[ 0.5, 0.5,-0.5]),
# ([0. ,0. ,1.571],[ 0.5,-0.5,-0.5]),
# ([2.221,0. ,2.221],[-0.5, 0.5,-0.5]),
# ([3.142,0. ,0. ],[-0.5, 0.5, 0.5]),
# ([-1.571, 0. , 0. ],[-0.5,-0.5, 0.5]),
# ]

TFORMS = [
(([2.22144147, 2.22144147, 0. ]), ([-0.5, -0.5, 0.5])),
@@ -97,19 +99,25 @@ def __init__(self, length=20, n_points=5,
blank_face, board_offset = make_blank_square(draw_res, line_fraction, border_fraction)
sub_res = (draw_res[0] - 2 * board_offset, draw_res[1] - 2*board_offset)
self.textures = [blank_face.copy() for _ in range(6)]
# debug_t = []
for t, board in zip(self.textures, self.boards):
t[board_offset:-board_offset, board_offset:-board_offset] = board.draw(sub_res)

# debug_t.append(board.draw(draw_res)) #DEBUG
# self.textures = debug_t

bd = np.array([board.chessboardCorners for board in self.boards])
coord_bump = self.length*border_fraction/2
board_coords = bd + [coord_bump, coord_bump, 0]
# board_coords = bd #DEBUG
self.base_face = np.array([
[0, self.length,0],
[self.length, self.length,0],
[self.length, 0,0],
[0, 0,0],
])

# breakpoint()
self.faceData = FaceToShape(
face_local_coords=board_coords,
face_transforms=[make_4x4h_tform(*t) for t in TFORMS],
@@ -118,19 +126,43 @@
self.point_data = self.faceData.point_data
self._process_data()

def plot(self):
    def plot(self, return_scene = False):
        """
        Draws a 3D model of the calibration target using pyVista.
        :param return_scene: if True, return the pyVista scene instead of showing it
        """
faces = self.faceData.draw_meshes(self.base_face, self.textures)
scene = self.faceData.draw_meshes(self.base_face, self.textures, return_scene=return_scene)
if return_scene:
return scene

def save_to_pdf(
self,
f_out: Path | None = None,
border_width: float = 10,
):
individual_faces = True,

):
if individual_faces:
for idf, face in enumerate(tqdm(self.textures)):
blank_f = int(border_width * 0.0393701 * self.dpi)
dims = np.array(face.shape) + blank_f * 2
full_im = np.ones((dims)) * 255
full_im[blank_f:-blank_f, blank_f:-blank_f] = face

if f_out is None:
f_out = Path(f'Ccube_length_{self.length * 1000:.2f}mm' \
f'_{self.n_points}_points_at' \
f'_{self.square_size * 1000:.2f}mm_face_{idf}.png')
full_im = full_im.astype(np.uint8)
with Image.fromarray(full_im) as im:
im.save(fp=f_out, resolution=self.dpi)
f_out = None
return

raise NotImplementedError
im_board = self.faceData.draw_net(self.textures, NET_FORMS)
plt.imshow(im_board)
plt.show()

blank_f = int(border_width * 0.0393701 * self.dpi)
dims = np.array(im_board.shape) + blank_f * 2
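A hedged usage sketch for the new per-face export path (not part of the commit; the class is assumed to be exposed as Ccube, as the file name and output pattern suggest, and the argument values are illustrative): with individual_faces=True each cube face is written as its own bordered image, auto-named from the target geometry when f_out is None.

# Illustrative sketch only, under the assumptions noted above.
from pyCamSet.calibration_targets.target_Ccube import Ccube

cube = Ccube(length=20, n_points=5)
cube.save_to_pdf(border_width=10, individual_faces=True)   # writes one image per face
scene = cube.plot(return_scene=True)                       # new flag: take ownership of the pyVista scene
scene.show()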
1 change: 1 addition & 0 deletions pyCamSet/calibration_targets/target_detections.py
@@ -243,6 +243,7 @@ def add_detection(self, cam_name, im_num, detection: ImageDetection) -> None:
:param detection: The detection data, contained as an image detection.
"""
ind = self.cam_names.index(cam_name)

if detection.has_data:
if detection.keys.ndim == 1:
keys = detection.keys[..., None]
9 changes: 5 additions & 4 deletions pyCamSet/cameras/camera.py
@@ -199,19 +199,20 @@ def project_points(self, points, mode="opencv", distort=True):
which returns v,u coordinates
:return points: points in the uv coordinates
"""

centered = h_tform(points, self.proj)
if centered.ndim == 1:
centered = centered[None, ...]
if mode == "image":
return centered[:, ::-1]
if distort and not np.any(np.logical_not(np.isclose(self.distortion_coefs, np.zeros(5)))):
dist_zero = np.all(np.isclose(self.distortion_coefs, 0))
if distort and not dist_zero:
distorted = [distort_points(
pt,
self.intrinsic,
self.distortion_coefs
) for pt in centered]
return np.array(distorted)

if mode == "image":
return centered[:, ::-1]
return centered

def _is_in_image(self, cords) -> bool:
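For reference, the rewritten guard above only runs the per-point distortion loop when at least one distortion coefficient is non-zero; a hedged sketch of the resulting behaviour (cam is an assumed, already constructed Camera instance, and the point values are illustrative):

# Illustrative sketch only, under the assumptions noted above.
import numpy as np

pts = np.array([[0.0, 0.0, 1.0],
                [0.1, -0.05, 1.2]])                     # assumed 3D points in front of the camera
uv_distorted = cam.project_points(pts, distort=True)    # the loop is skipped when all coefficients are ~0
uv_plain = cam.project_points(pts, distort=False)
# with cam.distortion_coefs all zero, the two results should now agree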
12 changes: 8 additions & 4 deletions pyCamSet/cameras/camera_set.py
@@ -290,7 +290,7 @@ def return_view_overlaps(self):

return final_mesh

def project_points_to_all_cams(self, points) -> list[dict[str|int,np.ndarray]]|dict[str|int, np.ndarray]:
def project_points_to_all_cams(self, points, distort=True) -> list[dict[str|int,np.ndarray]]|dict[str|int, np.ndarray]:
"""
Projects a point or list of points to all cameras.
@@ -306,7 +306,7 @@ def project_points_to_all_cams(self, points) -> list[dict[str|int,np.ndarray]]|d
points = points[None, ...]
single_flag = True

all_projections = [cam.project_points(points) for cam in self._cam_list]
all_projections = [cam.project_points(points, distort=distort) for cam in self._cam_list]
projection_dictionary_list = [{} for _ in range(points.shape[0])]

for cam_proj, cam_name in zip(all_projections, self._cam_dict.keys()):
@@ -319,7 +319,7 @@ def project_points_to_all_cams(self, points) -> list[dict[str|int,np.ndarray]]|d
return projection_dictionary_list

def multi_cam_triangulate(self, to_reconstruct: list[dict] or dict or np.ndarray,
return_used = False):
return_used = False, distort=True):
"""
A lsq minimised triangulation of camera point locations to reconstruct.
Automatically identifies points with shared visibility
@@ -351,14 +351,18 @@ def multi_cam_triangulate(self, to_reconstruct: list[dict] or dict or np.ndarray
data[:, 1:-2], axis=0, return_inverse=True, return_counts=True
)
viable_mask = count > 1
reconstructable_data = data[viable_mask[inv]]
reconstructable_data = data[viable_mask[inv].squeeze()]

_, im_index, im_counts = np.unique(reconstructable_data[:, 1:-2], axis=0, return_index=True, return_counts=True)
start_ind = np.append(0, np.cumsum(im_counts[np.argsort(im_index)]))

#build the projection matricies
proj = np.array([cam.proj for cam in self])
dists = np.array([cam.distortion_coefs for cam in self])

if not distort:
dists = np.zeros_like(dists)

intr = np.array([cam.intrinsic for cam in self])

reconstructed = nb_triangulate_full(reconstructable_data, proj, start_ind, intr, dists)
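A hedged round-trip sketch for the new distort keyword on both methods (not part of the commit; cams is an assumed, already calibrated CameraSet, and the exact shape and ordering of the triangulated output are defined elsewhere in the class): projecting points with distortion disabled and triangulating them with distort=False should recover approximately the original coordinates.

# Illustrative sketch only, under the assumptions noted above.
import numpy as np

points = np.random.default_rng(0).uniform(-0.05, 0.05, (10, 3))      # assumed 3D test points
detections = cams.project_points_to_all_cams(points, distort=False)  # one {camera: uv} dict per point
recon = cams.multi_cam_triangulate(detections, distort=False)
# recon should lie close to the input points when the cameras are well calibrated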
69 changes: 68 additions & 1 deletion pyCamSet/optimisation/compiled_helpers.py
@@ -6,6 +6,74 @@
import cv2


@njit(fastmath=True)
def geometric_median(X):
    # Weiszfeld-style iteratively re-weighted mean: returns the point that
    # minimises the summed Euclidean distance to the rows of X (an n x 3 array).
    output = np.empty(3)
    work_point = np.empty(3)
    w_dist = np.empty(X.shape[0])
    n_geometric_median_prealloc(X, work_point, output, w_dist)
    return output

@njit(fastmath=True)
def n_geometric_median_prealloc(X, wp0, out, w_dist, eps=1e-5, max_iter=20):
e2 = eps ** 2
run_len = len(X)
if len(X) == 1:
out[:] = X
return

wp0[:] = 0
for i in range(run_len):
wp0[0] += X[i, 0]
wp0[1] += X[i, 1]
wp0[2] += X[i, 2]
wp0 /= run_len

iter = 0

while iter < max_iter:
non_zero = 0
for i in range(run_len):
dist = math.sqrt((X[i, 0] - wp0[0]) ** 2 + (X[i, 1] - wp0[1]) ** 2 + (X[i, 2] - wp0[2]) ** 2)
if dist == 0:
w_dist[i] = 0
else:
w_dist[i] = 1 / dist
non_zero += 1

w_sum = np.sum(w_dist)
w_dist /= w_sum
out[:] = 0 # the new estimate
for i in range(run_len):
out[0] += w_dist[i] * X[i, 0]
out[1] += w_dist[i] * X[i, 1]
out[2] += w_dist[i] * X[i, 2]

        num_zeros = run_len - non_zero
        if num_zeros == 0:
            pass  # do nothing to the work point
        elif num_zeros == len(X):
            out[:] = wp0  # every sample coincides with the estimate, so it is already the median
            return
else:
r = w_sum * np.sqrt(
(out[0] - wp0[0]) ** 2 + (out[1] - wp0[1]) ** 2 + (out[2] - wp0[2]) ** 2
)
# print(r)
rinv = 0 if r == 0 else num_zeros / r
s0 = max(0, 1 - rinv)
s1 = min(1, rinv)
out *= s0
out[0] += s1 * wp0[0]
out[1] += s1 * wp0[1]
out[2] += s1 * wp0[2]

d = (wp0[0] - out[0]) ** 2 + (wp0[1] - out[1]) ** 2 + (wp0[2] - out[2]) ** 2
if d < e2:
return
wp0[:] = out
iter += 1


@njit(cache=True)
def fill_pose(pose_data, poses, poses_unfixed):
"""
@@ -28,7 +96,6 @@ def fill_pose(pose_data, poses, poses_unfixed):
def fill_extr(extr_data, extr, extr_unfixed):
"""
Fills an extrinsics array with data from an input param array.
:param extr_data: The extrinsic parameters, an nx6 array
:param extr: The extrinsic data with missing blocks to fill
:param extr_unfixed: A boolean array describing which extrinsics are unfixed
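The new geometric_median helper implements a Weiszfeld-style iteratively re-weighted mean; a small robustness sketch (not part of the commit; the data are synthetic and the printed values are approximate):

# Illustrative sketch only.
import numpy as np
from pyCamSet.optimisation.compiled_helpers import geometric_median

rng = np.random.default_rng(0)
pts = np.vstack([rng.normal(0.0, 0.01, (20, 3)),     # a tight cluster of points near the origin
                 [[10.0, 10.0, 10.0]]])              # plus one gross outlier
print(pts.mean(axis=0))         # the mean is dragged toward the outlier, roughly [0.48, 0.48, 0.48]
print(geometric_median(pts))    # the geometric median stays with the cluster, close to [0, 0, 0]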