1 change: 1 addition & 0 deletions flystar/__init__.py
@@ -10,3 +10,4 @@
except PackageNotFoundError:
pass


21 changes: 15 additions & 6 deletions flystar/align.py
@@ -19,6 +19,7 @@ def __init__(self, list_of_starlists, ref_index=0, iters=2,
dr_tol=[1, 1], dm_tol=[2, 1],
outlier_tol=[None, None],
trans_args=[{'order': 2}, {'order': 2}],
init_order=1,
mag_trans=True, mag_lim=None, weights=None,
trans_input=None, trans_class=transforms.PolyTransform,
use_vel=False, calc_trans_inverse=False,
@@ -169,6 +170,7 @@ def = None. If not None, then this should contain an array or list of transform
self.dm_tol = dm_tol
self.outlier_tol = outlier_tol
self.trans_args = trans_args
self.init_order = init_order
self.mag_trans = mag_trans
self.mag_lim = mag_lim
self.weights = weights
@@ -381,6 +383,7 @@ def match_and_transform(self, ref_mag_lim, dr_tol, dm_tol, outlier_tol, trans_ar

trans = trans_initial_guess(ref_list[keepers], star_list_orig_trim, self.trans_args[0],
mode=self.init_guess_mode,
order=self.init_order,
verbose=self.verbose,
mag_trans=self.mag_trans)

@@ -417,7 +420,6 @@ def match_and_transform(self, ref_mag_lim, dr_tol, dm_tol, outlier_tol, trans_ar
idx1 = idx1[keepers]
idx2 = idx2[keepers]


# Determine weights in the fit.
weight = self.get_weights_for_lists(ref_list[idx2], star_list_T[idx1])

@@ -484,7 +486,7 @@ def match_and_transform(self, ref_mag_lim, dr_tol, dm_tol, outlier_tol, trans_ar
if self.verbose > 1:
print( ' Match 2: After trans, found ', len(idx_lis), ' matches out of ', len(star_list_T),
'. If match count is low, check dr_tol, dm_tol.' )

## Make plot, if desired
plots.trans_positions(ref_list, ref_list[idx_ref], star_list_T, star_list_T[idx_lis],
fileName='{0}'.format(star_list_T['t'][0]))
@@ -1293,6 +1295,7 @@ def __init__(self, ref_list, list_of_starlists, iters=2,
dr_tol=[1, 1], dm_tol=[2, 1],
outlier_tol=[None, None],
trans_args=[{'order': 2}, {'order': 2}],
init_order=1,
mag_trans=True, mag_lim=None, ref_mag_lim=None,
weights=None,
trans_input=None,
@@ -1383,6 +1386,10 @@ def = None. If not None, then this should contain an array or list of transform
then the transformation argument (i.e. order) will be changed for every iteration in
iters.

init_order: int
Polynomial transformation order to use for initial guess transformation.
Order=1 should be used in most cases, but sometimes a higher order is needed.

calc_trans_inverse: boolean
If true, then calculate the inverse transformation (from reference to starlist)
in addition to the normal transformation (from starlist to reference). The inverse
@@ -1460,6 +1467,7 @@ def = None. If not None, then this should contain an array or list of transform
super().__init__(list_of_starlists, ref_index=-1, iters=iters,
dr_tol=dr_tol, dm_tol=dm_tol,
outlier_tol=outlier_tol, trans_args=trans_args,
init_order=init_order,
mag_trans=mag_trans, mag_lim=mag_lim, weights=weights,
trans_input=trans_input, trans_class=trans_class,
calc_trans_inverse=calc_trans_inverse, use_vel=use_vel,
@@ -1820,7 +1828,7 @@ def add_rows_for_new_stars(ref_table, star_list, idx_lis):
elif ref_table[col_name].dtype == np.dtype('bool'):
new_col_empty = False
else:
new_col_empty = None
new_col_empty = np.nan

if len(ref_table[col_name].shape) == 1:
new_col_shape = len(idx_lis_new)
@@ -3494,7 +3502,7 @@ def check_trans_input(list_of_starlists, trans_input, mag_trans):

def trans_initial_guess(ref_list, star_list, trans_args, mode='miracle',
ignore_contains='star', verbose=True, n_req_match=3,
mag_trans=True):
mag_trans=True, order=1):
"""
Take two starlists and perform an initial matching and transformation.

@@ -3509,7 +3517,7 @@ def trans_initial_guess(ref_list, star_list, trans_args, mode='miracle',
# the "ignore_contains" string.
idx_r = np.flatnonzero(np.char.find(ref_list['name'], ignore_contains) == -1)
idx_s = np.flatnonzero(np.char.find(star_list['name'], ignore_contains) == -1)

# Match the star names
name_matches, ndx_r, ndx_s = np.intersect1d(ref_list['name'][idx_r],
star_list['name'][idx_s],
@@ -3554,7 +3562,8 @@ def trans_initial_guess(ref_list, star_list, trans_args, mode='miracle',
if ('order' in trans_args) and (trans_args['order'] == 0):
order = 0
else:
order = 1
order = order

trans = transforms.PolyTransform.derive_transform(x1m, y1m ,x2m, y2m, order=order, weights=None)

# Calculate flux transformation based on matches. If desired, should be applied as
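For context, a minimal sketch of how the new init_order keyword reaches the initial-guess fit; ref_list and star_list are hypothetical StarList-like tables, and only the trans_initial_guess signature is taken from this diff:

trans = trans_initial_guess(ref_list, star_list,
                            trans_args={'order': 2},  # per-iteration transform arguments
                            order=1,                  # init_order: order of the first-guess polynomial
                            mag_trans=True, verbose=False)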
3 changes: 3 additions & 0 deletions flystar/analysis.py
@@ -52,6 +52,9 @@ def query_gaia(ra, dec, search_radius=30.0, table_name='gaiadr2'):
gaia_job = Gaia.cone_search_async(target_coords, search_radius, table_name = table_name + '.gaia_source')
gaia = gaia_job.get_results()

#Change new 'SOURCE_ID' column header back to lowercase 'source_id' so all subsequent functions still work:
gaia['SOURCE_ID'].name = 'source_id'

return gaia


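The rename works because assigning to an astropy Column's .name attribute renames the column within its parent Table; an equivalent spelling, shown here only for reference, would be:

gaia.rename_column('SOURCE_ID', 'source_id')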
18 changes: 18 additions & 0 deletions flystar/archive_io.py
@@ -0,0 +1,18 @@
import pickle

# Need to add these functions to a utility .py file rather than storing them in general structure.
def open_archive(file_name):
"""
Helper function to open archived files.
"""
with open(file_name, 'rb') as file_archive:
file_dict = pickle.load(file_archive)
return file_dict

def save_archive(file_name, save_data):
"""
Helper function to archive a file.
"""
with open(file_name, 'wb') as outfile:
pickle.dump(save_data, outfile, protocol=pickle.HIGHEST_PROTOCOL)
return
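A minimal round-trip sketch of these helpers; the file name and payload below are hypothetical:

results = {'stars': [1, 2, 3], 'epoch': 2021.5}  # any picklable object
save_archive('align_results.pkl', results)       # writes with pickle.HIGHEST_PROTOCOL
restored = open_archive('align_results.pkl')     # returns the unpickled object
assert restored == results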
203 changes: 203 additions & 0 deletions flystar/fit_velocity.py
@@ -0,0 +1,203 @@
from tqdm import tqdm
from scipy.optimize import curve_fit
import numpy as np
import pandas as pd

def linear(x, k, b):
return k*x + b

def linear_fit(x, y, sigma=None, absolute_sigma=True):
"""Weighted linear regression (See https://en.wikipedia.org/wiki/Weighted_least_squares#Solution). Recommended for low-dimension, non-degenerate data. Otherwise, please use scipy.curve_fit.

Parameters
----------
x : array-like
x data
y : array-like
y data
sigma : array-like, optional
Measurement uncertainties; each point is weighted by 1/sigma**2. If not provided, all weights are 1. By default None.
absolute_sigma : bool, optional
If True (default), sigma is used in an absolute sense and the estimated parameter uncertainties reflect these absolute values. If False, only the relative magnitudes of the sigma values matter.

Returns
-------
result : dictionary
Dictionary with keys 'slope', 'e_slope', 'intercept', 'e_intercept', and 'chi2'.
"""
x = np.array(x)
y = np.array(y)
if sigma is None:
sigma = np.ones_like(x)
else:
sigma = np.array(sigma)

X = np.vander(x, 2)
W = np.diag(1/sigma**2)
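# X has columns [x, 1] (so popt = [slope, intercept]) and W carries the
# weighted-least-squares weights 1/sigma_i**2.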
XTWX = X.T @ W @ X
pcov = np.linalg.inv(XTWX) # Covariance Matrix
popt = pcov @ X.T @ W @ y # Linear Solution
perr = np.sqrt(np.diag(pcov)) # Uncertainty of Linear Solution

residual = y - X @ popt
chi2 = residual.T @ W @ residual

if not absolute_sigma:
reduced_chi2 = chi2/(len(x) - 2)
perr *= reduced_chi2**0.5

result = {
'slope': popt[0],
'intercept': popt[1],
'e_slope': perr[0],
'e_intercept': perr[1],
'chi2': chi2
}

return result
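For reference, a compact statement of the closed-form weighted least-squares solution that the matrix expressions in linear_fit implement, with design matrix X = [x, 1] and weight matrix W = diag(1/sigma_i^2):

\hat{\beta} = (X^\top W X)^{-1} X^\top W y, \qquad \mathrm{Cov}(\hat{\beta}) = (X^\top W X)^{-1}, \qquad \chi^2 = (y - X\hat{\beta})^\top W (y - X\hat{\beta})

When absolute_sigma is False, the parameter errors are additionally scaled by sqrt(chi^2 / (n - 2)), i.e. by the square root of the reduced chi-squared, matching the convention of scipy.optimize.curve_fit.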


def calc_chi2(x, y, sigma, slope, intercept):
popt = np.array([slope, intercept])
X = np.vander(x, 2)
W = np.diag(1/sigma**2)
residual = y - X @ popt
return residual.T @ W @ residual


def fit_velocity(startable, weighting='var', use_scipy=False, absolute_sigma=True, epoch_cols='all', art_star=False):
"""Fit proper motion with weighted linear regression equations (see https://en.wikipedia.org/wiki/Weighted_least_squares#Solution).
Assumes that all data are valid.

Parameters
----------
startable : StarTable
StarTable object
weighting : str, optional
Weighting by variance (1/ye**2) or standard deviation (1/ye), by default 'var'
use_scipy : bool, optional
Use scipy.curve_fit or flystar.fit_velocity.linear_fit, by default False
epoch_cols : str or list of integers, optional
List of indices of epoch columns to use. If 'all', use all columns, by default 'all'
art_star : bool, optional
Artificial star catalog or not. If True, use startable['x'][:, epoch_cols, 1] as the location, by default False.

Returns
-------
result : pd.DataFrame
Proper motion DataFrame with columns vx, vxe, vy, vye, x0, x0e, y0, y0e, chi2_vx, chi2_vy, t0

Raises
------
ValueError
If weighting is neither 'std' nor 'var'
"""
if weighting not in ['std', 'var']:
raise ValueError(f"Weighting must be either 'std' or 'var', not '{weighting}'.")
if epoch_cols is None or (isinstance(epoch_cols, str) and epoch_cols == 'all'):
epoch_cols = np.arange(len(startable.meta['YEARS'])) # use all columns if 'all' or not specified

N = len(startable)
vx = np.zeros(N)
vy = np.zeros(N)
vxe = np.zeros(N)
vye = np.zeros(N)
x0 = np.zeros(N)
y0 = np.zeros(N)
x0e = np.zeros(N)
y0e = np.zeros(N)
chi2_vx = np.zeros(N)
chi2_vy = np.zeros(N)
t0 = np.zeros(N)

time = np.array(startable.meta['YEARS'])[epoch_cols]

if not art_star:
x_arr = startable['x'][:, epoch_cols]
y_arr = startable['y'][:, epoch_cols]
else:
x_arr = startable['x'][:, epoch_cols, 1]
y_arr = startable['y'][:, epoch_cols, 1]

xe_arr = startable['xe'][:, epoch_cols]
ye_arr = startable['ye'][:, epoch_cols]

if weighting=='std':
sigma_x_arr = np.abs(xe_arr)**0.5
sigma_y_arr = np.abs(ye_arr)**0.5
elif weighting=='var':
sigma_x_arr = xe_arr
sigma_y_arr = ye_arr

# For each star
for i in tqdm(range(len(startable))):
x = x_arr[i]
y = y_arr[i]
xe = xe_arr[i]
ye = ye_arr[i]
sigma_x = sigma_x_arr[i]
sigma_y = sigma_y_arr[i]

t_weight = 1. / np.hypot(xe, ye)
t0[i] = np.average(time, weights=t_weight)
dt = time - t0[i]

if use_scipy:
p0x = np.array([0., x.mean()])
p0y = np.array([0., y.mean()])

# Use scipy.curve_fit to fit for velocity
vx_opt, vx_cov = curve_fit(linear, dt, x, p0=p0x, sigma=sigma_x, absolute_sigma=absolute_sigma)
vy_opt, vy_cov = curve_fit(linear, dt, y, p0=p0y, sigma=sigma_y, absolute_sigma=absolute_sigma)

vx[i] = vx_opt[0]
vy[i] = vy_opt[0]
x0[i] = vx_opt[1]
y0[i] = vy_opt[1]
vxe[i], x0e[i] = np.sqrt(vx_cov.diagonal())
vye[i], y0e[i] = np.sqrt(vy_cov.diagonal())
chi2_vx[i] = calc_chi2(dt, x, sigma_x, *vx_opt)
chi2_vy[i] = calc_chi2(dt, y, sigma_y, *vy_opt)

else:
vx_result = linear_fit(dt, x, sigma=sigma_x, absolute_sigma=absolute_sigma)
vy_result = linear_fit(dt, y, sigma=sigma_y, absolute_sigma=absolute_sigma)

vx[i] = vx_result['slope']
vxe[i] = vx_result['e_slope']
x0[i] = vx_result['intercept']
x0e[i] = vx_result['e_intercept']
chi2_vx[i] = vx_result['chi2']

vy[i] = vy_result['slope']
vye[i] = vy_result['e_slope']
y0[i] = vy_result['intercept']
y0e[i] = vy_result['e_intercept']
chi2_vy[i] = vy_result['chi2']

result = pd.DataFrame({
'vx': vx, 'vy': vy,
'vxe': vxe, 'vye': vye,
'x0': x0, 'y0': y0,
'x0e': x0e, 'y0e': y0e,
'chi2_vx': chi2_vx,
'chi2_vy': chi2_vy,
't0': t0
})
return result


# Test
if __name__=='__main__':
from scipy.optimize import curve_fit

x = np.array([1,2,3,4])
y = np.array([1,2,5,6])
sigma = np.array([.4,.2,.1,.3])

for absolute_sigma in [True, False]:
result = linear_fit(x, y, sigma=sigma, absolute_sigma=absolute_sigma)
popt, pcov = curve_fit(linear, x, y, sigma=sigma, absolute_sigma=absolute_sigma)
perr = np.sqrt(np.diag(pcov))
print(f'Absolute Sigma = {absolute_sigma}')
print(f"linear_fit: slope = {result['slope']:.3f} ± {result['e_slope']:.3f}, intercept = {result['intercept']:.3f} ± {result['e_intercept']:.3f}, chi2={result['chi2']:.3f}")
print(f'curve_fit: slope = {popt[0]:.3f} ± {perr[0]:.3f}, intercept = {popt[1]:.3f} ± {perr[1]:.3f}, chi2={calc_chi2(x, y, sigma, *popt):.3f}\n')
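A hypothetical usage sketch, assuming startable is a flystar StarTable whose 'x', 'y', 'xe', 'ye' columns are (N_stars, N_epochs) arrays and whose meta['YEARS'] lists the epoch times (the layout the indexing above assumes):

pm = fit_velocity(startable, weighting='var', use_scipy=False, epoch_cols='all')
print(pm[['vx', 'vxe', 'x0', 'x0e', 'chi2_vx', 't0']].head())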
6 changes: 3 additions & 3 deletions flystar/match.py
@@ -531,7 +531,7 @@ def generic_match(sl1, sl2, init_mode='triangle',
raise TypeError("The second catalog has to be a StarList")

# Find the initial transformation
if init_mode is 'triangle': # Blind triangles method
if init_mode == 'triangle': # Blind triangles method

# Prepare the reduced starlists for matching
sl1_cut = copy.deepcopy(sl1)
@@ -548,13 +548,13 @@
transf = align.initial_align(sl1_cut, sl2_cut, briteN=n_bright,
transformModel=model, order=order_dr[0]) #order_dr[i_loop][0] ?

elif init_mode is 'match_name': # Name match
elif init_mode == 'match_name': # Name match
sl1_idx_init, sl2_idx_init, _ = starlists.restrict_by_name(sl1, sl2)
transf = model(sl2['x'][sl2_idx_init], sl2['y'][sl2_idx_init],
sl1['x'][sl1_idx_init], sl1['y'][sl1_idx_init],
order=int(order_dr[0][0]))

elif init_mode is 'load': # Load a transformation file
elif init_mode == 'load': # Load a transformation file
transf = transforms.Transform2D.from_file(kwargs['transf_file'])

else: # None of the above
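The change from 'is' to '==' matters because 'is' tests object identity, which is not guaranteed to hold for equal strings, and CPython 3.8+ emits a SyntaxWarning for 'is' against a literal. A contrived illustration:

mode = ''.join(['tri', 'angle'])
print(mode == 'triangle')   # True: value equality
print(mode is 'triangle')   # False, plus a SyntaxWarning at compile time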