diff --git a/flystar/__init__.py b/flystar/__init__.py
index 826bb09..96db861 100644
--- a/flystar/__init__.py
+++ b/flystar/__init__.py
@@ -10,3 +10,4 @@
 except PackageNotFoundError:
     pass
 
+
diff --git a/flystar/align.py b/flystar/align.py
index 3975510..0a942ba 100755
--- a/flystar/align.py
+++ b/flystar/align.py
@@ -24,6 +24,7 @@ def __init__(self, list_of_starlists, ref_index=0, iters=2,
                  dr_tol=[1, 1], dm_tol=[2, 1],
                  outlier_tol=[None, None],
                  trans_args=[{'order': 2}, {'order': 2}],
+                 init_order=1,
                  mag_trans=True, mag_lim=None, weights=None,
                  trans_input=None, trans_class=transforms.PolyTransform,
                  use_vel=False, calc_trans_inverse=False,
@@ -174,6 +175,7 @@ def = None. If not None, then this should contain an array or list of transform
         self.dm_tol = dm_tol
         self.outlier_tol = outlier_tol
         self.trans_args = trans_args
+        self.init_order = init_order
         self.mag_trans = mag_trans
         self.mag_lim = mag_lim
         self.weights = weights
@@ -386,6 +388,7 @@ def match_and_transform(self, ref_mag_lim, dr_tol, dm_tol, outlier_tol, trans_ar
 
                 trans = trans_initial_guess(ref_list[keepers], star_list_orig_trim,
                                             self.trans_args[0], mode=self.init_guess_mode,
+                                            order=self.init_order,
                                             verbose=self.verbose,
                                             mag_trans=self.mag_trans)
 
@@ -422,7 +425,6 @@ def match_and_transform(self, ref_mag_lim, dr_tol, dm_tol, outlier_tol, trans_ar
 
             idx1 = idx1[keepers]
             idx2 = idx2[keepers]
-
             # Determine weights in the fit.
             weight = self.get_weights_for_lists(ref_list[idx2], star_list_T[idx1])
 
@@ -489,7 +491,7 @@ def match_and_transform(self, ref_mag_lim, dr_tol, dm_tol, outlier_tol, trans_ar
             if self.verbose > 1:
                 print( ' Match 2: After trans, found ', len(idx_lis), ' matches out of ', len(star_list_T),
                        '. If match count is low, check dr_tol, dm_tol.' )
-
+
             ## Make plot, if desired
             plots.trans_positions(ref_list, ref_list[idx_ref], star_list_T, star_list_T[idx_lis],
                                   fileName='{0}'.format(star_list_T['t'][0]))
@@ -1298,6 +1300,7 @@ def __init__(self, ref_list, list_of_starlists, iters=2,
                  dr_tol=[1, 1], dm_tol=[2, 1],
                  outlier_tol=[None, None],
                  trans_args=[{'order': 2}, {'order': 2}],
+                 init_order=1,
                  mag_trans=True, mag_lim=None, ref_mag_lim=None,
                  weights=None,
                  trans_input=None,
@@ -1388,6 +1391,10 @@ def = None. If not None, then this should contain an array or list of transform
         then the transformation argument (i.e. order) will be changed for every iteration in
         iters.
 
+    init_order: int
+        Polynomial transformation order to use for the initial guess transformation.
+        Order=1 should be used in most cases, but sometimes a higher order is needed.
+
     calc_trans_inverse: boolean
         If true, then calculate the inverse transformation (from reference to starlist)
         in addition to the normal transformation (from starlist to reference). The inverse
@@ -1465,6 +1472,7 @@ def = None. If not None, then this should contain an array or list of transform
         super().__init__(list_of_starlists, ref_index=-1, iters=iters,
                          dr_tol=dr_tol, dm_tol=dm_tol, outlier_tol=outlier_tol,
                          trans_args=trans_args,
+                         init_order=init_order,
                          mag_trans=mag_trans, mag_lim=mag_lim, weights=weights,
                          trans_input=trans_input, trans_class=trans_class,
                          calc_trans_inverse=calc_trans_inverse, use_vel=use_vel,
@@ -1825,7 +1833,7 @@ def add_rows_for_new_stars(ref_table, star_list, idx_lis):
         elif ref_table[col_name].dtype == np.dtype('bool'):
             new_col_empty = False
         else:
-            new_col_empty = None
+            new_col_empty = np.nan
 
         if len(ref_table[col_name].shape) == 1:
             new_col_shape = len(idx_lis_new)
@@ -3499,7 +3507,7 @@ def check_trans_input(list_of_starlists, trans_input, mag_trans):
 
 def trans_initial_guess(ref_list, star_list, trans_args, mode='miracle',
                         ignore_contains='star', verbose=True, n_req_match=3,
-                        mag_trans=True):
+                        mag_trans=True, order=1):
     """
     Take two starlists and perform an initial matching and transformation.
 
@@ -3514,7 +3522,7 @@ def trans_initial_guess(ref_list, star_list, trans_args, mode='miracle',
     # the "ignore_contains" string.
     idx_r = np.flatnonzero(np.char.find(ref_list['name'], ignore_contains) == -1)
     idx_s = np.flatnonzero(np.char.find(star_list['name'], ignore_contains) == -1)
-
+
     # Match the star names
     name_matches, ndx_r, ndx_s = np.intersect1d(ref_list['name'][idx_r],
                                                 star_list['name'][idx_s],
@@ -3559,7 +3567,8 @@ def trans_initial_guess(ref_list, star_list, trans_args, mode='miracle',
         if ('order' in trans_args) and (trans_args['order'] == 0):
             order = 0
         else:
-            order = 1
+            order = order
+
         trans = transforms.PolyTransform.derive_transform(x1m, y1m ,x2m, y2m, order=order, weights=None)
 
     # Calculate flux transformation based on matches. If desired, should be applied as
diff --git a/flystar/analysis.py b/flystar/analysis.py
index 85d7ef3..953461b 100644
--- a/flystar/analysis.py
+++ b/flystar/analysis.py
@@ -52,6 +52,9 @@ def query_gaia(ra, dec, search_radius=30.0, table_name='gaiadr2'):
     gaia_job = Gaia.cone_search_async(target_coords, search_radius,
                                       table_name = table_name + '.gaia_source')
     gaia = gaia_job.get_results()
+    #Change new 'SOURCE_ID' column header back to lowercase 'source_id' so all subsequent functions still work:
+    gaia['SOURCE_ID'].name = 'source_id'
+
     return gaia
 
 
diff --git a/flystar/plots.py b/flystar/plots.py
index 7abde6e..f680ba9 100755
--- a/flystar/plots.py
+++ b/flystar/plots.py
@@ -75,6 +75,7 @@ def trans_positions(ref, ref_mat, starlist, starlist_mat, xlim=None, ylim=None,
         py.savefig(root + 'Transformed_positions_{0}'.format(fileName) + '.png')
     else:
         py.savefig(root + 'Transformed_positions.png')
+    py.close()
 
     return
 
diff --git a/flystar/startables.py b/flystar/startables.py
index 6641800..78fa270 100644
--- a/flystar/startables.py
+++ b/flystar/startables.py
@@ -1,6 +1,7 @@
 from astropy.table import Table, Column
 from astropy.stats import sigma_clipping
 from scipy.optimize import curve_fit
+from tqdm import tqdm
 import numpy as np
 import warnings
 import collections
@@ -526,11 +527,14 @@ def detections(self):
 
         return
 
-    def fit_velocities(self, bootstrap=0, fixed_t0=False, verbose=False,
+    def fit_velocities(self, weighting='var', bootstrap=0, fixed_t0=False, verbose=False,
                        mask_val=None, mask_lists=False):
         """
        Fit velocities for all stars in the table.
""" + if weighting not in ['var', 'std']: + raise ValueError(f"Weighting must either be 'var' or 'std', not {weighting}!") + if ('t' not in self.colnames) and ('list_times' not in self.meta): raise RuntimeError('fit_velocities: Failed to time values.') @@ -591,9 +595,9 @@ def fit_velocities(self, bootstrap=0, fixed_t0=False, verbose=False, # STARS LOOP through the stars and work on them 1 at a time. # This is slow; but robust. - for ss in range(N_stars): + for ss in tqdm(range(N_stars)): self.fit_velocity_for_star(ss, bootstrap=bootstrap, fixed_t0=fixed_t0, - mask_val=mask_val, mask_lists=mask_lists) + mask_val=mask_val, mask_lists=mask_lists, weighting=weighting) if verbose: stop_time = time.time() @@ -602,7 +606,7 @@ def fit_velocities(self, bootstrap=0, fixed_t0=False, verbose=False, return def fit_velocity_for_star(self, ss, bootstrap=False, fixed_t0=False, - mask_val=None, mask_lists=False): + mask_val=None, mask_lists=False, weighting='var'): def poly_model(time, *params): pos = np.polynomial.polynomial.polyval(time, params) return pos @@ -613,6 +617,12 @@ def poly_model(time, *params): if mask_val: x = np.ma.masked_values(x, mask_val) y = np.ma.masked_values(y, mask_val) + # If no mask, convert x.mask to list + if not np.ma.is_masked(x): + x.mask = np.zeros_like(x.data, dtype=bool) + if not np.ma.is_masked(y): + y.mask = np.zeros_like(y.data, dtype=bool) + if mask_lists is not False: # Remove a list @@ -655,6 +665,11 @@ def poly_model(time, *params): if mask_val: xe = np.ma.masked_values(xe, mask_val) ye = np.ma.masked_values(ye, mask_val) + # If no mask, convert xe.mask to list + if not np.ma.is_masked(xe): + xe.mask = np.zeros_like(xe.data, dtype=bool) + if not np.ma.is_masked(ye): + ye.mask = np.zeros_like(ye.data, dtype=bool) if mask_lists is not False: # Remove a list @@ -675,6 +690,8 @@ def poly_model(time, *params): if mask_val: t = np.ma.masked_values(t, mask_val) + if not np.ma.is_masked(t): + t.mask = np.zeros_like(t.data, dtype=bool) if mask_lists is not False: # Remove a list @@ -685,11 +702,13 @@ def poly_model(time, *params): # Throw a warning if mask_lists is not a list if not isinstance(mask_lists, list): raise RuntimeError('mask_lists needs to be a list.') - + + # For inconsistent masks, mask the star if any of the values are masked. + new_mask = np.logical_or.reduce((t.mask, x.mask, y.mask, xe.mask, ye.mask)) # Figure out where we have detections (as indicated by error columns) good = np.where((xe != 0) & (ye != 0) & np.isfinite(xe) & np.isfinite(ye) & - np.isfinite(x) & np.isfinite(y))[0] + np.isfinite(x) & np.isfinite(y) & ~new_mask)[0] N_good = len(good) @@ -721,8 +740,12 @@ def poly_model(time, *params): # Catch the case where all the times are identical if (dt == dt[0]).all(): - wgt_x = (1.0/xe)**2 - wgt_y = (1.0/ye)**2 + if weighting == 'var': + wgt_x = (1.0/xe)**2 + wgt_y = (1.0/ye)**2 + elif weighting == 'std': + wgt_x = 1./np.abs(xe) + wgt_y = 1./np.abs(ye) self['x0'][ss] = np.average(x, weights=wgt_x) self['y0'][ss] = np.average(y, weights=wgt_y) @@ -739,11 +762,18 @@ def poly_model(time, *params): # Catch the case where we have enough measurements to actually # fit a velocity! 
         if N_good > 2:
-            vx_opt, vx_cov = curve_fit(poly_model, dt.compressed(), x.compressed(), p0=p0x, sigma=xe.compressed(),
-                                       absolute_sigma=True)
-            vy_opt, vy_cov = curve_fit(poly_model, dt.compressed(), y.compressed(), p0=p0y, sigma=ye.compressed(),
-                                       absolute_sigma=True)
-
+            if weighting == 'var':
+                sigma_x = xe
+                sigma_y = ye
+            elif weighting == 'std':
+                sigma_x = np.abs(xe)**0.5
+                sigma_y = np.abs(ye)**0.5
+
+            vx_opt, vx_cov = curve_fit(poly_model, dt, x, p0=p0x, sigma=sigma_x,
+                                       absolute_sigma=True)
+            vy_opt, vy_cov = curve_fit(poly_model, dt, y, p0=p0y, sigma=sigma_y,
+                                       absolute_sigma=True)
+
             self['x0'][ss] = vx_opt[0]
             self['vx'][ss] = vx_opt[1]
             self['y0'][ss] = vy_opt[0]
@@ -760,10 +790,16 @@
 
             for bb in range(bootstrap):
                 bdx = np.random.choice(edx, N_good)
-
-                vx_opt_b, vx_cov_b = curve_fit(poly_model, dt[bdx].compressed(), x[bdx].compressed(), p0=vx_opt, sigma=xe[bdx].compressed(),
+                if weighting == 'var':
+                    sigma_x = xe[bdx]
+                    sigma_y = ye[bdx]
+                elif weighting == 'std':
+                    sigma_x = xe[bdx]**0.5
+                    sigma_y = ye[bdx]**0.5
+
+                vx_opt_b, vx_cov_b = curve_fit(poly_model, dt[bdx], x[bdx], p0=vx_opt, sigma=sigma_x,
                                                absolute_sigma=True)
-                vy_opt_b, vy_cov_b = curve_fit(poly_model, dt[bdx].compressed(), y[bdx].compressed(), p0=vy_opt, sigma=ye[bdx].compressed(),
+                vy_opt_b, vy_cov_b = curve_fit(poly_model, dt[bdx], y[bdx], p0=vy_opt, sigma=sigma_y,
                                                absolute_sigma=True)
 
                 fit_x0_b[bb] = vx_opt_b[0]
@@ -786,9 +822,13 @@
                 self['vye'][ss] = vy_err[1]
 
         elif N_good == 2:
-            # Note nough epochs to fit a velocity.
-            self['x0'][ss] = np.average(x, weights=1.0/xe**2)
-            self['y0'][ss] = np.average(y, weights=1.0/ye**2)
+            # Not enough epochs to fit a velocity.
+            if weighting == 'var':
+                self['x0'][ss] = np.average(x, weights=1.0/xe**2)
+                self['y0'][ss] = np.average(y, weights=1.0/ye**2)
+            elif weighting == 'std':
+                self['x0'][ss] = np.average(x, weights=1.0/np.abs(xe))
+                self['y0'][ss] = np.average(y, weights=1.0/np.abs(ye))
 
             dx = np.diff(x)[0]
             dy = np.diff(y)[0]
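
Usage note (illustrative, not part of the patch): a minimal sketch of how the two new keywords introduced above might be exercised. Only init_order and fit_velocities(weighting=...) come from the diff itself; the class name MosaicToRef and the pre-built objects ref_list, star_lists, and tab are assumptions/placeholders.

# Hypothetical sketch -- assumes ref_list / star_lists are already-loaded flystar
# starlists and tab is an existing StarTable with x, y, xe, ye, and t columns.
from flystar import align

# init_order sets the polynomial order of the initial-guess transformation;
# order=1 is appropriate in most cases (see the docstring added above).
msc = align.MosaicToRef(ref_list, star_lists,
                        iters=2, dr_tol=[1, 1], dm_tol=[2, 1],
                        trans_args=[{'order': 2}, {'order': 2}],
                        init_order=1)

# weighting selects how positional errors enter the velocity fits:
# 'var' keeps the previous 1/err**2 weights, 'std' weights by 1/|err|.
tab.fit_velocities(weighting='std', bootstrap=0)

Passing any other weighting value raises the new ValueError added in startables.py.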