diff --git a/flystar/__init__.py b/flystar/__init__.py index 826bb09..96db861 100644 --- a/flystar/__init__.py +++ b/flystar/__init__.py @@ -10,3 +10,4 @@ except PackageNotFoundError: pass + diff --git a/flystar/align.py b/flystar/align.py index abdb54c..a19c2dd 100755 --- a/flystar/align.py +++ b/flystar/align.py @@ -19,6 +19,7 @@ def __init__(self, list_of_starlists, ref_index=0, iters=2, dr_tol=[1, 1], dm_tol=[2, 1], outlier_tol=[None, None], trans_args=[{'order': 2}, {'order': 2}], + init_order=1, mag_trans=True, mag_lim=None, weights=None, trans_input=None, trans_class=transforms.PolyTransform, use_vel=False, calc_trans_inverse=False, @@ -169,6 +170,7 @@ def = None. If not None, then this should contain an array or list of transform self.dm_tol = dm_tol self.outlier_tol = outlier_tol self.trans_args = trans_args + self.init_order = init_order self.mag_trans = mag_trans self.mag_lim = mag_lim self.weights = weights @@ -381,6 +383,7 @@ def match_and_transform(self, ref_mag_lim, dr_tol, dm_tol, outlier_tol, trans_ar trans = trans_initial_guess(ref_list[keepers], star_list_orig_trim, self.trans_args[0], mode=self.init_guess_mode, + order=self.init_order, verbose=self.verbose, mag_trans=self.mag_trans) @@ -417,7 +420,6 @@ def match_and_transform(self, ref_mag_lim, dr_tol, dm_tol, outlier_tol, trans_ar idx1 = idx1[keepers] idx2 = idx2[keepers] - # Determine weights in the fit. weight = self.get_weights_for_lists(ref_list[idx2], star_list_T[idx1]) @@ -484,7 +486,7 @@ def match_and_transform(self, ref_mag_lim, dr_tol, dm_tol, outlier_tol, trans_ar if self.verbose > 1: print( ' Match 2: After trans, found ', len(idx_lis), ' matches out of ', len(star_list_T), '. If match count is low, check dr_tol, dm_tol.' 
) - + ## Make plot, if desired plots.trans_positions(ref_list, ref_list[idx_ref], star_list_T, star_list_T[idx_lis], fileName='{0}'.format(star_list_T['t'][0])) @@ -1293,6 +1295,7 @@ def __init__(self, ref_list, list_of_starlists, iters=2, dr_tol=[1, 1], dm_tol=[2, 1], outlier_tol=[None, None], trans_args=[{'order': 2}, {'order': 2}], + init_order=1, mag_trans=True, mag_lim=None, ref_mag_lim=None, weights=None, trans_input=None, @@ -1383,6 +1386,10 @@ def = None. If not None, then this should contain an array or list of transform then the transformation argument (i.e. order) will be changed for every iteration in iters. + init_order: int + Polynomial transformation order to use for initial guess transformation. + Order=1 should be used in most cases, but sometimes higher order is needed + calc_trans_inverse: boolean If true, then calculate the inverse transformation (from reference to starlist) in addition to the normal transformation (from starlist to reference). The inverse @@ -1460,6 +1467,7 @@ def = None. 
If not None, then this should contain an array or list of transform super().__init__(list_of_starlists, ref_index=-1, iters=iters, dr_tol=dr_tol, dm_tol=dm_tol, outlier_tol=outlier_tol, trans_args=trans_args, + init_order=init_order, mag_trans=mag_trans, mag_lim=mag_lim, weights=weights, trans_input=trans_input, trans_class=trans_class, calc_trans_inverse=calc_trans_inverse, use_vel=use_vel, @@ -1820,7 +1828,7 @@ def add_rows_for_new_stars(ref_table, star_list, idx_lis): elif ref_table[col_name].dtype == np.dtype('bool'): new_col_empty = False else: - new_col_empty = None + new_col_empty = np.nan if len(ref_table[col_name].shape) == 1: new_col_shape = len(idx_lis_new) @@ -3494,7 +3502,7 @@ def check_trans_input(list_of_starlists, trans_input, mag_trans): def trans_initial_guess(ref_list, star_list, trans_args, mode='miracle', ignore_contains='star', verbose=True, n_req_match=3, - mag_trans=True): + mag_trans=True, order=1): """ Take two starlists and perform an initial matching and transformation. @@ -3509,7 +3517,7 @@ def trans_initial_guess(ref_list, star_list, trans_args, mode='miracle', # the "ignore_contains" string. idx_r = np.flatnonzero(np.char.find(ref_list['name'], ignore_contains) == -1) idx_s = np.flatnonzero(np.char.find(star_list['name'], ignore_contains) == -1) - + # Match the star names name_matches, ndx_r, ndx_s = np.intersect1d(ref_list['name'][idx_r], star_list['name'][idx_s], @@ -3554,7 +3562,8 @@ def trans_initial_guess(ref_list, star_list, trans_args, mode='miracle', if ('order' in trans_args) and (trans_args['order'] == 0): order = 0 else: - order = 1 + order = order + trans = transforms.PolyTransform.derive_transform(x1m, y1m ,x2m, y2m, order=order, weights=None) # Calculate flux transformation based on matches. 
If desired, should be applied as diff --git a/flystar/analysis.py b/flystar/analysis.py index 85d7ef3..953461b 100644 --- a/flystar/analysis.py +++ b/flystar/analysis.py @@ -52,6 +52,9 @@ def query_gaia(ra, dec, search_radius=30.0, table_name='gaiadr2'): gaia_job = Gaia.cone_search_async(target_coords, search_radius, table_name = table_name + '.gaia_source') gaia = gaia_job.get_results() + #Change new 'SOURCE_ID' column header back to lowercase 'source_id' so all subsequent functions still work: + gaia['SOURCE_ID'].name = 'source_id' + return gaia diff --git a/flystar/archive_io.py b/flystar/archive_io.py new file mode 100755 index 0000000..88de5cb --- /dev/null +++ b/flystar/archive_io.py @@ -0,0 +1,18 @@ +import pickle + +# Need to add these functions to a utility .py file rather than storing them in general structure. +def open_archive(file_name): + """ + Helper function to open archived files. + """ + with open(file_name, 'rb') as file_archive: + file_dict = pickle.load(file_archive) + return file_dict + +def save_archive(file_name, save_data): + """ + Helper function to archive a file. + """ + with open(file_name, 'wb') as outfile: + pickle.dump(save_data, outfile, protocol=pickle.HIGHEST_PROTOCOL) + return \ No newline at end of file diff --git a/flystar/fit_velocity.py b/flystar/fit_velocity.py new file mode 100755 index 0000000..45d55d0 --- /dev/null +++ b/flystar/fit_velocity.py @@ -0,0 +1,204 @@ +from tqdm import tqdm +from scipy.optimize import curve_fit +import numpy as np +import pandas as pd + +def linear(x, k, b): + return k*x + b + +def linear_fit(x, y, sigma=None, absolute_sigma=True): + """Weighted linear regression (See https://en.wikipedia.org/wiki/Weighted_least_squares#Solution). Recommended for low-dimension, non-degenerate data. Otherwise, please use scipy.curve_fit. + + Parameters + ---------- + x : array-like + x data + y : array-like + y data + sigma : array-like, optional + Weighted by 1/sigma**2.
If not provided, weight = 1, by default None + absolute_sigma : bool, optional + If True (default), sigma is used in an absolute sense and the estimated parameter uncertainty reflects these absolute values. If False, only the relative magnitudes of the sigma values matter, by default True + + Returns + ------- + result : dictionary + Dictionary with keys 'slope', 'e_slope', 'intercept', 'e_intercept', and 'chi2' if return_chi2=True. + """ + x = np.array(x) + y = np.array(y) + if sigma is None: + sigma = np.ones_like(x) + else: + sigma = np.array(sigma) + + X = np.vander(x, 2) + W = np.diag(1/sigma**2) + XTWX = X.T @ W @ X + pcov = np.linalg.inv(XTWX) # Covariance Matrix + popt = pcov @ X.T @ W @ y # Linear Solution + perr = np.sqrt(np.diag(pcov)) # Uncertainty of Linear Solution + + residual = y - X @ popt + chi2 = residual.T @ W @ residual + + if not absolute_sigma: + reduced_chi2 = chi2/(len(x) - 2) + perr *= reduced_chi2**0.5 + + result = { + 'slope': popt[0], + 'intercept': popt[1], + 'e_slope': perr[0], + 'e_intercept': perr[1], + 'chi2': chi2 + } + + return result + + +def calc_chi2(x, y, sigma, slope, intercept): + popt = np.array([slope, intercept]) + X = np.vander(x, 2) + W = np.diag(1/sigma**2) + residual = y - X @ popt + return residual.T @ W @ residual + + +def fit_velocity(startable, weighting='var', use_scipy=False, absolute_sigma=True, epoch_cols='all', art_star=False): + """Fit proper motion with weighted linear regression equations (see https://en.wikipedia.org/wiki/Weighted_least_squares#Solution). + Assumes that all data are valid. + + Parameters + ---------- + startable : StarTable + StarTable object + weighting : str, optional + Weighting by variance (1/ye**2) or standard deviation (1/ye), by default 'var' + use_scipy : bool, optional + Use scipy.curve_fit or flystar.fit_velocity.linear_fit, by default False + epoch_cols : str or list of intergers, optional + List of indicies of columns to use. 
If 'all', use all columns, by default 'all' + art_star : bool, optional + Artificial star catalog or not. If True, use startable['x'][:, epoch_cols, 1] as the location, by default False. + + Returns + ------- + result : pd.DataFrame + Proper motion dataframe with keys vx, vxe, vy, vye, x0, x0e, y0, y0e + + Raises + ------ + ValueError + If weighting is neither 'std' nor 'var' + """ + if weighting not in ['std', 'var']: + raise ValueError(f"Weighting must be either 'std' or 'var', not '{weighting}'.") + if epoch_cols is None or (isinstance(epoch_cols, str) and epoch_cols == 'all'): + epoch_cols = np.arange(len(startable.meta['YEARS'])) # use all cols if 'all' or not specified + + N = len(startable) + vx = np.zeros(N) + vy = np.zeros(N) + vxe = np.zeros(N) + vye = np.zeros(N) + x0 = np.zeros(N) + y0 = np.zeros(N) + x0e = np.zeros(N) + y0e = np.zeros(N) + chi2_vx = np.zeros(N) + chi2_vy = np.zeros(N) + t0 = np.zeros(N) + + time = np.array(startable.meta['YEARS'])[epoch_cols] + + if not art_star: + x_arr = startable['x'][:, epoch_cols] + y_arr = startable['y'][:, epoch_cols] + else: + x_arr = startable['x'][:, epoch_cols, 1] + y_arr = startable['y'][:, epoch_cols, 1] + + xe_arr = startable['xe'][:, epoch_cols] + ye_arr = startable['ye'][:, epoch_cols] + + if weighting=='std': + sigma_x_arr = np.abs(xe_arr)**0.5 + sigma_y_arr = np.abs(ye_arr)**0.5 + elif weighting=='var': + sigma_x_arr = xe_arr + sigma_y_arr = ye_arr + + # For each star + for i in tqdm(range(len(startable))): + x = x_arr[i] + y = y_arr[i] + xe = xe_arr[i] + ye = ye_arr[i] + sigma_x = sigma_x_arr[i] + sigma_y = sigma_y_arr[i] + + t_weight = 1.
/ np.hypot(xe, ye) + t0[i] = np.average(time, weights=t_weight) + dt = time - t0[i] + + if use_scipy: + p0x = np.array([0., x.mean()]) + p0y = np.array([0., y.mean()]) + + # Use scipy.curve_fit to fit for velocity + vx_opt, vx_cov = curve_fit(linear, dt, x, p0=p0x, sigma=sigma_x, absolute_sigma=absolute_sigma) + vy_opt, vy_cov = curve_fit(linear, dt, y, p0=p0y, sigma=sigma_y, absolute_sigma=absolute_sigma) + + vx[i] = vx_opt[0] + vy[i] = vy_opt[0] + x0[i] = vx_opt[1] + y0[i] = vy_opt[1] + vxe[i], x0e[i] = np.sqrt(vx_cov.diagonal()) + vye[i], y0e[i] = np.sqrt(vy_cov.diagonal()) + chi2_vx[i] = calc_chi2(dt, x, sigma_x, *vx_opt) + chi2_vy[i] = calc_chi2(dt, y, sigma_y, *vy_opt) + + else: + vx_result = linear_fit(dt, x, sigma=sigma_x, absolute_sigma=absolute_sigma) + vy_result = linear_fit(dt, y, sigma=sigma_y, absolute_sigma=absolute_sigma) + + vx[i] = vx_result['slope'] + vxe[i] = vx_result['e_slope'] + x0[i] = vx_result['intercept'] + x0e[i] = vx_result['e_intercept'] + chi2_vx[i] = vx_result['chi2'] + + vy[i] = vy_result['slope'] + vye[i] = vy_result['e_slope'] + y0[i] = vy_result['intercept'] + y0e[i] = vy_result['e_intercept'] + chi2_vy[i] = vy_result['chi2'] + + result = pd.DataFrame({ + 'vx': vx, 'vy': vy, + 'vxe': vxe, 'vye': vye, + 'x0': x0, 'y0': y0, + 'x0e': x0e, 'y0e': y0e, + 'chi2_vx': chi2_vx, + 'chi2_vy': chi2_vy, + 't0': t0 + }) + return result + + +# Test +if __name__=='__main__': + from scipy.optimize import curve_fit + + x = np.array([1,2,3,4]) + y = np.array([1,2,5,6]) + sigma = np.array([.4,.2,.1,.3]) + + for absolute_sigma in [True, False]: + result = linear_fit(x, y, sigma=sigma, absolute_sigma=absolute_sigma) + popt, pcov = curve_fit(linear, x, y, sigma=sigma, absolute_sigma=absolute_sigma) + perr = np.sqrt(np.diag(pcov)) + print(f'Absolute Sigma = {absolute_sigma}') + print(f"linear_fit: slope = {result['slope']:.3f} ± {result['e_slope']:.3f}, intercept = {result['intercept']:.3f} ± {result['e_intercept']:.3f}, chi2={result['chi2']:.3f}") + 
print(f'curve_fit: slope = {popt[0]:.3f} ± {perr[0]:.3f}, intercept = {popt[1]:.3f} ± {perr[1]:.3f}, chi2={calc_chi2(x, y, sigma, *popt):.3f}\n') \ No newline at end of file diff --git a/flystar/match.py b/flystar/match.py index 67a04f3..d40ccdb 100644 --- a/flystar/match.py +++ b/flystar/match.py @@ -531,7 +531,7 @@ def generic_match(sl1, sl2, init_mode='triangle', raise TypeError("The second catalog has to be a StarList") # Find the initial transformation - if init_mode is 'triangle': # Blind triangles method + if init_mode == 'triangle': # Blind triangles method # Prepare the reduced starlists for matching sl1_cut = copy.deepcopy(sl1) @@ -548,13 +548,13 @@ def generic_match(sl1, sl2, init_mode='triangle', transf = align.initial_align(sl1_cut, sl2_cut, briteN=n_bright, transformModel=model, order=order_dr[0]) #order_dr[i_loop][0] ? - elif init_mode is 'match_name': # Name match + elif init_mode == 'match_name': # Name match sl1_idx_init, sl2_idx_init, _ = starlists.restrict_by_name(sl1, sl2) transf = model(sl2['x'][sl2_idx_init], sl2['y'][sl2_idx_init], sl1['x'][sl1_idx_init], sl1['y'][sl1_idx_init], order=int(order_dr[0][0])) - elif init_mode is 'load': # Load a transformation file + elif init_mode == 'load': # Load a transformation file transf = transforms.Transform2D.from_file(kwargs['transf_file']) else: # None of the above diff --git a/flystar/plots.py b/flystar/plots.py index 7abde6e..c675170 100755 --- a/flystar/plots.py +++ b/flystar/plots.py @@ -12,6 +12,7 @@ import pdb import math import astropy +from astropy.table import Table from astropy.io import ascii #################################################### @@ -75,6 +76,7 @@ def trans_positions(ref, ref_mat, starlist, starlist_mat, xlim=None, ylim=None, py.savefig(root + 'Transformed_positions_{0}'.format(fileName) + '.png') else: py.savefig(root + 'Transformed_positions.png') + py.close() return @@ -2156,13 +2158,12 @@ def plot_chi2_dist(tab, Ndetect, xlim=40, n_bins=50): chi2_y_list = [] fnd_list = 
[] # Number of non-NaN error measurements - for ii in range(len(tab['xe'])): + for ii in range(len(tab)): # Ignore the NaNs fnd = np.argwhere(~np.isnan(tab['xe'][ii,:])) # fnd = np.where(tab['xe'][ii, :] > 0)[0] fnd_list.append(len(fnd)) - time = tab['t'][ii, fnd] x = tab['x'][ii, fnd] y = tab['y'][ii, fnd] xerr = tab['xe'][ii, fnd] @@ -3310,7 +3311,7 @@ def plot_sky(stars_tab, label=label, picker=4) #for legend - if label is not '_nolegend_': + if label != '_nolegend_': line.set_label(str(label)) epochs_legend.append(line) diff --git a/flystar/startables.py b/flystar/startables.py index df1ea05..90e50d7 100644 --- a/flystar/startables.py +++ b/flystar/startables.py @@ -1,6 +1,8 @@ -from astropy.table import Table, Column +from astropy.table import Table, Column, hstack from astropy.stats import sigma_clipping from scipy.optimize import curve_fit +from flystar.fit_velocity import linear_fit, calc_chi2, linear, fit_velocity +from tqdm import tqdm import numpy as np import warnings import collections @@ -534,22 +536,52 @@ def detections(self): return - def fit_velocities(self, bootstrap=0, fixed_t0=False, verbose=False, - mask_val=None, mask_lists=False): - """ - Fit velocities for all stars in the table. + def fit_velocities(self, weighting='var', use_scipy=True, absolute_sigma=True, bootstrap=0, fixed_t0=False, verbose=False, + mask_val=None, mask_lists=False, show_progress=True): + """Fit velocities for all stars in the table and add to the columns 'vx', 'vxe', 'vy', 'vye', 'x0', 'x0e', 'y0', 'y0e'. + + Parameters + ---------- + weighting : str, optional + Weight by variance 'var' or standard deviation 'std', by default 'var' + use_scipy : bool, optional + Use scipy.curve_fit (recommended for large number of epochs, but may return inf or nan) or analytic fitting from flystar.fit_velocity.linear_fit (recommended for a few epochs), by default True + absolute_sigma : bool, optional + Absolute sigma or not. 
See https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.curve_fit.html for details, by default True + bootstrap : int, optional + Calculate uncertainty using bootstrapping or not, by default 0 + fixed_t0 : bool or array-like, optional + Fix the t0 in dt = time - t0 if user provides an array with the same length as the table, or automatically calculate t0 = np.average(time, weights=1/np.hypot(xe, ye)) if False, by default False + verbose : bool, optional + Output verbose information or not, by default False + mask_val : float, optional + Value that needs to be masked in the data, e.g. -100000, by default None + mask_lists : list, optional + Columns that need to be masked, by default False + show_progress : bool, optional + Show progress bar or not, by default True + + Raises + ------ + ValueError + If weighting is neither 'var' nor 'std' + KeyError + If there's no time information in the table + """ + if weighting not in ['var', 'std']: + raise ValueError(f"fit_velocities: Weighting must either be 'var' or 'std', not {weighting}!") + if ('t' not in self.colnames) and ('list_times' not in self.meta): - raise RuntimeError('fit_velocities: Failed to time values.') + raise KeyError("fit_velocities: Failed to find time values. No 't' column in table, no 'list_times' in meta.") - N_stars, N_epochs = self['x'].shape + N_stars = len(self) if verbose: start_time = time.time() msg = 'Starting startable.fit_velocities for {0:d} stars with n={1:d} bootstrap' print(msg.format(N_stars, bootstrap)) - col_list_float = ['x0','vx','y0','vy','x0e','vxe','y0e','vye','t0'] + col_list_float = ['x0','vx','y0','vy','x0e','vxe','y0e','vye','chi2_vx','chi2_vy','t0'] col_list_int = ['n_vfit'] col_list = col_list_float+col_list_int # Clean/remove up old arrays. @@ -586,17 +618,21 @@ def fit_velocities(self, bootstrap=0, fixed_t0=False, verbose=False, # STARS LOOP through the stars and work on them 1 at a time. # This is slow; but robust.
- for ss in range(N_stars): - self.fit_velocity_for_star(ss, bootstrap=bootstrap, fixed_t0=fixed_t0, - mask_val=mask_val, mask_lists=mask_lists) - + if show_progress: + for ss in tqdm(range(N_stars)): + self.fit_velocity_for_star(ss, weighting=weighting, use_scipy=use_scipy, absolute_sigma=absolute_sigma, bootstrap=bootstrap, fixed_t0=fixed_t0, + mask_val=mask_val, mask_lists=mask_lists) + else: + for ss in range(N_stars): + self.fit_velocity_for_star(ss, weighting=weighting, use_scipy=use_scipy, absolute_sigma=absolute_sigma, bootstrap=bootstrap, fixed_t0=fixed_t0, + mask_val=mask_val, mask_lists=mask_lists, ) if verbose: stop_time = time.time() print('startable.fit_velocities runtime = {0:.0f} s for {1:d} stars'.format(stop_time - start_time, N_stars)) return - def fit_velocity_for_star(self, ss, bootstrap=False, fixed_t0=False, + def fit_velocity_for_star(self, ss, weighting='var', use_scipy=True, absolute_sigma=True, bootstrap=False, fixed_t0=False, mask_val=None, mask_lists=False): # Make a mask of invalid (NaN) values and a user-specified invalid value. 
@@ -605,6 +641,12 @@ def fit_velocity_for_star(self, ss, bootstrap=False, fixed_t0=False, if mask_val: x = np.ma.masked_values(x, mask_val) y = np.ma.masked_values(y, mask_val) + # If no mask, convert x.mask to list + if not np.ma.is_masked(x): + x.mask = np.zeros_like(x.data, dtype=bool) + if not np.ma.is_masked(y): + y.mask = np.zeros_like(y.data, dtype=bool) + if mask_lists is not False: # Remove a list @@ -647,6 +689,11 @@ def fit_velocity_for_star(self, ss, bootstrap=False, fixed_t0=False, if mask_val: xe = np.ma.masked_values(xe, mask_val) ye = np.ma.masked_values(ye, mask_val) + # If no mask, convert xe.mask to list + if not np.ma.is_masked(xe): + xe.mask = np.zeros_like(xe.data, dtype=bool) + if not np.ma.is_masked(ye): + ye.mask = np.zeros_like(ye.data, dtype=bool) if mask_lists is not False: # Remove a list @@ -667,6 +714,8 @@ def fit_velocity_for_star(self, ss, bootstrap=False, fixed_t0=False, if mask_val: t = np.ma.masked_values(t, mask_val) + if not np.ma.is_masked(t): + t.mask = np.zeros_like(t.data, dtype=bool) if mask_lists is not False: # Remove a list @@ -677,11 +726,13 @@ def fit_velocity_for_star(self, ss, bootstrap=False, fixed_t0=False, # Throw a warning if mask_lists is not a list if not isinstance(mask_lists, list): raise RuntimeError('mask_lists needs to be a list.') - + + # For inconsistent masks, mask the star if any of the values are masked. 
+ new_mask = np.logical_or.reduce((t.mask, x.mask, y.mask, xe.mask, ye.mask)) # Figure out where we have detections (as indicated by error columns) good = np.where((xe != 0) & (ye != 0) & np.isfinite(xe) & np.isfinite(ye) & - np.isfinite(x) & np.isfinite(y))[0] + np.isfinite(x) & np.isfinite(y) & ~new_mask)[0] N_good = len(good) @@ -731,6 +782,7 @@ def fit_velocity_for_star(self, ss, bootstrap=False, fixed_t0=False, self['vy'][ss] = 0.0 self['vxe'][ss] = 0.0 self['vye'][ss] = 0.0 + return elif motion_model_use=='linear': mod = motion_model.Linear(x[0], (x[-1]-x[0])/(t[-1]-t[0]), @@ -745,12 +797,92 @@ def fit_velocity_for_star(self, ss, bootstrap=False, fixed_t0=False, self['vxe'][ss] = param_errs[1] self['y0e'][ss] = param_errs[2] self['vye'][ss] = param_errs[3] - return + + + def fit_velocities_all_detected(self, weighting='var', use_scipy=False, absolute_sigma=False, epoch_cols='all', mask_val=None, art_star=False, return_result=False): + """Fit velocities for stars detected in all epochs specified by epoch_cols. + Criterion: xe/ye error > 0 and finite, x/y not masked. + Parameters + ---------- + weighting : str, optional + Variance weighting('var') or standard deviation weighting ('std'), by default 'var' + use_scipy : bool, optional + Use scipy.curve_fit or flystar.fit_velocity.fit_velocity, by default False + absolute_sigma : bool, optional + Absolute sigma or rescaled sigma, by default False + epoch_cols : str or list of intergers, optional + List of epoch column indices used for fitting velocity, by default 'all' + mask_val : float, optional + Values in x, y to be masked + art_star : bool, optional + Artificial star or observation star catalog. 
If artificial star, use 'det' column to select stars detected in all epochs, by default False + return_result : bool, optional + Return the velocity results or not, by default False + Returns + ------- + vel_result : astropy Table + Astropy Table with velocity results + """ - + N_stars = len(self) + + if epoch_cols == 'all': + epoch_cols = np.arange(np.shape(self['x'])[1]) + + # Artificial Star + if art_star: + detected_in_all_epochs = np.all(self['det'][:, epoch_cols], axis=1) + + # Observation Star + else: + valid_xe = np.all(self['xe'][:, epoch_cols]!=0, axis=1) & np.all(np.isfinite(self['xe'][:, epoch_cols]), axis=1) + valid_ye = np.all(self['ye'][:, epoch_cols]!=0, axis=1) & np.all(np.isfinite(self['ye'][:, epoch_cols]), axis=1) + + if mask_val: + x = np.ma.masked_values(self['x'][:, epoch_cols], mask_val) + y = np.ma.masked_values(self['y'][:, epoch_cols], mask_val) + + # If no mask, convert x.mask to list + if not np.ma.is_masked(x): + x.mask = np.zeros_like(self['x'][:, epoch_cols].data, dtype=bool) + if not np.ma.is_masked(y): + y.mask = np.zeros_like(self['y'][:, epoch_cols].data, dtype=bool) + + valid_x = ~np.any(x.mask, axis=1) + valid_y = ~np.any(y.mask, axis=1) + detected_in_all_epochs = np.logical_and.reduce(( + valid_x, valid_y, valid_xe, valid_ye + )) + else: + detected_in_all_epochs = np.logical_and(valid_xe, valid_ye) + + + # Fit velocities + vel_result = fit_velocity(self[detected_in_all_epochs], weighting=weighting, use_scipy=use_scipy, absolute_sigma=absolute_sigma, epoch_cols=epoch_cols, art_star=art_star) + vel_result = Table.from_pandas(vel_result) + # Add n_vfit + n_vfit = len(epoch_cols) + vel_result['n_vfit'] = n_vfit + # Clean/remove up old arrays. 
+ columns = list(vel_result.keys()) # 'n_vfit' is already a column of vel_result; don't list it twice + for column in columns: + if column in self.colnames: self.remove_column(column) + + # Update self + for column in columns: + column_array = np.ma.zeros(N_stars) + column_array[detected_in_all_epochs] = vel_result[column] + column_array[~detected_in_all_epochs] = np.nan + column_array.mask = ~detected_in_all_epochs + self[column] = column_array + + if return_result: + return vel_result + else: + return \ No newline at end of file