diff --git a/directlfq/configs/intable_config.yaml b/directlfq/configs/intable_config.yaml
index 5a2e8a99..e97b0130 100644
--- a/directlfq/configs/intable_config.yaml
+++ b/directlfq/configs/intable_config.yaml
@@ -1,31 +1,6 @@
 ---
 #this file determines the parameters used to convert long format tables as e.g. produced by Spectronaut or DIA-NN into a wide table format
 
-alphadia_precursor_protein:
-  format: longtable
-  sample_ID: run
-  quant_ID:
-    precursor: weighted_ms1_intensity
-  protein_cols:
-    - pg_master
-  ion_hierarchy:
-    precursor:
-      order: [SEQ, MOD, CHARGE]
-      mapping:
-        SEQ:
-          - sequence
-        MOD:
-          - mods
-        CHARGE:
-          - charge
-  use_iontree: True
-  ml_level: CHARGE
-  filters:
-    protein_qval:
-      param: pg_qval
-      comparator: "<="
-      value: 0.01
-
 alphapept_peptides:
   format: longtable
diff --git a/directlfq/dashboard_parts.py b/directlfq/dashboard_parts.py
index d20f4476..8357268e 100644
--- a/directlfq/dashboard_parts.py
+++ b/directlfq/dashboard_parts.py
@@ -337,8 +337,8 @@ def run_pipeline(self, *args):
         min_nonan = self.num_nonan_vals.value
         file_of_proteins_for_normalization = None if self.protein_subset_for_normalization_file.value_input == '' else self.protein_subset_for_normalization_file.value_input
         num_cores = None if self.num_cores_vals.value == -1 else self.num_cores_vals.value
-        yaml_filt_dict_path = None if self.yaml_filt_dict_path.value_input == '' else self.yaml_filt_dict_path.value_input
-        if isinstance(additional_headers, str): # The user will enter a string with semicolon-separated values
+        yaml_filt_dict_path = None if self.yaml_filt_dict_path.value == '' else self.yaml_filt_dict_path.value
+        if additional_headers is not None: #the user will enter a string with semicolon separated values
             additional_headers = additional_headers.split(';')
 
         lfq_manager.run_lfq(
diff --git a/directlfq/utils.py b/directlfq/utils.py
index 3590f978..99ed3641 100644
--- a/directlfq/utils.py
+++ b/directlfq/utils.py
@@ -216,7 +216,7 @@ def add_columns_to_lfq_results_table(lfq_results_df, input_file, columns_to_add)
     all_columns = filter_columns_to_existing_columns(all_columns, input_file)
     lfq_results_df = lfq_results_df[[x is not None for x in lfq_results_df[config.PROTEIN_ID]]]
 
-    if len(all_columns) == 1: #if there are no columns to add, return the original dataframe
+    if (len(columns_to_add) == 0) and (len(standard_columns_for_input_type)==0) :
         return lfq_results_df
 
     input_df = pd.read_csv(input_file, sep="\t", usecols=all_columns).drop_duplicates(subset=protein_column_input_table)
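
For context on the `dashboard_parts.py` hunk above: the GUI reads `additional_headers` from a text field and splits it on semicolons before passing it to `lfq_manager.run_lfq`. Below is a minimal, self-contained sketch of that splitting behaviour; the helper name `parse_additional_headers` is hypothetical and not part of the directlfq codebase.

```python
# Hypothetical helper illustrating how a semicolon-separated "additional headers"
# string from the dashboard text field becomes a list (or stays None).
def parse_additional_headers(raw_value):
    # An empty or missing text field means no extra columns are requested.
    if raw_value is None or raw_value == '':
        return None
    # Otherwise the user entered e.g. "Genes;Organism" -> ["Genes", "Organism"].
    return raw_value.split(';')


if __name__ == "__main__":
    assert parse_additional_headers("Genes;Organism") == ["Genes", "Organism"]
    assert parse_additional_headers("") is None
```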