diff --git a/.ci/flake8_lint_include_list.txt b/.ci/flake8_lint_include_list.txt
index 0ddc6d8a6c23..b9c3491444a9 100644
--- a/.ci/flake8_lint_include_list.txt
+++ b/.ci/flake8_lint_include_list.txt
@@ -264,35 +264,7 @@ lib/galaxy/sample_tracking/__init__.py
 lib/galaxy/sample_tracking/sample.py
 lib/galaxy/security/validate_user_input.py
 lib/galaxy/tags/
-lib/galaxy/tools/actions/
-lib/galaxy/tools/cwl/
-lib/galaxy/tools/data_manager/__init__.py
-lib/galaxy/tools/deps/
-lib/galaxy/tools/exception_handling.py
-lib/galaxy/tools/execute.py
-lib/galaxy/tools/filters/
-lib/galaxy/tools/imp_exp/export_history.py
-lib/galaxy/tools/imp_exp/__init__.py
-lib/galaxy/tools/linters/
-lib/galaxy/tools/lint.py
-lib/galaxy/tools/lint_util.py
-lib/galaxy/tools/loader_directory.py
-lib/galaxy/tools/loader.py
-lib/galaxy/tools/parameters/dataset_matcher.py
-lib/galaxy/tools/parameters/history_query.py
-lib/galaxy/tools/parameters/__init__.py
-lib/galaxy/tools/parameters/input_translation.py
-lib/galaxy/tools/parameters/sanitize.py
-lib/galaxy/tools/parameters/validation.py
-lib/galaxy/tools/parameters/wrapped_json.py
-lib/galaxy/tools/parameters/wrapped.py
-lib/galaxy/tools/parser/
-lib/galaxy/tools/special_tools.py
-lib/galaxy/tools/test.py
-lib/galaxy/tools/toolbox/
-lib/galaxy/tools/util/galaxyops/
-lib/galaxy/tools/util/__init__.py
-lib/galaxy/tools/verify/
+lib/galaxy/tools/
 lib/galaxy/util/
 lib/galaxy_utils/__init__.py
 lib/galaxy/util/sleeper.py
diff --git a/.ci/py3_sources.txt b/.ci/py3_sources.txt
index 83a46debe7a0..25e55544477f 100644
--- a/.ci/py3_sources.txt
+++ b/.ci/py3_sources.txt
@@ -40,26 +40,7 @@ lib/galaxy/quota/
 lib/galaxy/sample_tracking/
 lib/galaxy/security/
 lib/galaxy/tags/
-lib/galaxy/tools/actions/
-lib/galaxy/tools/cwl/
-lib/galaxy/tools/deps/
-lib/galaxy/tools/exception_handling.py
-lib/galaxy/tools/execute.py
-lib/galaxy/tools/lint.py
-lib/galaxy/tools/lint_util.py
-lib/galaxy/tools/linters/
-lib/galaxy/tools/loader.py
-lib/galaxy/tools/loader_directory.py
-lib/galaxy/tools/parameters/dataset_matcher.py
-lib/galaxy/tools/parameters/__init__.py
-lib/galaxy/tools/parameters/input_translation.py
-lib/galaxy/tools/parameters/sanitize.py
-lib/galaxy/tools/parameters/validation.py
-lib/galaxy/tools/parameters/wrapped_json.py
-lib/galaxy/tools/parameters/wrapped.py
-lib/galaxy/tools/parser/
-lib/galaxy/tools/test.py
-lib/galaxy/tools/toolbox/
+lib/galaxy/tools/
 lib/galaxy/tours/
 lib/galaxy/util/
 lib/galaxy/visualization/
diff --git a/lib/galaxy/tools/__init__.py b/lib/galaxy/tools/__init__.py
index 545d16104a28..72c05c470db3 100755
--- a/lib/galaxy/tools/__init__.py
+++ b/lib/galaxy/tools/__init__.py
@@ -1,7 +1,6 @@
 """
 Classes encapsulating galaxy tools and tool configuration.
""" - import glob import json import logging @@ -10,64 +9,88 @@ import tarfile import tempfile import threading -import urllib -from datetime import datetime - from cgi import FieldStorage +from datetime import datetime from xml.etree import ElementTree + from mako.template import Template from paste import httpexceptions from six import string_types +from six.moves.urllib.parse import unquote_plus -from galaxy.version import VERSION_MAJOR -from galaxy import model -from galaxy.managers import histories +import galaxy.jobs +import tool_shed.util.repository_util as repository_util +from galaxy import ( + exceptions, + model +) from galaxy.datatypes.metadata import JobExternalOutputMetadataWrapper -from galaxy import exceptions +from galaxy.managers import histories from galaxy.queue_worker import ( reload_toolbox, send_control_task ) from galaxy.tools.actions import DefaultToolAction -from galaxy.tools.actions.upload import UploadToolAction -from galaxy.tools.actions.data_source import DataSourceToolAction from galaxy.tools.actions.data_manager import DataManagerToolAction +from galaxy.tools.actions.data_source import DataSourceToolAction from galaxy.tools.actions.model_operations import ModelOperationToolAction -from galaxy.tools.deps import views -from galaxy.tools.deps import CachedDependencyManager -from galaxy.tools.parameters import params_to_incoming, check_param, params_from_strings, params_to_strings, visit_input_values +from galaxy.tools.actions.upload import UploadToolAction +from galaxy.tools.deps import ( + CachedDependencyManager, + views +) +from galaxy.tools.parameters import ( + check_param, + params_from_strings, + params_to_incoming, + params_to_strings, + visit_input_values +) from galaxy.tools.parameters import output_collect -from galaxy.tools.parameters.basic import (BaseURLToolParameter, - DataToolParameter, DataCollectionToolParameter, HiddenToolParameter, - SelectToolParameter, ToolParameter) +from galaxy.tools.parameters.basic import ( + BaseURLToolParameter, + DataCollectionToolParameter, + DataToolParameter, + HiddenToolParameter, + SelectToolParameter, + ToolParameter +) from galaxy.tools.parameters.grouping import Conditional, ConditionalWhen, Repeat, Section, UploadDataset from galaxy.tools.parameters.input_translation import ToolInputTranslator -from galaxy.tools.test import parse_tests -from galaxy.tools.parser import get_tool_source +from galaxy.tools.parameters.meta import expand_meta_parameters +from galaxy.tools.parser import ( + get_tool_source, + ToolOutputCollectionPart +) from galaxy.tools.parser.xml import XmlPageSource -from galaxy.tools.parser import ToolOutputCollectionPart +from galaxy.tools.test import parse_tests from galaxy.tools.toolbox import BaseGalaxyToolBox -from galaxy.util import rst_to_html, string_as_bool -from galaxy.util import ExecutionTimer -from galaxy.util import listify -from galaxy.util import unicodify -from galaxy.tools.parameters.meta import expand_meta_parameters +from galaxy.util import ( + ExecutionTimer, + listify, + rst_to_html, + string_as_bool, + unicodify +) from galaxy.util.bunch import Bunch +from galaxy.util.dictifiable import Dictifiable from galaxy.util.expressions import ExpressionContext from galaxy.util.json import json_fix from galaxy.util.odict import odict from galaxy.util.template import fill_template +from galaxy.version import VERSION_MAJOR from galaxy.web import url_for from galaxy.web.form_builder import SelectField -from galaxy.util.dictifiable import Dictifiable from galaxy.work.context import 
WorkRequestContext from tool_shed.util import common_util -import tool_shed.util.repository_util as repository_util from tool_shed.util import shed_util_common as suc -from .loader import template_macro_params, raw_tool_xml_tree, imported_macro_paths from .execute import execute as execute_job -import galaxy.jobs +from .loader import ( + imported_macro_paths, + raw_tool_xml_tree, + template_macro_params +) log = logging.getLogger( __name__ ) @@ -475,7 +498,7 @@ def __get_job_tool_configuration(self, job_params=None): if job_tool_config.params: # There are job params and this config has params defined for param, value in job_params.items(): - if param not in job_tool_config.params or job_tool_config.params[param] != job_params[param]: + if param not in job_tool_config.params or job_tool_config.params[param] != value: break else: # All params match, use this config @@ -683,12 +706,13 @@ def __parse_legacy_features(self, tool_source): self.hook_map[key] = value file_name = code_elem.get("file") code_path = os.path.join( self.tool_dir, file_name ) - execfile( code_path, self.code_namespace ) + with open(code_path) as f: + exec(compile(f.read(), code_path, 'exec'), self.code_namespace) # User interface hints uihints_elem = root.find( "uihints" ) if uihints_elem is not None: - for key, value in uihints_elem.attrib.iteritems(): + for key, value in uihints_elem.attrib.items(): self.uihints[ key ] = value def __parse_tests(self, tool_source): @@ -778,12 +802,12 @@ def parse_inputs( self, tool_source ): # nginx_upload_path. This logic is handled in the tool_form.mako # template. if self.nginx_upload and self.app.config.nginx_upload_path: - if '?' in urllib.unquote_plus( self.action ): + if '?' in unquote_plus( self.action ): raise Exception( 'URL parameters in a non-default tool action can not be used ' 'in conjunction with nginx upload. Please convert them to ' 'hidden POST parameters' ) self.action = (self.app.config.nginx_upload_path + '?nginx_redir=', - urllib.unquote_plus(self.action)) + unquote_plus(self.action)) self.target = input_elem.get( "target", self.target ) self.method = input_elem.get( "method", self.method ) # Parse the actual parameters @@ -909,7 +933,7 @@ def parse_input_elem( self, page_source, enctypes, context=None ): group.test_param.refresh_on_change = True for attr in value_from[1].split( '.' ): group.value_from = getattr( group.value_from, attr ) - for case_value, case_inputs in group.value_from( context, group, self ).iteritems(): + for case_value, case_inputs in group.value_from( context, group, self ).items(): case = ConditionalWhen() case.value = case_value if case_inputs: @@ -1136,7 +1160,7 @@ def fill_in_new_state( self, trans, inputs, state, context=None ): in the dictionary `inputs`. Grouping elements are filled in recursively. 
""" context = ExpressionContext( state, context ) - for input in inputs.itervalues(): + for input in inputs.values(): state[ input.name ] = input.get_initial_value( trans, context ) def get_param( self, key ): @@ -1226,8 +1250,8 @@ def handle_input( self, trans, incoming, history=None ): log.debug( 'Validated and populated state for tool request %s' % validation_timer ) # If there were errors, we stay on the same page and display them if any( all_errors ): - err_data = { key: value for d in all_errors for ( key, value ) in d.iteritems() } - raise exceptions.MessageException( ', '.join( [ msg for msg in err_data.itervalues() ] ), err_data=err_data ) + err_data = { key: value for d in all_errors for ( key, value ) in d.items() } + raise exceptions.MessageException( ', '.join( msg for msg in err_data.values() ), err_data=err_data ) else: execution_tracker = execute_job( trans, self, all_params, history=request_context.history, rerun_remap_job_id=rerun_remap_job_id, collection_info=collection_info ) if execution_tracker.successful_jobs: @@ -1257,7 +1281,7 @@ def handle_single_execution( self, trans, rerun_remap_job_id, params, history, m message = 'Error executing tool: %s' % str(e) return False, message if isinstance( out_data, odict ): - return job, out_data.items() + return job, list(out_data.items()) else: if isinstance( out_data, string_types ): message = out_data @@ -1308,7 +1332,7 @@ def get_static_param_values( self, trans ): does require input. """ args = dict() - for key, param in self.inputs.iteritems(): + for key, param in self.inputs.items(): # BaseURLToolParameter is now a subclass of HiddenToolParameter, so # we must check if param is a BaseURLToolParameter first if isinstance( param, BaseURLToolParameter ): @@ -1762,7 +1786,7 @@ def to_json( self, trans, kwd={}, job=None, workflow_building_mode=False ): # populates model from state def populate_model( inputs, state_inputs, group_inputs, other_values=None ): other_values = ExpressionContext( state_inputs, other_values ) - for input_index, input in enumerate( inputs.itervalues() ): + for input_index, input in enumerate( inputs.values() ): tool_dict = None group_state = state_inputs.get( input.name, {} ) if input.type == 'repeat': @@ -1855,7 +1879,7 @@ def populate_model( inputs, state_inputs, group_inputs, other_values=None ): # populates state from incoming parameters def populate_state( self, request_context, inputs, incoming, state, errors={}, prefix='', context=None ): context = ExpressionContext( state, context ) - for input in inputs.itervalues(): + for input in inputs.values(): state[ input.name ] = input.get_initial_value( request_context, context ) key = prefix + input.name group_state = state[ input.name ] @@ -1865,7 +1889,7 @@ def populate_state( self, request_context, inputs, incoming, state, errors={}, p del group_state[:] while True: rep_prefix = '%s_%d' % ( key, rep_index ) - if not any( [ incoming_key.startswith( rep_prefix ) for incoming_key in incoming.keys() ] ) and rep_index >= input.min: + if not any( incoming_key.startswith( rep_prefix ) for incoming_key in incoming.keys() ) and rep_index >= input.min: break if rep_index < input.max: new_state = { '__index__' : rep_index } @@ -1900,7 +1924,7 @@ def populate_state( self, request_context, inputs, incoming, state, errors={}, p del group_state[ -1 ] while len( writable_files ) > len( group_state ): new_state = { '__index__' : len( group_state ) } - for upload_item in input.inputs.itervalues(): + for upload_item in input.inputs.values(): new_state[ 
upload_item.name ] = upload_item.get_initial_value( request_context, context ) group_state.append( new_state ) for i, rep_state in enumerate( group_state ): @@ -2070,7 +2094,7 @@ def _prepare_json_list( self, param_list ): def _prepare_json_param_dict( self, param_dict ): rval = {} - for key, value in param_dict.iteritems(): + for key, value in param_dict.items(): if isinstance( value, dict ): rval[ key ] = self._prepare_json_param_dict( value ) elif isinstance( value, list ): @@ -2087,7 +2111,7 @@ def exec_before_job( self, app, inp_data, out_data, param_dict=None ): json_params[ 'output_data' ] = [] json_params[ 'job_config' ] = dict( GALAXY_DATATYPES_CONF_FILE=param_dict.get( 'GALAXY_DATATYPES_CONF_FILE' ), GALAXY_ROOT_DIR=param_dict.get( 'GALAXY_ROOT_DIR' ), TOOL_PROVIDED_JOB_METADATA_FILE=galaxy.jobs.TOOL_PROVIDED_JOB_METADATA_FILE ) json_filename = None - for i, ( out_name, data ) in enumerate( out_data.iteritems() ): + for i, ( out_name, data ) in enumerate( out_data.items() ): # use wrapped dataset to access certain values wrapped_data = param_dict.get( out_name ) # allow multiple files to be created @@ -2139,7 +2163,7 @@ def exec_before_job( self, app, inp_data, out_data, param_dict=None ): json_params[ 'output_data' ] = [] json_params[ 'job_config' ] = dict( GALAXY_DATATYPES_CONF_FILE=param_dict.get( 'GALAXY_DATATYPES_CONF_FILE' ), GALAXY_ROOT_DIR=param_dict.get( 'GALAXY_ROOT_DIR' ), TOOL_PROVIDED_JOB_METADATA_FILE=galaxy.jobs.TOOL_PROVIDED_JOB_METADATA_FILE ) json_filename = None - for i, ( out_name, data ) in enumerate( out_data.iteritems() ): + for i, ( out_name, data ) in enumerate( out_data.items() ): # use wrapped dataset to access certain values wrapped_data = param_dict.get( out_name ) # allow multiple files to be created @@ -2192,7 +2216,7 @@ class SetMetadataTool( Tool ): requires_setting_metadata = False def exec_after_process( self, app, inp_data, out_data, param_dict, job=None ): - for name, dataset in inp_data.iteritems(): + for name, dataset in inp_data.items(): external_metadata = JobExternalOutputMetadataWrapper( job ) if external_metadata.external_metadata_set_successfully( dataset, app.model.context ): dataset.metadata.from_JSON_dict( external_metadata.get_output_filenames_by_dataset( dataset, app.model.context ).filename_out ) @@ -2250,7 +2274,7 @@ def exec_after_process( self, app, inp_data, out_data, param_dict, job=None, **k if job and job.state == job.states.ERROR: return # Job state may now be 'running' instead of previous 'error', but datasets are still set to e.g. 
error - for dataset in out_data.itervalues(): + for dataset in out_data.values(): if dataset.state != dataset.states.OK: return data_manager_id = job.data_manager_association.data_manager_id @@ -2385,7 +2409,7 @@ def produce_outputs( self, trans, out_data, output_collections, incoming, histor new_elements["reverse"] = reverse output_collections.create_collection( - self.outputs.values()[0], "output", elements=new_elements + next(iter(self.outputs.values())), "output", elements=new_elements ) @@ -2464,7 +2488,7 @@ def produce_outputs( self, trans, out_data, output_collections, incoming, histor new_elements[key] = value.copy() output_collections.create_collection( - self.outputs.values()[0], "output", elements=new_elements + next(iter(self.outputs.values())), "output", elements=new_elements ) @@ -2499,7 +2523,7 @@ def produce_outputs( self, trans, out_data, output_collections, incoming, histor new_elements[element_identifier] = element.copy() output_collections.create_collection( - self.outputs.values()[0], "output", elements=new_elements + next(iter(self.outputs.values())), "output", elements=new_elements ) @@ -2523,7 +2547,7 @@ def add_elements(collection, prefix=""): add_elements(hdca.collection) output_collections.create_collection( - self.outputs.values()[0], "output", elements=new_elements + next(iter(self.outputs.values())), "output", elements=new_elements ) diff --git a/lib/galaxy/tools/data/__init__.py b/lib/galaxy/tools/data/__init__.py index f13d4c9b1b4b..8de016426b37 100644 --- a/lib/galaxy/tools/data/__init__.py +++ b/lib/galaxy/tools/data/__init__.py @@ -6,21 +6,20 @@ to modify the tool configurations. """ +import hashlib import logging import os import os.path import re import string -import hashlib - from glob import glob from tempfile import NamedTemporaryFile -from urllib2 import urlopen -from galaxy import util -from galaxy.util.odict import odict +from six.moves.urllib.request import urlopen +from galaxy import util from galaxy.util.dictifiable import Dictifiable +from galaxy.util.odict import odict log = logging.getLogger( __name__ ) @@ -158,7 +157,7 @@ def to_xml_file( self, shed_tool_data_table_config, new_elems=None, remove_elems for elem in out_elems: out.write( util.xml_to_string( elem, pretty=True ) ) out.write( '\n' ) - os.chmod( full_path, 0644 ) + os.chmod( full_path, 0o644 ) def reload_tables( self, table_names=None ): """ @@ -166,7 +165,7 @@ def reload_tables( self, table_names=None ): """ tables = self.get_tables() if not table_names: - table_names = tables.keys() + table_names = list(tables.keys()) elif not isinstance( table_names, list ): table_names = [ table_names ] for table_name in table_names: @@ -349,7 +348,7 @@ def configure_and_load( self, config_element, tool_data_path, from_shed_config=F self.filenames[ filename ] = dict( found=found, filename=filename, from_shed_config=from_shed_config, tool_data_path=tool_data_path, config_element=config_element, tool_shed_repository=repo_info, errors=errors ) else: - log.debug( "Filename '%s' already exists in filenames (%s), not adding", filename, self.filenames.keys() ) + log.debug( "Filename '%s' already exists in filenames (%s), not adding", filename, list(self.filenames.keys()) ) # Remove URL tmp file if tmp_file is not None: tmp_file.close() @@ -357,7 +356,7 @@ def configure_and_load( self, config_element, tool_data_path, from_shed_config=F def merge_tool_data_table( self, other_table, allow_duplicates=True, persist=False, persist_on_error=False, entry_source=None, **kwd ): assert self.columns == 
other_table.columns, "Merging tabular data tables with non matching columns is not allowed: %s:%s != %s:%s" % ( self.name, self.columns, other_table.name, other_table.columns ) # merge filename info - for filename, info in other_table.filenames.iteritems(): + for filename, info in other_table.filenames.items(): if filename not in self.filenames: self.filenames[ filename ] = info # save info about table @@ -473,7 +472,7 @@ def get_column_name_list( self ): rval = [] for i in range( self.largest_index + 1 ): found_column = False - for name, index in self.columns.iteritems(): + for name, index in self.columns.items(): if index == i: if not found_column: rval.append( name ) @@ -530,7 +529,7 @@ def get_filename_for_source( self, source, default=None ): else: source_repo_info = None filename = default - for name, value in self.filenames.iteritems(): + for name, value in self.filenames.items(): repo_info = value.get( 'tool_shed_repository', None ) if ( not source_repo_info and not repo_info ) or ( source_repo_info and repo_info and source_repo_info == repo_info ): filename = name @@ -637,7 +636,7 @@ def _replace_field_separators( self, fields, separator=None, replace=None, comme replace = "_" else: replace = " " - return map( lambda x: x.replace( separator, replace ), fields ) + return [x.replace( separator, replace ) for x in fields] def _deduplicate_data( self ): # Remove duplicate entries, without recreating self.data object diff --git a/lib/galaxy/tools/data_manager/manager.py b/lib/galaxy/tools/data_manager/manager.py index 44691e7fd569..aa57a35cc873 100644 --- a/lib/galaxy/tools/data_manager/manager.py +++ b/lib/galaxy/tools/data_manager/manager.py @@ -1,20 +1,24 @@ import errno import json +import logging import os + from six import string_types from galaxy import util -from galaxy.util.odict import odict -from galaxy.util.template import fill_template +from galaxy.queue_worker import ( + reload_data_managers, + send_control_task +) from galaxy.tools.data import TabularToolDataTable from galaxy.tools.toolbox.watcher import get_tool_conf_watcher -from tool_shed.util import common_util -from tool_shed.util import repository_util -from galaxy.queue_worker import reload_data_managers -from galaxy.queue_worker import send_control_task +from galaxy.util.odict import odict +from galaxy.util.template import fill_template +from tool_shed.util import ( + common_util, + repository_util +) -# set up logger -import logging log = logging.getLogger( __name__ ) SUPPORTED_DATA_TABLE_TYPES = ( TabularToolDataTable ) @@ -112,7 +116,7 @@ def remove_manager( self, manager_ids ): # determine if any data_tables are no longer tracked for data_table_name in data_manager.data_tables.keys(): remove_data_table_tracking = True - for other_data_manager in self.data_managers.itervalues(): + for other_data_manager in self.data_managers.values(): if data_table_name in other_data_manager.data_tables: remove_data_table_tracking = False break @@ -279,21 +283,21 @@ def process_result( self, out_data ): data_manager_dicts = {} data_manager_dict = {} # TODO: fix this merging below - for output_name, output_dataset in out_data.iteritems(): + for output_name, output_dataset in out_data.items(): try: output_dict = json.loads( open( output_dataset.file_name ).read() ) except Exception as e: log.warning( 'Error reading DataManagerTool json for "%s": %s' % ( output_name, e ) ) continue data_manager_dicts[ output_name ] = output_dict - for key, value in output_dict.iteritems(): + for key, value in output_dict.items(): if key not in 
data_manager_dict: data_manager_dict[ key ] = {} data_manager_dict[ key ].update( value ) data_manager_dict.update( output_dict ) data_tables_dict = data_manager_dict.get( 'data_tables', {} ) - for data_table_name in self.data_tables.iterkeys(): + for data_table_name in self.data_tables.keys(): data_table_values = data_tables_dict.pop( data_table_name, None ) if not data_table_values: log.warning( 'No values for data table "%s" were returned by the data manager "%s".' % ( data_table_name, self.id ) ) @@ -307,7 +311,7 @@ def process_result( self, out_data ): continue # next table name output_ref_values = {} if data_table_name in self.output_ref_by_data_table: - for data_table_column, output_ref in self.output_ref_by_data_table[ data_table_name ].iteritems(): + for data_table_column, output_ref in self.output_ref_by_data_table[ data_table_name ].items(): output_ref_dataset = out_data.get( output_ref, None ) assert output_ref_dataset is not None, "Referenced output was not found." output_ref_values[ data_table_column ] = output_ref_dataset @@ -316,7 +320,7 @@ def process_result( self, out_data ): data_table_values = [ data_table_values ] for data_table_row in data_table_values: data_table_value = dict( **data_table_row ) # keep original values here - for name, value in data_table_row.iteritems(): # FIXME: need to loop through here based upon order listed in data_manager config + for name, value in data_table_row.items(): # FIXME: need to loop through here based upon order listed in data_manager config if name in output_ref_values: self.process_move( data_table_name, name, output_ref_values[ name ].extra_files_path, **data_table_value ) data_table_value[ name ] = self.process_value_translation( data_table_name, name, **data_table_value ) @@ -332,13 +336,13 @@ def process_result( self, out_data ): for ref_file in out_data.values(): util.move_merge( ref_file.extra_files_path, self.data_managers.app.config.galaxy_data_manager_data_path ) path_column_names = [ 'path' ] - for data_table_name, data_table_values in data_tables_dict.iteritems(): + for data_table_name, data_table_values in data_tables_dict.items(): data_table = self.data_managers.app.tool_data_tables.get( data_table_name, None ) if not isinstance( data_table_values, list ): data_table_values = [ data_table_values ] for data_table_row in data_table_values: data_table_value = dict( **data_table_row ) # keep original values here - for name, value in data_table_row.iteritems(): + for name, value in data_table_row.items(): if name in path_column_names: data_table_value[ name ] = os.path.abspath( os.path.join( self.data_managers.app.config.galaxy_data_manager_data_path, value ) ) data_table.add_entry( data_table_value, persist=True, entry_source=self ) @@ -346,7 +350,7 @@ def process_result( self, out_data ): noop_self=True, kwargs={'table_name': data_table_name} ) else: - for data_table_name, data_table_values in data_tables_dict.iteritems(): + for data_table_name, data_table_values in data_tables_dict.items(): # tool returned extra data table entries, but data table was not declared in data manager # do not add these values, but do provide messages log.warning( 'The data manager "%s" returned an undeclared data table "%s" with new entries "%s". These entries will not be created. Please confirm that an entry for "%s" exists in your "%s" file.' 
% ( self.id, data_table_name, data_table_values, data_table_name, self.data_managers.filename ) ) diff --git a/lib/galaxy/tools/deps/resolvers/resolver_mixins.py b/lib/galaxy/tools/deps/resolvers/resolver_mixins.py index 1a330af9d877..61663e0c08b0 100644 --- a/lib/galaxy/tools/deps/resolvers/resolver_mixins.py +++ b/lib/galaxy/tools/deps/resolvers/resolver_mixins.py @@ -41,7 +41,7 @@ def _installed_versions(self, recipe): return [] names = os.listdir(recipe_base_path) - return filter(lambda n: os.path.isdir(os.path.join(recipe_base_path, n)), names) + return [n for n in names if os.path.isdir(os.path.join(recipe_base_path, n))] class UsesToolDependencyDirMixin: @@ -53,14 +53,10 @@ def _init_base_path(self, dependency_manager, **kwds): class UsesInstalledRepositoriesMixin: def _get_installed_dependency( self, name, type, version=None, **kwds ): - installed_tool_dependencies = kwds.get("installed_tool_dependencies", []) - for installed_tool_dependency in (installed_tool_dependencies or []): - name_and_type_equal = installed_tool_dependency.name == name and installed_tool_dependency.type == type - if version: - if name_and_type_equal and installed_tool_dependency.version == version: - return installed_tool_dependency - else: - if name_and_type_equal: + installed_tool_dependencies = kwds.get("installed_tool_dependencies") or [] + for installed_tool_dependency in installed_tool_dependencies: + if installed_tool_dependency.name == name and installed_tool_dependency.type == type: + if not version or installed_tool_dependency.version == version: return installed_tool_dependency return None diff --git a/lib/galaxy/tools/errors.py b/lib/galaxy/tools/errors.py index 18ca67700a20..faa1dca7018c 100644 --- a/lib/galaxy/tools/errors.py +++ b/lib/galaxy/tools/errors.py @@ -1,9 +1,14 @@ """ Functionality for dealing with tool errors. 
""" -import string -from galaxy import model, util, web import cgi +import string + +from galaxy import ( + model, + util, + web +) from galaxy.util import unicodify error_report_template = """ diff --git a/lib/galaxy/tools/evaluation.py b/lib/galaxy/tools/evaluation.py index e0a2e32de789..440f04e8c92c 100644 --- a/lib/galaxy/tools/evaluation.py +++ b/lib/galaxy/tools/evaluation.py @@ -1,33 +1,42 @@ import json +import logging import os import tempfile + from six import string_types from galaxy import model -from galaxy.util.object_wrapper import wrap_with_safe_string -from galaxy.util.bunch import Bunch -from galaxy.util.none_like import NoneDataset -from galaxy.util.template import fill_template -from galaxy.tools.wrappers import ( - ToolParameterValueWrapper, - DatasetFilenameWrapper, - DatasetListWrapper, - DatasetCollectionWrapper, - SelectToolParameterWrapper, - InputValueWrapper, - RawObjectWrapper +from galaxy.jobs.datasets import dataset_path_rewrites +from galaxy.tools import global_tool_errors +from galaxy.tools.parameters import ( + visit_input_values, + wrapped_json, ) from galaxy.tools.parameters.basic import ( - DataToolParameter, DataCollectionToolParameter, + DataToolParameter, SelectToolParameter, ) -from galaxy.tools.parameters import wrapped_json, visit_input_values -from galaxy.tools.parameters.grouping import Conditional, Repeat, Section -from galaxy.tools import global_tool_errors -from galaxy.jobs.datasets import dataset_path_rewrites +from galaxy.tools.parameters.grouping import ( + Conditional, + Repeat, + Section +) +from galaxy.tools.wrappers import ( + DatasetCollectionWrapper, + DatasetFilenameWrapper, + DatasetListWrapper, + InputValueWrapper, + RawObjectWrapper, + SelectToolParameterWrapper, + ToolParameterValueWrapper, +) +from galaxy.util.bunch import Bunch +from galaxy.util.none_like import NoneDataset +from galaxy.util.object_wrapper import wrap_with_safe_string +from galaxy.util.template import fill_template from galaxy.work.context import WorkRequestContext -import logging + log = logging.getLogger( __name__ ) @@ -150,7 +159,7 @@ def do_walk( inputs, input_values ): """ Wraps parameters as neccesary. """ - for input in inputs.itervalues(): + for input in inputs.values(): if isinstance( input, Repeat ): for d in input_values[ input.name ]: do_walk( input.inputs, d ) @@ -256,7 +265,7 @@ def wrap_input( input_values, input ): # tools where the inputs don't even get passed through. These # tools (e.g. UCSC) should really be handled in a special way. if self.tool.check_values: - identifier_key_dict = dict((v, "%s|__identifier__" % k) for k, v in input_datasets.iteritems()) # allows lookup of identifier through HDA. + identifier_key_dict = dict((v, "%s|__identifier__" % k) for k, v in input_datasets.items()) # allows lookup of identifier through HDA. 
self.__walk_inputs( self.tool.inputs, param_dict, wrap_input ) def __populate_input_dataset_wrappers(self, param_dict, input_datasets, input_dataset_paths): @@ -347,7 +356,7 @@ def __populate_output_dataset_wrappers(self, param_dict, output_datasets, output param_dict[name].files_path = os.path.abspath(os.path.join( job_working_directory, "dataset_%s_files" % (hda.dataset.id) )) for child in hda.children: param_dict[ "_CHILD___%s___%s" % ( name, child.designation ) ] = DatasetFilenameWrapper( child ) - for out_name, output in self.tool.outputs.iteritems(): + for out_name, output in self.tool.outputs.items(): if out_name not in param_dict and output.filters: # Assume the reason we lack this output is because a filter # failed to pass; for tool writing convienence, provide a @@ -407,9 +416,9 @@ def __sanitize_param_dict( self, param_dict ): Note: this method follows the style of the similar populate calls, in that param_dict is modified in-place. """ # chromInfo is a filename, do not sanitize it. - skip = [ 'chromInfo' ] + self.tool.template_macro_params.keys() + skip = [ 'chromInfo' ] + list(self.tool.template_macro_params.keys()) if not self.tool or not self.tool.options or self.tool.options.sanitize: - for key, value in param_dict.items(): + for key, value in list(param_dict.items()): if key not in skip: # Remove key so that new wrapped object will occupy key slot del param_dict[key] @@ -565,7 +574,7 @@ def __write_workdir_file( self, config_filename, content, context, is_template=T with open( config_filename, "w" ) as f: f.write( value ) # For running jobs as the actual user, ensure the config file is globally readable - os.chmod( config_filename, 0644 ) + os.chmod( config_filename, 0o644 ) def __register_extra_file( self, name, local_config_path ): """ diff --git a/lib/galaxy/tools/imp_exp/__init__.py b/lib/galaxy/tools/imp_exp/__init__.py index 248da34f54ca..3fde45703f36 100644 --- a/lib/galaxy/tools/imp_exp/__init__.py +++ b/lib/galaxy/tools/imp_exp/__init__.py @@ -342,7 +342,7 @@ def get_item_tag_dict( item ): def prepare_metadata( metadata ): """ Prepare metatdata for exporting. """ - for name, value in metadata.items(): + for name, value in list(metadata.items()): # Metadata files are not needed for export because they can be # regenerated. if isinstance( value, trans.app.model.MetadataFile ): diff --git a/lib/galaxy/tools/imp_exp/export_history.py b/lib/galaxy/tools/imp_exp/export_history.py index 618c63e12676..3f1ae0e19583 100644 --- a/lib/galaxy/tools/imp_exp/export_history.py +++ b/lib/galaxy/tools/imp_exp/export_history.py @@ -5,6 +5,7 @@ usage: %prog history_attrs dataset_attrs job_attrs out_file -G, --gzip: gzip archive file """ +from __future__ import print_function import optparse import os @@ -105,7 +106,7 @@ def main(): # Create archive. status = create_archive( history_attrs, dataset_attrs, job_attrs, out_file, gzip ) - print status + print(status) if __name__ == "__main__": diff --git a/lib/galaxy/tools/imp_exp/unpack_tar_gz_archive.py b/lib/galaxy/tools/imp_exp/unpack_tar_gz_archive.py index 932779c0e438..ee15bcf8cfda 100644 --- a/lib/galaxy/tools/imp_exp/unpack_tar_gz_archive.py +++ b/lib/galaxy/tools/imp_exp/unpack_tar_gz_archive.py @@ -5,16 +5,18 @@ usage: %prog archive_source dest_dir --[url|file] source type, either a URL or a file. 
""" +from __future__ import print_function +import math +import optparse import os import sys -import optparse import tarfile import tempfile -import urllib2 -import math from base64 import b64decode +from six.moves.urllib.request import urlopen + # Set max size of archive/file that will be handled to be 100 GB. This is # arbitrary and should be adjusted as needed. MAX_SIZE = 100 * math.pow( 2, 30 ) @@ -25,7 +27,7 @@ def url_to_file( url, dest_file ): Transfer a file from a remote URL to a temporary file. """ try: - url_reader = urllib2.urlopen( url ) + url_reader = urlopen( url ) CHUNK = 10 * 1024 # 10k total = 0 fp = open( dest_file, 'wb') @@ -40,7 +42,7 @@ def url_to_file( url, dest_file ): fp.close() return dest_file except Exception as e: - print "Exception getting file from URL: %s" % e, sys.stderr + print("Exception getting file from URL: %s" % e, file=sys.stderr) return None @@ -99,4 +101,4 @@ def main(options, args): try: main(options, args) except Exception as e: - print "Error unpacking tar/gz archive: %s" % e, sys.stderr + print("Error unpacking tar/gz archive: %s" % e, file=sys.stderr) diff --git a/lib/galaxy/tools/parameters/basic.py b/lib/galaxy/tools/parameters/basic.py index e96d55cd0774..77a16d638ebc 100644 --- a/lib/galaxy/tools/parameters/basic.py +++ b/lib/galaxy/tools/parameters/basic.py @@ -1,29 +1,38 @@ """ Basic tool parameters. """ - import logging -import re import os import os.path -from six import string_types +import re from xml.etree.ElementTree import XML +from six import string_types + +import galaxy.model +import galaxy.tools.parser from galaxy import util -from galaxy.util import string_as_bool, sanitize_param, unicodify +from galaxy.util import ( + sanitize_param, + string_as_bool, + unicodify +) +from galaxy.util.bunch import Bunch +from galaxy.util.dictifiable import Dictifiable from galaxy.util.expressions import ExpressionContext -from sanitize import ToolParameterSanitizer -import validation -import galaxy.tools.parser -from ..parser import get_input_source as ensure_input_source -from ..parameters import history_query -from ..parameters import dynamic_options -from .dataset_matcher import DatasetMatcher -from .dataset_matcher import DatasetCollectionMatcher from galaxy.web import url_for -from galaxy.util.dictifiable import Dictifiable -import galaxy.model -from galaxy.util.bunch import Bunch + +from . import validation +from .dataset_matcher import ( + DatasetCollectionMatcher, + DatasetMatcher +) +from .sanitize import ToolParameterSanitizer +from ..parameters import ( + dynamic_options, + history_query +) +from ..parser import get_input_source as ensure_input_source log = logging.getLogger( __name__ ) @@ -798,7 +807,7 @@ def get_legal_values( self, trans, other_values ): def from_json( self, value, trans, other_values={} ): legal_values = self.get_legal_values( trans, other_values ) workflow_building_mode = trans.workflow_building_mode - for context_value in other_values.itervalues(): + for context_value in other_values.values(): if isinstance( context_value, RuntimeValue ): workflow_building_mode = True break @@ -848,7 +857,7 @@ def to_param_dict_string( self, value, other_values={} ): if isinstance( value, list ): if not self.multiple: raise ValueError( "Multiple values provided but parameter %s is not expecting multiple values." 
% self.name ) - value = map( str, value ) + value = list(map( str, value )) else: value = str( value ) if self.tool is None or self.tool.options.sanitize: @@ -1045,7 +1054,7 @@ def from_json( self, value, trans, other_values={} ): column2 = column2.strip() if column2: column_list.append( column2 ) - value = map( ColumnListParameter._strip_c, column_list ) + value = list(map( ColumnListParameter._strip_c, column_list )) else: value = [] else: @@ -1097,7 +1106,7 @@ def get_column_list( self, trans, other_values ): if column_list is None: column_list = this_column_list else: - column_list = filter( lambda c: c in this_column_list, column_list ) + column_list = [c for c in column_list if c in this_column_list] return column_list def get_options( self, trans, other_values ): @@ -1248,12 +1257,12 @@ def get_options( self, trans=None, value=None, other_values={} ): options = self._get_options_from_code( trans=trans, value=value, other_values=other_values ) else: options = [] - for filter_key, filter_value in self.filtered.iteritems(): + for filter_key, filter_value in self.filtered.items(): dataset = other_values.get(filter_key) if dataset.__class__.__name__.endswith( "DatasetFilenameWrapper" ): # this is a bad way to check for this, but problems importing class ( due to circular imports? ) dataset = dataset.dataset if dataset: - for meta_key, meta_dict in filter_value.iteritems(): + for meta_key, meta_dict in filter_value.items(): if hasattr( dataset, 'metadata' ) and hasattr( dataset.metadata, 'spec' ): check_meta_val = dataset.metadata.spec[ meta_key ].param.to_string( dataset.metadata.get( meta_key ) ) if check_meta_val in meta_dict: @@ -1387,7 +1396,7 @@ def get_dependencies( self ): """ Get the *names* of the other params this param depends on. """ - return self.filtered.keys() + return list(self.filtered.keys()) def to_dict( self, trans, other_values={} ): # skip SelectToolParameter (the immediate parent) bc we need to get options in a different way here @@ -1762,8 +1771,8 @@ def to_dict( self, trans, other_values={} ): datatypes_registery = self._datatypes_registery( trans, self.tool ) all_edam_formats = datatypes_registery.edam_formats if hasattr( datatypes_registery, 'edam_formats' ) else {} all_edam_data = datatypes_registery.edam_data if hasattr( datatypes_registery, 'edam_formats' ) else {} - edam_formats = map(lambda ext: all_edam_formats.get(ext, None), extensions) - edam_data = map(lambda ext: all_edam_data.get(ext, None), extensions) + edam_formats = [all_edam_formats.get(ext, None) for ext in extensions] + edam_data = [all_edam_data.get(ext, None) for ext in extensions] d['extensions'] = extensions d['edam'] = {'edam_formats': edam_formats, 'edam_data': edam_data} diff --git a/lib/galaxy/tools/parameters/dynamic_options.py b/lib/galaxy/tools/parameters/dynamic_options.py index 248301781d26..b32ea7518524 100644 --- a/lib/galaxy/tools/parameters/dynamic_options.py +++ b/lib/galaxy/tools/parameters/dynamic_options.py @@ -2,13 +2,20 @@ Support for generating the options for a SelectToolParameter dynamically (based on the values of other parameters or other aspects of the current state) """ - import logging import os -import validation -from galaxy.util import string_as_bool -from galaxy.model import User, HistoryDatasetAssociation, HistoryDatasetCollectionAssociation + +from six import StringIO + import galaxy.tools +from galaxy.model import ( + HistoryDatasetAssociation, + HistoryDatasetCollectionAssociation, + User +) +from galaxy.util import string_as_bool + +from . 
import validation log = logging.getLogger(__name__) @@ -590,10 +597,9 @@ def get_fields( self, trans, other_values ): options = self.parse_file_fields( open( path ) ) else: # Pass just the first megabyte to parse_file_fields. - import StringIO log.warning( "Attempting to load options from large file, reading just first megabyte" ) contents = open( path, 'r' ).read( 1048576 ) - options = self.parse_file_fields( StringIO.StringIO( contents ) ) + options = self.parse_file_fields( StringIO( contents ) ) elif self.tool_data_table: options = self.tool_data_table.get_fields() else: diff --git a/lib/galaxy/tools/parameters/grouping.py b/lib/galaxy/tools/parameters/grouping.py index 22a74d46a93b..f83d66490500 100644 --- a/lib/galaxy/tools/parameters/grouping.py +++ b/lib/galaxy/tools/parameters/grouping.py @@ -1,21 +1,26 @@ """ Constructs for grouping tool parameters """ - import logging -log = logging.getLogger( __name__ ) - import os -import StringIO import unicodedata -from six import text_type + +from six import ( + StringIO, + text_type +) + from galaxy.datatypes import sniff -from galaxy.util import inflector -from galaxy.util import relpath -from galaxy.util import sanitize_for_filename +from galaxy.util import ( + inflector, + relpath, + sanitize_for_filename +) from galaxy.util.bunch import Bunch -from galaxy.util.expressions import ExpressionContext from galaxy.util.dictifiable import Dictifiable +from galaxy.util.expressions import ExpressionContext + +log = logging.getLogger( __name__ ) class Group( object, Dictifiable ): @@ -82,7 +87,7 @@ def value_to_basic( self, value, app ): # Propogate __index__ if '__index__' in d: rval_dict['__index__'] = d['__index__'] - for input in self.inputs.itervalues(): + for input in self.inputs.values(): rval_dict[ input.name ] = input.value_to_basic( d[input.name], app ) rval.append( rval_dict ) return rval @@ -96,7 +101,7 @@ def value_from_basic( self, value, app, ignore_errors=False ): # compatibility) rval_dict['__index__'] = d.get( '__index__', i ) # Restore child inputs - for input in self.inputs.itervalues(): + for input in self.inputs.values(): if ignore_errors and input.name not in d: # If we do not have a value, and are ignoring errors, we simply # do nothing. 
There will be no value for the parameter in the @@ -114,7 +119,7 @@ def get_initial_value( self, trans, context ): rval = [] for i in range( self.default ): rval_dict = { '__index__': i} - for input in self.inputs.itervalues(): + for input in self.inputs.values(): rval_dict[ input.name ] = input.get_initial_value( trans, context ) rval.append( rval_dict ) return rval @@ -125,7 +130,7 @@ def to_dict( self, trans ): def input_to_dict( input ): return input.to_dict( trans ) - repeat_dict[ "inputs" ] = map( input_to_dict, self.inputs.values() ) + repeat_dict[ "inputs" ] = list(map( input_to_dict, self.inputs.values() )) return repeat_dict @@ -150,14 +155,14 @@ def label( self ): def value_to_basic( self, value, app ): rval = {} - for input in self.inputs.itervalues(): + for input in self.inputs.values(): rval[ input.name ] = input.value_to_basic( value[input.name], app ) return rval def value_from_basic( self, value, app, ignore_errors=False ): rval = {} try: - for input in self.inputs.itervalues(): + for input in self.inputs.values(): if not ignore_errors or input.name in value: rval[ input.name ] = input.value_from_basic( value[ input.name ], app, ignore_errors ) except Exception as e: @@ -168,7 +173,7 @@ def value_from_basic( self, value, app, ignore_errors=False ): def get_initial_value( self, trans, context ): rval = {} child_context = ExpressionContext( rval, context ) - for child_input in self.inputs.itervalues(): + for child_input in self.inputs.values(): rval[ child_input.name ] = child_input.get_initial_value( trans, child_context ) return rval @@ -178,7 +183,7 @@ def to_dict( self, trans ): def input_to_dict( input ): return input.to_dict( trans ) - section_dict[ "inputs" ] = map( input_to_dict, self.inputs.values() ) + section_dict[ "inputs" ] = list(map( input_to_dict, self.inputs.values() )) return section_dict @@ -232,7 +237,7 @@ def group_title( self, context ): def title_by_index( self, trans, index, context ): d_type = self.get_datatype( trans, context ) - for i, ( composite_name, composite_file ) in enumerate( d_type.writable_files.iteritems() ): + for i, ( composite_name, composite_file ) in enumerate( d_type.writable_files.items() ): if i == index: rval = composite_name if composite_file.description: @@ -249,7 +254,7 @@ def value_to_basic( self, value, app ): # Propogate __index__ if '__index__' in d: rval_dict['__index__'] = d['__index__'] - for input in self.inputs.itervalues(): + for input in self.inputs.values(): rval_dict[ input.name ] = input.value_to_basic( d[input.name], app ) rval.append( rval_dict ) return rval @@ -262,7 +267,7 @@ def value_from_basic( self, value, app, ignore_errors=False ): # compatibility) rval_dict['__index__'] = d.get( '__index__', i ) # Restore child inputs - for input in self.inputs.itervalues(): + for input in self.inputs.values(): if ignore_errors and input.name not in d: # this wasn't tested rval_dict[ input.name ] = input.get_initial_value( None, d ) else: @@ -273,10 +278,10 @@ def value_from_basic( self, value, app, ignore_errors=False ): def get_initial_value( self, trans, context ): d_type = self.get_datatype( trans, context ) rval = [] - for i, ( composite_name, composite_file ) in enumerate( d_type.writable_files.iteritems() ): + for i, ( composite_name, composite_file ) in enumerate( d_type.writable_files.items() ): rval_dict = {} rval_dict['__index__'] = i # create __index__ - for input in self.inputs.itervalues(): + for input in self.inputs.values(): rval_dict[ input.name ] = input.get_initial_value( trans, context ) 
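Note (illustrative, not part of the patch): the list(map(...)) wrappers added in grouping.py, and the comprehensions used elsewhere in the diff, handle the fact that map() and filter() return lazy iterators on Python 3 while the callers expect real lists. A small sketch with made-up data:

    extensions = ['bed', 'fasta', 'tabular']
    upper = list(map(str.upper, extensions))                   # list() restores the Python 2 list result
    tabular = [e for e in extensions if e.startswith('tab')]   # comprehension in place of filter()

Where the call site is being touched anyway, a comprehension is usually the clearer choice, since it reads the same on both Python versions.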
rval.append( rval_dict ) return rval @@ -467,8 +472,8 @@ def get_filenames( context ): dataset.uuid = None # load metadata files_metadata = context.get( self.metadata_ref, {} ) - metadata_name_substition_default_dict = dict( [ ( composite_file.substitute_name_with_metadata, d_type.metadata_spec[ composite_file.substitute_name_with_metadata ].default ) for composite_file in d_type.composite_files.values() if composite_file.substitute_name_with_metadata ] ) - for meta_name, meta_spec in d_type.metadata_spec.iteritems(): + metadata_name_substition_default_dict = dict( ( composite_file.substitute_name_with_metadata, d_type.metadata_spec[ composite_file.substitute_name_with_metadata ].default ) for composite_file in d_type.composite_files.values() if composite_file.substitute_name_with_metadata ) + for meta_name, meta_spec in d_type.metadata_spec.items(): if meta_spec.set_in_upload: if meta_name in files_metadata: meta_value = files_metadata[ meta_name ] @@ -478,7 +483,7 @@ def get_filenames( context ): dataset.precreated_name = dataset.name = self.get_composite_dataset_name( context ) if dataset.datatype.composite_type == 'auto_primary_file': # replace sniff here with just creating an empty file - temp_name, is_multi_byte = sniff.stream_to_file( StringIO.StringIO( d_type.generate_primary_file( dataset ) ), prefix='upload_auto_primary_file' ) + temp_name, is_multi_byte = sniff.stream_to_file( StringIO( d_type.generate_primary_file( dataset ) ), prefix='upload_auto_primary_file' ) dataset.primary_file = temp_name dataset.to_posix_lines = True dataset.space_to_tab = False @@ -494,7 +499,7 @@ def get_filenames( context ): keys = [ value.name for value in writable_files.values() ] for i, group_incoming in enumerate( groups_incoming[ writable_files_offset : ] ): key = keys[ i + writable_files_offset ] - if group_incoming is None and not writable_files[ writable_files.keys()[ keys.index( key ) ] ].optional: + if group_incoming is None and not writable_files[ list(writable_files.keys())[ keys.index( key ) ] ].optional: dataset.warnings.append( "A required composite file (%s) was not specified." % ( key ) ) dataset.composite_files[ key ] = None else: @@ -504,7 +509,7 @@ def get_filenames( context ): dataset.composite_files[ key ] = file_bunch.__dict__ else: dataset.composite_files[ key ] = None - if not writable_files[ writable_files.keys()[ keys.index( key ) ] ].optional: + if not writable_files[ list(writable_files.keys())[ keys.index( key ) ] ].optional: dataset.warnings.append( "A required composite file (%s) was not specified." 
% ( key ) ) return [ dataset ] else: @@ -546,7 +551,7 @@ def value_to_basic( self, value, app ): rval = dict() rval[ self.test_param.name ] = self.test_param.value_to_basic( value[ self.test_param.name ], app ) current_case = rval[ '__current_case__' ] = self.get_current_case( value[ self.test_param.name ] ) - for input in self.cases[ current_case ].inputs.itervalues(): + for input in self.cases[ current_case ].inputs.values(): if input.name in value: # parameter might be absent in unverified workflow rval[ input.name ] = input.value_to_basic( value[ input.name ], app ) return rval @@ -557,7 +562,7 @@ def value_from_basic( self, value, app, ignore_errors=False ): rval[ self.test_param.name ] = self.test_param.value_from_basic( value.get( self.test_param.name ), app, ignore_errors ) current_case = rval[ '__current_case__' ] = self.get_current_case( rval[ self.test_param.name ] ) # Inputs associated with current case - for input in self.cases[ current_case ].inputs.itervalues(): + for input in self.cases[ current_case ].inputs.values(): # If we do not have a value, and are ignoring errors, we simply # do nothing. There will be no value for the parameter in the # conditional's values dictionary. @@ -581,7 +586,7 @@ def get_initial_value( self, trans, context ): rval[ self.test_param.name ] = test_value # Fill in state for selected case child_context = ExpressionContext( rval, context ) - for child_input in self.cases[current_case].inputs.itervalues(): + for child_input in self.cases[current_case].inputs.values(): rval[ child_input.name ] = child_input.get_initial_value( trans, child_context ) return rval @@ -591,7 +596,7 @@ def to_dict( self, trans ): def nested_to_dict( input ): return input.to_dict( trans ) - cond_dict[ "cases" ] = map( nested_to_dict, self.cases ) + cond_dict[ "cases" ] = list(map( nested_to_dict, self.cases )) cond_dict[ "test_param" ] = nested_to_dict( self.test_param ) return cond_dict @@ -609,5 +614,5 @@ def to_dict( self, trans ): def input_to_dict( input ): return input.to_dict( trans ) - when_dict[ "inputs" ] = map( input_to_dict, self.inputs.values() ) + when_dict[ "inputs" ] = list(map( input_to_dict, self.inputs.values() )) return when_dict diff --git a/lib/galaxy/tools/parameters/meta.py b/lib/galaxy/tools/parameters/meta.py index 907889b028c8..79ecbfbc1843 100644 --- a/lib/galaxy/tools/parameters/meta.py +++ b/lib/galaxy/tools/parameters/meta.py @@ -1,10 +1,14 @@ -from galaxy.util import permutations -from galaxy import model -from galaxy import util -from galaxy import exceptions -import itertools import copy +import itertools import logging + +from galaxy import ( + exceptions, + model, + util +) +from galaxy.util import permutations + log = logging.getLogger( __name__ ) @@ -52,10 +56,10 @@ def expand_workflow_inputs( inputs ): product = product or [ [ None ] ] linked_keys = linked_keys or [ ( None, None ) ] product_keys = product_keys or [ ( None, None ) ] - for linked_values, product_values in itertools.product( *[ zip( *linked ), itertools.product( *product ) ] ): + for linked_values, product_values in itertools.product( zip( *linked ), itertools.product( *product ) ): new_params = copy.deepcopy( inputs ) new_keys = [] - for ( step_id, key ), value in zip( linked_keys, linked_values ) + zip( product_keys, product_values ): + for ( step_id, key ), value in list(zip( linked_keys, linked_values )) + list(zip( product_keys, product_values )): if step_id is not None: new_params[ step_id ][ key ] = value new_keys.append( value[ 'hid' ] ) diff --git 
a/lib/galaxy/tools/parameters/output_collect.py b/lib/galaxy/tools/parameters/output_collect.py index 4be37808550c..91a4f00401d5 100644 --- a/lib/galaxy/tools/parameters/output_collect.py +++ b/lib/galaxy/tools/parameters/output_collect.py @@ -1,24 +1,25 @@ """ Code allowing tools to define extra files associated with an output datset. """ -import os -import re -import operator import glob import json +import logging +import operator +import os +import re from galaxy import jobs from galaxy import util -from galaxy.util import odict -from galaxy.util import ExecutionTimer from galaxy.tools.parser.output_collection_def import ( DEFAULT_DATASET_COLLECTOR_DESCRIPTION, INPUT_DBKEY_TOKEN, ) +from galaxy.util import ( + ExecutionTimer, + odict +) DATASET_ID_TOKEN = "DATASET_ID" - -import logging log = logging.getLogger( __name__ ) @@ -105,7 +106,7 @@ def populate_collection_elements( self, collection, root_collection_builder, out filenames = self.find_files( collection, dataset_collectors ) element_datasets = [] - for filename, extra_file_collector in filenames.iteritems(): + for filename, extra_file_collector in filenames.items(): create_dataset_timer = ExecutionTimer() fields_match = extra_file_collector.match( collection, os.path.basename( filename ) ) if not fields_match: @@ -248,7 +249,7 @@ def collect_primary_datasets( tool, output, job_working_directory, input_ext, in if 'job_working_directory' in app.config.collect_outputs_from: for path, extra_file_collector in walk_over_extra_files( dataset_collectors, job_working_directory, outdata ): filenames[ path ] = extra_file_collector - for filename_index, ( filename, extra_file_collector ) in enumerate( filenames.iteritems() ): + for filename_index, ( filename, extra_file_collector ) in enumerate( filenames.items() ): fields_match = extra_file_collector.match( outdata, os.path.basename( filename ) ) if not fields_match: # Before I guess pop() would just have thrown an IndexError diff --git a/lib/galaxy/tools/search/__init__.py b/lib/galaxy/tools/search/__init__.py index 6261781f6f4f..9c4867c71b3f 100644 --- a/lib/galaxy/tools/search/__init__.py +++ b/lib/galaxy/tools/search/__init__.py @@ -5,16 +5,23 @@ import logging import re import tempfile - -from galaxy.web.framework.helpers import to_unicode from datetime import datetime -from whoosh.filedb.filestore import RamStorage, FileStorage -from whoosh.fields import KEYWORD, Schema, STORED, TEXT -from whoosh.scoring import BM25F -from whoosh.qparser import MultifieldParser from whoosh import analysis +from whoosh.fields import ( + KEYWORD, + Schema, + STORED, + TEXT +) +from whoosh.filedb.filestore import ( + FileStorage, + RamStorage +) +from whoosh.qparser import MultifieldParser +from whoosh.scoring import BM25F +from galaxy.web.framework.helpers import to_unicode log = logging.getLogger( __name__ ) diff --git a/lib/galaxy/tools/util/galaxyops/__init__.py b/lib/galaxy/tools/util/galaxyops/__init__.py index b7a4a5f2dd8a..7ec5e7590544 100644 --- a/lib/galaxy/tools/util/galaxyops/__init__.py +++ b/lib/galaxy/tools/util/galaxyops/__init__.py @@ -1,16 +1,18 @@ """Utility functions for galaxyops""" +from __future__ import print_function + import sys def warn( msg ): # TODO: since everything printed to stderr results in job.state = error, we # don't need both a warn and a fail... 
- print >> sys.stderr, msg + print(msg, file=sys.stderr) sys.exit( 1 ) def fail( msg ): - print >> sys.stderr, msg + print(msg, file=sys.stderr) sys.exit( 1 ) @@ -25,15 +27,15 @@ def parse_cols_arg( cols ): # looks something like 1,2,3, if cols.endswith( ',' ): cols += '0' - col_list = map( lambda x: int( x ) - 1, cols.split(",") ) + col_list = [int( x ) - 1 for x in cols.split(",")] return col_list else: return BED_DEFAULT_COLS def default_printer( stream, exc, obj ): - print >> stream, "%d: %s" % ( obj.linenum, obj.current_line ) - print >> stream, "\tError: %s" % ( str(exc) ) + print("%d: %s" % ( obj.linenum, obj.current_line ), file=stream) + print("\tError: %s" % ( str(exc) ), file=stream) def skipped( reader, filedesc="" ): diff --git a/lib/galaxy/tools/util/maf_utilities.py b/lib/galaxy/tools/util/maf_utilities.py index d2e92b061d75..c50dc42747e6 100644 --- a/lib/galaxy/tools/util/maf_utilities.py +++ b/lib/galaxy/tools/util/maf_utilities.py @@ -3,18 +3,21 @@ Provides wrappers and utilities for working with MAF files and alignments. """ # Dan Blankenberg +from __future__ import print_function + import logging import os +import resource import string import sys import tempfile +from copy import deepcopy +from errno import EMFILE import bx.align.maf -import bx.intervals import bx.interval_index_file -from errno import EMFILE -import resource -from copy import deepcopy +import bx.intervals +from six.moves import xrange assert sys.version_info[:2] >= ( 2, 4 ) @@ -51,7 +54,7 @@ def get_species_in_block( block ): def tool_fail( msg="Unknown Error" ): - print >> sys.stderr, "Fatal Error: %s" % msg + print("Fatal Error: %s" % msg, file=sys.stderr) sys.exit() @@ -136,7 +139,7 @@ def __del__( self ): class RegionAlignment( object ): DNA_COMPLEMENT = string.maketrans( "ACGTacgt", "TGCAtgca" ) - MAX_SEQUENCE_SIZE = sys.maxint # Maximum length of sequence allowed + MAX_SEQUENCE_SIZE = sys.maxsize # Maximum length of sequence allowed def __init__( self, size, species=[], temp_file_handler=None ): assert size <= self.MAX_SEQUENCE_SIZE, "Maximum length allowed for an individual sequence has been exceeded (%i > %i)." % ( size, self.MAX_SEQUENCE_SIZE ) @@ -161,7 +164,7 @@ def add_species( self, species ): def get_species_names( self, skip=[] ): if not isinstance( skip, list ): skip = [skip] - names = self.sequences.keys() + names = list(self.sequences.keys()) for name in skip: try: names.remove( name ) @@ -314,7 +317,7 @@ def build_maf_index_species_chromosomes( filename, index_species=None ): maf_reader = bx.align.maf.Reader( open( filename ) ) while True: pos = maf_reader.file.tell() - block = maf_reader.next() + block = next(maf_reader) if block is None: break blocks += 1 @@ -478,7 +481,7 @@ def __split_components_by_species( components_by_species, new_block ): empty_block = bx.align.Alignment( score=block.score, attributes=deepcopy( block.attributes ) ) # should we copy attributes? 
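Note (illustrative, not part of the patch): galaxyops/__init__.py and maf_utilities.py switch from the Python 2 print statement to the print() function, with a __future__ import so the scripts behave identically if they are still run under Python 2. A small sketch with a made-up message:

    from __future__ import print_function
    import sys

    # replaces the Python 2 form: print >> sys.stderr, msg
    print("Fatal Error: %s" % "unable to open MAF index", file=sys.stderr)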
diff --git a/lib/galaxy/tools/util/maf_utilities.py b/lib/galaxy/tools/util/maf_utilities.py
index d2e92b061d75..c50dc42747e6 100644
--- a/lib/galaxy/tools/util/maf_utilities.py
+++ b/lib/galaxy/tools/util/maf_utilities.py
@@ -3,18 +3,21 @@
 Provides wrappers and utilities for working with MAF files and alignments.
 """
 # Dan Blankenberg
+from __future__ import print_function
+
 import logging
 import os
+import resource
 import string
 import sys
 import tempfile
+from copy import deepcopy
+from errno import EMFILE
 
 import bx.align.maf
-import bx.intervals
 import bx.interval_index_file
-from errno import EMFILE
-import resource
-from copy import deepcopy
+import bx.intervals
+from six.moves import xrange
 
 assert sys.version_info[:2] >= ( 2, 4 )
@@ -51,7 +54,7 @@ def get_species_in_block( block ):
 
 
 def tool_fail( msg="Unknown Error" ):
-    print >> sys.stderr, "Fatal Error: %s" % msg
+    print("Fatal Error: %s" % msg, file=sys.stderr)
     sys.exit()
 
 
@@ -136,7 +139,7 @@ def __del__( self ):
 class RegionAlignment( object ):
 
     DNA_COMPLEMENT = string.maketrans( "ACGTacgt", "TGCAtgca" )
-    MAX_SEQUENCE_SIZE = sys.maxint  # Maximum length of sequence allowed
+    MAX_SEQUENCE_SIZE = sys.maxsize  # Maximum length of sequence allowed
 
     def __init__( self, size, species=[], temp_file_handler=None ):
         assert size <= self.MAX_SEQUENCE_SIZE, "Maximum length allowed for an individual sequence has been exceeded (%i > %i)." % ( size, self.MAX_SEQUENCE_SIZE )
@@ -161,7 +164,7 @@ def add_species( self, species ):
     def get_species_names( self, skip=[] ):
         if not isinstance( skip, list ):
             skip = [skip]
-        names = self.sequences.keys()
+        names = list(self.sequences.keys())
         for name in skip:
             try:
                 names.remove( name )
@@ -314,7 +317,7 @@ def build_maf_index_species_chromosomes( filename, index_species=None ):
         maf_reader = bx.align.maf.Reader( open( filename ) )
         while True:
             pos = maf_reader.file.tell()
-            block = maf_reader.next()
+            block = next(maf_reader)
            if block is None:
                break
            blocks += 1
@@ -478,7 +481,7 @@ def __split_components_by_species( components_by_species, new_block ):
     empty_block = bx.align.Alignment( score=block.score, attributes=deepcopy( block.attributes ) )  # should we copy attributes?
     empty_block.text_size = block.text_size
     # call recursive function to split into each combo of spec/blocks
-    for value in __split_components_by_species( spec_dict.values(), empty_block ):
+    for value in __split_components_by_species( list(spec_dict.values()), empty_block ):
         sort_block_components_by_block( value, block )  # restore original component order
         yield value
@@ -612,10 +615,10 @@ def get_starts_ends_fields_from_gene_bed( line ):
 
     # Calculate and store starts and ends of coding exons
     region_start, region_end = cds_start, cds_end
-    exon_starts = map( int, fields[11].rstrip( ',\n' ).split( ',' ) )
-    exon_starts = map( ( lambda x: x + tx_start ), exon_starts )
-    exon_ends = map( int, fields[10].rstrip( ',' ).split( ',' ) )
-    exon_ends = map( ( lambda x, y: x + y ), exon_starts, exon_ends )
+    exon_starts = list(map( int, fields[11].rstrip( ',\n' ).split( ',' ) ))
+    exon_starts = [x + tx_start for x in exon_starts]
+    exon_ends = list(map( int, fields[10].rstrip( ',' ).split( ',' ) ))
+    exon_ends = [x + y for x, y in zip( exon_starts, exon_ends )]
     for start, end in zip( exon_starts, exon_ends ):
         start = max( start, region_start )
         end = min( end, region_end )
@@ -680,7 +683,7 @@ def remove_temp_index_file( index_filename ):
 
 def get_fasta_header( component, attributes={}, suffix=None ):
     header = ">%s(%s):%i-%i|" % ( component.src, component.strand, component.get_forward_strand_start(), component.get_forward_strand_end() )
-    for key, value in attributes.iteritems():
+    for key, value in attributes.items():
         header = "%s%s=%s|" % ( header, key, value )
     if suffix:
         header = "%s%s" % ( header, suffix )
@@ -714,7 +717,7 @@ def get_attributes_from_fasta_header( header ):
             # fields 0 is not a region coordinate
             pass
     if len( fields ) > 2:
-        for i in xrange( 1, len( fields ) - 1 ):
+        for i in range( 1, len( fields ) - 1 ):
             prop = fields[i].split( '=', 1 )
             if len( prop ) == 2:
                 attributes[ prop[0] ] = prop[1]
diff --git a/lib/galaxy/tools/verify/asserts/__init__.py b/lib/galaxy/tools/verify/asserts/__init__.py
index d0c8f19a8cf3..51f2c7cf31af 100644
--- a/lib/galaxy/tools/verify/asserts/__init__.py
+++ b/lib/galaxy/tools/verify/asserts/__init__.py
@@ -44,7 +44,7 @@ def verify_assertion(data, assertion_description):
 
     assert_function_args = inspect.getargspec(assert_function).args
     args = {}
-    for attribute, value in assertion_description["attributes"].iteritems():
+    for attribute, value in assertion_description["attributes"].items():
         if attribute in assert_function_args:
             args[attribute] = value
diff --git a/lib/galaxy/tools/verify/test_data.py b/lib/galaxy/tools/verify/test_data.py
index 01ef28bca7f0..01459a217c30 100644
--- a/lib/galaxy/tools/verify/test_data.py
+++ b/lib/galaxy/tools/verify/test_data.py
@@ -28,7 +28,7 @@ class TestDataResolver(object):
     def __init__(self, env_var='GALAXY_TEST_FILE_DIR', environ=os.environ):
         file_dirs = environ.get(env_var, None)
         if file_dirs:
-            self.resolvers = map(lambda u: build_resolver(u, environ), LIST_SEP.split(file_dirs))
+            self.resolvers = [build_resolver(u, environ) for u in LIST_SEP.split(file_dirs)]
         else:
            self.resolvers = []
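The maf_utilities.py hunks above touch three further Python 3 incompatibilities: file-like .next() calls, dict views, and sys.maxint. A short sketch follows; block_reader() is a made-up generator standing in for bx.align.maf.Reader purely to show the iteration idiom, not the real class.

import sys


def block_reader():
    yield "block-1"
    yield "block-2"


reader = block_reader()
block = next(reader)             # reader.next() is gone on Python 3; next(reader) works on both

sequences = {"hg19": 0, "mm10": 1}
names = list(sequences.keys())   # keys() is a read-only view on Python 3; list() gives a mutable copy
names.remove("mm10")

MAX_SEQUENCE_SIZE = sys.maxsize  # sys.maxint no longer exists; maxsize is the usual replacement
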
diff --git a/lib/galaxy/tools/wrappers.py b/lib/galaxy/tools/wrappers.py
index d9801e76d5ff..7e0631eeff28 100644
--- a/lib/galaxy/tools/wrappers.py
+++ b/lib/galaxy/tools/wrappers.py
@@ -1,13 +1,16 @@
+import logging
 import os
 import pipes
 import tempfile
+
+from six import string_types
+
 from galaxy import exceptions
-from galaxy.util.none_like import NoneDataset
 from galaxy.util import odict
+from galaxy.util.none_like import NoneDataset
 from galaxy.util.object_wrapper import wrap_with_safe_string
-from logging import getLogger
 
-log = getLogger( __name__ )
+log = logging.getLogger( __name__ )
 
 # Fields in .log files corresponding to paths, must have one of the following
 # field names and all such fields are assumed to be paths. This is to allow
@@ -27,8 +30,9 @@ class ToolParameterValueWrapper( object ):
     Base class for object that Wraps a Tool Parameter and Value.
     """
 
-    def __nonzero__( self ):
+    def __bool__( self ):
         return bool( self.value )
+    __nonzero__ = __bool__
 
     def get_display_text( self, quote=True ):
         """
@@ -48,8 +52,9 @@ class RawObjectWrapper( ToolParameterValueWrapper ):
     def __init__( self, obj ):
         self.obj = obj
 
-    def __nonzero__( self ):
+    def __bool__( self ):
         return bool( self.obj )  # FIXME: would it be safe/backwards compatible to rename .obj to .value, so that we can just inherit this method?
+    __nonzero__ = __bool__
 
     def __str__( self ):
         try:
@@ -72,7 +77,7 @@ def __init__( self, input, value, other_values={} ):
         self._other_values = other_values
 
     def __eq__( self, other ):
-        if isinstance( other, basestring ):
+        if isinstance( other, string_types ):
             return str( self ) == other
         elif isinstance( other, int ):
             return int( self ) == other
@@ -144,7 +149,7 @@ def __init__( self, input, value, app, other_values={}, path_rewriter=None ):
         self.fields = self.SelectToolParameterFieldWrapper( input, value, other_values, self._path_rewriter )
 
     def __eq__( self, other ):
-        if isinstance( other, basestring ):
+        if isinstance( other, string_types ):
             return str( self ) == other
         else:
             return super( SelectToolParameterWrapper, self ) == other
@@ -193,8 +198,9 @@ def __getattr__( self, name ):
             rval = wrap_with_safe_string( rval )
         return rval
 
-    def __nonzero__( self ):
+    def __bool__( self ):
         return self.metadata.__nonzero__()
+    __nonzero__ = __bool__
 
     def __iter__( self ):
         return self.metadata.__iter__()
@@ -206,7 +212,7 @@ def get( self, key, default=None ):
             return default
 
     def items( self ):
-        return iter( [ ( k, self.get( k ) ) for k, v in self.metadata.items() ] )
+        return iter( ( k, self.get( k ) ) for k, v in self.metadata.items() )
 
     def __init__( self, dataset, datatypes_registry=None, tool=None, name=None, dataset_path=None, identifier=None ):
         if not dataset:
@@ -276,8 +282,9 @@ def __getattr__( self, key ):
         else:
             return getattr( self.dataset, key )
 
-    def __nonzero__( self ):
+    def __bool__( self ):
         return bool( self.dataset )
+    __nonzero__ = __bool__
 
 
 class HasDatasets:
@@ -407,7 +414,8 @@ def __iter__( self ):
             return [].__iter__()
         return self.__element_instance_list.__iter__()
 
-    def __nonzero__( self ):
+    def __bool__( self ):
         # Fail `#if $param` checks in cheetah is optional input
         # not specified or if resulting collection is empty.
         return self.__input_supplied and bool( self.__element_instance_list )
+    __nonzero__ = __bool__
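The wrappers.py hunks above repeat two patterns: truth-testing dunders (__bool__ defined once, aliased to __nonzero__ so Python 2 keeps working) and basestring replaced by six.string_types. A hypothetical miniature of both, assuming only that six is available (it already is for the patched code); OptionalValueWrapper is an invented name, not a class from wrappers.py.

from six import string_types


class OptionalValueWrapper(object):
    # Python 3 calls __bool__, Python 2 calls __nonzero__; define the method
    # once and alias the old name to it.
    def __init__(self, value=None):
        self.value = value

    def __bool__(self):
        return bool(self.value)
    __nonzero__ = __bool__

    def __eq__(self, other):
        # basestring is gone on Python 3; six.string_types covers str/unicode
        # on 2 and plain str on 3.
        if isinstance(other, string_types):
            return str(self.value) == other
        return NotImplemented


assert not OptionalValueWrapper()         # falsy when no value is supplied
assert OptionalValueWrapper("txt") == "txt"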