diff --git a/.github/config/profile.yaml b/.github/config/profile.yaml index 478b7d1233..84bdab3e91 100644 --- a/.github/config/profile.yaml +++ b/.github/config/profile.yaml @@ -18,3 +18,4 @@ broker_host: 127.0.0.1 broker_port: 5672 broker_virtual_host: '' repository: /tmp/test_repository_test_aiida/ +test_profile: true diff --git a/aiida/cmdline/commands/cmd_setup.py b/aiida/cmdline/commands/cmd_setup.py index 7795a7c616..5dc552517c 100644 --- a/aiida/cmdline/commands/cmd_setup.py +++ b/aiida/cmdline/commands/cmd_setup.py @@ -39,11 +39,12 @@ @options_setup.SETUP_BROKER_PORT() @options_setup.SETUP_BROKER_VIRTUAL_HOST() @options_setup.SETUP_REPOSITORY_URI() +@options_setup.SETUP_TEST_PROFILE() @options.CONFIG_FILE() def setup( non_interactive, profile: Profile, email, first_name, last_name, institution, db_engine, db_backend, db_host, db_port, db_name, db_username, db_password, broker_protocol, broker_username, broker_password, broker_host, - broker_port, broker_virtual_host, repository + broker_port, broker_virtual_host, repository, test_profile ): """Setup a new profile. @@ -74,6 +75,7 @@ def setup( 'broker_virtual_host': broker_virtual_host, } ) + profile.is_test_profile = test_profile config = get_config() @@ -142,12 +144,13 @@ def setup( @options_setup.QUICKSETUP_BROKER_PORT() @options_setup.QUICKSETUP_BROKER_VIRTUAL_HOST() @options_setup.QUICKSETUP_REPOSITORY_URI() +@options_setup.QUICKSETUP_TEST_PROFILE() @options.CONFIG_FILE() @click.pass_context def quicksetup( ctx, non_interactive, profile, email, first_name, last_name, institution, db_engine, db_backend, db_host, db_port, db_name, db_username, db_password, su_db_name, su_db_username, su_db_password, broker_protocol, broker_username, - broker_password, broker_host, broker_port, broker_virtual_host, repository + broker_password, broker_host, broker_port, broker_virtual_host, repository, test_profile ): """Setup a new profile in a fully automated fashion.""" # pylint: disable=too-many-arguments,too-many-locals @@ -202,5 +205,6 @@ def quicksetup( 'broker_port': broker_port, 'broker_virtual_host': broker_virtual_host, 'repository': repository, + 'test_profile': test_profile, } ctx.invoke(setup, **setup_parameters) diff --git a/aiida/cmdline/params/options/commands/setup.py b/aiida/cmdline/params/options/commands/setup.py index 14881ecf09..13132b95e7 100644 --- a/aiida/cmdline/params/options/commands/setup.py +++ b/aiida/cmdline/params/options/commands/setup.py @@ -360,3 +360,9 @@ def get_quicksetup_password(ctx, param, value): # pylint: disable=unused-argume contextual_default=get_repository_uri_default, cls=options.interactive.InteractiveOption ) + +SETUP_TEST_PROFILE = options.OverridableOption( + '--test-profile', is_flag=True, help='Designate the profile to be used for running the test suite only.' 
+) + +QUICKSETUP_TEST_PROFILE = SETUP_TEST_PROFILE.clone() diff --git a/aiida/manage/configuration/config.py b/aiida/manage/configuration/config.py index 9f69c98d39..85b7123cfe 100644 --- a/aiida/manage/configuration/config.py +++ b/aiida/manage/configuration/config.py @@ -26,7 +26,7 @@ __all__ = ('Config', 'config_schema', 'ConfigValidationError') -SCHEMA_FILE = 'config-v7.schema.json' +SCHEMA_FILE = 'config-v8.schema.json' @lru_cache(1) diff --git a/aiida/manage/configuration/migrations/migrations.py b/aiida/manage/configuration/migrations/migrations.py index 6d4d3baedb..007c061b44 100644 --- a/aiida/manage/configuration/migrations/migrations.py +++ b/aiida/manage/configuration/migrations/migrations.py @@ -25,8 +25,8 @@ # When the configuration file format is changed in a backwards-incompatible way, the oldest compatible version should # be set to the new current version. -CURRENT_CONFIG_VERSION = 7 -OLDEST_COMPATIBLE_CONFIG_VERSION = 7 +CURRENT_CONFIG_VERSION = 8 +OLDEST_COMPATIBLE_CONFIG_VERSION = 8 CONFIG_LOGGER = AIIDA_LOGGER.getChild('config') @@ -296,6 +296,53 @@ def downgrade(self, config: ConfigType) -> None: CONFIG_LOGGER.warning(f'profile {profile_name!r} had no expected "storage._v6_backend" key') +class AddTestProfileKey(SingleMigration): + """Add the ``test_profile`` key.""" + down_revision = 7 + down_compatible = 7 + up_revision = 8 + up_compatible = 8 + + def upgrade(self, config: ConfigType) -> None: + for profile_name, profile in config.get('profiles', {}).items(): + profile['test_profile'] = profile_name.startswith('test_') + + def downgrade(self, config: ConfigType) -> None: + profiles = config.get('profiles', {}) + profile_names = list(profiles.keys()) + + # Iterate over the fixed list of the profile names, since we are mutating the profiles dictionary. + for profile_name in profile_names: + + profile = profiles.pop(profile_name) + profile_name_new = None + test_profile = profile.pop('test_profile', False) # If absent, assume it is not a test profile + + if test_profile and not profile_name.startswith('test_'): + profile_name_new = f'test_{profile_name}' + CONFIG_LOGGER.warning( + f'profile `{profile_name}` is a test profile but does not start with the required `test_` prefix.' + ) + + if not test_profile and profile_name.startswith('test_'): + profile_name_new = profile_name[5:] + CONFIG_LOGGER.warning( + f'profile `{profile_name}` is not a test profile but starts with the `test_` prefix.' + ) + + if profile_name_new is not None: + + if profile_name_new in profile_names: + raise exceptions.ConfigurationError( + f'cannot change `{profile_name}` to `{profile_name_new}` because it already exists.' 
+ ) + + CONFIG_LOGGER.warning(f'changing profile name from `{profile_name}` to `{profile_name_new}`.') + profile_name = profile_name_new + + profiles[profile_name] = profile + + MIGRATIONS = ( Initial, AddProfileUuid, @@ -304,6 +351,7 @@ def downgrade(self, config: ConfigType) -> None: SimplifyOptions, AbstractStorageAndProcess, MergeStorageBackendTypes, + AddTestProfileKey, ) diff --git a/aiida/manage/configuration/profile.py b/aiida/manage/configuration/profile.py index 19b9aa9270..fc5e9d96b4 100644 --- a/aiida/manage/configuration/profile.py +++ b/aiida/manage/configuration/profile.py @@ -47,6 +47,7 @@ class Profile: # pylint: disable=too-many-public-methods KEY_PROCESS_BACKEND = 'backend' KEY_PROCESS_CONFIG = 'config' KEY_OPTIONS = 'options' + KEY_TEST_PROFILE = 'test_profile' # keys that are expected to be in the parsed configuration REQUIRED_KEYS = ( @@ -199,8 +200,17 @@ def is_test_profile(self) -> bool: :return: boolean, True if test profile, False otherwise """ - # Currently, whether a profile is a test profile is solely determined by its name starting with 'test_' - return self.name.startswith('test_') + # Check explicitly for ``True`` for safety. If an invalid value is defined, we default to treating it as not + # a test profile as that can unintentionally clear the database. + return self._attributes.get(self.KEY_TEST_PROFILE, False) is True + + @is_test_profile.setter + def is_test_profile(self, value: bool) -> None: + """Set whether the profile is a test profile. + + :param value: boolean indicating whether this profile is a test profile. + """ + self._attributes[self.KEY_TEST_PROFILE] = value @property def repository_path(self) -> pathlib.Path: diff --git a/aiida/manage/configuration/schema/config-v8.schema.json b/aiida/manage/configuration/schema/config-v8.schema.json new file mode 100644 index 0000000000..5fa7d6c062 --- /dev/null +++ b/aiida/manage/configuration/schema/config-v8.schema.json @@ -0,0 +1,342 @@ +{ + "$schema": "http://json-schema.org/draft-07/schema", + "description": "Schema for AiiDA configuration files, format version 8", + "type": "object", + "definitions": { + "options": { + "type": "object", + "properties": { + "runner.poll.interval": { + "type": "integer", + "default": 60, + "minimum": 0, + "description": "Polling interval in seconds to be used by process runners" + }, + "daemon.default_workers": { + "type": "integer", + "default": 1, + "minimum": 1, + "description": "Default number of workers to be launched by `verdi daemon start`" + }, + "daemon.timeout": { + "type": "integer", + "default": 20, + "minimum": 0, + "description": "Timeout in seconds for calls to the circus client" + }, + "daemon.worker_process_slots": { + "type": "integer", + "default": 200, + "minimum": 1, + "description": "Maximum number of concurrent process tasks that each daemon worker can handle" + }, + "db.batch_size": { + "type": "integer", + "default": 100000, + "minimum": 1, + "description": "Batch size for bulk CREATE operations in the database. Avoids hitting MaxAllocSize of PostgreSQL (1GB) when creating large numbers of database records in one go." 
+ }, + "verdi.shell.auto_import": { + "type": "string", + "default": "", + "description": "Additional modules/functions/classes to be automatically loaded in `verdi shell`, split by ':'" + }, + "logging.aiida_loglevel": { + "type": "string", + "enum": ["CRITICAL", "ERROR", "WARNING", "REPORT", "INFO", "DEBUG"], + "default": "REPORT", + "description": "Minimum level to log to daemon log and the `DbLog` table for the `aiida` logger" + }, + "logging.db_loglevel": { + "type": "string", + "enum": ["CRITICAL", "ERROR", "WARNING", "REPORT", "INFO", "DEBUG"], + "default": "REPORT", + "description": "Minimum level to log to the DbLog table" + }, + "logging.plumpy_loglevel": { + "type": "string", + "enum": ["CRITICAL", "ERROR", "WARNING", "REPORT", "INFO", "DEBUG"], + "default": "WARNING", + "description": "Minimum level to log to daemon log and the `DbLog` table for the `plumpy` logger" + }, + "logging.kiwipy_loglevel": { + "type": "string", + "enum": ["CRITICAL", "ERROR", "WARNING", "REPORT", "INFO", "DEBUG"], + "default": "WARNING", + "description": "Minimum level to log to daemon log and the `DbLog` table for the `kiwipy` logger" + }, + "logging.paramiko_loglevel": { + "key": "logging_paramiko_log_level", + "type": "string", + "enum": ["CRITICAL", "ERROR", "WARNING", "REPORT", "INFO", "DEBUG"], + "default": "WARNING", + "description": "Minimum level to log to daemon log and the `DbLog` table for the `paramiko` logger" + }, + "logging.alembic_loglevel": { + "type": "string", + "enum": ["CRITICAL", "ERROR", "WARNING", "REPORT", "INFO", "DEBUG"], + "default": "WARNING", + "description": "Minimum level to log to daemon log and the `DbLog` table for the `alembic` logger" + }, + "logging.sqlalchemy_loglevel": { + "type": "string", + "enum": ["CRITICAL", "ERROR", "WARNING", "REPORT", "INFO", "DEBUG"], + "default": "WARNING", + "description": "Minimum level to log to daemon log and the `DbLog` table for the `sqlalchemy` logger" + }, + "logging.circus_loglevel": { + "type": "string", + "enum": ["CRITICAL", "ERROR", "WARNING", "REPORT", "INFO", "DEBUG"], + "default": "INFO", + "description": "Minimum level to log to daemon log and the `DbLog` table for the `circus` logger" + }, + "logging.aiopika_loglevel": { + "type": "string", + "enum": ["CRITICAL", "ERROR", "WARNING", "REPORT", "INFO", "DEBUG"], + "default": "WARNING", + "description": "Minimum level to log to daemon log and the `DbLog` table for the `aio_pika` logger" + }, + "warnings.showdeprecations": { + "type": "boolean", + "default": true, + "description": "Whether to print AiiDA deprecation warnings" + }, + "warnings.development_version": { + "type": "boolean", + "default": true, + "description": "Whether to print a warning when a profile is loaded while a development version is installed", + "global_only": true + }, + "transport.task_retry_initial_interval": { + "type": "integer", + "default": 20, + "minimum": 1, + "description": "Initial time interval for the exponential backoff mechanism." + }, + "transport.task_maximum_attempts": { + "type": "integer", + "default": 5, + "minimum": 1, + "description": "Maximum number of transport task attempts before a Process is Paused." 
+ }, + "rmq.task_timeout": { + "type": "integer", + "default": 10, + "minimum": 1, + "description": "Timeout in seconds for communications with RabbitMQ" + }, + "caching.default_enabled": { + "type": "boolean", + "default": false, + "description": "Enable calculation caching by default" + }, + "caching.enabled_for": { + "description": "Calculation entry points to enable caching on", + "type": "array", + "default": [], + "items": { + "type": "string" + } + }, + "caching.disabled_for": { + "description": "Calculation entry points to disable caching on", + "type": "array", + "default": [], + "items": { + "type": "string" + } + }, + "autofill.user.email": { + "type": "string", + "global_only": true, + "description": "Default user email to use when creating new profiles." + }, + "autofill.user.first_name": { + "type": "string", + "global_only": true, + "description": "Default user first name to use when creating new profiles." + }, + "autofill.user.last_name": { + "type": "string", + "global_only": true, + "description": "Default user last name to use when creating new profiles." + }, + "autofill.user.institution": { + "type": "string", + "global_only": true, + "description": "Default user institution to use when creating new profiles." + } + } + }, + "profile": { + "type": "object", + "required": ["storage", "process_control"], + "properties": { + "PROFILE_UUID": { + "description": "The profile's unique key", + "type": "string" + }, + "storage": { + "description": "The storage configuration", + "type": "object", + "required": ["backend", "config"], + "properties": { + "backend": { + "description": "The storage backend type to use", + "type": "string", + "default": "psql_dos" + }, + "config": { + "description": "The configuration to pass to the storage backend", + "type": "object", + "properties": { + "database_engine": { + "type": "string", + "default": "postgresql_psycopg2" + }, + "database_port": { + "type": ["integer", "string"], + "minimum": 1, + "pattern": "\\d+", + "default": 5432 + }, + "database_hostname": { + "type": ["string", "null"], + "default": null + }, + "database_username": { + "type": "string" + }, + "database_password": { + "type": ["string", "null"], + "default": null + }, + "database_name": { + "type": "string" + }, + "repository_uri": { + "description": "URI to the AiiDA object store", + "type": "string" + } + } + } + } + }, + "process_control": { + "description": "The process control configuration", + "type": "object", + "required": ["backend", "config"], + "properties": { + "backend": { + "description": "The process execution backend type to use", + "type": "string", + "default": "rabbitmq" + }, + "config": { + "description": "The configuration to pass to the process execution backend", + "type": "object", + "parameters": { + "broker_protocol": { + "description": "Protocol for connecting to the RabbitMQ server", + "type": "string", + "enum": ["amqp", "amqps"], + "default": "amqp" + }, + "broker_username": { + "description": "Username for RabbitMQ authentication", + "type": "string", + "default": "guest" + }, + "broker_password": { + "description": "Password for RabbitMQ authentication", + "type": "string", + "default": "guest" + }, + "broker_host": { + "description": "Hostname of the RabbitMQ server", + "type": "string", + "default": "127.0.0.1" + }, + "broker_port": { + "description": "Port of the RabbitMQ server", + "type": "integer", + "minimum": 1, + "default": 5672 + }, + "broker_virtual_host": { + "description": "RabbitMQ virtual host to connect to", + "type": 
"string", + "default": "" + }, + "broker_parameters": { + "description": "RabbitMQ arguments that will be encoded as query parameters", + "type": "object", + "default": { + "heartbeat": 600 + }, + "properties": { + "heartbeat": { + "description": "After how many seconds the peer TCP connection should be considered unreachable", + "type": "integer", + "default": 600, + "minimum": 0 + } + } + } + } + } + } + }, + "default_user_email": { + "type": ["string", "null"], + "default": null + }, + "test_profile": { + "type": "boolean", + "default": false + }, + "options": { + "description": "Profile specific options", + "$ref": "#/definitions/options" + } + } + } + }, + "required": [], + "properties": { + "CONFIG_VERSION": { + "description": "The configuration version", + "type": "object", + "required": ["CURRENT", "OLDEST_COMPATIBLE"], + "properties": { + "CURRENT": { + "description": "Version number of configuration file format", + "type": "integer", + "const": 8 + }, + "OLDEST_COMPATIBLE": { + "description": "Version number of oldest configuration file format this file is compatible with", + "type": "integer", + "const": 8 + } + } + }, + "profiles": { + "description": "Configured profiles", + "type": "object", + "patternProperties": { + ".+": { + "$ref": "#/definitions/profile" + } + } + }, + "default_profile": { + "description": "Default profile to use", + "type": "string" + }, + "options": { + "description": "Global options", + "$ref": "#/definitions/options" + } + } +} diff --git a/aiida/manage/tests/main.py b/aiida/manage/tests/main.py index 450c8a0450..a1000e5884 100644 --- a/aiida/manage/tests/main.py +++ b/aiida/manage/tests/main.py @@ -52,7 +52,8 @@ 'broker_password': 'guest', 'broker_host': '127.0.0.1', 'broker_port': 5672, - 'broker_virtual_host': '' + 'broker_virtual_host': '', + 'test_profile': True, } @@ -251,6 +252,7 @@ def profile_dictionary(self): Used to set up AiiDA profile from self.profile_info dictionary. """ dictionary = { + 'test_profile': True, 'storage': { 'backend': self.profile_info.get('storage_backend'), 'config': { diff --git a/docs/source/reference/command_line.rst b/docs/source/reference/command_line.rst index 1a42cef636..7043a13b40 100644 --- a/docs/source/reference/command_line.rst +++ b/docs/source/reference/command_line.rst @@ -399,6 +399,8 @@ Below is a list with all available subcommands. --broker-virtual-host TEXT Name of the virtual host for the message broker without leading forward slash. --repository DIRECTORY Absolute path to the file repository. + --test-profile Designate the profile to be used for running the test + suite only. --config FILEORURL Load option values from configuration file in yaml format (local path or URL). --help Show this message and exit. @@ -497,6 +499,8 @@ Below is a list with all available subcommands. --broker-virtual-host TEXT Name of the virtual host for the message broker without leading forward slash. [required] --repository DIRECTORY Absolute path to the file repository. + --test-profile Designate the profile to be used for running the test + suite only. --config FILEORURL Load option values from configuration file in yaml format (local path or URL). --help Show this message and exit. 
diff --git a/tests/manage/configuration/migrations/test_migrations.py b/tests/manage/configuration/migrations/test_migrations.py index 9608aaaaf2..68c2b06336 100644 --- a/tests/manage/configuration/migrations/test_migrations.py +++ b/tests/manage/configuration/migrations/test_migrations.py @@ -7,17 +7,18 @@ # For further information on the license, see the LICENSE.txt file # # For further information please visit http://www.aiida.net # ########################################################################### +# pylint: disable=redefined-outer-name """Tests for the configuration migration functionality.""" -from copy import deepcopy -import os -from unittest import TestCase -from unittest.mock import patch +import copy +import pathlib import uuid +import pytest + from aiida.common import json from aiida.common.exceptions import ConfigurationError from aiida.manage.configuration.migrations import check_and_migrate_config -from aiida.manage.configuration.migrations.migrations import Initial, downgrade_config, upgrade_config +from aiida.manage.configuration.migrations.migrations import MIGRATIONS, Initial, downgrade_config, upgrade_config class CircularMigration(Initial): @@ -25,108 +26,113 @@ class CircularMigration(Initial): down_revision = 5 -class TestConfigMigration(TestCase): - """Tests for the configuration migration functionality.""" +@pytest.fixture +def load_config_sample(): + """Load a configuration file from a fixture.""" - @staticmethod - def load_config_sample(filename): - """Load a configuration file from a fixture.""" - currdir = os.path.dirname(os.path.abspath(__file__)) - filepath = os.path.join(currdir, 'test_samples', filename) - with open(filepath, 'r', encoding='utf8') as handle: + def _factory(filename): + with (pathlib.Path(__file__).parent / 'test_samples' / filename).open() as handle: return json.load(handle) - def setUp(self): - super().setUp() - self.maxDiff = None # pylint: disable=invalid-name - - def test_upgrade_path_fail(self): - """Test failure when no upgrade path is available.""" - config_initial = self.load_config_sample('reference/5.json') - # target lower than initial - with self.assertRaises(ConfigurationError): - upgrade_config(deepcopy(config_initial), 1) - # no migration available - with self.assertRaises(ConfigurationError): - upgrade_config(deepcopy(config_initial), 100) - # circular dependency - with self.assertRaises(ConfigurationError): - upgrade_config(config_initial, 6, migrations=[CircularMigration]) - - def test_downgrade_path_fail(self): - """Test failure when no downgrade path is available.""" - config_initial = self.load_config_sample('reference/5.json') - # target higher than initial - with self.assertRaises(ConfigurationError): - downgrade_config(deepcopy(config_initial), 6) - # no migration available - with self.assertRaises(ConfigurationError): - downgrade_config(deepcopy(config_initial), -1) - # circular dependency - with self.assertRaises(ConfigurationError): - downgrade_config(config_initial, 4, migrations=[CircularMigration]) - - def test_check_and_migrate(self): - """Test the full config migration.""" - config_initial = self.load_config_sample('input/0.json') - with patch.object(uuid, 'uuid4', return_value=uuid.UUID(hex='0' * 32)): - config_migrated = check_and_migrate_config(config_initial) - config_reference = self.load_config_sample('reference/final.json') - self.assertEqual(config_migrated, config_reference) - - def test_0_1_migration(self): - """Test the step between config versions 0 and 1.""" - config_initial = 
self.load_config_sample('input/0.json') - config_reference = self.load_config_sample('reference/1.json') - config_migrated = upgrade_config(config_initial, 1) - self.assertEqual(config_migrated, config_reference) - - def test_1_2_migration(self): - """Test the step between config versions 1 and 2.""" - config_initial = self.load_config_sample('input/1.json') - config_reference = self.load_config_sample('reference/2.json') - with patch.object(uuid, 'uuid4', return_value=uuid.UUID(hex='0' * 32)): - config_migrated = upgrade_config(config_initial, 2) - self.assertEqual(config_migrated, config_reference) - - def test_2_3_migration(self): - """Test the step between config versions 2 and 3.""" - config_initial = self.load_config_sample('input/2.json') - config_reference = self.load_config_sample('reference/3.json') - config_migrated = upgrade_config(config_initial, 3) - self.assertEqual(config_migrated, config_reference) - - def test_3_4_migration(self): - """Test the step between config versions 3 and 4.""" - config_initial = self.load_config_sample('input/3.json') - config_reference = self.load_config_sample('reference/4.json') - config_migrated = upgrade_config(config_initial, 4) - self.assertEqual(config_migrated, config_reference) - - def test_4_5_migration(self): - """Test the step between config versions 4 and 5.""" - config_initial = self.load_config_sample('input/4.json') - config_reference = self.load_config_sample('reference/5.json') - config_migrated = upgrade_config(config_initial, 5) - self.assertEqual(config_migrated, config_reference) - - def test_5_6_migration(self): - """Test the step between config versions 5 and 6.""" - config_initial = self.load_config_sample('input/5.json') - config_reference = self.load_config_sample('reference/6.json') - config_migrated = upgrade_config(config_initial, 6) - self.assertEqual(config_migrated, config_reference) - - def test_6_5_migration(self): - """Test the step between config versions 6 and 5.""" - config_initial = self.load_config_sample('reference/6.json') - config_reference = self.load_config_sample('input/5.json') - config_migrated = downgrade_config(config_initial, 5) - self.assertEqual(config_migrated, config_reference) - - def test_6_7_migration(self): - """Test the step between config versions 6 and 7.""" - config_initial = self.load_config_sample('input/6.json') - config_reference = self.load_config_sample('reference/7.json') - config_migrated = upgrade_config(config_initial, 7) - self.assertEqual(config_migrated, config_reference) + return _factory + + +def test_upgrade_path_fail(load_config_sample): + """Test failure when no upgrade path is available.""" + config_initial = load_config_sample('reference/5.json') + # target lower than initial + with pytest.raises(ConfigurationError): + upgrade_config(copy.deepcopy(config_initial), 1) + # no migration available + with pytest.raises(ConfigurationError): + upgrade_config(copy.deepcopy(config_initial), 100) + # circular dependency + with pytest.raises(ConfigurationError): + upgrade_config(config_initial, 6, migrations=[CircularMigration]) + + +def test_downgrade_path_fail(load_config_sample): + """Test failure when no downgrade path is available.""" + config_initial = load_config_sample('reference/5.json') + # target higher than initial + with pytest.raises(ConfigurationError): + downgrade_config(copy.deepcopy(config_initial), 6) + # no migration available + with pytest.raises(ConfigurationError): + downgrade_config(copy.deepcopy(config_initial), -1) + # circular dependency + with 
pytest.raises(ConfigurationError): + downgrade_config(config_initial, 4, migrations=[CircularMigration]) + + +def test_migrate_full(load_config_sample, monkeypatch): + """Test the full config migration.""" + config_initial = load_config_sample('input/0.json') + config_target = load_config_sample('reference/final.json') + + # This change is necessary for the migration to version 2. + monkeypatch.setattr(uuid, 'uuid4', lambda: uuid.UUID(hex='0' * 32)) + + config_migrated = check_and_migrate_config(config_initial) + assert config_migrated == config_target + + +@pytest.mark.parametrize('initial, target', ((m.down_revision, m.up_revision) for m in MIGRATIONS)) +def test_migrate_individual(load_config_sample, initial, target, monkeypatch): + """Test the individual config migrations.""" + config_initial = load_config_sample(f'input/{initial}.json') + config_target = load_config_sample(f'reference/{target}.json') + + if target == 2: + monkeypatch.setattr(uuid, 'uuid4', lambda: uuid.UUID(hex='0' * 32)) + + config_migrated = upgrade_config(config_initial, target) + assert config_migrated == config_target + + +def test_add_test_profile_key_downgrade_profile(empty_config, profile_factory, caplog): + """Test the downgrade of schema version 8. + + Test what happens if a configuration contains a normal profile whose name starts with ``test_``. In this case, it + should automatically rename the profile by removing the prefix, unless that name already exists, in which case an + exception should be raised. + """ + config = empty_config + profile = profile_factory('test_profile') + profile.is_test_profile = False + config.add_profile(profile) + + config_migrated = downgrade_config(config.dictionary, 7) + assert list(config_migrated['profiles'].keys()) == ['profile'] + assert 'profile `test_profile` is not a test profile but starts with' in caplog.records[0].message + assert 'changing profile name from `test_profile` to `profile`.' in caplog.records[1].message + + profile = profile_factory('profile') + config.add_profile(profile) + + with pytest.raises(ConfigurationError, match=r'cannot change `.*` to `.*` because it already exists.'): + downgrade_config(config.dictionary, 7) + + +def test_add_test_profile_key_downgrade_test_profile(empty_config, profile_factory, caplog): + """Test the downgrade of schema version 8. + + Some special care needs to be taken in the downgrade in case the profile name does not have the correct heuristics + with respect to its ``test_profile`` value, as the profile name is interpreted in schema version 7 and lower to + determine whether a profile is a test profile or not. + """ + config = empty_config + profile = profile_factory('profile') + profile.is_test_profile = True + config.add_profile(profile) + + config_migrated = downgrade_config(config.dictionary, 7) + assert list(config_migrated['profiles'].keys()) == ['test_profile'] + assert 'profile `profile` is a test profile but does not start with' in caplog.records[0].message + assert 'changing profile name from `profile` to `test_profile`.' 
in caplog.records[1].message + + profile = profile_factory('test_profile') + config.add_profile(profile) + + with pytest.raises(ConfigurationError, match=r'cannot change `.*` to `.*` because it already exists.'): + downgrade_config(config.dictionary, 7) diff --git a/tests/manage/configuration/migrations/test_samples/input/7.json b/tests/manage/configuration/migrations/test_samples/input/7.json new file mode 100644 index 0000000000..f8f929f1ce --- /dev/null +++ b/tests/manage/configuration/migrations/test_samples/input/7.json @@ -0,0 +1,34 @@ +{ + "CONFIG_VERSION": { "CURRENT": 7, "OLDEST_COMPATIBLE": 7 }, + "default_profile": "default", + "profiles": { + "default": { + "PROFILE_UUID": "00000000000000000000000000000000", + "default_user_email": "email@aiida.net", + "storage": { + "_v6_backend": "django", + "backend": "psql_dos", + "config": { + "database_engine": "postgresql_psycopg2", + "database_password": "some_random_password", + "database_name": "aiidadb_qs_some_user", + "database_hostname": "localhost", + "database_port": "5432", + "database_username": "aiida_qs_greschd", + "repository_uri": "file:////home/some_user/.aiida/repository-quicksetup/" + } + }, + "process_control": { + "backend": "rabbitmq", + "config": { + "broker_protocol": "amqp", + "broker_username": "guest", + "broker_password": "guest", + "broker_host": "127.0.0.1", + "broker_port": 5672, + "broker_virtual_host": "" + } + } + } + } +} diff --git a/tests/manage/configuration/migrations/test_samples/reference/8.json b/tests/manage/configuration/migrations/test_samples/reference/8.json new file mode 100644 index 0000000000..5ca153216a --- /dev/null +++ b/tests/manage/configuration/migrations/test_samples/reference/8.json @@ -0,0 +1,35 @@ +{ + "CONFIG_VERSION": { "CURRENT": 8, "OLDEST_COMPATIBLE": 8 }, + "default_profile": "default", + "profiles": { + "default": { + "default_user_email": "email@aiida.net", + "PROFILE_UUID": "00000000000000000000000000000000", + "storage": { + "backend": "psql_dos", + "config": { + "database_engine": "postgresql_psycopg2", + "database_password": "some_random_password", + "database_name": "aiidadb_qs_some_user", + "database_hostname": "localhost", + "database_port": "5432", + "database_username": "aiida_qs_greschd", + "repository_uri": "file:////home/some_user/.aiida/repository-quicksetup/" + }, + "_v6_backend": "django" + }, + "process_control": { + "backend": "rabbitmq", + "config": { + "broker_protocol": "amqp", + "broker_username": "guest", + "broker_password": "guest", + "broker_host": "127.0.0.1", + "broker_port": 5672, + "broker_virtual_host": "" + } + }, + "test_profile": false + } + } +} diff --git a/tests/manage/configuration/migrations/test_samples/reference/final.json b/tests/manage/configuration/migrations/test_samples/reference/final.json index b493dbf903..5ca153216a 100644 --- a/tests/manage/configuration/migrations/test_samples/reference/final.json +++ b/tests/manage/configuration/migrations/test_samples/reference/final.json @@ -1,5 +1,5 @@ { - "CONFIG_VERSION": { "CURRENT": 7, "OLDEST_COMPATIBLE": 7 }, + "CONFIG_VERSION": { "CURRENT": 8, "OLDEST_COMPATIBLE": 8 }, "default_profile": "default", "profiles": { "default": { @@ -28,7 +28,8 @@ "broker_port": 5672, "broker_virtual_host": "" } - } + }, + "test_profile": false } } } diff --git a/tests/manage/configuration/test_profile.py b/tests/manage/configuration/test_profile.py index d2752edbd4..5ff43a1fee 100644 --- a/tests/manage/configuration/test_profile.py +++ b/tests/manage/configuration/test_profile.py @@ -27,6 +27,7 @@ 
def init_profile(self): # pylint: disable=unused-argument # pylint: disable=attribute-defined-outside-init self.profile_name = 'test_profile' self.profile_dictionary = { + 'test_profile': True, 'default_user_email': 'dummy@localhost', 'storage': { 'backend': 'psql_dos', @@ -73,9 +74,9 @@ def test_base_properties(self): assert self.profile.uuid in self.profile.rmq_prefix def test_is_test_profile(self): - """Test that a profile whose name starts with `test_` is marked as a test profile.""" - profile_name = 'not_a_test_profile' - profile = create_mock_profile(name=profile_name) + """Test the :meth:`aiida.manage.configuration.profile.is_test_profile` property.""" + profile = create_mock_profile(name='not_test_profile') + profile.is_test_profile = False # The one constructed in the setUpClass should be a test profile assert self.profile.is_test_profile diff --git a/tests/storage/psql_dos/migrations/conftest.py b/tests/storage/psql_dos/migrations/conftest.py index 0650cc9a75..347ccdde74 100644 --- a/tests/storage/psql_dos/migrations/conftest.py +++ b/tests/storage/psql_dos/migrations/conftest.py @@ -76,6 +76,7 @@ def uninitialised_profile(empty_pg_cluster: PGTest, tmp_path): # pylint: disabl yield Profile( 'test_migrate', { + 'test_profile': True, 'storage': { 'backend': 'psql_dos', 'config': {
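
To make the behaviour of the new ``AddTestProfileKey`` migration concrete, the following is a minimal round-trip sketch (illustrative, not part of the PR's test suite; it assumes ``upgrade_config`` and ``downgrade_config`` accept a stripped-down configuration dictionary like the samples above, and the profile names are made up for the example).

import copy

from aiida.manage.configuration.migrations.migrations import downgrade_config, upgrade_config

config_v7 = {
    'CONFIG_VERSION': {'CURRENT': 7, 'OLDEST_COMPATIBLE': 7},
    'profiles': {
        'test_tutorial': {},  # under the old heuristic, the ``test_`` prefix marks a test profile
        'production': {},
    },
}

# Upgrading to version 8 derives the new explicit key from the profile name.
config_v8 = upgrade_config(copy.deepcopy(config_v7), 8)
assert config_v8['profiles']['test_tutorial']['test_profile'] is True
assert config_v8['profiles']['production']['test_profile'] is False

# Downgrading restores the name-based convention: a profile whose flag disagrees
# with its name would be renamed (with a warning); here both agree, so the names
# are kept unchanged.
config_v7_again = downgrade_config(config_v8, 7)
assert set(config_v7_again['profiles']) == {'test_tutorial', 'production'}
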