diff --git a/doc/code_snippets/snippets/migrations/README.md b/doc/code_snippets/snippets/migrations/README.md new file mode 100644 index 0000000000..1f2f58b298 --- /dev/null +++ b/doc/code_snippets/snippets/migrations/README.md @@ -0,0 +1,4 @@ +# Centralized migrations with tt + +Sample applications demonstrating how to use the centralized migration mechanism +for Tarantool EE clusters via the tt utility. Learn more at [Migrations](https://www.tarantool.io/en/doc/latest/platform/ddl_dml/migrations/). diff --git a/doc/code_snippets/snippets/migrations/etcd_setup.sh b/doc/code_snippets/snippets/migrations/etcd_setup.sh new file mode 100644 index 0000000000..31855a5992 --- /dev/null +++ b/doc/code_snippets/snippets/migrations/etcd_setup.sh @@ -0,0 +1,12 @@ +#!/usr/bin/env bash + +# 1. Remove the 'default.etcd' directory to reset etcd to initial state. +# 2. Start etcd by executing the 'etcd' command. +# 3. Execute this script to enable authentication. 
+ +etcdctl user add root:topsecret +etcdctl role add app_config_manager +etcdctl role grant-permission app_config_manager --prefix=true readwrite /myapp/ +etcdctl user add app_user:config_pass +etcdctl user grant-role app_user app_config_manager +etcdctl auth enable diff --git a/doc/code_snippets/snippets/migrations/instances.enabled/myapp/config.yaml b/doc/code_snippets/snippets/migrations/instances.enabled/myapp/config.yaml new file mode 100644 index 0000000000..2e9d6f9fd4 --- /dev/null +++ b/doc/code_snippets/snippets/migrations/instances.enabled/myapp/config.yaml @@ -0,0 +1,10 @@ +config: + etcd: + endpoints: + - http://localhost:2379 + prefix: /myapp/ + username: app_user + password: config_pass + http: + request: + timeout: 3 diff --git a/doc/code_snippets/snippets/migrations/instances.enabled/myapp/instances-3-storages.yml b/doc/code_snippets/snippets/migrations/instances.enabled/myapp/instances-3-storages.yml new file mode 100644 index 0000000000..ef5d67a472 --- /dev/null +++ b/doc/code_snippets/snippets/migrations/instances.enabled/myapp/instances-3-storages.yml @@ -0,0 +1,7 @@ +router-001-a: +storage-001-a: +storage-001-b: +storage-002-a: +storage-002-b: +storage-003-a: +storage-003-b: diff --git a/doc/code_snippets/snippets/migrations/instances.enabled/myapp/instances.yml b/doc/code_snippets/snippets/migrations/instances.enabled/myapp/instances.yml new file mode 100644 index 0000000000..de6c038972 --- /dev/null +++ b/doc/code_snippets/snippets/migrations/instances.enabled/myapp/instances.yml @@ -0,0 +1,5 @@ +router-001-a: +storage-001-a: +storage-001-b: +storage-002-a: +storage-002-b: diff --git a/doc/code_snippets/snippets/migrations/instances.enabled/myapp/myapp-scm-1.rockspec b/doc/code_snippets/snippets/migrations/instances.enabled/myapp/myapp-scm-1.rockspec new file mode 100644 index 0000000000..a7a4035198 --- /dev/null +++ b/doc/code_snippets/snippets/migrations/instances.enabled/myapp/myapp-scm-1.rockspec @@ -0,0 +1,14 @@ +package = 'myapp' 
+version = 'scm-1' + +source = { + url = '/dev/null', +} + +dependencies = { + 'crud == 1.5.2', +} + +build = { + type = 'none'; +} diff --git a/doc/code_snippets/snippets/migrations/instances.enabled/myapp/source-3-storages.yaml b/doc/code_snippets/snippets/migrations/instances.enabled/myapp/source-3-storages.yaml new file mode 100644 index 0000000000..61122da382 --- /dev/null +++ b/doc/code_snippets/snippets/migrations/instances.enabled/myapp/source-3-storages.yaml @@ -0,0 +1,88 @@ +credentials: + users: + client: + password: 'secret' + roles: [super] + replicator: + password: 'secret' + roles: [replication] + storage: + password: 'secret' + roles: [sharding] + +iproto: + advertise: + peer: + login: replicator + sharding: + login: storage + +sharding: + bucket_count: 3000 + +groups: + routers: + sharding: + roles: [router] + roles: [roles.crud-router] + replicasets: + router-001: + instances: + router-001-a: + iproto: + listen: + - uri: localhost:3301 + advertise: + client: localhost:3301 + storages: + sharding: + roles: [storage] + roles: [roles.crud-storage] + replication: + failover: manual + replicasets: + storage-001: + leader: storage-001-a + instances: + storage-001-a: + iproto: + listen: + - uri: localhost:3302 + advertise: + client: localhost:3302 + storage-001-b: + iproto: + listen: + - uri: localhost:3303 + advertise: + client: localhost:3303 + storage-002: + leader: storage-002-a + instances: + storage-002-a: + iproto: + listen: + - uri: localhost:3304 + advertise: + client: localhost:3304 + storage-002-b: + iproto: + listen: + - uri: localhost:3305 + advertise: + client: localhost:3305 + storage-003: + leader: storage-003-a + instances: + storage-003-a: + iproto: + listen: + - uri: localhost:3306 + advertise: + client: localhost:3306 + storage-003-b: + iproto: + listen: + - uri: localhost:3307 + advertise: + client: localhost:3307 diff --git a/doc/code_snippets/snippets/migrations/instances.enabled/myapp/source.yaml 
b/doc/code_snippets/snippets/migrations/instances.enabled/myapp/source.yaml new file mode 100644 index 0000000000..7e49410ab3 --- /dev/null +++ b/doc/code_snippets/snippets/migrations/instances.enabled/myapp/source.yaml @@ -0,0 +1,73 @@ +credentials: + users: + client: + password: 'secret' + roles: [super] + replicator: + password: 'secret' + roles: [replication] + storage: + password: 'secret' + roles: [sharding] + +iproto: + advertise: + peer: + login: replicator + sharding: + login: storage + +sharding: + bucket_count: 3000 + +groups: + routers: + sharding: + roles: [router] + roles: [roles.crud-router] + replicasets: + router-001: + instances: + router-001-a: + iproto: + listen: + - uri: localhost:3301 + advertise: + client: localhost:3301 + storages: + sharding: + roles: [storage] + roles: [roles.crud-storage] + replication: + failover: manual + replicasets: + storage-001: + leader: storage-001-a + instances: + storage-001-a: + iproto: + listen: + - uri: localhost:3302 + advertise: + client: localhost:3302 + storage-001-b: + iproto: + listen: + - uri: localhost:3303 + advertise: + client: localhost:3303 + storage-002: + leader: storage-002-a + instances: + storage-002-a: + iproto: + listen: + - uri: localhost:3304 + advertise: + client: localhost:3304 + storage-002-b: + iproto: + listen: + - uri: localhost:3305 + advertise: + client: localhost:3305 diff --git a/doc/code_snippets/snippets/migrations/migrations/scenario/000001_create_writers_space.lua b/doc/code_snippets/snippets/migrations/migrations/scenario/000001_create_writers_space.lua new file mode 100644 index 0000000000..c8e6fc45ce --- /dev/null +++ b/doc/code_snippets/snippets/migrations/migrations/scenario/000001_create_writers_space.lua @@ -0,0 +1,23 @@ +local helpers = require('tt-migrations.helpers') + +local function apply_scenario() + local space = box.schema.space.create('writers') + + space:format({ + {name = 'id', type = 'number'}, + {name = 'bucket_id', type = 'number'}, + {name = 'name', 
type = 'string'}, + {name = 'age', type = 'number'}, + }) + + space:create_index('primary', {parts = {'id'}}) + space:create_index('bucket_id', {parts = {'bucket_id'}}) + + helpers.register_sharding_key('writers', {'id'}) +end + +return { + apply = { + scenario = apply_scenario, + }, +} diff --git a/doc/code_snippets/snippets/migrations/migrations/scenario/000002_create_writers_index.lua b/doc/code_snippets/snippets/migrations/migrations/scenario/000002_create_writers_index.lua new file mode 100644 index 0000000000..fdee011a3e --- /dev/null +++ b/doc/code_snippets/snippets/migrations/migrations/scenario/000002_create_writers_index.lua @@ -0,0 +1,11 @@ +local function apply_scenario() + local space = box.space['writers'] + + space:create_index('age', {parts = {'age'}}) +end + +return { + apply = { + scenario = apply_scenario, + }, +} diff --git a/doc/code_snippets/snippets/migrations/migrations/scenario/000003_alter_writers_space.lua b/doc/code_snippets/snippets/migrations/migrations/scenario/000003_alter_writers_space.lua new file mode 100644 index 0000000000..3d53314782 --- /dev/null +++ b/doc/code_snippets/snippets/migrations/migrations/scenario/000003_alter_writers_space.lua @@ -0,0 +1,48 @@ +local function apply_scenario() + local space = box.space['writers'] + local new_format = { + {name = 'id', type = 'number'}, + {name = 'bucket_id', type = 'number'}, + {name = 'first_name', type = 'string'}, + {name = 'last_name', type = 'string'}, + {name = 'age', type = 'number'}, + } + box.space.writers.index.age:drop() + + box.schema.func.create('_writers_split_name', { + language = 'lua', + is_deterministic = true, + body = [[ + function(t) + local name = t[3] + + local split_data = {} + local split_regex = '([^%s]+)' + for v in string.gmatch(name, split_regex) do + table.insert(split_data, v) + end + + local first_name = split_data[1] + assert(first_name ~= nil) + + local last_name = split_data[2] + assert(last_name ~= nil) + + return {t[1], t[2], first_name, 
last_name, t[4]} + end + ]], + }) + + local future = space:upgrade({ + func = '_writers_split_name', + format = new_format, + }) + + future:wait() +end + +return { + apply = { + scenario = apply_scenario, + }, +} diff --git a/doc/code_snippets/snippets/migrations/tt.yaml b/doc/code_snippets/snippets/migrations/tt.yaml new file mode 100644 index 0000000000..e9cf7000cf --- /dev/null +++ b/doc/code_snippets/snippets/migrations/tt.yaml @@ -0,0 +1,54 @@ +modules: + # Directory where the external modules are stored. + directory: modules + +env: + # Restart instance on failure. + restart_on_failure: false + + # Directory that stores binary files. + bin_dir: bin + + # Directory that stores Tarantool header files. + inc_dir: include + + # Path to directory that stores all applications. + # The directory can also contain symbolic links to applications. + instances_enabled: instances.enabled + + # Tarantoolctl artifacts layout compatibility: if set to true tt will not create application + # sub-directories for control socket, pid files, log files, etc.. Data files (wal, vinyl, + # snap) and multi-instance applications are not affected by this option. + tarantoolctl_layout: false + +app: + # Directory that stores various instance runtime + # artifacts like console socket, PID file, etc. + run_dir: var/run + + # Directory that stores log files. + log_dir: var/log + + # Directory where write-ahead log (.xlog) files are stored. + wal_dir: var/lib + + # Directory where memtx stores snapshot (.snap) files. + memtx_dir: var/lib + + # Directory where vinyl files or subdirectories will be stored. + vinyl_dir: var/lib + +# Path to file with credentials for downloading Tarantool Enterprise Edition. +# credential_path: /path/to/file +ee: + credential_path: + +templates: + # The path to templates search directory. + - path: templates + +repo: + # Directory where local rocks files could be found. + rocks: + # Directory that stores installation files. 
+ distfiles: distfiles diff --git a/doc/platform/ddl_dml/index.rst b/doc/platform/ddl_dml/index.rst index d595172eb4..d629312295 100644 --- a/doc/platform/ddl_dml/index.rst +++ b/doc/platform/ddl_dml/index.rst @@ -32,7 +32,7 @@ This section contains guides on performing data operations in Tarantool. value_store schema_desc operations - migrations + migrations/index read_views sql/index diff --git a/doc/platform/ddl_dml/migrations.rst b/doc/platform/ddl_dml/migrations.rst deleted file mode 100644 index eb4c95736e..0000000000 --- a/doc/platform/ddl_dml/migrations.rst +++ /dev/null @@ -1,154 +0,0 @@ -.. _migrations: - -Migrations -========== - -**Migration** refers to any change in a data schema: adding/removing a field, -creating/dropping an index, changing a field format, etc. - -In Tarantool, there are two types of schema migration -that do not require data migration: - -- adding a field to the end of a space - -- creating an index - -.. note:: - - Check the :ref:`Upgrading space schema ` section. - With the help of ``space:upgrade()``, - you can enable compression and migrate, including already created tuples. - - -Adding a field to the end of a space ------------------------------------- - -You can add a field as follows: - -.. code:: lua - - local users = box.space.users - local fmt = users:format() - - table.insert(fmt, { name = 'age', type = 'number', is_nullable = true }) - users:format(fmt) - -Note that the field must have the ``is_nullable`` parameter. Otherwise, -an error will occur. - -After creating a new field, you probably want to fill it with data. -The `tarantool/moonwalker `_ -module is useful for this task. -The README file describes how to work with this module. - -Creating an index ------------------ - -Index creation is described in the -:doc:`/reference/reference_lua/box_space/create_index` method. - -.. 
_other-migrations: - -Other types of migrations -------------------------- - -Other types of migrations are also allowed, but it would be more difficult to -maintain data consistency. - -Migrations are possible in two cases: - -- When Tarantool starts, and no client uses the database yet - -- During request processing, when active clients are already using the database - -For the first case, it is enough to write and test the migration code. -The most difficult task is to migrate data when there are active clients. -You should keep it in mind when you initially design the data schema. - -We identify the following problems if there are active clients: - -- Associated data can change atomically. - -- The system should be able to transfer data using both the new schema and the old one. - -- When data is being transferred to a new space, data access should consider - that the data might be in one space or another. - -- Write requests must not interfere with the migration. - A common approach is to write according to the new data schema. - -These issues may or may not be relevant depending on your application and -its availability requirements. - -What you need to know when writing complex migrations ------------------------------------------------------ - -Tarantool has a transaction mechanism. It is useful when writing a migration, -because it allows you to work with the data atomically. But before using -the transaction mechanism, you should explore its limitations. - -For details, see the section about :ref:`transactions `. - -How you can apply migration ---------------------------- - -The migration code is executed on a running Tarantool instance. -Important: no method guarantees you transactional application of migrations -on the whole cluster. - -**Method 1**: include migrations in the application code - -This is quite simple: when you reload the code, the data is migrated at the right moment, -and the database schema is updated. 
-However, this method may not work for everyone. -You may not be able to restart Tarantool or update the code using the hot-reload mechanism. - -**Method 2**: tarantool/migrations (only for a Tarantool Cartridge cluster) - -This method is described in the README file of the -`tarantool/migrations `_ module. - -.. note:: - - There are also two other methods that we **do not recommend**, - but you may find them useful for one reason or another. - - **Method 3**: the :ref:`tt ` utility - - Connect to the necessary instance using ``tt connect``. - - .. code:: console - - $ tt connect admin:password@localhost:3301 - - - If your migration is written in a Lua file, you can execute it - using ``dofile()``. Call this function and specify the path to the - migration file as the first argument. It looks like this: - - .. code-block:: tarantoolsession - - tarantool> dofile('0001-delete-space.lua') - --- - ... - - - (or) Copy the migration script code, - paste it into the console, and run it. - - You can also connect to the instance and execute the migration script in a single call: - - .. code:: console - - $ tt connect admin:password@localhost:3301 -f 0001-delete-space.lua - - - **Method 4**: applying migration with Ansible - - If you use the `Ansible role `_ - to deploy a Tarantool cluster, you can use ``eval``. - You can find more information about it - `in the Ansible role documentation `_. - -.. toctree:: - :hidden: - - space_upgrade diff --git a/doc/platform/ddl_dml/migrations/basic_migrations_tt.rst b/doc/platform/ddl_dml/migrations/basic_migrations_tt.rst new file mode 100644 index 0000000000..9ae58f6609 --- /dev/null +++ b/doc/platform/ddl_dml/migrations/basic_migrations_tt.rst @@ -0,0 +1,311 @@ +.. 
_basic_migrations_tt: + +Basic tt migrations tutorial +============================ + +**Example on GitHub:** `migrations `_ + +In this tutorial, you learn to define the cluster data schema using the centralized +migration management mechanism implemented in the Enterprise Edition of the :ref:`tt ` utility. + +.. _basic_migrations_tt_prereq: + +Prerequisites +------------- + +Before starting this tutorial: + +- Download and :ref:`install Tarantool Enterprise SDK `. +- Install `etcd `__. + +.. _basic_migrations_tt_cluster: + +Preparing a cluster +------------------- + +The centralized migration mechanism works with Tarantool EE clusters that: + +- use etcd as a centralized configuration storage +- use the `CRUD `__ module or its Enterprise + version for data distribution + +.. _basic_migrations_tt_cluster_etcd: + +Setting up etcd +~~~~~~~~~~~~~~~ + +First, start up an etcd instance to use as a configuration storage: + +.. code-block:: console + + $ etcd + +etcd runs on the default port 2379. + +Optionally, enable etcd authentication by executing the following script: + +.. code-block:: bash + + #!/usr/bin/env bash + + etcdctl user add root:topsecret + etcdctl role add app_config_manager + etcdctl role grant-permission app_config_manager --prefix=true readwrite /myapp/ + etcdctl user add app_user:config_pass + etcdctl user grant-role app_user app_config_manager + etcdctl auth enable + +It creates an etcd user ``app_user`` with read and write permissions to the ``/myapp`` +prefix, in which the cluster configuration will be stored. The user's password is ``config_pass``. + +.. note:: + + If you don't enable etcd authentication, make ``tt migrations`` calls without + the configuration storage credentials. + +.. _basic_migrations_tt_cluster_create: + +Creating a cluster +~~~~~~~~~~~~~~~~~~ + +#. Initialize a ``tt`` environment: + + .. code-block:: console + + $ tt init + +#. In the ``instances.enabled`` directory, create the ``myapp`` directory. +#. 
Go to the ``instances.enabled/myapp`` directory and create application files: + + - ``instances.yml``: + + .. literalinclude:: /code_snippets/snippets/migrations/instances.enabled/myapp/instances.yml + :language: yaml + :dedent: + + - ``config.yaml``: + + .. literalinclude:: /code_snippets/snippets/migrations/instances.enabled/myapp/config.yaml + :language: yaml + :dedent: + + - ``myapp-scm-1.rockspec``: + + .. literalinclude:: /code_snippets/snippets/migrations/instances.enabled/myapp/myapp-scm-1.rockspec + :language: none + :dedent: + +4. Create the ``source.yaml`` with a cluster configuration to publish to etcd: + + .. note:: + + This configuration describes a typical CRUD-enabled sharded cluster with + one router and two storage replica sets, each including one master and one read-only replica. + + .. literalinclude:: /code_snippets/snippets/migrations/instances.enabled/myapp/source.yaml + :language: yaml + :dedent: + +#. Publish the configuration to etcd: + + .. code-block:: console + + $ tt cluster publish "http://app_user:config_pass@localhost:2379/myapp/" source.yaml + +The full cluster code is available on GitHub here: `migrations `_. + +.. _basic_migrations_tt_cluster_start: + +Building and starting the cluster +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +#. Build the application: + + .. code-block:: console + + $ tt build myapp + +#. Start the cluster: + + .. code-block:: console + + $ tt start myapp + + To check that the cluster is up and running, use ``tt status``: + + .. code-block:: console + + $ tt status myapp + +#. Bootstrap vshard in the cluster: + + .. code-block:: console + + $ tt replicaset vshard bootstrap myapp + +.. _basic_migrations_tt_write: + +Writing migrations +------------------ + +To perform migrations in the cluster, write them in Lua and publish to the cluster's +etcd configuration storage. + +Each migration file must return a Lua table with one object named ``apply``. 
+This object has one field -- ``scenario`` -- that stores the migration function: + +.. code-block:: lua + + local function apply_scenario() + -- migration code + end + + return { + apply = { + scenario = apply_scenario, + }, + } + +The migration unit is a single file: its ``scenario`` is executed as a whole. An error +that happens in any step of the ``scenario`` causes the entire migration to fail. + +Migrations are executed in lexicographical order. Thus, it's convenient to +use filenames that start with ordered numbers to define the migrations order, for example: + +.. code-block:: text + + 000001_create_space.lua + 000002_create_index.lua + 000003_alter_space.lua + +The default location where ``tt`` searches for migration files is ``/migrations/scenario``. +Create this subdirectory inside the ``tt`` environment. Then, create two migration files: + +- ``000001_create_writers_space.lua``: create a space, define its format, and + create a primary index. + + .. literalinclude:: /code_snippets/snippets/migrations/migrations/scenario/000001_create_writers_space.lua + :language: lua + :dedent: + + .. note:: + + Note the usage of the ``tt-migrations.helpers`` module. + In this example, its function ``register_sharding_key`` is used + to define a sharding key for the space. + +- ``000002_create_writers_index.lua``: add one more index. + + .. literalinclude:: /code_snippets/snippets/migrations/migrations/scenario/000002_create_writers_index.lua + :language: lua + :dedent: + +.. _basic_migrations_tt_publish: + +Publishing migrations +--------------------- + +To publish migrations to the etcd configuration storage, run ``tt migrations publish``: + +.. code-block:: console + + $ tt migrations publish "http://app_user:config_pass@localhost:2379/myapp" + • 000001_create_writers_space.lua: successfully published to key "000001_create_writers_space.lua" + • 000002_create_writers_index.lua: successfully published to key "000002_create_writers_index.lua" + +.. 
_basic_migrations_tt_apply: + +Applying migrations +------------------- + +To apply published migrations to the cluster, run ``tt migrations apply`` providing +a cluster user's credentials: + +.. code-block:: console + + $ tt migrations apply "http://app_user:config_pass@localhost:2379/myapp" \ + --tarantool-username=client --tarantool-password=secret + +.. important:: + + The cluster user must have enough access privileges to execute the migrations code. + +The output should look as follows: + +.. code-block:: console + + • router-001: + • 000001_create_writers_space.lua: successfully applied + • 000002_create_writers_index.lua: successfully applied + • storage-001: + • 000001_create_writers_space.lua: successfully applied + • 000002_create_writers_index.lua: successfully applied + • storage-002: + • 000001_create_writers_space.lua: successfully applied + • 000002_create_writers_index.lua: successfully applied + +The migrations are applied on all replica set leaders. Read-only replicas +receive the changes from the corresponding replica set leaders. + +Check the migrations status with ``tt migrations status``: + +.. code-block:: console + + $ tt migrations status "http://app_user:config_pass@localhost:2379/myapp" \ + --tarantool-username=client --tarantool-password=secret + • migrations centralized storage scenarios: + • 000001_create_writers_space.lua + • 000002_create_writers_index.lua + • migrations apply status on Tarantool cluster: + • router-001: + • 000001_create_writers_space.lua: APPLIED + • 000002_create_writers_index.lua: APPLIED + • storage-001: + • 000001_create_writers_space.lua: APPLIED + • 000002_create_writers_index.lua: APPLIED + • storage-002: + • 000001_create_writers_space.lua: APPLIED + • 000002_create_writers_index.lua: APPLIED + +To make sure that the space and indexes are created in the cluster, connect to the router +instance and retrieve the space information: + +.. code-block:: console + + $ tt connect myapp:router-001-a + +.. 
code-block:: tarantoolsession + + myapp:router-001-a> require('crud').schema('writers') + --- + - indexes: + 0: + unique: true + parts: + - fieldno: 1 + type: number + exclude_null: false + is_nullable: false + id: 0 + type: TREE + name: primary + 2: + unique: true + parts: + - fieldno: 4 + type: number + exclude_null: false + is_nullable: false + id: 2 + type: TREE + name: age + format: [{'name': 'id', 'type': 'number'}, {'type': 'number', 'name': 'bucket_id', + 'is_nullable': true}, {'name': 'name', 'type': 'string'}, {'name': 'age', 'type': 'number'}] + ... + +.. _basic_migrations_tt_next: + +Next steps +---------- + +Learn to write and perform data migration in :ref:`upgrade_migrations_tt`. \ No newline at end of file diff --git a/doc/platform/ddl_dml/migrations/centralized_migrations_tt.rst b/doc/platform/ddl_dml/migrations/centralized_migrations_tt.rst new file mode 100644 index 0000000000..db59261a7e --- /dev/null +++ b/doc/platform/ddl_dml/migrations/centralized_migrations_tt.rst @@ -0,0 +1,25 @@ +.. _centralized_migrations_tt: + +Centralized migrations with tt +============================== + +**Example on GitHub:** `migrations `_ + +In this section, you learn to use the centralized migration management mechanism +implemented in the Enterprise Edition of the :ref:`tt ` utility. + +The section includes the following tutorials: + +.. toctree:: + :maxdepth: 1 + + basic_migrations_tt + upgrade_migrations_tt + extend_migrations_tt + troubleshoot_migrations_tt + + +See also: + +- :ref:`tt migrations reference ` for the full list of command-line options. +- :ref:`tcm_cluster_migrations` to learn about managing migrations from |tcm_full_name|. \ No newline at end of file diff --git a/doc/platform/ddl_dml/migrations/extend_migrations_tt.rst b/doc/platform/ddl_dml/migrations/extend_migrations_tt.rst new file mode 100644 index 0000000000..d82d9e8148 --- /dev/null +++ b/doc/platform/ddl_dml/migrations/extend_migrations_tt.rst @@ -0,0 +1,103 @@ +.. 
_extend_migrations_tt: + +Extending the cluster +===================== + +**Example on GitHub:** `migrations `_ + +In this tutorial, you learn how to consistently define the data schema on newly +added cluster instances using the centralized migration management mechanism. + +.. _extend_migrations_tt_prereq: + +Prerequisites +------------- + +Before starting this tutorial, complete the :ref:`basic_migrations_tt` and :ref:`upgrade_migrations_tt`. +As a result, you have a sharded Tarantool EE cluster that uses an etcd-based configuration +storage. The cluster has a space with two indexes. + +.. _extend_migrations_tt_cluster: + +Extending the cluster +--------------------- + +Having all migrations in a centralized etcd storage, you can extend the cluster +and consistently define the data schema on new instances on the fly. + +Add one more storage replica set to the cluster. To do this, edit the cluster files in ``instances.enabled/myapp``: + +- ``instances.yml``: add the lines below to the end. + + .. literalinclude:: /code_snippets/snippets/migrations/instances.enabled/myapp/instances-3-storages.yml + :language: yaml + :start-at: storage-003-a: + :dedent: + +- ``source.yaml``: add the lines below to the end. + + .. literalinclude:: /code_snippets/snippets/migrations/instances.enabled/myapp/source-3-storages.yaml + :language: yaml + :start-at: storage-003: + :dedent: + +Publish the new cluster configuration to etcd: + +.. code-block:: console + + $ tt cluster publish "http://app_user:config_pass@localhost:2379/myapp/" source.yaml + +Run ``tt start`` to start up the new instances: + +.. code-block:: console + + $ tt start myapp + • The instance myapp:router-001-a (PID = 61631) is already running. + • The instance myapp:storage-001-a (PID = 61632) is already running. + • The instance myapp:storage-001-b (PID = 61634) is already running. + • The instance myapp:storage-002-a (PID = 61639) is already running. 
+ • The instance myapp:storage-002-b (PID = 61640) is already running. + • Starting an instance [myapp:storage-003-a]... + • Starting an instance [myapp:storage-003-b]... + +Now the cluster contains three storage replica sets. + + +.. _extend_migrations_tt_apply: + +Applying migrations to the new replica set +------------------------------------------ + +The new replica set -- ``storage-003`` -- is just started and has no data schema yet. +Apply all stored migrations to the cluster to load the same data schema to the new replica set: + +.. code-block:: console + + $ tt migrations apply "http://app_user:config_pass@localhost:2379/myapp" \ + --tarantool-username=client --tarantool-password=secret \ + --replicaset=storage-003 + +.. note:: + + You can also apply migrations without specifying the replica set. All published + migrations are already applied on other replica sets, so ``tt`` skips the + operation on them. + + .. code-block:: console + + $ tt migrations apply "http://app_user:config_pass@localhost:2379/myapp" \ + --tarantool-username=client --tarantool-password=secret + +To make sure that the space exists on the new instances, connect to ``storage-003-a`` +and check ``box.space.writers``: + +.. code-block:: console + + $ tt connect myapp:storage-003-a + +.. code-block:: tarantoolsession + + myapp:storage-003-a> box.space.writers ~= nil + --- + - true + ... diff --git a/doc/platform/ddl_dml/images/ddl-state.png b/doc/platform/ddl_dml/migrations/images/ddl-state.png similarity index 100% rename from doc/platform/ddl_dml/images/ddl-state.png rename to doc/platform/ddl_dml/migrations/images/ddl-state.png diff --git a/doc/platform/ddl_dml/migrations/index.rst b/doc/platform/ddl_dml/migrations/index.rst new file mode 100644 index 0000000000..ffdae15d40 --- /dev/null +++ b/doc/platform/ddl_dml/migrations/index.rst @@ -0,0 +1,165 @@ +.. 
_migrations: + +Migrations +========== + +**Migration** refers to any change in a data schema: adding or removing a field, +creating or dropping an index, changing a field format, and so on. Space creation +is also a migration. Using migrations, you can track the evolution of your +data schema since its initial state. In Tarantool, migrations are presented as Lua +code that alters the data schema using the built-in Lua API. + +There are two types of migrations: + +- *simple migrations* don't require additional actions on existing data +- *complex migrations* include both schema and data changes + +.. _migrations_simple: + +Simple migrations +----------------- + +There are two types of schema migration that do not require data migration: + +- *Creating an index*. A new index can be created at any time. To learn more about + index creation, see :ref:`concepts-data_model_indexes` and the :ref:`box_space-create_index` reference. +- *Adding a field to the end of a space*. To add a field, update the space format so + that it includes all its fields and also the new field. For example: + + .. code-block:: lua + + local users = box.space.writers + local fmt = users:format() + + table.insert(fmt, { name = 'age', type = 'number', is_nullable = true }) + users:format(fmt) + + The field must have the ``is_nullable`` parameter. Otherwise, an error occurs + if the space contains tuples of old format. + + .. note:: + + After creating a new field, you probably want to fill it with data. + The `tarantool/moonwalker `_ + module is useful for this task. + +.. _migrations_complex: + +Complex migrations +------------------ + +Other types of migrations are more complex and require additional actions to +maintain data consistency. + +Migrations are possible in two cases: + +- When Tarantool starts, and no client uses the database yet +- During request processing, when active clients are already using the database + +For the first case, it is enough to write and test the migration code. 
+The most difficult task is to migrate data when there are active clients. +You should keep it in mind when you initially design the data schema. + +We identify the following problems if there are active clients: + +- Associated data can change atomically. + +- The system should be able to transfer data using both the new schema and the old one. + +- When data is being transferred to a new space, data access should consider + that the data might be in one space or another. + +- Write requests must not interfere with the migration. + A common approach is to write according to the new data schema. + +These issues may or may not be relevant depending on your application and +its availability requirements. + +Tarantool offers the following features that make migrations easier and safer: + +- *Transaction mechanism*. It is useful when writing a migration, + because it allows you to work with the data atomically. But before using + the transaction mechanism, you should explore its limitations. + For details, see the section about :ref:`transactions `. + +- ``space:upgrade()`` *function* (EE only). With the help of ``space:upgrade()``, + you can enable compression and migrate, including already created tuples. + For details, check the :ref:`Upgrading space schema ` section. + +- *Centralized migration management mechanism* (EE only). Implemented + in the Enterprise version of the :ref:`tt ` utility and in :ref:`tcm`, + this mechanism enables migration execution and tracking in the replication + clusters. For details, see :ref:`migrations_centralized`. + +.. _migrations_apply: + +Applying migrations +------------------- + +The migration code is executed on a running Tarantool instance. +Important: no method guarantees you transactional application of migrations +on the whole cluster. + +**Method 1**: include migrations in the application code + +This is quite simple: when you reload the code, the data is migrated at the right moment, +and the database schema is updated. 
+However, this method may not work for everyone. +You may not be able to restart Tarantool or update the code using the hot-reload mechanism. + +**Method 2**: the :ref:`tt ` utility + +Connect to the necessary instance using ``tt connect``. + +.. code:: console + + $ tt connect admin:password@localhost:3301 + +- If your migration is written in a Lua file, you can execute it + using ``dofile()``. Call this function and specify the path to the + migration file as the first argument. It looks like this: + + .. code-block:: tarantoolsession + + tarantool> dofile('0001-delete-space.lua') + --- + ... + +- (or) Copy the migration script code, + paste it into the console, and run it. + +You can also connect to the instance and execute the migration script in a single call: + +.. code:: console + + $ tt connect admin:password@localhost:3301 -f 0001-delete-space.lua + +.. _migrations_centralized: + +Centralized migration management +-------------------------------- + +.. admonition:: Enterprise Edition + :class: fact + + Centralized migration management is available in the `Enterprise Edition `_ only. + +Tarantool EE offers a mechanism for centralized migration management in replication +clusters that use etcd as a :ref:`configuration storage `. +The mechanism uses the same etcd storage to store migrations and applies them +across the entire Tarantool cluster. This ensures migration consistency +in the cluster and enables migration history tracking. + +The centralized migration management mechanism is implemented in the Enterprise +version of the :ref:`tt ` utility and in :ref:`tcm`. + +To learn how to manage migrations in Tarantool EE clusters from the command line, +see :ref:`centralized_migrations_tt`. To learn how to use the mechanism from the |tcm| +web interface, see the :ref:`tcm_cluster_migrations` |tcm| documentation page. + +.. 
toctree:: + :maxdepth: 1 + + centralized_migrations_tt + space_upgrade + diff --git a/doc/platform/ddl_dml/space_upgrade.rst b/doc/platform/ddl_dml/migrations/space_upgrade.rst similarity index 99% rename from doc/platform/ddl_dml/space_upgrade.rst rename to doc/platform/ddl_dml/migrations/space_upgrade.rst index 2776d39db9..13f266a783 100644 --- a/doc/platform/ddl_dml/space_upgrade.rst +++ b/doc/platform/ddl_dml/migrations/space_upgrade.rst @@ -1,4 +1,5 @@ .. _enterprise-space_upgrade: +.. _box_space-upgrade: Upgrading space schema ====================== @@ -20,7 +21,7 @@ If you need to change a data schema, there are several possible cases: To solve the task of migrating the data, you can: -* :ref:`Migrate data ` to a new space manually. +* :ref:`Migrate data ` to a new space manually. * Use the ``space:upgrade()`` feature. diff --git a/doc/platform/ddl_dml/migrations/troubleshoot_migrations_tt.rst b/doc/platform/ddl_dml/migrations/troubleshoot_migrations_tt.rst new file mode 100644 index 0000000000..dd655b9fb9 --- /dev/null +++ b/doc/platform/ddl_dml/migrations/troubleshoot_migrations_tt.rst @@ -0,0 +1,96 @@ +.. _centralized_migrations_tt_troubleshoot: + +Troubleshooting migrations +========================== + +The centralized migrations mechanism allows troubleshooting migration issues using +dedicated ``tt migration`` options. When troubleshooting migrations, remember that +any unfinished or failed migration can bring the data schema into to inconsistency. +Additional steps may be needed to fix this. + +.. warning:: + + The options used for migration troubleshooting can cause migration inconsistency + in the cluster. Use them only for local development and testing purposes. + +.. _centralized_migrations_tt_troubleshoot_publish: + +Incorrect migration published +----------------------------- + +If an incorrect migration was published to etcd but wasn't applied yet, +fix the migration file and publish it again with the ``--overwrite`` option: + +.. 
code-block:: console + + $ tt migrations publish "http://app_user:config_pass@localhost:2379/myapp" \ + 000001_create_space.lua --overwrite + +If the migration that needs a fix isn't the last in the lexicographical order, +add also ``--ignore-order-violation``: + +.. code-block:: console + + $ tt migrations publish "http://app_user:config_pass@localhost:2379/myapp" \ + 000001_create_space.lua --overwrite --ignore-order-violation + +If a migration was published by mistake and wasn't applied yet, you can delete it +from etcd using ``tt migrations remove``: + +.. code-block:: console + + $ tt migrations remove "http://app_user:config_pass@localhost:2379/myapp" \ + --migration 000003_not_needed.lua + +.. _centralized_migrations_tt_troubleshoot_apply: + +Incorrect migration applied +--------------------------- + +If the migration is already applied, publish the fixed version and apply it with +the ``--force-reapply`` option: + +.. code-block:: console + + $ tt migrations apply "http://app_user:config_pass@localhost:2379/myapp" \ + --tarantool-username=client --tarantool-password=secret \ + --force-reapply + +If execution of the incorrect migration version has failed, you may also need to add +the ``--ignore-preceding-status`` option: + +When you reapply a migration, ``tt`` checks the statuses of preceding migrations +to ensure consistency. To skip this check, add the ``--ignore-preceding-status`` option: + +.. code-block:: console + + $ tt migrations apply "http://app_user:config_pass@localhost:2379/myapp" \ + --tarantool-username=client --tarantool-password=secret \ + --migration=00003_alter_space.lua + --force-reapply --ignore-preceding-status + +.. _centralized_migrations_tt_troubleshoot_stop: + +Migration execution takes too long +---------------------------------- + +To interrupt migration execution on the cluster, use ``tt migrations stop``: + +.. 
code-block:: console + + $ tt migrations stop "http://app_user:config_pass@localhost:2379/myapp" \ + --tarantool-username=client --tarantool-password=secret + +You can adjust the maximum migration execution time using the ``--execution-timeout`` +option of ``tt migrations apply``: + +.. code-block:: console + + $ tt migrations apply "http://app_user:config_pass@localhost:2379/myapp" \ + --tarantool-username=client --tarantool-password=secret \ + --execution-timeout=60 + +.. note:: + + If a migration timeout is reached, you may need to call ``tt migrations stop`` + to cancel requests that were sent when applying migrations. \ No newline at end of file diff --git a/doc/platform/ddl_dml/migrations/upgrade_migrations_tt.rst b/doc/platform/ddl_dml/migrations/upgrade_migrations_tt.rst new file mode 100644 index 0000000000..234e58e915 --- /dev/null +++ b/doc/platform/ddl_dml/migrations/upgrade_migrations_tt.rst @@ -0,0 +1,152 @@ +.. _upgrade_migrations_tt: + +Data migrations with space.upgrade() +==================================== + +**Example on GitHub:** `migrations `_ + +In this tutorial, you learn to write migrations that include data migration using +the ``space.upgrade()`` function. + +.. _upgrade_migrations_tt_prereq: + +Prerequisites +------------- + +Before starting this tutorial, complete the :ref:`basic_migrations_tt`. +As a result, you have a sharded Tarantool EE cluster that uses an etcd-based configuration +storage. The cluster has a space with two indexes. + +.. _upgrade_migrations_tt_write: + +Writing a complex migration +--------------------------- + +Complex migrations require data migration along with schema migration. Connect to +the router instance and insert some tuples into the space before proceeding to the next steps. + +.. code-block:: console + + $ tt connect myapp:router-001-a + +.. 
code-block:: tarantoolsession + + myapp:router-001-a> require('crud').insert_object_many('writers', { + {id = 1, name = 'Haruki Murakami', age = 75}, + {id = 2, name = 'Douglas Adams', age = 49}, + {id = 3, name = 'Eiji Mikage', age = 41}, + }, {noreturn = true}) + +The next migration changes the space format incompatibly: instead of one ``name`` +field, the new format includes two fields ``first_name`` and ``last_name``. +To apply this migration, you need to change each tuple's structure preserving the stored +data. The :ref:`space.upgrade ` function helps with this task. + +Create a new file ``000003_alter_writers_space.lua`` in ``/migrations/scenario``. +Prepare its initial structure the same way as in previous migrations: + +.. code-block:: lua + + local function apply_scenario() + -- migration code + end + return { + apply = { + scenario = apply_scenario, + }, + } + +Start the migration function with the new format description: + +.. literalinclude:: /code_snippets/snippets/migrations/migrations/scenario/000003_alter_writers_space.lua + :language: lua + :start-at: local function apply_scenario() + :end-at: box.space.writers.index.age:drop() + :dedent: + +.. note:: + + ``box.space.writers.index.age:drop()`` drops an existing index. This is done + because indexes rely on field numbers and may break during this format change. + If you need the ``age`` field indexed, recreate the index after applying the + new format. + +Next, create a stored function that transforms tuples to fit the new format. +In this case, the function extracts the first and the last name from the ``name`` field +and returns a tuple of the new format: + +.. literalinclude:: /code_snippets/snippets/migrations/migrations/scenario/000003_alter_writers_space.lua + :language: lua + :start-at: box.schema.func.create + :end-before: local future = space:upgrade + :dedent: + +Finally, call ``space:upgrade()`` with the new format and the transformation function +as its arguments. 
Here is the complete migration code: + +.. literalinclude:: /code_snippets/snippets/migrations/migrations/scenario/000003_alter_writers_space.lua + :language: lua + :dedent: + +Learn more about ``space.upgrade()`` in :ref:`enterprise-space_upgrade`. + +.. _upgrade_migrations_tt_publish: + +Publishing the migration +------------------------ + +Publish the new migration to etcd. + +.. code-block:: console + + $ tt migrations publish "http://app_user:config_pass@localhost:2379/myapp" \ + migrations/scenario/000003_alter_writers_space.lua + +.. note:: + + You can also publish all migrations from the default location ``/migrations/scenario``. + All other migrations stored in this directory are already published, so ``tt`` + skips them. + + .. code-block:: console + + $ tt migrations publish "http://app_user:config_pass@localhost:2379/myapp" + + +.. _upgrade_migrations_tt_apply: + +Applying the migration +---------------------- + +Apply the published migrations: + +.. code-block:: console + + $ tt migrations apply "http://app_user:config_pass@localhost:2379/myapp" \ + --tarantool-username=client --tarantool-password=secret + +Connect to the router instance and check that the space and its tuples have the new format: + +.. code-block:: console + + $ tt connect myapp:router-001-a + +.. code-block:: tarantoolsession + + myapp:router-001-a> require('crud').get('writers', 2) + --- + - rows: [2, 401, 'Douglas', 'Adams', 49] + metadata: [{'name': 'id', 'type': 'number'}, {'name': 'bucket_id', 'type': 'number'}, + {'name': 'first_name', 'type': 'string'}, {'name': 'last_name', 'type': 'string'}, + {'name': 'age', 'type': 'number'}] + - null + ... + + +.. _upgrade_migrations_tt_next: + +Next steps +---------- + +Learn to use migrations for data schema definition on new instances added to the cluster +in :ref:`extend_migrations_tt`. 
\ No newline at end of file diff --git a/doc/reference/reference_lua/box_space.rst b/doc/reference/reference_lua/box_space.rst index cc51bcba00..5795ec8489 100644 --- a/doc/reference/reference_lua/box_space.rst +++ b/doc/reference/reference_lua/box_space.rst @@ -97,6 +97,9 @@ Below is a list of all ``box.space`` functions and members. * - :doc:`./box_space/update` - Update a tuple + * - :ref:`box_space-upgrade` + - Upgrade the space format and tuples + * - :doc:`./box_space/upsert` - Update a tuple diff --git a/doc/tooling/tt_cli/commands.rst b/doc/tooling/tt_cli/commands.rst index 3c7adf9130..1bc9d09678 100644 --- a/doc/tooling/tt_cli/commands.rst +++ b/doc/tooling/tt_cli/commands.rst @@ -58,6 +58,8 @@ help for the given command. - Print instance logs * - :doc:`logrotate ` - Rotate instance logs + * - :doc:`migrations ` + - Manage migrations * - :doc:`pack ` - Package an application * - :doc:`play ` @@ -112,6 +114,7 @@ help for the given command. kill log logrotate + migrations pack play replicaset diff --git a/doc/tooling/tt_cli/migrations.rst b/doc/tooling/tt_cli/migrations.rst new file mode 100644 index 0000000000..f7a590b3cb --- /dev/null +++ b/doc/tooling/tt_cli/migrations.rst @@ -0,0 +1,486 @@ +.. _tt-migrations: + +Managing centralized migrations +=============================== + +.. admonition:: Enterprise Edition + :class: fact + + This command is supported by the `Enterprise Edition `_ only. + +.. code-block:: console + + $ tt migrations COMMAND [COMMAND_OPTION ...] + +``tt migrations`` manages :ref:`centralized migrations ` +in a Tarantool EE cluster. See :ref:`centralized_migrations_tt` for a detailed guide +on using the centralized migrations mechanism. + +.. important:: + + Only Tarantool EE clusters with etcd centralized configuration storage are supported. + +``COMMAND`` is one of the following: + + +* :ref:`publish ` +* :ref:`apply ` +* :ref:`status ` +* :ref:`stop ` +* :ref:`remove ` + + +.. _tt-migrations-publish: + +publish +------- + +.. 
code-block:: console + + $ tt migrations publish ETCD_URI [MIGRATIONS_DIR | MIGRATION_FILE] [OPTION ...] + +``tt migrations publish`` sends the migration files to the cluster's centralized +configuration storage for future execution. + +By default, the command sends all files stored in ``migrations/`` inside the current +directory. + +.. code-block:: console + + $ tt migrations publish "https://user:pass@localhost:2379/myapp" + +To select another directory with migration files, provide a path to it as the command +argument: + +.. code-block:: console + + $ tt migrations publish "https://user:pass@localhost:2379/myapp" my_migrations + +To publish a single migration from a file, use its name or path as the command argument: + +.. code-block:: console + + $ tt migrations publish "https://user:pass@localhost:2379/myapp" migrations/000001_create_space.lua + +Optionally, you can provide a key to use as a migration identifier instead of the filename: + +.. code-block:: console + + $ tt migrations publish "https://user:pass@localhost:2379/myapp" file.lua \ + --key=000001_create_space.lua + +When publishing migrations, ``tt`` performs checks for: + +- Syntax errors in migration files. To skip syntax check, add the ``--skip-syntax-check`` option. +- Existence of migrations with same names. To overwrite an existing migration with + the same name, add the ``--overwirte`` option. +- Migration names order. By default, ``tt migrations`` only adds new migrations + to the end of the migrations list ordered lexicographically. For example, if + migrations ``001.lua`` and ``003.lua`` are already published, an attempt to publish + ``002.lua`` will fail. To force publishing migrations disregarding the order, + add the ``--ignore-order-violation`` option. + +.. warning:: + + Using the options that ignore checks when publishing migration may cause + migration inconsistency in the cluster. + + +.. _tt-migrations-apply: + +apply +----- + +.. 
code-block:: console + + $ tt migrations apply ETCD_URI [OPTION ...] + +``tt migrations apply`` applies :ref:`published ` migrations +to the cluster. It executes all migrations from the cluster's centralized +configuration storage on all its read-write instances (replica set leaders). + +.. code-block:: console + + $ tt migrations apply "https://user:pass@localhost:2379/myapp" \ + --tarantool-username=admin --tarantool-password=pass + +To apply a single published migration, pass its name in the ``--migration`` option: + +.. code-block:: console + + $ tt migrations apply "https://user:pass@localhost:2379/myapp" \ + --tarantool-username=admin --tarantool-password=pass \ + --migration=000001_create_space.lua + +To apply migrations on a single replica set, specify the ``replicaset`` option: + +.. code-block:: console + + $ tt migrations apply "https://user:pass@localhost:2379/myapp" \ + --tarantool-username=admin --tarantool-password=pass \ + --replicaset=storage-001 + +The command also provides options for migration troubleshooting: ``--ignore-order-violation``, +``--force-reapply``, and ``--ignore-preceding-status``. Learn to use them in +:ref:`centralized_migrations_tt_troubleshoot`. + +.. warning:: + + The use of migration troubleshooting options may lead to migration inconsistency + in the cluster. Use them only for local development and testing purposes. + + +.. _tt-migrations-status: + +status +------ + +.. code-block:: console + + $ tt migrations status ETCD_URI [OPTION ...] + +``tt migrations status`` prints the list of migrations published to the centralized +storage and the result of their execution on the cluster instances. 
+ +Possible migration statuses are: + +- ``APPLY_STARTED`` -- the migration execution has started but not completed yet + or has been interrupted with :ref:`tt migrations stop `` +- ``APPLIED`` -- the migration is successfully applied on the instance +- ``FAILED`` -- there were errors during the migration execution on the instance + +To get the list of migrations stored in the given etcd storage and information about +their execution on the cluster, run: + +.. code-block:: console + + $ tt migrations status "https://user:pass@localhost:2379/myapp" \ + --tarantool-username=admin --tarantool-password=pass + +If the cluster uses SSL encryption, add SSL options. Learn more in :ref:`Authentication `. + +Use the ``--migration`` and ``--replicaset`` options to get information about specific +migrations or replica sets: + +.. code-block:: console + + $ tt migrations status "https://user:pass@localhost:2379/myapp" \ + --tarantool-username=admin --tarantool-password=pass \ + --replicaset=storage-001 --migration=000001_create_writers_space.lua + +The ``--display-mode`` option allows to tailor the command output: + +- with ``--display-mode config-storage``, the command prints only the list of migrations + published to the centralized storage. +- with ``--display-mode cluster``, the command prints only the migration statuses + on the cluster instances. + +To find out the results of a migration execution on a specific replica set in the cluster, run: + +.. code-block:: console + + $ tt migrations status "https://user:pass@localhost:2379/myapp" \ + --tarantool-username=admin --tarantool-password=pass \ + --replicaset=storage-001 --display-mode=cluster + + +.. _tt-migrations-stop: + +stop +---- + +.. code-block:: console + + $ tt migrations stop ETCD_URI [OPTION ...] + +``tt migrations stop`` stops the execution of migrations in the cluster. + +.. warning:: + + Calling ``tt migration stop`` may cause migration inconsistency in the cluster. 
+ +To stop the execution of a migration currently running in the cluster: + +.. code-block:: console + + $ tt migrations stop "https://user:pass@localhost:2379/myapp" \ + --tarantool-username=admin --tarantool-password=pass + +``tt migrations stop`` interrupts a single migration. If you call it to interrupt +the process that applies multiple migrations, the ones completed before the call +receive the ``APPLIED`` status. The migration is interrupted by the call remains in +``APPLY_STARTED``. + +.. _tt-migrations-remove: + +remove +------ + +.. code-block:: console + + $ tt migrations remove ETCD_URI [OPTION ...] + +``tt migrations remove`` removes published migrations from the centralized storage. +With additional options, it can also remove the information about the migration execution +on the cluster instances. + +To remove all migrations from a specified centralized storage: + +.. code-block:: console + + $ tt migrations remove "https://user:pass@localhost:2379/myapp" \ + --tarantool-username=admin --tarantool-password=pass + +To remove a specific migration, pass its name in the ``--migration`` option: + +.. code-block:: console + + $ tt migrations remove "https://user:pass@localhost:2379/myapp" \ + --tarantool-username=admin --tarantool-password=pass \ + --migration=000001_create_writers_space.lua + +Before removing migrations, the command checks their :ref:`status ` +on the cluster. To ignore the status and remove migrations anyway, add the +``--force-remove-on=config-storage`` option: + +.. code-block:: console + + $ tt migrations remove "https://user:pass@localhost:2379/myapp" \ + --force-remove-on=config-storage + +.. note:: + + In this case, cluster credentials are not required. + +To remove migration execution information from the cluster (clear the migration status), +use the ``--force-remove-on=cluster`` option: + +.. 
code-block:: console + + $ tt migrations remove "https://user:pass@localhost:2379/myapp" \ + --tarantool-username=admin --tarantool-password=pass \ + --force-remove-on=cluster + +To clear all migration information from the centralized storage and cluster, +use the ``--force-remove-on=all`` option: + +.. code-block:: console + + $ tt migrations remove "https://user:pass@localhost:2379/myapp" \ + --tarantool-username=admin --tarantool-password=pass \ + --force-remove-on=all + +.. _tt-migrations-auth: + +Authentication +-------------- + +Since ``tt migrations`` operates migrations via a centralizes etcd storage, it +needs credentials to access this storage. There are two ways to pass etcd credentials: + +- command-line options ``--config-storage-username`` and ``--config-storage-password`` +- the etcd URI, for example, ``https://user:pass@localhost:2379/myapp`` + +Credentials specified in the URI have a higher priority. + +For commands that connect to the cluster (that is, all except ``publish``), Tarantool +credentials are also required. The are passed in the ``--tarantool-username`` and +``--tarantool-password`` options. + +If the cluster uses SSL traffic encryption, provide the necessary connection +parameters in the ``--tarantool-ssl*`` options: ``--tarantool-sslcertfile``, +``--tarantool-sslkeyfile``, and other. All options are listed in :ref:`tt-migrations-options`. + +.. _tt-migrations-options: + +Options +------- + +.. option:: --acquire-lock-timeout INT + + **Applicable to:** ``apply`` + + Migrations fiber lock acquire timeout in seconds. Default: 60. + Fiber lock is used to prevent concurrent migrations run + +.. option:: --config-storage-password STRING + + A password for connecting to the centralized migrations storage (etcd). + + See also: :ref:`tt-migrations-auth`. + +.. option:: --config-storage-username STRING + + A username for connecting to the centralized migrations storage (etcd). + + See also: :ref:`tt-migrations-auth`. + +.. 
option:: --display-mode STRING + + **Applicable to:** ``status`` + + Display only specific information. Possible values: + + - ``config-storage`` -- information about migrations published to the centralized storage. + - ``cluster`` -- information about migration applied on the cluster. + + See also: :ref:`tt-migrations-status`. + +.. option:: --execution-timeout INT + + **Applicable to:** ``apply``, ``remove``, ``status``, ``stop`` + + A timeout for completing the operation on a single Tarantool instance, in seconds. + Default values: + + - ``3`` for ``remove``, ``status``, and ``stop`` + - ``3600`` for ``apply`` + +.. option:: --force-reapply + + **Applicable to:** ``apply`` + + Apply migrations disregarding their previous status. + + .. warning:: + + Using this option may lead to migrations inconsistency in the cluster. + +.. option:: --force-remove-on STRING + + **Applicable to:** ``remove`` + + Remove migrations disregarding their status. Possible values: + + - ``config-storage``: remove migrations on etcd centralized migrations storage disregarding the cluster apply status. + - ``cluster``: remove migrations status info only on a Tarantool cluster. + - ``all`` to execute both ``config-storage`` and ``cluster`` force removals. + + .. warning:: + + Using this option may lead to migrations inconsistency in the cluster. + +.. option:: --ignore-order-violation + + **Applicable to:** ``apply``, ``publish`` + + Skip migration scenarios order check before publish. + + .. warning:: + + Using this option may lead to migrations inconsistency in the cluster. + +.. option:: --ignore-preceding-status + + **Applicable to:** ``apply`` + + Skip preceding migrations status check on apply. + + .. warning:: + + Using this option may lead to migrations inconsistency in the cluster. + +.. option:: --key STRING + + **Applicable to:** ``publish`` + + Put scenario to ``//migrations/scenario/`` etcd key instead. + Only for single file publish. + +.. 
option:: --migration STRING + + **Applicable to:** ``apply``, ``remove``, ``status`` + + A migration to apply, remove, or check status. + +.. option:: --overwrite + + **Applicable to:** ``publish`` + + overwrite existing migration storage keys. + + .. warning:: + + Using this option may lead to migrations inconsistency in the cluster. + +.. option:: --replicaset STRING + + **Applicable to:** ``apply``, ``remove``, ``status``, ``stop`` + + Execute the operation only on the specified replica set. + +.. option:: --skip-syntax-check + + **Applicable to:** ``publish`` + + Skip syntax check before publish. + + .. warning:: + + Using this option may cause further ``tt migrations`` calls to fail. + +.. option:: --tarantool-auth STRING + + **Applicable to:** ``apply``, ``remove``, ``status``, ``stop`` + + Authentication type used to connect to the cluster instances. + +.. option:: --tarantool-connect-timeout INT + + **Applicable to:** ``apply``, ``remove``, ``status``, ``stop`` + + Tarantool cluster instances connection timeout, in seconds. Default: 3. + +.. option:: --tarantool-password STRING + + **Applicable to:** ``apply``, ``remove``, ``status``, ``stop`` + + A password used to connect to the cluster instances. + +.. option:: --tarantool-sslcafile STRING + + **Applicable to:** ``apply``, ``remove``, ``status``, ``stop`` + + SSL CA file used to connect to the cluster instances. + +.. option:: --tarantool-sslcertfile STRING + + **Applicable to:** ``apply``, ``remove``, ``status``, ``stop`` + + SSL cert file used to connect to the cluster instances. + +.. option:: --tarantool-sslciphers STRING + + **Applicable to:** ``apply``, ``remove``, ``status``, ``stop`` + + Colon-separated list of SSL ciphers used to connect to the cluster instances. + +.. option:: --tarantool-sslkeyfile STRING + + **Applicable to:** ``apply``, ``remove``, ``status``, ``stop`` + + SSL key file used to connect to the cluster instances. + +.. 
option:: --tarantool-sslpassword STRING + + **Applicable to:** ``apply``, ``remove``, ``status``, ``stop`` + + SSL key file password used to connect to the cluster instances. + +.. option:: --tarantool-sslpasswordfile STRING + + **Applicable to:** ``apply``, ``remove``, ``status``, ``stop`` + + File with list of password to SSL key file used to connect to the cluster instances. + +.. option:: --tarantool-use-ssl + + **Applicable to:** ``apply``, ``remove``, ``status``, ``stop`` + + Whether SSL is used to connect to the cluster instances. + +.. option:: --tarantool-username STRING + + **Applicable to:** ``apply``, ``remove``, ``status``, ``stop`` + + A username for connecting to the Tarantool cluster instances.