diff --git a/changelog/51986.fixed b/changelog/51986.fixed new file mode 100644 index 000000000000..2ac8623e8594 --- /dev/null +++ b/changelog/51986.fixed @@ -0,0 +1 @@ +Fixed Salt master does not renew token diff --git a/changelog/57561.fixed b/changelog/57561.fixed new file mode 100644 index 000000000000..57ca72619ee6 --- /dev/null +++ b/changelog/57561.fixed @@ -0,0 +1 @@ +Fixed vault module fetching more than one secret in one run with single-use tokens diff --git a/changelog/58174.fixed b/changelog/58174.fixed new file mode 100644 index 000000000000..e20599211094 --- /dev/null +++ b/changelog/58174.fixed @@ -0,0 +1 @@ +Fixed Vault verify option to work on minions when only specified in master config diff --git a/changelog/58580.fixed b/changelog/58580.fixed new file mode 100644 index 000000000000..b86d0ac8d4b1 --- /dev/null +++ b/changelog/58580.fixed @@ -0,0 +1 @@ +Fixed vault command errors configured locally diff --git a/changelog/60779.fixed b/changelog/60779.fixed new file mode 100644 index 000000000000..597117d1f18e --- /dev/null +++ b/changelog/60779.fixed @@ -0,0 +1 @@ +Fixed sdb.get_or_set_hash with Vault single-use tokens diff --git a/changelog/62380.fixed b/changelog/62380.fixed new file mode 100644 index 000000000000..839ec661d1a5 --- /dev/null +++ b/changelog/62380.fixed @@ -0,0 +1 @@ +Fixed Vault session storage to allow unlimited use tokens diff --git a/changelog/62823.added b/changelog/62823.added new file mode 100644 index 000000000000..cdce46c5b4e8 --- /dev/null +++ b/changelog/62823.added @@ -0,0 +1 @@ +Added Vault AppRole and identity issuance to minions diff --git a/changelog/62825.added b/changelog/62825.added new file mode 100644 index 000000000000..8935d16d2373 --- /dev/null +++ b/changelog/62825.added @@ -0,0 +1 @@ +Added Vault AppRole auth mount path configuration option diff --git a/changelog/62828.added b/changelog/62828.added new file mode 100644 index 000000000000..d848300f676b --- /dev/null +++ b/changelog/62828.added @@ -0,0 +1 
@@ +Added distribution of Vault authentication details via response wrapping diff --git a/changelog/63406.added b/changelog/63406.added new file mode 100644 index 000000000000..25e0a5341df9 --- /dev/null +++ b/changelog/63406.added @@ -0,0 +1 @@ +Added Vault token lifecycle management diff --git a/changelog/63440.added b/changelog/63440.added new file mode 100644 index 000000000000..a3fdd865d755 --- /dev/null +++ b/changelog/63440.added @@ -0,0 +1 @@ +Added Vault lease management utility diff --git a/doc/_ext/vaultpolicylexer.py b/doc/_ext/vaultpolicylexer.py new file mode 100644 index 000000000000..4e4acb0c2ac9 --- /dev/null +++ b/doc/_ext/vaultpolicylexer.py @@ -0,0 +1,25 @@ +from pygments.lexer import bygroups, inherit +from pygments.lexers.configs import TerraformLexer +from pygments.token import Keyword, Name, Punctuation, Whitespace + + +class VaultPolicyLexer(TerraformLexer): + aliases = ["vaultpolicy"] + filenames = ["*.hcl"] + mimetypes = ["application/x-hcl-policy"] + + tokens = { + "basic": [ + inherit, + ( + r"(path)(\s+)(\".*\")(\s+)(\{)", + bygroups( + Keyword.Reserved, Whitespace, Name.Variable, Whitespace, Punctuation + ), + ), + ], + } + + +def setup(app): + app.add_lexer("vaultpolicy", VaultPolicyLexer) diff --git a/doc/conf.py b/doc/conf.py index bd5f35d11330..ce7d4a3f69fe 100644 --- a/doc/conf.py +++ b/doc/conf.py @@ -151,7 +151,8 @@ "sphinx.ext.intersphinx", "httpdomain", "youtube", - "saltrepo" + "saltrepo", + "vaultpolicylexer", #'saltautodoc', # Must be AFTER autodoc #'shorturls', ] diff --git a/doc/ref/modules/all/index.rst b/doc/ref/modules/all/index.rst index cbd8b0cdc52f..7083ade7ff9e 100644 --- a/doc/ref/modules/all/index.rst +++ b/doc/ref/modules/all/index.rst @@ -493,6 +493,7 @@ execution modules vagrant varnish vault + vault_db vbox_guest vboxmanage vcenter diff --git a/doc/ref/modules/all/salt.modules.vault_db.rst b/doc/ref/modules/all/salt.modules.vault_db.rst new file mode 100644 index 000000000000..66f542ade709 --- /dev/null +++ 
b/doc/ref/modules/all/salt.modules.vault_db.rst @@ -0,0 +1,5 @@ +salt.modules.vault_db +===================== + +.. automodule:: salt.modules.vault_db + :members: diff --git a/doc/ref/states/all/index.rst b/doc/ref/states/all/index.rst index 13ff645b59fb..c6120ed00a4d 100644 --- a/doc/ref/states/all/index.rst +++ b/doc/ref/states/all/index.rst @@ -319,6 +319,7 @@ state modules user vagrant vault + vault_db vbox_guest victorops virt diff --git a/doc/ref/states/all/salt.states.vault_db.rst b/doc/ref/states/all/salt.states.vault_db.rst new file mode 100644 index 000000000000..59633d601bef --- /dev/null +++ b/doc/ref/states/all/salt.states.vault_db.rst @@ -0,0 +1,5 @@ +salt.states.vault_db +==================== + +.. automodule:: salt.states.vault_db + :members: diff --git a/salt/modules/vault.py b/salt/modules/vault.py index 8abd7bb04d7f..7ef1dc77bba6 100644 --- a/salt/modules/vault.py +++ b/salt/modules/vault.py @@ -1,5 +1,6 @@ """ Functions to interact with Hashicorp Vault. +=========================================== :maintainer: SaltStack :maturity: new @@ -13,299 +14,763 @@ [salt.pillar][CRITICAL][14337] Pillar render error: Failed to load ext_pillar vault: {'error': "request() got an unexpected keyword argument 'json'"} -:configuration: The salt-master must be configured to allow peer-runner - configuration, as well as configuration for the module. - - Add this segment to the master configuration file, or - /etc/salt/master.d/vault.conf: +Configuration +------------- + +In addition to the module configuration, it is required for the Salt master +to be configured to allow peer runs in order to use the Vault integration. + +.. versionchanged:: 3007.0 + + The ``vault`` configuration structure has changed significantly to account + for many new features. If found, the old structure will be automatically + translated to the new one. + + **Please update your peer_run configuration** to take full advantage of the + updated modules. 
The old endpoint (``vault.generate_token``) will continue + to work, but result in unnecessary roundtrips once your minions have been + updated. + +To allow minions to pull configuration and credentials from the Salt master, +add this segment to the master configuration file: + +.. code-block:: yaml + + peer_run: + .*: + - vault.get_config # always + - vault.generate_new_token # relevant when `token` == `issue:type` + - vault.generate_secret_id # relevant when `approle` == `issue:type` + +Minimally required configuration: + +.. code-block:: yaml + + vault: + auth: + token: abcdefg-hijklmnop-qrstuvw + server: + url: https://vault.example.com:8200 + +A sensible example configuration, e.g. in ``/etc/salt/master.d/vault.conf``: + +.. code-block:: yaml + + vault: + auth: + method: approle + role_id: e5a7b66e-5d08-da9c-7075-71984634b882 + secret_id: 841771dc-11c9-bbc7-bcac-6a3945a69cd9 + cache: + backend: file + issue: + token: + role_name: salt_minion + params: + explicit_max_ttl: 30 + num_uses: 10 + policies: + assign: + - salt_minion + - salt_role_{pillar[roles]} + server: + url: https://vault.example.com:8200 + +The above configuration requires the following policies for the master: + +.. code-block:: vaultpolicy + + # Issue tokens + path "auth/token/create" { + capabilities = ["create", "read", "update"] + } + + # Issue tokens with token roles + path "auth/token/create/*" { + capabilities = ["create", "read", "update"] + } + +A sensible example configuration that issues AppRoles to minions +from a separate authentication endpoint (notice differing mounts): + +.. 
code-block:: yaml + + vault: + auth: + method: approle + mount: approle # <-- mount the salt master authenticates at + role_id: e5a7b66e-5d08-da9c-7075-71984634b882 + secret_id: 841771dc-11c9-bbc7-bcac-6a3945a69cd9 + cache: + backend: file + issue: + type: approle + approle: + mount: salt-minions # <-- mount the salt master manages + metadata: + entity: + minion-id: '{minion}' + role: '{pillar[role]}' + server: + url: https://vault.example.com:8200 + ext_pillar: + - vault: path=salt/minions/{minion} + - vault: path=salt/roles/{pillar[role]} + +The above configuration requires the following policies for the master: + +.. code-block:: vaultpolicy + + # List existing AppRoles + path "auth/salt-minions/role" { + capabilities = ["list"] + } + + # Manage AppRoles + path "auth/salt-minions/role/*" { + capabilities = ["read", "create", "update", "delete"] + } + + # Lookup mount accessor + path "sys/auth/salt-minions" { + capabilities = ["read", "sudo"] + } + + # Lookup entities by alias name (role-id) and alias mount accessor + path "identity/lookup/entity" { + capabilities = ["create", "update"] + allowed_parameters = { + "alias_name" = [] + "alias_mount_accessor" = ["auth_approle_0a1b2c3d"] + } + } + + # Manage entities with name prefix salt_minion_ + path "identity/entity/name/salt_minion_*" { + capabilities = ["read", "create", "update", "delete"] + } + + # Create entity aliases – you can restrict the mount_accessor + # This might allow privilege escalation in case the salt master + # is compromised and the attacker knows the entity ID of an + # entity with relevant policies attached - although you might + # have other problems at that point. + path "identity/entity-alias" { + capabilities = ["create", "update"] + allowed_parameters = { + "id" = [] + "canonical_id" = [] + "mount_accessor" = ["auth_approle_0a1b2c3d"] + "name" = [] + } + } + +This enables you to write templated ACL policies like: + +.. 
code-block:: vaultpolicy + + path "salt/data/minions/{{identity.entity.metadata.minion-id}}" { + capabilities = ["read"] + } + + path "salt/data/roles/{{identity.entity.metadata.role}}" { + capabilities = ["read"] + } + + +All possible master configuration options with defaults: + +.. code-block:: yaml + + vault: + auth: + approle_mount: approle + approle_name: salt-master + method: token + role_id: + secret_id: null + token: + token_lifecycle: + minimum_ttl: 10 + renew_increment: null + cache: + backend: session + config: 3600 + kv_metadata: connection + secret: ttl + issue: + allow_minion_override_params: false + type: token + approle: + mount: salt-minions + params: + bind_secret_id: true + secret_id_num_uses: 1 + secret_id_ttl: 60 + token_explicit_max_ttl: 60 + token_num_uses: 10 + secret_id_bound_cidrs: null + token_ttl: null + token_max_ttl: null + token_no_default_policy: false + token_period: null + token_bound_cidrs: null + token: + role_name: null + params: + explicit_max_ttl: null + num_uses: 1 + ttl: null + period: null + no_default_policy: false + renewable: true + wrap: 30s + keys: [] + metadata: + entity: + minion-id: '{minion}' + secret: + saltstack-jid: '{jid}' + saltstack-minion: '{minion}' + saltstack-user: '{user}' + policies: + assign: + - saltstack/minions + - saltstack/{minion} + cache_time: 60 + refresh_pillar: null + server: + url: + namespace: null + verify: null + +``auth`` +~~~~~~~~ +Contains authentication information for the local machine. + +approle_mount + .. versionadded:: 3007.0 + + The name of the AppRole authentication mount point. Defaults to ``approle``. + +approle_name + .. versionadded:: 3007.0 + + The name of the AppRole. Defaults to ``salt-master``. + + .. note:: + + Only relevant when a locally configured role_id/secret_id uses + response wrapping. + +method + Currently only ``token`` and ``approle`` auth types are supported. + Defaults to ``token``. 
+ + Approle is the preferred way to authenticate with Vault as it provides + some advanced options to control the authentication process. + Please see the `Vault documentation `_ + for more information. + +role_id + The role ID of the AppRole. Required if ``auth:method`` == ``approle``. + + .. versionchanged:: 3007.0 + + In addition to a plain string, this can also be specified as a + dictionary that includes ``wrap_info``, i.e. the return payload + of a wrapping request. + +secret_id + The secret ID of the AppRole. + Only required if the configured AppRole requires it. + + .. versionchanged:: 3007.0 + + In addition to a plain string, this can also be specified as a + dictionary that includes ``wrap_info``, i.e. the return payload + of a wrapping request. + +token + Token to authenticate to Vault with. Required if ``auth:method`` == ``token``. + + The token must be able to create tokens with the policies that should be + assigned to minions. + You can still use the token auth via a OS environment variable via this + config example: .. code-block:: yaml vault: + auth: + method: token + token: sdb://osenv/VAULT_TOKEN + server: url: https://vault.service.domain:8200 - verify: /etc/ssl/certs/ca-certificates.crt - role_name: minion_role - namespace: vault_enterprice_namespace - auth: - method: approle - role_id: 11111111-2222-3333-4444-1111111111111 - secret_id: 11111111-1111-1111-1111-1111111111111 - policies: - - saltstack/minions - - saltstack/minion/{minion} - .. more policies - keys: - - n63/TbrQuL3xaIW7ZZpuXj/tIfnK1/MbVxO4vT3wYD2A - - S9OwCvMRhErEA4NVVELYBs6w/Me6+urgUr24xGK44Uy3 - - F1j4b7JKq850NS6Kboiy5laJ0xY8dWJvB3fcwA+SraYl - - 1cYtvjKJNDVam9c7HNqJUfINk4PYyAXIpjkpN/sIuzPv - - 3pPK5X6vGtwLhNOFv1U2elahECz3HpRUfNXJFYLw6lid - - url - Url to your Vault installation. Required. - - verify - For details please see - https://requests.readthedocs.io/en/master/user/advanced/#ssl-cert-verification - - .. versionadded:: 2018.3.0 - - namespaces - Optional Vault Namespace. 
Used with Vault enterprice - - For detail please see: - https://www.vaultproject.io/docs/enterprise/namespaces - - .. versionadded:: 3004 - role_name - Role name for minion tokens created. If omitted, minion tokens will be - created without any role, thus being able to inherit any master token - policy (including token creation capabilities). Optional. - - For details please see: - https://www.vaultproject.io/api/auth/token/index.html#create-token + osenv: + driver: env + + And then export the VAULT_TOKEN variable in your OS: - Example configuration: - https://www.nomadproject.io/docs/vault-integration/index.html#vault-token-role-configuration - - auth - Currently only token and approle auth types are supported. Required. - - Approle is the preferred way to authenticate with Vault as it provide - some advanced options to control authentication process. - Please visit Vault documentation for more info: - https://www.vaultproject.io/docs/auth/approle.html + .. code-block:: bash - The token must be able to create tokens with the policies that should be - assigned to minions. - You can still use the token auth via a OS environment variable via this - config example: - - .. code-block:: yaml - - vault: - url: https://vault.service.domain:8200 - auth: - method: token - token: sdb://osenv/VAULT_TOKEN - osenv: - driver: env - - And then export the VAULT_TOKEN variable in your OS: - - .. code-block:: bash - - export VAULT_TOKEN=11111111-1111-1111-1111-1111111111111 - - Configuration keys ``uses`` or ``ttl`` may also be specified under ``auth`` - to configure the tokens generated on behalf of minions to be reused for the - defined number of uses or length of time in seconds. These settings may also be configured - on the minion when ``allow_minion_override`` is set to ``True`` in the master - config. - - Defining ``uses`` will cause the salt master to generate a token with that number of uses rather - than a single use token. 
This multi-use token will be cached on the minion. The type of minion - cache can be specified with ``token_backend: session`` or ``token_backend: disk``. The value of - ``session`` is the default, and will store the vault information in memory only for that session. - The value of ``disk`` will write to an on disk file, and persist between state runs (most - helpful for multi-use tokens). - - .. code-block:: bash - - vault: - auth: - method: token - token: xxxxxx - uses: 10 - ttl: 43200 - allow_minion_override: True - token_backend: disk - - .. versionchanged:: 3001 - - policies - Policies that are assigned to minions when requesting a token. These - can either be static, eg ``saltstack/minions``, or templated with grain - values, eg ``my-policies/{grains[os]}``. ``{minion}`` is shorthand for - ``grains[id]``, eg ``saltstack/minion/{minion}``. + export VAULT_TOKEN=11111111-1111-1111-1111-1111111111111 - .. versionadded:: 3006.0 + .. versionchanged:: 3007.0 - Policies can be templated with pillar values as well: ``salt_role_{pillar[roles]}`` - Make sure to only reference pillars that are not sourced from Vault since the latter - ones might be unavailable during policy rendering. + In addition to a plain string, this can also be specified as a + dictionary that includes ``wrap_info``, i.e. the return payload + of a wrapping request. - .. important:: +token_lifecycle + Token renewal settings. - See :ref:`Is Targeting using Grain Data Secure? - ` for important security information. In short, - everything except ``grains[id]`` is minion-controlled. + .. note:: - If a template contains a grain which evaluates to a list, it will be - expanded into multiple policies. For example, given the template - ``saltstack/by-role/{grains[roles]}``, and a minion having these grains: + This setting can be specified inside a minion's configuration as well + and will override the master's default for the minion. - .. 
code-block:: yaml + Token lifecycle settings have significancy for any authentication method, + not just ``token``. - grains: - roles: - - web - - database + ``minimum_ttl`` specifies the time (in seconds or as a time string like ``24h``) + an in-use token should be valid for. If the current validity period is less + than this and the token is renewable, a renewal will be attempted. If it is + not renewable or a renewal does not extend the ttl beyond the specified minimum, + a new token will be generated. - The minion will have the policies ``saltstack/by-role/web`` and - ``saltstack/by-role/database``. + .. note:: - .. note:: + Since leases like database credentials are tied to a token, setting this to + a much higher value than the default can be necessary, depending on your + specific use case. - List members which do not have simple string representations, - such as dictionaries or objects, do not work and will - throw an exception. Strings and numbers are examples of - types which work well. + ``renew_increment`` specifies the amount of time the token's validity should + be requested to be renewed for when renewing a token. When unset, will extend + the token's validity by its initial ttl. + Set this to ``false`` to disable token renewals. - Optional. If policies is not configured, ``saltstack/minions`` and - ``saltstack/{minion}`` are used as defaults. + .. note:: - policies_refresh_pillar - Whether to refresh the pillar data when rendering templated policies. - When unset (=null/None), will only refresh when the cached data - is unavailable, boolean values force one behavior always. + The Vault server is allowed to disregard this request. - .. note:: +``cache`` +~~~~~~~~~ +Configures configuration cache on minions and secret cache on all hosts as well +as metadata cache for KV secrets. - Using cached pillar data only (policies_refresh_pillar=False) - might cause the policies to be out of sync. 
If there is no cached pillar - data available for the minion, pillar templates will fail to render at all. +backend + .. versionchanged:: 3007.0 - If you use pillar values for templating policies and do not disable - refreshing pillar data, make sure the relevant values are not sourced - from Vault (ext_pillar, sdb) or from a pillar sls file that uses the vault - execution module. Although this will often work when cached pillar data is - available, if the master needs to compile the pillar data during policy rendering, - all Vault modules will be broken to prevent an infinite loop. + This used to be found in ``auth:token_backend``. - policies_cache_time - Policy computation can be heavy in case pillar data is used in templated policies and - it has not been cached. Therefore, a short-lived cache specifically for rendered policies - is used. This specifies the expiration timeout in seconds. Defaults to 60. + The cache backend in use. Defaults to ``session``, which will store the + vault information in memory only for that session. + ``disk``/``file``/``localfs`` will force using the localfs driver, regardless + of configured minion data cache. + Setting this to anything else will use the default configured cache for + minion data (:conf_master:`cache `), by default the local filesystem + as well. - keys - List of keys to use to unseal vault server with the vault.unseal runner. +config + .. versionadded:: 3007.0 - config_location - Where to get the connection details for calling vault. By default, - vault will try to determine if it needs to request the connection - details from the master or from the local config. This optional option - will force vault to use the connection details from the master or the - local config. Can only be either ``master`` or ``local``. + The time in seconds to cache queried configuration from the master. + Defaults to ``3600`` (one hour). - .. versionadded:: 3006.0 +kv_metadata + .. 
versionadded:: 3007.0 - Add this segment to the master configuration file, or - /etc/salt/master.d/peer_run.conf: + The time in seconds to cache KV metadata used to determine if a path + is using version 1/2 for. Defaults to ``connection``, which will clear + the metadata cache once a new configuration is requested from the + master. Setting this to ``None``/``null`` will keep the information + indefinitely until the cache is cleared manually. - .. code-block:: yaml +secret + .. versionadded:: 3007.0 + + The time in seconds to cache tokens/secret IDs for. Defaults to ``ttl``, + which caches the secret for as long as it is valid, unless a new configuration + is requested from the master. + +``issue`` +~~~~~~~~~ +Configures authentication data issued by the master to minions. + +type + .. versionadded:: 3007.0 + + The type of authentication to issue to minions. Can be ``token`` or ``approle``. + Defaults to ``token``. + + To be able to issue AppRoles to minions, the master needs to be able to + create new AppRoles on the configured auth mount (see policy example above). + It is strongly encouraged to create a separate mount dedicated to minions. + +approle + .. versionadded:: 3007.0 + + Configuration regarding issued AppRoles. + + ``mount`` specifies the name of the auth mount the master manages. + Defaults to ``salt-minions``. This mount should be exclusively dedicated + to the Salt master. + + ``params`` configures the AppRole the master creates for minions. See the + `Vault AppRole API docs `_ + for details. If you update these params, you can update the minion AppRoles + manually using the vault runner: ``salt-run vault.sync_approles``, but they + will be updated automatically during a request by a minion as well. + +token + .. versionadded:: 3007.0 + + Configuration regarding issued tokens. + + ``role_name`` specifies the role name for minion tokens created. Optional. + + .. versionchanged:: 3007.0 + + This used to be found in ``role_name``. 
+ + If omitted, minion tokens will be created without any role, thus being able + to inherit any master token policy (including token creation capabilities). + + Example configuration: + https://www.nomadproject.io/docs/vault-integration/index.html#vault-token-role-configuration + + ``params`` configures the tokens the master issues to minions. + + .. versionchanged:: 3007.0 + + This used to be found in ``auth:ttl`` and ``auth:uses``. + + See the `Vault token API docs `_ + for details. To make full use of multi-use tokens, you should configure a cache + that survives a single session. + + .. note:: + + If unset, the master issues single-use tokens to minions, which can be quite expensive. + + +allow_minion_override_params + .. versionchanged:: 3007.0 + + This used to be found in ``auth:allow_minion_override``. + + Whether to allow minions to request to override parameters for issuing credentials, + especially ``ttl`` and ``num_uses``. Defaults to False. + + .. note:: + + Minion override parameters can be specified in the minion configuration + under ``vault:issue_params``. ``ttl`` and ``uses`` always refer to + issued token lifecycle settings. For AppRoles specifically, there + are more parameters, such as ``secret_id_num_uses`` and ``secret_id_ttl``. + ``bind_secret_id`` can not be overridden. + +wrap + .. versionadded:: 3007.0 + + The time a minion has to unwrap a wrapped secret issued by the master. + Set this to false to disable wrapping, otherwise a time string like ``30s`` + can be used. Defaults to ``30s``. + +``keys`` +~~~~~~~~ + List of keys to use to unseal vault server with the ``vault.unseal`` runner. + +``metadata`` +~~~~~~~~~~~~ +.. versionadded:: 3007.0 + +Configures metadata for the issued entities/secrets. Values have to be strings +and can be templated with the following variables: + +- ``{jid}`` Salt job ID that issued the secret. +- ``{minion}`` The minion ID the secret was issued for. 
+- ``{user}`` The user the Salt daemon issuing the secret was running as. +- ``{pillar[]}`` A minion pillar value that does not depend on Vault. +- ``{grains[]}`` A minion grain value. + +.. note:: + + Values have to be strings, hence templated variables that resolve to lists + will be concatenated to a lexicographically sorted comma-separated list + (Python ``list.sort()``). + +entity + Configures the metadata associated with the minion entity inside Vault. + Entities are only created when issuing AppRoles to minions. + +secret + Configures the metadata associated with issued tokens/secret IDs. They + are logged in plaintext to the Vault audit log. + +``policies`` +~~~~~~~~~~~~ +.. versionchanged:: 3007.0 + + This used to specify the list of policies associated with a minion token only. + The equivalent is found in ``assign``. + +assign + List of policies that are assigned to issued minion authentication data, + either token or AppRole. + + They can be static strings or string templates with + + - ``{minion}`` The minion ID. + - ``{pillar[]}`` A minion pillar value. + - ``{grains[]}`` A minion grain value. + + For pillar and grain values, lists are expanded, so ``salt_role_{pillar[roles]}`` + with ``[a, b]`` results in ``salt_role_a`` and ``salt_role_b`` to be issued. + + Defaults to ``[saltstack/minions, saltstack/{minion}]``. + + .. versionadded:: 3006.0 + + Policies can be templated with pillar values as well: ``salt_role_{pillar[roles]}``. + Make sure to only reference pillars that are not sourced from Vault since the latter + ones might be unavailable during policy rendering. If you use the Vault + integration in one of your pillar ``sls`` files, all values from that file + will be absent during policy rendering, even the ones that do not depend on Vault. + + .. important:: + + See :ref:`Is Targeting using Grain Data Secure? + ` for important security information. In short, + everything except ``grains[id]`` is minion-controlled. + + .. 
note:: + + List members which do not have simple string representations, + such as dictionaries or objects, do not work and will + throw an exception. Strings and numbers are examples of + types which work well. + +cache_time + .. versionadded:: 3007.0 + + Number of seconds compiled templated policies are cached on the master. + This is important when using pillar values in templates, since compiling + the pillar is an expensive operation. + + .. note:: + + Only effective (and sensible) when issuing tokens to minions. Token policies + need to be compiled every time a token is requested, while AppRole-associated + policies are written to Vault configuration the first time authentication data + is requested (they can be refreshed on demand by running + ``salt-run vault.sync_approles``). + + They will also be refreshed in case other issuance parameters are changed + (such as uses/ttl), either on the master or the minion + (if allow_minion_override_params is True). - peer_run: - .*: - - vault.generate_token +refresh_pillar + .. versionadded:: 3007.0 + + Whether to refresh the minion pillar when compiling templated policies + that contain pillar variables. + Only effective when issuing tokens to minions (see note on cache_time above). + + - ``null`` (default) only compiles the pillar when no cached pillar is found. + - ``false`` never compiles the pillar. This means templated policies that + contain pillar values are skipped if no cached pillar is found. + - ``true`` always compiles the pillar. This can cause additional strain + on the master since the compilation is costly. + + .. note:: + + Hardcoded to True when issuing AppRoles. + + Using cached pillar data only (refresh_pillar=False) might cause the policies + to be out of sync. If there is no cached pillar data available for the minion, + pillar templates will fail to render at all. 
+ + If you use pillar values for templating policies and do not disable + refreshing pillar data, make sure the relevant values are not sourced + from Vault (ext_pillar, sdb) or from a pillar sls file that uses the vault + execution/sdb module. Although this will often work when cached pillar data is + available, if the master needs to compile the pillar data during policy rendering, + all Vault modules will be broken to prevent an infinite loop. + +``server`` +~~~~~~~~~~ +.. versionchanged:: 3007.0 + + The values found in here were found in the ``vault`` root namespace previously. + +Configures Vault server details. + +url + URL of your Vault installation. Required. + +verify + Configures certificate verification behavior when issuing requests to the + Vault server. If unset, requests will use the system default CA bundle. + + For details, please see `the requests documentation `_. + + .. versionadded:: 2018.3.0 + + .. versionchanged:: 3007.0 + + Minions again respect the master configuration value, which was changed + implicitly in v3001. If this value is set in the minion configuration + as well, it will take precedence. + +namespace + Optional Vault namespace. Used with Vault Enterprise. + + For details please see: + https://www.vaultproject.io/docs/enterprise/namespaces + + .. versionadded:: 3004 + + +Minion configuration (optional): + +``config_location`` +~~~~~~~~~~~~~~~~~~~ + Where to get the connection details for calling vault. By default, + vault will try to determine if it needs to request the connection + details from the master or from the local config. This optional option + will force vault to use the connection details from the master or the + local config. Can only be either ``master`` or ``local``. + + .. versionadded:: 3006.0 + +``issue_params`` +~~~~~~~~~~~~~~~~ + Request overrides for token/AppRole issuance. This needs to be allowed + on the master by setting ``issue:allow_minion_override_params`` to true. 
+ See the master configuration ``issue:token:params`` or ``issue:approle:params`` + for reference. + + .. versionchanged:: 3007.0 + + For token issuance, this used to be found in ``auth:ttl`` and ``auth:uses``. + +.. note:: + + ``auth:token_lifecycle`` and ``server:verify`` can be set on the minion as well. .. _vault-setup: """ import logging -import os +import salt.utils.vault as vault from salt.defaults import NOT_SET -from salt.exceptions import CommandExecutionError +from salt.exceptions import CommandExecutionError, SaltException, SaltInvocationError log = logging.getLogger(__name__) def read_secret(path, key=None, metadata=False, default=NOT_SET): """ + Return the value of at in vault, or entire secret. + .. versionchanged:: 3001 The ``default`` argument has been added. When the path or path/key combination is not found, an exception will be raised, unless a default is provided. - Return the value of key at path in vault, or entire secret + CLI Example: - :param metadata: Optional - If using KV v2 backend, display full results, including metadata + .. code-block:: bash - .. versionadded:: 3001 + salt '*' vault.read_secret salt/kv/secret + + Required policy: + + .. code-block:: vaultpolicy + + path "/" { + capabilities = ["read"] + } - Jinja Example: + # or KV v2 + path "/data/" { + capabilities = ["read"] + } - .. code-block:: jinja + path + The path to the secret, including mount. - my-secret: {{ salt['vault'].read_secret('secret/my/secret', 'some-key') }} + key + The data field at to read. If unspecified, returns the + whole dataset. - {{ salt['vault'].read_secret('/secret/my/secret', 'some-key', metadata=True)['data'] }} + metadata + .. versionadded:: 3001 + + If using KV v2 backend, display full results, including metadata. + Defaults to False. - .. code-block:: jinja + default + .. 
versionadded:: 3001 - {% set supersecret = salt['vault'].read_secret('secret/my/secret') %} - secrets: - first: {{ supersecret.first }} - second: {{ supersecret.second }} + When the path or path/key combination is not found, an exception will + be raised, unless a default is provided here. """ if default == NOT_SET: default = CommandExecutionError - version2 = __utils__["vault.is_v2"](path) - if version2["v2"]: - path = version2["data"] - log.debug("Reading Vault secret for %s at %s", __grains__["id"], path) + if key is not None: + metadata = False + log.debug("Reading Vault secret for %s at %s", __grains__.get("id"), path) try: - url = "v1/{}".format(path) - response = __utils__["vault.make_request"]("GET", url) - if response.status_code != 200: - response.raise_for_status() - data = response.json()["data"] - - # Return data of subkey if requested + data = vault.read_kv(path, __opts__, __context__, include_metadata=metadata) if key is not None: - if version2["v2"]: - return data["data"][key] - else: - return data[key] - # Just return data from KV V2 if metadata isn't needed - if version2["v2"]: - if not metadata: - return data["data"] - + return data[key] return data except Exception as err: # pylint: disable=broad-except if default is CommandExecutionError: raise CommandExecutionError( "Failed to read secret! {}: {}".format(type(err).__name__, err) - ) + ) from err return default def write_secret(path, **kwargs): """ - Set secret at the path in vault. The vault policy used must allow this. + Set secret dataset at . The vault policy used must allow this. + Fields are specified as arbitrary keyword arguments. CLI Example: .. code-block:: bash salt '*' vault.write_secret "secret/my/secret" user="foo" password="bar" + + Required policy: + + .. code-block:: vaultpolicy + + path "/" { + capabilities = ["create", "update"] + } + + # or KV v2 + path "/data/" { + capabilities = ["create", "update"] + } + + path + The path to the secret, including mount. 
""" - log.debug("Writing vault secrets for %s at %s", __grains__["id"], path) + log.debug("Writing vault secrets for %s at %s", __grains__.get("id"), path) data = {x: y for x, y in kwargs.items() if not x.startswith("__")} - version2 = __utils__["vault.is_v2"](path) - if version2["v2"]: - path = version2["data"] - data = {"data": data} try: - url = "v1/{}".format(path) - response = __utils__["vault.make_request"]("POST", url, json=data) - if response.status_code == 200: - return response.json()["data"] - elif response.status_code != 204: - response.raise_for_status() - return True + res = vault.write_kv(path, data, __opts__, __context__) + if isinstance(res, dict): + return res["data"] + return res except Exception as err: # pylint: disable=broad-except log.error("Failed to write secret! %s: %s", type(err).__name__, err) return False @@ -313,52 +778,115 @@ def write_secret(path, **kwargs): def write_raw(path, raw): """ - Set raw data at the path in vault. The vault policy used must allow this. + Set raw data at . The vault policy used must allow this. CLI Example: .. code-block:: bash salt '*' vault.write_raw "secret/my/secret" '{"user":"foo","password": "bar"}' + + Required policy: see write_secret + + path + The path to the secret, including mount. + + raw + Secret data to write to . Has to be a mapping. 
""" - log.debug("Writing vault secrets for %s at %s", __grains__["id"], path) - version2 = __utils__["vault.is_v2"](path) - if version2["v2"]: - path = version2["data"] - raw = {"data": raw} + log.debug("Writing vault secrets for %s at %s", __grains__.get("id"), path) try: - url = "v1/{}".format(path) - response = __utils__["vault.make_request"]("POST", url, json=raw) - if response.status_code == 200: - return response.json()["data"] - elif response.status_code != 204: - response.raise_for_status() - return True + res = vault.write_kv(path, raw, __opts__, __context__) + if isinstance(res, dict): + return res["data"] + return res except Exception as err: # pylint: disable=broad-except log.error("Failed to write secret! %s: %s", type(err).__name__, err) return False -def delete_secret(path): +def patch_secret(path, **kwargs): + """ + Patch secret dataset at . Fields are specified as arbitrary keyword arguments. + Requires KV v2 and "patch" capability. + + .. note:: + + This uses JSON Merge Patch format internally. + Keys set to ``null`` (JSON/YAML)/``None`` (Python) will be deleted. + + CLI Example: + + .. code-block:: bash + + salt '*' vault.patch_secret "secret/my/secret" password="baz" + + Required policy: + + .. code-block:: vaultpolicy + + path "/data/" { + capabilities = ["patch"] + } + + path + The path to the secret, including mount. + """ + # TODO: patch can be emulated as read, local update and write + # -> catch VaultPermissionDeniedError and try that way + log.debug("Patching vault secrets for %s at %s", __grains__.get("id"), path) + data = {x: y for x, y in kwargs.items() if not x.startswith("__")} + try: + res = vault.patch_kv(path, data, __opts__, __context__) + if isinstance(res, dict): + return res["data"] + return res + except Exception as err: # pylint: disable=broad-except + log.error("Failed to patch secret! %s: %s", type(err).__name__, err) + return False + + +def delete_secret(path, *args): """ - Delete secret at the path in vault. 
The vault policy used must allow this. + Delete secret at . The vault policy used must allow this. + If is on KV v2, the secret will be soft-deleted. CLI Example: .. code-block:: bash salt '*' vault.delete_secret "secret/my/secret" + salt '*' vault.delete_secret "secret/my/secret" 1 2 3 + + Required policy: + + .. code-block:: vaultpolicy + + path "/" { + capabilities = ["delete"] + } + + # or KV v2 + path "/data/" { + capabilities = ["delete"] + } + + # KV v2 versions + path "/delete/" { + capabilities = ["update"] + } + + path + The path to the secret, including mount. + + .. versionadded:: 3007.0 + + For KV v2, you can specify versions to soft-delete as supplemental + positional arguments. """ - log.debug("Deleting vault secrets for %s in %s", __grains__["id"], path) - version2 = __utils__["vault.is_v2"](path) - if version2["v2"]: - path = version2["data"] + log.debug("Deleting vault secrets for %s in %s", __grains__.get("id"), path) try: - url = "v1/{}".format(path) - response = __utils__["vault.make_request"]("DELETE", url) - if response.status_code != 204: - response.raise_for_status() - return True + return vault.delete_kv(path, __opts__, __context__, versions=list(args) or None) except Exception as err: # pylint: disable=broad-except log.error("Failed to delete secret! %s: %s", type(err).__name__, err) return False @@ -368,88 +896,286 @@ def destroy_secret(path, *args): """ .. versionadded:: 3001 - Destroy specified secret version at the path in vault. The vault policy - used must allow this. Only supported on Vault KV version 2 + Destroy specified secret versions . The vault policy + used must allow this. Only supported on Vault KV version 2. CLI Example: .. code-block:: bash salt '*' vault.destroy_secret "secret/my/secret" 1 2 + + Required policy: + + .. code-block:: vaultpolicy + + path "/destroy/" { + capabilities = ["update"] + } + + path + The path to the secret, including mount. 
+ + You can specify versions to destroy as supplemental positional arguments. + At least one is required. """ - log.debug("Destroying vault secrets for %s in %s", __grains__["id"], path) - data = {"versions": list(args)} - version2 = __utils__["vault.is_v2"](path) - if version2["v2"]: - path = version2["destroy"] - else: - log.error("Destroy operation is only supported on KV version 2") - return False + if not args: + raise SaltInvocationError("Need at least one version to destroy.") + log.debug("Destroying vault secrets for %s in %s", __grains__.get("id"), path) try: - url = "v1/{}".format(path) - response = __utils__["vault.make_request"]("POST", url, json=data) - if response.status_code != 204: - response.raise_for_status() - return True + return vault.destroy_kv(path, list(args), __opts__, __context__) except Exception as err: # pylint: disable=broad-except - log.error("Failed to delete secret! %s: %s", type(err).__name__, err) + log.error("Failed to destroy secret! %s: %s", type(err).__name__, err) return False -def list_secrets(path, default=NOT_SET): +def list_secrets(path, default=NOT_SET, keys_only=False): """ + List secret keys at . The vault policy used must allow this. + The path should end with a trailing slash. + .. versionchanged:: 3001 The ``default`` argument has been added. When the path or path/key combination is not found, an exception will be raised, unless a default is provided. - List secret keys at the path in vault. The vault policy used must allow this. - The path should end with a trailing slash. - CLI Example: .. code-block:: bash - salt '*' vault.list_secrets "secret/my/" + salt '*' vault.list_secrets "secret/my/" + + Required policy: + + .. code-block:: vaultpolicy + + path "/" { + capabilities = ["list"] + } + + # or KV v2 + path "/metadata/" { + capabilities = ["list"] + } + + path + The path to the secret, including mount. + + default + .. 
versionadded:: 3001 + + When the path is not found, an exception will be raised, unless a default + is provided here. + + keys_only + .. versionadded:: 3007.0 + + This function used to return a dictionary like ``{"keys": ["some/", "some/key"]}``. + Setting this to True will only return the list of keys. + For backwards-compatibility reasons, this defaults to False. """ if default == NOT_SET: default = CommandExecutionError - log.debug("Listing vault secret keys for %s in %s", __grains__["id"], path) - version2 = __utils__["vault.is_v2"](path) - if version2["v2"]: - path = version2["metadata"] + log.debug("Listing vault secret keys for %s in %s", __grains__.get("id"), path) try: - url = "v1/{}".format(path) - response = __utils__["vault.make_request"]("LIST", url) - if response.status_code != 200: - response.raise_for_status() - return response.json()["data"] + keys = vault.list_kv(path, __opts__, __context__) + if keys_only: + return keys + # this is the way Salt behaved previously + return {"keys": keys} except Exception as err: # pylint: disable=broad-except if default is CommandExecutionError: raise CommandExecutionError( "Failed to list secrets! {}: {}".format(type(err).__name__, err) - ) + ) from err return default -def clear_token_cache(): +def clear_token_cache(connection_only=True): """ .. versionchanged:: 3001 - Delete minion Vault token cache file + Delete minion Vault token cache. + + CLI Example: + + .. code-block:: bash + + salt '*' vault.clear_token_cache + + connection_only + .. versionadded:: 3007.0 + + Only delete cache data scoped to a connection configuration. + This includes config and secret cache always and KV metadata + cache, depending on if ``vault:cache:kv_metadata`` is set to + ``connection``, which is the default value. + Defaults to True. + """ + log.debug("Deleting vault connection cache.") + return vault.clear_cache(__opts__, __context__, connection=connection_only) + + +def policy_fetch(policy): + """ + .. 
versionadded:: 3007.0 + + Fetch the rules associated with an ACL policy. Returns None if the policy + does not exist. CLI Example: .. code-block:: bash - salt '*' vault.clear_token_cache + salt '*' vault.policy_fetch salt_minion + + Required policy: + + .. code-block:: vaultpolicy + + path "sys/policy/" { + capabilities = ["read"] + } + + policy + The name of the policy to fetch. """ - log.debug("Deleting cache file") - cache_file = os.path.join(__opts__["cachedir"], "salt_vault_token") - - if os.path.exists(cache_file): - os.remove(cache_file) - return True - else: - log.info("Attempted to delete vault cache file, but it does not exist.") + # there is also "sys/policies/acl/{policy}" + endpoint = f"sys/policy/{policy}" + + try: + data = vault.query("GET", endpoint, __opts__, __context__) + return data["rules"] + + except vault.VaultNotFoundError: + return None + except SaltException as err: + raise CommandExecutionError("{}: {}".format(type(err).__name__, err)) from err + + +def policy_write(policy, rules): + r""" + .. versionadded:: 3007.0 + + Create or update an ACL policy. + + CLI Example: + + .. code-block:: bash + + salt '*' vault.policy_write salt_minion 'path "secret/foo" {...}' + + Required policy: + + .. code-block:: vaultpolicy + + path "sys/policy/" { + capabilities = ["create", "update"] + } + + policy + The name of the policy to create/update. + + rules + Rules to write, formatted as in-line HCL. + """ + endpoint = f"sys/policy/{policy}" + payload = {"policy": rules} + try: + return vault.query("POST", endpoint, __opts__, __context__, payload=payload) + except SaltException as err: + raise CommandExecutionError("{}: {}".format(type(err).__name__, err)) from err + + +def policy_delete(policy): + """ + .. versionadded:: 3007.0 + + Delete an ACL policy. Returns False if the policy did not exist. + + CLI Example: + + .. code-block:: bash + + salt '*' vault.policy_delete salt_minion + + Required policy: + + .. 
code-block:: vaultpolicy + + path "sys/policy/" { + capabilities = ["delete"] + } + + policy + The name of the policy to delete. + """ + endpoint = f"sys/policy/{policy}" + + try: + return vault.query("DELETE", endpoint, __opts__, __context__) + except vault.VaultNotFoundError: return False + except SaltException as err: + raise CommandExecutionError("{}: {}".format(type(err).__name__, err)) from err + + +def policies_list(): + """ + .. versionadded:: 3007.0 + + List all ACL policies. + + CLI Example: + + .. code-block:: bash + + salt '*' vault.policies_list + + Required policy: + + .. code-block:: vaultpolicy + + path "sys/policy" { + capabilities = ["read"] + } + """ + try: + return vault.query("GET", "sys/policy", __opts__, __context__)["policies"] + except SaltException as err: + raise CommandExecutionError("{}: {}".format(type(err).__name__, err)) from err + + +def query(method, endpoint, payload=None): + """ + .. versionadded:: 3007.0 + + Issue arbitrary queries against the Vault API. + + CLI Example: + + .. code-block:: bash + + salt '*' vault.query GET auth/token/lookup-self + + Required policy: Depends on the query. + + You can ask the vault CLI to output the necessary policy: + + .. code-block:: bash + + vault read -output-policy auth/token/lookup-self + + method + HTTP method to use. + + endpoint + Vault API endpoint to issue the request against. Do not include ``/v1/``. + + payload + Optional dictionary to use as JSON payload. + """ + try: + return vault.query(method, endpoint, __opts__, __context__, payload=payload) + except SaltException as err: + raise CommandExecutionError("{}: {}".format(type(err).__name__, err)) from err diff --git a/salt/modules/vault_db.py b/salt/modules/vault_db.py new file mode 100644 index 000000000000..93023b9cdd55 --- /dev/null +++ b/salt/modules/vault_db.py @@ -0,0 +1,765 @@ +""" +Manage the Vault database secret engine. + +Configuration instructions are documented in the :ref:`vault execution module docs `. + +.. 
versionadded:: 3007.0 +""" + +import logging + +import salt.utils.vault as vault +from salt.exceptions import CommandExecutionError, SaltInvocationError + +log = logging.getLogger(__name__) + + +PLUGINS = { + "cassandra": { + "name": "cassandra", + "required": [ + "hosts", + "username", + "password", + ], + }, + "couchbase": { + "name": "couchbase", + "required": [ + "hosts", + "username", + "password", + ], + }, + "elasticsearch": { + "name": "elasticsearch", + "required": [ + "url", + "username", + "password", + ], + }, + "influxdb": { + "name": "influxdb", + "required": [ + "host", + "username", + "password", + ], + }, + "hanadb": { + "name": "hana", + "required": [ + "connection_url", + ], + }, + "mongodb": { + "name": "mongodb", + "required": [ + "connection_url", + ], + }, + "mongodb_atlas": { + "name": "mongodbatlas", + "required": [ + "public_key", + "private_key", + "project_id", + ], + }, + "mssql": { + "name": "mssql", + "required": [ + "connection_url", + ], + }, + "mysql": { + "name": "mysql", + "required": [ + "connection_url", + ], + }, + "oracle": { + "name": "oracle", + "required": [ + "connection_url", + ], + }, + "postgresql": { + "name": "postgresql", + "required": [ + "connection_url", + ], + }, + "redis": { + "name": "redis", + "required": [ + "host", + "port", + "username", + "password", + ], + }, + "redis_elasticache": { + "name": "redis-elasticache", + "required": [ + "url", + "username", + "password", + ], + }, + "redshift": { + "name": "redshift", + "required": [ + "connection_url", + ], + }, + "snowflake": { + "name": "snowflake", + "required": [ + "connection_url", + ], + }, + "default": { + "name": "", + "required": [], + }, +} + + +def list_connections(mount="database"): + """ + List configured database connections. + + `API method docs `_. + + CLI Example: + + .. code-block:: bash + + salt '*' vault_db.list_connections + + mount + The mount path the DB backend is mounted to. Defaults to ``database``. 
+ """ + endpoint = f"{mount}/config" + try: + return vault.query("LIST", endpoint, __opts__, __context__)["data"]["keys"] + except vault.VaultNotFoundError: + return [] + except vault.VaultException as err: + raise CommandExecutionError(f"{err.__class__}: {err}") from err + + +def fetch_connection(name, mount="database"): + """ + Read a configured database connection. Returns None if it does not exist. + + `API method docs `_. + + CLI Example: + + .. code-block:: bash + + salt '*' vault_db.fetch_connection mydb + + name + The name of the database connection. + + mount + The mount path the database backend is mounted to. Defaults to ``database``. + """ + endpoint = f"{mount}/config/{name}" + try: + return vault.query("GET", endpoint, __opts__, __context__)["data"] + except vault.VaultNotFoundError: + return None + except vault.VaultException as err: + raise CommandExecutionError(f"{err.__class__}: {err}") from err + + +def write_connection( + name, + plugin, + version="", + verify=True, + allowed_roles=None, + root_rotation_statements=None, + password_policy=None, + rotate=True, + mount="database", + **kwargs, +): + """ + Create/update a configured database connection. + + .. note:: + + This endpoint distinguishes between create and update ACL capabilities. + + .. note:: + + It is highly recommended to use a Vault-specific user rather than the admin user in the + database when configuring the plugin. This user will be used to create/update/delete users + within the database so it will need to have the appropriate permissions to do so. + If the plugin supports rotating the root credentials, it is highly recommended to perform + that action after configuring the plugin. This will change the password of the user + configured in this step. The new password will not be viewable by users. + + `API method docs `_. + + CLI Example: + + .. 
code-block:: bash + + salt '*' vault_db.write_connection mydb elasticsearch \ + url=http://127.0.0.1:9200 username=vault password=hunter2 + + name + The name of the database connection. + + plugin + The name of the database plugin. Known plugins to this module are: + ``cassandra``, ``couchbase``, ``elasticsearch``, ``influxdb``, ``hanadb``, ``mongodb``, + ``mongodb_atlas``, ``mssql``, ``mysql``, ``oracle``, ``postgresql``, ``redis``, + ``redis_elasticache``, ``redshift``, ``snowflake``. + If you pass an unknown plugin, make sure its Vault-internal name can be formatted + as ``{plugin}-database-plugin`` and to pass all required parameters as kwargs. + + version + Specifies the semantic version of the plugin to use for this connection. + + verify + Verify the connection during initial configuration. Defaults to True. + + allowed_roles + List of the roles allowed to use this connection. ``["*"]`` means any role + can use this connection. Defaults to empty (no role can use it). + + root_rotation_statements + Specifies the database statements to be executed to rotate the root user's credentials. + See the plugin's API page for more information on support and formatting for this parameter. + + password_policy + The name of the password policy to use when generating passwords for this database. + If not specified, this will use a default policy defined as: + 20 characters with at least 1 uppercase, 1 lowercase, 1 number, and 1 dash character. + + rotate + Rotate the root credentials after plugin setup. Defaults to True. + + mount + The mount path the database backend is mounted to. Defaults to ``database``. + + kwargs + Different plugins require different parameters. You need to make sure that you pass them + as supplemental keyword arguments. For known plugins, the required arguments will + be checked. 
+ """ + endpoint = f"{mount}/config/{name}" + plugin_meta = PLUGINS.get(plugin, "default") + plugin_name = plugin_meta["name"] or plugin + payload = {k: v for k, v in kwargs.items() if not k.startswith("_")} + + if fetch_connection(name, mount=mount) is None: + missing_kwargs = set(plugin_meta["required"]) - set(payload) + if missing_kwargs: + raise SaltInvocationError( + f"The plugin {plugin} requires the following additional kwargs: {missing_kwargs}." + ) + + payload["plugin_name"] = f"{plugin_name}-database-plugin" + payload["verify_connection"] = verify + if version is not None: + payload["plugin_version"] = version + if allowed_roles is not None: + payload["allowed_roles"] = allowed_roles + if root_rotation_statements is not None: + payload["root_rotation_statements"] = root_rotation_statements + if password_policy is not None: + payload["password_policy"] = password_policy + + try: + vault.query("POST", endpoint, __opts__, __context__, payload=payload) + except vault.VaultException as err: + raise CommandExecutionError(f"{err.__class__}: {err}") from err + + if not rotate: + return True + return rotate_root(name, mount=mount) + + +def delete_connection(name, mount="database"): + """ + Delete a configured database connection. Returns None if it does not exist. + + `API method docs `_. + + CLI Example: + + .. code-block:: bash + + salt '*' vault_db.delete_connection mydb + + name + The name of the database connection. + + mount + The mount path the database backend is mounted to. Defaults to ``database``. + """ + endpoint = f"{mount}/config/{name}" + try: + return vault.query("DELETE", endpoint, __opts__, __context__) + except vault.VaultException as err: + raise CommandExecutionError(f"{err.__class__}: {err}") from err + + +def reset_connection(name, mount="database"): + """ + Close a connection and restart its plugin with the configuration stored in the barrier. + + `API method docs `_. + + CLI Example: + + .. 
code-block:: bash + + salt '*' vault_db.reset_connection mydb + + name + The name of the database connection. + + mount + The mount path the database backend is mounted to. Defaults to ``database``. + """ + endpoint = f"{mount}/reset/{name}" + try: + return vault.query("POST", endpoint, __opts__, __context__) + except vault.VaultException as err: + raise CommandExecutionError(f"{err.__class__}: {err}") from err + + +def rotate_root(name, mount="database"): + """ + Rotate the "root" user credentials stored for the database connection. + + .. warning:: + + The rotated password will not be accessible, so it is highly recommended to create + a dedicated user account as Vault's configured "root". + + `API method docs `_. + + CLI Example: + + .. code-block:: bash + + salt '*' vault_db.rotate_root mydb + + name + The name of the database connection. + + mount + The mount path the database backend is mounted to. Defaults to ``database``. + """ + endpoint = f"{mount}/rotate-root/{name}" + try: + return vault.query("POST", endpoint, __opts__, __context__) + except vault.VaultException as err: + raise CommandExecutionError(f"{err.__class__}: {err}") from err + + +def list_roles(static=False, mount="database"): + """ + List configured database roles. + + `API method docs `_. + `API method docs static `_. + + CLI Example: + + .. code-block:: bash + + salt '*' vault_db.list_roles + + static + Whether to list static roles. Defaults to False. + + mount + The mount path the DB backend is mounted to. Defaults to ``database``. + """ + endpoint = f"{mount}/{'static-' if static else ''}roles" + try: + return vault.query("LIST", endpoint, __opts__, __context__)["data"]["keys"] + except vault.VaultNotFoundError: + return [] + except vault.VaultException as err: + raise CommandExecutionError(f"{err.__class__}: {err}") from err + + +def fetch_role(name, static=False, mount="database"): + """ + Read a configured database role. Returns None if it does not exist. + + `API method docs `_. 
+ `API method docs static `_. + + CLI Example: + + .. code-block:: bash + + salt '*' vault_db.fetch_role myrole + + name + The name of the database role. + + static + Whether this role is static. Defaults to False. + + mount + The mount path the database backend is mounted to. Defaults to ``database``. + """ + endpoint = f"{mount}/{'static-' if static else ''}roles/{name}" + try: + return vault.query("GET", endpoint, __opts__, __context__)["data"] + except vault.VaultNotFoundError: + return None + except vault.VaultException as err: + raise CommandExecutionError(f"{err.__class__}: {err}") from err + + +def write_static_role( + name, + connection, + username, + rotation_period, + rotation_statements=None, + credential_type=None, + credential_config=None, + mount="database", +): + """ + Create/update a database Static Role. Mind that not all databases support Static Roles. + + `API method docs `_. + + CLI Example: + + .. code-block:: bash + + salt '*' vault_db.write_static_role myrole mydb myuser 24h + + name + The name of the database role. + + connection + The name of the database connection this role applies to. + + username + The username to manage. + + rotation_period + Specifies the amount of time Vault should wait before rotating the password. + The minimum is ``5s``. + + rotation_statements + Specifies the database statements to be executed to rotate the password for the + configured database user. Not every plugin type will support this functionality. + + credential_type + Specifies the type of credential that will be generated for the role. + Options include: ``password``, ``rsa_private_key``. Defaults to ``password``. + See the plugin's API page for credential types supported by individual databases. + + credential_config + Specifies the configuration for the given ``credential_type`` as a mapping. + For ``password``, only ``password_policy`` can be passed. 
+ For ``rsa_private_key``, ``key_bits`` (defaults to 2048) and ``format`` + (defaults to ``pkcs8``) are available. + + mount + The mount path the database backend is mounted to. Defaults to ``database``. + """ + payload = { + "username": username, + "rotation_period": rotation_period, + } + if rotation_statements is not None: + payload["rotation_statements"] = rotation_statements + return _write_role( + name, + connection, + payload, + credential_type=credential_type, + credential_config=credential_config, + static=True, + mount=mount, + ) + + +def write_role( + name, + connection, + creation_statements, + default_ttl=None, + max_ttl=None, + revocation_statements=None, + rollback_statements=None, + renew_statements=None, + credential_type=None, + credential_config=None, + mount="database", +): + r""" + Create/update a regular database role. + + `API method docs `_. + + CLI Example: + + .. code-block:: bash + + salt '*' vault_db.write_role myrole mydb \ + \["CREATE USER '{{name}}'@'%' IDENTIFIED BY '{{password}}'", "GRANT SELECT ON *.* TO '{{name}}'@'%'"\] + + name + The name of the database role. + + connection + The name of the database connection this role applies to. + + creation_statements + Specifies a list of database statements executed to create and configure a user, + usually templated with {{name}} and {{password}}. Required. + + default_ttl + Specifies the TTL for the leases associated with this role. Accepts time suffixed + strings (1h) or an integer number of seconds. Defaults to system/engine default TTL time. + + max_ttl + Specifies the maximum TTL for the leases associated with this role. Accepts time suffixed + strings (1h) or an integer number of seconds. Defaults to sys/mounts's default TTL time; + this value is allowed to be less than the mount max TTL (or, if not set, + the system max TTL), but it is not allowed to be longer. + + revocation_statements + Specifies a list of database statements to be executed to revoke a user. 
+ + rollback_statements + Specifies a list of database statements to be executed to rollback a create operation + in the event of an error. Availability and formatting depend on the specific plugin. + + renew_statements + Specifies a list of database statements to be executed to renew a user. + Availability and formatting depend on the specific plugin. + + credential_type + Specifies the type of credential that will be generated for the role. + Options include: ``password``, ``rsa_private_key``. Defaults to ``password``. + See the plugin's API page for credential types supported by individual databases. + + credential_config + Specifies the configuration for the given ``credential_type`` as a mapping. + For ``password``, only ``password_policy`` can be passed. + For ``rsa_private_key``, ``key_bits`` (defaults to 2048) and ``format`` + (defaults to ``pkcs8``) are available. + + mount + The mount path the database backend is mounted to. Defaults to ``database``. + """ + payload = { + "creation_statements": creation_statements, + } + if default_ttl is not None: + payload["default_ttl"] = default_ttl + if max_ttl is not None: + payload["max_ttl"] = max_ttl + if revocation_statements is not None: + payload["revocation_statements"] = revocation_statements + if rollback_statements is not None: + payload["rollback_statements"] = rollback_statements + if renew_statements is not None: + payload["renew_statements"] = renew_statements + return _write_role( + name, + connection, + payload, + credential_type=credential_type, + credential_config=credential_config, + static=False, + mount=mount, + ) + + +def _write_role( + name, + connection, + payload, + credential_type=None, + credential_config=None, + static=False, + mount="database", +): + endpoint = f"{mount}/{'static-' if static else ''}roles/{name}" + payload["db_name"] = connection + if credential_type is not None: + payload["credential_type"] = credential_type + if credential_config is not None: + valid_cred_configs = { + 
"password": ["password_policy"], + "rsa_private_key": ["key_bits", "format"], + } + credential_type = credential_type or "password" + if credential_type in valid_cred_configs: + invalid_configs = set(credential_config) - set( + valid_cred_configs[credential_type] + ) + if invalid_configs: + raise SaltInvocationError( + f"The following options are invalid for credential type {credential_type}: {invalid_configs}" + ) + payload["credential_config"] = credential_config + try: + return vault.query("POST", endpoint, __opts__, __context__, payload=payload) + except vault.VaultException as err: + raise CommandExecutionError(f"{err.__class__}: {err}") from err + + +def delete_role(name, static=False, mount="database"): + """ + Delete a configured database role. + + `API method docs `_. + `API method docs static `_. + + CLI Example: + + .. code-block:: bash + + salt '*' vault_db.delete_role myrole + + name + The name of the database role. + + static + Whether this role is static. Defaults to False. + + mount + The mount path the database backend is mounted to. Defaults to ``database``. + """ + endpoint = f"{mount}/{'static-' if static else ''}roles/{name}" + try: + return vault.query("DELETE", endpoint, __opts__, __context__) + except vault.VaultException as err: + raise CommandExecutionError(f"{err.__class__}: {err}") from err + + +def get_creds(name, static=False, cache=True, valid_for=0, mount="database"): + """ + Read credentials based on the named role. + + `API method docs `_. + `API method docs static `_. + + CLI Example: + + .. code-block:: bash + + salt '*' vault_db.get_creds myrole + + name + The name of the database role. + + static + Whether this role is static. Defaults to False. + + cache + Whether to use cached credentials local to this minion to avoid + unnecessary reissuance. + When ``static`` is false, set this to a string to be able to use multiple + distinct credentials using the same role on the same minion. + Set this to false to disable caching. 
+ Defaults to true. + + .. note:: + + This uses the same cache backend as the Vault integration, so make + sure you configure a persistent backend like ``disk`` if you expect + the credentials to survive a single run. + + + valid_for + When using cache, ensure the credentials are valid for at least this + amount of time, otherwise request new ones. + This can be an integer, which will be interpreted as seconds, or a time string + using the same format as Vault does: + Suffix ``s`` for seconds, ``m`` for minutes, ``h`` for hours, ``d`` for days. + + mount + The mount path the database backend is mounted to. Defaults to ``database``. + """ + endpoint = f"{mount}/{'static-' if static else ''}creds/{name}" + + if cache: + ckey = f"db.{mount}.{'static' if static else 'dynamic'}.{name}" + if not static and isinstance(cache, str): + ckey += f".{cache}" + else: + ckey += ".default" + creds_cache = vault.get_lease_store(__opts__, __context__) + cached_creds = creds_cache.get(ckey, valid_for=valid_for) + if cached_creds: + return cached_creds.data + + try: + res = vault.query("GET", endpoint, __opts__, __context__) + except vault.VaultException as err: + raise CommandExecutionError(f"{err.__class__}: {err}") from err + + lease = vault.VaultLease(**res) + if cache: + creds_cache.store(ckey, lease) + return lease.data + + +def rotate_static_role(name, mount="database"): + """ + Rotate Static Role credentials stored for a given role name. + + `API method docs static `_. + + CLI Example: + + .. code-block:: bash + + salt '*' vault_db.rotate_static_role mystaticrole + + name + The name of the database role. + + mount + The mount path the database backend is mounted to. Defaults to ``database``.
+ """ + endpoint = f"{mount}/rotate-role/{name}" + try: + return vault.query("POST", endpoint, __opts__, __context__) + except vault.VaultException as err: + raise CommandExecutionError(f"{err.__class__}: {err}") from err + + +def get_plugin_name(plugin): + """ + Get the name of a plugin as rendered by this module. This is a utility for the state + module primarily and should be in a utils module. + + CLI Example: + + .. code-block:: bash + + salt '*' vault_db.get_plugin_name hanadb + + plugin + The name of the database plugin. + """ + plugin_name = PLUGINS.get(plugin, "default")["name"] or plugin + return f"{plugin_name}-database-plugin" diff --git a/salt/pillar/vault.py b/salt/pillar/vault.py index 3e7d06b6f78a..b7443d3c00a0 100644 --- a/salt/pillar/vault.py +++ b/salt/pillar/vault.py @@ -22,7 +22,7 @@ - vault: path=secret/salt Each key needs to have all the key-value pairs with the names you -require. Avoid naming every key 'password' as you they will collide: +require. Avoid naming every key 'password' as they will collide. If you want to nest results under a nesting_key name use the following format: @@ -56,7 +56,7 @@ - vault: path=secret/minions/{minion}/pass - vault: path=secret/roles/{pillar[roles]}/pass -You can also use nesting here as well. Identical nesting keys will get merged. +You can also use nesting here as well. Identical nesting keys will get merged. .. code-block:: yaml @@ -131,6 +131,7 @@ Using pillar values to template vault pillar paths requires them to be defined before the vault ext_pillar is called. Especially consider the significancy of :conf_master:`ext_pillar_first ` master config setting. +You cannot use pillar values sourced from Vault in pillar-templated policies. 
If a pillar pattern matches multiple paths, the results are merged according to the master configuration values :conf_master:`pillar_source_merging_strategy ` @@ -153,20 +154,13 @@ import logging -from requests.exceptions import HTTPError - import salt.utils.dictupdate +import salt.utils.vault as vault +from salt.exceptions import SaltException log = logging.getLogger(__name__) -def __virtual__(): - """ - This module has no external dependencies - """ - return True - - def ext_pillar( minion_id, # pylint: disable=W0613 pillar, # pylint: disable=W0613 @@ -183,7 +177,6 @@ def ext_pillar( if extra_minion_data.get("_vault_runner_is_compiling_pillar_templates"): # Disable vault ext_pillar while compiling pillar for vault policy templates return {} - comps = conf.split() paths = [comp for comp in comps if comp.startswith("path=")] @@ -195,30 +188,20 @@ def ext_pillar( "pillar_source_merging_strategy", "smart" ) merge_lists = merge_lists or __opts__.get("pillar_merge_lists", False) + vault_pillar = {} path_pattern = paths[0].replace("path=", "") for path in _get_paths(path_pattern, minion_id, pillar): try: - version2 = __utils__["vault.is_v2"](path) - if version2["v2"]: - path = version2["data"] - - url = "v1/{}".format(path) - response = __utils__["vault.make_request"]("GET", url) - response.raise_for_status() - vault_pillar_single = response.json().get("data", {}) - - if vault_pillar_single and version2["v2"]: - vault_pillar_single = vault_pillar_single["data"] - + vault_pillar_single = vault.read_kv(path, __opts__, __context__) vault_pillar = salt.utils.dictupdate.merge( vault_pillar, vault_pillar_single, strategy=merge_strategy, merge_lists=merge_lists, ) - except HTTPError: + except SaltException: log.info("Vault secret not found for: %s", path) if nesting_key: @@ -234,12 +217,10 @@ def _get_paths(path_pattern, minion_id, pillar): paths = [] try: - for expanded_pattern in __utils__["vault.expand_pattern_lists"]( - path_pattern, **mappings - ): + for expanded_pattern 
in vault.expand_pattern_lists(path_pattern, **mappings): paths.append(expanded_pattern.format(**mappings)) except KeyError: log.warning("Could not resolve pillar path pattern %s", path_pattern) - log.debug(f"{minion_id} vault pillar paths: {paths}") + log.debug("%s vault pillar paths: %s", minion_id, paths) return paths diff --git a/salt/runners/vault.py b/salt/runners/vault.py index f7c5ce37f102..88d0ca058ffd 100644 --- a/salt/runners/vault.py +++ b/salt/runners/vault.py @@ -1,6 +1,6 @@ """ Runner functions supporting the Vault modules. Configuration instructions are -documented in the execution module docs. +documented in the :ref:`execution module docs `. :maintainer: SaltStack :maturity: new @@ -9,31 +9,81 @@ import base64 import copy -import json import logging -import time +import os from collections.abc import Mapping -import requests - import salt.cache import salt.crypt import salt.exceptions import salt.pillar +import salt.utils.data +import salt.utils.immutabletypes as immutabletypes +import salt.utils.json +import salt.utils.vault as vault +import salt.utils.versions from salt.defaults import NOT_SET -from salt.exceptions import SaltRunnerError +from salt.exceptions import SaltInvocationError, SaltRunnerError log = logging.getLogger(__name__) +VALID_PARAMS = immutabletypes.freeze( + { + "approle": [ + "bind_secret_id", + "secret_id_bound_cidrs", + "secret_id_num_uses", + "secret_id_ttl", + "token_ttl", + "token_max_ttl", + "token_explicit_max_ttl", + "token_num_uses", + "token_no_default_policy", + "token_period", + "token_bound_cidrs", + ], + "token": [ + "ttl", + "period", + "explicit_max_ttl", + "num_uses", + "no_default_policy", + "renewable", + ], + } +) + +NO_OVERRIDE_PARAMS = immutabletypes.freeze( + { + "approle": [ + "bind_secret_id", + "token_policies", + "policies", + ], + "token": [ + "role_name", + "policies", + "meta", + ], + } +) + def generate_token( - minion_id, signature, impersonated_by_master=False, ttl=None, uses=None + minion_id, + 
signature, + impersonated_by_master=False, + ttl=None, + uses=None, + upgrade_request=False, ): """ - Generate a Vault token for minion minion_id + .. deprecated:: 3007.0 + + Generate a Vault token for minion . minion_id - The id of the minion that requests a token + The ID of the minion that requests a token. signature Cryptographic signature which validates that the request is indeed sent @@ -48,7 +98,22 @@ def generate_token( uses Number of times a token can be used + + upgrade_request + In case the new runner endpoints have not been whitelisted for peer running, + this endpoint serves as a gateway to ``vault.get_config``. + Defaults to False. """ + if upgrade_request: + log.warning( + "Detected minion fallback to old vault.generate_token peer run function. " + "Please update your master peer_run configuration." + ) + issue_params = {"explicit_max_ttl": ttl, "num_uses": uses} + return get_config( + minion_id, signature, impersonated_by_master, issue_params=issue_params + ) + log.debug( "Token generation request for %s (impersonated by master: %s)", minion_id, @@ -56,89 +121,374 @@ def generate_token( ) _validate_signature(minion_id, signature, impersonated_by_master) try: - config = __opts__.get("vault", {}) - verify = config.get("verify", None) - # Vault Enterprise requires a namespace - namespace = config.get("namespace") - # Allow disabling of minion provided values via the master - allow_minion_override = config["auth"].get("allow_minion_override", False) - # This preserves the previous behavior of default TTL and 1 use - if not allow_minion_override or uses is None: - uses = config["auth"].get("uses", 1) - if not allow_minion_override or ttl is None: - ttl = config["auth"].get("ttl", None) - storage_type = config["auth"].get("token_backend", "session") - policies_refresh_pillar = config.get("policies_refresh_pillar", None) - policies_cache_time = config.get("policies_cache_time", 60) - - if config["auth"]["method"] == "approle": - if _selftoken_expired(): - 
log.debug("Vault token expired. Recreating one") - # Requesting a short ttl token - url = "{}/v1/auth/approle/login".format(config["url"]) - payload = {"role_id": config["auth"]["role_id"]} - if "secret_id" in config["auth"]: - payload["secret_id"] = config["auth"]["secret_id"] - # Vault Enterprise call requires headers - headers = None - if namespace is not None: - headers = {"X-Vault-Namespace": namespace} - response = requests.post( - url, headers=headers, json=payload, verify=verify - ) - if response.status_code != 200: - return {"error": response.reason} - config["auth"]["token"] = response.json()["auth"]["client_token"] - - url = _get_token_create_url(config) - headers = {"X-Vault-Token": config["auth"]["token"]} - if namespace is not None: - headers["X-Vault-Namespace"] = namespace - audit_data = { - "saltstack-jid": globals().get("__jid__", ""), - "saltstack-minion": minion_id, - "saltstack-user": globals().get("__user__", ""), + salt.utils.versions.warn_until( + "Argon", + "vault.generate_token endpoint is deprecated. Please update your minions.", + ) + + if _config("issue:type") != "token": + log.warning( + "Master is not configured to issue tokens. Since the minion uses " + "this deprecated endpoint, issuing token anyways." 
+ ) + + issue_params = {} + if ttl is not None: + issue_params["explicit_max_ttl"] = ttl + if uses is not None: + issue_params["num_uses"] = uses + + token, _ = _generate_token( + minion_id, issue_params=issue_params or None, wrap=False + ) + ret = { + "token": token["client_token"], + "lease_duration": token["lease_duration"], + "renewable": token["renewable"], + "issued": token["creation_time"], + "url": _config("server:url"), + "verify": _config("server:verify"), + "token_backend": _config("cache:backend"), + "namespace": _config("server:namespace"), + } + if token["num_uses"] >= 0: + ret["uses"] = token["num_uses"] + + return ret + except Exception as err: # pylint: disable=broad-except + return {"error": "{}: {}".format(type(err).__name__, str(err))} + + +def generate_new_token( + minion_id, signature, impersonated_by_master=False, issue_params=None +): + """ + .. versionadded:: 3007.0 + + Generate a Vault token for minion . + + minion_id + The ID of the minion that requests a token. + + signature + Cryptographic signature which validates that the request is indeed sent + by the minion (or the master, see impersonated_by_master). + + impersonated_by_master + If the master needs to create a token on behalf of the minion, this is + True. This happens when the master generates minion pillars. + + issue_params + Dictionary of parameters for the generated tokens. + See master configuration ``vault:issue:token:params`` for possible values. + Requires ``vault:issue:allow_minion_override_params`` master configuration + setting to be effective. 
+ """ + log.debug( + "Token generation request for %s (impersonated by master: %s)", + minion_id, + impersonated_by_master, + ) + _validate_signature(minion_id, signature, impersonated_by_master) + try: + if _config("issue:type") != "token": + return {"expire_cache": True, "error": "Master does not issue tokens."} + + ret = { + "server": _config("server"), + "auth": {}, + } + + wrap = _config("issue:wrap") + token, num_uses = _generate_token( + minion_id, issue_params=issue_params, wrap=wrap + ) + + if wrap: + ret.update(token) + ret.update({"misc_data": {"num_uses": num_uses}}) + else: + ret["auth"] = token + + return ret + except Exception as err: # pylint: disable=broad-except + return {"error": "{}: {}".format(type(err).__name__, str(err))} + + +def _generate_token(minion_id, issue_params, wrap): + endpoint = "auth/token/create" + if _config("issue:token:role_name") is not None: + endpoint += "/" + _config("issue:token:role_name") + + payload = _parse_issue_params(issue_params, issue_type="token") + payload["policies"] = _get_policies_cached( + minion_id, + refresh_pillar=_config("policies:refresh_pillar"), + expire=_config("policies:cache_time"), + ) + + if not payload["policies"]: + raise SaltRunnerError("No policies matched minion.") + + payload["meta"] = _get_metadata(minion_id, _config("metadata:secret")) + client = _get_master_client() + log.trace("Sending token creation request to Vault.") + res = client.post(endpoint, payload=payload, wrap=wrap) + + if wrap: + return res.serialize_for_minion(), payload["num_uses"] + if "num_uses" not in res["auth"]: + # older vault versions do not include num_uses in output + res["auth"]["num_uses"] = payload["num_uses"] + token = vault.VaultToken(**res["auth"]) + return token.serialize_for_minion(), payload["num_uses"] + + +def get_config(minion_id, signature, impersonated_by_master=False, issue_params=None): + """ + .. versionadded:: 3007.0 + + Return Vault configuration for minion . 
+ + minion_id + The ID of the minion that requests the configuration. + + signature + Cryptographic signature which validates that the request is indeed sent + by the minion (or the master, see impersonated_by_master). + + impersonated_by_master + If the master needs to contact the Vault server on behalf of the minion, this is + True. This happens when the master generates minion pillars. + + issue_params + Parameters for credential issuance. + Requires ``vault:issue:allow_minion_override_params`` master configuration + setting to be effective. + """ + log.debug( + "Config request for %s (impersonated by master: %s)", + minion_id, + impersonated_by_master, + ) + _validate_signature(minion_id, signature, impersonated_by_master) + try: + minion_config = { + "auth": { + "method": _config("issue:type"), + }, + "cache": _config("cache"), + "server": _config("server"), + "wrap_info_nested": [], } - payload = { - "policies": _get_policies_cached( + wrap = _config("issue:wrap") + + if _config("issue:type") == "token": + minion_config["auth"]["token"], num_uses = _generate_token( minion_id, - config, - refresh_pillar=policies_refresh_pillar, - expire=policies_cache_time, - ), - "num_uses": uses, - "meta": audit_data, + issue_params=issue_params, + wrap=wrap, + ) + if wrap: + minion_config["wrap_info_nested"].append("auth:token") + minion_config.update({"misc_data": {"token:num_uses": num_uses}}) + if _config("issue:type") == "approle": + minion_config["auth"]["approle_mount"] = _config("issue:approle:mount") + minion_config["auth"]["approle_name"] = minion_id + minion_config["auth"]["secret_id"] = _config( + "issue:approle:params:bind_secret_id" + ) + minion_config["auth"]["role_id"] = _get_role_id( + minion_id, issue_params=issue_params, wrap=wrap + ) + if wrap: + minion_config["wrap_info_nested"].append("auth:role_id") + + return minion_config + except Exception as err: # pylint: disable=broad-except + return {"error": "{}: {}".format(type(err).__name__, str(err))} + + 
+def get_role_id(minion_id, signature, impersonated_by_master=False, issue_params=None): + """ + .. versionadded:: 3007.0 + + Return the Vault role-id for minion . Requires the master to be configured + to generate AppRoles for minions (configuration: ``vault:issue:type``). + + minion_id + The ID of the minion that requests a role-id. + + signature + Cryptographic signature which validates that the request is indeed sent + by the minion (or the master, see impersonated_by_master). + + impersonated_by_master + If the master needs to create a token on behalf of the minion, this is + True. This happens when the master generates minion pillars. + + issue_params + Dictionary of configuration values for the generated AppRole. + See master configuration vault:issue:approle:params for possible values. + Requires ``vault:issue:allow_minion_override_params`` master configuration + setting to be effective. + """ + log.debug( + "role-id request for %s (impersonated by master: %s)", + minion_id, + impersonated_by_master, + ) + _validate_signature(minion_id, signature, impersonated_by_master) + + try: + if _config("issue:type") != "approle": + return {"expire_cache": True, "error": "Master does not issue AppRoles."} + + ret = { + "server": _config("server"), + "data": {}, } - if ttl is not None: - payload["explicit_max_ttl"] = str(ttl) + wrap = _config("issue:wrap") + role_id = _get_role_id(minion_id, issue_params=issue_params, wrap=wrap) + if wrap: + ret.update(role_id) + else: + ret["data"]["role_id"] = role_id + return ret + except Exception as err: # pylint: disable=broad-except + return {"error": "{}: {}".format(type(err).__name__, str(err))} + + +def _get_role_id(minion_id, issue_params, wrap): + approle = _lookup_approle_cached(minion_id) + issue_params_parsed = _parse_issue_params(issue_params) - if payload["policies"] == []: - return {"error": "No policies matched minion"} + if approle is False or ( + vault._get_salt_run_type(__opts__) != 
vault.SALT_RUNTYPE_MASTER_IMPERSONATING + and not _approle_params_match(approle, issue_params_parsed) + ): + # This means the role has to be created/updated first + # create/update AppRole with role name + # token_policies are set on the AppRole + log.debug("Managing AppRole for %s.", minion_id) + _manage_approle(minion_id, issue_params) + # Make sure cached data is refreshed. Clearing the cache would suffice + # here, but this branch should not be hit too often, so opt for simplicity. + _lookup_approle_cached(minion_id, refresh=True) - log.trace("Sending token creation request to Vault") - response = requests.post(url, headers=headers, json=payload, verify=verify) + role_id = _lookup_role_id(minion_id, wrap=wrap) + if role_id is False: + raise SaltRunnerError(f"Failed to create AppRole for minion {minion_id}.") - if response.status_code != 200: - return {"error": response.reason} + if approle is False: + # This means the AppRole has just been created + # create/update entity with name salt_minion_ + # metadata is set on the entity (to allow policy path templating) + _manage_entity(minion_id) + # ensure the new AppRole is mapped to the entity + _manage_entity_alias(minion_id) + + if wrap: + return role_id.serialize_for_minion() + + return role_id + + +def _approle_params_match(current, issue_params): + """ + Check if minion-overridable AppRole parameters match + """ + req = _parse_issue_params(issue_params) + for var in set(VALID_PARAMS["approle"]) - set(NO_OVERRIDE_PARAMS["approle"]): + if var in req and req[var] != current.get(var, NOT_SET): + return False + return True + + +def generate_secret_id( + minion_id, signature, impersonated_by_master=False, issue_params=None +): + """ + .. versionadded:: 3007.0 + + Generate a Vault secret ID for minion . Requires the master to be configured + to generate AppRoles for minions (configuration: ``vault:issue:type``). + + minion_id + The ID of the minion that requests a secret ID. 
+ + signature + Cryptographic signature which validates that the request is indeed sent + by the minion (or the master, see impersonated_by_master). + + impersonated_by_master + If the master needs to create a token on behalf of the minion, this is + True. This happens when the master generates minion pillars. + + issue_params + Dictionary of configuration values for the generated AppRole. + See master configuration vault:issue:approle:params for possible values. + Requires ``vault:issue:allow_minion_override_params`` master configuration + setting to be effective. + """ + log.debug( + "Secret ID generation request for %s (impersonated by master: %s)", + minion_id, + impersonated_by_master, + ) + _validate_signature(minion_id, signature, impersonated_by_master) + try: + if _config("issue:type") != "approle": + return { + "expire_cache": True, + "error": "Master does not issue AppRoles nor secret IDs.", + } + + approle_meta = _lookup_approle_cached(minion_id) + if approle_meta is False: + raise vault.VaultNotFoundError(f"No AppRole found for minion {minion_id}.") + + if vault._get_salt_run_type( + __opts__ + ) != vault.SALT_RUNTYPE_MASTER_IMPERSONATING and not _approle_params_match( + approle_meta, issue_params + ): + _manage_approle(minion_id, issue_params) + approle_meta = _lookup_approle_cached(minion_id, refresh=True) + + if not approle_meta["bind_secret_id"]: + return { + "expire_cache": True, + "error": "Minion AppRole does not require a secret ID.", + } - auth_data = response.json()["auth"] ret = { - "token": auth_data["client_token"], - "lease_duration": auth_data["lease_duration"], - "renewable": auth_data["renewable"], - "issued": int(round(time.time())), - "url": config["url"], - "verify": verify, - "token_backend": storage_type, - "namespace": namespace, + "server": _config("server"), + "data": {}, } - if uses >= 0: - ret["uses"] = uses + wrap = _config("issue:wrap") + secret_id = _get_secret_id(minion_id, wrap=wrap) + + if wrap: + ret.update(secret_id) 
+ else: + ret["data"] = secret_id.serialize_for_minion() + + ret["misc_data"] = { + "secret_id_num_uses": approle_meta["secret_id_num_uses"], + } return ret - except Exception as e: # pylint: disable=broad-except - return {"error": str(e)} + except vault.VaultNotFoundError as err: + # when the role does not exist, make sure the minion requests + # new configuration details to generate one + return { + "expire_cache": True, + "error": "{}: {}".format(type(err).__name__, str(err)), + } + except Exception as err: # pylint: disable=broad-except + return {"error": "{}: {}".format(type(err).__name__, str(err))} def unseal(): @@ -165,9 +515,9 @@ def unseal(): salt-run vault.unseal """ for key in __opts__["vault"]["keys"]: - ret = __utils__["vault.make_request"]( - "PUT", "v1/sys/unseal", data=json.dumps({"key": key}) - ).json() + ret = vault.query( + "POST", "sys/unseal", __opts__, __context__, payload={"key": key} + ) if ret["sealed"] is False: return True return False @@ -178,19 +528,25 @@ def show_policies(minion_id, refresh_pillar=NOT_SET, expire=None): Show the Vault policies that are applied to tokens for the given minion. minion_id - The minion's id. + The ID of the minion to show policies for. refresh_pillar Whether to refresh the pillar data when rendering templated policies. None will only refresh when the cached data is unavailable, boolean values force one behavior always. - Defaults to config value ``policies_refresh_pillar`` or None. + Defaults to config value ``vault:policies:refresh_pillar`` or None. expire Policy computation can be heavy in case pillar data is used in templated policies and it has not been cached. Therefore, a short-lived cache specifically for rendered policies is used. This specifies the expiration timeout in seconds. - Defaults to config value ``policies_cache_time`` or 60. + Defaults to config value ``vault:policies:cache_time`` or 60. + + .. 
note:: + + When issuing AppRoles to minions, the shown policies are read from Vault + configuration for the minion's AppRole and thus refresh_pillar/expire + will not be honored. CLI Example: @@ -198,13 +554,267 @@ def show_policies(minion_id, refresh_pillar=NOT_SET, expire=None): salt-run vault.show_policies myminion """ - config = __opts__.get("vault", {}) + if _config("issue:type") == "approle": + meta = _lookup_approle(minion_id) + return meta["token_policies"] + if refresh_pillar == NOT_SET: - refresh_pillar = config.get("policies_refresh_pillar") - expire = expire if expire is not None else config.get("policies_cache_time", 60) - return _get_policies_cached( - minion_id, config, refresh_pillar=refresh_pillar, expire=expire - ) + refresh_pillar = _config("policies:refresh_pillar") + expire = expire if expire is not None else _config("policies:cache_time") + return _get_policies_cached(minion_id, refresh_pillar=refresh_pillar, expire=expire) + + +def sync_approles(minions=None, up=False, down=False): + """ + Sync minion AppRole parameters with current settings, including associated + token policies. + + .. note:: + Only updates existing AppRoles. They are issued during the first request + for one by the minion. + Running this will reset minion overrides, which are reapplied automatically + during the next request for authentication details. + + .. note:: + Unlike when issuing tokens, AppRole-associated policies are not regularly + refreshed automatically. It is advised to schedule regular runs of this function. + + If no parameter is specified, will try to sync AppRoles for all known minions. + + CLI Example: + + .. code-block:: bash + + salt-run vault.sync_approles + salt-run vault.sync_approles ecorp + + minions + (List of) ID(s) of the minion(s) to update the AppRole for. + Defaults to None. + + up + Find all minions that are up and update their AppRoles. + Defaults to False. + + down + Find all minions that are down and update their AppRoles. 
+ Defaults to False. + """ + if _config("issue:type") != "approle": + raise SaltRunnerError("Master does not issue AppRoles to minions.") + if minions is not None: + if not isinstance(minions, list): + minions = [minions] + elif up or down: + minions = [] + if up: + minions.extend(__salt__["manage.list_state"]()) + if down: + minions.extend(__salt__["manage.list_not_state"]()) + else: + minions = _list_all_known_minions() + + for minion in set(minions) & set(list_approles()): + _manage_approle(minion, issue_params=None) + return True + + +def list_approles(): + """ + List all AppRoles that have been created by the Salt master. + They are named after the minions. + + CLI Example: + + .. code-block:: bash + + salt-run vault.list_approles + """ + if _config("issue:type") != "approle": + raise SaltRunnerError("Master does not issue AppRoles to minions.") + endpoint = "auth/{}/role".format(_config("issue:approle:mount")) + client = _get_master_client() + return client.list(endpoint)["data"]["keys"] + + +def sync_entities(minions=None, up=False, down=False): + """ + Sync minion entities with current settings. Only updates entities for minions + with existing AppRoles. + + .. note:: + This updates associated metadata only. Entities are created only + when issuing AppRoles to minions (``vault:issue:type`` == ``approle``). + + If no parameter is specified, will try to sync entities for all known minions. + + CLI Example: + + .. code-block:: bash + + salt-run vault.sync_entities + + minions + (List of) ID(s) of the minion(s) to update the entity for. + Defaults to None. + + up + Find all minions that are up and update their associated entities. + Defaults to False. + + down + Find all minions that are down and update their associated entities. + Defaults to False. + """ + if _config("issue:type") != "approle": + raise SaltRunnerError( + "Master is not configured to issue AppRoles to minions, which is a " + "requirement to use managed entities with Salt." 
+ ) + if minions is not None: + if not isinstance(minions, list): + minions = [minions] + elif up or down: + minions = [] + if up: + minions.extend(__salt__["manage.list_state"]()) + if down: + minions.extend(__salt__["manage.list_not_state"]()) + else: + minions = _list_all_known_minions() + + for minion in set(minions) & set(list_approles()): + _manage_entity(minion) + entity = _lookup_entity_by_alias(minion) + if not entity or entity["name"] != f"salt_minion_{minion}": + log.info( + "Fixing association of minion AppRole to minion entity for %s.", minion + ) + _manage_entity_alias(minion) + return True + + +def list_entities(): + """ + List all entities that have been created by the Salt master. + They are named `salt_minion_{minion_id}`. + + CLI Example: + + .. code-block:: bash + + salt-run vault.list_entities + """ + if _config("issue:type") != "approle": + raise SaltRunnerError("Master does not issue AppRoles to minions.") + endpoint = "identity/entity/name" + client = _get_master_client() + entities = client.list(endpoint)["data"]["keys"] + return [x for x in entities if x.startswith("salt_minion_")] + + +def show_entity(minion_id): + """ + Show entity metadata for . + + CLI Example: + + .. code-block:: bash + + salt-run vault.show_entity db1 + """ + if _config("issue:type") != "approle": + raise SaltRunnerError("Master does not issue AppRoles to minions.") + endpoint = f"identity/entity/name/salt_minion_{minion_id}" + client = _get_master_client() + return client.get(endpoint)["data"]["metadata"] + + +def show_approle(minion_id): + """ + Show AppRole configuration for . + + CLI Example: + + .. 
code-block:: bash + + salt-run vault.show_approle db1 + """ + if _config("issue:type") != "approle": + raise SaltRunnerError("Master does not issue AppRoles to minions.") + endpoint = "auth/{}/role/{}".format(_config("issue:approle:mount"), minion_id) + client = _get_master_client() + return client.get(endpoint)["data"] + + +def cleanup_auth(): + """ + Removes AppRoles and entities associated with unknown minion IDs. + Can only clean up entities if the AppRole still exists. + + .. warning:: + Make absolutely sure that the configured minion approle issue mount is + exclusively dedicated to the Salt master, otherwise you might lose data + by using this function! (config: ``vault:issue:approle:mount``) + + This detects unknown existing AppRoles by listing all roles on the + configured minion AppRole mount and deducting known minions from the + returned list. + + CLI Example: + + .. code-block:: bash + + salt-run vault.cleanup_auth + """ + ret = {"approles": [], "entities": []} + + for minion in set(list_approles()) - set(_list_all_known_minions()): + if _fetch_entity_by_name(minion): + _delete_entity(minion) + ret["entities"].append(minion) + _delete_approle(minion) + ret["approles"].append(minion) + return {"deleted": ret} + + +def clear_cache(): + """ + Clears master cache of Vault-specific data. This can include: + - AppRole metadata + - rendered policies + - cached authentication credentials for impersonated minions + - cached KV metadata for impersonated minions + + CLI Example: + + .. 
code-block:: bash + + salt-run vault.clear_cache + """ + cache = salt.cache.factory(__opts__) + cache.flush("vault") + for minion in cache.list("minions"): + cache.flush(f"minions/{minion}/vault") + + +def _config(key=None, default=vault.VaultException): + ckey = "vault_master_config" + if ckey not in __context__: + __context__[ckey] = vault.parse_config(__opts__.get("vault", {})) + + if key is None: + return __context__[ckey] + val = salt.utils.data.traverse_dict(__context__[ckey], key, default) + if val is vault.VaultException: + raise vault.VaultException( + f"Requested configuration value {key} does not exist." + ) + return val + + +def _list_all_known_minions(): + return os.listdir(__opts__["pki_dir"] + "/minions") def _validate_signature(minion_id, signature, impersonated_by_master): @@ -214,38 +824,33 @@ def _validate_signature(minion_id, signature, impersonated_by_master): """ pki_dir = __opts__["pki_dir"] if impersonated_by_master: - public_key = "{}/master.pub".format(pki_dir) + public_key = f"{pki_dir}/master.pub" else: - public_key = "{}/minions/{}".format(pki_dir, minion_id) + public_key = f"{pki_dir}/minions/{minion_id}" log.trace("Validating signature for %s", minion_id) signature = base64.b64decode(signature) if not salt.crypt.verify_signature(public_key, minion_id, signature): raise salt.exceptions.AuthenticationError( - "Could not validate token request from {}".format(minion_id) + f"Could not validate token request from {minion_id}" ) log.trace("Signature ok") # **kwargs because salt.cache.Cache does not pop "expire" from kwargs def _get_policies( - minion_id, config, refresh_pillar=None, **kwargs + minion_id, refresh_pillar=None, **kwargs ): # pylint: disable=unused-argument """ - Get the policies that should be applied to a token for minion_id + Get the policies that should be applied to a token for """ grains, pillar = _get_minion_data(minion_id, refresh_pillar) - policy_patterns = config.get( - "policies", ["saltstack/minion/{minion}", 
"saltstack/minions"] - ) mappings = {"minion": minion_id, "grains": grains, "pillar": pillar} policies = [] - for pattern in policy_patterns: + for pattern in _config("policies:assign"): try: - for expanded_pattern in __utils__["vault.expand_pattern_lists"]( - pattern, **mappings - ): + for expanded_pattern in vault.expand_pattern_lists(pattern, **mappings): policies.append( expanded_pattern.format(**mappings).lower() # Vault requirement ) @@ -258,10 +863,10 @@ def _get_policies( return policies -def _get_policies_cached(minion_id, config, refresh_pillar=None, expire=60): +def _get_policies_cached(minion_id, refresh_pillar=None, expire=60): # expiration of 0 disables cache if not expire: - return _get_policies(minion_id, config, refresh_pillar=refresh_pillar) + return _get_policies(minion_id, refresh_pillar=refresh_pillar) cbank = f"minions/{minion_id}/vault" ckey = "policies" cache = salt.cache.factory(__opts__) @@ -271,7 +876,6 @@ def _get_policies_cached(minion_id, config, refresh_pillar=None, expire=60): _get_policies, expire=expire, minion_id=minion_id, - config=config, refresh_pillar=refresh_pillar, ) if not isinstance(policies, list): @@ -283,7 +887,6 @@ def _get_policies_cached(minion_id, config, refresh_pillar=None, expire=60): _get_policies, expire=expire, minion_id=minion_id, - config=config, refresh_pillar=refresh_pillar, ) return policies @@ -331,39 +934,280 @@ def _get_minion_data(minion_id, refresh_pillar=None): return grains, pillar -def _selftoken_expired(): - """ - Validate the current token exists and is still valid - """ +def _get_metadata(minion_id, metadata_patterns, refresh_pillar=None): + _, pillar = _get_minion_data(minion_id, refresh_pillar) + mappings = { + "minion": minion_id, + "pillar": pillar, + "jid": globals().get("__jid__", ""), + "user": globals().get("__user__", ""), + } + metadata = {} + for key, pattern in metadata_patterns.items(): + metadata[key] = [] + try: + for expanded_pattern in vault.expand_pattern_lists(pattern, 
**mappings): + metadata[key].append(expanded_pattern.format(**mappings)) + except KeyError: + log.warning( + "Could not resolve metadata pattern %s for minion %s", + pattern, + minion_id, + ) + # Since composite values are disallowed for metadata, + # at least ensure the order of the comma-separated string + # is predictable + metadata[key].sort() + + log.debug("%s metadata: %s", minion_id, metadata) + return {k: ",".join(v) for k, v in metadata.items()} + + +def _parse_issue_params(params, issue_type=None): + if not _config("issue:allow_minion_override_params") or not isinstance( + params, dict + ): + params = {} + + # issue_type is used to override the configured type for minions using the old endpoint + # TODO: remove this once the endpoint has been removed + issue_type = issue_type or _config("issue:type") + + if issue_type not in VALID_PARAMS: + raise SaltRunnerError( + "Invalid configuration for minion Vault authentication issuance." + ) + + configured_params = _config(f"issue:{issue_type}:params") + ret = {} + + for valid_param in VALID_PARAMS[issue_type]: + if ( + valid_param in configured_params + and configured_params[valid_param] is not None + ): + ret[valid_param] = configured_params[valid_param] + if ( + valid_param in params + and valid_param not in NO_OVERRIDE_PARAMS[issue_type] + and params[valid_param] is not None + ): + ret[valid_param] = params[valid_param] + + return ret + + +def _manage_approle(minion_id, issue_params): + endpoint = "auth/{}/role/{}".format(_config("issue:approle:mount"), minion_id) + payload = _parse_issue_params(issue_params) + # When the entity is managed during the same run, this can result in a duplicate + # pillar refresh. Potential for optimization. 
+ payload["token_policies"] = _get_policies(minion_id, refresh_pillar=True) + client = _get_master_client() + log.debug("Creating/updating AppRole for minion %s.", minion_id) + return client.post(endpoint, payload=payload) + + +def _delete_approle(minion_id): + endpoint = "auth/{}/role/{}".format(_config("issue:approle:mount"), minion_id) + client = _get_master_client() + log.debug("Deleting approle for minion %s.", minion_id) + return client.delete(endpoint) + + +def _lookup_approle(minion_id, **kwargs): # pylint: disable=unused-argument + endpoint = "auth/{}/role/{}".format(_config("issue:approle:mount"), minion_id) + client = _get_master_client() try: - verify = __opts__["vault"].get("verify", None) - # Vault Enterprise requires a namespace - namespace = __opts__["vault"].get("namespace") - url = "{}/v1/auth/token/lookup-self".format(__opts__["vault"]["url"]) - if "token" not in __opts__["vault"]["auth"]: - return True - headers = {"X-Vault-Token": __opts__["vault"]["auth"]["token"]} - # Add Vault namespace to headers if Vault Enterprise enabled - if namespace is not None: - headers["X-Vault-Namespace"] = namespace - response = requests.get(url, headers=headers, verify=verify) - if response.status_code != 200: - return True + return client.get(endpoint)["data"] + except vault.VaultNotFoundError: return False - except Exception as e: # pylint: disable=broad-except - raise salt.exceptions.CommandExecutionError( - "Error while looking up self token : {}".format(str(e)) + + +def _lookup_approle_cached(minion_id, expire=3600, refresh=False): + # expiration of 0 disables cache + if not expire: + return _lookup_approle(minion_id) + cbank = f"minions/{minion_id}/vault" + ckey = "approle_meta" + cache = salt.cache.factory(__opts__) + if refresh: + cache.flush(cbank, ckey) + meta = cache.cache( + cbank, + ckey, + _lookup_approle, + expire=expire, + minion_id=minion_id, + ) + if not isinstance(meta, dict): + log.warning( + "Cached Vault AppRole meta information was not 
formed as a dictionary. Refreshing." ) + cache.flush(cbank, ckey) + meta = cache.cache( + cbank, + ckey, + _lookup_approle, + expire=expire, + minion_id=minion_id, + ) + # Falsey values are always refreshed by salt.cache.Cache + return meta + + +def _lookup_role_id(minion_id, wrap): + client = _get_master_client() + endpoint = "auth/{}/role/{}/role-id".format( + _config("issue:approle:mount"), minion_id + ) + try: + role_id = client.get(endpoint, wrap=wrap) + except vault.VaultNotFoundError: + return False + if wrap: + return role_id + return role_id["data"]["role_id"] -def _get_token_create_url(config): + +def _get_secret_id(minion_id, wrap, meta_info=False): + payload = { + "meta": salt.utils.json.dumps( + _get_metadata(minion_id, _config("metadata:secret")) + ) + } + client = _get_master_client() + endpoint = "auth/{}/role/{}/secret-id".format( + _config("issue:approle:mount"), minion_id + ) + response = client.post(endpoint, payload=payload, wrap=wrap) + if wrap: + # Wrapped responses are always VaultWrappedResponse objects + secret_id = response.serialize_for_minion() + accessor = response.wrapped_accessor + else: + secret_id = vault.VaultSecretId(**response["data"]) + accessor = response["data"]["secret_id_accessor"] + if not meta_info: + return secret_id + # Sadly, secret_id_num_uses is not part of the information returned + meta_info = client.post( + endpoint + "-accessor/lookup", payload={"secret_id_accessor": accessor} + )["data"] + + return secret_id, meta_info + + +def _lookup_mount_accessor(mount): + log.debug("Looking up mount accessor ID for mount %s.", mount) + endpoint = f"sys/auth/{mount}" + client = _get_master_client() + return client.get(endpoint)["accessor"] + + +def _lookup_entity_by_alias(minion_id): """ - Create Vault url for token creation + This issues a lookup for the entity using the role-id and mount accessor, + thus verifies that an entity and associated entity alias exists. 
""" - role_name = config.get("role_name", None) - auth_path = "/v1/auth/token/create" - base_url = config["url"] - return "/".join(x.strip("/") for x in (base_url, auth_path, role_name) if x) + minion_mount_accessor = _lookup_mount_accessor(_config("issue:approle:mount")) + role_id = _lookup_role_id(minion_id, wrap=False) + client = _get_master_client() + endpoint = "identity/lookup/entity" + payload = { + "alias_name": role_id, + "alias_mount_accessor": minion_mount_accessor, + } + entity = client.post(endpoint, payload=payload) + if isinstance(entity, dict): + return entity["data"] + return False + + +def _fetch_entity_by_name(minion_id): + client = _get_master_client() + endpoint = f"identity/entity/name/salt_minion_{minion_id}" + try: + return client.get(endpoint)["data"] + except vault.VaultNotFoundError: + return False + + +def _manage_entity(minion_id): + endpoint = f"identity/entity/name/salt_minion_{minion_id}" + # When the approle is managed during the same run, this can result in a duplicate + # pillar refresh. Potential for optimization. + payload = { + "metadata": _get_metadata( + minion_id, _config("metadata:entity"), refresh_pillar=True + ), + } + client = _get_master_client() + client.post(endpoint, payload=payload) + + +def _delete_entity(minion_id): + endpoint = f"identity/entity/name/salt_minion_{minion_id}" + client = _get_master_client() + client.delete(endpoint) + + +def _manage_entity_alias(minion_id): + log.debug("Creating entity alias for minion %s.", minion_id) + minion_mount_accessor = _lookup_mount_accessor(_config("issue:approle:mount")) + role_id = _lookup_role_id(minion_id, wrap=False) + entity = _fetch_entity_by_name(minion_id) + if not entity: + raise SaltRunnerError( + f"There is no entity to create an alias for for minion {minion_id}." 
+ ) + payload = { + "canonical_id": entity["id"], + "mount_accessor": minion_mount_accessor, + "name": str(role_id), + } + for alias in entity["aliases"]: + if alias["mount_accessor"] == minion_mount_accessor: + payload["id"] = alias["id"] + client = _get_master_client() + client.post("identity/entity-alias", payload=payload) + + +def _get_master_client(): + # force_local is necessary when issuing credentials while impersonating + # minions since the opts dict cannot be used to distinguish master from + # minion in that case + client = vault.get_authd_client(__opts__, __context__, force_local=True) + return client + + +def _revoke_token(token=None, accessor=None): + if not token and not accessor: + raise SaltInvocationError("Need either token or accessor to revoke token.") + endpoint = "auth/token/revoke" + if token: + payload = {"token": token} + else: + endpoint += "-accessor" + payload = {"accessor": accessor} + client = _get_master_client() + return client.post(endpoint, payload=payload) + + +def _destroy_secret_id(minion_id, mount, secret_id=None, accessor=None): + if not secret_id and not accessor: + raise SaltInvocationError( + "Need either secret_id or accessor to destroy secret ID." + ) + if secret_id: + endpoint = f"auth/{mount}/role/{minion_id}/secret-id/destroy" + payload = {"secret_id": str(secret_id)} + else: + endpoint = f"auth/{mount}/role/{minion_id}/secret-id-accessor/destroy" + payload = {"secret_id_accessor": accessor} + client = _get_master_client() + return client.post(endpoint, payload=payload) class LazyPillar(Mapping): diff --git a/salt/sdb/vault.py b/salt/sdb/vault.py index 08360e2d84fb..70efa057a072 100644 --- a/salt/sdb/vault.py +++ b/salt/sdb/vault.py @@ -9,7 +9,7 @@ This module allows access to Hashicorp Vault using an ``sdb://`` URI. -Base configuration instructions are documented in the execution module docs. +Base configuration instructions are documented in the :ref:`execution module docs `. 
Below are noted extra configuration required for the sdb module, but the base configuration must also be completed. @@ -43,6 +43,7 @@ import logging import salt.exceptions +import salt.utils.vault as vault log = logging.getLogger(__name__) @@ -59,61 +60,34 @@ def set_(key, value, profile=None): path, key = key.rsplit("/", 1) data = {key: value} - version2 = __utils__["vault.is_v2"](path) - if version2["v2"]: - path = version2["data"] - data = {"data": data} - try: - url = "v1/{}".format(path) - response = __utils__["vault.make_request"]("POST", url, json=data) - - if response.status_code != 204: - response.raise_for_status() + vault.write_kv(path, data, __opts__, __context__) return True - except Exception as e: # pylint: disable=broad-except - log.error("Failed to write secret! %s: %s", type(e).__name__, e) - raise salt.exceptions.CommandExecutionError(e) + except Exception as err: # pylint: disable=broad-except + log.error("Failed to write secret! %s: %s", type(err).__name__, err) + raise salt.exceptions.CommandExecutionError(err) from err def get(key, profile=None): """ Get a value from the vault service """ + full_path = key if "?" 
in key: path, key = key.split("?") else: path, key = key.rsplit("/", 1) - version2 = __utils__["vault.is_v2"](path) - if version2["v2"]: - path = version2["data"] - try: - url = "v1/{}".format(path) - response = __utils__["vault.make_request"]("GET", url) - if response.status_code == 404: - if version2["v2"]: - path = version2["data"] + "/" + key - url = "v1/{}".format(path) - response = __utils__["vault.make_request"]("GET", url) - if response.status_code == 404: - return None - else: - return None - if response.status_code != 200: - response.raise_for_status() - data = response.json()["data"] - - if version2["v2"]: - if key in data["data"]: - return data["data"][key] - else: - return data["data"] - else: - if key in data: - return data[key] + try: + res = vault.read_kv(path, __opts__, __context__) + if key in res: + return res[key] + return None + except vault.VaultNotFoundError: + return vault.read_kv(full_path, __opts__, __context__) + except vault.VaultNotFoundError: return None - except Exception as e: # pylint: disable=broad-except - log.error("Failed to read secret! %s: %s", type(e).__name__, e) - raise salt.exceptions.CommandExecutionError(e) + except Exception as err: # pylint: disable=broad-except + log.error("Failed to read secret! %s: %s", type(err).__name__, err) + raise salt.exceptions.CommandExecutionError(err) from err diff --git a/salt/states/vault.py b/salt/states/vault.py index 54de5b8f435a..a89513d334f2 100644 --- a/salt/states/vault.py +++ b/salt/states/vault.py @@ -1,6 +1,7 @@ """ States for managing Hashicorp Vault. -Currently handles policies. Configuration instructions are documented in the execution module docs. +Currently handles policies. +Configuration instructions are documented in the :ref:`execution module docs `. 
:maintainer: SaltStack :maturity: new @@ -13,6 +14,8 @@ import difflib import logging +from salt.exceptions import CommandExecutionError + log = logging.getLogger(__name__) @@ -41,85 +44,88 @@ def policy_present(name, rules): } """ - url = "v1/sys/policy/{}".format(name) - response = __utils__["vault.make_request"]("GET", url) + ret = {"name": name, "changes": {}, "result": True, "comment": ""} + try: - if response.status_code == 200: - return _handle_existing_policy(name, rules, response.json()["rules"]) - elif response.status_code == 404: - return _create_new_policy(name, rules) - else: - response.raise_for_status() - except Exception as e: # pylint: disable=broad-except - return { - "name": name, - "changes": {}, - "result": False, - "comment": "Failed to get policy: {}".format(e), - } + existing_rules = __salt__["vault.policy_fetch"](name) + except CommandExecutionError as err: + ret["result"] = False + ret["comment"] = f"Failed to read policy: {err}" + return ret + if existing_rules == rules: + ret["comment"] = "Policy exists, and has the correct content" + return ret + + diff = "".join( + difflib.unified_diff( + (existing_rules or "").splitlines(True), rules.splitlines(True) + ) + ) + + ret["changes"] = {name: diff} -def _create_new_policy(name, rules): if __opts__["test"]: - return { - "name": name, - "changes": {name: {"old": "", "new": rules}}, - "result": None, - "comment": "Policy would be created", - } + ret["result"] = None + ret["comment"] = "Policy would be " + ( + "created" if existing_rules is None else "updated" + ) + return ret - payload = {"rules": rules} - url = "v1/sys/policy/{}".format(name) - response = __utils__["vault.make_request"]("PUT", url, json=payload) - if response.status_code not in [200, 204]: + try: + __salt__["vault.policy_write"](name, rules) + ret["comment"] = "Policy has been " + ( + "created" if existing_rules is None else "updated" + ) + return ret + except CommandExecutionError as err: return { "name": name, "changes": {}, 
"result": False, - "comment": "Failed to create policy: {}".format(response.reason), + "comment": f"Failed to write policy: {err}", } - return { - "name": name, - "result": True, - "changes": {name: {"old": None, "new": rules}}, - "comment": "Policy was created", - } +def policy_absent(name): + """ + Ensure a Vault policy with the given name and rules is absent. -def _handle_existing_policy(name, new_rules, existing_rules): - ret = {"name": name} - if new_rules == existing_rules: - ret["result"] = True - ret["changes"] = {} - ret["comment"] = "Policy exists, and has the correct content" + name + The name of the policy + """ + ret = {"name": name, "changes": {}, "result": True, "comment": ""} + + try: + existing_rules = __salt__["vault.policy_fetch"](name) + except CommandExecutionError as err: + ret["result"] = False + ret["comment"] = f"Failed to read policy: {err}" return ret - change = "".join( - difflib.unified_diff( - existing_rules.splitlines(True), new_rules.splitlines(True) - ) - ) + if existing_rules is None: + ret["comment"] = "Policy is already absent" + return ret + + ret["changes"] = {"deleted": name} + if __opts__["test"]: ret["result"] = None - ret["changes"] = {name: {"change": change}} - ret["comment"] = "Policy would be changed" + ret["comment"] = "Policy would be deleted" return ret - payload = {"rules": new_rules} - - url = "v1/sys/policy/{}".format(name) - response = __utils__["vault.make_request"]("PUT", url, json=payload) - if response.status_code not in [200, 204]: + try: + if not __salt__["vault.policy_delete"](name): + raise CommandExecutionError( + "Policy was initially reported as existent, but seemed to be " + "absent while deleting." 
+ ) + ret["comment"] = "Policy has been deleted" + return ret + except CommandExecutionError as err: return { "name": name, "changes": {}, "result": False, - "comment": "Failed to change policy: {}".format(response.reason), + "comment": f"Failed to delete policy: {err}", } - - ret["result"] = True - ret["changes"] = {name: {"change": change}} - ret["comment"] = "Policy was updated" - - return ret diff --git a/salt/states/vault_db.py b/salt/states/vault_db.py new file mode 100644 index 000000000000..6db8352786fe --- /dev/null +++ b/salt/states/vault_db.py @@ -0,0 +1,545 @@ +""" +Manage the Vault database secret engine. + +Configuration instructions are documented in the :ref:`vault execution module docs `. + +.. versionadded:: 3007.0 +""" + +import logging + +import salt.utils.vault as vault +from salt.exceptions import CommandExecutionError, SaltInvocationError + +log = logging.getLogger(__name__) + + +def connection_present( + name, + plugin, + version=None, + verify=True, + allowed_roles=None, + root_rotation_statements=None, + password_policy=None, + rotate=True, + force=False, + mount="database", + **kwargs, +): + """ + Ensure a database connection is present as specified. + + name + The name of the database connection. + + plugin + The name of the database plugin. Known plugins to this module are: + ``cassandra``, ``couchbase``, ``elasticsearch``, ``influxdb``, ``hanadb``, ``mongodb``, + ``mongodb_atlas``, ``mssql``, ``mysql``, ``oracle``, ``postgresql``, ``redis``, + ``redis_elasticache``, ``redshift``, ``snowflake``. + If you pass an unknown plugin, make sure its Vault-internal name can be formatted + as ``{plugin}-database-plugin`` and to pass all required parameters as kwargs. + + version + Specifies the semantic version of the plugin to use for this connection. + + verify + Verify the connection during initial configuration. Defaults to True. + + allowed_roles + List of the roles allowed to use this connection. 
``["*"]`` means any role + can use this connection. Defaults to empty (no role can use it). + + root_rotation_statements + Specifies the database statements to be executed to rotate the root user's credentials. + See the plugin's API page for more information on support and formatting for this parameter. + + password_policy + The name of the password policy to use when generating passwords for this database. + If not specified, this will use a default policy defined as: + 20 characters with at least 1 uppercase, 1 lowercase, 1 number, and 1 dash character. + + rotate + Rotate the root credentials after plugin setup. Defaults to True. + + force + When the plugin changes, this state fails to protect from accidental errors. + Set force to True to delete existing connections with the same name and a + different plugin type. Defaults to False. + + mount + The mount path the database backend is mounted to. Defaults to ``database``. + + kwargs + Different plugins require different parameters. You need to make sure that you pass them + as supplemental keyword arguments. For known plugins, the required arguments will + be checked. 
+ """ + ret = {"name": name, "result": True, "comment": "", "changes": {}} + kwargs = {k: v for k, v in kwargs.items() if not k.startswith("_")} + + def _diff_params(current): + nonlocal version, allowed_roles, root_rotation_statements, password_policy, kwargs + diff_params = ( + ("plugin_version", version), + ("allowed_roles", allowed_roles), + ("root_credentials_rotate_statements", root_rotation_statements), + ("password_policy", password_policy), + ) + changed = {} + for param, arg in diff_params: + if arg is None: + continue + if param not in current or current[param] != arg: + changed.update({param: {"old": current.get(param), "new": arg}}) + for param, val in kwargs.items(): + if param == "password": + # password is not reported + continue + if ( + param not in current["connection_details"] + or current["connection_details"][param] != val + ): + changed.update({param: {"old": current.get(param), "new": val}}) + return changed + + try: + current = __salt__["vault_db.fetch_connection"](name, mount=mount) + changes = {} + + if current: + if current["plugin_name"] != __salt__["vault_db.get_plugin_name"](plugin): + if not force: + raise CommandExecutionError( + "Cannot change plugin type without deleting the existing connection. " + "Set force: true to override." 
+ ) + if not __opts__["test"]: + __salt__["vault_db.delete_connection"](name, mount=mount) + ret["changes"]["deleted for plugin change"] = name + current = None + else: + changes = _diff_params(current) + if not changes: + ret["comment"] = "Connection is present as specified" + return ret + + if __opts__["test"]: + ret["result"] = None + ret[ + "comment" + ] = f"Connection `{name}` would have been {'updated' if current else 'created'}" + ret["changes"].update(changes) + if not current: + ret["changes"]["created"] = name + return ret + + if current and "password" in kwargs: + kwargs.pop("password") + + __salt__["vault_db.write_connection"]( + name, + plugin, + version=version, + verify=verify, + allowed_roles=allowed_roles, + root_rotation_statements=root_rotation_statements, + password_policy=password_policy, + rotate=rotate, + mount=mount, + **kwargs, + ) + new = __salt__["vault_db.fetch_connection"](name, mount=mount) + + if new is None: + raise CommandExecutionError( + "There were no errors during role management, but it is reported as absent." + ) + if not current: + ret["changes"]["created"] = name + + new_diff = _diff_params(new) + if new_diff: + ret["result"] = False + ret["comment"] = ( + "There were no errors during connection management, but " + f"the reported parameters do not match: {new_diff}" + ) + return ret + ret["changes"].update(changes) + + except CommandExecutionError as err: + ret["result"] = False + ret["comment"] = str(err) + # do not reset changes + + return ret + + +def connection_absent(name, mount="database"): + """ + Ensure a database connection is absent. + + name + The name of the connection. + + mount + The mount path the database backend is mounted to. Defaults to ``database``. + """ + ret = {"name": name, "result": True, "comment": "", "changes": {}} + + try: + current = __salt__["vault_db.fetch_connection"](name, mount=mount) + + if current is None: + ret["comment"] = f"Connection `{name}` is already absent." 
+ return ret + + ret["changes"]["deleted"] = name + + if __opts__["test"]: + ret["result"] = None + ret["comment"] = f"Connection `{name}` would have been deleted." + return ret + + __salt__["vault_db.delete_connection"](name, mount=mount) + + if __salt__["vault_db.fetch_connection"](name, mount=mount) is not None: + raise CommandExecutionError( + "There were no errors during connection deletion, " + "but it is still reported as present." + ) + ret["comment"] = f"Connection `{name}` has been deleted." + + except CommandExecutionError as err: + ret["result"] = False + ret["comment"] = str(err) + ret["changes"] = {} + + return ret + + +def role_present( + name, + connection, + creation_statements, + default_ttl=None, + max_ttl=None, + revocation_statements=None, + rollback_statements=None, + renew_statements=None, + credential_type=None, + credential_config=None, + mount="database", +): + """ + Ensure a regular database role is present as specified. + + name + The name of the database role. + + connection + The name of the database connection this role applies to. + + creation_statements + Specifies a list of database statements executed to create and configure a user, + usually templated with {{name}} and {{password}}. Required. + + default_ttl + Specifies the TTL for the leases associated with this role. Accepts time suffixed + strings (1h) or an integer number of seconds. Defaults to system/engine default TTL time. + + max_ttl + Specifies the maximum TTL for the leases associated with this role. Accepts time suffixed + strings (1h) or an integer number of seconds. Defaults to sys/mounts's default TTL time; + this value is allowed to be less than the mount max TTL (or, if not set, + the system max TTL), but it is not allowed to be longer. + + revocation_statements + Specifies a list of database statements to be executed to revoke a user. 
+ + rollback_statements + Specifies a list of database statements to be executed to rollback a create operation + in the event of an error. Availability and formatting depend on the specific plugin. + + renew_statements + Specifies a list of database statements to be executed to renew a user. + Availability and formatting depend on the specific plugin. + + credential_type + Specifies the type of credential that will be generated for the role. + Options include: ``password``, ``rsa_private_key``. Defaults to ``password``. + See the plugin's API page for credential types supported by individual databases. + + credential_config + Specifies the configuration for the given ``credential_type`` as a mapping. + For ``password``, only ``password_policy`` can be passed. + For ``rsa_private_key``, ``key_bits`` (defaults to 2048) and ``format`` + (defaults to ``pkcs8``) are available. + + mount + The mount path the database backend is mounted to. Defaults to ``database``. + """ + ret = {"name": name, "result": True, "comment": "", "changes": {}} + + if not isinstance(creation_statements, list): + creation_statements = [creation_statements] + if revocation_statements and not isinstance(revocation_statements, list): + revocation_statements = [revocation_statements] + if rollback_statements and not isinstance(rollback_statements, list): + rollback_statements = [rollback_statements] + if renew_statements and not isinstance(renew_statements, list): + renew_statements = [renew_statements] + + def _diff_params(current): + nonlocal connection, creation_statements, default_ttl, max_ttl, revocation_statements + nonlocal rollback_statements, renew_statements, credential_type, credential_config + + diff_params = ( + ("db_name", connection), + ("creation_statements", creation_statements), + ("default_ttl", vault.timestring_map(default_ttl)), + ("max_ttl", vault.timestring_map(max_ttl)), + ("revocation_statements", revocation_statements), + ("rollback_statements", rollback_statements), + 
("renew_statements", renew_statements), + ("credential_type", credential_type), + ("credential_config", credential_config), + ) + changed = {} + for param, arg in diff_params: + if arg is None: + continue + if param not in current or current[param] != arg: + changed.update({param: {"old": current.get(param), "new": arg}}) + return changed + + try: + current = __salt__["vault_db.fetch_role"](name, static=False, mount=mount) + + if current: + changed = _diff_params(current) + if not changed: + ret["comment"] = "Role is present as specified" + return ret + ret["changes"].update(changed) + + if __opts__["test"]: + ret["result"] = None + ret[ + "comment" + ] = f"Role `{name}` would have been {'updated' if current else 'created'}" + if not current: + ret["changes"]["created"] = name + return ret + + __salt__["vault_db.write_role"]( + name, + connection, + creation_statements, + default_ttl=default_ttl, + max_ttl=max_ttl, + revocation_statements=revocation_statements, + rollback_statements=rollback_statements, + renew_statements=renew_statements, + credential_type=credential_type, + credential_config=credential_config, + mount=mount, + ) + new = __salt__["vault_db.fetch_role"](name, static=False, mount=mount) + + if new is None: + raise CommandExecutionError( + "There were no errors during role management, but it is reported as absent." + ) + + if not current: + ret["changes"]["created"] = name + + new_diff = _diff_params(new) + if new_diff: + ret["result"] = False + ret["comment"] = ( + "There were no errors during role management, but " + f"the reported parameters do not match: {new_diff}" + ) + return ret + + except (CommandExecutionError, SaltInvocationError) as err: + ret["result"] = False + ret["comment"] = str(err) + ret["changes"] = {} + + return ret + + +def role_absent(name, static=False, mount="database"): + """ + Ensure a database role is absent. + + name + The name of the role. + + static + Whether this role is static. Defaults to False. 
def static_role_present(
    name,
    connection,
    username,
    rotation_period,
    rotation_statements=None,
    credential_type=None,
    credential_config=None,
    mount="database",
):
    """
    Ensure a database Static Role is present as specified.

    name
        The name of the database role.

    connection
        The name of the database connection this role applies to.

    username
        The username to manage.

    rotation_period
        Specifies the amount of time Vault should wait before rotating the password.
        The minimum is ``5s``.

    rotation_statements
        Specifies the database statements to be executed to rotate the password for the
        configured database user. Not every plugin type will support this functionality.

    credential_type
        Specifies the type of credential that will be generated for the role.
        Options include: ``password``, ``rsa_private_key``. Defaults to ``password``.
        See the plugin's API page for credential types supported by individual databases.

    credential_config
        Specifies the configuration for the given ``credential_type`` as a mapping.
        For ``password``, only ``password_policy`` can be passed.
        For ``rsa_private_key``, ``key_bits`` (defaults to 2048) and ``format``
        (defaults to ``pkcs8``) are available.

    mount
        The mount path the database backend is mounted to. Defaults to ``database``.
    """
    ret = {"name": name, "result": True, "comment": "", "changes": {}}

    # Allow passing a single statement as a string for convenience.
    if rotation_statements and not isinstance(rotation_statements, list):
        rotation_statements = [rotation_statements]

    def _diff_params(current):
        # Compare the remotely reported parameters against the requested
        # ones. Parameters left unspecified (None) are not diffed.
        diff_params = (
            ("db_name", connection),
            ("username", username),
            ("rotation_period", vault.timestring_map(rotation_period)),
            ("rotation_statements", rotation_statements),
            ("credential_type", credential_type),
            ("credential_config", credential_config),
        )
        changed = {}
        for param, arg in diff_params:
            if arg is None:
                continue
            if param not in current or current[param] != arg:
                changed.update({param: {"old": current.get(param), "new": arg}})
        return changed

    try:
        current = __salt__["vault_db.fetch_role"](name, static=True, mount=mount)

        if current:
            changed = _diff_params(current)
            if not changed:
                ret["comment"] = "Role is present as specified"
                return ret
            ret["changes"].update(changed)

        if __opts__["test"]:
            ret["result"] = None
            ret[
                "comment"
            ] = f"Role `{name}` would have been {'updated' if current else 'created'}"
            if not current:
                ret["changes"]["created"] = name
            return ret

        __salt__["vault_db.write_static_role"](
            name,
            connection,
            username,
            rotation_period,
            # Bugfix: forward the requested statements instead of hardcoding
            # None - previously rotation_statements was validated and diffed
            # above, but never actually written to Vault.
            rotation_statements=rotation_statements,
            credential_type=credential_type,
            credential_config=credential_config,
            mount=mount,
        )
        new = __salt__["vault_db.fetch_role"](name, static=True, mount=mount)

        if new is None:
            raise CommandExecutionError(
                "There were no errors during role management, but it is reported as absent."
            )

        if not current:
            ret["changes"]["created"] = name

        # Verify the write round-trips: Vault should now report exactly the
        # parameters we requested.
        new_diff = _diff_params(new)
        if new_diff:
            ret["result"] = False
            ret["comment"] = (
                "There were no errors during role management, but "
                f"the reported parameters do not match: {new_diff}"
            )
        return ret

    except (CommandExecutionError, SaltInvocationError) as err:
        ret["result"] = False
        ret["comment"] = str(err)
        ret["changes"] = {}

    return ret
def query(
    method,
    endpoint,
    opts,
    context,
    payload=None,
    wrap=False,
    raise_error=True,
    is_unauthd=False,
    **kwargs,
):
    """
    Query the Vault API. Supplemental arguments to ``requests.request``
    can be passed as kwargs.

    method
        HTTP verb to use.

    endpoint
        API path to call (without leading ``/v1/``).

    opts
        Pass ``__opts__`` from the module.

    context
        Pass ``__context__`` from the module.

    payload
        Dictionary of payload values to send, if any.

    wrap
        Whether to request response wrapping. Should be a time string
        like ``30s`` or False (default).

    raise_error
        Whether to inspect the response code and raise exceptions.
        Defaults to True.

    is_unauthd
        Whether the queried endpoint is an unauthenticated one and hence
        does not deduct a token use. Only relevant for endpoints not found
        in ``sys``. Defaults to False.
    """
    vault = get_authd_client(opts, context)
    try:
        return vault.request(
            method,
            endpoint,
            payload=payload,
            wrap=wrap,
            raise_error=raise_error,
            is_unauthd=is_unauthd,
            **kwargs,
        )
    except VaultPermissionDeniedError:
        # The cached authentication data might have been revoked remotely.
        # Clear the cache, re-authenticate and retry exactly once.
        clear_cache(opts, context)
        vault = get_authd_client(opts, context)
        return vault.request(
            method,
            endpoint,
            payload=payload,
            wrap=wrap,
            raise_error=raise_error,
            is_unauthd=is_unauthd,
            **kwargs,
        )
An error was returned: %s", - result["error"], - ) - raise salt.exceptions.CommandExecutionError(result) - if "session" in result.get("token_backend", "session"): - # This is the only way that this key can be placed onto __context__ - # Thus is tells the minion that the master is configured for token_backend: session - log.debug("Using session storage for vault credentials") - __context__["vault_secret_path_metadata"] = {} - return { - "url": result["url"], - "token": result["token"], - "verify": result.get("verify", None), - "namespace": result.get("namespace"), - "uses": result.get("uses", 1), - "lease_duration": result["lease_duration"], - "issued": result["issued"], - } -def get_vault_connection(): - """ - Get the connection details for calling Vault, from local configuration if - it exists, or from the master otherwise +def query_raw( + method, + endpoint, + opts, + context, + payload=None, + wrap=False, + retry=True, + is_unauthd=False, + **kwargs, +): """ + Query the Vault API, returning the raw response object. Supplemental + arguments to ``requestes.request`` can be passed as kwargs. - def _use_local_config(): - log.debug("Using Vault connection details from local config") - # Vault Enterprise requires a namespace - namespace = __opts__["vault"].get("namespace") - try: - if __opts__["vault"]["auth"]["method"] == "approle": - verify = __opts__["vault"].get("verify", None) - if _selftoken_expired(): - log.debug("Vault token expired. 
Recreating one") - # Requesting a short ttl token - url = "{}/v1/auth/approle/login".format(__opts__["vault"]["url"]) - payload = {"role_id": __opts__["vault"]["auth"]["role_id"]} - if "secret_id" in __opts__["vault"]["auth"]: - payload["secret_id"] = __opts__["vault"]["auth"]["secret_id"] - if namespace is not None: - headers = {"X-Vault-Namespace": namespace} - response = requests.post( - url, headers=headers, json=payload, verify=verify - ) - else: - response = requests.post(url, json=payload, verify=verify) - if response.status_code != 200: - errmsg = "An error occurred while getting a token from approle" - raise salt.exceptions.CommandExecutionError(errmsg) - __opts__["vault"]["auth"]["token"] = response.json()["auth"][ - "client_token" - ] - if __opts__["vault"]["auth"]["method"] == "wrapped_token": - verify = __opts__["vault"].get("verify", None) - if _wrapped_token_valid(): - url = "{}/v1/sys/wrapping/unwrap".format(__opts__["vault"]["url"]) - headers = {"X-Vault-Token": __opts__["vault"]["auth"]["token"]} - if namespace is not None: - headers["X-Vault-Namespace"] = namespace - response = requests.post(url, headers=headers, verify=verify) - if response.status_code != 200: - errmsg = "An error occured while unwrapping vault token" - raise salt.exceptions.CommandExecutionError(errmsg) - __opts__["vault"]["auth"]["token"] = response.json()["auth"][ - "client_token" - ] - return { - "url": __opts__["vault"]["url"], - "namespace": namespace, - "token": __opts__["vault"]["auth"]["token"], - "verify": __opts__["vault"].get("verify", None), - "issued": int(round(time.time())), - "ttl": 3600, - } - except KeyError as err: - errmsg = 'Minion has "vault" config section, but could not find key "{}" within'.format( - err - ) - raise salt.exceptions.CommandExecutionError(errmsg) + method + HTTP verb to use. 
- config = __opts__["vault"].get("config_location") - if config: - if config not in ["local", "master"]: - log.error("config_location must be either local or master") - return False - if config == "local": - return _use_local_config() - elif config == "master": - return _get_token_and_url_from_master() - - if "vault" in __opts__ and __opts__.get("__role", "minion") == "master": - if "id" in __grains__: - log.debug("Contacting master for Vault connection details") - return _get_token_and_url_from_master() - else: - return _use_local_config() - elif any( - ( - __opts__.get("local", None), - __opts__.get("file_client", None) == "local", - __opts__.get("master_type", None) == "disable", - ) - ): - return _use_local_config() - else: - log.debug("Contacting master for Vault connection details") - return _get_token_and_url_from_master() + endpoint + API path to call (without leading ``/v1/``). + opts + Pass ``__opts__`` from the module. -def del_cache(): - """ - Delete cache - """ - log.debug("Deleting session cache") - if "vault_token" in __context__: - del __context__["vault_token"] + context + Pass ``__context__`` from the module. - log.debug("Deleting cache file") - cache_file = os.path.join(__opts__["cachedir"], "salt_vault_token") + payload + Dictionary of payload values to send, if any. - if os.path.exists(cache_file): - os.remove(cache_file) - else: - log.debug("Attempted to delete vault cache file, but it does not exist.") + retry + Retry the query with cleared cache in case the permission + was denied (to check for revoked cached credentials). + Defaults to True. + wrap + Whether to request response wrapping. Should be a time string + like ``30s`` or False (default). -def write_cache(connection): - """ - Write the vault token to cache + is_unauthd + Whether the queried endpoint is an unauthenticated one and hence + does not deduct a token use. Only relevant for endpoints not found + in ``sys``. Defaults to False. 
""" - # If uses is 1 and unlimited_use_token is not true, then this is a single use token and should not be cached - # In that case, we still want to cache the vault metadata lookup information for paths, so continue on - if ( - connection.get("uses", None) == 1 - and "unlimited_use_token" not in connection - and "vault_secret_path_metadata" not in connection - ): - log.debug("Not caching vault single use token") - __context__["vault_token"] = connection - return True - elif ( - "vault_secret_path_metadata" in __context__ - and "vault_secret_path_metadata" not in connection - ): - # If session storage is being used, and info passed is not the already saved metadata - log.debug("Storing token only for this session") - __context__["vault_token"] = connection - return True - elif "vault_secret_path_metadata" in __context__: - # Must have been passed metadata. This is already handled by _get_secret_path_metadata - # and does not need to be resaved - return True - temp_fp, temp_file = tempfile.mkstemp(dir=__opts__["cachedir"]) - cache_file = os.path.join(__opts__["cachedir"], "salt_vault_token") - try: - log.debug("Writing vault cache file") - # Detect if token was issued without use limit - if connection.get("uses") == 0: - connection["unlimited_use_token"] = True - else: - connection["unlimited_use_token"] = False - with salt.utils.files.fpopen(temp_file, "w", mode=0o600) as fp_: - fp_.write(salt.utils.json.dumps(connection)) - os.close(temp_fp) - # Atomic operation to pervent race condition with concurrent calls. 
- os.rename(temp_file, cache_file) - return True - except OSError: - log.error( - "Failed to cache vault information", exc_info_on_loglevel=logging.DEBUG + vault = get_authd_client(opts, context) + res = vault.request_raw( + method, endpoint, payload=payload, wrap=wrap, is_unauthd=is_unauthd, **kwargs + ) + + if not retry: + return res + + if res.status_code == 403: + # in case cached authentication data was revoked + clear_cache(opts, context) + vault = get_authd_client(opts, context) + res = vault.request_raw( + method, + endpoint, + payload=payload, + wrap=wrap, + is_unauthd=is_unauthd, + **kwargs, ) - return False + return res -def _read_cache_file(): +def is_v2(path, opts=None, context=None): """ - Return contents of cache file + Determines if a given secret path is kv version 1 or 2. """ - try: - cache_file = os.path.join(__opts__["cachedir"], "salt_vault_token") - with salt.utils.files.fopen(cache_file, "r") as contents: - return salt.utils.json.load(contents) - except FileNotFoundError: - return {} + # TODO: consider if at least context is really necessary to require + if opts is None or context is None: + opts = globals().get("__opts__", {}) if opts is None else opts + context = globals().get("__context__", {}) if context is None else context + salt.utils.versions.warn_until( + "Argon", + "The __utils__ loader functionality will be removed. This will " + "cause context/opts dunders to be unavailable in utility modules. " + "Please pass opts and context from importing Salt modules explicitly.", + ) + kv = _get_kv(opts, context) + return kv.is_v2(path) -def get_cache(): +def read_kv(path, opts, context, include_metadata=False): """ - Return connection information from vault cache file + Read secret at . 
""" - - def _gen_new_connection(): - log.debug("Refreshing token") - connection = get_vault_connection() - write_status = write_cache(connection) - return connection - - connection = _read_cache_file() - # If no cache, or only metadata info is saved in cache, generate a new token - if not connection or "url" not in connection: - return _gen_new_connection() - - # Drop 10 seconds from ttl to be safe - if "lease_duration" in connection: - ttl = connection["lease_duration"] - else: - ttl = connection["ttl"] - ttl10 = connection["issued"] + ttl - 10 - cur_time = int(round(time.time())) - - # Determine if ttl still valid - if ttl10 < cur_time: - log.debug("Cached token has expired %s < %s: DELETING", ttl10, cur_time) - del_cache() - return _gen_new_connection() - else: - log.debug("Token has not expired %s > %s", ttl10, cur_time) - return connection + kv = _get_kv(opts, context) + try: + return kv.read(path, include_metadata=include_metadata) + except VaultPermissionDeniedError: + # in case cached authentication data was revoked + clear_cache(opts, context) + kv = _get_kv(opts, context) + return kv.read(path, include_metadata=include_metadata) -def make_request( - method, - resource, - token=None, - vault_url=None, - namespace=None, - get_token_url=False, - retry=False, - **args -): +def write_kv(path, data, opts, context): """ - Make a request to Vault + Write secret to . 
""" - if "vault_token" in __context__: - connection = __context__["vault_token"] - else: - connection = get_cache() - token = connection["token"] if not token else token - vault_url = connection["url"] if not vault_url else vault_url - namespace = namespace or connection.get("namespace") - if "verify" in args: - args["verify"] = args["verify"] - else: - try: - args["verify"] = __opts__.get("vault").get("verify", None) - except (TypeError, AttributeError): - # Don't worry about setting verify if it doesn't exist - pass - url = "{}/{}".format(vault_url, resource) - headers = {"X-Vault-Token": str(token), "Content-Type": "application/json"} - if namespace is not None: - headers["X-Vault-Namespace"] = namespace - response = requests.request(method, url, headers=headers, **args) - if not response.ok and response.json().get("errors", None) == ["permission denied"]: - log.info("Permission denied from vault") - del_cache() - if not retry: - log.debug("Retrying with new credentials") - response = make_request( - method, - resource, - token=None, - vault_url=vault_url, - get_token_url=get_token_url, - retry=True, - **args - ) - else: - log.error("Unable to connect to vault server: %s", response.text) - return response - elif not response.ok: - log.error("Error from vault: %s", response.text) - return response - - # Decrement vault uses, only on secret URL lookups and multi use tokens - if ( - "uses" in connection - and not connection.get("unlimited_use_token") - and not resource.startswith("v1/sys") - ): - log.debug("Decrementing Vault uses on limited token for url: %s", resource) - connection["uses"] -= 1 - if connection["uses"] <= 0: - log.debug("Cached token has no more uses left.") - if "vault_token" not in __context__: - del_cache() - else: - log.debug("Deleting token from memory") - del __context__["vault_token"] - else: - log.debug("Token has %s uses left", connection["uses"]) - write_cache(connection) - - if get_token_url: - return response, token, vault_url - else: 
def patch_kv(path, data, opts, context):
    """
    Patch secret at .
    """
    # Try at most twice: once with whatever credentials are cached, and -
    # if Vault denies permission because those were revoked - once more
    # after clearing the cache and re-authenticating.
    for attempt in (1, 2):
        store = _get_kv(opts, context)
        try:
            return store.patch(path, data)
        except VaultPermissionDeniedError:
            if attempt == 2:
                raise
            clear_cache(opts, context)
""" + kv = _get_kv(opts, context) try: - verify = __opts__["vault"].get("verify", None) - # Vault Enterprise requires a namespace - namespace = __opts__["vault"].get("namespace") - url = "{}/v1/sys/wrapping/lookup".format(__opts__["vault"]["url"]) - if "token" not in __opts__["vault"]["auth"]: - return False - headers = {"X-Vault-Token": __opts__["vault"]["auth"]["token"]} - if namespace is not None: - headers["X-Vault-Namespace"] = namespace - response = requests.post(url, headers=headers, verify=verify) - if response.status_code != 200: - return False - return True - except Exception as e: # pylint: disable=broad-except - raise salt.exceptions.CommandExecutionError( - "Error while looking up wrapped token : {}".format(e) - ) + return kv.delete(path, versions=versions) + except VaultPermissionDeniedError: + clear_cache(opts, context) + kv = _get_kv(opts, context) + return kv.delete(path, versions=versions) -def is_v2(path): +def destroy_kv(path, versions, opts, context): """ - Determines if a given secret path is kv version 1 or 2 - - CLI Example: - - .. code-block:: bash - - salt '*' vault.is_v2 "secret/my/secret" + Destroy secret at . Requires KV v2. """ - ret = {"v2": False, "data": path, "metadata": path, "delete": path, "type": None} - path_metadata = _get_secret_path_metadata(path) - if not path_metadata: - # metadata lookup failed. 
Simply return not v2 - return ret - ret["type"] = path_metadata.get("type", "kv") - if ( - ret["type"] == "kv" - and path_metadata["options"] is not None - and path_metadata.get("options", {}).get("version", "1") in ["2"] - ): - ret["v2"] = True - ret["data"] = _v2_the_path(path, path_metadata.get("path", path)) - ret["metadata"] = _v2_the_path( - path, path_metadata.get("path", path), "metadata" - ) - ret["destroy"] = _v2_the_path(path, path_metadata.get("path", path), "destroy") - return ret + kv = _get_kv(opts, context) + try: + return kv.destroy(path, versions) + except VaultPermissionDeniedError: + clear_cache(opts, context) + kv = _get_kv(opts, context) + return kv.destroy(path, versions) -def _v2_the_path(path, pfilter, ptype="data"): +def list_kv(path, opts, context): """ - Given a path, a filter, and a path type, properly inject 'data' or 'metadata' into the path - - CLI Example: - - .. code-block:: python - - _v2_the_path('dev/secrets/fu/bar', 'dev/secrets', 'data') => 'dev/secrets/data/fu/bar' + List secrets at . Returns ``{"keys": []}`` by default + for backwards-compatibility reasons, unless is True. 
""" - possible_types = ["data", "metadata", "destroy"] - assert ptype in possible_types - msg = ( - "Path {} already contains {} in the right place - saltstack duct tape?".format( - path, ptype - ) + kv = _get_kv(opts, context) + try: + return kv.list(path) + except VaultPermissionDeniedError: + clear_cache(opts, context) + kv = _get_kv(opts, context) + return kv.list(path) + + +def _get_kv(opts, context): + client, config = get_authd_client(opts, context, get_config=True) + ttl = None + connection = True + if config["cache"]["kv_metadata"] != "connection": + ttl = config["cache"]["kv_metadata"] + connection = False + cbank = _get_cache_bank(opts, connection=connection) + ckey = "secret_path_metadata" + metadata_cache = VaultCache( + context, cbank, ckey, cache_backend=_get_cache_backend(config, opts), ttl=ttl ) - - path = path.rstrip("/").lstrip("/") - pfilter = pfilter.rstrip("/").lstrip("/") - - together = pfilter + "/" + ptype - - otype = possible_types[0] if possible_types[0] != ptype else possible_types[1] - other = pfilter + "/" + otype - if path.startswith(other): - path = path.replace(other, together, 1) - msg = 'Path is a "{}" type but "{}" type requested - Flipping: {}'.format( - otype, ptype, path - ) - elif not path.startswith(together): - msg = "Converting path to v2 {} => {}".format( - path, path.replace(pfilter, together, 1) - ) - path = path.replace(pfilter, together, 1) - - log.debug(msg) - return path + return VaultKV(client, metadata_cache) -def _get_secret_path_metadata(path): +def get_lease_store(opts, context): """ - Given a path, query vault to determine mount point, type, and version - - CLI Example: + Return an instance of LeaseStore, which can be used + to cache leases and handle operations like renewals and revocations. 
+ """ + client, config = get_authd_client(opts, context, get_config=True) + session_cbank = _get_cache_bank(opts, session=True) + lease_cache = VaultLeaseCache( + context, + session_cbank + "/leases", + cache_backend=_get_cache_backend(config, opts), + ) + return LeaseStore(client, lease_cache) - .. code-block:: python - _get_secret_path_metadata('dev/secrets/fu/bar') +def clear_cache(opts, context, ckey=None, connection=True, session=False): """ - ckey = "vault_secret_path_metadata" - - # Attempt to lookup from cache - if ckey in __context__: - cache_content = __context__[ckey] - else: - cache_content = _read_cache_file() - if ckey not in cache_content: - cache_content[ckey] = {} - - ret = None - if path.startswith(tuple(cache_content[ckey].keys())): - log.debug("Found cached metadata for %s", path) - ret = next(v for k, v in cache_content[ckey].items() if path.startswith(k)) - else: - log.debug("Fetching metadata for %s", path) - try: - url = "v1/sys/internal/ui/mounts/{}".format(path) - response = make_request("GET", url) - if response.ok: - response.raise_for_status() - if response.json().get("data", False): - log.debug("Got metadata for %s", path) - ret = response.json()["data"] - # Write metadata to cache file - # Check for new cache content from make_request - if "url" not in cache_content: - if ckey in __context__: - cache_content = __context__[ckey] - else: - cache_content = _read_cache_file() - if ckey not in cache_content: - cache_content[ckey] = {} - cache_content[ckey][path] = ret - write_cache(cache_content) - else: - raise response.json() - except Exception as err: # pylint: disable=broad-except - log.error("Failed to get secret metadata %s: %s", type(err).__name__, err) - return ret + Clears connection cache. 
+ """ + cbank = _get_cache_bank( + opts, connection=connection, session=session and not connection + ) + if cbank in context: + if ckey is None: + context.pop(cbank) + else: + context[cbank].pop(ckey, None) + # also remove sub-banks from context to mimic cache behavior + if ckey is None: + for bank in list(context): + if bank.startswith(cbank): + context.pop(bank) + cache = salt.cache.factory(opts) + if cache.contains(cbank, ckey): + return cache.flush(cbank, ckey) + local_opts = copy.copy(opts) + opts["cache"] = "localfs" + cache = salt.cache.factory(local_opts) + return cache.flush(cbank, ckey) + + +def _get_cache_backend(config, opts): + if config["cache"]["backend"] == "session": + return None + if config["cache"]["backend"] in ["localfs", "disk", "file"]: + # cache.Cache does not allow setting the type of cache by param + local_opts = copy.copy(opts) + local_opts["cache"] = "localfs" + return salt.cache.factory(local_opts) + # this should usually resolve to localfs as well on minions, + # but can be overridden by setting cache in the minion config + return salt.cache.factory(opts) def expand_pattern_lists(pattern, **mappings): @@ -603,10 +382,2662 @@ def expand_pattern_lists(pattern, **mappings): continue (value, _) = f.get_field(field_name, None, mappings) if isinstance(value, list): - token = "{{{0}}}".format(field_name) + token = f"{{{field_name}}}" expanded = [pattern.replace(token, str(elem)) for elem in value] for expanded_item in expanded: result = expand_pattern_lists(expanded_item, **mappings) expanded_patterns += result return expanded_patterns return [pattern] + + +def timestring_map(val): + """ + Turn a time string (like ``60m``) into a float with seconds as a unit. 
+ """ + if val is None: + return val + if isinstance(val, (int, float)): + return float(val) + try: + return float(val) + except ValueError: + pass + if not isinstance(val, str): + raise SaltInvocationError("Expected integer or time string") + if not re.match(r"^\d+(?:\.\d+)?[smhd]$", val): + raise SaltInvocationError(f"Invalid time string format: {val}") + raw, unit = float(val[:-1]), val[-1] + if unit == "s": + return raw + raw *= 60 + if unit == "m": + return raw + raw *= 60 + if unit == "h": + return raw + raw *= 24 + if unit == "d": + return raw + raise RuntimeError("This path should not have been hit") + + +SALT_RUNTYPE_MASTER = 0 +SALT_RUNTYPE_MASTER_IMPERSONATING = 1 +SALT_RUNTYPE_MASTER_PEER_RUN = 2 +SALT_RUNTYPE_MINION_LOCAL = 3 +SALT_RUNTYPE_MINION_REMOTE = 4 + + +def _get_salt_run_type(opts): + if "vault" in opts and opts.get("__role", "minion") == "master": + if opts.get("minion_id"): + return SALT_RUNTYPE_MASTER_IMPERSONATING + if "grains" in opts and "id" in opts["grains"]: + return SALT_RUNTYPE_MASTER_PEER_RUN + return SALT_RUNTYPE_MASTER + + config_location = opts.get("vault", {}).get("config_location") + if config_location and config_location not in ["local", "master"]: + raise salt.exceptions.InvalidConfigError( + "Invalid vault configuration: config_location must be either local or master" + ) + + if config_location == "master": + pass + elif any( + ( + opts.get("local", None), + opts.get("file_client", None) == "local", + opts.get("master_type", None) == "disable", + config_location == "local", + ) + ): + return SALT_RUNTYPE_MINION_LOCAL + return SALT_RUNTYPE_MINION_REMOTE + + +def _get_cache_bank(opts, force_local=False, connection=True, session=False): + minion_id = None + # force_local is necessary because pillar compilation would otherwise + # leak tokens between master and minions + if not force_local and _get_salt_run_type(opts) in [ + SALT_RUNTYPE_MASTER_IMPERSONATING, + SALT_RUNTYPE_MASTER_PEER_RUN, + ]: + minion_id = 
opts["grains"]["id"] + prefix = "vault" if minion_id is None else f"minions/{minion_id}/vault" + if session: + return prefix + "/connection/session" + if connection: + return prefix + "/connection" + return prefix + + +def get_authd_client(opts, context, force_local=False, get_config=False): + """ + Returns an AuthenticatedVaultClient that is valid for at least one query. + """ + retry = False + try: + client, config = _build_authd_client(opts, context, force_local=force_local) + except (VaultAuthExpired, VaultConfigExpired, VaultPermissionDeniedError): + retry = True + # First, check if the token needs to be and can be renewed. + # Since this needs to check the possibly active session and does not care + # about valid secret IDs etc, we need to inspect the actual token. + if ( + not retry + and config["auth"]["token_lifecycle"]["renew_increment"] is not False + and client.auth.get_token().is_renewable() + and not client.auth.get_token().is_valid( + config["auth"]["token_lifecycle"]["minimum_ttl"] + ) + ): + log.debug("Renewing token") + client.token_renew( + increment=config["auth"]["token_lifecycle"]["renew_increment"] + ) + + if retry or not client.token_valid( + config["auth"]["token_lifecycle"]["minimum_ttl"] or 0, remote=False + ): + log.debug("Deleting cache and requesting new authentication credentials") + clear_cache(opts, context) + client, config = _build_authd_client(opts, context, force_local=force_local) + if not client.token_valid( + config["auth"]["token_lifecycle"]["minimum_ttl"] or 0, remote=False + ): + if config["auth"]["token_lifecycle"]["minimum_ttl"]: + log.warning( + "Configuration error: auth:token_lifecycle:minimum_ttl cannot be honored because fresh tokens are issued with less ttl. Continuing anyways." + ) + else: + raise VaultException( + "Could not build valid client. This is most likely a bug." 
+ ) + + if get_config: + return client, config + return client + + +def _build_authd_client(opts, context, force_local=False): + connection_cbank = _get_cache_bank(opts, force_local=force_local) + config, embedded_token = _get_connection_config( + connection_cbank, opts, context, force_local=force_local + ) + # Tokens are cached in a distinct scope to enable cache per session + session_cbank = _get_cache_bank(opts, force_local=force_local, session=True) + cache_ttl = ( + config["cache"]["secret"] if config["cache"]["secret"] != "ttl" else None + ) + token_cache = VaultAuthCache( + context, + session_cbank, + TOKEN_CKEY, + VaultToken, + cache_backend=_get_cache_backend(config, opts), + ttl=cache_ttl, + ) + + client = None + + if config["auth"]["method"] == "approle": + secret_id = config["auth"]["secret_id"] or None + cached_token = token_cache.get(10) + secret_id_cache = None + if secret_id: + secret_id_cache = VaultAuthCache( + context, + connection_cbank, + "secret_id", + VaultSecretId, + cache_backend=_get_cache_backend(config, opts), + ttl=cache_ttl, + ) + secret_id = secret_id_cache.get() + # Only fetch secret ID if there is no cached valid token + if cached_token is None and secret_id is None: + secret_id = _fetch_secret_id( + config, opts, secret_id_cache, force_local=force_local + ) + if secret_id is None: + secret_id = InvalidVaultSecretId() + role_id = config["auth"]["role_id"] + # this happens with wrapped response merging + if isinstance(role_id, dict): + role_id = role_id["role_id"] + approle = VaultAppRole(role_id, secret_id) + token_auth = VaultTokenAuth(cache=token_cache) + unauthd_client = VaultClient(**config["server"]) + auth = VaultAppRoleAuth( + approle, + unauthd_client, + mount=config["auth"]["approle_mount"], + cache=secret_id_cache, + token_store=token_auth, + ) + client = AuthenticatedVaultClient(auth, **config["server"]) + elif config["auth"]["method"] in ["token", "wrapped_token"]: + token = _fetch_token( + config, + opts, + token_cache, 
+ force_local=force_local, + embedded_token=embedded_token, + ) + auth = VaultTokenAuth(token=token, cache=token_cache) + client = AuthenticatedVaultClient(auth, **config["server"]) + + if client is not None: + return client, config + raise salt.exceptions.SaltException("Connection configuration is invalid.") + + +def _get_connection_config(cbank, opts, context, force_local=False): + if ( + _get_salt_run_type(opts) in [SALT_RUNTYPE_MASTER, SALT_RUNTYPE_MINION_LOCAL] + or force_local + ): + # only cache config fetched from remote + return _use_local_config(opts) + + log.debug("Using Vault server connection configuration from remote.") + config_cache = _get_config_cache(opts, context, cbank, "config") + + # In case cached data is available, this takes care of resetting + # all connection-scoped data if the config is outdated. + config = config_cache.get() + if config is not None: + log.debug("Using cached Vault server connection configuration.") + return config, None + + log.debug("Using new Vault server connection configuration.") + try: + issue_params = parse_config(opts.get("vault", {}), validate=False)[ + "issue_params" + ] + config = _query_master( + "get_config", + opts, + issue_params=issue_params or None, + ) + except VaultConfigExpired as err: + # Make sure to still work with old peer_run configuration + if "Peer runner return was empty" not in err.message: + raise err + log.warning( + "Got empty response to Vault config request. Falling back to vault.generate_token. " + "Please update your master peer_run configuration." 
+ ) + config = _query_master( + "generate_token", + opts, + ttl=issue_params.get("explicit_max_ttl"), + uses=issue_params.get("num_uses"), + upgrade_request=True, + ) + config = parse_config(config, opts=opts) + # do not couple token cache with configuration cache + embedded_token = config["auth"].pop("token", None) + config = { + "auth": config["auth"], + "cache": config["cache"], + "server": config["server"], + } + config_cache.store(config) + return config, embedded_token + + +def _use_local_config(opts): + log.debug("Using Vault connection details from local config.") + config = parse_config(opts.get("vault", {})) + embedded_token = config["auth"].pop("token", None) + return { + "auth": config["auth"], + "cache": config["cache"], + "server": config["server"], + }, embedded_token + + +def _fetch_secret_id(config, opts, secret_id_cache, force_local=False): + def cache_or_fetch(config, opts, secret_id_cache): + secret_id = secret_id_cache.get() + if secret_id is not None: + return secret_id + + log.debug("Fetching new Vault AppRole secret ID.") + secret_id = _query_master( + "generate_secret_id", + opts, + expected_server=config["server"], + unwrap_expected_creation_path=_get_expected_creation_path( + "secret_id", config + ), + issue_params=parse_config(opts.get("vault", {}), validate=False)[ + "issue_params" + ] + or None, + ) + secret_id = VaultSecretId(**secret_id["data"]) + # Do not cache single-use secret IDs + if secret_id.num_uses != 1: + secret_id_cache.store(secret_id) + return secret_id + + if ( + _get_salt_run_type(opts) in [SALT_RUNTYPE_MASTER, SALT_RUNTYPE_MINION_LOCAL] + or force_local + ): + secret_id = config["auth"]["secret_id"] + if isinstance(secret_id, dict): + if secret_id.get("wrap_info"): + unauthd_client = VaultClient(**config["server"]) + secret_id = unauthd_client.unwrap( + secret_id["wrap_info"]["token"], + expected_creation_path=_get_expected_creation_path( + "secret_id", config + ), + ) + secret_id = secret_id["data"] + return 
LocalVaultSecretId(**secret_id) + if secret_id: + # assume locally configured secret_ids do not expire + return LocalVaultSecretId( + secret_id=config["auth"]["secret_id"], + secret_id_ttl=config["cache"]["config"], + secret_id_num_uses=0, + ) + # When secret_id is falsey, the approle does not require secret IDs, + # hence a call to this function is superfluous + raise salt.exceptions.SaltException("This code path should not be hit at all.") + + log.debug("Using secret_id issued by master.") + return cache_or_fetch(config, opts, secret_id_cache) + + +def _fetch_token(config, opts, token_cache, force_local=False, embedded_token=None): + def cache_or_fetch(config, opts, token_cache, embedded_token): + token = token_cache.get(10) + if token is not None: + log.debug("Using cached token.") + return token + + if isinstance(embedded_token, dict): + token = VaultToken(**embedded_token) + + if not isinstance(token, VaultToken) or not token.is_valid(10): + log.debug("Fetching new Vault token.") + token = _query_master( + "generate_new_token", + opts, + expected_server=config["server"], + unwrap_expected_creation_path=_get_expected_creation_path( + "token", config + ), + issue_params=parse_config(opts.get("vault", {}), validate=False)[ + "issue_params" + ] + or None, + ) + token = VaultToken(**token["auth"]) + + # do not cache single-use tokens + if token.num_uses != 1: + token_cache.store(token) + return token + + if ( + _get_salt_run_type(opts) in [SALT_RUNTYPE_MASTER, SALT_RUNTYPE_MINION_LOCAL] + or force_local + ): + token = None + if isinstance(embedded_token, dict): + if embedded_token.get("wrap_info"): + unauthd_client = VaultClient(**config["server"]) + embedded_token = unauthd_client.unwrap( + embedded_token["wrap_info"]["token"], + expected_creation_path=_get_expected_creation_path("token", config), + )["auth"] + token = VaultToken(**embedded_token) + elif config["auth"]["method"] == "wrapped_token": + unauthd_client = VaultClient(**config["server"]) + 
embedded_token = unauthd_client.unwrap( + embedded_token, + expected_creation_path=_get_expected_creation_path("token", config), + )["auth"] + token = VaultToken(**embedded_token) + elif embedded_token is not None: + # if the embedded plain token info has been cached before, don't repeat + # the query unnecessarily + token = token_cache.get() + if token is None or embedded_token != str(token): + # lookup and verify raw token + client = VaultClient(**config["server"]) + token_info = client.token_lookup(embedded_token, raw=True) + if token_info.status_code != 200: + raise VaultException( + "Configured token cannot be verified. It is most likely expired or invalid." + ) + token_meta = token_info.json()["data"] + token = VaultToken( + lease_id=embedded_token, + lease_duration=token_meta["ttl"], + **token_meta, + ) + token_cache.store(token) + if token is not None: + return token + raise VaultException("Invalid configuration, missing token.") + + log.debug("Using token generated by master.") + return cache_or_fetch(config, opts, token_cache, embedded_token) + + +def _query_master( + func, + opts, + expected_server=None, + unwrap_client=None, + unwrap_expected_creation_path=None, + **kwargs, +): + def check_result( + result, + expected_server=None, + unwrap_client=None, + unwrap_expected_creation_path=None, + ): + if not result: + log.error( + "Failed to get Vault connection from master! No result returned - " + "does the peer runner publish configuration include `vault.%s`?", + func, + ) + # Expire configuration in case this is the result of an auth method change. + raise VaultConfigExpired( + f"Peer runner return was empty. Make sure {func} is listed in the master peer_run config." + ) + if not isinstance(result, dict): + log.error( + "Failed to get Vault connection from master! Response is not a dict: %s", + result, + ) + raise salt.exceptions.CommandExecutionError(result) + if "error" in result: + log.error( + "Failed to get Vault connection from master! 
An error was returned: %s", + result["error"], + ) + if result.get("expire_cache"): + log.warning("Master returned error and requested cache expiration.") + raise VaultConfigExpired() + raise salt.exceptions.CommandExecutionError(result) + + config_expired = False + + if result.get("expire_cache", False): + log.info("Master requested Vault config expiration.") + config_expired = True + + if "server" in result: + # Ensure locally overridden verify parameter does not + # always invalidate cache. + reported_server = parse_config(result["server"], validate=False, opts=opts)[ + "server" + ] + result.update({"server": reported_server}) + + if expected_server is not None and result.get("server") != expected_server: + log.info( + "Mismatch of cached and reported server data detected. Invalidating cache." + ) + # make sure to fetch wrapped data anyways for security reasons + config_expired = True + unwrap_expected_creation_path = None + + # This is used to augment some vault responses with data fetched by the master + # e.g. 
secret_id_num_uses + misc_data = result.get("misc_data", {}) + + if result.get("wrap_info") or result.get("wrap_info_nested"): + if unwrap_client is not None and unwrap_client.get_config() != result.get( + "server" + ): + unwrap_client = None + # Ensure to fetch wrapped data anyways for security reasons + config_expired = True + + if unwrap_client is None: + unwrap_client = VaultClient(**result["server"]) + + for key in [""] + result.get("wrap_info_nested", []): + if key: + wrapped = salt.utils.data.traverse_dict(result, key) + else: + wrapped = result + if not wrapped or "wrap_info" not in wrapped: + continue + wrapped_response = VaultWrappedResponse(**wrapped["wrap_info"]) + unwrapped_response = unwrap_client.unwrap( + wrapped_response, + expected_creation_path=unwrap_expected_creation_path, + ) + if key: + salt.utils.dictupdate.set_dict_key_value( + result, + key, + unwrapped_response.get("auth") + or unwrapped_response.get("data"), + ) + else: + if unwrapped_response.get("auth"): + result.update({"auth": unwrapped_response["auth"]}) + if unwrapped_response.get("data"): + result.update({"data": unwrapped_response["data"]}) + + if config_expired: + raise VaultConfigExpired() + + for key, val in misc_data.items(): + tgt = "data" if result.get("data") is not None else "auth" + if ( + salt.utils.data.traverse_dict_and_list(result, f"{tgt}:{key}", NOT_SET) + == NOT_SET + ): + salt.utils.dictupdate.set_dict_key_value( + result, + f"{tgt}:{key}", + val, + ) + + result.pop("wrap_info", None) + result.pop("wrap_info_nested", None) + result.pop("misc_data", None) + return result + + global __salt__ # pylint: disable=global-statement + if not __salt__: + __salt__ = salt.loader.minion_mods(opts) + + minion_id = opts["grains"]["id"] + pki_dir = opts["pki_dir"] + + # When rendering pillars, the module executes on the master, but the token + # should be issued for the minion, so that the correct policies are applied + if opts.get("__role", "minion") == "minion": + private_key 
= f"{pki_dir}/minion.pem" + log.debug( + "Running on minion, signing request `vault.%s` with key %s", + func, + private_key, + ) + signature = base64.b64encode(salt.crypt.sign_message(private_key, minion_id)) + arg = [ + ("minion_id", minion_id), + ("signature", signature), + ("impersonated_by_master", False), + ] + list(kwargs.items()) + + result = __salt__["publish.runner"]( + f"vault.{func}", arg=[{"__kwarg__": True, k: v} for k, v in arg] + ) + else: + private_key = f"{pki_dir}/master.pem" + log.debug( + "Running on master, signing request `vault.%s` for %s with key %s", + func, + minion_id, + private_key, + ) + signature = base64.b64encode(salt.crypt.sign_message(private_key, minion_id)) + result = __salt__["saltutil.runner"]( + f"vault.{func}", + minion_id=minion_id, + signature=signature, + impersonated_by_master=True, + **kwargs, + ) + return check_result( + result, + expected_server=expected_server, + unwrap_client=unwrap_client, + unwrap_expected_creation_path=unwrap_expected_creation_path, + ) + + +def parse_config(config, validate=True, opts=None): + """ + Returns a vault configuration dictionary that has all + keys with defaults. Checks if required data is available. 
+ """ + default_config = { + "auth": { + "approle_mount": "approle", + "approle_name": "salt-master", + "method": "token", + "secret_id": None, + "token_lifecycle": { + "minimum_ttl": 10, + "renew_increment": None, + }, + }, + "cache": { + "backend": "session", + "config": 3600, + "kv_metadata": "connection", + "secret": "ttl", + }, + "issue": { + "allow_minion_override_params": False, + "type": "token", + "approle": { + "mount": "salt-minions", + "params": { + "bind_secret_id": True, + "secret_id_num_uses": 1, + "secret_id_ttl": 60, + "token_explicit_max_ttl": 60, + "token_num_uses": 10, + }, + }, + "token": { + "role_name": None, + "params": { + "explicit_max_ttl": None, + "num_uses": 1, + }, + }, + "wrap": "30s", + }, + "issue_params": {}, + "metadata": { + "entity": { + "minion-id": "{minion}", + }, + "secret": { + "saltstack-jid": "{jid}", + "saltstack-minion": "{minion}", + "saltstack-user": "{user}", + }, + }, + "policies": { + "assign": [ + "saltstack/minions", + "saltstack/{minion}", + ], + "cache_time": 60, + "refresh_pillar": None, + }, + "server": { + "namespace": None, + "verify": None, + }, + } + try: + # Policy generation has params, the new config groups them together. + if isinstance(config.get("policies", {}), list): + config["policies"] = {"assign": config.pop("policies")} + merged = salt.utils.dictupdate.merge( + default_config, + config, + strategy="smart", + merge_lists=False, + ) + # ttl, uses were used as configuration for issuance and minion overrides as well + # as token meta information. The new configuration splits those semantics. + for old_token_conf, new_token_conf in [ + ("ttl", "explicit_max_ttl"), + ("uses", "num_uses"), + ]: + if old_token_conf in merged["auth"]: + merged["issue"]["token"]["params"][new_token_conf] = merged[ + "issue_params" + ][new_token_conf] = merged["auth"].pop(old_token_conf) + # Those were found in the root namespace, but grouping them together + # makes semantic and practical sense. 
+ for old_server_conf in ["namespace", "url", "verify"]: + if old_server_conf in merged: + merged["server"][old_server_conf] = merged.pop(old_server_conf) + if "role_name" in merged: + merged["issue"]["token"]["role_name"] = merged.pop("role_name") + if "token_backend" in merged["auth"]: + merged["cache"]["backend"] = merged["auth"].pop("token_backend") + if "allow_minion_override" in merged["auth"]: + merged["issue"]["allow_minion_override_params"] = merged["auth"].pop( + "allow_minion_override" + ) + if opts is not None and "vault" in opts: + local_config = opts["vault"] + # Respect locally configured verify parameter + if local_config.get("verify", NOT_SET) != NOT_SET: + merged["server"]["verify"] = local_config["verify"] + elif local_config.get("server", {}).get("verify", NOT_SET) != NOT_SET: + merged["server"]["verify"] = local_config["server"]["verify"] + # same for token_lifecycle + if local_config.get("auth", {}).get("token_lifecycle"): + merged["auth"]["token_lifecycle"] = local_config["auth"][ + "token_lifecycle" + ] + + if not validate: + return merged + + if merged["auth"]["method"] == "approle": + if "role_id" not in merged["auth"]: + raise AssertionError("auth:role_id is required for approle auth") + elif merged["auth"]["method"] == "token": + if "token" not in merged["auth"]: + raise AssertionError("auth:token is required for token auth") + else: + raise AssertionError( + f"`{merged['auth']['method']}` is not a valid auth method." + ) + + if "url" not in merged["server"]: + raise AssertionError("server:url is required") + except AssertionError as err: + raise salt.exceptions.InvalidConfigError( + f"Invalid vault configuration: {err}" + ) from err + return merged + + +def _get_expected_creation_path(secret_type, config=None): + if secret_type == "token": + return r"auth/token/create(/[^/]+)?" 
+ + if secret_type == "secret_id": + if config is not None: + return r"auth/{}/role/{}/secret\-id".format( + re.escape(config["auth"]["approle_mount"]), + re.escape(config["auth"]["approle_name"]), + ) + return r"auth/[^/]+/role/[^/]+/secret\-id" + + if secret_type == "role_id": + if config is not None: + return r"auth/{}/role/{}/role\-id".format( + re.escape(config["auth"]["approle_mount"]), + re.escape(config["auth"]["approle_name"]), + ) + return r"auth/[^/]+/role/[^/]+/role\-id" + + raise VaultInvocationError( + f"secret_type must be one of token, secret_id, role_id, got `{secret_type}`." + ) + + +class VaultException(salt.exceptions.SaltException): + """ + Base class for exceptions raised by this module + """ + + +class VaultAuthExpired(VaultException): + """ + Raised when authentication data is reported to be outdated locally. + """ + + +class VaultConfigExpired(VaultException): + """ + Raised when secret authentication data queried from the master reports + a different server configuration than locally cached. + """ + + +class VaultUnwrapException(VaultException): + """ + Raised when an expected creation path for a wrapping token differs + from the reported one. + This has to be taken seriously as it indicates tampering. + """ + + +# https://www.vaultproject.io/api-docs#http-status-codes +class VaultInvocationError(VaultException): + """ + HTTP 400 and InvalidArgumentException for this module + """ + + +class VaultPermissionDeniedError(VaultException): + """ + HTTP 403 + """ + + +class VaultNotFoundError(VaultException): + """ + HTTP 404 + In some cases, this is also raised when the client does not have + the correct permissions for the requested endpoint. 
+ """ + + +class VaultUnsupportedOperationError(VaultException): + """ + HTTP 405 + """ + + +class VaultPreconditionFailedError(VaultException): + """ + HTTP 412 + """ + + +class VaultServerError(VaultException): + """ + HTTP 500 + HTTP 502 + """ + + +class VaultUnavailableError(VaultException): + """ + HTTP 503 + Indicates maintenance or sealed status. + """ + + +class VaultClient: + """ + Unauthenticated client for the Vault API. + Base class for authenticated client. + """ + + def __init__(self, url, namespace=None, verify=None): + self.url = url + self.namespace = namespace + self.verify = verify + + def delete(self, endpoint, wrap=False, raise_error=True, add_headers=None): + """ + Wrapper for client.request("DELETE", ...) + """ + return self.request( + "DELETE", + endpoint, + wrap=wrap, + raise_error=raise_error, + add_headers=add_headers, + ) + + def get(self, endpoint, wrap=False, raise_error=True, add_headers=None): + """ + Wrapper for client.request("GET", ...) + """ + return self.request( + "GET", endpoint, wrap=wrap, raise_error=raise_error, add_headers=add_headers + ) + + def list(self, endpoint, wrap=False, raise_error=True, add_headers=None): + """ + Wrapper for client.request("LIST", ...) + TODO: configuration to enable GET requests with query parameters for LIST? + """ + return self.request( + "LIST", + endpoint, + wrap=wrap, + raise_error=raise_error, + add_headers=add_headers, + ) + + def post( + self, endpoint, payload=None, wrap=False, raise_error=True, add_headers=None + ): + """ + Wrapper for client.request("POST", ...) + Vault considers POST and PUT to be synonymous. + """ + return self.request( + "POST", + endpoint, + payload=payload, + wrap=wrap, + raise_error=raise_error, + add_headers=add_headers, + ) + + def patch(self, endpoint, payload, wrap=False, raise_error=True, add_headers=None): + """ + Wrapper for client.request("PATCH", ...) 
+ """ + return self.request( + "PATCH", + endpoint, + payload=payload, + wrap=wrap, + raise_error=raise_error, + add_headers=add_headers, + ) + + def request( + self, + method, + endpoint, + payload=None, + wrap=False, + raise_error=True, + add_headers=None, + **kwargs, + ): + """ + Issue a request against the Vault API. + Returns boolean when no data was returned, otherwise the decoded json data + or a VaultWrappedResponse object if wrapping was requested. + """ + res = self.request_raw( + method, + endpoint, + payload=payload, + wrap=wrap, + add_headers=add_headers, + **kwargs, + ) + if res.status_code == 204: + return True + data = res.json() + if not res.ok: + if raise_error: + self._raise_status(res) + return data + if wrap: + return VaultWrappedResponse(**data["wrap_info"]) + return data + + def request_raw( + self, method, endpoint, payload=None, wrap=False, add_headers=None, **kwargs + ): + """ + Issue a request against the Vault API. Returns the raw response object. + """ + url = self._get_url(endpoint) + headers = self._get_headers(wrap) + try: + headers.update(add_headers) + except TypeError: + pass + res = requests.request( + method, url, headers=headers, json=payload, verify=self.verify, **kwargs + ) + return res + + def unwrap(self, wrapped, expected_creation_path=None): + """ + Unwraps the data associated with a wrapping token. + + wrapped + Wrapping token to unwrap + + expected_creation_path + Regex expression or list of expressions that should fully match the + wrapping token creation path. At least one match is required. + Defaults to None, which skips the check. + + .. note:: + This check prevents tampering with wrapping tokens, which are + valid for one request only. Usually, if an attacker sniffs a wrapping + token, there will be two unwrapping requests, causing an audit warning. + If the attacker can issue a new wrapping token and insert it into the + response instead, this warning would be silenced. 
Assuming they do not + possess the permissions to issue a wrapping token from the correct + endpoint, checking the creation path makes this kind of attack obvious. + """ + if expected_creation_path: + wrap_info = self.wrap_info(wrapped) + if not isinstance(expected_creation_path, list): + expected_creation_path = [expected_creation_path] + if not any( + re.fullmatch(p, wrap_info["creation_path"]) + for p in expected_creation_path + ): + # TODO: consider firing an event here as well + raise VaultUnwrapException( + "Wrapped response was not created from expected Vault path: " + f"`{wrap_info['creation_path']}` is not matched by any of `{expected_creation_path}`.\n" + "This indicates tampering with the wrapping token by a third party " + "and should be taken very seriously! If you changed some authentication-" + "specific configuration on the master recently, especially minion " + "approle mount, you should consider if this error was caused by outdated " + "cached data on this minion instead." + ) + url = self._get_url("sys/wrapping/unwrap") + headers = self._get_headers() + payload = {} + if "X-Vault-Token" not in headers: + headers["X-Vault-Token"] = str(wrapped) + else: + payload["token"] = str(wrapped) + res = requests.request("POST", url, headers=headers, json=payload) + if not res.ok: + self._raise_status(res) + return res.json() + + def wrap_info(self, wrapped): + """ + Lookup wrapping token meta information. + """ + endpoint = "sys/wrapping/lookup" + add_headers = {"X-Vault-Token": str(wrapped)} + return self.post(endpoint, wrap=False, add_headers=add_headers)["data"] + + def token_lookup(self, token=None, accessor=None, raw=False): + """ + Lookup token meta information. + + token + The token to look up or to use to look up the accessor. + Required. + + accessor + The accessor to use to query the token meta information. + + raw + Return the raw response object instead of response data. + Also disables status code checking. 
+ """ + endpoint = "auth/token/lookup-self" + method = "GET" + payload = {} + if token is None: + raise VaultInvocationError( + "Unauthenticated VaultClient needs a token to lookup." + ) + add_headers = {"X-Vault-Token": token} + + if accessor is not None: + endpoint = "auth/token/lookup-accessor" + payload["accessor"] = accessor + + res = self.request_raw( + method, endpoint, payload=payload, wrap=False, add_headers=add_headers + ) + if raw: + return res + self._raise_status(res) + return res.json()["data"] + + def token_valid(self, valid_for=0, remote=True): # pylint: disable=unused-argument + return False + + def get_config(self): + """ + Returns Vault server configuration used by this client. + """ + return { + "url": self.url, + "namespace": self.namespace, + "verify": self.verify, + } + + def _get_url(self, endpoint): + endpoint = endpoint.strip("/") + return f"{self.url}/v1/{endpoint}" + + def _get_headers(self, wrap=False): + headers = {"Content-Type": "application/json", "X-Vault-Request": "true"} + if self.namespace is not None: + headers["X-Vault-Namespace"] = self.namespace + if wrap: + headers["X-Vault-Wrap-TTL"] = str(wrap) + return headers + + def _raise_status(self, res): + errors = ", ".join(res.json().get("errors", [])) + if res.status_code == 400: + raise VaultInvocationError(errors) + if res.status_code == 403: + raise VaultPermissionDeniedError(errors) + if res.status_code == 404: + raise VaultNotFoundError(errors) + if res.status_code == 405: + raise VaultUnsupportedOperationError(errors) + if res.status_code == 412: + raise VaultPreconditionFailedError(errors) + if res.status_code in [500, 502]: + raise VaultServerError(errors) + if res.status_code == 503: + raise VaultUnavailableError(errors) + res.raise_for_status() + + +# This list is not complete at all, but contains +# the most important paths. 
+VAULT_UNAUTHD_PATHS = ( + "sys/wrapping/lookup", + "sys/internal/ui/mounts", + "sys/internal/ui/namespaces", + "sys/seal-status", + "sys/health", +) + + +class AuthenticatedVaultClient(VaultClient): + """ + Authenticated client for the Vault API. + This should be used for most operations. + """ + + auth = None + + def __init__(self, auth, url, **kwargs): + self.auth = auth + super().__init__(url, **kwargs) + + def token_valid(self, valid_for=0, remote=True): + """ + Check whether this client's authentication information is + still valid. + + remote + Check with the remote Vault server as well. This consumes + a token use. Defaults to true. + """ + if not self.auth.is_valid(valid_for): + return False + if not remote: + return True + try: + res = self.token_lookup(raw=True) + if res.status_code != 200: + return False + return True + except Exception as err: # pylint: disable=broad-except + raise salt.exceptions.CommandExecutionError( + "Error while looking up self token." + ) from err + + def token_lookup(self, token=None, accessor=None, raw=False): + """ + Lookup token meta information. + + token + The token to look up. If neither token nor accessor + are specified, looks up the current token in use by + this client. + + accessor + The accessor of the token to query the meta information for. + + raw + Return the raw response object instead of response data. + Also disables status code checking. + """ + endpoint = "auth/token/lookup" + method = "POST" + payload = {} + if token is None and accessor is None: + endpoint += "-self" + method = "GET" + if token is not None: + payload["token"] = token + elif accessor is not None: + endpoint += "-accessor" + payload["accessor"] = accessor + if raw: + return self.request_raw(method, endpoint, payload=payload, wrap=False) + return self.request(method, endpoint, payload=payload, wrap=False)["data"] + + def token_renew(self, increment=None, token=None, accessor=None): + """ + Renew a token. 
+ + increment + Request the token to be valid for this amount of time from the current + point of time onwards. Can also be used to reduce the validity period. + The server might not honor this increment. + Can be an integer (seconds) or a time string like ``1h``. Optional. + + token + The token that should be renewed. Optional. + If token and accessor are unset, renews the token currently in use + by this client. + + accessor + The accessor of the token that should be renewed. Optional. + """ + endpoint = "auth/token/renew" + payload = {} + + if token is None and accessor is None: + if not self.auth.is_renewable(): + return False + endpoint += "-self" + + if increment is not None: + payload["increment"] = increment + if token is not None: + payload["token"] = token + elif accessor is not None: + endpoint += "-accessor" + payload["accessor"] = accessor + + res = self.post(endpoint, payload=payload) + + if token is None and accessor is None: + self.auth.update_token(res["auth"]) + return res["auth"] + + def request_raw( + self, + method, + endpoint, + payload=None, + wrap=False, + add_headers=None, + is_unauthd=False, + **kwargs, + ): # pylint: disable=arguments-differ + """ + Issue an authenticated request against the Vault API. Returns the raw response object. 
+ """ + ret = super().request_raw( + method, + endpoint, + payload=payload, + wrap=wrap, + add_headers=add_headers, + **kwargs, + ) + # tokens are used regardless of status code + if not is_unauthd and not endpoint.startswith(VAULT_UNAUTHD_PATHS): + self.auth.used() + return ret + + def _get_headers(self, wrap=False): + headers = super()._get_headers(wrap) + headers["X-Vault-Token"] = str(self.auth.get_token()) + return headers + + +def iso_to_timestamp(iso_time): + """ + Most endpoints respond with RFC3339-formatted strings + This is a hacky way to use inbuilt tools only for converting + to a timestamp + """ + # drop subsecond precision to make it easier on us + # (length would need to be 3, 6 or 9) + iso_time = re.sub(r"\.[\d]+", "", iso_time) + iso_time = re.sub(r"Z$", "+00:00", iso_time) + try: + # Python >=v3.7 + return int(datetime.datetime.fromisoformat(iso_time).timestamp()) + except AttributeError: + # Python < v3.7 + dstr, tstr = iso_time.split("T") + year = int(dstr[:4]) + month = int(dstr[5:7]) + day = int(dstr[8:10]) + hour = int(tstr[:2]) + minute = int(tstr[3:5]) + second = int(tstr[6:8]) + tz_pos = (tstr.find("-") + 1 or tstr.find("+") + 1) - 1 + tz_hour = int(tstr[tz_pos + 1 : tz_pos + 3]) + tz_minute = int(tstr[tz_pos + 4 : tz_pos + 6]) + if all(x == 0 for x in (tz_hour, tz_minute)): + tz = datetime.timezone.utc + else: + tz_sign = -1 if tstr[tz_pos] == "-" else 1 + td = datetime.timedelta(hours=tz_hour, minutes=tz_minute) + tz = datetime.timezone(tz_sign * td) + return int( + datetime.datetime(year, month, day, hour, minute, second, 0, tz).timestamp() + ) + + +class DurationMixin: + """ + Mixin that handles expiration with time + """ + + def __init__( + self, + renewable=False, + duration=0, + creation_time=None, + expire_time=None, + **kwargs, + ): + if "lease_duration" in kwargs: + duration = kwargs.pop("lease_duration") + self.renewable = renewable + self.duration = duration + creation_time = ( + creation_time if creation_time is not None else 
round(time.time()) + ) + try: + creation_time = int(creation_time) + except ValueError: + creation_time = iso_to_timestamp(creation_time) + self.creation_time = creation_time + + expire_time = ( + expire_time if expire_time is not None else round(time.time()) + duration + ) + try: + expire_time = int(expire_time) + except ValueError: + expire_time = iso_to_timestamp(expire_time) + self.expire_time = expire_time + super().__init__(**kwargs) + + def is_renewable(self): + """ + Checks whether the lease is renewable + """ + return self.renewable + + def is_valid_for(self, valid_for=0, blur=0): + """ + Checks whether the entity is valid + + valid_for + Check whether the entity will still be valid in the future. + This can be an integer, which will be interpreted as seconds, or a + time string using the same format as Vault does: + Suffix ``s`` for seconds, ``m`` for minutes, ``h`` for hours, ``d`` for days. + Defaults to 0. + + blur + Allow undercutting ``valid_for`` for this amount of seconds. + Defaults to 0. + """ + if not self.duration: + return True + delta = self.expire_time - time.time() - timestring_map(valid_for) + if delta >= 0: + return True + return abs(delta) <= blur + + +class UseCountMixin: + """ + Mixin that handles expiration with number of uses + """ + + def __init__(self, num_uses=0, use_count=0, **kwargs): + self.num_uses = num_uses + self.use_count = use_count + super().__init__(**kwargs) + + def used(self): + """ + Increment the use counter by one. + """ + self.use_count += 1 + + def has_uses_left(self, uses=1): + """ + Check whether this entity has uses left. + """ + return self.num_uses == 0 or self.num_uses - (self.use_count + uses) >= 0 + + +class DropInitKwargsMixin: + """ + Mixin that breaks the chain of passing unhandled kwargs up the MRO. 
+ """ + + def __init__(self, *args, **kwargs): + super().__init__(*args) + + +class AccessorMixin: + """ + Mixin that manages accessor information relevant for tokens/secret IDs + """ + + def __init__(self, accessor=None, wrapped_accessor=None, **kwargs): + self.accessor = accessor if wrapped_accessor is None else wrapped_accessor + self.wrapping_accessor = accessor if wrapped_accessor is not None else None + super().__init__(**kwargs) + + def accessor_payload(self): + if self.accessor is not None: + return {"accessor": self.accessor} + raise VaultInvocationError("No accessor information available") + + +class BaseLease(DurationMixin, DropInitKwargsMixin): + """ + Base class for leases that expire with time. + """ + + def __init__(self, lease_id, **kwargs): + self.id = self.lease_id = lease_id + super().__init__(**kwargs) + + def __str__(self): + return self.id + + def __repr__(self): + return repr(self.to_dict()) + + def __eq__(self, other): + try: + data = other.__dict__ + except AttributeError: + data = other + return data == self.__dict__ + + def with_renewed(self, **kwargs): + """ + Partially update the contained data after lease renewal + """ + attrs = copy.copy(self.__dict__) + # ensure expire_time is reset properly + attrs.pop("expire_time") + attrs.update(kwargs) + return type(self)(**attrs) + + def to_dict(self): + """ + Return a dict of all contained attributes + """ + return self.__dict__ + + +class VaultLease(BaseLease): + """ + Data object representing a Vault lease. + """ + + def __init__(self, lease_id, data, **kwargs): + # save lease-associated data + self.data = data + super().__init__(lease_id, **kwargs) + + def is_valid(self, valid_for=0, blur=0): + """ + Checks whether the lease is valid for an amount of time + + valid_for + Check whether the token will still be valid in the future. 
+ This can be an integer, which will be interpreted as seconds, or a + time string using the same format as Vault does: + Suffix ``s`` for seconds, ``m`` for minutes, ``h`` for hours, ``d`` for days. + Defaults to 0. + + blur + Allow undercutting ``valid_for`` for this amount of seconds. + Defaults to 0. + """ + return self.is_valid_for(valid_for, blur=blur) + + +class VaultToken(UseCountMixin, AccessorMixin, BaseLease): + """ + Data object representing an authentication token + """ + + def __init__(self, **kwargs): + if "client_token" in kwargs: + # Ensure response data from Vault is accepted as well + kwargs["lease_id"] = kwargs.pop("client_token") + super().__init__(**kwargs) + + def is_valid(self, valid_for=0, uses=1): + """ + Checks whether the token is valid for an amount of time and number of uses + + valid_for + Check whether the token will still be valid in the future. + This can be an integer, which will be interpreted as seconds, or a + time string using the same format as Vault does: + Suffix ``s`` for seconds, ``m`` for minutes, ``h`` for hours, ``d`` for days. + Defaults to 0. + + uses + Check whether the token has at least this number of uses left. Defaults to 1. + """ + return self.is_valid_for(valid_for) and self.has_uses_left(uses) + + def is_renewable(self): + """ + Check whether the token is renewable, which requires it + to be currently valid for at least two uses and renewable + """ + # Renewing a token deducts a use, hence it does not make sense to + # renew a token on the last use + return self.renewable and self.is_valid(uses=2) + + def payload(self): + """ + Return the payload to use for POST requests using this token + """ + return {"token": str(self)} + + def serialize_for_minion(self): + """ + Serialize all necessary data to recreate this object + into a dict that can be sent to a minion. 
+ """ + return { + "client_token": self.id, + "renewable": self.renewable, + "lease_duration": self.duration, + "num_uses": self.num_uses, + "creation_time": self.creation_time, + "expire_time": self.expire_time, + } + + +class VaultSecretId(UseCountMixin, AccessorMixin, BaseLease): + """ + Data object representing an AppRole secret ID. + """ + + def __init__(self, **kwargs): + if "secret_id" in kwargs: + # Ensure response data from Vault is accepted as well + kwargs["lease_id"] = kwargs.pop("secret_id") + kwargs["lease_duration"] = kwargs.pop("secret_id_ttl") + kwargs["num_uses"] = kwargs.pop("secret_id_num_uses", 0) + kwargs["accessor"] = kwargs.pop("secret_id_accessor", None) + super().__init__(**kwargs) + + def is_valid(self, valid_for=0, uses=1): # pylint: disable=arguments-differ + """ + Checks whether the secret ID is valid for an amount of time and number of uses + + valid_for + Check whether the secret ID will still be valid in the future. + This can be an integer, which will be interpreted as seconds, or a + time string using the same format as Vault does: + Suffix ``s`` for seconds, ``m`` for minutes, ``h`` for hours, ``d`` for days. + Defaults to 0. + + uses + Check whether the secret ID has at least this number of uses left. Defaults to 1. + """ + return self.is_valid_for(valid_for) and self.has_uses_left(uses) + + def payload(self): + """ + Return the payload to use for POST requests using this secret ID + """ + return {"secret_id": str(self)} + + def serialize_for_minion(self): + """ + Serialize all necessary data to recreate this object + into a dict that can be sent to a minion. 
+ """ + return { + "secret_id": self.id, + "secret_id_ttl": self.duration, + "secret_id_num_uses": self.num_uses, + "creation_time": self.creation_time, + "expire_time": self.expire_time, + } + + +class VaultWrappedResponse(AccessorMixin, BaseLease): + """ + Data object representing a wrapped response + """ + + def __init__( + self, + token, + ttl, + creation_path, + wrapped_accessor=None, + **kwargs, + ): + super().__init__(lease_id=token, lease_duration=ttl, renewable=False, **kwargs) + self.creation_path = creation_path + self.wrapped_accessor = wrapped_accessor + + def serialize_for_minion(self): + """ + Serialize all necessary data to recreate this object + into a dict that can be sent to a minion. + """ + return { + "wrap_info": { + "token": self.id, + "ttl": self.duration, + "creation_time": self.creation_time, + "creation_path": self.creation_path, + }, + } + + +class CommonCache: + """ + Base class that unifies context and other cache backends. + """ + + def __init__(self, context, cbank, cache_backend=None, ttl=None): + self.context = context + self.cbank = cbank + self.cache = cache_backend + self.ttl = ttl + + def _ckey_exists(self, ckey, flush=True): + if self.cbank in self.context and ckey in self.context[self.cbank]: + return True + if self.cache is not None: + if not self.cache.contains(self.cbank, ckey): + return False + if self.ttl is not None: + updated = self.cache.updated(self.cbank, ckey) + if int(time.time()) - updated >= self.ttl: + if flush: + log.debug( + f"Cached data in {self.cbank}/{ckey} outdated, flushing." 
+ ) + self.flush() + return False + return True + return False + + def _get_ckey(self, ckey, flush=True): + if not self._ckey_exists(ckey, flush=flush): + return None + if self.cbank in self.context and ckey in self.context[self.cbank]: + return self.context[self.cbank][ckey] + if self.cache is not None: + return ( + self.cache.fetch(self.cbank, ckey) or None + ) # account for race conditions + raise RuntimeError("This code path should not have been hit.") + + def _store_ckey(self, ckey, value): + if self.cache is not None: + self.cache.store(self.cbank, ckey, value) + if self.cbank not in self.context: + self.context[self.cbank] = {} + self.context[self.cbank][ckey] = value + + def _flush(self, ckey=None): + if self.cache is not None: + self.cache.flush(self.cbank, ckey) + if self.cbank in self.context: + if ckey is None: + self.context.pop(self.cbank) + else: + self.context[self.cbank].pop(ckey, None) + # also remove sub-banks from context to mimic cache behavior + if ckey is None: + for bank in list(self.context): + if bank.startswith(self.cbank): + self.context.pop(bank) + + def _list(self): + ckeys = [] + if self.cbank in self.context: + ckeys += list(self.context[self.cbank]) + if self.cache is not None: + ckeys += self.cache.list(self.cbank) + return set(ckeys) + + +class VaultCache(CommonCache): + """ + Encapsulates session and other cache backends for a single domain + like secret path metadata. Uses a single cache key. 
+ """ + + def __init__(self, context, cbank, ckey, cache_backend=None, ttl=None): + super().__init__(context, cbank, cache_backend=cache_backend, ttl=ttl) + self.ckey = ckey + + def exists(self, flush=True): + """ + Check whether data for this domain exists + """ + return self._ckey_exists(self.ckey, flush=flush) + + def get(self, flush=True): + """ + Return the cached data for this domain or None + """ + return self._get_ckey(self.ckey, flush=flush) + + def flush(self, cbank=False): + """ + Flush the cache for this domain + """ + return self._flush(self.ckey if not cbank else None) + + def store(self, value): + """ + Store data for this domain + """ + return self._store_ckey(self.ckey, value) + + +class VaultConfigCache(VaultCache): + """ + Handles caching of received configuration + """ + + def __init__( + self, + context, + cbank, + ckey, + opts, + cache_backend_factory=_get_cache_backend, + init_config=None, + ): # pylint: disable=super-init-not-called + self.context = context + self.cbank = cbank + self.ckey = ckey + self.opts = opts + self.config = None + self.cache = None + self.ttl = None + self.cache_backend_factory = cache_backend_factory + if init_config is not None: + self._load(init_config) + + def exists(self, flush=True): + """ + Check if a configuration has been loaded and cached + """ + if self.config is None: + return False + return super().exists(flush=flush) + + def get(self, flush=True): + """ + Return the current cached configuration + """ + if self.config is None: + return None + return super().get(flush=flush) + + def flush(self, cbank=True): + """ + Flush all connection-scoped data + """ + if self.config is None: + log.warning( + "Tried to flush uninitialized configuration cache. Skipping flush." 
+ ) + return + # flush the whole connection-scoped cache by default + super().flush(cbank=cbank) + self.config = None + self.cache = None + self.ttl = None + + def _load(self, config): + if self.config is not None: + if ( + self.config["cache"]["backend"] != "session" + and self.config["cache"]["backend"] != config["cache"]["backend"] + ): + self.flush() + self.config = config + self.cache = self.cache_backend_factory(self.config, self.opts) + self.ttl = self.config["cache"]["config"] + + def store(self, value): + """ + Reload cache configuration, then store the new Vault configuration, + overwriting the existing one. + """ + self._load(value) + super().store(value) + + +class LeaseCacheMixin: + """ + Mixin for auth and lease cache that checks validity + and acts with hydrated objects + """ + + def __init__(self, *args, **kwargs): + self.lease_cls = kwargs.pop("lease_cls", VaultLease) + super().__init__(*args, **kwargs) + + def _check_validity(self, lease_data, valid_for=0): + lease = self.lease_cls(**lease_data) + if lease.is_valid(valid_for): + log.debug("Using cached lease.") + return lease + return None + + +class VaultLeaseCache(LeaseCacheMixin, CommonCache): + """ + Handles caching of Vault leases. Supports multiple cache keys. + Checks whether cached leases are still valid before returning. + """ + + def get(self, ckey, valid_for=0, flush=True): + """ + Returns valid cached lease data or None. + Flushes cache if invalid by default. + """ + data = self._get_ckey(ckey, flush=flush) + if data is None: + return data + ret = self._check_validity(data, valid_for=valid_for) + if ret is None and flush: + log.debug("Cached lease not valid anymore. 
Flushing cache.") + self._flush(ckey) + return ret + + def store(self, ckey, value): + """ + Store a lease in cache + """ + try: + value = value.to_dict() + except AttributeError: + pass + return self._store_ckey(ckey, value) + + def exists(self, ckey, flush=True): + """ + Check whether a named lease exists in cache + """ + return self._ckey_exists(ckey, flush=flush) + + def flush(self, ckey=None): + """ + Flush the lease cache or a single lease from the lease cache + """ + return self._flush(ckey) + + def list(self): + """ + List all cached leases. Does not filter invalid ones, + so fetching a reported one might still return None. + """ + return self._list() + + +class VaultAuthCache(LeaseCacheMixin, CommonCache): + """ + Implements authentication secret-specific caches. Checks whether + the cached secrets are still valid before returning. + """ + + def __init__(self, context, cbank, ckey, auth_cls, cache_backend=None, ttl=None): + super().__init__( + context, cbank, lease_cls=auth_cls, cache_backend=cache_backend, ttl=ttl + ) + self.ckey = ckey + + def exists(self, flush=True): + """ + Check whether data for this domain exists + """ + return self._ckey_exists(self.ckey, flush=flush) + + def get(self, valid_for=0, flush=True): + """ + Returns valid cached auth data or None. + Flushes cache if invalid by default. + """ + data = self._get_ckey(self.ckey, flush=flush) + if data is None: + return data + ret = self._check_validity(data, valid_for=valid_for) + if ret is None and flush: + log.debug("Cached auth data not valid anymore. Flushing cache.") + self.flush() + return ret + + def store(self, value): + """ + Store an auth credential in cache. Will overwrite possibly existing one. + """ + try: + value = value.to_dict() + except AttributeError: + pass + return self._store_ckey(self.ckey, value) + + def flush(self, cbank=None): + """ + Flush the cached auth credentials. 
If this is a token cache, + flushing it will delete the whole session-scoped cache bank by default. + """ + if cbank is None: + # flush the whole cbank (session-scope) if this is a token cache by default + ckey = None if self.lease_cls is VaultToken else self.ckey + else: + ckey = None if cbank else self.ckey + return self._flush(ckey) + + +def _get_config_cache(opts, context, cbank, ckey): + """ + Factory for VaultConfigCache to get around some + chicken-and-egg problems + """ + config = None + if cbank in context and ckey in context[cbank]: + config = context[cbank][ckey] + else: + cache = salt.cache.factory(opts) + if cache.contains(cbank, ckey): + # expiration check is done inside the class + config = cache.fetch(cbank, ckey) + elif opts.get("cache", "localfs") != "localfs": + local_opts = copy.copy(opts) + local_opts["cache"] = "localfs" + cache = salt.cache.factory(local_opts) + if cache.contains(cbank, ckey): + # expiration check is done inside the class + config = cache.fetch(cbank, ckey) + + return VaultConfigCache(context, cbank, ckey, opts, init_config=config) + + +class VaultTokenAuth: + """ + Container for authentication tokens + """ + + def __init__(self, cache=None, token=None): + self.cache = cache + if token is None and cache is not None: + token = cache.get() + if token is None: + token = InvalidVaultToken() + if isinstance(token, dict): + token = VaultToken(**token) + self.token = token + + def is_renewable(self): + """ + Check whether the contained token is renewable, which requires it + to be currently valid for at least two uses and renewable + """ + return self.token.is_renewable() + + def is_valid(self, valid_for=0): + """ + Check whether the contained token is valid + """ + return self.token.is_valid(valid_for) + + def get_token(self): + """ + Get the contained token if it is valid, otherwise + raises VaultAuthExpired + """ + if self.token.is_valid(): + return self.token + raise VaultAuthExpired() + + def used(self): + """ + Increment the 
use counter for the contained token + """ + self.token.used() + if self.token.num_uses != 0: + self._write_cache() + + def update_token(self, auth): + """ + Partially update the contained token (e.g. after renewal) + """ + self.token = self.token.with_renewed(**auth) + self._write_cache() + + def replace_token(self, token): + """ + Completely replace the contained token with a new one + """ + self.token = token + self._write_cache() + + def _write_cache(self): + if self.cache is not None: + if self.token.is_valid(): + self.cache.store(self.token) + else: + self.cache.flush() + + +class VaultAppRoleAuth: + """ + Issues tokens from AppRole credentials. + """ + + def __init__(self, approle, client, mount="approle", cache=None, token_store=None): + self.approle = approle + self.client = client + self.mount = mount + self.cache = cache + if token_store is None: + token_store = VaultTokenAuth() + self.token = token_store + + def is_renewable(self): + """ + Check whether the currently used token is renewable. + Secret IDs are not renewable anyways. + """ + return self.token.is_renewable() + + def is_valid(self, valid_for=0): + """ + Check whether the contained authentication data can be used + to issue a valid token + """ + return self.token.is_valid(valid_for) or self.approle.is_valid(valid_for) + + def get_token(self): + """ + Return the token issued by the last login, if it is still valid, otherwise + login with the contained AppRole, if it is valid. Otherwise, + raises VaultAuthExpired + """ + if self.token.is_valid(): + return self.token.get_token() + if self.approle.is_valid(): + return self._login() + raise VaultAuthExpired() + + def used(self): + """ + Increment the use counter for the currently used token + """ + self.token.used() + + def update_token(self, auth): + """ + Partially update the contained token (e.g. after renewal) + """ + self.token.update_token(auth) + + def _login(self): + log.debug("Vault token expired. 
Recreating one by authenticating with AppRole.") + endpoint = f"auth/{self.mount}/login" + payload = self.approle.payload() + res = self.client.post(endpoint, payload=payload) + self.approle.used() + self._replace_token(res["auth"]) + self._write_cache() + return self.token.get_token() + + def _write_cache(self): + if self.cache is not None and self.approle.secret_id is not None: + if isinstance(self.approle.secret_id, LocalVaultSecretId): + pass + elif self.approle.secret_id.num_uses == 0: + pass + elif self.approle.secret_id.is_valid(): + self.cache.store(self.approle.secret_id) + else: + self.cache.flush() + + def _replace_token(self, auth): + self.token.replace_token(VaultToken(**auth)) + + +class LocalVaultSecretId(VaultSecretId): + """ + Represents a secret ID from local configuration and should not be cached. + """ + + +class VaultAppRole: + """ + Container that represents an AppRole + """ + + def __init__(self, role_id, secret_id=None): + self.role_id = role_id + self.secret_id = secret_id + + def replace_secret_id(self, secret_id): + """ + Replace the contained secret ID with a new one + """ + self.secret_id = secret_id + + def is_valid(self, valid_for=0, uses=1): + """ + Checks whether the contained data can be used to authenticate + to Vault. Secret IDs might not be required by the server when + bind_secret_id is set to false. + + valid_for + Allows to check whether the AppRole will still be valid in the future. + This can be an integer, which will be interpreted as seconds, or a + time string using the same format as Vault does: + Suffix ``s`` for seconds, ``m`` for minutes, ``h`` for hours, ``d`` for days. + Defaults to 0. + + uses + Check whether the AppRole has at least this number of uses left. Defaults to 1. + """ + if self.secret_id is None: + return True + return self.secret_id.is_valid(valid_for=valid_for, uses=uses) + + def used(self): + """ + Increment the secret ID use counter by one, if this AppRole uses one. 
+ """ + if self.secret_id is not None: + self.secret_id.used() + + def payload(self): + """ + Return the payload to use for POST requests using this AppRole + """ + payload = {} + if self.secret_id is not None: + payload = self.secret_id.payload() + payload["role_id"] = self.role_id + return payload + + +class InvalidVaultToken(VaultToken): + def __init__(self, *args, **kwargs): # pylint: disable=super-init-not-called + self.renewable = False + self.use_count = 0 + self.num_uses = 0 + + def is_valid(self, valid_for=0, uses=1): + return False + + +class InvalidVaultSecretId(VaultSecretId): + def __init__(self, *args, **kwargs): # pylint: disable=super-init-not-called + pass + + def is_valid(self, valid_for=0, uses=1): + return False + + +class VaultKV: + """ + Interface to Vault secret paths + """ + + def __init__(self, client, metadata_cache): + self.client = client + self.metadata_cache = metadata_cache + + def read(self, path, include_metadata=False): + """ + Read secret data at path. + + include_metadata + For kv-v2, include metadata in the return value: + ``{"data": {} ,"metadata": {}}``. + """ + v2_info = self.is_v2(path) + if v2_info["v2"]: + path = v2_info["data"] + res = self.client.get(path) + ret = res["data"] + if v2_info["v2"] and not include_metadata: + return ret["data"] + return ret + + def write(self, path, data): + """ + Write secret data to path. + """ + v2_info = self.is_v2(path) + if v2_info["v2"]: + path = v2_info["data"] + data = {"data": data} + return self.client.post(path, payload=data) + + def patch(self, path, data): + """ + Patch existing data. Requires kv-v2. 
+ This uses JSON Merge Patch format, see + https://datatracker.ietf.org/doc/html/draft-ietf-appsawg-json-merge-patch-07 + """ + v2_info = self.is_v2(path) + if not v2_info["v2"]: + raise VaultInvocationError("Patch operation requires kv-v2.") + path = v2_info["data"] + data = {"data": data} + add_headers = {"Content-Type": "application/merge-patch+json"} + return self.client.patch(path, payload=data, add_headers=add_headers) + + def delete(self, path, versions=None): + """ + Delete secret path data. For kv-v1, this is permanent. + For kv-v2, this only soft-deletes the data. + + versions + For kv-v2, specifies versions to soft-delete. Needs to be castable + to a list of integers. + """ + method = "DELETE" + payload = None + versions = self._parse_versions(versions) + v2_info = self.is_v2(path) + + if v2_info["v2"]: + if versions is not None: + method = "POST" + path = v2_info["delete_versions"] + payload = {"versions": versions} + else: + # data and delete operations only differ by HTTP verb + path = v2_info["data"] + elif versions is not None: + raise VaultInvocationError("Versioning support requires kv-v2.") + + return self.client.request(method, path, payload=payload) + + def destroy(self, path, versions): + """ + Permanently remove version data. Requires kv-v2. + + versions + Specifies versions to destroy. Needs to be castable + to a list of integers. + """ + versions = self._parse_versions(versions) + v2_info = self.is_v2(path) + if not v2_info["v2"]: + raise VaultInvocationError("Destroy operation requires kv-v2.") + path = v2_info["destroy"] + payload = {"versions": versions} + return self.client.post(path, payload=payload) + + def _parse_versions(self, versions): + if versions is None: + return versions + if not isinstance(versions, list): + versions = [versions] + try: + versions = [int(x) for x in versions] + except ValueError as err: + raise VaultInvocationError( + "Versions have to be specified as integers." 
+ ) from err + return versions + + def nuke(self, path): + """ + Delete path metadata and version data, including all version history. + Requires kv-v2. + """ + v2_info = self.is_v2(path) + if not v2_info["v2"]: + raise VaultInvocationError("Nuke operation requires kv-v2.") + path = v2_info["metadata"] + return self.client.delete(path) + + def list(self, path): + """ + List keys at path. + """ + v2_info = self.is_v2(path) + if v2_info["v2"]: + path = v2_info["metadata"] + + return self.client.list(path)["data"]["keys"] + + def is_v2(self, path): + """ + Determines if a given secret path is kv version 1 or 2. + """ + ret = { + "v2": False, + "data": path, + "metadata": path, + "delete": path, + "type": None, + } + path_metadata = self._get_secret_path_metadata(path) + if not path_metadata: + # metadata lookup failed. Simply return not v2 + return ret + ret["type"] = path_metadata.get("type", "kv") + if ( + ret["type"] == "kv" + and path_metadata["options"] is not None + and path_metadata.get("options", {}).get("version", "1") in ["2"] + ): + ret["v2"] = True + ret["data"] = self._v2_the_path(path, path_metadata.get("path", path)) + ret["metadata"] = self._v2_the_path( + path, path_metadata.get("path", path), "metadata" + ) + ret["delete"] = ret["data"] + ret["delete_versions"] = self._v2_the_path( + path, path_metadata.get("path", path), "delete" + ) + ret["destroy"] = self._v2_the_path( + path, path_metadata.get("path", path), "destroy" + ) + return ret + + def _v2_the_path(self, path, pfilter, ptype="data"): + """ + Given a path, a filter, and a path type, properly inject + 'data' or 'metadata' into the path. + """ + possible_types = ["data", "metadata", "delete", "destroy"] + if ptype not in possible_types: + raise AssertionError() + msg = f"Path {path} already contains {ptype} in the right place - saltstack duct tape?" 
+ + path = path.rstrip("/").lstrip("/") + pfilter = pfilter.rstrip("/").lstrip("/") + + together = pfilter + "/" + ptype + + otype = possible_types[0] if possible_types[0] != ptype else possible_types[1] + other = pfilter + "/" + otype + if path.startswith(other): + path = path.replace(other, together, 1) + msg = f'Path is a "{otype}" type but "{ptype}" type requested - Flipping: {path}' + elif not path.startswith(together): + old_path = path + path = path.replace(pfilter, together, 1) + msg = f"Converting path to v2 {old_path} => {path}" + log.debug(msg) + return path + + def _get_secret_path_metadata(self, path): + """ + Given a path, query vault to determine mount point, type, and version. + """ + cache_content = self.metadata_cache.get() or {} + + ret = None + if path.startswith(tuple(cache_content.keys())): + log.debug("Found cached metadata for %s", path) + ret = next(v for k, v in cache_content.items() if path.startswith(k)) + else: + log.debug("Fetching metadata for %s", path) + try: + endpoint = f"sys/internal/ui/mounts/{path}" + res = self.client.get(endpoint) + if "data" in res: + log.debug("Got metadata for %s", path) + cache_content[path] = ret = res["data"] + self.metadata_cache.store(cache_content) + else: + raise VaultException("Unexpected response to metadata query.") + except Exception as err: # pylint: disable=broad-except + log.error( + "Failed to get secret metadata %s: %s", type(err).__name__, err + ) + return ret + + +class LeaseStore: + """ + Caches leases and handles lease operations + """ + + def __init__(self, client, cache): + self.client = client + self.cache = cache + + def get( + self, + ckey, + valid_for=0, + renew=True, + renew_increment=None, + renew_blur=2, + flush=True, + ): + """ + Return cached lease or None. + + ckey + Cache key the lease has been saved in. + + valid_for + Ensure the returned lease is valid for at least this amount of time. 
+ This can be an integer, which will be interpreted as seconds, or a + time string using the same format as Vault does: + Suffix ``s`` for seconds, ``m`` for minutes, ``h`` for hours, ``d`` for days. + Defaults to 0. + + .. note:: + + This does not take into account token validity, which active leases + are bound to as well. + + renew + If the lease is still valid, but not valid for ``valid_for``, attempt to + renew it. Defaults to true. + + renew_increment + When renewing, request the lease to be valid for this amount of time from + the current point of time onwards. + If unset, will renew the lease by its default validity period and, if + the renewed lease does not pass ``valid_for``, will try to renew it + by ``valid_for``. + + renew_blur + When checking validity after renewal, allow this amount of seconds in leeway + to account for latency. Especially important when renew_increment is unset + and the default validity period is less than ``valid_for``. + Defaults to 2. + + flush + If the lease is invalid or not valid for ``valid_for`` and renewals + are disabled or impossible, flush the cache. Defaults to true. + """ + if renew_increment is not None and timestring_map(valid_for) > timestring_map( + renew_increment + ): + raise VaultInvocationError( + "When renew_increment is set, it must be at least valid_for to make sense" + ) + + def check_flush(): + if flush: + self.cache.flush(ckey) + return None + + def renew_lease(increment): + try: + ret = self.renew(lease, increment=increment) + except (VaultNotFoundError, VaultPermissionDeniedError): + ret = {} + # Do not overwrite data of renewed leases! 
+ ret.pop("data", None) + return lease.with_renewed(**ret) + + # Since we can renew leases, do not check for future validity in cache + lease = self.cache.get(ckey, flush=flush) + if lease is None or lease.is_valid(valid_for): + return lease + if not renew: + return check_flush() + lease = renew_lease(renew_increment) + if not lease.is_valid(valid_for, blur=renew_blur): + if renew_increment is not None: + # valid_for cannot possibly be respected + return check_flush() + # Maybe valid_for is greater than the default validity period, so check if + # the lease can be renewed by valid_for + lease = renew_lease(valid_for) + if not lease.is_valid(valid_for, blur=renew_blur): + return check_flush() + # Ensure the new validity is cached + self.cache.store(ckey, lease) + return lease + + def list(self): + """ + List all cached leases. + """ + return self.cache.list() + + def lookup(self, lease): + """ + Lookup lease meta information. + + lease + A lease ID or VaultLease object to look up. + """ + endpoint = "sys/leases/lookup" + payload = {"lease_id": str(lease)} + return self.client.post(endpoint, payload=payload) + + def renew(self, lease, increment=None): + """ + Renew a lease. + + lease + A lease ID or VaultLease object to renew. + + increment + Request the lease to be valid for this amount of time from the current + point of time onwards. Can also be used to reduce the validity period. + The server might not honor this increment. + Can be an integer (seconds) or a time string like ``1h``. Optional. + """ + endpoint = "sys/leases/renew" + payload = {"lease_id": str(lease)} + if increment is not None: + payload["increment"] = int(timestring_map(increment)) + return self.client.post(endpoint, payload=payload) + + def revoke(self, lease, sync=False): + """ + Revoke a lease. + + lease + A lease ID or VaultLease object to revoke. + + sync + Only return once the lease has been revoked. Defaults to false. 
+ """ + endpoint = "sys/leases/renew" + payload = {"lease_id": str(lease), "sync": sync} + return self.client.post(endpoint, payload) + + def store(self, ckey, lease): + """ + Cache a lease. + + ckey + The cache key the lease should be saved in. + + lease + A lease ID or VaultLease object to store. + """ + return self.cache.store(ckey, lease) + + +#################################################################################### +# The following functions were available in previous versions and are deprecated +# TODO: remove deprecated functions after v3008 (Argon) +#################################################################################### + + +def get_vault_connection(): + """ + Get the connection details for calling Vault, from local configuration if + it exists, or from the master otherwise + """ + salt.utils.versions.warn_until( + "Argon", + "salt.utils.vault.get_vault_connection is deprecated, " + "please use salt.utils.vault.get_authd_client.", + ) + + opts = globals().get("__opts__", {}) + context = globals().get("__context__", {}) + + try: + vault = get_authd_client(opts, context) + except salt.exceptions.InvalidConfigError as err: + # This exception class was raised previously + raise salt.exceptions.CommandExecutionError(err) from err + + token = vault.auth.get_token() + server_config = vault.get_config() + + ret = { + "url": server_config["url"], + "namespace": server_config["namespace"], + "token": str(token), + "verify": server_config["verify"], + "issued": token.creation_time, + } + + if _get_salt_run_type(opts) in [ + SALT_RUNTYPE_MASTER_IMPERSONATING, + SALT_RUNTYPE_MASTER_PEER_RUN, + SALT_RUNTYPE_MINION_REMOTE, + ]: + ret["lease_duration"] = token.explicit_max_ttl + ret["uses"] = token.num_uses + else: + ret["ttl"] = token.explicit_max_ttl + + return ret + + +def del_cache(): + """ + Delete cache file + """ + salt.utils.versions.warn_until( + "Argon", + "salt.utils.vault.del_cache is deprecated, please use 
salt.utils.vault.clear_cache.", + ) + clear_cache( + globals().get("__opts__", {}), + globals().get("__context__", {}), + connection=False, + ) + + +def write_cache(connection): # pylint: disable=unused-argument + """ + Write the vault token to cache + """ + salt.utils.versions.warn_until( + "Argon", + "salt.utils.vault.write_cache is deprecated without replacement.", + ) + # always return false since cache is managed internally + return False + + +def get_cache(): + """ + Return connection information from vault cache file + """ + salt.utils.versions.warn_until( + "Argon", + "salt.utils.vault.get_cache is deprecated, please use salt.utils.vault.get_authd_client.", + ) + return get_vault_connection() + + +def make_request( + method, + resource, + token=None, + vault_url=None, + namespace=None, + get_token_url=False, + retry=False, + **args, +): + """ + Make a request to Vault + """ + salt.utils.versions.warn_until( + "Argon", + "salt.utils.vault.make_request is deprecated, please use " + "salt.utils.vault.query or salt.utils.vault.query_raw." 
+ "To override token/url/namespace, please make use of the" + "provided classes directly.", + ) + + def _get_client(token, vault_url, namespace, args): + vault = get_authd_client(opts, context) + if token is not None: + vault.auth.cache = None + vault.auth.token = VaultToken( + client_token=token, renewable=False, lease_duration=60, num_uses=1 + ) + if vault_url is not None: + vault.url = vault_url + if namespace is not None: + vault.namespace = namespace + if "verify" in args: + vault.verify = args.pop("verify") + + return vault + + opts = globals().get("__opts__", {}) + context = globals().get("__context__", {}) + endpoint = resource.lstrip("/").lstrip("v1/") + payload = args.pop("json", None) + + if "data" in args: + payload = salt.utils.json.loads(args.pop("data")) + + vault = _get_client(token, vault_url, namespace, args) + res = vault.request_raw(method, endpoint, payload=payload, wrap=False, **args) + if res.status_code == 403 and not retry: + # retry was used to indicate to only try once more + clear_cache(opts, context) + vault = _get_client(token, vault_url, namespace, args) + res = vault.request_raw(method, endpoint, payload=payload, wrap=False, **args) + + if get_token_url: + return res, str(vault.auth.token), vault.get_config()["url"] + return res diff --git a/tasks/docstrings.py b/tasks/docstrings.py index b8078c6dfe27..f5e43d46d239 100644 --- a/tasks/docstrings.py +++ b/tasks/docstrings.py @@ -744,7 +744,6 @@ "match_metric", ], "salt/modules/vagrant.py": ["get_machine_id", "get_vm_info"], - "salt/modules/vault.py": ["read_secret"], "salt/modules/virt.py": [ "nesthash", "pool_update", diff --git a/tests/integration/files/conf/master b/tests/integration/files/conf/master index 3938d4c298a5..7300be97acc3 100644 --- a/tests/integration/files/conf/master +++ b/tests/integration/files/conf/master @@ -103,16 +103,24 @@ discovery: false # set uses to 0 so the token # has unlimited uses available. 
vault: - url: http://127.0.0.1:8200 auth: method: token token: testsecret - uses: 0 + issue: + token: + params: + num_uses: 0 policies: - - testpolicy + assign: + - salt_minion + server: + url: http://127.0.0.1:8200 peer_run: .*: - - vault.generate_token + - vault.generate_token + - vault.get_config + - vault.generate_new_token + - vault.generate_secret_id sdbvault: driver: vault sdbetcd: diff --git a/tests/integration/files/conf/minion b/tests/integration/files/conf/minion index 06992fb8c035..de85fe28be9d 100644 --- a/tests/integration/files/conf/minion +++ b/tests/integration/files/conf/minion @@ -92,12 +92,11 @@ discovery: false sdbvault: driver: vault vault: - url: http://127.0.0.1:8200 auth: method: token token: testsecret - policies: - - testpolicy + server: + url: http://127.0.0.1:8200 sdbetcd: driver: etcd etcd.host: 127.0.0.1 diff --git a/tests/integration/files/vault.hcl b/tests/integration/files/vault.hcl deleted file mode 100644 index 97a1865d9189..000000000000 --- a/tests/integration/files/vault.hcl +++ /dev/null @@ -1,9 +0,0 @@ -path "secret/*" { - capabilities = ["read", "list", "create", "update", "delete"] -} -path "kv-v2/*" { - capabilities = ["read", "list", "create", "update", "delete"] -} -path "auth/*" { - capabilities = ["read", "list", "sudo", "create", "update", "delete"] -} diff --git a/tests/integration/files/vault/policies/salt_master.hcl b/tests/integration/files/vault/policies/salt_master.hcl new file mode 100644 index 000000000000..804a2e0d7e46 --- /dev/null +++ b/tests/integration/files/vault/policies/salt_master.hcl @@ -0,0 +1,44 @@ +# Test minion token creation +path "auth/token/create" { + capabilities = ["create", "read", "update"] +} + +# Test minion token creation with token roles +path "auth/token/create/*" { + capabilities = ["create", "read", "update"] +} + +# AppRole/entity management testing +path "auth/salt-minions/role" { + capabilities = ["list"] +} + +path "auth/salt-minions/role/*" { + capabilities = ["read", 
"create", "update", "delete"] +} + +path "sys/auth/salt-minions" { + capabilities = ["read", "sudo"] +} + +path "identity/lookup/entity" { + capabilities = ["create", "update"] + allowed_parameters = { + "alias_name" = [] + "alias_mount_accessor" = [] + } +} + +path "identity/entity/name/salt_minion_*" { + capabilities = ["read", "create", "update", "delete"] +} + +path "identity/entity-alias" { + capabilities = ["create", "update"] + allowed_parameters = { + "id" = [] + "canonical_id" = [] + "mount_accessor" = [] + "name" = [] + } +} diff --git a/tests/integration/files/vault/policies/salt_minion.hcl b/tests/integration/files/vault/policies/salt_minion.hcl new file mode 100644 index 000000000000..b79bc7e0ff1d --- /dev/null +++ b/tests/integration/files/vault/policies/salt_minion.hcl @@ -0,0 +1,29 @@ +# General KV v1 testing +path "secret/*" { + capabilities = ["read", "list", "create", "update", "delete"] +} + +# General KV v2 testing +path "kv-v2/*" { + capabilities = ["read", "list", "create", "update", "delete", "patch"] +} + +# ACL policy templating tests +path "salt/+/minions/{{identity.entity.metadata.minion-id}}" { + capabilities = ["create", "read", "update", "delete", "list", "patch"] +} + +# ACL policy templating tests with pillar values +path "salt/data/roles/{{identity.entity.metadata.role}}" { + capabilities = ["read"] +} + +# Test list policies +path "sys/policy" { + capabilities = ["read"] +} + +# Test managing policies +path "sys/policy/*" { + capabilities = ["read", "create", "update", "delete"] +} diff --git a/tests/integration/files/vault/policies/salt_minion_old.hcl b/tests/integration/files/vault/policies/salt_minion_old.hcl new file mode 100644 index 000000000000..f7d50863071e --- /dev/null +++ b/tests/integration/files/vault/policies/salt_minion_old.hcl @@ -0,0 +1,29 @@ +# General KV v1 testing +path "secret/*" { + capabilities = ["read", "list", "create", "update", "delete"] +} + +# General KV v2 testing +path "kv-v2/*" { + capabilities = 
["read", "list", "create", "update", "delete"] +} + +# ACL policy templating tests +path "salt/+/minions/{{identity.entity.metadata.minion-id}}" { + capabilities = ["create", "read", "update", "delete", "list"] +} + +# ACL policy templating tests with pillar values +path "salt/data/roles/{{identity.entity.metadata.role}}" { + capabilities = ["read"] +} + +# Test list policies +path "sys/policy" { + capabilities = ["read"] +} + +# Test managing policies +path "sys/policy/*" { + capabilities = ["read", "create", "update", "delete"] +} diff --git a/tests/pytests/functional/modules/test_vault.py b/tests/pytests/functional/modules/test_vault.py index 61e6ecafa456..09353ae1ffdf 100644 --- a/tests/pytests/functional/modules/test_vault.py +++ b/tests/pytests/functional/modules/test_vault.py @@ -1,19 +1,24 @@ -import json import logging -import time import pytest -import salt.utils.path -from tests.support.runtests import RUNTIME_VARS +# pylint: disable=unused-import +from tests.support.pytest.vault import ( + vault_container_version, + vault_delete_policy, + vault_delete_secret, + vault_environ, + vault_list_policies, + vault_list_secrets, + vault_read_policy, + vault_write_policy, +) pytestmark = [ pytest.mark.slow_test, pytest.mark.skip_if_binaries_missing("dockerd", "vault", "getent"), ] -VAULT_BINARY = salt.utils.path.which("vault") - log = logging.getLogger(__name__) @@ -21,123 +26,35 @@ def minion_config_overrides(vault_port): return { "vault": { - "url": "http://127.0.0.1:{}".format(vault_port), "auth": { "method": "token", "token": "testsecret", - "uses": 0, - "policies": [ - "testpolicy", - ], + }, + "server": { + "url": f"http://127.0.0.1:{vault_port}", }, } } -def vault_container_version_id(value): - return "vault=={}".format(value) - - -@pytest.fixture( - scope="module", - params=["0.9.6", "1.3.1", "latest"], - ids=vault_container_version_id, -) -def vault_container_version(request, salt_factories, vault_port, shell): - vault_version = request.param - config = 
{ - "backend": {"file": {"path": "/vault/file"}}, - "default_lease_ttl": "168h", - "max_lease_ttl": "720h", - "disable_mlock": False, - } - - factory = salt_factories.get_container( - "vault", - "ghcr.io/saltstack/salt-ci-containers/vault:{}".format(vault_version), - check_ports=[vault_port], - container_run_kwargs={ - "ports": {"8200/tcp": vault_port}, - "environment": { - "VAULT_DEV_ROOT_TOKEN_ID": "testsecret", - "VAULT_LOCAL_CONFIG": json.dumps(config), - }, - "cap_add": "IPC_LOCK", - }, - pull_before_start=True, - skip_on_pull_failure=True, - skip_if_docker_client_not_connectable=True, - ) - with factory.started() as factory: - attempts = 0 - while attempts < 3: - attempts += 1 - time.sleep(1) - ret = shell.run( - VAULT_BINARY, - "login", - "token=testsecret", - env={"VAULT_ADDR": "http://127.0.0.1:{}".format(vault_port)}, - ) - if ret.returncode == 0: - break - log.debug("Failed to authenticate against vault:\n%s", ret) - time.sleep(4) - else: - pytest.fail("Failed to login to vault") - - ret = shell.run( - VAULT_BINARY, - "policy", - "write", - "testpolicy", - "{}/vault.hcl".format(RUNTIME_VARS.FILES), - env={"VAULT_ADDR": "http://127.0.0.1:{}".format(vault_port)}, - ) - if ret.returncode != 0: - log.debug("Failed to assign policy to vault:\n%s", ret) - pytest.fail("unable to assign policy to vault") - yield vault_version - - @pytest.fixture(scope="module") def sys_mod(modules): return modules.sys @pytest.fixture -def vault(loaders, modules, vault_container_version, shell, vault_port): +def vault(loaders, modules, vault_container_version): try: yield modules.vault finally: # We're explicitly using the vault CLI and not the salt vault module secret_path = "secret/my" - ret = shell.run( - VAULT_BINARY, - "kv", - "list", - "--format=json", - secret_path, - env={"VAULT_ADDR": "http://127.0.0.1:{}".format(vault_port)}, - ) - if ret.returncode == 0: - for secret in ret.data: - secret_path = "secret/my/{}".format(secret) - ret = shell.run( - VAULT_BINARY, - "kv", - 
"delete", - secret_path, - env={"VAULT_ADDR": "http://127.0.0.1:{}".format(vault_port)}, - ) - ret = shell.run( - VAULT_BINARY, - "kv", - "metadata", - "delete", - secret_path, - env={"VAULT_ADDR": "http://127.0.0.1:{}".format(vault_port)}, - ) + for secret in vault_list_secrets(secret_path): + vault_delete_secret(f"{secret_path}/{secret}", metadata=True) + policies = vault_list_policies() + for policy in ["functional_test_policy", "policy_write_test"]: + if policy in policies: + vault_delete_policy(policy) @pytest.mark.windows_whitelisted @@ -253,12 +170,36 @@ def existing_secret(vault, vault_container_version): assert ret == expected_write +@pytest.fixture +def existing_secret_version(existing_secret, vault, vault_container_version): + ret = vault.write_secret("secret/my/secret", user="foo", password="hunter1") + assert ret + assert ret["version"] == 2 + ret = vault.read_secret("secret/my/secret") + assert ret + assert ret["password"] == "hunter1" + + @pytest.mark.usefixtures("existing_secret") def test_delete_secret(vault): ret = vault.delete_secret("secret/my/secret") assert ret is True +@pytest.mark.usefixtures("existing_secret_version") +@pytest.mark.parametrize("vault_container_version", ["1.3.1", "latest"], indirect=True) +def test_delete_secret_versions(vault, vault_container_version): + ret = vault.delete_secret("secret/my/secret", 1) + assert ret is True + ret = vault.read_secret("secret/my/secret") + assert ret + assert ret["password"] == "hunter1" + ret = vault.delete_secret("secret/my/secret", 2) + assert ret is True + ret = vault.read_secret("secret/my/secret", default="__was_deleted__") + assert ret == "__was_deleted__" + + @pytest.mark.usefixtures("existing_secret") def test_list_secrets(vault): ret = vault.list_secrets("secret/my/") @@ -268,8 +209,66 @@ def test_list_secrets(vault): @pytest.mark.usefixtures("existing_secret") +@pytest.mark.parametrize("vault_container_version", ["1.3.1", "latest"], indirect=True) def test_destroy_secret_kv2(vault, 
vault_container_version): - if vault_container_version == "0.9.6": - pytest.skip("Test not applicable to vault=={}".format(vault_container_version)) ret = vault.destroy_secret("secret/my/secret", "1") assert ret is True + + +@pytest.mark.usefixtures("existing_secret") +@pytest.mark.parametrize("vault_container_version", ["latest"], indirect=True) +def test_patch_secret(vault, vault_container_version): + ret = vault.patch_secret("secret/my/secret", password="baz") + assert ret + expected_write = {"destroyed": False, "deletion_time": ""} + for key in list(ret): + if key not in expected_write: + ret.pop(key) + assert ret == expected_write + ret = vault.read_secret("secret/my/secret") + assert ret == {"user": "foo", "password": "baz"} + + +@pytest.fixture +def policy_rules(): + return """\ +path "secret/some/thing" { + capabilities = ["read"] +} + """ + + +@pytest.fixture +def existing_policy(policy_rules, vault_container_version): + vault_write_policy("functional_test_policy", policy_rules) + try: + yield + finally: + vault_delete_policy("functional_test_policy") + + +@pytest.mark.usefixtures("existing_policy") +def test_policy_fetch(vault, policy_rules): + ret = vault.policy_fetch("functional_test_policy") + assert ret == policy_rules + ret = vault.policy_fetch("__does_not_exist__") + assert ret is None + + +def test_policy_write(vault, policy_rules): + ret = vault.policy_write("policy_write_test", policy_rules) + assert ret is True + assert vault_read_policy("policy_write_test") == policy_rules + + +@pytest.mark.usefixtures("existing_policy") +def test_policy_delete(vault): + ret = vault.policy_delete("functional_test_policy") + assert ret is True + assert "functional_test_policy" not in vault_list_policies() + + +@pytest.mark.usefixtures("existing_policy") +def test_policies_list(vault): + ret = vault.policies_list() + assert "functional_test_policy" in ret diff --git a/tests/pytests/functional/modules/test_vault_db.py 
b/tests/pytests/functional/modules/test_vault_db.py new file mode 100644 index 000000000000..83d40b639792 --- /dev/null +++ b/tests/pytests/functional/modules/test_vault_db.py @@ -0,0 +1,376 @@ +import time + +import pytest +from saltfactories.utils import random_string + +# pylint: disable=unused-import +from tests.support.pytest.mysql import ( + MySQLImage, + create_mysql_combo, + mysql_combo, + mysql_container, +) + +# pylint: disable=unused-import +from tests.support.pytest.vault import ( + vault_container_version, + vault_delete, + vault_disable_secret_engine, + vault_enable_secret_engine, + vault_environ, + vault_list, + vault_revoke, + vault_write, +) + +pytestmark = [ + pytest.mark.slow_test, + pytest.mark.skip_if_binaries_missing("dockerd", "vault", "getent"), + pytest.mark.usefixtures("vault_container_version"), + pytest.mark.parametrize("vault_container_version", ["latest"], indirect=True), +] + + +@pytest.fixture(scope="module") +def minion_config_overrides(vault_port): + return { + "vault": { + "auth": { + "method": "token", + "token": "testsecret", + }, + "cache": { + "backend": "disk", # ensure a persistent cache is available for get_creds + }, + "server": { + "url": f"http://127.0.0.1:{vault_port}", + }, + } + } + + +@pytest.fixture(scope="module") +def mysql_image(request): + version = "10.3" + return MySQLImage( + name="mariadb", + tag=version, + container_id=random_string(f"mariadb-{version}-"), + ) + + +@pytest.fixture +def role_args_common(): + return { + "db_name": "testdb", + "creation_statements": r"CREATE USER '{{name}}'@'%' IDENTIFIED BY '{{password}}';GRANT SELECT ON *.* TO '{{name}}'@'%';", + } + + +@pytest.fixture +def testrole(): + return { + "default_ttl": 3600, + "max_ttl": 86400, + } + + +@pytest.fixture +def testreissuerole(): + return { + "default_ttl": 180, + "max_ttl": 180, + } + + +@pytest.fixture +def teststaticrole(mysql_container): + return { + "db_name": "testdb", + "rotation_period": 86400, + "username": 
mysql_container.mysql_user, + } + + +@pytest.fixture +def testdb(mysql_container): + # This uses the default IP address of the host on the default network + # (hardcoded) because I could not get hostname resolution working properly. + return { + "plugin_name": "mysql-database-plugin", + "connection_url": f"{{{{username}}}}:{{{{password}}}}@tcp(172.17.0.1:{mysql_container.mysql_port})/", + "allowed_roles": "testrole,teststaticrole,testreissuerole", + "username": "root", + "password": mysql_container.mysql_passwd, + } + + +@pytest.fixture(scope="module") +def db_engine(vault_container_version): + assert vault_enable_secret_engine("database") + yield + assert vault_disable_secret_engine("database") + + +@pytest.fixture +def connection_setup(vault_container_version, testdb, db_engine): + try: + vault_write("database/config/testdb", **testdb) + assert "testdb" in vault_list("database/config") + yield + finally: + # prevent dangling leases, which prevent disabling the secret engine + assert vault_revoke("database/creds", prefix=True) + if "testdb" in vault_list("database/config"): + vault_delete("database/config/testdb") + assert "testdb" not in vault_list("database/config") + + +@pytest.fixture(params=[["testrole"]]) +def roles_setup(connection_setup, request, role_args_common): + try: + for role_name in request.param: + role_args = request.getfixturevalue(role_name) + role_args.update(role_args_common) + vault_write(f"database/roles/{role_name}", **role_args) + assert role_name in vault_list("database/roles") + yield + finally: + for role_name in request.param: + if role_name in vault_list("database/roles"): + vault_delete(f"database/roles/{role_name}") + assert role_name not in vault_list("database/roles") + + +@pytest.fixture +def role_static_setup(connection_setup, teststaticrole): + role_name = "teststaticrole" + try: + vault_write(f"database/static-roles/{role_name}", **teststaticrole) + assert role_name in vault_list("database/static-roles") + yield + finally: + 
if role_name in vault_list("database/static-roles"): + vault_delete(f"database/static-roles/{role_name}") + assert role_name not in vault_list("database/static-roles") + + +@pytest.fixture +def vault_db(modules, db_engine): + try: + yield modules.vault_db + finally: + # prevent dangling leases, which prevent disabling the secret engine + assert vault_revoke("database/creds", prefix=True) + if "testdb" in vault_list("database/config"): + vault_delete("database/config/testdb") + assert "testdb" not in vault_list("database/config") + if "testrole" in vault_list("database/roles"): + vault_delete("database/roles/testrole") + assert "testrole" not in vault_list("database/roles") + if "teststaticrole" in vault_list("database/static-roles"): + vault_delete("database/static-roles/teststaticrole") + assert "teststaticrole" not in vault_list("database/static-roles") + + +@pytest.mark.usefixtures("connection_setup") +def test_list_connections(vault_db): + ret = vault_db.list_connections() + assert ret == ["testdb"] + + +@pytest.mark.usefixtures("connection_setup") +def test_fetch_connection(vault_db, testdb): + ret = vault_db.fetch_connection("testdb") + assert ret + for var, val in testdb.items(): + if var == "password": + continue + if var in ["connection_url", "username"]: + assert var in ret["connection_details"] + assert ret["connection_details"][var] == val + else: + assert var in ret + if var == "allowed_roles": + assert ret[var] == list(val.split(",")) + else: + assert ret[var] == val + + +def test_write_connection(vault_db, testdb, mysql_container): + args = { + "plugin": "mysql", + "connection_url": f"{{{{username}}}}:{{{{password}}}}@tcp(172.17.0.1:{mysql_container.mysql_port})/", + "allowed_roles": ["testrole", "teststaticrole"], + "username": "root", + "password": mysql_container.mysql_passwd, + "rotate": False, + } + ret = vault_db.write_connection("testdb", **args) + assert ret + assert "testdb" in vault_list("database/config") + + 
+@pytest.mark.usefixtures("connection_setup") +def test_delete_connection(vault_db): + ret = vault_db.delete_connection("testdb") + assert ret + assert "testdb" not in vault_list("database/config") + + +@pytest.mark.usefixtures("connection_setup") +def test_reset_connection(vault_db): + ret = vault_db.reset_connection("testdb") + assert ret + + +@pytest.mark.usefixtures("roles_setup") +def test_list_roles(vault_db): + ret = vault_db.list_roles() + assert ret == ["testrole"] + + +@pytest.mark.usefixtures("role_static_setup") +def test_list_roles_static(vault_db): + ret = vault_db.list_roles(static=True) + assert ret == ["teststaticrole"] + + +@pytest.mark.usefixtures("roles_setup") +def test_fetch_role(vault_db, testrole): + ret = vault_db.fetch_role("testrole") + assert ret + for var, val in testrole.items(): + assert var in ret + if var == "creation_statements": + assert ret[var] == [val] + else: + assert ret[var] == val + + +@pytest.mark.usefixtures("role_static_setup") +def test_fetch_role_static(vault_db, teststaticrole): + ret = vault_db.fetch_role("teststaticrole", static=True) + assert ret + for var, val in teststaticrole.items(): + assert var in ret + assert ret[var] == val + + +@pytest.mark.usefixtures("connection_setup") +def test_write_role(vault_db): + args = { + "connection": "testdb", + "creation_statements": r"CREATE USER '{{name}}'@'%' IDENTIFIED BY '{{password}}';GRANT SELECT ON *.* TO '{{name}}'@'%';", + } + ret = vault_db.write_role("testrole", **args) + assert ret + assert "testrole" in vault_list("database/roles") + + +@pytest.mark.usefixtures("connection_setup") +def test_write_static_role(vault_db, mysql_container): + args = { + "connection": "testdb", + "username": mysql_container.mysql_user, + "rotation_period": 86400, + } + ret = vault_db.write_static_role("teststaticrole", **args) + assert ret + assert "teststaticrole" in vault_list("database/static-roles") + + +@pytest.mark.usefixtures("roles_setup") +def test_delete_role(vault_db): + 
ret = vault_db.delete_role("testrole") + assert ret + assert "testrole" not in vault_list("database/roles") + + +@pytest.mark.usefixtures("role_static_setup") +def test_delete_role_static(vault_db): + ret = vault_db.delete_role("teststaticrole", static=True) + assert ret + assert "teststaticrole" not in vault_list("database/static-roles") + + +@pytest.mark.usefixtures("roles_setup") +def test_get_creds(vault_db): + ret = vault_db.get_creds("testrole", cache=False) + assert ret + assert "username" in ret + assert "password" in ret + + +@pytest.mark.usefixtures("role_static_setup") +def test_get_creds_static(vault_db, teststaticrole): + ret = vault_db.get_creds("teststaticrole", static=True, cache=False) + assert ret + assert "username" in ret + assert "password" in ret + assert ret["username"] == teststaticrole["username"] + + +@pytest.mark.usefixtures("roles_setup") +def test_get_creds_cached(vault_db): + ret = vault_db.get_creds("testrole", cache=True) + assert ret + assert "username" in ret + assert "password" in ret + ret_new = vault_db.get_creds("testrole", cache=True) + assert ret_new + assert "username" in ret_new + assert "password" in ret_new + assert ret_new["username"] == ret["username"] + assert ret_new["password"] == ret["password"] + + +@pytest.mark.usefixtures("roles_setup") +def test_get_creds_cached_multiple(vault_db): + ret = vault_db.get_creds("testrole", cache="one") + assert ret + assert "username" in ret + assert "password" in ret + ret_new = vault_db.get_creds("testrole", cache="two") + assert ret_new + assert "username" in ret_new + assert "password" in ret_new + assert ret_new["username"] != ret["username"] + assert ret_new["password"] != ret["password"] + assert vault_db.get_creds("testrole", cache="one") == ret + assert vault_db.get_creds("testrole", cache="two") == ret_new + + +@pytest.mark.usefixtures("roles_setup") +@pytest.mark.parametrize("roles_setup", [["testreissuerole"]], indirect=True) +def 
test_get_creds_cached_valid_for_reissue(vault_db, testreissuerole): + """ + Test that valid cached credentials that do not fulfill valid_for + and cannot be renewed as required are reissued + """ + ret = vault_db.get_creds("testreissuerole", cache=True) + assert ret + assert "username" in ret + assert "password" in ret + # 3 seconds because of leeway in lease validity check after renewals + time.sleep(3) + ret_new = vault_db.get_creds( + "testreissuerole", cache=True, valid_for=testreissuerole["default_ttl"] + ) + assert ret_new + assert "username" in ret_new + assert "password" in ret_new + assert ret_new["username"] != ret["username"] + assert ret_new["password"] != ret["password"] + + +@pytest.mark.usefixtures("role_static_setup") +def test_rotate_static_role(vault_db): + ret = vault_db.get_creds("teststaticrole", static=True, cache=False) + assert ret + old_pw = ret["password"] + ret = vault_db.rotate_static_role("teststaticrole") + assert ret + ret = vault_db.get_creds("teststaticrole", static=True, cache=False) + assert ret + assert ret["password"] != old_pw diff --git a/tests/pytests/functional/states/test_vault_db.py b/tests/pytests/functional/states/test_vault_db.py new file mode 100644 index 000000000000..bd692d1f2d54 --- /dev/null +++ b/tests/pytests/functional/states/test_vault_db.py @@ -0,0 +1,390 @@ +import pytest +from saltfactories.utils import random_string + +# pylint: disable=unused-import +from tests.support.pytest.mysql import ( + MySQLImage, + create_mysql_combo, + mysql_combo, + mysql_container, +) + +# pylint: disable=unused-import +from tests.support.pytest.vault import ( + vault_container_version, + vault_delete, + vault_disable_secret_engine, + vault_enable_secret_engine, + vault_environ, + vault_list, + vault_read, + vault_revoke, + vault_write, +) + +pytestmark = [ + pytest.mark.slow_test, + pytest.mark.skip_if_binaries_missing("dockerd", "vault", "getent"), + pytest.mark.usefixtures("vault_container_version"), + 
pytest.mark.parametrize("vault_container_version", ["latest"], indirect=True), +] + + +@pytest.fixture(scope="module") +def minion_config_overrides(vault_port): + return { + "vault": { + "auth": { + "method": "token", + "token": "testsecret", + }, + "server": { + "url": f"http://127.0.0.1:{vault_port}", + }, + } + } + + +@pytest.fixture(scope="module") +def mysql_image(request): + version = "10.3" + return MySQLImage( + name="mariadb", + tag=version, + container_id=random_string(f"mariadb-{version}-"), + ) + + +@pytest.fixture +def role_args_common(): + return { + "db_name": "testdb", + "creation_statements": r"CREATE USER '{{name}}'@'%' IDENTIFIED BY '{{password}}';GRANT SELECT ON *.* TO '{{name}}'@'%';", + } + + +@pytest.fixture +def testrole(): + return { + "default_ttl": 3600, + "max_ttl": 86400, + } + + +@pytest.fixture +def teststaticrole(mysql_container): + return { + "db_name": "testdb", + "rotation_period": 86400, + "username": mysql_container.mysql_user, + } + + +@pytest.fixture +def testdb(mysql_container): + # This uses the default IP address of the host on the default network + # (hardcoded) because I could not get hostname resolution working properly. 
+ return { + "plugin_name": "mysql-database-plugin", + "connection_url": f"{{{{username}}}}:{{{{password}}}}@tcp(172.17.0.1:{mysql_container.mysql_port})/", + "allowed_roles": "testrole,teststaticrole", + "username": "root", + "password": mysql_container.mysql_passwd, + } + + +@pytest.fixture(scope="module") +def db_engine(vault_container_version): + assert vault_enable_secret_engine("database") + yield + assert vault_disable_secret_engine("database") + + +@pytest.fixture +def connection_setup(vault_container_version, testdb, db_engine): + try: + vault_write("database/config/testdb", **testdb) + assert "testdb" in vault_list("database/config") + yield + finally: + # prevent dangling leases, which prevent disabling the secret engine + assert vault_revoke("database/creds", prefix=True) + if "testdb" in vault_list("database/config"): + vault_delete("database/config/testdb") + assert "testdb" not in vault_list("database/config") + + +@pytest.fixture(params=[["testrole"]]) +def roles_setup(connection_setup, request, role_args_common): + try: + for role_name in request.param: + role_args = request.getfixturevalue(role_name) + role_args.update(role_args_common) + vault_write(f"database/roles/{role_name}", **role_args) + assert role_name in vault_list("database/roles") + yield + finally: + for role_name in request.param: + if role_name in vault_list("database/roles"): + vault_delete(f"database/roles/{role_name}") + assert role_name not in vault_list("database/roles") + + +@pytest.fixture +def role_static_setup(connection_setup, teststaticrole): + role_name = "teststaticrole" + try: + vault_write(f"database/static-roles/{role_name}", **teststaticrole) + assert role_name in vault_list("database/static-roles") + yield + finally: + if role_name in vault_list("database/static-roles"): + vault_delete(f"database/static-roles/{role_name}") + assert role_name not in vault_list("database/static-roles") + + +@pytest.fixture +def vault_db(states, db_engine): + try: + yield 
states.vault_db + finally: + # prevent dangling leases, which prevent disabling the secret engine + assert vault_revoke("database/creds", prefix=True) + if "testdb" in vault_list("database/config"): + vault_delete("database/config/testdb") + assert "testdb" not in vault_list("database/config") + if "testrole" in vault_list("database/roles"): + vault_delete("database/roles/testrole") + assert "testrole" not in vault_list("database/roles") + if "teststaticrole" in vault_list("database/static-roles"): + vault_delete("database/static-roles/teststaticrole") + assert "teststaticrole" not in vault_list("database/static-roles") + + +@pytest.fixture +def connargs(mysql_container): + return { + "plugin": "mysql", + "connection_url": f"{{{{username}}}}:{{{{password}}}}@tcp(172.17.0.1:{mysql_container.mysql_port})/", + "allowed_roles": ["testrole", "teststaticrole"], + "username": "root", + "password": mysql_container.mysql_passwd, + "rotate": False, + } + + +@pytest.fixture +def roleargs(): + return { + "connection": "testdb", + "creation_statements": r"CREATE USER '{{name}}'@'%' IDENTIFIED BY '{{password}}';GRANT SELECT ON *.* TO '{{name}}'@'%';", + } + + +@pytest.fixture +def roleargs_static(mysql_container): + return { + "connection": "testdb", + "username": mysql_container.mysql_user, + "rotation_period": 86400, + } + + +def test_connection_present(vault_db, connargs): + ret = vault_db.connection_present("testdb", **connargs) + assert ret.result + assert ret.changes + assert "created" in ret.changes + assert ret.changes["created"] == "testdb" + assert "testdb" in vault_list("database/config") + + +@pytest.mark.usefixtures("connection_setup") +def test_connection_present_no_changes(vault_db, connargs): + ret = vault_db.connection_present("testdb", **connargs) + assert ret.result + assert not ret.changes + + +@pytest.mark.usefixtures("connection_setup") +def test_connection_present_allowed_roles_change(vault_db, connargs): + connargs["allowed_roles"] = ["testrole", 
"teststaticrole", "newrole"] + ret = vault_db.connection_present("testdb", **connargs) + assert ret.result + assert ret.changes + assert "allowed_roles" in ret.changes + assert ( + vault_read("database/config/testdb")["data"]["allowed_roles"] + == connargs["allowed_roles"] + ) + + +@pytest.mark.usefixtures("connection_setup") +def test_connection_present_new_param(vault_db, connargs): + connargs["username_template"] = r"{{random 20}}" + ret = vault_db.connection_present("testdb", **connargs) + assert ret.result + assert ret.changes + assert "username_template" in ret.changes + assert ( + vault_read("database/config/testdb")["data"]["connection_details"][ + "username_template" + ] + == connargs["username_template"] + ) + + +def test_connection_present_test_mode(vault_db, connargs): + ret = vault_db.connection_present("testdb", test=True, **connargs) + assert ret.result is None + assert ret.changes + assert "created" in ret.changes + assert ret.changes["created"] == "testdb" + assert "testdb" not in vault_list("database/config") + + +@pytest.mark.usefixtures("connection_setup") +def test_connection_absent(vault_db, connargs): + ret = vault_db.connection_absent("testdb") + assert ret.result + assert ret.changes + assert "deleted" in ret.changes + assert ret.changes["deleted"] == "testdb" + assert "testdb" not in vault_list("database/config") + + +def test_connection_absent_no_changes(vault_db, connargs): + ret = vault_db.connection_absent("testdb") + assert ret.result + assert not ret.changes + + +@pytest.mark.usefixtures("connection_setup") +def test_connection_absent_test_mode(vault_db, connargs): + ret = vault_db.connection_absent("testdb", test=True) + assert ret.result is None + assert ret.changes + assert "deleted" in ret.changes + assert ret.changes["deleted"] == "testdb" + assert "testdb" in vault_list("database/config") + + +@pytest.mark.usefixtures("connection_setup") +def test_role_present(vault_db, roleargs): + ret = vault_db.role_present("testrole", 
**roleargs) + assert ret.result + assert ret.changes + assert "created" in ret.changes + assert ret.changes["created"] == "testrole" + assert "testrole" in vault_list("database/roles") + + +@pytest.mark.usefixtures("roles_setup") +def test_role_present_no_changes(vault_db, roleargs): + ret = vault_db.role_present("testrole", **roleargs) + assert ret.result + assert not ret.changes + + +@pytest.mark.usefixtures("roles_setup") +def test_role_present_no_changes_with_time_string(vault_db, roleargs): + roleargs["default_ttl"] = "1h" + ret = vault_db.role_present("testrole", **roleargs) + assert ret.result + assert not ret.changes + + +@pytest.mark.usefixtures("roles_setup") +def test_role_present_param_change(vault_db, roleargs): + roleargs["default_ttl"] = 1337 + ret = vault_db.role_present("testrole", **roleargs) + assert ret.result + assert ret.changes + assert "default_ttl" in ret.changes + assert vault_read("database/roles/testrole")["data"]["default_ttl"] == 1337 + + +@pytest.mark.usefixtures("connection_setup") +def test_role_present_test_mode(vault_db, roleargs): + ret = vault_db.role_present("testrole", test=True, **roleargs) + assert ret.result is None + assert ret.changes + assert "created" in ret.changes + assert ret.changes["created"] == "testrole" + assert "testrole" not in vault_list("database/roles") + + +@pytest.mark.usefixtures("connection_setup") +def test_static_role_present(vault_db, roleargs_static): + ret = vault_db.static_role_present("teststaticrole", **roleargs_static) + assert ret.result + assert ret.changes + assert "created" in ret.changes + assert ret.changes["created"] == "teststaticrole" + assert "teststaticrole" in vault_list("database/static-roles") + + +@pytest.mark.usefixtures("role_static_setup") +def test_static_role_present_no_changes(vault_db, roleargs_static): + ret = vault_db.static_role_present("teststaticrole", **roleargs_static) + assert ret.result + assert not ret.changes + + +@pytest.mark.usefixtures("role_static_setup") 
+def test_static_role_present_param_change(vault_db, roleargs_static): + roleargs_static["rotation_period"] = 1337 + ret = vault_db.static_role_present("teststaticrole", **roleargs_static) + assert ret.result + assert ret.changes + assert "rotation_period" in ret.changes + assert ( + vault_read("database/static-roles/teststaticrole")["data"]["rotation_period"] + == 1337 + ) + + +@pytest.mark.usefixtures("connection_setup") +def test_static_role_present_test_mode(vault_db, roleargs_static): + ret = vault_db.static_role_present("teststaticrole", test=True, **roleargs_static) + assert ret.result is None + assert ret.changes + assert "created" in ret.changes + assert ret.changes["created"] == "teststaticrole" + assert "teststaticrole" not in vault_list("database/static-roles") + + +@pytest.mark.usefixtures("roles_setup") +def test_role_absent(vault_db): + ret = vault_db.role_absent("testrole") + assert ret.result + assert ret.changes + assert "deleted" in ret.changes + assert ret.changes["deleted"] == "testrole" + assert "testrole" not in vault_list("database/roles") + + +@pytest.mark.usefixtures("role_static_setup") +def test_role_absent_static(vault_db): + ret = vault_db.role_absent("teststaticrole", static=True) + assert ret.result + assert ret.changes + assert "deleted" in ret.changes + assert ret.changes["deleted"] == "teststaticrole" + assert "teststaticrole" not in vault_list("database/static-roles") + + +def test_role_absent_no_changes(vault_db): + ret = vault_db.role_absent("testrole") + assert ret.result + assert not ret.changes + + +@pytest.mark.usefixtures("roles_setup") +def test_role_absent_test_mode(vault_db): + ret = vault_db.role_absent("testrole", test=True) + assert ret.result is None + assert ret.changes + assert "deleted" in ret.changes + assert ret.changes["deleted"] == "testrole" + assert "testrole" in vault_list("database/roles") diff --git a/tests/pytests/functional/utils/test_vault.py b/tests/pytests/functional/utils/test_vault.py new file 
mode 100644 index 000000000000..47fa0bd225ed --- /dev/null +++ b/tests/pytests/functional/utils/test_vault.py @@ -0,0 +1,164 @@ +import logging + +import pytest +import requests.exceptions + +# pylint: disable=unused-import +from tests.support.pytest.vault import ( + vault_container_version, + vault_delete_secret, + vault_environ, + vault_list_secrets, + vault_read_secret, + vault_write_secret, +) + +pytestmark = [ + pytest.mark.slow_test, + pytest.mark.skip_if_binaries_missing("dockerd", "vault", "getent"), +] + +log = logging.getLogger(__name__) + + +@pytest.fixture(scope="module") +def minion_config_overrides(vault_port): + return { + "vault": { + "auth": { + "method": "token", + "token": "testsecret", + }, + "server": { + "url": f"http://127.0.0.1:{vault_port}", + }, + } + } + + +@pytest.fixture +def vault(loaders): + return loaders.utils.vault + + +@pytest.fixture(scope="module", autouse=True) +def vault_testing_data(vault_container_version): + vault_write_secret("secret/utils/read", success="yup") + vault_write_secret("secret/utils/deleteme", success="nope") + try: + yield + finally: + secret_path = "secret/utils" + for secret in vault_list_secrets(secret_path): + vault_delete_secret(f"{secret_path}/{secret}", metadata=True) + + +def test_make_request_get_unauthd(vault): + """ + Test that unauthenticated GET requests are possible + """ + res = vault.make_request("GET", "/v1/sys/health") + assert res.status_code == 200 + assert res.json() + assert "initialized" in res.json() + + +def test_make_request_get_authd(vault, vault_container_version): + """ + Test that authenticated GET requests are possible + """ + endpoint = "secret/utils/read" + if vault_container_version in ["1.3.1", "latest"]: + endpoint = "secret/data/utils/read" + + res = vault.make_request("GET", f"/v1/{endpoint}") + assert res.status_code == 200 + data = res.json()["data"] + if vault_container_version in ["1.3.1", "latest"]: + data = data["data"] + assert "success" in data + assert 
data["success"] == "yup" + + +def test_make_request_post_json(vault, vault_container_version): + """ + Test that POST requests are possible with json param + """ + data = {"success": "yup"} + endpoint = "secret/utils/write" + + if vault_container_version in ["1.3.1", "latest"]: + data = {"data": data} + endpoint = "secret/data/utils/write" + res = vault.make_request("POST", f"/v1/{endpoint}", json=data) + assert res.status_code in [200, 204] + assert vault_read_secret("secret/utils/write") == {"success": "yup"} + + +def test_make_request_post_data(vault, vault_container_version): + """ + Test that POST requests are possible with data param + """ + data = '{"success": "yup_data"}' + endpoint = "secret/utils/write" + + if vault_container_version in ["1.3.1", "latest"]: + data = '{"data": {"success": "yup_data"}}' + endpoint = "secret/data/utils/write" + res = vault.make_request("POST", f"/v1/{endpoint}", data=data) + assert res.status_code in [200, 204] + assert vault_read_secret("secret/utils/write") == {"success": "yup_data"} + + +def test_make_request_delete(vault, vault_container_version): + """ + Test that DELETE requests are possible + """ + endpoint = "secret/utils/deleteme" + if vault_container_version in ["1.3.1", "latest"]: + endpoint = "secret/data/utils/deleteme" + + res = vault.make_request("DELETE", f"/v1/{endpoint}") + assert res.status_code in [200, 204] + assert vault_read_secret("secret/utils/deleteme") is None + + +def test_make_request_list(vault, vault_container_version): + """ + Test that LIST requests are possible + """ + endpoint = "secret/utils" + if vault_container_version in ["1.3.1", "latest"]: + endpoint = "secret/metadata/utils" + + res = vault.make_request("LIST", f"/v1/{endpoint}") + assert res.status_code == 200 + assert res.json()["data"]["keys"] == vault_list_secrets("secret/utils") + + +def test_make_request_token_override(vault, vault_container_version): + """ + Test that overriding the token in use is possible + """ + endpoint = 
"secret/utils/read" + if vault_container_version in ["1.3.1", "latest"]: + endpoint = "secret/data/utils/read" + + res = vault.make_request("GET", f"/v1/{endpoint}", token="invalid") + assert res.status_code == 403 + + +def test_make_request_url_override(vault, vault_container_version): + """ + Test that overriding the server URL is possible + """ + endpoint = "secret/utils/read" + if vault_container_version in ["1.3.1", "latest"]: + endpoint = "secret/data/utils/read" + + with pytest.raises( + requests.exceptions.ConnectionError, match=".*Max retries exceeded with url:.*" + ): + vault.make_request( + "GET", f"/v1/{endpoint}", vault_url="http://127.0.0.1:1", timeout=2 + ) diff --git a/tests/pytests/integration/modules/test_vault.py b/tests/pytests/integration/modules/test_vault.py new file mode 100644 index 000000000000..b3a668a5bf4c --- /dev/null +++ b/tests/pytests/integration/modules/test_vault.py @@ -0,0 +1,375 @@ +""" +Tests for the Vault module +""" + +import logging +import shutil +import time + +import pytest +from saltfactories.utils import random_string + +# pylint: disable=unused-import +from tests.support.pytest.vault import ( + vault_container_version, + vault_delete_secret, + vault_environ, + vault_list_secrets, + vault_write_secret, +) + +log = logging.getLogger(__name__) + + +pytestmark = [ + pytest.mark.slow_test, + pytest.mark.skip_if_binaries_missing("dockerd", "vault", "getent"), + pytest.mark.usefixtures("vault_container_version"), +] + + +@pytest.fixture(scope="class") +def pillar_state_tree(tmp_path_factory): + _pillar_state_tree = tmp_path_factory.mktemp("pillar") + try: + yield _pillar_state_tree + finally: + shutil.rmtree(str(_pillar_state_tree), ignore_errors=True) + + +@pytest.fixture(scope="class") +def vault_salt_master( + salt_factories, pillar_state_tree, vault_port, vault_master_config +): + factory = salt_factories.salt_master_daemon( + "vault-exemaster", defaults=vault_master_config + ) + with factory.started(): + yield factory + 
+ +@pytest.fixture(scope="class") +def vault_salt_minion(vault_salt_master, vault_minion_config): + assert vault_salt_master.is_running() + factory = vault_salt_master.salt_minion_daemon( + random_string("vault-exeminion", uppercase=False), + defaults=vault_minion_config, + ) + with factory.started(): + # Sync All + salt_call_cli = factory.salt_call_cli() + ret = salt_call_cli.run("saltutil.sync_all", _timeout=120) + assert ret.returncode == 0, ret + yield factory + + +@pytest.fixture(scope="class") +def vault_minion_config(): + return {"open_mode": True, "grains": {}} + + +@pytest.fixture(scope="class") +def vault_salt_run_cli(vault_salt_master): + return vault_salt_master.salt_run_cli() + + +@pytest.fixture(scope="class") +def vault_salt_call_cli(vault_salt_minion): + return vault_salt_minion.salt_call_cli() + + +@pytest.fixture(scope="class") +def pillar_dual_use_tree( + vault_salt_master, + vault_salt_minion, +): + top_pillar_contents = f""" + base: + '{vault_salt_minion.id}': + - testvault + """ + test_pillar_contents = """ + test: + foo: bar + jvmdump_pubkey: {{ salt["vault.read_secret"]("secret/test/jvmdump/ssh_key", "public_key") }} + jenkins_pubkey: {{ salt["vault.read_secret"]("secret/test/jenkins/master/ssh_key", "public_key") }} + """ + top_file = vault_salt_master.pillar_tree.base.temp_file( + "top.sls", top_pillar_contents + ) + test_file = vault_salt_master.pillar_tree.base.temp_file( + "testvault.sls", test_pillar_contents + ) + + with top_file, test_file: + yield + + +@pytest.fixture(scope="class") +def vault_testing_data(vault_container_version): + vault_write_secret("secret/test/jvmdump/ssh_key", public_key="yup_dump") + vault_write_secret("secret/test/jenkins/master/ssh_key", public_key="yup_master") + vault_write_secret("secret/test/deleteme", pls=":)") + try: + yield + finally: + vault_delete_secret("secret/test/jvmdump/ssh_key") + vault_delete_secret("secret/test/jenkins/master/ssh_key") + for x in ["deleteme", "write"]: + if x in 
vault_list_secrets("secret/test"): + vault_delete_secret(f"secret/test/{x}") + + +@pytest.mark.usefixtures("vault_testing_data", "pillar_dual_use_tree") +@pytest.mark.parametrize("vault_container_version", ["1.3.1", "latest"], indirect=True) +class TestSingleUseToken: + """ + Single-use tokens and read operations on versions below 0.10.0 + do not work since the necessary metadata lookup consumes a use + there without caching metadata information (sys/internal/mounts/:path + is not available, hence not an unauthenticated endpoint). + It is impossible to differentiate between the endpoint not being + available and the token not having the correct permissions. + """ + + @pytest.fixture(scope="class") + def vault_master_config(self, pillar_state_tree, vault_port): + return { + "pillar_roots": {"base": [str(pillar_state_tree)]}, + "open_mode": True, + "peer_run": { + ".*": [ + "vault.get_config", + "vault.generate_new_token", + ], + }, + "vault": { + "auth": {"token": "testsecret"}, + "cache": { + "backend": "file", + }, + "issue": { + "type": "token", + "token": { + "params": { + "num_uses": 1, + } + }, + }, + "policies": { + "assign": [ + "salt_minion", + ] + }, + "server": { + "url": f"http://127.0.0.1:{vault_port}", + }, + }, + "minion_data_cache": False, + } + + def test_vault_read_secret(self, vault_salt_call_cli): + """ + Test that the Vault module can fetch a single secret when tokens + are issued with uses=1. + """ + ret = vault_salt_call_cli.run( + "vault.read_secret", "secret/test/jvmdump/ssh_key" + ) + assert ret.returncode == 0 + assert ret.data == {"public_key": "yup_dump"} + + def test_vault_read_secret_can_fetch_more_than_one_secret_in_one_run( + self, + vault_salt_call_cli, + vault_salt_minion, + caplog, + ): + """ + Test that the Vault module can fetch multiple secrets during + a single run when tokens are issued with uses=1. 
+ Issue #57561 + """ + ret = vault_salt_call_cli.run("saltutil.refresh_pillar", wait=True) + assert ret.returncode == 0 + assert ret.data is True + ret = vault_salt_call_cli.run("pillar.items") + assert ret.returncode == 0 + assert ret.data + assert "Pillar render error" not in caplog.text + assert "test" in ret.data + assert "jvmdump_pubkey" in ret.data["test"] + assert ret.data["test"]["jvmdump_pubkey"] == "yup_dump" + assert "jenkins_pubkey" in ret.data["test"] + assert ret.data["test"]["jenkins_pubkey"] == "yup_master" + + def test_vault_write_secret(self, vault_salt_call_cli): + """ + Test that the Vault module can write a single secret when tokens + are issued with uses=1. + """ + ret = vault_salt_call_cli.run( + "vault.write_secret", "secret/test/write", success="yup" + ) + assert ret.returncode == 0 + assert ret.data + assert "write" in vault_list_secrets("secret/test") + + def test_vault_delete_secret(self, vault_salt_call_cli): + """ + Test that the Vault module can delete a single secret when tokens + are issued with uses=1. 
+ """ + ret = vault_salt_call_cli.run("vault.delete_secret", "secret/test/deleteme") + assert ret.returncode == 0 + assert ret.data + assert "delete" not in vault_list_secrets("secret/test") + + +class TestTokenMinimumTTLUnrenewable: + """ + Test that a new token is requested when the current one does not + fulfill minimum_ttl and cannot be renewed + """ + + @pytest.fixture(scope="class") + def vault_master_config(self, vault_port): + return { + "pillar_roots": {}, + "open_mode": True, + "peer_run": { + ".*": [ + "vault.get_config", + "vault.generate_new_token", + ], + }, + "vault": { + "auth": {"token": "testsecret"}, + "cache": { + "backend": "file", + }, + "issue": { + "type": "token", + "token": { + "params": { + "num_uses": 0, + "explicit_max_ttl": 180, + } + }, + }, + "policies": { + "assign": [ + "salt_minion", + ] + }, + "server": { + "url": f"http://127.0.0.1:{vault_port}", + }, + }, + "minion_data_cache": False, + } + + @pytest.fixture(scope="class") + def vault_minion_config(self): + return { + "open_mode": True, + "grains": {}, + "vault": { + "auth": { + "token_lifecycle": {"minimum_ttl": 178, "renew_increment": None} + } + }, + } + + def test_minimum_ttl_is_respected(self, vault_salt_call_cli): + # create token by looking it up + ret = vault_salt_call_cli.run("vault.query", "GET", "auth/token/lookup-self") + assert ret.data + assert ret.returncode == 0 + # wait + time_before = time.time() + while time.time() - time_before < 3: + time.sleep(0.1) + # reissue token by looking it up + ret_new = vault_salt_call_cli.run( + "vault.query", "GET", "auth/token/lookup-self" + ) + assert ret_new.returncode == 0 + assert ret_new.data + # ensure a new token was created, even though the previous one would have been + # valid still + assert ret_new.data["data"]["id"] != ret.data["data"]["id"] + + +class TestTokenMinimumTTLRenewable: + """ + Test that tokens are renewed and minimum_ttl is respected + """ + + @pytest.fixture(scope="class") + def 
vault_master_config(self, vault_port): + return { + "pillar_roots": {}, + "open_mode": True, + "peer_run": { + ".*": [ + "vault.get_config", + "vault.generate_new_token", + ], + }, + "vault": { + "auth": {"token": "testsecret"}, + "cache": { + "backend": "file", + }, + "issue": { + "type": "token", + "token": { + "params": { + "num_uses": 0, + "ttl": 180, + } + }, + }, + "policies": { + "assign": [ + "salt_minion", + ] + }, + "server": { + "url": f"http://127.0.0.1:{vault_port}", + }, + }, + "minion_data_cache": False, + } + + @pytest.fixture(scope="class") + def vault_minion_config(self): + return { + "open_mode": True, + "grains": {}, + "vault": { + "auth": { + "token_lifecycle": {"minimum_ttl": 177, "renew_increment": None} + } + }, + } + + def test_minimum_ttl_is_respected(self, vault_salt_call_cli): + # create token by looking it up + ret = vault_salt_call_cli.run("vault.query", "GET", "auth/token/lookup-self") + assert ret.data + assert ret.returncode == 0 + # wait + time_before = time.time() + while time.time() - time_before < 4: + time.sleep(0.1) + # renew token by looking it up + ret_new = vault_salt_call_cli.run( + "vault.query", "GET", "auth/token/lookup-self" + ) + assert ret_new.returncode == 0 + assert ret_new.data + # ensure the current token's validity has been extended + assert ret_new.data["data"]["id"] == ret.data["data"]["id"] + assert ret_new.data["data"]["expire_time"] > ret.data["data"]["expire_time"] diff --git a/tests/pytests/integration/runners/test_vault.py b/tests/pytests/integration/runners/test_vault.py index 5f662d551702..6b0c3fca73f9 100644 --- a/tests/pytests/integration/runners/test_vault.py +++ b/tests/pytests/integration/runners/test_vault.py @@ -3,15 +3,30 @@ """ import logging +import os import shutil +from pathlib import Path import pytest +from saltfactories.utils import random_string + +import salt.utils.files + +# pylint: disable=unused-import +from tests.support.pytest.vault import ( + vault_container_version, + 
vault_delete_secret, + vault_environ, + vault_write_secret, +) log = logging.getLogger(__name__) pytestmark = [ pytest.mark.slow_test, + pytest.mark.skip_if_binaries_missing("dockerd", "vault", "getent"), + pytest.mark.usefixtures("vault_container_version"), ] @@ -25,24 +40,41 @@ def pillar_state_tree(tmp_path_factory): @pytest.fixture(scope="class") -def pillar_salt_master(salt_factories, pillar_state_tree): +def pillar_salt_master(salt_factories, pillar_state_tree, vault_port): config_defaults = { "pillar_roots": {"base": [str(pillar_state_tree)]}, "open_mode": True, - "ext_pillar": [{"vault": "path=does/not/matter"}], + "ext_pillar": [{"vault": "path=secret/path/foo"}], "sdbvault": { "driver": "vault", }, "vault": { - "auth": {"method": "token", "token": "testsecret"}, - "policies": [ - "salt_minion", - "salt_minion_{minion}", - "salt_role_{pillar[roles]}", - "salt_unsafe_{grains[foo]}", - ], - "policies_cache_time": 0, - "url": "http://127.0.0.1:8200", + "auth": {"token": "testsecret"}, + "issue": { + "token": { + "params": { + # otherwise the tests might fail because of + # cached tokens (should not, because by default, + # the cache is valid for one session only) + "num_uses": 1, + }, + }, + }, + "policies": { + "assign": [ + "salt_minion", + "salt_minion_{minion}", + "salt_role_{pillar[roles]}", + "salt_unsafe_{grains[foo]}", + "extpillar_this_should_always_be_absent_{pillar[vault_sourced]}", + "sdb_this_should_always_be_absent_{pillar[vault_sourced_sdb]}", + "exe_this_should_always_be_absent_{pillar[vault_sourced_exe]}", + ], + "cache_time": 0, + }, + "server": { + "url": f"http://127.0.0.1:{vault_port}", + }, }, "minion_data_cache": False, } @@ -54,20 +86,35 @@ def pillar_salt_master(salt_factories, pillar_state_tree): @pytest.fixture(scope="class") -def pillar_caching_salt_master(salt_factories, pillar_state_tree): +def pillar_caching_salt_master(salt_factories, pillar_state_tree, vault_port): config_defaults = { "pillar_roots": {"base": 
[str(pillar_state_tree)]}, "open_mode": True, + "ext_pillar": [{"vault": "path=secret/path/foo"}], "vault": { - "auth": {"method": "token", "token": "testsecret"}, - "policies": [ - "salt_minion", - "salt_minion_{minion}", - "salt_role_{pillar[roles]}", - "salt_unsafe_{grains[foo]}", - ], - "policies_cache_time": 0, - "url": "http://127.0.0.1:8200", + "auth": {"token": "testsecret"}, + "issue": { + "token": { + "params": { + # otherwise the tests might fail because of + # cached tokens + "num_uses": 1, + }, + }, + }, + "policies": { + "assign": [ + "salt_minion", + "salt_minion_{minion}", + "salt_role_{pillar[roles]}", + "salt_unsafe_{grains[foo]}", + "extpillar_this_will_not_always_be_absent_{pillar[vault_sourced]}", + ], + "cache_time": 0, + }, + "server": { + "url": f"http://127.0.0.1:{vault_port}", + }, }, "minion_data_cache": True, } @@ -128,6 +175,17 @@ def pillar_caching_salt_call_cli(pillar_caching_salt_minion): return pillar_caching_salt_minion.salt_call_cli() +@pytest.fixture(scope="class") +def vault_pillar_values_policy(vault_container_version): + vault_write_secret("secret/path/foo", vault_sourced="fail") + try: + yield + finally: + vault_delete_secret("secret/path/foo") + + +@pytest.mark.usefixtures("vault_pillar_values_policy") +@pytest.mark.parametrize("vault_container_version", ["latest"], indirect=True) class TestVaultPillarPolicyTemplatesWithoutCache: @pytest.fixture(autouse=True) def pillar_policy_tree( @@ -135,13 +193,11 @@ def pillar_policy_tree( pillar_salt_master, pillar_salt_minion, ): - top_pillar_contents = """ + top_pillar_contents = f""" base: - '{}': + '{pillar_salt_minion.id}': - roles - """.format( - pillar_salt_minion.id - ) + """ roles_pillar_contents = """ roles: - minion @@ -157,18 +213,16 @@ def pillar_policy_tree( with top_file, roles_file: yield - @pytest.fixture() + @pytest.fixture def pillar_exe_loop(self, pillar_state_tree, pillar_salt_minion): - top_file = """ + top_file = f""" base: - '{}': + '{pillar_salt_minion.id}': - 
roles - exe_loop - """.format( - pillar_salt_minion.id - ) + """ exe_loop_pillar = r""" - bar: {{ salt["vault.read_secret"]("does/not/matter") }} + vault_sourced_exe: {{ salt["vault.read_secret"]("secret/path/foo", "vault_sourced") }} """ top_tempfile = pytest.helpers.temp_file("top.sls", top_file, pillar_state_tree) exe_loop_tempfile = pytest.helpers.temp_file( @@ -178,18 +232,16 @@ def pillar_exe_loop(self, pillar_state_tree, pillar_salt_minion): with top_tempfile, exe_loop_tempfile: yield - @pytest.fixture() + @pytest.fixture def pillar_sdb_loop(self, pillar_state_tree, pillar_salt_minion): - top_file = """ + top_file = f""" base: - '{}': + '{pillar_salt_minion.id}': - roles - sdb_loop - """.format( - pillar_salt_minion.id - ) + """ sdb_loop_pillar = r""" - foo: {{ salt["sdb.get"]("sdb://sdbvault/does/not/matter/val") }} + vault_sourced_sdb: {{ salt["sdb.get"]("sdb://sdbvault/secret/path/foo/vault_sourced") }} """ top_tempfile = pytest.helpers.temp_file("top.sls", top_file, pillar_state_tree) sdb_loop_tempfile = pytest.helpers.temp_file( @@ -240,11 +292,11 @@ def test_show_policies_uncached_data_no_pillar_refresh( ) assert ret.data == ["salt_minion", f"salt_minion_{pillar_salt_minion.id}"] + @pytest.mark.usefixtures("pillar_exe_loop") def test_policy_compilation_prevents_loop_for_execution_module( self, pillar_salt_run_cli, pillar_salt_minion, - pillar_exe_loop, ): """ Test that the runner prevents a recursive cycle from happening @@ -260,12 +312,13 @@ def test_policy_compilation_prevents_loop_for_execution_module( ] assert "Pillar render error: Rendering SLS 'exe_loop' failed" in ret.stderr assert "Cyclic dependency detected while refreshing pillar" in ret.stderr + assert "RecursionError" not in ret.stderr + @pytest.mark.usefixtures("pillar_sdb_loop") def test_policy_compilation_prevents_loop_for_sdb_module( self, pillar_salt_run_cli, pillar_salt_minion, - pillar_sdb_loop, ): """ Test that the runner prevents a recursive cycle from happening @@ -281,20 +334,21 
@@ def test_policy_compilation_prevents_loop_for_sdb_module( ] assert "Pillar render error: Rendering SLS 'sdb_loop' failed" in ret.stderr assert "Cyclic dependency detected while refreshing pillar" in ret.stderr + assert "RecursionError" not in ret.stderr +@pytest.mark.usefixtures("vault_pillar_values_policy") +@pytest.mark.parametrize("vault_container_version", ["latest"], indirect=True) class TestVaultPillarPolicyTemplatesWithCache: @pytest.fixture(autouse=True) def pillar_caching_policy_tree( self, pillar_caching_salt_master, pillar_caching_salt_minion ): - top_pillar_contents = """ + top_pillar_contents = f""" base: - '{}': + '{pillar_caching_salt_minion.id}': - roles - """.format( - pillar_caching_salt_minion.id - ) + """ roles_pillar_contents = """ roles: - minion @@ -347,7 +401,7 @@ def minion_data_cache_outdated( assert "pillar" in cached.data assert "grains" in cached.data assert "roles" in cached.data["pillar"] - assert ["minion", "web"] == cached.data["pillar"]["roles"] + assert cached.data["pillar"]["roles"] == ["minion", "web"] with roles_file: yield @@ -368,6 +422,7 @@ def test_show_policies_cached_data_no_pillar_refresh( "salt_role_minion", "salt_role_web", "salt_unsafe_bar", + "extpillar_this_will_not_always_be_absent_fail", ] def test_show_policies_refresh_pillar( @@ -392,3 +447,844 @@ def test_show_policies_refresh_pillar( "salt_role_fresh", "salt_unsafe_bar", ] + + +# The tests above use different fixtures because I could not +# make them behave as expected otherwise. 
+ + +@pytest.fixture(scope="class") +def vault_salt_master( + salt_factories, pillar_state_tree, vault_port, vault_master_config +): + factory = salt_factories.salt_master_daemon( + "vault-master", defaults=vault_master_config + ) + with factory.started(): + yield factory + + +@pytest.fixture(scope="class") +def vault_salt_minion(vault_salt_master): + assert vault_salt_master.is_running() + factory = vault_salt_master.salt_minion_daemon( + random_string("vault-minion", uppercase=False), + defaults={"open_mode": True, "grains": {}}, + ) + with factory.started(): + # Sync All + salt_call_cli = factory.salt_call_cli() + ret = salt_call_cli.run("saltutil.sync_all", _timeout=120) + assert ret.returncode == 0, ret + yield factory + + +@pytest.fixture(scope="class") +def overriding_vault_salt_minion(vault_salt_master, issue_overrides): + assert vault_salt_master.is_running() + factory = vault_salt_master.salt_minion_daemon( + random_string("vault-minion", uppercase=False), + defaults={"open_mode": True, "grains": {}}, + overrides={"vault": {"issue_params": issue_overrides}}, + ) + with factory.started(): + # Sync All + salt_call_cli = factory.salt_call_cli() + ret = salt_call_cli.run("saltutil.sync_all", _timeout=120) + assert ret.returncode == 0, ret + yield factory + + +@pytest.fixture(scope="class") +def vault_salt_run_cli(vault_salt_master): + return vault_salt_master.salt_run_cli() + + +@pytest.fixture(scope="class") +def vault_salt_call_cli(vault_salt_minion): + return vault_salt_minion.salt_call_cli() + + +@pytest.fixture(scope="class") +def pillar_roles_tree( + vault_salt_master, + vault_salt_minion, +): + top_pillar_contents = f""" + base: + '{vault_salt_minion.id}': + - roles + """ + roles_pillar_contents = """ + roles: + - dev + - web + # this is for entity metadata since lists are cumbersome at best + role: foo + """ + top_file = vault_salt_master.pillar_tree.base.temp_file( + "top.sls", top_pillar_contents + ) + roles_file = 
vault_salt_master.pillar_tree.base.temp_file( + "roles.sls", roles_pillar_contents + ) + + with top_file, roles_file: + yield + + +@pytest.fixture(scope="class") +def vault_pillar_values_approle(vault_salt_minion): + vault_write_secret( + f"salt/minions/{vault_salt_minion.id}", minion_id_acl_template="worked" + ) + vault_write_secret("salt/roles/foo", pillar_role_acl_template="worked") + try: + yield + finally: + vault_delete_secret(f"salt/minions/{vault_salt_minion.id}") + vault_delete_secret("salt/roles/foo") + + +@pytest.fixture(scope="class") +def vault_testing_values(vault_container_version): + vault_write_secret("secret/path/foo", success="yeehaaw") + try: + yield + finally: + vault_delete_secret("secret/path/foo") + + +@pytest.fixture +def minion_conn_cachedir(vault_salt_call_cli): + ret = vault_salt_call_cli.run("config.get", "cachedir") + assert ret.returncode == 0 + assert ret.data + cachedir = Path(ret.data) / "vault" / "connection" + if not cachedir.exists(): + cachedir.mkdir(parents=True) + yield cachedir + + +@pytest.fixture +def missing_auth_cache(minion_conn_cachedir): + token_cachefile = minion_conn_cachedir / "session" / "__token.p" + secret_id_cachefile = minion_conn_cachedir / "secret_id.p" + for file in [secret_id_cachefile, token_cachefile]: + if file.exists(): + file.unlink() + yield + + +@pytest.fixture(scope="class") +def minion_data_cache_present( + vault_salt_call_cli, + vault_salt_minion, + pillar_roles_tree, + vault_salt_run_cli, +): + ret = vault_salt_run_cli.run("pillar.show_top", minion=vault_salt_minion.id) + assert ret.returncode == 0 + assert ret.data + ret = vault_salt_call_cli.run("saltutil.refresh_pillar", wait=True) + assert ret.returncode == 0 + assert ret.data is True + ret = vault_salt_call_cli.run("pillar.items") + assert ret.returncode == 0 + assert ret.data + assert "role" in ret.data + assert "roles" in ret.data + yield + + +@pytest.fixture +def conn_cache_absent(minion_conn_cachedir): + 
shutil.rmtree(minion_conn_cachedir) + assert not minion_conn_cachedir.exists() + yield + + +@pytest.fixture(scope="class") +def approles_synced( + vault_salt_run_cli, + minion_data_cache_present, + vault_salt_minion, +): + ret = vault_salt_run_cli.run("vault.sync_approles", vault_salt_minion.id) + assert ret.returncode == 0 + assert ret.data is True + ret = vault_salt_run_cli.run("vault.list_approles") + assert ret.returncode == 0 + assert vault_salt_minion.id in ret.data + yield + + +@pytest.fixture(scope="class") +def entities_synced( + vault_salt_run_cli, + minion_data_cache_present, + vault_salt_minion, +): + ret = vault_salt_run_cli.run("vault.sync_entities", vault_salt_minion.id) + assert ret.returncode == 0 + assert ret.data is True + ret = vault_salt_run_cli.run("vault.list_approles") + assert ret.returncode == 0 + assert vault_salt_minion.id in ret.data + ret = vault_salt_run_cli.run("vault.list_entities") + assert ret.returncode == 0 + assert f"salt_minion_{vault_salt_minion.id}" in ret.data + ret = vault_salt_run_cli.run("vault.show_entity", vault_salt_minion.id) + assert ret.returncode == 0 + assert ret.data == {"minion-id": vault_salt_minion.id, "role": "foo"} + yield + + +@pytest.mark.usefixtures( + "vault_pillar_values_approle", + "vault_testing_values", + "pillar_roles_tree", + "minion_data_cache_present", +) +@pytest.mark.parametrize("vault_container_version", ["latest"], indirect=True) +class TestAppRoleIssuance: + @pytest.fixture(scope="class") + def vault_master_config(self, pillar_state_tree, vault_port): + return { + "pillar_roots": {"base": [str(pillar_state_tree)]}, + "open_mode": True, + # ensure approles/entities are generated during pillar rendering + "ext_pillar": [ + {"vault": "path=salt/minions/{minion}"}, + {"vault": "path=salt/roles/{pillar[role]}"}, + ], + "peer_run": { + ".*": [ + "vault.get_config", + # for test_auth_method_switch_does_not_break_minion_auth + "vault.generate_new_token", + "vault.generate_secret_id", + ], + }, + 
"vault": { + "auth": {"token": "testsecret"}, + "cache": { + "backend": "file", + }, + "issue": { + "allow_minion_override_params": True, + "type": "approle", + "approle": { + "params": { + "secret_id_num_uses": 0, + "secret_id_ttl": 1800, + "token_explicit_max_ttl": 1800, + "token_num_uses": 0, + } + }, + }, + "metadata": { + "entity": { + "minion-id": "{minion}", + "role": "{pillar[role]}", + }, + }, + "policies": { + "assign": [ + "salt_minion", + "salt_minion_{minion}", + "salt_role_{pillar[roles]}", + ], + }, + "server": { + "url": f"http://127.0.0.1:{vault_port}", + }, + }, + } + + @pytest.fixture(scope="class") + def issue_overrides(self): + return { + "token_explicit_max_ttl": 1337, + "token_num_uses": 42, + "secret_id_num_uses": 3, + "secret_id_ttl": 1338, + } + + @pytest.fixture + def cache_auth_outdated(self, missing_auth_cache, minion_conn_cachedir, vault_port): + vault_url = f"http://127.0.0.1:{vault_port}" + config_data = b"\xdf\x00\x00\x00\x03\xa4auth\xdf\x00\x00\x00\x04\xadapprole_mount\xa7approle\xacapprole_name\xbavault-approle-int-minion-1\xa6method\xa5token\xa9secret_id\xc0\xa5cache\xdf\x00\x00\x00\x03\xa7backend\xa4disk\xa6config\xcd\x0e\x10\xa6secret\xa3ttl\xa6server\xdf\x00\x00\x00\x03\xa9namespace\xc0\xa6verify\xc0\xa3url" + config_data += (len(vault_url) + 160).to_bytes(1, "big") + vault_url.encode() + config_cachefile = minion_conn_cachedir / "config.p" + with salt.utils.files.fopen(config_cachefile, "wb") as f: + f.write(config_data) + try: + yield + finally: + if config_cachefile.exists(): + config_cachefile.unlink() + + @pytest.fixture + def cache_server_outdated(self, missing_auth_cache, minion_conn_cachedir): + config_data = 
b"\xdf\x00\x00\x00\x03\xa4auth\xdf\x00\x00\x00\x05\xadapprole_mount\xa7approle\xacapprole_name\xbavault-approle-int-minion-1\xa6method\xa7approle\xa7role_id\xactest-role-id\xa9secret_id\xc3\xa5cache\xdf\x00\x00\x00\x03\xa7backend\xa4disk\xa6config\xcd\x0e\x10\xa6secret\xa3ttl\xa6server\xdf\x00\x00\x00\x03\xa9namespace\xc0\xa6verify\xc0\xa3url\xb2http://127.0.0.1:8" + config_cachefile = minion_conn_cachedir / "config.p" + with salt.utils.files.fopen(config_cachefile, "wb") as f: + f.write(config_data) + try: + yield + finally: + if config_cachefile.exists(): + config_cachefile.unlink() + + @pytest.mark.usefixtures("conn_cache_absent") + def test_minion_can_authenticate(self, vault_salt_call_cli): + """ + Test that the minion can run queries against Vault. + The master impersonating the minion is already tested in the fixture setup + (ext_pillar). + """ + ret = vault_salt_call_cli.run("vault.read_secret", "secret/path/foo") + assert ret.returncode == 0 + assert ret.data + assert ret.data.get("success") == "yeehaaw" + + @pytest.mark.usefixtures("entities_synced") + def test_minion_pillar_is_populated_as_expected(self, vault_salt_call_cli): + """ + Test that ext_pillar pillar-templated paths are resolved as expectd + (and that the ACL policy templates work on the Vault side). + """ + ret = vault_salt_call_cli.run("pillar.items") + assert ret.returncode == 0 + assert ret.data + assert ret.data.get("minion_id_acl_template") == "worked" + assert ret.data.get("pillar_role_acl_template") == "worked" + + @pytest.mark.usefixtures("approles_synced") + @pytest.mark.usefixtures("conn_cache_absent") + def test_minion_token_policies_are_assigned_as_expected( + self, vault_salt_call_cli, vault_salt_minion + ): + """ + Test that issued tokens have the expected policies. 
+ """ + ret = vault_salt_call_cli.run("vault.query", "GET", "auth/token/lookup-self") + assert ret.returncode == 0 + assert ret.data + assert set(ret.data["data"]["policies"]) == { + "default", + "salt_minion", + f"salt_minion_{vault_salt_minion.id}", + "salt_role_dev", + "salt_role_web", + } + + @pytest.mark.usefixtures("cache_auth_outdated") + def test_auth_method_switch_does_not_break_minion_auth( + self, vault_salt_call_cli, caplog + ): + """ + Test that after a master configuration switch from another authentication method, + minions with cached configuration flush it and request a new one. + """ + ret = vault_salt_call_cli.run("vault.read_secret", "secret/path/foo") + assert ret.returncode == 0 + assert ret.data + assert ret.data.get("success") == "yeehaaw" + assert "Master returned error and requested cache expiration" in caplog.text + + @pytest.mark.usefixtures("cache_server_outdated") + def test_server_switch_does_not_break_minion_auth( + self, vault_salt_call_cli, caplog + ): + """ + Test that after a master configuration switch to another server URL, + minions with cached configuration detect the mismatchand request a + new configuration. + """ + ret = vault_salt_call_cli.run("vault.read_secret", "secret/path/foo") + assert ret.returncode == 0 + assert ret.data + assert ret.data.get("success") == "yeehaaw" + assert "Mismatch of cached and reported server data detected" in caplog.text + + @pytest.mark.parametrize("ckey", ["config", "__token", "secret_id"]) + def test_cache_is_used_on_the_minion( + self, ckey, vault_salt_call_cli, minion_conn_cachedir + ): + """ + Test that remote configuration, tokens acquired by authenticating with an AppRole + and issued secret IDs are written to cache. 
+ """ + cache = minion_conn_cachedir + if ckey == "__token": + cache = cache / "session" + if not cache.exists(): + cache.mkdir() + if f"{ckey}.p" not in os.listdir(cache): + ret = vault_salt_call_cli.run("vault.read_secret", "secret/path/foo") + assert ret.returncode == 0 + assert f"{ckey}.p" in os.listdir(cache) + + @pytest.mark.parametrize("ckey", ["config", "__token", "secret_id"]) + def test_cache_is_used_on_the_impersonating_master( + self, ckey, vault_salt_run_cli, vault_salt_minion + ): + """ + Test that remote configuration, tokens acquired by authenticating with an AppRole + and issued secret IDs are written to cache when a master is impersonating + a minion during pillar rendering. + """ + cbank = f"minions/{vault_salt_minion.id}/vault/connection" + if ckey == "__token": + cbank += "/session" + ret = vault_salt_run_cli.run("cache.list", cbank) + assert ret.returncode == 0 + assert ret.data + assert ckey in ret.data + + def test_cache_is_used_for_master_token_information(self, vault_salt_run_cli): + """ + Test that a locally configured token is cached, including meta information. + """ + ret = vault_salt_run_cli.run("cache.list", "vault/connection/session") + assert ret.returncode == 0 + assert ret.data + assert "__token" in ret.data + + @pytest.mark.usefixtures("approles_synced") + def test_issue_param_overrides_work( + self, overriding_vault_salt_minion, issue_overrides, vault_salt_run_cli + ): + """ + Test that minion overrides of issue params work for AppRoles. 
+ """ + ret = overriding_vault_salt_minion.salt_call_cli().run( + "vault.query", "GET", "auth/token/lookup-self" + ) + assert ret.returncode == 0 + assert ret.data + ret = vault_salt_run_cli.run( + "vault.show_approle", overriding_vault_salt_minion.id + ) + assert ret.returncode == 0 + assert ret.data + for val in [ + "token_explicit_max_ttl", + "token_num_uses", + "secret_id_num_uses", + "secret_id_ttl", + ]: + assert ret.data[val] == issue_overrides[val] + + def test_impersonating_master_does_not_override_issue_param_overrides( + self, overriding_vault_salt_minion, vault_salt_run_cli, issue_overrides + ): + """ + Test that rendering the pillar does not remove issue param overrides + requested by a minion + """ + # ensure the minion requests a new configuration + ret = overriding_vault_salt_minion.salt_call_cli().run( + "vault.clear_token_cache" + ) + assert ret.returncode == 0 + # check that the overrides are applied + ret = overriding_vault_salt_minion.salt_call_cli().run( + "vault.query", "GET", "auth/token/lookup-self" + ) + assert ret.returncode == 0 + assert ret.data + assert ( + ret.data["data"]["explicit_max_ttl"] + == issue_overrides["token_explicit_max_ttl"] + ) + # ensure the master does not have cached authentication + ret = vault_salt_run_cli.run("vault.clear_cache") + assert ret.returncode == 0 + ret = vault_salt_run_cli.run( + "pillar.show_pillar", overriding_vault_salt_minion.id + ) + assert ret.returncode == 0 + # check that issue overrides are still present + ret = vault_salt_run_cli.run( + "vault.show_approle", overriding_vault_salt_minion.id + ) + assert ret.returncode == 0 + assert ret.data + assert ( + ret.data["token_explicit_max_ttl"] + == issue_overrides["token_explicit_max_ttl"] + ) + + +@pytest.mark.usefixtures( + "vault_testing_values", "pillar_roles_tree", "minion_data_cache_present" +) +class TestTokenIssuance: + @pytest.fixture(scope="class") + def vault_master_config(self, pillar_state_tree, vault_port): + return { + "pillar_roots": 
{"base": [str(pillar_state_tree)]}, + "open_mode": True, + "ext_pillar": [{"vault": "path=secret/path/foo"}], + "peer_run": { + ".*": [ + "vault.get_config", + "vault.generate_new_token", + # for test_auth_method_switch_does_not_break_minion_auth + "vault.generate_secret_id", + ], + }, + "vault": { + "auth": {"token": "testsecret"}, + "cache": { + "backend": "file", + }, + "issue": { + "type": "token", + "token": { + "params": { + "num_uses": 0, + } + }, + }, + "policies": { + "assign": [ + "salt_minion", + "salt_minion_{minion}", + "salt_role_{pillar[roles]}", + ], + "cache_time": 0, + }, + "server": { + "url": f"http://127.0.0.1:{vault_port}", + }, + }, + "minion_data_cache": True, + } + + @pytest.fixture + def cache_auth_outdated(self, missing_auth_cache, minion_conn_cachedir, vault_port): + vault_url = f"http://127.0.0.1:{vault_port}" + config_data = b"\xdf\x00\x00\x00\x03\xa4auth\xdf\x00\x00\x00\x05\xadapprole_mount\xa7approle\xacapprole_name\xbavault-approle-int-minion-1\xa6method\xa7approle\xa7role_id\xactest-role-id\xa9secret_id\xc3\xa5cache\xdf\x00\x00\x00\x03\xa7backend\xa4disk\xa6config\xcd\x0e\x10\xa6secret\xa3ttl\xa6server\xdf\x00\x00\x00\x03\xa9namespace\xc0\xa6verify\xc0\xa3url" + config_data += (len(vault_url) + 160).to_bytes(1, "big") + vault_url.encode() + config_cachefile = minion_conn_cachedir / "config.p" + with salt.utils.files.fopen(config_cachefile, "wb") as f: + f.write(config_data) + try: + yield + finally: + if config_cachefile.exists(): + config_cachefile.unlink() + + @pytest.fixture(scope="class") + def issue_overrides(self): + # only explicit_max_ttl and num_uses are respected, the rest is for testing purposes + return { + "explicit_max_ttl": 1337, + "num_uses": 42, + "secret_id_num_uses": 3, + "secret_id_ttl": 1338, + "irrelevant_setting": "abc", + } + + @pytest.mark.usefixtures("conn_cache_absent") + @pytest.mark.parametrize( + "vault_container_version", ["0.9.6", "1.3.1", "latest"], indirect=True + ) + def 
test_minion_can_authenticate(self, vault_salt_call_cli): + """ + Test that the minion can run queries against Vault. + The master impersonating the minion is already tested in the fixture setup + (ext_pillar). + """ + ret = vault_salt_call_cli.run("vault.read_secret", "secret/path/foo") + assert ret.returncode == 0 + assert ret.data + assert ret.data.get("success") == "yeehaaw" + + @pytest.mark.usefixtures("conn_cache_absent") + @pytest.mark.parametrize( + "vault_container_version", ["0.9.6", "1.3.1", "latest"], indirect=True + ) + def test_minion_token_policies_are_assigned_as_expected( + self, vault_salt_call_cli, vault_salt_minion + ): + """ + Test that issued tokens have the expected policies. + """ + ret = vault_salt_call_cli.run("vault.query", "GET", "auth/token/lookup-self") + assert ret.returncode == 0 + assert ret.data + assert set(ret.data["data"]["policies"]) == { + "default", + "salt_minion", + f"salt_minion_{vault_salt_minion.id}", + "salt_role_dev", + "salt_role_web", + } + + @pytest.mark.parametrize("vault_container_version", ["latest"], indirect=True) + @pytest.mark.usefixtures("cache_auth_outdated") + def test_auth_method_switch_does_not_break_minion_auth( + self, vault_salt_call_cli, caplog + ): + """ + Test that after a master configuration switch from another authentication method, + minions with cached configuration flush it and request a new one. + """ + ret = vault_salt_call_cli.run("vault.read_secret", "secret/path/foo") + assert ret.returncode == 0 + assert ret.data + assert ret.data.get("success") == "yeehaaw" + assert "Master returned error and requested cache expiration" in caplog.text + + @pytest.mark.parametrize("vault_container_version", ["latest"], indirect=True) + @pytest.mark.parametrize("ckey", ["config", "__token"]) + def test_cache_is_used_on_the_minion( + self, ckey, vault_salt_call_cli, minion_conn_cachedir + ): + """ + Test that remote configuration and tokens are written to cache. 
+ """ + cache = minion_conn_cachedir + if ckey == "__token": + cache = cache / "session" + if not cache.exists(): + cache.mkdir() + if f"{ckey}.p" not in os.listdir(cache): + ret = vault_salt_call_cli.run("vault.read_secret", "secret/path/foo") + assert ret.returncode == 0 + assert f"{ckey}.p" in os.listdir(cache) + + @pytest.mark.parametrize("vault_container_version", ["latest"], indirect=True) + @pytest.mark.parametrize("ckey", ["config", "__token"]) + def test_cache_is_used_on_the_impersonating_master( + self, ckey, vault_salt_run_cli, vault_salt_minion + ): + """ + Test that remote configuration and tokens are written to cache when a + master is impersonating a minion during pillar rendering. + """ + cbank = f"minions/{vault_salt_minion.id}/vault/connection" + if ckey == "__token": + cbank += "/session" + ret = vault_salt_run_cli.run("cache.list", cbank) + assert ret.returncode == 0 + assert ret.data + assert ckey in ret.data + + @pytest.mark.usefixtures("conn_cache_absent") + @pytest.mark.parametrize("vault_container_version", ["latest"], indirect=True) + def test_issue_param_overrides_require_setting(self, overriding_vault_salt_minion): + """ + Test that minion overrides of issue params are not set by default + and require setting ``issue:allow_minion_override_params``. 
+ """ + ret = overriding_vault_salt_minion.salt_call_cli().run( + "vault.query", "GET", "auth/token/lookup-self" + ) + assert ret.returncode == 0 + assert ret.data + assert ret.data["data"]["explicit_max_ttl"] != 1337 + assert ret.data["data"]["num_uses"] != 41 # one use is consumed by the lookup + + +@pytest.mark.usefixtures("vault_testing_values") +@pytest.mark.parametrize("vault_container_version", ["latest"], indirect=True) +class TestAppRoleIssuanceWithoutSecretId: + @pytest.fixture(scope="class") + def vault_master_config(self, vault_port): + return { + "open_mode": True, + "peer_run": { + ".*": [ + "vault.get_config", + "vault.generate_secret_id", + ], + }, + "vault": { + "auth": {"token": "testsecret"}, + "cache": { + "backend": "file", + }, + "issue": { + "type": "approle", + "approle": { + "params": { + "bind_secret_id": False, + # "at least one constraint should be enabled on the role" + # this should be quite secure :) + "token_bound_cidrs": "0.0.0.0/0", + "token_explicit_max_ttl": 1800, + "token_num_uses": 0, + } + }, + }, + "policies": { + "assign": { + "salt_minion", + "salt_minion_{minion}", + }, + }, + "server": { + "url": f"http://127.0.0.1:{vault_port}", + }, + }, + } + + @pytest.mark.usefixtures("conn_cache_absent") + def test_minion_can_authenticate(self, vault_salt_call_cli, caplog): + """ + Test that the minion can run queries against Vault. + The master impersonating the minion is already tested in the fixture setup + (ext_pillar). 
+ """ + ret = vault_salt_call_cli.run("vault.read_secret", "secret/path/foo") + assert ret.returncode == 0 + assert ret.data + assert ret.data.get("success") == "yeehaaw" + assert "Minion AppRole does not require a secret ID" not in caplog.text + + +@pytest.mark.usefixtures("vault_testing_values") +@pytest.mark.parametrize("vault_container_version", ["latest"], indirect=True) +class TestOldConfigSyntax: + @pytest.fixture(scope="class") + def vault_master_config(self, pillar_state_tree, vault_port): + return { + "pillar_roots": {"base": [str(pillar_state_tree)]}, + "open_mode": True, + "peer_run": { + ".*": [ + "vault.generate_token", + ], + }, + "vault": { + "auth": { + "allow_minion_override": True, + "token": "testsecret", + "token_backend": "file", + "ttl": 90, + "uses": 3, + }, + "policies": [ + "salt_minion", + "salt_minion_{minion}", + ], + "url": f"http://127.0.0.1:{vault_port}", + }, + "minion_data_cache": True, + } + + @pytest.fixture(scope="class") + def overriding_vault_salt_minion(self, vault_salt_master): + assert vault_salt_master.is_running() + factory = vault_salt_master.salt_minion_daemon( + random_string("vault-minion", uppercase=False), + defaults={"open_mode": True, "grains": {}}, + overrides={"vault": {"auth": {"uses": 5, "ttl": 180}}}, + ) + with factory.started(): + # Sync All + salt_call_cli = factory.salt_call_cli() + ret = salt_call_cli.run("saltutil.sync_all", _timeout=120) + assert ret.returncode == 0, ret + yield factory + + @pytest.mark.usefixtures("conn_cache_absent") + def test_minion_can_authenticate(self, vault_salt_call_cli, caplog): + """ + Test that the minion can authenticate, even if the master peer_run + configuration has not been updated. 
+ """ + ret = vault_salt_call_cli.run("vault.read_secret", "secret/path/foo") + assert ret.returncode == 0 + assert ret.data + assert ret.data.get("success") == "yeehaaw" + assert ( + "does the peer runner publish configuration include `vault.get_config`" + in caplog.text + ) + assert "Peer runner return was empty." not in caplog.text + assert "Falling back to vault.generate_token." in caplog.text + assert ( + "Detected minion fallback to old vault.generate_token peer run function" + in caplog.text + ) + + @pytest.mark.usefixtures("conn_cache_absent") + def test_token_is_configured_as_expected( + self, vault_salt_call_cli, vault_salt_minion + ): + """ + Test that issued tokens have the expected parameters. + """ + ret = vault_salt_call_cli.run("vault.query", "GET", "auth/token/lookup-self") + assert ret.returncode == 0 + assert ret.data + assert ret.data["data"]["explicit_max_ttl"] == 90 + assert ret.data["data"]["num_uses"] == 2 # one use is consumed by the lookup + assert set(ret.data["data"]["policies"]) == { + "default", + "salt_minion", + f"salt_minion_{vault_salt_minion.id}", + } + + @pytest.mark.usefixtures("conn_cache_absent") + def test_issue_param_overrides_work(self, overriding_vault_salt_minion): + """ + Test that minion overrides of issue params work for the old configuration. 
+ """ + ret = overriding_vault_salt_minion.salt_call_cli().run( + "vault.query", "GET", "auth/token/lookup-self" + ) + assert ret.returncode == 0 + assert ret.data + assert ret.data["data"]["explicit_max_ttl"] == 180 + assert ret.data["data"]["num_uses"] == 4 # one use is consumed by the lookup + + +@pytest.mark.usefixtures("vault_testing_values") +class TestMinionLocal: + @pytest.fixture(scope="class") + def vault_master_config(self): + return {"open_mode": True} + + @pytest.fixture(scope="class") + def vault_salt_minion(self, vault_salt_master, vault_port): + assert vault_salt_master.is_running() + factory = vault_salt_master.salt_minion_daemon( + random_string("vault-minion", uppercase=False), + defaults={ + "open_mode": True, + "vault": { + "auth": {"token": "testsecret"}, + "cache": { + "backend": "file", + }, + "server": { + "url": f"http://127.0.0.1:{vault_port}", + }, + }, + "grains": {}, + }, + ) + with factory.started(): + # Sync All + salt_call_cli = factory.salt_call_cli() + ret = salt_call_cli.run("saltutil.sync_all", _timeout=120) + assert ret.returncode == 0, ret + yield factory + + def test_minion_can_authenticate(self, vault_salt_call_cli): + """ + Test that salt-call --local works with the Vault module. 
+ Issue #58580 + """ + ret = vault_salt_call_cli.run("--local", "vault.read_secret", "secret/path/foo") + assert ret.returncode == 0 + assert ret.data + assert ret.data.get("success") == "yeehaaw" diff --git a/tests/pytests/integration/sdb/conftest.py b/tests/pytests/integration/sdb/conftest.py index a3e9b495972f..3abefb2bf78a 100644 --- a/tests/pytests/integration/sdb/conftest.py +++ b/tests/pytests/integration/sdb/conftest.py @@ -11,7 +11,6 @@ def pillar_tree(salt_master, salt_minion): salt_minion.id ) sdb_pillar_file = """ - test_vault_pillar_sdb: sdb://sdbvault/secret/test/test_pillar_sdb/foo test_etcd_pillar_sdb: sdb://sdbetcd/secret/test/test_pillar_sdb/foo """ top_tempfile = salt_master.pillar_tree.base.temp_file("top.sls", top_file) diff --git a/tests/pytests/integration/sdb/test_vault.py b/tests/pytests/integration/sdb/test_vault.py index c70c251c2a93..10c0fbf40345 100644 --- a/tests/pytests/integration/sdb/test_vault.py +++ b/tests/pytests/integration/sdb/test_vault.py @@ -1,17 +1,19 @@ """ Integration tests for the vault modules """ -import json import logging -import subprocess -import time import pytest -from pytestshellutils.utils.processes import ProcessResult +from saltfactories.utils import random_string -import salt.utils.path -from tests.support.helpers import PatchedEnviron -from tests.support.runtests import RUNTIME_VARS +# pylint: disable=unused-import +from tests.support.pytest.vault import ( + vault_container_version, + vault_delete_secret, + vault_environ, + vault_list_secrets, + vault_write_secret, +) log = logging.getLogger(__name__) @@ -19,237 +21,102 @@ pytestmark = [ pytest.mark.slow_test, pytest.mark.skip_if_binaries_missing("dockerd", "vault", "getent"), + pytest.mark.usefixtures("vault_container_version"), ] -@pytest.fixture(scope="module") -def patched_environ(vault_port): - with PatchedEnviron(VAULT_ADDR="http://127.0.0.1:{}".format(vault_port)): +@pytest.fixture(scope="class") +def pillar_tree(vault_salt_master, 
vault_salt_minion): + top_file = f""" + base: + '{vault_salt_minion.id}': + - sdb + """ + sdb_pillar_file = """ + test_vault_pillar_sdb: sdb://sdbvault/secret/test/test_pillar_sdb/foo + """ + top_tempfile = vault_salt_master.pillar_tree.base.temp_file("top.sls", top_file) + sdb_tempfile = vault_salt_master.pillar_tree.base.temp_file( + "sdb.sls", sdb_pillar_file + ) + + with top_tempfile, sdb_tempfile: yield -def vault_container_version_id(value): - return "vault=={}".format(value) +@pytest.fixture(scope="class") +def vault_salt_master(salt_factories, vault_port, vault_master_config): + factory = salt_factories.salt_master_daemon( + "vault-sdbmaster", defaults=vault_master_config + ) + with factory.started(): + yield factory -@pytest.fixture( - scope="module", - autouse=True, - params=["0.9.6", "1.3.1", "latest"], - ids=vault_container_version_id, -) -def vault_container_version(request, salt_factories, vault_port, patched_environ): - vault_version = request.param - vault_binary = salt.utils.path.which("vault") - config = { - "backend": {"file": {"path": "/vault/file"}}, - "default_lease_ttl": "168h", - "max_lease_ttl": "720h", - } - factory = salt_factories.get_container( - "vault", - "ghcr.io/saltstack/salt-ci-containers/vault:{}".format(vault_version), - check_ports=[vault_port], - container_run_kwargs={ - "ports": {"8200/tcp": vault_port}, - "environment": { - "VAULT_DEV_ROOT_TOKEN_ID": "testsecret", - "VAULT_LOCAL_CONFIG": json.dumps(config), - }, - "cap_add": "IPC_LOCK", - }, - pull_before_start=True, - skip_on_pull_failure=True, - skip_if_docker_client_not_connectable=True, +@pytest.fixture(scope="class") +def vault_salt_minion(vault_salt_master): + assert vault_salt_master.is_running() + factory = vault_salt_master.salt_minion_daemon( + random_string("vault-sdbminion", uppercase=False), + defaults={"open_mode": True, "grains": {}, "sdbvault": {"driver": "vault"}}, ) - with factory.started() as factory: - attempts = 0 - while attempts < 3: - attempts += 1 - 
time.sleep(1) - proc = subprocess.run( - [vault_binary, "login", "token=testsecret"], - check=False, - stdout=subprocess.PIPE, - stderr=subprocess.PIPE, - universal_newlines=True, - ) - if proc.returncode == 0: - break - ret = ProcessResult( - returncode=proc.returncode, - stdout=proc.stdout, - stderr=proc.stderr, - cmdline=proc.args, - ) - log.debug("Failed to authenticate against vault:\n%s", ret) - time.sleep(4) - else: - pytest.fail("Failed to login to vault") - - proc = subprocess.run( - [ - vault_binary, - "policy", - "write", - "testpolicy", - "{}/vault.hcl".format(RUNTIME_VARS.FILES), - ], - check=False, - stdout=subprocess.PIPE, - stderr=subprocess.PIPE, - universal_newlines=True, - ) - if proc.returncode != 0: - ret = ProcessResult( - returncode=proc.returncode, - stdout=proc.stdout, - stderr=proc.stderr, - cmdline=proc.args, - ) - log.debug("Failed to assign policy to vault:\n%s", ret) - pytest.fail("unable to assign policy to vault") - if vault_version in ("1.3.1", "latest"): - proc = subprocess.run( - [vault_binary, "secrets", "enable", "kv-v2"], - check=False, - stdout=subprocess.PIPE, - stderr=subprocess.PIPE, - universal_newlines=True, - ) - ret = ProcessResult( - returncode=proc.returncode, - stdout=proc.stdout, - stderr=proc.stderr, - cmdline=proc.args, - ) - if proc.returncode != 0: - log.debug("Failed to enable kv-v2:\n%s", ret) - pytest.fail("Could not enable kv-v2") - - if "path is already in use at kv-v2/" in proc.stdout: - pass - elif "Success" in proc.stdout: - pass - else: - log.debug("Failed to enable kv-v2:\n%s", ret) - pytest.fail("Could not enable kv-v2 {}".format(proc.stdout)) - if vault_version == "latest": - proc = subprocess.run( - [ - vault_binary, - "secrets", - "enable", - "-version=2", - "-path=salt/", - "kv", - ], - check=False, - stdout=subprocess.PIPE, - stderr=subprocess.PIPE, - universal_newlines=True, - ) - ret = ProcessResult( - returncode=proc.returncode, - stdout=proc.stdout, - stderr=proc.stderr, - cmdline=proc.args, 
- ) - if proc.returncode != 0: - log.debug("Failed to enable kv-v2:\n%s", ret) - pytest.fail("Could not enable kv-v2") - - if "path is already in use at kv-v2/" in proc.stdout: - pass - elif "Success" in proc.stdout: - proc = subprocess.run( - [ - vault_binary, - "kv", - "put", - "salt/user1", - "password=p4ssw0rd", - "desc=test user", - ], - check=False, - stdout=subprocess.PIPE, - stderr=subprocess.PIPE, - universal_newlines=True, - ) - ret = ProcessResult( - returncode=proc.returncode, - stdout=proc.stdout, - stderr=proc.stderr, - cmdline=proc.args, - ) - if proc.returncode != 0: - log.debug("Failed to enable kv-v2:\n%s", ret) - pytest.fail("Could not enable kv-v2") - if "path is already in use at kv-v2/" in proc.stdout: - pass - elif "created_time" in proc.stdout: - proc = subprocess.run( - [ - vault_binary, - "kv", - "put", - "salt/user/user1", - "password=p4ssw0rd", - "desc=test user", - ], - check=False, - stdout=subprocess.PIPE, - stderr=subprocess.PIPE, - universal_newlines=True, - ) - ret = ProcessResult( - returncode=proc.returncode, - stdout=proc.stdout, - stderr=proc.stderr, - cmdline=proc.args, - ) - if proc.returncode != 0: - log.debug("Failed to enable kv-v2:\n%s", ret) - pytest.fail("Could not enable kv-v2") - - if "path is already in use at kv-v2/" in proc.stdout: - pass - elif "created_time" in proc.stdout: - proc = subprocess.run( - [vault_binary, "kv", "get", "salt/user1"], - check=False, - stdout=subprocess.PIPE, - stderr=subprocess.PIPE, - universal_newlines=True, - ) - ret = ProcessResult( - returncode=proc.returncode, - stdout=proc.stdout, - stderr=proc.stderr, - cmdline=proc.args, - ) - - else: - log.debug("Failed to enable kv-v2:\n%s", ret) - pytest.fail("Could not enable kv-v2 {}".format(proc.stdout)) - yield vault_version - - -def test_sdb(salt_call_cli): + with factory.started(): + # Sync All + salt_call_cli = factory.salt_call_cli() + ret = salt_call_cli.run("saltutil.sync_all", _timeout=120) + assert ret.returncode == 0, ret + yield 
factory + + +@pytest.fixture(scope="class") +def vault_salt_call_cli(vault_salt_minion): + return vault_salt_minion.salt_call_cli() + + +@pytest.fixture(scope="class") +def vault_salt_run_cli(vault_salt_master): + return vault_salt_master.salt_run_cli() + + +@pytest.fixture +def kv_root_dual_item(vault_container_version): + if vault_container_version == "latest": + vault_write_secret("salt/user1", password="p4ssw0rd", desc="test user") + vault_write_secret("salt/user/user1", password="p4ssw0rd", desc="test user") + yield + if vault_container_version == "latest": + vault_delete_secret("salt/user1") + vault_delete_secret("salt/user/user1") + + +@pytest.mark.parametrize("vault_container_version", ["1.3.1", "latest"], indirect=True) +def test_sdb_kv_kvv2_path_local(salt_call_cli, vault_container_version): ret = salt_call_cli.run( - "sdb.set", uri="sdb://sdbvault/secret/test/test_sdb/foo", value="bar" + "--local", + "sdb.set", + uri="sdb://sdbvault/kv-v2/test/test_sdb_local/foo", + value="local", ) assert ret.returncode == 0 assert ret.data is True - ret = salt_call_cli.run("sdb.get", uri="sdb://sdbvault/secret/test/test_sdb/foo") - assert ret.returncode == 0 + ret = salt_call_cli.run( + "--local", "sdb.get", "sdb://sdbvault/kv-v2/test/test_sdb_local/foo" + ) assert ret.data - assert ret.data == "bar" + assert ret.data == "local" + + +@pytest.mark.usefixtures("kv_root_dual_item") +@pytest.mark.parametrize("vault_container_version", ["latest"], indirect=True) +def test_sdb_kv_dual_item(salt_call_cli, vault_container_version): + ret = salt_call_cli.run("--local", "sdb.get", "sdb://sdbvault/salt/data/user1") + assert ret.data + assert ret.data == {"desc": "test user", "password": "p4ssw0rd"} def test_sdb_runner(salt_run_cli): ret = salt_run_cli.run( - "sdb.set", uri="sdb://sdbvault/secret/test/test_sdb_runner/foo", value="bar" + "sdb.set", uri="sdb://sdbvault/secret/test/test_sdb_runner/foo", value="runner" ) assert ret.returncode == 0 assert ret.data is True @@ -258,40 
+125,145 @@ def test_sdb_runner(salt_run_cli): ) assert ret.returncode == 0 assert ret.stdout - assert ret.stdout == "bar" + assert ret.stdout == "runner" -def test_config(salt_call_cli, pillar_tree): - ret = salt_call_cli.run( - "sdb.set", uri="sdb://sdbvault/secret/test/test_pillar_sdb/foo", value="bar" - ) - assert ret.returncode == 0 - assert ret.data is True - ret = salt_call_cli.run("config.get", "test_vault_pillar_sdb") - assert ret.returncode == 0 - assert ret.data - assert ret.data == "bar" +@pytest.mark.usefixtures("pillar_tree") +class TestSDB: + @pytest.fixture(scope="class") + def vault_master_config(self, vault_port): + return { + "open_mode": True, + "peer_run": { + ".*": [ + "vault.get_config", + "vault.generate_new_token", + ], + }, + "vault": { + "auth": { + "token": "testsecret", + }, + "issue": { + "token": { + "params": { + "num_uses": 0, + } + } + }, + "policies": { + "assign": [ + "salt_minion", + ] + }, + "server": { + "url": f"http://127.0.0.1:{vault_port}", + }, + }, + "minion_data_cache": True, + } + def test_sdb(self, vault_salt_call_cli): + ret = vault_salt_call_cli.run( + "sdb.set", uri="sdb://sdbvault/secret/test/test_sdb/foo", value="bar" + ) + assert ret.returncode == 0 + assert ret.data is True + ret = vault_salt_call_cli.run( + "sdb.get", uri="sdb://sdbvault/secret/test/test_sdb/foo" + ) + assert ret.returncode == 0 + assert ret.data + assert ret.data == "bar" -def test_sdb_kv2_kvv2_path_local(salt_call_cli, vault_container_version): - if vault_container_version not in ["1.3.1", "latest"]: - pytest.skip("Test not applicable to vault {}".format(vault_container_version)) + def test_config(self, vault_salt_call_cli): + ret = vault_salt_call_cli.run( + "sdb.set", uri="sdb://sdbvault/secret/test/test_pillar_sdb/foo", value="baz" + ) + assert ret.returncode == 0 + assert ret.data is True + ret = vault_salt_call_cli.run("config.get", "test_vault_pillar_sdb") + assert ret.returncode == 0 + assert ret.data + assert ret.data == "baz" - ret 
= salt_call_cli.run( - "sdb.set", uri="sdb://sdbvault/kv-v2/test/test_sdb/foo", value="bar" - ) - assert ret.returncode == 0 - assert ret.data is True - ret = salt_call_cli.run( - "--local", "sdb.get", "sdb://sdbvault/kv-v2/test/test_sdb/foo" - ) - assert ret.data - assert ret.data == "bar" +class TestGetOrSetHashSingleUseToken: + @pytest.fixture(scope="class") + def vault_master_config(self, vault_port): + return { + "open_mode": True, + "peer_run": { + ".*": [ + "vault.get_config", + "vault.generate_new_token", + ], + }, + "vault": { + "auth": {"token": "testsecret"}, + "cache": { + "backend": "file", + }, + "issue": { + "type": "token", + "token": { + "params": { + "num_uses": 1, + } + }, + }, + "policies": { + "assign": [ + "salt_minion", + ], + }, + "server": { + "url": f"http://127.0.0.1:{vault_port}", + }, + }, + "minion_data_cache": True, + } -def test_sdb_kv_dual_item(salt_call_cli, vault_container_version): - if vault_container_version not in ["latest"]: - pytest.skip("Test not applicable to vault {}".format(vault_container_version)) - ret = salt_call_cli.run("--local", "sdb.get", "sdb://sdbvault/salt/data/user1") - assert ret.data - assert ret.data == {"desc": "test user", "password": "p4ssw0rd"} + @pytest.fixture + def get_or_set_absent(self): + secret_path = "secret/test" + secret_name = "sdb_get_or_set_hash" + ret = vault_list_secrets(secret_path) + if secret_name in ret: + vault_delete_secret(f"{secret_path}/{secret_name}") + ret = vault_list_secrets(secret_path) + assert secret_name not in ret + try: + yield + finally: + ret = vault_list_secrets(secret_path) + if secret_name in ret: + vault_delete_secret(f"{secret_path}/{secret_name}") + + @pytest.mark.usefixtures("get_or_set_absent") + @pytest.mark.parametrize( + "vault_container_version", ["1.3.1", "latest"], indirect=True + ) + def test_sdb_get_or_set_hash_single_use_token(self, vault_salt_call_cli): + """ + Test that sdb.get_or_set_hash works with uses=1. 
+ This fails for versions that do not have the sys/internal/ui/mounts/:path + endpoint (<0.10.0) because the path metadata lookup consumes a token use there. + Issue #60779 + """ + ret = vault_salt_call_cli.run( + "sdb.get_or_set_hash", + "sdb://sdbvault/secret/test/sdb_get_or_set_hash/foo", + 10, + ) + assert ret.returncode == 0 + result = ret.data + assert result + ret = vault_salt_call_cli.run( + "sdb.get_or_set_hash", + "sdb://sdbvault/secret/test/sdb_get_or_set_hash/foo", + 10, + ) + assert ret.returncode == 0 + assert ret.data + assert ret.data == result diff --git a/tests/pytests/unit/modules/test_vault.py b/tests/pytests/unit/modules/test_vault.py new file mode 100644 index 000000000000..8b2baa4b8e61 --- /dev/null +++ b/tests/pytests/unit/modules/test_vault.py @@ -0,0 +1,442 @@ +import logging + +import pytest + +import salt.exceptions +import salt.modules.vault as vault +import salt.utils.vault as vaultutil +from tests.support.mock import ANY, patch + + +@pytest.fixture +def configure_loader_modules(): + return { + vault: { + "__grains__": {"id": "test-minion"}, + } + } + + +@pytest.fixture +def data(): + return {"foo": "bar"} + + +@pytest.fixture +def policy_response(): + return { + "name": "test-policy", + "rules": 'path "secret/*"\\n{\\n capabilities = ["read"]\\n}', + } + + +@pytest.fixture +def policies_list_response(): + return { + "policies": ["default", "root", "test-policy"], + } + + +@pytest.fixture +def data_list(): + return ["foo"] + + +@pytest.fixture +def read_kv(data): + with patch("salt.utils.vault.read_kv", autospec=True) as read: + read.return_value = data + yield read + + +@pytest.fixture +def list_kv(data_list): + with patch("salt.utils.vault.list_kv", autospec=True) as list: + list.return_value = data_list + yield list + + +@pytest.fixture +def read_kv_not_found(read_kv): + read_kv.side_effect = vaultutil.VaultNotFoundError + yield read_kv + + +@pytest.fixture +def list_kv_not_found(list_kv): + list_kv.side_effect = 
vaultutil.VaultNotFoundError + yield list_kv + + +@pytest.fixture +def write_kv(): + with patch("salt.utils.vault.write_kv", autospec=True) as write: + yield write + + +@pytest.fixture +def write_kv_err(write_kv): + write_kv.side_effect = vaultutil.VaultPermissionDeniedError("damn") + yield write_kv + + +@pytest.fixture +def patch_kv(): + with patch("salt.utils.vault.patch_kv", autospec=True) as patch_kv: + yield patch_kv + + +@pytest.fixture +def patch_kv_err(patch_kv): + patch_kv.side_effect = vaultutil.VaultPermissionDeniedError("damn") + yield patch_kv + + +@pytest.fixture +def delete_kv(): + with patch("salt.utils.vault.delete_kv", autospec=True) as delete_kv: + yield delete_kv + + +@pytest.fixture +def delete_kv_err(delete_kv): + delete_kv.side_effect = vaultutil.VaultPermissionDeniedError("damn") + yield delete_kv + + +@pytest.fixture +def destroy_kv(): + with patch("salt.utils.vault.destroy_kv", autospec=True) as destroy_kv: + yield destroy_kv + + +@pytest.fixture +def destroy_kv_err(destroy_kv): + destroy_kv.side_effect = vaultutil.VaultPermissionDeniedError("damn") + yield destroy_kv + + +@pytest.fixture +def query(): + with patch("salt.utils.vault.query", autospec=True) as query: + yield query + + +@pytest.mark.parametrize("key,expected", [(None, {"foo": "bar"}), ("foo", "bar")]) +def test_read_secret(read_kv, key, expected): + """ + Ensure read_secret works as expected without and with specified key. + KV v1/2 is handled in the utils module. + """ + res = vault.read_secret("some/path", key=key) + assert res == expected + + +@pytest.mark.usefixtures("read_kv_not_found", "list_kv_not_found") +@pytest.mark.parametrize("func", ["read_secret", "list_secrets"]) +def test_read_list_secret_with_default(func): + """ + Ensure read_secret and list_secrets with defaults set return those + if the path was not found. 
+ """ + tgt = getattr(vault, func) + res = tgt("some/path", default=["f"]) + assert res == ["f"] + + +@pytest.mark.usefixtures("read_kv_not_found", "list_kv_not_found") +@pytest.mark.parametrize("func", ["read_secret", "list_secrets"]) +def test_read_list_secret_without_default(func): + """ + Ensure read_secret and list_secrets without defaults set raise + a CommandExecutionError when the path is not found. + """ + tgt = getattr(vault, func) + with pytest.raises( + salt.exceptions.CommandExecutionError, match=".*VaultNotFoundError.*" + ): + tgt("some/path") + + +@pytest.mark.usefixtures("list_kv") +@pytest.mark.parametrize( + "keys_only,expected", + [ + (False, {"keys": ["foo"]}), + (True, ["foo"]), + ], +) +def test_list_secrets(keys_only, expected): + """ + Ensure list_secrets works as expected. keys_only=False is default to + stay backwards-compatible. There should not be a reason to have the + function return a dict with a single predictable key otherwise. + """ + res = vault.list_secrets("some/path", keys_only=keys_only) + assert res == expected + + +def test_write_secret(data, write_kv): + """ + Ensure write_secret parses kwargs as expected + """ + path = "secret/some/path" + res = vault.write_secret(path, **data) + assert res + write_kv.assert_called_once_with(path, data, opts=ANY, context=ANY) + + +@pytest.mark.usefixtures("write_kv_err") +def test_write_secret_err(data, caplog): + """ + Ensure write_secret handles exceptions as expected + """ + with caplog.at_level(logging.ERROR): + res = vault.write_secret("secret/some/path", **data) + assert not res + assert ( + "Failed to write secret! 
VaultPermissionDeniedError: damn" + in caplog.messages + ) + + +def test_write_raw(data, write_kv): + """ + Ensure write_secret works as expected + """ + path = "secret/some/path" + res = vault.write_raw(path, data) + assert res + write_kv.assert_called_once_with(path, data, opts=ANY, context=ANY) + + +@pytest.mark.usefixtures("write_kv_err") +def test_write_raw_err(data, caplog): + """ + Ensure write_raw handles exceptions as expected + """ + with caplog.at_level(logging.ERROR): + res = vault.write_raw("secret/some/path", data) + assert not res + assert ( + "Failed to write secret! VaultPermissionDeniedError: damn" + in caplog.messages + ) + + +def test_patch_secret(data, patch_kv): + """ + Ensure patch_secret parses kwargs as expected + """ + path = "secret/some/path" + res = vault.patch_secret(path, **data) + assert res + patch_kv.assert_called_once_with(path, data, opts=ANY, context=ANY) + + +@pytest.mark.usefixtures("patch_kv_err") +def test_patch_secret_err(data, caplog): + """ + Ensure patch_secret handles exceptions as expected + """ + with caplog.at_level(logging.ERROR): + res = vault.patch_secret("secret/some/path", **data) + assert not res + assert ( + "Failed to patch secret! VaultPermissionDeniedError: damn" + in caplog.messages + ) + + +@pytest.mark.parametrize("args", [[], [1, 2]]) +def test_delete_secret(delete_kv, args): + """ + Ensure delete_secret works as expected + """ + path = "secret/some/path" + res = vault.delete_secret(path, *args) + assert res + delete_kv.assert_called_once_with( + path, opts=ANY, context=ANY, versions=args or None + ) + + +@pytest.mark.usefixtures("delete_kv_err") +@pytest.mark.parametrize("args", [[], [1, 2]]) +def test_delete_secret_err(args, caplog): + """ + Ensure delete_secret handles exceptions as expected + """ + with caplog.at_level(logging.ERROR): + res = vault.delete_secret("secret/some/path", *args) + assert not res + assert ( + "Failed to delete secret! 
VaultPermissionDeniedError: damn" + in caplog.messages + ) + + +@pytest.mark.parametrize("args", [[1], [1, 2]]) +def test_destroy_secret(destroy_kv, args): + """ + Ensure destroy_secret works as expected + """ + path = "secret/some/path" + res = vault.destroy_secret(path, *args) + assert res + destroy_kv.assert_called_once_with(path, args, opts=ANY, context=ANY) + + +@pytest.mark.usefixtures("destroy_kv") +def test_destroy_secret_requires_version(): + """ + Ensure destroy_secret requires at least one version + """ + with pytest.raises( + salt.exceptions.SaltInvocationError, match=".*at least one version.*" + ): + vault.destroy_secret("secret/some/path") + + +@pytest.mark.usefixtures("destroy_kv_err") +@pytest.mark.parametrize("args", [[1], [1, 2]]) +def test_destroy_secret_err(caplog, args): + """ + Ensure destroy_secret handles exceptions as expected + """ + with caplog.at_level(logging.ERROR): + res = vault.destroy_secret("secret/some/path", *args) + assert not res + assert ( + "Failed to destroy secret! 
VaultPermissionDeniedError: damn" + in caplog.messages + ) + + +@pytest.mark.parametrize("connection_only", [True, False]) +def test_clear_token_cache(connection_only): + """ + Ensure clear_token_cache wraps the utility function properly + """ + with patch("salt.utils.vault.clear_cache") as cache: + vault.clear_token_cache(connection_only=connection_only) + cache.assert_called_once_with(ANY, ANY, connection=connection_only) + + +def test_policy_fetch(query, policy_response): + """ + Ensure policy_fetch returns rules only and calls the API as expected + """ + query.return_value = policy_response + res = vault.policy_fetch("test-policy") + assert res == policy_response["rules"] + query.assert_called_once_with( + "GET", "sys/policy/test-policy", opts=ANY, context=ANY + ) + + +def test_policy_fetch_not_found(query): + """ + Ensure policy_fetch returns None when the policy was not found + """ + query.side_effect = vaultutil.VaultNotFoundError + res = vault.policy_fetch("test-policy") + assert res is None + + +@pytest.mark.parametrize( + "func,args", + [ + ("policy_fetch", []), + ("policy_write", ["rule"]), + ("policy_delete", []), + ("policies_list", None), + ], +) +def test_policy_functions_raise_errors(query, func, args): + """ + Ensure policy functions raise CommandExecutionErrors + """ + query.side_effect = vaultutil.VaultPermissionDeniedError + func = getattr(vault, func) + with pytest.raises( + salt.exceptions.CommandExecutionError, match=".*VaultPermissionDeniedError.*" + ): + if args is None: + func() + else: + func("test-policy", *args) + + +def test_policy_write(query, policy_response): + """ + Ensure policy_write calls the API as expected + """ + query.return_value = True + res = vault.policy_write("test-policy", policy_response["rules"]) + assert res + query.assert_called_once_with( + "POST", + "sys/policy/test-policy", + opts=ANY, + context=ANY, + payload={"policy": policy_response["rules"]}, + ) + + +def test_policy_delete(query): + """ + Ensure 
policy_delete calls the API as expected + """ + query.return_value = True + res = vault.policy_delete("test-policy") + assert res + query.assert_called_once_with( + "DELETE", "sys/policy/test-policy", opts=ANY, context=ANY + ) + + +def test_policy_delete_handles_not_found(query): + """ + Ensure policy_delete returns False instead of raising CommandExecutionError + when a policy was absent already. + """ + query.side_effect = vaultutil.VaultNotFoundError + res = vault.policy_delete("test-policy") + assert not res + + +def test_policies_list(query, policies_list_response): + """ + Ensure policies_list returns policy list only and calls the API as expected + """ + query.return_value = policies_list_response + res = vault.policies_list() + assert res == policies_list_response["policies"] + query.assert_called_once_with("GET", "sys/policy", opts=ANY, context=ANY) + + +@pytest.mark.parametrize("method", ["POST", "DELETE"]) +@pytest.mark.parametrize("payload", [None, {"data": {"foo": "bar"}}]) +def test_query(query, method, payload): + """ + Ensure query wraps the utility function properly + """ + query.return_value = True + endpoint = "test/endpoint" + res = vault.query(method, endpoint, payload=payload) + assert res + query.assert_called_once_with( + method, endpoint, opts=ANY, context=ANY, payload=payload + ) + + +def test_query_raises_errors(query): + """ + Ensure query raises CommandExecutionErrors + """ + query.side_effect = vaultutil.VaultPermissionDeniedError + with pytest.raises( + salt.exceptions.CommandExecutionError, match=".*VaultPermissionDeniedError.*" + ): + vault.query("GET", "test/endpoint") diff --git a/tests/pytests/unit/pillar/test_vault.py b/tests/pytests/unit/pillar/test_vault.py index 77f56421c34e..a2433a3b6fbc 100644 --- a/tests/pytests/unit/pillar/test_vault.py +++ b/tests/pytests/unit/pillar/test_vault.py @@ -1,11 +1,10 @@ -import copy import logging import pytest -from requests.exceptions import HTTPError import salt.pillar.vault as vault -from 
tests.support.mock import Mock, patch +import salt.utils.vault as vaultutil +from tests.support.mock import ANY, Mock, patch @pytest.fixture @@ -22,93 +21,69 @@ def configure_loader_modules(): @pytest.fixture -def vault_kvv1(): - res = Mock(status_code=200) - res.json.return_value = {"data": {"foo": "bar"}} - return Mock(return_value=res) +def data(): + return {"foo": "bar"} @pytest.fixture -def vault_kvv2(): - res = Mock(status_code=200) - res.json.return_value = {"data": {"data": {"foo": "bar"}}, "metadata": {}} - return Mock(return_value=res) +def read_kv(data): + with patch("salt.utils.vault.read_kv", autospec=True) as read: + read.return_value = data + yield read @pytest.fixture -def is_v2_false(): - path = "secret/path" - return {"v2": False, "data": path, "metadata": path, "delete": path, "type": "kv"} +def read_kv_not_found(read_kv): + read_kv.side_effect = vaultutil.VaultNotFoundError @pytest.fixture -def is_v2_true(): +def role_a(): return { - "v2": True, - "data": "secret/data/path", - "metadata": "secret/metadata/path", - "type": "kv", + "from_db": True, + "pass": "hunter2", + "list": ["a", "b"], } -@pytest.mark.parametrize( - "is_v2,vaultkv", [("is_v2_false", "vault_kvv1"), ("is_v2_true", "vault_kvv2")] -) -def test_ext_pillar(is_v2, vaultkv, request): +@pytest.fixture +def role_b(): + return { + "from_web": True, + "pass": "hunter1", + "list": ["c", "d"], + } + + +def test_ext_pillar(read_kv, data): """ - Test ext_pillar functionality for KV v1/2 + Test ext_pillar functionality. KV v1/2 is handled by the utils module. 
""" - is_v2 = request.getfixturevalue(is_v2) - vaultkv = request.getfixturevalue(vaultkv) - with patch.dict( - vault.__utils__, - {"vault.is_v2": Mock(return_value=is_v2), "vault.make_request": vaultkv}, - ): - ext_pillar = vault.ext_pillar("testminion", {}, "path=secret/path") - vaultkv.assert_called_once_with("GET", "v1/" + is_v2["data"]) - assert "foo" in ext_pillar - assert "metadata" not in ext_pillar - assert "data" not in ext_pillar - assert ext_pillar["foo"] == "bar" + ext_pillar = vault.ext_pillar("testminion", {}, "path=secret/path") + read_kv.assert_called_once_with("secret/path", opts=ANY, context=ANY) + assert ext_pillar == data -def test_ext_pillar_not_found(is_v2_false, caplog): +@pytest.mark.usefixtures("read_kv_not_found") +def test_ext_pillar_not_found(caplog): """ Test that HTTP 404 is handled correctly """ - res = Mock(status_code=404, ok=False) - res.raise_for_status.side_effect = HTTPError() with caplog.at_level(logging.INFO): - with patch.dict( - vault.__utils__, - { - "vault.is_v2": Mock(return_value=is_v2_false), - "vault.make_request": Mock(return_value=res), - }, - ): - ext_pillar = vault.ext_pillar("testminion", {}, "path=secret/path") - assert ext_pillar == {} - assert "Vault secret not found for: secret/path" in caplog.messages - - -def test_ext_pillar_nesting_key(is_v2_false, vault_kvv1): + ext_pillar = vault.ext_pillar("testminion", {}, "path=secret/path") + assert ext_pillar == {} + assert "Vault secret not found for: secret/path" in caplog.messages + + +@pytest.mark.usefixtures("read_kv") +def test_ext_pillar_nesting_key(data): """ Test that nesting_key is honored as expected """ - with patch.dict( - vault.__utils__, - { - "vault.is_v2": Mock(return_value=is_v2_false), - "vault.make_request": vault_kvv1, - }, - ): - ext_pillar = vault.ext_pillar( - "testminion", {}, "path=secret/path", nesting_key="baz" - ) - assert "foo" not in ext_pillar - assert "baz" in ext_pillar - assert "foo" in ext_pillar["baz"] - assert 
ext_pillar["baz"]["foo"] == "bar" + ext_pillar = vault.ext_pillar( + "testminion", {}, "path=secret/path", nesting_key="baz" + ) + assert ext_pillar == {"baz": data} @pytest.mark.parametrize( @@ -132,78 +107,52 @@ def test_get_paths(pattern, expected): assert result == expected -def test_ext_pillar_merging(is_v2_false): - """ - Test that patterns that result in multiple paths are merged as expected. - """ - - def make_request(method, resource, *args, **kwargs): - vault_data = { - "v1/salt/roles/db": { - "from_db": True, - "pass": "hunter2", - "list": ["a", "b"], - }, - "v1/salt/roles/web": { - "from_web": True, - "pass": "hunter1", - "list": ["c", "d"], - }, - } - res = Mock(status_code=200, ok=True) - res.json.return_value = {"data": copy.deepcopy(vault_data[resource])} - return res - - cases = [ +@pytest.mark.parametrize( + "first,second,expected", + [ ( - ["salt/roles/db", "salt/roles/web"], + "role_a", + "role_b", {"from_db": True, "from_web": True, "list": ["c", "d"], "pass": "hunter1"}, ), ( - ["salt/roles/web", "salt/roles/db"], + "role_b", + "role_a", {"from_db": True, "from_web": True, "list": ["a", "b"], "pass": "hunter2"}, ), - ] - vaultkv = Mock(side_effect=make_request) - - for expanded_patterns, expected in cases: - with patch.dict( - vault.__utils__, - { - "vault.make_request": vaultkv, - "vault.expand_pattern_lists": Mock(return_value=expanded_patterns), - "vault.is_v2": Mock(return_value=is_v2_false), - }, - ): - ext_pillar = vault.ext_pillar( - "test-minion", - {"roles": ["db", "web"]}, - conf="path=salt/roles/{pillar[roles]}", - merge_strategy="smart", - merge_lists=False, - ) - assert ext_pillar == expected - - -def test_ext_pillar_disabled_during_policy_pillar_rendering(): + ], +) +def test_ext_pillar_merging(read_kv, first, second, expected, request): + """ + Test that patterns that result in multiple paths are merged as expected. 
+ """ + first = request.getfixturevalue(first) + second = request.getfixturevalue(second) + read_kv.side_effect = (first, second) + ext_pillar = vault.ext_pillar( + "test-minion", + {"roles": ["db", "web"]}, + conf="path=salt/roles/{pillar[roles]}", + merge_strategy="smart", + merge_lists=False, + ) + assert ext_pillar == expected + + +def test_ext_pillar_disabled_during_pillar_rendering(read_kv): """ Ensure ext_pillar returns an empty dict when called during pillar template rendering to prevent a cyclic dependency. """ - mock_version = Mock() - mock_vault = Mock() extra = {"_vault_runner_is_compiling_pillar_templates": True} - - with patch.dict( - vault.__utils__, {"vault.make_request": mock_vault, "vault.is_v2": mock_version} - ): - assert {} == vault.ext_pillar( - "test-minion", {}, conf="path=secret/path", extra_minion_data=extra - ) - assert mock_version.call_count == 0 - assert mock_vault.call_count == 0 + res = vault.ext_pillar( + "test-minion", {}, conf="path=secret/path", extra_minion_data=extra + ) + assert res == {} + read_kv.assert_not_called() +@pytest.mark.usefixtures("read_kv") def test_invalid_config(caplog): """ Ensure an empty dict is returned and an error is logged in case diff --git a/tests/pytests/unit/runners/vault/test_app_role_auth.py b/tests/pytests/unit/runners/vault/test_app_role_auth.py deleted file mode 100644 index 241da179a379..000000000000 --- a/tests/pytests/unit/runners/vault/test_app_role_auth.py +++ /dev/null @@ -1,79 +0,0 @@ -""" -Unit tests for the Vault runner -""" - - -import logging - -import pytest - -import salt.runners.vault as vault -from tests.support.mock import ANY, MagicMock, Mock, call, patch - -log = logging.getLogger(__name__) - - -def _mock_json_response(data, status_code=200, reason=""): - """ - Mock helper for http response - """ - response = MagicMock() - response.json = MagicMock(return_value=data) - response.status_code = status_code - response.reason = reason - return Mock(return_value=response) - - 
-@pytest.fixture -def configure_loader_modules(): - sig_valid_mock = patch( - "salt.runners.vault._validate_signature", MagicMock(return_value=None) - ) - token_url_mock = patch( - "salt.runners.vault._get_token_create_url", - MagicMock(return_value="http://fake_url"), - ) - with sig_valid_mock, token_url_mock: - yield { - vault: { - "__opts__": { - "vault": { - "url": "http://127.0.0.1", - "auth": { - "method": "approle", - "role_id": "role", - "secret_id": "secret", - }, - } - } - } - } - - -def test_generate_token(): - """ - Basic test for test_generate_token with approle (two vault calls) - """ - mock = _mock_json_response( - {"auth": {"client_token": "test", "renewable": False, "lease_duration": 0}} - ) - with patch( - "salt.runners.vault._get_policies_cached", - Mock(return_value=["saltstack/minion/test-minion", "saltstack/minions"]), - ), patch("requests.post", mock): - result = vault.generate_token("test-minion", "signature") - log.debug("generate_token result: %s", result) - assert isinstance(result, dict) - assert "error" not in result - assert "token" in result - assert result["token"] == "test" - calls = [ - call( - "http://127.0.0.1/v1/auth/approle/login", - headers=ANY, - json=ANY, - verify=ANY, - ), - call("http://fake_url", headers=ANY, json=ANY, verify=ANY), - ] - mock.assert_has_calls(calls) diff --git a/tests/pytests/unit/runners/vault/test_token_auth.py b/tests/pytests/unit/runners/vault/test_token_auth.py deleted file mode 100644 index 60307dc0955b..000000000000 --- a/tests/pytests/unit/runners/vault/test_token_auth.py +++ /dev/null @@ -1,155 +0,0 @@ -""" -Unit tests for the Vault runner -""" - - -import logging - -import pytest - -import salt.runners.vault as vault -from tests.support.mock import ANY, MagicMock, Mock, patch - -log = logging.getLogger(__name__) - - -def _mock_json_response(data, status_code=200, reason=""): - """ - Mock helper for http response - """ - response = MagicMock() - response.json = MagicMock(return_value=data) - 
response.status_code = status_code - response.reason = reason - return Mock(return_value=response) - - -@pytest.fixture -def configure_loader_modules(): - sig_valid_mock = patch( - "salt.runners.vault._validate_signature", MagicMock(return_value=None) - ) - token_url_mock = patch( - "salt.runners.vault._get_token_create_url", - MagicMock(return_value="http://fake_url"), - ) - cached_policies = patch( - "salt.runners.vault._get_policies_cached", - Mock(return_value=["saltstack/minion/test-minion", "saltstack/minions"]), - ) - with sig_valid_mock, token_url_mock, cached_policies: - yield { - vault: { - "__opts__": { - "vault": { - "url": "http://127.0.0.1", - "auth": { - "token": "test", - "method": "token", - "allow_minion_override": True, - }, - } - } - } - } - - -def test_generate_token(): - """ - Basic tests for test_generate_token: all exits - """ - mock = _mock_json_response( - {"auth": {"client_token": "test", "renewable": False, "lease_duration": 0}} - ) - with patch("requests.post", mock): - result = vault.generate_token("test-minion", "signature") - log.debug("generate_token result: %s", result) - assert isinstance(result, dict) - assert "error" not in result - assert "token" in result - assert result["token"] == "test" - mock.assert_called_with("http://fake_url", headers=ANY, json=ANY, verify=ANY) - - # Test uses - num_uses = 6 - result = vault.generate_token("test-minion", "signature", uses=num_uses) - assert "uses" in result - assert result["uses"] == num_uses - json_request = { - "policies": ["saltstack/minion/test-minion", "saltstack/minions"], - "num_uses": num_uses, - "meta": { - "saltstack-jid": "", - "saltstack-minion": "test-minion", - "saltstack-user": "", - }, - } - mock.assert_called_with( - "http://fake_url", headers=ANY, json=json_request, verify=ANY - ) - - # Test ttl - expected_ttl = "6h" - result = vault.generate_token("test-minion", "signature", ttl=expected_ttl) - assert result["uses"] == 1 - json_request = { - "policies": 
["saltstack/minion/test-minion", "saltstack/minions"], - "num_uses": 1, - "explicit_max_ttl": expected_ttl, - "meta": { - "saltstack-jid": "", - "saltstack-minion": "test-minion", - "saltstack-user": "", - }, - } - mock.assert_called_with( - "http://fake_url", headers=ANY, json=json_request, verify=ANY - ) - - mock = _mock_json_response({}, status_code=403, reason="no reason") - with patch("requests.post", mock): - result = vault.generate_token("test-minion", "signature") - assert isinstance(result, dict) - assert "error" in result - assert result["error"] == "no reason" - - with patch("salt.runners.vault._get_policies_cached", MagicMock(return_value=[])): - result = vault.generate_token("test-minion", "signature") - assert isinstance(result, dict) - assert "error" in result - assert result["error"] == "No policies matched minion" - - with patch( - "requests.post", MagicMock(side_effect=Exception("Test Exception Reason")) - ): - result = vault.generate_token("test-minion", "signature") - assert isinstance(result, dict) - assert "error" in result - assert result["error"] == "Test Exception Reason" - - -def test_generate_token_with_namespace(): - """ - Basic tests for test_generate_token: all exits - """ - mock = _mock_json_response( - {"auth": {"client_token": "test", "renewable": False, "lease_duration": 0}} - ) - supplied_config = {"namespace": "test_namespace"} - with patch("requests.post", mock): - with patch.dict(vault.__opts__["vault"], supplied_config): - result = vault.generate_token("test-minion", "signature") - log.debug("generate_token result: %s", result) - assert isinstance(result, dict) - assert "error" not in result - assert "token" in result - assert result["token"] == "test" - mock.assert_called_with( - "http://fake_url", - headers={ - "X-Vault-Token": "test", - "X-Vault-Namespace": "test_namespace", - }, - json=ANY, - verify=ANY, - ) diff --git a/tests/pytests/unit/runners/vault/test_token_auth_deprecated.py 
b/tests/pytests/unit/runners/vault/test_token_auth_deprecated.py new file mode 100644 index 000000000000..7ea03ffb42b8 --- /dev/null +++ b/tests/pytests/unit/runners/vault/test_token_auth_deprecated.py @@ -0,0 +1,150 @@ +""" +Unit tests for the Vault runner + +This module only tests a deprecated function, see +tests/pytests/unit/runners/test_vault.py for the current tests. +""" + + +import logging + +import pytest + +import salt.runners.vault as vault +import salt.utils.vault as vaultutil +from tests.support.mock import ANY, Mock, patch + +pytestmark = [ + pytest.mark.usefixtures("validate_sig", "policies"), +] + +log = logging.getLogger(__name__) + + +@pytest.fixture +def configure_loader_modules(): + return { + vault: { + "__opts__": { + "vault": { + "url": "http://127.0.0.1", + "auth": { + "token": "test", + "method": "token", + "allow_minion_override": True, + }, + } + } + } + } + + +@pytest.fixture +def auth(): + return { + "auth": { + "client_token": "test", + "renewable": False, + "lease_duration": 0, + } + } + + +@pytest.fixture +def client(auth): + client_mock = Mock(vaultutil.AuthenticatedVaultClient) + client_mock.post.return_value = auth + with patch("salt.runners.vault._get_master_client", Mock(return_value=client_mock)): + yield client_mock + + +@pytest.fixture +def validate_sig(): + with patch( + "salt.runners.vault._validate_signature", autospec=True, return_value=None + ): + yield + + +@pytest.fixture +def policies(): + with patch("salt.runners.vault._get_policies_cached", autospec=True) as policies: + policies.return_value = ["saltstack/minion/test-minion", "saltstack/minions"] + yield policies + + +# Basic tests for test_generate_token: all exits + + +def test_generate_token(client): + result = vault.generate_token("test-minion", "signature") + log.debug("generate_token result: %s", result) + assert isinstance(result, dict) + assert "error" not in result + assert "token" in result + assert result["token"] == "test" + 
client.post.assert_called_with("auth/token/create", payload=ANY, wrap=False) + + +def test_generate_token_uses(client): + # Test uses + num_uses = 6 + result = vault.generate_token("test-minion", "signature", uses=num_uses) + assert "uses" in result + assert result["uses"] == num_uses + json_request = { + "policies": ["saltstack/minion/test-minion", "saltstack/minions"], + "num_uses": num_uses, + "meta": { + "saltstack-jid": "", + "saltstack-minion": "test-minion", + "saltstack-user": "", + }, + } + client.post.assert_called_with( + "auth/token/create", payload=json_request, wrap=False + ) + + +def test_generate_token_ttl(client): + # Test ttl + expected_ttl = "6h" + result = vault.generate_token("test-minion", "signature", ttl=expected_ttl) + assert result["uses"] == 1 + json_request = { + "policies": ["saltstack/minion/test-minion", "saltstack/minions"], + "num_uses": 1, + "explicit_max_ttl": expected_ttl, + "meta": { + "saltstack-jid": "", + "saltstack-minion": "test-minion", + "saltstack-user": "", + }, + } + client.post.assert_called_with( + "auth/token/create", payload=json_request, wrap=False + ) + + +def test_generate_token_permission_denied(client): + client.post.side_effect = vaultutil.VaultPermissionDeniedError("no reason") + result = vault.generate_token("test-minion", "signature") + assert isinstance(result, dict) + assert "error" in result + assert result["error"] == "VaultPermissionDeniedError: no reason" + + +def test_generate_token_exception(client): + client.post.side_effect = Exception("Test Exception Reason") + result = vault.generate_token("test-minion", "signature") + assert isinstance(result, dict) + assert "error" in result + assert result["error"] == "Exception: Test Exception Reason" + + +def test_generate_token_no_matching_policies(client, policies): + policies.return_value = [] + result = vault.generate_token("test-minion", "signature") + assert isinstance(result, dict) + assert "error" in result + assert result["error"] == 
"SaltRunnerError: No policies matched minion." diff --git a/tests/pytests/unit/runners/vault/test_vault.py b/tests/pytests/unit/runners/vault/test_vault.py index 3634e862e8ea..9238cf023aaf 100644 --- a/tests/pytests/unit/runners/vault/test_vault.py +++ b/tests/pytests/unit/runners/vault/test_vault.py @@ -1,21 +1,278 @@ -""" -Unit tests for the Vault runner -""" +import pytest +import salt.exceptions +import salt.runners.vault as vault +import salt.utils.vault as vaultutil +from tests.support.mock import ANY, MagicMock, Mock, patch -import logging -import pytest +@pytest.fixture +def configure_loader_modules(): + return { + vault: { + "__grains__": {"id": "test-master"}, + } + } -import salt.runners.vault as vault -from tests.support.mock import MagicMock, Mock, patch -log = logging.getLogger(__name__) +@pytest.fixture +def default_config(): + return { + "auth": { + "approle_mount": "approle", + "approle_name": "salt-master", + "method": "token", + "token": "test-token", + "role_id": "test-role-id", + "secret_id": None, + }, + "cache": { + "backend": "session", + "config": 3600, + "kv_metadata": "connection", + "secret": "ttl", + }, + "issue": { + "allow_minion_override_params": False, + "type": "token", + "approle": { + "mount": "salt-minions", + "params": { + "bind_secret_id": True, + "secret_id_num_uses": 1, + "secret_id_ttl": 60, + "token_explicit_max_ttl": 9999999999, + "token_num_uses": 1, + }, + }, + "token": { + "role_name": None, + "params": { + "explicit_max_ttl": 9999999999, + "num_uses": 1, + }, + }, + "wrap": "30s", + }, + "issue_params": {}, + "metadata": { + "entity": { + "minion-id": "{minion}", + }, + "secret": { + "saltstack-jid": "{jid}", + "saltstack-minion": "{minion}", + "saltstack-user": "{user}", + }, + }, + "policies": { + "assign": [ + "saltstack/minions", + "saltstack/{minion}", + ], + "cache_time": 60, + "refresh_pillar": None, + }, + "server": { + "url": "http://test-vault:8200", + "namespace": None, + "verify": None, + }, + } 
@pytest.fixture -def configure_loader_modules(): - return {vault: {}} +def token_response(): + return { + "request_id": "0e8c388e-2cb6-bcb2-83b7-625127d568bb", + "lease_id": "", + "lease_duration": 0, + "renewable": False, + "auth": { + "client_token": "test-token", + "renewable": True, + "lease_duration": 9999999999, + "num_uses": 1, + "creation_time": 1661188581, + # "expire_time": 11661188580, + }, + } + + +@pytest.fixture +def secret_id_response(): + return { + "request_id": "0e8c388e-2cb6-bcb2-83b7-625127d568bb", + "lease_id": "", + "lease_duration": 0, + "renewable": False, + "data": { + "secret_id_accessor": "84896a0c-1347-aa90-a4f6-aca8b7558780", + "secret_id": "841771dc-11c9-bbc7-bcac-6a3945a69cd9", + "secret_id_ttl": 60, + }, + } + + +@pytest.fixture +def wrapped_response(): + return { + "request_id": "", + "lease_id": "", + "lease_duration": 0, + "renewable": False, + "data": None, + "warnings": None, + "wrap_info": { + "token": "test-wrapping-token", + "accessor": "test-wrapping-token-accessor", + "ttl": 180, + "creation_time": "2022-09-10T13:37:12.123456789+00:00", + "creation_path": "whatever/not/checked/here", + "wrapped_accessor": "84896a0c-1347-aa90-a4f6-aca8b7558780", + }, + } + + +@pytest.fixture +def token_serialized(token_response): + return { + "client_token": token_response["auth"]["client_token"], + "renewable": token_response["auth"]["renewable"], + "lease_duration": token_response["auth"]["lease_duration"], + "num_uses": token_response["auth"]["num_uses"], + "creation_time": token_response["auth"]["creation_time"], + # "expire_time": token_response["auth"]["expire_time"], + } + + +@pytest.fixture +def secret_id_serialized(secret_id_response): + return { + "secret_id": secret_id_response["data"]["secret_id"], + "secret_id_ttl": secret_id_response["data"]["secret_id_ttl"], + "secret_id_num_uses": 1, + # + creation_time + # + expire_time + } + + +@pytest.fixture +def secret_id_lookup_accessor_response(): + return { + "request_id": 
"28f2f9fb-26c0-6022-4970-baeb6366b085", + "lease_id": "", + "lease_duration": 0, + "renewable": False, + "data": { + "cidr_list": [], + "creation_time": "2022-09-09T15:11:28.358490481+00:00", + "expiration_time": "2022-10-11T15:11:28.358490481+00:00", + "last_updated_time": "2022-09-09T15:11:28.358490481+00:00", + "metadata": {}, + "secret_id_accessor": "0380eb9f-3041-1c1c-234c-fde31a1a1fc1", + "secret_id_num_uses": 1, + "secret_id_ttl": 9999999999, + "token_bound_cidrs": [], + }, + "warnings": None, + } + + +@pytest.fixture +def wrapped_serialized(wrapped_response): + return { + "wrap_info": { + "token": wrapped_response["wrap_info"]["token"], + "ttl": wrapped_response["wrap_info"]["ttl"], + "creation_time": 1662817032, + "creation_path": wrapped_response["wrap_info"]["creation_path"], + }, + } + + +@pytest.fixture +def approle_meta(token_serialized, secret_id_serialized): + return { + "bind_secret_id": True, + "local_secret_ids": False, + "secret_id_bound_cidrs": [], + "secret_id_num_uses": secret_id_serialized["secret_id_num_uses"], + "secret_id_ttl": secret_id_serialized["secret_id_ttl"], + "token_bound_cidrs": [], + "token_explicit_max_ttl": token_serialized["lease_duration"], + "token_max_ttl": 0, + "token_no_default_policy": False, + "token_num_uses": token_serialized["num_uses"], + "token_period": 0, + "token_policies": ["default"], + "token_ttl": 0, + "token_type": "default", + } + + +@pytest.fixture +def entity_lookup_response(): + return { + "data": { + "aliases": [], + "creation_time": "2017-11-13T21:01:33.543497Z", + "direct_group_ids": [], + "group_ids": [], + "id": "043fedec-967d-b2c9-d3af-0c467b04e1fd", + "inherited_group_ids": [], + "last_update_time": "2017-11-13T21:01:33.543497Z", + "merged_entity_ids": None, + "metadata": None, + "name": "test-minion", + "policies": None, + } + } + + +@pytest.fixture +def entity_fetch_response(): + return { + "data": { + "aliases": [], + "creation_time": "2018-09-19T17:20:27.705389973Z", + "direct_group_ids": 
[], + "disabled": False, + "group_ids": [], + "id": "test-entity-id", + "inherited_group_ids": [], + "last_update_time": "2018-09-19T17:20:27.705389973Z", + "merged_entity_ids": None, + "metadata": { + "minion-id": "test-minion", + }, + "name": "salt_minion_test-minion", + "policies": [ + "default", + "saltstack/minions", + "saltstack/minion/test-minion", + ], + } + } + + +@pytest.fixture +def policies_default(): + return ["saltstack/minions", "saltstack/minion/test-minion"] + + +@pytest.fixture +def metadata_secret_default(): + return { + "saltstack-jid": "", + "saltstack-minion": "test-minion", + "saltstack-user": "", + } + + +@pytest.fixture +def metadata_entity_default(): + return {"minion-id": "test-minion"} @pytest.fixture @@ -32,133 +289,1366 @@ def grains(): @pytest.fixture def pillar(): return { + "mixedcase": "UP-low-UP", "role": "test", } @pytest.fixture -def expand_pattern_lists(): - with patch.dict( - vault.__utils__, +def client(): + with patch("salt.runners.vault._get_master_client", autospec=True) as get_client: + client = Mock(vaultutil.AuthenticatedVaultClient) + get_client.return_value = client + yield client + + +@pytest.fixture +def client_token(client, token_response, wrapped_response): + def res_or_wrap(*args, **kwargs): + if kwargs.get("wrap"): + return vaultutil.VaultWrappedResponse(**wrapped_response["wrap_info"]) + return token_response + + client.post.side_effect = res_or_wrap + yield client + + +@pytest.fixture +def config(request, default_config): + def rec(config, path, val=None, default=vaultutil.VaultException): + ptr = config + parts = path.split(":") + while parts: + cur = parts.pop(0) + if val: + if parts and not isinstance(ptr.get(cur), dict): + ptr[cur] = {} + elif not parts: + ptr[cur] = val + return + if cur not in ptr: + if isinstance(default, Exception): + raise default() + return default + ptr = ptr[cur] + return ptr + + def get_config(key=None, default=vaultutil.VaultException): + overrides = getattr(request, "param", {}) 
+ if key is None: + for ovar, oval in overrides.items(): + rec(default_config, ovar, oval) + return default_config + if key in overrides: + return overrides[key] + return rec(default_config, key, default=default) + + with patch("salt.runners.vault._config", autospec=True) as config: + config.side_effect = get_config + yield config + + +@pytest.fixture +def policies(request, policies_default): + policies_list = getattr(request, "param", policies_default) + with patch( + "salt.runners.vault._get_policies_cached", autospec=True + ) as get_policies_cached: + get_policies_cached.return_value = policies_list + with patch("salt.runners.vault._get_policies", autospec=True) as get_policies: + get_policies.return_value = policies_list + yield + + +@pytest.fixture +def metadata(request, metadata_entity_default, metadata_secret_default): + def _get_metadata(minion_id, metadata_patterns, *args, **kwargs): + if getattr(request, "param", None) is not None: + return request.param + if "saltstack-jid" not in metadata_patterns: + return metadata_entity_default + return metadata_secret_default + + with patch("salt.runners.vault._get_metadata", autospec=True) as get_metadata: + get_metadata.side_effect = _get_metadata + yield get_metadata + + +@pytest.fixture +def validate_signature(): + with patch( + "salt.runners.vault._validate_signature", autospec=True, return_value=None + ) as validate: + yield validate + + +@pytest.mark.usefixtures("policies", "metadata") +@pytest.mark.parametrize( + "config", + [{}, {"issue:token:role_name": "test-role"}, {"issue:wrap": False}], + indirect=True, +) +def test_generate_token( + client_token, + config, + policies_default, + token_serialized, + wrapped_serialized, + metadata_secret_default, +): + """ + Ensure _generate_token calls the API as expected + """ + wrap = config("issue:wrap") + res_token, res_num_uses = vault._generate_token( + "test-minion", issue_params=None, wrap=wrap + ) + endpoint = "auth/token/create" + role_name = 
config("issue:token:role_name") + payload = {} + if config("issue:token:params:explicit_max_ttl"): + payload["explicit_max_ttl"] = config("issue:token:params:explicit_max_ttl") + if config("issue:token:params:num_uses"): + payload["num_uses"] = config("issue:token:params:num_uses") + payload["meta"] = metadata_secret_default + payload["policies"] = policies_default + if role_name: + endpoint += f"/{role_name}" + if config("issue:wrap"): + assert res_token == wrapped_serialized + client_token.post.assert_called_once_with( + endpoint, payload=payload, wrap=config("issue:wrap") + ) + else: + res_token.pop("expire_time") + assert res_token == token_serialized + assert res_num_uses == 1 + + +@pytest.mark.usefixtures("config") +@pytest.mark.parametrize("policies", [[]], indirect=True) +def test_generate_token_no_policies_denied(policies): + """ + Ensure generated tokens need at least one attached policy + """ + with pytest.raises( + salt.exceptions.SaltRunnerError, match=".*No policies matched minion.*" + ): + vault._generate_token("test-minion", issue_params=None, wrap=False) + + +@pytest.mark.parametrize("ttl", [None, 1337]) +@pytest.mark.parametrize("uses", [None, 1, 30]) +@pytest.mark.parametrize("config", [{}, {"issue:type": "approle"}], indirect=True) +def test_generate_token_deprecated( + ttl, uses, token_serialized, config, validate_signature, caplog +): + """ + Ensure the deprecated generate_token function returns data in the old format + """ + issue_params = {} + if ttl is not None: + token_serialized["lease_duration"] = ttl + issue_params["explicit_max_ttl"] = ttl + if uses is not None: + token_serialized["num_uses"] = uses + issue_params["num_uses"] = uses + expected = { + "token": token_serialized["client_token"], + "lease_duration": token_serialized["lease_duration"], + "renewable": token_serialized["renewable"], + "issued": token_serialized["creation_time"], + "url": config("server:url"), + "verify": config("server:verify"), + "token_backend": 
config("cache:backend"), + "namespace": config("server:namespace"), + "uses": token_serialized["num_uses"], + } + with patch("salt.runners.vault._generate_token", autospec=True) as gen: + gen.return_value = (token_serialized, token_serialized["num_uses"]) + res = vault.generate_token("test-minion", "sig", ttl=ttl, uses=uses) + validate_signature.assert_called_once_with("test-minion", "sig", False) + assert res == expected + gen.assert_called_once_with( + "test-minion", issue_params=issue_params or None, wrap=False + ) + if config("issue:type") != "token": + assert "Master is not configured to issue tokens" in caplog.text + + +@pytest.mark.parametrize("config", [{}, {"issue:wrap": False}], indirect=True) +@pytest.mark.parametrize( + "issue_params", [None, {"explicit_max_ttl": 120, "num_uses": 3}] +) +def test_generate_new_token( + issue_params, config, validate_signature, token_serialized, wrapped_serialized +): + """ + Ensure generate_new_token returns data as expected + """ + if issue_params is not None: + if issue_params.get("explicit_max_ttl") is not None: + token_serialized["lease_duration"] = issue_params["explicit_max_ttl"] + if issue_params.get("num_uses") is not None: + token_serialized["num_uses"] = issue_params["num_uses"] + expected = {"server": config("server"), "auth": {}} + if config("issue:wrap"): + expected.update(wrapped_serialized) + expected.update({"misc_data": {"num_uses": token_serialized["num_uses"]}}) + else: + expected["auth"] = token_serialized + + with patch("salt.runners.vault._generate_token", autospec=True) as gen: + + def res_or_wrap(*args, **kwargs): + if kwargs.get("wrap"): + return wrapped_serialized, token_serialized["num_uses"] + return token_serialized, token_serialized["num_uses"] + + gen.side_effect = res_or_wrap + res = vault.generate_new_token("test-minion", "sig", issue_params=issue_params) + validate_signature.assert_called_once_with("test-minion", "sig", False) + assert res == expected + gen.assert_called_once_with( + 
"test-minion", issue_params=issue_params or None, wrap=config("issue:wrap") + ) + + +@pytest.mark.usefixtures("validate_signature") +@pytest.mark.parametrize("config", [{"issue:type": "approle"}], indirect=True) +def test_generate_new_token_refuses_if_not_configured(config): + """ + Ensure generate_new_token only issues tokens if configured to issue them + """ + res = vault.generate_new_token("test-minion", "sig") + assert "error" in res + assert "Master does not issue tokens" in res["error"] + + +@pytest.mark.parametrize("config", [{}, {"issue:wrap": False}], indirect=True) +@pytest.mark.parametrize( + "issue_params", [None, {"explicit_max_ttl": 120, "num_uses": 3}] +) +def test_get_config_token( + config, validate_signature, token_serialized, wrapped_serialized, issue_params +): + """ + Ensure get_config returns data in the expected format when configured for token auth + """ + expected = { + "auth": { + "method": "token", + }, + "cache": config("cache"), + "server": config("server"), + "wrap_info_nested": [], + } + + if issue_params is not None: + if issue_params.get("explicit_max_ttl") is not None: + token_serialized["lease_duration"] = issue_params["explicit_max_ttl"] + if issue_params.get("num_uses") is not None: + token_serialized["num_uses"] = issue_params["num_uses"] + if config("issue:wrap"): + expected["auth"].update({"token": wrapped_serialized}) + expected.update( + { + "wrap_info_nested": ["auth:token"], + "misc_data": {"token:num_uses": token_serialized["num_uses"]}, + } + ) + else: + expected["auth"].update({"token": token_serialized}) + + with patch("salt.runners.vault._generate_token", autospec=True) as gen: + + def res_or_wrap(*args, **kwargs): + if kwargs.get("wrap"): + return wrapped_serialized, token_serialized["num_uses"] + return token_serialized, token_serialized["num_uses"] + + gen.side_effect = res_or_wrap + res = vault.get_config("test-minion", "sig", issue_params=issue_params) + validate_signature.assert_called_once_with("test-minion", 
"sig", False) + assert res == expected + gen.assert_called_once_with( + "test-minion", issue_params=issue_params or None, wrap=config("issue:wrap") + ) + + +@pytest.mark.parametrize( + "config", + [ + {"issue:type": "approle"}, { - "vault.expand_pattern_lists": Mock( - side_effect=lambda x, *args, **kwargs: [x] - ) + "issue:type": "approle", + "issue:wrap": False, + "issue:approle:mount": "test-mount", }, + {"issue:type": "approle", "issue:approle:params:bind_secret_id": False}, + ], + indirect=True, +) +@pytest.mark.parametrize( + "issue_params", + [ + None, + {"token_explicit_max_ttl": 120, "token_num_uses": 3}, + {"secret_id_num_uses": 2, "secret_id_ttl": 120}, + ], +) +def test_get_config_approle( + config, validate_signature, wrapped_serialized, issue_params +): + """ + Ensure get_config returns data in the expected format when configured for AppRole auth + """ + expected = { + "auth": { + "approle_mount": config("issue:approle:mount"), + "approle_name": "test-minion", + "method": "approle", + "secret_id": config("issue:approle:params:bind_secret_id"), + }, + "cache": config("cache"), + "server": config("server"), + "wrap_info_nested": [], + } + + if config("issue:wrap"): + expected["auth"].update({"role_id": wrapped_serialized}) + expected.update({"wrap_info_nested": ["auth:role_id"]}) + else: + expected["auth"].update({"role_id": "test-role-id"}) + + with patch("salt.runners.vault._get_role_id", autospec=True) as gen: + + def res_or_wrap(*args, **kwargs): + if kwargs.get("wrap"): + return wrapped_serialized + return "test-role-id" + + gen.side_effect = res_or_wrap + res = vault.get_config("test-minion", "sig", issue_params=issue_params) + validate_signature.assert_called_once_with("test-minion", "sig", False) + assert res == expected + gen.assert_called_once_with( + "test-minion", issue_params=issue_params or None, wrap=config("issue:wrap") + ) + + +@pytest.mark.parametrize( + "config", + [{"issue:type": "approle"}, {"issue:type": "approle", "issue:wrap": 
False}], + indirect=True, +) +@pytest.mark.parametrize( + "issue_params", + [ + None, + {"token_explicit_max_ttl": 120, "token_num_uses": 3}, + {"secret_id_num_uses": 2, "secret_id_ttl": 120}, + ], +) +def test_get_role_id(config, validate_signature, wrapped_serialized, issue_params): + """ + Ensure get_role_id returns data in the expected format + """ + expected = {"server": config("server"), "data": {}} + if config("issue:wrap"): + expected.update(wrapped_serialized) + else: + expected["data"].update({"role_id": "test-role-id"}) + with patch("salt.runners.vault._get_role_id", autospec=True) as gen: + + def res_or_wrap(*args, **kwargs): + if kwargs.get("wrap"): + return wrapped_serialized + return "test-role-id" + + gen.side_effect = res_or_wrap + res = vault.get_role_id("test-minion", "sig", issue_params=issue_params) + validate_signature.assert_called_once_with("test-minion", "sig", False) + assert res == expected + gen.assert_called_once_with( + "test-minion", issue_params=issue_params or None, wrap=config("issue:wrap") + ) + + +@pytest.mark.usefixtures("validate_signature") +@pytest.mark.parametrize("config", [{"issue:type": "token"}], indirect=True) +def test_get_role_id_refuses_if_not_configured(config): + """ + Ensure get_role_id returns an error if not configured to issue AppRoles + """ + res = vault.get_role_id("test-minion", "sig") + assert "error" in res + assert "Master does not issue AppRoles" in res["error"] + + +class TestGetRoleId: + @pytest.fixture(autouse=True) + def lookup_approle(self, approle_meta): + with patch( + "salt.runners.vault._lookup_approle_cached", autospec=True + ) as lookup_approle: + lookup_approle.return_value = approle_meta + yield lookup_approle + + @pytest.fixture(autouse=True) + def lookup_roleid(self, wrapped_serialized): + role_id = MagicMock(return_value="test-role-id") + role_id.serialize_for_minion.return_value = wrapped_serialized + with patch( + "salt.runners.vault._lookup_role_id", autospec=True + ) as lookup_roleid: 
+ lookup_roleid.return_value = role_id + yield lookup_roleid + + @pytest.fixture(autouse=True) + def manage_approle(self): + with patch( + "salt.runners.vault._manage_approle", autospec=True + ) as manage_approle: + yield manage_approle + + @pytest.fixture(autouse=True) + def manage_entity(self): + with patch("salt.runners.vault._manage_entity", autospec=True) as manage_entity: + yield manage_entity + + @pytest.fixture(autouse=True) + def manage_entity_alias(self): + with patch( + "salt.runners.vault._manage_entity_alias", autospec=True + ) as manage_entity_alias: + yield manage_entity_alias + + @pytest.mark.parametrize( + "config", + [{"issue:type": "approle"}, {"issue:type": "approle", "issue:wrap": False}], + indirect=True, + ) + def test_get_role_id( + self, + config, + lookup_approle, + lookup_roleid, + manage_approle, + manage_entity, + manage_entity_alias, + wrapped_serialized, ): - yield + """ + Ensure _get_role_id returns data in the expected format and does not + try to generate a new AppRole if it exists and is configured correctly + """ + wrap = config("issue:wrap") + res = vault._get_role_id("test-minion", issue_params=None, wrap=wrap) + lookup_approle.assert_called_with("test-minion") + lookup_roleid.assert_called_with("test-minion", wrap=wrap) + manage_approle.assert_not_called() + manage_entity.assert_not_called() + manage_entity_alias.assert_not_called() + if wrap: + assert res == wrapped_serialized + lookup_roleid.return_value.serialize_for_minion.assert_called_once() + else: + assert res() == "test-role-id" + lookup_roleid.return_value.serialize_for_minion.assert_not_called() -@pytest.mark.usefixtures("expand_pattern_lists") -def test_get_policies_for_nonexisting_minions(): - minion_id = "salt_master" - # For non-existing minions, or the master-minion, grains will be None - cases = { - "no-tokens-to-replace": ["no-tokens-to-replace"], - "single-dict:{minion}": ["single-dict:{}".format(minion_id)], - "single-grain:{grains[os]}": [], + 
@pytest.mark.parametrize( + "config", + [ + {"issue:type": "approle"}, + {"issue:type": "approle", "issue:allow_minion_override_params": True}, + ], + indirect=True, + ) + @pytest.mark.parametrize( + "issue_params", [None, {"token_explicit_max_ttl": 120, "token_num_uses": 3}] + ) + def test_get_role_id_generate_new( + self, + config, + lookup_approle, + lookup_roleid, + manage_approle, + manage_entity, + manage_entity_alias, + wrapped_serialized, + issue_params, + ): + """ + Ensure _get_role_id returns data in the expected format and does not + try to generate a new AppRole if it exists and is configured correctly + """ + lookup_approle.return_value = False + wrap = config("issue:wrap") + res = vault._get_role_id("test-minion", issue_params=issue_params, wrap=wrap) + assert res == wrapped_serialized + lookup_roleid.assert_called_with("test-minion", wrap=wrap) + manage_approle.assert_called_once_with("test-minion", issue_params) + manage_entity.assert_called_once_with("test-minion") + manage_entity_alias.assert_called_once_with("test-minion") + + @pytest.mark.parametrize("config", [{"issue:type": "approle"}], indirect=True) + def test_get_role_id_generate_new_errors_on_generation_failure( + self, config, lookup_approle, lookup_roleid + ): + """ + Ensure _get_role_id returns an error if the AppRole generation failed + """ + lookup_approle.return_value = False + lookup_roleid.return_value = False + with pytest.raises( + salt.exceptions.SaltRunnerError, + match="Failed to create AppRole for minion.*", + ): + vault._get_role_id("test-minion", issue_params=None, wrap=False) + + +@pytest.mark.parametrize( + "config", + [{"issue:type": "approle"}, {"issue:type": "approle", "issue:wrap": False}], + indirect=True, +) +def test_generate_secret_id( + config, validate_signature, wrapped_serialized, approle_meta, secret_id_serialized +): + """ + Ensure generate_secret_id returns data in the expected format + """ + expected = { + "server": config("server"), + "data": {}, + 
"misc_data": {"secret_id_num_uses": approle_meta["secret_id_num_uses"]}, } + if config("issue:wrap"): + expected.update(wrapped_serialized) + else: + expected["data"].update(secret_id_serialized) + with patch("salt.runners.vault._get_secret_id", autospec=True) as gen, patch( + "salt.runners.vault._approle_params_match", autospec=True, return_value=True + ) as matcher, patch( + "salt.runners.vault._lookup_approle_cached", autospec=True + ) as lookup_approle: + + def res_or_wrap(*args, **kwargs): + if kwargs.get("wrap"): + return wrapped_serialized + secret_id = Mock() + secret_id.serialize_for_minion.return_value = secret_id_serialized + return secret_id + + gen.side_effect = res_or_wrap + lookup_approle.return_value = approle_meta + res = vault.generate_secret_id("test-minion", "sig", issue_params=None) + validate_signature.assert_called_once_with("test-minion", "sig", False) + assert res == expected + gen.assert_called_once_with("test-minion", wrap=config("issue:wrap")) + matcher.assert_called_once() + + +@pytest.mark.usefixtures("validate_signature") +@pytest.mark.parametrize("config", [{"issue:type": "approle"}], indirect=True) +def test_generate_secret_id_nonexistent_approle(config): + """ + Ensure generate_secret_id fails and prompts the minion to refresh cache if + no associated AppRole could be found. 
+ """ with patch( - "salt.utils.minions.get_minion_data", - MagicMock(return_value=(None, None, None)), + "salt.runners.vault._lookup_approle_cached", autospec=True + ) as lookup_approle: + lookup_approle.return_value = False + res = vault.generate_secret_id("test-minion", "sig", issue_params=None) + assert "error" in res + assert "expire_cache" in res + assert res["expire_cache"] + + +@pytest.mark.usefixtures("validate_signature") +@pytest.mark.parametrize("config", [{"issue:type": "token"}], indirect=True) +def test_get_secret_id_refuses_if_not_configured(config): + """ + Ensure get_secret_id returns an error if not configured to issue AppRoles + """ + res = vault.generate_secret_id("test-minion", "sig") + assert "error" in res + assert "Master does not issue AppRoles" in res["error"] + + +@pytest.mark.parametrize("config", [{"issue:type": "approle"}], indirect=True) +def test_generate_secret_id_updates_params( + config, validate_signature, wrapped_serialized, approle_meta +): + """ + Ensure generate_secret_id returns data in the expected format + """ + expected = { + "server": config("server"), + "data": {}, + "misc_data": {"secret_id_num_uses": approle_meta["secret_id_num_uses"]}, + "wrap_info": wrapped_serialized["wrap_info"], + } + with patch("salt.runners.vault._get_secret_id", autospec=True) as gen, patch( + "salt.runners.vault._approle_params_match", autospec=True, return_value=False + ) as matcher, patch( + "salt.runners.vault._manage_approle", autospec=True + ) as manage_approle, patch( + "salt.runners.vault._lookup_approle_cached", autospec=True + ) as lookup_approle: + gen.return_value = wrapped_serialized + lookup_approle.return_value = approle_meta + res = vault.generate_secret_id("test-minion", "sig", issue_params=None) + validate_signature.assert_called_once_with("test-minion", "sig", False) + assert res == expected + gen.assert_called_once_with("test-minion", wrap=config("issue:wrap")) + matcher.assert_called_once() + 
manage_approle.assert_called_once() + + +@pytest.mark.parametrize("config", [{"issue:type": "approle"}], indirect=True) +def test_list_approles(client, config): + """ + Ensure list_approles call the API as expected and returns only a list of names + """ + client.list.return_value = {"data": {"keys": ["foo", "bar"]}} + res = vault.list_approles() + assert res == ["foo", "bar"] + client.list.assert_called_once_with("auth/salt-minions/role") + + +@pytest.mark.parametrize("config", [{"issue:type": "token"}], indirect=True) +def test_list_approles_raises_exception_if_not_configured(config): + """ + Ensure test_list_approles returns an error if not configured to issue AppRoles + """ + with pytest.raises( + salt.exceptions.SaltRunnerError, match="Master does not issue AppRoles.*" ): - for case, correct_output in cases.items(): - test_config = {"policies": [case]} - output = vault._get_policies( - minion_id, test_config - ) # pylint: disable=protected-access - diff = set(output).symmetric_difference(set(correct_output)) - if diff: - log.debug("Test %s failed", case) - log.debug("Expected:\n\t%s\nGot\n\t%s", output, correct_output) - log.debug("Difference:\n\t%s", diff) - assert output == correct_output - - -@pytest.mark.usefixtures("expand_pattern_lists") -def test_get_policies(grains): + vault.list_approles() + + +@pytest.mark.parametrize( + "config,expected", + [ + ({"policies:assign": ["no-tokens-to-replace"]}, ["no-tokens-to-replace"]), + ({"policies:assign": ["single-dict:{minion}"]}, ["single-dict:test-minion"]), + ( + { + "policies:assign": [ + "should-not-cause-an-exception,but-result-empty:{foo}" + ] + }, + [], + ), + ( + {"policies:assign": ["Case-Should-Be-Lowered:{grains[mixedcase]}"]}, + ["case-should-be-lowered:up-low-up"], + ), + ( + {"policies:assign": ["pillar-rendering:{pillar[role]}"]}, + ["pillar-rendering:test"], + ), + ], + indirect=["config"], +) +def test_get_policies(config, expected, grains, pillar): """ Ensure _get_policies works as intended. 
The expansion of lists is tested in the vault utility module unit tests. """ - cases = { - "no-tokens-to-replace": ["no-tokens-to-replace"], - "single-dict:{minion}": ["single-dict:test-minion"], - "should-not-cause-an-exception,but-result-empty:{foo}": [], - "Case-Should-Be-Lowered:{grains[mixedcase]}": [ - "case-should-be-lowered:up-low-up" - ], - } - with patch( "salt.utils.minions.get_minion_data", - MagicMock(return_value=(None, grains, None)), + MagicMock(return_value=(None, grains, pillar)), ): - for case, correct_output in cases.items(): - test_config = {"policies": [case]} - output = vault._get_policies( - "test-minion", test_config - ) # pylint: disable=protected-access - diff = set(output).symmetric_difference(set(correct_output)) - if diff: - log.debug("Test %s failed", case) - log.debug("Expected:\n\t%s\nGot\n\t%s", output, correct_output) - log.debug("Difference:\n\t%s", diff) - assert output == correct_output - - -@pytest.mark.usefixtures("expand_pattern_lists") + with patch( + "salt.utils.vault.expand_pattern_lists", + Mock(side_effect=lambda x, *args, **kwargs: [x]), + ): + res = vault._get_policies("test-minion", refresh_pillar=False) + assert res == expected + + @pytest.mark.parametrize( - "pattern,count", + "config", [ - ("salt_minion_{minion}", 0), - ("salt_grain_{grains[id]}", 0), - ("unset_{foo}", 0), - ("salt_pillar_{pillar[role]}", 1), + {"policies:assign": ["salt_minion_{minion}"]}, + {"policies:assign": ["salt_grain_{grains[id]}"]}, + {"policies:assign": ["unset_{foo}"]}, + {"policies:assign": ["salt_pillar_{pillar[role]}"]}, ], + indirect=True, ) -def test_get_policies_does_not_render_pillar_unnecessarily( - pattern, count, grains, pillar -): +def test_get_policies_does_not_render_pillar_unnecessarily(config, grains, pillar): """ The pillar data should only be refreshed in case items are accessed. 
""" with patch("salt.utils.minions.get_minion_data", autospec=True) as get_minion_data: get_minion_data.return_value = (None, grains, None) - with patch("salt.pillar.get_pillar", autospec=True) as get_pillar: - get_pillar.return_value.compile_pillar.return_value = pillar - test_config = {"policies": [pattern]} - vault._get_policies( - "test-minion", test_config, refresh_pillar=True - ) # pylint: disable=protected-access - assert get_pillar.call_count == count + with patch( + "salt.utils.vault.expand_pattern_lists", + Mock(side_effect=lambda x, *args, **kwargs: [x]), + ): + with patch("salt.pillar.get_pillar", autospec=True) as get_pillar: + get_pillar.return_value.compile_pillar.return_value = pillar + vault._get_policies("test-minion", refresh_pillar=True) + assert get_pillar.call_count == int( + "pillar" in config("policies:assign")[0] + ) -def test_get_token_create_url(): +@pytest.mark.parametrize( + "config,expected", + [ + ({"policies:assign": ["no-tokens-to-replace"]}, ["no-tokens-to-replace"]), + ({"policies:assign": ["single-dict:{minion}"]}, ["single-dict:test-minion"]), + ({"policies:assign": ["single-grain:{grains[os]}"]}, []), + ], + indirect=["config"], +) +def test_get_policies_for_nonexisting_minions(config, expected): """ - Ensure _get_token_create_url parses config correctly + For non-existing minions, or the master-minion, grains will be None. 
""" - assert ( - vault._get_token_create_url( # pylint: disable=protected-access - {"url": "http://127.0.0.1"} - ) - == "http://127.0.0.1/v1/auth/token/create" + with patch("salt.utils.minions.get_minion_data", autospec=True) as get_minion_data: + get_minion_data.return_value = (None, None, None) + with patch( + "salt.utils.vault.expand_pattern_lists", + Mock(side_effect=lambda x, *args, **kwargs: [x]), + ): + res = vault._get_policies("test-minion", refresh_pillar=False) + assert res == expected + + +@pytest.mark.parametrize( + "metadata_patterns,expected", + [ + ( + {"no-tokens-to-replace": "no-tokens-to-replace"}, + {"no-tokens-to-replace": "no-tokens-to-replace"}, + ), + ( + {"single-dict:{minion}": "single-dict:{minion}"}, + {"single-dict:{minion}": "single-dict:test-minion"}, + ), + ( + {"should-not-cause-an-exception,but-result-empty:{foo}": "empty:{foo}"}, + {"should-not-cause-an-exception,but-result-empty:{foo}": ""}, + ), + ( + { + "Case-Should-Not-Be-Lowered": "Case-Should-Not-Be-Lowered:{pillar[mixedcase]}" + }, + {"Case-Should-Not-Be-Lowered": "Case-Should-Not-Be-Lowered:UP-low-UP"}, + ), + ( + {"pillar-rendering:{pillar[role]}": "pillar-rendering:{pillar[role]}"}, + {"pillar-rendering:{pillar[role]}": "pillar-rendering:test"}, + ), + ], +) +def test_get_metadata(metadata_patterns, expected, pillar): + """ + Ensure _get_policies works as intended. + The expansion of lists is tested in the vault utility module unit tests. 
+ """ + with patch("salt.utils.minions.get_minion_data", autospec=True) as get_minion_data: + get_minion_data.return_value = (None, None, pillar) + with patch( + "salt.utils.vault.expand_pattern_lists", + Mock(side_effect=lambda x, *args, **kwargs: [x]), + ): + res = vault._get_metadata( + "test-minion", metadata_patterns, refresh_pillar=False + ) + assert res == expected + + +def test_get_metadata_list(): + """ + Test that lists are concatenated to an alphabetically sorted + comma-separated list string since the API does not allow + composite metadata values + """ + with patch("salt.utils.minions.get_minion_data", autospec=True) as get_minion_data: + get_minion_data.return_value = (None, None, None) + with patch("salt.utils.vault.expand_pattern_lists", autospec=True) as expand: + expand.return_value = ["salt_role_foo", "salt_role_bar"] + res = vault._get_metadata( + "test-minion", + {"salt_role": "salt_role_{pillar[roles]}"}, + refresh_pillar=False, + ) + assert res == {"salt_role": "salt_role_bar,salt_role_foo"} + + +@pytest.mark.parametrize( + "config,issue_params,expected", + [ + ( + {"issue:token:params": {"explicit_max_ttl": None, "num_uses": None}}, + None, + {}, + ), + ( + {"issue:token:params": {"explicit_max_ttl": 1337, "num_uses": None}}, + None, + {"explicit_max_ttl": 1337}, + ), + ( + {"issue:token:params": {"explicit_max_ttl": None, "num_uses": 3}}, + None, + {"num_uses": 3}, + ), + ( + {"issue:token:params": {"explicit_max_ttl": 1337, "num_uses": 3}}, + None, + {"explicit_max_ttl": 1337, "num_uses": 3}, + ), + ( + { + "issue:token:params": { + "explicit_max_ttl": 1337, + "num_uses": 3, + "invalid": True, + } + }, + None, + {"explicit_max_ttl": 1337, "num_uses": 3}, + ), + ( + {"issue:token:params": {"explicit_max_ttl": None, "num_uses": None}}, + {"num_uses": 42, "explicit_max_ttl": 1338}, + {}, + ), + ( + {"issue:token:params": {"explicit_max_ttl": 1337, "num_uses": None}}, + {"num_uses": 42, "explicit_max_ttl": 1338}, + {"explicit_max_ttl": 1337}, 
+ ), + ( + {"issue:token:params": {"explicit_max_ttl": None, "num_uses": 3}}, + {"num_uses": 42, "explicit_max_ttl": 1338}, + {"num_uses": 3}, + ), + ( + {"issue:token:params": {"explicit_max_ttl": 1337, "num_uses": 3}}, + {"num_uses": 42, "explicit_max_ttl": 1338, "invalid": True}, + {"explicit_max_ttl": 1337, "num_uses": 3}, + ), + ( + { + "issue:token:params": {"explicit_max_ttl": None, "num_uses": None}, + "issue:allow_minion_override_params": True, + }, + {"num_uses": None, "explicit_max_ttl": None}, + {}, + ), + ( + { + "issue:token:params": {"explicit_max_ttl": None, "num_uses": 3}, + "issue:allow_minion_override_params": True, + }, + {"num_uses": 42, "explicit_max_ttl": None}, + {"num_uses": 42}, + ), + ( + { + "issue:token:params": {"explicit_max_ttl": 1337, "num_uses": None}, + "issue:allow_minion_override_params": True, + }, + {"num_uses": None, "explicit_max_ttl": 1338}, + {"explicit_max_ttl": 1338}, + ), + ( + { + "issue:token:params": {"explicit_max_ttl": 1337, "num_uses": None}, + "issue:allow_minion_override_params": True, + }, + {"num_uses": 42, "explicit_max_ttl": None}, + {"num_uses": 42, "explicit_max_ttl": 1337}, + ), + ( + { + "issue:token:params": {"explicit_max_ttl": None, "num_uses": 3}, + "issue:allow_minion_override_params": True, + }, + {"num_uses": None, "explicit_max_ttl": 1338}, + {"num_uses": 3, "explicit_max_ttl": 1338}, + ), + ( + { + "issue:token:params": {"explicit_max_ttl": None, "num_uses": None}, + "issue:allow_minion_override_params": True, + }, + {"num_uses": 42, "explicit_max_ttl": 1338}, + {"num_uses": 42, "explicit_max_ttl": 1338}, + ), + ( + { + "issue:token:params": {"explicit_max_ttl": 1337, "num_uses": 3}, + "issue:allow_minion_override_params": True, + }, + {"num_uses": 42, "explicit_max_ttl": 1338, "invalid": True}, + {"num_uses": 42, "explicit_max_ttl": 1338}, + ), + ({"issue:type": "approle", "issue:approle:params": {}}, None, {}), + ( + { + "issue:type": "approle", + "issue:approle:params": { + 
"token_explicit_max_ttl": 1337, + "token_num_uses": 3, + "secret_id_num_uses": 3, + "secret_id_ttl": 60, + }, + }, + None, + { + "token_explicit_max_ttl": 1337, + "token_num_uses": 3, + "secret_id_num_uses": 3, + "secret_id_ttl": 60, + }, + ), + ( + { + "issue:type": "approle", + "issue:approle:params": { + "token_explicit_max_ttl": 1337, + "token_num_uses": 3, + "secret_id_num_uses": 3, + "secret_id_ttl": 60, + }, + }, + { + "token_explicit_max_ttl": 1338, + "token_num_uses": 42, + "secret_id_num_uses": 42, + "secret_id_ttl": 1338, + }, + { + "token_explicit_max_ttl": 1337, + "token_num_uses": 3, + "secret_id_num_uses": 3, + "secret_id_ttl": 60, + }, + ), + ( + { + "issue:type": "approle", + "issue:allow_minion_override_params": True, + "issue:approle:params": {}, + }, + { + "token_explicit_max_ttl": 1338, + "token_num_uses": 42, + "secret_id_num_uses": 42, + "secret_id_ttl": 1338, + }, + { + "token_explicit_max_ttl": 1338, + "token_num_uses": 42, + "secret_id_num_uses": 42, + "secret_id_ttl": 1338, + }, + ), + ( + { + "issue:type": "approle", + "issue:allow_minion_override_params": True, + "issue:approle:params": { + "token_explicit_max_ttl": 1337, + "token_num_uses": 3, + "secret_id_num_uses": 3, + "secret_id_ttl": 60, + }, + }, + { + "token_explicit_max_ttl": 1338, + "token_num_uses": 42, + "secret_id_num_uses": 42, + "secret_id_ttl": 1338, + }, + { + "token_explicit_max_ttl": 1338, + "token_num_uses": 42, + "secret_id_num_uses": 42, + "secret_id_ttl": 1338, + }, + ), + ], + indirect=["config"], +) +def test_parse_issue_params(config, issue_params, expected): + """ + Ensure all known parameters can only be overridden if it was configured + on the master. Also ensure the mapping to API requests is correct (for tokens). 
+ """ + res = vault._parse_issue_params(issue_params) + assert res == expected + + +@pytest.mark.parametrize( + "config,issue_params,expected", + [ + ( + {"issue:type": "approle", "issue:approle:params": {}}, + {"bind_secret_id": False}, + False, + ), + ( + {"issue:type": "approle", "issue:approle:params": {}}, + {"bind_secret_id": True}, + False, + ), + ( + {"issue:type": "approle", "issue:approle:params": {"bind_secret_id": True}}, + {"bind_secret_id": False}, + True, + ), + ( + { + "issue:type": "approle", + "issue:approle:params": {"bind_secret_id": False}, + }, + {"bind_secret_id": True}, + False, + ), + ], + indirect=["config"], +) +def test_parse_issue_params_does_not_allow_bind_secret_id_override( + config, issue_params, expected +): + """ + Ensure bind_secret_id can only be set on the master. + """ + res = vault._parse_issue_params(issue_params) + assert res.get("bind_secret_id", False) == expected + + +@pytest.mark.usefixtures("config", "policies") +def test_manage_approle(client, policies_default): + """ + Ensure _manage_approle calls the API as expected. + """ + vault._manage_approle("test-minion", None) + payload = { + "explicit_max_ttl": 9999999999, + "num_uses": 1, + "token_policies": policies_default, + } + client.post.assert_called_once_with( + "auth/salt-minions/role/test-minion", payload=payload ) - assert ( - vault._get_token_create_url( # pylint: disable=protected-access - {"url": "https://127.0.0.1/"} - ) - == "https://127.0.0.1/v1/auth/token/create" + + +@pytest.mark.usefixtures("config") +def test_delete_approle(client): + """ + Ensure _delete_approle calls the API as expected. + """ + vault._delete_approle("test-minion") + client.delete.assert_called_once_with("auth/salt-minions/role/test-minion") + + +@pytest.mark.usefixtures("config") +def test_lookup_approle(client, approle_meta): + """ + Ensure _lookup_approle calls the API as expected. 
+ """ + client.get.return_value = {"data": approle_meta} + res = vault._lookup_approle("test-minion") + assert res == approle_meta + client.get.assert_called_once_with("auth/salt-minions/role/test-minion") + + +@pytest.mark.usefixtures("config") +def test_lookup_approle_nonexistent(client): + """ + Ensure _lookup_approle catches VaultNotFoundErrors and returns False. + """ + client.get.side_effect = vaultutil.VaultNotFoundError + res = vault._lookup_approle("test-minion") + assert res is False + + +@pytest.mark.usefixtures("config") +@pytest.mark.parametrize("wrap", ["30s", False]) +def test_lookup_role_id(client, wrapped_response, wrap): + """ + Ensure _lookup_role_id calls the API as expected. + """ + + def res_or_wrap(*args, **kwargs): + if kwargs.get("wrap"): + return vaultutil.VaultWrappedResponse(**wrapped_response["wrap_info"]) + return {"data": {"role_id": "test-role-id"}} + + client.get.side_effect = res_or_wrap + res = vault._lookup_role_id("test-minion", wrap=wrap) + if wrap: + assert res == vaultutil.VaultWrappedResponse(**wrapped_response["wrap_info"]) + else: + assert res == "test-role-id" + client.get.assert_called_once_with( + "auth/salt-minions/role/test-minion/role-id", wrap=wrap ) - assert ( - vault._get_token_create_url( # pylint: disable=protected-access - {"url": "http://127.0.0.1:8200", "role_name": "therole"} + + +@pytest.mark.usefixtures("config") +def test_lookup_role_id_nonexistent(client): + """ + Ensure _lookup_role_id catches VaultNotFoundErrors and returns False. + """ + client.get.side_effect = vaultutil.VaultNotFoundError + res = vault._lookup_role_id("test-minion", wrap=False) + assert res is False + + +@pytest.mark.usefixtures("config") +@pytest.mark.parametrize("wrap", ["30s", False]) +def test_get_secret_id(client, wrapped_response, secret_id_response, wrap): + """ + Ensure _get_secret_id calls the API as expected. 
+ """ + + def res_or_wrap(*args, **kwargs): + if kwargs.get("wrap"): + return vaultutil.VaultWrappedResponse(**wrapped_response["wrap_info"]) + return secret_id_response + + client.post.side_effect = res_or_wrap + res = vault._get_secret_id("test-minion", wrap=wrap) + if wrap: + assert ( + res + == vaultutil.VaultWrappedResponse( + **wrapped_response["wrap_info"] + ).serialize_for_minion() ) - == "http://127.0.0.1:8200/v1/auth/token/create/therole" + else: + assert res == vaultutil.VaultSecretId(**secret_id_response["data"]) + client.post.assert_called_once_with( + "auth/salt-minions/role/test-minion/secret-id", payload=ANY, wrap=wrap ) - assert ( - vault._get_token_create_url( # pylint: disable=protected-access - {"url": "https://127.0.0.1/test", "role_name": "therole"} + + +@pytest.mark.usefixtures("config") +@pytest.mark.parametrize("wrap", ["30s", False]) +def test_get_secret_id_meta_info( + client, + wrapped_response, + secret_id_response, + wrap, + secret_id_lookup_accessor_response, +): + """ + Ensure _get_secret_id calls the API as expected when querying for meta info. 
+ """ + + def res_or_wrap(*args, **kwargs): + if args[0].endswith("lookup"): + return secret_id_lookup_accessor_response + if kwargs.get("wrap"): + return vaultutil.VaultWrappedResponse(**wrapped_response["wrap_info"]) + return secret_id_response + + client.post.side_effect = res_or_wrap + res = vault._get_secret_id("test-minion", wrap=wrap, meta_info=True) + if wrap: + assert res == ( + vaultutil.VaultWrappedResponse( + **wrapped_response["wrap_info"] + ).serialize_for_minion(), + secret_id_lookup_accessor_response["data"], ) - == "https://127.0.0.1/test/v1/auth/token/create/therole" + else: + assert res == ( + vaultutil.VaultSecretId(**secret_id_response["data"]), + secret_id_lookup_accessor_response["data"], + ) + payload = {"secret_id_accessor": wrapped_response["wrap_info"]["wrapped_accessor"]} + client.post.assert_called_with( + "auth/salt-minions/role/test-minion/secret-id-accessor/lookup", payload=payload + ) + + +def test_lookup_mount_accessor(client): + """ + Ensure _lookup_mount_accessor calls the API as expected. + """ + client.get.return_value = MagicMock() + vault._lookup_mount_accessor("salt-minions") + client.get.assert_called_once_with("sys/auth/salt-minions") + + +@pytest.mark.usefixtures("config") +def test_lookup_entity_by_alias(client, entity_lookup_response): + """ + Ensure _lookup_entity_by_alias calls the API as expected. 
+ """ + with patch( + "salt.runners.vault._lookup_mount_accessor", return_value="test-accessor" + ), patch("salt.runners.vault._lookup_role_id", return_value="test-role-id"): + client.post.return_value = entity_lookup_response + res = vault._lookup_entity_by_alias("test-minion") + assert res == entity_lookup_response["data"] + payload = { + "alias_name": "test-role-id", + "alias_mount_accessor": "test-accessor", + } + client.post.assert_called_once_with("identity/lookup/entity", payload=payload) + + +@pytest.mark.usefixtures("config") +def test_lookup_entity_by_alias_failed(client): + """ + Ensure _lookup_entity_by_alias returns False if the lookup fails. + """ + with patch( + "salt.runners.vault._lookup_mount_accessor", return_value="test-accessor" + ), patch("salt.runners.vault._lookup_role_id", return_value="test-role-id"): + client.post.return_value = [] + res = vault._lookup_entity_by_alias("test-minion") + assert res is False + + +@pytest.mark.usefixtures("config") +def test_fetch_entity_by_name(client, entity_fetch_response): + """ + Ensure _fetch_entity_by_name calls the API as expected. + """ + client.get.return_value = entity_fetch_response + res = vault._fetch_entity_by_name("test-minion") + assert res == entity_fetch_response["data"] + client.get.assert_called_once_with("identity/entity/name/salt_minion_test-minion") + + +@pytest.mark.usefixtures("config") +def test_fetch_entity_by_name_failed(client): + """ + Ensure _fetch_entity_by_name returns False if the lookup fails. + """ + client.get.side_effect = vaultutil.VaultNotFoundError + res = vault._fetch_entity_by_name("test-minion") + assert res is False + + +@pytest.mark.usefixtures("config") +def test_manage_entity(client, metadata, metadata_entity_default): + """ + Ensure _manage_entity calls the API as expected. 
+ """ + vault._manage_entity("test-minion") + payload = {"metadata": metadata_entity_default} + client.post.assert_called_with( + "identity/entity/name/salt_minion_test-minion", payload=payload + ) + + +@pytest.mark.usefixtures("config") +def test_delete_entity(client): + """ + Ensure _delete_entity calls the API as expected. + """ + vault._delete_entity("test-minion") + client.delete.assert_called_with("identity/entity/name/salt_minion_test-minion") + + +@pytest.mark.usefixtures("config") +@pytest.mark.parametrize( + "aliases", + [ + [], + [ + {"mount_accessor": "test-accessor", "id": "test-entity-alias-id"}, + {"mount_accessor": "other-accessor", "id": "other-entity-alias-id"}, + ], + ], +) +def test_manage_entity_alias(client, aliases, entity_fetch_response): + """ + Ensure _manage_entity_alias calls the API as expected. + """ + payload = { + "canonical_id": "test-entity-id", + "mount_accessor": "test-accessor", + "name": "test-role-id", + } + if aliases: + entity_fetch_response["data"]["aliases"] = aliases + if aliases[0]["mount_accessor"] == "test-accessor": + payload["id"] = aliases[0]["id"] + + with patch( + "salt.runners.vault._lookup_mount_accessor", return_value="test-accessor" + ), patch("salt.runners.vault._lookup_role_id", return_value="test-role-id"), patch( + "salt.runners.vault._fetch_entity_by_name", + return_value=entity_fetch_response["data"], + ): + vault._manage_entity_alias("test-minion") + client.post.assert_called_with("identity/entity-alias", payload=payload) + + +@pytest.mark.usefixtures("config", "client") +def test_manage_entity_alias_raises_errors(): + """ + Ensure _manage_entity_alias raises exceptions. 
+ """ + with patch( + "salt.runners.vault._lookup_mount_accessor", return_value="test-accessor" + ), patch("salt.runners.vault._lookup_role_id", return_value="test-role-id"), patch( + "salt.runners.vault._fetch_entity_by_name", return_value=False + ): + with pytest.raises( + salt.exceptions.SaltRunnerError, + match="There is no entity to create an alias for.*", + ): + vault._manage_entity_alias("test-minion") + + +def test_revoke_token_by_token(client): + """ + Ensure _revoke_token calls the API as expected. + """ + vault._revoke_token(token="test-token") + client.post.assert_called_once_with( + "auth/token/revoke", payload={"token": "test-token"} + ) + + +def test_revoke_token_by_accessor(client): + """ + Ensure _revoke_token calls the API as expected. + """ + vault._revoke_token(accessor="test-accessor") + client.post.assert_called_once_with( + "auth/token/revoke-accessor", payload={"accessor": "test-accessor"} + ) + + +def test_destroy_secret_id_by_secret_id(client): + """ + Ensure _revoke_token calls the API as expected. + """ + vault._destroy_secret_id("test-minion", "salt-minions", secret_id="test-secret-id") + client.post.assert_called_once_with( + "auth/salt-minions/role/test-minion/secret-id/destroy", + payload={"secret_id": "test-secret-id"}, + ) + + +def test_destroy_secret_id_by_accessor(client): + """ + Ensure _revoke_token calls the API as expected. 
+ """ + vault._destroy_secret_id("test-minion", "salt-minions", accessor="test-accessor") + client.post.assert_called_once_with( + "auth/salt-minions/role/test-minion/secret-id-accessor/destroy", + payload={"secret_id_accessor": "test-accessor"}, ) diff --git a/tests/pytests/unit/sdb/test_vault.py b/tests/pytests/unit/sdb/test_vault.py index eeeb7e8b9f96..fda8f2314f87 100644 --- a/tests/pytests/unit/sdb/test_vault.py +++ b/tests/pytests/unit/sdb/test_vault.py @@ -4,182 +4,138 @@ import pytest +import salt.exceptions import salt.sdb.vault as vault -from tests.support.mock import MagicMock, call, patch +import salt.utils.vault as vaultutil +from tests.support.mock import ANY, patch @pytest.fixture def configure_loader_modules(): - return { - vault: { - "__opts__": { - "vault": { - "url": "http://127.0.0.1", - "auth": {"token": "test", "method": "token"}, - } - } - } - } - - -def test_set(): - """ - Test salt.sdb.vault.set function - """ - version = {"v2": False, "data": None, "metadata": None, "type": None} - mock_version = MagicMock(return_value=version) - mock_vault = MagicMock() - mock_vault.return_value.status_code = 200 - with patch.dict(vault.__utils__, {"vault.make_request": mock_vault}), patch.dict( - vault.__utils__, {"vault.is_v2": mock_version} - ): - vault.set_("sdb://myvault/path/to/foo/bar", "super awesome") - - assert mock_vault.call_args_list == [ - call( - "POST", - "v1/sdb://myvault/path/to/foo", - json={"bar": "super awesome"}, - ) - ] - - -def test_set_v2(): - """ - Test salt.sdb.vault.set function with kv v2 backend - """ - version = { - "v2": True, - "data": "path/data/to/foo", - "metadata": "path/metadata/to/foo", - "type": "kv", - } - mock_version = MagicMock(return_value=version) - mock_vault = MagicMock() - mock_vault.return_value.status_code = 200 - with patch.dict(vault.__utils__, {"vault.make_request": mock_vault}), patch.dict( - vault.__utils__, {"vault.is_v2": mock_version} - ): - vault.set_("sdb://myvault/path/to/foo/bar", "super 
awesome") - - assert mock_vault.call_args_list == [ - call( - "POST", - "v1/path/data/to/foo", - json={"data": {"bar": "super awesome"}}, - ) - ] - - -def test_set_question_mark(): + return {vault: {}} + + +@pytest.fixture +def data(): + return {"bar": "super awesome"} + + +@pytest.fixture +def read_kv(data): + with patch("salt.utils.vault.read_kv", autospec=True) as read: + read.return_value = data + yield read + + +@pytest.fixture +def read_kv_not_found(read_kv): + read_kv.side_effect = vaultutil.VaultNotFoundError + + +@pytest.fixture +def read_kv_not_found_once(read_kv, data): + read_kv.side_effect = (vaultutil.VaultNotFoundError, data) + yield read_kv + + +@pytest.fixture +def read_kv_err(read_kv): + read_kv.side_effect = vaultutil.VaultPermissionDeniedError("damn") + yield read_kv + + +@pytest.fixture +def write_kv(): + with patch("salt.utils.vault.write_kv", autospec=True) as write: + yield write + + +@pytest.fixture +def write_kv_err(write_kv): + write_kv.side_effect = vaultutil.VaultPermissionDeniedError("damn") + yield write_kv + + +@pytest.mark.parametrize( + "key,exp_path", + [ + ("sdb://myvault/path/to/foo/bar", "path/to/foo"), + ("sdb://myvault/path/to/foo?bar", "path/to/foo"), + ], +) +def test_set(write_kv, key, exp_path, data): """ - Test salt.sdb.vault.set_ while using the old - deprecated solution with a question mark. + Test salt.sdb.vault.set_ with current and old (question mark) syntax. + KV v1/2 distinction is unnecessary, since that is handled in the utils module. 
""" - version = {"v2": False, "data": None, "metadata": None, "type": None} - mock_version = MagicMock(return_value=version) - mock_vault = MagicMock() - mock_vault.return_value.status_code = 200 - with patch.dict(vault.__utils__, {"vault.make_request": mock_vault}), patch.dict( - vault.__utils__, {"vault.is_v2": mock_version} - ): - vault.set_("sdb://myvault/path/to/foo?bar", "super awesome") - - assert mock_vault.call_args_list == [ - call( - "POST", - "v1/sdb://myvault/path/to/foo", - json={"bar": "super awesome"}, - ) - ] - - -def test_get(): + vault.set_(key, "super awesome") + write_kv.assert_called_once_with( + f"sdb://myvault/{exp_path}", data, opts=ANY, context=ANY + ) + + +@pytest.mark.usefixtures("write_kv_err") +def test_set_err(): """ - Test salt.sdb.vault.get function + Test that salt.sdb.vault.set_ raises CommandExecutionError from other exceptions """ - version = {"v2": False, "data": None, "metadata": None, "type": None} - mock_version = MagicMock(return_value=version) - mock_vault = MagicMock() - mock_vault.return_value.status_code = 200 - mock_vault.return_value.json.return_value = {"data": {"bar": "test"}} - with patch.dict(vault.__utils__, {"vault.make_request": mock_vault}), patch.dict( - vault.__utils__, {"vault.is_v2": mock_version} - ): - assert vault.get("sdb://myvault/path/to/foo/bar") == "test" - - assert mock_vault.call_args_list == [call("GET", "v1/sdb://myvault/path/to/foo")] + with pytest.raises(salt.exceptions.CommandExecutionError, match="damn") as exc: + vault.set_("sdb://myvault/path/to/foo/bar", "foo") -def test_get_v2(): +@pytest.mark.parametrize( + "key,exp_path", + [ + ("sdb://myvault/path/to/foo/bar", "path/to/foo"), + ("sdb://myvault/path/to/foo?bar", "path/to/foo"), + ], +) +def test_get(read_kv, key, exp_path): """ - Test salt.sdb.vault.get function with kv v2 backend + Test salt.sdb.vault.get_ with current and old (question mark) syntax. + KV v1/2 distinction is unnecessary, since that is handled in the utils module. 
""" - version = { - "v2": True, - "data": "path/data/to/foo", - "metadata": "path/metadata/to/foo", - "type": "kv", - } - mock_version = MagicMock(return_value=version) - mock_vault = MagicMock() - mock_vault.return_value.status_code = 200 - mock_vault.return_value.json.return_value = {"data": {"data": {"bar": "test"}}} - with patch.dict(vault.__utils__, {"vault.make_request": mock_vault}), patch.dict( - vault.__utils__, {"vault.is_v2": mock_version} - ): - assert vault.get("sdb://myvault/path/to/foo/bar") == "test" - - assert mock_vault.call_args_list == [call("GET", "v1/path/data/to/foo")] - - -def test_get_question_mark(): + res = vault.get(key) + assert res == "super awesome" + read_kv.assert_called_once_with(f"sdb://myvault/{exp_path}", opts=ANY, context=ANY) + + +@pytest.mark.usefixtures("read_kv") +def test_get_missing_key(): """ - Test salt.sdb.vault.get while using the old - deprecated solution with a question mark. + Test that salt.sdb.vault.get returns None if vault does not have the key + but does have the entry. """ - version = {"v2": False, "data": None, "metadata": None, "type": None} - mock_version = MagicMock(return_value=version) - mock_vault = MagicMock() - mock_vault.return_value.status_code = 200 - mock_vault.return_value.json.return_value = {"data": {"bar": "test"}} - with patch.dict(vault.__utils__, {"vault.make_request": mock_vault}), patch.dict( - vault.__utils__, {"vault.is_v2": mock_version} - ): - assert vault.get("sdb://myvault/path/to/foo?bar") == "test" - assert mock_vault.call_args_list == [call("GET", "v1/sdb://myvault/path/to/foo")] + res = vault.get("sdb://myvault/path/to/foo/foo") + assert res is None +@pytest.mark.usefixtures("read_kv_not_found") def test_get_missing(): """ - Test salt.sdb.vault.get function returns None - if vault does not have an entry + Test that salt.sdb.vault.get returns None if vault does have the entry. 
""" - version = {"v2": False, "data": None, "metadata": None, "type": None} - mock_version = MagicMock(return_value=version) - mock_vault = MagicMock() - mock_vault.return_value.status_code = 404 - with patch.dict(vault.__utils__, {"vault.make_request": mock_vault}), patch.dict( - vault.__utils__, {"vault.is_v2": mock_version} - ): - assert vault.get("sdb://myvault/path/to/foo/bar") is None + res = vault.get("sdb://myvault/path/to/foo/foo") + assert res is None - assert mock_vault.call_args_list == [call("GET", "v1/sdb://myvault/path/to/foo")] +def test_get_whole_dataset(read_kv_not_found_once, data): + """ + Test that salt.sdb.vault.get retries the whole path without key if the + first request reported the dataset was not found. + """ + res = vault.get("sdb://myvault/path/to/foo") + assert res == data + read_kv_not_found_once.assert_called_with( + "sdb://myvault/path/to/foo", opts=ANY, context=ANY + ) + assert read_kv_not_found_once.call_count == 2 -def test_get_missing_key(): + +@pytest.mark.usefixtures("read_kv_err") +def test_get_err(): """ - Test salt.sdb.vault.get function returns None - if vault does not have the key but does have the entry + Test that salt.sdb.vault.get raises CommandExecutionError from other exceptions """ - version = {"v2": False, "data": None, "metadata": None, "type": None} - mock_version = MagicMock(return_value=version) - mock_vault = MagicMock() - mock_vault.return_value.status_code = 200 - mock_vault.return_value.json.return_value = {"data": {"bar": "test"}} - with patch.dict(vault.__utils__, {"vault.make_request": mock_vault}), patch.dict( - vault.__utils__, {"vault.is_v2": mock_version} - ): - assert vault.get("sdb://myvault/path/to/foo/foo") is None - - assert mock_vault.call_args_list == [call("GET", "v1/sdb://myvault/path/to/foo")] + with pytest.raises(salt.exceptions.CommandExecutionError, match="damn") as exc: + vault.get("sdb://myvault/path/to/foo/bar") diff --git a/tests/pytests/unit/states/test_vault.py 
b/tests/pytests/unit/states/test_vault.py new file mode 100644 index 000000000000..f392c0f5022f --- /dev/null +++ b/tests/pytests/unit/states/test_vault.py @@ -0,0 +1,112 @@ +import pytest + +import salt.modules.vault as vaultexe +import salt.states.vault as vault +from tests.support.mock import Mock, patch + + +@pytest.fixture +def configure_loader_modules(): + return {vault: {}} + + +@pytest.fixture +def policy_fetch(): + fetch = Mock(return_value="test-rules", spec=vaultexe.policy_fetch) + with patch.dict(vault.__salt__, {"vault.policy_fetch": fetch}): + yield fetch + + +@pytest.fixture +def policy_write(): + write = Mock(return_value=True, spec=vaultexe.policy_write) + with patch.dict(vault.__salt__, {"vault.policy_write": write}): + yield write + + +@pytest.mark.usefixtures("policy_fetch") +@pytest.mark.parametrize("test", [False, True]) +def test_policy_present_no_changes(test): + """ + Test that when a policy is present as requested, no changes + are reported for success, regardless of opts["test"]. + """ + with patch.dict(vault.__opts__, {"test": test}): + res = vault.policy_present("test-policy", "test-rules") + assert res["result"] + assert not res["changes"] + + +@pytest.mark.parametrize("test", [False, True]) +def test_policy_present_create(policy_fetch, policy_write, test): + """ + Test that when a policy does not exist, it will be created. + The function should respect opts["test"]. 
+ """ + policy_fetch.return_value = None + with patch.dict(vault.__opts__, {"test": test}): + res = vault.policy_present("test-policy", "test-rules") + assert res["changes"] + if test: + assert res["result"] is None + assert "would be created" in res["comment"] + policy_write.assert_not_called() + else: + assert res["result"] + assert "has been created" in res["comment"] + policy_write.assert_called_once_with("test-policy", "test-rules") + + +@pytest.mark.usefixtures("policy_fetch") +@pytest.mark.parametrize("test", [False, True]) +def test_policy_present_changes(policy_write, test): + """ + Test that when a policy exists, but the rules need to be updated, + it is detected and respects the value of opts["test"]. + """ + with patch.dict(vault.__opts__, {"test": test}): + res = vault.policy_present("test-policy", "new-test-rules") + assert res["changes"] + if test: + assert res["result"] is None + assert "would be updated" in res["comment"] + policy_write.assert_not_called() + else: + assert res["result"] + assert "has been updated" in res["comment"] + policy_write.assert_called_once_with("test-policy", "new-test-rules") + + +@pytest.mark.parametrize("test", [False, True]) +def test_policy_absent_no_changes(policy_fetch, test): + """ + Test that when a policy is absent as requested, no changes + are reported for success, regardless of opts["test"]. + """ + policy_fetch.return_value = None + with patch.dict(vault.__opts__, {"test": test}): + res = vault.policy_absent("test-policy") + assert res["result"] + assert not res["changes"] + + +@pytest.mark.usefixtures("policy_fetch") +@pytest.mark.parametrize("test", [False, True]) +def test_policy_absent_changes(test): + """ + Test that when a policy exists, it will be deleted. + The function should respect opts["test"]. 
+ """ + delete = Mock(spec=vaultexe.policy_delete) + with patch.dict(vault.__salt__, {"vault.policy_delete": delete}): + with patch.dict(vault.__opts__, {"test": test}): + res = vault.policy_absent("test-policy") + assert res["changes"] + if test: + assert res["result"] is None + assert "would be deleted" in res["comment"] + delete.assert_not_called() + else: + assert res["result"] + assert "has been deleted" in res["comment"] + delete.assert_called_once_with("test-policy") diff --git a/tests/pytests/unit/utils/test_vault.py b/tests/pytests/unit/utils/test_vault.py index 9e3dfe59f166..500e023cdf63 100644 --- a/tests/pytests/unit/utils/test_vault.py +++ b/tests/pytests/unit/utils/test_vault.py @@ -1,572 +1,4120 @@ -import json -import logging -import threading -from copy import copy +import time +from copy import deepcopy + +# this needs to be from! see test_iso_to_timestamp_polyfill +from datetime import datetime import pytest +import requests + +import salt.exceptions +import salt.utils.vault as vault +from tests.support.mock import ANY, MagicMock, Mock, call, patch + + +@pytest.fixture +def server_config(): + return { + "url": "http://127.0.0.1:8200", + "namespace": None, + "verify": None, + } + + +@pytest.fixture(params=["token", "approle"]) +def test_config(server_config, request): + defaults = { + "auth": { + "approle_mount": "approle", + "approle_name": "salt-master", + "method": "token", + "secret_id": None, + "token_lifecycle": { + "minimum_ttl": 10, + "renew_increment": None, + }, + }, + "cache": { + "backend": "session", + "config": 3600, + "secret": "ttl", + }, + "issue": { + "allow_minion_override_params": False, + "type": "token", + "approle": { + "mount": "salt-minions", + "params": { + "bind_secret_id": True, + "secret_id_num_uses": 1, + "secret_id_ttl": 60, + "token_explicit_max_ttl": 60, + "token_num_uses": 10, + }, + }, + "token": { + "role_name": None, + "params": { + "explicit_max_ttl": None, + "num_uses": 1, + }, + }, + "wrap": "30s", + }, + 
"issue_params": {}, + "metadata": { + "entity": { + "minion-id": "{minion}", + }, + "token": { + "saltstack-jid": "{jid}", + "saltstack-minion": "{minion}", + "saltstack-user": "{user}", + }, + }, + "policies": { + "assign": [ + "saltstack/minions", + "saltstack/{minion}", + ], + "cache_time": 60, + "refresh_pillar": None, + }, + "server": server_config, + } + + if request.param == "token": + defaults["auth"]["token"] = "test-token" + return defaults + + if request.param == "wrapped_token": + defaults["auth"]["method"] = "wrapped_token" + defaults["auth"]["token"] = "test-wrapped-token" + return defaults + + if request.param == "approle": + defaults["auth"]["method"] = "approle" + defaults["auth"]["role_id"] = "test-role-id" + defaults["auth"]["secret_id"] = "test-secret-id" + return defaults + + if request.param == "approle_no_secretid": + defaults["auth"]["method"] = "approle" + defaults["auth"]["role_id"] = "test-role-id" + return defaults + + +@pytest.fixture(params=["token", "approle"]) +def test_remote_config(server_config, request): + defaults = { + "auth": { + "approle_mount": "approle", + "approle_name": "salt-master", + "method": "token", + "secret_id": None, + "token_lifecycle": { + "minimum_ttl": 10, + "renew_increment": None, + }, + }, + "cache": { + "backend": "session", + "config": 3600, + "kv_metadata": "connection", + "secret": "ttl", + }, + "server": server_config, + } + + if request.param == "token": + defaults["auth"]["token"] = "test-token" + return defaults + + if request.param == "wrapped_token": + defaults["auth"]["method"] = "wrapped_token" + defaults["auth"]["token"] = "test-wrapped-token" + return defaults + + if request.param == "token_changed": + defaults["auth"]["token"] = "test-token-changed" + return defaults + + if request.param == "approle": + defaults["auth"]["method"] = "approle" + defaults["auth"]["role_id"] = "test-role-id" + # actual remote config would not contain secret_id, but + # this is used for testing both from local 
and from remote + defaults["auth"]["secret_id"] = "test-secret-id" + return defaults + + if request.param == "approle_no_secretid": + defaults["auth"]["method"] = "approle" + defaults["auth"]["role_id"] = "test-role-id" + return defaults + + # this happens when wrapped role_ids are merged by _query_master + if request.param == "approle_wrapped_roleid": + defaults["auth"]["method"] = "approle" + defaults["auth"]["role_id"] = {"role_id": "test-role-id"} + # actual remote config does not contain secret_id + defaults["auth"]["secret_id"] = True + return defaults + + +@pytest.fixture +def role_id_response(): + return { + "request_id": "c85838c5-ecfe-6d07-4b28-1935ac2e304a", + "lease_id": "", + "lease_duration": 0, + "renewable": False, + "data": {"role_id": "58b4c650-3d13-5932-a2fa-03865c8e85d7"}, + "warnings": None, + } + + +@pytest.fixture +def secret_id_response(): + return { + "request_id": "c85838c5-ecfe-6d07-4b28-1935ac2e304a", + "lease_id": "", + "lease_duration": 0, + "renewable": False, + "data": { + "secret_id_accessor": "84896a0c-1347-aa90-a4f6-aca8b7558780", + "secret_id": "841771dc-11c9-bbc7-bcac-6a3945a69cd9", + "secret_id_ttl": 1337, + }, + "warnings": None, + } + + +@pytest.fixture +def secret_id_meta_response(): + return { + "request_id": "7c97d03d-2166-6217-8da1-19604febae5c", + "lease_id": "", + "lease_duration": 0, + "renewable": False, + "data": { + "cidr_list": [], + "creation_time": "2022-08-22T17:37:07.753989459+00:00", + "expiration_time": "2339-07-13T13:23:46.753989459+00:00", + "last_updated_time": "2022-08-22T17:37:07.753989459+00:00", + "metadata": {}, + "secret_id_accessor": "b1c88755-f2f5-2fd2-4bcc-cade95f6ba96", + "secret_id_num_uses": 0, + "secret_id_ttl": 9999999999, + "token_bound_cidrs": [], + }, + "warnings": None, + } + + +@pytest.fixture +def wrapped_role_id_response(): + return { + "request_id": "", + "lease_id": "", + "lease_duration": 0, + "renewable": False, + "data": None, + "warnings": None, + "wrap_info": { + "token": 
"test-wrapping-token", + "accessor": "test-wrapping-token-accessor", + "ttl": 180, + "creation_time": "2022-09-10T13:37:12.123456789+00:00", + "creation_path": "auth/approle/role/test-minion/role-id", + "wrapped_accessor": "", + }, + } + + +@pytest.fixture +def wrapped_secret_id_response(): + return { + "request_id": "", + "lease_id": "", + "lease_duration": 0, + "renewable": False, + "data": None, + "warnings": None, + "wrap_info": { + "token": "test-wrapping-token", + "accessor": "test-wrapping-token-accessor", + "ttl": 180, + "creation_time": "2022-09-10T13:37:12.123456789+00:00", + "creation_path": "auth/approle/role/test-minion/secret-id", + "wrapped_accessor": "", + }, + } + + +@pytest.fixture +def wrapped_role_id_lookup_response(): + return { + "request_id": "31e7020e-3ce3-2c63-e453-d5da8a9890f1", + "lease_id": "", + "renewable": False, + "lease_duration": 0, + "data": { + "creation_path": "auth/approle/role/test-minion/role-id", + "creation_time": "2022-09-10T13:37:12.123456789+00:00", + "creation_ttl": 180, + }, + "wrap_info": None, + "warnings": None, + "auth": None, + } + + +@pytest.fixture +def wrapped_token_auth_response(): + return { + "request_id": "", + "lease_id": "", + "lease_duration": 0, + "renewable": False, + "data": None, + "warnings": None, + "wrap_info": { + "token": "test-wrapping-token", + "accessor": "test-wrapping-token-accessor", + "ttl": 180, + "creation_time": "2022-09-10T13:37:12.123456789+00:00", + "creation_path": "auth/token/create/salt-minion", + "wrapped_accessor": "", + }, + } + + +@pytest.fixture +def token_lookup_self_response(): + return { + "request_id": "0e8c388e-2cb6-bcb2-83b7-625127d568bb", + "lease_id": "", + "lease_duration": 0, + "renewable": False, + "data": { + "accessor": "test-token-accessor", + "creation_time": 1661188581, + "creation_ttl": 9999999999, + "display_name": "", + "entity_id": "", + "expire_time": "2339-07-13T11:03:00.473212541+00:00", + "explicit_max_ttl": 0, + "id": "test-token", + "issue_time": 
"2022-08-22T17:16:21.473219641+00:00", + "meta": {}, + "num_uses": 0, + "orphan": True, + "path": "", + "policies": ["default"], + "renewable": True, + "ttl": 9999999999, + "type": "service", + }, + "warnings": None, + } + + +@pytest.fixture +def token_renew_self_response(): + return { + "auth": { + "client_token": "test-token", + "policies": ["default", "renewed"], + "metadata": {}, + }, + "lease_duration": 3600, + "renewable": True, + } + + +@pytest.fixture +def token_renew_other_response(): + return { + "auth": { + "client_token": "other-test-token", + "policies": ["default", "renewed"], + "metadata": {}, + }, + "lease_duration": 3600, + "renewable": True, + } + + +@pytest.fixture +def token_renew_accessor_response(): + return { + "auth": { + "client_token": "", + "policies": ["default", "renewed"], + "metadata": {}, + }, + "lease_duration": 3600, + "renewable": True, + } + + +@pytest.fixture +def token_auth(): + return { + "request_id": "0e8c388e-2cb6-bcb2-83b7-625127d568bb", + "lease_id": "", + "lease_duration": 0, + "renewable": False, + "auth": { + "client_token": "test-token", + "renewable": True, + "lease_duration": 9999999999, + "num_uses": 0, + "creation_time": 1661188581, + }, + } + + +@pytest.fixture +def kvv1_meta_response(): + return { + "request_id": "b82f2df7-a9b6-920c-0ed2-a3463b996f9e", + "lease_id": "", + "renewable": False, + "lease_duration": 0, + "data": { + "accessor": "kv_f8731f1b", + "config": { + "default_lease_ttl": 0, + "force_no_cache": False, + "max_lease_ttl": 0, + }, + "description": "key/value secret storage", + "external_entropy_access": False, + "local": False, + "options": None, + "path": "secret/", + "seal_wrap": False, + "type": "kv", + "uuid": "1d9431ac-060a-9b63-4572-3ca7ffd78347", + }, + "wrap_info": None, + "warnings": None, + "auth": None, + } + + +@pytest.fixture +def kvv2_meta_response(): + return { + "request_id": "b82f2df7-a9b6-920c-0ed2-a3463b996f9e", + "lease_id": "", + "renewable": False, + "lease_duration": 0, + 
"data": { + "accessor": "kv_f8731f1b", + "config": { + "default_lease_ttl": 0, + "force_no_cache": False, + "max_lease_ttl": 0, + }, + "description": "key/value secret storage", + "external_entropy_access": False, + "local": False, + "options": { + "version": "2", + }, + "path": "secret/", + "seal_wrap": False, + "type": "kv", + "uuid": "1d9431ac-060a-9b63-4572-3ca7ffd78347", + }, + "wrap_info": None, + "warnings": None, + "auth": None, + } + + +@pytest.fixture +def kvv1_info(): + return { + "v2": False, + "data": "secret/some/path", + "metadata": "secret/some/path", + "delete": "secret/some/path", + "type": "kv", + } + + +@pytest.fixture +def kvv2_info(): + return { + "v2": True, + "data": "secret/data/some/path", + "metadata": "secret/metadata/some/path", + "delete": "secret/data/some/path", + "delete_versions": "secret/delete/some/path", + "destroy": "secret/destroy/some/path", + "type": "kv", + } + + +@pytest.fixture +def no_kv_info(): + return { + "v2": False, + "data": "secret/some/path", + "metadata": "secret/some/path", + "delete": "secret/some/path", + "type": None, + } + + +@pytest.fixture +def kvv1_response(): + return { + "request_id": "35df4df1-c3d8-b270-0682-ddb0160c7450", + "lease_id": "", + "renewable": False, + "lease_duration": 0, + "data": { + "foo": "bar", + }, + "wrap_info": None, + "warnings": None, + "auth": None, + } + + +@pytest.fixture +def kvv2_response(): + return { + "request_id": "35df4df1-c3d8-b270-0682-ddb0160c7450", + "lease_id": "", + "renewable": False, + "lease_duration": 0, + "data": { + "data": {"foo": "bar"}, + "metadata": { + "created_time": "2020-05-02T07:26:12.180848003Z", + "deletion_time": "", + "destroyed": False, + "version": 1, + }, + }, + "wrap_info": None, + "warnings": None, + "auth": None, + } + + +@pytest.fixture +def kv_list_response(): + return { + "request_id": "35df4df1-c3d8-b270-0682-ddb0160c7450", + "lease_id": "", + "renewable": False, + "lease_duration": 0, + "data": { + "keys": ["foo"], + }, + 
"wrap_info": None, + "warnings": None, + "auth": None, + } + + +@pytest.fixture +def req_unwrapping(wrapped_role_id_lookup_response, role_id_response, req): + req.side_effect = ( + lambda method, url, **kwargs: _mock_json_response( + wrapped_role_id_lookup_response + ) + if url.endswith("sys/wrapping/lookup") + else _mock_json_response(role_id_response) + ) + yield req + + +@pytest.fixture(params=[None, "valid_token"]) +def client(server_config, request): + if request.param is None: + return vault.VaultClient(**server_config) + if request.param == "valid_token": + token = request.getfixturevalue(request.param) + auth = Mock(spec=vault.VaultTokenAuth) + auth.is_renewable.return_value = True + auth.is_valid.return_value = True + auth.get_token.return_value = token + return vault.AuthenticatedVaultClient(auth, **server_config) + if request.param == "invalid_token": + token = request.getfixturevalue(request.param) + auth = Mock(spec=vault.VaultTokenAuth) + auth.is_renewable.return_value = True + auth.is_valid.return_value = False + auth.get_token.side_effect = vault.VaultAuthExpired + return vault.AuthenticatedVaultClient(auth, **server_config) + + +@pytest.fixture +def valid_token(token_auth): + token = MagicMock(spec=vault.VaultToken, **token_auth["auth"]) + token.is_valid.return_value = True + token.is_renewable.return_value = True + token.payload.return_value = {"token": token_auth["auth"]["client_token"]} + token.__str__.return_value = token_auth["auth"]["client_token"] + token.to_dict.return_value = token_auth["auth"] + return token + + +@pytest.fixture +def invalid_token(valid_token): + valid_token.is_valid.return_value = False + valid_token.is_renewable.return_value = False + return valid_token + + +@pytest.fixture +def metadata_nocache(): + cache = Mock(spec=vault.VaultCache) + cache.get.return_value = None + return cache + + +@pytest.fixture(params=["v1", "v2"]) +def kv_meta(request, metadata_nocache): + client = Mock(spec=vault.AuthenticatedVaultClient) + if 
request.param == "invalid": + res = {"wrap_info": {}} + else: + res = request.getfixturevalue(f"kv{request.param}_meta_response") + client.get.return_value = res + return vault.VaultKV(client, metadata_nocache) + + +@pytest.fixture(params=["v1", "v2"]) +def kv_meta_cached(request): + cache = Mock(spec=vault.VaultCache) + client = Mock(spec=vault.AuthenticatedVaultClient) + kv_meta_response = request.getfixturevalue(f"kv{request.param}_meta_response") + client.get.return_value = kv_meta_response + cache.get.return_value = {"secret/some/path": kv_meta_response["data"]} + return vault.VaultKV(client, cache) + + +@pytest.fixture +def kvv1(kvv1_info, kvv1_response, metadata_nocache, kv_list_response): + client = Mock(spec=vault.AuthenticatedVaultClient) + client.get.return_value = kvv1_response + client.post.return_value = True + client.patch.return_value = True + client.list.return_value = kv_list_response + client.delete.return_value = True + with patch("salt.utils.vault.VaultKV.is_v2", Mock(return_value=kvv1_info)): + yield vault.VaultKV(client, metadata_nocache) + + +@pytest.fixture +def kvv2(kvv2_info, kvv2_response, metadata_nocache, kv_list_response): + client = Mock(spec=vault.AuthenticatedVaultClient) + client.get.return_value = kvv2_response + client.post.return_value = True + client.patch.return_value = True + client.list.return_value = kv_list_response + client.delete.return_value = True + with patch("salt.utils.vault.VaultKV.is_v2", Mock(return_value=kvv2_info)): + yield vault.VaultKV(client, metadata_nocache) + + +@pytest.fixture +def req(): + with patch("requests.request", autospec=True) as req: + yield req + + +@pytest.fixture +def req_failed(req, request): + status_code = getattr(request, "param", 502) + req.return_value = _mock_json_response({"errors": ["foo"]}, status_code=status_code) + yield req + + +@pytest.fixture +def req_success(req): + req.return_value = _mock_json_response(None, status_code=204) + yield req + + +@pytest.fixture(params=[200]) 
+def req_any(req, request): + data = {} + if request.param != 204: + data["data"] = {"foo": "bar"} + if request.param >= 400: + data["errors"] = ["foo"] + req.return_value = _mock_json_response(data, status_code=request.param) + yield req + + +def _mock_json_response(data, status_code=200, reason=""): + """ + Mock helper for http response + """ + response = Mock(spec=requests.models.Response) + response.json.return_value = data + response.status_code = status_code + response.reason = reason + if status_code < 400: + response.ok = True + else: + response.ok = False + response.raise_for_status.side_effect = requests.exceptions.HTTPError + return response + + +@pytest.fixture( + params=["MASTER", "MASTER_IMPERSONATING", "MINION_LOCAL", "MINION_REMOTE"] +) +def salt_runtype(request): + runtype = Mock(spec=vault._get_salt_run_type) + runtype.return_value = getattr(vault, f"SALT_RUNTYPE_{request.param}") + with patch("salt.utils.vault._get_salt_run_type", runtype): + yield + + +@pytest.fixture( + params=[ + "master", + "master_impersonating", + "minion_local_1", + "minion_local_2", + "minion_local_3", + "minion_remote", + ] +) +def opts_runtype(request): + return { + "master": { + "__role": "master", + "vault": {}, + }, + "master_peer_run": { + "__role": "master", + "grains": { + "id": "test-minion", + }, + "vault": {}, + }, + "master_impersonating": { + "__role": "master", + "minion_id": "test-minion", + "grains": { + "id": "test-minion", + }, + "vault": {}, + }, + "minion_local_1": { + "grains": {"id": "test-minion"}, + "local": True, + }, + "minion_local_2": { + "file_client": "local", + "grains": {"id": "test-minion"}, + }, + "minion_local_3": { + "grains": {"id": "test-minion"}, + "master_type": "disable", + }, + "minion_remote": { + "grains": {"id": "test-minion"}, + }, + }[request.param] + + +############################################ +# Wrapper functions tests +############################################ + + +@pytest.mark.parametrize( + "wrapper,param,result", 
+ [ + ("read_kv", None, {"foo": "bar"}), + ("write_kv", {"foo": "bar"}, True), + ("patch_kv", {"foo": "bar"}, True), + ("delete_kv", None, True), + ("destroy_kv", [0], True), + ("list_kv", None, ["foo"]), + ], +) +@pytest.mark.parametrize("exception", ["VaultPermissionDeniedError"]) +def test_kv_wrapper_handles_auth_exceptions(wrapper, param, result, exception): + """ + Test that *_kv wrappers retry with a new client if the authentication might + be outdated. + """ + func = getattr(vault, wrapper) + exc = getattr(vault, exception) + args = ["secret/some/path"] + if param: + args.append(param) + args += [{}, {}] + with patch("salt.utils.vault._get_kv", autospec=True) as getkv: + with patch("salt.utils.vault.clear_cache", autospec=True) as cache: + kv = Mock(spec=vault.VaultKV) + getattr(kv, wrapper.rstrip("_kv")).side_effect = (exc, result) + getkv.return_value = kv + res = func(*args) + assert res == result + cache.assert_called_once() + + +############################################ +# Factory tests +############################################ + + +class TestGetAuthdClient: + @pytest.fixture + def client_valid(self): + client = Mock(spec=vault.AuthenticatedVaultClient) + client.token_valid.return_value = True + return client + + @pytest.fixture + def client_invalid(self): + client = Mock(spec=vault.AuthenticatedVaultClient) + client.token_valid.return_value = False + return client + + @pytest.fixture + def client_renewable(self): + client = Mock(spec=vault.AuthenticatedVaultClient) + client.auth.get_token.return_value.is_renewable.return_value = True + client.auth.get_token.return_value.is_valid.return_value = False + client.token_valid.return_value = True + return client + + @pytest.fixture + def client_unrenewable(self): + client = Mock(spec=vault.AuthenticatedVaultClient) + client.auth.get_token.return_value.is_renewable.return_value = False + client.auth.get_token.return_value.is_valid.return_value = False + client.token_valid.side_effect = (False, True) + 
return client + + @pytest.fixture + def client_renewable_max_ttl(self): + client = Mock(spec=vault.AuthenticatedVaultClient) + client.auth.get_token.return_value.is_renewable.return_value = True + client.auth.get_token.return_value.is_valid.return_value = False + client.token_valid.side_effect = (False, True) + return client + + @pytest.fixture( + params=[ + {"auth": {"token_lifecycle": {"minimum_ttl": 10, "renew_increment": False}}} + ] + ) + def build_succeeds(self, client_valid, request): + with patch("salt.utils.vault._build_authd_client", autospec=True) as build: + build.return_value = (client_valid, request.param) + yield build + + @pytest.fixture( + params=["VaultAuthExpired", "VaultConfigExpired", "VaultPermissionDeniedError"] + ) + def build_fails(self, request): + exception = request.param + with patch("salt.utils.vault._build_authd_client", autospec=True) as build: + build.side_effect = getattr(vault, exception) + yield build + + @pytest.fixture( + params=["VaultAuthExpired", "VaultConfigExpired", "VaultPermissionDeniedError"] + ) + def build_exception_first(self, client_valid, request): + exception = request.param + with patch("salt.utils.vault._build_authd_client", autospec=True) as build: + build.side_effect = ( + getattr(vault, exception), + ( + client_valid, + { + "auth": { + "token_lifecycle": { + "minimum_ttl": 10, + "renew_increment": False, + } + } + }, + ), + ) + yield build + + @pytest.fixture( + params=[ + {"auth": {"token_lifecycle": {"minimum_ttl": 10, "renew_increment": False}}} + ] + ) + def build_invalid_first(self, client_valid, client_invalid, request): + with patch("salt.utils.vault._build_authd_client", autospec=True) as build: + build.side_effect = ( + (client_invalid, request.param), + (client_valid, request.param), + ) + yield build + + @pytest.fixture( + params=[ + {"auth": {"token_lifecycle": {"minimum_ttl": 10, "renew_increment": 60}}} + ] + ) + def build_renewable(self, client_renewable, request): + with 
patch("salt.utils.vault._build_authd_client", autospec=True) as build: + build.return_value = (client_renewable, request.param) + yield build + + @pytest.fixture( + params=[ + {"auth": {"token_lifecycle": {"minimum_ttl": 10, "renew_increment": 60}}} + ] + ) + def build_unrenewable(self, client_unrenewable, request): + with patch("salt.utils.vault._build_authd_client", autospec=True) as build: + build.return_value = (client_unrenewable, request.param) + yield build + + @pytest.fixture( + params=[ + {"auth": {"token_lifecycle": {"minimum_ttl": 10, "renew_increment": 60}}} + ] + ) + def build_renewable_max_ttl(self, client_renewable_max_ttl, request): + with patch("salt.utils.vault._build_authd_client", autospec=True) as build: + build.return_value = (client_renewable_max_ttl, request.param) + yield build + + @pytest.fixture(autouse=True) + def clear_cache(self): + with patch("salt.utils.vault.clear_cache", autospec=True) as clear: + clear.return_value = True + yield clear + + @pytest.mark.parametrize("get_config", [False, True]) + def test_get_authd_client_succeeds(self, build_succeeds, clear_cache, get_config): + """ + Ensure a valid client is returned directly without clearing cache. + """ + client = vault.get_authd_client({}, {}, get_config=get_config) + if get_config: + client, config = client + client.token_valid.assert_called_with(10, remote=False) + assert client.token_valid() + clear_cache.assert_not_called() + assert build_succeeds.call_count == 1 + if get_config: + assert config == { + "auth": { + "token_lifecycle": {"minimum_ttl": 10, "renew_increment": False} + } + } + + @pytest.mark.parametrize("get_config", [False, True]) + def test_get_authd_client_invalid( + self, build_invalid_first, clear_cache, get_config + ): + """ + Ensure invalid clients are not returned but rebuilt after + clearing cache. 
+ """ + client = vault.get_authd_client({}, {}, get_config=get_config) + if get_config: + client, config = client + client.token_valid.assert_called_with(10, remote=False) + assert client.token_valid() + clear_cache.assert_called_once_with({}, {}) + assert build_invalid_first.call_count == 2 + if get_config: + assert config == { + "auth": { + "token_lifecycle": {"minimum_ttl": 10, "renew_increment": False} + } + } + + @pytest.mark.parametrize("get_config", [False, True]) + def test_get_authd_client_exception( + self, build_exception_first, clear_cache, get_config + ): + """ + Ensure relevant exceptions are caught, cache is cleared and + new credentials are requested. + """ + client = vault.get_authd_client({}, {}, get_config=get_config) + if get_config: + client, config = client + client.token_valid.assert_called_with(10, remote=False) + assert client.token_valid() + clear_cache.assert_called_once_with({}, {}) + assert build_exception_first.call_count == 2 + if get_config: + assert config == { + "auth": { + "token_lifecycle": {"minimum_ttl": 10, "renew_increment": False} + } + } + + def test_get_authd_client_fails(self, build_fails, clear_cache): + """ + Ensure exceptions are leaked after one retry. + """ + with pytest.raises(build_fails.side_effect): + vault.get_authd_client({}, {}) + clear_cache.assert_called_once() + + @pytest.mark.usefixtures("build_renewable") + def test_get_authd_client_renews_token(self, clear_cache): + """ + Ensure renewable tokens are renewed when necessary. + """ + client = vault.get_authd_client({}, {}, get_config=False) + client.token_renew.assert_called_once_with(increment=60) + clear_cache.assert_not_called() + + @pytest.mark.usefixtures("build_unrenewable") + def test_get_authd_client_unrenewable_new_token(self, clear_cache): + """ + Ensure minimum_ttl is respected such that a new token is requested, + even though the current one would still be valid for some time. 
+ """ + client = vault.get_authd_client({}, {}, get_config=False) + client.token_renew.assert_not_called() + clear_cache.assert_called_once() + + @pytest.mark.usefixtures("build_renewable_max_ttl") + def test_get_authd_client_renewable_token_max_ttl_insufficient( + self, build_renewable_max_ttl, clear_cache + ): + """ + Ensure minimum_ttl is respected when a token can be renewed, but the + new ttl does not satisfy it. + """ + client = vault.get_authd_client({}, {}, get_config=False) + client.token_renew.assert_called_once_with(increment=60) + clear_cache.assert_called_once() + + +class TestBuildAuthdClient: + @pytest.fixture(autouse=True) + def cbank(self): + with patch("salt.utils.vault._get_cache_bank", autospec=True) as cbank: + cbank.return_value = "vault" + yield cbank + + @pytest.fixture(autouse=True) + def conn_config(self): + with patch( + "salt.utils.vault._get_connection_config", autospec=True + ) as conn_config: + yield conn_config + + @pytest.fixture(autouse=True) + def fetch_secret_id(self, secret_id_response): + with patch("salt.utils.vault._fetch_secret_id") as fetch_secret_id: + fetch_secret_id.return_value = vault.VaultSecretId( + **secret_id_response["data"] + ) + yield fetch_secret_id + + @pytest.fixture(autouse=True) + def fetch_token(self, token_auth): + with patch("salt.utils.vault._fetch_token") as fetch_token: + fetch_token.return_value = vault.VaultToken(**token_auth["auth"]) + yield fetch_token + + @pytest.fixture(params=["token", "secret_id", "both", "none"]) + def cached(self, token_auth, secret_id_response, request): + cached_what = request.param + + def _cache(context, cbank, ckey, *args, **kwargs): + token = Mock(spec=vault.VaultAuthCache) + token.get.return_value = None + approle = Mock(spec=vault.VaultAuthCache) + approle.get.return_value = None + if cached_what in ["token", "both"]: + token.get.return_value = vault.VaultToken(**token_auth["auth"]) + if cached_what in ["secret_id", "both"]: + approle.get.return_value = 
vault.VaultSecretId( + **secret_id_response["data"] + ) + return token if ckey == vault.TOKEN_CKEY else approle + + cache = MagicMock(spec=vault.VaultAuthCache) + cache.side_effect = _cache + with patch("salt.utils.vault.VaultAuthCache", cache): + yield cache + + @pytest.mark.parametrize( + "test_remote_config", + ["token", "approle", "approle_no_secretid", "approle_wrapped_roleid"], + indirect=True, + ) + def test_build_authd_client( + self, test_remote_config, conn_config, fetch_secret_id, cached + ): + """ + Ensure credentials are only requested if necessary. + """ + conn_config.return_value = (test_remote_config, None) + client, config = vault._build_authd_client({}, {}) + assert client.token_valid(remote=False) + if test_remote_config["auth"]["method"] == "approle": + if ( + not test_remote_config["auth"]["secret_id"] + or cached(None, None, vault.TOKEN_CKEY).get() + or cached(None, None, "secret_id").get() + ): + # In case a secret_id is not necessary or only a cached token is available, + # make sure we do not request a new secret ID from the master + fetch_secret_id.assert_not_called() + else: + fetch_secret_id.assert_called_once() + + +class TestGetConnectionConfig: + @pytest.fixture + def cached(self, test_remote_config): + cache = Mock(spec=vault.VaultConfigCache) + # cached config does not include tokens + test_remote_config["auth"].pop("token", None) + cache.get.return_value = test_remote_config + with patch("salt.utils.vault._get_config_cache", autospec=True) as factory: + factory.return_value = cache + yield cache + + @pytest.fixture + def uncached(self): + cache = Mock(spec=vault.VaultConfigCache) + cache.get.return_value = None + with patch("salt.utils.vault._get_config_cache", autospec=True) as factory: + factory.return_value = cache + yield cache + + @pytest.fixture + def local(self): + with patch("salt.utils.vault._use_local_config", autospec=True) as local: + yield local + + @pytest.fixture + def remote(self, test_remote_config): + with 
patch("salt.utils.vault._query_master") as query: + query.return_value = test_remote_config + yield query + + @pytest.fixture + def remote_unused(self): + with patch("salt.utils.vault._query_master") as query: + yield query + + @pytest.mark.parametrize( + "salt_runtype,force_local", + [ + ("MASTER", False), + ("MASTER_IMPERSONATING", True), + ("MINION_LOCAL", False), + ], + indirect=["salt_runtype"], + ) + def test_get_connection_config_local(self, salt_runtype, force_local, local): + """ + Ensure the local configuration is used when + a) running on master + b) running on master impersonating a minion when called from runner + c) running on minion in local mode + """ + vault._get_connection_config("vault", {}, {}, force_local=force_local) + local.assert_called_once() + + def test_get_connection_config_cached(self, cached, remote_unused): + """ + Ensure cache is respected + """ + res, embedded_token = vault._get_connection_config("vault", {}, {}) + assert res == cached.get() + assert embedded_token is None + cached.store.assert_not_called() + remote_unused.assert_not_called() + + def test_get_connection_config_uncached(self, uncached, remote): + """ + Ensure uncached configuration is treated as expected, especially + that the embedded token is removed and returned separately. 
+ """ + res, embedded_token = vault._get_connection_config("vault", {}, {}) + uncached.store.assert_called_once() + remote.assert_called_once() + data = remote() + token = data["auth"].pop("token", None) + assert res == data + assert embedded_token == token + + @pytest.mark.usefixtures("uncached", "local") + @pytest.mark.parametrize("test_remote_config", ["token"], indirect=True) + @pytest.mark.parametrize( + "conf_location,called", + [("local", False), ("master", True), (None, False), ("doesnotexist", False)], + ) + def test_get_connection_config_location(self, conf_location, called, remote): + """ + test the _get_connection_config function when + config_location is set in opts + """ + opts = {"vault": {"config_location": conf_location}, "file_client": "local"} + if conf_location == "doesnotexist": + with pytest.raises( + salt.exceptions.InvalidConfigError, + match=".*config_location must be either local or master.*", + ): + vault._get_connection_config("vault", opts, {}) + else: + vault._get_connection_config("vault", opts, {}) + if called: + remote.assert_called() + else: + remote.assert_not_called() + + +class TestFetchSecretId: + @pytest.fixture + def cached(self, secret_id_response): + cache = Mock(spec=vault.VaultAuthCache) + cache.get.return_value = vault.VaultSecretId(**secret_id_response["data"]) + return cache + + @pytest.fixture + def uncached(self): + cache = Mock(spec=vault.VaultConfigCache) + cache.get.return_value = None + return cache + + @pytest.fixture + def remote(self, secret_id_response, server_config): + with patch("salt.utils.vault._query_master") as query: + query.return_value = { + "data": secret_id_response["data"], + "server": server_config, + } + yield query + + @pytest.fixture + def remote_unused(self): + with patch("salt.utils.vault._query_master") as query: + yield query + + @pytest.fixture + def local(self): + with patch("salt.utils.vault._use_local_config", autospec=True) as local: + yield local + + @pytest.fixture(params=["plain", 
"wrapped", "dict"]) + def secret_id(self, secret_id_response, wrapped_secret_id_response, request): + return { + "plain": "test-secret-id", + "wrapped": {"wrap_info": wrapped_secret_id_response["wrap_info"]}, + "dict": secret_id_response["data"], + }[request.param] + + @pytest.mark.parametrize("test_remote_config", ["approle"], indirect=True) + @pytest.mark.parametrize( + "salt_runtype,force_local", + [ + ("MASTER", False), + ("MASTER_IMPERSONATING", True), + ("MINION_LOCAL", False), + ], + indirect=["salt_runtype"], + ) + def test_fetch_secret_id_local( + self, + salt_runtype, + force_local, + uncached, + test_remote_config, + secret_id, + secret_id_response, + ): + """ + Ensure the local configuration is used when + a) running on master + b) running on master impersonating a minion when called from runner + c) running on minion in local mode + Also ensure serialized or wrapped secret ids are resolved. + """ + test_remote_config["auth"]["secret_id"] = secret_id + with patch("salt.utils.vault.VaultClient.unwrap") as unwrap: + unwrap.return_value = secret_id_response + res = vault._fetch_secret_id( + test_remote_config, {}, uncached, force_local=force_local + ) + if not isinstance(secret_id, str): + if "wrap_info" not in secret_id: + unwrap.assert_not_called() + else: + secret_id = secret_id_response["data"] + assert res == vault.VaultSecretId(**secret_id) + else: + assert res == vault.VaultSecretId( + secret_id=secret_id, + secret_id_ttl=test_remote_config["cache"]["config"], + secret_id_num_uses=0, + ) + uncached.get.assert_not_called() + uncached.store.assert_not_called() + + @pytest.mark.parametrize("test_remote_config", ["approle"], indirect=True) + def test_fetch_secret_id_cached(self, test_remote_config, cached, remote_unused): + """ + Ensure cache is respected + """ + res = vault._fetch_secret_id(test_remote_config, {}, cached) + assert res == cached.get() + cached.store.assert_not_called() + remote_unused.assert_not_called() + + 
@pytest.mark.parametrize("test_remote_config", ["approle"], indirect=True) + def test_fetch_secret_id_uncached(self, test_remote_config, uncached, remote): + """ + Ensure requested credentials are cached and returned as data objects + """ + res = vault._fetch_secret_id(test_remote_config, {}, uncached) + uncached.store.assert_called_once() + remote.assert_called_once() + data = remote() + assert res == vault.VaultSecretId(**data["data"]) + + @pytest.mark.parametrize("test_remote_config", ["approle"], indirect=True) + def test_fetch_secret_id_uncached_single_use( + self, test_remote_config, uncached, remote, secret_id_response, server_config + ): + """ + Check that single-use secret ids are not cached + """ + secret_id_response["data"]["secret_id_num_uses"] = 1 + remote.return_value = { + "data": secret_id_response["data"], + "server": server_config, + } + res = vault._fetch_secret_id(test_remote_config, {}, uncached) + uncached.store.assert_not_called() + remote.assert_called_once() + data = remote() + assert res == vault.VaultSecretId(**data["data"]) + + @pytest.mark.usefixtures("local") + @pytest.mark.parametrize("test_remote_config", ["approle"], indirect=True) + @pytest.mark.parametrize( + "conf_location,called", + [("local", False), ("master", True), (None, False), ("doesnotexist", False)], + ) + def test_fetch_secret_id_config_location( + self, conf_location, called, remote, uncached, test_remote_config + ): + """ + Ensure config_location is respected. 
+ """ + test_remote_config["config_location"] = conf_location + opts = {"vault": test_remote_config, "file_client": "local"} + if conf_location == "doesnotexist": + with pytest.raises( + salt.exceptions.InvalidConfigError, + match=".*config_location must be either local or master.*", + ): + vault._fetch_secret_id(test_remote_config, opts, uncached) + else: + vault._fetch_secret_id(test_remote_config, opts, uncached) + if called: + remote.assert_called() + else: + remote.assert_not_called() + + +class TestFetchToken: + @pytest.fixture + def cached(self, token_auth): + cache = Mock(spec=vault.VaultAuthCache) + cache.get.return_value = vault.VaultToken(**token_auth["auth"]) + return cache + + @pytest.fixture + def uncached(self): + cache = Mock(spec=vault.VaultConfigCache) + cache.get.return_value = None + return cache + + @pytest.fixture + def remote(self, token_auth, server_config): + with patch("salt.utils.vault._query_master", autospec=True) as query: + query.return_value = {"auth": token_auth["auth"], "server": server_config} + yield query + + @pytest.fixture + def remote_unused(self): + with patch("salt.utils.vault._query_master") as query: + yield query + + @pytest.fixture + def local(self): + with patch("salt.utils.vault._use_local_config", autospec=True) as local: + yield local + + @pytest.fixture(params=["plain", "wrapped", "dict"]) + def token(self, token_auth, wrapped_token_auth_response, request): + return { + "plain": token_auth["auth"]["client_token"], + "wrapped": {"wrap_info": wrapped_token_auth_response["wrap_info"]}, + "dict": token_auth["auth"], + }[request.param] + + @pytest.mark.parametrize( + "test_remote_config", ["token", "wrapped_token"], indirect=True + ) + @pytest.mark.parametrize( + "salt_runtype,force_local", + [ + ("MASTER", False), + ("MASTER_IMPERSONATING", True), + ("MINION_LOCAL", False), + ], + indirect=["salt_runtype"], + ) + def test_fetch_token_local( + self, + salt_runtype, + force_local, + uncached, + test_remote_config, + 
token, + token_auth, + token_lookup_self_response, + ): + """ + Ensure the local configuration is used when + a) running on master + b) running on master impersonating a minion when called from runner + c) running on minion in local mode + Also ensure serialized or wrapped tokens are resolved and plain tokens + are looked up. + Also ensure only plain token metadata is cached. + """ + test_remote_config["auth"].pop("token", None) + with patch("salt.utils.vault.VaultClient.unwrap") as unwrap: + unwrap.return_value = token_auth + with patch("salt.utils.vault.VaultClient.token_lookup") as token_lookup: + token_lookup.return_value = _mock_json_response( + token_lookup_self_response, status_code=200 + ) + res = vault._fetch_token( + test_remote_config, + {}, + uncached, + force_local=force_local, + embedded_token=token, + ) + if not isinstance(token, str): + token_lookup.assert_not_called() + if "wrap_info" not in token: + unwrap.assert_not_called() + else: + token = token_auth["auth"] + assert res == vault.VaultToken(**token) + elif test_remote_config["auth"]["method"] == "wrapped_token": + unwrap.assert_called_once() + token_lookup.assert_not_called() + token = token_auth["auth"] + assert res == vault.VaultToken(**token) + else: + unwrap.assert_not_called() + token_lookup.assert_called_once() + assert res == vault.VaultToken( + client_token=token, + lease_duration=token_lookup_self_response["data"]["ttl"], + **token_lookup_self_response["data"], + ) + if not isinstance(token, str): + uncached.get.assert_not_called() + uncached.store.assert_not_called() + else: + uncached.get.assert_called_once() + uncached.store.assert_called_once() + + @pytest.mark.parametrize( + "test_remote_config", ["token", "token_changed"], indirect=True + ) + @pytest.mark.parametrize( + "salt_runtype,force_local", + [ + ("MASTER", False), + ("MASTER_IMPERSONATING", True), + ("MINION_LOCAL", False), + ], + indirect=["salt_runtype"], + ) + def test_fetch_token_local_cached_changed( + self, + 
salt_runtype, + force_local, + cached, + test_remote_config, + token_lookup_self_response, + ): + """ + Test that only when the embedded plain token changed, the token metadata + cache is written/refreshed. + """ + embedded_token = test_remote_config["auth"].pop("token") + with patch("salt.utils.vault.VaultClient.token_lookup") as token_lookup: + token_lookup.return_value = _mock_json_response( + token_lookup_self_response, status_code=200 + ) + res = vault._fetch_token( + test_remote_config, + {}, + cached, + force_local=force_local, + embedded_token=embedded_token, + ) + if embedded_token == "test-token": + token_lookup.assert_not_called() + assert res == cached.get() + elif embedded_token == "test-token-changed": + token_lookup.assert_called_once() + assert res == vault.VaultToken( + lease_id=embedded_token, + lease_duration=token_lookup_self_response["data"]["ttl"], + **token_lookup_self_response["data"], + ) + + @pytest.mark.parametrize( + "test_remote_config", ["token", "wrapped_token"], indirect=True + ) + def test_fetch_token_cached(self, test_remote_config, cached, remote_unused): + """ + Ensure that cache is respected + """ + res = vault._fetch_token(test_remote_config, {}, cached) + assert res == cached.get() + cached.store.assert_not_called() + remote_unused.assert_not_called() + + @pytest.mark.parametrize("test_remote_config", ["token"], indirect=True) + def test_fetch_token_uncached_embedded( + self, test_remote_config, uncached, remote_unused, token_auth + ): + """ + Test that tokens that were sent with the connection configuration + are used when no cached token is available + """ + test_remote_config["auth"].pop("token", None) + res = vault._fetch_token( + test_remote_config, {}, uncached, embedded_token=token_auth["auth"] + ) + uncached.store.assert_called_once() + remote_unused.assert_not_called() + assert res == vault.VaultToken(**token_auth["auth"]) + + @pytest.mark.parametrize("test_remote_config", ["token"], indirect=True) + def 
test_fetch_token_uncached(self, test_remote_config, uncached, remote): + """ + Test that tokens that were sent with the connection configuration + are used when no cached token is available + """ + test_remote_config["auth"].pop("token", None) + res = vault._fetch_token(test_remote_config, {}, uncached) + uncached.store.assert_called_once() + remote.assert_called_once() + assert res == vault.VaultToken(**remote("func", {})["auth"]) + + @pytest.mark.parametrize("test_remote_config", ["token"], indirect=True) + def test_fetch_token_uncached_single_use( + self, test_remote_config, uncached, remote, token_auth, server_config + ): + """ + Check that single-use tokens are not cached + """ + token_auth["auth"]["num_uses"] = 1 + remote.return_value = {"auth": token_auth["auth"], "server": server_config} + res = vault._fetch_token(test_remote_config, {}, uncached) + uncached.store.assert_not_called() + remote.assert_called_once() + assert res == vault.VaultToken(**remote("func", {})["auth"]) + + @pytest.mark.usefixtures("local") + @pytest.mark.parametrize("test_remote_config", ["token"], indirect=True) + @pytest.mark.parametrize( + "conf_location,called", + [("local", False), ("master", True), (None, False), ("doesnotexist", False)], + ) + def test_fetch_token_config_location( + self, conf_location, called, remote, uncached, test_remote_config, token_auth + ): + """ + Ensure config_location is respected. 
+ """ + test_remote_config["config_location"] = conf_location + opts = {"vault": test_remote_config, "file_client": "local"} + embedded_token = token_auth["auth"] if not called else None + if conf_location == "doesnotexist": + with pytest.raises( + salt.exceptions.InvalidConfigError, + match=".*config_location must be either local or master.*", + ): + vault._fetch_token( + test_remote_config, opts, uncached, embedded_token=embedded_token + ) + else: + vault._fetch_token( + test_remote_config, opts, uncached, embedded_token=embedded_token + ) + if called: + remote.assert_called() + else: + remote.assert_not_called() + + +@pytest.mark.parametrize( + "test_config,expected_config,expected_token", + [ + ( + "token", + { + "auth": { + "approle_mount": "approle", + "approle_name": "salt-master", + "method": "token", + "secret_id": None, + "token_lifecycle": { + "minimum_ttl": 10, + "renew_increment": None, + }, + }, + "cache": { + "backend": "session", + "config": 3600, + "secret": "ttl", + }, + "server": { + "url": "http://127.0.0.1:8200", + "namespace": None, + "verify": None, + }, + }, + "test-token", + ), + ( + "approle", + { + "auth": { + "approle_mount": "approle", + "approle_name": "salt-master", + "method": "approle", + "role_id": "test-role-id", + "secret_id": "test-secret-id", + "token_lifecycle": { + "minimum_ttl": 10, + "renew_increment": None, + }, + }, + "cache": { + "backend": "session", + "config": 3600, + "secret": "ttl", + }, + "server": { + "url": "http://127.0.0.1:8200", + "namespace": None, + "verify": None, + }, + }, + None, + ), + ], + indirect=["test_config"], +) +def test_use_local_config(test_config, expected_config, expected_token): + """ + Ensure that _use_local_config only returns auth, cache, server scopes + and pops an embedded token, if present + """ + with patch("salt.utils.vault.parse_config", Mock(return_value=test_config)): + output, token = vault._use_local_config({}) + assert output == expected_config + assert token == expected_token 
+ + +class TestQueryMaster: + @pytest.fixture(autouse=True) + def publish_runner(self): + runner = Mock(return_value={"success": True}) + with patch.dict(vault.__salt__, {"publish.runner": runner}): + yield runner + + @pytest.fixture(autouse=True) + def saltutil_runner(self): + runner = Mock(return_value={"success": True}) + with patch.dict(vault.__salt__, {"saltutil.runner": runner}): + yield runner + + @pytest.fixture(autouse=True, scope="class") + def b64encode_sig(self): + with patch("base64.b64encode", Mock(return_value="signature")): + yield + + @pytest.fixture(autouse=True, scope="class") + def salt_crypt(self): + with patch("salt.crypt.sign_message", Mock(return_value="signature")): + yield + + @pytest.fixture(params=["minion"]) + def opts(self, request): + if request.param == "no_role": + return { + "grains": {"id": "test-minion"}, + "pki_dir": "/var/cache/salt/minion", + } + return { + "__role": request.param, + "grains": {"id": "test-minion"}, + "pki_dir": f"/var/cache/salt/{request.param}", + } + + @pytest.fixture(params=["data"]) + def unwrap_client(self, server_config, request): + with patch("salt.utils.vault.VaultClient") as unwrap_client: + unwrap_client.get_config.return_value = server_config + unwrap_client.unwrap.return_value = {request.param: {"bar": "baz"}} + yield unwrap_client + + def test_query_master_loads_minion_mods_if_necessary( + self, opts, saltutil_runner, publish_runner + ): + """ + Ensure that the runner requests loading execution modules + if the global has not been populated. 
+ """ + with patch("salt.loader.minion_mods") as loader: + loader.return_value = { + "publish.runner": publish_runner, + "saltutil.runner": saltutil_runner, + } + with patch.dict(vault.__salt__, {}, clear=True): + vault._query_master("func", opts) + loader.assert_called_once_with(opts) + + @pytest.mark.parametrize( + "opts,expected", + [ + ("master", "saltutil"), + ("minion", "publish"), + ("no_role", "publish"), + ], + indirect=["opts"], + ) + def test_query_master_uses_correct_module( + self, opts, expected, publish_runner, saltutil_runner + ): + """ + Ensure that the correct module to call the vault runner is used: + minion - publish.runner + master impersonating - saltutil.runner + """ + out = vault._query_master("func", opts) + assert out == {"success": True} + if expected == "saltutil": + publish_runner.assert_not_called() + saltutil_runner.assert_called_once() + else: + publish_runner.assert_called_once() + saltutil_runner.assert_not_called() + + @pytest.mark.parametrize("response", [None, False, {}, "f", {"error": "error"}]) + def test_query_master_validates_response( + self, opts, response, publish_runner, saltutil_runner + ): + """ + Ensure that falsey return values invalidate config (auth method change) + or reported errors by the master are recognized and raised + """ + publish_runner.return_value = saltutil_runner.return_value = response + if not response: + with pytest.raises(vault.VaultConfigExpired): + vault._query_master("func", opts) + else: + with pytest.raises(salt.exceptions.CommandExecutionError): + vault._query_master("func", opts) + + @pytest.mark.parametrize( + "response", [{"expire_cache": True}, {"error": {"error"}, "expire_cache": True}] + ) + def test_query_master_invalidates_cache_when_requested_by_master( + self, opts, response, publish_runner, saltutil_runner + ): + """ + Ensure that "expire_cache" set to True invalidates cache + """ + publish_runner.return_value = saltutil_runner.return_value = response + with 
pytest.raises(vault.VaultConfigExpired): + vault._query_master("func", opts) + + @pytest.mark.parametrize( + "url,verify,namespace", + [ + ("new-url", None, None), + ("http://127.0.0.1:8200", "/etc/ssl/certs.pem", None), + ("http://127.0.0.1:8200", None, "test-namespace"), + ], + ) + def test_query_master_invalidates_cache_when_expected_server_differs( + self, + opts, + url, + verify, + namespace, + server_config, + publish_runner, + saltutil_runner, + unwrap_client, + ): + """ + Ensure that VaultConfigExpired is raised when expected_server is passed + and differs from what the server reports. Also ensure that the unwrapping + still takes place (for security reasons) and with the correct server + configuration. + """ + publish_runner.return_value = saltutil_runner.return_value = { + "server": {"url": url, "verify": verify, "namespace": namespace} + } + with pytest.raises(vault.VaultConfigExpired): + vault._query_master("func", opts, expected_server=server_config) + unwrap_client.unwrap.assert_called_once() + + @pytest.mark.parametrize( + "url,verify,namespace", + [ + ("new-url", None, None), + ("http://127.0.0.1:8200", "/etc/ssl/certs.pem", None), + ("http://127.0.0.1:8200", None, "test-namespace"), + ], + ) + def test_query_master_invalidates_cache_when_unwrap_client_has_different_server_config( + self, + opts, + url, + verify, + namespace, + server_config, + wrapped_role_id_response, + unwrap_client, + publish_runner, + saltutil_runner, + ): + """ + Ensure that VaultConfigExpired is raised when a passed unwrap client has a different + configuration than the server reports. Also ensure that the unwrapping still takes + place (for security reasons) and with the correct server configuration. 
+ """ + publish_runner.return_value = saltutil_runner.return_value = { + "server": {"url": url, "verify": verify, "namespace": namespace}, + "wrap_info": wrapped_role_id_response["wrap_info"], + } + old_unwrap_client = Mock(spec=vault.VaultClient) + old_unwrap_client.get_config.return_value = server_config + with pytest.raises(vault.VaultConfigExpired): + vault._query_master("func", opts, unwrap_client=old_unwrap_client) + old_unwrap_client.unwrap.assert_not_called() + unwrap_client.unwrap.assert_called_once() + + def test_query_master_verify_does_not_interfere_with_expected_server( + self, + opts, + publish_runner, + saltutil_runner, + caplog, + ): + """ + Ensure that a locally configured verify parameter is inserted before + checking if there is a config mismatch. + """ + publish_runner.return_value = saltutil_runner.return_value = { + "server": { + "url": "http://127.0.0.1:8200", + "verify": None, + "namespace": None, + }, + "data": {"foo": "bar"}, + } + expected_server = { + "url": "http://127.0.0.1:8200", + "verify": "/etc/ssl/certs.pem", + "namespace": None, + } + expected_return = { + "server": { + "url": "http://127.0.0.1:8200", + "verify": "/etc/ssl/certs.pem", + "namespace": None, + }, + "data": {"foo": "bar"}, + } + opts["vault"] = {"server": {"verify": "/etc/ssl/certs.pem"}} + + ret = vault._query_master("func", opts, expected_server=expected_server) + assert ret == expected_return + assert "Mismatch of cached and reported server data detected" not in caplog.text + + def test_query_master_verify_does_not_interfere_with_unwrap_client_config( + self, + opts, + publish_runner, + saltutil_runner, + wrapped_role_id_response, + role_id_response, + unwrap_client, + caplog, + ): + """ + Ensure that a locally configured verify parameter is inserted before + checking if there is a config mismatch. 
+ """ + publish_runner.return_value = saltutil_runner.return_value = { + "server": { + "url": "http://127.0.0.1:8200", + "verify": None, + "namespace": None, + }, + "wrap_info": wrapped_role_id_response["wrap_info"], + } + expected_server = { + "url": "http://127.0.0.1:8200", + "verify": "/etc/ssl/certs.pem", + "namespace": None, + } + opts["vault"] = {"server": {"verify": "/etc/ssl/certs.pem"}} + + unwrap_client = Mock(spec=vault.VaultClient) + unwrap_client.get_config.return_value = expected_server + unwrap_client.unwrap.return_value = role_id_response + with patch("salt.utils.vault.VaultClient") as vc: + ret = vault._query_master("func", opts, unwrap_client=unwrap_client) + vc.assert_not_called() + assert ret == { + "data": role_id_response["data"], + "server": expected_server, + } + + @pytest.mark.parametrize( + "unwrap_client,key", + [ + ("data", "data"), + ("auth", "auth"), + ], + indirect=["unwrap_client"], + ) + def test_query_master_merges_unwrapped_result( + self, + opts, + publish_runner, + saltutil_runner, + wrapped_role_id_response, + unwrap_client, + key, + server_config, + ): + """ + Ensure that "data"/"auth" keys from unwrapped result are correctly merged + """ + publish_runner.return_value = saltutil_runner.return_value = { + "server": server_config, + "wrap_info": wrapped_role_id_response["wrap_info"], + } + out = vault._query_master("func", opts, unwrap_client=unwrap_client) + assert "wrap_info" not in out + assert key in out + assert out[key] == {"bar": "baz"} + + @pytest.mark.parametrize( + "unwrap_client", ["data", "auth"], indirect=["unwrap_client"] + ) + def test_query_master_merges_nested_unwrapped_result( + self, + opts, + publish_runner, + saltutil_runner, + wrapped_role_id_response, + unwrap_client, + server_config, + ): + """ + Ensure that "data"/"auth" keys from unwrapped results of nested + wrapped responses are correctly merged + """ + publish_runner.return_value = saltutil_runner.return_value = { + "server": server_config, + 
"wrap_info_nested": ["auth:role_id"], + "auth": {"role_id": {"wrap_info": wrapped_role_id_response["wrap_info"]}}, + } + out = vault._query_master("func", opts, unwrap_client=unwrap_client) + assert "wrap_info_nested" not in out + assert "wrap_info" not in out["auth"]["role_id"] + assert out["auth"]["role_id"] == {"bar": "baz"} + + @pytest.mark.parametrize("misc_data", ["secret_id_num_uses", "secret_id_ttl"]) + @pytest.mark.parametrize("key", ["auth", "data"]) + def test_query_master_merges_misc_data( + self, opts, publish_runner, saltutil_runner, secret_id_response, misc_data, key + ): + """ + Ensure that "misc_data" is merged into "data"/"auth" only if the key is not + set there. + This is used to provide miscellaneous information that might only be + easily available to the master (such as secret_id_num_uses, which is + not reported in the secret ID generation response currently and would + consume a token use for the minion to look up). + """ + response = { + key: secret_id_response["data"], + "misc_data": {misc_data: "merged"}, + } + publish_runner.return_value = saltutil_runner.return_value = deepcopy(response) + out = vault._query_master("func", opts) + assert misc_data in out[key] + assert "misc_data" not in out + if misc_data in secret_id_response["data"]: + assert out[key][misc_data] == secret_id_response["data"][misc_data] + else: + assert out[key][misc_data] == "merged" + + @pytest.mark.parametrize("misc_data", ["nested:value", "nested:num_uses"]) + @pytest.mark.parametrize("key", ["auth", "data"]) + def test_query_master_merges_misc_data_recursively( + self, opts, publish_runner, saltutil_runner, misc_data, key + ): + """ + Ensure that "misc_data" is merged recursively into "data"/"auth" only if + the key is not set there. 
+ This is used to provide miscellaneous information that might only be + easily available to the master (such as num_uses for old vault versions, + which is not reported in the token generation response there and would + consume a token use for the minion to look up). + """ + response = { + key: {"nested": {"value": "existing"}}, + "misc_data": {misc_data: "merged"}, + } + publish_runner.return_value = saltutil_runner.return_value = deepcopy(response) + out = vault._query_master("func", opts) + nested_key = misc_data.split(":")[1] + assert nested_key in out[key]["nested"] + assert "misc_data" not in out + if nested_key in response[key]["nested"]: + assert out[key]["nested"][nested_key] == "existing" + else: + assert out[key]["nested"][nested_key] == "merged" + + +############################################ +# [Authenticated]VaultClient tests +############################################ + + +@pytest.mark.parametrize( + "endpoint", + [ + "secret/some/path", + "/secret/some/path", + "secret/some/path/", + "/secret/some/path/", + ], +) +def test_vault_client_request_raw_url(endpoint, client, req): + """ + Test that requests are sent to the correct endpoint, regardless of leading or trailing slashes + """ + expected_url = f"{client.url}/v1/secret/some/path" + client.request_raw("GET", endpoint) + req.assert_called_with( + "GET", + expected_url, + headers=ANY, + json=None, + verify=client.get_config()["verify"], + ) + + +def test_vault_client_request_raw_kwargs_passthrough(client, req): + """ + Test that kwargs for requests.request are passed through + """ + client.request_raw( + "GET", "secret/some/path", allow_redirects=False, cert="/etc/certs/client.pem" + ) + req.assert_called_with( + "GET", + ANY, + headers=ANY, + json=ANY, + verify=ANY, + allow_redirects=False, + cert="/etc/certs/client.pem", + ) + + +@pytest.mark.parametrize("namespace", [None, "test-namespace"]) +def test_vault_client_request_raw_headers_namespace(namespace, server_config, req): + """ + Test 
that namespace is present in the HTTP headers only if it was specified + """ + if namespace is not None: + server_config.update({"namespace": namespace}) + client = vault.VaultClient(**server_config) + + namespace_header = "X-Vault-Namespace" + client.request_raw("GET", "secret/some/path") + headers = req.call_args.kwargs.get("headers", {}) + if namespace is None: + assert namespace_header not in headers + else: + assert headers.get(namespace_header) == namespace + + +@pytest.mark.parametrize("wrap", [False, 30, "1h"]) +def test_vault_client_request_raw_headers_wrap(wrap, client, req): + """ + Test that the wrap header is present only if it was specified and supports time strings + """ + wrap_header = "X-Vault-Wrap-TTL" + client.request_raw("GET", "secret/some/path", wrap=wrap) + headers = req.call_args.kwargs.get("headers", {}) + if not wrap: + assert wrap_header not in headers + else: + assert headers.get(wrap_header) == str(wrap) + + +@pytest.mark.parametrize("header", ["X-Custom-Header", "X-Existing-Header"]) +def test_vault_client_request_raw_headers_additional(header, client, req): + """ + Test that additional headers are passed correctly and override default ones + """ + with patch.object( + client, "_get_headers", Mock(return_value={"X-Existing-Header": "unchanged"}) + ): + client.request_raw("GET", "secret/some/path", add_headers={header: "changed"}) + actual_header = req.call_args.kwargs.get("headers", {}).get(header) + assert actual_header == "changed" + + +@pytest.mark.parametrize( + "req_failed", [400, 403, 404, 502, 401], indirect=["req_failed"] +) +def test_vault_client_request_raw_does_not_raise_http_exception( + req_failed, server_config +): + """ + request_raw should return the raw response object regardless of HTTP status code + """ + client = vault.VaultClient(**server_config) + res = client.request_raw("GET", "secret/some/path") + with pytest.raises(requests.exceptions.HTTPError): + res.raise_for_status() + + +@pytest.mark.parametrize( + 
"req_failed,expected", + [ + (400, vault.VaultInvocationError), + (403, vault.VaultPermissionDeniedError), + (404, vault.VaultNotFoundError), + (405, vault.VaultUnsupportedOperationError), + (412, vault.VaultPreconditionFailedError), + (500, vault.VaultServerError), + (502, vault.VaultServerError), + (503, vault.VaultUnavailableError), + (401, requests.exceptions.HTTPError), + ], + indirect=["req_failed"], +) +@pytest.mark.parametrize("raise_error", [True, False]) +def test_vault_client_request_respects_raise_error( + raise_error, req_failed, expected, client +): + """ + request should inspect the response object and raise appropriate errors + or fall back to raise_for_status if raise_error is true + """ + if raise_error: + with pytest.raises(expected): + client.request("GET", "secret/some/path", raise_error=raise_error) + else: + res = client.request("GET", "secret/some/path", raise_error=raise_error) + assert "errors" in res + + +def test_vault_client_request_returns_whole_response_data( + role_id_response, req, client +): + """ + request should return the whole returned payload, not auth/data etc only + """ + req.return_value = _mock_json_response(role_id_response) + res = client.request("GET", "auth/approle/role/test-minion/role-id") + assert res == role_id_response + + +def test_vault_client_request_hydrates_wrapped_response( + wrapped_role_id_response, req, client +): + """ + request should detect wrapped responses and return an instance of VaultWrappedResponse + instead of raw data + """ + req.return_value = _mock_json_response(wrapped_role_id_response) + res = client.request("GET", "auth/approle/role/test-minion/role-id", wrap="180s") + assert isinstance(res, vault.VaultWrappedResponse) + + +@pytest.mark.usefixtures("req_success") +def test_vault_client_request_returns_true_when_no_data_is_reported(client): + """ + HTTP 204 indicates success with no data returned + """ + res = client.request("GET", "secret/some/path") + assert res is True + + +def 
test_vault_client_get_config(server_config, client): + """ + The returned configuration should match the one used to create an instance of VaultClient + """ + assert client.get_config() == server_config + + +@pytest.mark.parametrize("client", [None], indirect=["client"]) +def test_vault_client_token_valid_false(client): + """ + The unauthenticated client should always report the token as being invalid + """ + assert client.token_valid() is False + + +@pytest.mark.parametrize("client", ["valid_token", "invalid_token"], indirect=True) +@pytest.mark.parametrize("req_any", [200, 403], indirect=True) +@pytest.mark.parametrize("remote", [False, True]) +def test_vault_client_token_valid(client, remote, req_any): + valid = client.token_valid(remote=remote) + if not remote or not client.auth.is_valid(): + req_any.assert_not_called() + else: + req_any.assert_called_once() + should_be_valid = client.auth.is_valid() and ( + not remote or req_any("POST", "abc").status_code == 200 + ) + assert valid is should_be_valid + + +@pytest.mark.parametrize("func", ["get", "delete", "post", "list"]) +def test_vault_client_wrapper_should_not_require_payload(func, client, req): + """ + Check that wrappers for get/delete/post/list do not require a payload + """ + req.return_value = _mock_json_response({}, status_code=200) + tgt = getattr(client, func) + res = tgt("auth/approle/role/test-role/secret-id") + assert res == {} + + +@pytest.mark.parametrize("func", ["patch"]) +def test_vault_client_wrapper_should_require_payload(func, client, req): + """ + Check that patch wrapper does require a payload + """ + req.return_value = _mock_json_response({}, status_code=200) + tgt = getattr(client, func) + with pytest.raises(TypeError): + tgt("auth/approle/role/test-role/secret-id") + + +def test_vault_client_wrap_info_only_data(wrapped_role_id_lookup_response, client, req): + """ + wrap_info should only return the data portion of the returned wrapping information + """ + req.return_value = 
_mock_json_response(wrapped_role_id_lookup_response) + res = client.wrap_info("test-wrapping-token") + assert res == wrapped_role_id_lookup_response["data"] + + +@pytest.mark.parametrize( + "req_failed,expected", [(502, vault.VaultServerError)], indirect=["req_failed"] +) +def test_vault_client_wrap_info_should_fail_with_sensible_response( + req_failed, expected, client +): + """ + wrap_info should return sensible Exceptions, not KeyError etc + """ + with pytest.raises(expected): + client.wrap_info("test-wrapping-token") + + +def test_vault_client_unwrap_returns_whole_response(role_id_response, client, req): + """ + The unwrapped response should be returned as a whole, not auth/data etc only + """ + req.return_value = _mock_json_response(role_id_response) + res = client.unwrap("test-wrapping-token") + assert res == role_id_response + + +def test_vault_client_unwrap_should_default_to_token_header_before_payload( + role_id_response, client, req +): + """ + When unwrapping a wrapping token, it can be used as the authentication token header. 
+ If the client has a valid token, it should be used in the header instead and the + unwrapping token should be passed in the payload + """ + token = "test-wrapping-token" + req.return_value = _mock_json_response(role_id_response) + client.unwrap(token) + if client.token_valid(remote=False): + payload = req.call_args.kwargs.get("json", {}) + assert payload.get("token") == token + else: + headers = req.call_args.kwargs.get("headers", {}) + assert headers.get("X-Vault-Token") == token + + +@pytest.mark.parametrize("func", ["unwrap", "token_lookup"]) +@pytest.mark.parametrize( + "req_failed,expected", + [ + (400, vault.VaultInvocationError), + (403, vault.VaultPermissionDeniedError), + (404, vault.VaultNotFoundError), + (502, vault.VaultServerError), + (401, requests.exceptions.HTTPError), + ], + indirect=["req_failed"], +) +def test_vault_client_unwrap_should_raise_appropriate_errors( + func, req_failed, expected, client +): + """ + unwrap/token_lookup should raise exceptions the same way request does + """ + with pytest.raises(expected): + tgt = getattr(client, func) + tgt("test-wrapping-token") + + +@pytest.mark.usefixtures("req_unwrapping") +@pytest.mark.parametrize( + "path", + [ + "auth/approle/role/test-minion/role-id", + "auth/approle/role/[^/]+/role-id", + ["incorrect/path", "[^a]+", "auth/approle/role/[^/]+/role-id"], + ], +) +def test_vault_client_unwrap_should_match_check_expected_creation_path( + path, role_id_response, client +): + """ + Expected creation paths should be accepted as strings and list of strings, + where the strings can be regex patterns + """ + res = client.unwrap("test-wrapping-token", expected_creation_path=path) + assert res == role_id_response + + +@pytest.mark.usefixtures("req_unwrapping") +@pytest.mark.parametrize( + "path", + [ + "auth/other_mount/role/test-minion/role-id", + "auth/approle/role/[^tes/]+/role-id", + ["incorrect/path", "[^a]+", "auth/approle/role/[^/]/role-id"], + ], +) +def 
test_vault_client_unwrap_should_fail_on_unexpected_creation_path(path, client): + """ + When none of the patterns match, a (serious) exception should be raised + """ + with pytest.raises(vault.VaultUnwrapException): + client.unwrap("test-wrapping-token", expected_creation_path=path) + + +def test_vault_client_token_lookup_returns_data_only( + token_lookup_self_response, req, client +): + """ + token_lookup should return "data" only, not the whole response payload + """ + req.return_value = _mock_json_response(token_lookup_self_response) + res = client.token_lookup("test-token") + assert res == token_lookup_self_response["data"] + + +@pytest.mark.parametrize("raw", [False, True]) +def test_vault_client_token_lookup_respects_raw(raw, req, client): + """ + when raw is True, token_lookup should return the raw response + """ + response_data = {"foo": "bar"} + req.return_value = _mock_json_response({"data": response_data}) + res = client.token_lookup("test-token", raw=raw) + if raw: + assert res.json() == {"data": response_data} + else: + assert res == response_data + + +def test_vault_client_token_lookup_uses_accessor(client, req_any): + """ + Ensure a client can lookup tokens with provided accessor + """ + token = "test-token" + if client.token_valid(): + token = None + client.token_lookup(token=token, accessor="test-token-accessor") + payload = req_any.call_args.kwargs.get("json", {}) + _, url = req_any.call_args[0] + assert payload.get("accessor") == "test-token-accessor" + assert url.endswith("lookup-accessor") + + +# VaultClient only + + +@pytest.mark.usefixtures("req") +@pytest.mark.parametrize("client", [None], indirect=["client"]) +def test_vault_client_token_lookup_requires_token_for_unauthenticated_client(client): + with pytest.raises(vault.VaultInvocationError): + client.token_lookup() + + +# AuthenticatedVaultClient only + + +@pytest.mark.usefixtures("req_any") +@pytest.mark.parametrize("client", ["valid_token"], indirect=True) +@pytest.mark.parametrize( + 
"endpoint,use", + [ + ("secret/data/some/path", True), + ("auth/approle/role/test-minion", True), + ("sys/internal/ui/mounts", False), + ("sys/internal/ui/mounts/secret", False), + ("sys/wrapping/lookup", False), + ("sys/internal/ui/namespaces", False), + ("sys/health", False), + ("sys/seal-status", False), + ], +) +def test_vault_client_request_raw_increases_use_count_when_necessary_depending_on_path( + endpoint, use, client +): + """ + When a request is issued to an endpoint that consumes a use, ensure it is passed + along to the token. + https://github.com/hashicorp/vault/blob/d467681e15898041b6dd5f2bf7789bd7c236fb16/vault/logical_system.go#L119-L155 + """ + client.request_raw("GET", endpoint) + assert client.auth.used.called is use + + +@pytest.mark.parametrize("client", ["valid_token"], indirect=True) +@pytest.mark.parametrize( + "req_failed", + [400, 403, 404, 405, 412, 500, 502, 503, 401], + indirect=True, +) +def test_vault_client_request_raw_increases_use_count_when_necessary_depending_on_response( + req_failed, client +): + """ + When a request is issued to an endpoint that consumes a use, make sure that + this is registered regardless of status code: + https://github.com/hashicorp/vault/blob/c1cf97adac5c53301727623a74b828a5f12592cf/vault/request_handling.go#L864-L866 + ref: PR #62552 + """ + client.request_raw("GET", "secret/data/some/path") + assert client.auth.used.called is True + + +@pytest.mark.usefixtures("req_any") +@pytest.mark.parametrize("client", ["valid_token"], indirect=True) +def test_vault_client_request_raw_does_not_increase_use_count_with_unauthd_endpoint( + client, +): + """ + Unauthenticated endpoints do not consume a token use. Since some cannot be detected + easily because of customizable mount points for secret engines and auth methods, + this can be specified in the request. Make sure it is honored. 
+ """ + client.request("GET", "pki/cert/ca", is_unauthd=True) + client.auth.used.assert_not_called() + + +@pytest.mark.parametrize("client", ["valid_token"], indirect=True) +def test_vault_client_token_lookup_self_possible(client, req_any): + """ + Ensure an authenticated client can lookup its own token + """ + client.token_lookup() + headers = req_any.call_args.kwargs.get("headers", {}) + _, url = req_any.call_args[0] + assert headers.get("X-Vault-Token") == str(client.auth.get_token()) + assert url.endswith("lookup-self") + + +@pytest.mark.parametrize("client", ["valid_token"], indirect=True) +def test_vault_client_token_lookup_supports_token_arg(client, req_any): + """ + Ensure an authenticated client can lookup other tokens + """ + token = "other-test-token" + client.token_lookup(token=token) + headers = req_any.call_args.kwargs.get("headers", {}) + payload = req_any.call_args.kwargs.get("json", {}) + _, url = req_any.call_args[0] + assert payload.get("token") == token + assert headers.get("X-Vault-Token") == str(client.auth.get_token()) + assert url.endswith("lookup") + + +@pytest.mark.parametrize("client", ["valid_token"], indirect=True) +@pytest.mark.parametrize("renewable", [True, False]) +def test_vault_client_token_renew_self_possible( + token_renew_self_response, client, req, renewable +): + """ + Ensure an authenticated client can renew its own token only when + it is renewable and that the renewed data is passed along to the + token store + """ + req.return_value = _mock_json_response(token_renew_self_response) + client.auth.is_renewable.return_value = renewable + res = client.token_renew() + if renewable: + headers = req.call_args.kwargs.get("headers", {}) + _, url = req.call_args[0] + assert headers.get("X-Vault-Token") == str(client.auth.get_token()) + assert url.endswith("renew-self") + req.assert_called_once() + client.auth.update_token.assert_called_once_with( + token_renew_self_response["auth"] + ) + assert res == 
token_renew_self_response["auth"] + else: + assert res is False + + +@pytest.mark.parametrize("client", ["valid_token"], indirect=True) +def test_vault_client_token_renew_supports_token_arg( + token_renew_other_response, client, req +): + """ + Ensure an authenticated client can renew other tokens + """ + req.return_value = _mock_json_response(token_renew_other_response) + token = "other-test-token" + client.token_renew(token=token) + headers = req.call_args.kwargs.get("headers", {}) + payload = req.call_args.kwargs.get("json", {}) + _, url = req.call_args[0] + assert payload.get("token") == token + assert headers.get("X-Vault-Token") == str(client.auth.get_token()) + assert url.endswith("renew") + + +@pytest.mark.parametrize("client", ["valid_token"], indirect=True) +def test_vault_client_token_renew_uses_accessor( + token_renew_accessor_response, client, req +): + """ + Ensure a client can renew tokens with provided accessor + """ + req.return_value = _mock_json_response(token_renew_accessor_response) + client.token_renew(accessor="test-token-accessor") + payload = req.call_args.kwargs.get("json", {}) + _, url = req.call_args[0] + assert payload.get("accessor") == "test-token-accessor" + assert url.endswith("renew-accessor") + + +@pytest.mark.parametrize("client", ["valid_token"], indirect=True) +@pytest.mark.parametrize("token", [None, "other-test-token"]) +def test_vault_client_token_renew_self_updates_token( + token_renew_self_response, client, token, req +): + """ + Ensure the current client token is updated when it is renewed, but not + when another token is renewed + """ + req.return_value = _mock_json_response(token_renew_self_response) + client.token_renew(token=token) + if token is None: + assert client.auth.update_token.called + else: + assert not client.auth.update_token.called + + +@pytest.mark.parametrize("client", ["valid_token"], indirect=True) +@pytest.mark.parametrize( + "token,accessor", + [(None, None), ("other-test-token", None), (None, 
"test-accessor")], +) +def test_vault_client_token_renew_increment_is_honored( + token, accessor, client, token_renew_self_response, req +): + """ + Ensure the renew increment is passed to vault if provided + """ + req.return_value = _mock_json_response(token_renew_self_response) + client.token_renew(token=token, accessor=accessor, increment=3600) + payload = req.call_args.kwargs.get("json", {}) + assert payload.get("increment") == 3600 + + +############################################ +# VaultLease tests +############################################ + + +@pytest.mark.parametrize( + "creation_time,expected", + [ + ("2022-08-22T17:16:21-09:30", 1661222781), + ("2022-08-22T17:16:21-01:00", 1661192181), + ("2022-08-22T17:16:21+00:00", 1661188581), + ("2022-08-22T17:16:21Z", 1661188581), + ("2022-08-22T17:16:21+02:00", 1661181381), + ("2022-08-22T17:16:21+12:30", 1661143581), + ], +) +def test_iso_to_timestamp_polyfill(creation_time, expected): + with patch("salt.utils.vault.datetime.datetime") as d: + d.fromisoformat.side_effect = AttributeError + # needs from datetime import datetime, otherwise results + # in infinite recursion + + # pylint: disable=unnecessary-lambda + d.side_effect = lambda *args: datetime(*args) + res = vault.iso_to_timestamp(creation_time) + assert res == expected + + +@pytest.mark.parametrize( + "creation_time", + [ + 1661188581, + "1661188581", + "2022-08-22T17:16:21.473219641+00:00", + "2022-08-22T17:16:21.47321964+00:00", + "2022-08-22T17:16:21.4732196+00:00", + "2022-08-22T17:16:21.473219+00:00", + "2022-08-22T17:16:21.47321+00:00", + "2022-08-22T17:16:21.4732+00:00", + "2022-08-22T17:16:21.473+00:00", + "2022-08-22T17:16:21.47+00:00", + "2022-08-22T17:16:21.4+00:00", + ], +) +def test_vault_lease_creation_time_normalization(creation_time): + """ + Ensure the normalization of different creation_time formats works as expected - + many token endpoints report a timestamp, while other endpoints report RFC3339-formatted + strings that may have a 
variable number of digits for sub-second precision (0 omitted) + while datetime.fromisoformat expects exactly 6 digits + """ + data = { + "lease_id": "id", + "renewable": False, + "lease_duration": 1337, + "creation_time": creation_time, + "data": None, + } + res = vault.VaultLease(**data) + assert res.creation_time == 1661188581 + + +@pytest.mark.parametrize( + "tock,duration,offset,expected", + [ + (0, 50, 0, True), + (50, 10, 0, False), + (0, 60, 10, True), + (0, 60, 600, False), + ], +) +def test_vault_lease_is_valid_accounts_for_time(tock, duration, offset, expected): + """ + Ensure lease validity is checked correctly and can look into the future + """ + data = { + "lease_id": "id", + "renewable": False, + "lease_duration": duration, + "creation_time": 0, + "expire_time": duration, + "data": None, + } + with patch("salt.utils.vault.time.time", return_value=tock): + res = vault.VaultLease(**data) + assert res.is_valid(offset) is expected + + +@pytest.mark.parametrize( + "tock,duration,offset,expected", + [ + (0, 50, 0, True), + (50, 10, 0, False), + (0, 60, 10, True), + (0, 60, 600, False), + ], +) +def test_vault_token_is_valid_accounts_for_time(tock, duration, offset, expected): + """ + Ensure token time validity is checked correctly and can look into the future + """ + data = { + "client_token": "id", + "renewable": False, + "lease_duration": duration, + "num_uses": 0, + "creation_time": 0, + "expire_time": duration, + } + with patch("salt.utils.vault.time.time", return_value=tock): + res = vault.VaultToken(**data) + assert res.is_valid(offset) is expected + + +@pytest.mark.parametrize( + "num_uses,uses,expected", + [(0, 999999, True), (1, 0, True), (1, 1, False), (1, 2, False)], +) +def test_vault_token_is_valid_accounts_for_num_uses(num_uses, uses, expected): + """ + Ensure token uses validity is checked correctly + """ + data = { + "client_token": "id", + "renewable": False, + "lease_duration": 0, + "num_uses": num_uses, + "creation_time": 0, + 
"use_count": uses, + } + with patch("salt.utils.vault.VaultLease.is_valid", Mock(return_value=True)): + res = vault.VaultToken(**data) + assert res.is_valid() is expected + + +@pytest.mark.parametrize( + "tock,duration,offset,expected", + [ + (0, 50, 0, True), + (50, 10, 0, False), + (0, 60, 10, True), + (0, 60, 600, False), + ], +) +def test_vault_approle_secret_id_is_valid_accounts_for_time( + tock, duration, offset, expected +): + """ + Ensure secret ID time validity is checked correctly and can look into the future + """ + data = { + "secret_id": "test-secret-id", + "renewable": False, + "creation_time": 0, + "expire_time": duration, + "secret_id_num_uses": 0, + "secret_id_ttl": duration, + } + with patch("salt.utils.vault.time.time", return_value=tock): + res = vault.VaultSecretId(**data) + assert res.is_valid(offset) is expected + + +@pytest.mark.parametrize( + "num_uses,uses,expected", + [(0, 999999, True), (1, 0, True), (1, 1, False), (1, 2, False)], +) +def test_vault_approle_secret_id_is_valid_accounts_for_num_uses( + num_uses, uses, expected +): + """ + Ensure secret ID uses validity is checked correctly + """ + data = { + "secret_id": "test-secret-id", + "renewable": False, + "creation_time": 0, + "secret_id_ttl": 0, + "secret_id_num_uses": num_uses, + "use_count": uses, + } + with patch("salt.utils.vault.VaultLease.is_valid", Mock(return_value=True)): + res = vault.VaultSecretId(**data) + assert res.is_valid() is expected + + +############################################ +# Auth tests +############################################ + + +class TestAuthMethods: + @pytest.fixture + def token(self, token_auth): + return vault.VaultToken(**token_auth["auth"]) + + @pytest.fixture + def token_invalid(self, token_auth): + token_auth["auth"]["num_uses"] = 1 + token_auth["auth"]["use_count"] = 1 + return vault.VaultToken(**token_auth["auth"]) + + @pytest.fixture + def token_unrenewable(self, token_auth): + token_auth["auth"]["renewable"] = False + return 
vault.VaultToken(**token_auth["auth"]) + + @pytest.fixture + def secret_id(self, secret_id_response): + return vault.VaultSecretId(**secret_id_response["data"]) + + @pytest.fixture + def secret_id_invalid(self, secret_id_response): + secret_id_response["data"]["secret_id_num_uses"] = 1 + secret_id_response["data"]["use_count"] = 1 + return vault.VaultSecretId(**secret_id_response["data"]) + + @pytest.fixture(params=["secret_id"]) + def approle(self, request): + secret_id = request.param + if secret_id is not None: + secret_id = request.getfixturevalue(secret_id) + return vault.VaultAppRole("test-role-id", secret_id) -import salt.utils.files -import salt.utils.vault as vault -from tests.support.mock import ANY, MagicMock, Mock, patch + @pytest.fixture + def approle_invalid(self, secret_id_invalid): + return vault.VaultAppRole("test-role-id", secret_id_invalid) -log = logging.getLogger(__name__) + @pytest.fixture + def token_store(self, token): + store = Mock(spec=vault.VaultTokenAuth) + store.is_valid.return_value = True + store.get_token.return_value = token + return store + @pytest.fixture + def token_store_empty(self, token_store): + token_store.is_valid.return_value = False + token_store.get_token.side_effect = vault.VaultAuthExpired + return token_store -@pytest.fixture -def tmp_cache(tmp_path): - cachedir = tmp_path / "cachedir" - cachedir.mkdir() - return cachedir + @pytest.fixture + def token_store_empty_first(self, token_store, token): + token_store.is_valid.side_effect = (False, True) + token_store.get_token.side_effect = (token, vault.VaultException) + return token_store + @pytest.fixture + def uncached(self): + cache = Mock(spec=vault.VaultAuthCache) + cache.exists.return_value = False + cache.get.return_value = None + return cache -@pytest.fixture -def configure_loader_modules(tmp_cache): - return { - vault: { - "__opts__": { - "vault": { - "url": "http://127.0.0.1", - "auth": { - "token": "test", - "method": "token", - "uses": 15, - "ttl": 500, - }, - 
}, - "file_client": "local", - "cachedir": str(tmp_cache), - }, - "__grains__": {"id": "test-minion"}, - "__context__": {}, - } - } + @pytest.fixture + def cached_token(self, uncached, token): + uncached.exists.return_value = True + uncached.get.return_value = token + return uncached + @pytest.fixture + def client(self, token_auth): + token_auth["auth"]["client_token"] = "new-test-token" + with patch("salt.utils.vault.VaultClient", autospec=True) as client: + client.post.return_value = token_auth + yield client -@pytest.fixture -def json_success(): - return { - "request_id": "35df4df1-c3d8-b270-0682-ddb0160c7450", - "lease_id": "", - "renewable": False, - "lease_duration": 0, - "data": { - "data": {"something": "myvalue"}, - "metadata": { - "created_time": "2020-05-02T07:26:12.180848003Z", - "deletion_time": "", - "destroyed": False, - "version": 1, - }, - }, - "wrap_info": None, - "warnings": None, - "auth": None, - } + def test_token_auth_uninitialized(self, uncached): + """ + Test that an exception is raised when a token is requested + and the authentication container was not passed a valid token. + """ + auth = vault.VaultTokenAuth(cache=uncached) + uncached.get.assert_called_once() + assert auth.is_valid() is False + assert auth.is_renewable() is False + auth.used() + with pytest.raises(vault.VaultAuthExpired): + auth.get_token() + def test_token_auth_cached(self, cached_token, token): + """ + Test that tokens are read from cache. + """ + auth = vault.VaultTokenAuth(cache=cached_token) + assert auth.is_valid() + assert auth.get_token() == token -@pytest.fixture -def json_denied(): - return {"errors": ["permission denied"]} + def test_token_auth_invalid_token(self, invalid_token): + """ + Test that an exception is raised when a token is requested + and the container's token is invalid. 
+ """ + auth = vault.VaultTokenAuth(token=invalid_token) + assert auth.is_valid() is False + assert auth.is_renewable() is False + with pytest.raises(vault.VaultAuthExpired): + auth.get_token() + def test_token_auth_unrenewable_token(self, token_unrenewable): + """ + Test that it is reported correctly by the container + when a token is not renewable. + """ + auth = vault.VaultTokenAuth(token=token_unrenewable) + assert auth.is_valid() is True + assert auth.is_renewable() is False + assert auth.get_token() == token_unrenewable -@pytest.fixture -def cache_single(): - return { - "url": "http://127.0.0.1:8200", - "token": "test", - "verify": None, - "namespace": None, - "uses": 1, - "lease_duration": 100, - "issued": 3000, - } + @pytest.mark.parametrize("num_uses", [0, 1, 10]) + def test_token_auth_used_num_uses(self, uncached, token, num_uses): + """ + Ensure that cache writes for use count are only done when + num_uses is not 0 (= unlimited) + """ + token = token.with_renewed(num_uses=num_uses) + auth = vault.VaultTokenAuth(cache=uncached, token=token) + auth.used() + if num_uses > 1: + uncached.store.assert_called_once_with(token) + elif num_uses: + uncached.flush.assert_called_once() + else: + uncached.store.assert_not_called() + @pytest.mark.parametrize("num_uses", [0, 1, 10]) + def test_token_auth_update_token(self, uncached, token, num_uses): + """ + Ensure that partial updates to the token in use are possible + and that the cache writes are independent from num_uses. 
+ Also ensure the token is treated as immutable + """ + auth = vault.VaultTokenAuth(cache=uncached, token=token) + old_token = token + old_token_ttl = old_token.duration + auth.update_token({"num_uses": num_uses, "ttl": 8483}) + updated_token = token.with_renewed(num_uses=num_uses, ttl=8483) + assert auth.token == updated_token + assert old_token.duration == old_token_ttl + uncached.store.assert_called_once_with(updated_token) -@pytest.fixture -def cache_single_namespace(): - return { - "url": "http://127.0.0.1:8200", - "token": "test", - "verify": None, - "namespace": "test_namespace", - "uses": 1, - "lease_duration": 100, - "issued": 3000, - } + def test_token_auth_replace_token(self, uncached, token): + """ + Ensure completely replacing the token is possible and + results in a cache write. This is important when an + InvalidVaultToken has to be replaced with a VaultToken, + eg by a different authentication method. + """ + auth = vault.VaultTokenAuth(cache=uncached) + assert isinstance(auth.token, vault.InvalidVaultToken) + auth.replace_token(token) + assert isinstance(auth.token, vault.VaultToken) + assert auth.token == token + uncached.store.assert_called_once_with(token) + @pytest.mark.parametrize("token", [False, True]) + @pytest.mark.parametrize("approle", [False, True]) + def test_approle_auth_is_valid(self, token, approle): + """ + Test that is_valid reports true when either the token + or the secret ID is valid + """ + token = Mock(spec=vault.VaultToken) + token.is_valid.return_value = token + approle = Mock(spec=vault.VaultSecretId) + approle.is_valid.return_value = approle + auth = vault.VaultAppRoleAuth(approle, None, token_store=token) + assert auth.is_valid() is (token or approle) -@pytest.fixture -def cache_uses(): - return { - "url": "http://127.0.0.1:8200", - "token": "test", - "verify": None, - "namespace": None, - "uses": 10, - "lease_duration": 100, - "issued": 3000, - "unlimited_use_token": False, - } + def 
test_approle_auth_get_token_store_available(self, token_store, approle, token): + """ + Ensure no login attempt is made when a cached token is available + """ + auth = vault.VaultAppRoleAuth(approle, None, token_store=token_store) + with patch("salt.utils.vault.VaultAppRoleAuth._login") as login: + res = auth.get_token() + login.assert_not_called() + assert res == token + def test_approle_auth_get_token_store_empty( + self, token_store_empty, approle, token + ): + """ + Ensure a token is returned if no cached token is available + """ + auth = vault.VaultAppRoleAuth(approle, None, token_store=token_store_empty) + with patch("salt.utils.vault.VaultAppRoleAuth._login") as login: + login.return_value = token + res = auth.get_token() + login.assert_called_once() + assert res == token -@pytest.fixture -def cache_uses_last(): - return { - "url": "http://127.0.0.1:8200", - "token": "test", - "verify": None, - "namespace": None, - "uses": 1, - "lease_duration": 100, - "issued": 3000, - "unlimited_use_token": False, - } + def test_approle_auth_get_token_invalid(self, token_store_empty, approle_invalid): + """ + Ensure VaultAuthExpired is raised if a token request was made, but + cannot be fulfilled + """ + auth = vault.VaultAppRoleAuth( + approle_invalid, None, token_store=token_store_empty + ) + with pytest.raises(vault.VaultAuthExpired): + auth.get_token() + @pytest.mark.parametrize("mount", ["approle", "salt_minions"]) + @pytest.mark.parametrize("approle", ["secret_id", None], indirect=True) + def test_approle_auth_get_token_login( + self, approle, mount, client, token_store_empty_first, token + ): + """ + Ensure that login with secret-id returns a token that is passed to the + token store/cache as well + """ + auth = vault.VaultAppRoleAuth( + approle, client, mount=mount, token_store=token_store_empty_first + ) + res = auth.get_token() + assert res == token + args, kwargs = client.post.call_args + endpoint = args[0] + payload = kwargs.get("payload", {}) + assert endpoint 
== f"auth/{mount}/login" + assert "role_id" in payload + if approle.secret_id is not None: + assert "secret_id" in payload + token_store_empty_first.replace_token.assert_called_once_with(res) -@pytest.fixture -def cache_unlimited(): - return { - "url": "http://127.0.0.1:8200", - "token": "test", - "verify": None, - "namespace": None, - "uses": 0, - "lease_duration": 100, - "issued": 3000, - "unlimited_use_token": True, - } + @pytest.mark.parametrize("num_uses", [0, 1, 10]) + def test_approle_auth_used_num_uses( + self, token_store_empty_first, approle, client, uncached, num_uses, token + ): + """ + Ensure that cache writes for use count are only done when + num_uses is not 0 (= unlimited) + """ + approle.secret_id = approle.secret_id.with_renewed(num_uses=num_uses) + auth = vault.VaultAppRoleAuth( + approle, client, cache=uncached, token_store=token_store_empty_first + ) + res = auth.get_token() + assert res == token + if num_uses > 1: + uncached.store.assert_called_once_with(approle.secret_id) + elif num_uses: + uncached.store.assert_not_called() + uncached.flush.assert_called_once() + else: + uncached.store.assert_not_called() + def test_approle_auth_used_locally_configured( + self, token_store_empty_first, approle, client, uncached, token + ): + """ + Ensure that locally configured secret IDs are not cached. 
+ """ + approle.secret_id = vault.LocalVaultSecretId(**approle.secret_id.to_dict()) + auth = vault.VaultAppRoleAuth( + approle, client, cache=uncached, token_store=token_store_empty_first + ) + res = auth.get_token() + assert res == token + uncached.store.assert_not_called() -@pytest.fixture -def metadata_v2(): - return { - "accessor": "kv_f8731f1b", - "config": { - "default_lease_ttl": 0, - "force_no_cache": False, - "max_lease_ttl": 0, - }, - "description": "key/value secret storage", - "external_entropy_access": False, - "local": False, - "options": {"version": "2"}, - "path": "secret/", - "seal_wrap": False, - "type": "kv", - "uuid": "1d9431ac-060a-9b63-4572-3ca7ffd78347", - } +def test_approle_allows_no_secret_id(): + """ + Ensure AppRole containers are still valid if no + secret ID has been set (bind_secret_id can be set to False!) + """ + role = vault.VaultAppRole("test-role-id") + assert role.is_valid() -@pytest.fixture -def cache_secret_meta(metadata_v2): - return {"vault_secret_path_metadata": {"secret/mything": metadata_v2}} +############################################ +# Cache tests +############################################ -def _mock_json_response(data, status_code=200, reason=""): + +@pytest.mark.parametrize("ckey", ["token", None]) +@pytest.mark.parametrize("connection", [True, False]) +def test_clear_cache(ckey, connection): """ - Mock helper for http response + Make sure clearing cache works as expected, allowing for + connection-scoped cache and global cache that survives + a configuration refresh """ - response = MagicMock() - response.json = MagicMock(return_value=data) - response.status_code = status_code - response.reason = reason - if status_code == 200: - response.ok = True - else: - response.ok = False - return Mock(return_value=response) + cbank = "vault" + if connection: + cbank += "/connection" + context = {cbank: {"token": "fake_token"}} + with patch("salt.cache.factory", autospec=True) as factory: + vault.clear_cache({}, context, 
ckey=ckey, connection=connection) + factory.return_value.flush.assert_called_once_with(cbank, ckey) + if ckey: + assert ckey not in context[cbank] + else: + assert cbank not in context -def test_write_cache_multi_use_token(cache_uses, tmp_cache): +@pytest.mark.parametrize("connection", [True, False]) +@pytest.mark.parametrize( + "salt_runtype,force_local,expected", + [ + ("MASTER", False, "vault"), + ("MASTER_IMPERSONATING", False, "minions/test-minion/vault"), + ("MASTER_IMPERSONATING", True, "vault"), + ("MINION_LOCAL", False, "vault"), + ("MINION_REMOTE", False, "vault"), + ], + indirect=["salt_runtype"], +) +def test_get_cache_bank(connection, salt_runtype, force_local, expected): """ - Test write cache with multi-use token + Ensure the cache banks are mapped as expected, depending on run type """ - expected_write = { - "url": "http://127.0.0.1:8200", - "token": "test", - "verify": None, - "namespace": None, - "uses": 10, - "lease_duration": 100, - "issued": 3000, - "unlimited_use_token": False, - } - function_response = vault.write_cache(cache_uses) - assert function_response is True - with salt.utils.files.fopen(str(tmp_cache / "salt_vault_token"), "r") as fp: - token_data = json.loads(fp.read()) - assert token_data == expected_write + opts = {"grains": {"id": "test-minion"}} + cbank = vault._get_cache_bank(opts, force_local=force_local, connection=connection) + if connection: + expected += "/connection" + assert cbank == expected + + +class TestVaultCache: + @pytest.fixture + def cbank(self): + return "vault/connection" + + @pytest.fixture + def ckey(self): + return "test" + + @pytest.fixture + def data(self): + return {"foo": "bar"} + + @pytest.fixture + def context(self, cbank, ckey, data): + return {cbank: {ckey: data}} + + @pytest.fixture + def cache_factory(self): + with patch("salt.cache.factory", autospec=True) as factory: + yield factory + + @pytest.fixture + def cached(self, cache_factory, data): + cache = Mock(spec=salt.cache.Cache) + 
cache.contains.return_value = True + cache.fetch.return_value = data + cache.updated.return_value = time.time() + cache_factory.return_value = cache + return cache + + @pytest.fixture + def cached_outdated(self, cache_factory, data): + cache = Mock(spec=salt.cache.Cache) + cache.contains.return_value = True + cache.fetch.return_value = data + cache.updated.return_value = time.time() - 9999999 + cache_factory.return_value = cache + return cache + + @pytest.fixture + def uncached(self, cache_factory): + cache = Mock(spec=salt.cache.Cache) + cache.contains.return_value = False + cache.fetch.return_value = None + cache.updated.return_value = None + cache_factory.return_value = cache + return cache + + @pytest.mark.parametrize("config", ["session", "other"]) + def test_get_uncached(self, config, uncached, cbank, ckey): + """ + Ensure that unavailable cached data is reported as None. + """ + cache = vault.VaultCache( + {}, cbank, ckey, cache_backend=uncached if config != "session" else None + ) + res = cache.get() + assert res is None + if config != "session": + uncached.contains.assert_called_once_with(cbank, ckey) + + def test_get_cached_from_context(self, context, cached, cbank, ckey, data): + """ + Ensure that cached data in __context__ is respected, regardless + of cache backend. + """ + cache = vault.VaultCache(context, cbank, ckey, cache_backend=cached) + res = cache.get() + assert res == data + cached.updated.assert_not_called() + cached.fetch.assert_not_called() + + def test_get_cached_not_outdated(self, cached, cbank, ckey, data): + """ + Ensure that cached data that is still valid is returned. 
+ """ + cache = vault.VaultCache({}, cbank, ckey, cache_backend=cached, ttl=3600) + res = cache.get() + assert res == data + cached.updated.assert_called_once_with(cbank, ckey) + cached.fetch.assert_called_once_with(cbank, ckey) + + def test_get_cached_outdated(self, cached_outdated, cbank, ckey): + """ + Ensure that cached data that is not valid anymore is flushed + and None is returned by default. + """ + cache = vault.VaultCache({}, cbank, ckey, cache_backend=cached_outdated, ttl=1) + res = cache.get() + assert res is None + cached_outdated.updated.assert_called_once_with(cbank, ckey) + cached_outdated.flush.assert_called_once_with(cbank, ckey) + cached_outdated.fetch.assert_not_called() + + @pytest.mark.parametrize("config", ["session", "other"]) + def test_flush(self, config, context, cached, cbank, ckey): + """ + Ensure that flushing clears the context key only and, if + a cache backend is in use, it is also cleared. + """ + cache = vault.VaultCache( + context, cbank, ckey, cache_backend=cached if config != "session" else None + ) + cache.flush() + assert context == {cbank: {}} + if config != "session": + cached.flush.assert_called_once_with(cbank, ckey) + + @pytest.mark.parametrize("config", ["session", "other"]) + def test_flush_cbank(self, config, context, cached, cbank, ckey): + """ + Ensure that flushing with cbank=True clears the context bank and, if + a cache backend is in use, it is also cleared. 
+ """ + cache = vault.VaultCache( + context, cbank, ckey, cache_backend=cached if config != "session" else None + ) + cache.flush(cbank=True) + assert context == {} + if config != "session": + cached.flush.assert_called_once_with(cbank, None) + + @pytest.mark.parametrize("context", [{}, {"vault/connection": {}}]) + @pytest.mark.parametrize("config", ["session", "other"]) + def test_store(self, config, context, uncached, cbank, ckey, data): + """ + Ensure that storing data in cache always updates the context + and, if a cache backend is in use, it is also stored there. + """ + cache = vault.VaultCache( + context, + cbank, + ckey, + cache_backend=uncached if config != "session" else None, + ) + cache.store(data) + assert context == {cbank: {ckey: data}} + if config != "session": + uncached.store.assert_called_once_with(cbank, ckey, data) + + +class TestVaultConfigCache: + @pytest.fixture(params=["session", "other", None]) + def config(self, request): + if request.param is None: + return None + return { + "cache": { + "backend": request.param, + "config": 3600, + "secret": "ttl", + } + } + + @pytest.fixture + def cbank(self): + return "vault/connection" + + @pytest.fixture + def ckey(self): + return "test" + + @pytest.fixture + def data(self, config): + return { + "cache": { + "backend": "new", + "config": 1337, + "secret": "ttl", + } + } + + @pytest.fixture + def context(self, cbank, ckey, data): + return {cbank: {ckey: data}} + + # TODO: most of the following fixtures should patch the parent class + @pytest.fixture + def cache_factory(self): + with patch("salt.cache.factory", autospec=True) as factory: + yield factory + + @pytest.fixture + def cached(self, cache_factory, data): + cache = Mock(spec=salt.cache.Cache) + cache.contains.return_value = True + cache.fetch.return_value = data + cache.updated.return_value = time.time() + cache_factory.return_value = cache + return cache + + @pytest.fixture + def cached_outdated(self, cache_factory, data): + cache = 
Mock(spec=salt.cache.Cache) + cache.contains.return_value = True + cache.fetch.return_value = data + cache.updated.return_value = time.time() - 9999999 + cache_factory.return_value = cache + return cache + + @pytest.fixture + def uncached(self, cache_factory): + cache = Mock(spec=salt.cache.Cache) + cache.contains.return_value = False + cache.fetch.return_value = None + cache.updated.return_value = None + cache_factory.return_value = cache + return cache + + @pytest.mark.usefixtures("uncached") + def test_get_config_cache_uncached(self, cbank, ckey): + """ + Ensure an uninitialized instance is returned when there is no cache + """ + res = vault._get_config_cache({}, {}, cbank, ckey) + assert res.config is None + + def test_get_config_context_cached(self, uncached, cbank, ckey, context): + """ + Ensure cached data in context wins + """ + res = vault._get_config_cache({}, context, cbank, ckey) + assert res.config == context[cbank][ckey] + uncached.contains.assert_not_called() + + def test_get_config_other_cached(self, cached, cbank, ckey, data): + """ + Ensure cached data from other sources is respected + """ + res = vault._get_config_cache({}, {}, cbank, ckey) + assert res.config == data + cached.contains.assert_called_once_with(cbank, ckey) + cached.fetch.assert_called_once_with(cbank, ckey) + + def test_reload(self, config, data, cbank, ckey): + """ + Ensure that a changed configuration is reloaded correctly and + during instantiation. When the config backend changes and the + previous was not session only, it should be flushed. 
+ """ + with patch("salt.utils.vault.VaultConfigCache.flush") as flush: + cache = vault.VaultConfigCache({}, cbank, ckey, {}, init_config=config) + assert cache.config == config + if config is not None: + assert cache.ttl == config["cache"]["config"] + if config["cache"]["backend"] != "session": + assert cache.cache is not None + else: + assert cache.ttl is None + assert cache.cache is None + cache._load(data) + assert cache.ttl == data["cache"]["config"] + assert cache.cache is not None + if config is not None and config["cache"]["backend"] != "session": + flush.assert_called_once() + + @pytest.mark.usefixtures("cached") + def test_exists(self, config, context, cbank, ckey): + """ + Ensure exists always evaluates to false when uninitialized + """ + cache = vault.VaultConfigCache(context, cbank, ckey, {}, init_config=config) + res = cache.exists() + assert res is bool(config) + + def test_get(self, config, cached, context, cbank, ckey, data): + """ + Ensure cached data is returned and backend settings honored, + unless the instance has not been initialized yet + """ + if config is not None and config["cache"]["backend"] != "session": + context = {} + cache = vault.VaultConfigCache(context, cbank, ckey, {}, init_config=config) + res = cache.get() + if config is not None: + assert res == data + if config["cache"]["backend"] != "session": + cached.fetch.assert_called_once_with(cbank, ckey) + else: + cached.contains.assert_not_called() + cached.fetch.assert_not_called() + else: + # uninitialized should always return None + # initialization when first stored or constructed with init_config + cached.contains.assert_not_called() + assert res is None + def test_flush(self, config, context, cached, cbank, ckey): + """ + Ensure flushing deletes the whole cache bank (=connection scope), + unless the configuration has not been initialized. + Also, it should uninitialize the instance. 
+ """ + if config is None: + context_old = deepcopy(context) + cache = vault.VaultConfigCache(context, cbank, ckey, {}, init_config=config) + cache.flush() + if config is None: + assert context == context_old + cached.flush.assert_not_called() + else: + if config["cache"]["backend"] == "session": + assert context == {} + else: + cached.flush.assert_called_once_with(cbank, None) + assert cache.ttl is None + assert cache.cache is None + assert cache.config is None -def test_write_cache_unlimited_token(cache_uses, tmp_cache): + @pytest.mark.usefixtures("uncached") + def test_store(self, data, cbank, ckey): + """ + Ensure storing config in cache also reloads the instance + """ + cache = vault.VaultConfigCache({}, {}, cbank, ckey) + assert cache.config is None + with patch("salt.utils.vault.VaultConfigCache._load") as rld: + with patch("salt.utils.vault.VaultCache.store") as store: + cache.store(data) + rld.assert_called_once_with(data) + store.assert_called_once() + + +class TestVaultAuthCache: + @pytest.fixture + def uncached(self): + with patch( + "salt.utils.vault.CommonCache._ckey_exists", + return_value=False, + autospec=True, + ): + with patch( + "salt.utils.vault.CommonCache._get_ckey", + return_value=None, + autospec=True, + ) as get: + yield get + + @pytest.fixture + def cached(self, token_auth): + with patch( + "salt.utils.vault.CommonCache._ckey_exists", + return_value=True, + autospec=True, + ): + with patch( + "salt.utils.vault.CommonCache._get_ckey", + return_value=token_auth["auth"], + autospec=True, + ) as get: + yield get + + @pytest.fixture + def cached_invalid_flush(self, token_auth, cached): + with patch("salt.utils.vault.CommonCache._flush", autospec=True) as flush: + token_auth["auth"]["num_uses"] = 1 + token_auth["auth"]["use_count"] = 1 + cached.return_value = token_auth["auth"] + yield flush + + @pytest.mark.usefixtures("uncached") + def test_get_uncached(self): + """ + Ensure that unavailable cached data is reported as None. 
+ """ + cache = vault.VaultAuthCache({}, None, None, vault.VaultToken) + res = cache.get() + assert res is None + + @pytest.mark.usefixtures("cached") + def test_get_cached(self, token_auth): + """ + Ensure that cached data that is still valid is returned. + """ + cache = vault.VaultAuthCache({}, None, None, vault.VaultToken) + res = cache.get() + assert res is not None + assert res == vault.VaultToken(**token_auth["auth"]) + + def test_get_cached_invalid(self, cached_invalid_flush): + """ + Ensure that cached data that is not valid anymore is flushed + and None is returned. + """ + cache = vault.VaultAuthCache({}, None, None, vault.VaultToken) + res = cache.get() + assert res is None + cached_invalid_flush.assert_called_once() + + def test_store(self, token_auth): + """ + Ensure that storing authentication data sends a dictionary + representation to the store implementation of the parent class. + """ + token = vault.VaultToken(**token_auth["auth"]) + cache = vault.VaultAuthCache({}, "cbank", "ckey", vault.VaultToken) + with patch("salt.utils.vault.CommonCache._store_ckey") as store: + cache.store(token) + store.assert_called_once_with("ckey", token.to_dict()) + + +############################################ +# VaultKV tests +############################################ + + +@pytest.mark.parametrize( + "kv_meta,expected", + [ + ( + "v1", + "kvv1_info", + ), + ( + "v2", + "kvv2_info", + ), + ( + "invalid", + "no_kv_info", + ), + ], + indirect=["kv_meta"], +) +def test_vault_kv_is_v2_no_cache(kv_meta, expected, request): """ - Test write cache with unlimited use token + Ensure path metadata is requested as expected and cached + if the lookup succeeds """ - write_data = { - "url": "http://127.0.0.1:8200", - "token": "test", - "verify": None, - "namespace": None, - "uses": 0, - "lease_duration": 100, - "issued": 3000, - } - expected_write = { - "url": "http://127.0.0.1:8200", - "token": "test", - "verify": None, - "namespace": None, - "uses": 0, - "lease_duration": 
100, - "issued": 3000, - "unlimited_use_token": True, - } - function_response = vault.write_cache(write_data) - with salt.utils.files.fopen(str(tmp_cache / "salt_vault_token"), "r") as fp: - token_data = json.loads(fp.read()) - assert token_data == expected_write + expected_val = request.getfixturevalue(expected) + res = kv_meta.is_v2("secret/some/path") + kv_meta.metadata_cache.get.assert_called_once() + kv_meta.client.get.assert_called_once_with( + "sys/internal/ui/mounts/secret/some/path" + ) + if expected != "no_kv_info": + kv_meta.metadata_cache.store.assert_called_once() + assert res == expected_val -def test_write_cache_issue_59361(cache_uses, tmp_cache): +@pytest.mark.parametrize( + "kv_meta_cached,expected", + [ + ( + "v1", + "kvv1_info", + ), + ( + "v2", + "kvv2_info", + ), + ], + indirect=["kv_meta_cached"], +) +def test_vault_kv_is_v2_cached(kv_meta_cached, expected, request): """ - Test race condition fix (Issue 59361) + Ensure cache is respected for path metadata """ - evt = threading.Event() + expected = request.getfixturevalue(expected) + res = kv_meta_cached.is_v2("secret/some/path") + kv_meta_cached.metadata_cache.get.assert_called_once() + kv_meta_cached.metadata_cache.store.assert_not_called() + kv_meta_cached.client.assert_not_called() + assert res == expected - def target(evt, cache_uses): - evt.wait() - function_response = vault.write_cache(cache_uses) - cached_token = { - "url": "http://127.0.0.1:8200", - "token": "testwithmuchmuchlongertoken", - "verify": None, - "namespace": None, - "uses": 10, - "lease_duration": 100, - "issued": 3000, - "unlimited_use_token": False, - } - expected_write = { - "url": "http://127.0.0.1:8200", - "token": "test", - "verify": None, - "namespace": None, - "uses": 10, - "lease_duration": 100, - "issued": 3000, - "unlimited_use_token": False, +class TestKVV1: + path = "secret/some/path" + + @pytest.mark.parametrize("include_metadata", [False, True]) + def test_vault_kv_read(self, kvv1, include_metadata): + """ + 
Ensure that VaultKV.read works for KV v1 and does not fail if + metadata is requested, which is invalid for KV v1. + """ + res = kvv1.read(self.path, include_metadata=include_metadata) + kvv1.client.get.assert_called_once_with(self.path) + assert res == {"foo": "bar"} + + def test_vault_kv_write(self, kvv1): + """ + Ensure that VaultKV.write works for KV v1. + """ + data = {"bar": "baz"} + kvv1.write(self.path, data) + kvv1.client.post.assert_called_once_with(self.path, payload=data) + + def test_vault_kv_patch(self, kvv1): + """ + Ensure that VaultKV.patch fails for KV v1. This action was introduced + in KV v2. It could be simulated in Python though. + """ + with pytest.raises(vault.VaultInvocationError): + kvv1.patch(self.path, {"bar": "baz"}) + + def test_vault_kv_delete(self, kvv1): + """ + Ensure that VaultKV.delete works for KV v1. + """ + kvv1.delete(self.path) + kvv1.client.request.assert_called_once_with("DELETE", self.path, payload=None) + + def test_vault_kv_delete_versions(self, kvv1): + """ + Ensure that VaultKV.delete with versions raises an exception for KV v1. + """ + with pytest.raises( + vault.VaultInvocationError, match="Versioning support requires kv-v2.*" + ): + kvv1.delete(self.path, versions=[1, 2, 3, 4]) + + def test_vault_kv_destroy(self, kvv1): + """ + Ensure that VaultKV.destroy raises an exception for KV v1. + """ + with pytest.raises(vault.VaultInvocationError): + kvv1.destroy(self.path, [1, 2, 3, 4]) + + def test_vault_kv_nuke(self, kvv1): + """ + Ensure that VaultKV.nuke raises an exception for KV v1. + """ + with pytest.raises(vault.VaultInvocationError): + kvv1.nuke(self.path) + + def test_vault_kv_list(self, kvv1): + """ + Ensure that VaultKV.list works for KV v1 and only returns keys. 
+ """ + res = kvv1.list(self.path) + kvv1.client.list.assert_called_once_with(self.path) + assert res == ["foo"] + + +class TestKVV2: + path = "secret/some/path" + paths = { + "data": "secret/data/some/path", + "metadata": "secret/metadata/some/path", + "delete": "secret/data/some/path", + "delete_versions": "secret/delete/some/path", + "destroy": "secret/destroy/some/path", } - thread1 = threading.Thread( - target=target, - args=( - evt, - cached_token, - ), + @pytest.mark.parametrize( + "versions,expected", + [ + (0, [0]), + ("1", [1]), + ([2], [2]), + (["3"], [3]), + ], ) - thread1.start() - thread2 = threading.Thread( - target=target, - args=( - evt, - expected_write, - ), + def test_parse_versions(self, kvv2, versions, expected): + """ + Ensure parsing versions works as expected: + single integer/number string or list of those are allowed + """ + assert kvv2._parse_versions(versions) == expected + + def test_parse_versions_raises_exception_when_unparsable(self, kvv2): + """ + Ensure unparsable versions raise an exception + """ + with pytest.raises(vault.VaultInvocationError): + kvv2._parse_versions("four") + + def test_get_secret_path_metadata_lookup_unexpected_response(self, kvv2, caplog): + """ + Ensure unexpected responses are treated as not KV + """ + kvv2.client.get.return_value = MagicMock( + _mock_json_response({"wrap_info": {}}, status_code=200) + ) + res = kvv2._get_secret_path_metadata(self.path) + assert res is None + assert "Unexpected response to metadata query" in caplog.text + + def test_get_secret_path_metadata_lookup_request_error(self, kvv2, caplog): + """ + Ensure HTTP error status codes are treated as not KV + """ + kvv2.client.get.side_effect = vault.VaultPermissionDeniedError + res = kvv2._get_secret_path_metadata(self.path) + assert res is None + assert "VaultPermissionDeniedError:" in caplog.text + + @pytest.mark.parametrize("include_metadata", [False, True]) + def test_vault_kv_read(self, kvv2, include_metadata, kvv2_response): + """ + 
Ensure that VaultKV.read works for KV v2 and returns metadata + if requested. + """ + res = kvv2.read(self.path, include_metadata=include_metadata) + kvv2.client.get.assert_called_once_with(self.paths["data"]) + if include_metadata: + assert res == kvv2_response["data"] + else: + assert res == kvv2_response["data"]["data"] + + def test_vault_kv_write(self, kvv2): + """ + Ensure that VaultKV.write works for KV v2. + """ + data = {"bar": "baz"} + kvv2.write(self.path, data) + kvv2.client.post.assert_called_once_with( + self.paths["data"], payload={"data": data} + ) + + def test_vault_kv_patch(self, kvv2): + """ + Ensure that VaultKV.patch works for KV v2. + """ + data = {"bar": "baz"} + kvv2.patch(self.path, data) + kvv2.client.patch.assert_called_once_with( + self.paths["data"], + payload={"data": data}, + add_headers={"Content-Type": "application/merge-patch+json"}, + ) + + def test_vault_kv_delete(self, kvv2): + """ + Ensure that VaultKV.delete works for KV v2. + """ + kvv2.delete(self.path) + kvv2.client.request.assert_called_once_with( + "DELETE", self.paths["data"], payload=None + ) + + @pytest.mark.parametrize( + "versions", [[1, 2], [2], 2, ["1", "2"], ["2"], "2", [1, "2"]] + ) + def test_vault_kv_delete_versions(self, kvv2, versions): + """ + Ensure that VaultKV.delete with versions works for KV v2. + """ + if isinstance(versions, list): + expected = [int(x) for x in versions] + else: + expected = [int(versions)] + kvv2.delete(self.path, versions=versions) + kvv2.client.request.assert_called_once_with( + "POST", self.paths["delete_versions"], payload={"versions": expected} + ) + + @pytest.mark.parametrize( + "versions", [[1, 2], [2], 2, ["1", "2"], ["2"], "2", [1, "2"]] + ) + def test_vault_kv_destroy(self, kvv2, versions): + """ + Ensure that VaultKV.destroy works for KV v2. 
+ """ + if isinstance(versions, list): + expected = [int(x) for x in versions] + else: + expected = [int(versions)] + kvv2.destroy(self.path, versions) + kvv2.client.post.assert_called_once_with( + self.paths["destroy"], payload={"versions": expected} + ) + + def test_vault_kv_nuke(self, kvv2): + """ + Ensure that VaultKV.nuke works for KV v2. + """ + kvv2.nuke(self.path) + kvv2.client.delete.assert_called_once_with(self.paths["metadata"]) + + def test_vault_kv_list(self, kvv2): + """ + Ensure that VaultKV.list works for KV v2 and only returns keys. + """ + res = kvv2.list(self.path) + kvv2.client.list.assert_called_once_with(self.paths["metadata"]) + assert res == ["foo"] + + +############################################ +# LeaseStore tests +############################################ + + +class TestLeaseStore: + @pytest.fixture(autouse=True, params=[0]) + def time_stopped(self, request): + with patch( + "salt.utils.vault.time.time", autospec=True, return_value=request.param + ): + yield + + @pytest.fixture + def lease(self): + return { + "id": "database/creds/testrole/abcd", + "lease_id": "database/creds/testrole/abcd", + "renewable": True, + "duration": 1337, + "creation_time": 0, + "expire_time": 1337, + "data": { + "username": "test", + "password": "test", + }, + } + + @pytest.fixture + def lease_renewed_response(self): + return { + "lease_id": "database/creds/testrole/abcd", + "renewable": True, + "lease_duration": 2000, + } + + @pytest.fixture + def lease_renewed_extended_response(self): + return { + "lease_id": "database/creds/testrole/abcd", + "renewable": True, + "lease_duration": 3000, + } + + @pytest.fixture + def store(self): + client = Mock(spec=vault.AuthenticatedVaultClient) + cache = Mock(spec=vault.VaultLeaseCache) + cache.exists.return_value = False + cache.get.return_value = None + return vault.LeaseStore(client, cache) + + @pytest.fixture + def store_valid(self, store, lease, lease_renewed_response): + store.cache.exists.return_value = True + 
store.cache.get.return_value = vault.VaultLease(**lease) + store.client.post.return_value = lease_renewed_response + return store + + def test_get_uncached_or_invalid(self, store): + """ + Ensure uncached or invalid leases are reported as None. + """ + ret = store.get("test") + assert ret is None + store.client.post.assert_not_called() + store.cache.flush.assert_not_called() + store.cache.store.assert_not_called() + + def test_get_cached_valid(self, store_valid, lease): + """ + Ensure valid leases are returned without extra behavior. + """ + ret = store_valid.get("test") + assert ret == lease + store_valid.client.post.assert_not_called() + store_valid.cache.flush.assert_not_called() + store_valid.cache.store.assert_not_called() + + @pytest.mark.parametrize( + "valid_for", [2000, pytest.param(2002, id="2002_renewal_leeway")] + ) + def test_get_valid_renew_default_period(self, store_valid, lease, valid_for): + """ + Ensure renewals are attempted by default, cache is updated accordingly + and validity checks after renewal allow for a little leeway to account + for latency. + """ + ret = store_valid.get("test", valid_for=valid_for) + lease["duration"] = lease["expire_time"] = 2000 + assert ret == lease + store_valid.client.post.assert_called_once_with( + "sys/leases/renew", payload={"lease_id": lease["id"]} + ) + store_valid.cache.flush.assert_not_called() + store_valid.cache.store.assert_called_once_with("test", ret) + + def test_get_valid_renew_increment(self, store_valid, lease): + """ + Ensure renew_increment is honored when renewing. 
+ """ + ret = store_valid.get("test", valid_for=1400, renew_increment=2000) + lease["duration"] = lease["expire_time"] = 2000 + assert ret == lease + store_valid.client.post.assert_called_once_with( + "sys/leases/renew", payload={"lease_id": lease["id"], "increment": 2000} + ) + store_valid.cache.flush.assert_not_called() + store_valid.cache.store.assert_called_once_with("test", ret) + + def test_get_valid_renew_increment_insufficient(self, store_valid, lease): + """ + Ensure that when renewal_increment is set, valid_for is respected and that + a second renewal using valid_for as increment is not attempted when the + Vault server does not allow renewals for at least valid_for. + """ + ret = store_valid.get("test", valid_for=2100, renew_increment=3000) + assert ret is None + store_valid.client.post.assert_called_once_with( + "sys/leases/renew", payload={"lease_id": lease["id"], "increment": 3000} + ) + store_valid.cache.flush.assert_called_once_with("test") + store_valid.cache.store.assert_not_called() + + @pytest.mark.parametrize( + "valid_for", [3000, pytest.param(3002, id="3002_renewal_leeway")] ) - thread2.start() - evt.set() - thread1.join() - thread2.join() - - with salt.utils.files.fopen(str(tmp_cache / "salt_vault_token"), "r") as fp: - try: - token_data = json.loads(fp.read()) - except json.decoder.JSONDecodeError: - assert False, "Cache file data corrupted" - - -def test_make_request_single_use_token_run_ok(json_success, cache_single): - """ - Given single use token in __context__, function should run successful secret lookup with no other modifications - """ - mock = _mock_json_response(json_success) - supplied_context = {"vault_token": copy(cache_single)} - expected_headers = {"X-Vault-Token": "test", "Content-Type": "application/json"} - with patch.dict(vault.__context__, supplied_context): - with patch("requests.request", mock): - vault_return = vault.make_request("/secret/my/secret", "key") - assert vault.__context__ == {} - mock.assert_called_with( - 
"/secret/my/secret", - "http://127.0.0.1:8200/key", - headers=expected_headers, - verify=ANY, + def test_get_valid_renew_valid_for( + self, + store_valid, + lease, + valid_for, + lease_renewed_response, + lease_renewed_extended_response, + ): + """ + Ensure that, if renew_increment was not set and the default period + does not yield valid_for, a second renewal is attempted by valid_for. + There should be some leeway by default to account for latency. + """ + store_valid.client.post.side_effect = ( + lease_renewed_response, + lease_renewed_extended_response, + ) + ret = store_valid.get("test", valid_for=valid_for) + lease["duration"] = lease["expire_time"] = 3000 + assert ret == lease + store_valid.client.post.assert_has_calls( + ( + call("sys/leases/renew", payload={"lease_id": lease["id"]}), + call( + "sys/leases/renew", + payload={"lease_id": lease["id"], "increment": valid_for}, + ), ) - assert vault_return.json() == json_success - - -def test_make_request_single_use_token_run_auth_error(json_denied, cache_single): - """ - Given single use token in __context__ and login error, function should request token and re-run - """ - # Disable logging because simulated http failures are logged as errors - logging.disable(logging.CRITICAL) - mock = _mock_json_response(json_denied, status_code=400) - supplied_context = {"vault_token": cache_single} - expected_headers = {"X-Vault-Token": "test", "Content-Type": "application/json"} - with patch.dict(vault.__context__, supplied_context): - with patch("requests.request", mock): - with patch.object(vault, "del_cache") as mock_del_cache: - vault_return = vault.make_request("/secret/my/secret", "key") - assert vault.__context__ == {} - mock.assert_called_with( - "/secret/my/secret", - "http://127.0.0.1:8200/key", - headers=expected_headers, - verify=ANY, - ) - assert vault_return.json() == json_denied - mock_del_cache.assert_called() - assert mock.call_count == 2 - logging.disable(logging.NOTSET) + ) + 
store_valid.cache.flush.assert_not_called() + store_valid.cache.store.assert_called_once_with("test", ret) + def test_get_valid_not_renew(self, store_valid): + """ + Currently valid leases should not be returned if they undercut + valid_for and cache should be flushed by default. + """ + ret = store_valid.get("test", valid_for=2000, renew=False) + assert ret is None + store_valid.cache.flush.assert_called_once_with("test") + store_valid.client.post.assert_not_called() + store_valid.cache.store.assert_not_called() -def test_multi_use_token_successful_run(json_success, cache_uses): - """ - Given multi-use token, function should get secret and decrement token - """ - expected_cache_write = { - "url": "http://127.0.0.1:8200", - "token": "test", - "verify": None, - "namespace": None, - "uses": 9, - "lease_duration": 100, - "issued": 3000, - "unlimited_use_token": False, - } - mock = _mock_json_response(json_success) - expected_headers = {"X-Vault-Token": "test", "Content-Type": "application/json"} - with patch.object(vault, "get_cache") as mock_get_cache: - mock_get_cache.return_value = copy(cache_uses) - with patch("requests.request", mock): - with patch.object(vault, "del_cache") as mock_del_cache: - with patch.object(vault, "write_cache") as mock_write_cache: - vault_return = vault.make_request("/secret/my/secret", "key") - mock.assert_called_with( - "/secret/my/secret", - "http://127.0.0.1:8200/key", - headers=expected_headers, - verify=ANY, - ) - mock_write_cache.assert_called_with(expected_cache_write) - assert vault_return.json() == json_success - assert mock.call_count == 1 - - -def test_multi_use_token_last_use(json_success, cache_uses_last): - """ - Given last use of multi-use token, function should succeed and flush token cache - """ - mock = _mock_json_response(json_success) - expected_headers = {"X-Vault-Token": "test", "Content-Type": "application/json"} - with patch.object(vault, "get_cache") as mock_get_cache: - mock_get_cache.return_value = 
cache_uses_last - with patch("requests.request", mock): - with patch.object(vault, "del_cache") as mock_del_cache: - with patch.object(vault, "write_cache") as mock_write_cache: - vault_return = vault.make_request("/secret/my/secret", "key") - mock.assert_called_with( - "/secret/my/secret", - "http://127.0.0.1:8200/key", - headers=expected_headers, - verify=ANY, - ) - mock_del_cache.assert_called() - assert vault_return.json() == json_success - assert mock.call_count == 1 - - -def test_unlimited_use_token_no_decrement(json_success, cache_unlimited): - """ - Given unlimited-use token, function should succeed not del cache or decrement - """ - mock = _mock_json_response(json_success) - expected_headers = {"X-Vault-Token": "test", "Content-Type": "application/json"} - with patch.object(vault, "get_cache") as mock_get_cache: - mock_get_cache.return_value = cache_unlimited - with patch("requests.request", mock): - with patch.object(vault, "del_cache") as mock_del_cache: - with patch.object(vault, "write_cache") as mock_write_cache: - vault_return = vault.make_request("/secret/my/secret", "key") - mock.assert_called_with( - "/secret/my/secret", - "http://127.0.0.1:8200/key", - headers=expected_headers, - verify=ANY, - ) - assert ( - not mock_del_cache.called - ), "del cache should not be called for unlimited use token" - assert ( - not mock_write_cache.called - ), "write cache should not be called for unlimited use token" - assert vault_return.json() == json_success - assert mock.call_count == 1 + def test_get_valid_not_flush(self, store_valid): + """ + Currently valid leases should not be returned if they undercut + valid_for and cache should not be flushed if requested so. 
+ """ + ret = store_valid.get("test", valid_for=2000, flush=False, renew=False) + assert ret is None + store_valid.cache.flush.assert_not_called() + store_valid.client.post.assert_not_called() + store_valid.cache.store.assert_not_called() -def test_get_cache_standard(cache_single): - """ - test standard first run of no cache file. Should generate new connection and write cache - """ - with patch.object(vault, "_read_cache_file") as mock_read_cache: - mock_read_cache.return_value = {} - with patch.object(vault, "get_vault_connection") as mock_get_vault_connection: - mock_get_vault_connection.return_value = copy(cache_single) - with patch.object(vault, "write_cache") as mock_write_cache: - cache_result = vault.get_cache() - mock_write_cache.assert_called_with(copy(cache_single)) +############################################ +# Miscellaneous tests +############################################ -def test_get_cache_existing_cache_valid(cache_uses): +@pytest.mark.parametrize( + "opts_runtype,expected", + [ + ("master", vault.SALT_RUNTYPE_MASTER), + ("master_peer_run", vault.SALT_RUNTYPE_MASTER_PEER_RUN), + ("master_impersonating", vault.SALT_RUNTYPE_MASTER_IMPERSONATING), + ("minion_local_1", vault.SALT_RUNTYPE_MINION_LOCAL), + ("minion_local_2", vault.SALT_RUNTYPE_MINION_LOCAL), + ("minion_local_3", vault.SALT_RUNTYPE_MINION_LOCAL), + ("minion_remote", vault.SALT_RUNTYPE_MINION_REMOTE), + ], + indirect=["opts_runtype"], +) +def test_get_salt_run_type(opts_runtype, expected): """ - test standard valid cache file + Ensure run types are detected as expected """ - with patch("time.time", return_value=1234): - with patch.object(vault, "_read_cache_file") as mock_read_cache: - mock_read_cache.return_value = cache_uses - with patch.object(vault, "write_cache") as mock_write_cache: - with patch.object(vault, "del_cache") as mock_del_cache: - cache_result = vault.get_cache() - assert not mock_write_cache.called - assert not mock_del_cache.called - assert cache_result == 
cache_uses + assert vault._get_salt_run_type(opts_runtype) == expected -def test_get_cache_existing_cache_old(cache_uses): +@pytest.mark.parametrize( + "config,expected", + [ + ({"auth": {"method": "token", "token": "test-token"}}, "server:url"), + ({"auth": {"method": "token"}, "server": {"url": "test-url"}}, "auth:token"), + ( + {"auth": {"method": "approle"}, "server": {"url": "test-url"}}, + "auth:role_id", + ), + ( + {"auth": {"method": "foo"}, "server": {"url": "test-url"}}, + "not a valid auth method", + ), + ], +) +def test_parse_config_ensures_necessary_values(config, expected): """ - test old cache file + Ensure that parse_config validates the configuration """ - with patch("time.time", return_value=3101): - with patch.object(vault, "get_vault_connection") as mock_get_vault_connection: - mock_get_vault_connection.return_value = cache_uses - with patch.object(vault, "_read_cache_file") as mock_read_cache: - mock_read_cache.return_value = cache_uses - with patch.object(vault, "write_cache") as mock_write_cache: - with patch.object(vault, "del_cache") as mock_del_cache: - cache_result = vault.get_cache() - assert mock_del_cache.called - assert mock_write_cache.called - assert cache_result == cache_uses + with pytest.raises(salt.exceptions.InvalidConfigError, match=f".*{expected}.*"): + vault.parse_config(config) -def test_write_cache_standard(cache_single): +@pytest.mark.parametrize( + "opts", + [ + {"vault": {"server": {"verify": "/etc/ssl/certs/ca-certificates.crt"}}}, + {"vault": {"verify": "/etc/ssl/certs/ca-certificates.crt"}}, + ], +) +def test_parse_config_respects_local_verify(opts): """ - Test write cache with standard single use token + Ensure locally configured verify values are respected. 
""" - function_response = vault.write_cache(copy(cache_single)) - assert vault.__context__["vault_token"] == copy(cache_single) - assert function_response is True + testval = "/etc/ssl/certs/ca-certificates.crt" + ret = vault.parse_config( + {"server": {"verify": "default"}}, validate=False, opts=opts + ) + assert ret["server"]["verify"] == testval -def test_path_is_v2(metadata_v2): +@pytest.mark.parametrize( + "secret,config,expected", + [ + ("token", None, r"auth/token/create(/[^/]+)?"), + ("secret_id", None, r"auth/[^/]+/role/[^/]+/secret\-id"), + ("role_id", None, r"auth/[^/]+/role/[^/]+/role\-id"), + ( + "secret_id", + {"auth": {"approle_mount": "test_mount", "approle_name": "test_minion"}}, + r"auth/test_mount/role/test_minion/secret\-id", + ), + ( + "role_id", + {"auth": {"approle_mount": "test_mount", "approle_name": "test_minion"}}, + r"auth/test_mount/role/test_minion/role\-id", + ), + ( + "secret_id", + {"auth": {"approle_mount": "te$t-mount", "approle_name": "te$t-minion"}}, + r"auth/te\$t\-mount/role/te\$t\-minion/secret\-id", + ), + ( + "role_id", + {"auth": {"approle_mount": "te$t-mount", "approle_name": "te$t-minion"}}, + r"auth/te\$t\-mount/role/te\$t\-minion/role\-id", + ), + ], +) +def test_get_expected_creation_path(secret, config, expected): """ - Validated v2 path is detected as vault kv v2 + Ensure expected creation paths are resolved as expected """ - expected_return = { - "v2": True, - "data": "secret/data/mything", - "metadata": "secret/metadata/mything", - "delete": "secret/mything", - "type": "kv", - "destroy": "secret/destroy/mything", - } - with patch.object(vault, "_get_secret_path_metadata") as mock_get_metadata: - mock_get_metadata.return_value = metadata_v2 - function_return = vault.is_v2("secret/mything") - assert function_return == expected_return - - -def test_request_with_namespace(json_success, cache_single_namespace): - """ - Test request with namespace configured - """ - mock = _mock_json_response(json_success) - 
expected_headers = { - "X-Vault-Token": "test", - "X-Vault-Namespace": "test_namespace", - "Content-Type": "application/json", - } - supplied_config = {"namespace": "test_namespace"} - supplied_context = {"vault_token": copy(cache_single_namespace)} - with patch.dict(vault.__context__, supplied_context): - with patch.dict(vault.__opts__["vault"], supplied_config): - with patch("requests.request", mock): - vault_return = vault.make_request("/secret/my/secret", "key") - mock.assert_called_with( - "/secret/my/secret", - "http://127.0.0.1:8200/key", - headers=expected_headers, - verify=ANY, - ) - assert vault_return.json() == json_success + assert vault._get_expected_creation_path(secret, config) == expected -def test_get_secret_path_metadata_no_cache(metadata_v2, cache_uses, cache_secret_meta): +def test_get_expected_creation_path_fails_for_unknown_type(): """ - test with no cache file + Ensure unknown source types result in an exception """ - make_request_response = { - "request_id": "b82f2df7-a9b6-920c-0ed2-a3463b996f9e", - "lease_id": "", - "renewable": False, - "lease_duration": 0, - "data": metadata_v2, - "wrap_info": None, - "warnings": None, - "auth": None, - } - cache_object = copy(cache_uses) - expected_cache_object = copy(cache_uses) - expected_cache_object.update(copy(cache_secret_meta)) - secret_path = "secret/mything" - mock = _mock_json_response(make_request_response) - with patch.object(vault, "_read_cache_file") as mock_read_cache: - mock_read_cache.return_value = cache_object - with patch.object(vault, "write_cache") as mock_write_cache: - with patch("salt.utils.vault.make_request", mock): - function_result = vault._get_secret_path_metadata(secret_path) - assert function_result == metadata_v2 - mock_write_cache.assert_called_with(cache_object) - assert cache_object == expected_cache_object + with pytest.raises(vault.VaultInvocationError): + vault._get_expected_creation_path("nonexistent") -def test_expand_pattern_lists(): +@pytest.mark.parametrize( + 
"pattern,expected", + [ + ("no-tokens-to-replace", ["no-tokens-to-replace"]), + ("single-dict:{minion}", ["single-dict:{minion}"]), + ("single-list:{grains[roles]}", ["single-list:web", "single-list:database"]), + ( + "multiple-lists:{grains[roles]}+{grains[aux]}", + [ + "multiple-lists:web+foo", + "multiple-lists:web+bar", + "multiple-lists:database+foo", + "multiple-lists:database+bar", + ], + ), + ( + "single-list-with-dicts:{grains[id]}+{grains[roles]}+{grains[id]}", + [ + "single-list-with-dicts:{grains[id]}+web+{grains[id]}", + "single-list-with-dicts:{grains[id]}+database+{grains[id]}", + ], + ), + ( + "deeply-nested-list:{grains[deep][foo][bar][baz]}", + [ + "deeply-nested-list:hello", + "deeply-nested-list:world", + ], + ), + ], +) +def test_expand_pattern_lists(pattern, expected): """ Ensure expand_pattern_lists works as intended: - Expand list-valued patterns - Do not change non-list-valued tokens """ - cases = { - "no-tokens-to-replace": ["no-tokens-to-replace"], - "single-dict:{minion}": ["single-dict:{minion}"], - "single-list:{grains[roles]}": ["single-list:web", "single-list:database"], - "multiple-lists:{grains[roles]}+{grains[aux]}": [ - "multiple-lists:web+foo", - "multiple-lists:web+bar", - "multiple-lists:database+foo", - "multiple-lists:database+bar", - ], - "single-list-with-dicts:{grains[id]}+{grains[roles]}+{grains[id]}": [ - "single-list-with-dicts:{grains[id]}+web+{grains[id]}", - "single-list-with-dicts:{grains[id]}+database+{grains[id]}", - ], - "deeply-nested-list:{grains[deep][foo][bar][baz]}": [ - "deeply-nested-list:hello", - "deeply-nested-list:world", - ], - } - pattern_vars = { "id": "test-minion", "roles": ["web", "database"], @@ -575,47 +4123,79 @@ def test_expand_pattern_lists(): } mappings = {"minion": "test-minion", "grains": pattern_vars} - for case, correct_output in cases.items(): - output = vault.expand_pattern_lists(case, **mappings) - assert output == correct_output + output = vault.expand_pattern_lists(pattern, 
**mappings) + assert output == expected + + +@pytest.mark.parametrize( + "inpt,expected", + [ + (60.0, 60.0), + (60, 60.0), + ("60", 60.0), + ("60s", 60.0), + ("2m", 120.0), + ("1h", 3600.0), + ("1d", 86400.0), + ("1.5s", 1.5), + ("1.5m", 90.0), + ("1.5h", 5400.0), + ("7.5d", 648000.0), + ], +) +def test_timestring_map(inpt, expected): + assert vault.timestring_map(inpt) == expected + + +############################################ +# Deprecation tests +############################################ @pytest.mark.parametrize( - "conf_location,called", - [("local", False), ("master", True), (None, False), ("doesnotexist", False)], + "old,new", + [ + ("policies", "policies:assign"), + ("auth:ttl", "issue:token:params:explicit_max_ttl"), + ("auth:uses", "issue:token:params:num_uses"), + ("url", "server:url"), + ("namespace", "server:namespace"), + ("verify", "server:verify"), + ("role_name", "issue:token:role_name"), + ("auth:token_backend", "cache:backend"), + ("auth:allow_minion_override", "issue:allow_minion_override_params"), + ], ) -def test_get_vault_connection_config_location(tmp_path, conf_location, called, caplog): +def test_get_config_recognizes_old_config(old, new): """ - test the get_vault_connection function when - config_location is set in opts + Ensure that parse_config recognizes the old configuration format + and translates it to new equivalents correctly. 
""" - token_url = { - "url": "http://127.0.0.1", - "namespace": None, - "token": "test", - "verify": None, - "issued": 1666100373, - "ttl": 3600, - } - opts = {"config_location": conf_location, "pki_dir": tmp_path / "pki"} - with patch.object(vault, "_get_token_and_url_from_master") as patch_token: - patch_token.return_vaule = token_url - with patch.dict(vault.__opts__["vault"], opts): - vault.get_vault_connection() + def rec(config, path, val=None): + ptr = config + parts = path.split(":") + while parts: + cur = parts.pop(0) + if val: + if parts and not isinstance(ptr.get(cur), dict): + ptr[cur] = {} + elif not parts: + ptr[cur] = val + return + ptr = ptr[cur] + return ptr - if called: - patch_token.assert_called() - else: - patch_token.assert_not_called() - if conf_location == "doesnotexist": - assert "config_location must be either local or master" in caplog.text - - -def test_del_cache(tmp_cache): - token_file = tmp_cache / "salt_vault_token" - token_file.touch() - with patch.dict(vault.__context__, {"vault_token": "fake_token"}): - vault.del_cache() - assert "vault_token" not in vault.__context__ - assert not token_file.exists() + config = { + "auth": { + "token": "test-token", + }, + "server": { + "url": "test-url", + }, + } + + oldval = "oldval" if old != "policies" else ["oldval"] + rec(config, old, oldval) + parsed = vault.parse_config(config) + assert rec(parsed, new) == oldval diff --git a/tests/support/pytest/vault.py b/tests/support/pytest/vault.py new file mode 100644 index 000000000000..40c7aec21d13 --- /dev/null +++ b/tests/support/pytest/vault.py @@ -0,0 +1,365 @@ +import json +import logging +import subprocess +import time + +import pytest +from pytestshellutils.utils.processes import ProcessResult + +import salt.utils.files +import salt.utils.path +from tests.support.helpers import PatchedEnviron +from tests.support.runtests import RUNTIME_VARS + +log = logging.getLogger(__name__) + + +def _vault_cmd(cmd, textinput=None, raw=False): + 
vault_binary = salt.utils.path.which("vault")
+    proc = subprocess.run(
+        [vault_binary] + cmd,
+        check=False,
+        input=textinput,
+        stdout=subprocess.PIPE,
+        stderr=subprocess.PIPE,
+        universal_newlines=True,
+    )
+
+    ret = ProcessResult(
+        returncode=proc.returncode,
+        stdout=proc.stdout,
+        stderr=proc.stderr,
+        cmdline=proc.args,
+    )
+
+    if raw:
+        return ret
+    if ret.returncode != 0:
+        log.debug("Failed to run vault %s:\n%s", " ".join(cmd), ret)
+        raise RuntimeError(ret.stdout)
+    return ret
+
+
+def vault_write_policy(name, rules):
+    try:
+        _vault_cmd(["policy", "write", name, "-"], textinput=rules)
+    except RuntimeError:
+        pytest.fail(f"Unable to write policy `{name}`")
+
+
+def vault_write_policy_file(policy, filename=None):
+    if filename is None:
+        filename = policy
+    try:
+        _vault_cmd(
+            [
+                "policy",
+                "write",
+                policy,
+                f"{RUNTIME_VARS.FILES}/vault/policies/{filename}.hcl",
+            ]
+        )
+    except RuntimeError:
+        pytest.fail(f"Unable to write policy `{policy}`")
+
+
+def vault_read_policy(policy):
+    ret = _vault_cmd(["policy", "read", "-format=json", policy], raw=True)
+    if ret.returncode != 0:
+        if "No policy named" in ret.stderr:
+            return None
+        log.debug("Failed to read policy `%s`:\n%s", policy, ret)
+        pytest.fail(f"Unable to read policy `{policy}`")
+    res = json.loads(ret.stdout)
+    return res["policy"]
+
+
+def vault_list_policies():
+    try:
+        ret = _vault_cmd(["policy", "list", "-format=json"])
+    except RuntimeError:
+        pytest.fail("Unable to list policies")
+    return json.loads(ret.stdout)
+
+
+def vault_delete_policy(policy):
+    try:
+        _vault_cmd(["policy", "delete", policy])
+    except RuntimeError:
+        pytest.fail(f"Unable to delete policy `{policy}`")
+
+
+def vault_enable_secret_engine(name, options=None, **kwargs):
+    if options is None:
+        options = []
+    try:
+        ret = _vault_cmd(["secrets", "enable"] + options + [name])
+    except RuntimeError:
+        pytest.fail(f"Could not enable secret engine `{name}`")
+
+    if "path is already in use at" in ret.stdout:
+        return
False + if "Success" in ret.stdout: + return True + log.debug("Failed to enable secret engine `%s`:\n%s", name, ret) + pytest.fail(f"Could not enable secret engine `{name}`: {ret.stdout}") + + +def vault_disable_secret_engine(name): + try: + ret = _vault_cmd(["secrets", "disable", name]) + except RuntimeError: + pytest.fail(f"Could not disable secret engine `{name}`") + + if "Success" in ret.stdout: + return True + log.debug("Failed to disable secret engine `%s`:\n%s", name, ret) + pytest.fail(f"Could not disable secret engine `{name}`: {ret.stdout}") + + +def vault_enable_auth_method(name, options=None, **kwargs): + if options is None: + options = [] + cmd = ( + ["auth", "enable"] + options + [name] + [f"{k}={v}" for k, v in kwargs.items()] + ) + try: + ret = _vault_cmd(cmd) + except RuntimeError: + pytest.fail(f"Could not enable auth method `{name}`") + + if "path is already in use at" in ret.stdout: + return False + if "Success" in ret.stdout: + return True + log.debug("Failed to enable auth method `%s`:\n%s", name, ret) + pytest.fail(f"Could not enable auth method `{name}`: {ret.stdout}") + + +def vault_disable_auth_method(name): + try: + ret = _vault_cmd(["auth", "disable", name]) + except RuntimeError: + pytest.fail(f"Could not disable auth method `{name}`") + + if "Success" in ret.stdout: + return True + log.debug("Failed to disable auth method `%s`:\n%s", name, ret) + pytest.fail(f"Could not disable auth method `{name}`: {ret.stdout}") + + +def vault_write_secret(path, **kwargs): + cmd = ["kv", "put", path] + [f"{k}={v}" for k, v in kwargs.items()] + try: + ret = _vault_cmd(cmd) + except RuntimeError: + pytest.fail(f"Failed to write secret at `{path}`") + + if vault_read_secret(path) != kwargs: + log.debug("Failed to write secret at `%s`:\n%s", path, ret) + pytest.fail(f"Failed to write secret at `{path}`") + return True + + +def vault_write_secret_file(path, data_name): + data_path = f"{RUNTIME_VARS.FILES}/vault/data/{data_name}.json" + with 
salt.utils.files.fopen(data_path) as f:
+        data = json.load(f)
+    cmd = ["kv", "put", path, f"@{data_path}"]
+    try:
+        ret = _vault_cmd(cmd)
+    except RuntimeError:
+        pytest.fail(f"Failed to write secret at `{path}`")
+
+    if vault_read_secret(path) != data:
+        log.debug("Failed to write secret at `%s`:\n%s", path, ret)
+        pytest.fail(f"Failed to write secret at `{path}`")
+    return True
+
+
+def vault_read_secret(path):
+    ret = _vault_cmd(["kv", "get", "-format=json", path], raw=True)
+
+    if ret.returncode != 0:
+        if "No value found at" in ret.stderr:
+            return None
+        log.debug("Failed to read secret at `%s`:\n%s", path, ret)
+        pytest.fail(f"Failed to read secret at `{path}`")
+    res = json.loads(ret.stdout)
+    if "data" in res["data"]:
+        return res["data"]["data"]
+    return res["data"]
+
+
+def vault_list_secrets(path):
+    ret = _vault_cmd(["kv", "list", "-format=json", path], raw=True)
+    if ret.returncode != 0:
+        if ret.returncode == 2:
+            return []
+        log.debug("Failed to list secrets at `%s`:\n%s", path, ret)
+        pytest.fail(f"Failed to list secrets at `{path}`")
+    return json.loads(ret.stdout)
+
+
+def vault_delete_secret(path, metadata=False):
+    try:
+        ret = _vault_cmd(["kv", "delete", path])
+    except RuntimeError:
+        pytest.fail(f"Failed to delete secret at `{path}`")
+
+    if vault_read_secret(path) is not None:
+        log.debug("Failed to delete secret at `%s`:\n%s", path, ret)
+        pytest.fail(f"Failed to delete secret at `{path}`")
+
+    if not metadata:
+        return True
+
+    ret = _vault_cmd(["kv", "metadata", "delete", path], raw=True)
+    if (
+        ret.returncode != 0
+        and "Metadata not supported on KV Version 1" not in ret.stderr
+    ):
+        log.debug("Failed to delete secret metadata at `%s`:\n%s", path, ret)
+        pytest.fail(f"Failed to delete secret metadata at `{path}`")
+    return True
+
+
+def vault_delete(path):
+    try:
+        ret = _vault_cmd(["delete", "-format=json", path])
+    except RuntimeError as err:
+        pytest.fail(f"Failed to delete path at `{path}`: {err}")
+    try:
+        return
json.loads(ret.stdout) or True + except json.decoder.JSONDecodeError: + return True + + +def vault_list(path): + ret = _vault_cmd(["list", "-format=json", path], raw=True) + if ret.returncode != 0: + if ret.returncode == 2: + return [] + log.debug("Failed to list secrets at `%s`:\n%s", path, ret) + pytest.fail(f"Failed to list secrets at `{path}`") + return json.loads(ret.stdout) + + +def vault_read(path): + try: + ret = _vault_cmd(["read", "-format=json", path]) + except RuntimeError as err: + pytest.fail(f"Failed to read path at `{path}`: {err}") + return json.loads(ret.stdout) + + +def vault_write(path, *args, **kwargs): + kwargs_ = [f"{k}={v}" for k, v in kwargs.items()] + cmd = ( + ["write", "-format=json"] + + (["-f"] if not (args or kwargs) else []) + + [path] + + list(args) + + kwargs_ + ) + try: + ret = _vault_cmd(cmd) + except RuntimeError as err: + pytest.fail(f"Failed to write to path at `{path}`: {err}") + try: + return json.loads(ret.stdout) or True + except json.decoder.JSONDecodeError: + return True + + +def vault_revoke(lease_id, prefix=False): + cmd = ["lease", "revoke"] + if prefix: + cmd += ["-prefix"] + cmd += [lease_id] + try: + _vault_cmd(cmd) + except RuntimeError as err: + pytest.fail(f"Failed to revoke lease `{lease_id}`: {err}") + return True + + +@pytest.fixture(scope="module") +def vault_environ(vault_port): + with PatchedEnviron(VAULT_ADDR=f"http://127.0.0.1:{vault_port}"): + yield + + +def vault_container_version_id(value): + return f"vault=={value}" + + +@pytest.fixture( + scope="module", + params=["0.9.6", "1.3.1", "latest"], + ids=vault_container_version_id, +) +def vault_container_version(request, salt_factories, vault_port, vault_environ): + vault_version = request.param + vault_binary = salt.utils.path.which("vault") + config = { + "backend": {"file": {"path": "/vault/file"}}, + "default_lease_ttl": "168h", + "max_lease_ttl": "720h", + } + + factory = salt_factories.get_container( + "vault", + 
f"ghcr.io/saltstack/salt-ci-containers/vault:{vault_version}", + check_ports=[vault_port], + container_run_kwargs={ + "ports": {"8200/tcp": vault_port}, + "environment": { + "VAULT_DEV_ROOT_TOKEN_ID": "testsecret", + "VAULT_LOCAL_CONFIG": json.dumps(config), + }, + "cap_add": "IPC_LOCK", + }, + pull_before_start=True, + skip_on_pull_failure=True, + skip_if_docker_client_not_connectable=True, + ) + with factory.started() as factory: + attempts = 0 + while attempts < 3: + attempts += 1 + time.sleep(1) + proc = subprocess.run( + [vault_binary, "login", "token=testsecret"], + check=False, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + universal_newlines=True, + ) + if proc.returncode == 0: + break + ret = ProcessResult( + returncode=proc.returncode, + stdout=proc.stdout, + stderr=proc.stderr, + cmdline=proc.args, + ) + log.debug("Failed to authenticate against vault:\n%s", ret) + time.sleep(4) + else: + pytest.fail("Failed to login to vault") + + vault_write_policy_file("salt_master") + + if "latest" == vault_version: + vault_write_policy_file("salt_minion") + else: + vault_write_policy_file("salt_minion", "salt_minion_old") + + if vault_version in ("1.3.1", "latest"): + vault_enable_secret_engine("kv-v2") + if vault_version == "latest": + vault_enable_auth_method("approle", ["-path=salt-minions"]) + vault_enable_secret_engine("kv", ["-version=2", "-path=salt"]) + + yield vault_version diff --git a/tests/unit/modules/test_vault.py b/tests/unit/modules/test_vault.py deleted file mode 100644 index 59aebae7faf2..000000000000 --- a/tests/unit/modules/test_vault.py +++ /dev/null @@ -1,188 +0,0 @@ -""" -Test case for the vault execution module -""" - - -import salt.modules.vault as vault -from salt.exceptions import CommandExecutionError -from tests.support.mixins import LoaderModuleMockMixin -from tests.support.mock import MagicMock, patch -from tests.support.unit import TestCase - - -class TestVaultModule(LoaderModuleMockMixin, TestCase): - """ - Test case for the 
vault execution module - """ - - def setup_loader_modules(self): - return { - vault: { - "__opts__": { - "vault": { - "url": "http://127.0.0.1", - "auth": {"token": "test", "method": "token"}, - } - }, - "__grains__": {"id": "test-minion"}, - } - } - - def test_read_secret_v1(self): - """ - Test salt.modules.vault.read_secret function - """ - version = {"v2": False, "data": None, "metadata": None, "type": None} - mock_version = MagicMock(return_value=version) - mock_vault = MagicMock() - mock_vault.return_value.status_code = 200 - mock_vault.return_value.json.return_value = {"data": {"key": "test"}} - with patch.dict( - vault.__utils__, {"vault.make_request": mock_vault} - ), patch.dict(vault.__utils__, {"vault.is_v2": mock_version}): - vault_return = vault.read_secret("/secret/my/secret") - - self.assertDictEqual(vault_return, {"key": "test"}) - - def test_read_secret_v1_key(self): - """ - Test salt.modules.vault.read_secret function specifying key - """ - version = {"v2": False, "data": None, "metadata": None, "type": None} - mock_version = MagicMock(return_value=version) - mock_vault = MagicMock() - mock_vault.return_value.status_code = 200 - mock_vault.return_value.json.return_value = {"data": {"key": "somevalue"}} - with patch.dict( - vault.__utils__, {"vault.make_request": mock_vault} - ), patch.dict(vault.__utils__, {"vault.is_v2": mock_version}): - vault_return = vault.read_secret("/secret/my/secret", "key") - - self.assertEqual(vault_return, "somevalue") - - def test_read_secret_v2(self): - """ - Test salt.modules.vault.read_secret function for v2 of kv secret backend - """ - # given path secrets/mysecret generate v2 output - version = { - "v2": True, - "data": "secrets/data/mysecret", - "metadata": "secrets/metadata/mysecret", - "type": "kv", - } - mock_version = MagicMock(return_value=version) - mock_vault = MagicMock() - mock_vault.return_value.status_code = 200 - v2_return = { - "data": { - "data": {"akey": "avalue"}, - "metadata": { - "created_time": 
"2018-10-23T20:21:55.042755098Z", - "destroyed": False, - "version": 13, - "deletion_time": "", - }, - } - } - - mock_vault.return_value.json.return_value = v2_return - with patch.dict( - vault.__utils__, {"vault.make_request": mock_vault} - ), patch.dict(vault.__utils__, {"vault.is_v2": mock_version}): - # Validate metadata returned - vault_return = vault.read_secret("/secret/my/secret", metadata=True) - self.assertDictContainsSubset({"data": {"akey": "avalue"}}, vault_return) - # Validate just data returned - vault_return = vault.read_secret("/secret/my/secret") - self.assertDictContainsSubset({"akey": "avalue"}, vault_return) - - def test_read_secret_v2_key(self): - """ - Test salt.modules.vault.read_secret function for v2 of kv secret backend - with specified key - """ - # given path secrets/mysecret generate v2 output - version = { - "v2": True, - "data": "secrets/data/mysecret", - "metadata": "secrets/metadata/mysecret", - "type": "kv", - } - mock_version = MagicMock(return_value=version) - mock_vault = MagicMock() - mock_vault.return_value.status_code = 200 - v2_return = { - "data": { - "data": {"akey": "avalue"}, - "metadata": { - "created_time": "2018-10-23T20:21:55.042755098Z", - "destroyed": False, - "version": 13, - "deletion_time": "", - }, - } - } - - mock_vault.return_value.json.return_value = v2_return - with patch.dict( - vault.__utils__, {"vault.make_request": mock_vault} - ), patch.dict(vault.__utils__, {"vault.is_v2": mock_version}): - vault_return = vault.read_secret("/secret/my/secret", "akey") - - self.assertEqual(vault_return, "avalue") - - -class VaultDefaultTestCase(TestCase, LoaderModuleMockMixin): - """ - Test cases for the default argument in the vault module - - NOTE: This test class is crafted such that the vault.make_request call will - always fail. If you want to add other unit tests, you should put them in a - separate class. 
- """ - - def setup_loader_modules(self): - return { - vault: { - "__grains__": {"id": "foo"}, - "__utils__": { - "vault.make_request": MagicMock(side_effect=Exception("FAILED")), - "vault.is_v2": MagicMock( - return_value={ - "v2": True, - "data": "secrets/data/mysecret", - "metadata": "secrets/metadata/mysecret", - "type": "kv", - } - ), - }, - }, - } - - def setUp(self): - self.path = "foo/bar/" - - def test_read_secret_with_default(self): - assert vault.read_secret(self.path, default="baz") == "baz" - - def test_read_secret_no_default(self): - try: - vault.read_secret(self.path) - except CommandExecutionError: - # This is expected - pass - else: - raise Exception("Should have raised a CommandExecutionError") - - def test_list_secrets_with_default(self): - assert vault.list_secrets(self.path, default=["baz"]) == ["baz"] - - def test_list_secrets_no_default(self): - try: - vault.list_secrets(self.path) - except CommandExecutionError: - # This is expected - pass - else: - raise Exception("Should have raised a CommandExecutionError")