diff --git a/mypy.ini b/mypy.ini index 497c022c95..f6915caefe 100644 --- a/mypy.ini +++ b/mypy.ini @@ -59,6 +59,6 @@ ignore_missing_imports=True ignore_missing_imports=True # progressive add typechecks and these modules already complete the process, let's keep them clean -[mypy-samcli.commands.build,samcli.lib.build.*,samcli.commands.local.cli_common.invoke_context,samcli.commands.local.lib.local_lambda,samcli.lib.providers.*,samcli.lib.utils.git_repo.py] +[mypy-samcli.commands.build,samcli.lib.build.*,samcli.commands.local.cli_common.invoke_context,samcli.commands.local.lib.local_lambda,samcli.lib.providers.*,samcli.lib.utils.git_repo.py,samcli.lib.cookiecutter.*,samcli.lib.pipeline.*,samcli.commands.pipeline.*] disallow_untyped_defs=True disallow_incomplete_defs=True \ No newline at end of file diff --git a/samcli/cli/command.py b/samcli/cli/command.py index 384529f78b..c135400586 100644 --- a/samcli/cli/command.py +++ b/samcli/cli/command.py @@ -21,6 +21,7 @@ "samcli.commands.deploy", "samcli.commands.logs", "samcli.commands.publish", + "samcli.commands.pipeline.pipeline", # We intentionally do not expose the `bootstrap` command for now. 
We might open it up later # "samcli.commands.bootstrap", ] diff --git a/samcli/cli/context.py b/samcli/cli/context.py index a69ebb9ff2..74c35155a1 100644 --- a/samcli/cli/context.py +++ b/samcli/cli/context.py @@ -4,7 +4,7 @@ import logging import uuid -from typing import Optional, cast +from typing import Optional, cast, List import boto3 import botocore @@ -186,7 +186,7 @@ def _refresh_session(self): raise CredentialsError(str(ex)) from ex -def get_cmd_names(cmd_name, ctx): +def get_cmd_names(cmd_name, ctx) -> List[str]: """ Given the click core context, return a list representing all the subcommands passed to the CLI diff --git a/samcli/commands/_utils/template.py b/samcli/commands/_utils/template.py index bd9658b55b..08c02836da 100644 --- a/samcli/commands/_utils/template.py +++ b/samcli/commands/_utils/template.py @@ -9,9 +9,6 @@ import yaml from botocore.utils import set_value_from_jmespath -from samcli.commands.exceptions import UserException -from samcli.lib.utils.packagetype import ZIP -from samcli.yamlhelper import yaml_parse, yaml_dump from samcli.commands._utils.resources import ( METADATA_WITH_LOCAL_PATHS, RESOURCES_WITH_LOCAL_PATHS, @@ -19,6 +16,9 @@ AWS_LAMBDA_FUNCTION, get_packageable_resource_paths, ) +from samcli.commands.exceptions import UserException +from samcli.lib.utils.packagetype import ZIP +from samcli.yamlhelper import yaml_parse, yaml_dump class TemplateNotFoundException(UserException): diff --git a/samcli/commands/deploy/guided_context.py b/samcli/commands/deploy/guided_context.py index dafdf0a331..10fd3b6da8 100644 --- a/samcli/commands/deploy/guided_context.py +++ b/samcli/commands/deploy/guided_context.py @@ -6,7 +6,6 @@ from typing import Dict, Any, List import click -from botocore.session import get_session from click import confirm from click import prompt from click.types import FuncParamType @@ -36,6 +35,7 @@ from samcli.lib.providers.sam_function_provider import SamFunctionProvider from samcli.lib.providers.sam_stack_provider 
import SamLocalStackProvider from samcli.lib.utils.colors import Colored +from samcli.lib.utils.defaults import get_default_aws_region from samcli.lib.utils.packagetype import IMAGE LOG = logging.getLogger(__name__) @@ -110,7 +110,7 @@ def guided_prompts(self, parameter_override_keys): The keys of parameters to override, for each key, customers will be asked to provide a value """ default_stack_name = self.stack_name or "sam-app" - default_region = self.region or get_session().get_config_variable("region") or "us-east-1" + default_region = self.region or get_default_aws_region() default_capabilities = self.capabilities[0] or ("CAPABILITY_IAM",) default_config_env = self.config_env or DEFAULT_ENV default_config_file = self.config_file or DEFAULT_CONFIG_FILE_NAME diff --git a/samcli/commands/exceptions.py b/samcli/commands/exceptions.py index 7b8f253609..a27f4872cf 100644 --- a/samcli/commands/exceptions.py +++ b/samcli/commands/exceptions.py @@ -59,3 +59,22 @@ class ContainersInitializationException(UserException): """ Exception class when SAM is not able to initialize any of the lambda functions containers """ + + +class PipelineTemplateCloneException(UserException): + """ + Exception class when unable to download pipeline templates from a Git repository during `sam pipeline init` + """ + + +class AppPipelineTemplateManifestException(UserException): + """ + Exception class when SAM is not able to parse the "manifest.yaml" file located in the SAM pipeline templates + Git repo: "github.com/aws/aws-sam-cli-pipeline-init-templates.git + """ + + +class AppPipelineTemplateMetadataException(UserException): + """ + Exception class when SAM is not able to parse the "metadata.json" file located in the SAM pipeline templates + """ diff --git a/samcli/commands/pipeline/__init__.py b/samcli/commands/pipeline/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/samcli/commands/pipeline/bootstrap/__init__.py b/samcli/commands/pipeline/bootstrap/__init__.py 
new file mode 100644 index 0000000000..e69de29bb2 diff --git a/samcli/commands/pipeline/bootstrap/cli.py b/samcli/commands/pipeline/bootstrap/cli.py new file mode 100644 index 0000000000..9e8b454992 --- /dev/null +++ b/samcli/commands/pipeline/bootstrap/cli.py @@ -0,0 +1,238 @@ +""" +CLI command for "pipeline bootstrap", which sets up the required pipeline infrastructure resources +""" +import os +from textwrap import dedent +from typing import Any, Dict, List, Optional + +import click + +from samcli.cli.cli_config_file import configuration_option, TomlProvider +from samcli.cli.main import pass_context, common_options, aws_creds_options, print_cmdline_args +from samcli.lib.config.samconfig import SamConfig +from samcli.lib.pipeline.bootstrap.stage import Stage +from samcli.lib.telemetry.metric import track_command +from samcli.lib.utils.colors import Colored +from samcli.lib.utils.version_checker import check_newer_version +from .guided_context import GuidedContext +from ..external_links import CONFIG_AWS_CRED_ON_CICD_URL + +SHORT_HELP = "Generates the necessary AWS resources to connect your CI/CD system." + +HELP_TEXT = """ +SAM Pipeline Bootstrap generates the necessary AWS resources to connect your +CI/CD system. This step must be completed for each pipeline stage prior to +running sam pipeline init +""" + +PIPELINE_CONFIG_DIR = os.path.join(".aws-sam", "pipeline") +PIPELINE_CONFIG_FILENAME = "pipelineconfig.toml" + + +@click.command("bootstrap", short_help=SHORT_HELP, help=HELP_TEXT, context_settings=dict(max_content_width=120)) +@configuration_option(provider=TomlProvider(section="parameters")) +@click.option( + "--interactive/--no-interactive", + is_flag=True, + default=True, + help="Disable interactive prompting for bootstrap parameters, and fail if any required arguments are missing.", +) +@click.option( + "--stage", + help="The name of the corresponding stage.
It is used as a suffix for the created resources.", + required=False, +) +@click.option( + "--pipeline-user", + help="An IAM user generated or referenced by sam pipeline bootstrap in order to " + "allow the connected CI/CD system to connect to the SAM CLI.", + required=False, +) +@click.option( + "--pipeline-execution-role", + help="Execution role that the CI/CD system assumes in order to make changes to resources on your behalf.", + required=False, +) +@click.option( + "--cloudformation-execution-role", + help="Execution role that CloudFormation assumes in order to make changes to resources on your behalf", + required=False, +) +@click.option( + "--bucket", + help="The name of the S3 bucket where this command uploads your CloudFormation template. This is required for " + "deployments of templates sized greater than 51,200 bytes.", + required=False, +) +@click.option( + "--create-image-repository/--no-create-image-repository", + is_flag=True, + default=False, + help="If set to true and no ECR image repository is provided, this command will create an ECR image repository " + "to hold the container images of Lambda functions having an Image package type.", +) +@click.option( + "--image-repository", + help="ECR repo uri where this command uploads the image artifacts that are referenced in your template.", + required=False, +) +@click.option( + "--confirm-changeset/--no-confirm-changeset", + default=True, + is_flag=True, + help="Prompt to confirm if the resources is to be deployed by SAM CLI.", +) +@common_options +@aws_creds_options +@pass_context +@track_command +@check_newer_version +@print_cmdline_args +def cli( + ctx: Any, + interactive: bool, + stage: Optional[str], + pipeline_user: Optional[str], + pipeline_execution_role: Optional[str], + cloudformation_execution_role: Optional[str], + bucket: Optional[str], + create_image_repository: bool, + image_repository: Optional[str], + confirm_changeset: bool, + config_file: Optional[str], + config_env: Optional[str], +)
-> None: + """ + `sam pipeline bootstrap` command entry point + """ + do_cli( + region=ctx.region, + profile=ctx.profile, + interactive=interactive, + stage_name=stage, + pipeline_user_arn=pipeline_user, + pipeline_execution_role_arn=pipeline_execution_role, + cloudformation_execution_role_arn=cloudformation_execution_role, + artifacts_bucket_arn=bucket, + create_image_repository=create_image_repository, + image_repository_arn=image_repository, + confirm_changeset=confirm_changeset, + config_file=config_file, + config_env=config_env, + ) # pragma: no cover + + +def do_cli( + region: Optional[str], + profile: Optional[str], + interactive: bool, + stage_name: Optional[str], + pipeline_user_arn: Optional[str], + pipeline_execution_role_arn: Optional[str], + cloudformation_execution_role_arn: Optional[str], + artifacts_bucket_arn: Optional[str], + create_image_repository: bool, + image_repository_arn: Optional[str], + confirm_changeset: bool, + config_file: Optional[str], + config_env: Optional[str], + standalone: bool = True, +) -> None: + """ + implementation of `sam pipeline bootstrap` command + """ + if not pipeline_user_arn: + pipeline_user_arn = _load_saved_pipeline_user_arn() + + if interactive: + if standalone: + click.echo( + dedent( + """\ + + sam pipeline bootstrap generates the necessary AWS resources to connect a stage in + your CI/CD system. We will ask for [1] stage definition, [2] account details, and + [3] references to existing resources in order to bootstrap these pipeline + resources.
+ """ + ), + ) + + guided_context = GuidedContext( + profile=profile, + stage_name=stage_name, + pipeline_user_arn=pipeline_user_arn, + pipeline_execution_role_arn=pipeline_execution_role_arn, + cloudformation_execution_role_arn=cloudformation_execution_role_arn, + artifacts_bucket_arn=artifacts_bucket_arn, + create_image_repository=create_image_repository, + image_repository_arn=image_repository_arn, + region=region, + ) + guided_context.run() + stage_name = guided_context.stage_name + pipeline_user_arn = guided_context.pipeline_user_arn + pipeline_execution_role_arn = guided_context.pipeline_execution_role_arn + cloudformation_execution_role_arn = guided_context.cloudformation_execution_role_arn + artifacts_bucket_arn = guided_context.artifacts_bucket_arn + create_image_repository = guided_context.create_image_repository + image_repository_arn = guided_context.image_repository_arn + region = guided_context.region + profile = guided_context.profile + + if not stage_name: + raise click.UsageError("Missing required parameter '--stage'") + + environment: Stage = Stage( + name=stage_name, + aws_profile=profile, + aws_region=region, + pipeline_user_arn=pipeline_user_arn, + pipeline_execution_role_arn=pipeline_execution_role_arn, + cloudformation_execution_role_arn=cloudformation_execution_role_arn, + artifacts_bucket_arn=artifacts_bucket_arn, + create_image_repository=create_image_repository, + image_repository_arn=image_repository_arn, + ) + + bootstrapped: bool = environment.bootstrap(confirm_changeset=confirm_changeset) + + if bootstrapped: + environment.print_resources_summary() + + environment.save_config_safe( + config_dir=PIPELINE_CONFIG_DIR, filename=PIPELINE_CONFIG_FILENAME, cmd_names=_get_bootstrap_command_names() + ) + + click.secho( + dedent( + f"""\ + View the definition in {os.path.join(PIPELINE_CONFIG_DIR, PIPELINE_CONFIG_FILENAME)}, + run sam pipeline bootstrap to generate another set of resources, or proceed to + sam pipeline init to create your 
pipeline configuration file. + """ + ) + ) + + if not environment.pipeline_user.is_user_provided: + click.secho( + dedent( + f"""\ + Before running {Colored().bold("sam pipeline init")}, we recommend first setting up AWS credentials + in your CI/CD account. Read more about how to do so with your provider in + {CONFIG_AWS_CRED_ON_CICD_URL}. + """ + ) + ) + + +def _load_saved_pipeline_user_arn() -> Optional[str]: + samconfig: SamConfig = SamConfig(config_dir=PIPELINE_CONFIG_DIR, filename=PIPELINE_CONFIG_FILENAME) + if not samconfig.exists(): + return None + config: Dict[str, str] = samconfig.get_all(cmd_names=_get_bootstrap_command_names(), section="parameters") + return config.get("pipeline_user") + + +def _get_bootstrap_command_names() -> List[str]: + return ["pipeline", "bootstrap"] diff --git a/samcli/commands/pipeline/bootstrap/guided_context.py b/samcli/commands/pipeline/bootstrap/guided_context.py new file mode 100644 index 0000000000..a7f1f89b08 --- /dev/null +++ b/samcli/commands/pipeline/bootstrap/guided_context.py @@ -0,0 +1,249 @@ +""" +An interactive flow that prompt the user for required information to bootstrap the AWS account of an environment +with the required infrastructure +""" +import os +import sys +from textwrap import dedent +from typing import Optional, List, Tuple, Callable + +import click +from botocore.credentials import EnvProvider + +from samcli.commands.exceptions import CredentialsError +from samcli.commands.pipeline.external_links import CONFIG_AWS_CRED_DOC_URL +from samcli.lib.bootstrap.bootstrap import get_current_account_id +from samcli.lib.utils.colors import Colored + +from samcli.lib.utils.defaults import get_default_aws_region +from samcli.lib.utils.profile import list_available_profiles + + +class GuidedContext: + def __init__( + self, + profile: Optional[str] = None, + stage_name: Optional[str] = None, + pipeline_user_arn: Optional[str] = None, + pipeline_execution_role_arn: Optional[str] = None, + 
cloudformation_execution_role_arn: Optional[str] = None, + artifacts_bucket_arn: Optional[str] = None, + create_image_repository: bool = False, + image_repository_arn: Optional[str] = None, + region: Optional[str] = None, + ) -> None: + self.profile = profile + self.stage_name = stage_name + self.pipeline_user_arn = pipeline_user_arn + self.pipeline_execution_role_arn = pipeline_execution_role_arn + self.cloudformation_execution_role_arn = cloudformation_execution_role_arn + self.artifacts_bucket_arn = artifacts_bucket_arn + self.create_image_repository = create_image_repository + self.image_repository_arn = image_repository_arn + self.region = region + self.color = Colored() + + def _prompt_account_id(self) -> None: + profiles = list_available_profiles() + click.echo("The following AWS credential sources are available to use:") + click.echo( + dedent( + f"""\ + To know more about configuration AWS credentials, visit the link below: + {CONFIG_AWS_CRED_DOC_URL}\ + """ + ) + ) + has_env_creds = os.getenv(EnvProvider.ACCESS_KEY) and os.getenv(EnvProvider.SECRET_KEY) + click.echo(f"\t1 - Environment variables{' (not available)' if not has_env_creds else ''}") + for i, profile in enumerate(profiles): + click.echo(f"\t{i + 2} - {profile} (named profile)") + click.echo("\tq - Quit and configure AWS credentials") + answer = click.prompt( + "Select a credential source to associate with this stage", + show_choices=False, + show_default=False, + type=click.Choice((["1"] if has_env_creds else []) + [str(i + 2) for i in range(len(profiles))] + ["q"]), + ) + if answer == "q": + sys.exit(0) + elif answer == "1": + # by default, env variable has higher precedence + # https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-envvars.html#envvars-list + self.profile = None + else: + self.profile = profiles[int(answer) - 2] + + try: + account_id = get_current_account_id(self.profile) + click.echo(self.color.green(f"Associated account {account_id} with stage {self.stage_name}.")) 
+ except CredentialsError as ex: + click.echo(f"{self.color.red(ex.message)}\n") + self._prompt_account_id() + + def _prompt_stage_name(self) -> None: + click.echo( + "Enter a name for this stage. This will be referenced later when you use the sam pipeline init command:" + ) + self.stage_name = click.prompt( + "Stage name", + default=self.stage_name, + type=click.STRING, + ) + + def _prompt_region_name(self) -> None: + self.region = click.prompt( + "Enter the region in which you want these resources to be created", + type=click.STRING, + default=get_default_aws_region(), + ) + + def _prompt_pipeline_user(self) -> None: + self.pipeline_user_arn = click.prompt( + "Enter the pipeline IAM user ARN if you have previously created one, or we will create one for you", + default="", + type=click.STRING, + ) + + def _prompt_pipeline_execution_role(self) -> None: + self.pipeline_execution_role_arn = click.prompt( + "Enter the pipeline execution role ARN if you have previously created one, " + "or we will create one for you", + default="", + type=click.STRING, + ) + + def _prompt_cloudformation_execution_role(self) -> None: + self.cloudformation_execution_role_arn = click.prompt( + "Enter the CloudFormation execution role ARN if you have previously created one, " + "or we will create one for you", + default="", + type=click.STRING, + ) + + def _prompt_artifacts_bucket(self) -> None: + self.artifacts_bucket_arn = click.prompt( + "Please enter the artifact bucket ARN for your Lambda function. " + "If you do not have a bucket, we will create one for you", + default="", + type=click.STRING, + ) + + def _prompt_image_repository(self) -> None: + if click.confirm("Does your application contain any IMAGE type Lambda functions?"): + self.image_repository_arn = click.prompt( + "Please enter the ECR image repository ARN(s) for your Image type function(s)." 
+ " If you do not yet have a repository, we will create one for you", + default="", + type=click.STRING, + ) + self.create_image_repository = not bool(self.image_repository_arn) + else: + self.create_image_repository = False + + def _get_user_inputs(self) -> List[Tuple[str, Callable[[], None]]]: + return [ + (f"Account: {get_current_account_id(self.profile)}", self._prompt_account_id), + (f"Stage name: {self.stage_name}", self._prompt_stage_name), + (f"Region: {self.region}", self._prompt_region_name), + ( + f"Pipeline user ARN: {self.pipeline_user_arn}" + if self.pipeline_user_arn + else "Pipeline user: [to be created]", + self._prompt_pipeline_user, + ), + ( + f"Pipeline execution role ARN: {self.pipeline_execution_role_arn}" + if self.pipeline_execution_role_arn + else "Pipeline execution role: [to be created]", + self._prompt_pipeline_execution_role, + ), + ( + f"CloudFormation execution role ARN: {self.cloudformation_execution_role_arn}" + if self.cloudformation_execution_role_arn + else "CloudFormation execution role: [to be created]", + self._prompt_cloudformation_execution_role, + ), + ( + f"Artifacts bucket ARN: {self.artifacts_bucket_arn}" + if self.artifacts_bucket_arn + else "Artifacts bucket: [to be created]", + self._prompt_artifacts_bucket, + ), + ( + f"ECR image repository ARN: {self.image_repository_arn}" + if self.image_repository_arn + else f"ECR image repository: [{'to be created' if self.create_image_repository else 'skipped'}]", + self._prompt_image_repository, + ), + ] + + def run(self) -> None: # pylint: disable=too-many-branches + """ + Runs an interactive questionnaire to prompt the user for the ARNs of the AWS resources(infrastructure) required + for the pipeline to work.
Users can provide all, none or some resources' ARNs and leave the remaining empty + and it will be created by the bootstrap command + """ + click.secho(self.color.bold("[1] Stage definition")) + if self.stage_name: + click.echo(f"Stage name: {self.stage_name}") + else: + self._prompt_stage_name() + click.echo() + + click.secho(self.color.bold("[2] Account details")) + self._prompt_account_id() + click.echo() + + if not self.region: + self._prompt_region_name() + + if self.pipeline_user_arn: + click.echo(f"Pipeline IAM user ARN: {self.pipeline_user_arn}") + else: + self._prompt_pipeline_user() + click.echo() + + click.secho(self.color.bold("[3] Reference application build resources")) + + if self.pipeline_execution_role_arn: + click.echo(f"Pipeline execution role ARN: {self.pipeline_execution_role_arn}") + else: + self._prompt_pipeline_execution_role() + + if self.cloudformation_execution_role_arn: + click.echo(f"CloudFormation execution role ARN: {self.cloudformation_execution_role_arn}") + else: + self._prompt_cloudformation_execution_role() + + if self.artifacts_bucket_arn: + click.echo(f"Artifacts bucket ARN: {self.artifacts_bucket_arn}") + else: + self._prompt_artifacts_bucket() + + if self.image_repository_arn: + click.echo(f"ECR image repository ARN: {self.image_repository_arn}") + else: + self._prompt_image_repository() + click.echo() + + # Ask customers to confirm the inputs + click.secho(self.color.bold("[4] Summary")) + while True: + inputs = self._get_user_inputs() + click.secho("Below is the summary of the answers:") + for i, (text, _) in enumerate(inputs): + click.secho(f"\t{i + 1} - {text}") + edit_input = click.prompt( + text="Press enter to confirm the values above, or select an item to edit the value", + default="0", + show_choices=False, + show_default=False, + type=click.Choice(["0"] + [str(i + 1) for i in range(len(inputs))]), + ) + click.echo() + if int(edit_input): + inputs[int(edit_input) - 1][1]() + click.echo() + else: + break
diff --git a/samcli/commands/pipeline/external_links.py b/samcli/commands/pipeline/external_links.py new file mode 100644 index 0000000000..77301ebb1b --- /dev/null +++ b/samcli/commands/pipeline/external_links.py @@ -0,0 +1,8 @@ +""" +The module to store external links. Put them in a centralized place so that we can verify their +validity automatically. +""" +CONFIG_AWS_CRED_DOC_URL = "https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-files.html" + +_SAM_DOC_PREFIX = "https://docs.aws.amazon.com/serverless-application-model/latest/developerguide" +CONFIG_AWS_CRED_ON_CICD_URL = _SAM_DOC_PREFIX + "/serverless-generating-example-ci-cd-others.html" diff --git a/samcli/commands/pipeline/init/__init__.py b/samcli/commands/pipeline/init/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/samcli/commands/pipeline/init/cli.py b/samcli/commands/pipeline/init/cli.py new file mode 100644 index 0000000000..bcbe205c6a --- /dev/null +++ b/samcli/commands/pipeline/init/cli.py @@ -0,0 +1,51 @@ +""" +CLI command for "pipeline init" command +""" +from typing import Any, Optional + +import click + +from samcli.cli.cli_config_file import configuration_option, TomlProvider +from samcli.cli.main import pass_context, common_options as cli_framework_options +from samcli.commands.pipeline.init.interactive_init_flow import InteractiveInitFlow +from samcli.lib.telemetry.metric import track_command + +SHORT_HELP = "Generates CI/CD pipeline configuration files." +HELP_TEXT = """ +sam pipeline init generates a pipeline configuration file that you can use to connect your +AWS account(s) to your CI/CD system. Before using sam pipeline init, you must +bootstrap the necessary resources for each stage in your pipeline. You can do this +by running sam pipeline init --bootstrap to be guided through the setup and configuration +file generation process, or refer to resources you have previously created with the +sam pipeline bootstrap command. 
+""" + + +@click.command("init", help=HELP_TEXT, short_help=SHORT_HELP) +@configuration_option(provider=TomlProvider(section="parameters")) +@click.option( + "--bootstrap", + is_flag=True, + default=False, + help="Allow bootstrapping resources.", +) +@cli_framework_options +@pass_context +@track_command # pylint: disable=R0914 +def cli(ctx: Any, config_env: Optional[str], config_file: Optional[str], bootstrap: bool) -> None: + """ + `sam pipeline init` command entry point + """ + + # Currently we support interactive mode only, i.e. the user doesn't provide the required arguments during the call + # so we call do_cli without any arguments. This will change after supporting the non interactive mode. + do_cli(bootstrap) + + +def do_cli(bootstrap: bool) -> None: + """ + implementation of `sam pipeline init` command + """ + # TODO non-interactive mode + init_flow = InteractiveInitFlow(bootstrap) + init_flow.do_interactive() diff --git a/samcli/commands/pipeline/init/interactive_init_flow.py b/samcli/commands/pipeline/init/interactive_init_flow.py new file mode 100644 index 0000000000..7504f3a66b --- /dev/null +++ b/samcli/commands/pipeline/init/interactive_init_flow.py @@ -0,0 +1,482 @@ +""" +Interactive flow that prompts that users for pipeline template (cookiecutter template) and used it to generate +pipeline configuration file +""" +import json +import logging +import os +from json import JSONDecodeError +from pathlib import Path +from textwrap import dedent +from typing import Dict, List, Tuple + +import click + +from samcli.cli.main import global_cfg +from samcli.commands.exceptions import ( + AppPipelineTemplateMetadataException, + PipelineTemplateCloneException, +) +from samcli.lib.config.samconfig import SamConfig +from samcli.lib.cookiecutter.interactive_flow import InteractiveFlow +from samcli.lib.cookiecutter.interactive_flow_creator import InteractiveFlowCreator +from samcli.lib.cookiecutter.question import Choice +from samcli.lib.cookiecutter.template 
import Template +from samcli.lib.utils import osutils +from samcli.lib.utils.colors import Colored +from samcli.lib.utils.git_repo import GitRepo, CloneRepoException +from .pipeline_templates_manifest import Provider, PipelineTemplateMetadata, PipelineTemplatesManifest +from ..bootstrap.cli import ( + do_cli as do_bootstrap, + PIPELINE_CONFIG_DIR, + PIPELINE_CONFIG_FILENAME, + _get_bootstrap_command_names, +) + +LOG = logging.getLogger(__name__) +shared_path: Path = global_cfg.config_dir +APP_PIPELINE_TEMPLATES_REPO_URL = "https://github.com/aws/aws-sam-cli-pipeline-init-templates.git" +APP_PIPELINE_TEMPLATES_REPO_LOCAL_NAME = "aws-sam-cli-app-pipeline-templates" +CUSTOM_PIPELINE_TEMPLATE_REPO_LOCAL_NAME = "custom-pipeline-template" +SAM_PIPELINE_TEMPLATE_SOURCE = "AWS Quick Start Pipeline Templates" +CUSTOM_PIPELINE_TEMPLATE_SOURCE = "Custom Pipeline Template Location" + + +class InteractiveInitFlow: + def __init__(self, allow_bootstrap: bool): + self.allow_bootstrap = allow_bootstrap + self.color = Colored() + + def do_interactive(self) -> None: + """ + An interactive flow that prompts the user for pipeline template (cookiecutter template) location, downloads it, + runs its specific questionnaire then generates the pipeline config file + based on the template and user's responses + """ + click.echo( + dedent( + """\ + + sam pipeline init generates a pipeline configuration file that you can use to connect your + AWS account(s) to your CI/CD system. We will guide you through the process to + bootstrap resources for each stage, then walk through the details necessary for + creating the pipeline config file. + + Please ensure you are in the root folder of your SAM application before you begin. 
+ """ + ) + ) + + click.echo("Select a pipeline structure template to get started:") + pipeline_template_source_question = Choice( + key="pipeline-template-source", + text="Select template", + options=[SAM_PIPELINE_TEMPLATE_SOURCE, CUSTOM_PIPELINE_TEMPLATE_SOURCE], + is_required=True, + ) + source = pipeline_template_source_question.ask() + if source == CUSTOM_PIPELINE_TEMPLATE_SOURCE: + generated_files = self._generate_from_custom_location() + else: + generated_files = self._generate_from_app_pipeline_templates() + click.secho(Colored().green("Successfully created the pipeline configuration file(s):")) + for file in generated_files: + click.secho(Colored().green(f"\t- {file}")) + + def _generate_from_app_pipeline_templates( + self, + ) -> List[str]: + """ + Prompts the user to choose a pipeline template from SAM predefined set of pipeline templates hosted in the git + repository: aws/aws-sam-cli-pipeline-init-templates.git + downloads locally, then generates the pipeline configuration file from the selected pipeline template. + Finally, return the list of generated files. 
+ """ + pipeline_templates_local_dir: Path = _clone_app_pipeline_templates() + pipeline_templates_manifest: PipelineTemplatesManifest = _read_app_pipeline_templates_manifest( + pipeline_templates_local_dir + ) + # The manifest contains multiple pipeline-templates so select one + selected_pipeline_template_metadata: PipelineTemplateMetadata = _prompt_pipeline_template( + pipeline_templates_manifest + ) + selected_pipeline_template_dir: Path = pipeline_templates_local_dir.joinpath( + selected_pipeline_template_metadata.location + ) + return self._generate_from_pipeline_template(selected_pipeline_template_dir) + + def _generate_from_custom_location( + self, + ) -> List[str]: + """ + Prompts the user for a custom pipeline template location, downloads locally, + then generates the pipeline config file and return the list of generated files + """ + pipeline_template_git_location: str = click.prompt("Template Git location") + if os.path.exists(pipeline_template_git_location): + return self._generate_from_pipeline_template(Path(pipeline_template_git_location)) + + with osutils.mkdir_temp(ignore_errors=True) as tempdir: + tempdir_path = Path(tempdir) + pipeline_template_local_dir: Path = _clone_pipeline_templates( + pipeline_template_git_location, tempdir_path, CUSTOM_PIPELINE_TEMPLATE_REPO_LOCAL_NAME + ) + return self._generate_from_pipeline_template(pipeline_template_local_dir) + + def _prompt_run_bootstrap_within_pipeline_init(self, stage_names: List[str], number_of_stages: int) -> bool: + """ + Prompt bootstrap if `--bootstrap` flag is provided. Return True if bootstrap process is executed. + """ + if not stage_names: + click.echo("[!] None detected in this account.") + else: + click.echo( + Colored().yellow( + f"Only {len(stage_names)} stage(s) were detected, " + f"fewer than what the template requires: {number_of_stages}." + ) + ) + click.echo() + + if self.allow_bootstrap: + if click.confirm( + "Do you want to go through stage setup process now? 
If you choose no, " + "you can still reference other bootstrapped resources." + ): + click.secho( + dedent( + """\ + + For each stage, we will ask for [1] stage definition, [2] account details, and [3] + reference application build resources in order to bootstrap these pipeline + resources. + + We recommend using an individual AWS account profiles for each stage in your + pipeline. You can set these profiles up using [little bit of info on how to do + this/docs]. + """ + ) + ) + + click.echo(Colored().bold(f"\nStage {len(stage_names) + 1} Setup\n")) + do_bootstrap( + region=None, + profile=None, + interactive=True, + stage_name=None, + pipeline_user_arn=None, + pipeline_execution_role_arn=None, + cloudformation_execution_role_arn=None, + artifacts_bucket_arn=None, + create_image_repository=False, + image_repository_arn=None, + confirm_changeset=True, + config_file=None, + config_env=None, + standalone=False, + ) + return True + else: + click.echo( + dedent( + """\ + To set up stage(s), please quit the process using Ctrl+C and use one of the following commands: + sam pipeline init --bootstrap To be guided through the stage and config file creation process. + sam pipeline bootstrap To specify details for an individual stage. + """ + ) + ) + click.prompt( + "To reference stage resources bootstrapped in a different account, press enter to proceed", default="" + ) + return False + + def _generate_from_pipeline_template(self, pipeline_template_dir: Path) -> List[str]: + """ + Generates a pipeline config file from a given pipeline template local location + and return the list of generated files. 
+ """ + pipeline_template: Template = _initialize_pipeline_template(pipeline_template_dir) + number_of_stages = (pipeline_template.metadata or {}).get("number_of_stages") + if not number_of_stages: + LOG.debug("Cannot find number_of_stages from template's metadata, set to default 2.") + number_of_stages = 2 + click.echo(f"You are using the {number_of_stages}-stage pipeline template.") + _draw_stage_diagram(number_of_stages) + while True: + click.echo("Checking for existing stages...\n") + stage_names, bootstrap_context = _load_pipeline_bootstrap_resources() + if len(stage_names) < number_of_stages and self._prompt_run_bootstrap_within_pipeline_init( + stage_names, number_of_stages + ): + # the customers just went through the bootstrap process, + # refresh the pipeline bootstrap resources and see whether bootstrap is still needed + continue + break + + context: Dict = pipeline_template.run_interactive_flows(bootstrap_context) + with osutils.mkdir_temp() as generate_dir: + LOG.debug("Generating pipeline files into %s", generate_dir) + context["outputDir"] = "." # prevent cookiecutter from generating a sub-folder + pipeline_template.generate_project(context, generate_dir) + return _copy_dir_contents_to_cwd(generate_dir) + + +def _load_pipeline_bootstrap_resources() -> Tuple[List[str], Dict[str, str]]: + section = "parameters" + context: Dict = {} + + config = SamConfig(PIPELINE_CONFIG_DIR, PIPELINE_CONFIG_FILENAME) + if not config.exists(): + context[str(["stage_names_message"])] = "" + return [], context + + # config.get_stage_names() will return the list of + # bootstrapped stage names and "default" which is used to store shared values + # we don't want to include "default" here. 
+ stage_names = [stage_name for stage_name in config.get_stage_names() if stage_name != "default"] + for index, stage in enumerate(stage_names): + for key, value in config.get_all(_get_bootstrap_command_names(), section, stage).items(): + context[str([stage, key])] = value + # create an index alias for each stage name + # so that if customers type "1," it is equivalent to the first stage name + context[str([str(index + 1), key])] = value + + # pre-load the list of stage names detected from pipelineconfig.toml + stage_names_message = ( + "Here are the stage names detected " + + f"in {os.path.join(PIPELINE_CONFIG_DIR, PIPELINE_CONFIG_FILENAME)}:\n" + + "\n".join([f"\t{index + 1} - {stage_name}" for index, stage_name in enumerate(stage_names)]) + ) + context[str(["stage_names_message"])] = stage_names_message + + return stage_names, context + + +def _copy_dir_contents_to_cwd(source_dir: str) -> List[str]: + """ + Copy the contents of source_dir into the current cwd. + If existing files are encountered, ask for confirmation. 
+ If not confirmed, all files will be written to + .aws-sam/pipeline/generated-files/ + """ + file_paths: List[str] = [] + existing_file_paths: List[str] = [] + for root, _, files in os.walk(source_dir): + for filename in files: + file_path = Path(root, filename) + target_file_path = Path(".").joinpath(file_path.relative_to(source_dir)) + LOG.debug("Verify %s does not exist", target_file_path) + if target_file_path.exists(): + existing_file_paths.append(str(target_file_path)) + file_paths.append(str(target_file_path)) + if existing_file_paths: + click.echo("\nThe following files already exist:") + for existing_file_path in existing_file_paths: + click.echo(f"\t- {existing_file_path}") + if not click.confirm("Do you want to override them?"): + target_dir = str(Path(PIPELINE_CONFIG_DIR, "generated-files")) + osutils.copytree(source_dir, target_dir) + click.echo(f"All files are saved to {target_dir}.") + return [str(Path(target_dir, path)) for path in file_paths] + LOG.debug("Copy contents of %s to cwd", source_dir) + osutils.copytree(source_dir, ".") + return file_paths + + +def _clone_app_pipeline_templates() -> Path: + """ + clone aws/aws-sam-cli-pipeline-init-templates.git Git repo to the local machine in SAM shared directory. + Returns: + the local directory path where the repo is cloned. 
+ """ + try: + return _clone_pipeline_templates( + repo_url=APP_PIPELINE_TEMPLATES_REPO_URL, + clone_dir=shared_path, + clone_name=APP_PIPELINE_TEMPLATES_REPO_LOCAL_NAME, + ) + except PipelineTemplateCloneException: + # If can't clone app pipeline templates, try using an old clone from a previous run if already exist + expected_previous_clone_local_path: Path = shared_path.joinpath(APP_PIPELINE_TEMPLATES_REPO_LOCAL_NAME) + if expected_previous_clone_local_path.exists(): + click.echo("Unable to download updated app pipeline templates, using existing ones") + return expected_previous_clone_local_path + raise + + +def _clone_pipeline_templates(repo_url: str, clone_dir: Path, clone_name: str) -> Path: + """ + clone a given pipeline templates' Git repo to the user machine inside the given clone_dir directory + under the given clone name. For example, if clone_name is "custom-pipeline-template" then the location to clone + to is "/clone/dir/path/custom-pipeline-template/" + + Parameters: + repo_url: the URL of the Git repo to clone + clone_dir: the local parent directory to clone to + clone_name: The folder name to give to the created clone inside clone_dir + + Returns: + Path to the local clone + """ + try: + repo: GitRepo = GitRepo(repo_url) + clone_path: Path = repo.clone(clone_dir, clone_name, replace_existing=True) + return clone_path + except (OSError, CloneRepoException) as ex: + raise PipelineTemplateCloneException(str(ex)) from ex + + +def _read_app_pipeline_templates_manifest(pipeline_templates_dir: Path) -> PipelineTemplatesManifest: + """ + parse and return the manifest yaml file located in the root directory of the SAM pipeline templates folder: + + Parameters: + pipeline_templates_dir: local directory of SAM pipeline templates + + Raises: + AppPipelineTemplateManifestException if the manifest is not found, ill-formatted or missing required keys + + Returns: + The manifest of the pipeline templates + """ + manifest_path: Path = 
pipeline_templates_dir.joinpath("manifest.yaml") + return PipelineTemplatesManifest(manifest_path) + + +def _prompt_pipeline_template(pipeline_templates_manifest: PipelineTemplatesManifest) -> PipelineTemplateMetadata: + """ + Prompts the user a list of the available CI/CD systems along with associated app pipeline templates to choose + one of them + + Parameters: + pipeline_templates_manifest: A manifest file lists the available providers and the associated pipeline templates + + Returns: + The manifest (A section in the pipeline_templates_manifest) of the chosen pipeline template; + """ + provider = _prompt_cicd_provider(pipeline_templates_manifest.providers) + provider_pipeline_templates: List[PipelineTemplateMetadata] = [ + t for t in pipeline_templates_manifest.templates if t.provider == provider.id + ] + selected_template_manifest: PipelineTemplateMetadata = _prompt_provider_pipeline_template( + provider_pipeline_templates + ) + return selected_template_manifest + + +def _prompt_cicd_provider(available_providers: List[Provider]) -> Provider: + """ + Prompts the user a list of the available CI/CD systems to choose from + + Parameters: + available_providers: List of available CI/CD systems such as Jenkins, Gitlab and CircleCI + + Returns: + The chosen provider + """ + if len(available_providers) == 1: + return available_providers[0] + + question_to_choose_provider = Choice( + key="provider", text="CI/CD system", options=[p.display_name for p in available_providers], is_required=True + ) + chosen_provider_display_name = question_to_choose_provider.ask() + return next(p for p in available_providers if p.display_name == chosen_provider_display_name) + + +def _prompt_provider_pipeline_template( + provider_available_pipeline_templates_metadata: List[PipelineTemplateMetadata], +) -> PipelineTemplateMetadata: + """ + Prompts the user a list of the available pipeline templates to choose from + + Parameters: + provider_available_pipeline_templates_metadata: List of 
available pipeline templates manifests + + Returns: + The chosen pipeline template manifest + """ + if len(provider_available_pipeline_templates_metadata) == 1: + return provider_available_pipeline_templates_metadata[0] + question_to_choose_pipeline_template = Choice( + key="pipeline-template", + text="Which pipeline template would you like to use?", + options=[t.display_name for t in provider_available_pipeline_templates_metadata], + ) + chosen_pipeline_template_display_name = question_to_choose_pipeline_template.ask() + return next( + t + for t in provider_available_pipeline_templates_metadata + if t.display_name == chosen_pipeline_template_display_name + ) + + +def _initialize_pipeline_template(pipeline_template_dir: Path) -> Template: + """ + Initialize a pipeline template from a given pipeline template (cookiecutter template) location + + Parameters: + pipeline_template_dir: The local location of the pipeline cookiecutter template + + Returns: + The initialized pipeline's cookiecutter template + """ + interactive_flow = _get_pipeline_template_interactive_flow(pipeline_template_dir) + metadata = _get_pipeline_template_metadata(pipeline_template_dir) + return Template(location=str(pipeline_template_dir), interactive_flows=[interactive_flow], metadata=metadata) + + +def _get_pipeline_template_metadata(pipeline_template_dir: Path) -> Dict: + """ + Load the metadata from the file metadata.json located in the template directory, + raise an exception if anything wrong. 
+ """ + metadata_path = Path(pipeline_template_dir, "metadata.json") + if not metadata_path.exists(): + raise AppPipelineTemplateMetadataException(f"Cannot find metadata file {metadata_path}") + try: + with open(metadata_path, "r", encoding="utf-8") as file: + metadata = json.load(file) + if isinstance(metadata, dict): + return metadata + raise AppPipelineTemplateMetadataException(f"Invalid content found in {metadata_path}") + except JSONDecodeError as ex: + raise AppPipelineTemplateMetadataException(f"Invalid JSON found in {metadata_path}") from ex + + +def _get_pipeline_template_interactive_flow(pipeline_template_dir: Path) -> InteractiveFlow: + """ + A pipeline template defines its own interactive flow (questionnaire) in a JSON file named questions.json located + in the root directory of the template. This questionnaire defines a set of questions to prompt to the user and + use the responses as the cookiecutter context + + Parameters: + pipeline_template_dir: The local location of the pipeline cookiecutter template + + Raises: + QuestionsNotFoundException: if the pipeline template is missing questions.json file. + QuestionsFailedParsingException: if questions.json file is ill-formatted or missing required keys. 
+ + Returns: + The interactive flow + """ + flow_definition_path: Path = pipeline_template_dir.joinpath("questions.json") + return InteractiveFlowCreator.create_flow(str(flow_definition_path)) + + +def _lines_for_stage(stage_index: int) -> List[str]: + return [ + " _________ ", + "| |", + f"| Stage {stage_index} |", + "|_________|", + ] + + +def _draw_stage_diagram(number_of_stages: int) -> None: + delimiters = [" ", " ", "->", " "] + stage_lines = [_lines_for_stage(i + 1) for i in range(number_of_stages)] + for i, delimiter in enumerate(delimiters): + click.echo(delimiter.join([stage_lines[stage_i][i] for stage_i in range(number_of_stages)])) + click.echo("") diff --git a/samcli/commands/pipeline/init/pipeline_templates_manifest.py b/samcli/commands/pipeline/init/pipeline_templates_manifest.py new file mode 100644 index 0000000000..8249e14d85 --- /dev/null +++ b/samcli/commands/pipeline/init/pipeline_templates_manifest.py @@ -0,0 +1,61 @@ +""" +Represents a manifest that lists the available SAM pipeline templates. 
+Example:
+    providers:
+      - displayName: Jenkins
+        id: jenkins
+      - displayName: Gitlab CI/CD
+        id: gitlab
+      - displayName: Github Actions
+        id: github-actions
+    templates:
+      - displayName: jenkins-two-environments-pipeline
+        provider: jenkins
+        location: templates/cookiecutter-jenkins-two-environments-pipeline
+      - displayName: gitlab-two-environments-pipeline
+        provider: gitlab
+        location: templates/cookiecutter-gitlab-two-environments-pipeline
+      - displayName: Github-Actions-two-environments-pipeline
+        provider: github-actions
+        location: templates/cookiecutter-github-actions-two-environments-pipeline
+"""
+from pathlib import Path
+from typing import Dict, List
+
+import yaml
+
+from samcli.commands.exceptions import AppPipelineTemplateManifestException
+from samcli.yamlhelper import parse_yaml_file
+
+
+class Provider:
+    """ CI/CD system such as Jenkins, Gitlab and GitHub-Actions"""
+
+    def __init__(self, manifest: Dict) -> None:
+        self.id: str = manifest["id"]
+        self.display_name: str = manifest["displayName"]
+
+
+class PipelineTemplateMetadata:
+    """ The metadata of a given pipeline template"""
+
+    def __init__(self, manifest: Dict) -> None:
+        self.display_name: str = manifest["displayName"]
+        self.provider: str = manifest["provider"]
+        self.location: str = manifest["location"]
+
+
+class PipelineTemplatesManifest:
+    """ The metadata of the available CI/CD systems and the pipeline templates"""
+
+    def __init__(self, manifest_path: Path) -> None:
+        try:
+            manifest: Dict = parse_yaml_file(file_path=str(manifest_path))
+            self.providers: List[Provider] = list(map(Provider, manifest["providers"]))
+            self.templates: List[PipelineTemplateMetadata] = list(map(PipelineTemplateMetadata, manifest["templates"]))
+        except (FileNotFoundError, KeyError, TypeError, yaml.YAMLError) as ex:
+            raise AppPipelineTemplateManifestException(
+                "SAM pipeline templates manifest file is not found or ill-formatted. This could happen if the file "
+                f"{manifest_path} got deleted or modified."
+ "If you believe this is not the case, please file an issue at https://github.com/aws/aws-sam-cli/issues" + ) from ex diff --git a/samcli/commands/pipeline/pipeline.py b/samcli/commands/pipeline/pipeline.py new file mode 100644 index 0000000000..2d8df4463e --- /dev/null +++ b/samcli/commands/pipeline/pipeline.py @@ -0,0 +1,21 @@ +""" +Command group for "pipeline" suite commands. It provides common CLI arguments, template parsing capabilities, +setting up stdin/stdout etc +""" + +import click + +from .bootstrap.cli import cli as bootstrap_cli +from .init.cli import cli as init_cli + + +@click.group() +def cli() -> None: + """ + Manage the continuous delivery of the application + """ + + +# Add individual commands under this group +cli.add_command(bootstrap_cli) +cli.add_command(init_cli) diff --git a/samcli/lib/bootstrap/bootstrap.py b/samcli/lib/bootstrap/bootstrap.py index eaed58d630..a9a590dc7f 100644 --- a/samcli/lib/bootstrap/bootstrap.py +++ b/samcli/lib/bootstrap/bootstrap.py @@ -4,32 +4,51 @@ import json import logging +from typing import Optional + +import boto3 +from botocore.exceptions import ClientError + from samcli import __version__ from samcli.cli.global_config import GlobalConfig -from samcli.commands.exceptions import UserException -from samcli.lib.utils.managed_cloudformation_stack import manage_stack as manage_cloudformation_stack +from samcli.commands.exceptions import UserException, CredentialsError +from samcli.lib.utils.managed_cloudformation_stack import StackOutput, manage_stack as manage_cloudformation_stack SAM_CLI_STACK_NAME = "aws-sam-cli-managed-default" LOG = logging.getLogger(__name__) def manage_stack(profile, region): - outputs = manage_cloudformation_stack( + outputs: StackOutput = manage_cloudformation_stack( profile=None, region=region, stack_name=SAM_CLI_STACK_NAME, template_body=_get_stack_template() ) - try: - bucket_name = next(o for o in outputs if o["OutputKey"] == "SourceBucket")["OutputValue"] - except StopIteration as 
ex: + bucket_name = outputs.get("SourceBucket") + if bucket_name is None: msg = ( "Stack " + SAM_CLI_STACK_NAME + " exists, but is missing the managed source bucket key. " "Failing as this stack was likely not created by the AWS SAM CLI." ) - raise UserException(msg) from ex + raise UserException(msg) # This bucket name is what we would write to a config file return bucket_name +def get_current_account_id(profile: Optional[str] = None): + """Returns account ID based on used AWS credentials.""" + session = boto3.Session(profile_name=profile) # type: ignore + sts_client = session.client("sts") + try: + caller_identity = sts_client.get_caller_identity() + except ClientError as ex: + if ex.response["Error"]["Code"] == "InvalidClientTokenId": + raise CredentialsError("Cannot identify account due to invalid configured credentials.") from ex + raise CredentialsError("Cannot identify account based on configured credentials.") from ex + if "Account" not in caller_identity: + raise CredentialsError("Cannot identify account based on configured credentials.") + return caller_identity["Account"] + + def _get_stack_template(): gc = GlobalConfig() info = {"version": __version__, "installationId": gc.installation_id if gc.installation_id else "unknown"} diff --git a/samcli/lib/config/samconfig.py b/samcli/lib/config/samconfig.py index 996ac5f648..5af1c0080a 100644 --- a/samcli/lib/config/samconfig.py +++ b/samcli/lib/config/samconfig.py @@ -41,6 +41,12 @@ def __init__(self, config_dir, filename=None): """ self.filepath = Path(config_dir, filename or DEFAULT_CONFIG_FILE_NAME) + def get_stage_names(self): + self._read() + if isinstance(self.document, dict): + return [stage for stage, value in self.document.items() if isinstance(value, dict)] + return [] + def get_all(self, cmd_names, section, env=DEFAULT_ENV): """ Gets a value from the configuration file for the given environment, command and section @@ -153,6 +159,10 @@ def sanity_check(self): def exists(self): return 
self.filepath.exists() + def _ensure_exists(self): + self.filepath.parent.mkdir(parents=True, exist_ok=True) + self.filepath.touch() + def path(self): return str(self.filepath) @@ -183,8 +193,8 @@ def _read(self): def _write(self): if not self.document: return - if not self.exists(): - open(self.filepath, "a+").close() + + self._ensure_exists() current_version = self._version() if self._version() else SAM_CONFIG_VERSION try: diff --git a/samcli/lib/cookiecutter/exceptions.py b/samcli/lib/cookiecutter/exceptions.py index af19364811..5d379228d8 100644 --- a/samcli/lib/cookiecutter/exceptions.py +++ b/samcli/lib/cookiecutter/exceptions.py @@ -4,8 +4,8 @@ class CookiecutterErrorException(Exception): fmt = "An unspecified error occurred" - def __init__(self, **kwargs): - msg = self.fmt.format(**kwargs) + def __init__(self, **kwargs): # type: ignore + msg: str = self.fmt.format(**kwargs) Exception.__init__(self, msg) self.kwargs = kwargs diff --git a/samcli/lib/cookiecutter/interactive_flow.py b/samcli/lib/cookiecutter/interactive_flow.py index 996ac89ce3..95ce846dc0 100644 --- a/samcli/lib/cookiecutter/interactive_flow.py +++ b/samcli/lib/cookiecutter/interactive_flow.py @@ -1,7 +1,10 @@ """A flow of questions to be asked to the user in an interactive way.""" -from typing import Any, Dict, Optional +from typing import Any, Dict, Optional, List, Tuple + +import click from .question import Question +from ..utils.colors import Colored class InteractiveFlow: @@ -20,6 +23,7 @@ def __init__(self, questions: Dict[str, Question], first_question_key: str): self._questions: Dict[str, Question] = questions self._first_question_key: str = first_question_key self._current_question: Optional[Question] = None + self._color = Colored() def advance_to_next_question(self, current_answer: Optional[Any] = None) -> Optional[Question]: """ @@ -61,9 +65,25 @@ def run( associated to the key of the corresponding question """ context = context.copy() + answers: List[Tuple[str, Any]] = [] + 
question = self.advance_to_next_question() while question: answer = question.ask(context=context) context[question.key] = answer + answers.append((question.key, answer)) question = self.advance_to_next_question(answer) + + # print summary + click.echo(self._color.bold("SUMMARY")) + click.echo("We will generate a pipeline config file based on the following information:") + + for question_key, answer in answers: + if answer is None: + # ignore unanswered questions + continue + + question = self._questions[question_key] + click.echo(f"\t{question.text}: {self._color.underline(str(answer))}") + return context diff --git a/samcli/lib/cookiecutter/interactive_flow_creator.py b/samcli/lib/cookiecutter/interactive_flow_creator.py index d861174951..b3552d4065 100644 --- a/samcli/lib/cookiecutter/interactive_flow_creator.py +++ b/samcli/lib/cookiecutter/interactive_flow_creator.py @@ -17,7 +17,7 @@ class QuestionsFailedParsingException(UserException): class InteractiveFlowCreator: @staticmethod - def create_flow(flow_definition_path: str, extra_context: Optional[Dict] = None): + def create_flow(flow_definition_path: str, extra_context: Optional[Dict] = None) -> InteractiveFlow: """ This method parses the given json/yaml file to create an InteractiveFLow. It expects the file to define a list of questions. 
It parses the questions and add it to the flow in the same order they are defined @@ -77,7 +77,7 @@ def _load_questions( questions_definition = InteractiveFlowCreator._parse_questions_definition(flow_definition_path, extra_context) try: - for question in questions_definition.get("questions"): + for question in questions_definition.get("questions", []): q = QuestionFactory.create_question_from_json(question) if not first_question_key: first_question_key = q.key @@ -90,7 +90,7 @@ def _load_questions( raise QuestionsFailedParsingException(f"Failed to parse questions: {str(ex)}") from ex @staticmethod - def _parse_questions_definition(file_path, extra_context: Optional[Dict] = None): + def _parse_questions_definition(file_path: str, extra_context: Optional[Dict] = None) -> Dict: """ Read the questions definition file, do variable substitution, parse it as JSON/YAML diff --git a/samcli/lib/cookiecutter/processor.py b/samcli/lib/cookiecutter/processor.py index 5994c77949..4f34df06f8 100644 --- a/samcli/lib/cookiecutter/processor.py +++ b/samcli/lib/cookiecutter/processor.py @@ -9,7 +9,7 @@ class Processor(ABC): """ @abstractmethod - def run(self, context: Dict): + def run(self, context: Dict) -> Dict: """ the processing logic of this processor diff --git a/samcli/lib/cookiecutter/question.py b/samcli/lib/cookiecutter/question.py index 786836a400..4fad0ea020 100644 --- a/samcli/lib/cookiecutter/question.py +++ b/samcli/lib/cookiecutter/question.py @@ -1,4 +1,5 @@ """ This module represents the questions to ask to the user to fulfill the cookiecutter context. """ +from abc import ABC, abstractmethod from enum import Enum from typing import Any, Dict, List, Optional, Type, Union @@ -14,7 +15,18 @@ class QuestionKind(Enum): default = "default" -class Question: +class Promptable(ABC): + """ + Abstract class Question, Info, Choice, Confirm implement. + These classes need to implement their own prompt() method to prompt differently. 
+ """ + + @abstractmethod + def prompt(self, text: str, default_answer: Optional[Any]) -> Any: + pass + + +class Question(Promptable): """ A question to be prompt to the user in an interactive flow where the response is used to fulfill the cookiecutter context. @@ -53,12 +65,14 @@ def __init__( text: str, default: Optional[Union[str, Dict]] = None, is_required: Optional[bool] = None, + allow_autofill: Optional[bool] = None, next_question_map: Optional[Dict[str, str]] = None, default_next_question_key: Optional[str] = None, ): self._key = key self._text = text self._required = is_required + self._allow_autofill = allow_autofill self._default_answer = default # if it is an optional question, set an empty default answer to prevent click from keep asking for an answer if not self._required and self._default_answer is None: @@ -67,30 +81,30 @@ def __init__( self._default_next_question_key = default_next_question_key @property - def key(self): + def key(self) -> str: return self._key @property - def text(self): + def text(self) -> str: return self._text @property - def default_answer(self): - return self._default_answer + def default_answer(self) -> Optional[Any]: + return self._resolve_default_answer() @property - def required(self): + def required(self) -> Optional[bool]: return self._required @property - def next_question_map(self): + def next_question_map(self) -> Dict[str, str]: return self._next_question_map @property - def default_next_question_key(self): + def default_next_question_key(self) -> Optional[str]: return self._default_next_question_key - def ask(self, context: Dict) -> Any: + def ask(self, context: Optional[Dict] = None) -> Any: """ prompt the user this question @@ -104,7 +118,20 @@ def ask(self, context: Dict) -> Any: The user provided answer. 
""" resolved_default_answer = self._resolve_default_answer(context) - return click.prompt(text=self._text, default=resolved_default_answer) + + # skip the question and directly use the default value if autofill is allowed. + if resolved_default_answer is not None and self._allow_autofill: + return resolved_default_answer + + # if it is an optional question with no default answer, + # set an empty default answer to prevent click from keep asking for an answer + if not self._required and resolved_default_answer is None: + resolved_default_answer = "" + + return self.prompt(self._resolve_text(context), resolved_default_answer) + + def prompt(self, text: str, default_answer: Optional[Any]) -> Any: + return click.prompt(text=text, default=default_answer) def get_next_question_key(self, answer: Any) -> Optional[str]: # _next_question_map is a Dict[str(answer), str(next question key)] @@ -112,7 +139,7 @@ def get_next_question_key(self, answer: Any) -> Optional[str]: answer = str(answer) return self._next_question_map.get(answer, self._default_next_question_key) - def set_default_next_question_key(self, next_question_key): + def set_default_next_question_key(self, next_question_key: str) -> None: self._default_next_question_key = next_question_key def _resolve_key_path(self, key_path: List, context: Dict) -> List[str]: @@ -150,49 +177,59 @@ def _resolve_key_path(self, key_path: List, context: Dict) -> List[str]: raise ValueError(f'Invalid value "{unresolved_key}" in key path') return resolved_key_path - def _resolve_default_answer(self, context: Dict) -> Optional[Any]: + def _resolve_value_from_expression(self, expression: Any, context: Optional[Dict] = None) -> Optional[Any]: """ - a question may have a default answer provided directly through the "default_answer" value + a question may have a value provided directly as string or number value or indirectly from cookiecutter context using a key path Parameters ---------- context - Cookiecutter context used to resolve 
default values and answered questions' answers. + Cookiecutter context used to resolve values. Raises ------ KeyError - When default value depends on the answer to a non-existent question + When an expression depends on the answer to a non-existent question ValueError - The default value is malformed + The expression is malformed Returns ------- - Optional default answer, it might be resolved from cookiecutter context using specified key path. + Optional value, it might be resolved from cookiecutter context using specified key path. """ - if isinstance(self._default_answer, dict): + if isinstance(expression, dict): + context = context if context else {} + # load value using key path from cookiecutter - if "keyPath" not in self._default_answer: - raise KeyError(f'Missing key "keyPath" in question default "{self._default_answer}".') - unresolved_key_path = self._default_answer.get("keyPath", []) + if "keyPath" not in expression: + raise KeyError(f'Missing key "keyPath" in "{expression}".') + unresolved_key_path = expression.get("keyPath", []) if not isinstance(unresolved_key_path, list): - raise ValueError(f'Invalid default answer "{self._default_answer}" for question {self.key}') + raise ValueError(f'Invalid expression "{expression}" in question {self.key}') return context.get(str(self._resolve_key_path(unresolved_key_path, context))) + return expression + + def _resolve_text(self, context: Optional[Dict] = None) -> str: + resolved_text = self._resolve_value_from_expression(self._text, context) + if resolved_text is None: + raise ValueError(f"Cannot resolve value from expression: {self._text}") + return str(resolved_text) - return self._default_answer + def _resolve_default_answer(self, context: Optional[Dict] = None) -> Optional[Any]: + return self._resolve_value_from_expression(self._default_answer, context) class Info(Question): - def ask(self, context: Dict) -> None: - return click.echo(message=self._text) + def prompt(self, text: str, default_answer: 
Optional[Any]) -> Any: + return click.echo(message=text) class Confirm(Question): - def ask(self, context: Dict) -> bool: - return click.confirm(text=self._text) + def prompt(self, text: str, default_answer: Optional[Any]) -> Any: + return click.confirm(text=text) class Choice(Question): @@ -203,26 +240,27 @@ def __init__( options: List[str], default: Optional[str] = None, is_required: Optional[bool] = None, + allow_autofill: Optional[bool] = None, next_question_map: Optional[Dict[str, str]] = None, default_next_question_key: Optional[str] = None, ): if not options: raise ValueError("No defined options") self._options = options - super().__init__(key, text, default, is_required, next_question_map, default_next_question_key) + super().__init__(key, text, default, is_required, allow_autofill, next_question_map, default_next_question_key) - def ask(self, context: Dict) -> str: - resolved_default_answer = self._resolve_default_answer(context) - click.echo(self._text) + def prompt(self, text: str, default_answer: Optional[Any]) -> Any: + click.echo(text) for index, option in enumerate(self._options): click.echo(f"\t{index + 1} - {option}") options_indexes = self._get_options_indexes(base=1) choices = list(map(str, options_indexes)) choice = click.prompt( text="Choice", - default=resolved_default_answer, + default=default_answer, show_choices=False, type=click.Choice(choices), + show_default=default_answer is not None, ) return self._options[int(choice) - 1] @@ -245,6 +283,7 @@ def create_question_from_json(question_json: Dict) -> Question: options = question_json.get("options") default = question_json.get("default") is_required = question_json.get("isRequired") + allow_autofill = question_json.get("allowAutofill") next_question_map = question_json.get("nextQuestion") default_next_question = question_json.get("defaultNextQuestion") kind_str = question_json.get("kind") @@ -256,6 +295,7 @@ def create_question_from_json(question_json: Dict) -> Question: "text": text, 
"default": default, "is_required": is_required, + "allow_autofill": allow_autofill, "next_question_map": next_question_map, "default_next_question_key": default_next_question, } diff --git a/samcli/lib/cookiecutter/template.py b/samcli/lib/cookiecutter/template.py index c7d643bb43..46b851985e 100644 --- a/samcli/lib/cookiecutter/template.py +++ b/samcli/lib/cookiecutter/template.py @@ -3,15 +3,17 @@ values of the context and how to generate a project from the given template and provided context """ import logging -from typing import Any, Dict, List, Optional +from typing import Dict, List, Optional + from cookiecutter.exceptions import RepositoryNotFound, UnknownRepoType from cookiecutter.main import cookiecutter + from samcli.commands.exceptions import UserException from samcli.lib.init.arbitrary_project import generate_non_cookiecutter_project +from .exceptions import GenerateProjectFailedError, InvalidLocationError, PreprocessingError, PostprocessingError from .interactive_flow import InteractiveFlow from .plugin import Plugin from .processor import Processor -from .exceptions import GenerateProjectFailedError, InvalidLocationError, PreprocessingError, PostprocessingError LOG = logging.getLogger(__name__) @@ -41,6 +43,8 @@ class Template: An optional series of plugins to be plugged in. A plugin defines its own interactive_flow, preprocessor and postprocessor. A plugin is a sub-set of the template, if there is a common behavior among multiple templates, it is better to be extracted to a plugin that can then be plugged in to each of these templates. 
+ metadata: Optional[Dict] + An optional dictionary with extra information about the template Methods ------- @@ -61,6 +65,7 @@ def __init__( preprocessors: Optional[List[Processor]] = None, postprocessors: Optional[List[Processor]] = None, plugins: Optional[List[Plugin]] = None, + metadata: Optional[Dict] = None, ): """ Initialize the class @@ -84,6 +89,8 @@ def __init__( An optional series of plugins to be plugged in. A plugin defines its own interactive_flow, preprocessor and postprocessor. A plugin is a sub-set of the template, if there is a common behavior among multiple templates, it is better to be extracted to a plugin that can then be plugged in to each of these templates. + metadata: Optional[Dict] + An optional dictionary with extra information about the template """ self._location = location self._interactive_flows = interactive_flows or [] @@ -97,8 +104,9 @@ def __init__( self._preprocessors.append(plugin.preprocessor) if plugin.postprocessor: self._postprocessors.append(plugin.postprocessor) + self.metadata = metadata - def run_interactive_flows(self) -> Dict: + def run_interactive_flows(self, context: Optional[Dict] = None) -> Dict: """ prompt the user a series of questions' flows and gather the answers to create the cookiecutter context. The questions are identified by keys. If multiple questions, whether within the same flow or across @@ -112,14 +120,14 @@ def run_interactive_flows(self) -> Dict: A Dictionary in the form of {question.key: answer} representing user's answers to the flows' questions """ try: - context: Dict[str, Any] = {} + context = context if context else {} for flow in self._interactive_flows: context = flow.run(context) return context except Exception as e: raise UserException(str(e), wrapped_from=e.__class__.__name__) from e - def generate_project(self, context: Dict): + def generate_project(self, context: Dict, output_dir: str) -> None: """ Generates a project based on this cookiecutter template and the given context. 
The context is first processed and manipulated by series of preprocessors(if any) then the project is generated and finally @@ -129,6 +137,8 @@ def generate_project(self, context: Dict): ---------- context: Dict the cookiecutter context to fulfill the values of cookiecutter.json keys + output_dir: str + the directory where project will be generated in Raise: ------ @@ -144,7 +154,13 @@ def generate_project(self, context: Dict): try: LOG.debug("Baking a new template with cookiecutter with all parameters") - cookiecutter(template=self._location, output_dir=".", no_input=True, extra_context=context) + cookiecutter( + template=self._location, + output_dir=output_dir, + no_input=True, + extra_context=context, + overwrite_if_exists=True, + ) except RepositoryNotFound as e: # cookiecutter.json is not found in the template. Let's just clone it directly without # using cookiecutter and call it done. diff --git a/samcli/lib/pipeline/__init__.py b/samcli/lib/pipeline/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/samcli/lib/pipeline/bootstrap/__init__.py b/samcli/lib/pipeline/bootstrap/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/samcli/lib/pipeline/bootstrap/resource.py b/samcli/lib/pipeline/bootstrap/resource.py new file mode 100644 index 0000000000..a7b39dd965 --- /dev/null +++ b/samcli/lib/pipeline/bootstrap/resource.py @@ -0,0 +1,138 @@ +""" Represents AWS resource""" +from typing import Optional + + +class ARNParts: + """ + Decompose a given ARN into its parts https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html + + Attributes + ---------- + partition: str + the partition part(AWS, aws-cn or aws-us-gov) of the ARN + service: str + the service part(S3, IAM, ECR, ...etc) of the ARN + region: str + the AWS region part(us-east-1, eu-west-1, ...etc) of the ARN + account-id: str + the account-id part of the ARN + resource-id: str + the resource-id part of the ARN + resource-type: str + the 
resource-type part of the ARN + """ + + partition: str + service: str + region: str + account_id: str + resource_id: str + + def __init__(self, arn: str) -> None: + parts = arn.split(":") + try: + [_, self.partition, self.service, self.region, self.account_id, self.resource_id] = parts + except ValueError as ex: + raise ValueError(f"Invalid ARN ({arn})") from ex + + +class Resource: + """ + Represents an AWS resource + + Attributes + ---------- + arn: str + the ARN of the resource + comment: str + the comment of the resource + is_user_provided: bool + True if the user provided the ARN of the resource during the initialization. It indicates whether this pipeline- + resource is provided by the user or created by SAM during `sam pipeline bootstrap` + + Methods + ------- + name(self) -> Optional[str]: + extracts and returns the resource name from its ARN + """ + + def __init__(self, arn: Optional[str], comment: Optional[str]) -> None: + self.arn: Optional[str] = arn + self.comment: Optional[str] = comment + self.is_user_provided: bool = bool(arn) + + def name(self) -> Optional[str]: + """ + extracts and returns the resource name from its ARN + Raises + ------ + ValueError if the ARN is invalid + """ + if not self.arn: + return None + arn_parts: ARNParts = ARNParts(arn=self.arn) + return arn_parts.resource_id + + +class IAMUser(Resource): + """ + Represents an AWS IAM User resource + Attributes + ---------- + access_key_id: Optional[str] + holds the AccessKeyId of the credential of this IAM user, if any. + secret_access_key: Optional[str] + holds the SecretAccessKey of the credential of this IAM user, if any. 
+ """ + + def __init__( + self, + arn: Optional[str], + comment: Optional[str], + access_key_id: Optional[str] = None, + secret_access_key: Optional[str] = None, + ) -> None: + self.access_key_id: Optional[str] = access_key_id + self.secret_access_key: Optional[str] = secret_access_key + super().__init__(arn=arn, comment=comment) + + +class S3Bucket(Resource): + """ + Represents an AWS S3Bucket resource + Attributes + ---------- + kms_key_arn: Optional[str] + The ARN of the KMS key used in encrypting this S3Bucket, if any. + """ + + def __init__(self, arn: Optional[str], comment: Optional[str], kms_key_arn: Optional[str] = None) -> None: + self.kms_key_arn: Optional[str] = kms_key_arn + super().__init__(arn=arn, comment=comment) + + +class ECRImageRepository(Resource): + """ Represents an AWS ECR image repository resource """ + + def __init__(self, arn: Optional[str], comment: Optional[str]) -> None: + super().__init__(arn=arn, comment=comment) + + def get_uri(self) -> Optional[str]: + """ + extracts and returns the URI of the given ECR image repository from its ARN + see https://docs.aws.amazon.com/AmazonECR/latest/userguide/Registries.html + Raises + ------ + ValueError if the ARN is invalid + """ + if not self.arn: + return None + arn_parts: ARNParts = ARNParts(self.arn) + # ECR's resource_id contains the resource-type("resource") which is excluded from the URL + # from docs: https://docs.aws.amazon.com/AmazonECR/latest/userguide/security_iam_service-with-iam.html + # ECR's ARN: arn:${Partition}:ecr:${Region}:${Account}:repository/${Repository-name} + if not arn_parts.resource_id.startswith("repository/"): + raise ValueError(f"Invalid ECR ARN ({self.arn}), can't extract the URL from it.") + i = len("repository/") + repo_name = arn_parts.resource_id[i:] + return f"{arn_parts.account_id}.dkr.ecr.{arn_parts.region}.amazonaws.com/{repo_name}" diff --git a/samcli/lib/pipeline/bootstrap/stage.py b/samcli/lib/pipeline/bootstrap/stage.py new file mode 100644 index 
0000000000..d98081237b --- /dev/null +++ b/samcli/lib/pipeline/bootstrap/stage.py @@ -0,0 +1,330 @@ +""" Application Environment """ +import json +import os +import pathlib +import re +from itertools import chain +from typing import Dict, List, Optional, Tuple + +import boto3 +import click + +from samcli.lib.config.samconfig import SamConfig +from samcli.lib.utils.colors import Colored +from samcli.lib.utils.managed_cloudformation_stack import manage_stack, StackOutput +from samcli.lib.pipeline.bootstrap.resource import Resource, IAMUser, ECRImageRepository + +CFN_TEMPLATE_PATH = str(pathlib.Path(os.path.dirname(__file__))) +STACK_NAME_PREFIX = "aws-sam-cli-managed" +STAGE_RESOURCES_STACK_NAME_SUFFIX = "pipeline-resources" +STAGE_RESOURCES_CFN_TEMPLATE = "stage_resources.yaml" +PIPELINE_USER = "pipeline_user" +PIPELINE_EXECUTION_ROLE = "pipeline_execution_role" +CLOUDFORMATION_EXECUTION_ROLE = "cloudformation_execution_role" +ARTIFACTS_BUCKET = "artifacts_bucket" +ECR_IMAGE_REPOSITORY = "image_repository" +REGION = "region" + + +class Stage: + """ + Represents an application stage: Beta, Gamma, Prod ...etc + + Attributes + ---------- + name: str + The name of the environment + aws_profile: Optional[str] + The named AWS profile (in user's machine) of the AWS account to deploy this environment to. + aws_region: Optional[str] + The AWS region to deploy this environment to. + pipeline_user: IAMUser + The IAM User having its AccessKeyId and SecretAccessKey credentials shared with the CI/CD system + pipeline_execution_role: Resource + The IAM role assumed by the pipeline-user to get access to the AWS account and executes the + CloudFormation stack. + cloudformation_execution_role: Resource + The IAM role assumed by the CloudFormation service to executes the CloudFormation stack. + artifacts_bucket: Resource + The S3 bucket to hold the SAM build artifacts of the application's CFN template. 
+ create_image_repository: bool + A boolean flag that determines whether the user wants to create an ECR image repository or not + image_repository: ECRImageRepository + The ECR image repository to hold the image container of lambda functions with Image package-type + + Methods: + -------- + did_user_provide_all_required_resources(self) -> bool: + checks if all of the environment's required resources (pipeline_user, pipeline_execution_role, + cloudformation_execution_role, artifacts_bucket and image_repository) are provided by the user. + bootstrap(self, confirm_changeset: bool = True) -> None: + deploys the CFN template ./stage_resources.yaml to the AWS account identified by aws_profile and + aws_region member fields. if aws_profile is not provided, it will fallback to default boto3 credentials' + resolving. Note that ./stage_resources.yaml template accepts the ARNs of already existing resources(if + any) as parameters and it will skip the creation of those resources but will use the ARNs to set the proper + permissions of other missing resources(resources created by the template) + save_config(self, config_dir: str, filename: str, cmd_names: List[str]): + save the Artifacts bucket name, ECR image repository URI and ARNs of pipeline_user, pipeline_execution_role and + cloudformation_execution_role to the "pipelineconfig.toml" file so that it can be auto-filled during + the `sam pipeline init` command. + print_resources_summary(self) -> None: + prints to the screen(console) the ARNs of the created and provided resources. 
+ """ + + def __init__( + self, + name: str, + aws_profile: Optional[str] = None, + aws_region: Optional[str] = None, + pipeline_user_arn: Optional[str] = None, + pipeline_execution_role_arn: Optional[str] = None, + cloudformation_execution_role_arn: Optional[str] = None, + artifacts_bucket_arn: Optional[str] = None, + create_image_repository: bool = False, + image_repository_arn: Optional[str] = None, + ) -> None: + self.name: str = name + self.aws_profile: Optional[str] = aws_profile + self.aws_region: Optional[str] = aws_region + self.pipeline_user: IAMUser = IAMUser(arn=pipeline_user_arn, comment="Pipeline IAM user") + self.pipeline_execution_role: Resource = Resource( + arn=pipeline_execution_role_arn, comment="Pipeline execution role" + ) + self.cloudformation_execution_role: Resource = Resource( + arn=cloudformation_execution_role_arn, comment="CloudFormation execution role" + ) + self.artifacts_bucket: Resource = Resource(arn=artifacts_bucket_arn, comment="Artifact bucket") + self.create_image_repository: bool = create_image_repository + self.image_repository: ECRImageRepository = ECRImageRepository( + arn=image_repository_arn, comment="ECR image repository" + ) + self.color = Colored() + + def did_user_provide_all_required_resources(self) -> bool: + """Check if the user provided all of the environment resources or not""" + return all(resource.is_user_provided for resource in self._get_resources()) + + def _get_non_user_provided_resources_msg(self) -> str: + resource_comments = chain.from_iterable( + [ + [] if self.pipeline_user.is_user_provided else [self.pipeline_user.comment], + [] if self.pipeline_execution_role.is_user_provided else [self.pipeline_execution_role.comment], + [] + if self.cloudformation_execution_role.is_user_provided + else [self.cloudformation_execution_role.comment], + [] if self.artifacts_bucket.is_user_provided else [self.artifacts_bucket.comment], + [] + if self.image_repository.is_user_provided or not self.create_image_repository 
+ else [self.image_repository.comment], + ] + ) + return "\n".join([f"\t- {comment}" for comment in resource_comments]) + + def bootstrap(self, confirm_changeset: bool = True) -> bool: + """ + Deploys the CFN template(./stage_resources.yaml) which deploys: + * Pipeline IAM User + * Pipeline execution IAM role + * CloudFormation execution IAM role + * Artifacts' S3 Bucket + * ECR image repository + to the AWS account associated with the given environment. It will not redeploy the stack if already exists. + This CFN template accepts the ARNs of the resources as parameters and will not create a resource if already + provided, this way we can conditionally create a resource only if the user didn't provide it + + THIS METHOD UPDATES THE STATE OF THE CALLING INSTANCE(self) IT WILL SET THE VALUES OF THE RESOURCES ATTRIBUTES + + Parameters + ---------- + confirm_changeset: bool + if set to false, the stage_resources.yaml CFN template will directly be deployed, otherwise, + the user will be prompted for confirmation + + Returns True if bootstrapped, otherwise False + """ + + if self.did_user_provide_all_required_resources(): + click.secho( + self.color.yellow(f"\nAll required resources for the {self.name} environment exist, skipping creation.") + ) + return True + + missing_resources_msg: str = self._get_non_user_provided_resources_msg() + click.echo( + f"This will create the following required resources for the '{self.name}' environment: \n" + f"{missing_resources_msg}" + ) + if confirm_changeset: + confirmed: bool = click.confirm("Should we proceed with the creation?") + if not confirmed: + click.secho(self.color.red("Canceling pipeline bootstrap creation.")) + return False + + environment_resources_template_body = Stage._read_template(STAGE_RESOURCES_CFN_TEMPLATE) + output: StackOutput = manage_stack( + stack_name=self._get_stack_name(), + region=self.aws_region, + profile=self.aws_profile, + template_body=environment_resources_template_body, + parameter_overrides={ + 
"PipelineUserArn": self.pipeline_user.arn or "", + "PipelineExecutionRoleArn": self.pipeline_execution_role.arn or "", + "CloudFormationExecutionRoleArn": self.cloudformation_execution_role.arn or "", + "ArtifactsBucketArn": self.artifacts_bucket.arn or "", + "CreateImageRepository": "true" if self.create_image_repository else "false", + "ImageRepositoryArn": self.image_repository.arn or "", + }, + ) + + pipeline_user_secret_sm_id = output.get("PipelineUserSecretKey") + + self.pipeline_user.arn = output.get("PipelineUser") + if pipeline_user_secret_sm_id: + ( + self.pipeline_user.access_key_id, + self.pipeline_user.secret_access_key, + ) = Stage._get_pipeline_user_secret_pair(pipeline_user_secret_sm_id, self.aws_profile, self.aws_region) + self.pipeline_execution_role.arn = output.get("PipelineExecutionRole") + self.cloudformation_execution_role.arn = output.get("CloudFormationExecutionRole") + self.artifacts_bucket.arn = output.get("ArtifactsBucket") + self.image_repository.arn = output.get("ImageRepository") + return True + + @staticmethod + def _get_pipeline_user_secret_pair( + secret_manager_arn: str, profile: Optional[str], region: Optional[str] + ) -> Tuple[str, str]: + """ + Helper method to fetch pipeline user's AWS Credentials from secrets manager. + SecretString need to be in following JSON format: + { + "aws_access_key_id": "AWSSECRETACCESSKEY123", + "aws_secret_access_key": "mYSuperSecretDummyKey" + } + Parameters + ---------- + secret_manager_arn: + ARN of secret manager entry which holds pipeline user key. + profile: + The named AWS profile (in user's machine) of the AWS account to deploy this environment to. + region: + The AWS region to deploy this environment to. + + Returns tuple of aws_access_key_id and aws_secret_access_key. 
 + + """ + session = boto3.Session(profile_name=profile, region_name=region if region else None) # type: ignore + secrets_manager_client = session.client("secretsmanager") + response = secrets_manager_client.get_secret_value(SecretId=secret_manager_arn) + secret_string = response["SecretString"] + secret_json = json.loads(secret_string) + return secret_json["aws_access_key_id"], secret_json["aws_secret_access_key"] + + @staticmethod + def _read_template(template_file_name: str) -> str: + template_path: str = os.path.join(CFN_TEMPLATE_PATH, template_file_name) + with open(template_path, "r", encoding="utf-8") as fp: + template_body = fp.read() + return template_body + + def save_config(self, config_dir: str, filename: str, cmd_names: List[str]) -> None: + """ + save the Artifacts bucket name, ECR image repository URI and ARNs of pipeline_user, pipeline_execution_role and + cloudformation_execution_role to the given filename and directory. + + Parameters + ---------- + config_dir: str + the directory of the toml file to save to + filename: str + the name of the toml file to save to + cmd_names: List[str] + nested command name to scope the saved configs to inside the toml file + + Raises + ------ + ValueError: if the artifacts_bucket or ImageRepository ARNs are invalid + """ + + samconfig: SamConfig = SamConfig(config_dir=config_dir, filename=filename) + + if self.pipeline_user.arn: + samconfig.put(cmd_names=cmd_names, section="parameters", key=PIPELINE_USER, value=self.pipeline_user.arn) + + # Computing Artifacts bucket name and ECR image repository URL may throw an exception if the ARNs are wrong + # Let's swallow such an exception to be able to save the remaining resources + try: + artifacts_bucket_name: Optional[str] = self.artifacts_bucket.name() + except ValueError: + artifacts_bucket_name = "" + try: + image_repository_uri: Optional[str] = self.image_repository.get_uri() or "" + except ValueError: + image_repository_uri = "" + + environment_specific_configs: 
Dict[str, Optional[str]] = { + PIPELINE_EXECUTION_ROLE: self.pipeline_execution_role.arn, + CLOUDFORMATION_EXECUTION_ROLE: self.cloudformation_execution_role.arn, + ARTIFACTS_BUCKET: artifacts_bucket_name, + # even image repository can be None, we want to save it as empty string + # so that pipeline init command can pick it up + ECR_IMAGE_REPOSITORY: image_repository_uri, + REGION: self.aws_region, + } + + for key, value in environment_specific_configs.items(): + if value is not None: + samconfig.put( + cmd_names=cmd_names, + section="parameters", + key=key, + value=value, + env=self.name, + ) + + samconfig.flush() + + def save_config_safe(self, config_dir: str, filename: str, cmd_names: List[str]) -> None: + """ + A safe version of save_config method that doesn't raise any exception + """ + try: + self.save_config(config_dir, filename, cmd_names) + except Exception: + pass + + def _get_resources(self) -> List[Resource]: + resources = [ + self.pipeline_user, + self.pipeline_execution_role, + self.cloudformation_execution_role, + self.artifacts_bucket, + ] + if self.create_image_repository or self.image_repository.arn: # Image Repository is optional + resources.append(self.image_repository) + return resources + + def print_resources_summary(self) -> None: + """prints to the screen(console) the ARNs of the created and provided resources.""" + + provided_resources = [] + created_resources = [] + for resource in self._get_resources(): + if resource.is_user_provided: + provided_resources.append(resource) + else: + created_resources.append(resource) + + if created_resources: + click.secho(self.color.green("The following resources were created in your account:")) + for resource in created_resources: + click.secho(self.color.green(f"\t- {resource.comment}")) + + if not self.pipeline_user.is_user_provided: + click.secho(self.color.green("Pipeline IAM user credential:")) + click.secho(self.color.green(f"\tAWS_ACCESS_KEY_ID: {self.pipeline_user.access_key_id}")) + 
click.secho(self.color.green(f"\tAWS_SECRET_ACCESS_KEY: {self.pipeline_user.secret_access_key}")) + + def _get_stack_name(self) -> str: + sanitized_stage_name: str = re.sub("[^0-9a-zA-Z]+", "-", self.name) + return f"{STACK_NAME_PREFIX}-{sanitized_stage_name}-{STAGE_RESOURCES_STACK_NAME_SUFFIX}" diff --git a/samcli/lib/pipeline/bootstrap/stage_resources.yaml b/samcli/lib/pipeline/bootstrap/stage_resources.yaml new file mode 100644 index 0000000000..bcc5e94423 --- /dev/null +++ b/samcli/lib/pipeline/bootstrap/stage_resources.yaml @@ -0,0 +1,358 @@ +AWSTemplateFormatVersion: '2010-09-09' +Transform: AWS::Serverless-2016-10-31 + +Parameters: + PipelineUserArn: + Type: String + PipelineExecutionRoleArn: + Type: String + CloudFormationExecutionRoleArn: + Type: String + ArtifactsBucketArn: + Type: String + CreateImageRepository: + Type: String + Default: false + AllowedValues: [true, false] + ImageRepositoryArn: + Type: String + +Conditions: + MissingPipelineUser: !Equals [!Ref PipelineUserArn, ""] + MissingPipelineExecutionRole: !Equals [!Ref PipelineExecutionRoleArn, ""] + MissingCloudFormationExecutionRole: !Equals [!Ref CloudFormationExecutionRoleArn, ""] + MissingArtifactsBucket: !Equals [!Ref ArtifactsBucketArn, ""] + ShouldHaveImageRepository: !Or [!Equals [!Ref CreateImageRepository, "true"], !Not [!Equals [!Ref ImageRepositoryArn, ""]]] + MissingImageRepository: !And [!Condition ShouldHaveImageRepository, !Equals [!Ref ImageRepositoryArn, ""]] + +Resources: + PipelineUser: + Type: AWS::IAM::User + Condition: MissingPipelineUser + Properties: + Tags: + - Key: ManagedStackSource + Value: AwsSamCli + Policies: + - PolicyName: AssumeRoles + PolicyDocument: + Version: "2012-10-17" + Statement: + - Effect: Allow + Action: + - "sts:AssumeRole" + Resource: "*" + Condition: + StringEquals: + aws:ResourceTag/Role: pipeline-execution-role + + PipelineUserAccessKey: + Type: AWS::IAM::AccessKey + Condition: MissingPipelineUser + Properties: + Serial: 1 + Status: Active + 
UserName: !Ref PipelineUser + + PipelineUserSecretKey: + Type: AWS::SecretsManager::Secret + Condition: MissingPipelineUser + Properties: + SecretString: !Sub '{"aws_access_key_id": "${PipelineUserAccessKey}", "aws_secret_access_key": "${PipelineUserAccessKey.SecretAccessKey}"}' + + CloudFormationExecutionRole: + Type: AWS::IAM::Role + Condition: MissingCloudFormationExecutionRole + Properties: + Tags: + - Key: ManagedStackSource + Value: AwsSamCli + AssumeRolePolicyDocument: + Version: 2012-10-17 + Statement: + - Effect: Allow + Principal: + Service: cloudformation.amazonaws.com + Action: + - 'sts:AssumeRole' + Policies: + - PolicyName: GrantCloudFormationFullAccess + PolicyDocument: + Version: 2012-10-17 + Statement: + - Effect: Allow + Action: '*' + Resource: '*' + + PipelineExecutionRole: + Type: AWS::IAM::Role + Condition: MissingPipelineExecutionRole + Properties: + Tags: + - Key: ManagedStackSource + Value: AwsSamCli + - Key: Role + Value: pipeline-execution-role + AssumeRolePolicyDocument: + Version: 2012-10-17 + Statement: + - Effect: Allow + Principal: + AWS: + - Fn::If: + - MissingPipelineUser + - !GetAtt PipelineUser.Arn + - !Ref PipelineUserArn + Action: + - 'sts:AssumeRole' + - Effect: Allow + Principal: + # Allow roles with tag Role=aws-sam-pipeline-codebuild-service-role to assume this role. + # This is required when CodePipeline is the CI/CD system of choice. 
+ AWS: + - !If + - MissingPipelineUser + - !Ref AWS::AccountId + - !Select [4, !Split [':', !Ref PipelineUserArn]] + Action: + - 'sts:AssumeRole' + Condition: + StringEquals: + aws:PrincipalTag/Role: aws-sam-pipeline-codebuild-service-role + + ArtifactsBucket: + Type: AWS::S3::Bucket + Condition: MissingArtifactsBucket + DeletionPolicy: "Retain" + Properties: + Tags: + - Key: ManagedStackSource + Value: AwsSamCli + LoggingConfiguration: + DestinationBucketName: + !Ref ArtifactsLoggingBucket + LogFilePrefix: "artifacts-logs" + VersioningConfiguration: + Status: Enabled + BucketEncryption: + ServerSideEncryptionConfiguration: + - ServerSideEncryptionByDefault: + SSEAlgorithm: AES256 + + ArtifactsBucketPolicy: + Type: AWS::S3::BucketPolicy + Condition: MissingArtifactsBucket + Properties: + Bucket: !Ref ArtifactsBucket + PolicyDocument: + Statement: + - Effect: "Deny" + Action: "s3:*" + Principal: "*" + Resource: + - !Join [ '',[ !GetAtt ArtifactsBucket.Arn, '/*' ] ] + - !GetAtt ArtifactsBucket.Arn + Condition: + Bool: + aws:SecureTransport: false + - Effect: "Allow" + Action: + - 's3:GetObject*' + - 's3:PutObject*' + - 's3:GetBucket*' + - 's3:List*' + Resource: + - !Join ['',[!GetAtt ArtifactsBucket.Arn, '/*']] + - !GetAtt ArtifactsBucket.Arn + Principal: + AWS: + - Fn::If: + - MissingPipelineExecutionRole + - !GetAtt PipelineExecutionRole.Arn + - !Ref PipelineExecutionRoleArn + - Fn::If: + - MissingCloudFormationExecutionRole + - !GetAtt CloudFormationExecutionRole.Arn + - !Ref CloudFormationExecutionRoleArn + + ArtifactsLoggingBucket: + Type: AWS::S3::Bucket + Condition: MissingArtifactsBucket + DeletionPolicy: "Retain" + Properties: + AccessControl: "LogDeliveryWrite" + Tags: + - Key: ManagedStackSource + Value: AwsSamCli + VersioningConfiguration: + Status: Enabled + BucketEncryption: + ServerSideEncryptionConfiguration: + - ServerSideEncryptionByDefault: + SSEAlgorithm: AES256 + + ArtifactsLoggingBucketPolicy: + Type: AWS::S3::BucketPolicy + Condition: 
MissingArtifactsBucket + Properties: + Bucket: !Ref ArtifactsLoggingBucket + PolicyDocument: + Statement: + - Effect: "Deny" + Action: "s3:*" + Principal: "*" + Resource: + - !Join [ '',[ !GetAtt ArtifactsLoggingBucket.Arn, '/*' ] ] + - !GetAtt ArtifactsLoggingBucket.Arn + Condition: + Bool: + aws:SecureTransport: false + + PipelineExecutionRolePermissionPolicy: + Type: AWS::IAM::Policy + Condition: MissingPipelineExecutionRole + Properties: + PolicyName: PipelineExecutionRolePermissions + PolicyDocument: + Version: 2012-10-17 + Statement: + - Effect: Allow + Action: 'iam:PassRole' + Resource: + Fn::If: + - MissingCloudFormationExecutionRole + - !GetAtt CloudFormationExecutionRole.Arn + - !Ref CloudFormationExecutionRoleArn + - Effect: Allow + Action: + - "cloudformation:CreateChangeSet" + - "cloudformation:DescribeChangeSet" + - "cloudformation:ExecuteChangeSet" + - "cloudformation:DescribeStackEvents" + - "cloudformation:DescribeStacks" + - "cloudformation:GetTemplateSummary" + - "cloudformation:DescribeStackResource" + Resource: '*' + - Effect: Allow + Action: + - 's3:GetObject*' + - 's3:PutObject*' + - 's3:GetBucket*' + - 's3:List*' + Resource: + Fn::If: + - MissingArtifactsBucket + - - !Join [ '',[ !GetAtt ArtifactsBucket.Arn, '/*' ] ] + - !GetAtt ArtifactsBucket.Arn + - - !Join [ '',[ !Ref ArtifactsBucketArn, '/*' ] ] + - !Ref ArtifactsBucketArn + - Fn::If: + - ShouldHaveImageRepository + - Effect: "Allow" + Action: "ecr:GetAuthorizationToken" + Resource: "*" + - !Ref AWS::NoValue + - Fn::If: + - ShouldHaveImageRepository + - Effect: "Allow" + Action: + - "ecr:GetDownloadUrlForLayer" + - "ecr:BatchGetImage" + - "ecr:BatchCheckLayerAvailability" + - "ecr:PutImage" + - "ecr:InitiateLayerUpload" + - "ecr:UploadLayerPart" + - "ecr:CompleteLayerUpload" + Resource: + Fn::If: + - MissingImageRepository + - !GetAtt ImageRepository.Arn + - !Ref ImageRepositoryArn + - !Ref AWS::NoValue + Roles: + - !Ref PipelineExecutionRole + + ImageRepository: + Type: 
AWS::ECR::Repository + Condition: MissingImageRepository + Properties: + RepositoryPolicyText: + Version: "2012-10-17" + Statement: + - Sid: LambdaECRImageRetrievalPolicy + Effect: Allow + Principal: + Service: lambda.amazonaws.com + Action: + - "ecr:GetDownloadUrlForLayer" + - "ecr:BatchGetImage" + - "ecr:GetRepositoryPolicy" + - "ecr:SetRepositoryPolicy" + - "ecr:DeleteRepositoryPolicy" + - Sid: AllowPushPull + Effect: Allow + Principal: + AWS: + - Fn::If: + - MissingPipelineExecutionRole + - !GetAtt PipelineExecutionRole.Arn + - !Ref PipelineExecutionRoleArn + - Fn::If: + - MissingCloudFormationExecutionRole + - !GetAtt CloudFormationExecutionRole.Arn + - !Ref CloudFormationExecutionRoleArn + Action: + - "ecr:GetDownloadUrlForLayer" + - "ecr:BatchGetImage" + - "ecr:BatchCheckLayerAvailability" + - "ecr:PutImage" + - "ecr:InitiateLayerUpload" + - "ecr:UploadLayerPart" + - "ecr:CompleteLayerUpload" + +Outputs: + PipelineUser: + Description: ARN of the Pipeline IAM User + Value: + Fn::If: + - MissingPipelineUser + - !GetAtt PipelineUser.Arn + - !Ref PipelineUserArn + + PipelineUserSecretKey: + Description: AWS Access Key and Secret Key of pipeline user. 
+ Condition: MissingPipelineUser + Value: !Ref PipelineUserSecretKey + + CloudFormationExecutionRole: + Description: ARN of the IAM Role(CloudFormationExecutionRole) + Value: + Fn::If: + - MissingCloudFormationExecutionRole + - !GetAtt CloudFormationExecutionRole.Arn + - !Ref CloudFormationExecutionRoleArn + + PipelineExecutionRole: + Description: ARN of the IAM Role(PipelineExecutionRole) + Value: + Fn::If: + - MissingPipelineExecutionRole + - !GetAtt PipelineExecutionRole.Arn + - !Ref PipelineExecutionRoleArn + + ArtifactsBucket: + Description: ARN of the Artifacts bucket + Value: + Fn::If: + - MissingArtifactsBucket + - !GetAtt ArtifactsBucket.Arn + - !Ref ArtifactsBucketArn + + ImageRepository: + Description: ARN of the ECR image repository + Condition: ShouldHaveImageRepository + Value: + Fn::If: + - MissingImageRepository + - !GetAtt ImageRepository.Arn + - !Ref ImageRepositoryArn diff --git a/samcli/lib/utils/colors.py b/samcli/lib/utils/colors.py index 84e3cbdbd7..84767f0fec 100644 --- a/samcli/lib/utils/colors.py +++ b/samcli/lib/utils/colors.py @@ -58,6 +58,10 @@ def underline(self, msg): """Underline the input""" return click.style(msg, underline=True) if self.colorize else msg + def bold(self, msg): + """Bold the input""" + return click.style(msg, bold=True) if self.colorize else msg + def _color(self, msg, color): """Internal helper method to add colors to input""" kwargs = {"fg": color} diff --git a/samcli/lib/utils/defaults.py b/samcli/lib/utils/defaults.py new file mode 100644 index 0000000000..4a07b113ac --- /dev/null +++ b/samcli/lib/utils/defaults.py @@ -0,0 +1,8 @@ +""" +Contains helpers for providing default values +""" +from botocore.session import get_session + + +def get_default_aws_region() -> str: + return get_session().get_config_variable("region") or "us-east-1" diff --git a/samcli/lib/utils/git_repo.py b/samcli/lib/utils/git_repo.py index 33e4597726..ddc7fba52f 100644 --- a/samcli/lib/utils/git_repo.py +++ b/samcli/lib/utils/git_repo.py 
@@ -132,7 +132,7 @@ def clone(self, clone_dir: Path, clone_name: str, replace_existing: bool = False output = clone_error.output.decode("utf-8") if "not found" in output.lower(): LOG.warning("WARN: Could not clone repo %s", self.url, exc_info=clone_error) - raise CloneRepoException from clone_error + raise CloneRepoException(output) from clone_error finally: self.clone_attempted = True diff --git a/samcli/lib/utils/managed_cloudformation_stack.py b/samcli/lib/utils/managed_cloudformation_stack.py index 25973fbc8b..29d148a7d9 100644 --- a/samcli/lib/utils/managed_cloudformation_stack.py +++ b/samcli/lib/utils/managed_cloudformation_stack.py @@ -1,20 +1,17 @@ """ Bootstrap's user's development environment by creating cloud resources required by SAM CLI """ - import logging +from collections.abc import Collection +from typing import cast, Dict, List, Optional, Union import boto3 - import click - from botocore.config import Config from botocore.exceptions import ClientError, BotoCoreError, NoRegionError, NoCredentialsError, ProfileNotFound from samcli.commands.exceptions import UserException, CredentialsError, RegionError - -SAM_CLI_STACK_PREFIX = "aws-sam-cli-managed-" LOG = logging.getLogger(__name__) @@ -25,10 +22,45 @@ def __init__(self, ex): super().__init__(message=message_fmt.format(ex=self.ex)) -def manage_stack(profile, region, stack_name, template_body): +class StackOutput: + def __init__(self, stack_output: List[Dict[str, str]]): + self._stack_output: List[Dict[str, str]] = stack_output + + def get(self, key) -> Optional[str]: + try: + return next(o for o in self._stack_output if o.get("OutputKey") == key).get("OutputValue") + except StopIteration: + return None + + +def manage_stack( + region: Optional[str], + stack_name: str, + template_body: str, + profile: Optional[str] = None, + parameter_overrides: Optional[Dict[str, Union[str, List[str]]]] = None, +) -> StackOutput: + """ + get or create a CloudFormation stack + + Parameters + ---------- + region: str 
+ AWS region for the CloudFormation stack + stack_name: str + CloudFormation stack name + template_body: str + CloudFormation template's content + profile: Optional[str] + AWS named profile for the AWS account + parameter_overrides: Optional[Dict[str, Union[str, List[str]]]] + Values of template parameters, if any. + + Returns: Stack output section(list of OutputKey, OutputValue pairs) + """ try: if profile: - session = boto3.Session(profile_name=profile, region_name=region if region else None) + session = boto3.Session(profile_name=profile, region_name=region if region else None) # type: ignore cloudformation_client = session.client("cloudformation") else: cloudformation_client = boto3.client( @@ -51,32 +83,41 @@ def manage_stack(profile, region, stack_name, template_body): "Error Setting Up Managed Stack Client: Unable to resolve a region. " "Please provide a region via the --region parameter or by the AWS_REGION environment variable." ) from ex - return _create_or_get_stack(cloudformation_client, stack_name, template_body) + return _create_or_get_stack(cloudformation_client, stack_name, template_body, parameter_overrides) -def _create_or_get_stack(cloudformation_client, stack_name, template_body): +# Todo Add _update_stack to handle the case when the values of the stack parameter got changed +def _create_or_get_stack( + cloudformation_client, + stack_name: str, + template_body: str, + parameter_overrides: Optional[Dict[str, Union[str, List[str]]]] = None, +) -> StackOutput: try: ds_resp = cloudformation_client.describe_stacks(StackName=stack_name) stacks = ds_resp["Stacks"] stack = stacks[0] click.echo("\n\tLooking for resources needed for deployment: Found!") - _check_sanity_of_stack(stack, stack_name) - return stack["Outputs"] + _check_sanity_of_stack(stack) + stack_outputs = cast(List[Dict[str, str]], stack["Outputs"]) + return StackOutput(stack_outputs) except ClientError: click.echo("\n\tLooking for resources needed for deployment: Not found.") try: stack = 
_create_stack( - cloudformation_client, stack_name, template_body + cloudformation_client, stack_name, template_body, parameter_overrides ) # exceptions are not captured from subcommands - _check_sanity_of_stack(stack, stack_name) - return stack["Outputs"] + _check_sanity_of_stack(stack) + stack_outputs = cast(List[Dict[str, str]], stack["Outputs"]) + return StackOutput(stack_outputs) except (ClientError, BotoCoreError) as ex: LOG.debug("Failed to create managed resources", exc_info=ex) raise ManagedStackError(str(ex)) from ex -def _check_sanity_of_stack(stack, stack_name): +def _check_sanity_of_stack(stack): + stack_name = stack.get("StackName") tags = stack.get("Tags", None) outputs = stack.get("Outputs", None) @@ -112,15 +153,23 @@ def _check_sanity_of_stack(stack, stack_name): raise UserException(msg) from ex -def _create_stack(cloudformation_client, stack_name, template_body): +def _create_stack( + cloudformation_client, + stack_name: str, + template_body: str, + parameter_overrides: Optional[Dict[str, Union[str, List[str]]]] = None, +): click.echo("\tCreating the required resources...") change_set_name = "InitialCreation" + parameters = _generate_stack_parameters(parameter_overrides) change_set_resp = cloudformation_client.create_change_set( StackName=stack_name, TemplateBody=template_body, Tags=[{"Key": "ManagedStackSource", "Value": "AwsSamCli"}], ChangeSetType="CREATE", ChangeSetName=change_set_name, # this must be unique for the stack, but we only create so that's fine + Capabilities=["CAPABILITY_IAM"], + Parameters=parameters, ) stack_id = change_set_resp["StackId"] change_waiter = cloudformation_client.get_waiter("change_set_create_complete") @@ -134,3 +183,16 @@ def _create_stack(cloudformation_client, stack_name, template_body): stacks = ds_resp["Stacks"] click.echo("\tSuccessfully created!") return stacks[0] + + +def _generate_stack_parameters( + parameter_overrides: Optional[Dict[str, Union[str, List[str]]]] = None +) -> List[Dict[str, str]]: + 
parameters = [] + if parameter_overrides: + for key, value in parameter_overrides.items(): + if isinstance(value, Collection) and not isinstance(value, str): + # Assumption: values don't include commas or spaces. Need to refactor to handle such a case if needed. + value = ",".join(value) + parameters.append({"ParameterKey": key, "ParameterValue": value}) + return parameters diff --git a/samcli/lib/utils/profile.py b/samcli/lib/utils/profile.py new file mode 100644 index 0000000000..47d0242eee --- /dev/null +++ b/samcli/lib/utils/profile.py @@ -0,0 +1,10 @@ +""" +Module for aws profile related helpers +""" +from typing import List, cast + +from botocore.session import Session + + +def list_available_profiles() -> List[str]: + return cast(List[str], Session().available_profiles) diff --git a/samcli/yamlhelper.py b/samcli/yamlhelper.py index ca091e61cb..222c7b717e 100644 --- a/samcli/yamlhelper.py +++ b/samcli/yamlhelper.py @@ -18,7 +18,7 @@ # pylint: disable=too-many-ancestors import json -from typing import Dict, Optional +from typing import cast, Dict, Optional from botocore.compat import OrderedDict import yaml @@ -109,20 +109,20 @@ def _dict_constructor(loader, node): return OrderedDict(loader.construct_pairs(node)) -def yaml_parse(yamlstr): +def yaml_parse(yamlstr) -> Dict: """Parse a yaml string""" try: # PyYAML doesn't support json as well as it should, so if the input # is actually just json it is better to parse it with the standard # json parser. 
- return json.loads(yamlstr, object_pairs_hook=OrderedDict) + return cast(Dict, json.loads(yamlstr, object_pairs_hook=OrderedDict)) except ValueError: yaml.SafeLoader.add_constructor(yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG, _dict_constructor) yaml.SafeLoader.add_multi_constructor("!", intrinsics_multi_constructor) - return yaml.safe_load(yamlstr) + return cast(Dict, yaml.safe_load(yamlstr)) -def parse_yaml_file(file_path, extra_context: Optional[Dict] = None): +def parse_yaml_file(file_path, extra_context: Optional[Dict] = None) -> Dict: """ Read the file, do variable substitution, parse it as JSON/YAML diff --git a/tests/integration/pipeline/__init__.py b/tests/integration/pipeline/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/tests/integration/pipeline/base.py b/tests/integration/pipeline/base.py new file mode 100644 index 0000000000..f82d27e357 --- /dev/null +++ b/tests/integration/pipeline/base.py @@ -0,0 +1,154 @@ +import os +import shutil +import logging +import uuid +from pathlib import Path +from typing import List, Optional, Set, Tuple, Any +from unittest import TestCase +from unittest.mock import Mock + +import boto3 +import botocore.exceptions +from botocore.exceptions import ClientError + +from samcli.lib.pipeline.bootstrap.stage import Stage + + +class PipelineBase(TestCase): + def base_command(self): + command = "sam" + if os.getenv("SAM_CLI_DEV"): + command = "samdev" + + return command + + +class InitIntegBase(PipelineBase): + generated_files: List[Path] = [] + + @classmethod + def setUpClass(cls) -> None: + # we need to compare the whole generated template, which is + # larger than normal diff size limit + cls.maxDiff = None + + def setUp(self) -> None: + super().setUp() + self.generated_files = [] + + def tearDown(self) -> None: + for generated_file in self.generated_files: + if generated_file.is_dir(): + shutil.rmtree(generated_file, ignore_errors=True) + elif generated_file.exists(): + generated_file.unlink() 
+ super().tearDown() + + def get_init_command_list(self, with_bootstrap=False): + command_list = [self.base_command(), "pipeline", "init"] + if with_bootstrap: + command_list.append("--bootstrap") + return command_list + + +class BootstrapIntegBase(PipelineBase): + region = "us-east-1" + stack_names: List[str] + cf_client: Any + randomized_stage_suffix: str + + @classmethod + def setUpClass(cls): + cls.cf_client = boto3.client("cloudformation", region_name=cls.region) + cls.randomized_stage_suffix = uuid.uuid4().hex[-6:] + + def setUp(self): + self.stack_names = [] + super().setUp() + shutil.rmtree(os.path.join(os.getcwd(), ".aws-sam", "pipeline"), ignore_errors=True) + + def tearDown(self): + for stack_name in self.stack_names: + self._cleanup_s3_buckets(stack_name) + self.cf_client.delete_stack(StackName=stack_name) + shutil.rmtree(os.path.join(os.getcwd(), ".aws-sam", "pipeline"), ignore_errors=True) + super().tearDown() + + def _cleanup_s3_buckets(self, stack_name): + try: + stack_resources = self.cf_client.describe_stack_resources(StackName=stack_name) + buckets = [ + resource + for resource in stack_resources["StackResources"] + if resource["ResourceType"] == "AWS::S3::Bucket" + ] + s3_client = boto3.client("s3") + for bucket in buckets: + s3_client.delete_bucket(Bucket=bucket.get("PhysicalResourceId")) + except botocore.exceptions.ClientError: + """No need to fail in cleanup""" + + def get_bootstrap_command_list( + self, + no_interactive: bool = False, + stage_name: Optional[str] = None, + profile_name: Optional[str] = None, + region: Optional[str] = None, + pipeline_user: Optional[str] = None, + pipeline_execution_role: Optional[str] = None, + cloudformation_execution_role: Optional[str] = None, + bucket: Optional[str] = None, + create_image_repository: bool = False, + image_repository: Optional[str] = None, + no_confirm_changeset: bool = False, + ): + command_list = [self.base_command(), "pipeline", "bootstrap"] + + if no_interactive: + command_list += 
["--no-interactive"] + if stage_name: + command_list += ["--stage", stage_name] + if profile_name: + command_list += ["--profile", profile_name] + if region: + command_list += ["--region", region] + if pipeline_user: + command_list += ["--pipeline-user", pipeline_user] + if pipeline_execution_role: + command_list += ["--pipeline-execution-role", pipeline_execution_role] + if cloudformation_execution_role: + command_list += ["--cloudformation-execution-role", cloudformation_execution_role] + if bucket: + command_list += ["--bucket", bucket] + if create_image_repository: + command_list += ["--create-image-repository"] + if image_repository: + command_list += ["--image-repository", image_repository] + if no_confirm_changeset: + command_list += ["--no-confirm-changeset"] + + return command_list + + def _extract_created_resource_logical_ids(self, stack_name: str) -> List[str]: + response = self.cf_client.describe_stack_resources(StackName=stack_name) + return [resource["LogicalResourceId"] for resource in response["StackResources"]] + + def _stack_exists(self, stack_name) -> bool: + try: + self.cf_client.describe_stacks(StackName=stack_name) + return True + except ClientError as ex: + if "does not exist" in ex.response.get("Error", {}).get("Message", ""): + return False + raise ex + + def _get_stage_and_stack_name(self, suffix: str = "") -> Tuple[str, str]: + # Method expects method name which can be a full path. 
Eg: test.integration.test_bootstrap_command.method_name + method_name = self.id().split(".")[-1] + stage_name = method_name.replace("_", "-") + suffix + "-" + self.randomized_stage_suffix + + mock_env = Mock() + mock_env.name = stage_name + stack_name = Stage._get_stack_name(mock_env) + + return stage_name, stack_name diff --git a/tests/integration/pipeline/test_bootstrap_command.py b/tests/integration/pipeline/test_bootstrap_command.py new file mode 100644 index 0000000000..0cf7741c5c --- /dev/null +++ b/tests/integration/pipeline/test_bootstrap_command.py @@ -0,0 +1,380 @@ +from unittest import skipIf + +from parameterized import parameterized + +from samcli.commands.pipeline.bootstrap.cli import PIPELINE_CONFIG_FILENAME, PIPELINE_CONFIG_DIR +from samcli.lib.config.samconfig import SamConfig +from tests.integration.pipeline.base import BootstrapIntegBase +from tests.testing_utils import ( + run_command_with_input, + RUNNING_ON_CI, + RUNNING_TEST_FOR_MASTER_ON_CI, + RUN_BY_CANARY, + run_command, + run_command_with_inputs, +) +import boto3 +from botocore.exceptions import ClientError + +# bootstrap tests require credentials and CI/CD will only add credentials to the env if the PR is from the same repo. +# This is to restrict tests to run outside of CI/CD, when the branch is not master or tests are not run by Canary +SKIP_BOOTSTRAP_TESTS = RUNNING_ON_CI and RUNNING_TEST_FOR_MASTER_ON_CI and not RUN_BY_CANARY + +# In order to run bootstrap integration test locally make sure your test account is configured as `default` account. 
+CREDENTIAL_PROFILE = "2" if not RUN_BY_CANARY else "1" + +CFN_OUTPUT_TO_CONFIG_KEY = { + "ArtifactsBucket": "artifacts_bucket", + "CloudFormationExecutionRole": "cloudformation_execution_role", + "PipelineExecutionRole": "pipeline_execution_role", + "PipelineUser": "pipeline_user", +} + + +@skipIf(SKIP_BOOTSTRAP_TESTS, "Skip bootstrap tests in CI/CD only") +class TestBootstrap(BootstrapIntegBase): + @parameterized.expand([("create_image_repository",), (False,)]) + def test_interactive_with_no_resources_provided(self, create_image_repository): + stage_name, stack_name = self._get_stage_and_stack_name() + self.stack_names = [stack_name] + + bootstrap_command_list = self.get_bootstrap_command_list() + + inputs = [ + stage_name, + CREDENTIAL_PROFILE, + self.region, # region + "", # pipeline user + "", # Pipeline execution role + "", # CloudFormation execution role + "", # Artifacts bucket + "y" if create_image_repository else "N", # Should we create ECR repo + ] + + if create_image_repository: + inputs.append("") # Create image repository + + inputs.append("") # Confirm summary + inputs.append("y") # Create resources + + bootstrap_process_execute = run_command_with_inputs(bootstrap_command_list, inputs) + + self.assertEqual(bootstrap_process_execute.process.returncode, 0) + stdout = bootstrap_process_execute.stdout.decode() + # make sure pipeline user's credential is printed + self.assertIn("ACCESS_KEY_ID", stdout) + self.assertIn("SECRET_ACCESS_KEY", stdout) + + common_resources = { + "PipelineUser", + "PipelineUserAccessKey", + "PipelineUserSecretKey", + "CloudFormationExecutionRole", + "PipelineExecutionRole", + "ArtifactsBucket", + "ArtifactsLoggingBucket", + "ArtifactsLoggingBucketPolicy", + "ArtifactsBucketPolicy", + "PipelineExecutionRolePermissionPolicy", + } + if create_image_repository: + self.assertSetEqual( + { + *common_resources, + "ImageRepository", + }, + set(self._extract_created_resource_logical_ids(stack_name)), + ) + 
CFN_OUTPUT_TO_CONFIG_KEY["ImageRepository"] = "image_repository" + self.validate_pipeline_config(stack_name, stage_name, list(CFN_OUTPUT_TO_CONFIG_KEY.keys())) + del CFN_OUTPUT_TO_CONFIG_KEY["ImageRepository"] + else: + self.assertSetEqual(common_resources, set(self._extract_created_resource_logical_ids(stack_name))) + self.validate_pipeline_config(stack_name, stage_name) + + @parameterized.expand([("create_image_repository",), (False,)]) + def test_non_interactive_with_no_resources_provided(self, create_image_repository): + stage_name, stack_name = self._get_stage_and_stack_name() + self.stack_names = [stack_name] + + bootstrap_command_list = self.get_bootstrap_command_list( + no_interactive=True, + create_image_repository=create_image_repository, + no_confirm_changeset=True, + region=self.region, + ) + + bootstrap_process_execute = run_command(bootstrap_command_list) + + self.assertEqual(bootstrap_process_execute.process.returncode, 2) + stderr = bootstrap_process_execute.stderr.decode() + self.assertIn("Missing required parameter", stderr) + + def test_interactive_with_all_required_resources_provided(self): + stage_name, stack_name = self._get_stage_and_stack_name() + self.stack_names = [stack_name] + + bootstrap_command_list = self.get_bootstrap_command_list() + + inputs = [ + stage_name, + CREDENTIAL_PROFILE, + self.region, # region + "arn:aws:iam::123:user/user-name", # pipeline user + "arn:aws:iam::123:role/role-name", # Pipeline execution role + "arn:aws:iam::123:role/role-name", # CloudFormation execution role + "arn:aws:s3:::bucket-name", # Artifacts bucket + "N", # Should we create ECR repo, 3 - specify one + "", + ] + + bootstrap_process_execute = run_command_with_inputs(bootstrap_command_list, inputs) + + self.assertEqual(bootstrap_process_execute.process.returncode, 0) + stdout = bootstrap_process_execute.stdout.decode() + self.assertIn("skipping creation", stdout) + + def test_no_interactive_with_all_required_resources_provided(self): + stage_name, 
stack_name = self._get_stage_and_stack_name() + self.stack_names = [stack_name] + + bootstrap_command_list = self.get_bootstrap_command_list( + no_interactive=True, + stage_name=stage_name, + pipeline_user="arn:aws:iam::123:user/user-name", # pipeline user + pipeline_execution_role="arn:aws:iam::123:role/role-name", # Pipeline execution role + cloudformation_execution_role="arn:aws:iam::123:role/role-name", # CloudFormation execution role + bucket="arn:aws:s3:::bucket-name", # Artifacts bucket + image_repository="arn:aws:ecr:::repository/repo-name", # ecr repo + region=self.region, + ) + + bootstrap_process_execute = run_command(bootstrap_command_list) + + self.assertEqual(bootstrap_process_execute.process.returncode, 0) + stdout = bootstrap_process_execute.stdout.decode() + self.assertIn("skipping creation", stdout) + + def validate_pipeline_config(self, stack_name, stage_name, cfn_keys_to_check=None): + # Get output values from cloudformation + if cfn_keys_to_check is None: + cfn_keys_to_check = list(CFN_OUTPUT_TO_CONFIG_KEY.keys()) + response = self.cf_client.describe_stacks(StackName=stack_name) + stacks = response["Stacks"] + self.assertTrue(len(stacks) > 0) # in case stack name is invalid + stack_outputs = stacks[0]["Outputs"] + output_values = {} + for value in stack_outputs: + output_values[value["OutputKey"]] = value["OutputValue"] + + # Get values saved in config file + config = SamConfig(PIPELINE_CONFIG_DIR, PIPELINE_CONFIG_FILENAME) + config_values = config.get_all(["pipeline", "bootstrap"], "parameters", stage_name) + config_values = {**config_values, **config.get_all(["pipeline", "bootstrap"], "parameters")} + + for key in CFN_OUTPUT_TO_CONFIG_KEY: + if key not in cfn_keys_to_check: + continue + value = CFN_OUTPUT_TO_CONFIG_KEY[key] + cfn_value = output_values[key] + config_value = config_values[value] + if key == "ImageRepository": + self.assertEqual(cfn_value.split("/")[-1], config_value.split("/")[-1]) + else: + 
self.assertTrue(cfn_value.endswith(config_value) or cfn_value == config_value) + + @parameterized.expand([("confirm_changeset",), (False,)]) + def test_no_interactive_with_some_required_resources_provided(self, confirm_changeset: bool): + stage_name, stack_name = self._get_stage_and_stack_name() + self.stack_names = [stack_name] + + bootstrap_command_list = self.get_bootstrap_command_list( + no_interactive=True, + stage_name=stage_name, + pipeline_user="arn:aws:iam::123:user/user-name", # pipeline user + pipeline_execution_role="arn:aws:iam::123:role/role-name", # Pipeline execution role + # CloudFormation execution role missing + bucket="arn:aws:s3:::bucket-name", # Artifacts bucket + image_repository="arn:aws:ecr:::repository/repo-name", # ecr repo + no_confirm_changeset=not confirm_changeset, + region=self.region, + ) + + inputs = [ + "y", # proceed + ] + + bootstrap_process_execute = run_command_with_inputs(bootstrap_command_list, inputs if confirm_changeset else []) + + self.assertEqual(bootstrap_process_execute.process.returncode, 0) + stdout = bootstrap_process_execute.stdout.decode() + self.assertIn("Successfully created!", stdout) + self.assertIn("CloudFormationExecutionRole", self._extract_created_resource_logical_ids(stack_name)) + + def test_interactive_cancelled_by_user(self): + stage_name, stack_name = self._get_stage_and_stack_name() + self.stack_names = [stack_name] + + bootstrap_command_list = self.get_bootstrap_command_list() + + inputs = [ + stage_name, + CREDENTIAL_PROFILE, + self.region, # region + "arn:aws:iam::123:user/user-name", # pipeline user + "arn:aws:iam::123:role/role-name", # Pipeline execution role + "", # CloudFormation execution role + "arn:aws:s3:::bucket-name", # Artifacts bucket + "N", # Do you have Lambda with package type Image + "", + "", # Create resources confirmation + ] + + bootstrap_process_execute = run_command_with_inputs(bootstrap_command_list, inputs) + + 
self.assertEqual(bootstrap_process_execute.process.returncode, 0) + stdout = bootstrap_process_execute.stdout.decode() + self.assertTrue(stdout.strip().endswith("Canceling pipeline bootstrap creation.")) + self.assertFalse(self._stack_exists(stack_name)) + + def test_interactive_with_some_required_resources_provided(self): + stage_name, stack_name = self._get_stage_and_stack_name() + self.stack_names = [stack_name] + + bootstrap_command_list = self.get_bootstrap_command_list() + + inputs = [ + stage_name, + CREDENTIAL_PROFILE, + self.region, # region + "arn:aws:iam::123:user/user-name", # pipeline user + "arn:aws:iam::123:role/role-name", # Pipeline execution role + "", # CloudFormation execution role + "arn:aws:s3:::bucket-name", # Artifacts bucket + "N", # Do you have Lambda with package type Image + "", + "y", # Create resources confirmation + ] + + bootstrap_process_execute = run_command_with_inputs(bootstrap_command_list, inputs) + + self.assertEqual(bootstrap_process_execute.process.returncode, 0) + stdout = bootstrap_process_execute.stdout.decode() + self.assertIn("Successfully created!", stdout) + # make sure the not provided resource is the only resource created. + self.assertIn("CloudFormationExecutionRole", self._extract_created_resource_logical_ids(stack_name)) + self.validate_pipeline_config(stack_name, stage_name) + + def test_interactive_pipeline_user_only_created_once(self): + """ + Create 3 stages, only the first stage resource stack creates + a pipeline user, and the remaining two share the same pipeline user. 
+ """ + stage_names = [] + for suffix in ["1", "2", "3"]: + stage_name, stack_name = self._get_stage_and_stack_name(suffix) + stage_names.append(stage_name) + self.stack_names.append(stack_name) + + bootstrap_command_list = self.get_bootstrap_command_list() + + for i, stage_name in enumerate(stage_names): + inputs = [ + stage_name, + CREDENTIAL_PROFILE, + self.region, # region + *([""] if i == 0 else []), # pipeline user + "arn:aws:iam::123:role/role-name", # Pipeline execution role + "arn:aws:iam::123:role/role-name", # CloudFormation execution role + "arn:aws:s3:::bucket-name", # Artifacts bucket + "N", # Should we create ECR repo, 3 - specify one + "", + "y", # Create resources confirmation + ] + + bootstrap_process_execute = run_command_with_input( + bootstrap_command_list, ("\n".join(inputs) + "\n").encode() + ) + + self.assertEqual(bootstrap_process_execute.process.returncode, 0) + stdout = bootstrap_process_execute.stdout.decode() + + # Only first environment creates pipeline user + if i == 0: + self.assertIn("The following resources were created in your account:", stdout) + resources = self._extract_created_resource_logical_ids(self.stack_names[i]) + self.assertTrue("PipelineUser" in resources) + self.assertTrue("PipelineUserAccessKey" in resources) + self.assertTrue("PipelineUserSecretKey" in resources) + self.validate_pipeline_config(self.stack_names[i], stage_name) + else: + self.assertIn("skipping creation", stdout) + + @parameterized.expand([("ArtifactsBucket",), ("ArtifactsLoggingBucket",)]) + def test_bootstrapped_buckets_accept_ssl_requests_only(self, bucket_logical_id): + stage_name, stack_name = self._get_stage_and_stack_name() + self.stack_names = [stack_name] + + bootstrap_command_list = self.get_bootstrap_command_list( + stage_name=stage_name, no_interactive=True, no_confirm_changeset=True, region=self.region + ) + + bootstrap_process_execute = run_command(bootstrap_command_list) + + 
self.assertEqual(bootstrap_process_execute.process.returncode, 0) + + stack_resources = self.cf_client.describe_stack_resources(StackName=stack_name) + bucket = next( + resource + for resource in stack_resources["StackResources"] + if resource["LogicalResourceId"] == bucket_logical_id + ) + bucket_name = bucket["PhysicalResourceId"] + bucket_key = "any/testing/key.txt" + testing_data = b"any testing binary data" + + s3_ssl_client = boto3.client("s3", region_name=self.region) + s3_non_ssl_client = boto3.client("s3", use_ssl=False, region_name=self.region) + + # Assert SSL requests are accepted + s3_ssl_client.put_object(Body=testing_data, Bucket=bucket_name, Key=bucket_key) + res = s3_ssl_client.get_object(Bucket=bucket_name, Key=bucket_key) + retrieved_data = res["Body"].read() + self.assertEqual(retrieved_data, testing_data) + + # Assert non SSl requests are denied + with self.assertRaises(ClientError) as error: + s3_non_ssl_client.get_object(Bucket=bucket_name, Key=bucket_key) + self.assertEqual( + str(error.exception), "An error occurred (AccessDenied) when calling the GetObject operation: Access Denied" + ) + + def test_bootstrapped_artifacts_bucket_has_server_access_log_enabled(self): + stage_name, stack_name = self._get_stage_and_stack_name() + self.stack_names = [stack_name] + + bootstrap_command_list = self.get_bootstrap_command_list( + stage_name=stage_name, no_interactive=True, no_confirm_changeset=True, region=self.region + ) + + bootstrap_process_execute = run_command(bootstrap_command_list) + + self.assertEqual(bootstrap_process_execute.process.returncode, 0) + + stack_resources = self.cf_client.describe_stack_resources(StackName=stack_name) + artifacts_bucket = next( + resource + for resource in stack_resources["StackResources"] + if resource["LogicalResourceId"] == "ArtifactsBucket" + ) + artifacts_bucket_name = artifacts_bucket["PhysicalResourceId"] + artifacts_logging_bucket = next( + resource + for resource in stack_resources["StackResources"] + 
if resource["LogicalResourceId"] == "ArtifactsLoggingBucket" + ) + artifacts_logging_bucket_name = artifacts_logging_bucket["PhysicalResourceId"] + + s3_client = boto3.client("s3", region_name=self.region) + res = s3_client.get_bucket_logging(Bucket=artifacts_bucket_name) + self.assertEqual(artifacts_logging_bucket_name, res["LoggingEnabled"]["TargetBucket"]) diff --git a/tests/integration/pipeline/test_init_command.py b/tests/integration/pipeline/test_init_command.py new file mode 100644 index 0000000000..182184a999 --- /dev/null +++ b/tests/integration/pipeline/test_init_command.py @@ -0,0 +1,299 @@ +import os.path +import shutil +from pathlib import Path +from textwrap import dedent +from typing import List +from unittest import skipIf + +from parameterized import parameterized + +from samcli.commands.pipeline.bootstrap.cli import PIPELINE_CONFIG_DIR, PIPELINE_CONFIG_FILENAME +from tests.integration.pipeline.base import InitIntegBase, BootstrapIntegBase +from tests.integration.pipeline.test_bootstrap_command import SKIP_BOOTSTRAP_TESTS, CREDENTIAL_PROFILE +from tests.testing_utils import run_command_with_inputs + +QUICK_START_JENKINS_INPUTS_WITHOUT_AUTO_FILL = [ + "1", # quick start + "1", # jenkins, this depends on the template repo. 
+ "", + "credential-id", + "main", + "template.yaml", + "test", + "test-stack", + "test-pipeline-execution-role", + "test-cfn-execution-role", + "test-bucket", + "test-ecr", + "us-east-2", + "prod", + "prod-stack", + "prod-pipeline-execution-role", + "prod-cfn-execution-role", + "prod-bucket", + "prod-ecr", + "us-west-2", +] + + +class TestInit(InitIntegBase): + """ + Here we use Jenkins template for testing + """ + + def setUp(self) -> None: + # make sure there is no pipelineconfig.toml, otherwise the autofill could affect the question flow + pipelineconfig_file = Path(PIPELINE_CONFIG_DIR, PIPELINE_CONFIG_FILENAME) + if pipelineconfig_file.exists(): + pipelineconfig_file.unlink() + + def tearDown(self) -> None: + super().tearDown() + shutil.rmtree(PIPELINE_CONFIG_DIR, ignore_errors=True) + + def test_quick_start(self): + generated_jenkinsfile_path = Path("Jenkinsfile") + self.generated_files.append(generated_jenkinsfile_path) + + init_command_list = self.get_init_command_list() + init_process_execute = run_command_with_inputs(init_command_list, QUICK_START_JENKINS_INPUTS_WITHOUT_AUTO_FILL) + + self.assertEqual(init_process_execute.process.returncode, 0) + self.assertTrue(Path("Jenkinsfile").exists()) + + expected_file_path = Path(__file__).parent.parent.joinpath(Path("testdata", "pipeline", "expected_jenkinsfile")) + with open(expected_file_path, "r") as expected, open(generated_jenkinsfile_path, "r") as output: + self.assertEqual(expected.read(), output.read()) + + def test_failed_when_generated_file_already_exist_override(self): + generated_jenkinsfile_path = Path("Jenkinsfile") + generated_jenkinsfile_path.touch() # the file now pre-exists + self.generated_files.append(generated_jenkinsfile_path) + + init_command_list = self.get_init_command_list() + init_process_execute = run_command_with_inputs( + init_command_list, [*QUICK_START_JENKINS_INPUTS_WITHOUT_AUTO_FILL, "y"] + ) + + self.assertEqual(init_process_execute.process.returncode, 0) + 
self.assertTrue(Path("Jenkinsfile").exists()) + + expected_file_path = Path(__file__).parent.parent.joinpath(Path("testdata", "pipeline", "expected_jenkinsfile")) + with open(expected_file_path, "r") as expected, open(generated_jenkinsfile_path, "r") as output: + self.assertEqual(expected.read(), output.read()) + + def test_failed_when_generated_file_already_exist_not_override(self): + generated_jenkinsfile_path = Path("Jenkinsfile") + generated_jenkinsfile_path.touch() # the file now pre-exists + self.generated_files.append(generated_jenkinsfile_path) + + init_command_list = self.get_init_command_list() + init_process_execute = run_command_with_inputs( + init_command_list, [*QUICK_START_JENKINS_INPUTS_WITHOUT_AUTO_FILL, ""] + ) + + self.assertEqual(init_process_execute.process.returncode, 0) + + expected_file_path = Path(__file__).parent.parent.joinpath(Path("testdata", "pipeline", "expected_jenkinsfile")) + with open(expected_file_path, "r") as expected, open( + os.path.join(".aws-sam", "pipeline", "generated-files", "Jenkinsfile"), "r" + ) as output: + self.assertEqual(expected.read(), output.read()) + + # also check the Jenkinsfile is not overridden + self.assertEqual("", open("Jenkinsfile", "r").read()) + + def test_custom_template(self): + generated_file = Path("weather") + self.generated_files.append(generated_file) + + custom_template_path = Path(__file__).parent.parent.joinpath(Path("testdata", "pipeline", "custom_template")) + inputs = ["2", str(custom_template_path), "", "Rainy"] # custom template + + init_command_list = self.get_init_command_list() + init_process_execute = run_command_with_inputs(init_command_list, inputs) + + self.assertEqual(init_process_execute.process.returncode, 0) + + self.assertTrue(generated_file.exists()) + + with open(generated_file, "r") as f: + self.assertEqual("Rainy\n", f.read()) + + @parameterized.expand([("with_bootstrap",), (False,)]) + def test_with_pipelineconfig_has_all_stage_values(self, with_bootstrap): + 
generated_jenkinsfile_path = Path("Jenkinsfile") + self.generated_files.append(generated_jenkinsfile_path) + + Path(PIPELINE_CONFIG_DIR).mkdir(parents=True, exist_ok=True) + pipelineconfig_path = Path(PIPELINE_CONFIG_DIR, PIPELINE_CONFIG_FILENAME) + with open(pipelineconfig_path, "w") as f: + f.write( + dedent( + """\ + version = 0.1 + [default] + [default.pipeline_bootstrap] + [default.pipeline_bootstrap.parameters] + pipeline_user = "arn:aws:iam::123:user/aws-sam-cli-managed-test-pipeline-res-PipelineUser-123" + + [test] + [test.pipeline_bootstrap] + [test.pipeline_bootstrap.parameters] + pipeline_execution_role = "test-pipeline-execution-role" + cloudformation_execution_role = "test-cfn-execution-role" + artifacts_bucket = "test-bucket" + image_repository = "test-ecr" + region = "us-east-2" + + [prod] + [prod.pipeline_bootstrap] + [prod.pipeline_bootstrap.parameters] + pipeline_execution_role = "prod-pipeline-execution-role" + cloudformation_execution_role = "prod-cfn-execution-role" + artifacts_bucket = "prod-bucket" + image_repository = "prod-ecr" + region = "us-west-2" + """ + ) + ) + + inputs = [ + "1", # quick start + "1", # jenkins, this depends on the template repo. 
+ "credential-id", + "main", + "template.yaml", + "1", + "test-stack", + "2", + "prod-stack", + ] + + init_command_list = self.get_init_command_list(with_bootstrap) + init_process_execute = run_command_with_inputs(init_command_list, inputs) + + self.assertEqual(init_process_execute.process.returncode, 0) + self.assertTrue(Path("Jenkinsfile").exists()) + + expected_file_path = Path(__file__).parent.parent.joinpath(Path("testdata", "pipeline", "expected_jenkinsfile")) + with open(expected_file_path, "r") as expected, open(generated_jenkinsfile_path, "r") as output: + self.assertEqual(expected.read(), output.read()) + + +@skipIf(SKIP_BOOTSTRAP_TESTS, "Skip bootstrap tests in CI/CD only") +class TestInitWithBootstrap(BootstrapIntegBase): + generated_files: List[Path] = [] + + def setUp(self): + super().setUp() + self.command_list = [self.base_command(), "pipeline", "init", "--bootstrap"] + generated_jenkinsfile_path = Path("Jenkinsfile") + self.generated_files.append(generated_jenkinsfile_path) + + def tearDown(self) -> None: + for generated_file in self.generated_files: + if generated_file.is_dir(): + shutil.rmtree(generated_file, ignore_errors=True) + elif generated_file.exists(): + generated_file.unlink() + super().tearDown() + + def test_without_stages_in_pipeline_config(self): + stage_names = [] + for suffix in ["1", "2"]: + stage_name, stack_name = self._get_stage_and_stack_name(suffix) + stage_names.append(stage_name) + self.stack_names.append(stack_name) + + inputs = [ + "1", # quick start + "1", # jenkins, this depends on the template repo. + "y", # Do you want to go through stage setup process now? + stage_names[0], + CREDENTIAL_PROFILE, + self.region, + "", # pipeline user + "", # Pipeline execution role + "", # CloudFormation execution role + "", # Artifacts bucket + "N", # no ECR repo + "", # Confirm summary + "y", # Create resources + "y", # Do you want to go through stage setup process now? 
+ stage_names[1], + CREDENTIAL_PROFILE, + self.region, + "", # pipeline user + "", # Pipeline execution role + "", # CloudFormation execution role + "", # Artifacts bucket + "N", # no ECR repo + "", # Confirm summary + "y", # Create resources + "credential-id", + "main", + "template.yaml", + "1", + "test-stack", + "2", + "prod-stack", + ] + init_process_execute = run_command_with_inputs(self.command_list, inputs) + self.assertEqual(init_process_execute.process.returncode, 0) + self.assertIn("Here are the stage names detected", init_process_execute.stdout.decode()) + self.assertIn(stage_names[0], init_process_execute.stdout.decode()) + self.assertIn(stage_names[1], init_process_execute.stdout.decode()) + + def test_with_one_stages_in_pipeline_config(self): + stage_names = [] + for suffix in ["1", "2"]: + stage_name, stack_name = self._get_stage_and_stack_name(suffix) + stage_names.append(stage_name) + self.stack_names.append(stack_name) + + bootstrap_command_list = self.get_bootstrap_command_list() + + inputs = [ + stage_names[0], + CREDENTIAL_PROFILE, + self.region, # region + "", # pipeline user + "", # Pipeline execution role + "", # CloudFormation execution role + "", # Artifacts bucket + "N", # no + "", # Confirm summary + "y", # Create resources + ] + + bootstrap_process_execute = run_command_with_inputs(bootstrap_command_list, inputs) + + self.assertEqual(bootstrap_process_execute.process.returncode, 0) + + inputs = [ + "1", # quick start + "1", # jenkins, this depends on the template repo. + "y", # Do you want to go through stage setup process now? 
+ stage_names[1], + CREDENTIAL_PROFILE, + self.region, + "", # Pipeline execution role + "", # CloudFormation execution role + "", # Artifacts bucket + "N", # no ECR repo + "", # Confirm summary + "y", # Create resources + "credential-id", + "main", + "template.yaml", + "1", + "test-stack", + "2", + "prod-stack", + ] + init_process_execute = run_command_with_inputs(self.command_list, inputs) + self.assertEqual(init_process_execute.process.returncode, 0) + self.assertIn("Here are the stage names detected", init_process_execute.stdout.decode()) + self.assertIn(stage_names[0], init_process_execute.stdout.decode()) + self.assertIn(stage_names[1], init_process_execute.stdout.decode()) diff --git a/tests/integration/testdata/pipeline/custom_template/cookiecutter.json b/tests/integration/testdata/pipeline/custom_template/cookiecutter.json new file mode 100644 index 0000000000..c02b7caed1 --- /dev/null +++ b/tests/integration/testdata/pipeline/custom_template/cookiecutter.json @@ -0,0 +1,4 @@ +{ + "outputDir": "aws-sam-pipeline", + "weather": "" +} \ No newline at end of file diff --git a/tests/integration/testdata/pipeline/custom_template/metadata.json b/tests/integration/testdata/pipeline/custom_template/metadata.json new file mode 100644 index 0000000000..689fe297f8 --- /dev/null +++ b/tests/integration/testdata/pipeline/custom_template/metadata.json @@ -0,0 +1,3 @@ +{ + "number_of_stages": 0 +} diff --git a/tests/integration/testdata/pipeline/custom_template/questions.json b/tests/integration/testdata/pipeline/custom_template/questions.json new file mode 100644 index 0000000000..a0fe2167bf --- /dev/null +++ b/tests/integration/testdata/pipeline/custom_template/questions.json @@ -0,0 +1,7 @@ +{ + "questions": [{ + "key": "weather", + "question": "How is the weather today?", + "default": "Sunny" + }] +} \ No newline at end of file diff --git a/tests/integration/testdata/pipeline/custom_template/{{cookiecutter.outputDir}}/weather 
b/tests/integration/testdata/pipeline/custom_template/{{cookiecutter.outputDir}}/weather new file mode 100644 index 0000000000..3501ffd0ae --- /dev/null +++ b/tests/integration/testdata/pipeline/custom_template/{{cookiecutter.outputDir}}/weather @@ -0,0 +1 @@ +{{cookiecutter.weather}} diff --git a/tests/integration/testdata/pipeline/expected_jenkinsfile b/tests/integration/testdata/pipeline/expected_jenkinsfile new file mode 100644 index 0000000000..7a213a30f9 --- /dev/null +++ b/tests/integration/testdata/pipeline/expected_jenkinsfile @@ -0,0 +1,177 @@ +pipeline { + agent any + environment { + PIPELINE_USER_CREDENTIAL_ID = 'credential-id' + SAM_TEMPLATE = 'template.yaml' + MAIN_BRANCH = 'main' + TESTING_STACK_NAME = 'test-stack' + TESTING_PIPELINE_EXECUTION_ROLE = 'test-pipeline-execution-role' + TESTING_CLOUDFORMATION_EXECUTION_ROLE = 'test-cfn-execution-role' + TESTING_ARTIFACTS_BUCKET = 'test-bucket' + TESTING_IMAGE_REPOSITORY = 'test-ecr' + TESTING_REGION = 'us-east-2' + PROD_STACK_NAME = 'prod-stack' + PROD_PIPELINE_EXECUTION_ROLE = 'prod-pipeline-execution-role' + PROD_CLOUDFORMATION_EXECUTION_ROLE = 'prod-cfn-execution-role' + PROD_ARTIFACTS_BUCKET = 'prod-bucket' + PROD_IMAGE_REPOSITORY = 'prod-ecr' + PROD_REGION = 'us-west-2' + } + stages { + // uncomment and modify the following step for running the unit-tests + // stage('test') { + // steps { + // sh ''' + // # trigger the tests here + // ''' + // } + // } + + stage('build-and-deploy-feature') { + // this stage is triggered only for feature branches (feature*), + // which will build the stack and deploy to a stack named with branch name. 
+ when { + branch 'feature*' + } + agent { + docker { + image 'public.ecr.aws/sam/build-provided' + args '--user 0:0 -v /var/run/docker.sock:/var/run/docker.sock' + } + } + steps { + sh 'sam build --template ${SAM_TEMPLATE} --use-container' + withAWS( + credentials: env.PIPELINE_USER_CREDENTIAL_ID, + region: env.TESTING_REGION, + role: env.TESTING_PIPELINE_EXECUTION_ROLE, + roleSessionName: 'deploying-feature') { + sh ''' + sam deploy --stack-name $(echo ${BRANCH_NAME} | tr -cd '[a-zA-Z0-9-]') \ + --capabilities CAPABILITY_IAM \ + --region ${TESTING_REGION} \ + --s3-bucket ${TESTING_ARTIFACTS_BUCKET} \ + --image-repository ${TESTING_IMAGE_REPOSITORY} \ + --no-fail-on-empty-changeset \ + --role-arn ${TESTING_CLOUDFORMATION_EXECUTION_ROLE} + ''' + } + } + } + + stage('build-and-package') { + when { + branch env.MAIN_BRANCH + } + agent { + docker { + image 'public.ecr.aws/sam/build-provided' + args '--user 0:0 -v /var/run/docker.sock:/var/run/docker.sock' + } + } + steps { + sh 'sam build --template ${SAM_TEMPLATE} --use-container' + withAWS( + credentials: env.PIPELINE_USER_CREDENTIAL_ID, + region: env.TESTING_REGION, + role: env.TESTING_PIPELINE_EXECUTION_ROLE, + roleSessionName: 'testing-packaging') { + sh ''' + sam package \ + --s3-bucket ${TESTING_ARTIFACTS_BUCKET} \ + --image-repository ${TESTING_IMAGE_REPOSITORY} \ + --region ${TESTING_REGION} \ + --output-template-file packaged-testing.yaml + ''' + } + + withAWS( + credentials: env.PIPELINE_USER_CREDENTIAL_ID, + region: env.PROD_REGION, + role: env.PROD_PIPELINE_EXECUTION_ROLE, + roleSessionName: 'prod-packaging') { + sh ''' + sam package \ + --s3-bucket ${PROD_ARTIFACTS_BUCKET} \ + --image-repository ${PROD_IMAGE_REPOSITORY} \ + --region ${PROD_REGION} \ + --output-template-file packaged-prod.yaml + ''' + } + + archiveArtifacts artifacts: 'packaged-testing.yaml' + archiveArtifacts artifacts: 'packaged-prod.yaml' + } + } + + stage('deploy-testing') { + when { + branch env.MAIN_BRANCH + } + agent { + docker { + 
image 'public.ecr.aws/sam/build-provided' + } + } + steps { + withAWS( + credentials: env.PIPELINE_USER_CREDENTIAL_ID, + region: env.TESTING_REGION, + role: env.TESTING_PIPELINE_EXECUTION_ROLE, + roleSessionName: 'testing-deployment') { + sh ''' + sam deploy --stack-name ${TESTING_STACK_NAME} \ + --template packaged-testing.yaml \ + --capabilities CAPABILITY_IAM \ + --region ${TESTING_REGION} \ + --s3-bucket ${TESTING_ARTIFACTS_BUCKET} \ + --image-repository ${TESTING_IMAGE_REPOSITORY} \ + --no-fail-on-empty-changeset \ + --role-arn ${TESTING_CLOUDFORMATION_EXECUTION_ROLE} + ''' + } + } + } + + // uncomment and modify the following step for running the integration-tests + // stage('integration-test') { + // when { + // branch env.MAIN_BRANCH + // } + // steps { + // sh ''' + // # trigger the integration tests here + // ''' + // } + // } + + stage('deploy-prod') { + when { + branch env.MAIN_BRANCH + } + agent { + docker { + image 'public.ecr.aws/sam/build-provided' + } + } + steps { + withAWS( + credentials: env.PIPELINE_USER_CREDENTIAL_ID, + region: env.PROD_REGION, + role: env.PROD_PIPELINE_EXECUTION_ROLE, + roleSessionName: 'prod-deployment') { + sh ''' + sam deploy --stack-name ${PROD_STACK_NAME} \ + --template packaged-prod.yaml \ + --capabilities CAPABILITY_IAM \ + --region ${PROD_REGION} \ + --s3-bucket ${PROD_ARTIFACTS_BUCKET} \ + --image-repository ${PROD_IMAGE_REPOSITORY} \ + --no-fail-on-empty-changeset \ + --role-arn ${PROD_CLOUDFORMATION_EXECUTION_ROLE} + ''' + } + } + } + } +} diff --git a/tests/testing_utils.py b/tests/testing_utils.py index 0cc7aa3067..78da67ab0c 100644 --- a/tests/testing_utils.py +++ b/tests/testing_utils.py @@ -5,6 +5,7 @@ import shutil from collections import namedtuple from subprocess import Popen, PIPE, TimeoutExpired +from typing import List IS_WINDOWS = platform.system().lower() == "windows" RUNNING_ON_CI = os.environ.get("APPVEYOR", False) @@ -50,6 +51,10 @@ def run_command_with_input(command_list, stdin_input, 
timeout=TIMEOUT) -> Comman raise +def run_command_with_inputs(command_list: List[str], inputs: List[str], timeout=TIMEOUT) -> CommandResult: + return run_command_with_input(command_list, ("\n".join(inputs) + "\n").encode(), timeout) + + class FileCreator(object): def __init__(self): self.rootdir = tempfile.mkdtemp() diff --git a/tests/unit/commands/_utils/test_template.py b/tests/unit/commands/_utils/test_template.py index be4001be68..1de707ec38 100644 --- a/tests/unit/commands/_utils/test_template.py +++ b/tests/unit/commands/_utils/test_template.py @@ -1,12 +1,10 @@ -import os import copy +import os +from unittest import TestCase +from unittest.mock import patch, mock_open, MagicMock -import jmespath import yaml from botocore.utils import set_value_from_jmespath - -from unittest import TestCase -from unittest.mock import patch, mock_open, MagicMock from parameterized import parameterized, param from samcli.commands._utils.resources import AWS_SERVERLESS_FUNCTION, AWS_SERVERLESS_API diff --git a/tests/unit/commands/deploy/test_guided_context.py b/tests/unit/commands/deploy/test_guided_context.py index 6e49b73a60..7b31ff60eb 100644 --- a/tests/unit/commands/deploy/test_guided_context.py +++ b/tests/unit/commands/deploy/test_guided_context.py @@ -666,7 +666,7 @@ def test_guided_prompts_with_code_signing( expected_code_sign_calls = expected_code_sign_calls * (number_of_functions + number_of_layers) self.assertEqual(expected_code_sign_calls, patched_code_signer_prompt.call_args_list) - @patch("samcli.commands.deploy.guided_context.get_session") + @patch("samcli.commands.deploy.guided_context.get_default_aws_region") @patch("samcli.commands.deploy.guided_context.prompt") @patch("samcli.commands.deploy.guided_context.confirm") @patch("samcli.commands.deploy.guided_context.manage_stack") @@ -685,7 +685,7 @@ def test_guided_prompts_check_default_config_region( patched_manage_stack, patched_confirm, patched_prompt, - patched_get_session, + patched_get_default_aws_region, 
): patched_sam_function_provider.return_value = {} patched_get_template_artifacts_format.return_value = [ZIP] @@ -695,7 +695,7 @@ def test_guided_prompts_check_default_config_region( patched_confirm.side_effect = [True, False, True, True, ""] patched_signer_config_per_function.return_value = ({}, {}) patched_manage_stack.return_value = "managed_s3_stack" - patched_get_session.return_value.get_config_variable.return_value = "default_config_region" + patched_get_default_aws_region.return_value = "default_config_region" # setting the default region to None self.gc.region = None self.gc.guided_prompts(parameter_override_keys=None) diff --git a/tests/unit/commands/pipeline/__init__.py b/tests/unit/commands/pipeline/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/tests/unit/commands/pipeline/bootstrap/__init__.py b/tests/unit/commands/pipeline/bootstrap/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/tests/unit/commands/pipeline/bootstrap/test_cli.py b/tests/unit/commands/pipeline/bootstrap/test_cli.py new file mode 100644 index 0000000000..649fbbdf32 --- /dev/null +++ b/tests/unit/commands/pipeline/bootstrap/test_cli.py @@ -0,0 +1,276 @@ +from unittest import TestCase +from unittest.mock import patch, Mock + +import click +from click.testing import CliRunner + +from samcli.commands.pipeline.bootstrap.cli import ( + _load_saved_pipeline_user_arn, + _get_bootstrap_command_names, + PIPELINE_CONFIG_FILENAME, + PIPELINE_CONFIG_DIR, +) +from samcli.commands.pipeline.bootstrap.cli import cli as bootstrap_cmd +from samcli.commands.pipeline.bootstrap.cli import do_cli as bootstrap_cli + +ANY_REGION = "ANY_REGION" +ANY_PROFILE = "ANY_PROFILE" +ANY_STAGE_NAME = "ANY_STAGE_NAME" +ANY_PIPELINE_USER_ARN = "ANY_PIPELINE_USER_ARN" +ANY_PIPELINE_EXECUTION_ROLE_ARN = "ANY_PIPELINE_EXECUTION_ROLE_ARN" +ANY_CLOUDFORMATION_EXECUTION_ROLE_ARN = "ANY_CLOUDFORMATION_EXECUTION_ROLE_ARN" +ANY_ARTIFACTS_BUCKET_ARN = "ANY_ARTIFACTS_BUCKET_ARN" 
+ANY_IMAGE_REPOSITORY_ARN = "ANY_IMAGE_REPOSITORY_ARN" +ANY_ARN = "ANY_ARN" +ANY_CONFIG_FILE = "ANY_CONFIG_FILE" +ANY_CONFIG_ENV = "ANY_CONFIG_ENV" +PIPELINE_BOOTSTRAP_COMMAND_NAMES = ["pipeline", "bootstrap"] + + +class TestCli(TestCase): + def setUp(self) -> None: + self.cli_context = { + "region": ANY_REGION, + "profile": ANY_PROFILE, + "interactive": True, + "stage_name": ANY_STAGE_NAME, + "pipeline_user_arn": ANY_PIPELINE_USER_ARN, + "pipeline_execution_role_arn": ANY_PIPELINE_EXECUTION_ROLE_ARN, + "cloudformation_execution_role_arn": ANY_CLOUDFORMATION_EXECUTION_ROLE_ARN, + "artifacts_bucket_arn": ANY_ARTIFACTS_BUCKET_ARN, + "create_image_repository": True, + "image_repository_arn": ANY_IMAGE_REPOSITORY_ARN, + "confirm_changeset": True, + "config_file": ANY_CONFIG_FILE, + "config_env": ANY_CONFIG_ENV, + } + + @patch("samcli.commands.pipeline.bootstrap.cli.do_cli") + def test_bootstrap_command_default_argument_values(self, do_cli_mock): + runner: CliRunner = CliRunner() + runner.invoke(bootstrap_cmd) + # Test the defaults are as following: + # interactive -> True + # create_image_repository -> False + # confirm_changeset -> True + # region, profile, stage_name and all ARNs are None + do_cli_mock.assert_called_once_with( + region=None, + profile=None, + interactive=True, + stage_name=None, + pipeline_user_arn=None, + pipeline_execution_role_arn=None, + cloudformation_execution_role_arn=None, + artifacts_bucket_arn=None, + create_image_repository=False, + image_repository_arn=None, + confirm_changeset=True, + config_file="default", + config_env="samconfig.toml", + ) + + @patch("samcli.commands.pipeline.bootstrap.cli.do_cli") + def test_bootstrap_command_flag_arguments(self, do_cli_mock): + runner: CliRunner = CliRunner() + runner.invoke(bootstrap_cmd, args=["--interactive", "--no-create-image-repository", "--confirm-changeset"]) + args, kwargs = do_cli_mock.call_args + self.assertTrue(kwargs["interactive"]) + self.assertFalse(kwargs["create_image_repository"]) + 
self.assertTrue(kwargs["confirm_changeset"]) + + runner.invoke(bootstrap_cmd, args=["--no-interactive", "--create-image-repository", "--no-confirm-changeset"]) + args, kwargs = do_cli_mock.call_args + self.assertFalse(kwargs["interactive"]) + self.assertTrue(kwargs["create_image_repository"]) + self.assertFalse(kwargs["confirm_changeset"]) + + @patch("samcli.commands.pipeline.bootstrap.cli.do_cli") + def test_bootstrap_command_with_different_arguments_combination(self, do_cli_mock): + runner: CliRunner = CliRunner() + runner.invoke( + bootstrap_cmd, + args=["--no-interactive", "--stage", "environment1", "--bucket", "bucketARN"], + ) + args, kwargs = do_cli_mock.call_args + self.assertFalse(kwargs["interactive"]) + self.assertEqual(kwargs["stage_name"], "environment1") + self.assertEqual(kwargs["artifacts_bucket_arn"], "bucketARN") + + @patch("samcli.commands.pipeline.bootstrap.cli._get_bootstrap_command_names") + @patch("samcli.commands.pipeline.bootstrap.cli._load_saved_pipeline_user_arn") + @patch("samcli.commands.pipeline.bootstrap.cli.Stage") + @patch("samcli.commands.pipeline.bootstrap.cli.GuidedContext") + def test_bootstrapping_normal_interactive_flow( + self, guided_context_mock, environment_mock, load_saved_pipeline_user_arn_mock, get_command_names_mock + ): + # setup + gc_instance = Mock() + guided_context_mock.return_value = gc_instance + environment_instance = Mock() + environment_mock.return_value = environment_instance + load_saved_pipeline_user_arn_mock.return_value = ANY_PIPELINE_USER_ARN + self.cli_context["interactive"] = True + self.cli_context["pipeline_user_arn"] = None + get_command_names_mock.return_value = PIPELINE_BOOTSTRAP_COMMAND_NAMES + + # trigger + bootstrap_cli(**self.cli_context) + + # verify + load_saved_pipeline_user_arn_mock.assert_called_once() + gc_instance.run.assert_called_once() + environment_instance.bootstrap.assert_called_once_with(confirm_changeset=True) + environment_instance.print_resources_summary.assert_called_once() 
+ environment_instance.save_config_safe.assert_called_once_with( + config_dir=PIPELINE_CONFIG_DIR, + filename=PIPELINE_CONFIG_FILENAME, + cmd_names=PIPELINE_BOOTSTRAP_COMMAND_NAMES, + ) + + @patch("samcli.commands.pipeline.bootstrap.cli._get_bootstrap_command_names") + @patch("samcli.commands.pipeline.bootstrap.cli._load_saved_pipeline_user_arn") + @patch("samcli.commands.pipeline.bootstrap.cli.Stage") + @patch("samcli.commands.pipeline.bootstrap.cli.GuidedContext") + def test_bootstrap_will_not_try_loading_pipeline_user_if_already_provided( + self, guided_context_mock, environment_mock, load_saved_pipeline_user_arn_mock, get_command_names_mock + ): + bootstrap_cli(**self.cli_context) + load_saved_pipeline_user_arn_mock.assert_not_called() + + @patch("samcli.commands.pipeline.bootstrap.cli._get_bootstrap_command_names") + @patch("samcli.commands.pipeline.bootstrap.cli._load_saved_pipeline_user_arn") + @patch("samcli.commands.pipeline.bootstrap.cli.Stage") + @patch("samcli.commands.pipeline.bootstrap.cli.GuidedContext") + def test_bootstrap_will_try_loading_pipeline_user_if_not_provided( + self, guided_context_mock, environment_mock, load_saved_pipeline_user_arn_mock, get_command_names_mock + ): + self.cli_context["pipeline_user_arn"] = None + bootstrap_cli(**self.cli_context) + load_saved_pipeline_user_arn_mock.assert_called_once() + + @patch("samcli.commands.pipeline.bootstrap.cli._get_bootstrap_command_names") + @patch("samcli.commands.pipeline.bootstrap.cli._load_saved_pipeline_user_arn") + @patch("samcli.commands.pipeline.bootstrap.cli.Stage") + @patch("samcli.commands.pipeline.bootstrap.cli.GuidedContext") + def test_stage_name_is_required_to_be_provided_in_case_of_non_interactive_mode( + self, guided_context_mock, environment_mock, load_saved_pipeline_user_arn_mock, get_command_names_mock + ): + self.cli_context["interactive"] = False + self.cli_context["stage_name"] = None + with self.assertRaises(click.UsageError): + bootstrap_cli(**self.cli_context) + + 
@patch("samcli.commands.pipeline.bootstrap.cli._get_bootstrap_command_names") + @patch("samcli.commands.pipeline.bootstrap.cli._load_saved_pipeline_user_arn") + @patch("samcli.commands.pipeline.bootstrap.cli.Stage") + @patch("samcli.commands.pipeline.bootstrap.cli.GuidedContext") + def test_stage_name_is_not_required_to_be_provided_in_case_of_interactive_mode( + self, guided_context_mock, environment_mock, load_saved_pipeline_user_arn_mock, get_command_names_mock + ): + self.cli_context["interactive"] = True + self.cli_context["stage_name"] = None + bootstrap_cli(**self.cli_context) # No exception is thrown + + @patch("samcli.commands.pipeline.bootstrap.cli._get_bootstrap_command_names") + @patch("samcli.commands.pipeline.bootstrap.cli._load_saved_pipeline_user_arn") + @patch("samcli.commands.pipeline.bootstrap.cli.Stage") + @patch("samcli.commands.pipeline.bootstrap.cli.GuidedContext") + def test_guided_context_will_be_enabled_or_disabled_based_on_the_interactive_mode( + self, guided_context_mock, environment_mock, load_saved_pipeline_user_arn_mock, get_command_names_mock + ): + gc_instance = Mock() + guided_context_mock.return_value = gc_instance + self.cli_context["interactive"] = False + bootstrap_cli(**self.cli_context) + gc_instance.run.assert_not_called() + self.cli_context["interactive"] = True + bootstrap_cli(**self.cli_context) + gc_instance.run.assert_called_once() + + @patch("samcli.commands.pipeline.bootstrap.cli._get_bootstrap_command_names") + @patch("samcli.commands.pipeline.bootstrap.cli._load_saved_pipeline_user_arn") + @patch("samcli.commands.pipeline.bootstrap.cli.Stage") + @patch("samcli.commands.pipeline.bootstrap.cli.GuidedContext") + def test_bootstrapping_will_confirm_before_creating_the_resources_unless_the_user_choose_not_to( + self, guided_context_mock, environment_mock, load_saved_pipeline_user_arn_mock, get_command_names_mock + ): + environment_instance = Mock() + environment_mock.return_value = environment_instance + 
self.cli_context["confirm_changeset"] = False + bootstrap_cli(**self.cli_context) + environment_instance.bootstrap.assert_called_once_with(confirm_changeset=False) + environment_instance.bootstrap.reset_mock() + self.cli_context["confirm_changeset"] = True + bootstrap_cli(**self.cli_context) + environment_instance.bootstrap.assert_called_once_with(confirm_changeset=True) + + @patch("samcli.commands.pipeline.bootstrap.cli.SamConfig") + @patch("samcli.commands.pipeline.bootstrap.cli._get_bootstrap_command_names") + def test_load_saved_pipeline_user_arn_will_read_from_the_correct_file( + self, get_command_names_mock, sam_config_mock + ): + # setup + get_command_names_mock.return_value = PIPELINE_BOOTSTRAP_COMMAND_NAMES + sam_config_instance_mock = Mock() + sam_config_mock.return_value = sam_config_instance_mock + sam_config_instance_mock.exists.return_value = False + + # trigger + _load_saved_pipeline_user_arn() + + # verify + sam_config_mock.assert_called_once_with(config_dir=PIPELINE_CONFIG_DIR, filename=PIPELINE_CONFIG_FILENAME) + + @patch("samcli.commands.pipeline.bootstrap.cli.SamConfig") + @patch("samcli.commands.pipeline.bootstrap.cli._get_bootstrap_command_names") + def test_load_saved_pipeline_user_arn_will_return_non_if_the_pipeline_toml_file_is_not_found( + self, get_command_names_mock, sam_config_mock + ): + # setup + get_command_names_mock.return_value = PIPELINE_BOOTSTRAP_COMMAND_NAMES + sam_config_instance_mock = Mock() + sam_config_mock.return_value = sam_config_instance_mock + sam_config_instance_mock.exists.return_value = False + + # trigger + pipeline_user_arn = _load_saved_pipeline_user_arn() + + # verify + self.assertIsNone(pipeline_user_arn) + + @patch("samcli.commands.pipeline.bootstrap.cli.SamConfig") + @patch("samcli.commands.pipeline.bootstrap.cli._get_bootstrap_command_names") + def test_load_saved_pipeline_user_arn_will_return_non_if_the_pipeline_toml_file_does_not_contain_pipeline_user( + self, get_command_names_mock, sam_config_mock + ): 
+ # setup + get_command_names_mock.return_value = PIPELINE_BOOTSTRAP_COMMAND_NAMES + sam_config_instance_mock = Mock() + sam_config_mock.return_value = sam_config_instance_mock + sam_config_instance_mock.exists.return_value = True + sam_config_instance_mock.get_all.return_value = {"non-pipeline_user-key": "any_value"} + + # trigger + pipeline_user_arn = _load_saved_pipeline_user_arn() + + # verify + self.assertIsNone(pipeline_user_arn) + + @patch("samcli.commands.pipeline.bootstrap.cli.SamConfig") + @patch("samcli.commands.pipeline.bootstrap.cli._get_bootstrap_command_names") + def test_load_saved_pipeline_user_arn_returns_the_pipeline_user_arn_from_the_pipeline_toml_file( + self, get_command_names_mock, sam_config_mock + ): + # setup + get_command_names_mock.return_value = PIPELINE_BOOTSTRAP_COMMAND_NAMES + sam_config_instance_mock = Mock() + sam_config_mock.return_value = sam_config_instance_mock + sam_config_instance_mock.exists.return_value = True + sam_config_instance_mock.get_all.return_value = {"pipeline_user": ANY_PIPELINE_USER_ARN} + + # trigger + pipeline_user_arn = _load_saved_pipeline_user_arn() + + # verify + self.assertEqual(pipeline_user_arn, ANY_PIPELINE_USER_ARN) diff --git a/tests/unit/commands/pipeline/bootstrap/test_guided_context.py b/tests/unit/commands/pipeline/bootstrap/test_guided_context.py new file mode 100644 index 0000000000..c4c11e9792 --- /dev/null +++ b/tests/unit/commands/pipeline/bootstrap/test_guided_context.py @@ -0,0 +1,231 @@ +from unittest import TestCase +from unittest.mock import patch, Mock, ANY + +from parameterized import parameterized + +from samcli.commands.pipeline.bootstrap.guided_context import GuidedContext + +ANY_STAGE_NAME = "ANY_STAGE_NAME" +ANY_PIPELINE_USER_ARN = "ANY_PIPELINE_USER_ARN" +ANY_PIPELINE_EXECUTION_ROLE_ARN = "ANY_PIPELINE_EXECUTION_ROLE_ARN" +ANY_CLOUDFORMATION_EXECUTION_ROLE_ARN = "ANY_CLOUDFORMATION_EXECUTION_ROLE_ARN" +ANY_ARTIFACTS_BUCKET_ARN = "ANY_ARTIFACTS_BUCKET_ARN" 
+ANY_IMAGE_REPOSITORY_ARN = "ANY_IMAGE_REPOSITORY_ARN" +ANY_ARN = "ANY_ARN" +ANY_REGION = "us-east-2" + + +class TestGuidedContext(TestCase): + @patch("samcli.commands.pipeline.bootstrap.guided_context.get_current_account_id") + @patch("samcli.commands.pipeline.bootstrap.guided_context.click") + @patch("samcli.commands.pipeline.bootstrap.guided_context.GuidedContext._prompt_account_id") + def test_guided_context_will_not_prompt_for_fields_that_are_already_provided( + self, prompt_account_id_mock, click_mock, account_id_mock + ): + account_id_mock.return_value = "1234567890" + click_mock.confirm.return_value = False + click_mock.prompt = Mock(return_value="0") + gc: GuidedContext = GuidedContext( + stage_name=ANY_STAGE_NAME, + pipeline_user_arn=ANY_PIPELINE_USER_ARN, + pipeline_execution_role_arn=ANY_PIPELINE_EXECUTION_ROLE_ARN, + cloudformation_execution_role_arn=ANY_CLOUDFORMATION_EXECUTION_ROLE_ARN, + artifacts_bucket_arn=ANY_ARTIFACTS_BUCKET_ARN, + create_image_repository=True, + image_repository_arn=ANY_IMAGE_REPOSITORY_ARN, + region=ANY_REGION, + ) + gc.run() + # there should only two prompt to ask + # 1. which account to use (mocked in _prompt_account_id(), not contributing to count) + # 2. 
what values customers want to change + prompt_account_id_mock.assert_called_once() + click_mock.prompt.assert_called_once() + + @patch("samcli.commands.pipeline.bootstrap.guided_context.get_current_account_id") + @patch("samcli.commands.pipeline.bootstrap.guided_context.click") + @patch("samcli.commands.pipeline.bootstrap.guided_context.GuidedContext._prompt_account_id") + def test_guided_context_will_prompt_for_fields_that_are_not_provided( + self, prompt_account_id_mock, click_mock, account_id_mock + ): + account_id_mock.return_value = "1234567890" + click_mock.confirm.return_value = False + click_mock.prompt = Mock(return_value="0") + gc: GuidedContext = GuidedContext( + image_repository_arn=ANY_IMAGE_REPOSITORY_ARN # Exclude ECR repo, it has its own detailed test below + ) + gc.run() + prompt_account_id_mock.assert_called_once() + self.assertTrue(self.did_prompt_text_like("Stage Name", click_mock.prompt)) + self.assertTrue(self.did_prompt_text_like("Pipeline IAM user", click_mock.prompt)) + self.assertTrue(self.did_prompt_text_like("Pipeline execution role", click_mock.prompt)) + self.assertTrue(self.did_prompt_text_like("CloudFormation execution role", click_mock.prompt)) + self.assertTrue(self.did_prompt_text_like("Artifact bucket", click_mock.prompt)) + self.assertTrue(self.did_prompt_text_like("region", click_mock.prompt)) + + @patch("samcli.commands.pipeline.bootstrap.guided_context.get_current_account_id") + @patch("samcli.commands.pipeline.bootstrap.guided_context.click") + @patch("samcli.commands.pipeline.bootstrap.guided_context.GuidedContext._prompt_account_id") + def test_guided_context_will_not_prompt_for_not_provided_image_repository_if_no_image_repository_is_required( + self, prompt_account_id_mock, click_mock, account_id_mock + ): + account_id_mock.return_value = "1234567890" + # ECR Image Repository choices: + # 1 - No, My SAM Template won't include lambda functions of Image package-type + # 2 - Yes, I need a help creating one + # 3 - I already 
have an ECR image repository + gc_without_ecr_info: GuidedContext = GuidedContext( + stage_name=ANY_STAGE_NAME, + pipeline_user_arn=ANY_PIPELINE_USER_ARN, + pipeline_execution_role_arn=ANY_PIPELINE_EXECUTION_ROLE_ARN, + cloudformation_execution_role_arn=ANY_CLOUDFORMATION_EXECUTION_ROLE_ARN, + artifacts_bucket_arn=ANY_ARTIFACTS_BUCKET_ARN, + ) + + self.assertIsNone(gc_without_ecr_info.image_repository_arn) + + click_mock.confirm.return_value = False # the user chose to not CREATE an ECR Image repository + click_mock.prompt.side_effect = [None, "0"] + gc_without_ecr_info.run() + self.assertIsNone(gc_without_ecr_info.image_repository_arn) + self.assertFalse(gc_without_ecr_info.create_image_repository) + self.assertFalse(self.did_prompt_text_like("Please enter the ECR image repository", click_mock.prompt)) + + click_mock.confirm.return_value = True # the user chose to CREATE an ECR Image repository + click_mock.prompt.side_effect = [None, None, "0"] + gc_without_ecr_info.run() + self.assertIsNone(gc_without_ecr_info.image_repository_arn) + self.assertTrue(gc_without_ecr_info.create_image_repository) + self.assertTrue(self.did_prompt_text_like("Please enter the ECR image repository", click_mock.prompt)) + + click_mock.confirm.return_value = True # the user already has a repo + click_mock.prompt.side_effect = [None, ANY_IMAGE_REPOSITORY_ARN, "0"] + gc_without_ecr_info.run() + self.assertFalse(gc_without_ecr_info.create_image_repository) + self.assertTrue( + self.did_prompt_text_like("Please enter the ECR image repository", click_mock.prompt) + ) # we've asked about it + self.assertEqual(gc_without_ecr_info.image_repository_arn, ANY_IMAGE_REPOSITORY_ARN) + + @staticmethod + def did_prompt_text_like(txt, click_prompt_mock): + txt = txt.lower() + for kall in click_prompt_mock.call_args_list: + args, kwargs = kall + if args: + text = args[0].lower() + else: + text = kwargs.get("text", "").lower() + if txt in text: + return True + return False + + +class 
class TestGuidedContext_prompt_account_id(TestCase):
    """Tests for GuidedContext._prompt_account_id: the interactive prompt that lets
    the user pick the AWS account source (credential environment variables, a named
    profile, or quit).

    Fix over original: ``assertEquals`` is a deprecated alias that was removed in
    Python 3.12 — replaced with ``assertEqual``.
    """

    @patch("samcli.commands.pipeline.bootstrap.guided_context.get_current_account_id")
    @patch("samcli.commands.pipeline.bootstrap.guided_context.click")
    @patch("samcli.commands.pipeline.bootstrap.guided_context.os.getenv")
    @patch("samcli.commands.pipeline.bootstrap.guided_context.list_available_profiles")
    def test_prompt_account_id_can_display_profiles_and_environment(
        self, list_available_profiles_mock, getenv_mock, click_mock, get_current_account_id_mock
    ):
        # Credential env vars exist, so option "1" (environment variables) is offered
        # in addition to the two profiles ("2", "3") and quit ("q").
        getenv_mock.return_value = "not None"
        list_available_profiles_mock.return_value = ["profile1", "profile2"]
        click_mock.prompt.return_value = "1"  # select environment variable
        get_current_account_id_mock.return_value = "account_id"

        guided_context_mock = Mock()
        GuidedContext._prompt_account_id(guided_context_mock)

        click_mock.prompt.assert_called_once_with(
            ANY, show_choices=False, show_default=False, type=click_mock.Choice(["1", "2", "3", "q"])
        )

    @patch("samcli.commands.pipeline.bootstrap.guided_context.get_current_account_id")
    @patch("samcli.commands.pipeline.bootstrap.guided_context.click")
    @patch("samcli.commands.pipeline.bootstrap.guided_context.os.getenv")
    @patch("samcli.commands.pipeline.bootstrap.guided_context.list_available_profiles")
    def test_prompt_account_id_wont_show_environment_option_when_it_doesnt_exist(
        self, list_available_profiles_mock, getenv_mock, click_mock, get_current_account_id_mock
    ):
        # No credential env vars -> the environment-variable option is omitted and
        # the choices start at "2".
        getenv_mock.return_value = None
        list_available_profiles_mock.return_value = ["profile1", "profile2"]
        click_mock.prompt.return_value = "1"  # select environment variable
        get_current_account_id_mock.return_value = "account_id"

        guided_context_mock = Mock()
        GuidedContext._prompt_account_id(guided_context_mock)

        click_mock.prompt.assert_called_once_with(
            ANY, show_choices=False, show_default=False, type=click_mock.Choice(["2", "3", "q"])
        )

    @patch("samcli.commands.pipeline.bootstrap.guided_context.get_current_account_id")
    @patch("samcli.commands.pipeline.bootstrap.guided_context.click")
    @patch("samcli.commands.pipeline.bootstrap.guided_context.os.getenv")
    @patch("samcli.commands.pipeline.bootstrap.guided_context.list_available_profiles")
    def test_prompt_account_id_select_environment_unset_self_profile(
        self, list_available_profiles_mock, getenv_mock, click_mock, get_current_account_id_mock
    ):
        # Choosing the environment-variable option must clear any previously set profile.
        getenv_mock.return_value = "not None"
        list_available_profiles_mock.return_value = ["profile1", "profile2"]
        click_mock.prompt.return_value = "1"  # select environment variable
        get_current_account_id_mock.return_value = "account_id"

        guided_context_mock = Mock()
        GuidedContext._prompt_account_id(guided_context_mock)

        self.assertEqual(None, guided_context_mock.profile)

    @parameterized.expand(
        [
            (
                "2",
                "profile1",
            ),
            (
                "3",
                "profile2",
            ),
        ]
    )
    @patch("samcli.commands.pipeline.bootstrap.guided_context.get_current_account_id")
    @patch("samcli.commands.pipeline.bootstrap.guided_context.click")
    @patch("samcli.commands.pipeline.bootstrap.guided_context.os.getenv")
    @patch("samcli.commands.pipeline.bootstrap.guided_context.list_available_profiles")
    def test_prompt_account_id_select_profile_set_profile_to_its_name(
        self,
        profile_selection,
        expected_profile,
        list_available_profiles_mock,
        getenv_mock,
        click_mock,
        get_current_account_id_mock,
    ):
        # Choosing option "2"/"3" must set the profile to the corresponding name.
        getenv_mock.return_value = "not None"
        list_available_profiles_mock.return_value = ["profile1", "profile2"]
        click_mock.prompt.return_value = profile_selection
        get_current_account_id_mock.return_value = "account_id"

        guided_context_mock = Mock()
        GuidedContext._prompt_account_id(guided_context_mock)

        self.assertEqual(expected_profile, guided_context_mock.profile)

    @patch("samcli.commands.pipeline.bootstrap.guided_context.sys.exit")
    @patch("samcli.commands.pipeline.bootstrap.guided_context.get_current_account_id")
    @patch("samcli.commands.pipeline.bootstrap.guided_context.click")
    @patch("samcli.commands.pipeline.bootstrap.guided_context.os.getenv")
    @patch("samcli.commands.pipeline.bootstrap.guided_context.list_available_profiles")
    def test_prompt_account_id_select_quit(
        self, list_available_profiles_mock, getenv_mock, click_mock, get_current_account_id_mock, exit_mock
    ):
        # Choosing "q" must exit the process with status 0.
        getenv_mock.return_value = "not None"
        list_available_profiles_mock.return_value = ["profile1", "profile2"]
        click_mock.prompt.return_value = "q"  # quit
        get_current_account_id_mock.return_value = "account_id"

        guided_context_mock = Mock()
        GuidedContext._prompt_account_id(guided_context_mock)

        exit_mock.assert_called_once_with(0)
we don't accept any command arguments, + # instead we ask the user about the required arguments in an interactive way + do_cli_mock.assert_called_once_with(False) # Called without arguments + + @patch("samcli.commands.pipeline.init.cli.InteractiveInitFlow.do_interactive") + def test_do_cli(self, do_interactive_mock): + init_cli(False) + do_interactive_mock.assert_called_once_with() # Called without arguments diff --git a/tests/unit/commands/pipeline/init/test_initeractive_init_flow.py b/tests/unit/commands/pipeline/init/test_initeractive_init_flow.py new file mode 100644 index 0000000000..2cdaacc91e --- /dev/null +++ b/tests/unit/commands/pipeline/init/test_initeractive_init_flow.py @@ -0,0 +1,566 @@ +import json +import shutil +import tempfile +from unittest import TestCase +from unittest.mock import patch, Mock, call +import os +from pathlib import Path + +from parameterized import parameterized + +from samcli.commands.exceptions import AppPipelineTemplateMetadataException +from samcli.commands.pipeline.init.interactive_init_flow import ( + InteractiveInitFlow, + PipelineTemplateCloneException, + APP_PIPELINE_TEMPLATES_REPO_LOCAL_NAME, + shared_path, + CUSTOM_PIPELINE_TEMPLATE_REPO_LOCAL_NAME, + _prompt_cicd_provider, + _prompt_provider_pipeline_template, + _get_pipeline_template_metadata, + _copy_dir_contents_to_cwd, +) +from samcli.commands.pipeline.init.pipeline_templates_manifest import AppPipelineTemplateManifestException +from samcli.lib.utils.git_repo import CloneRepoException +from samcli.lib.cookiecutter.interactive_flow_creator import QuestionsNotFoundException + + +class TestInteractiveInitFlow(TestCase): + @patch("samcli.commands.pipeline.init.interactive_init_flow._read_app_pipeline_templates_manifest") + @patch("samcli.commands.pipeline.init.interactive_init_flow._prompt_pipeline_template") + @patch("samcli.commands.pipeline.init.interactive_init_flow.InteractiveInitFlow._generate_from_pipeline_template") + 
@patch("samcli.commands.pipeline.init.interactive_init_flow.shared_path") + @patch("samcli.commands.pipeline.init.interactive_init_flow.GitRepo.clone") + @patch("samcli.lib.cookiecutter.question.click") + def test_app_pipeline_templates_clone_fail_when_an_old_clone_exists( + self, + click_mock, + clone_mock, + shared_path_mock, + generate_from_pipeline_template_mock, + select_pipeline_template_mock, + read_app_pipeline_templates_manifest_mock, + ): + # setup + clone_mock.side_effect = CloneRepoException # clone fail + app_pipeline_templates_path_mock = Mock() + selected_pipeline_template_path_mock = Mock() + pipeline_templates_manifest_mock = Mock() + shared_path_mock.joinpath.return_value = app_pipeline_templates_path_mock + app_pipeline_templates_path_mock.exists.return_value = True # An old clone exists + app_pipeline_templates_path_mock.joinpath.return_value = selected_pipeline_template_path_mock + read_app_pipeline_templates_manifest_mock.return_value = pipeline_templates_manifest_mock + click_mock.prompt.return_value = "1" # App pipeline templates + + # trigger + InteractiveInitFlow(allow_bootstrap=False).do_interactive() + + # verify + clone_mock.assert_called_once_with( + shared_path_mock, APP_PIPELINE_TEMPLATES_REPO_LOCAL_NAME, replace_existing=True + ) + app_pipeline_templates_path_mock.exists.assert_called_once() + read_app_pipeline_templates_manifest_mock.assert_called_once_with(app_pipeline_templates_path_mock) + select_pipeline_template_mock.assert_called_once_with(pipeline_templates_manifest_mock) + generate_from_pipeline_template_mock.assert_called_once_with(selected_pipeline_template_path_mock) + + @patch("samcli.commands.pipeline.init.interactive_init_flow.shared_path") + @patch("samcli.commands.pipeline.init.interactive_init_flow.GitRepo.clone") + @patch("samcli.lib.cookiecutter.question.click") + def test_app_pipeline_templates_clone_fail_when_no_old_clone_exist(self, click_mock, clone_mock, shared_path_mock): + # setup + clone_mock.side_effect 
= CloneRepoException # clone fail + app_pipeline_templates_path_mock = Mock() + shared_path_mock.joinpath.return_value = app_pipeline_templates_path_mock + app_pipeline_templates_path_mock.exists.return_value = False # No old clone exists + click_mock.prompt.return_value = "1" # App pipeline templates + + # trigger + with self.assertRaises(PipelineTemplateCloneException): + InteractiveInitFlow(allow_bootstrap=False).do_interactive() + + @patch("samcli.commands.pipeline.init.interactive_init_flow.GitRepo.clone") + @patch("samcli.commands.pipeline.init.interactive_init_flow.click") + @patch("samcli.lib.cookiecutter.question.click") + def test_custom_pipeline_template_clone_fail(self, question_click_mock, init_click_mock, clone_mock): + # setup + clone_mock.side_effect = CloneRepoException # clone fail + question_click_mock.prompt.return_value = "2" # Custom pipeline templates + init_click_mock.prompt.return_value = ( + "https://github.com/any-custom-pipeline-template-repo.git" # Custom pipeline template repo URL + ) + + # trigger + with self.assertRaises(PipelineTemplateCloneException): + InteractiveInitFlow(allow_bootstrap=False).do_interactive() + + @patch("samcli.commands.pipeline.init.interactive_init_flow._read_app_pipeline_templates_manifest") + @patch("samcli.commands.pipeline.init.interactive_init_flow.GitRepo.clone") + @patch("samcli.lib.cookiecutter.question.click") + def test_app_pipeline_templates_with_invalid_manifest( + self, click_mock, clone_mock, read_app_pipeline_templates_manifest_mock + ): + # setup + app_pipeline_templates_path_mock = Mock() + clone_mock.return_value = app_pipeline_templates_path_mock + read_app_pipeline_templates_manifest_mock.side_effect = AppPipelineTemplateManifestException("") + click_mock.prompt.return_value = "1" # App pipeline templates + + # trigger + with self.assertRaises(AppPipelineTemplateManifestException): + InteractiveInitFlow(allow_bootstrap=False).do_interactive() + + 
@patch("samcli.commands.pipeline.init.interactive_init_flow.SamConfig") + @patch("samcli.commands.pipeline.init.interactive_init_flow.osutils") + @patch("samcli.lib.cookiecutter.template.cookiecutter") + @patch("samcli.commands.pipeline.init.interactive_init_flow.InteractiveFlowCreator.create_flow") + @patch("samcli.commands.pipeline.init.interactive_init_flow.PipelineTemplatesManifest") + @patch("samcli.commands.pipeline.init.interactive_init_flow.GitRepo.clone") + @patch("samcli.commands.pipeline.init.interactive_init_flow._copy_dir_contents_to_cwd") + @patch("samcli.commands.pipeline.init.interactive_init_flow._get_pipeline_template_metadata") + @patch("samcli.lib.cookiecutter.question.click") + def test_generate_pipeline_configuration_file_from_app_pipeline_template_happy_case( + self, + click_mock, + _get_pipeline_template_metadata_mock, + _copy_dir_contents_to_cwd_mock, + clone_mock, + PipelineTemplatesManifest_mock, + create_interactive_flow_mock, + cookiecutter_mock, + osutils_mock, + samconfig_mock, + ): + # setup + any_app_pipeline_templates_path = Path( + os.path.normpath(shared_path.joinpath(APP_PIPELINE_TEMPLATES_REPO_LOCAL_NAME)) + ) + clone_mock.return_value = any_app_pipeline_templates_path + jenkins_template_location = "some/location" + jenkins_template_mock = Mock( + display_name="Jenkins pipeline template", location=jenkins_template_location, provider="jenkins" + ) + pipeline_templates_manifest_mock = Mock( + providers=[ + Mock(id="gitlab", display_name="Gitlab"), + Mock(id="jenkins", display_name="Jenkins"), + ], + templates=[jenkins_template_mock], + ) + PipelineTemplatesManifest_mock.return_value = pipeline_templates_manifest_mock + cookiecutter_output_dir_mock = "/tmp/any/dir2" + osutils_mock.mkdir_temp.return_value.__enter__ = Mock(return_value=cookiecutter_output_dir_mock) + interactive_flow_mock = Mock() + create_interactive_flow_mock.return_value = interactive_flow_mock + cookiecutter_context_mock = {"key": "value"} + 
interactive_flow_mock.run.return_value = cookiecutter_context_mock + config_file = Mock() + samconfig_mock.return_value = config_file + config_file.exists.return_value = True + config_file.get_stage_names.return_value = ["testing", "prod"] + config_file.get_stage_names.return_value = ["testing", "prod"] + config_file.get_all.return_value = {"pipeline_execution_role": "arn:aws:iam::123456789012:role/execution-role"} + + click_mock.prompt.side_effect = [ + "1", # App pipeline templates + "2", # choose "Jenkins" when prompt for CI/CD system. (See pipeline_templates_manifest_mock, Jenkins is the 2nd provider) + "1", # choose "Jenkins pipeline template" when prompt for pipeline template + ] + _get_pipeline_template_metadata_mock.return_value = {"number_of_stages": 2} + + # trigger + InteractiveInitFlow(allow_bootstrap=False).do_interactive() + + # verify + osutils_mock.mkdir_temp.assert_called() # cookiecutter project is generated to temp + expected_cookicutter_template_location = any_app_pipeline_templates_path.joinpath(jenkins_template_location) + clone_mock.assert_called_once_with(shared_path, APP_PIPELINE_TEMPLATES_REPO_LOCAL_NAME, replace_existing=True) + PipelineTemplatesManifest_mock.assert_called_once() + create_interactive_flow_mock.assert_called_once_with( + str(expected_cookicutter_template_location.joinpath("questions.json")) + ) + interactive_flow_mock.run.assert_called_once_with( + { + str(["testing", "pipeline_execution_role"]): "arn:aws:iam::123456789012:role/execution-role", + str(["1", "pipeline_execution_role"]): "arn:aws:iam::123456789012:role/execution-role", + str(["prod", "pipeline_execution_role"]): "arn:aws:iam::123456789012:role/execution-role", + str(["2", "pipeline_execution_role"]): "arn:aws:iam::123456789012:role/execution-role", + str(["stage_names_message"]): "Here are the stage names detected " + f'in {os.path.join(".aws-sam", "pipeline", "pipelineconfig.toml")}:\n\t1 - testing\n\t2 - prod', + } + ) + 
cookiecutter_mock.assert_called_once_with( + template=str(expected_cookicutter_template_location), + output_dir=cookiecutter_output_dir_mock, + no_input=True, + extra_context=cookiecutter_context_mock, + overwrite_if_exists=True, + ) + + @patch("samcli.commands.pipeline.init.interactive_init_flow._read_app_pipeline_templates_manifest") + @patch("samcli.commands.pipeline.init.interactive_init_flow.GitRepo.clone") + @patch("samcli.lib.cookiecutter.question.click") + def test_generate_pipeline_configuration_file_when_pipeline_template_missing_questions_file( + self, click_mock, clone_mock, read_app_pipeline_templates_manifest_mock + ): + # setup + any_app_pipeline_templates_path = shared_path.joinpath(APP_PIPELINE_TEMPLATES_REPO_LOCAL_NAME) + clone_mock.return_value = any_app_pipeline_templates_path + jenkins_template_location = "some/location" + jenkins_template_mock = Mock( + display_name="Jenkins pipeline template", location=jenkins_template_location, provider="jenkins" + ) + pipeline_templates_manifest_mock = Mock( + providers=[ + Mock(id="gitlab", display_name="Gitlab"), + Mock(id="jenkins", display_name="Jenkins"), + ], + templates=[jenkins_template_mock], + ) + read_app_pipeline_templates_manifest_mock.return_value = pipeline_templates_manifest_mock + + click_mock.prompt.side_effect = [ + "1", # App pipeline templates + "2", # choose "Jenkins" when prompt for CI/CD system. 
(See pipeline_templates_manifest_mock, Jenkins is the 2nd provider) + "1", # choose "Jenkins pipeline template" when prompt for pipeline template + ] + + # trigger + with self.assertRaises(QuestionsNotFoundException): + InteractiveInitFlow(allow_bootstrap=False).do_interactive() + + @patch("samcli.commands.pipeline.init.interactive_init_flow.os") + @patch("samcli.commands.pipeline.init.interactive_init_flow.osutils") + @patch("samcli.commands.pipeline.init.interactive_init_flow.InteractiveInitFlow._generate_from_pipeline_template") + @patch("samcli.commands.pipeline.init.interactive_init_flow.GitRepo.clone") + @patch("samcli.commands.pipeline.init.interactive_init_flow.click") + @patch("samcli.lib.cookiecutter.question.click") + def test_generate_pipeline_configuration_file_from_custom_local_existing_path_will_not_do_git_clone( + self, + questions_click_mock, + init_click_mock, + clone_mock, + generate_from_pipeline_template_mock, + osutils_mock, + os_mock, + ): + # setup + local_pipeline_templates_path = "/any/existing/local/path" + os_mock.path.exists.return_value = True + questions_click_mock.prompt.return_value = "2" # Custom pipeline templates + init_click_mock.prompt.return_value = local_pipeline_templates_path # git repo path + # trigger + InteractiveInitFlow(allow_bootstrap=False).do_interactive() + + # verify + osutils_mock.mkdir_temp.assert_not_called() + clone_mock.assert_not_called() + generate_from_pipeline_template_mock.assert_called_once_with(Path(local_pipeline_templates_path)) + + @patch("samcli.commands.pipeline.init.interactive_init_flow.osutils") + @patch("samcli.lib.cookiecutter.template.cookiecutter") + @patch("samcli.commands.pipeline.init.interactive_init_flow.InteractiveFlowCreator.create_flow") + @patch("samcli.commands.pipeline.init.interactive_init_flow.GitRepo.clone") + @patch("samcli.commands.pipeline.init.interactive_init_flow.click") + @patch("samcli.commands.pipeline.init.interactive_init_flow._copy_dir_contents_to_cwd") + 
@patch("samcli.commands.pipeline.init.interactive_init_flow._get_pipeline_template_metadata") + @patch("samcli.lib.cookiecutter.question.click") + def test_generate_pipeline_configuration_file_from_custom_remote_pipeline_template_happy_case( + self, + questions_click_mock, + _get_pipeline_template_metadata_mock, + _copy_dir_contents_to_cwd_mock, + init_click_mock, + clone_mock, + create_interactive_flow_mock, + cookiecutter_mock, + osutils_mock, + ): + # setup + any_temp_dir = "/tmp/any/dir" + cookiecutter_output_dir_mock = "/tmp/any/dir2" + osutils_mock.mkdir_temp.return_value.__enter__ = Mock(side_effect=[any_temp_dir, cookiecutter_output_dir_mock]) + osutils_mock.mkdir_temp.return_value.__exit__ = Mock() + any_custom_pipeline_templates_path = Path(os.path.join(any_temp_dir, CUSTOM_PIPELINE_TEMPLATE_REPO_LOCAL_NAME)) + clone_mock.return_value = any_custom_pipeline_templates_path + interactive_flow_mock = Mock() + create_interactive_flow_mock.return_value = interactive_flow_mock + cookiecutter_context_mock = {"key": "value"} + interactive_flow_mock.run.return_value = cookiecutter_context_mock + _copy_dir_contents_to_cwd_mock.return_value = ["file1"] + + questions_click_mock.prompt.return_value = "2" # Custom pipeline templates + init_click_mock.prompt.return_value = "https://github.com/any-custom-pipeline-template-repo.git" + _get_pipeline_template_metadata_mock.return_value = {"number_of_stages": 2} + + # trigger + InteractiveInitFlow(allow_bootstrap=False).do_interactive() + + # verify + # Custom templates are cloned to temp; cookiecutter project is generated to temp + osutils_mock.mkdir_temp.assert_called() + clone_mock.assert_called_once_with( + Path(any_temp_dir), CUSTOM_PIPELINE_TEMPLATE_REPO_LOCAL_NAME, replace_existing=True + ) + create_interactive_flow_mock.assert_called_once_with( + str(any_custom_pipeline_templates_path.joinpath("questions.json")) + ) + interactive_flow_mock.run.assert_called_once() + cookiecutter_mock.assert_called_once_with( + 
template=str(any_custom_pipeline_templates_path), + output_dir=cookiecutter_output_dir_mock, + no_input=True, + extra_context=cookiecutter_context_mock, + overwrite_if_exists=True, + ) + + @patch("samcli.lib.cookiecutter.question.click") + def test_prompt_cicd_provider_will_not_prompt_if_the_list_of_providers_has_only_one_provider(self, click_mock): + gitlab_provider = Mock(id="gitlab", display_name="Gitlab CI/CD") + providers = [gitlab_provider] + + chosen_provider = _prompt_cicd_provider(providers) + click_mock.prompt.assert_not_called() + self.assertEqual(chosen_provider, gitlab_provider) + + jenkins_provider = Mock(id="jenkins", display_name="Jenkins") + providers.append(jenkins_provider) + click_mock.prompt.return_value = "2" + chosen_provider = _prompt_cicd_provider(providers) + click_mock.prompt.assert_called_once() + self.assertEqual(chosen_provider, jenkins_provider) + + @patch("samcli.lib.cookiecutter.question.click") + def test_prompt_provider_pipeline_template_will_not_prompt_if_the_list_of_templatess_has_only_one_provider( + self, click_mock + ): + template1 = Mock(display_name="anyName1", location="anyLocation1", provider="a provider") + template2 = Mock(display_name="anyName2", location="anyLocation2", provider="a provider") + templates = [template1] + + chosen_template = _prompt_provider_pipeline_template(templates) + click_mock.prompt.assert_not_called() + self.assertEqual(chosen_template, template1) + + templates.append(template2) + click_mock.prompt.return_value = "2" + chosen_template = _prompt_provider_pipeline_template(templates) + click_mock.prompt.assert_called_once() + self.assertEqual(chosen_template, template2) + + def test_get_pipeline_template_metadata_can_load(self): + with tempfile.TemporaryDirectory() as dir: + metadata = {"number_of_stages": 2} + with open(Path(dir, "metadata.json"), "w") as f: + json.dump(metadata, f) + self.assertEquals(metadata, _get_pipeline_template_metadata(dir)) + + def 
test_get_pipeline_template_metadata_not_exist(self): + with tempfile.TemporaryDirectory() as dir: + with self.assertRaises(AppPipelineTemplateMetadataException): + _get_pipeline_template_metadata(dir) + + @parameterized.expand( + [ + ('["not_a_dict"]',), + ("not a json"), + ] + ) + def test_get_pipeline_template_metadata_not_valid(self, metadata_str): + with tempfile.TemporaryDirectory() as dir: + with open(Path(dir, "metadata.json"), "w") as f: + f.write(metadata_str) + with self.assertRaises(AppPipelineTemplateMetadataException): + _get_pipeline_template_metadata(dir) + + +class TestInteractiveInitFlowWithBootstrap(TestCase): + @patch("samcli.commands.pipeline.init.interactive_init_flow.SamConfig") + @patch("samcli.commands.pipeline.init.interactive_init_flow.osutils") + @patch("samcli.lib.cookiecutter.template.cookiecutter") + @patch("samcli.commands.pipeline.init.interactive_init_flow.InteractiveFlowCreator.create_flow") + @patch( + "samcli.commands.pipeline.init.interactive_init_flow.InteractiveInitFlow._prompt_run_bootstrap_within_pipeline_init" + ) + @patch("samcli.commands.pipeline.init.interactive_init_flow.PipelineTemplatesManifest") + @patch("samcli.commands.pipeline.init.interactive_init_flow.GitRepo.clone") + @patch("samcli.commands.pipeline.init.interactive_init_flow._copy_dir_contents_to_cwd") + @patch("samcli.commands.pipeline.init.interactive_init_flow._get_pipeline_template_metadata") + @patch("samcli.lib.cookiecutter.question.click") + def test_with_bootstrap_but_answer_no( + self, + click_mock, + _get_pipeline_template_metadata_mock, + _copy_dir_contents_to_cwd_mock, + clone_mock, + PipelineTemplatesManifest_mock, + _prompt_run_bootstrap_within_pipeline_init_mock, + create_interactive_flow_mock, + cookiecutter_mock, + osutils_mock, + samconfig_mock, + ): + # setup + any_app_pipeline_templates_path = Path( + os.path.normpath(shared_path.joinpath(APP_PIPELINE_TEMPLATES_REPO_LOCAL_NAME)) + ) + clone_mock.return_value = 
any_app_pipeline_templates_path + jenkins_template_location = "some/location" + jenkins_template_mock = Mock( + display_name="Jenkins pipeline template", location=jenkins_template_location, provider="jenkins" + ) + pipeline_templates_manifest_mock = Mock( + providers=[ + Mock(id="gitlab", display_name="Gitlab"), + Mock(id="jenkins", display_name="Jenkins"), + ], + templates=[jenkins_template_mock], + ) + PipelineTemplatesManifest_mock.return_value = pipeline_templates_manifest_mock + cookiecutter_output_dir_mock = "/tmp/any/dir2" + osutils_mock.mkdir_temp.return_value.__enter__ = Mock(return_value=cookiecutter_output_dir_mock) + interactive_flow_mock = Mock() + create_interactive_flow_mock.return_value = interactive_flow_mock + cookiecutter_context_mock = {"key": "value"} + interactive_flow_mock.run.return_value = cookiecutter_context_mock + config_file = Mock() + samconfig_mock.return_value = config_file + config_file.exists.return_value = True + config_file.get_stage_names.return_value = ["testing"] + config_file.get_all.return_value = {"pipeline_execution_role": "arn:aws:iam::123456789012:role/execution-role"} + _get_pipeline_template_metadata_mock.return_value = {"number_of_stages": 2} + + click_mock.prompt.side_effect = [ + "1", # App pipeline templates + "2", + # choose "Jenkins" when prompt for CI/CD system. 
(See pipeline_templates_manifest_mock, Jenkins is the 2nd provider) + "1", # choose "Jenkins pipeline template" when prompt for pipeline template + ] + + _prompt_run_bootstrap_within_pipeline_init_mock.return_value = False # not to bootstrap + + # trigger + InteractiveInitFlow(allow_bootstrap=True).do_interactive() + + # verify + _prompt_run_bootstrap_within_pipeline_init_mock.assert_called_once_with(["testing"], 2) + + @parameterized.expand( + [ + ([["testing"], ["testing", "prod"]], [call(["testing"], 2)]), + ([[], ["testing"], ["testing", "prod"]], [call([], 2), call(["testing"], 2)]), + ] + ) + @patch("samcli.commands.pipeline.init.interactive_init_flow.SamConfig") + @patch("samcli.commands.pipeline.init.interactive_init_flow.osutils") + @patch("samcli.lib.cookiecutter.template.cookiecutter") + @patch("samcli.commands.pipeline.init.interactive_init_flow.InteractiveFlowCreator.create_flow") + @patch( + "samcli.commands.pipeline.init.interactive_init_flow.InteractiveInitFlow._prompt_run_bootstrap_within_pipeline_init" + ) + @patch("samcli.commands.pipeline.init.interactive_init_flow.PipelineTemplatesManifest") + @patch("samcli.commands.pipeline.init.interactive_init_flow.GitRepo.clone") + @patch("samcli.commands.pipeline.init.interactive_init_flow._copy_dir_contents_to_cwd") + @patch("samcli.commands.pipeline.init.interactive_init_flow._get_pipeline_template_metadata") + @patch("samcli.lib.cookiecutter.question.click") + def test_with_bootstrap_answer_yes( + self, + get_stage_name_side_effects, + _prompt_run_bootstrap_expected_calls, + click_mock, + _get_pipeline_template_metadata_mock, + _copy_dir_contents_to_cwd_mock, + clone_mock, + PipelineTemplatesManifest_mock, + _prompt_run_bootstrap_within_pipeline_init_mock, + create_interactive_flow_mock, + cookiecutter_mock, + osutils_mock, + samconfig_mock, + ): + # setup + any_app_pipeline_templates_path = Path( + os.path.normpath(shared_path.joinpath(APP_PIPELINE_TEMPLATES_REPO_LOCAL_NAME)) + ) + 
clone_mock.return_value = any_app_pipeline_templates_path + jenkins_template_location = "some/location" + jenkins_template_mock = Mock( + display_name="Jenkins pipeline template", location=jenkins_template_location, provider="jenkins" + ) + pipeline_templates_manifest_mock = Mock( + providers=[ + Mock(id="gitlab", display_name="Gitlab"), + Mock(id="jenkins", display_name="Jenkins"), + ], + templates=[jenkins_template_mock], + ) + PipelineTemplatesManifest_mock.return_value = pipeline_templates_manifest_mock + cookiecutter_output_dir_mock = "/tmp/any/dir2" + osutils_mock.mkdir_temp.return_value.__enter__ = Mock(return_value=cookiecutter_output_dir_mock) + interactive_flow_mock = Mock() + create_interactive_flow_mock.return_value = interactive_flow_mock + cookiecutter_context_mock = {"key": "value"} + interactive_flow_mock.run.return_value = cookiecutter_context_mock + config_file = Mock() + samconfig_mock.return_value = config_file + config_file.exists.return_value = True + config_file.get_stage_names.side_effect = get_stage_name_side_effects + config_file.get_all.return_value = {"pipeline_execution_role": "arn:aws:iam::123456789012:role/execution-role"} + _get_pipeline_template_metadata_mock.return_value = {"number_of_stages": 2} + + click_mock.prompt.side_effect = [ + "1", # App pipeline templates + "2", + # choose "Jenkins" when prompt for CI/CD system. 
(See pipeline_templates_manifest_mock, Jenkins is the 2nd provider) + "1", # choose "Jenkins pipeline template" when prompt for pipeline template + ] + + _prompt_run_bootstrap_within_pipeline_init_mock.return_value = True # to bootstrap + + # trigger + InteractiveInitFlow(allow_bootstrap=True).do_interactive() + + # verify + _prompt_run_bootstrap_within_pipeline_init_mock.assert_has_calls(_prompt_run_bootstrap_expected_calls) + + +class TestInteractiveInitFlow_copy_dir_contents_to_cwd(TestCase): + def tearDown(self) -> None: + if Path("file").exists(): + Path("file").unlink() + shutil.rmtree(os.path.join(".aws-sam", "pipeline"), ignore_errors=True) + + @patch("samcli.commands.pipeline.init.interactive_init_flow.click.confirm") + def test_copy_dir_contents_to_cwd_no_need_override(self, confirm_mock): + with tempfile.TemporaryDirectory() as source: + confirm_mock.return_value = True + Path(source, "file").touch() + Path(source, "file").write_text("hi") + file_paths = _copy_dir_contents_to_cwd(source) + confirm_mock.assert_not_called() + self.assertEqual("hi", Path("file").read_text(encoding="utf-8")) + self.assertEqual([str(Path(".", "file"))], file_paths) + + @patch("samcli.commands.pipeline.init.interactive_init_flow.click.confirm") + def test_copy_dir_contents_to_cwd_override(self, confirm_mock): + with tempfile.TemporaryDirectory() as source: + confirm_mock.return_value = True + Path(source, "file").touch() + Path(source, "file").write_text("hi") + Path("file").touch() + file_paths = _copy_dir_contents_to_cwd(source) + confirm_mock.assert_called_once() + self.assertEqual("hi", Path("file").read_text(encoding="utf-8")) + self.assertEqual([str(Path(".", "file"))], file_paths) + + @patch("samcli.commands.pipeline.init.interactive_init_flow.click.confirm") + def test_copy_dir_contents_to_cwd_not_override(self, confirm_mock): + with tempfile.TemporaryDirectory() as source: + confirm_mock.return_value = False + Path(source, "file").touch() + Path(source, 
"file").write_text("hi") + Path("file").touch() + file_paths = _copy_dir_contents_to_cwd(source) + confirm_mock.assert_called_once() + self.assertEqual("", Path("file").read_text(encoding="utf-8")) + self.assertEqual([str(Path(".aws-sam", "pipeline", "generated-files", "file"))], file_paths) diff --git a/tests/unit/commands/pipeline/init/test_pipeline_templates_manifest.py b/tests/unit/commands/pipeline/init/test_pipeline_templates_manifest.py new file mode 100644 index 0000000000..d35541c3f6 --- /dev/null +++ b/tests/unit/commands/pipeline/init/test_pipeline_templates_manifest.py @@ -0,0 +1,82 @@ +from unittest import TestCase +import os +from pathlib import Path +from samcli.commands.pipeline.init.pipeline_templates_manifest import ( + Provider, + PipelineTemplatesManifest, + PipelineTemplateMetadata, + AppPipelineTemplateManifestException, +) +from samcli.lib.utils import osutils + +INVALID_YAML_MANIFEST = """ +providers: +- Jenkins with wrong identation +""" + +MISSING_KEYS_MANIFEST = """ +NotProviders: + - Jenkins +Templates: + - NotName: jenkins-two-environments-pipeline + provider: Jenkins + location: templates/cookiecutter-jenkins-two-environments-pipeline +""" + +VALID_MANIFEST = """ +providers: + - displayName: Jenkins + id: jenkins + - displayName: Gitlab CI/CD + id: gitlab + - displayName: Github Actions + id: github-actions +templates: + - displayName: jenkins-two-environments-pipeline + provider: jenkins + location: templates/cookiecutter-jenkins-two-environments-pipeline + - displayName: gitlab-two-environments-pipeline + provider: gitlab + location: templates/cookiecutter-gitlab-two-environments-pipeline + - displayName: Github-Actions-two-environments-pipeline + provider: github-actions + location: templates/cookiecutter-github-actions-two-environments-pipeline +""" + + +class TestCli(TestCase): + def test_manifest_file_not_found(self): + non_existing_path = Path(os.path.normpath("/any/non/existing/manifest.yaml")) + with 
self.assertRaises(AppPipelineTemplateManifestException): + PipelineTemplatesManifest(manifest_path=non_existing_path) + + def test_invalid_yaml_manifest_file(self): + with osutils.mkdir_temp(ignore_errors=True) as tempdir: + manifest_path = os.path.normpath(os.path.join(tempdir, "manifest.yaml")) + with open(manifest_path, "w", encoding="utf-8") as fp: + fp.write(INVALID_YAML_MANIFEST) + with self.assertRaises(AppPipelineTemplateManifestException): + PipelineTemplatesManifest(manifest_path=Path(manifest_path)) + + def test_manifest_missing_required_keys(self): + with osutils.mkdir_temp(ignore_errors=True) as tempdir: + manifest_path = os.path.normpath(os.path.join(tempdir, "manifest.yaml")) + with open(manifest_path, "w", encoding="utf-8") as fp: + fp.write(MISSING_KEYS_MANIFEST) + with self.assertRaises(AppPipelineTemplateManifestException): + PipelineTemplatesManifest(manifest_path=Path(manifest_path)) + + def test_manifest_happy_case(self): + with osutils.mkdir_temp(ignore_errors=True) as tempdir: + manifest_path = os.path.normpath(os.path.join(tempdir, "manifest.yaml")) + with open(manifest_path, "w", encoding="utf-8") as fp: + fp.write(VALID_MANIFEST) + manifest = PipelineTemplatesManifest(manifest_path=Path(manifest_path)) + self.assertEquals(len(manifest.providers), 3) + gitlab_provider: Provider = next(p for p in manifest.providers if p.id == "gitlab") + self.assertEquals(gitlab_provider.display_name, "Gitlab CI/CD") + self.assertEquals(len(manifest.templates), 3) + gitlab_template: PipelineTemplateMetadata = next(t for t in manifest.templates if t.provider == "gitlab") + self.assertEquals(gitlab_template.display_name, "gitlab-two-environments-pipeline") + self.assertEquals(gitlab_template.provider, "gitlab") + self.assertEquals(gitlab_template.location, "templates/cookiecutter-gitlab-two-environments-pipeline") diff --git a/tests/unit/lib/bootstrap/test_bootstrap.py b/tests/unit/lib/bootstrap/test_bootstrap.py index 8094a404c0..e62ad26a5c 100644 --- 
a/tests/unit/lib/bootstrap/test_bootstrap.py +++ b/tests/unit/lib/bootstrap/test_bootstrap.py @@ -1,23 +1,45 @@ from unittest import TestCase -from unittest.mock import patch +from unittest.mock import patch, MagicMock -from samcli.commands.exceptions import UserException -from samcli.lib.bootstrap.bootstrap import manage_stack +from samcli.commands.exceptions import UserException, CredentialsError +from samcli.lib.bootstrap.bootstrap import manage_stack, StackOutput, get_current_account_id class TestBootstrapManagedStack(TestCase): @patch("samcli.lib.bootstrap.bootstrap.manage_cloudformation_stack") def test_stack_missing_bucket(self, manage_cfn_stack_mock): - manage_cfn_stack_mock.return_value = [] + manage_cfn_stack_mock.return_value = StackOutput(stack_output=[]) with self.assertRaises(UserException): manage_stack("testProfile", "fakeRegion") - manage_cfn_stack_mock.return_value = [{"OutputKey": "NotSourceBucket", "OutputValue": "AnyValue"}] + manage_cfn_stack_mock.return_value = StackOutput( + stack_output=[{"OutputKey": "NotSourceBucket", "OutputValue": "AnyValue"}] + ) with self.assertRaises(UserException): manage_stack("testProfile", "fakeRegion") @patch("samcli.lib.bootstrap.bootstrap.manage_cloudformation_stack") def test_manage_stack_happy_case(self, manage_cfn_stack_mock): expected_bucket_name = "BucketName" - manage_cfn_stack_mock.return_value = [{"OutputKey": "SourceBucket", "OutputValue": expected_bucket_name}] + manage_cfn_stack_mock.return_value = StackOutput( + stack_output=[{"OutputKey": "SourceBucket", "OutputValue": expected_bucket_name}] + ) actual_bucket_name = manage_stack("testProfile", "fakeRegion") self.assertEqual(actual_bucket_name, expected_bucket_name) + + @patch("samcli.lib.bootstrap.bootstrap.boto3") + def test_get_current_account_id(self, boto3_mock): + session_mock = boto3_mock.Session.return_value = MagicMock() + sts_mock = MagicMock() + sts_mock.get_caller_identity.return_value = {"Account": 1234567890} + 
session_mock.client.return_value = sts_mock + account_id = get_current_account_id() + self.assertEqual(account_id, 1234567890) + + @patch("samcli.lib.bootstrap.bootstrap.boto3") + def test_get_current_account_id_missing_id(self, boto3_mock): + session_mock = boto3_mock.Session.return_value = MagicMock() + sts_mock = MagicMock() + sts_mock.get_caller_identity.return_value = {} + session_mock.client.return_value = sts_mock + with self.assertRaises(CredentialsError): + get_current_account_id() diff --git a/tests/unit/lib/cookiecutter/test_question.py b/tests/unit/lib/cookiecutter/test_question.py index c46a37fa43..2db7055357 100644 --- a/tests/unit/lib/cookiecutter/test_question.py +++ b/tests/unit/lib/cookiecutter/test_question.py @@ -27,6 +27,7 @@ def setUp(self): key=self._ANY_KEY, default=self._ANY_ANSWER, is_required=True, + allow_autofill=False, next_question_map=self._ANY_NEXT_QUESTION_MAP, default_next_question_key=self._ANY_DEFAULT_NEXT_QUESTION_KEY, ) @@ -151,6 +152,16 @@ def test_ask_resolves_from_cookiecutter_context_with_default_object_missing_keys with self.assertRaises(KeyError): question.ask(context=context) + def test_question_allow_autofill_with_default_value(self): + q = Question(text=self._ANY_TEXT, key=self._ANY_KEY, is_required=True, allow_autofill=True, default="123") + self.assertEquals("123", q.ask()) + + @patch("samcli.lib.cookiecutter.question.click") + def test_question_allow_autofill_without_default_value(self, click_mock): + answer_mock = click_mock.prompt.return_value = Mock() + q = Question(text=self._ANY_TEXT, key=self._ANY_KEY, is_required=True, allow_autofill=True) + self.assertEquals(answer_mock, q.ask()) + class TestChoice(TestCase): def setUp(self): @@ -188,7 +199,11 @@ def test_ask(self, mock_click, mock_choice): answer = self.question.ask({}) self.assertEqual(answer, TestQuestion._ANY_OPTIONS[1]) # we deduct one from user's choice (base 1 vs base 0) mock_click.prompt.assert_called_once_with( - text="Choice", 
default=self.question.default_answer, show_choices=False, type=ANY + text="Choice", + default=self.question.default_answer, + show_choices=False, + type=ANY, + show_default=self.question.default_answer is not None, ) mock_choice.assert_called_once_with(["1", "2", "3"]) diff --git a/tests/unit/lib/cookiecutter/test_template.py b/tests/unit/lib/cookiecutter/test_template.py index edb7412f59..318939f46b 100644 --- a/tests/unit/lib/cookiecutter/test_template.py +++ b/tests/unit/lib/cookiecutter/test_template.py @@ -114,11 +114,16 @@ def test_generate_project(self, mock_preprocessor, mock_postprocessor, mock_inte postprocessors=[mock_postprocessor], ) mock_preprocessor.run.return_value = self._ANY_PROCESSOR_CONTEXT - t.generate_project(context=self._ANY_INTERACTIVE_FLOW_CONTEXT) + output_dir = Mock() + t.generate_project(context=self._ANY_INTERACTIVE_FLOW_CONTEXT, output_dir=output_dir) mock_interactive_flow.run.assert_not_called() mock_preprocessor.run.assert_called_once_with(self._ANY_INTERACTIVE_FLOW_CONTEXT) mock_cookiecutter.assert_called_with( - template=self._ANY_LOCATION, output_dir=".", no_input=True, extra_context=self._ANY_PROCESSOR_CONTEXT + template=self._ANY_LOCATION, + output_dir=output_dir, + no_input=True, + extra_context=self._ANY_PROCESSOR_CONTEXT, + overwrite_if_exists=True, ) mock_postprocessor.run.assert_called_once_with(self._ANY_PROCESSOR_CONTEXT) @@ -127,7 +132,7 @@ def test_generate_project_preprocessors_exceptions(self, mock_preprocessor): t = Template(location=self._ANY_LOCATION, preprocessors=[mock_preprocessor]) with self.assertRaises(PreprocessingError): mock_preprocessor.run.side_effect = Exception("something went wrong") - t.generate_project({}) + t.generate_project({}, Mock()) @patch("samcli.lib.cookiecutter.template.cookiecutter") @patch("samcli.lib.cookiecutter.processor") @@ -135,7 +140,7 @@ def test_generate_project_postprocessors_exceptions(self, mock_postprocessor, mo t = Template(location=self._ANY_LOCATION, 
postprocessors=[mock_postprocessor]) with self.assertRaises(PostprocessingError): mock_postprocessor.run.side_effect = Exception("something went wrong") - t.generate_project({}) + t.generate_project({}, Mock()) @patch("samcli.lib.cookiecutter.template.generate_non_cookiecutter_project") @patch("samcli.lib.cookiecutter.template.cookiecutter") @@ -143,13 +148,13 @@ def test_generate_project_cookiecutter_exceptions(self, mock_cookiecutter, mock_ t = Template(location=self._ANY_LOCATION) with self.assertRaises(InvalidLocationError): mock_cookiecutter.side_effect = UnknownRepoType() - t.generate_project({}) + t.generate_project({}, Mock()) mock_cookiecutter.reset_mock() with self.assertRaises(GenerateProjectFailedError): mock_cookiecutter.side_effect = Exception("something went wrong") - t.generate_project({}) + t.generate_project({}, Mock()) mock_cookiecutter.reset_mock() # if the provided template is not a cookiecutter template, we generate a non cookiecutter template mock_cookiecutter.side_effect = RepositoryNotFound() - t.generate_project({}) + t.generate_project({}, Mock()) mock_generate_non_cookiecutter_project.assert_called_once() diff --git a/tests/unit/lib/pipeline/__init__.py b/tests/unit/lib/pipeline/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/tests/unit/lib/pipeline/bootstrap/__init__.py b/tests/unit/lib/pipeline/bootstrap/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/tests/unit/lib/pipeline/bootstrap/test_environment.py b/tests/unit/lib/pipeline/bootstrap/test_environment.py new file mode 100644 index 0000000000..9a12f2be15 --- /dev/null +++ b/tests/unit/lib/pipeline/bootstrap/test_environment.py @@ -0,0 +1,425 @@ +from unittest import TestCase +from unittest.mock import Mock, patch, call, MagicMock + +from samcli.lib.pipeline.bootstrap.stage import Stage + +ANY_STAGE_NAME = "ANY_STAGE_NAME" +ANY_PIPELINE_USER_ARN = "ANY_PIPELINE_USER_ARN" +ANY_PIPELINE_EXECUTION_ROLE_ARN = 
"ANY_PIPELINE_EXECUTION_ROLE_ARN" +ANY_CLOUDFORMATION_EXECUTION_ROLE_ARN = "ANY_CLOUDFORMATION_EXECUTION_ROLE_ARN" +ANY_ARTIFACTS_BUCKET_ARN = "ANY_ARTIFACTS_BUCKET_ARN" +ANY_IMAGE_REPOSITORY_ARN = "ANY_IMAGE_REPOSITORY_ARN" +ANY_ARN = "ANY_ARN" + + +class TestStage(TestCase): + def test_stage_name_is_the_only_required_field_to_initialize_an_stage(self): + stage: Stage = Stage(name=ANY_STAGE_NAME) + self.assertEqual(stage.name, ANY_STAGE_NAME) + self.assertIsNone(stage.aws_profile) + self.assertIsNone(stage.aws_region) + self.assertIsNotNone(stage.pipeline_user) + self.assertIsNotNone(stage.pipeline_execution_role) + self.assertIsNotNone(stage.cloudformation_execution_role) + self.assertIsNotNone(stage.artifacts_bucket) + self.assertIsNotNone(stage.image_repository) + + with self.assertRaises(TypeError): + Stage() + + def test_did_user_provide_all_required_resources_when_not_all_resources_are_provided(self): + stage: Stage = Stage(name=ANY_STAGE_NAME) + self.assertFalse(stage.did_user_provide_all_required_resources()) + stage: Stage = Stage(name=ANY_STAGE_NAME, pipeline_user_arn=ANY_PIPELINE_USER_ARN) + self.assertFalse(stage.did_user_provide_all_required_resources()) + stage: Stage = Stage( + name=ANY_STAGE_NAME, + pipeline_user_arn=ANY_PIPELINE_USER_ARN, + pipeline_execution_role_arn=ANY_PIPELINE_EXECUTION_ROLE_ARN, + ) + self.assertFalse(stage.did_user_provide_all_required_resources()) + stage: Stage = Stage( + name=ANY_STAGE_NAME, + pipeline_user_arn=ANY_PIPELINE_USER_ARN, + pipeline_execution_role_arn=ANY_PIPELINE_EXECUTION_ROLE_ARN, + cloudformation_execution_role_arn=ANY_CLOUDFORMATION_EXECUTION_ROLE_ARN, + ) + self.assertFalse(stage.did_user_provide_all_required_resources()) + stage: Stage = Stage( + name=ANY_STAGE_NAME, + pipeline_user_arn=ANY_PIPELINE_USER_ARN, + pipeline_execution_role_arn=ANY_PIPELINE_EXECUTION_ROLE_ARN, + cloudformation_execution_role_arn=ANY_CLOUDFORMATION_EXECUTION_ROLE_ARN, + artifacts_bucket_arn=ANY_ARTIFACTS_BUCKET_ARN, + 
create_image_repository=True, + ) + self.assertFalse(stage.did_user_provide_all_required_resources()) + + def test_did_user_provide_all_required_resources_ignore_image_repository_if_it_is_not_required(self): + stage: Stage = Stage( + name=ANY_STAGE_NAME, + pipeline_user_arn=ANY_PIPELINE_USER_ARN, + pipeline_execution_role_arn=ANY_PIPELINE_EXECUTION_ROLE_ARN, + cloudformation_execution_role_arn=ANY_CLOUDFORMATION_EXECUTION_ROLE_ARN, + artifacts_bucket_arn=ANY_ARTIFACTS_BUCKET_ARN, + create_image_repository=False, + ) + self.assertTrue(stage.did_user_provide_all_required_resources()) + + def test_did_user_provide_all_required_resources_when_image_repository_is_required(self): + stage: Stage = Stage( + name=ANY_STAGE_NAME, + pipeline_user_arn=ANY_PIPELINE_USER_ARN, + pipeline_execution_role_arn=ANY_PIPELINE_EXECUTION_ROLE_ARN, + cloudformation_execution_role_arn=ANY_CLOUDFORMATION_EXECUTION_ROLE_ARN, + artifacts_bucket_arn=ANY_ARTIFACTS_BUCKET_ARN, + create_image_repository=True, + ) + self.assertFalse(stage.did_user_provide_all_required_resources()) + stage: Stage = Stage( + name=ANY_STAGE_NAME, + pipeline_user_arn=ANY_PIPELINE_USER_ARN, + pipeline_execution_role_arn=ANY_PIPELINE_EXECUTION_ROLE_ARN, + cloudformation_execution_role_arn=ANY_CLOUDFORMATION_EXECUTION_ROLE_ARN, + artifacts_bucket_arn=ANY_ARTIFACTS_BUCKET_ARN, + create_image_repository=True, + image_repository_arn=ANY_IMAGE_REPOSITORY_ARN, + ) + self.assertTrue(stage.did_user_provide_all_required_resources()) + + @patch("samcli.lib.pipeline.bootstrap.stage.Stage._get_pipeline_user_secret_pair") + @patch("samcli.lib.pipeline.bootstrap.stage.click") + @patch("samcli.lib.pipeline.bootstrap.stage.manage_stack") + def test_did_user_provide_all_required_resources_returns_false_if_the_stage_was_initialized_without_any_of_the_resources_even_if_fulfilled_after_bootstrap( + self, manage_stack_mock, click_mock, pipeline_user_secret_pair_mock + ): + # setup + stack_output = Mock() + 
pipeline_user_secret_pair_mock.return_value = ("id", "secret") + stack_output.get.return_value = ANY_ARN + manage_stack_mock.return_value = stack_output + stage: Stage = Stage(name=ANY_STAGE_NAME) + + self.assertFalse(stage.did_user_provide_all_required_resources()) + + stage.bootstrap(confirm_changeset=False) + # After bootstrapping, all the resources should be fulfilled + self.assertEqual(ANY_ARN, stage.pipeline_user.arn) + self.assertEqual(ANY_ARN, stage.pipeline_execution_role.arn) + self.assertEqual(ANY_ARN, stage.cloudformation_execution_role.arn) + self.assertEqual(ANY_ARN, stage.artifacts_bucket.arn) + self.assertEqual(ANY_ARN, stage.image_repository.arn) + + # although all of the resources got fulfilled, `did_user_provide_all_required_resources` should return false + # as these resources are not provided by the user + self.assertFalse(stage.did_user_provide_all_required_resources()) + + @patch("samcli.lib.pipeline.bootstrap.stage.click") + @patch("samcli.lib.pipeline.bootstrap.stage.manage_stack") + @patch.object(Stage, "did_user_provide_all_required_resources") + def test_bootstrap_will_not_deploy_the_cfn_template_if_all_resources_are_already_provided( + self, did_user_provide_all_required_resources_mock, manage_stack_mock, click_mock + ): + did_user_provide_all_required_resources_mock.return_value = True + stage: Stage = Stage(name=ANY_STAGE_NAME) + stage.bootstrap(confirm_changeset=False) + manage_stack_mock.assert_not_called() + + @patch("samcli.lib.pipeline.bootstrap.stage.Stage._get_pipeline_user_secret_pair") + @patch("samcli.lib.pipeline.bootstrap.stage.click") + @patch("samcli.lib.pipeline.bootstrap.stage.manage_stack") + def test_bootstrap_will_confirm_before_deploying_unless_confirm_changeset_is_disabled( + self, manage_stack_mock, click_mock, pipeline_user_secret_pair_mock + ): + click_mock.confirm.return_value = False + pipeline_user_secret_pair_mock.return_value = ("id", "secret") + stage: Stage = Stage(name=ANY_STAGE_NAME) + 
stage.bootstrap(confirm_changeset=False) + click_mock.confirm.assert_not_called() + manage_stack_mock.assert_called_once() + manage_stack_mock.reset_mock() + stage.bootstrap(confirm_changeset=True) + click_mock.confirm.assert_called_once() + manage_stack_mock.assert_not_called() # As the user choose to not confirm + + @patch("samcli.lib.pipeline.bootstrap.stage.click") + @patch("samcli.lib.pipeline.bootstrap.stage.manage_stack") + def test_bootstrap_will_not_deploy_the_cfn_template_if_the_user_did_not_confirm( + self, manage_stack_mock, click_mock + ): + click_mock.confirm.return_value = False + stage: Stage = Stage(name=ANY_STAGE_NAME) + stage.bootstrap(confirm_changeset=True) + manage_stack_mock.assert_not_called() + + @patch("samcli.lib.pipeline.bootstrap.stage.Stage._get_pipeline_user_secret_pair") + @patch("samcli.lib.pipeline.bootstrap.stage.click") + @patch("samcli.lib.pipeline.bootstrap.stage.manage_stack") + def test_bootstrap_will_deploy_the_cfn_template_if_the_user_did_confirm( + self, manage_stack_mock, click_mock, pipeline_user_secret_pair_mock + ): + click_mock.confirm.return_value = True + pipeline_user_secret_pair_mock.return_value = ("id", "secret") + stage: Stage = Stage(name=ANY_STAGE_NAME) + stage.bootstrap(confirm_changeset=True) + manage_stack_mock.assert_called_once() + + @patch("samcli.lib.pipeline.bootstrap.stage.Stage._get_pipeline_user_secret_pair") + @patch("samcli.lib.pipeline.bootstrap.stage.click") + @patch("samcli.lib.pipeline.bootstrap.stage.manage_stack") + def test_bootstrap_will_pass_arns_of_all_user_provided_resources_any_empty_strings_for_other_resources_to_the_cfn_stack( + self, manage_stack_mock, click_mock, pipeline_user_secret_pair_mock + ): + click_mock.confirm.return_value = True + pipeline_user_secret_pair_mock.return_value = ("id", "secret") + stage: Stage = Stage( + name=ANY_STAGE_NAME, + pipeline_user_arn=ANY_PIPELINE_USER_ARN, + artifacts_bucket_arn=ANY_ARTIFACTS_BUCKET_ARN, + create_image_repository=True, + 
image_repository_arn=ANY_IMAGE_REPOSITORY_ARN, + ) + stage.bootstrap() + manage_stack_mock.assert_called_once() + args, kwargs = manage_stack_mock.call_args_list[0] + expected_parameter_overrides = { + "PipelineUserArn": ANY_PIPELINE_USER_ARN, + "PipelineExecutionRoleArn": "", + "CloudFormationExecutionRoleArn": "", + "ArtifactsBucketArn": ANY_ARTIFACTS_BUCKET_ARN, + "CreateImageRepository": "true", + "ImageRepositoryArn": ANY_IMAGE_REPOSITORY_ARN, + } + self.assertEqual(expected_parameter_overrides, kwargs["parameter_overrides"]) + + @patch("samcli.lib.pipeline.bootstrap.stage.Stage._get_pipeline_user_secret_pair") + @patch("samcli.lib.pipeline.bootstrap.stage.click") + @patch("samcli.lib.pipeline.bootstrap.stage.manage_stack") + def test_bootstrap_will_fullfill_all_resource_arns( + self, manage_stack_mock, click_mock, pipeline_user_secret_pair_mock + ): + # setup + pipeline_user_secret_pair_mock.return_value = ("id", "secret") + stack_output = Mock() + stack_output.get.return_value = ANY_ARN + manage_stack_mock.return_value = stack_output + stage: Stage = Stage(name=ANY_STAGE_NAME) + click_mock.confirm.return_value = True + + # verify resources' ARNS are empty + self.assertIsNone(stage.pipeline_user.arn) + self.assertIsNone(stage.pipeline_execution_role.arn) + self.assertIsNone(stage.cloudformation_execution_role.arn) + self.assertIsNone(stage.artifacts_bucket.arn) + + # trigger + stage.bootstrap() + + # verify + manage_stack_mock.assert_called_once() + self.assertEqual(ANY_ARN, stage.pipeline_user.arn) + self.assertEqual(ANY_ARN, stage.pipeline_execution_role.arn) + self.assertEqual(ANY_ARN, stage.cloudformation_execution_role.arn) + self.assertEqual(ANY_ARN, stage.artifacts_bucket.arn) + + @patch("samcli.lib.pipeline.bootstrap.stage.SamConfig") + def test_save_config_escapes_none_resources(self, samconfig_mock): + cmd_names = ["any", "commands"] + samconfig_instance_mock = Mock() + samconfig_mock.return_value = samconfig_instance_mock + stage: Stage = 
Stage(name=ANY_STAGE_NAME) + + empty_ecr_call = call( + cmd_names=cmd_names, + section="parameters", + env=ANY_STAGE_NAME, + key="image_repository", + value="", + ) + + expected_calls = [] + self.trigger_and_assert_save_config_calls( + stage, cmd_names, expected_calls + [empty_ecr_call], samconfig_instance_mock.put + ) + + stage.pipeline_user.arn = ANY_PIPELINE_USER_ARN + expected_calls.append( + call(cmd_names=cmd_names, section="parameters", key="pipeline_user", value=ANY_PIPELINE_USER_ARN) + ) + self.trigger_and_assert_save_config_calls( + stage, cmd_names, expected_calls + [empty_ecr_call], samconfig_instance_mock.put + ) + + stage.pipeline_execution_role.arn = ANY_PIPELINE_EXECUTION_ROLE_ARN + expected_calls.append( + call( + cmd_names=cmd_names, + section="parameters", + env=ANY_STAGE_NAME, + key="pipeline_execution_role", + value=ANY_PIPELINE_EXECUTION_ROLE_ARN, + ), + ) + self.trigger_and_assert_save_config_calls( + stage, cmd_names, expected_calls + [empty_ecr_call], samconfig_instance_mock.put + ) + + stage.cloudformation_execution_role.arn = ANY_CLOUDFORMATION_EXECUTION_ROLE_ARN + expected_calls.append( + call( + cmd_names=cmd_names, + section="parameters", + env=ANY_STAGE_NAME, + key="cloudformation_execution_role", + value=ANY_CLOUDFORMATION_EXECUTION_ROLE_ARN, + ), + ) + self.trigger_and_assert_save_config_calls( + stage, cmd_names, expected_calls + [empty_ecr_call], samconfig_instance_mock.put + ) + + stage.artifacts_bucket.arn = "arn:aws:s3:::artifact_bucket_name" + expected_calls.append( + call( + cmd_names=cmd_names, + section="parameters", + env=ANY_STAGE_NAME, + key="artifacts_bucket", + value="artifact_bucket_name", + ), + ) + self.trigger_and_assert_save_config_calls( + stage, cmd_names, expected_calls + [empty_ecr_call], samconfig_instance_mock.put + ) + + stage.image_repository.arn = "arn:aws:ecr:us-east-2:111111111111:repository/image_repository_name" + expected_calls.append( + call( + cmd_names=cmd_names, + section="parameters", + 
env=ANY_STAGE_NAME, + key="image_repository", + value="111111111111.dkr.ecr.us-east-2.amazonaws.com/image_repository_name", + ) + ) + self.trigger_and_assert_save_config_calls(stage, cmd_names, expected_calls, samconfig_instance_mock.put) + + def trigger_and_assert_save_config_calls(self, stage, cmd_names, expected_calls, samconfig_put_mock): + stage.save_config(config_dir="any_config_dir", filename="any_pipeline.toml", cmd_names=cmd_names) + self.assertEqual(len(expected_calls), samconfig_put_mock.call_count) + samconfig_put_mock.assert_has_calls(expected_calls, any_order=True) + samconfig_put_mock.reset_mock() + + @patch("samcli.lib.pipeline.bootstrap.stage.boto3") + def test_getting_pipeline_user_credentials(self, boto3_mock): + sm_client_mock = MagicMock() + sm_client_mock.get_secret_value.return_value = { + "SecretString": '{"aws_access_key_id": "AccessKeyId", "aws_secret_access_key": "SuperSecretKey"}' + } + session_mock = MagicMock() + session_mock.client.return_value = sm_client_mock + boto3_mock.Session.return_value = session_mock + + (key, secret) = Stage._get_pipeline_user_secret_pair("dummy_arn", None, "dummy-region") + self.assertEqual(key, "AccessKeyId") + self.assertEqual(secret, "SuperSecretKey") + sm_client_mock.get_secret_value.assert_called_once_with(SecretId="dummy_arn") + + @patch("samcli.lib.pipeline.bootstrap.stage.SamConfig") + def test_save_config_ignores_exceptions_thrown_while_calculating_artifacts_bucket_name(self, samconfig_mock): + samconfig_instance_mock = Mock() + samconfig_mock.return_value = samconfig_instance_mock + stage: Stage = Stage(name=ANY_STAGE_NAME, artifacts_bucket_arn="invalid ARN") + # calling artifacts_bucket.name() during save_config() will raise a ValueError exception, we need to make sure + # this exception is swallowed so that other configs can be safely saved to the pipelineconfig.toml file + stage.save_config(config_dir="any_config_dir", filename="any_pipeline.toml", cmd_names=["any", "commands"]) + + 
@patch("samcli.lib.pipeline.bootstrap.stage.SamConfig") + def test_save_config_ignores_exceptions_thrown_while_calculating_image_repository_uri(self, samconfig_mock): + samconfig_instance_mock = Mock() + samconfig_mock.return_value = samconfig_instance_mock + stage: Stage = Stage(name=ANY_STAGE_NAME, image_repository_arn="invalid ARN") + # calling image_repository.get_uri() during save_config() will raise a ValueError exception, we need to make + # sure this exception is swallowed so that other configs can be safely saved to the pipelineconfig.toml file + stage.save_config(config_dir="any_config_dir", filename="any_pipeline.toml", cmd_names=["any", "commands"]) + + @patch.object(Stage, "save_config") + def test_save_config_safe(self, save_config_mock): + save_config_mock.side_effect = Exception + stage: Stage = Stage(name=ANY_STAGE_NAME) + stage.save_config_safe(config_dir="any_config_dir", filename="any_pipeline.toml", cmd_names=["commands"]) + save_config_mock.assert_called_once_with("any_config_dir", "any_pipeline.toml", ["commands"]) + + @patch("samcli.lib.pipeline.bootstrap.stage.click") + def test_print_resources_summary_when_no_resources_provided_by_the_user(self, click_mock): + stage: Stage = Stage(name=ANY_STAGE_NAME) + stage.print_resources_summary() + self.assert_summary_has_a_message_like("The following resources were created in your account", click_mock.secho) + + @patch("samcli.lib.pipeline.bootstrap.stage.click") + def test_print_resources_summary_when_all_resources_are_provided_by_the_user(self, click_mock): + stage: Stage = Stage( + name=ANY_STAGE_NAME, + pipeline_user_arn=ANY_PIPELINE_USER_ARN, + pipeline_execution_role_arn=ANY_PIPELINE_EXECUTION_ROLE_ARN, + cloudformation_execution_role_arn=ANY_CLOUDFORMATION_EXECUTION_ROLE_ARN, + artifacts_bucket_arn=ANY_ARTIFACTS_BUCKET_ARN, + create_image_repository=True, + image_repository_arn=ANY_IMAGE_REPOSITORY_ARN, + ) + stage.print_resources_summary() + self.assert_summary_does_not_have_a_message_like( + 
"The following resources were created in your account", click_mock.secho + ) + + @patch("samcli.lib.pipeline.bootstrap.stage.click") + def test_print_resources_summary_when_some_resources_are_provided_by_the_user(self, click_mock): + stage: Stage = Stage( + name=ANY_STAGE_NAME, + pipeline_user_arn=ANY_PIPELINE_USER_ARN, + artifacts_bucket_arn=ANY_ARTIFACTS_BUCKET_ARN, + create_image_repository=True, + image_repository_arn=ANY_IMAGE_REPOSITORY_ARN, + ) + stage.print_resources_summary() + self.assert_summary_has_a_message_like("The following resources were created in your account", click_mock.secho) + + @patch("samcli.lib.pipeline.bootstrap.stage.click") + def test_print_resources_summary_prints_the_credentials_of_the_pipeline_user_iff_not_provided_by_the_user( + self, click_mock + ): + stage_with_provided_pipeline_user: Stage = Stage(name=ANY_STAGE_NAME, pipeline_user_arn=ANY_PIPELINE_USER_ARN) + stage_with_provided_pipeline_user.print_resources_summary() + self.assert_summary_does_not_have_a_message_like("AWS_ACCESS_KEY_ID", click_mock.secho) + self.assert_summary_does_not_have_a_message_like("AWS_SECRET_ACCESS_KEY", click_mock.secho) + click_mock.secho.reset_mock() + + stage_without_provided_pipeline_user: Stage = Stage(name=ANY_STAGE_NAME) + stage_without_provided_pipeline_user.print_resources_summary() + self.assert_summary_has_a_message_like("AWS_ACCESS_KEY_ID", click_mock.secho) + self.assert_summary_has_a_message_like("AWS_SECRET_ACCESS_KEY", click_mock.secho) + + def assert_summary_has_a_message_like(self, msg, click_secho_mock): + self.assertTrue( + self.does_summary_have_a_message_like(msg, click_secho_mock), + msg=f'stage resources summary does not include "{msg}" which is unexpected', + ) + + def assert_summary_does_not_have_a_message_like(self, msg, click_secho_mock): + self.assertFalse( + self.does_summary_have_a_message_like(msg, click_secho_mock), + msg=f'stage resources summary includes "{msg}" which is unexpected', + ) + + @staticmethod + def 
does_summary_have_a_message_like(msg, click_secho_mock): + msg = msg.lower() + for kall in click_secho_mock.call_args_list: + args, kwargs = kall + if args: + message = args[0].lower() + else: + message = kwargs.get("message", "").lower() + if msg in message: + return True + return False diff --git a/tests/unit/lib/pipeline/bootstrap/test_resource.py b/tests/unit/lib/pipeline/bootstrap/test_resource.py new file mode 100644 index 0000000000..f7dcab50f2 --- /dev/null +++ b/tests/unit/lib/pipeline/bootstrap/test_resource.py @@ -0,0 +1,81 @@ +from unittest import TestCase + +from samcli.lib.pipeline.bootstrap.resource import ARNParts, Resource, IAMUser, ECRImageRepository + +VALID_ARN = "arn:partition:service:region:account-id:resource-id" +INVALID_ARN = "ARN" + + +class TestArnParts(TestCase): + def test_arn_parts_of_valid_arn(self): + arn_parts: ARNParts = ARNParts(arn=VALID_ARN) + self.assertEqual(arn_parts.partition, "partition") + self.assertEqual(arn_parts.service, "service") + self.assertEqual(arn_parts.region, "region") + self.assertEqual(arn_parts.account_id, "account-id") + self.assertEqual(arn_parts.resource_id, "resource-id") + + def test_arn_parts_of_invalid_arn(self): + with self.assertRaises(ValueError): + invalid_arn = "invalid_arn" + ARNParts(arn=invalid_arn) + + +class TestResource(TestCase): + def test_resource(self): + resource = Resource(arn=VALID_ARN, comment="") + self.assertEqual(resource.arn, VALID_ARN) + self.assertTrue(resource.is_user_provided) + self.assertEqual(resource.name(), "resource-id") + + resource = Resource(arn=INVALID_ARN, comment="") + self.assertEqual(resource.arn, INVALID_ARN) + self.assertTrue(resource.is_user_provided) + with self.assertRaises(ValueError): + resource.name() + + resource = Resource(arn=None, comment="") + self.assertIsNone(resource.arn) + self.assertFalse(resource.is_user_provided) + self.assertIsNone(resource.name()) + + +class TestIAMUser(TestCase): + def test_create_iam_user(self): + user: IAMUser = 
IAMUser(arn=VALID_ARN, comment="user") + self.assertEquals(user.arn, VALID_ARN) + self.assertEquals(user.comment, "user") + self.assertIsNone(user.access_key_id) + self.assertIsNone(user.secret_access_key) + + user = IAMUser( + arn=INVALID_ARN, + access_key_id="any_access_key_id", + secret_access_key="any_secret_access_key", + comment="user", + ) + self.assertEquals(user.arn, INVALID_ARN) + self.assertEquals(user.comment, "user") + self.assertEquals(user.access_key_id, "any_access_key_id") + self.assertEquals(user.secret_access_key, "any_secret_access_key") + + +class TestECRImageRepository(TestCase): + def test_get_uri_with_valid_ecr_arn(self): + valid_ecr_arn = "arn:partition:service:region:account-id:repository/repository-name" + repo: ECRImageRepository = ECRImageRepository(arn=valid_ecr_arn, comment="ecr") + self.assertEqual(repo.get_uri(), "account-id.dkr.ecr.region.amazonaws.com/repository-name") + self.assertEquals("ecr", repo.comment) + + def test_get_uri_with_invalid_ecr_arn(self): + repo = ECRImageRepository(arn=INVALID_ARN, comment="ecr") + with self.assertRaises(ValueError): + repo.get_uri() + + def test_get_uri_with_valid_aws_arn_that_is_invalid_ecr_arn(self): + ecr_arn_missing_repository_prefix = ( + "arn:partition:service:region:account-id:repository-name-without-repository/-prefix" + ) + repo = ECRImageRepository(arn=ecr_arn_missing_repository_prefix, comment="ecr") + with self.assertRaises(ValueError): + repo.get_uri() diff --git a/tests/unit/lib/samconfig/test_samconfig.py b/tests/unit/lib/samconfig/test_samconfig.py index 74c9ee9661..42017d5490 100644 --- a/tests/unit/lib/samconfig/test_samconfig.py +++ b/tests/unit/lib/samconfig/test_samconfig.py @@ -1,11 +1,11 @@ import os from pathlib import Path - from unittest import TestCase from samcli.lib.config.exceptions import SamConfigVersionException +from samcli.lib.config.samconfig import SamConfig, DEFAULT_CONFIG_FILE_NAME, DEFAULT_GLOBAL_CMDNAME, DEFAULT_ENV from samcli.lib.config.version import 
VERSION_KEY, SAM_CONFIG_VERSION -from samcli.lib.config.samconfig import SamConfig, DEFAULT_CONFIG_FILE_NAME, DEFAULT_GLOBAL_CMDNAME +from samcli.lib.utils import osutils class TestSamConfig(TestCase): @@ -27,14 +27,25 @@ def _check_config_file(self): self.assertTrue(self.samconfig.sanity_check()) self.assertEqual(SAM_CONFIG_VERSION, self.samconfig.document.get(VERSION_KEY)) - def _update_samconfig(self, cmd_names, section, key, value, env): - self.samconfig.put(cmd_names=cmd_names, section=section, key=key, value=value, env=env) + def _update_samconfig(self, cmd_names, section, key, value, env=None): + if env: + self.samconfig.put(cmd_names=cmd_names, section=section, key=key, value=value, env=env) + else: + self.samconfig.put(cmd_names=cmd_names, section=section, key=key, value=value) self.samconfig.flush() self._check_config_file() def test_init(self): self.assertEqual(self.samconfig.filepath, Path(self.config_dir, DEFAULT_CONFIG_FILE_NAME)) + def test_get_stage_names(self): + self.assertEqual(self.samconfig.get_stage_names(), []) + self._update_samconfig(cmd_names=["myCommand"], section="mySection", key="port", value=5401, env="stage1") + self._update_samconfig(cmd_names=["myCommand"], section="mySection", key="port", value=5401, env="stage2") + self.assertEqual(self.samconfig.get_stage_names(), ["stage1", "stage2"]) + self._update_samconfig(cmd_names=["myCommand"], section="mySection", key="port", value=5401) + self.assertEqual(self.samconfig.get_stage_names(), ["stage1", "stage2", DEFAULT_ENV]) + def test_param_overwrite(self): self._update_samconfig(cmd_names=["myCommand"], section="mySection", key="port", value=5401, env="myEnv") self.assertEqual( @@ -195,3 +206,18 @@ def test_write_config_file_non_standard_version(self): self.samconfig.put(cmd_names=["local", "start", "api"], section="parameters", key="skip_pull_image", value=True) self.samconfig.sanity_check() self.assertEqual(self.samconfig.document.get(VERSION_KEY), 0.2) + + def 
test_write_config_file_will_create_the_file_if_not_exist(self): + with osutils.mkdir_temp(ignore_errors=True) as tempdir: + non_existing_dir = os.path.join(tempdir, "non-existing-dir") + non_existing_file = "non-existing-file" + samconfig = SamConfig(config_dir=non_existing_dir, filename=non_existing_file) + + self.assertFalse(samconfig.exists()) + + samconfig.flush() + self.assertFalse(samconfig.exists()) # nothing to write, no need to create the file + + samconfig.put(cmd_names=["any", "command"], section="any-section", key="any-key", value="any-value") + samconfig.flush() + self.assertTrue(samconfig.exists()) diff --git a/tests/unit/lib/utils/test_managed_cloudformation_stack.py b/tests/unit/lib/utils/test_managed_cloudformation_stack.py index 9f1ea0915a..fd21b792f1 100644 --- a/tests/unit/lib/utils/test_managed_cloudformation_stack.py +++ b/tests/unit/lib/utils/test_managed_cloudformation_stack.py @@ -21,19 +21,28 @@ def _stubbed_cf_client(self): def test_session_missing_profile(self, boto_mock): boto_mock.side_effect = ProfileNotFound(profile="test-profile") with self.assertRaises(CredentialsError): - manage_stack("test-profile", "fake-region", SAM_CLI_STACK_NAME, _get_stack_template()) + manage_stack( + profile="test-profile", + region="fake-region", + stack_name=SAM_CLI_STACK_NAME, + template_body=_get_stack_template(), + ) @patch("boto3.client") def test_client_missing_credentials(self, boto_mock): boto_mock.side_effect = NoCredentialsError() with self.assertRaises(CredentialsError): - manage_stack(None, "fake-region", SAM_CLI_STACK_NAME, _get_stack_template()) + manage_stack( + profile=None, region="fake-region", stack_name=SAM_CLI_STACK_NAME, template_body=_get_stack_template() + ) @patch("boto3.client") def test_client_missing_region(self, boto_mock): boto_mock.side_effect = NoRegionError() with self.assertRaises(RegionError): - manage_stack(None, "fake-region", SAM_CLI_STACK_NAME, _get_stack_template()) + manage_stack( + profile=None, 
region="fake-region", stack_name=SAM_CLI_STACK_NAME, template_body=_get_stack_template() + ) def test_new_stack(self): stub_cf, stubber = self._stubbed_cf_client() @@ -47,6 +56,8 @@ def test_new_stack(self): "Tags": [{"Key": "ManagedStackSource", "Value": "AwsSamCli"}], "ChangeSetType": "CREATE", "ChangeSetName": "InitialCreation", + "Capabilities": ["CAPABILITY_IAM"], + "Parameters": [], } ccs_resp = {"Id": "id", "StackId": "aws-sam-cli-managed-default"} stubber.add_response("create_change_set", ccs_resp, ccs_params) @@ -151,6 +162,8 @@ def test_change_set_creation_fails(self): "Tags": [{"Key": "ManagedStackSource", "Value": "AwsSamCli"}], "ChangeSetType": "CREATE", "ChangeSetName": "InitialCreation", + "Capabilities": ["CAPABILITY_IAM"], + "Parameters": [], } stubber.add_client_error("create_change_set", service_error_code="ClientError", expected_params=ccs_params) stubber.activate() @@ -171,6 +184,8 @@ def test_change_set_execution_fails(self): "Tags": [{"Key": "ManagedStackSource", "Value": "AwsSamCli"}], "ChangeSetType": "CREATE", "ChangeSetName": "InitialCreation", + "Capabilities": ["CAPABILITY_IAM"], + "Parameters": [], } ccs_resp = {"Id": "id", "StackId": "aws-sam-cli-managed-default"} stubber.add_response("create_change_set", ccs_resp, ccs_params)