diff --git a/.github/labeler-issue-triage.yaml b/.github/labeler-issue-triage.yaml index a0a95dfed3..8a03033cfd 100644 --- a/.github/labeler-issue-triage.yaml +++ b/.github/labeler-issue-triage.yaml @@ -37,7 +37,10 @@ feature/policies: - '### (|New or )Affected Resource\(s\)\/Data Source\(s\)((.|\n)*)azuread_(authentication_strength_policy|claims_mapping_policy|group_role_management_policy)((.|\n)*)###' feature/service-principals: - - '### (|New or )Affected Resource\(s\)\/Data Source\(s\)((.|\n)*)azuread_(client_config|service_principal|synchronization_)((.|\n)*)###' + - '### (|New or )Affected Resource\(s\)\/Data Source\(s\)((.|\n)*)azuread_(client_config|service_principal)((.|\n)*)###' + +feature/synchronization: + - '### (|New or )Affected Resource\(s\)\/Data Source\(s\)((.|\n)*)azuread_synchronization_((.|\n)*)###' feature/user-flows: - '### (|New or )Affected Resource\(s\)\/Data Source\(s\)((.|\n)*)azuread_user_flow_attribute((.|\n)*)###' diff --git a/.github/labeler-pull-request-triage.yaml b/.github/labeler-pull-request-triage.yaml index 0c01fa9a7b..7227a9eff2 100644 --- a/.github/labeler-pull-request-triage.yaml +++ b/.github/labeler-pull-request-triage.yaml @@ -80,6 +80,10 @@ feature/service-principals: - changed-files: - any-glob-to-any-file: - internal/services/serviceprincipals/**/* + +feature/synchronization: +- changed-files: + - any-glob-to-any-file: - internal/services/synchronization/**/* feature/user-flows: diff --git a/.github/workflows/depscheck.yaml b/.github/workflows/depscheck.yaml index 81aca7ffda..6875a99f72 100644 --- a/.github/workflows/depscheck.yaml +++ b/.github/workflows/depscheck.yaml @@ -12,8 +12,8 @@ jobs: depscheck: runs-on: ubuntu-latest steps: - - uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4 - - uses: actions/setup-go@0c52d547c9bc32b1aa3301fd7a9cb496313a4491 # v5.0.0 + - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6 + - uses: actions/setup-go@cdcb36043654635271a94b9a6d1392de5bb323a7 # v5.0.1 with: go-version-file: .go-version - run: bash scripts/gogetcookie.sh diff --git a/.github/workflows/docs-lint.yaml b/.github/workflows/docs-lint.yaml index 209c36475a..812d0ba5a2 100644 --- a/.github/workflows/docs-lint.yaml +++ b/.github/workflows/docs-lint.yaml @@ -11,8 +11,8 @@ jobs: docs-lint: runs-on: ubuntu-latest steps: - - uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4 - - uses: actions/setup-go@0c52d547c9bc32b1aa3301fd7a9cb496313a4491 # v5.0.0 + - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6 + - uses: actions/setup-go@cdcb36043654635271a94b9a6d1392de5bb323a7 # v5.0.1 with: go-version-file: .go-version - run: bash scripts/gogetcookie.sh diff --git a/.github/workflows/gencheck.yaml b/.github/workflows/gencheck.yaml index 7fdd49851b..83467d671d 100644 --- a/.github/workflows/gencheck.yaml +++ b/.github/workflows/gencheck.yaml @@ -17,10 +17,10 @@ concurrency: jobs: gencheck: - runs-on: [custom, linux, large] + runs-on: custom-linux-large steps: - - uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4 - - uses: actions/setup-go@0c52d547c9bc32b1aa3301fd7a9cb496313a4491 # v5.0.0 + - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6 + - uses: actions/setup-go@cdcb36043654635271a94b9a6d1392de5bb323a7 # v5.0.1 with: go-version-file: ./.go-version - run: bash scripts/gogetcookie.sh diff --git a/.github/workflows/golint.yaml b/.github/workflows/golint.yaml index 3a37bdf644..018a7ba8d3 100644 --- 
a/.github/workflows/golint.yaml +++ b/.github/workflows/golint.yaml @@ -12,8 +12,8 @@ jobs: golint: runs-on: ubuntu-latest steps: - - uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4 - - uses: actions/setup-go@0c52d547c9bc32b1aa3301fd7a9cb496313a4491 # v5.0.0 + - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6 + - uses: actions/setup-go@cdcb36043654635271a94b9a6d1392de5bb323a7 # v5.0.1 with: go-version-file: .go-version - uses: golangci/golangci-lint-action@3a919529898de77ec3da873e3063ca4b10e7f5cc # v3.7.0 diff --git a/.github/workflows/increment-milestone.yaml b/.github/workflows/increment-milestone.yaml index 730a3ead05..2ac8f4e780 100644 --- a/.github/workflows/increment-milestone.yaml +++ b/.github/workflows/increment-milestone.yaml @@ -14,7 +14,7 @@ jobs: increment-milestone: runs-on: ubuntu-latest steps: - - uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4 + - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6 with: fetch-depth: 0 diff --git a/.github/workflows/issue-opened.yaml b/.github/workflows/issue-opened.yaml index 94f965b7fc..62a905bdfa 100644 --- a/.github/workflows/issue-opened.yaml +++ b/.github/workflows/issue-opened.yaml @@ -12,7 +12,7 @@ jobs: issue_triage: runs-on: ubuntu-latest steps: - - uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4 + - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6 - uses: github/issue-labeler@c1b0f9f52a63158c4adc09425e858e87b32e9685 # v3.4 with: repo-token: "${{ secrets.GITHUB_TOKEN }}" diff --git a/.github/workflows/link-milestone.yaml b/.github/workflows/link-milestone.yaml index f8356d455f..8ab85b9cca 100644 --- a/.github/workflows/link-milestone.yaml +++ b/.github/workflows/link-milestone.yaml @@ -15,7 +15,7 @@ jobs: pull-requests: write issues: write steps: - - uses: actions/setup-go@0c52d547c9bc32b1aa3301fd7a9cb496313a4491 # v5.0.0 + - uses: actions/setup-go@cdcb36043654635271a94b9a6d1392de5bb323a7 # v5.0.1 with: go-version: '1.21.3' diff --git a/.github/workflows/provider-test.yaml b/.github/workflows/provider-test.yaml index b55f0b4b72..56d80e02ae 100644 --- a/.github/workflows/provider-test.yaml +++ b/.github/workflows/provider-test.yaml @@ -31,15 +31,15 @@ jobs: fi provider-tests: - runs-on: [custom, linux, large] + runs-on: custom-linux-large needs: [secrets-check] if: needs.secrets-check.outputs.available == 'true' steps: - name: Checkout - uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4 + uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6 - name: Install Go - uses: actions/setup-go@0c52d547c9bc32b1aa3301fd7a9cb496313a4491 # v5.0.0 + uses: actions/setup-go@cdcb36043654635271a94b9a6d1392de5bb323a7 # v5.0.1 with: go-version-file: ./.go-version diff --git a/.github/workflows/pull-request.yaml b/.github/workflows/pull-request.yaml index 0a4525609c..89e266b7a0 100644 --- a/.github/workflows/pull-request.yaml +++ b/.github/workflows/pull-request.yaml @@ -16,7 +16,7 @@ jobs: configuration-path: .github/labeler-pull-request-triage.yaml repo-token: "${{ secrets.GITHUB_TOKEN }}" - - uses: CodelyTV/pr-size-labeler@f2aafc4d8735009c6de18acefe15eecbfbfae56f # v1.9.0 + - uses: CodelyTV/pr-size-labeler@56f6f0fc35c7cc0f72963b8467729e1120cb4bed # v1.10.0 with: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} xs_label: 'size/XS' diff --git a/.github/workflows/release.yaml b/.github/workflows/release.yaml index 39886c28c8..1f313c972d 100644 --- 
a/.github/workflows/release.yaml +++ b/.github/workflows/release.yaml @@ -13,7 +13,7 @@ jobs: release-notes: runs-on: ubuntu-latest steps: - - uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4 + - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6 with: fetch-depth: 0 @@ -29,7 +29,7 @@ jobs: terraform-provider-release: name: 'Terraform Provider Release' needs: release-notes - uses: hashicorp/ghaction-terraform-provider-release/.github/workflows/hashicorp.yml@9b5d2ca4b85f3a54d5c4d12e7690ddad1526ff6c # v3.0.1 + uses: hashicorp/ghaction-terraform-provider-release/.github/workflows/hashicorp.yml@393dac4dd208c749b1622323f9f0e8d26a6f26cc # v4.0.1 secrets: hc-releases-github-token: '${{ secrets.HASHI_RELEASES_GITHUB_TOKEN }}' hc-releases-host-staging: '${{ secrets.HC_RELEASES_HOST_STAGING }}' diff --git a/.github/workflows/teamcity-test.yaml b/.github/workflows/teamcity-test.yaml index 40d7c21c55..c1c48a5b0e 100644 --- a/.github/workflows/teamcity-test.yaml +++ b/.github/workflows/teamcity-test.yaml @@ -21,7 +21,7 @@ jobs: teamcity-test: runs-on: ubuntu-latest steps: - - uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4 + - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6 - uses: actions/setup-java@99b8673ff64fbf99d8d325f52d9a5bdedb8483e9 # v4.2.1 with: distribution: zulu diff --git a/.github/workflows/tflint.yaml b/.github/workflows/tflint.yaml index 7999b89e09..8dbdd78f1c 100644 --- a/.github/workflows/tflint.yaml +++ b/.github/workflows/tflint.yaml @@ -20,8 +20,8 @@ jobs: tflint: runs-on: ubuntu-latest steps: - - uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4 - - uses: actions/setup-go@0c52d547c9bc32b1aa3301fd7a9cb496313a4491 # v5.0.0 + - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6 + - uses: actions/setup-go@cdcb36043654635271a94b9a6d1392de5bb323a7 # v5.0.1 with: go-version-file: ./.go-version - run: bash scripts/gogetcookie.sh diff --git a/.github/workflows/thirty-two-bit.yaml b/.github/workflows/thirty-two-bit.yaml index 333992c5d9..b7f029d0f1 100644 --- a/.github/workflows/thirty-two-bit.yaml +++ b/.github/workflows/thirty-two-bit.yaml @@ -21,8 +21,8 @@ jobs: compatibility-32bit-test: runs-on: ubuntu-latest steps: - - uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4 - - uses: actions/setup-go@0c52d547c9bc32b1aa3301fd7a9cb496313a4491 # v5.0.0 + - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6 + - uses: actions/setup-go@cdcb36043654635271a94b9a6d1392de5bb323a7 # v5.0.1 with: go-version-file: ./.go-version - run: bash scripts/gogetcookie.sh diff --git a/.github/workflows/unit-test.yaml b/.github/workflows/unit-test.yaml index 4f64b6e056..cc63a6bbd7 100644 --- a/.github/workflows/unit-test.yaml +++ b/.github/workflows/unit-test.yaml @@ -19,10 +19,10 @@ concurrency: jobs: test: - runs-on: [custom, linux, large] + runs-on: custom-linux-large steps: - - uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4 - - uses: actions/setup-go@0c52d547c9bc32b1aa3301fd7a9cb496313a4491 # v5.0.0 + - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6 + - uses: actions/setup-go@cdcb36043654635271a94b9a6d1392de5bb323a7 # v5.0.1 with: go-version-file: ./.go-version - run: bash scripts/gogetcookie.sh diff --git a/.github/workflows/validate-examples.yaml b/.github/workflows/validate-examples.yaml index 5d42855a29..edfc5103d1 100644 --- a/.github/workflows/validate-examples.yaml +++ 
b/.github/workflows/validate-examples.yaml @@ -20,8 +20,8 @@ jobs: validate-examples: runs-on: ubuntu-latest steps: - - uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4 - - uses: actions/setup-go@0c52d547c9bc32b1aa3301fd7a9cb496313a4491 # v5.0.0 + - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6 + - uses: actions/setup-go@cdcb36043654635271a94b9a6d1392de5bb323a7 # v5.0.1 with: go-version-file: ./.go-version - run: bash scripts/gogetcookie.sh diff --git a/.teamcity/components/generated/services.kt b/.teamcity/components/generated/services.kt index d77ef0deb9..935abb4843 100644 --- a/.teamcity/components/generated/services.kt +++ b/.teamcity/components/generated/services.kt @@ -15,6 +15,7 @@ var services = mapOf( "invitations" to "Invitations", "policies" to "Policies", "serviceprincipals" to "Service Principals", + "synchronization" to "Synchronization", "userflows" to "User Flows", "users" to "Users" ) \ No newline at end of file diff --git a/CHANGELOG.md b/CHANGELOG.md index db39136e75..0049d1ba2d 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,18 +1,64 @@ -## 2.49.0 (Unreleased) +## 2.52.0 (June 13, 2024) + +BUG FIXES: + +* `azuread_application` - fix a bug that could prevent the `ignore_changes` lifecycle argument from working for the `app_role`, `oauth2_permission_scope`, `identifier_uris`, `optional_claims`, and `required_resource_access` properties ([#1403](https://github.com/hashicorp/terraform-provider-azuread/issues/1403)) +* `azuread_application` - add a workaround for an API bug when instantiating an application from template using the `template_id` property ([#1406](https://github.com/hashicorp/terraform-provider-azuread/issues/1406)) + +## 2.51.0 (June 06, 2024) + +ENHANCEMENTS: + +* `data.azuread_users` - support for the `mails` property ([#1400](https://github.com/hashicorp/terraform-provider-azuread/issues/1400)) + +BUG FIXES: + +* `azuread_access_package_assignment_policy` - fix a bug preventing removal of the `assignment_review_settings` block ([#1394](https://github.com/hashicorp/terraform-provider-azuread/issues/1394)) + +## 2.50.0 (May 16, 2024) + +ENHANCEMENTS: + +* dependencies: updating to `v0.68.0` of `github.com/manicminer/hamilton` ([#1382](https://github.com/hashicorp/terraform-provider-azuread/issues/1382)) +* `data.azuread_application` - support looking up applications with the `identifier_uri` property ([#1303](https://github.com/hashicorp/terraform-provider-azuread/issues/1303)) +* `azuread_conditional_access_policy` - improve handling of the `session_controls` block ([#1382](https://github.com/hashicorp/terraform-provider-azuread/issues/1382)) + +BUG FIXES: + +* `data.azuread_service_principal` - treat the `display_name` property case-insensitively ([#1381](https://github.com/hashicorp/terraform-provider-azuread/issues/1381)) +* `azuread_conditional_access_policy` - fix a bug that could cause a persistent diff when setting certain properties in the `session_controls` block ([#1382](https://github.com/hashicorp/terraform-provider-azuread/issues/1382)) +* `azuread_user` - don't overwrite the existing password in state, when a password change fails ([#1308](https://github.com/hashicorp/terraform-provider-azuread/issues/1308)) + +## 2.49.1 (May 13, 2024) + +BUG FIXES: + +* `data.azuread_group_role_management_policy` - resolve a potential crash ([#1375](https://github.com/hashicorp/terraform-provider-azuread/issues/1375)) +* `azuread_group_role_management_policy` - resolve a number of potential crashes 
([#1375](https://github.com/hashicorp/terraform-provider-azuread/issues/1375)) +* `azuread_privileged_access_group_assignment_schedule` - resolve a number of potential crashes ([#1375](https://github.com/hashicorp/terraform-provider-azuread/issues/1375)) +* `azuread_privileged_access_group_eligibility_schedule` - resolve a number of potential crashes ([#1375](https://github.com/hashicorp/terraform-provider-azuread/issues/1375)) + +## 2.49.0 (May 09, 2024) FEATURES: -* **New Data Source:** `azuread_group_role_management_policy` [GH-1327] -* **New Resource:** `azuread_group_role_management_policy` [GH-1327] -* **New Resource:** `azuread_privileged_access_group_assignment_schedule` [GH-1327] -* **New Resource:** `azuread_privileged_access_group_eligibility_schedule` [GH-1327] +* **New Data Source:** `azuread_group_role_management_policy` ([#1327](https://github.com/hashicorp/terraform-provider-azuread/issues/1327)) +* **New Resource:** `azuread_group_role_management_policy` ([#1327](https://github.com/hashicorp/terraform-provider-azuread/issues/1327)) +* **New Resource:** `azuread_privileged_access_group_assignment_schedule` ([#1327](https://github.com/hashicorp/terraform-provider-azuread/issues/1327)) +* **New Resource:** `azuread_privileged_access_group_eligibility_schedule` ([#1327](https://github.com/hashicorp/terraform-provider-azuread/issues/1327)) +* **New Resource:** `azuread_synchronization_job_provision_on_demand` ([#1032](https://github.com/hashicorp/terraform-provider-azuread/issues/1032)) ENHANCEMENTS: -* `data.azuread_group` - support for the `include_transitive_members` property [GH-1300] -* `azuread_application` - relax validation for the `identifier_uris` property to allow more values [GH-1351] -* `azuread_application_identifier_uri` - relax validation for the `identifier_uri` property to allow more values [GH-1351] -* `azuread_user` - relax validation for the `employee_type` property to allow more values [GH-1328] +* `data.azuread_group` - support for the `include_transitive_members` property ([#1300](https://github.com/hashicorp/terraform-provider-azuread/issues/1300)) +* `azuread_application` - relax validation for the `identifier_uris` property to allow more values ([#1351](https://github.com/hashicorp/terraform-provider-azuread/issues/1351)) +* `azuread_application_identifier_uri` - relax validation for the `identifier_uri` property to allow more values ([#1351](https://github.com/hashicorp/terraform-provider-azuread/issues/1351)) +* `azuread_group` - support the `SkipExchangeInstantOn` value for the `behaviors` property ([#1370](https://github.com/hashicorp/terraform-provider-azuread/issues/1370)) +* `azuread_user` - relax validation for the `employee_type` property to allow more values ([#1328](https://github.com/hashicorp/terraform-provider-azuread/issues/1328)) + +BUG FIXES: + +* `azuread_application_pre_authorized` - fix a destroy-time bug that could prevent deletion of the resource ([#1299](https://github.com/hashicorp/terraform-provider-azuread/issues/1299)) ## 2.48.0 (April 11, 2024) diff --git a/GNUmakefile b/GNUmakefile index c903bd3100..2ca33b4622 100644 --- a/GNUmakefile +++ b/GNUmakefile @@ -21,6 +21,10 @@ tools: build: fmtcheck go install +debug: fmtcheck + go build -gcflags="all=-N -l" -trimpath -o terraform-provider-azuread + dlv exec --listen=:51000 --headless=true --api-version=2 --accept-multiclient --continue terraform-provider-azuread -- -debug + fumpt: @echo "==> Fixing source code with gofmt..." 
# This logic should match the search logic in scripts/gofmtcheck.sh diff --git a/README.md b/README.md index 7028bdbf62..0b5b661602 100644 --- a/README.md +++ b/README.md @@ -102,6 +102,18 @@ $ $GOPATH/bin/terraform-provider-azuread ... ``` +To compile the provider for attached debugging run `make debug`. + +```sh +$ make debug +... +Provider started. To attach Terraform CLI, set the TF_REATTACH_PROVIDERS environment variable with the following: + TF_REATTACH_PROVIDERS='{"registry.terraform.io/hashicorp/azuread":{"Protocol":"grpc","ProtocolVersion":5,"Pid":16227,"Test":true,"Addr":{"Network":"unix","String":"/var/folders/dy/r91ps1bx7fscm_v64qbwd0nh0000gn/T/plugin1540622971"}}}' +``` + +See the [documentation](https://developer.hashicorp.com/terraform/plugin/debugging#starting-a-provider-in-debug-mode) for attaching a debugger. + + In order to test the provider, you can simply run `make test`. ```sh diff --git a/docs/data-sources/application.md b/docs/data-sources/application.md index 9a66375a1a..21868e99a1 100644 --- a/docs/data-sources/application.md +++ b/docs/data-sources/application.md @@ -33,8 +33,9 @@ The following arguments are supported: * `client_id` - (Optional) Specifies the Client ID of the application. * `display_name` - (Optional) Specifies the display name of the application. * `object_id` - (Optional) Specifies the Object ID of the application. +* `identifier_uri` - (Optional) Specifies any identifier URI of the application. See also the `identifier_uris` attribute which contains a list of all identifier URIs for the application. -~> One of `client_id`, `display_name`, or `object_id` must be specified. +~> One of `client_id`, `display_name`, `object_id`, or `identifier_uri` must be specified. ## Attributes Reference diff --git a/docs/data-sources/users.md b/docs/data-sources/users.md index c0974fe77b..0f6f83c774 100644 --- a/docs/data-sources/users.md +++ b/docs/data-sources/users.md @@ -29,11 +29,12 @@ The following arguments are supported: * `employee_ids` - (Optional) The employee identifiers assigned to the users by the organisation. * `ignore_missing` - (Optional) Ignore missing users and return users that were found. The data source will still fail if no users are found. Cannot be specified with `return_all`. Defaults to `false`. * `mail_nicknames` - (Optional) The email aliases of the users. +* `mails` - (Optional) The SMTP email addresses of the users. * `object_ids` - (Optional) The object IDs of the users. * `return_all` - (Optional) When `true`, the data source will return all users. Cannot be used with `ignore_missing`. Defaults to `false`. * `user_principal_names` - (Optional) The user principal names (UPNs) of the users. -~> Either `return_all`, or one of `user_principal_names`, `object_ids`, `mail_nicknames` or `employee_ids` must be specified. These _may_ be specified as an empty list, in which case no results will be returned. +~> Either `return_all`, or one of `user_principal_names`, `object_ids`, `mail_nicknames`, `mails`, or `employee_ids` must be specified. These _may_ be specified as an empty list, in which case no results will be returned. ## Attributes Reference @@ -41,6 +42,7 @@ The following attributes are exported: * `employee_ids` - The employee identifiers assigned to the users by the organisation. * `mail_nicknames` - The email aliases of the users. +* `mails` - The SMTP email addresses of the users. * `object_ids` - The object IDs of the users. * `user_principal_names` - The user principal names (UPNs) of the users. 
* `users` - A list of users. Each `user` object provides the attributes documented below. @@ -49,11 +51,11 @@ The following attributes are exported: `user` object exports the following: -* `account_enabled` - Whether or not the account is enabled. +* `account_enabled` - Whether the account is enabled. * `display_name` - The display name of the user. * `employee_id` - The employee identifier assigned to the user by the organisation. * `mail_nickname` - The email alias of the user. -* `mail` - The primary email address of the user. +* `mail` - The SMTP email address of the user. * `object_id` - The object ID of the user. * `onpremises_immutable_id` - The value used to associate an on-premises Active Directory user account with their Azure AD user object. * `onpremises_sam_account_name` - The on-premise SAM account name of the user. diff --git a/docs/resources/administrative_unit.md b/docs/resources/administrative_unit.md index afc0d02440..b11fadfde0 100644 --- a/docs/resources/administrative_unit.md +++ b/docs/resources/administrative_unit.md @@ -32,6 +32,8 @@ The following arguments are supported: * `display_name` - (Required) The display name of the administrative unit. * `members` - (Optional) A set of object IDs of members who should be present in this administrative unit. Supported object types are Users or Groups. +~> **Caution** When using the `members` property of the [azuread_administrative_unit](https://registry.terraform.io/providers/hashicorp/azuread/latest/docs/resources/administrative_unit#members) resource, to manage Administrative Unit membership for a group, you will need to use an `ignore_changes = [administrative_unit_ids]` lifecycle meta argument for the `azuread_group` resource, in order to avoid a persistent diff. + !> **Warning** Do not use the `members` property at the same time as the [azuread_administrative_unit_member](https://registry.terraform.io/providers/hashicorp/azuread/latest/docs/resources/administrative_unit_member) resource for the same administrative unit. Doing so will cause a conflict and administrative unit members will be removed. * `hidden_membership_enabled` - (Optional) Whether the administrative unit and its members are hidden or publicly viewable in the directory. diff --git a/docs/resources/administrative_unit_member.md b/docs/resources/administrative_unit_member.md index 64cfedc3c0..df3dd4e071 100644 --- a/docs/resources/administrative_unit_member.md +++ b/docs/resources/administrative_unit_member.md @@ -41,6 +41,8 @@ The following arguments are supported: * `administrative_unit_object_id` - (Required) The object ID of the administrative unit you want to add the member to. Changing this forces a new resource to be created. * `member_object_id` - (Required) The object ID of the user or group you want to add as a member of the administrative unit. Changing this forces a new resource to be created. +~> **Caution** When using the [azuread_administrative_unit_member](https://registry.terraform.io/providers/hashicorp/azuread/latest/docs/resources/administrative_unit_member) resource to manage Administrative Unit membership for a group, you will need to use an `ignore_changes = [administrative_unit_ids]` lifecycle meta argument for the `azuread_group` resource, in order to avoid a persistent diff. 
+ ## Attributes Reference In addition to all arguments above, the following attributes are exported: diff --git a/docs/resources/group.md b/docs/resources/group.md index 718355b833..bae7bb102f 100644 --- a/docs/resources/group.md +++ b/docs/resources/group.md @@ -112,14 +112,14 @@ The following arguments are supported: * `administrative_unit_ids` - (Optional) The object IDs of administrative units in which the group is a member. If specified, new groups will be created in the scope of the first administrative unit and added to the others. If empty, new groups will be created at the tenant level. -!> **Warning** Do not use the `administrative_unit_ids` property at the same time as the [azuread_administrative_unit_member](https://registry.terraform.io/providers/hashicorp/azuread/latest/docs/resources/administrative_unit_member) resource, or the `members` property of the [azuread_administrative_unit](https://registry.terraform.io/providers/hashicorp/azuread/latest/docs/resources/administrative_unit#members) resource, _for the same group_. Doing so will cause a conflict and administrative unit members will be removed. +~> **Caution** When using the [azuread_administrative_unit_member](https://registry.terraform.io/providers/hashicorp/azuread/latest/docs/resources/administrative_unit_member) resource, or the `members` property of the [azuread_administrative_unit](https://registry.terraform.io/providers/hashicorp/azuread/latest/docs/resources/administrative_unit#members) resource, to manage Administrative Unit membership for a group, you will need to use an `ignore_changes = [administrative_unit_ids]` lifecycle meta argument for the `azuread_group` resource, in order to avoid a persistent diff. * `assignable_to_role` - (Optional) Indicates whether this group can be assigned to an Azure Active Directory role. Defaults to `false`. Can only be set to `true` for security-enabled groups. Changing this forces a new resource to be created. * `auto_subscribe_new_members` - (Optional) Indicates whether new members added to the group will be auto-subscribed to receive email notifications. Can only be set for Unified groups. ~> **Known Permissions Issue** The `auto_subscribe_new_members` property can only be set when authenticating as a Member user of the tenant and _not_ when authenticating as a Guest user or as a service principal. Please see the [Microsoft Graph Known Issues](https://docs.microsoft.com/en-us/graph/known-issues#groups) documentation. -* `behaviors` - (Optional) A set of behaviors for a Microsoft 365 group. Possible values are `AllowOnlyMembersToPost`, `HideGroupInOutlook`, `SubscribeMembersToCalendarEventsDisabled`, `SubscribeNewGroupMembers` and `WelcomeEmailDisabled`. See [official documentation](https://docs.microsoft.com/en-us/graph/group-set-options) for more details. Changing this forces a new resource to be created. +* `behaviors` - (Optional) A set of behaviors for a Microsoft 365 group. Possible values are `AllowOnlyMembersToPost`, `HideGroupInOutlook`, `SkipExchangeInstantOn`, `SubscribeMembersToCalendarEventsDisabled`, `SubscribeNewGroupMembers` and `WelcomeEmailDisabled`. See [official documentation](https://docs.microsoft.com/en-us/graph/group-set-options) for more details. Changing this forces a new resource to be created. * `description` - (Optional) The description for the group. * `display_name` - (Required) The display name for the group. * `dynamic_membership` - (Optional) A `dynamic_membership` block as documented below. 
Required when `types` contains `DynamicMembership`. Cannot be used with the `members` property. diff --git a/docs/resources/group_role_management_policy.md b/docs/resources/group_role_management_policy.md index ede936e93e..be238998b5 100644 --- a/docs/resources/group_role_management_policy.md +++ b/docs/resources/group_role_management_policy.md @@ -30,8 +30,8 @@ resource "azuread_user" "member" { } resource "azuread_group_role_management_policy" "example" { - group_id = azuread_group.example.id - assignment_type = "member" + group_id = azuread_group.example.id + role_id = "member" active_assignment_rules { expire_after = "P365D" @@ -70,12 +70,12 @@ resource "azuread_group_role_management_policy" "example" { An `activation_rules` block supports the following: - `approval_stage` - (Optional) An `approval_stage` block as defined below. -- `maximum_duration` - (Optional) The maximum length of time an activated role can be valid, in an IS)8601 Duration format (e.g. `PT8H`). Valid range is `PT30M` to `PT23H30M`, in 30 minute increments, or `PT1D`. +- `maximum_duration` - (Optional) The maximum length of time an activated role can be valid, in an ISO8601 Duration format (e.g. `PT8H`). Valid range is `PT30M` to `PT23H30M`, in 30 minute increments, or `PT1D`. - `require_approval` - (Optional) Is approval required for activation. If `true` an `approval_stage` block must be provided. - `require_justification` - (Optional) Is a justification required during activation of the role. - `require_multifactor_authentication` - (Optional) Is multi-factor authentication required to activate the role. Conflicts with `required_conditional_access_authentication_context`. - `require_ticket_info` - (Optional) Is ticket information requrired during activation of the role. -- `required_conditional_access_authentication_context` - (Optional) The Entra ID Conditional Access context that must be present for activation. Conflicts with `require_multifactor_authentication`. +- `required_conditional_access_authentication_context` - (Optional) The Entra ID Conditional Access context that must be present for activation (e.g `c1`). Conflicts with `require_multifactor_authentication`. --- diff --git a/docs/resources/synchronization_job_provision_on_demand.md b/docs/resources/synchronization_job_provision_on_demand.md new file mode 100644 index 0000000000..828bbc1ac0 --- /dev/null +++ b/docs/resources/synchronization_job_provision_on_demand.md @@ -0,0 +1,110 @@ +--- +subcategory: "Synchronization" +--- + +# Resource: azuread_synchronization_job_provision_on_demand + +Manages synchronization job on demand provisioning associated with a service principal (enterprise application) within Azure Active Directory. + +## API Permissions + +The following API permissions are required in order to use this resource. 
+ +When authenticated with a service principal, this resource requires one of the following application roles: `Synchronization.ReadWrite.All` + +## Example Usage + +*Basic example* + +```terraform +data "azuread_client_config" "current" {} + +resource "azuread_group" "example" { + display_name = "example" + owners = [data.azuread_client_config.current.object_id] + security_enabled = true +} + +data "azuread_application_template" "example" { + display_name = "Azure Databricks SCIM Provisioning Connector" +} + +resource "azuread_application" "example" { + display_name = "example" + template_id = data.azuread_application_template.example.template_id + feature_tags { + enterprise = true + gallery = true + } +} + +resource "azuread_service_principal" "example" { + client_id = azuread_application.example.client_id + use_existing = true +} + +resource "azuread_synchronization_secret" "example" { + service_principal_id = azuread_service_principal.example.id + + credential { + key = "BaseAddress" + value = "https://adb-example.azuredatabricks.net/api/2.0/preview/scim" + } + credential { + key = "SecretToken" + value = "some-token" + } +} + +resource "azuread_synchronization_job" "example" { + service_principal_id = azuread_service_principal.example.id + template_id = "dataBricks" + enabled = true +} + +resource "azuread_synchronization_job_provision_on_demand" "example" { + service_principal_id = azuread_service_principal.example.id + synchronization_job_id = azuread_synchronization_job.example.id + parameter { + # see specific synchronization schema for rule id https://learn.microsoft.com/en-us/graph/api/synchronization-synchronizationschema-get?view=graph-rest-beta + rule_id = "" + subject { + object_id = azuread_group.example.object_id + object_type_name = "Group" + } + } +} + +``` + +## Argument Reference + +The following arguments are supported: + + +- `synchronization_job_id` (Required) Identifier of the synchronization template this job is based on. +- `parameter` (Required) One or more `parameter` blocks as documented below. +- `service_principal_id` (Required) The object ID of the service principal for the synchronization job. +- `triggers` (Optional) Map of arbitrary keys and values that, when changed, will trigger a re-invocation. To force a re-invocation without changing these keys/values, use the [`terraform taint` command](https://www.terraform.io/docs/commands/taint.html). + +--- + +`parameter` block supports the following: + +* `rule_id` (Required) The identifier of the synchronization rule to be applied. This rule ID is defined in the schema for a given synchronization job or template. +* `subject` (Required) One or more `subject` blocks as documented below. + +--- + +`subject` block supports the following: + +* `object_id` (String) The identifier of an object to which a synchronization job is to be applied. Can be one of the following: (1) An onPremisesDistinguishedName for synchronization from Active Directory to Azure AD. (2) The user ID for synchronization from Azure AD to a third-party. (3) The Worker ID of the Workday worker for synchronization from Workday to either Active Directory or Azure AD. +* `object_type_name` (String) The type of the object to which a synchronization job is to be applied. 
Can be one of the following: `user` for synchronizing between Active Directory and Azure AD, `User` for synchronizing a user between Azure AD and a third-party application, `Worker` for synchronizing a user between Workday and either Active Directory or Azure AD, `Group` for synchronizing a group between Azure AD and a third-party application. + +## Attributes Reference + +No additional attributes are exported. + +## Import + +This resource does not support importing. diff --git a/go.mod b/go.mod index d3f8109008..f87f4a167f 100644 --- a/go.mod +++ b/go.mod @@ -9,7 +9,7 @@ require ( github.com/hashicorp/go-uuid v1.0.3 github.com/hashicorp/terraform-plugin-sdk/v2 v2.29.0 github.com/hashicorp/terraform-plugin-testing v1.5.1 - github.com/manicminer/hamilton v0.67.0 + github.com/manicminer/hamilton v0.70.0 golang.org/x/text v0.14.0 ) @@ -63,3 +63,5 @@ require ( ) go 1.21.3 + +replace github.com/manicminer/hamilton => github.com/MarkDordoy/hamilton v0.17.1-0.20240611151114-899c6ce169f6 diff --git a/go.sum b/go.sum index c9861012b7..bde093446b 100644 --- a/go.sum +++ b/go.sum @@ -1,5 +1,7 @@ dario.cat/mergo v1.0.0 h1:AGCNq9Evsj31mOgNPcLyXc+4PNABt905YmuqPYYpBWk= dario.cat/mergo v1.0.0/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk= +github.com/MarkDordoy/hamilton v0.17.1-0.20240611151114-899c6ce169f6 h1:yRxIRrSebI7v7BspqjteIZKrnNQDWhRA68NVeU8cTmI= +github.com/MarkDordoy/hamilton v0.17.1-0.20240611151114-899c6ce169f6/go.mod h1:u80g9rPtJpCG7EC0iayttt8UfeAp6jknClixgZGE950= github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow= github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM= github.com/ProtonMail/go-crypto v0.0.0-20230828082145-3c4c8a2d2371 h1:kkhsdkhsCvIsutKu5zLMgWtgh9YxGCNAw8Ad8hjwfYg= @@ -111,8 +113,6 @@ github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= -github.com/manicminer/hamilton v0.67.0 h1:hG3tPunQCGcgP2Nx0+lwW+Swu9MXOs4JGospakK79pY= -github.com/manicminer/hamilton v0.67.0/go.mod h1:u80g9rPtJpCG7EC0iayttt8UfeAp6jknClixgZGE950= github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4= github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= diff --git a/internal/provider/helpers.go b/internal/provider/helpers.go index e0408ef898..04445848a8 100644 --- a/internal/provider/helpers.go +++ b/internal/provider/helpers.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + package provider import ( diff --git a/internal/services/administrativeunits/administrative_unit_member_resource_test.go b/internal/services/administrativeunits/administrative_unit_member_resource_test.go index 67a7653c5f..a543198be6 100644 --- a/internal/services/administrativeunits/administrative_unit_member_resource_test.go +++ b/internal/services/administrativeunits/administrative_unit_member_resource_test.go @@ -162,6 +162,9 @@ func (r AdministrativeUnitMemberResource) group(data acceptance.TestData) string resource "azuread_group" "member" { display_name = "acctest-AdministrativeUnitMember-%[2]d" security_enabled = true + lifecycle { + ignore_changes = [administrative_unit_ids] + } } resource "azuread_administrative_unit_member" "test" { diff --git a/internal/services/administrativeunits/administrative_unit_resource_test.go b/internal/services/administrativeunits/administrative_unit_resource_test.go index d1cdb0fece..f3500c5a98 100644 --- a/internal/services/administrativeunits/administrative_unit_resource_test.go +++ b/internal/services/administrativeunits/administrative_unit_resource_test.go @@ -140,6 +140,9 @@ data "azuread_domains" "test" { resource "azuread_group" "member" { display_name = "acctest-AdministrativeUnitMember-%[1]d" security_enabled = true + lifecycle { + ignore_changes = [administrative_unit_ids] + } } resource "azuread_user" "memberA" { diff --git a/internal/services/applications/application_data_source.go b/internal/services/applications/application_data_source.go index 16fbdce9ca..be245e3510 100644 --- a/internal/services/applications/application_data_source.go +++ b/internal/services/applications/application_data_source.go @@ -34,7 +34,7 @@ func applicationDataSource() *pluginsdk.Resource { Type: pluginsdk.TypeString, Optional: true, Computed: true, - ExactlyOneOf: []string{"application_id", "client_id", "display_name", "object_id"}, + ExactlyOneOf: []string{"application_id", "client_id", "display_name", "object_id", "identifier_uri"}, ValidateDiagFunc: validation.ValidateDiag(validation.IsUUID), }, @@ -43,7 +43,7 @@ func applicationDataSource() *pluginsdk.Resource { Type: pluginsdk.TypeString, Optional: true, Computed: true, - ExactlyOneOf: []string{"application_id", "client_id", "display_name", "object_id"}, + ExactlyOneOf: []string{"application_id", "client_id", "display_name", "object_id", "identifier_uri"}, ValidateDiagFunc: validation.ValidateDiag(validation.IsUUID), Deprecated: "The `application_id` property has been replaced with the `client_id` property and will be removed in version 3.0 of the AzureAD provider", }, @@ -53,10 +53,19 @@ func applicationDataSource() *pluginsdk.Resource { Type: pluginsdk.TypeString, Optional: true, Computed: true, - ExactlyOneOf: []string{"application_id", "client_id", "display_name", "object_id"}, + ExactlyOneOf: []string{"application_id", "client_id", "display_name", "object_id", "identifier_uri"}, ValidateDiagFunc: validation.ValidateDiag(validation.IsUUID), }, + "identifier_uri": { + Description: "One of the application's identifier URIs", + Type: pluginsdk.TypeString, + Optional: true, + Computed: true, + ExactlyOneOf: []string{"application_id", "client_id", "display_name", "object_id", "identifier_uri"}, + ValidateDiagFunc: validation.ValidateDiag(validation.StringIsNotEmpty), + }, + "disabled_by_microsoft": { Description: "Whether Microsoft has disabled the registered application", Type: pluginsdk.TypeString, @@ -68,7 +77,7 @@ func applicationDataSource() *pluginsdk.Resource 
{ Type: pluginsdk.TypeString, Optional: true, Computed: true, - ExactlyOneOf: []string{"application_id", "client_id", "display_name", "object_id"}, + ExactlyOneOf: []string{"application_id", "client_id", "display_name", "object_id", "identifier_uri"}, ValidateDiagFunc: validation.ValidateDiag(validation.StringIsNotEmpty), }, @@ -525,6 +534,7 @@ func applicationDataSourceRead(ctx context.Context, d *pluginsdk.ResourceData, m } } else { var fieldName, fieldValue string + filterOp := "%s eq '%s'" if applicationId, ok := d.Get("application_id").(string); ok && applicationId != "" { fieldName = "appId" fieldValue = applicationId @@ -534,11 +544,15 @@ func applicationDataSourceRead(ctx context.Context, d *pluginsdk.ResourceData, m } else if displayName, ok := d.Get("display_name").(string); ok && displayName != "" { fieldName = "displayName" fieldValue = displayName + } else if identifierUri, ok := d.Get("identifier_uri").(string); ok { + fieldName = "IdentifierUris" + fieldValue = identifierUri + filterOp = "%s/any(uri:uri eq '%s')" } else { - return tf.ErrorDiagF(nil, "One of `object_id`, `application_id`, `client_id`, or `displayName` must be specified") + return tf.ErrorDiagF(nil, "One of `object_id`, `application_id`, `client_id`, `displayName`, or `identifier_uri` must be specified") } - filter := fmt.Sprintf("%s eq '%s'", fieldName, fieldValue) + filter := fmt.Sprintf(filterOp, fieldName, fieldValue) result, _, err := client.List(ctx, odata.Query{Filter: filter}) if err != nil { diff --git a/internal/services/applications/application_data_source_test.go b/internal/services/applications/application_data_source_test.go index f0ab09f400..a7832c458c 100644 --- a/internal/services/applications/application_data_source_test.go +++ b/internal/services/applications/application_data_source_test.go @@ -61,6 +61,18 @@ func TestAccApplicationDataSource_byDisplayName(t *testing.T) { }) } +func TestAccApplicationDataSource_byIdentifierUri(t *testing.T) { + data := acceptance.BuildTestData(t, "data.azuread_application", "test") + r := ApplicationDataSource{} + + data.DataSourceTest(t, []acceptance.TestStep{ + { + Config: r.identifierUri(data), + Check: r.testCheck(data), + }, + }) +} + func (ApplicationDataSource) testCheck(data acceptance.TestData) acceptance.TestCheckFunc { return acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).Key("application_id").IsUuid(), @@ -130,3 +142,13 @@ data "azuread_application" "test" { } `, ApplicationResource{}.complete(data)) } + +func (ApplicationDataSource) identifierUri(data acceptance.TestData) string { + return fmt.Sprintf(` +%[1]s + +data "azuread_application" "test" { + identifier_uri = tolist(azuread_application.test.identifier_uris)[0] +} +`, ApplicationResource{}.complete(data)) +} diff --git a/internal/services/applications/application_pre_authorized_resource.go b/internal/services/applications/application_pre_authorized_resource.go index 4bcae42095..fcbe31c6d0 100644 --- a/internal/services/applications/application_pre_authorized_resource.go +++ b/internal/services/applications/application_pre_authorized_resource.go @@ -304,6 +304,9 @@ func applicationPreAuthorizedResourceDelete(ctx context.Context, d *pluginsdk.Re return tf.ErrorDiagPathF(err, "id", "Parsing pre-authorized application ID %q", d.Id()) } + tf.LockByName(applicationResourceName, id.ObjectId) + defer tf.UnlockByName(applicationResourceName, id.ObjectId) + app, status, err := client.Get(ctx, id.ObjectId, odata.Query{}) if err != nil { if status == http.StatusNotFound { diff --git 
a/internal/services/applications/application_pre_authorized_resource_test.go b/internal/services/applications/application_pre_authorized_resource_test.go index 3985cc6330..78f17c30f2 100644 --- a/internal/services/applications/application_pre_authorized_resource_test.go +++ b/internal/services/applications/application_pre_authorized_resource_test.go @@ -89,6 +89,33 @@ func TestAccApplicationPreAuthorized_deprecatedId2(t *testing.T) { }) } +func TestAccApplicationPreAuthorized_multipleCreateDestroy(t *testing.T) { + data := acceptance.BuildTestData(t, "azuread_application_pre_authorized", "authorize_1") + data2 := acceptance.BuildTestData(t, "azuread_application", "authorizer") + r := ApplicationPreAuthorizedResource{} + + data.ResourceTest(t, r, []acceptance.TestStep{ + { + Config: r.multiple(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep(), + { + Config: r.multipleDestroy(data2), + }, + { + // This step should catch any failed destroys from the previous step by throwing an ImportAsExists error + Config: r.multiple(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep(), + }) +} + func (ApplicationPreAuthorizedResource) Exists(ctx context.Context, clients *clients.Client, state *terraform.InstanceState) (*bool, error) { client := clients.Applications.ApplicationsClientBeta client.BaseClient.DisableRetries = true @@ -249,3 +276,83 @@ resource "azuread_application_pre_authorized" "test" { } `, data.RandomInteger, data.UUID(), data.UUID()) } + +func (ApplicationPreAuthorizedResource) multiple(data acceptance.TestData) string { + return fmt.Sprintf(` +resource "azuread_application" "authorized_1" { + display_name = "acctestApp-authorized-1-%[1]d" +} + +resource "azuread_application" "authorized_2" { + display_name = "acctestApp-authorized-2-%[1]d" +} + +resource "azuread_application" "authorized_3" { + display_name = "acctestApp-authorized-3-%[1]d" +} + +resource "azuread_application" "authorizer" { + display_name = "acctestApp-authorizer-%[1]d" + + api { + oauth2_permission_scope { + admin_consent_description = "Administer the application" + admin_consent_display_name = "Administer" + enabled = true + id = "11111111-1111-1111-1111-111111111111" + type = "Admin" + value = "administer" + } + } +} + +resource "azuread_application_pre_authorized" "authorize_1" { + application_id = azuread_application.authorizer.id + authorized_client_id = azuread_application.authorized_1.client_id + permission_ids = ["11111111-1111-1111-1111-111111111111"] +} + +resource "azuread_application_pre_authorized" "authorize_2" { + application_id = azuread_application.authorizer.id + authorized_client_id = azuread_application.authorized_2.client_id + permission_ids = ["11111111-1111-1111-1111-111111111111"] +} + +resource "azuread_application_pre_authorized" "authorize_3" { + application_id = azuread_application.authorizer.id + authorized_client_id = azuread_application.authorized_3.client_id + permission_ids = ["11111111-1111-1111-1111-111111111111"] +} +`, data.RandomInteger) +} + +func (ApplicationPreAuthorizedResource) multipleDestroy(data acceptance.TestData) string { + return fmt.Sprintf(` +resource "azuread_application" "authorized_1" { + display_name = "acctestApp-authorized-1-%[1]d" +} + +resource "azuread_application" "authorized_2" { + display_name = "acctestApp-authorized-2-%[1]d" +} + +resource "azuread_application" "authorized_3" { + display_name = 
"acctestApp-authorized-3-%[1]d" +} + +resource "azuread_application" "authorizer" { + display_name = "acctestApp-authorizer-%[1]d" + + api { + oauth2_permission_scope { + admin_consent_description = "Administer the application" + admin_consent_display_name = "Administer" + enabled = true + id = "11111111-1111-1111-1111-111111111111" + type = "Admin" + value = "administer" + } + } +} +`, data.RandomInteger) +} diff --git a/internal/services/applications/application_redirect_uris_resource.go b/internal/services/applications/application_redirect_uris_resource.go index 215e4809d8..4d1a79f11a 100644 --- a/internal/services/applications/application_redirect_uris_resource.go +++ b/internal/services/applications/application_redirect_uris_resource.go @@ -199,7 +199,7 @@ func (r ApplicationRedirectUrisResource) Update() sdk.ResourceFunc { applicationId := parse.NewApplicationID(id.ApplicationId) var model ApplicationRedirectUrisModel - if err := metadata.Decode(&model); err != nil { + if err = metadata.Decode(&model); err != nil { return fmt.Errorf("decoding: %+v", err) } diff --git a/internal/services/applications/application_registration_resource.go b/internal/services/applications/application_registration_resource.go index 947bdc4fde..e2e5e12a75 100644 --- a/internal/services/applications/application_registration_resource.go +++ b/internal/services/applications/application_registration_resource.go @@ -353,6 +353,9 @@ func (r ApplicationRegistrationResource) Update() sdk.ResourceFunc { return fmt.Errorf("decoding: %+v", err) } + tf.LockByName(applicationResourceName, id.ApplicationId) + defer tf.UnlockByName(applicationResourceName, id.ApplicationId) + properties := msgraph.Application{ DirectoryObject: msgraph.DirectoryObject{ Id: &id.ApplicationId, diff --git a/internal/services/applications/application_resource.go b/internal/services/applications/application_resource.go index 166093d88d..74537dbfeb 100644 --- a/internal/services/applications/application_resource.go +++ b/internal/services/applications/application_resource.go @@ -954,15 +954,97 @@ func applicationResourceCreate(ctx context.Context, d *pluginsdk.ResourceData, m } if templateId != "" { + // Validate the template exists + if _, status, err := appTemplatesClient.Get(ctx, templateId, odata.Query{}); err != nil { + if status == http.StatusNotFound { + return tf.ErrorDiagPathF(err, "template_id", "Could not find application template with ID %q", templateId) + } + return tf.ErrorDiagF(err, "Could not retrieve application template with ID %q", templateId) + } + + // Generate a temporary display name to assert uniqueness when handling buggy 404 when instantiating + uuid, err := uuid.GenerateUUID() + if err != nil { + return tf.ErrorDiagF(err, "Failed to generate a UUID") + } + tempDisplayName := fmt.Sprintf("TERRAFORM_INSTANTIATE_%s", uuid) + // Instantiate application from template gallery and return via the update function properties := msgraph.ApplicationTemplate{ ID: pointer.To(templateId), - DisplayName: pointer.To(displayName), + DisplayName: pointer.To(tempDisplayName), } - result, _, err := appTemplatesClient.Instantiate(ctx, properties) + // When the /instantiate operation returns 404, it has probably created the application anyway. There is no way to tell this + // other than polling for the application object which is created out-of-band, so we create it with a quasi-unique temporary + // displayName and then poll for it. 
+ result, status, err := appTemplatesClient.Instantiate(ctx, properties) if err != nil { - return tf.ErrorDiagF(err, "Could not instantiate application from template") + if status != http.StatusNotFound { + return tf.ErrorDiagF(err, "Could not instantiate application from template") + } + + deadline, ok := ctx.Deadline() + if !ok { + return tf.ErrorDiagF(errors.New("context has no deadline"), "internal-error: context has no deadline") + } + + // Since the API response can't be trusted, we'll have to take on responsibility for ensuring + // the application object and service principal objects were created as expected. + pollingResult, err := (&pluginsdk.StateChangeConf{ //nolint:staticcheck + Pending: []string{"Waiting"}, + Target: []string{"Found"}, + Timeout: time.Until(deadline), + MinTimeout: 5 * time.Second, + Refresh: func() (interface{}, string, error) { + // List applications with matching applicationTemplateId and displayName (using the temporary display name we generated above) + filter := fmt.Sprintf("applicationTemplateId eq '%s' and displayName eq '%s'", odata.EscapeSingleQuote(templateId), odata.EscapeSingleQuote(tempDisplayName)) + applicationsResult, _, err := client.List(ctx, odata.Query{Filter: filter}) + if err != nil { + return nil, "Error", err + } + if applicationsResult == nil { + return nil, "Waiting", nil + } + for _, application := range *applicationsResult { + if id := application.ID(); id != nil && application.AppId != nil && application.ApplicationTemplateId != nil && *application.ApplicationTemplateId == templateId && application.DisplayName != nil && *application.DisplayName == tempDisplayName { + // We should ensure the service principal was also created + servicePrincipalsClient := meta.(*clients.Client).Applications.ServicePrincipalsClient + + // List service principals for the created application + servicePrincipalsFilter := fmt.Sprintf("appId eq '%s'", odata.EscapeSingleQuote(*application.AppId)) + servicePrincipalsResult, _, err := servicePrincipalsClient.List(ctx, odata.Query{Filter: servicePrincipalsFilter}) + if err != nil { + return nil, "Error", err + } + if servicePrincipalsResult == nil { + return nil, "Waiting", nil + } + for _, servicePrincipal := range *servicePrincipalsResult { + // Validate the appId and applicationTemplateId match the application + if servicePrincipalId := servicePrincipal.ID(); servicePrincipalId != nil && servicePrincipal.AppId != nil && *servicePrincipal.AppId == *application.AppId && servicePrincipal.ApplicationTemplateId != nil && *servicePrincipal.ApplicationTemplateId == templateId { + return msgraph.ApplicationTemplate{ + Application: &application, + ServicePrincipal: &servicePrincipal, + }, "Found", nil + } + } + } + } + return nil, "Waiting", nil + }, + }).WaitForStateContext(ctx) + + if err != nil { + return tf.ErrorDiagF(err, "Could not instantiate application from template") + } + if pollingResult == nil { + return tf.ErrorDiagF(errors.New("attempted to poll for application and service principal but they were not found"), "Could not instantiate application from template") + } + + if template, ok := pollingResult.(msgraph.ApplicationTemplate); ok { + result = &template + } } if result.Application == nil { @@ -976,7 +1058,8 @@ func applicationResourceCreate(ctx context.Context, d *pluginsdk.ResourceData, m id := parse.NewApplicationID(*result.Application.ID()) d.SetId(id.ID()) - // The application was created out of band, so we'll update it just as if it was imported + // The application was created out of band, 
so we'll update it just as if it was imported. This will also + // set the correct displayName for the application. return applicationResourceUpdate(ctx, d, meta) } @@ -1106,7 +1189,7 @@ func applicationResourceCreate(ctx context.Context, d *pluginsdk.ResourceData, m // See https://github.com/hashicorp/terraform-provider-azuread/issues/914 if acceptMappedClaims != nil { api.AcceptMappedClaims = acceptMappedClaims - if _, err := client.Update(ctx, msgraph.Application{ + if _, err = client.Update(ctx, msgraph.Application{ DirectoryObject: msgraph.DirectoryObject{ Id: app.Id, }, @@ -1119,7 +1202,7 @@ func applicationResourceCreate(ctx context.Context, d *pluginsdk.ResourceData, m if len(ownersExtra) > 0 { // Add any remaining owners after the application is created app.Owners = &ownersExtra - if _, err := client.AddOwners(ctx, app); err != nil { + if _, err = client.AddOwners(ctx, app); err != nil { return tf.ErrorDiagF(err, "Could not add owners to application with object ID: %q", id.ApplicationId) } } @@ -1133,7 +1216,7 @@ func applicationResourceCreate(ctx context.Context, d *pluginsdk.ResourceData, m // Upload the application image if imageContentType != "" && len(imageData) > 0 { - _, err := client.UploadLogo(ctx, id.ApplicationId, imageContentType, imageData) + _, err = client.UploadLogo(ctx, id.ApplicationId, imageContentType, imageData) if err != nil { return tf.ErrorDiagF(err, "Could not upload logo image for application with object ID: %q", id.ApplicationId) } @@ -1151,6 +1234,9 @@ func applicationResourceUpdate(ctx context.Context, d *pluginsdk.ResourceData, m return tf.ErrorDiagPathF(err, "id", "Parsing ID") } + tf.LockByName(applicationResourceName, id.ApplicationId) + defer tf.UnlockByName(applicationResourceName, id.ApplicationId) + displayName := d.Get("display_name").(string) // Perform this check at apply time to catch any duplicate names created during the same apply @@ -1175,7 +1261,6 @@ func applicationResourceUpdate(ctx context.Context, d *pluginsdk.ResourceData, m var imageContentType string var imageData []byte if v, ok := d.GetOk("logo_image"); ok && v != "" && d.HasChange("logo_image") { - var err error imageContentType, imageData, err = applicationParseLogoImage(v.(string)) if err != nil { return tf.ErrorDiagPathF(err, "image", "Could not decode image data") @@ -1193,12 +1278,9 @@ func applicationResourceUpdate(ctx context.Context, d *pluginsdk.ResourceData, m DirectoryObject: msgraph.DirectoryObject{ Id: pointer.To(id.ApplicationId), }, - Api: expandApplicationApi(d.Get("api").([]interface{})), - AppRoles: expandApplicationAppRoles(d.Get("app_role").(*pluginsdk.Set).List()), Description: tf.NullableString(d.Get("description").(string)), DisplayName: pointer.To(displayName), GroupMembershipClaims: expandApplicationGroupMembershipClaims(d.Get("group_membership_claims").(*pluginsdk.Set).List()), - IdentifierUris: tf.ExpandStringSlicePtr(d.Get("identifier_uris").(*pluginsdk.Set).List()), Info: &msgraph.InformationalUrl{ MarketingUrl: tf.NullableString(d.Get("marketing_url").(string)), PrivacyStatementUrl: tf.NullableString(d.Get("privacy_statement_url").(string)), @@ -1209,9 +1291,7 @@ func applicationResourceUpdate(ctx context.Context, d *pluginsdk.ResourceData, m IsFallbackPublicClient: pointer.To(d.Get("fallback_public_client_enabled").(bool)), Notes: tf.NullableString(d.Get("notes").(string)), Oauth2RequirePostResponse: pointer.To(d.Get("oauth2_post_response_required").(bool)), - OptionalClaims: 
expandApplicationOptionalClaims(d.Get("optional_claims").([]interface{})), PublicClient: expandApplicationPublicClient(d.Get("public_client").([]interface{})), - RequiredResourceAccess: expandApplicationRequiredResourceAccess(d.Get("required_resource_access").(*pluginsdk.Set).List()), ServiceManagementReference: tf.NullableString(d.Get("service_management_reference").(string)), SignInAudience: pointer.To(d.Get("sign_in_audience").(string)), Spa: expandApplicationSpa(d.Get("single_page_application").([]interface{})), @@ -1219,15 +1299,39 @@ func applicationResourceUpdate(ctx context.Context, d *pluginsdk.ResourceData, m Web: expandApplicationWeb(d.Get("web").([]interface{})), } - if err := applicationDisableAppRoles(ctx, client, &properties, expandApplicationAppRoles(d.Get("app_role").(*pluginsdk.Set).List())); err != nil { - return tf.ErrorDiagPathF(err, "app_role", "Could not disable App Roles for application with object ID %q", id.ApplicationId) + api := expandApplicationApi(d.Get("api").([]interface{})) + + if d.HasChange("app_role") { + if err = applicationDisableAppRoles(ctx, client, &properties, expandApplicationAppRoles(d.Get("app_role").(*pluginsdk.Set).List())); err != nil { + return tf.ErrorDiagPathF(err, "app_role", "Could not disable App Roles for application with object ID %q", id.ApplicationId) + } + + properties.AppRoles = expandApplicationAppRoles(d.Get("app_role").(*pluginsdk.Set).List()) + } + + if d.HasChange("api.0.oauth2_permission_scope") { + if err = applicationDisableOauth2PermissionScopes(ctx, client, &properties, expandApplicationOAuth2PermissionScope(d.Get("api.0.oauth2_permission_scope").(*pluginsdk.Set).List())); err != nil { + return tf.ErrorDiagPathF(err, "api.0.oauth2_permission_scope", "Could not disable OAuth2 Permission Scopes for application with object ID %q", id.ApplicationId) + } + } else { + api.OAuth2PermissionScopes = nil + } + + if d.HasChange("identifier_uris") { + properties.IdentifierUris = tf.ExpandStringSlicePtr(d.Get("identifier_uris").(*pluginsdk.Set).List()) + } + + if d.HasChange("optional_claims") { + properties.OptionalClaims = expandApplicationOptionalClaims(d.Get("optional_claims").([]interface{})) } - if err := applicationDisableOauth2PermissionScopes(ctx, client, &properties, expandApplicationOAuth2PermissionScope(d.Get("api.0.oauth2_permission_scope").(*pluginsdk.Set).List())); err != nil { - return tf.ErrorDiagPathF(err, "api.0.oauth2_permission_scope", "Could not disable OAuth2 Permission Scopes for application with object ID %q", id.ApplicationId) + if d.HasChange("required_resource_access") { + properties.RequiredResourceAccess = expandApplicationRequiredResourceAccess(d.Get("required_resource_access").(*pluginsdk.Set).List()) } - if _, err := client.Update(ctx, properties); err != nil { + properties.Api = api + + if _, err = client.Update(ctx, properties); err != nil { return tf.ErrorDiagF(err, "Could not update application with object ID: %q", id.ApplicationId) } @@ -1253,7 +1357,7 @@ func applicationResourceUpdate(ctx context.Context, d *pluginsdk.ResourceData, m } properties.Owners = &newOwners - if _, err := client.AddOwners(ctx, &properties); err != nil { + if _, err = client.AddOwners(ctx, &properties); err != nil { return tf.ErrorDiagF(err, "Could not add owners to application with object ID: %q", id.ApplicationId) } } @@ -1267,8 +1371,7 @@ func applicationResourceUpdate(ctx context.Context, d *pluginsdk.ResourceData, m // Upload the application image if imageContentType != "" && len(imageData) > 0 { - _, err := 
client.UploadLogo(ctx, id.ApplicationId, imageContentType, imageData) - if err != nil { + if _, err = client.UploadLogo(ctx, id.ApplicationId, imageContentType, imageData); err != nil { return tf.ErrorDiagF(err, "Could not upload logo image for application with object ID: %q", id.ApplicationId) } } diff --git a/internal/services/applications/application_resource_test.go b/internal/services/applications/application_resource_test.go index 3e8b9b21a6..2c20290aee 100644 --- a/internal/services/applications/application_resource_test.go +++ b/internal/services/applications/application_resource_test.go @@ -635,8 +635,31 @@ resource "azuread_application" "test" { display_name = "acctest-APP-%[1]d" owners = [data.azuread_client_config.test.object_id] template_id = "%[2]s" + + api { + oauth2_permission_scope { + admin_consent_description = "Allow the application to access acctest-APP-%[1]d on behalf of the signed-in user." + admin_consent_display_name = "Access acctest-APP-%[1]d" + enabled = true + id = "%[3]s" + type = "User" + user_consent_description = "Allow the application to access acctest-APP-%[1]d on your behalf." + user_consent_display_name = "Access acctest-APP-%[1]d" + value = "user_impersonation" + } + } + + app_role { + allowed_member_types = [ + "User", + ] + description = "msiam_access" + display_name = "msiam_access" + enabled = true + id = "dfd0e7dd-26fb-4b2c-98d2-e444486c1e37" + } } -`, data.RandomInteger, testApplicationTemplateId) +`, data.RandomInteger, testApplicationTemplateId, data.UUID()) } func (ApplicationResource) withGroupMembershipClaims(data acceptance.TestData) string { diff --git a/internal/services/applications/client/client.go b/internal/services/applications/client/client.go index b6bcdd9a77..a99d73ee07 100644 --- a/internal/services/applications/client/client.go +++ b/internal/services/applications/client/client.go @@ -13,6 +13,7 @@ type Client struct { ApplicationsClientBeta *msgraph.ApplicationsClient ApplicationTemplatesClient *msgraph.ApplicationTemplatesClient DirectoryObjectsClient *msgraph.DirectoryObjectsClient + ServicePrincipalsClient *msgraph.ServicePrincipalsClient } func NewClient(o *common.ClientOptions) *Client { @@ -31,10 +32,14 @@ func NewClient(o *common.ClientOptions) *Client { directoryObjectsClient := msgraph.NewDirectoryObjectsClient() o.ConfigureClient(&directoryObjectsClient.BaseClient) + servicePrincipalsClient := msgraph.NewServicePrincipalsClient() + o.ConfigureClient(&servicePrincipalsClient.BaseClient) + return &Client{ ApplicationsClient: applicationsClient, ApplicationsClientBeta: applicationsClientBeta, ApplicationTemplatesClient: applicationTemplatesClient, DirectoryObjectsClient: directoryObjectsClient, + ServicePrincipalsClient: servicePrincipalsClient, } } diff --git a/internal/services/applications/parse/api_access.go b/internal/services/applications/parse/api_access.go index 472d128807..a7a19eb20f 100644 --- a/internal/services/applications/parse/api_access.go +++ b/internal/services/applications/parse/api_access.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package parse import ( diff --git a/internal/services/applications/parse/app_role.go b/internal/services/applications/parse/app_role.go index 1102061e56..c5be8c020c 100644 --- a/internal/services/applications/parse/app_role.go +++ b/internal/services/applications/parse/app_role.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + package parse import ( diff --git a/internal/services/applications/parse/application.go b/internal/services/applications/parse/application.go index 783fb09f9f..a62fedadce 100644 --- a/internal/services/applications/parse/application.go +++ b/internal/services/applications/parse/application.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package parse import ( diff --git a/internal/services/applications/parse/fallback_public_client.go b/internal/services/applications/parse/fallback_public_client.go index 7b999739a3..d1258d1181 100644 --- a/internal/services/applications/parse/fallback_public_client.go +++ b/internal/services/applications/parse/fallback_public_client.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package parse import ( diff --git a/internal/services/applications/parse/from_template.go b/internal/services/applications/parse/from_template.go index a18b558d2b..4b0047e6ce 100644 --- a/internal/services/applications/parse/from_template.go +++ b/internal/services/applications/parse/from_template.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package parse import ( diff --git a/internal/services/applications/parse/identifier_uri.go b/internal/services/applications/parse/identifier_uri.go index 3ad269fc20..1f2568d5b6 100644 --- a/internal/services/applications/parse/identifier_uri.go +++ b/internal/services/applications/parse/identifier_uri.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package parse import ( diff --git a/internal/services/applications/parse/known_clients.go b/internal/services/applications/parse/known_clients.go index c360567982..9619dff74c 100644 --- a/internal/services/applications/parse/known_clients.go +++ b/internal/services/applications/parse/known_clients.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package parse import ( diff --git a/internal/services/applications/parse/optional_claims.go b/internal/services/applications/parse/optional_claims.go index 2bc7dffe2e..0d29ebe8d7 100644 --- a/internal/services/applications/parse/optional_claims.go +++ b/internal/services/applications/parse/optional_claims.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package parse import ( diff --git a/internal/services/applications/parse/owner.go b/internal/services/applications/parse/owner.go index 883b48f2c2..e29579fc3c 100644 --- a/internal/services/applications/parse/owner.go +++ b/internal/services/applications/parse/owner.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package parse import ( diff --git a/internal/services/applications/parse/permission_scope.go b/internal/services/applications/parse/permission_scope.go index 828d71b51b..dae95849ec 100644 --- a/internal/services/applications/parse/permission_scope.go +++ b/internal/services/applications/parse/permission_scope.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package parse import ( diff --git a/internal/services/applications/parse/redirect_uris.go b/internal/services/applications/parse/redirect_uris.go index c7fcaf00ab..b738951354 100644 --- a/internal/services/applications/parse/redirect_uris.go +++ b/internal/services/applications/parse/redirect_uris.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + package parse import ( diff --git a/internal/services/applications/parse/service_principal.go b/internal/services/applications/parse/service_principal.go index a997773a96..d78517d817 100644 --- a/internal/services/applications/parse/service_principal.go +++ b/internal/services/applications/parse/service_principal.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package parse import ( diff --git a/internal/services/conditionalaccess/conditional_access_policy_resource.go b/internal/services/conditionalaccess/conditional_access_policy_resource.go index e4c8d9d7f8..b374ed8a3c 100644 --- a/internal/services/conditionalaccess/conditional_access_policy_resource.go +++ b/internal/services/conditionalaccess/conditional_access_policy_resource.go @@ -77,7 +77,7 @@ func conditionalAccessPolicyResource() *pluginsdk.Resource { "included_applications": { Type: pluginsdk.TypeList, Optional: true, - ExactlyOneOf: []string{"conditions.0.applications.0.included_applications", "conditions.0.applications.0.included_user_actions"}, + ExactlyOneOf: []string{"conditions.0.applications.0.included_applications", "conditions.0.applications.0.included_user_actions", "conditions.0.applications.0.filter"}, Elem: &pluginsdk.Schema{ Type: pluginsdk.TypeString, ValidateDiagFunc: validation.ValidateDiag(validation.StringIsNotEmpty), @@ -96,12 +96,37 @@ func conditionalAccessPolicyResource() *pluginsdk.Resource { "included_user_actions": { Type: pluginsdk.TypeList, Optional: true, - ExactlyOneOf: []string{"conditions.0.applications.0.included_applications", "conditions.0.applications.0.included_user_actions"}, + ExactlyOneOf: []string{"conditions.0.applications.0.included_applications", "conditions.0.applications.0.included_user_actions", "conditions.0.applications.0.filter"}, Elem: &pluginsdk.Schema{ Type: pluginsdk.TypeString, ValidateDiagFunc: validation.ValidateDiag(validation.StringIsNotEmpty), }, }, + + "filter": { + Type: pluginsdk.TypeList, + Optional: true, + ExactlyOneOf: []string{"conditions.0.applications.0.included_applications", "conditions.0.applications.0.included_user_actions", "conditions.0.applications.0.filter"}, + MaxItems: 1, + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ + "mode": { + Type: pluginsdk.TypeString, + Required: true, + ValidateFunc: validation.StringInSlice([]string{ + msgraph.ConditionalAccessFilterModeExclude, + msgraph.ConditionalAccessFilterModeInclude, + }, false), + }, + + "rule": { + Type: pluginsdk.TypeString, + Required: true, + ValidateDiagFunc: validation.ValidateDiag(validation.StringIsNotEmpty), + }, + }, + }, + }, }, }, }, @@ -582,7 +607,7 @@ func conditionalAccessPolicyResource() *pluginsdk.Resource { "sign_in_frequency_authentication_type": { Type: pluginsdk.TypeString, Optional: true, - Default: msgraph.ConditionalAccessAuthenticationTypePrimaryAndSecondaryAuthentication, + Computed: true, ValidateFunc: validation.StringInSlice([]string{ msgraph.ConditionalAccessAuthenticationTypePrimaryAndSecondaryAuthentication, msgraph.ConditionalAccessAuthenticationTypeSecondaryAuthentication, @@ -592,7 +617,7 @@ func conditionalAccessPolicyResource() *pluginsdk.Resource { "sign_in_frequency_interval": { Type: pluginsdk.TypeString, Optional: true, - Default: msgraph.ConditionalAccessFrequencyIntervalTimeBased, + Computed: true, ValidateFunc: validation.StringInSlice([]string{ msgraph.ConditionalAccessFrequencyIntervalTimeBased, msgraph.ConditionalAccessFrequencyIntervalEveryTime, @@ 
-637,12 +662,14 @@ func conditionalAccessPolicyCustomizeDiff(_ context.Context, diff *pluginsdk.Res func conditionalAccessPolicyDiffSuppress(k, old, new string, d *pluginsdk.ResourceData) bool { suppress := false + // When an ineffectual `session_controls` block is specified, you must send `sessionControls: null`, and when a policy has ineffectual + // `sessionControls`, the API condenses it to `sessionControls: null` in the response. if k == "session_controls.#" && old == "0" && new == "1" { - // When an ineffectual `session_controls` block is configured, the API just ignores it and returns - // sessionControls: null sessionControlsRaw := d.Get("session_controls").([]interface{}) if len(sessionControlsRaw) == 1 && sessionControlsRaw[0] != nil { sessionControls := sessionControlsRaw[0].(map[string]interface{}) + + // Suppress by default, unless any of the block properties has a non-default value suppress = true if v, ok := sessionControls["application_enforced_restrictions_enabled"]; ok && v.(bool) { suppress = false @@ -659,10 +686,10 @@ func conditionalAccessPolicyDiffSuppress(k, old, new string, d *pluginsdk.Resour if v, ok := sessionControls["sign_in_frequency"]; ok && v.(int) > 0 { suppress = false } - if v, ok := sessionControls["sign_in_frequency_authentication_type"]; ok && v.(string) != msgraph.ConditionalAccessAuthenticationTypePrimaryAndSecondaryAuthentication { + if v, ok := sessionControls["sign_in_frequency_authentication_type"]; ok && v.(string) != "" { suppress = false } - if v, ok := sessionControls["sign_in_frequency_interval"]; ok && v.(string) != msgraph.ConditionalAccessFrequencyIntervalTimeBased { + if v, ok := sessionControls["sign_in_frequency_interval"]; ok && v.(string) != "" { suppress = false } if v, ok := sessionControls["sign_in_frequency_period"]; ok && v.(string) != "" { suppress = false } diff --git a/internal/services/conditionalaccess/conditional_access_policy_resource_test.go b/internal/services/conditionalaccess/conditional_access_policy_resource_test.go index 07ebc3a21c..15f9bf2cc2 100644 --- a/internal/services/conditionalaccess/conditional_access_policy_resource_test.go +++ b/internal/services/conditionalaccess/conditional_access_policy_resource_test.go @@ -162,16 +162,6 @@ func TestAccConditionalAccessPolicy_sessionControls(t *testing.T) { ), }, data.ImportStep(), - }) -} - -func TestAccConditionalAccessPolicy_sessionControlsDisabled(t *testing.T) { - // This is testing the DiffSuppressFunc for the `session_controls` block - - data := acceptance.BuildTestData(t, "azuread_conditional_access_policy", "test") - r := ConditionalAccessPolicyResource{} - - data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.sessionControlsDisabled(data), Check: acceptance.ComposeTestCheckFunc( @@ -199,6 +189,46 @@ func TestAccConditionalAccessPolicy_sessionControlsDisabled(t *testing.T) { ), }, data.ImportStep(), + { + Config: r.sessionControlsApplicationEnforcedRestrictions(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + check.That(data.ResourceName).Key("id").Exists(), + check.That(data.ResourceName).Key("display_name").HasValue(fmt.Sprintf("acctest-CONPOLICY-%d", data.RandomInteger)), + check.That(data.ResourceName).Key("state").HasValue("disabled"), + ), + }, + data.ImportStep(), + { + Config: r.sessionControlsCloudAppSecurityPolicy(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + check.That(data.ResourceName).Key("id").Exists(), +
check.That(data.ResourceName).Key("display_name").HasValue(fmt.Sprintf("acctest-CONPOLICY-%d", data.RandomInteger)), + check.That(data.ResourceName).Key("state").HasValue("disabled"), + ), + }, + data.ImportStep(), + { + Config: r.sessionControlsPersistentBrowserMode(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + check.That(data.ResourceName).Key("id").Exists(), + check.That(data.ResourceName).Key("display_name").HasValue(fmt.Sprintf("acctest-CONPOLICY-%d", data.RandomInteger)), + check.That(data.ResourceName).Key("state").HasValue("disabled"), + ), + }, + data.ImportStep(), + { + Config: r.sessionControlsDisabled(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + check.That(data.ResourceName).Key("id").Exists(), + check.That(data.ResourceName).Key("display_name").HasValue(fmt.Sprintf("acctest-CONPOLICY-%d", data.RandomInteger)), + check.That(data.ResourceName).Key("state").HasValue("disabled"), + ), + }, + data.ImportStep(), }) } @@ -302,6 +332,11 @@ func TestAccConditionalAccessPolicy_guestsOrExternalUsers(t *testing.T) { } func (r ConditionalAccessPolicyResource) Exists(ctx context.Context, clients *clients.Client, state *pluginsdk.InstanceState) (*bool, error) { + clients.ConditionalAccess.PoliciesClient.BaseClient.DisableRetries = true + defer func() { + clients.ConditionalAccess.PoliciesClient.BaseClient.DisableRetries = false + }() + var id *string app, status, err := clients.ConditionalAccess.PoliciesClient.Get(ctx, state.ID, odata.Query{}) @@ -523,6 +558,129 @@ resource "azuread_conditional_access_policy" "test" { `, data.RandomInteger) } +func (ConditionalAccessPolicyResource) sessionControlsApplicationEnforcedRestrictions(data acceptance.TestData) string { + return fmt.Sprintf(` +provider "azuread" {} + +resource "azuread_conditional_access_policy" "test" { + display_name = "acctest-CONPOLICY-%[1]d" + state = "disabled" + + conditions { + client_app_types = ["browser"] + + applications { + included_applications = ["All"] + } + + locations { + included_locations = ["All"] + } + + platforms { + included_platforms = ["all"] + } + + users { + included_users = ["All"] + excluded_users = ["GuestsOrExternalUsers"] + } + } + + grant_controls { + operator = "OR" + built_in_controls = ["block"] + } + + session_controls { + application_enforced_restrictions_enabled = true + } +} +`, data.RandomInteger) +} + +func (ConditionalAccessPolicyResource) sessionControlsCloudAppSecurityPolicy(data acceptance.TestData) string { + return fmt.Sprintf(` +provider "azuread" {} + +resource "azuread_conditional_access_policy" "test" { + display_name = "acctest-CONPOLICY-%[1]d" + state = "disabled" + + conditions { + client_app_types = ["browser"] + + applications { + included_applications = ["All"] + } + + locations { + included_locations = ["All"] + } + + platforms { + included_platforms = ["all"] + } + + users { + included_users = ["All"] + excluded_users = ["GuestsOrExternalUsers"] + } + } + + grant_controls { + operator = "OR" + built_in_controls = ["block"] + } + + session_controls { + cloud_app_security_policy = "monitorOnly" + } +} +`, data.RandomInteger) +} + +func (ConditionalAccessPolicyResource) sessionControlsPersistentBrowserMode(data acceptance.TestData) string { + return fmt.Sprintf(` +provider "azuread" {} + +resource "azuread_conditional_access_policy" "test" { + display_name = "acctest-CONPOLICY-%[1]d" + state = "disabled" + + conditions { + client_app_types = ["browser"] + + 
applications { + included_applications = ["All"] + } + + locations { + included_locations = ["All"] + } + + platforms { + included_platforms = ["all"] + } + + users { + included_users = ["All"] + excluded_users = ["GuestsOrExternalUsers"] + } + } + + grant_controls { + operator = "OR" + built_in_controls = ["block"] + } + + session_controls { + persistent_browser_mode = "always" + } +} +`, data.RandomInteger) +} + func (ConditionalAccessPolicyResource) clientApplicationsIncluded(data acceptance.TestData) string { return fmt.Sprintf(` provider "azuread" {} diff --git a/internal/services/conditionalaccess/conditionalaccess.go b/internal/services/conditionalaccess/conditionalaccess.go index f01a63b8cd..db94056a90 100644 --- a/internal/services/conditionalaccess/conditionalaccess.go +++ b/internal/services/conditionalaccess/conditionalaccess.go @@ -4,6 +4,8 @@ package conditionalaccess import ( + "log" + "github.com/hashicorp/go-azure-helpers/lang/pointer" "github.com/hashicorp/terraform-provider-azuread/internal/tf" "github.com/manicminer/hamilton/msgraph" @@ -40,6 +42,7 @@ func flattenConditionalAccessApplications(in *msgraph.ConditionalAccessApplicati "included_applications": tf.FlattenStringSlicePtr(in.IncludeApplications), "excluded_applications": tf.FlattenStringSlicePtr(in.ExcludeApplications), "included_user_actions": tf.FlattenStringSlicePtr(in.IncludeUserActions), + "filter": flattenConditionalAccessFilter(in.ApplicationFilter), }, } } @@ -83,7 +86,7 @@ func flattenConditionalAccessDevices(in *msgraph.ConditionalAccessDevices) []int return []interface{}{ map[string]interface{}{ - "filter": flattenConditionalAccessDeviceFilter(in.DeviceFilter), + "filter": flattenConditionalAccessFilter(in.DeviceFilter), }, } } @@ -188,11 +191,14 @@ func flattenConditionalAccessSessionControls(in *msgraph.ConditionalAccessSessio } } -func flattenConditionalAccessDeviceFilter(in *msgraph.ConditionalAccessFilter) []interface{} { +func flattenConditionalAccessFilter(in *msgraph.ConditionalAccessFilter) []interface{} { if in == nil { + log.Print("=== no access filters to flatten") return []interface{}{} } + log.Printf("=== access filters are being flattened: %s", *in.Rule) + return []interface{}{ map[string]interface{}{ "mode": in.Mode, @@ -339,10 +345,17 @@ func expandConditionalAccessApplications(in []interface{}) *msgraph.ConditionalA includeApplications := config["included_applications"].([]interface{}) excludeApplications := config["excluded_applications"].([]interface{}) includeUserActions := config["included_user_actions"].([]interface{}) + filter := config["filter"].([]interface{}) result.IncludeApplications = tf.ExpandStringSlicePtr(includeApplications) result.ExcludeApplications = tf.ExpandStringSlicePtr(excludeApplications) result.IncludeUserActions = tf.ExpandStringSlicePtr(includeUserActions) + if len(filter) > 0 { + log.Printf("=== application filter being expanded %+v \n", filter...)
+ result.ApplicationFilter = expandConditionalAccessFilter(filter) + } else { + log.Println("=== no application filter to expand") + } return &result } @@ -493,19 +506,24 @@ func expandConditionalAccessSessionControls(in []interface{}) *msgraph.Condition signInFrequency.IsEnabled = pointer.To(true) signInFrequency.Type = pointer.To(config["sign_in_frequency_period"].(string)) signInFrequency.Value = pointer.To(int32(frequencyValue)) + + // AuthenticationType and FrequencyInterval must be set to default values here + signInFrequency.AuthenticationType = pointer.To(msgraph.ConditionalAccessAuthenticationTypePrimaryAndSecondaryAuthentication) + signInFrequency.FrequencyInterval = pointer.To(msgraph.ConditionalAccessFrequencyIntervalTimeBased) } - if authenticationType, ok := config["sign_in_frequency_authentication_type"]; ok { + if authenticationType, ok := config["sign_in_frequency_authentication_type"]; ok && authenticationType.(string) != "" { signInFrequency.AuthenticationType = pointer.To(authenticationType.(string)) } - if interval, ok := config["sign_in_frequency_interval"]; ok { + if interval, ok := config["sign_in_frequency_interval"]; ok && interval.(string) != "" { signInFrequency.FrequencyInterval = pointer.To(interval.(string)) } // API returns 400 error if signInFrequency is set with all default/zero values - if pointer.From(signInFrequency.IsEnabled) || pointer.From(signInFrequency.FrequencyInterval) != msgraph.ConditionalAccessFrequencyIntervalTimeBased || - pointer.From(signInFrequency.AuthenticationType) != msgraph.ConditionalAccessAuthenticationTypePrimaryAndSecondaryAuthentication { + if (signInFrequency.IsEnabled != nil && *signInFrequency.IsEnabled) || + (signInFrequency.FrequencyInterval != nil && *signInFrequency.FrequencyInterval != msgraph.ConditionalAccessFrequencyIntervalTimeBased) || + (signInFrequency.AuthenticationType != nil && *signInFrequency.AuthenticationType != msgraph.ConditionalAccessAuthenticationTypePrimaryAndSecondaryAuthentication) { result.SignInFrequency = &signInFrequency } diff --git a/internal/services/directoryroles/directory_role_eligibility_schedule_request_resource.go b/internal/services/directoryroles/directory_role_eligibility_schedule_request_resource.go index 220e0c7361..36538ad5b0 100644 --- a/internal/services/directoryroles/directory_role_eligibility_schedule_request_resource.go +++ b/internal/services/directoryroles/directory_role_eligibility_schedule_request_resource.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package directoryroles import ( diff --git a/internal/services/directoryroles/directory_role_eligibility_schedule_request_resource_test.go b/internal/services/directoryroles/directory_role_eligibility_schedule_request_resource_test.go index 99a014fae8..e8627e7ca7 100644 --- a/internal/services/directoryroles/directory_role_eligibility_schedule_request_resource_test.go +++ b/internal/services/directoryroles/directory_role_eligibility_schedule_request_resource_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + package directoryroles_test import ( @@ -16,13 +19,27 @@ import ( type RoleEligibilityScheduleRequestResource struct{} -func TestAccRoleEligibilityScheduleRequest_basic(t *testing.T) { +func TestAccRoleEligibilityScheduleRequest_builtin(t *testing.T) { data := acceptance.BuildTestData(t, "azuread_directory_role_eligibility_schedule_request", "test") r := RoleEligibilityScheduleRequestResource{} data.ResourceTest(t, r, []acceptance.TestStep{ { - Config: r.basic(data), + Config: r.builtin(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + }) +} + +func TestAccRoleEligibilityScheduleRequest_custom(t *testing.T) { + data := acceptance.BuildTestData(t, "azuread_directory_role_eligibility_schedule_request", "test") + r := RoleEligibilityScheduleRequestResource{} + + data.ResourceTest(t, r, []acceptance.TestStep{ + { + Config: r.custom(data), Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), @@ -45,7 +62,7 @@ func (r RoleEligibilityScheduleRequestResource) Exists(ctx context.Context, clie return pointer.To(resr.ID != nil && *resr.ID == state.ID), nil } -func (r RoleEligibilityScheduleRequestResource) basic(data acceptance.TestData) string { +func (r RoleEligibilityScheduleRequestResource) builtin(data acceptance.TestData) string { return fmt.Sprintf(` provider "azuread" {} @@ -71,3 +88,36 @@ resource "azuread_directory_role_eligibility_schedule_request" "test" { } `, data.RandomInteger, data.RandomPassword) } + +func (r RoleEligibilityScheduleRequestResource) custom(data acceptance.TestData) string { + return fmt.Sprintf(` +provider "azuread" {} + +data "azuread_domains" "test" { + only_initial = true +} + +resource "azuread_user" "test" { + user_principal_name = "acctestManager.%[1]d@${data.azuread_domains.test.domains.0.domain_name}" + display_name = "acctestManager-%[1]d" + password = "%[2]s" +} + +resource "azuread_custom_directory_role" "test" { + display_name = "acctestCustomRole-%[1]d" + enabled = true + version = "1.0" + + permissions { + allowed_resource_actions = ["microsoft.directory/applications/standard/read"] + } +} + +resource "azuread_directory_role_eligibility_schedule_request" "test" { + role_definition_id = azuread_custom_directory_role.test.object_id + principal_id = azuread_user.test.object_id + directory_scope_id = "/" + justification = "abc" +} +`, data.RandomInteger, data.RandomPassword) +} diff --git a/internal/services/directoryroles/parse/directory_role.go b/internal/services/directoryroles/parse/directory_role.go index 75f1dd8de1..b092987724 100644 --- a/internal/services/directoryroles/parse/directory_role.go +++ b/internal/services/directoryroles/parse/directory_role.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + package parse import "fmt" diff --git a/internal/services/groups/group_resource.go b/internal/services/groups/group_resource.go index e500901547..64fdf01de8 100644 --- a/internal/services/groups/group_resource.go +++ b/internal/services/groups/group_resource.go @@ -96,6 +96,7 @@ func groupResource() *pluginsdk.Resource { msgraph.GroupResourceBehaviorOptionCalendarMemberReadOnly, msgraph.GroupResourceBehaviorOptionConnectorsDisabled, msgraph.GroupResourceBehaviorOptionHideGroupInOutlook, + msgraph.GroupResourceBehaviorOptionSkipExchangeInstantOn, msgraph.GroupResourceBehaviorOptionSubscribeMembersToCalendarEventsDisabled, msgraph.GroupResourceBehaviorOptionSubscribeNewGroupMembers, msgraph.GroupResourceBehaviorOptionWelcomeEmailDisabled, diff --git a/internal/services/identitygovernance/access_package_assignment_policy_resource_test.go b/internal/services/identitygovernance/access_package_assignment_policy_resource_test.go index 8e1ea7c69a..0215150035 100644 --- a/internal/services/identitygovernance/access_package_assignment_policy_resource_test.go +++ b/internal/services/identitygovernance/access_package_assignment_policy_resource_test.go @@ -76,6 +76,13 @@ func TestAccAccessPackageAssignmentPolicy_update(t *testing.T) { ), }, data.ImportStep(), + { + Config: r.simple(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep(), { Config: r.complete(data), Check: acceptance.ComposeTestCheckFunc( diff --git a/internal/services/identitygovernance/helpers/test.go b/internal/services/identitygovernance/helpers/test.go index 475817f1e3..e3c2a87272 100644 --- a/internal/services/identitygovernance/helpers/test.go +++ b/internal/services/identitygovernance/helpers/test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package helpers import ( diff --git a/internal/services/identitygovernance/identitygovernance.go b/internal/services/identitygovernance/identitygovernance.go index bd77c36c0b..8142de5a6c 100644 --- a/internal/services/identitygovernance/identitygovernance.go +++ b/internal/services/identitygovernance/identitygovernance.go @@ -131,6 +131,17 @@ func expandAssignmentReviewSettings(input []interface{}) (*msgraph.AssignmentRev result.Reviewers = expandUserSets(in["reviewer"].([]interface{})) + if result.AccessReviewTimeoutBehavior == "" && + (result.DurationInDays == nil || *result.DurationInDays == 0) && + (result.IsAccessRecommendationEnabled == nil || !*result.IsAccessRecommendationEnabled) && + (result.IsApprovalJustificationRequired == nil || !*result.IsApprovalJustificationRequired) && + (result.IsEnabled == nil || !*result.IsEnabled) && + result.RecurrenceType == "" && + result.ReviewerType == "" && + (result.Reviewers == nil || len(*result.Reviewers) == 0) { + return nil, nil + } + return &result, nil } diff --git a/internal/services/identitygovernance/parse/privileged_access_group_schedule.go b/internal/services/identitygovernance/parse/privileged_access_group_schedule.go index fce674c9c1..722057f91c 100644 --- a/internal/services/identitygovernance/parse/privileged_access_group_schedule.go +++ b/internal/services/identitygovernance/parse/privileged_access_group_schedule.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + package parse import ( diff --git a/internal/services/identitygovernance/privileged_access_group_assignment_schedule_resource.go b/internal/services/identitygovernance/privileged_access_group_assignment_schedule_resource.go index 383df5a112..bf2e35d15a 100644 --- a/internal/services/identitygovernance/privileged_access_group_assignment_schedule_resource.go +++ b/internal/services/identitygovernance/privileged_access_group_assignment_schedule_resource.go @@ -101,10 +101,8 @@ func (r PrivilegedAccessGroupAssignmentScheduleResource) Read() sdk.ResourceFunc return sdk.ResourceFunc{ Timeout: 5 * time.Minute, Func: func(ctx context.Context, metadata sdk.ResourceMetaData) error { - cSchedule := metadata.Client.IdentityGovernance.PrivilegedAccessGroupAssignmentScheduleClient - cRequests := metadata.Client.IdentityGovernance.PrivilegedAccessGroupAssignmentScheduleRequestsClient - - var request *msgraph.PrivilegedAccessGroupAssignmentScheduleRequest + scheduleClient := metadata.Client.IdentityGovernance.PrivilegedAccessGroupAssignmentScheduleClient + requestsClient := metadata.Client.IdentityGovernance.PrivilegedAccessGroupAssignmentScheduleRequestsClient id, err := parse.ParsePrivilegedAccessGroupScheduleID(metadata.ResourceData.Id()) if err != nil { @@ -112,20 +110,22 @@ func (r PrivilegedAccessGroupAssignmentScheduleResource) Read() sdk.ResourceFunc } var model PrivilegedAccessGroupScheduleModel - if err := metadata.Decode(&model); err != nil { + if err = metadata.Decode(&model); err != nil { return fmt.Errorf("decoding: %+v", err) } - schedule, status, err := cSchedule.Get(ctx, id.ID()) - if err != nil && status != http.StatusNotFound { + schedule, scheduleStatus, err := scheduleClient.Get(ctx, id.ID()) + if err != nil && scheduleStatus != http.StatusNotFound { return fmt.Errorf("retrieving %s: %+v", id, err) } + var request *msgraph.PrivilegedAccessGroupAssignmentScheduleRequest + // Some details are only available on the request which is used for the create/update of the schedule. // Schedule requests are never deleted. New ones are created when changes are made. // Therefore on a read, we need to find the latest version of the request. // This is to cater for changes being made outside of Terraform. 
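As the comment in the hunk above notes, schedule requests are never deleted, so the read has to pick out the most recent request for the group and target schedule, and fall back to the schedule object when no request remains. A minimal, self-contained sketch of that selection step follows; the scheduleRequest type, field names, and IDs are placeholders for illustration, not the provider's real client types.

package main

import (
	"fmt"
	"sort"
	"time"
)

// Placeholder for the schedule request type returned by the List call; only
// the fields needed for this illustration are included.
type scheduleRequest struct {
	ID              string
	CreatedDateTime time.Time
}

// latestRequest mimics "order by createdDateTime descending, take the first":
// it returns the newest request, or nil when none exist, in which case the
// caller falls back to the schedule object, as the resource Read does.
func latestRequest(requests []scheduleRequest) *scheduleRequest {
	if len(requests) == 0 {
		return nil
	}
	sort.Slice(requests, func(i, j int) bool {
		return requests[i].CreatedDateTime.After(requests[j].CreatedDateTime)
	})
	return &requests[0]
}

func main() {
	// The real code builds an OData filter like this and lets the API do the ordering.
	filter := fmt.Sprintf("groupId eq '%s' and targetScheduleId eq '%s'", "group-id", "schedule-id")
	fmt.Println("would list requests with filter:", filter)

	requests := []scheduleRequest{
		{ID: "older", CreatedDateTime: time.Now().Add(-48 * time.Hour)},
		{ID: "newest", CreatedDateTime: time.Now()},
	}
	if latest := latestRequest(requests); latest != nil {
		fmt.Println("populate state from request:", latest.ID)
	} else {
		fmt.Println("no request found; populate state from the schedule")
	}
}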
- requests, _, err := cRequests.List(ctx, odata.Query{ + requests, _, err := requestsClient.List(ctx, odata.Query{ Filter: fmt.Sprintf("groupId eq '%s' and targetScheduleId eq '%s'", id.GroupId, id.ID()), OrderBy: odata.OrderBy{ Field: "createdDateTime", @@ -135,45 +135,57 @@ func (r PrivilegedAccessGroupAssignmentScheduleResource) Read() sdk.ResourceFunc if err != nil { return fmt.Errorf("listing requests: %+v", err) } - if len(*requests) == 0 { - if status == http.StatusNotFound { + if requests == nil || len(*requests) == 0 { + if scheduleStatus == http.StatusNotFound { + // No request and no schedule was found return metadata.MarkAsGone(id) } } else { request = pointer.To((*requests)[0]) - - model.Justification = *request.Justification - if request.TicketInfo.TicketNumber != nil { - model.TicketNumber = *request.TicketInfo.TicketNumber - } - if request.TicketInfo.TicketSystem != nil { - model.TicketSystem = *request.TicketInfo.TicketSystem - } - if request.ScheduleInfo.Expiration.Duration != nil { - model.Duration = *request.ScheduleInfo.Expiration.Duration - } } - // Typically this is because the request has expired - // So we populate the model with the schedule details - if status == http.StatusNotFound { + var scheduleInfo *msgraph.RequestSchedule + + if request != nil { + // The request is still present, populate from the request + scheduleInfo = request.ScheduleInfo + model.AssignmentType = request.AccessId - model.ExpirationDate = request.ScheduleInfo.Expiration.EndDateTime.Format(time.RFC3339) - model.GroupId = *request.GroupId - model.PermanentAssignment = *request.ScheduleInfo.Expiration.Type == msgraph.ExpirationPatternTypeNoExpiration - model.PrincipalId = *request.PrincipalId - model.StartDate = request.ScheduleInfo.StartDateTime.Format(time.RFC3339) + model.GroupId = pointer.From(request.GroupId) + model.Justification = pointer.From(request.Justification) + model.PrincipalId = pointer.From(request.PrincipalId) model.Status = request.Status + + if ticketInfo := request.TicketInfo; ticketInfo != nil { + model.TicketNumber = pointer.From(ticketInfo.TicketNumber) + model.TicketSystem = pointer.From(ticketInfo.TicketSystem) + } } else { + // The request has likely expired, so populate from the schedule + scheduleInfo = schedule.ScheduleInfo + model.AssignmentType = schedule.AccessId - model.ExpirationDate = schedule.ScheduleInfo.Expiration.EndDateTime.Format(time.RFC3339) - model.GroupId = *schedule.GroupId - model.PermanentAssignment = *schedule.ScheduleInfo.Expiration.Type == msgraph.ExpirationPatternTypeNoExpiration - model.PrincipalId = *schedule.PrincipalId - model.StartDate = schedule.ScheduleInfo.StartDateTime.Format(time.RFC3339) + model.GroupId = pointer.From(schedule.GroupId) + model.PrincipalId = pointer.From(schedule.PrincipalId) model.Status = schedule.Status } + if scheduleInfo != nil { + if expiration := scheduleInfo.Expiration; expiration != nil { + model.Duration = pointer.From(expiration.Duration) + + if expiration.EndDateTime != nil { + model.ExpirationDate = expiration.EndDateTime.Format(time.RFC3339) + } + if expiration.Type != nil { + model.PermanentAssignment = *expiration.Type == msgraph.ExpirationPatternTypeNoExpiration + } + } + if scheduleInfo.StartDateTime != nil { + model.StartDate = scheduleInfo.StartDateTime.Format(time.RFC3339) + } + } + return metadata.Encode(&model) }, } diff --git a/internal/services/identitygovernance/privileged_access_group_eligiblity_schedule_resource.go 
b/internal/services/identitygovernance/privileged_access_group_eligiblity_schedule_resource.go index d92a9ad977..6cb4465130 100644 --- a/internal/services/identitygovernance/privileged_access_group_eligiblity_schedule_resource.go +++ b/internal/services/identitygovernance/privileged_access_group_eligiblity_schedule_resource.go @@ -101,10 +101,8 @@ func (r PrivilegedAccessGroupEligibilityScheduleResource) Read() sdk.ResourceFun return sdk.ResourceFunc{ Timeout: 5 * time.Minute, Func: func(ctx context.Context, metadata sdk.ResourceMetaData) error { - cSchedule := metadata.Client.IdentityGovernance.PrivilegedAccessGroupEligibilityScheduleClient - cRequests := metadata.Client.IdentityGovernance.PrivilegedAccessGroupEligibilityScheduleRequestsClient - - var request *msgraph.PrivilegedAccessGroupEligibilityScheduleRequest + scheduleClient := metadata.Client.IdentityGovernance.PrivilegedAccessGroupEligibilityScheduleClient + requestsClient := metadata.Client.IdentityGovernance.PrivilegedAccessGroupEligibilityScheduleRequestsClient id, err := parse.ParsePrivilegedAccessGroupScheduleID(metadata.ResourceData.Id()) if err != nil { @@ -112,20 +110,22 @@ func (r PrivilegedAccessGroupEligibilityScheduleResource) Read() sdk.ResourceFun } var model PrivilegedAccessGroupScheduleModel - if err := metadata.Decode(&model); err != nil { + if err = metadata.Decode(&model); err != nil { return fmt.Errorf("decoding: %+v", err) } - schedule, status, err := cSchedule.Get(ctx, id.ID()) - if err != nil && status != http.StatusNotFound { + schedule, scheduleStatus, err := scheduleClient.Get(ctx, id.ID()) + if err != nil && scheduleStatus != http.StatusNotFound { return fmt.Errorf("retrieving %s: %+v", id, err) } + var request *msgraph.PrivilegedAccessGroupEligibilityScheduleRequest + // Some details are only available on the request which is used for the create/update of the schedule. // Schedule requests are never deleted. New ones are created when changes are made. // Therefore on a read, we need to find the latest version of the request. // This is to cater for changes being made outside of Terraform. 
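The eligibility schedule resource repeats the same read strategy, and both resources now flatten the chosen schedule info defensively via pointer.From instead of dereferencing optional fields directly. A rough illustration of that pattern, as a self-contained sketch with simplified stand-in types; the struct shapes and the "NoExpiration" string are assumptions for illustration, not the actual msgraph definitions.

package main

import (
	"fmt"
	"time"
)

// Simplified stand-ins for the msgraph types referenced in the hunks; the
// real structs carry more fields than shown here.
type expirationPattern struct {
	EndDateTime *time.Time
	Duration    *string
	Type        *string
}

type requestSchedule struct {
	StartDateTime *time.Time
	Expiration    *expirationPattern
}

// from mirrors what pointer.From is used for in the hunks: return the zero
// value instead of panicking when the pointer is nil.
func from[T any](p *T) T {
	if p == nil {
		var zero T
		return zero
	}
	return *p
}

// flattenSchedule populates display values from whichever schedule info is
// available, guarding every optional field along the way.
func flattenSchedule(info *requestSchedule) (start, end, duration string, permanent bool) {
	if info == nil {
		return
	}
	if info.StartDateTime != nil {
		start = info.StartDateTime.Format(time.RFC3339)
	}
	if exp := info.Expiration; exp != nil {
		duration = from(exp.Duration)
		// "NoExpiration" stands in for the msgraph constant compared against in the diff.
		permanent = from(exp.Type) == "NoExpiration"
		if exp.EndDateTime != nil {
			end = exp.EndDateTime.Format(time.RFC3339)
		}
	}
	return
}

func main() {
	// A schedule with no expiration details flattens to zero values rather
	// than triggering a nil pointer dereference.
	start, end, duration, permanent := flattenSchedule(&requestSchedule{})
	fmt.Printf("start=%q end=%q duration=%q permanent=%t\n", start, end, duration, permanent)
}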
- requests, _, err := cRequests.List(ctx, odata.Query{ + requests, _, err := requestsClient.List(ctx, odata.Query{ Filter: fmt.Sprintf("groupId eq '%s' and targetScheduleId eq '%s'", id.GroupId, id.ID()), OrderBy: odata.OrderBy{ Field: "createdDateTime", @@ -135,45 +135,57 @@ func (r PrivilegedAccessGroupEligibilityScheduleResource) Read() sdk.ResourceFun if err != nil { return fmt.Errorf("listing requests: %+v", err) } - if len(*requests) == 0 { - if status == http.StatusNotFound { + if requests == nil || len(*requests) == 0 { + if scheduleStatus == http.StatusNotFound { + // No request and no schedule was found return metadata.MarkAsGone(id) } } else { request = pointer.To((*requests)[0]) - - model.Justification = *request.Justification - if request.TicketInfo.TicketNumber != nil { - model.TicketNumber = *request.TicketInfo.TicketNumber - } - if request.TicketInfo.TicketSystem != nil { - model.TicketSystem = *request.TicketInfo.TicketSystem - } - if request.ScheduleInfo.Expiration.Duration != nil { - model.Duration = *request.ScheduleInfo.Expiration.Duration - } } - // Typically this is because the request has expired - // So we populate the model with the schedule details - if status == http.StatusNotFound { + var scheduleInfo *msgraph.RequestSchedule + + if request != nil { + // The request is still present, populate from the request + scheduleInfo = request.ScheduleInfo + model.AssignmentType = request.AccessId - model.ExpirationDate = request.ScheduleInfo.Expiration.EndDateTime.Format(time.RFC3339) - model.GroupId = *request.GroupId - model.PermanentAssignment = *request.ScheduleInfo.Expiration.Type == msgraph.ExpirationPatternTypeNoExpiration - model.PrincipalId = *request.PrincipalId - model.StartDate = request.ScheduleInfo.StartDateTime.Format(time.RFC3339) + model.GroupId = pointer.From(request.GroupId) + model.Justification = pointer.From(request.Justification) + model.PrincipalId = pointer.From(request.PrincipalId) model.Status = request.Status + + if ticketInfo := request.TicketInfo; ticketInfo != nil { + model.TicketNumber = pointer.From(ticketInfo.TicketNumber) + model.TicketSystem = pointer.From(ticketInfo.TicketSystem) + } } else { + // The request has likely expired, so populate from the schedule + scheduleInfo = schedule.ScheduleInfo + model.AssignmentType = schedule.AccessId - model.ExpirationDate = schedule.ScheduleInfo.Expiration.EndDateTime.Format(time.RFC3339) - model.GroupId = *schedule.GroupId - model.PermanentAssignment = *schedule.ScheduleInfo.Expiration.Type == msgraph.ExpirationPatternTypeNoExpiration - model.PrincipalId = *schedule.PrincipalId - model.StartDate = schedule.ScheduleInfo.StartDateTime.Format(time.RFC3339) + model.GroupId = pointer.From(schedule.GroupId) + model.PrincipalId = pointer.From(schedule.PrincipalId) model.Status = schedule.Status } + if scheduleInfo != nil { + if expiration := scheduleInfo.Expiration; expiration != nil { + model.Duration = pointer.From(expiration.Duration) + + if expiration.EndDateTime != nil { + model.ExpirationDate = expiration.EndDateTime.Format(time.RFC3339) + } + if expiration.Type != nil { + model.PermanentAssignment = *expiration.Type == msgraph.ExpirationPatternTypeNoExpiration + } + } + if scheduleInfo.StartDateTime != nil { + model.StartDate = scheduleInfo.StartDateTime.Format(time.RFC3339) + } + } + return metadata.Encode(&model) }, } diff --git a/internal/services/identitygovernance/privileged_access_group_schedule.go b/internal/services/identitygovernance/privileged_access_group_schedule.go index 
17618a309f..73156d90a3 100644 --- a/internal/services/identitygovernance/privileged_access_group_schedule.go +++ b/internal/services/identitygovernance/privileged_access_group_schedule.go @@ -143,9 +143,10 @@ func buildScheduleRequest(model *PrivilegedAccessGroupScheduleModel, metadata *s schedule := msgraph.RequestSchedule{} schedule.Expiration = &msgraph.ExpirationPattern{} var startDate, expiryDate time.Time + var err error if model.StartDate != "" { - startDate, err := time.Parse(time.RFC3339, model.StartDate) + startDate, err = time.Parse(time.RFC3339, model.StartDate) if err != nil { return nil, fmt.Errorf("parsing %s: %+v", model.StartDate, err) } @@ -159,7 +160,7 @@ func buildScheduleRequest(model *PrivilegedAccessGroupScheduleModel, metadata *s switch { case model.ExpirationDate != "": - expiryDate, err := time.Parse(time.RFC3339, model.ExpirationDate) + expiryDate, err = time.Parse(time.RFC3339, model.ExpirationDate) if err != nil { return nil, fmt.Errorf("parsing %s: %+v", model.ExpirationDate, err) } diff --git a/internal/services/policies/group_role_management_policy_data_source.go b/internal/services/policies/group_role_management_policy_data_source.go index aeb1a65a4b..735b2a9fa4 100644 --- a/internal/services/policies/group_role_management_policy_data_source.go +++ b/internal/services/policies/group_role_management_policy_data_source.go @@ -5,9 +5,9 @@ package policies import ( "context" - "errors" "fmt" + "github.com/hashicorp/go-azure-helpers/lang/pointer" "github.com/hashicorp/go-azure-sdk/sdk/odata" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-provider-azuread/internal/sdk" @@ -89,7 +89,7 @@ func (r GroupRoleManagementPolicyDataSource) Read() sdk.ResourceFunc { roleID := metadata.ResourceData.Get("role_id").(string) id, err := getPolicyId(ctx, metadata, groupID, roleID) if err != nil { - return errors.New("Bad API response") + return fmt.Errorf("determining Policy ID: %+v", err) } result, _, err := clientPolicy.Get(ctx, id.ID()) @@ -106,15 +106,18 @@ func (r GroupRoleManagementPolicyDataSource) Read() sdk.ResourceFunc { if err != nil { return fmt.Errorf("retrieving %s: %+v", id, err) } + if assignments == nil { + return fmt.Errorf("retrieving %s: expected 1 assignment, got nil result", id) + } if len(*assignments) != 1 { return fmt.Errorf("retrieving %s: expected 1 assignment, got %d", id, len(*assignments)) } state := GroupRoleManagementPolicyDataSourceModel{ - Description: *result.Description, - DisplayName: *result.DisplayName, - GroupId: *result.ScopeId, - RoleId: *(*assignments)[0].RoleDefinitionId, + Description: pointer.From(result.Description), + DisplayName: pointer.From(result.DisplayName), + GroupId: pointer.From(result.ScopeId), + RoleId: pointer.From((*assignments)[0].RoleDefinitionId), } metadata.ResourceData.SetId(id.ID()) diff --git a/internal/services/policies/group_role_management_policy_resource.go b/internal/services/policies/group_role_management_policy_resource.go index c49a561048..1c05c93ef6 100644 --- a/internal/services/policies/group_role_management_policy_resource.go +++ b/internal/services/policies/group_role_management_policy_resource.go @@ -397,7 +397,7 @@ func (r GroupRoleManagementPolicyResource) Read() sdk.ResourceFunc { id, err := parse.ParseRoleManagementPolicyID(metadata.ResourceData.Id()) if err != nil { - return fmt.Errorf("Could not parse policy ID, %+v", err) + return fmt.Errorf("could not parse policy ID, %+v", err) } var model GroupRoleManagementPolicyModel @@ -419,14 +419,17 @@ 
func (r GroupRoleManagementPolicyResource) Read() sdk.ResourceFunc { if err != nil { return fmt.Errorf("retrieving %s: %+v", id, err) } + if assignments == nil { + return fmt.Errorf("retrieving %s: expected 1 assignment, got nil result", id) + } if len(*assignments) != 1 { return fmt.Errorf("retrieving %s: expected 1 assignment, got %d", id, len(*assignments)) } - model.Description = *result.Description - model.DisplayName = *result.DisplayName - model.GroupId = *result.ScopeId - model.RoleId = *(*assignments)[0].RoleDefinitionId + model.Description = pointer.From(result.Description) + model.DisplayName = pointer.From(result.DisplayName) + model.GroupId = pointer.From(result.ScopeId) + model.RoleId = pointer.From((*assignments)[0].RoleDefinitionId) if len(model.EligibleAssignmentRules) == 0 { model.EligibleAssignmentRules = make([]GroupRoleManagementPolicyEligibleAssignmentRules, 1) @@ -450,116 +453,130 @@ func (r GroupRoleManagementPolicyResource) Read() sdk.ResourceFunc { model.NotificationRules[0].EligibleAssignments = make([]GroupRoleManagementPolicyNotificationRule, 1) } - for _, rule := range *result.Rules { - switch *rule.ID { - case "Approval_EndUser_Assignment": - model.ActivationRules[0].RequireApproval = *rule.Setting.IsApprovalRequired - - primaryApprovers := make([]GroupRoleManagementPolicyApprover, 0) - for _, approver := range *(*rule.Setting.ApprovalStages)[0].PrimaryApprovers { - switch { - case *approver.ODataType == "#microsoft.graph.singleUser": - primaryApprovers = append(primaryApprovers, GroupRoleManagementPolicyApprover{ - ID: pointer.ToString(approver.UserID), - Type: "singleUser", - }) - case *approver.ODataType == "#microsoft.graph.groupMembers": - primaryApprovers = append(primaryApprovers, GroupRoleManagementPolicyApprover{ - ID: pointer.ToString(approver.GroupID), - Type: "groupMembers", - }) - default: - return fmt.Errorf("unknown approver type: %s", *approver.ODataType) + if result.Rules != nil { + for _, rule := range *result.Rules { + switch pointer.From(rule.ID) { + case "Approval_EndUser_Assignment": + model.ActivationRules[0].RequireApproval = pointer.From(rule.Setting.IsApprovalRequired) + + primaryApprovers := make([]GroupRoleManagementPolicyApprover, 0) + + if rule.Setting != nil && rule.Setting.ApprovalStages != nil { + if approvers := (*rule.Setting.ApprovalStages)[0].PrimaryApprovers; approvers != nil { + for _, approver := range *approvers { + switch { + case pointer.From(approver.ODataType) == "#microsoft.graph.singleUser": + primaryApprovers = append(primaryApprovers, GroupRoleManagementPolicyApprover{ + ID: pointer.ToString(approver.UserID), + Type: "singleUser", + }) + case pointer.From(approver.ODataType) == "#microsoft.graph.groupMembers": + primaryApprovers = append(primaryApprovers, GroupRoleManagementPolicyApprover{ + ID: pointer.ToString(approver.GroupID), + Type: "groupMembers", + }) + default: + return fmt.Errorf("unknown approver type: %s", *approver.ODataType) + } + } + } } - } - model.ActivationRules[0].ApprovalStages = []GroupRoleManagementPolicyApprovalStage{{PrimaryApprovers: primaryApprovers}} - case "AuthenticationContext_EndUser_Assignment": - if rule.ClaimValue != nil && *rule.ClaimValue != "" { - model.ActivationRules[0].RequireConditionalAccessContext = *rule.ClaimValue - } + model.ActivationRules[0].ApprovalStages = []GroupRoleManagementPolicyApprovalStage{{PrimaryApprovers: primaryApprovers}} - case "Enablement_Admin_Assignment": - model.ActiveAssignmentRules[0].RequireMultiFactorAuth = false - 
model.ActiveAssignmentRules[0].RequireJustification = false - for _, enabledRule := range *rule.EnabledRules { - switch enabledRule { - case "MultiFactorAuthentication": - model.ActiveAssignmentRules[0].RequireMultiFactorAuth = true - case "Justification": - model.ActiveAssignmentRules[0].RequireJustification = true + case "AuthenticationContext_EndUser_Assignment": + if rule.ClaimValue != nil && *rule.ClaimValue != "" { + model.ActivationRules[0].RequireConditionalAccessContext = *rule.ClaimValue } - } - case "Enablement_EndUser_Assignment": - model.ActivationRules[0].RequireMultiFactorAuth = false - model.ActivationRules[0].RequireJustification = false - model.ActivationRules[0].RequireTicketInfo = false - for _, enabledRule := range *rule.EnabledRules { - switch enabledRule { - case "MultiFactorAuthentication": - model.ActivationRules[0].RequireMultiFactorAuth = true - case "Justification": - model.ActivationRules[0].RequireJustification = true - case "Ticketing": - model.ActivationRules[0].RequireTicketInfo = true + case "Enablement_Admin_Assignment": + model.ActiveAssignmentRules[0].RequireMultiFactorAuth = false + model.ActiveAssignmentRules[0].RequireJustification = false + + if enabledRules := rule.EnabledRules; enabledRules != nil { + for _, enabledRule := range *enabledRules { + switch enabledRule { + case "MultiFactorAuthentication": + model.ActiveAssignmentRules[0].RequireMultiFactorAuth = true + case "Justification": + model.ActiveAssignmentRules[0].RequireJustification = true + } + } } - } - case "Expiration_Admin_Eligibility": - model.EligibleAssignmentRules[0].ExpirationRequired = *rule.IsExpirationRequired - model.EligibleAssignmentRules[0].ExpireAfter = *rule.MaximumDuration + case "Enablement_EndUser_Assignment": + model.ActivationRules[0].RequireMultiFactorAuth = false + model.ActivationRules[0].RequireJustification = false + model.ActivationRules[0].RequireTicketInfo = false + + if enabledRules := rule.EnabledRules; enabledRules != nil { + for _, enabledRule := range *enabledRules { + switch enabledRule { + case "MultiFactorAuthentication": + model.ActivationRules[0].RequireMultiFactorAuth = true + case "Justification": + model.ActivationRules[0].RequireJustification = true + case "Ticketing": + model.ActivationRules[0].RequireTicketInfo = true + } + } + } - case "Expiration_Admin_Assignment": - model.ActiveAssignmentRules[0].ExpirationRequired = *rule.IsExpirationRequired - model.ActiveAssignmentRules[0].ExpireAfter = *rule.MaximumDuration + case "Expiration_Admin_Eligibility": + model.EligibleAssignmentRules[0].ExpirationRequired = pointer.From(rule.IsExpirationRequired) + model.EligibleAssignmentRules[0].ExpireAfter = pointer.From(rule.MaximumDuration) - case "Expiration_EndUser_Assignment": - model.ActivationRules[0].MaximumDuration = *rule.MaximumDuration + case "Expiration_Admin_Assignment": + model.ActiveAssignmentRules[0].ExpirationRequired = pointer.From(rule.IsExpirationRequired) + model.ActiveAssignmentRules[0].ExpireAfter = pointer.From(rule.MaximumDuration) - case "Notification_Admin_Admin_Assignment": - model.NotificationRules[0].ActiveAssignments[0].AdminNotifications = []GroupRoleManagementPolicyNotificationSettings{ - *flattenNotificationSettings(pointer.To(rule)), - } + case "Expiration_EndUser_Assignment": + model.ActivationRules[0].MaximumDuration = pointer.From(rule.MaximumDuration) - case "Notification_Admin_Admin_Eligibility": - model.NotificationRules[0].EligibleAssignments[0].AdminNotifications = []GroupRoleManagementPolicyNotificationSettings{ 
- *flattenNotificationSettings(pointer.To(rule)), - } + case "Notification_Admin_Admin_Assignment": + model.NotificationRules[0].ActiveAssignments[0].AdminNotifications = []GroupRoleManagementPolicyNotificationSettings{ + flattenNotificationSettings(rule), + } - case "Notification_Admin_EndUser_Assignment": - model.NotificationRules[0].EligibleActivations[0].AdminNotifications = []GroupRoleManagementPolicyNotificationSettings{ - *flattenNotificationSettings(pointer.To(rule)), - } + case "Notification_Admin_Admin_Eligibility": + model.NotificationRules[0].EligibleAssignments[0].AdminNotifications = []GroupRoleManagementPolicyNotificationSettings{ + flattenNotificationSettings(rule), + } - case "Notification_Approver_Admin_Assignment": - model.NotificationRules[0].ActiveAssignments[0].ApproverNotifications = []GroupRoleManagementPolicyNotificationSettings{ - *flattenNotificationSettings(pointer.To(rule)), - } + case "Notification_Admin_EndUser_Assignment": + model.NotificationRules[0].EligibleActivations[0].AdminNotifications = []GroupRoleManagementPolicyNotificationSettings{ + flattenNotificationSettings(rule), + } - case "Notification_Approver_Admin_Eligibility": - model.NotificationRules[0].EligibleAssignments[0].ApproverNotifications = []GroupRoleManagementPolicyNotificationSettings{ - *flattenNotificationSettings(pointer.To(rule)), - } + case "Notification_Approver_Admin_Assignment": + model.NotificationRules[0].ActiveAssignments[0].ApproverNotifications = []GroupRoleManagementPolicyNotificationSettings{ + flattenNotificationSettings(rule), + } - case "Notification_Approver_EndUser_Assignment": - model.NotificationRules[0].EligibleActivations[0].ApproverNotifications = []GroupRoleManagementPolicyNotificationSettings{ - *flattenNotificationSettings(pointer.To(rule)), - } + case "Notification_Approver_Admin_Eligibility": + model.NotificationRules[0].EligibleAssignments[0].ApproverNotifications = []GroupRoleManagementPolicyNotificationSettings{ + flattenNotificationSettings(rule), + } - case "Notification_Requestor_Admin_Assignment": - model.NotificationRules[0].ActiveAssignments[0].AssigneeNotifications = []GroupRoleManagementPolicyNotificationSettings{ - *flattenNotificationSettings(pointer.To(rule)), - } + case "Notification_Approver_EndUser_Assignment": + model.NotificationRules[0].EligibleActivations[0].ApproverNotifications = []GroupRoleManagementPolicyNotificationSettings{ + flattenNotificationSettings(rule), + } - case "Notification_Requestor_Admin_Eligibility": - model.NotificationRules[0].EligibleAssignments[0].AssigneeNotifications = []GroupRoleManagementPolicyNotificationSettings{ - *flattenNotificationSettings(pointer.To(rule)), - } + case "Notification_Requestor_Admin_Assignment": + model.NotificationRules[0].ActiveAssignments[0].AssigneeNotifications = []GroupRoleManagementPolicyNotificationSettings{ + flattenNotificationSettings(rule), + } - case "Notification_Requestor_EndUser_Assignment": - model.NotificationRules[0].EligibleActivations[0].AssigneeNotifications = []GroupRoleManagementPolicyNotificationSettings{ - *flattenNotificationSettings(pointer.To(rule)), + case "Notification_Requestor_Admin_Eligibility": + model.NotificationRules[0].EligibleAssignments[0].AssigneeNotifications = []GroupRoleManagementPolicyNotificationSettings{ + flattenNotificationSettings(rule), + } + + case "Notification_Requestor_EndUser_Assignment": + model.NotificationRules[0].EligibleActivations[0].AssigneeNotifications = []GroupRoleManagementPolicyNotificationSettings{ + 
flattenNotificationSettings(rule), + } } } } @@ -955,11 +972,11 @@ func expandNotificationSettings(rule msgraph.UnifiedRoleManagementPolicyRule, da } } -func flattenNotificationSettings(rule *msgraph.UnifiedRoleManagementPolicyRule) *GroupRoleManagementPolicyNotificationSettings { - return &GroupRoleManagementPolicyNotificationSettings{ +func flattenNotificationSettings(rule msgraph.UnifiedRoleManagementPolicyRule) GroupRoleManagementPolicyNotificationSettings { + return GroupRoleManagementPolicyNotificationSettings{ NotificationLevel: rule.NotificationLevel, - DefaultRecipients: *rule.IsDefaultRecipientsEnabled, - AdditionalRecipients: *rule.NotificationRecipients, + DefaultRecipients: pointer.From(rule.IsDefaultRecipientsEnabled), + AdditionalRecipients: pointer.From(rule.NotificationRecipients), } } diff --git a/internal/services/policies/parse/role_management_policy.go b/internal/services/policies/parse/role_management_policy.go index 0c39de8d1c..15a1565c2e 100644 --- a/internal/services/policies/parse/role_management_policy.go +++ b/internal/services/policies/parse/role_management_policy.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package parse import ( diff --git a/internal/services/policies/parse/role_management_policy_assignment.go b/internal/services/policies/parse/role_management_policy_assignment.go index 7422a5a8e0..b8156d55b1 100644 --- a/internal/services/policies/parse/role_management_policy_assignment.go +++ b/internal/services/policies/parse/role_management_policy_assignment.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package parse import ( diff --git a/internal/services/policies/parse/role_management_policy_rule.go b/internal/services/policies/parse/role_management_policy_rule.go index bc49c67d35..593505b29b 100644 --- a/internal/services/policies/parse/role_management_policy_rule.go +++ b/internal/services/policies/parse/role_management_policy_rule.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. 
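As context for the switch to pointer.From in the flattening code above: it dereferences only when the pointer is non-nil and otherwise returns the type's zero value, so a rule payload that omits IsExpirationRequired or MaximumDuration no longer risks a nil-pointer panic. A minimal, self-contained sketch of that behaviour, using a local generic from helper as a stand-in for pointer.From from go-azure-helpers:

package main

import "fmt"

// from mirrors the behaviour of pointer.From: dereference when non-nil,
// otherwise return the type's zero value instead of panicking.
func from[T any](p *T) T {
    if p == nil {
        var zero T
        return zero
    }
    return *p
}

func main() {
    var isExpirationRequired *bool // nil, as the Graph API may omit the field
    maximumDuration := "P365D"

    fmt.Println(from(isExpirationRequired)) // false, no nil-pointer panic
    fmt.Println(from(&maximumDuration))     // "P365D"
}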
+// SPDX-License-Identifier: MPL-2.0 + package parse import ( diff --git a/internal/services/serviceprincipals/service_principal_data_source.go b/internal/services/serviceprincipals/service_principal_data_source.go index db9f0479d1..fba3c3022f 100644 --- a/internal/services/serviceprincipals/service_principal_data_source.go +++ b/internal/services/serviceprincipals/service_principal_data_source.go @@ -11,6 +11,7 @@ import ( "strings" "time" + "github.com/hashicorp/go-azure-helpers/lang/pointer" "github.com/hashicorp/go-azure-sdk/sdk/odata" "github.com/hashicorp/terraform-provider-azuread/internal/clients" "github.com/hashicorp/terraform-provider-azuread/internal/helpers" @@ -327,11 +328,7 @@ func servicePrincipalDataSourceRead(ctx context.Context, d *pluginsdk.ResourceDa } for _, sp := range *result { - if sp.DisplayName == nil { - continue - } - - if *sp.DisplayName == displayName { + if strings.EqualFold(pointer.From(sp.DisplayName), displayName) { servicePrincipal = &sp break } @@ -361,11 +358,7 @@ func servicePrincipalDataSourceRead(ctx context.Context, d *pluginsdk.ResourceDa } for _, sp := range *result { - if sp.AppId == nil { - continue - } - - if *sp.AppId == clientId { + if strings.EqualFold(pointer.From(sp.AppId), clientId) { servicePrincipal = &sp break } diff --git a/internal/services/serviceprincipals/service_principal_data_source_test.go b/internal/services/serviceprincipals/service_principal_data_source_test.go index b5bc786139..d1ee9e3e55 100644 --- a/internal/services/serviceprincipals/service_principal_data_source_test.go +++ b/internal/services/serviceprincipals/service_principal_data_source_test.go @@ -75,6 +75,25 @@ func TestAccServicePrincipalDataSource_byObjectId(t *testing.T) { }) } +func TestAccServicePrincipalDataSource_builtInByDisplayName(t *testing.T) { + data := acceptance.BuildTestData(t, "data.azuread_service_principal", "test") + r := ServicePrincipalDataSource{} + + data.DataSourceTest(t, []acceptance.TestStep{ + { + Config: r.builtInByDisplayName(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).Key("app_role_ids.%").MatchesRegex(regexp.MustCompile("[0-9]+")), + check.That(data.ResourceName).Key("app_roles.#").MatchesRegex(regexp.MustCompile("[0-9]+")), + check.That(data.ResourceName).Key("client_id").IsUuid(), + check.That(data.ResourceName).Key("display_name").Exists(), + check.That(data.ResourceName).Key("oauth2_permission_scope_ids.%").MatchesRegex(regexp.MustCompile("[0-9]+")), + check.That(data.ResourceName).Key("oauth2_permission_scopes.#").MatchesRegex(regexp.MustCompile("[0-9]+")), + ), + }, + }) +} + func (ServicePrincipalDataSource) testCheckFunc(data acceptance.TestData) acceptance.TestCheckFunc { tenantId := os.Getenv("ARM_TENANT_ID") return acceptance.ComposeTestCheckFunc( @@ -85,6 +104,7 @@ func (ServicePrincipalDataSource) testCheckFunc(data acceptance.TestData) accept check.That(data.ResourceName).Key("app_roles.#").HasValue("2"), check.That(data.ResourceName).Key("application_id").IsUuid(), check.That(data.ResourceName).Key("application_tenant_id").HasValue(tenantId), + check.That(data.ResourceName).Key("client_id").IsUuid(), check.That(data.ResourceName).Key("description").HasValue("An internal app for testing"), check.That(data.ResourceName).Key("display_name").Exists(), check.That(data.ResourceName).Key("feature_tags.#").HasValue("1"), @@ -173,3 +193,13 @@ data "azuread_service_principal" "test" { } `, ServicePrincipalResource{}.complete(data)) } + +func (ServicePrincipalDataSource) 
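The display name and client ID lookups above now compare case-insensitively with strings.EqualFold, which is what lets the MiCrOsOfT GrApH test below resolve the built-in Microsoft Graph service principal. A small stand-alone sketch of the matching pattern; the servicePrincipal struct and deref helper here are hypothetical stand-ins for the Graph model and pointer.From:

package main

import (
    "fmt"
    "strings"
)

// servicePrincipal is a stand-in for the Graph model, where DisplayName may be nil.
type servicePrincipal struct {
    DisplayName *string
}

func deref(s *string) string {
    if s == nil {
        return ""
    }
    return *s
}

func main() {
    name := "Microsoft Graph"
    candidates := []servicePrincipal{{DisplayName: nil}, {DisplayName: &name}}

    var found *servicePrincipal
    for i, sp := range candidates {
        // EqualFold makes the lookup case-insensitive, so "MiCrOsOfT GrApH" still matches.
        if strings.EqualFold(deref(sp.DisplayName), "MiCrOsOfT GrApH") {
            found = &candidates[i]
            break
        }
    }
    fmt.Println(found != nil) // true
}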
builtInByDisplayName(data acceptance.TestData) string { + return ` +provider "azuread" {} + +data "azuread_service_principal" "test" { + display_name = "MiCrOsOfT GrApH" +} +` +} diff --git a/internal/services/synchronization/registration.go b/internal/services/synchronization/registration.go index 51eca7e9b4..57b805324d 100644 --- a/internal/services/synchronization/registration.go +++ b/internal/services/synchronization/registration.go @@ -12,18 +12,18 @@ type Registration struct{} // Name is the name of this Service func (r Registration) Name() string { - return "Service Principals" + return "Synchronization" } // AssociatedGitHubLabel is the issue/PR label which can be applied to PRs that include changes to this service package func (r Registration) AssociatedGitHubLabel() string { - return "feature/service-principals" + return "feature/synchronization" } // WebsiteCategories returns a list of categories which can be used for the sidebar func (r Registration) WebsiteCategories() []string { return []string{ - "Service Principals", + "Synchronization", } } @@ -35,8 +35,9 @@ func (r Registration) SupportedDataSources() map[string]*pluginsdk.Resource { // SupportedResources returns the supported Resources supported by this Service func (r Registration) SupportedResources() map[string]*pluginsdk.Resource { return map[string]*pluginsdk.Resource{ - "azuread_synchronization_job": synchronizationJobResource(), - "azuread_synchronization_secret": synchronizationSecretResource(), + "azuread_synchronization_job": synchronizationJobResource(), + "azuread_synchronization_job_provision_on_demand": synchronizationJobProvisionOnDemandResource(), + "azuread_synchronization_secret": synchronizationSecretResource(), } } diff --git a/internal/services/synchronization/synchronization.go b/internal/services/synchronization/synchronization.go index bc9ee184f1..a409c3158f 100644 --- a/internal/services/synchronization/synchronization.go +++ b/internal/services/synchronization/synchronization.go @@ -48,6 +48,41 @@ func expandSynchronizationSecretKeyStringValuePair(in []interface{}) *[]msgraph. 
return &result } +func expandSynchronizationJobApplicationParameters(in []interface{}) *[]msgraph.SynchronizationJobApplicationParameters { + result := make([]msgraph.SynchronizationJobApplicationParameters, 0) + + for _, raw := range in { + if raw == nil { + continue + } + item := raw.(map[string]interface{}) + + result = append(result, msgraph.SynchronizationJobApplicationParameters{ + Subjects: expandSynchronizationJobSubject(item["subject"].([]interface{})), + RuleId: pointer.To(item["rule_id"].(string)), + }) + } + + return &result +} + +func expandSynchronizationJobSubject(in []interface{}) *[]msgraph.SynchronizationJobSubject { + result := make([]msgraph.SynchronizationJobSubject, 0) + for _, raw := range in { + if raw == nil { + continue + } + item := raw.(map[string]interface{}) + + result = append(result, msgraph.SynchronizationJobSubject{ + ObjectId: pointer.To(item["object_id"].(string)), + ObjectTypeName: pointer.To(item["object_type_name"].(string)), + }) + } + + return &result +} + func flattenSynchronizationSchedule(in *msgraph.SynchronizationSchedule) []map[string]interface{} { if in == nil { return []map[string]interface{}{} diff --git a/internal/services/synchronization/synchronization_job_provision_on_demand_resource.go b/internal/services/synchronization/synchronization_job_provision_on_demand_resource.go new file mode 100644 index 0000000000..3cb92c64d0 --- /dev/null +++ b/internal/services/synchronization/synchronization_job_provision_on_demand_resource.go @@ -0,0 +1,156 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package synchronization + +import ( + "context" + "errors" + "net/http" + "time" + + "github.com/hashicorp/go-azure-sdk/sdk/odata" + "github.com/hashicorp/go-uuid" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-provider-azuread/internal/clients" + "github.com/hashicorp/terraform-provider-azuread/internal/tf" + "github.com/hashicorp/terraform-provider-azuread/internal/tf/validation" + "github.com/manicminer/hamilton/msgraph" +) + +func synchronizationJobProvisionOnDemandResource() *schema.Resource { + return &schema.Resource{ + CreateContext: synchronizationProvisionOnDemandResourceCreate, + ReadContext: synchronizationProvisionOnDemandResourceRead, + DeleteContext: synchronizationProvisionOnDemandResourceDelete, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(15 * time.Minute), + Read: schema.DefaultTimeout(1 * time.Minute), + Delete: schema.DefaultTimeout(1 * time.Minute), + }, + SchemaVersion: 0, + + Schema: map[string]*schema.Schema{ + "service_principal_id": { + Description: "The object ID of the service principal for which this synchronization job should be provisioned.", + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateDiagFunc: validation.ValidateDiag(validation.IsUUID), + }, + + "synchronization_job_id": { + Description: "The identifier for the synchronization job.", + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "parameter": { + Description: "Represents the objects that will be provisioned and the synchronization rules executed. The resource is primarily used for on-demand provisioning.", + Type: schema.TypeList, + Required: true, + ForceNew: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "rule_id": { + Description: "The identifier of the synchronization rule to be applied. 
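The two expand helpers above translate the Terraform block lists into the Graph request shapes, skipping nil entries along the way. A rough, self-contained sketch of that conversion; jobParameter and jobSubject are hypothetical local types standing in for the msgraph structs:

package main

import "fmt"

// jobSubject and jobParameter are stand-ins for the msgraph request types.
type jobSubject struct {
    ObjectId       string
    ObjectTypeName string
}

type jobParameter struct {
    RuleId   string
    Subjects []jobSubject
}

// expandParameters walks the raw Terraform list and builds typed parameters,
// skipping nil entries just as the provider helpers do.
func expandParameters(in []interface{}) []jobParameter {
    out := make([]jobParameter, 0)
    for _, raw := range in {
        if raw == nil {
            continue
        }
        item := raw.(map[string]interface{})
        param := jobParameter{RuleId: item["rule_id"].(string)}
        for _, s := range item["subject"].([]interface{}) {
            sub := s.(map[string]interface{})
            param.Subjects = append(param.Subjects, jobSubject{
                ObjectId:       sub["object_id"].(string),
                ObjectTypeName: sub["object_type_name"].(string),
            })
        }
        out = append(out, param)
    }
    return out
}

func main() {
    raw := []interface{}{
        map[string]interface{}{
            "rule_id": "03f7d90d-bf71-41b1-bda6-aaf0ddbee5d8",
            "subject": []interface{}{
                map[string]interface{}{"object_id": "00000000-0000-0000-0000-000000000000", "object_type_name": "Group"},
            },
        },
    }
    fmt.Printf("%+v\n", expandParameters(raw))
}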
This rule ID is defined in the schema for a given synchronization job or template.", + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "subject": { + Description: "The identifiers of one or more objects to which a synchronizationJob is to be applied.", + Type: schema.TypeList, + Required: true, + ForceNew: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "object_id": { + Description: "The identifier of an object to which a synchronization job is to be applied. Can be one of the following: (1) An onPremisesDistinguishedName for synchronization from Active Directory to Azure AD. (2) The user ID for synchronization from Azure AD to a third-party. (3) The Worker ID of the Workday worker for synchronization from Workday to either Active Directory or Azure AD.", + Type: schema.TypeString, + Required: true, + }, + + "object_type_name": { + Description: "The type of the object to which a synchronization job is to be applied. Can be one of the following: `user` for synchronizing between Active Directory and Azure AD, `User` for synchronizing a user between Azure AD and a third-party application, `Worker` for synchronizing a user between Workday and either Active Directory or Azure AD, `Group` for synchronizing a group between Azure AD and a third-party application.", + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringInSlice([]string{"Group", "user", "User", "Worker"}, false), + }, + }, + }, + }, + }, + }, + }, + + "triggers": { + Type: schema.TypeMap, + Optional: true, + ForceNew: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + }, + } +} + +func synchronizationProvisionOnDemandResourceCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + client := meta.(*clients.Client).ServicePrincipals.SynchronizationJobClient + spClient := meta.(*clients.Client).ServicePrincipals.ServicePrincipalsClient + objectId := d.Get("service_principal_id").(string) + jobId := d.Get("synchronization_job_id").(string) + + tf.LockByName(servicePrincipalResourceName, objectId) + defer tf.UnlockByName(servicePrincipalResourceName, objectId) + + servicePrincipal, status, err := spClient.Get(ctx, objectId, odata.Query{}) + if err != nil { + if status == http.StatusNotFound { + return tf.ErrorDiagPathF(nil, "service_principal_id", "Service principal with object ID %q was not found", objectId) + } + return tf.ErrorDiagPathF(err, "service_principal_id", "Retrieving service principal with object ID %q", objectId) + } + if servicePrincipal == nil || servicePrincipal.ID() == nil { + return tf.ErrorDiagF(errors.New("nil service principal or service principal with nil ID was returned"), "API error retrieving service principal with object ID %q", objectId) + } + + job, status, err := client.Get(ctx, jobId, objectId) + if err != nil { + if status == http.StatusNotFound { + return tf.ErrorDiagPathF(nil, "synchronization_job_id", "Job with object ID %q was not found for service principal %q", jobId, objectId) + } + return tf.ErrorDiagPathF(err, "synchronization_job_id", "Retrieving job with object ID %q for service principal %q", jobId, objectId) + } + if job == nil || job.ID == nil { + return tf.ErrorDiagF(errors.New("nil job or job with nil ID was returned"), "API error retrieving job with object ID %q/%s", objectId, jobId) + } + + // Provision the synchronization job on demand + synchronizationProvisionOnDemand := &msgraph.SynchronizationJobProvisionOnDemand{ + Parameters: 
expandSynchronizationJobApplicationParameters(d.Get("parameter").([]interface{})), + } + + _, err = client.ProvisionOnDemand(ctx, jobId, synchronizationProvisionOnDemand, *servicePrincipal.ID()) + if err != nil { + return tf.ErrorDiagF(err, "Creating synchronization job for service principal ID %q", *servicePrincipal.ID()) + } + + id, _ := uuid.GenerateUUID() + d.SetId(id) + + return synchronizationProvisionOnDemandResourceRead(ctx, d, meta) +} + +func synchronizationProvisionOnDemandResourceRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + return nil +} + +func synchronizationProvisionOnDemandResourceDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + return nil +} diff --git a/internal/services/synchronization/synchronization_job_provision_on_demand_resource_test.go b/internal/services/synchronization/synchronization_job_provision_on_demand_resource_test.go new file mode 100644 index 0000000000..1093e9b2bb --- /dev/null +++ b/internal/services/synchronization/synchronization_job_provision_on_demand_resource_test.go @@ -0,0 +1,91 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package synchronization_test + +import ( + "context" + "fmt" + "regexp" + "testing" + + "github.com/hashicorp/go-azure-helpers/lang/pointer" + "github.com/hashicorp/terraform-plugin-testing/terraform" + "github.com/hashicorp/terraform-provider-azuread/internal/acceptance" + "github.com/hashicorp/terraform-provider-azuread/internal/clients" +) + +type SynchronizationJobProvisionOnDemandResource struct{} + +func TestAccSynchronizationJobProvisionOnDemand_basic(t *testing.T) { + data := acceptance.BuildTestData(t, "azuread_synchronization_job_provision_on_demand", "test") + r := SynchronizationJobProvisionOnDemandResource{} + + data.ResourceTest(t, r, []acceptance.TestStep{ + { + // The provisioned app isn't actually integrated so this will never work + Config: r.basic(data), + ExpectError: regexp.MustCompile("CredentialsMissing: Please configure provisioning"), + }, + }) +} + +func (r SynchronizationJobProvisionOnDemandResource) Exists(_ context.Context, _ *clients.Client, _ *terraform.InstanceState) (*bool, error) { + return pointer.To(true), nil +} + +func (SynchronizationJobProvisionOnDemandResource) template(data acceptance.TestData) string { + return fmt.Sprintf(` +provider "azuread" {} + +data "azuread_client_config" "test" {} + +data "azuread_application_template" "test" { + display_name = "Azure Databricks SCIM Provisioning Connector" +} + +resource "azuread_application" "test" { + display_name = "acctestSynchronizationJob-%[1]d" + owners = [data.azuread_client_config.test.object_id] + template_id = data.azuread_application_template.test.template_id +} + +resource "azuread_service_principal" "test" { + client_id = azuread_application.test.client_id + owners = [data.azuread_client_config.test.object_id] + use_existing = true +} + +resource "azuread_synchronization_job" "test" { + service_principal_id = azuread_service_principal.test.id + template_id = "dataBricks" +} + +resource "azuread_group" "test" { + display_name = "acctestGroup-%[1]d" + security_enabled = true +} +`, data.RandomInteger) +} + +func (r SynchronizationJobProvisionOnDemandResource) basic(data acceptance.TestData) string { + return fmt.Sprintf(` +%[1]s + +resource "azuread_synchronization_job_provision_on_demand" "test" { + service_principal_id = azuread_service_principal.test.id + synchronization_job_id = 
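Because the Read and Delete functions above are intentionally no-ops, the resource behaves as a one-shot action: Create checks that the service principal and job exist, fires the on-demand provisioning call, and records a random UUID purely so Terraform has an ID. A simplified sketch of that control flow; the provisioner interface and fakeClient below are assumptions for illustration, not the real msgraph client:

package main

import (
    "context"
    "errors"
    "fmt"
)

// provisioner is a hypothetical stand-in for the synchronization job client,
// modelling only the calls needed by the create flow.
type provisioner interface {
    GetJob(ctx context.Context, jobId, servicePrincipalId string) (found bool, err error)
    ProvisionOnDemand(ctx context.Context, jobId, servicePrincipalId string) error
}

// provisionOnce mirrors the create flow: confirm the job exists for the service
// principal, then trigger on-demand provisioning; nothing is read back or deleted.
func provisionOnce(ctx context.Context, c provisioner, jobId, spId string) error {
    found, err := c.GetJob(ctx, jobId, spId)
    if err != nil {
        return fmt.Errorf("retrieving job %q for service principal %q: %w", jobId, spId, err)
    }
    if !found {
        return errors.New("synchronization job not found")
    }
    return c.ProvisionOnDemand(ctx, jobId, spId)
}

// fakeClient stands in for a real client so the sketch runs on its own.
type fakeClient struct{}

func (fakeClient) GetJob(context.Context, string, string) (bool, error)     { return true, nil }
func (fakeClient) ProvisionOnDemand(context.Context, string, string) error { return nil }

func main() {
    err := provisionOnce(context.Background(), fakeClient{}, "job-id", "sp-object-id")
    fmt.Println(err) // <nil>
}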
trimprefix(azuread_synchronization_job.test.id, "${azuread_service_principal.test.id}/job/") + + parameter { + rule_id = "03f7d90d-bf71-41b1-bda6-aaf0ddbee5d8" // appears to be a global value + + subject { + object_id = azuread_group.test.id + object_type_name = "Group" + } + } +} + + +`, r.template(data)) +} diff --git a/internal/services/users/user_resource.go b/internal/services/users/user_resource.go index 02df5f2ea4..8608581e2b 100644 --- a/internal/services/users/user_resource.go +++ b/internal/services/users/user_resource.go @@ -576,6 +576,11 @@ func userResourceUpdate(ctx context.Context, d *pluginsdk.ResourceData, meta int } if _, err := client.Update(ctx, properties); err != nil { + // Flag the state as 'partial' to avoid setting `password` from the current config. Since the config is the + // only source for this property, if the update fails due to a bad password, the current password will be forgotten + // and Terraform will not offer a diff in the next plan. + d.Partial(true) //lintignore:R007 + return tf.ErrorDiagF(err, "Could not update user with ID: %q", d.Id()) } diff --git a/internal/services/users/user_resource_test.go b/internal/services/users/user_resource_test.go index c6834a5f10..3fb697da2f 100644 --- a/internal/services/users/user_resource_test.go +++ b/internal/services/users/user_resource_test.go @@ -127,6 +127,34 @@ func TestAccUser_passwordOmitted(t *testing.T) { }) } +func TestAccUser_passwordInvalid(t *testing.T) { + data := acceptance.BuildTestData(t, "azuread_user", "test") + r := UserResource{} + firstPassword := data.RandomPassword + secondPassword := "B" + + data.ResourceTest(t, r, []acceptance.TestStep{ + { + Config: r.setPassword(data, firstPassword), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + { + Config: r.setPassword(data, secondPassword), + ExpectError: regexp.MustCompile("specified password does not comply"), + }, + { + RefreshState: true, + ExpectNonEmptyPlan: true, + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + check.That(data.ResourceName).Key("password").HasValue(firstPassword), + ), + }, + }) +} + func (r UserResource) Exists(ctx context.Context, clients *clients.Client, state *terraform.InstanceState) (*bool, error) { client := clients.Users.UsersClient client.BaseClient.DisableRetries = true @@ -228,6 +256,7 @@ resource "azuread_user" "testA" { user_principal_name = "acctestUser'%[1]d.A@${data.azuread_domains.test.domains.0.domain_name}" display_name = "acctestUser-%[1]d-A" employee_id = "A%[3]s%[3]s" + mail = "acctestUser-%[1]d-A@${data.azuread_domains.test.domains.0.domain_name}" password = "%[2]s" } @@ -235,6 +264,7 @@ resource "azuread_user" "testB" { user_principal_name = "acctestUser.%[1]d.B@${data.azuread_domains.test.domains.0.domain_name}" display_name = "acctestUser-%[1]d-B" mail_nickname = "acctestUser-%[1]d-B" + mail = "acctestUser-%[1]d-B@${data.azuread_domains.test.domains.0.domain_name}" employee_id = "B%[3]s%[3]s" password = "%[2]s" } @@ -282,3 +312,19 @@ resource "azuread_user" "test" { } `, data.RandomInteger) } + +func (UserResource) setPassword(data acceptance.TestData, password string) string { + return fmt.Sprintf(` +provider "azuread" {} + +data "azuread_domains" "test" { + only_initial = true +} + +resource "azuread_user" "test" { + user_principal_name = "acctestUser'%[1]d@${data.azuread_domains.test.domains.0.domain_name}" + display_name = "acctestUser-%[1]d" + password = "%[2]s" +} +`, data.RandomInteger, 
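The d.Partial(true) call added above keeps a failed password update from overwriting the last known-good password in state, which the passwordInvalid test then relies on when it refreshes and still sees the original value. A toy sketch of the same idea, with a plain map standing in for Terraform state:

package main

import (
    "errors"
    "fmt"
)

// applyPassword mimics the partial-update idea: only persist the new password
// into local state once the remote update succeeds, otherwise keep the old one.
func applyPassword(state map[string]string, newPassword string, update func(string) error) error {
    if err := update(newPassword); err != nil {
        // Equivalent of d.Partial(true): leave state["password"] untouched so the
        // previously working credential is not forgotten on failure.
        return err
    }
    state["password"] = newPassword
    return nil
}

func main() {
    state := map[string]string{"password": "old-known-good"}
    err := applyPassword(state, "B", func(string) error { return errors.New("password does not comply") })
    fmt.Println(err, state["password"]) // the old password is still recorded
}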
password) +} diff --git a/internal/services/users/users_data_source.go b/internal/services/users/users_data_source.go index 3fc8848390..e2624bc2c7 100644 --- a/internal/services/users/users_data_source.go +++ b/internal/services/users/users_data_source.go @@ -35,7 +35,7 @@ func usersData() *pluginsdk.Resource { Type: pluginsdk.TypeList, Optional: true, Computed: true, - ExactlyOneOf: []string{"object_ids", "user_principal_names", "mail_nicknames", "employee_ids", "return_all"}, + ExactlyOneOf: []string{"object_ids", "user_principal_names", "mail_nicknames", "mails", "employee_ids", "return_all"}, Elem: &pluginsdk.Schema{ Type: pluginsdk.TypeString, ValidateDiagFunc: validation.ValidateDiag(validation.StringIsNotEmpty), @@ -47,7 +47,19 @@ func usersData() *pluginsdk.Resource { Type: pluginsdk.TypeList, Optional: true, Computed: true, - ExactlyOneOf: []string{"object_ids", "user_principal_names", "mail_nicknames", "employee_ids", "return_all"}, + ExactlyOneOf: []string{"object_ids", "user_principal_names", "mail_nicknames", "mails", "employee_ids", "return_all"}, + Elem: &pluginsdk.Schema{ + Type: pluginsdk.TypeString, + ValidateDiagFunc: validation.ValidateDiag(validation.StringIsNotEmpty), + }, + }, + + "mails": { + Description: "The SMTP address of the users", + Type: pluginsdk.TypeList, + Optional: true, + Computed: true, + ExactlyOneOf: []string{"object_ids", "user_principal_names", "mail_nicknames", "mails", "employee_ids", "return_all"}, Elem: &pluginsdk.Schema{ Type: pluginsdk.TypeString, ValidateDiagFunc: validation.ValidateDiag(validation.StringIsNotEmpty), @@ -59,7 +71,7 @@ func usersData() *pluginsdk.Resource { Type: pluginsdk.TypeList, Optional: true, Computed: true, - ExactlyOneOf: []string{"object_ids", "user_principal_names", "mail_nicknames", "employee_ids", "return_all"}, + ExactlyOneOf: []string{"object_ids", "user_principal_names", "mail_nicknames", "mails", "employee_ids", "return_all"}, Elem: &pluginsdk.Schema{ Type: pluginsdk.TypeString, ValidateDiagFunc: validation.ValidateDiag(validation.IsUUID), @@ -71,7 +83,7 @@ func usersData() *pluginsdk.Resource { Type: pluginsdk.TypeList, Optional: true, Computed: true, - ExactlyOneOf: []string{"object_ids", "user_principal_names", "mail_nicknames", "employee_ids", "return_all"}, + ExactlyOneOf: []string{"object_ids", "user_principal_names", "mail_nicknames", "mails", "employee_ids", "return_all"}, Elem: &pluginsdk.Schema{ Type: pluginsdk.TypeString, ValidateDiagFunc: validation.ValidateDiag(validation.StringIsNotEmpty), @@ -92,7 +104,7 @@ func usersData() *pluginsdk.Resource { Optional: true, Default: false, ConflictsWith: []string{"ignore_missing"}, - ExactlyOneOf: []string{"object_ids", "user_principal_names", "mail_nicknames", "employee_ids", "return_all"}, + ExactlyOneOf: []string{"object_ids", "user_principal_names", "mail_nicknames", "mails", "employee_ids", "return_all"}, }, "users": { @@ -263,6 +275,31 @@ func usersDataSourceRead(ctx context.Context, d *pluginsdk.ResourceData, meta in } users = append(users, (*result)[0]) } + } else if mails, ok := d.Get("mails").([]interface{}); ok && len(mails) > 0 { + expectedCount = len(mails) + for _, v := range mails { + query := odata.Query{ + Filter: fmt.Sprintf("mail eq '%s'", odata.EscapeSingleQuote(v.(string))), + } + result, _, err := client.List(ctx, query) + if err != nil { + return tf.ErrorDiagF(err, "Finding user with mail address: %q", v) + } + if result == nil { + return tf.ErrorDiagF(errors.New("API returned nil result"), "Bad API Response") + } + + count := 
len(*result) + if count > 1 { + return tf.ErrorDiagPathF(nil, "mails", "More than one user found with mail address: %q", v) + } else if count == 0 { + if ignoreMissing { + continue + } + return tf.ErrorDiagPathF(err, "mails", "User not found with mail address: %q", v) + } + users = append(users, (*result)[0]) + } } else if employeeIds, ok := d.Get("employee_ids").([]interface{}); ok && len(employeeIds) > 0 { expectedCount = len(employeeIds) for _, v := range employeeIds { @@ -299,6 +336,7 @@ func usersDataSourceRead(ctx context.Context, d *pluginsdk.ResourceData, meta in upns := make([]string, 0) objectIds := make([]string, 0) mailNicknames := make([]string, 0) + mails := make([]msgraph.StringNullWhenEmpty, 0) employeeIds := make([]msgraph.StringNullWhenEmpty, 0) userList := make([]map[string]interface{}, 0) for _, u := range users { @@ -311,6 +349,9 @@ func usersDataSourceRead(ctx context.Context, d *pluginsdk.ResourceData, meta in if u.MailNickname != nil { mailNicknames = append(mailNicknames, *u.MailNickname) } + if u.Mail != nil { + mails = append(mails, *u.Mail) + } if u.EmployeeId != nil { employeeIds = append(employeeIds, *u.EmployeeId) } @@ -339,6 +380,7 @@ func usersDataSourceRead(ctx context.Context, d *pluginsdk.ResourceData, meta in d.SetId("users#" + base64.URLEncoding.EncodeToString(h.Sum(nil))) tf.Set(d, "employee_ids", employeeIds) tf.Set(d, "mail_nicknames", mailNicknames) + tf.Set(d, "mails", mails) tf.Set(d, "object_ids", objectIds) tf.Set(d, "user_principal_names", upns) tf.Set(d, "users", userList) diff --git a/internal/services/users/users_data_source_test.go b/internal/services/users/users_data_source_test.go index 1520f40e8a..aab5f136d7 100644 --- a/internal/services/users/users_data_source_test.go +++ b/internal/services/users/users_data_source_test.go @@ -103,6 +103,38 @@ func TestAccUsersDataSource_byMailNicknamesIgnoreMissing(t *testing.T) { }}) } +func TestAccUsersDataSource_byMails(t *testing.T) { + data := acceptance.BuildTestData(t, "data.azuread_users", "test") + + data.DataSourceTest(t, []acceptance.TestStep{{ + Config: UsersDataSource{}.byMails(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).Key("user_principal_names.#").HasValue("2"), + check.That(data.ResourceName).Key("object_ids.#").HasValue("2"), + check.That(data.ResourceName).Key("mail_nicknames.#").HasValue("2"), + check.That(data.ResourceName).Key("mails.#").HasValue("2"), + check.That(data.ResourceName).Key("employee_ids.#").HasValue("2"), + check.That(data.ResourceName).Key("users.#").HasValue("2"), + ), + }}) +} + +func TestAccUsersDataSource_byMailsIgnoreMissing(t *testing.T) { + data := acceptance.BuildTestData(t, "data.azuread_users", "test") + + data.DataSourceTest(t, []acceptance.TestStep{{ + Config: UsersDataSource{}.byMailsIgnoreMissing(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).Key("user_principal_names.#").HasValue("2"), + check.That(data.ResourceName).Key("object_ids.#").HasValue("2"), + check.That(data.ResourceName).Key("mail_nicknames.#").HasValue("2"), + check.That(data.ResourceName).Key("mails.#").HasValue("2"), + check.That(data.ResourceName).Key("employee_ids.#").HasValue("2"), + check.That(data.ResourceName).Key("users.#").HasValue("2"), + ), + }}) +} + func TestAccUsersDataSource_byEmployeeIds(t *testing.T) { data := acceptance.BuildTestData(t, "data.azuread_users", "test") @@ -242,6 +274,32 @@ data "azuread_users" "test" { `, UserResource{}.threeUsersABC(data), data.RandomInteger) } +func 
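For the mails lookup added above: each address is dropped into an OData $filter of the form mail eq '…', with single quotes escaped, and anything other than exactly one hit is either an error or, under ignore_missing, skipped. A small sketch of the filter construction; the provider itself uses odata.EscapeSingleQuote, while this stand-alone version inlines the equivalent quote doubling:

package main

import (
    "fmt"
    "strings"
)

// buildMailFilter escapes embedded single quotes (OData doubles them) and
// produces the $filter used to look up a user by primary SMTP address.
func buildMailFilter(mail string) string {
    escaped := strings.ReplaceAll(mail, "'", "''")
    return fmt.Sprintf("mail eq '%s'", escaped)
}

func main() {
    fmt.Println(buildMailFilter("jane.o'brien@example.com"))
    // mail eq 'jane.o''brien@example.com'
}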
(UsersDataSource) byMails(data acceptance.TestData) string { + return fmt.Sprintf(` +%[1]s + +data "azuread_users" "test" { + mails = [azuread_user.testA.mail, azuread_user.testB.mail] +} +`, UserResource{}.threeUsersABC(data)) +} + +func (UsersDataSource) byMailsIgnoreMissing(data acceptance.TestData) string { + return fmt.Sprintf(` +%[1]s + +data "azuread_users" "test" { + ignore_missing = true + + mails = [ + azuread_user.testA.mail, + "not-a-real-user-%[2]d${data.azuread_domains.test.domains.0.domain_name}", + azuread_user.testB.mail, + ] +} +`, UserResource{}.threeUsersABC(data), data.RandomInteger) +} + func (UsersDataSource) byEmployeeIds(data acceptance.TestData) string { return fmt.Sprintf(` %[1]s diff --git a/internal/tf/pluginsdk/diag.go b/internal/tf/pluginsdk/diag.go index 2bcfd452df..00486bb1d6 100644 --- a/internal/tf/pluginsdk/diag.go +++ b/internal/tf/pluginsdk/diag.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package pluginsdk import ( diff --git a/internal/tf/strings.go b/internal/tf/strings.go index 66f7ff739e..0c2ae1db09 100644 --- a/internal/tf/strings.go +++ b/internal/tf/strings.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package tf import "github.com/manicminer/hamilton/msgraph" diff --git a/internal/tf/validation/net.go b/internal/tf/validation/net.go index ad722c9566..6333c89282 100644 --- a/internal/tf/validation/net.go +++ b/internal/tf/validation/net.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package validation import ( diff --git a/internal/tf/validation/net_test.go b/internal/tf/validation/net_test.go index a69838bc20..469f0f0ce3 100644 --- a/internal/tf/validation/net_test.go +++ b/internal/tf/validation/net_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package validation import ( diff --git a/main.go b/main.go index 205bccd591..9e94983944 100644 --- a/main.go +++ b/main.go @@ -4,11 +4,22 @@ package main import ( + "flag" + "github.com/hashicorp/terraform-plugin-sdk/v2/plugin" ) func main() { - plugin.Serve(&plugin.ServeOpts{ + var debug bool + + flag.BoolVar(&debug, "debug", false, "set to true to run the provider with support for debuggers like delve") + flag.Parse() + + opts := &plugin.ServeOpts{ + Debug: debug, + ProviderAddr: "registry.terraform.io/hashicorp/azuread", ProviderFunc: Provider, - }) + } + + plugin.Serve(opts) } diff --git a/scripts/release.sh b/scripts/release.sh index fbb6520f9f..87c8a763e3 100755 --- a/scripts/release.sh +++ b/scripts/release.sh @@ -8,17 +8,21 @@ cd "${REPO_DIR}" TRUNK="main" +# Uncomment to print commands instead of executing them +#debug="echo " + usage() { - echo "Usage: $0 -y [-C] [-T] [-f]" >&2 + echo "Usage: $0 -y [-C] [-f]" >&2 echo >&2 echo " -y Proceed with release. Must be specified." >&2 echo " -C Only prepare the changelog; do not commit, tag or push" >&2 + echo " -t Override trunk branch (default: ${TRUNK}), useful for patch releases" echo " -T Skip tests before preparing release" >&2 echo " -f Force release prep when \`${TRUNK}\` branch is not checked out" >&2 echo >&2 } -while getopts ':yCTfh' opt; do +while getopts ':yCt:Tfh' opt; do case "$opt" in y) GOTIME=1 @@ -26,6 +30,9 @@ while getopts ':yCTfh' opt; do C) NOTAG=1 ;; + t) + TRUNK="${OPTARG}" + ;; T) NOTEST=1 ;; @@ -77,7 +84,7 @@ if [[ "${NOTEST}" == "1" ]]; then echo "Warning: Skipping tests" else echo "Running tests..." 
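On the main.go change above: the new -debug flag switches plugin.Serve into debug mode, so the provider can be started directly under a debugger such as delve rather than being launched by Terraform; in that mode the plugin SDK prints reattach details (typically a TF_REATTACH_PROVIDERS value) for Terraform to use. A trimmed-down sketch of just the flag handling; the real entry point forwards the value into plugin.ServeOpts:

package main

import (
    "flag"
    "fmt"
)

func main() {
    // Mirrors the provider's main(): an opt-in flag that would be forwarded to
    // plugin.ServeOpts{Debug: debug, ...} in the real binary.
    var debug bool
    flag.BoolVar(&debug, "debug", false, "run with support for debuggers like delve")
    flag.Parse()

    fmt.Println("debug mode:", debug)
}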
- ( set -x; TF_ACC= make test ) + ( set -x; TF_ACC= scripts/run-test.sh ) fi echo "Preparing changelog for release..." @@ -95,15 +102,15 @@ if [[ "${RELEASE}" == "" ]]; then fi # Ensure latest changes are checked out -( set -x; git pull --rebase origin "${TRUNK}" ) +( set -x; ${debug}git pull --rebase origin "${TRUNK}" ) # Replace [GH-nnnn] references with issue links -( set -x; $SED -i.bak "s/\[GH-([0-9]+)\]/\(\[#\1\]\(${PROVIDER_URL}\/\1\)\)/g" CHANGELOG.md ) +( set -x; ${debug}$SED -i.bak "s/\[GH-([0-9]+)\]/\(\[#\1\]\(${PROVIDER_URL}\/\1\)\)/g" CHANGELOG.md ) # Set the date for the latest release -( set -x; $SED -i.bak "s/^(## v?[0-9.]+) \(Unreleased\)/\1 (${DATE})/i" CHANGELOG.md ) +( set -x; ${debug}$SED -i.bak "s/^(## v?[0-9.]+) \(Unreleased\)/\1 (${DATE})/i" CHANGELOG.md ) -rm CHANGELOG.md.bak +${debug}rm CHANGELOG.md.bak if [[ "${NOTAG}" == "1" ]]; then echo "Warning: Skipping commit, tag and push." @@ -113,14 +120,15 @@ fi echo "Committing changelog..." ( set -x - git commit CHANGELOG.md -m v"${RELEASE}" - git push origin "${BRANCH}" + ${debug}git commit CHANGELOG.md -m v"${RELEASE}" + ${debug}git push origin "${BRANCH}" ) + echo "Releasing v${RELEASE}..." ( set -x - git tag v"${RELEASE}" - git push origin v"${RELEASE}" + ${debug}git tag v"${RELEASE}" + ${debug}git push origin v"${RELEASE}" ) diff --git a/vendor/github.com/manicminer/hamilton/msgraph/attribute_set.go b/vendor/github.com/manicminer/hamilton/msgraph/attribute_set.go new file mode 100644 index 0000000000..05ded44645 --- /dev/null +++ b/vendor/github.com/manicminer/hamilton/msgraph/attribute_set.go @@ -0,0 +1,165 @@ +package msgraph + +import ( + "context" + "encoding/json" + "fmt" + "io" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +const ( + attributeSetEntity = "/directory/attributeSets" +) + +type AttributeSetClient struct { + BaseClient Client +} + +func NewAttributeSetClient() *AttributeSetClient { + return &AttributeSetClient{ + BaseClient: NewClient(Version10), + } +} + +func (c *AttributeSetClient) List(ctx context.Context, query odata.Query) (*[]AttributeSet, int, error) { + resp, status, _, err := c.BaseClient.Get( + ctx, + GetHttpRequestInput{ + OData: query, + ValidStatusCodes: []int{http.StatusOK}, + Uri: Uri{ + Entity: attributeSetEntity, + }, + }, + ) + if err != nil { + return nil, status, fmt.Errorf("AttributeSet.BaseClient.Get(): %v", err) + } + + defer resp.Body.Close() + respBody, err := io.ReadAll(resp.Body) + if err != nil { + return nil, status, fmt.Errorf("io.ReadAll(): %v", err) + } + + var data struct { + AttributeSets []AttributeSet `json:"value"` + } + + if err := json.Unmarshal(respBody, &data); err != nil { + return nil, status, fmt.Errorf("json.Unmarshal(): %v", err) + } + + return &data.AttributeSets, status, nil +} + +func (c *AttributeSetClient) Create(ctx context.Context, attributeSet AttributeSet) (*AttributeSet, int, error) { + var status int + var newAttributeSet AttributeSet + + body, err := json.Marshal(attributeSet) + if err != nil { + return nil, status, fmt.Errorf("json.Marshal(): %v", err) + } + + requestInput := PostHttpRequestInput{ + Body: body, + OData: odata.Query{ + Metadata: odata.MetadataFull, + }, + ValidStatusCodes: []int{ + http.StatusCreated, + http.StatusOK, + }, + Uri: Uri{ + Entity: attributeSetEntity, + }, + } + + resp, status, _, err := c.BaseClient.Post(ctx, requestInput) + if err != nil { + return nil, status, fmt.Errorf("AttributeSetClient.BaseClient.Post(): %v", err) + } + + defer resp.Body.Close() + respBody, err := 
io.ReadAll(resp.Body) + if err != nil { + return nil, status, fmt.Errorf("io.ReadAll(): %v", err) + } + + if err := json.Unmarshal(respBody, &newAttributeSet); err != nil { + return nil, status, fmt.Errorf("json.Unmarshal():%v", err) + } + + return &newAttributeSet, status, nil +} + +func (c *AttributeSetClient) Get(ctx context.Context, id string, query odata.Query) (*AttributeSet, int, error) { + var AttributeSet AttributeSet + + resp, status, _, err := c.BaseClient.Get( + ctx, + GetHttpRequestInput{ + ConsistencyFailureFunc: RetryOn404ConsistencyFailureFunc, + OData: query, + ValidStatusCodes: []int{http.StatusOK}, + Uri: Uri{ + Entity: fmt.Sprintf("%s/%s", attributeSetEntity, id), + }, + }, + ) + if err != nil { + return nil, status, fmt.Errorf("AttributeSetClient.BaseClient.Get(): %v", err) + } + + defer resp.Body.Close() + respBody, err := io.ReadAll(resp.Body) + if err != nil { + return nil, status, fmt.Errorf("io.ReadAll(): %v", err) + } + + if err := json.Unmarshal(respBody, &AttributeSet); err != nil { + return nil, status, fmt.Errorf("json.Unmarshal(): %v", err) + } + + return &AttributeSet, status, nil +} + +func (c *AttributeSetClient) Update(ctx context.Context, AttributeSet AttributeSet) (int, error) { + var status int + + if AttributeSet.ID == nil { + return status, fmt.Errorf("cannot update AttributeSet with a nil ID") + } + + id := *AttributeSet.ID + AttributeSet.ID = nil + + body, err := json.Marshal(AttributeSet) + if err != nil { + return status, fmt.Errorf("json.Marshal(): %v", err) + } + + _, status, _, err = c.BaseClient.Patch( + ctx, + PatchHttpRequestInput{ + Body: body, + ConsistencyFailureFunc: RetryOn404ConsistencyFailureFunc, + ValidStatusCodes: []int{ + http.StatusOK, + http.StatusNoContent, + }, + Uri: Uri{ + Entity: fmt.Sprintf("%s/%s", attributeSetEntity, id), + }, + }, + ) + if err != nil { + return status, fmt.Errorf("AttributeSetClient.BaseClient.Patch(): %v", err) + } + + return status, nil +} diff --git a/vendor/github.com/manicminer/hamilton/msgraph/custom_security_attributes.go b/vendor/github.com/manicminer/hamilton/msgraph/custom_security_attributes.go new file mode 100644 index 0000000000..d3337e783e --- /dev/null +++ b/vendor/github.com/manicminer/hamilton/msgraph/custom_security_attributes.go @@ -0,0 +1,226 @@ +package msgraph + +import ( + "context" + "encoding/json" + "fmt" + "io" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/odata" + "github.com/manicminer/hamilton/internal/utils" +) + +const ( + // customSecurityAttributeDefinitionEntity is a static string used by all methods on the + // CustomSecurityAttributeDefinitionClient struct + customSecurityAttributeDefinitionEntity = "/directory/customSecurityAttributeDefinitions" +) + +// CustomSecurityAttributeDefinitionClient returns a BaseClient to enable interaction with the +// graph API +type CustomSecurityAttributeDefinitionClient struct { + BaseClient Client +} + +// NewCustomSecurityAttributeDefinitionClient returns a new instance of +// CustomSecurityAttributeDefinitionClient +func NewCustomSecurityAttributeDefinitionClient() *CustomSecurityAttributeDefinitionClient { + return &CustomSecurityAttributeDefinitionClient{ + BaseClient: NewClient(Version10), + } +} + +// List returns a slice of CustomSecurityAttributeDefinition, the HTTP status code and any errors +func (c *CustomSecurityAttributeDefinitionClient) List(ctx context.Context, query odata.Query) (*[]CustomSecurityAttributeDefinition, int, error) { + resp, status, _, err := c.BaseClient.Get( + ctx, + GetHttpRequestInput{ 
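One detail of the AttributeSetClient.Update method above worth calling out: the ID is lifted into the request URI and cleared from the struct before marshalling, so the omitempty tag keeps it out of the PATCH body. A stand-alone sketch of that marshalling behaviour; attributeSet here is a local stand-in for the msgraph model:

package main

import (
    "encoding/json"
    "fmt"
)

// attributeSet is a stand-in for the msgraph model; omitempty keeps cleared
// fields out of the PATCH body entirely.
type attributeSet struct {
    ID                  *string `json:"id,omitempty"`
    Description         *string `json:"description,omitempty"`
    MaxAttributesPerSet *int32  `json:"maxAttributesPerSet,omitempty"`
}

func main() {
    id := "Engineering"
    desc := "Attributes for engineering staff"
    set := attributeSet{ID: &id, Description: &desc}

    // Mirror the Update method: move the id into the URL and drop it from the
    // body before marshalling, matching the client code above.
    urlID := *set.ID
    set.ID = nil

    body, _ := json.Marshal(set)
    fmt.Printf("PATCH /directory/attributeSets/%s\n%s\n", urlID, body)
}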
+ OData: query, + ValidStatusCodes: []int{http.StatusOK}, + Uri: Uri{ + Entity: customSecurityAttributeDefinitionEntity, + }, + }, + ) + if err != nil { + return nil, status, fmt.Errorf("CustomSecurityAttributeDefinition.BaseClient.Get(): %v", err) + } + + defer resp.Body.Close() + respBody, err := io.ReadAll(resp.Body) + if err != nil { + return nil, status, fmt.Errorf("io.ReadAll(): %v", err) + } + + var data struct { + CustomSecurityAttributeDefinitions []CustomSecurityAttributeDefinition `json:"value"` + } + + if err := json.Unmarshal(respBody, &data); err != nil { + return nil, status, fmt.Errorf("json.Unmarshal(): %v", err) + } + + return &data.CustomSecurityAttributeDefinitions, status, nil +} + +// Create will create a CustomSecurityAttributeDefinition and return the result, HTTP status code +// as well as any errors +func (c *CustomSecurityAttributeDefinitionClient) Create(ctx context.Context, customSecurityAttributeDefinition CustomSecurityAttributeDefinition) (*CustomSecurityAttributeDefinition, int, error) { + var status int + var newCustomSecurityAttributeDefinition CustomSecurityAttributeDefinition + + body, err := json.Marshal(customSecurityAttributeDefinition) + if err != nil { + return nil, status, fmt.Errorf("json.Marshal(): %v", err) + } + + requestInput := PostHttpRequestInput{ + Body: body, + OData: odata.Query{ + Metadata: odata.MetadataFull, + }, + ValidStatusCodes: []int{http.StatusCreated}, + Uri: Uri{ + Entity: customSecurityAttributeDefinitionEntity, + }, + } + + resp, status, _, err := c.BaseClient.Post(ctx, requestInput) + if err != nil { + return nil, status, fmt.Errorf("CustomSecurityAttributeDefinitionClient.BaseClient.Post(): %v", err) + } + + defer resp.Body.Close() + respBody, err := io.ReadAll(resp.Body) + if err != nil { + return nil, status, fmt.Errorf("io.ReadAll(): %v", err) + } + + if err := json.Unmarshal(respBody, &newCustomSecurityAttributeDefinition); err != nil { + return nil, status, fmt.Errorf("json.Unmarshal():%v", err) + } + + return &newCustomSecurityAttributeDefinition, status, nil +} + +// Get returns a single CustomSecurityAttributeDefinition, HTTP status code, and any errors +func (c *CustomSecurityAttributeDefinitionClient) Get(ctx context.Context, id string, query odata.Query) (*CustomSecurityAttributeDefinition, int, error) { + var customSecurityAttributeDefinition CustomSecurityAttributeDefinition + + resp, status, _, err := c.BaseClient.Get( + ctx, + GetHttpRequestInput{ + ConsistencyFailureFunc: RetryOn404ConsistencyFailureFunc, + OData: query, + ValidStatusCodes: []int{http.StatusOK}, + Uri: Uri{ + Entity: fmt.Sprintf("%s/%s", customSecurityAttributeDefinitionEntity, id), + }, + }, + ) + if err != nil { + return nil, status, fmt.Errorf("CustomSecurityAttributeDefinitionClient.BaseClient.Get(): %v", err) + } + + defer resp.Body.Close() + respBody, err := io.ReadAll(resp.Body) + if err != nil { + return nil, status, fmt.Errorf("io.ReadAll(): %v", err) + } + + if err := json.Unmarshal(respBody, &customSecurityAttributeDefinition); err != nil { + return nil, status, fmt.Errorf("json.Unmarshal(): %v", err) + } + + return &customSecurityAttributeDefinition, status, nil +} + +// Update will update a single CustomSecurityAttributeDefinition entity returning the HTTP status +// code and any errors +func (c *CustomSecurityAttributeDefinitionClient) Update(ctx context.Context, customSecurityAttributeDefinition CustomSecurityAttributeDefinition) (int, error) { + var status int + + if customSecurityAttributeDefinition.ID == nil { + return 
status, fmt.Errorf("cannot update customSecurityAttributeDefinition with a nil ID") + } + + id := *customSecurityAttributeDefinition.ID + customSecurityAttributeDefinition.ID = nil + + body, err := json.Marshal(customSecurityAttributeDefinition) + if err != nil { + return status, fmt.Errorf("json.Marshal(): %v", err) + } + + _, status, _, err = c.BaseClient.Patch( + ctx, + PatchHttpRequestInput{ + Body: body, + ConsistencyFailureFunc: RetryOn404ConsistencyFailureFunc, + ValidStatusCodes: []int{ + http.StatusOK, + http.StatusNoContent, + }, + Uri: Uri{ + Entity: fmt.Sprintf("%s/%s", customSecurityAttributeDefinitionEntity, id), + }, + }, + ) + if err != nil { + return status, fmt.Errorf("CustomSecurityAttributeDefinitionClient.BaseClient.Patch(): %v", err) + } + + return status, nil +} + +// Delete removes an instance of CustomSecurityAttributeDefinition by `id` +func (c *CustomSecurityAttributeDefinitionClient) Delete(ctx context.Context, id string) (int, error) { + _, status, _, err := c.BaseClient.Delete( + ctx, + DeleteHttpRequestInput{ + ConsistencyFailureFunc: RetryOn404ConsistencyFailureFunc, + ValidStatusCodes: []int{http.StatusNoContent}, + Uri: Uri{ + Entity: fmt.Sprintf("%s/%s", customSecurityAttributeDefinitionEntity, id), + }, + }, + ) + if err != nil { + return status, fmt.Errorf("CustomSecurityAttributeDefinitionClient.BaseClient.Delete(): %v", err) + } + + return status, nil +} + +func (c *CustomSecurityAttributeDefinitionClient) Deactivate(ctx context.Context, id string) (int, error) { + var status int + var customSecurityAttributeDefinition CustomSecurityAttributeDefinition + + customSecurityAttributeDefinition.Status = utils.StringPtr("Deprecated") + + body, err := json.Marshal(customSecurityAttributeDefinition) + if err != nil { + return status, fmt.Errorf("json.Marshal(): %v", err) + } + + _, status, _, err = c.BaseClient.Patch( + ctx, + PatchHttpRequestInput{ + Body: body, + ConsistencyFailureFunc: RetryOn404ConsistencyFailureFunc, + ValidStatusCodes: []int{ + http.StatusOK, + http.StatusNoContent, + }, + Uri: Uri{ + Entity: fmt.Sprintf("%s/%s", customSecurityAttributeDefinitionEntity, id), + }, + }, + ) + if err != nil { + return status, fmt.Errorf("customSecurityAttributeDefinitionClient.BaseClient.Patch(): %v", err) + } + + return status, nil +} diff --git a/vendor/github.com/manicminer/hamilton/msgraph/models.go b/vendor/github.com/manicminer/hamilton/msgraph/models.go index 630f4b1e49..d542facbca 100644 --- a/vendor/github.com/manicminer/hamilton/msgraph/models.go +++ b/vendor/github.com/manicminer/hamilton/msgraph/models.go @@ -64,7 +64,7 @@ type AccessPackageAssignmentRequest struct { type AccessPackageAssignmentPolicy struct { AccessPackageId *string `json:"accessPackageId,omitempty"` - AccessReviewSettings *AssignmentReviewSettings `json:"accessReviewSettings,omitempty"` + AccessReviewSettings *AssignmentReviewSettings `json:"accessReviewSettings"` CanExtend *bool `json:"canExtend,omitempty"` CreatedBy *string `json:"createdBy,omitempty"` CreatedDateTime *time.Time `json:"createdDateTime,omitempty"` @@ -703,11 +703,11 @@ type ConditionalAccessPolicy struct { } type ConditionalAccessSessionControls struct { - ApplicationEnforcedRestrictions *ApplicationEnforcedRestrictionsSessionControl `json:"applicationEnforcedRestrictions,omitempty"` - CloudAppSecurity *CloudAppSecurityControl `json:"cloudAppSecurity,omitempty"` + ApplicationEnforcedRestrictions *ApplicationEnforcedRestrictionsSessionControl `json:"applicationEnforcedRestrictions"` + CloudAppSecurity 
*CloudAppSecurityControl `json:"cloudAppSecurity"` DisableResilienceDefaults *bool `json:"disableResilienceDefaults,omitempty"` - PersistentBrowser *PersistentBrowserSessionControl `json:"persistentBrowser,omitempty"` - SignInFrequency *SignInFrequencySessionControl `json:"signInFrequency,omitempty"` + PersistentBrowser *PersistentBrowserSessionControl `json:"persistentBrowser"` + SignInFrequency *SignInFrequencySessionControl `json:"signInFrequency"` } type ConditionalAccessUsers struct { @@ -2242,3 +2242,21 @@ type UserFlowAttribute struct { UserFlowAttributeType *string `json:"userFlowAttributeType,omitempty"` DataType *UserflowAttributeDataType `json:"dataType,omitempty"` } + +type AttributeSet struct { + ID *string `json:"id,omitempty"` + Description *string `json:"description,omitempty"` + MaxAttributesPerSet *int32 `json:"maxAttributesPerSet,omitempty"` +} + +type CustomSecurityAttributeDefinition struct { + AttributeSet *string `json:"attributeSet,omitempty"` + Description *string `json:"description,omitempty"` + ID *string `json:"id,omitempty"` + IsCollection *bool `json:"isCollection,omitempty"` + IsSearchable *bool `json:"isSearchable,omitempty"` + Name *string `json:"name,omitempty"` + Status *string `json:"status,omitempty"` + Type *string `json:"type,omitempty"` + UsePreDefinedValuesOnly *bool `json:"usePreDefinedValuesOnly,omitempty"` +} diff --git a/vendor/github.com/manicminer/hamilton/msgraph/users.go b/vendor/github.com/manicminer/hamilton/msgraph/users.go index a68db85137..e6cdf53b4e 100644 --- a/vendor/github.com/manicminer/hamilton/msgraph/users.go +++ b/vendor/github.com/manicminer/hamilton/msgraph/users.go @@ -424,3 +424,20 @@ func (c *UsersClient) DeleteManager(ctx context.Context, id string) (int, error) return status, nil } + +// UploadThumbnailPhoto uploads a thumbnail photo for the specified user which should be a gif, jpeg or png image. +func (c *UsersClient) UploadThumbnailPhoto(ctx context.Context, userId, contentType string, thumbnailData []byte) (int, error) { + _, status, _, err := c.BaseClient.Put(ctx, PutHttpRequestInput{ + Body: thumbnailData, + ConsistencyFailureFunc: RetryOn404ConsistencyFailureFunc, + ContentType: contentType, + ValidStatusCodes: []int{http.StatusOK}, + Uri: Uri{ + Entity: fmt.Sprintf("/users/%s/photo/$value", userId), + }, + }) + if err != nil { + return status, fmt.Errorf("UsersClient.BaseClient.Put(): %v", err) + } + return status, nil +} diff --git a/vendor/modules.txt b/vendor/modules.txt index bd507408d1..f96b5e7f83 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -209,7 +209,7 @@ github.com/hashicorp/terraform-svchost # github.com/hashicorp/yamux v0.1.1 ## explicit; go 1.15 github.com/hashicorp/yamux -# github.com/manicminer/hamilton v0.67.0 +# github.com/manicminer/hamilton v0.70.0 => github.com/MarkDordoy/hamilton v0.17.1-0.20240611151114-899c6ce169f6 ## explicit; go 1.21 github.com/manicminer/hamilton/errors github.com/manicminer/hamilton/internal/utils @@ -428,3 +428,4 @@ google.golang.org/protobuf/types/known/timestamppb ## explicit; go 1.19 software.sslmate.com/src/go-pkcs12 software.sslmate.com/src/go-pkcs12/internal/rc2 +# github.com/manicminer/hamilton => github.com/MarkDordoy/hamilton v0.17.1-0.20240611151114-899c6ce169f6
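Finally, the models.go change above drops omitempty from several nested settings fields (accessReviewSettings and the conditional access session controls), so a nil pointer now serializes as an explicit JSON null rather than disappearing from the request body, presumably so callers can clear those settings on update. A self-contained sketch of the difference:

package main

import (
    "encoding/json"
    "fmt"
)

type reviewSettings struct {
    IsEnabled bool `json:"isEnabled"`
}

// withOmitEmpty drops a nil pointer from the JSON; withoutOmitEmpty emits an
// explicit null, which is what the updated model tags above now produce.
type withOmitEmpty struct {
    AccessReviewSettings *reviewSettings `json:"accessReviewSettings,omitempty"`
}

type withoutOmitEmpty struct {
    AccessReviewSettings *reviewSettings `json:"accessReviewSettings"`
}

func main() {
    a, _ := json.Marshal(withOmitEmpty{})
    b, _ := json.Marshal(withoutOmitEmpty{})
    fmt.Println(string(a)) // {}
    fmt.Println(string(b)) // {"accessReviewSettings":null}
}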