diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml index 42b0a5d3a2..1d196af0ce 100644 --- a/.github/workflows/ci.yaml +++ b/.github/workflows/ci.yaml @@ -134,7 +134,7 @@ jobs: - name: Build docs uses: lee-dohm/generate-elixir-docs@v1 - name: Generate openapi.json - run: mix openapi.spec.json --start-app=false --spec TrentoWeb.OpenApi.ApiSpec + run: mix openapi.spec.json --start-app=false --spec TrentoWeb.OpenApi.V1.ApiSpec - name: Generate Swagger UI uses: Legion2/swagger-ui-action@v1 with: diff --git a/assets/js/components/Button/Button.jsx b/assets/js/components/Button/Button.jsx index 39e0eeb402..b674108d2a 100644 --- a/assets/js/components/Button/Button.jsx +++ b/assets/js/components/Button/Button.jsx @@ -14,10 +14,14 @@ const getButtonClasses = (type) => { switch (type) { case 'primary-white': return 'bg-white hover:opacity-75 focus:outline-none text-jungle-green-500 w-full transition ease-in duration-200 text-center font-semibold rounded shadow'; + case 'primary-white-fit': + return 'bg-white hover:opacity-75 focus:outline-none text-jungle-green-500 w-fit transition ease-in duration-200 text-center font-semibold rounded shadow'; case 'transparent': return 'bg-transparent hover:opacity-75 focus:outline-none w-full transition ease-in duration-200 font-semibold'; case 'secondary': - return 'bg-persimmon hover:opacity-75 focus:outline-none text-gray-800 w-full transition ease-in duration-200 text-center font-semibold rounded shadow'; + return 'bg-persimmon hover:opacity-75 focus:outline-none text-gray-800 w-full transition ease-in duration-200 text-center font-semibold rounded shadow'; + case 'default-fit': + return 'bg-jungle-green-500 hover:opacity-75 focus:outline-none text-white w-fit transition ease-in duration-200 text-center font-semibold rounded shadow'; default: return 'bg-jungle-green-500 hover:opacity-75 focus:outline-none text-white w-full transition ease-in duration-200 text-center font-semibold rounded shadow'; } diff --git a/assets/js/components/ClustersList.jsx b/assets/js/components/ClustersList.jsx index 3972503020..a15f928c28 100644 --- a/assets/js/components/ClustersList.jsx +++ b/assets/js/components/ClustersList.jsx @@ -17,6 +17,8 @@ const getClusterTypeLabel = (type) => { return 'HANA Scale Up'; case 'hana_scale_out': return 'HANA Scale Out'; + case 'ascs_ers': + return 'ASCS/ERS'; default: return 'Unknown'; } @@ -67,7 +69,9 @@ function ClustersList() { title: 'SID', key: 'sid', filterFromParams: true, - filter: true, + filter: (filter, key) => (element) => + element[key].some((sid) => filter.includes(sid)), + render: (_, { sid }) => sid.join(', '), }, { title: 'Hosts', @@ -122,7 +126,7 @@ function ClustersList() { health: cluster.health, name: cluster.name, id: cluster.id, - sid: cluster.sid, + sid: (cluster.sid ? 
[cluster.sid] : []).concat(cluster.additional_sids), type: cluster.type, hosts_number: cluster.hosts_number, resources_number: cluster.resources_number, diff --git a/assets/js/components/ClustersList.test.jsx b/assets/js/components/ClustersList.test.jsx index 0bb62212ce..be30c6bd75 100644 --- a/assets/js/components/ClustersList.test.jsx +++ b/assets/js/components/ClustersList.test.jsx @@ -58,14 +58,18 @@ describe('ClustersList component', () => { }, { filter: 'SID', - options: ['PRD', 'QAS'], + options: ['PRD', 'QAS', 'HA1', 'HA2'], state: { ...cleanInitialState, clustersList: { clusters: [].concat( clusterFactory.buildList(4), clusterFactory.buildList(2, { sid: 'PRD' }), - clusterFactory.buildList(2, { sid: 'QAS' }) + clusterFactory.buildList(2, { sid: 'QAS' }), + clusterFactory.buildList(2, { + sid: null, + additional_sids: ['HA1', 'HA2'], + }) ), }, }, @@ -73,13 +77,14 @@ describe('ClustersList component', () => { }, { filter: 'Type', - options: ['hana_scale_up'], + options: ['hana_scale_up', 'ascs_ers'], state: { ...cleanInitialState, clustersList: { clusters: [].concat( clusterFactory.buildList(2, { type: 'unknown' }), - clusterFactory.buildList(2, { type: 'hana_scale_up' }) + clusterFactory.buildList(2, { type: 'hana_scale_up' }), + clusterFactory.buildList(2, { type: 'ascs_ers' }) ), }, }, @@ -123,6 +128,24 @@ describe('ClustersList component', () => { } ); + it('should show SIDs delimited by comma in multi-sid clusters', () => { + const state = { + ...cleanInitialState, + clustersList: { + clusters: clusterFactory.buildList(1, { + sid: null, + additional_sids: ['HA1', 'HA2'], + }), + }, + }; + + const [StatefulClustersList] = withState(, state); + + renderWithRouter(StatefulClustersList); + + expect(screen.getByText('HA1, HA2')).toBeVisible(); + }); + it('should put the filters values in the query string when filters are selected', () => { const tag = 'Tag1'; const clusters = clusterFactory.buildList(1, { diff --git a/assets/js/components/DeregistrationModal/DeregistrationModal.jsx b/assets/js/components/DeregistrationModal/DeregistrationModal.jsx new file mode 100644 index 0000000000..efec1b74c8 --- /dev/null +++ b/assets/js/components/DeregistrationModal/DeregistrationModal.jsx @@ -0,0 +1,48 @@ +import React from 'react'; + +import { EOS_CLEANING_SERVICES } from 'eos-icons-react'; + +import Modal from '@components/Modal'; +import Button from '@components/Button'; + +function DeregistrationModal({ + hostname, + isOpen = false, + onCleanUp, + onCancel, +}) { + return ( + +
+ This action will cause Trento to stop tracking all the components + discovered by the agent in this host, including the host itself and any + other component depending on it. +
+
+ + +
+
+ ); +} + +export default DeregistrationModal; diff --git a/assets/js/components/DeregistrationModal/DeregistrationModal.stories.jsx b/assets/js/components/DeregistrationModal/DeregistrationModal.stories.jsx new file mode 100644 index 0000000000..468fa76a35 --- /dev/null +++ b/assets/js/components/DeregistrationModal/DeregistrationModal.stories.jsx @@ -0,0 +1,70 @@ +import React, { useState } from 'react'; + +import Button from '@components/Button'; +import DeregistrationModal from '.'; + +export default { + title: 'DeregistrationModal', + component: DeregistrationModal, + argTypes: { + hostname: { + type: 'string', + description: 'The host name to confirm deregistration of', + control: { type: 'text' }, + }, + isOpen: { + type: 'boolean', + description: 'Sets the visibility of the modal', + control: false, + }, + onCleanUp: { + description: 'Callback function to run when "Clean up" button is clicked', + action: 'Deregistration', + control: false, + }, + onClose: { + description: 'Callback function to run when "Cancel" button is clicked', + action: 'Cancel', + control: false, + }, + }, +}; + +function ButtonToOpenModal({ hostname }) { + const [open, setOpen] = useState(false); + const [deregistered, setDeregistered] = useState(false); + + return ( + <> + + + { + setDeregistered(true); + setOpen(false); + }} + onCancel={() => setOpen(false)} + /> + + ); +} + +export const Default = { + args: { + hostname: 'example host', + }, + render: (args) => , +}; diff --git a/assets/js/components/DeregistrationModal/DeregistrationModal.test.jsx b/assets/js/components/DeregistrationModal/DeregistrationModal.test.jsx new file mode 100644 index 0000000000..9ff2b75750 --- /dev/null +++ b/assets/js/components/DeregistrationModal/DeregistrationModal.test.jsx @@ -0,0 +1,26 @@ +import React from 'react'; +import { render, screen } from '@testing-library/react'; +import { faker } from '@faker-js/faker'; + +import DeregistrationModal from '.'; + +describe('Deregistration Modal component', () => { + it('should render deregistration modal correctly', async () => { + const hostname = faker.name.firstName(); + + render( + {}} + onCancel={() => {}} + /> + ); + + expect(await screen.findByText(hostname, { exact: false })).toBeTruthy(); + expect( + await screen.findByRole('button', { name: /Clean up/i }) + ).toBeTruthy(); + expect(await screen.findByRole('button', { name: /Cancel/i })).toBeTruthy(); + }); +}); diff --git a/assets/js/components/DeregistrationModal/index.js b/assets/js/components/DeregistrationModal/index.js new file mode 100644 index 0000000000..d4811f4c95 --- /dev/null +++ b/assets/js/components/DeregistrationModal/index.js @@ -0,0 +1,3 @@ +import DeregistrationModal from './DeregistrationModal'; + +export default DeregistrationModal; diff --git a/assets/js/lib/network/index.js b/assets/js/lib/network/index.js index 5ef3d62e51..b76e43e8cc 100644 --- a/assets/js/lib/network/index.js +++ b/assets/js/lib/network/index.js @@ -68,36 +68,36 @@ function handleResponseStatus(response) { return response; } -export const post = function post(url, data) { +export const post = function post(url, data, config = null) { return networkClient - .post(url, data) + .post(url, data, config) .then(handleResponseStatus) .catch((error) => { handleError(error); }); }; -export const del = function del(url) { +export const del = function del(url, config = null) { return networkClient - .delete(url) + .delete(url, config) .then(handleResponseStatus) .catch((error) => { handleError(error); }); }; -export const put = function 
put(url, data) { +export const put = function put(url, data, config = null) { return networkClient - .put(url, data) + .put(url, data, config) .then(handleResponseStatus) .catch((error) => { handleError(error); }); }; -export const get = function get(url) { +export const get = function get(url, config = null) { return networkClient - .get(url) + .get(url, config) .then(handleResponseStatus) .catch((error) => { handleError(error); diff --git a/assets/js/lib/network/network.test.js b/assets/js/lib/network/network.test.js index 9b833dcad8..b1b11677b8 100644 --- a/assets/js/lib/network/network.test.js +++ b/assets/js/lib/network/network.test.js @@ -24,6 +24,22 @@ describe('networkClient', () => { clearCredentialsFromStore(); }); + it('should use default baseURL', async () => { + axiosMock.onGet('/api/v1/test').reply(200, { ok: 'ok' }); + + const response = await networkClient.get('/test'); + + expect(response.data).toEqual({ ok: 'ok' }); + }); + + it('should apply the specific config in each request', async () => { + axiosMock.onGet('/base/test').reply(200, { ok: 'ok' }); + + const response = await networkClient.get('/test', { baseURL: '/base' }); + + expect(response.data).toEqual({ ok: 'ok' }); + }); + it('should attach the access token from the store when a request is made', async () => { storeAccessToken('test-access'); diff --git a/assets/js/state/channels.js b/assets/js/state/channels.js index 33dfb0dd75..3446c0efe7 100644 --- a/assets/js/state/channels.js +++ b/assets/js/state/channels.js @@ -21,6 +21,7 @@ const processChannelEvents = (reduxStore, socket) => { 'host_details_updated', 'heartbeat_succeded', 'heartbeat_failed', + 'host_deregistered', ]); registerEvents(reduxStore, socket, 'monitoring:clusters', [ 'cluster_registered', @@ -30,17 +31,22 @@ const processChannelEvents = (reduxStore, socket) => { 'checks_results_updated', 'cluster_health_changed', 'cluster_cib_last_written_updated', + 'cluster_deregistered', ]); registerEvents(reduxStore, socket, 'monitoring:sap_systems', [ 'sap_system_registered', 'sap_system_health_changed', 'application_instance_registered', + 'application_instance_deregistered', 'application_instance_health_changed', + 'sap_system_deregistered', + 'sap_system_updated', ]); registerEvents(reduxStore, socket, 'monitoring:databases', [ 'database_registered', 'database_health_changed', 'database_instance_registered', + 'database_instance_deregistered', 'database_instance_health_changed', 'database_instance_system_replication_changed', ]); diff --git a/assets/js/state/clusters.js b/assets/js/state/clusters.js index 31d9cf32ff..d2d5159d08 100644 --- a/assets/js/state/clusters.js +++ b/assets/js/state/clusters.js @@ -95,9 +95,14 @@ export const clustersListSlice = createSlice({ return cluster; }); }, + removeCluster: (state, { payload: { id } }) => { + state.clusters = state.clusters.filter((cluster) => cluster.id !== id); + }, }, }); +export const CLUSTER_DEREGISTERED = 'CLUSTER_DEREGISTERED'; + export const { setClusters, appendCluster, @@ -110,6 +115,7 @@ export const { updateCibLastWritten, startClustersLoading, stopClustersLoading, + removeCluster, } = clustersListSlice.actions; export default clustersListSlice.reducer; diff --git a/assets/js/state/clusters.test.js b/assets/js/state/clusters.test.js new file mode 100644 index 0000000000..5581230224 --- /dev/null +++ b/assets/js/state/clusters.test.js @@ -0,0 +1,19 @@ +import clustersReducer, { removeCluster } from '@state/clusters'; +import { clusterFactory } from '@lib/test-utils/factories'; + 
+describe('Clusters reducer', () => { + it('should remove cluster from state', () => { + const [cluster1, cluster2] = clusterFactory.buildList(2); + const initialState = { + clusters: [cluster1, cluster2], + }; + + const action = removeCluster(cluster1); + + const expectedState = { + clusters: [cluster2], + }; + + expect(clustersReducer(initialState, action)).toEqual(expectedState); + }); +}); diff --git a/assets/js/state/databases.js b/assets/js/state/databases.js index 67fffe793d..ee86496cc1 100644 --- a/assets/js/state/databases.js +++ b/assets/js/state/databases.js @@ -30,6 +30,19 @@ export const databasesListSlice = createSlice({ appendDatabaseInstance: (state, action) => { state.databaseInstances = [...state.databaseInstances, action.payload]; }, + removeDatabaseInstance: ( + state, + { payload: { sap_system_id, host_id, instance_number } } + ) => { + state.databaseInstances = state.databaseInstances.filter( + (databaseInstance) => + !( + databaseInstance.sap_system_id === sap_system_id && + databaseInstance.host_id === host_id && + databaseInstance.instance_number === instance_number + ) + ); + }, updateDatabaseHealth: (state, action) => { state.databases = state.databases.map((database) => { if (database.id === action.payload.id) { @@ -81,6 +94,7 @@ export const databasesListSlice = createSlice({ export const DATABASE_REGISTERED = 'DATABASE_REGISTERED'; export const DATABASE_HEALTH_CHANGED = 'DATABASE_HEALTH_CHANGED'; export const DATABASE_INSTANCE_REGISTERED = 'DATABASE_INSTANCE_REGISTERED'; +export const DATABASE_INSTANCE_DEREGISTERED = 'DATABASE_INSTANCE_DEREGISTERED'; export const DATABASE_INSTANCE_HEALTH_CHANGED = 'DATABASE_INSTANCE_HEALTH_CHANGED'; export const DATABASE_INSTANCE_SYSTEM_REPLICATION_CHANGED = @@ -91,6 +105,7 @@ export const { stopDatabasesLoading, setDatabases, appendDatabase, + removeDatabaseInstance, appendDatabaseInstance, updateDatabaseHealth, updateDatabaseInstanceHealth, diff --git a/assets/js/state/databases.test.js b/assets/js/state/databases.test.js new file mode 100644 index 0000000000..e6df5e93bd --- /dev/null +++ b/assets/js/state/databases.test.js @@ -0,0 +1,19 @@ +import databaseReducer, { removeDatabaseInstance } from '@state/databases'; +import { databaseInstanceFactory } from '@lib/test-utils/factories/databases'; + +describe('Databases reducer', () => { + it('should remove a database instance from state', () => { + const [instance1, instance2] = databaseInstanceFactory.buildList(2); + const initialState = { + databaseInstances: [instance1, instance2], + }; + + const action = removeDatabaseInstance(instance1); + + const expectedState = { + databaseInstances: [instance2], + }; + + expect(databaseReducer(initialState, action)).toEqual(expectedState); + }); +}); diff --git a/assets/js/state/hosts.js b/assets/js/state/hosts.js index cf29d434ad..47557660f2 100644 --- a/assets/js/state/hosts.js +++ b/assets/js/state/hosts.js @@ -65,9 +65,14 @@ export const hostsListSlice = createSlice({ stopHostsLoading: (state) => { state.loading = false; }, + removeHost: (state, { payload: { id } }) => { + state.hosts = state.hosts.filter((host) => host.id !== id); + }, }, }); +export const HOST_DEREGISTERED = 'HOST_DEREGISTERED'; + export const { setHosts, appendHost, @@ -78,6 +83,7 @@ export const { stopHostsLoading, setHeartbeatPassing, setHeartbeatCritical, + removeHost, } = hostsListSlice.actions; export default hostsListSlice.reducer; diff --git a/assets/js/state/hosts.test.js b/assets/js/state/hosts.test.js new file mode 100644 index 0000000000..73b44f8dca ---
/dev/null +++ b/assets/js/state/hosts.test.js @@ -0,0 +1,19 @@ +import hostsReducer, { removeHost } from '@state/hosts'; +import { hostFactory } from '@lib/test-utils/factories'; + +describe('Hosts reducer', () => { + it('should remove host from state', () => { + const [host1, host2] = hostFactory.buildList(2); + const initialState = { + hosts: [host1, host2], + }; + + const action = removeHost(host1); + + const expectedState = { + hosts: [host2], + }; + + expect(hostsReducer(initialState, action)).toEqual(expectedState); + }); +}); diff --git a/assets/js/state/sagas/clusters.js b/assets/js/state/sagas/clusters.js new file mode 100644 index 0000000000..a302c4dedd --- /dev/null +++ b/assets/js/state/sagas/clusters.js @@ -0,0 +1,17 @@ +import { put, takeEvery } from 'redux-saga/effects'; +import { CLUSTER_DEREGISTERED, removeCluster } from '@state/clusters'; +import { notify } from '@state/actions/notifications'; + +export function* clusterDeregistered({ payload: { name, id } }) { + yield put(removeCluster({ id })); + yield put( + notify({ + text: `The cluster ${name || id} has been deregistered.`, + icon: 'ℹ️', + }) + ); +} + +export function* watchClusterDeregistered() { + yield takeEvery(CLUSTER_DEREGISTERED, clusterDeregistered); +} diff --git a/assets/js/state/sagas/clusters.test.js b/assets/js/state/sagas/clusters.test.js new file mode 100644 index 0000000000..d5e9cf2d3f --- /dev/null +++ b/assets/js/state/sagas/clusters.test.js @@ -0,0 +1,16 @@ +import { recordSaga } from '@lib/test-utils'; +import { clusterDeregistered } from '@state/sagas/clusters'; +import { removeCluster } from '@state/clusters'; +import { clusterFactory } from '@lib/test-utils/factories'; + +describe('Clusters sagas', () => { + it('should remove the cluster', async () => { + const { id, name } = clusterFactory.build(); + + const dispatched = await recordSaga(clusterDeregistered, { + payload: { id, name }, + }); + + expect(dispatched).toContainEqual(removeCluster({ id })); + }); +}); diff --git a/assets/js/state/sagas/databases.js b/assets/js/state/sagas/databases.js index e28741d2ec..4ead26def7 100644 --- a/assets/js/state/sagas/databases.js +++ b/assets/js/state/sagas/databases.js @@ -3,6 +3,7 @@ import { DATABASE_REGISTERED, DATABASE_HEALTH_CHANGED, DATABASE_INSTANCE_REGISTERED, + DATABASE_INSTANCE_DEREGISTERED, DATABASE_INSTANCE_HEALTH_CHANGED, DATABASE_INSTANCE_SYSTEM_REPLICATION_CHANGED, appendDatabase, @@ -10,10 +11,12 @@ import { updateDatabaseHealth, updateDatabaseInstanceHealth, updateDatabaseInstanceSystemReplication, + removeDatabaseInstance, } from '@state/databases'; import { appendDatabaseInstanceToSapSystem, + removeDatabaseInstanceFromSapSystem, updateSAPSystemDatabaseInstanceHealth, updateSAPSystemDatabaseInstanceSystemReplication, } from '@state/sapSystems'; @@ -74,6 +77,23 @@ function* databaseInstanceRegistered({ payload }) { ); } +export function* databaseInstanceDeregistered({ payload }) { + yield put(removeDatabaseInstance(payload)); + yield put(removeDatabaseInstanceFromSapSystem(payload)); + yield put( + appendEntryToLiveFeed({ + source: payload.sid, + message: 'Database instance deregistered.', + }) + ); + yield put( + notify({ + text: `The database instance ${payload.instance_number} has been deregistered from ${payload.sid}.`, + icon: 'ℹ️', + }) + ); +} + function* databaseInstanceHealthChanged({ payload }) { yield put(updateDatabaseInstanceHealth(payload)); yield put(updateSAPSystemDatabaseInstanceHealth(payload)); @@ -88,6 +108,7 @@ export function* watchDatabase() { yield 
takeEvery(DATABASE_REGISTERED, databaseRegistered); yield takeEvery(DATABASE_HEALTH_CHANGED, databaseHealthChanged); yield takeEvery(DATABASE_INSTANCE_REGISTERED, databaseInstanceRegistered); + yield takeEvery(DATABASE_INSTANCE_DEREGISTERED, databaseInstanceDeregistered); yield takeEvery( DATABASE_INSTANCE_HEALTH_CHANGED, databaseInstanceHealthChanged diff --git a/assets/js/state/sagas/databases.test.js b/assets/js/state/sagas/databases.test.js new file mode 100644 index 0000000000..4d1ed7bd21 --- /dev/null +++ b/assets/js/state/sagas/databases.test.js @@ -0,0 +1,35 @@ +import { recordSaga } from '@lib/test-utils'; +import { databaseInstanceDeregistered } from '@state/sagas/databases'; +import { removeDatabaseInstance } from '@state/databases'; +import { removeDatabaseInstanceFromSapSystem } from '@state/sapSystems'; +import { databaseInstanceFactory } from '@lib/test-utils/factories'; +import { notify } from '@state/actions/notifications'; + +describe('SAP Systems sagas', () => { + it('should remove the database instance', async () => { + const { sap_system_id, host_id, instance_number, sid } = + databaseInstanceFactory.build(); + + const dispatched = await recordSaga(databaseInstanceDeregistered, { + payload: { sap_system_id, host_id, instance_number, sid }, + }); + + expect(dispatched).toContainEqual( + removeDatabaseInstanceFromSapSystem({ + sap_system_id, + host_id, + instance_number, + sid, + }) + ); + expect(dispatched).toContainEqual( + removeDatabaseInstance({ sap_system_id, host_id, instance_number, sid }) + ); + expect(dispatched).toContainEqual( + notify({ + text: `The database instance ${instance_number} has been deregistered from ${sid}.`, + icon: 'ℹ️', + }) + ); + }); +}); diff --git a/assets/js/state/sagas/hosts.js b/assets/js/state/sagas/hosts.js new file mode 100644 index 0000000000..e2d93ab7a0 --- /dev/null +++ b/assets/js/state/sagas/hosts.js @@ -0,0 +1,17 @@ +import { put, takeEvery } from 'redux-saga/effects'; +import { HOST_DEREGISTERED, removeHost } from '@state/hosts'; +import { notify } from '@state/actions/notifications'; + +export function* hostDeregistered({ payload }) { + yield put(removeHost(payload)); + yield put( + notify({ + text: `The host ${payload.hostname} has been deregistered.`, + icon: 'ℹ️', + }) + ); +} + +export function* watchHostDeregistered() { + yield takeEvery(HOST_DEREGISTERED, hostDeregistered); +} diff --git a/assets/js/state/sagas/hosts.test.js b/assets/js/state/sagas/hosts.test.js new file mode 100644 index 0000000000..017d14a0e8 --- /dev/null +++ b/assets/js/state/sagas/hosts.test.js @@ -0,0 +1,17 @@ +import { recordSaga } from '@lib/test-utils'; +import { hostDeregistered } from '@state/sagas/hosts'; +import { removeHost } from '@state/hosts'; +import { hostFactory } from '@lib/test-utils/factories'; + +describe('Hosts sagas', () => { + it('should remove the host', async () => { + const { id, hostname } = hostFactory.build(); + const payload = { id, hostname }; + + const dispatched = await recordSaga(hostDeregistered, { + payload, + }); + + expect(dispatched).toContainEqual(removeHost(payload)); + }); +}); diff --git a/assets/js/state/sagas/index.js b/assets/js/state/sagas/index.js index fab3ddb739..86262b38b2 100644 --- a/assets/js/state/sagas/index.js +++ b/assets/js/state/sagas/index.js @@ -62,6 +62,9 @@ import { watchAcceptEula } from '@state/sagas/eula'; import { watchCatalogUpdate } from '@state/sagas/catalog'; import { watchSapSystem } from '@state/sagas/sapSystems'; import { watchDatabase } from '@state/sagas/databases'; 
+import { watchHostDeregistered } from '@state/sagas/hosts'; +import { watchClusterDeregistered } from '@state/sagas/clusters'; + import { watchUpdateLastExecution, watchRequestExecution, @@ -111,7 +114,9 @@ function* initialDataFetch() { yield put(stopHostsLoading()); yield put(startClustersLoading()); - const { data: clusters } = yield call(get, '/clusters'); + const { data: clusters } = yield call(get, '/clusters', { + baseURL: '/api/v2', + }); yield put(setClusters(clusters)); yield put(stopClustersLoading()); @@ -386,9 +391,11 @@ export default function* rootSaga() { watchHostDetailsUpdated(), watchHeartbeatSucceded(), watchHeartbeatFailed(), + watchHostDeregistered(), watchClusterRegistered(), watchClusterDetailsUpdated(), watchClusterCibLastWrittenUpdated(), + watchClusterDeregistered(), watchNotifications(), watchChecksSelected(), watchChecksExecutionStarted(), diff --git a/assets/js/state/sagas/sapSystems.js b/assets/js/state/sagas/sapSystems.js index 79e27f1f1d..1964a66584 100644 --- a/assets/js/state/sagas/sapSystems.js +++ b/assets/js/state/sagas/sapSystems.js @@ -4,10 +4,16 @@ import { SAP_SYSTEM_HEALTH_CHANGED, APPLICATION_INSTANCE_REGISTERED, APPLICATION_INSTANCE_HEALTH_CHANGED, + APPLICATION_INSTANCE_DEREGISTERED, + SAP_SYSTEM_DEREGISTERED, + SAP_SYSTEM_UPDATED, appendSapsystem, updateSapSystemHealth, appendApplicationInstance, + removeApplicationInstance, updateApplicationInstanceHealth, + removeSAPSystem, + updateSAPSystem, } from '@state/sapSystems'; import { getSapSystem } from '@state/selectors'; import { appendEntryToLiveFeed } from '@state/liveFeed'; @@ -58,10 +64,40 @@ function* applicationInstanceRegistered({ payload }) { ); } +export function* applicationInstanceDeregistered({ payload }) { + yield put(removeApplicationInstance(payload)); + yield put( + appendEntryToLiveFeed({ + source: payload.sid, + message: 'Application instance deregistered.', + }) + ); + yield put( + notify({ + text: `The application instance ${payload.instance_number} has been deregistered from ${payload.sid}.`, + icon: 'ℹ️', + }) + ); +} + function* applicationInstanceHealthChanged({ payload }) { yield put(updateApplicationInstanceHealth(payload)); } +export function* sapSystemDeregistered({ payload: { id, sid } }) { + yield put(removeSAPSystem({ id })); + yield put( + notify({ + text: `The SAP System ${sid} has been deregistered.`, + icon: 'ℹ️', + }) + ); +} + +export function* sapSystemUpdated({ payload }) { + yield put(updateSAPSystem(payload)); +} + export function* watchSapSystem() { yield takeEvery(SAP_SYSTEM_REGISTERED, sapSystemRegistered); yield takeEvery(SAP_SYSTEM_HEALTH_CHANGED, sapSystemHealthChanged); @@ -69,8 +105,14 @@ export function* watchSapSystem() { APPLICATION_INSTANCE_REGISTERED, applicationInstanceRegistered ); + yield takeEvery( + APPLICATION_INSTANCE_DEREGISTERED, + applicationInstanceDeregistered + ); yield takeEvery( APPLICATION_INSTANCE_HEALTH_CHANGED, applicationInstanceHealthChanged ); + yield takeEvery(SAP_SYSTEM_DEREGISTERED, sapSystemDeregistered); + yield takeEvery(SAP_SYSTEM_UPDATED, sapSystemUpdated); } diff --git a/assets/js/state/sagas/sapSystems.test.js b/assets/js/state/sagas/sapSystems.test.js new file mode 100644 index 0000000000..83c470f66a --- /dev/null +++ b/assets/js/state/sagas/sapSystems.test.js @@ -0,0 +1,50 @@ +import { recordSaga } from '@lib/test-utils'; +import { + applicationInstanceDeregistered, + sapSystemDeregistered, + sapSystemUpdated, +} from '@state/sagas/sapSystems'; +import { + removeSAPSystem, + removeApplicationInstance, + 
updateSAPSystem, +} from '@state/sapSystems'; +import { + sapSystemFactory, + sapSystemApplicationInstanceFactory, +} from '@lib/test-utils/factories'; + +describe('SAP Systems sagas', () => { + it('should remove the SAP system', async () => { + const { id, sid } = sapSystemFactory.build(); + + const dispatched = await recordSaga(sapSystemDeregistered, { + payload: { id, sid }, + }); + + expect(dispatched).toContainEqual(removeSAPSystem({ id })); + }); + + it('should remove the application instance', async () => { + const { sap_system_id, host_id, instance_number } = + sapSystemApplicationInstanceFactory.build(); + + const dispatched = await recordSaga(applicationInstanceDeregistered, { + payload: { sap_system_id, host_id, instance_number }, + }); + + expect(dispatched).toContainEqual( + removeApplicationInstance({ sap_system_id, host_id, instance_number }) + ); + }); + + it('should update the SAP system', async () => { + const { id, ensa_version } = sapSystemFactory.build(); + + const dispatched = await recordSaga(sapSystemUpdated, { + payload: { id, ensa_version }, + }); + + expect(dispatched).toContainEqual(updateSAPSystem({ id, ensa_version })); + }); +}); diff --git a/assets/js/state/sapSystems.js b/assets/js/state/sapSystems.js index dd24e719f7..38730f766e 100644 --- a/assets/js/state/sapSystems.js +++ b/assets/js/state/sapSystems.js @@ -45,11 +45,37 @@ export const sapSystemsListSlice = createSlice({ action.payload, ]; }, + removeApplicationInstance: ( + state, + { payload: { sap_system_id, host_id, instance_number } } + ) => { + state.applicationInstances = state.applicationInstances.filter( + (applicationInstance) => + !( + applicationInstance.sap_system_id === sap_system_id && + applicationInstance.host_id === host_id && + applicationInstance.instance_number === instance_number + ) + ); + }, // When a new DatabaseInstanceRegistered comes in, // it needs to be appended to the list of database instances of the related SAP system appendDatabaseInstanceToSapSystem: (state, action) => { state.databaseInstances = [...state.databaseInstances, action.payload]; }, + removeDatabaseInstanceFromSapSystem: ( + state, + { payload: { sap_system_id, host_id, instance_number } } + ) => { + state.databaseInstances = state.databaseInstances.filter( + (databaseInstance) => + !( + databaseInstance.sap_system_id === sap_system_id && + databaseInstance.host_id === host_id && + databaseInstance.instance_number === instance_number + ) + ); + }, updateSapSystemHealth: (state, action) => { state.sapSystems = state.sapSystems.map((sapSystem) => { if (sapSystem.id === action.payload.id) { @@ -100,6 +126,19 @@ export const sapSystemsListSlice = createSlice({ return sapSystem; }); }, + removeSAPSystem: (state, { payload: { id } }) => { + state.sapSystems = state.sapSystems.filter( + (sapSystem) => sapSystem.id !== id + ); + }, + updateSAPSystem: (state, { payload }) => { + state.sapSystems = state.sapSystems.map((sapSystem) => { + if (sapSystem.id === payload.id) { + sapSystem = { ...sapSystem, ...payload }; + } + return sapSystem; + }); + }, }, }); @@ -107,8 +146,12 @@ export const SAP_SYSTEM_REGISTERED = 'SAP_SYSTEM_REGISTERED'; export const SAP_SYSTEM_HEALTH_CHANGED = 'SAP_SYSTEM_HEALTH_CHANGED'; export const APPLICATION_INSTANCE_REGISTERED = 'APPLICATION_INSTANCE_REGISTERED'; +export const APPLICATION_INSTANCE_DEREGISTERED = + 'APPLICATION_INSTANCE_DEREGISTERED'; export const APPLICATION_INSTANCE_HEALTH_CHANGED = 'APPLICATION_INSTANCE_HEALTH_CHANGED'; export const SAP_SYSTEM_DEREGISTERED = 
'SAP_SYSTEM_DEREGISTERED'; +export const SAP_SYSTEM_UPDATED = 'SAP_SYSTEM_UPDATED'; export const { startSapSystemsLoading, @@ -116,13 +159,17 @@ export const { setSapSystems, appendSapsystem, appendApplicationInstance, + removeApplicationInstance, appendDatabaseInstanceToSapSystem, + removeDatabaseInstanceFromSapSystem, updateSapSystemHealth, updateApplicationInstanceHealth, updateSAPSystemDatabaseInstanceHealth, updateSAPSystemDatabaseInstanceSystemReplication, addTagToSAPSystem, removeTagFromSAPSystem, + removeSAPSystem, + updateSAPSystem, } = sapSystemsListSlice.actions; export default sapSystemsListSlice.reducer; diff --git a/assets/js/state/sapSystems.test.js b/assets/js/state/sapSystems.test.js new file mode 100644 index 0000000000..5e69d3dcc7 --- /dev/null +++ b/assets/js/state/sapSystems.test.js @@ -0,0 +1,70 @@ +import sapSystemsReducer, { + removeSAPSystem, + removeApplicationInstance, + updateSAPSystem, +} from '@state/sapSystems'; +import { + sapSystemFactory, + sapSystemApplicationInstanceFactory, +} from '@lib/test-utils/factories/sapSystems'; + +describe('SAP Systems reducer', () => { + it('should remove SAP system from state', () => { + const [sapSystem1, sapSystem2] = sapSystemFactory.buildList(2); + const initialState = { + sapSystems: [sapSystem1, sapSystem2], + }; + + const action = removeSAPSystem(sapSystem1); + + const expectedState = { + sapSystems: [sapSystem2], + }; + + expect(sapSystemsReducer(initialState, action)).toEqual(expectedState); + }); + + it('should remove an application instance from state', () => { + const [instance1, instance2] = + sapSystemApplicationInstanceFactory.buildList(2); + + const initialState = { + applicationInstances: [instance1, instance2], + }; + + const action = removeApplicationInstance(instance1); + + const expectedState = { + applicationInstances: [instance2], + }; + + expect(sapSystemsReducer(initialState, action)).toEqual(expectedState); + }); + + it('should update a SAP system data', () => { + const changedIndex = 2; + const sapSystems = sapSystemFactory.buildList(5); + const initialState = { + sapSystems, + }; + + const updateEvent = { + id: sapSystems[changedIndex].id, + ensa_version: 'new_version', + }; + + const expectedSapSystems = [...sapSystems]; + expectedSapSystems[changedIndex] = { + ...sapSystems[changedIndex], + ...updateEvent, + }; + + const action = updateSAPSystem(updateEvent); + + const expectedState = { + sapSystems: expectedSapSystems, + }; + + expect(sapSystemsReducer(initialState, action)).toEqual(expectedState); + }); +}); diff --git a/config/config.exs b/config/config.exs index 4c5ffd0e5b..988dcf3037 100644 --- a/config/config.exs +++ b/config/config.exs @@ -82,6 +82,10 @@ config :trento, :pow, # Agent heartbeat interval. 
Adding one extra second to the agent 5s interval to avoid glitches config :trento, Trento.Heartbeats, interval: :timer.seconds(6) +# This is passed to the frontend as the time after the last heartbeat +# to wait before displaying the deregistration button +config :trento, deregistration_debounce: :timer.seconds(0) + config :trento, Trento.Scheduler, jobs: [ heartbeat_check: [ diff --git a/config/dev.exs b/config/dev.exs index a6ee55a9d5..38bed0df34 100644 --- a/config/dev.exs +++ b/config/dev.exs @@ -128,6 +128,8 @@ config :trento, :checks_service, base_url: "http://localhost:4001" config :unplug, :init_mode, :runtime +config :open_api_spex, :cache_adapter, OpenApiSpex.Plug.NoneCache + # Override with local dev.local.exs file if File.exists?("#{__DIR__}/dev.local.exs") do import_config "dev.local.exs" diff --git a/config/test.exs b/config/test.exs index 19912da12f..726b4ac6b6 100644 --- a/config/test.exs +++ b/config/test.exs @@ -41,6 +41,13 @@ config :logger, level: :warn # Initialize plugs at runtime for faster test compilation config :phoenix, :plug_init_mode, :runtime +# Agent heartbeat interval. Adding one extra second to the agent 5s interval to avoid glitches +config :trento, Trento.Heartbeats, interval: :timer.seconds(6) + +# This is passed to the frontend as the time after the last heartbeat +# to wait before displaying the deregistration button +config :trento, deregistration_debounce: :timer.seconds(5) + config :trento, api_key_authentication_enabled: false, jwt_authentication_enabled: false diff --git a/lib/trento/application.ex b/lib/trento/application.ex index 80e1c59de3..deeae33f72 100644 --- a/lib/trento/application.ex +++ b/lib/trento/application.ex @@ -21,6 +21,7 @@ defmodule Trento.Application do Trento.Scheduler, Trento.EventHandlersSupervisor, Trento.ProjectorsSupervisor, + Trento.ProcessManagersSupervisor, Trento.Infrastructure.Messaging.Adapter.AMQP.Publisher, Trento.Integration.Checks.AMQP.Consumer # Start a worker by calling: Trento.Worker.start_link(arg) diff --git a/lib/trento/application/event_handlers/stream_roll_up_event_handler.ex b/lib/trento/application/event_handlers/stream_roll_up_event_handler.ex index 89ede63164..f17ef07d0d 100644 --- a/lib/trento/application/event_handlers/stream_roll_up_event_handler.ex +++ b/lib/trento/application/event_handlers/stream_roll_up_event_handler.ex @@ -19,6 +19,12 @@ defmodule Trento.StreamRollUpEventHandler do RollUpSapSystem } + alias Trento.Domain.Events.{ + ClusterTombstoned, + HostTombstoned, + SapSystemTombstoned + } + require Logger @max_stream_version Application.compile_env!(:trento, [__MODULE__, :max_stream_version]) @@ -116,6 +122,32 @@ defmodule Trento.StreamRollUpEventHandler do end end + def handle(%HostTombstoned{host_id: host_id}, _) do + Logger.info("Rolling up host: #{host_id} because HostTombstoned was received") + + commanded().dispatch(%RollUpHost{host_id: host_id}, + consistency: :strong + ) + end + + def handle(%ClusterTombstoned{cluster_id: cluster_id}, _) do + Logger.info("Rolling up cluster: #{cluster_id} because ClusterTombstoned was received") + + commanded().dispatch(%RollUpCluster{cluster_id: cluster_id}, + consistency: :strong + ) + end + + def handle(%SapSystemTombstoned{sap_system_id: sap_system_id}, _) do + Logger.info( + "Rolling up sap system: #{sap_system_id} because SapSystemTombstoned was received" + ) + + commanded().dispatch(%RollUpSapSystem{sap_system_id: sap_system_id}, + consistency: :strong + ) + end + defp commanded, do: Application.fetch_env!(:trento, 
Trento.Commanded)[:adapter] end diff --git a/lib/trento/application/integration/discovery/discovery.ex b/lib/trento/application/integration/discovery/discovery.ex index 523aed6ac3..84bb7a45d1 100644 --- a/lib/trento/application/integration/discovery/discovery.ex +++ b/lib/trento/application/integration/discovery/discovery.ex @@ -17,8 +17,15 @@ defmodule Trento.Integration.Discovery do SapSystemPolicy } + alias Trento.{Clusters, SapSystems} + @type command :: struct + @doc """ + Transform a discovery event into a list of commands by using the appropriate policy. + Store the event in the discovery events log for auditing purposes and dispatch the commands. + """ + @spec handle(map) :: :ok | {:error, any} def handle(event) do with {:ok, commands} <- do_handle(event), @@ -33,6 +40,9 @@ defmodule Trento.Integration.Discovery do end end + @doc """ + Get the discovery events that were handled to build the current state of the system. + """ @spec get_current_discovery_events :: [DiscoveryEvent.t()] def get_current_discovery_events do subquery = @@ -47,6 +57,9 @@ defmodule Trento.Integration.Discovery do Repo.all(query) end + @doc """ + Get the discovery events that were dead-lettered. + """ @spec get_discarded_discovery_events(number) :: [DiscardedDiscoveryEvent.t()] def get_discarded_discovery_events(event_number) do query = @@ -57,6 +70,9 @@ defmodule Trento.Integration.Discovery do Repo.all(query) end + @doc """ + Prune the discovery events log by removing the events older than the given number of days. + """ @spec prune_events(number) :: non_neg_integer() def prune_events(days) do end_datetime = Timex.shift(DateTime.utc_now(), days: -days) @@ -69,6 +85,9 @@ defmodule Trento.Integration.Discovery do events_number end + @doc """ + Prune the discarded discovery events log by removing the events older than the given number of days. 
+ """ @spec prune_discarded_discovery_events(number) :: non_neg_integer() def prune_discarded_discovery_events(days) do end_datetime = Timex.shift(DateTime.utc_now(), days: -days) @@ -112,11 +131,18 @@ defmodule Trento.Integration.Discovery do defp do_handle(%{"discovery_type" => "subscription_discovery"} = event), do: HostPolicy.handle(event) - defp do_handle(%{"discovery_type" => "ha_cluster_discovery"} = event), - do: ClusterPolicy.handle(event) + defp do_handle(%{"discovery_type" => "ha_cluster_discovery", "agent_id" => agent_id} = event) do + current_cluster_id = Clusters.get_cluster_id_by_host_id(agent_id) + + ClusterPolicy.handle(event, current_cluster_id) + end - defp do_handle(%{"discovery_type" => "sap_system_discovery"} = event), - do: SapSystemPolicy.handle(event) + defp do_handle(%{"discovery_type" => "sap_system_discovery", "agent_id" => agent_id} = event) do + current_application_instances = SapSystems.get_application_instances_by_host_id(agent_id) + current_database_instances = SapSystems.get_database_instances_by_host_id(agent_id) + + SapSystemPolicy.handle(event, current_application_instances ++ current_database_instances) + end defp do_handle(_), do: {:error, :unknown_discovery_type} diff --git a/lib/trento/application/integration/discovery/payloads/cluster/cluster_discovery_payload.ex b/lib/trento/application/integration/discovery/payloads/cluster/cluster_discovery_payload.ex index 672ca729c6..e21848425d 100644 --- a/lib/trento/application/integration/discovery/payloads/cluster/cluster_discovery_payload.ex +++ b/lib/trento/application/integration/discovery/payloads/cluster/cluster_discovery_payload.ex @@ -5,6 +5,7 @@ defmodule Trento.Integration.Discovery.ClusterDiscoveryPayload do @required_fields [:dc, :provider, :id, :cluster_type, :cib, :sbd, :crmmon] @required_fields_hana [:sid] + @required_fields_ascs_ers [:additional_sids] use Trento.Type @@ -28,6 +29,7 @@ defmodule Trento.Integration.Discovery.ClusterDiscoveryPayload do field :name, :string field :cluster_type, Ecto.Enum, values: ClusterType.values() field :sid, :string + field :additional_sids, {:array, :string} embeds_one :cib, Cib embeds_one :sbd, Sbd @@ -52,9 +54,31 @@ defmodule Trento.Integration.Discovery.ClusterDiscoveryPayload do defp enrich_cluster_type(attrs), do: Map.put(attrs, "cluster_type", parse_cluster_type(attrs)) - defp enrich_cluster_sid(attrs), do: Map.put(attrs, "sid", parse_cluster_sid(attrs)) + defp enrich_cluster_sid(%{"cluster_type" => ClusterType.unknown()} = attrs) do + attrs + |> Map.put("sid", nil) + |> Map.put("additional_sids", []) + end + + defp enrich_cluster_sid(attrs) do + attrs + |> Map.put("sid", parse_cluster_sid(attrs)) + |> Map.put("additional_sids", parse_cluster_additional_sids(attrs)) + end + + defp parse_cluster_type(%{"crmmon" => %{"clones" => nil, "groups" => nil}}), + do: ClusterType.unknown() - defp parse_cluster_type(%{"crmmon" => %{"clones" => nil}}), do: :unknown + defp parse_cluster_type(%{"crmmon" => %{"clones" => nil, "groups" => groups}}) do + sap_instance_count = + Enum.count(groups, fn %{"resources" => resources} -> + Enum.any?(resources, fn %{"agent" => agent} -> + agent == "ocf::heartbeat:SAPInstance" + end) + end) + + do_detect_cluster_type(sap_instance_count) + end defp parse_cluster_type(%{"crmmon" => %{"clones" => clones}}) do has_sap_hana_topology = @@ -77,10 +101,17 @@ defmodule Trento.Integration.Discovery.ClusterDiscoveryPayload do do_detect_cluster_type(has_sap_hana_topology, has_sap_hana, has_sap_hana_controller) end + defp 
parse_cluster_type(_), do: ClusterType.unknown() + defp do_detect_cluster_type(true, true, _), do: ClusterType.hana_scale_up() defp do_detect_cluster_type(true, _, true), do: ClusterType.hana_scale_out() defp do_detect_cluster_type(_, _, _), do: ClusterType.unknown() + defp do_detect_cluster_type(count) when count >= 2 and rem(count, 2) == 0, + do: ClusterType.ascs_ers() + + defp do_detect_cluster_type(_), do: ClusterType.unknown() + defp parse_cluster_sid(%{ "cib" => %{"configuration" => %{"resources" => %{"clones" => nil}}} }), @@ -106,12 +137,41 @@ defmodule Trento.Integration.Discovery.ClusterDiscoveryPayload do end) end + defp parse_cluster_additional_sids(%{ + "cib" => %{"configuration" => %{"resources" => %{"clones" => nil, "groups" => groups}}} + }) do + groups + |> Enum.flat_map(fn + %{"primitives" => primitives} -> primitives + end) + |> Enum.flat_map(fn + %{"type" => "SAPInstance", "instance_attributes" => attributes} -> + attributes + + _ -> + [] + end) + |> Enum.flat_map(fn + %{"name" => "InstanceName", "value" => value} when value != "" -> + value |> String.split("_") |> Enum.at(0) |> List.wrap() + + _ -> + [] + end) + |> Enum.uniq() + end + + defp parse_cluster_additional_sids(_), do: [] + defp maybe_validate_required_fields(cluster, %{"cluster_type" => ClusterType.hana_scale_up()}), do: validate_required(cluster, @required_fields_hana) defp maybe_validate_required_fields(cluster, %{"cluster_type" => ClusterType.hana_scale_out()}), do: validate_required(cluster, @required_fields_hana) + defp maybe_validate_required_fields(cluster, %{"cluster_type" => ClusterType.ascs_ers()}), + do: validate_required(cluster, @required_fields_ascs_ers) + defp maybe_validate_required_fields(cluster, _), do: cluster end diff --git a/lib/trento/application/integration/discovery/payloads/cluster/crmmon_discovery_payload.ex b/lib/trento/application/integration/discovery/payloads/cluster/crmmon_discovery_payload.ex index 396a6bf065..9b4a0fe10c 100644 --- a/lib/trento/application/integration/discovery/payloads/cluster/crmmon_discovery_payload.ex +++ b/lib/trento/application/integration/discovery/payloads/cluster/crmmon_discovery_payload.ex @@ -165,6 +165,13 @@ defmodule Trento.Integration.Discovery.ClusterDiscoveryPayload.Crmmon do embeds_one :summary, Summary + embeds_many :nodes, Node, primary_key: false do + field :id, :string + field :name, :string + field :online, :boolean + field :unclean, :boolean + end + embeds_many :resources, CrmmonResource embeds_many :groups, CrmmonGroup, primary_key: false do @@ -205,6 +212,7 @@ defmodule Trento.Integration.Discovery.ClusterDiscoveryPayload.Crmmon do crmmon |> cast(transformed_attrs, [:version]) |> cast_embed(:summary) + |> cast_embed(:nodes, with: &nodes_changeset/2) |> cast_embed(:resources) |> cast_embed(:groups, with: &groups_changeset/2) |> cast_embed(:clones, with: &clones_changeset/2) @@ -213,6 +221,12 @@ defmodule Trento.Integration.Discovery.ClusterDiscoveryPayload.Crmmon do |> validate_required_fields(@required_fields) end + defp nodes_changeset(nodes, attrs) do + nodes + |> cast(attrs, [:id, :name, :online, :unclean]) + |> validate_required([:id, :name]) + end + defp groups_changeset(groups, attrs) do groups |> cast(attrs, [:id]) @@ -239,11 +253,11 @@ defmodule Trento.Integration.Discovery.ClusterDiscoveryPayload.Crmmon do defp node_attributes_changeset(node_attributes, attrs) do node_attributes |> cast(attrs, []) - |> cast_embed(:nodes, with: &nodes_changeset/2) + |> cast_embed(:nodes, with: &node_attributes_nodes_changeset/2) |> 
validate_required_fields([:nodes]) end - defp nodes_changeset(nodes, attrs) do + defp node_attributes_nodes_changeset(nodes, attrs) do nodes |> cast(attrs, [:name]) |> cast_embed(:attributes, with: &attributes_changeset/2) @@ -257,9 +271,11 @@ defmodule Trento.Integration.Discovery.ClusterDiscoveryPayload.Crmmon do end defp transform_nil_lists( - %{"groups" => groups, "clones" => clones, "resources" => resources} = attrs + %{"nodes" => nodes, "groups" => groups, "clones" => clones, "resources" => resources} = + attrs ) do attrs + |> Map.put("nodes", ListHelper.to_list(nodes)) |> Map.put("groups", ListHelper.to_list(groups)) |> Map.put("clones", ListHelper.to_list(clones)) |> Map.put("resources", ListHelper.to_list(resources)) diff --git a/lib/trento/application/integration/discovery/policies/cluster_policy.ex b/lib/trento/application/integration/discovery/policies/cluster_policy.ex index 1c2693fb59..a71bb9327c 100644 --- a/lib/trento/application/integration/discovery/policies/cluster_policy.ex +++ b/lib/trento/application/integration/discovery/policies/cluster_policy.ex @@ -5,25 +5,70 @@ defmodule Trento.Integration.Discovery.ClusterPolicy do require Trento.Domain.Enums.Provider, as: Provider require Trento.Domain.Enums.ClusterType, as: ClusterType + require Trento.Domain.Enums.Health, as: Health + require Trento.Domain.Enums.AscsErsClusterRole, as: AscsErsClusterRole - alias Trento.{ - Domain.Commands.RegisterClusterHost, - Integration.Discovery.ClusterDiscoveryPayload + alias Trento.Domain.Commands.{ + DeregisterClusterHost, + RegisterClusterHost } + alias Trento.Integration.Discovery.ClusterDiscoveryPayload + @uuid_namespace Application.compile_env!(:trento, :uuid_namespace) - def handle(%{ - "discovery_type" => "ha_cluster_discovery", - "agent_id" => agent_id, - "payload" => payload - }) do - payload - |> ProperCase.to_snake_case() - |> ClusterDiscoveryPayload.new() - |> case do - {:ok, decoded_payload} -> build_register_cluster_host_command(agent_id, decoded_payload) - error -> error + def handle( + %{ + "discovery_type" => "ha_cluster_discovery", + "agent_id" => agent_id, + "payload" => nil + }, + current_cluster_id + ) do + {:ok, + Enum.reject( + [ + build_deregister_cluster_host_command(agent_id, nil, current_cluster_id) + ], + &is_nil/1 + )} + end + + def handle( + %{ + "discovery_type" => "ha_cluster_discovery", + "agent_id" => agent_id, + "payload" => payload + }, + current_cluster_id + ) do + with {:ok, %ClusterDiscoveryPayload{id: cluster_id} = decoded_payload} <- + payload + |> ProperCase.to_snake_case() + |> ClusterDiscoveryPayload.new(), + {:ok, register_cluster_host_command} <- + build_register_cluster_host_command(agent_id, decoded_payload) do + {:ok, + Enum.reject( + [ + build_deregister_cluster_host_command(agent_id, cluster_id, current_cluster_id), + register_cluster_host_command + ], + &is_nil/1 + )} + end + end + + defp build_deregister_cluster_host_command(_, _, nil), + do: nil + + defp build_deregister_cluster_host_command(agent_id, cluster_id, current_cluster_id) do + if generate_cluster_id(cluster_id) != current_cluster_id do + DeregisterClusterHost.new!(%{ + host_id: agent_id, + cluster_id: current_cluster_id, + deregistered_at: DateTime.utc_now() + }) end end @@ -35,7 +80,8 @@ defmodule Trento.Integration.Discovery.ClusterPolicy do dc: designated_controller, provider: provider, cluster_type: cluster_type, - sid: sid + sid: sid, + additional_sids: additional_sids } = payload ) do cluster_details = parse_cluster_details(payload) @@ -45,6 +91,7 @@ defmodule 
Trento.Integration.Discovery.ClusterPolicy do host_id: agent_id, name: name, sid: sid, + additional_sids: additional_sids, type: cluster_type, designated_controller: designated_controller, resources_number: parse_resources_number(payload), @@ -84,6 +131,22 @@ defmodule Trento.Integration.Discovery.ClusterPolicy do } end + defp parse_cluster_details( + %{ + crmmon: crmmon, + sbd: sbd, + cluster_type: ClusterType.ascs_ers(), + additional_sids: additional_sids + } = payload + ) do + %{ + sap_systems: Enum.map(additional_sids, &parse_ascs_ers_cluster_sap_system(payload, &1)), + fencing_type: parse_cluster_fencing_type(crmmon), + stopped_resources: parse_cluster_stopped_resources(crmmon), + sbd_devices: parse_sbd_devices(sbd) + } + end + defp parse_cluster_details(_) do nil end @@ -107,8 +170,8 @@ defmodule Trento.Integration.Discovery.ClusterPolicy do ) do Enum.map(nodes, fn %{name: name, attributes: attributes} -> attributes = - Enum.reduce(attributes, %{}, fn %{name: name, value: value}, acc -> - Map.put(acc, name, value) + Enum.into(attributes, %{}, fn %{name: name, value: value} -> + {name, value} end) node_resources = parse_node_resources(name, crmmon) @@ -353,27 +416,182 @@ defmodule Trento.Integration.Discovery.ClusterPolicy do defp do_parse_hana_status("S", _), do: "Failed" defp do_parse_hana_status(_, _), do: "Unknown" + defp parse_ascs_ers_cluster_sap_system(payload, sid) do + resources_by_sid = get_resources_by_sid(payload, sid) + + is_filesystem_resource_based = + Enum.count(resources_by_sid, fn + %{type: "Filesystem"} -> true + _ -> false + end) == 2 + + %{ + sid: sid, + filesystem_resource_based: is_filesystem_resource_based, + distributed: is_distributed(payload, resources_by_sid), + nodes: parse_ascs_ers_cluster_nodes(payload, resources_by_sid) + } + end + + defp get_resources_by_sid(%{cib: %{configuration: %{resources: %{groups: groups}}}}, sid) do + Enum.flat_map(groups, fn + %{primitives: primitives} -> + primitives + |> Enum.find_value([], fn + %{type: "SAPInstance", instance_attributes: attributes} -> + attributes + + _ -> + nil + end) + |> Enum.find_value([], fn + %{name: "InstanceName", value: value} -> + # The nesting level looks reasonable to avoid having a new function + # credo:disable-for-next-line /\.Nesting/ + if value |> String.split("_") |> Enum.at(0) == sid, do: primitives + + _ -> + nil + end) + end) + end + + # Check if the SAPInstance resource is running in different nodes only using + # the resources belonging to a specific SAP system. + # The next conditions must met for the SAPInstance resources: + # - Role is Started + # - Failed is false + # - The nodes are in a clean state + # - The 2 SAPInstance resources are running in different nodes + defp is_distributed(%{crmmon: %{nodes: nodes, groups: groups}}, resources) do + resource_ids = Enum.map(resources, fn %{id: id} -> id end) + + clean_nodes = + Enum.flat_map(nodes, fn + %{name: name, online: true, unclean: false} -> [name] + _ -> [] + end) + + groups + |> Enum.flat_map(fn + %{resources: resources} -> resources + _ -> [] + end) + |> Enum.filter(fn + %{ + id: id, + agent: "ocf::heartbeat:SAPInstance", + role: "Started", + failed: false, + node: %{name: name} + } -> + id in resource_ids and name in clean_nodes + + _ -> + false + end) + |> Enum.uniq_by(fn + %{node: %{name: name}} -> name + end) + |> Enum.count() == 2 + end + + # Parse details from each node for a specific sid. 
+ # The runtime information of where the resource is running belongs to crmmon payload, + # but the data itself is in the cib payload, so both payloads must be crossed. + defp parse_ascs_ers_cluster_nodes( + %{ + provider: provider, + crmmon: %{nodes: nodes, node_attributes: %{nodes: node_attributes}} = crmmon + }, + cib_resources_by_sid + ) do + Enum.map(nodes, fn %{name: node_name} -> + cib_resource_ids = Enum.map(cib_resources_by_sid, fn %{id: id} -> id end) + + crm_node_resources = + node_name + |> parse_node_resources(crmmon) + |> Enum.filter(fn %{id: id} -> id in cib_resource_ids end) + + crm_node_resource_ids = Enum.map(crm_node_resources, fn %{id: id} -> id end) + + cib_node_resources = + Enum.filter(cib_resources_by_sid, fn %{id: id} -> id in crm_node_resource_ids end) + + attributes = + node_attributes + |> Enum.find_value([], fn + %{name: ^node_name, attributes: attributes} -> attributes + _ -> false + end) + |> Enum.into(%{}, fn %{name: name, value: value} -> + {name, value} + end) + + roles = + cib_node_resources + |> parse_resource_by_type("SAPInstance", "IS_ERS") + |> Enum.map(fn + "true" -> AscsErsClusterRole.ers() + _ -> AscsErsClusterRole.ascs() + end) + + virtual_ip_type = get_virtual_ip_type_suffix_by_provider(provider) + + %{ + name: node_name, + roles: roles, + virtual_ips: parse_resource_by_type(cib_node_resources, virtual_ip_type, "ip"), + filesystems: parse_resource_by_type(cib_node_resources, "Filesystem", "directory"), + attributes: attributes, + resources: crm_node_resources + } + end) + end + + defp parse_resource_by_type(resources, type, attribute_name) do + resources + |> Enum.filter(fn + %{type: ^type} -> true + _ -> false + end) + |> Enum.map(fn %{instance_attributes: instance_attributes} -> + Enum.find_value(instance_attributes, nil, fn + %{name: ^attribute_name, value: value} -> value + _ -> nil + end) + end) + end + defp parse_cib_last_written(%{ crmmon: %{summary: %{last_change: %{time: cib_last_written}}} }), do: cib_last_written + defp generate_cluster_id(nil), do: nil defp generate_cluster_id(id), do: UUID.uuid5(@uuid_namespace, id) defp parse_cluster_health(details, cluster_type) when cluster_type in [ClusterType.hana_scale_up(), ClusterType.hana_scale_out()], do: parse_hana_cluster_health(details) - defp parse_cluster_health(_, _), do: :unknown + defp parse_cluster_health(%{sap_systems: sap_systems}, ClusterType.ascs_ers()) do + Enum.find_value(sap_systems, Health.passing(), fn %{distributed: distributed} -> + if not distributed, do: Health.critical() + end) + end + + defp parse_cluster_health(_, _), do: Health.unknown() # Passing state if SR Health state is 4 and Sync state is SOK, everything else is critical # If data is not present for some reason the state goes to unknown defp parse_hana_cluster_health(%{sr_health_state: "4", secondary_sync_state: "SOK"}), - do: :passing + do: Health.passing() defp parse_hana_cluster_health(%{sr_health_state: "", secondary_sync_state: ""}), - do: :unknown + do: Health.unknown() defp parse_hana_cluster_health(%{sr_health_state: _, secondary_sync_state: _}), - do: :critical + do: Health.critical() end diff --git a/lib/trento/application/integration/discovery/policies/sap_system_policy.ex b/lib/trento/application/integration/discovery/policies/sap_system_policy.ex index eacd3fce4f..da5a57e3d2 100644 --- a/lib/trento/application/integration/discovery/policies/sap_system_policy.ex +++ b/lib/trento/application/integration/discovery/policies/sap_system_policy.ex @@ -3,11 +3,17 @@ defmodule 
Trento.Integration.Discovery.SapSystemPolicy do This module contains functions to transform SAP system related integration events into commands.. """ + require Trento.Domain.Enums.EnsaVersion, as: EnsaVersion + alias Trento.Domain.Commands.{ + DeregisterApplicationInstance, + DeregisterDatabaseInstance, RegisterApplicationInstance, RegisterDatabaseInstance } + alias Trento.{ApplicationInstanceReadModel, DatabaseInstanceReadModel} + alias Trento.Integration.Discovery.SapSystemDiscoveryPayload alias SapSystemDiscoveryPayload.{ @@ -24,31 +30,53 @@ defmodule Trento.Integration.Discovery.SapSystemPolicy do @application_type 2 @diagnostics_type 3 - @spec handle(map) :: - {:ok, [RegisterApplicationInstance.t() | RegisterDatabaseInstance.t()]} | {:error, any} - def handle(%{ - "discovery_type" => "sap_system_discovery", - "agent_id" => agent_id, - "payload" => payload - }) do - case SapSystemDiscoveryPayload.new(payload) do - {:ok, sap_systems} -> - sap_systems - |> Enum.flat_map(fn sap_system -> build_commands(sap_system, agent_id) end) - |> Enum.reduce_while( - {:ok, []}, - fn - {:ok, command}, {:ok, commands} -> {:cont, {:ok, commands ++ [command]}} - {:error, _} = error, _ -> {:halt, error} - end - ) - - error -> - error + @spec handle(map, [ApplicationInstanceReadModel.t() | DatabaseInstanceReadModel.t()]) :: + {:ok, + [ + DeregisterApplicationInstance.t() + | DeregisterDatabaseInstance.t() + | RegisterApplicationInstance.t() + | RegisterDatabaseInstance.t() + ]} + | {:error, any} + def handle( + %{ + "discovery_type" => "sap_system_discovery", + "agent_id" => agent_id, + "payload" => payload + }, + current_instances + ) do + with {:ok, sap_systems} <- SapSystemDiscoveryPayload.new(payload), + {:ok, register_instance_commands} <- + sap_systems + |> Enum.flat_map(fn sap_system -> + build_register_instances_commands(sap_system, agent_id) + end) + |> Enum.reduce_while( + {:ok, []}, + fn + {:ok, command}, {:ok, commands} -> {:cont, {:ok, commands ++ [command]}} + {:error, _} = error, _ -> {:halt, error} + end + ) do + # Build deregistration commands but only for instances that are not + # present in the discovery payload anymore. 
+ deregister_instance_commands = + current_instances + |> Enum.reject(fn current_instance -> + Enum.any?(register_instance_commands, fn instance -> + instance.host_id == current_instance.host_id && + instance.instance_number == current_instance.instance_number + end) + end) + |> build_deregister_instances_commands() + + {:ok, deregister_instance_commands ++ register_instance_commands} end end - defp build_commands( + defp build_register_instances_commands( %SapSystemDiscoveryPayload{ Id: id, SID: sid, @@ -79,7 +107,7 @@ defmodule Trento.Integration.Discovery.SapSystemPolicy do end) end - defp build_commands( + defp build_register_instances_commands( %SapSystemDiscoveryPayload{ SID: sid, Type: @application_type, @@ -103,13 +131,39 @@ defmodule Trento.Integration.Discovery.SapSystemPolicy do https_port: parse_https_port(instance), start_priority: parse_start_priority(instance), host_id: host_id, - health: parse_dispstatus(instance) + health: parse_dispstatus(instance), + ensa_version: parse_ensa_version(instance) }) end) end - defp build_commands(%SapSystemDiscoveryPayload{Type: @diagnostics_type}, _), do: [] - defp build_commands(%SapSystemDiscoveryPayload{Type: @unknown_type}, _), do: [] + defp build_register_instances_commands(%SapSystemDiscoveryPayload{Type: @diagnostics_type}, _), + do: [] + + defp build_register_instances_commands(%SapSystemDiscoveryPayload{Type: @unknown_type}, _), + do: [] + + defp build_deregister_instances_commands(current_instances) do + Enum.map(current_instances, fn + %ApplicationInstanceReadModel{} = instance -> + DeregisterApplicationInstance.new!(%{ + sid: instance.sid, + host_id: instance.host_id, + instance_number: instance.instance_number, + sap_system_id: instance.sap_system_id, + deregistered_at: DateTime.utc_now() + }) + + %DatabaseInstanceReadModel{} = instance -> + DeregisterDatabaseInstance.new!(%{ + sid: instance.sid, + host_id: instance.host_id, + instance_number: instance.instance_number, + sap_system_id: instance.sap_system_id, + deregistered_at: DateTime.utc_now() + }) + end) + end defp parse_instance_number(instance), do: parse_sap_control_property("SAPSYSTEM", instance) @@ -173,4 +227,16 @@ defmodule Trento.Integration.Discovery.SapSystemPolicy do SystemReplication: %SystemReplication{overall_replication_status: status} }), do: status + + defp parse_ensa_version(%Instance{SAPControl: %SapControl{Processes: processes}}) do + Enum.find_value(processes, EnsaVersion.no_ensa(), fn + %{name: "enserver"} -> EnsaVersion.ensa1() + %{name: "enrepserver"} -> EnsaVersion.ensa1() + %{name: "enq_server"} -> EnsaVersion.ensa2() + %{name: "enq_replicator"} -> EnsaVersion.ensa2() + _ -> nil + end) + end + + defp parse_ensa_version(_), do: EnsaVersion.no_ensa() end diff --git a/lib/trento/application/integration/discovery/protocol/enrich_register_application_instance.ex b/lib/trento/application/integration/discovery/protocol/enrich_register_application_instance.ex index 173820bed4..ad8b1ce20f 100644 --- a/lib/trento/application/integration/discovery/protocol/enrich_register_application_instance.ex +++ b/lib/trento/application/integration/discovery/protocol/enrich_register_application_instance.ex @@ -14,14 +14,14 @@ defimpl Trento.Support.Middleware.Enrichable, from d in DatabaseInstanceReadModel, join: h in HostReadModel, on: d.host_id == h.id, - where: ^db_host in h.ip_addresses and ^tenant == d.tenant + where: ^db_host in h.ip_addresses and ^tenant == d.tenant and is_nil(h.deregistered_at) case Repo.one(query) do %DatabaseInstanceReadModel{sap_system_id: 
sap_system_id} ->
         {:ok, %RegisterApplicationInstance{command | sap_system_id: sap_system_id}}

       nil ->
-        {:error, :database_not_found}
+        {:error, :database_not_registered}
     end
   end
 end
diff --git a/lib/trento/application/integration/discovery/protocol/enrich_request_host_deregistration.ex b/lib/trento/application/integration/discovery/protocol/enrich_request_host_deregistration.ex
new file mode 100644
index 0000000000..5f5a81db45
--- /dev/null
+++ b/lib/trento/application/integration/discovery/protocol/enrich_request_host_deregistration.ex
@@ -0,0 +1,47 @@
+defimpl Trento.Support.Middleware.Enrichable,
+  for: Trento.Domain.Commands.RequestHostDeregistration do
+  alias Trento.Domain.Commands.RequestHostDeregistration
+
+  alias Trento.{
+    Hosts,
+    HostReadModel
+  }
+
+  @heartbeat_interval Application.compile_env!(:trento, Trento.Heartbeats)[:interval]
+  @deregistration_debounce Application.compile_env!(
+                             :trento,
+                             :deregistration_debounce
+                           )
+  @total_deregistration_debounce @heartbeat_interval + @deregistration_debounce
+
+  @spec enrich(RequestHostDeregistration.t(), map) ::
+          {:ok, RequestHostDeregistration.t()}
+          | {:error, :host_alive}
+          | {:error, :host_not_registered}
+  def enrich(%RequestHostDeregistration{host_id: host_id} = command, _),
+    do: host_deregisterable(Hosts.get_host_by_id(host_id), command)
+
+  defp host_deregisterable(
+         %HostReadModel{last_heartbeat_timestamp: nil, deregistered_at: nil},
+         %RequestHostDeregistration{} = command
+       ),
+       do: {:ok, command}
+
+  defp host_deregisterable(
+         %HostReadModel{
+           last_heartbeat_timestamp: last_heartbeat_timestamp,
+           deregistered_at: nil
+         },
+         %RequestHostDeregistration{} = command
+       ) do
+    if :lt ==
+         DateTime.compare(
+           DateTime.utc_now(),
+           DateTime.add(last_heartbeat_timestamp, @total_deregistration_debounce, :millisecond)
+         ),
+       do: {:error, :host_alive},
+       else: {:ok, command}
+  end
+
+  defp host_deregisterable(_, _), do: {:error, :host_not_registered}
+end
diff --git a/lib/trento/application/integration/prometheus/prometheus.ex b/lib/trento/application/integration/prometheus/prometheus.ex
index 7509b6526d..6908f7ab93 100644
--- a/lib/trento/application/integration/prometheus/prometheus.ex
+++ b/lib/trento/application/integration/prometheus/prometheus.ex
@@ -3,13 +3,11 @@ defmodule Trento.Integration.Prometheus do
   Prometheus integration service
   """

-  alias Trento.Repo
-
-  alias Trento.HostReadModel
+  alias Trento.Hosts

   @spec get_targets :: [map]
   def get_targets do
-    Repo.all(HostReadModel)
+    Hosts.get_all_hosts()
   end

   @spec get_exporters_status(String.t()) :: {:ok, map} | {:error, any}
diff --git a/lib/trento/application/process_managers/deregistration_process_manager.ex b/lib/trento/application/process_managers/deregistration_process_manager.ex
new file mode 100644
index 0000000000..76b314a465
--- /dev/null
+++ b/lib/trento/application/process_managers/deregistration_process_manager.ex
@@ -0,0 +1,314 @@
+defmodule Trento.DeregistrationProcessManager do
+  @moduledoc """
+  DeregistrationProcessManager is a Commanded ProcessManager; it is responsible
+  for the deregistration procedure of the aggregates.
+
+  This represents a transaction to ensure that the procedure of deregistering domain aggregates
+  follows a certain path and satisfies certain prerequisites.
+
+  For more information see https://hexdocs.pm/commanded/process-managers.html
+  """
+
+  defmodule Instance do
+    @moduledoc """
+    An application or database instance and which SAP System it belongs to.
+ """ + @required_fields :all + use Trento.Type + + deftype do + field :sap_system_id, Ecto.UUID + field :instance_number, :string + end + end + + use Commanded.ProcessManagers.ProcessManager, + application: Trento.Commanded, + name: "deregistration_process_manager" + + @required_fields [] + use Trento.Type + + deftype do + field :cluster_id, Ecto.UUID + embeds_many :application_instances, Instance + embeds_many :database_instances, Instance + end + + alias Trento.DeregistrationProcessManager + + alias Trento.Domain.Events.{ + ApplicationInstanceDeregistered, + ApplicationInstanceRegistered, + ClusterRolledUp, + DatabaseInstanceDeregistered, + DatabaseInstanceRegistered, + HostAddedToCluster, + HostDeregistered, + HostDeregistrationRequested, + HostRegistered, + HostRemovedFromCluster, + HostRolledUp, + SapSystemRolledUp + } + + alias Trento.Domain.Commands.{ + DeregisterApplicationInstance, + DeregisterClusterHost, + DeregisterDatabaseInstance, + DeregisterHost + } + + alias Trento.Domain.SapSystem + + @doc """ + The Process Manager is started by the following events (provided the instance hasn't been started already): + - HostRegistered for a newly registered host. + - HostAddedToCluster when a Host gets added to a Cluster, as this event may arrive prior to the + HostRegistered event. + - DatabaseInstanceRegistered when an instance gets added to a SAP system, as this event may arrive prior to the + HostRegistered event. + - ApplicationInstanceRegistered when an instance gets added to a SAP system, as this event may arrive prior to the + HostRegistered event. + - "Rolled-Up" events: + - HostRolledUp as the HostRegistered event might have been rolled up. + - ClusterRolledUp as the HostAddedToCluster event might have been rolled up. + - SapSystemRolledUp as the DatabaseInstanceRegistered/ApplicationInstanceRegistered events might have been + rolled up. + + HostDeregistered stops a Process Manager for the Host identified by host_id. 
+ """ + # Start the Process Manager + def interested?(%HostRegistered{host_id: host_id}), do: {:start, host_id} + def interested?(%HostRolledUp{host_id: host_id}), do: {:start, host_id} + def interested?(%HostAddedToCluster{host_id: host_id}), do: {:start, host_id} + def interested?(%ClusterRolledUp{snapshot: %{hosts: hosts}}), do: {:start, hosts} + def interested?(%DatabaseInstanceRegistered{host_id: host_id}), do: {:start, host_id} + def interested?(%ApplicationInstanceRegistered{host_id: host_id}), do: {:start, host_id} + + def interested?(%SapSystemRolledUp{ + snapshot: %SapSystem{ + database: %SapSystem.Database{instances: db_instances}, + application: %SapSystem.Application{instances: app_instances} + } + }), + do: + {:start, + (db_instances ++ app_instances) + |> Enum.map(fn %SapSystem.Instance{host_id: host_id} -> host_id end) + |> Enum.uniq()} + + # Continue the Process Manager + def interested?(%HostDeregistrationRequested{host_id: host_id}), do: {:continue, host_id} + def interested?(%HostRemovedFromCluster{host_id: host_id}), do: {:continue, host_id} + def interested?(%DatabaseInstanceDeregistered{host_id: host_id}), do: {:continue, host_id} + def interested?(%ApplicationInstanceDeregistered{host_id: host_id}), do: {:continue, host_id} + # Stop the Process Manager + def interested?(%HostDeregistered{host_id: host_id}), do: {:stop, host_id} + + def interested?(_event), do: false + + def handle( + %DeregistrationProcessManager{ + cluster_id: nil, + application_instances: [], + database_instances: [] + }, + %HostDeregistrationRequested{ + host_id: host_id, + requested_at: requested_at + } + ) do + %DeregisterHost{host_id: host_id, deregistered_at: requested_at} + end + + def handle( + %DeregistrationProcessManager{ + cluster_id: cluster_id, + database_instances: database_instances, + application_instances: application_instances + }, + %HostDeregistrationRequested{ + host_id: host_id, + requested_at: requested_at + } + ) do + database_instances_deregister_commands = + Enum.map(database_instances, fn %Instance{ + sap_system_id: sap_system_id, + instance_number: instance_number + } -> + %DeregisterDatabaseInstance{ + sap_system_id: sap_system_id, + instance_number: instance_number, + host_id: host_id, + deregistered_at: requested_at + } + end) + + application_instances_deregister_commands = + Enum.map(application_instances, fn %Instance{ + sap_system_id: sap_system_id, + instance_number: instance_number + } -> + %DeregisterApplicationInstance{ + sap_system_id: sap_system_id, + instance_number: instance_number, + host_id: host_id, + deregistered_at: requested_at + } + end) + + database_instances_deregister_commands ++ + application_instances_deregister_commands ++ + maybe_deregister_cluster_host(cluster_id, host_id, requested_at) ++ + [%DeregisterHost{host_id: host_id, deregistered_at: requested_at}] + end + + def apply(%DeregistrationProcessManager{} = state, %HostAddedToCluster{ + cluster_id: cluster_id + }) do + %DeregistrationProcessManager{state | cluster_id: cluster_id} + end + + def apply(%DeregistrationProcessManager{} = state, %HostRemovedFromCluster{}) do + %DeregistrationProcessManager{state | cluster_id: nil} + end + + def apply(%DeregistrationProcessManager{} = state, %ClusterRolledUp{ + cluster_id: cluster_id + }) do + %DeregistrationProcessManager{state | cluster_id: cluster_id} + end + + def apply( + %DeregistrationProcessManager{database_instances: database_instances} = state, + %DatabaseInstanceRegistered{ + sap_system_id: sap_system_id, + instance_number: 
instance_number + } + ) do + %DeregistrationProcessManager{ + state + | database_instances: [ + %Instance{ + sap_system_id: sap_system_id, + instance_number: instance_number + } + | database_instances + ] + } + end + + def apply( + %DeregistrationProcessManager{application_instances: application_instances} = state, + %ApplicationInstanceRegistered{ + sap_system_id: sap_system_id, + instance_number: instance_number + } + ) do + %DeregistrationProcessManager{ + state + | application_instances: [ + %Instance{ + sap_system_id: sap_system_id, + instance_number: instance_number + } + | application_instances + ] + } + end + + def apply( + %DeregistrationProcessManager{ + database_instances: database_instances, + application_instances: application_instances + } = state, + %SapSystemRolledUp{ + sap_system_id: snapshot_sap_system_id, + snapshot: %SapSystem{ + database: %SapSystem.Database{instances: snapshot_database_instances}, + application: %SapSystem.Application{instances: snapshot_application_instances} + } + } + ) do + new_database_instances = + snapshot_database_instances + |> Enum.map(fn %SapSystem.Instance{ + instance_number: instance_number + } -> + %Instance{sap_system_id: snapshot_sap_system_id, instance_number: instance_number} + end) + |> Enum.concat(database_instances) + |> Enum.uniq() + + new_application_instances = + snapshot_application_instances + |> Enum.map(fn %SapSystem.Instance{ + instance_number: instance_number + } -> + %Instance{sap_system_id: snapshot_sap_system_id, instance_number: instance_number} + end) + |> Enum.concat(application_instances) + |> Enum.uniq() + + %DeregistrationProcessManager{ + state + | database_instances: new_database_instances, + application_instances: new_application_instances + } + end + + def apply( + %DeregistrationProcessManager{database_instances: database_instances} = state, + %DatabaseInstanceDeregistered{instance_number: instance_number} + ) do + %DeregistrationProcessManager{ + state + | database_instances: + Enum.reject(database_instances, fn %Instance{ + instance_number: current_instance_number + } -> + current_instance_number == instance_number + end) + } + end + + def apply( + %DeregistrationProcessManager{application_instances: application_instances} = state, + %ApplicationInstanceDeregistered{instance_number: instance_number} + ) do + %DeregistrationProcessManager{ + state + | application_instances: + Enum.reject(application_instances, fn %Instance{ + instance_number: current_instance_number + } -> + current_instance_number == instance_number + end) + } + end + + # Retry the rollup errors, stop the process on other errors + + def error({:error, :host_rolling_up}, _command_or_event, %{context: context}), + do: {:retry, context} + + def error({:error, :cluster_rolling_up}, _command_or_event, %{context: context}), + do: {:retry, context} + + def error({:error, :sap_system_rolling_up}, _command_or_event, %{context: context}), + do: {:retry, context} + + defp maybe_deregister_cluster_host(nil, _, _), do: [] + + defp maybe_deregister_cluster_host(cluster_id, host_id, requested_at) do + [ + %DeregisterClusterHost{ + host_id: host_id, + cluster_id: cluster_id, + deregistered_at: requested_at + } + ] + end +end diff --git a/lib/trento/application/projectors/cluster_projector.ex b/lib/trento/application/projectors/cluster_projector.ex index f0659eba8f..08852427d7 100644 --- a/lib/trento/application/projectors/cluster_projector.ex +++ b/lib/trento/application/projectors/cluster_projector.ex @@ -8,13 +8,15 @@ defmodule Trento.ClusterProjector do 
repo: Trento.Repo, name: "cluster_projector" - alias TrentoWeb.V1.ClusterView + alias TrentoWeb.V2.ClusterView alias Trento.Domain.Events.{ ChecksSelected, + ClusterDeregistered, ClusterDetailsUpdated, ClusterHealthChanged, - ClusterRegistered + ClusterRegistered, + ClusterRestored } alias Trento.ClusterReadModel @@ -28,6 +30,7 @@ defmodule Trento.ClusterProjector do cluster_id: id, name: name, sid: sid, + additional_sids: additional_sids, provider: provider, type: type, resources_number: resources_number, @@ -41,6 +44,7 @@ defmodule Trento.ClusterProjector do id: id, name: name, sid: sid, + additional_sids: additional_sids, provider: provider, type: type, resources_number: resources_number, @@ -53,11 +57,43 @@ defmodule Trento.ClusterProjector do end ) + project( + %ClusterDeregistered{ + cluster_id: cluster_id, + deregistered_at: deregistered_at + }, + fn multi -> + changeset = + ClusterReadModel.changeset(%ClusterReadModel{id: cluster_id}, %{ + deregistered_at: deregistered_at + }) + + Ecto.Multi.update(multi, :cluster, changeset) + end + ) + + project( + %ClusterRestored{ + cluster_id: cluster_id + }, + fn multi -> + cluster = Repo.get!(ClusterReadModel, cluster_id) + + changeset = + ClusterReadModel.changeset(cluster, %{ + deregistered_at: nil + }) + + Ecto.Multi.update(multi, :cluster, changeset) + end + ) + project( %ClusterDetailsUpdated{ cluster_id: id, name: name, sid: sid, + additional_sids: additional_sids, provider: provider, type: type, resources_number: resources_number, @@ -69,6 +105,7 @@ defmodule Trento.ClusterProjector do ClusterReadModel.changeset(%ClusterReadModel{id: id}, %{ name: name, sid: sid, + additional_sids: additional_sids, provider: provider, type: type, resources_number: resources_number, @@ -147,5 +184,28 @@ defmodule Trento.ClusterProjector do }) end + @impl true + def after_update(%ClusterDeregistered{cluster_id: cluster_id}, _, _) do + %ClusterReadModel{name: name} = Repo.get!(ClusterReadModel, cluster_id) + + TrentoWeb.Endpoint.broadcast("monitoring:clusters", "cluster_deregistered", %{ + id: cluster_id, + name: name + }) + end + + @impl true + def after_update(%ClusterRestored{cluster_id: cluster_id}, _, _) do + cluster = Repo.get!(ClusterReadModel, cluster_id) + + restored_cluster = enrich_cluster_model(cluster) + + TrentoWeb.Endpoint.broadcast( + "monitoring:clusters", + "cluster_registered", + ClusterView.render("cluster_registered.json", cluster: restored_cluster) + ) + end + def after_update(_, _, _), do: :ok end diff --git a/lib/trento/application/projectors/database_projector.ex b/lib/trento/application/projectors/database_projector.ex index 3cb2d8d5f1..d74233b7a5 100644 --- a/lib/trento/application/projectors/database_projector.ex +++ b/lib/trento/application/projectors/database_projector.ex @@ -16,13 +16,18 @@ defmodule Trento.DatabaseProjector do alias TrentoWeb.V1.SapSystemView alias Trento.Domain.Events.{ + DatabaseDeregistered, DatabaseHealthChanged, + DatabaseInstanceDeregistered, DatabaseInstanceHealthChanged, DatabaseInstanceRegistered, DatabaseInstanceSystemReplicationChanged, - DatabaseRegistered + DatabaseRegistered, + DatabaseRestored } + alias Trento.Repo + @databases_topic "monitoring:databases" project( @@ -138,6 +143,59 @@ defmodule Trento.DatabaseProjector do end ) + project( + %DatabaseDeregistered{ + sap_system_id: sap_system_id, + deregistered_at: deregistered_at + }, + fn multi -> + changeset = + DatabaseReadModel.changeset( + %DatabaseReadModel{ + id: sap_system_id + }, + %{deregistered_at: deregistered_at} + ) + + 
Ecto.Multi.update(multi, :database, changeset) + end + ) + + project( + %DatabaseRestored{ + sap_system_id: sap_system_id, + health: health + }, + fn multi -> + db = Repo.get!(DatabaseReadModel, sap_system_id) + + changeset = + DatabaseReadModel.changeset( + db, + %{deregistered_at: nil, health: health} + ) + + Ecto.Multi.update(multi, :database, changeset) + end + ) + + project( + %DatabaseInstanceDeregistered{ + instance_number: instance_number, + host_id: host_id, + sap_system_id: sap_system_id + }, + fn multi -> + deregistered_instance = %DatabaseInstanceReadModel{ + sap_system_id: sap_system_id, + instance_number: instance_number, + host_id: host_id + } + + Ecto.Multi.delete(multi, :database_instance, deregistered_instance) + end + ) + @impl true def after_update( %DatabaseRegistered{}, @@ -240,6 +298,69 @@ defmodule Trento.DatabaseProjector do ) end + @impl true + def after_update( + %DatabaseRestored{sap_system_id: sap_system_id}, + _, + _ + ) do + database = Repo.get!(DatabaseReadModel, sap_system_id) + + TrentoWeb.Endpoint.broadcast( + @databases_topic, + "database_registered", + SapSystemView.render("database_registered.json", database: database) + ) + end + + @impl true + def after_update( + %DatabaseDeregistered{ + sap_system_id: sap_system_id + }, + _, + _ + ) do + %DatabaseReadModel{ + sid: sid + } = Repo.get(DatabaseReadModel, sap_system_id) + + TrentoWeb.Endpoint.broadcast( + @databases_topic, + "database_deregistered", + SapSystemView.render("database_deregistered.json", + id: sap_system_id, + sid: sid + ) + ) + end + + @impl true + def after_update( + %DatabaseInstanceDeregistered{ + instance_number: instance_number, + host_id: host_id, + sap_system_id: sap_system_id + }, + _, + _ + ) do + %DatabaseReadModel{ + sid: sid + } = Repo.get!(DatabaseReadModel, sap_system_id) + + TrentoWeb.Endpoint.broadcast( + @databases_topic, + "database_instance_deregistered", + SapSystemView.render("instance_deregistered.json", + sap_system_id: sap_system_id, + instance_number: instance_number, + host_id: host_id, + sid: sid + ) + ) + end + @impl true def after_update(_, _, _), do: :ok end diff --git a/lib/trento/application/projectors/host_projector.ex b/lib/trento/application/projectors/host_projector.ex index 76a3e10849..5ac053acab 100644 --- a/lib/trento/application/projectors/host_projector.ex +++ b/lib/trento/application/projectors/host_projector.ex @@ -17,8 +17,11 @@ defmodule Trento.HostProjector do HeartbeatSucceded, HostAddedToCluster, HostChecksSelected, + HostDeregistered, HostDetailsUpdated, HostRegistered, + HostRemovedFromCluster, + HostRestored, ProviderUpdated } @@ -48,6 +51,37 @@ defmodule Trento.HostProjector do end ) + project( + %HostDeregistered{ + host_id: id, + deregistered_at: deregistered_at + }, + fn multi -> + changeset = + HostReadModel.changeset(%HostReadModel{id: id}, %{ + deregistered_at: deregistered_at + }) + + Ecto.Multi.update(multi, :host, changeset) + end + ) + + project( + %HostRestored{ + host_id: id + }, + fn multi -> + host = Repo.get!(HostReadModel, id) + + changeset = + HostReadModel.changeset(host, %{ + deregistered_at: nil + }) + + Ecto.Multi.update(multi, :host, changeset) + end + ) + project( %HostAddedToCluster{ host_id: id, @@ -66,6 +100,29 @@ defmodule Trento.HostProjector do end ) + project( + %HostRemovedFromCluster{ + host_id: id, + cluster_id: cluster_id + }, + fn multi -> + host = Repo.get!(HostReadModel, id) + # Only remove the cluster_id if it matches the one in the event + # We cannot guarantee the order of the events during the 
delta deregistration, + # so we need to make sure we don't remove the cluster_id if it has been overwritten by HostAddedToCluster + if host.cluster_id == cluster_id do + changeset = + HostReadModel.changeset(host, %{ + cluster_id: nil + }) + + Ecto.Multi.update(multi, :host, changeset) + else + multi + end + end + ) + project( %HostDetailsUpdated{ host_id: id, @@ -162,6 +219,37 @@ defmodule Trento.HostProjector do ) end + def after_update( + %HostRestored{host_id: id}, + _, + _ + ) do + host = Repo.get!(HostReadModel, id) + + TrentoWeb.Endpoint.broadcast( + "monitoring:hosts", + "host_registered", + HostView.render("host_registered.json", host: host) + ) + end + + def after_update( + %HostDeregistered{host_id: id}, + _, + _ + ) do + %HostReadModel{hostname: hostname} = Repo.get!(HostReadModel, id) + + TrentoWeb.Endpoint.broadcast( + "monitoring:hosts", + "host_deregistered", + %{ + id: id, + hostname: hostname + } + ) + end + def after_update( %HostAddedToCluster{host_id: id, cluster_id: cluster_id}, _, diff --git a/lib/trento/application/projectors/sap_system_projector.ex b/lib/trento/application/projectors/sap_system_projector.ex index 6d12cadc14..86d7d73683 100644 --- a/lib/trento/application/projectors/sap_system_projector.ex +++ b/lib/trento/application/projectors/sap_system_projector.ex @@ -9,10 +9,14 @@ defmodule Trento.SapSystemProjector do name: "sap_system_projector" alias Trento.Domain.Events.{ + ApplicationInstanceDeregistered, ApplicationInstanceHealthChanged, ApplicationInstanceRegistered, + SapSystemDeregistered, SapSystemHealthChanged, - SapSystemRegistered + SapSystemRegistered, + SapSystemRestored, + SapSystemUpdated } alias TrentoWeb.V1.SapSystemView @@ -22,13 +26,16 @@ defmodule Trento.SapSystemProjector do SapSystemReadModel } + alias Trento.Repo + project( %SapSystemRegistered{ sap_system_id: sap_system_id, sid: sid, tenant: tenant, db_host: db_host, - health: health + health: health, + ensa_version: ensa_version }, fn multi -> changeset = @@ -37,7 +44,8 @@ defmodule Trento.SapSystemProjector do sid: sid, tenant: tenant, db_host: db_host, - health: health + health: health, + ensa_version: ensa_version }) Ecto.Multi.insert(multi, :sap_system, changeset) @@ -111,9 +119,81 @@ defmodule Trento.SapSystemProjector do end ) + project( + %SapSystemDeregistered{ + sap_system_id: sap_system_id, + deregistered_at: deregistered_at + }, + fn multi -> + changeset = + SapSystemReadModel.changeset( + %SapSystemReadModel{id: sap_system_id}, + %{deregistered_at: deregistered_at} + ) + + Ecto.Multi.update(multi, :sap_system, changeset) + end + ) + + project( + %SapSystemRestored{ + sap_system_id: sap_system_id, + tenant: tenant, + db_host: db_host, + health: health + }, + fn multi -> + sap_system = Repo.get!(SapSystemReadModel, sap_system_id) + + changeset = + SapSystemReadModel.changeset(sap_system, %{ + tenant: tenant, + db_host: db_host, + health: health, + deregistered_at: nil + }) + + Ecto.Multi.update(multi, :sap_system, changeset) + end + ) + + project( + %ApplicationInstanceDeregistered{ + instance_number: instance_number, + host_id: host_id, + sap_system_id: sap_system_id + }, + fn multi -> + deregistered_instance = + Repo.get_by(ApplicationInstanceReadModel, + sap_system_id: sap_system_id, + instance_number: instance_number, + host_id: host_id + ) + + Ecto.Multi.delete(multi, :application_instance, deregistered_instance) + end + ) + + project( + %SapSystemUpdated{ + sap_system_id: sap_system_id, + ensa_version: ensa_version + }, + fn multi -> + changeset = + 
SapSystemReadModel.changeset(%SapSystemReadModel{id: sap_system_id}, %{ + ensa_version: ensa_version + }) + + Ecto.Multi.update(multi, :sap_system, changeset) + end + ) + @sap_systems_topic "monitoring:sap_systems" @impl true + @spec after_update(any, any, any) :: :ok | {:error, any} def after_update( %SapSystemRegistered{}, _, @@ -184,6 +264,81 @@ defmodule Trento.SapSystemProjector do ) end + @impl true + def after_update( + %SapSystemDeregistered{sap_system_id: sap_system_id}, + _, + _ + ) do + %SapSystemReadModel{sid: sid} = Repo.get!(SapSystemReadModel, sap_system_id) + + TrentoWeb.Endpoint.broadcast( + @sap_systems_topic, + "sap_system_deregistered", + SapSystemView.render("sap_system_deregistered.json", + id: sap_system_id, + sid: sid + ) + ) + end + + @impl true + def after_update( + %SapSystemRestored{sap_system_id: sap_system_id}, + _, + _ + ) do + sap_system = Repo.get!(SapSystemReadModel, sap_system_id) + + TrentoWeb.Endpoint.broadcast( + @sap_systems_topic, + "sap_system_registered", + SapSystemView.render("sap_system_registered.json", sap_system: sap_system) + ) + end + + @impl true + def after_update( + %ApplicationInstanceDeregistered{ + instance_number: instance_number, + host_id: host_id, + sap_system_id: sap_system_id + }, + _, + _ + ) do + %SapSystemReadModel{ + sid: sid + } = Repo.get!(SapSystemReadModel, sap_system_id) + + TrentoWeb.Endpoint.broadcast( + @sap_systems_topic, + "application_instance_deregistered", + SapSystemView.render("instance_deregistered.json", + sap_system_id: sap_system_id, + instance_number: instance_number, + host_id: host_id, + sid: sid + ) + ) + end + + @impl true + def after_update( + %SapSystemUpdated{sap_system_id: sap_system_id, ensa_version: ensa_version}, + _, + _ + ) do + TrentoWeb.Endpoint.broadcast( + @sap_systems_topic, + "sap_system_updated", + SapSystemView.render("sap_system_updated.json", + id: sap_system_id, + ensa_version: ensa_version + ) + ) + end + @impl true def after_update(_, _, _), do: :ok end diff --git a/lib/trento/application/read_models/application_instance_read_model.ex b/lib/trento/application/read_models/application_instance_read_model.ex index dde2c376b8..562cd4c0e1 100644 --- a/lib/trento/application/read_models/application_instance_read_model.ex +++ b/lib/trento/application/read_models/application_instance_read_model.ex @@ -27,7 +27,10 @@ defmodule Trento.ApplicationInstanceReadModel do field :host_id, Ecto.UUID, primary_key: true field :health, Ecto.Enum, values: Health.values() - has_one :host, HostReadModel, references: :host_id, foreign_key: :id + has_one :host, HostReadModel, + references: :host_id, + foreign_key: :id, + where: [deregistered_at: nil] end @spec changeset(t() | Ecto.Changeset.t(), map) :: Ecto.Changeset.t() diff --git a/lib/trento/application/read_models/cluster_read_model.ex b/lib/trento/application/read_models/cluster_read_model.ex index f271691bd6..06e36d0ee2 100644 --- a/lib/trento/application/read_models/cluster_read_model.ex +++ b/lib/trento/application/read_models/cluster_read_model.ex @@ -18,6 +18,7 @@ defmodule Trento.ClusterReadModel do schema "clusters" do field :name, :string, default: "" field :sid, :string + field :additional_sids, {:array, :string}, default: [] field :provider, Ecto.Enum, values: Provider.values() field :type, Ecto.Enum, values: ClusterType.values() field :selected_checks, {:array, :string}, default: [] @@ -30,6 +31,8 @@ defmodule Trento.ClusterReadModel do # Virtually enriched fields field :cib_last_written, :string, virtual: true + + field :deregistered_at, 
:utc_datetime_usec end @spec changeset(t() | Ecto.Changeset.t(), map) :: Ecto.Changeset.t() diff --git a/lib/trento/application/read_models/database_instance_read_model.ex b/lib/trento/application/read_models/database_instance_read_model.ex index c5f38c5aa0..bdf767baa5 100644 --- a/lib/trento/application/read_models/database_instance_read_model.ex +++ b/lib/trento/application/read_models/database_instance_read_model.ex @@ -30,7 +30,10 @@ defmodule Trento.DatabaseInstanceReadModel do field :system_replication_status, :string, default: "" field :health, Ecto.Enum, values: Health.values() - has_one :host, HostReadModel, references: :host_id, foreign_key: :id + has_one :host, HostReadModel, + references: :host_id, + foreign_key: :id, + where: [deregistered_at: nil] end @spec changeset(t() | Ecto.Changeset.t(), map) :: Ecto.Changeset.t() diff --git a/lib/trento/application/read_models/database_read_model.ex b/lib/trento/application/read_models/database_read_model.ex index cb8543b148..473a059006 100644 --- a/lib/trento/application/read_models/database_read_model.ex +++ b/lib/trento/application/read_models/database_read_model.ex @@ -25,6 +25,8 @@ defmodule Trento.DatabaseReadModel do references: :id, foreign_key: :sap_system_id, preload_order: [asc: :instance_number, asc: :host_id] + + field :deregistered_at, :utc_datetime_usec end @spec changeset(t() | Ecto.Changeset.t(), map) :: Ecto.Changeset.t() diff --git a/lib/trento/application/read_models/host_read_model.ex b/lib/trento/application/read_models/host_read_model.ex index cb79ce7363..f596daaa89 100644 --- a/lib/trento/application/read_models/host_read_model.ex +++ b/lib/trento/application/read_models/host_read_model.ex @@ -31,6 +31,10 @@ defmodule Trento.HostReadModel do references: :id, foreign_key: :host_id, preload_order: [desc: :identifier] + + field :last_heartbeat_timestamp, :utc_datetime_usec, virtual: true + + field :deregistered_at, :utc_datetime_usec end @spec changeset(t() | Ecto.Changeset.t(), map) :: Ecto.Changeset.t() diff --git a/lib/trento/application/read_models/sap_system_read_model.ex b/lib/trento/application/read_models/sap_system_read_model.ex index b886d2e380..8727860bb4 100644 --- a/lib/trento/application/read_models/sap_system_read_model.ex +++ b/lib/trento/application/read_models/sap_system_read_model.ex @@ -7,6 +7,7 @@ defmodule Trento.SapSystemReadModel do import Ecto.Changeset + require Trento.Domain.Enums.EnsaVersion, as: EnsaVersion require Trento.Domain.Enums.Health, as: Health alias Trento.{ @@ -23,6 +24,7 @@ defmodule Trento.SapSystemReadModel do field :tenant, :string field :db_host, :string field :health, Ecto.Enum, values: Health.values() + field :ensa_version, Ecto.Enum, values: EnsaVersion.values(), default: EnsaVersion.no_ensa() has_many :database_instances, DatabaseInstanceReadModel, references: :id, @@ -35,6 +37,8 @@ defmodule Trento.SapSystemReadModel do preload_order: [asc: :instance_number, asc: :host_id] has_many :tags, Trento.Tag, foreign_key: :resource_id + + field :deregistered_at, :utc_datetime_usec end @spec changeset(t() | Ecto.Changeset.t(), map) :: Ecto.Changeset.t() diff --git a/lib/trento/application/usecases/clusters/clusters.ex b/lib/trento/application/usecases/clusters/clusters.ex index f87d74eb4a..3ba16c6fbd 100644 --- a/lib/trento/application/usecases/clusters/clusters.ex +++ b/lib/trento/application/usecases/clusters/clusters.ex @@ -32,7 +32,12 @@ defmodule Trento.Clusters do @spec request_checks_execution(String.t()) :: :ok | {:error, any} def request_checks_execution(cluster_id) 
do - case Repo.get(ClusterReadModel, cluster_id) do + query = + from(c in ClusterReadModel, + where: is_nil(c.deregistered_at) and c.id == ^cluster_id + ) + + case Repo.one(query) do %ClusterReadModel{} = cluster -> Logger.debug("Requesting checks execution, cluster: #{cluster_id}") @@ -48,13 +53,26 @@ defmodule Trento.Clusters do @spec get_all_clusters :: [ClusterReadModel.t()] def get_all_clusters do from(c in ClusterReadModel, - order_by: [asc: c.name], - preload: [:tags] + order_by: [asc: c.name, asc: c.id], + preload: [:tags], + where: is_nil(c.deregistered_at) ) |> enrich_cluster_model_query() |> Repo.all() end + @spec get_cluster_id_by_host_id(String.t()) :: String.t() | nil + def get_cluster_id_by_host_id(host_id) do + query = + from c in ClusterReadModel, + join: h in HostReadModel, + on: h.cluster_id == c.id, + where: h.id == ^host_id, + select: c.id + + Repo.one(query) + end + @spec enrich_cluster_model(ClusterReadModel.t()) :: ClusterReadModel.t() def enrich_cluster_model(%ClusterReadModel{id: id} = cluster) do case Repo.get(ClusterEnrichmentData, id) do @@ -71,7 +89,9 @@ defmodule Trento.Clusters do query = from(c in ClusterReadModel, select: c.id, - where: c.type == ^ClusterType.hana_scale_up() or c.type == ^ClusterType.hana_scale_out() + where: + (c.type == ^ClusterType.hana_scale_up() or + c.type == ^ClusterType.hana_scale_out()) and is_nil(c.deregistered_at) ) query @@ -121,7 +141,7 @@ defmodule Trento.Clusters do Repo.all( from h in HostReadModel, select: %{host_id: h.id}, - where: h.cluster_id == ^cluster_id + where: h.cluster_id == ^cluster_id and is_nil(h.deregistered_at) ) Checks.request_execution( diff --git a/lib/trento/application/usecases/hosts/hosts.ex b/lib/trento/application/usecases/hosts/hosts.ex index 1330425aa3..3befbc8740 100644 --- a/lib/trento/application/usecases/hosts/hosts.ex +++ b/lib/trento/application/usecases/hosts/hosts.ex @@ -8,23 +8,40 @@ defmodule Trento.Hosts do require Logger alias Trento.{ + Heartbeat, HostReadModel, + Repo, SlesSubscriptionReadModel } - alias Trento.Domain.Commands.SelectHostChecks + alias Trento.Support.DateService + + alias Trento.Domain.Commands.{ + RequestHostDeregistration, + SelectHostChecks + } alias Trento.Repo @spec get_all_hosts :: [HostReadModel.t()] def get_all_hosts do HostReadModel - |> where([h], not is_nil(h.hostname)) + |> where([h], not is_nil(h.hostname) and is_nil(h.deregistered_at)) |> order_by(asc: :hostname) + |> enrich_host_read_model_query() |> Repo.all() |> Repo.preload([:sles_subscriptions, :tags]) end + @spec get_host_by_id(Ecto.UUID.t()) :: HostReadModel.t() | nil + def get_host_by_id(id) do + HostReadModel + |> where([h], h.id == ^id and is_nil(h.deregistered_at)) + |> enrich_host_read_model_query() + |> Repo.one() + |> Repo.preload([:sles_subscriptions, :tags]) + end + @spec get_all_sles_subscriptions :: non_neg_integer() def get_all_sles_subscriptions do query = @@ -50,6 +67,21 @@ defmodule Trento.Hosts do end end + @spec deregister_host(Ecto.UUID.t(), DateService) :: + :ok | {:error, :host_alive} | {:error, :host_not_registered} + def deregister_host(host_id, date_service \\ DateService) do + commanded().dispatch( + RequestHostDeregistration.new!(%{host_id: host_id, requested_at: date_service.utc_now()}) + ) + end + + @spec enrich_host_read_model_query(Ecto.Query.t()) :: Ecto.Query.t() + defp enrich_host_read_model_query(query) do + query + |> join(:left, [h], hb in Heartbeat, on: type(h.id, :string) == hb.agent_id) + |> select_merge([h, hb], %{last_heartbeat_timestamp: hb.timestamp}) + end 
+ defp commanded, do: Application.fetch_env!(:trento, Trento.Commanded)[:adapter] end diff --git a/lib/trento/application/usecases/sap_systems/health_summary_service.ex b/lib/trento/application/usecases/sap_systems/health_summary_service.ex index 47fd2aec11..e6b54b0bcc 100644 --- a/lib/trento/application/usecases/sap_systems/health_summary_service.ex +++ b/lib/trento/application/usecases/sap_systems/health_summary_service.ex @@ -5,7 +5,7 @@ defmodule Trento.SapSystems.HealthSummaryService do import Ecto.Query - require Trento.Domain.Enums.ClusterType, as: ClusterType + require Trento.Domain.Enums.Health, as: HealthEnum alias Trento.{ ApplicationInstanceReadModel, @@ -19,11 +19,10 @@ defmodule Trento.SapSystems.HealthSummaryService do alias Trento.Repo - @type instance_list :: [DatabaseInstanceReadModel.t() | ApplicationInstanceReadModel.t()] - @spec get_health_summary :: [map()] def get_health_summary do SapSystemReadModel + |> where([s], is_nil(s.deregistered_at)) |> order_by(asc: :sid) |> Repo.all() |> Repo.preload(application_instances: :host) @@ -46,8 +45,10 @@ defmodule Trento.SapSystems.HealthSummaryService do sid: sid, sapsystem_health: health, database_health: compute_database_health(database_instances), - clusters_health: compute_clusters_health(all_instances), + application_cluster_health: compute_cluster_health(application_instances), + database_cluster_health: compute_cluster_health(database_instances), hosts_health: compute_hosts_health(all_instances), + application_instances: application_instances, database_instances: database_instances } end @@ -59,48 +60,26 @@ defmodule Trento.SapSystems.HealthSummaryService do |> HealthService.compute_aggregated_health() end - @spec compute_clusters_health(instance_list) :: - Health.t() - defp compute_clusters_health(instances) do - instances - |> reject_unclustered_instances() - |> clusters_from_instance() - |> keep_only_hana_scale_up_clusters() - |> health_from_cluster() - |> HealthService.compute_aggregated_health() - end - - @spec clusters_from_instance(instance_list) :: [ClusterReadModel.t()] - defp clusters_from_instance(instances) do - instances - |> Enum.filter(fn %{host: host} -> host end) - |> Enum.map(fn %{host: %{cluster_id: cluster_id}} -> cluster_id end) - |> Enum.uniq() - |> Enum.map(fn cluster_id -> Repo.get!(ClusterReadModel, cluster_id) end) + @spec compute_cluster_health( + [DatabaseInstanceReadModel.t()] + | [ApplicationInstanceReadModel.t()] + ) :: Health.t() + defp compute_cluster_health(instances) do + cluster_id = + Enum.find_value(instances, nil, fn + %{host: %{cluster_id: nil}} -> false + %{host: %{cluster_id: cluster_id}} -> cluster_id + _ -> false + end) + + case cluster_id do + nil -> HealthEnum.unknown() + cluster_id -> ClusterReadModel |> Repo.get!(cluster_id) |> Map.get(:health) + end end - @spec health_from_cluster([ClusterReadModel.t()]) :: [String.t()] - defp health_from_cluster(clusters) do - Enum.map(clusters, fn %ClusterReadModel{health: health} -> health end) - end - - @spec reject_unclustered_instances(instance_list) :: instance_list - defp reject_unclustered_instances(instances) do - Enum.reject(instances, fn - %{host: %{cluster_id: nil}} -> true - _ -> false - end) - end - - @spec keep_only_hana_scale_up_clusters([ClusterReadModel.t()]) :: [ClusterReadModel.t()] - defp keep_only_hana_scale_up_clusters(clusters) do - Enum.filter(clusters, fn - %ClusterReadModel{type: ClusterType.hana_scale_up()} -> true - _ -> false - end) - end - - @spec compute_hosts_health(instance_list) :: Health.t() + @spec 
compute_hosts_health([DatabaseInstanceReadModel.t() | ApplicationInstanceReadModel.t()]) :: + Health.t() defp compute_hosts_health(instances) do instances |> Enum.filter(fn %{host: host} -> host end) diff --git a/lib/trento/application/usecases/sap_systems/sap_systems.ex b/lib/trento/application/usecases/sap_systems/sap_systems.ex index 59af34aecf..ff4406119c 100644 --- a/lib/trento/application/usecases/sap_systems/sap_systems.ex +++ b/lib/trento/application/usecases/sap_systems/sap_systems.ex @@ -6,6 +6,8 @@ defmodule Trento.SapSystems do import Ecto.Query alias Trento.{ + ApplicationInstanceReadModel, + DatabaseInstanceReadModel, DatabaseReadModel, SapSystemReadModel } @@ -15,19 +17,39 @@ defmodule Trento.SapSystems do @spec get_all_sap_systems :: [SapSystemReadModel.t()] def get_all_sap_systems do SapSystemReadModel + |> where([s], is_nil(s.deregistered_at)) |> order_by(asc: :sid) |> Repo.all() - |> Repo.preload(:application_instances) - |> Repo.preload(:database_instances) - |> Repo.preload(:tags) + |> Repo.preload([ + :application_instances, + :database_instances, + :tags + ]) end - @spec get_all_databases :: [map] + @spec get_all_databases :: [DatabaseReadModel.t()] def get_all_databases do DatabaseReadModel + |> where([d], is_nil(d.deregistered_at)) |> order_by(asc: :sid) |> Repo.all() - |> Repo.preload(:database_instances) - |> Repo.preload(:tags) + |> Repo.preload([ + :database_instances, + :tags + ]) + end + + @spec get_application_instances_by_host_id(String.t()) :: [ApplicationInstanceReadModel.t()] + def get_application_instances_by_host_id(host_id) do + ApplicationInstanceReadModel + |> where([a], a.host_id == ^host_id) + |> Repo.all() + end + + @spec get_database_instances_by_host_id(String.t()) :: [DatabaseInstanceReadModel.t()] + def get_database_instances_by_host_id(host_id) do + DatabaseInstanceReadModel + |> where([d], d.host_id == ^host_id) + |> Repo.all() end end diff --git a/lib/trento/domain/cluster/cluster.ex b/lib/trento/domain/cluster/cluster.ex index 6c9c380df6..64f46c3f00 100644 --- a/lib/trento/domain/cluster/cluster.ex +++ b/lib/trento/domain/cluster/cluster.ex @@ -7,10 +7,16 @@ defmodule Trento.Domain.Cluster do SAP workloads. Each deployed cluster is registered as a new aggregate entry, meaning that all the hosts belonging - to the same cluster are part of the same stream. A cluster is registered first time/details updated afterwards - only by cluster discovery messages coming from the **designated controller** node. Once a cluster is - registered other hosts can be added receiving discovery messages coming from other nodes. All the hosts - are listed in the `hosts` field. + to the same cluster are part of the same stream. + + A new cluster is registered when a cluster discovery message from any of the nodes of the cluster is received. + + The cluster details will be populated if the received discovery message is coming from the **designated controller** node. + Otherwise the cluster details are left as unknown, and filled once a message from the **designated controller** is received. + Once a cluster is registered, other hosts will be added when cluster discovery messages from them are received. + + All the hosts are listed in the `hosts` field. 
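+
+  A cluster is deregistered once all of its hosts have been deregistered, and it is restored
+  as soon as a discovery message from one of its hosts is received again.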
+ The cluster aggregate stores and updates information coming in the cluster discovery messages such as: @@ -55,6 +61,7 @@ defmodule Trento.Domain.Cluster do alias Commanded.Aggregate.Multi alias Trento.Domain.{ + AscsErsClusterDetails, Cluster, HanaClusterDetails, HealthService @@ -62,6 +69,7 @@ defmodule Trento.Domain.Cluster do alias Trento.Domain.Commands.{ CompleteChecksExecution, + DeregisterClusterHost, RegisterClusterHost, RollUpCluster, SelectChecks @@ -73,14 +81,18 @@ defmodule Trento.Domain.Cluster do ChecksExecutionStarted, ChecksSelected, ClusterChecksHealthChanged, + ClusterDeregistered, ClusterDetailsUpdated, ClusterDiscoveredHealthChanged, ClusterHealthChanged, ClusterRegistered, + ClusterRestored, ClusterRolledUp, ClusterRollUpRequested, + ClusterTombstoned, HostAddedToCluster, - HostChecksExecutionCompleted + HostChecksExecutionCompleted, + HostRemovedFromCluster } @required_fields [] @@ -98,6 +110,7 @@ defmodule Trento.Domain.Cluster do field :name, :string field :type, Ecto.Enum, values: ClusterType.values() field :sid, :string + field :additional_sids, {:array, :string}, default: [] field :resources_number, :integer field :hosts_number, :integer field :provider, Ecto.Enum, values: Provider.values() @@ -107,13 +120,23 @@ defmodule Trento.Domain.Cluster do field :hosts, {:array, :string}, default: [] field :selected_checks, {:array, :string}, default: [] field :rolling_up, :boolean, default: false - embeds_one :details, HanaClusterDetails + field :deregistered_at, :utc_datetime_usec, default: nil + + field :details, PolymorphicEmbed, + types: [ + hana_scale_up: [ + module: HanaClusterDetails, + identify_by_fields: [:system_replication_mode] + ], + ascs_ers: [module: AscsErsClusterDetails, identify_by_fields: [:sap_systems]] + ], + on_replace: :update end def execute(%Cluster{rolling_up: true}, _), do: {:error, :cluster_rolling_up} - # When a DC cluster node is registered for the first time, a cluster is registered - # and the host of the node is added to the cluster + # When a DC node is discovered, a cluster is registered and the host is added to the cluster. + # The cluster details are populated with the information coming from the DC node. def execute( %Cluster{cluster_id: nil}, %RegisterClusterHost{ @@ -122,6 +145,7 @@ defmodule Trento.Domain.Cluster do name: name, type: type, sid: sid, + additional_sids: additional_sids, provider: provider, resources_number: resources_number, hosts_number: hosts_number, @@ -136,6 +160,7 @@ defmodule Trento.Domain.Cluster do name: name, type: type, sid: sid, + additional_sids: additional_sids, provider: provider, resources_number: resources_number, hosts_number: hosts_number, @@ -149,10 +174,93 @@ defmodule Trento.Domain.Cluster do ] end - # If no DC node was received yet, no cluster was registered. - def execute(%Cluster{cluster_id: nil}, %RegisterClusterHost{designated_controller: false}), - do: {:error, :cluster_not_found} + # When a non-DC node is discovered, a cluster is registered and the host is added to the cluster. + # The cluster details are left as unknown, and filled once a message from the DC node is received. 
+ def execute(%Cluster{cluster_id: nil}, %RegisterClusterHost{ + cluster_id: cluster_id, + name: name, + host_id: host_id, + designated_controller: false + }) do + [ + %ClusterRegistered{ + cluster_id: cluster_id, + name: name, + type: :unknown, + sid: nil, + additional_sids: [], + provider: :unknown, + resources_number: nil, + hosts_number: nil, + details: nil, + health: :unknown + }, + %HostAddedToCluster{ + cluster_id: cluster_id, + host_id: host_id + } + ] + end + + def execute(%Cluster{cluster_id: nil}, _), + do: {:error, :cluster_not_registered} + + # Restoration, when a RegisterClusterHost command is received for a deregistered Cluster + # the cluster is restored, the host is added to cluster and if the host is a DC + # cluster details are updated + def execute( + %Cluster{deregistered_at: deregistered_at, cluster_id: cluster_id}, + %RegisterClusterHost{ + host_id: host_id, + designated_controller: false + } + ) + when not is_nil(deregistered_at) do + [ + %ClusterRestored{cluster_id: cluster_id}, + %HostAddedToCluster{ + cluster_id: cluster_id, + host_id: host_id + } + ] + end + + def execute( + %Cluster{deregistered_at: deregistered_at, cluster_id: cluster_id} = cluster, + %RegisterClusterHost{ + host_id: host_id, + designated_controller: true + } = command + ) + when not is_nil(deregistered_at) do + cluster + |> Multi.new() + |> Multi.execute(fn _ -> + %ClusterRestored{cluster_id: cluster_id} + end) + |> Multi.execute(fn _ -> + %HostAddedToCluster{ + cluster_id: cluster_id, + host_id: host_id + } + end) + |> maybe_update_cluster(command) + end + + def execute( + %Cluster{cluster_id: cluster_id} = snapshot, + %RollUpCluster{} + ) do + %ClusterRollUpRequested{ + cluster_id: cluster_id, + snapshot: snapshot + } + end + + def execute(%Cluster{deregistered_at: deregistered_at}, _) when not is_nil(deregistered_at), + do: {:error, :cluster_not_registered} + # If the cluster is already registered, and the host was never discovered before, it is added to the cluster. def execute( %Cluster{} = cluster, %RegisterClusterHost{ @@ -163,6 +271,9 @@ defmodule Trento.Domain.Cluster do maybe_emit_host_added_to_cluster_event(cluster, host_id) end + # When a DC node is discovered, if the cluster is already registered, + # the cluster details are updated with the information coming from the DC node. + # The cluster discovered health is updated based on the new details. 
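+  # The update is funnelled through maybe_update_cluster/2, which emits the corresponding
+  # events only when the incoming values differ from the current aggregate state.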
def execute( %Cluster{} = cluster, %RegisterClusterHost{ @@ -171,16 +282,9 @@ defmodule Trento.Domain.Cluster do ) do cluster |> Multi.new() - |> Multi.execute(fn cluster -> maybe_emit_cluster_details_updated_event(cluster, command) end) - |> Multi.execute(fn cluster -> - maybe_emit_cluster_discovered_health_changed_event(cluster, command) - end) - |> Multi.execute(fn cluster -> maybe_emit_cluster_health_changed_event(cluster) end) + |> maybe_update_cluster(command) end - def execute(%Cluster{cluster_id: nil}, _), - do: {:error, :cluster_not_found} - # Checks selected def execute( %Cluster{ @@ -218,13 +322,21 @@ defmodule Trento.Domain.Cluster do end def execute( - %Cluster{cluster_id: cluster_id} = snapshot, - %RollUpCluster{} + %Cluster{cluster_id: cluster_id} = cluster, + %DeregisterClusterHost{ + host_id: host_id, + cluster_id: cluster_id + } = command ) do - %ClusterRollUpRequested{ - cluster_id: cluster_id, - snapshot: snapshot - } + cluster + |> Multi.new() + |> Multi.execute(fn _ -> + %HostRemovedFromCluster{ + cluster_id: cluster_id, + host_id: host_id + } + end) + |> Multi.execute(&maybe_emit_cluster_deregistered_event(&1, command)) end def apply( @@ -234,6 +346,7 @@ defmodule Trento.Domain.Cluster do name: name, type: type, sid: sid, + additional_sids: additional_sids, provider: provider, resources_number: resources_number, hosts_number: hosts_number, @@ -247,6 +360,7 @@ defmodule Trento.Domain.Cluster do name: name, type: type, sid: sid, + additional_sids: additional_sids, provider: provider, resources_number: resources_number, hosts_number: hosts_number, @@ -280,6 +394,7 @@ defmodule Trento.Domain.Cluster do name: name, type: type, sid: sid, + additional_sids: additional_sids, provider: provider, resources_number: resources_number, hosts_number: hosts_number, @@ -291,6 +406,7 @@ defmodule Trento.Domain.Cluster do | name: name, type: type, sid: sid, + additional_sids: additional_sids, provider: provider, resources_number: resources_number, hosts_number: hosts_number, @@ -336,8 +452,26 @@ defmodule Trento.Domain.Cluster do snapshot end + def apply(%Cluster{hosts: hosts} = cluster, %HostRemovedFromCluster{ + host_id: host_id + }) do + %Cluster{cluster | hosts: List.delete(hosts, host_id)} + end + + # Deregistration + def apply(%Cluster{} = cluster, %ClusterDeregistered{deregistered_at: deregistered_at}) do + %Cluster{cluster | deregistered_at: deregistered_at} + end + + # Restoration + def apply(%Cluster{} = cluster, %ClusterRestored{}) do + %Cluster{cluster | deregistered_at: nil} + end + def apply(cluster, %legacy_event{}) when legacy_event in @legacy_events, do: cluster + def apply(%Cluster{} = cluster, %ClusterTombstoned{}), do: cluster + defp maybe_emit_host_added_to_cluster_event( %Cluster{cluster_id: cluster_id, hosts: hosts}, host_id @@ -354,11 +488,25 @@ defmodule Trento.Domain.Cluster do end end + defp maybe_update_cluster( + multi, + %RegisterClusterHost{host_id: host_id} = command + ) do + multi + |> Multi.execute(fn cluster -> maybe_emit_host_added_to_cluster_event(cluster, host_id) end) + |> Multi.execute(fn cluster -> maybe_emit_cluster_details_updated_event(cluster, command) end) + |> Multi.execute(fn cluster -> + maybe_emit_cluster_discovered_health_changed_event(cluster, command) + end) + |> Multi.execute(fn cluster -> maybe_emit_cluster_health_changed_event(cluster) end) + end + defp maybe_emit_cluster_details_updated_event( %Cluster{ name: name, type: type, sid: sid, + additional_sids: additional_sids, provider: provider, resources_number: resources_number, 
hosts_number: hosts_number, @@ -368,6 +516,7 @@ defmodule Trento.Domain.Cluster do name: name, type: type, sid: sid, + additional_sids: additional_sids, provider: provider, resources_number: resources_number, hosts_number: hosts_number, @@ -384,6 +533,7 @@ defmodule Trento.Domain.Cluster do name: name, type: type, sid: sid, + additional_sids: additional_sids, provider: provider, resources_number: resources_number, hosts_number: hosts_number, @@ -395,6 +545,7 @@ defmodule Trento.Domain.Cluster do name: name, type: type, sid: sid, + additional_sids: additional_sids, provider: provider, resources_number: resources_number, hosts_number: hosts_number, @@ -434,6 +585,21 @@ defmodule Trento.Domain.Cluster do } end + defp maybe_emit_cluster_deregistered_event( + %Cluster{cluster_id: cluster_id, hosts: []}, + %DeregisterClusterHost{ + cluster_id: cluster_id, + deregistered_at: deregistered_at + } + ) do + [ + %ClusterDeregistered{cluster_id: cluster_id, deregistered_at: deregistered_at}, + %ClusterTombstoned{cluster_id: cluster_id} + ] + end + + defp maybe_emit_cluster_deregistered_event(_, _), do: nil + defp maybe_add_checks_health(healths, _, []), do: healths defp maybe_add_checks_health(healths, checks_health, _), do: [checks_health | healths] diff --git a/lib/trento/domain/cluster/commands/deregister_cluster_host.ex b/lib/trento/domain/cluster/commands/deregister_cluster_host.ex new file mode 100644 index 0000000000..d3bd8f01e4 --- /dev/null +++ b/lib/trento/domain/cluster/commands/deregister_cluster_host.ex @@ -0,0 +1,14 @@ +defmodule Trento.Domain.Commands.DeregisterClusterHost do + @moduledoc """ + Deregister a host from a cluster + """ + @required_fields :all + + use Trento.Command + + defcommand do + field :host_id, Ecto.UUID + field :cluster_id, Ecto.UUID + field :deregistered_at, :utc_datetime_usec + end +end diff --git a/lib/trento/domain/cluster/commands/register_cluster_host.ex b/lib/trento/domain/cluster/commands/register_cluster_host.ex index 86f18a5b2a..2e1e9d70e6 100644 --- a/lib/trento/domain/cluster/commands/register_cluster_host.ex +++ b/lib/trento/domain/cluster/commands/register_cluster_host.ex @@ -18,7 +18,10 @@ defmodule Trento.Domain.Commands.RegisterClusterHost do require Trento.Domain.Enums.ClusterType, as: ClusterType require Trento.Domain.Enums.Health, as: Health - alias Trento.Domain.HanaClusterDetails + alias Trento.Domain.{ + AscsErsClusterDetails, + HanaClusterDetails + } defcommand do field :cluster_id, Ecto.UUID @@ -26,6 +29,7 @@ defmodule Trento.Domain.Commands.RegisterClusterHost do field :name, :string field :type, Ecto.Enum, values: ClusterType.values() field :sid, :string + field :additional_sids, {:array, :string} field :provider, Ecto.Enum, values: Provider.values() field :designated_controller, :boolean field :resources_number, :integer @@ -33,6 +37,14 @@ defmodule Trento.Domain.Commands.RegisterClusterHost do field :discovered_health, Ecto.Enum, values: Health.values() field :cib_last_written, :string - embeds_one :details, HanaClusterDetails + field :details, PolymorphicEmbed, + types: [ + hana_scale_up: [ + module: HanaClusterDetails, + identify_by_fields: [:system_replication_mode] + ], + ascs_ers: [module: AscsErsClusterDetails, identify_by_fields: [:sap_systems]] + ], + on_replace: :update end end diff --git a/lib/trento/domain/cluster/events/cluster_deregistered.ex b/lib/trento/domain/cluster/events/cluster_deregistered.ex new file mode 100644 index 0000000000..992065daca --- /dev/null +++ b/lib/trento/domain/cluster/events/cluster_deregistered.ex 
@@ -0,0 +1,12 @@ +defmodule Trento.Domain.Events.ClusterDeregistered do + @moduledoc """ + This event is emitted when a cluster is deregistered. + """ + + use Trento.Event + + defevent do + field :cluster_id, Ecto.UUID + field :deregistered_at, :utc_datetime_usec + end +end diff --git a/lib/trento/domain/cluster/events/cluster_details_updated.ex b/lib/trento/domain/cluster/events/cluster_details_updated.ex index 520c90ff28..970dca443e 100644 --- a/lib/trento/domain/cluster/events/cluster_details_updated.ex +++ b/lib/trento/domain/cluster/events/cluster_details_updated.ex @@ -8,17 +8,29 @@ defmodule Trento.Domain.Events.ClusterDetailsUpdated do require Trento.Domain.Enums.Provider, as: Provider require Trento.Domain.Enums.ClusterType, as: ClusterType - alias Trento.Domain.HanaClusterDetails + alias Trento.Domain.{ + AscsErsClusterDetails, + HanaClusterDetails + } defevent do field :cluster_id, Ecto.UUID field :name, :string field :type, Ecto.Enum, values: ClusterType.values() field :sid, :string + field :additional_sids, {:array, :string}, default: [] field :provider, Ecto.Enum, values: Provider.values() field :resources_number, :integer field :hosts_number, :integer - embeds_one :details, HanaClusterDetails + field :details, PolymorphicEmbed, + types: [ + hana_scale_up: [ + module: HanaClusterDetails, + identify_by_fields: [:system_replication_mode] + ], + ascs_ers: [module: AscsErsClusterDetails, identify_by_fields: [:sap_systems]] + ], + on_replace: :update end end diff --git a/lib/trento/domain/cluster/events/cluster_registered.ex b/lib/trento/domain/cluster/events/cluster_registered.ex index 48403216c0..a77e7d7014 100644 --- a/lib/trento/domain/cluster/events/cluster_registered.ex +++ b/lib/trento/domain/cluster/events/cluster_registered.ex @@ -9,18 +9,30 @@ defmodule Trento.Domain.Events.ClusterRegistered do require Trento.Domain.Enums.ClusterType, as: ClusterType require Trento.Domain.Enums.Health, as: Health - alias Trento.Domain.HanaClusterDetails + alias Trento.Domain.{ + AscsErsClusterDetails, + HanaClusterDetails + } defevent do field :cluster_id, Ecto.UUID field :name, :string field :type, Ecto.Enum, values: ClusterType.values() field :sid, :string + field :additional_sids, {:array, :string}, default: [] field :provider, Ecto.Enum, values: Provider.values() field :resources_number, :integer field :hosts_number, :integer field :health, Ecto.Enum, values: Health.values() - embeds_one :details, HanaClusterDetails + field :details, PolymorphicEmbed, + types: [ + hana_scale_up: [ + module: HanaClusterDetails, + identify_by_fields: [:system_replication_mode] + ], + ascs_ers: [module: AscsErsClusterDetails, identify_by_fields: [:sap_systems]] + ], + on_replace: :update end end diff --git a/lib/trento/domain/cluster/events/cluster_restored.ex b/lib/trento/domain/cluster/events/cluster_restored.ex new file mode 100644 index 0000000000..096d5805f0 --- /dev/null +++ b/lib/trento/domain/cluster/events/cluster_restored.ex @@ -0,0 +1,11 @@ +defmodule Trento.Domain.Events.ClusterRestored do + @moduledoc """ + This event is emitted after a cluster is restored from a deregistered state + """ + + use Trento.Event + + defevent do + field :cluster_id, Ecto.UUID + end +end diff --git a/lib/trento/domain/cluster/events/cluster_tombstoned.ex b/lib/trento/domain/cluster/events/cluster_tombstoned.ex new file mode 100644 index 0000000000..9104b252b4 --- /dev/null +++ b/lib/trento/domain/cluster/events/cluster_tombstoned.ex @@ -0,0 +1,11 @@ +defmodule Trento.Domain.Events.ClusterTombstoned do + 
@moduledoc """ + This event is emitted after a successful cluster deregistration, to tombstone and stop the cluster aggregate + """ + + use Trento.Event + + defevent do + field :cluster_id, Ecto.UUID + end +end diff --git a/lib/trento/domain/cluster/events/host_removed_from_cluster.ex b/lib/trento/domain/cluster/events/host_removed_from_cluster.ex new file mode 100644 index 0000000000..d2e5d14628 --- /dev/null +++ b/lib/trento/domain/cluster/events/host_removed_from_cluster.ex @@ -0,0 +1,13 @@ +defmodule Trento.Domain.Events.HostRemovedFromCluster do + @moduledoc """ + This event is emitted when a host is removed from a cluster. + """ + + use Trento.Event + + defevent do + field :host_id, Ecto.UUID + field :cluster_id, Ecto.UUID + field :deregistered_at, :utc_datetime_usec + end +end diff --git a/lib/trento/domain/enums/ascs_ers_cluster_role.ex b/lib/trento/domain/enums/ascs_ers_cluster_role.ex new file mode 100644 index 0000000000..dfbf400a3e --- /dev/null +++ b/lib/trento/domain/enums/ascs_ers_cluster_role.ex @@ -0,0 +1,7 @@ +defmodule Trento.Domain.Enums.AscsErsClusterRole do + @moduledoc """ + Type that represents the ASCS/ERS cluster roles. + """ + + use Trento.Support.Enum, values: [:ascs, :ers] +end diff --git a/lib/trento/domain/enums/cluster_type.ex b/lib/trento/domain/enums/cluster_type.ex index df76941b66..4bcbae5efd 100644 --- a/lib/trento/domain/enums/cluster_type.ex +++ b/lib/trento/domain/enums/cluster_type.ex @@ -3,5 +3,5 @@ defmodule Trento.Domain.Enums.ClusterType do Type that represents the supported cluster types. """ - use Trento.Support.Enum, values: [:hana_scale_up, :hana_scale_out, :unknown] + use Trento.Support.Enum, values: [:hana_scale_up, :hana_scale_out, :ascs_ers, :unknown] end diff --git a/lib/trento/domain/enums/ensa_version.ex b/lib/trento/domain/enums/ensa_version.ex new file mode 100644 index 0000000000..7d8de8f227 --- /dev/null +++ b/lib/trento/domain/enums/ensa_version.ex @@ -0,0 +1,7 @@ +defmodule Trento.Domain.Enums.EnsaVersion do + @moduledoc """ + Type that represents the supported ENSA versions. 
+ """ + + use Trento.Support.Enum, values: [:no_ensa, :ensa1, :ensa2] +end diff --git a/lib/trento/domain/host/commands/deregister_host.ex b/lib/trento/domain/host/commands/deregister_host.ex new file mode 100644 index 0000000000..c055ffcb4d --- /dev/null +++ b/lib/trento/domain/host/commands/deregister_host.ex @@ -0,0 +1,13 @@ +defmodule Trento.Domain.Commands.DeregisterHost do + @moduledoc """ + Deregister a host + """ + @required_fields :all + + use Trento.Command + + defcommand do + field :host_id, Ecto.UUID + field :deregistered_at, :utc_datetime_usec + end +end diff --git a/lib/trento/domain/host/commands/request_host_deregistration.ex b/lib/trento/domain/host/commands/request_host_deregistration.ex new file mode 100644 index 0000000000..3bc10fa19e --- /dev/null +++ b/lib/trento/domain/host/commands/request_host_deregistration.ex @@ -0,0 +1,13 @@ +defmodule Trento.Domain.Commands.RequestHostDeregistration do + @moduledoc """ + Request a deregistration of a host + """ + @required_fields :all + + use Trento.Command + + defcommand do + field :host_id, Ecto.UUID + field :requested_at, :utc_datetime_usec, default: DateTime.utc_now() + end +end diff --git a/lib/trento/domain/host/events/host_deregistered.ex b/lib/trento/domain/host/events/host_deregistered.ex new file mode 100644 index 0000000000..03c6125b10 --- /dev/null +++ b/lib/trento/domain/host/events/host_deregistered.ex @@ -0,0 +1,12 @@ +defmodule Trento.Domain.Events.HostDeregistered do + @moduledoc """ + This event is emitted when a deregistration (decommission) of a host is completed. + """ + + use Trento.Event + + defevent do + field :host_id, Ecto.UUID + field :deregistered_at, :utc_datetime_usec + end +end diff --git a/lib/trento/domain/host/events/host_deregistration_requested.ex b/lib/trento/domain/host/events/host_deregistration_requested.ex new file mode 100644 index 0000000000..3c8c7fb661 --- /dev/null +++ b/lib/trento/domain/host/events/host_deregistration_requested.ex @@ -0,0 +1,12 @@ +defmodule Trento.Domain.Events.HostDeregistrationRequested do + @moduledoc """ + This event is emitted when a deregistration (decommission) of a host is requested. 
+ """ + + use Trento.Event + + defevent do + field :host_id, Ecto.UUID + field :requested_at, :utc_datetime_usec + end +end diff --git a/lib/trento/domain/host/events/host_restored.ex b/lib/trento/domain/host/events/host_restored.ex new file mode 100644 index 0000000000..f2d24fa66a --- /dev/null +++ b/lib/trento/domain/host/events/host_restored.ex @@ -0,0 +1,11 @@ +defmodule Trento.Domain.Events.HostRestored do + @moduledoc """ + This event is emitted when a host is restored from a deregistered state + """ + + use Trento.Event + + defevent do + field :host_id, Ecto.UUID + end +end diff --git a/lib/trento/domain/host/events/host_tombstoned.ex b/lib/trento/domain/host/events/host_tombstoned.ex new file mode 100644 index 0000000000..cc6b196929 --- /dev/null +++ b/lib/trento/domain/host/events/host_tombstoned.ex @@ -0,0 +1,11 @@ +defmodule Trento.Domain.Events.HostTombstoned do + @moduledoc """ + This event is emitted after a successful host deregistration, to tombstone and stop the host aggregate + """ + + use Trento.Event + + defevent do + field :host_id, Ecto.UUID + end +end diff --git a/lib/trento/domain/host/host.ex b/lib/trento/domain/host/host.ex index f699d80a1d..237f65abaa 100644 --- a/lib/trento/domain/host/host.ex +++ b/lib/trento/domain/host/host.ex @@ -29,7 +29,9 @@ defmodule Trento.Domain.Host do } alias Trento.Domain.Commands.{ + DeregisterHost, RegisterHost, + RequestHostDeregistration, RollUpHost, SelectHostChecks, UpdateHeartbeat, @@ -41,10 +43,14 @@ defmodule Trento.Domain.Host do HeartbeatFailed, HeartbeatSucceded, HostChecksSelected, + HostDeregistered, + HostDeregistrationRequested, HostDetailsUpdated, HostRegistered, + HostRestored, HostRolledUp, HostRollUpRequested, + HostTombstoned, ProviderUpdated, SlesSubscriptionsUpdated } @@ -69,6 +75,7 @@ defmodule Trento.Domain.Host do field :heartbeat, Ecto.Enum, values: [:passing, :critical, :unknown] field :rolling_up, :boolean, default: false field :selected_checks, {:array, :string}, default: [] + field :deregistered_at, :utc_datetime_usec, default: nil embeds_many :subscriptions, SlesSubscription @@ -119,6 +126,98 @@ defmodule Trento.Domain.Host do } end + # Reject all the commands, except for the registration ones when the host_id does not exists + def execute( + %Host{host_id: nil}, + _ + ) do + {:error, :host_not_registered} + end + + # Restore the host when a RegisterHost command is received for a deregistered host + def execute( + %Host{ + host_id: host_id, + hostname: hostname, + ip_addresses: ip_addresses, + agent_version: agent_version, + cpu_count: cpu_count, + total_memory_mb: total_memory_mb, + socket_count: socket_count, + os_version: os_version, + installation_source: installation_source, + deregistered_at: deregistered_at + }, + %RegisterHost{ + hostname: hostname, + ip_addresses: ip_addresses, + agent_version: agent_version, + cpu_count: cpu_count, + total_memory_mb: total_memory_mb, + socket_count: socket_count, + os_version: os_version, + installation_source: installation_source + } + ) + when not is_nil(deregistered_at) do + %HostRestored{ + host_id: host_id + } + end + + def execute( + %Host{ + host_id: host_id, + deregistered_at: deregistered_at + }, + %RegisterHost{ + hostname: hostname, + ip_addresses: ip_addresses, + agent_version: agent_version, + cpu_count: cpu_count, + total_memory_mb: total_memory_mb, + socket_count: socket_count, + os_version: os_version, + installation_source: installation_source + } + ) + when not is_nil(deregistered_at) do + [ + %HostRestored{ + host_id: host_id + }, + 
%HostDetailsUpdated{ + host_id: host_id, + hostname: hostname, + ip_addresses: ip_addresses, + agent_version: agent_version, + cpu_count: cpu_count, + total_memory_mb: total_memory_mb, + socket_count: socket_count, + os_version: os_version, + installation_source: installation_source + } + ] + end + + def execute( + %Host{host_id: host_id} = snapshot, + %RollUpHost{} + ) do + %HostRollUpRequested{ + host_id: host_id, + snapshot: snapshot + } + end + + def execute( + %Host{deregistered_at: deregistered_at}, + _ + ) + when not is_nil(deregistered_at) do + {:error, :host_not_registered} + end + # Host exists but details didn't change def execute( %Host{ @@ -172,14 +271,6 @@ defmodule Trento.Domain.Host do } end - # Heartbeat received - def execute( - %Host{host_id: nil}, - %UpdateHeartbeat{} - ) do - {:error, :host_not_registered} - end - def execute( %Host{host_id: host_id, heartbeat: heartbeat}, %UpdateHeartbeat{heartbeat: :passing} @@ -203,14 +294,6 @@ defmodule Trento.Domain.Host do [] end - # Update provider received - def execute( - %Host{host_id: nil}, - %UpdateProvider{} - ) do - {:error, :host_not_registered} - end - def execute( %Host{provider: provider, provider_data: provider_data}, %UpdateProvider{provider: provider, provider_data: provider_data} @@ -229,13 +312,6 @@ defmodule Trento.Domain.Host do } end - def execute( - %Host{host_id: nil}, - %UpdateSlesSubscriptions{} - ) do - {:error, :host_not_registered} - end - def execute(%Host{subscriptions: subscriptions}, %UpdateSlesSubscriptions{ subscriptions: subscriptions }) do @@ -252,29 +328,29 @@ defmodule Trento.Domain.Host do } end - # Start the rollup flow - def execute( - %Host{host_id: nil}, - %RollUpHost{} - ) do - {:error, :host_not_registered} - end - def execute( - %Host{host_id: host_id} = snapshot, - %RollUpHost{} + %Host{host_id: host_id}, + %RequestHostDeregistration{requested_at: requested_at} ) do - %HostRollUpRequested{ + %HostDeregistrationRequested{ host_id: host_id, - snapshot: snapshot + requested_at: requested_at } end def execute( - %Host{host_id: nil}, - %SelectHostChecks{} + %Host{host_id: host_id}, + %DeregisterHost{deregistered_at: deregistered_at} ) do - {:error, :host_not_registered} + [ + %HostDeregistered{ + host_id: host_id, + deregistered_at: deregistered_at + }, + %HostTombstoned{ + host_id: host_id + } + ] end def execute( @@ -409,4 +485,20 @@ defmodule Trento.Domain.Host do | selected_checks: selected_checks } end + + # Deregistration + + def apply(%Host{} = host, %HostDeregistered{ + deregistered_at: deregistered_at + }) do + %Host{host | deregistered_at: deregistered_at} + end + + def apply(%Host{} = host, %HostDeregistrationRequested{}), do: host + def apply(%Host{} = host, %HostTombstoned{}), do: host + + # Restoration + def apply(%Host{} = host, %HostRestored{}) do + %Host{host | deregistered_at: nil} + end end diff --git a/lib/trento/domain/sap_system/commands/deregister_application_instance.ex b/lib/trento/domain/sap_system/commands/deregister_application_instance.ex new file mode 100644 index 0000000000..e84e303966 --- /dev/null +++ b/lib/trento/domain/sap_system/commands/deregister_application_instance.ex @@ -0,0 +1,16 @@ +defmodule Trento.Domain.Commands.DeregisterApplicationInstance do + @moduledoc """ + Deregister (decommission) an application instance from the monitoring system. 
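+
+  An illustrative payload; every value below is made up, and all fields are required
+  (`@required_fields :all`):
+
+      %Trento.Domain.Commands.DeregisterApplicationInstance{
+        sap_system_id: "00000000-0000-0000-0000-000000000001",
+        host_id: "00000000-0000-0000-0000-000000000002",
+        instance_number: "00",
+        deregistered_at: DateTime.utc_now()
+      }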
+ """ + + @required_fields :all + + use Trento.Command + + defcommand do + field :instance_number, :string + field :host_id, Ecto.UUID + field :sap_system_id, Ecto.UUID + field :deregistered_at, :utc_datetime_usec + end +end diff --git a/lib/trento/domain/sap_system/commands/deregister_database_instance.ex b/lib/trento/domain/sap_system/commands/deregister_database_instance.ex new file mode 100644 index 0000000000..eb1cf95e87 --- /dev/null +++ b/lib/trento/domain/sap_system/commands/deregister_database_instance.ex @@ -0,0 +1,16 @@ +defmodule Trento.Domain.Commands.DeregisterDatabaseInstance do + @moduledoc """ + Deregister (decommission) a database instance from the monitoring system. + """ + + @required_fields :all + + use Trento.Command + + defcommand do + field :instance_number, :string + field :host_id, Ecto.UUID + field :sap_system_id, Ecto.UUID + field :deregistered_at, :utc_datetime_usec + end +end diff --git a/lib/trento/domain/sap_system/commands/register_application_instance.ex b/lib/trento/domain/sap_system/commands/register_application_instance.ex index 1bfbc86ce5..0ff8ea0042 100644 --- a/lib/trento/domain/sap_system/commands/register_application_instance.ex +++ b/lib/trento/domain/sap_system/commands/register_application_instance.ex @@ -16,12 +16,9 @@ defmodule Trento.Domain.Commands.RegisterApplicationInstance do @required_fields [ :host_id, :instance_number, - :health, :sid, :db_host, :tenant, - :host_id, - :instance_number, :instance_hostname, :features, :http_port, @@ -32,6 +29,7 @@ defmodule Trento.Domain.Commands.RegisterApplicationInstance do use Trento.Command + require Trento.Domain.Enums.EnsaVersion, as: EnsaVersion require Trento.Domain.Enums.Health, as: Health defcommand do @@ -47,5 +45,6 @@ defmodule Trento.Domain.Commands.RegisterApplicationInstance do field :https_port, :integer field :start_priority, :string field :health, Ecto.Enum, values: Health.values() + field :ensa_version, Ecto.Enum, values: EnsaVersion.values() end end diff --git a/lib/trento/domain/sap_system/database.ex b/lib/trento/domain/sap_system/database.ex index 14b8d2ef27..f0d9fbffb5 100644 --- a/lib/trento/domain/sap_system/database.ex +++ b/lib/trento/domain/sap_system/database.ex @@ -14,6 +14,7 @@ defmodule Trento.Domain.SapSystem.Database do deftype do field :sid, :string embeds_many :instances, Instance + field :deregistered_at, :utc_datetime_usec field :health, Ecto.Enum, values: Health.values() end end diff --git a/lib/trento/domain/sap_system/events/application_instance_deregistered.ex b/lib/trento/domain/sap_system/events/application_instance_deregistered.ex new file mode 100644 index 0000000000..33e6a9283b --- /dev/null +++ b/lib/trento/domain/sap_system/events/application_instance_deregistered.ex @@ -0,0 +1,14 @@ +defmodule Trento.Domain.Events.ApplicationInstanceDeregistered do + @moduledoc """ + This event is emitted when a database application is deregistered (decommissioned) from the SAP system. 
+ """ + + use Trento.Event + + defevent do + field :instance_number, :string + field :host_id, Ecto.UUID + field :sap_system_id, Ecto.UUID + field :deregistered_at, :utc_datetime_usec + end +end diff --git a/lib/trento/domain/sap_system/events/application_instance_moved.ex b/lib/trento/domain/sap_system/events/application_instance_moved.ex new file mode 100644 index 0000000000..400148e2a8 --- /dev/null +++ b/lib/trento/domain/sap_system/events/application_instance_moved.ex @@ -0,0 +1,14 @@ +defmodule Trento.Domain.Events.ApplicationInstanceMoved do + @moduledoc """ + This event is emitted when an application instance is moved from a host to another. + """ + + use Trento.Event + + defevent do + field :sap_system_id, Ecto.UUID + field :instance_number, :string + field :old_host_id, Ecto.UUID + field :new_host_id, Ecto.UUID + end +end diff --git a/lib/trento/domain/sap_system/events/database_deregistered.ex b/lib/trento/domain/sap_system/events/database_deregistered.ex new file mode 100644 index 0000000000..b823a18190 --- /dev/null +++ b/lib/trento/domain/sap_system/events/database_deregistered.ex @@ -0,0 +1,12 @@ +defmodule Trento.Domain.Events.DatabaseDeregistered do + @moduledoc """ + This event is emitted once all database instances belonging to a HANA database have been deregistered (decommissioned) from the SAP system. + """ + + use Trento.Event + + defevent do + field :sap_system_id, Ecto.UUID + field :deregistered_at, :utc_datetime_usec + end +end diff --git a/lib/trento/domain/sap_system/events/database_instance_deregistered.ex b/lib/trento/domain/sap_system/events/database_instance_deregistered.ex new file mode 100644 index 0000000000..ca764920bc --- /dev/null +++ b/lib/trento/domain/sap_system/events/database_instance_deregistered.ex @@ -0,0 +1,14 @@ +defmodule Trento.Domain.Events.DatabaseInstanceDeregistered do + @moduledoc """ + This event is emitted when a database instance is deregistered (decommissioned) from the SAP system. + """ + + use Trento.Event + + defevent do + field :instance_number, :string + field :host_id, Ecto.UUID + field :sap_system_id, Ecto.UUID + field :deregistered_at, :utc_datetime_usec + end +end diff --git a/lib/trento/domain/sap_system/events/database_restored.ex b/lib/trento/domain/sap_system/events/database_restored.ex new file mode 100644 index 0000000000..e486ab69ef --- /dev/null +++ b/lib/trento/domain/sap_system/events/database_restored.ex @@ -0,0 +1,14 @@ +defmodule Trento.Domain.Events.DatabaseRestored do + @moduledoc """ + This event is emitted when a database is restored. + """ + + use Trento.Event + + require Trento.Domain.Enums.Health, as: Health + + defevent do + field :sap_system_id, Ecto.UUID + field :health, Ecto.Enum, values: Health.values() + end +end diff --git a/lib/trento/domain/sap_system/events/sap_system_deregistered.ex b/lib/trento/domain/sap_system/events/sap_system_deregistered.ex new file mode 100644 index 0000000000..341543700d --- /dev/null +++ b/lib/trento/domain/sap_system/events/sap_system_deregistered.ex @@ -0,0 +1,12 @@ +defmodule Trento.Domain.Events.SapSystemDeregistered do + @moduledoc """ + This event is emitted when a SAP system is deregistered (decommissioned). 
+ """ + + use Trento.Event + + defevent do + field :sap_system_id, Ecto.UUID + field :deregistered_at, :utc_datetime_usec + end +end diff --git a/lib/trento/domain/sap_system/events/sap_system_registered.ex b/lib/trento/domain/sap_system/events/sap_system_registered.ex index 0e4daf48d5..1bb1352ffe 100644 --- a/lib/trento/domain/sap_system/events/sap_system_registered.ex +++ b/lib/trento/domain/sap_system/events/sap_system_registered.ex @@ -5,6 +5,7 @@ defmodule Trento.Domain.Events.SapSystemRegistered do use Trento.Event + require Trento.Domain.Enums.EnsaVersion, as: EnsaVersion require Trento.Domain.Enums.Health, as: Health defevent do @@ -13,5 +14,6 @@ defmodule Trento.Domain.Events.SapSystemRegistered do field :tenant, :string field :db_host, :string field :health, Ecto.Enum, values: Health.values() + field :ensa_version, Ecto.Enum, values: EnsaVersion.values(), default: EnsaVersion.no_ensa() end end diff --git a/lib/trento/domain/sap_system/events/sap_system_restored.ex b/lib/trento/domain/sap_system/events/sap_system_restored.ex new file mode 100644 index 0000000000..cae01e3908 --- /dev/null +++ b/lib/trento/domain/sap_system/events/sap_system_restored.ex @@ -0,0 +1,16 @@ +defmodule Trento.Domain.Events.SapSystemRestored do + @moduledoc """ + This event is emitted when a sap system is restored. + """ + + use Trento.Event + + require Trento.Domain.Enums.Health, as: Health + + defevent do + field :sap_system_id, Ecto.UUID + field :tenant, :string + field :db_host, :string + field :health, Ecto.Enum, values: Health.values() + end +end diff --git a/lib/trento/domain/sap_system/events/sap_system_tombstoned.ex b/lib/trento/domain/sap_system/events/sap_system_tombstoned.ex new file mode 100644 index 0000000000..260bbc7752 --- /dev/null +++ b/lib/trento/domain/sap_system/events/sap_system_tombstoned.ex @@ -0,0 +1,11 @@ +defmodule Trento.Domain.Events.SapSystemTombstoned do + @moduledoc """ + This event is emitted when a SAP system is deregistered (decommissioned) + """ + + use Trento.Event + + defevent do + field :sap_system_id, Ecto.UUID + end +end diff --git a/lib/trento/domain/sap_system/events/sap_system_updated.ex b/lib/trento/domain/sap_system/events/sap_system_updated.ex new file mode 100644 index 0000000000..4d3104d79e --- /dev/null +++ b/lib/trento/domain/sap_system/events/sap_system_updated.ex @@ -0,0 +1,14 @@ +defmodule Trento.Domain.Events.SapSystemUpdated do + @moduledoc """ + This event is emitted when some of the fields in the SAP system are updated + """ + + require Trento.Domain.Enums.EnsaVersion, as: EnsaVersion + + use Trento.Event + + defevent do + field :sap_system_id, Ecto.UUID + field :ensa_version, Ecto.Enum, values: EnsaVersion.values() + end +end diff --git a/lib/trento/domain/sap_system/lifespan.ex b/lib/trento/domain/sap_system/lifespan.ex index 61d6986244..1ffb39a28f 100644 --- a/lib/trento/domain/sap_system/lifespan.ex +++ b/lib/trento/domain/sap_system/lifespan.ex @@ -16,6 +16,7 @@ defmodule Trento.Domain.SapSystem.Lifespan do This is needed to reset the aggregate version, so the aggregate can start appending events to the new stream. 
""" def after_event(%SapSystemRollUpRequested{}), do: :stop + def after_event(event), do: DefaultLifespan.after_event(event) def after_command(command), do: DefaultLifespan.after_command(command) diff --git a/lib/trento/domain/sap_system/sap_system.ex b/lib/trento/domain/sap_system/sap_system.ex index 616e4f3aca..7944dda8cc 100644 --- a/lib/trento/domain/sap_system/sap_system.ex +++ b/lib/trento/domain/sap_system/sap_system.ex @@ -5,9 +5,11 @@ defmodule Trento.Domain.SapSystem do **The HANA database is the only supported database type.** In order to have a fully registered SAP system, both the database and application - composing this system must be registered. And each of the two layers might be composed - by multiple instances altogether. This means that a SAP system aggregate state can have - multiple application/database instances. + composing this system must be registered. + The minimum set of application features is ABAP and MESSAGESERVER. Otherwise, a complete SAP system cannot exist. + And each of the two layers might be composed by multiple instances altogether. + This means that a SAP system aggregate state can have multiple application/database instances. + ## SAP instance @@ -28,18 +30,21 @@ defmodule Trento.Domain.SapSystem do That being said, this is the logical order of events in order to register a full system: - 1. A SAP system discovery message with a new database instance is received. At this point, the - registration process starts and the database is registered. + 1. A SAP system discovery message with a new database instance is received. + Database instances with Secondary role in a system replication scenario are discarded. + At this point, the registration process starts and the database is registered. Any application instance discovery message without an associated database is ignored. 2. New database instances/updates coming from already registered database instances are registered/applied. - 3. A SAP system discovery with a new application instance is received, and the database associated to - this application exists, the application instance is registered together with the complete - SAP system. The SAP system is fully registered now. + 3. When a SAP system discovery with a new application instance is received, and the database associated to + this application exists: + - Instances that are not MESSAGESERVER or ABAP will be added without completing a SAP system registration + - To have a fully registered SAP system, a MESSAGESERVER instance and one ABAP instance are required 4. New application instances/updates coming from already registered application instances are registered/applied. Find additional information about the application/database association in `Trento.Domain.Commands.RegisterApplicationInstance`. 
""" + require Trento.Domain.Enums.EnsaVersion, as: EnsaVersion require Trento.Domain.Enums.Health, as: Health alias Commanded.Aggregate.Multi @@ -53,23 +58,34 @@ defmodule Trento.Domain.SapSystem do } alias Trento.Domain.Commands.{ + DeregisterApplicationInstance, + DeregisterDatabaseInstance, RegisterApplicationInstance, RegisterDatabaseInstance, RollUpSapSystem } alias Trento.Domain.Events.{ + ApplicationInstanceDeregistered, ApplicationInstanceHealthChanged, + ApplicationInstanceMoved, ApplicationInstanceRegistered, + DatabaseDeregistered, DatabaseHealthChanged, + DatabaseInstanceDeregistered, DatabaseInstanceHealthChanged, DatabaseInstanceRegistered, DatabaseInstanceSystemReplicationChanged, DatabaseRegistered, + DatabaseRestored, + SapSystemDeregistered, SapSystemHealthChanged, SapSystemRegistered, + SapSystemRestored, SapSystemRolledUp, - SapSystemRollUpRequested + SapSystemRollUpRequested, + SapSystemTombstoned, + SapSystemUpdated } alias Trento.Domain.HealthService @@ -80,15 +96,30 @@ defmodule Trento.Domain.SapSystem do deftype do field :sap_system_id, Ecto.UUID - field :sid, :string + field :sid, :string, default: nil field :health, Ecto.Enum, values: Health.values() + field :ensa_version, Ecto.Enum, values: EnsaVersion.values(), default: EnsaVersion.no_ensa() field :rolling_up, :boolean, default: false + field :deregistered_at, :utc_datetime_usec, default: nil embeds_one :database, Database embeds_one :application, Application end + # Stop everything during the rollup process + def execute(%SapSystem{rolling_up: true}, _), do: {:error, :sap_system_rolling_up} + + def execute( + %SapSystem{sap_system_id: nil}, + %RegisterDatabaseInstance{ + system_replication: "Secondary" + } + ), + do: {:error, :sap_system_not_registered} + # First time that a Database instance is registered, the SAP System starts its registration process. + # Database instances are accepted when the system replication is disabled or when enabled, only if the database + # has a primary role # When an Application is discovered, the SAP System completes the registration process. def execute( %SapSystem{sap_system_id: nil}, @@ -132,62 +163,46 @@ defmodule Trento.Domain.SapSystem do ] end - # Stop everything during the rollup process - def execute(%SapSystem{rolling_up: true}, _), do: {:error, :sap_system_rolling_up} - - # When a RegisterDatabaseInstance command is received by an existing SAP System aggregate, - # the SAP System aggregate registers the Database instance if it is not already registered - # and updates the health when needed. 
+ # Database restore def execute( - %SapSystem{database: %Database{instances: instances}} = sap_system, - %RegisterDatabaseInstance{host_id: host_id, instance_number: instance_number} = command - ) do - instance = get_instance(instances, host_id, instance_number) - - sap_system - |> Multi.new() - |> Multi.execute(fn _ -> - maybe_emit_database_instance_system_replication_changed_event(instance, command) - end) - |> Multi.execute(fn _ -> - maybe_emit_database_instance_health_changed_event(instance, command) - end) - |> Multi.execute(fn _ -> - maybe_emit_database_instance_registered_event(instance, command) - end) - |> Multi.execute(&maybe_emit_database_health_changed_event/1) - |> Multi.execute(&maybe_emit_sap_system_health_changed_event/1) - end + %SapSystem{database: %Database{deregistered_at: deregistered_at}}, + %RegisterDatabaseInstance{ + system_replication: "Secondary" + } + ) + when not is_nil(deregistered_at), + do: {:error, :sap_system_not_registered} - # When an Application is discovered, the SAP System completes the registration process. + # When a deregistered database is present, we add the new database instance + # and restore the database, the conditions are the same as registration def execute( - %SapSystem{application: nil}, - %RegisterApplicationInstance{ + %SapSystem{database: %Database{deregistered_at: deregistered_at}}, + %RegisterDatabaseInstance{ sap_system_id: sap_system_id, sid: sid, + tenant: tenant, + host_id: host_id, instance_number: instance_number, instance_hostname: instance_hostname, - tenant: tenant, - db_host: db_host, features: features, http_port: http_port, https_port: https_port, start_priority: start_priority, - host_id: host_id, + system_replication: system_replication, + system_replication_status: system_replication_status, health: health } - ) do + ) + when not is_nil(deregistered_at) do [ - %SapSystemRegistered{ + %DatabaseRestored{ sap_system_id: sap_system_id, - sid: sid, - tenant: tenant, - db_host: db_host, health: health }, - %ApplicationInstanceRegistered{ + %DatabaseInstanceRegistered{ sap_system_id: sap_system_id, sid: sid, + tenant: tenant, instance_number: instance_number, instance_hostname: instance_hostname, features: features, @@ -195,80 +210,158 @@ defmodule Trento.Domain.SapSystem do https_port: https_port, start_priority: start_priority, host_id: host_id, + system_replication: system_replication, + system_replication_status: system_replication_status, health: health } ] end - # When a RegisterApplicationInstance command is received by an existing SAP System aggregate, - # the SAP System aggregate registers the Application instance if it is not already registered + # When a RegisterDatabaseInstance command is received by an existing SAP System aggregate, + # the SAP System aggregate registers the Database instance if it is not already registered # and updates the health when needed. 
def execute( - %SapSystem{application: %Application{instances: instances}} = sap_system, - %RegisterApplicationInstance{ - sap_system_id: sap_system_id, - sid: sid, - instance_number: instance_number, - instance_hostname: instance_hostname, - features: features, - http_port: http_port, - https_port: https_port, - start_priority: start_priority, - host_id: host_id, - health: health - } + %SapSystem{database: %Database{instances: instances}} = sap_system, + %RegisterDatabaseInstance{host_id: host_id, instance_number: instance_number} = command ) do - instance = - Enum.find(instances, fn - %Instance{host_id: ^host_id, instance_number: ^instance_number} -> - true + instance = get_instance(instances, host_id, instance_number) - _ -> - false - end) + sap_system + |> Multi.new() + |> Multi.execute(fn _ -> + maybe_emit_database_instance_system_replication_changed_event(instance, command) + end) + |> Multi.execute(fn _ -> + maybe_emit_database_instance_health_changed_event(instance, command) + end) + |> Multi.execute(fn _ -> + maybe_emit_database_instance_registered_event(instance, command) + end) + |> Multi.execute(&maybe_emit_database_health_changed_event/1) + |> Multi.execute(&maybe_emit_sap_system_health_changed_event/1) + end - event = - case instance do - %Instance{health: ^health} -> - nil - - %Instance{host_id: host_id, instance_number: instance_number} -> - %ApplicationInstanceHealthChanged{ - sap_system_id: sap_system_id, - host_id: host_id, - instance_number: instance_number, - health: health - } - - nil -> - %ApplicationInstanceRegistered{ - sap_system_id: sap_system_id, - sid: sid, - instance_number: instance_number, - instance_hostname: instance_hostname, - features: features, - http_port: http_port, - https_port: https_port, - start_priority: start_priority, - host_id: host_id, - health: health - } - end + # Restore the SAP system + # The same registration rules apply + def execute( + %SapSystem{deregistered_at: deregistered_at} = sap_system, + %RegisterApplicationInstance{} = instance + ) + when not is_nil(deregistered_at) do + sap_system + |> Multi.new() + |> Multi.execute(fn sap_system -> + maybe_emit_application_instance_registered_or_moved_event( + sap_system, + instance + ) + end) + |> Multi.execute(fn sap_system -> + maybe_emit_sap_system_restored_event(sap_system, instance) + end) + end + # SAP system not registered yet, but application instances are already present: + # if the incoming instance is not a MESSAGESERVER or ABAP instance, it is added without + # completing the SAP system registration. Otherwise, if the incoming instance together with + # the already registered instances provides one MESSAGESERVER and one ABAP instance, + # the instance is registered and the SAP system registration is completed. + # OR + # When a RegisterApplicationInstance command is received by an existing SAP System aggregate, + # the SAP System aggregate registers the Application instance if it is not already registered + # and updates the health when needed.
+ def execute( + %SapSystem{} = sap_system, + %RegisterApplicationInstance{} = instance + ) do sap_system |> Multi.new() - |> Multi.execute(fn _ -> event end) + |> Multi.execute(fn sap_system -> + maybe_emit_application_instance_registered_or_moved_event( + sap_system, + instance + ) + end) + |> Multi.execute(fn sap_system -> + maybe_emit_application_instance_health_changed_event( + sap_system, + instance + ) + end) + |> Multi.execute(fn sap_system -> + maybe_emit_sap_system_registered_or_updated_event(sap_system, instance) + end) |> Multi.execute(&maybe_emit_sap_system_health_changed_event/1) end - # Start the rollup flow def execute( %SapSystem{sap_system_id: nil}, - %RollUpSapSystem{} + _ ) do {:error, :sap_system_not_registered} end + # Deregister a database instance and emit a DatabaseInstanceDeregistered + # also potentially emit SapSystemDeregistered and DatabaseDeregistered events + def execute( + %SapSystem{sap_system_id: sap_system_id} = sap_system, + %DeregisterDatabaseInstance{ + instance_number: instance_number, + host_id: host_id, + sap_system_id: sap_system_id, + deregistered_at: deregistered_at + } + ) do + sap_system + |> Multi.new() + |> Multi.execute(fn _ -> + %DatabaseInstanceDeregistered{ + sap_system_id: sap_system_id, + host_id: host_id, + instance_number: instance_number, + deregistered_at: deregistered_at + } + end) + |> Multi.execute(fn sap_system -> + maybe_emit_database_deregistered_event(sap_system, deregistered_at) + end) + |> Multi.execute(fn sap_system -> + maybe_emit_sap_system_deregistered_event(sap_system, deregistered_at) + end) + |> Multi.execute(&maybe_emit_sap_system_tombstoned_event/1) + end + + # Deregister an application instance and emit a ApplicationInstanceDeregistered + # also emit SapSystemDeregistered event if this was the last application instance + def execute( + %SapSystem{ + sap_system_id: sap_system_id + } = sap_system, + %DeregisterApplicationInstance{ + instance_number: instance_number, + sap_system_id: sap_system_id, + host_id: host_id, + deregistered_at: deregistered_at + } + ) do + sap_system + |> Multi.new() + |> Multi.execute(fn _ -> + %ApplicationInstanceDeregistered{ + sap_system_id: sap_system_id, + instance_number: instance_number, + host_id: host_id, + deregistered_at: deregistered_at + } + end) + |> Multi.execute(fn sap_system -> + maybe_emit_sap_system_deregistered_event( + sap_system, + deregistered_at + ) + end) + |> Multi.execute(&maybe_emit_sap_system_tombstoned_event/1) + end + def execute( %SapSystem{sap_system_id: sap_system_id} = snapshot, %RollUpSapSystem{} @@ -279,6 +372,14 @@ defmodule Trento.Domain.SapSystem do } end + def execute( + %SapSystem{deregistered_at: deregistered_at}, + _ + ) + when not is_nil(deregistered_at) do + {:error, :sap_system_not_registered} + end + def apply( %SapSystem{sap_system_id: nil}, %DatabaseRegistered{ @@ -378,6 +479,15 @@ defmodule Trento.Domain.SapSystem do %SapSystem{sap_system | database: Map.put(database, :instances, instances)} end + def apply(%SapSystem{database: %Database{} = database} = sap_system, %DatabaseHealthChanged{ + health: health + }) do + %SapSystem{ + sap_system + | database: Map.put(database, :health, health) + } + end + def apply( %SapSystem{application: nil} = sap_system, %ApplicationInstanceRegistered{ @@ -434,6 +544,32 @@ defmodule Trento.Domain.SapSystem do } end + def apply( + %SapSystem{application: %Application{instances: instances} = application} = sap_system, + %ApplicationInstanceMoved{ + instance_number: instance_number, + old_host_id: old_host_id, 
+ new_host_id: new_host_id + } + ) do + instances = + Enum.map(instances, fn + %Instance{ + instance_number: ^instance_number, + host_id: ^old_host_id + } = instance -> + %Instance{instance | host_id: new_host_id} + + instance -> + instance + end) + + %SapSystem{ + sap_system + | application: Map.put(application, :instances, instances) + } + end + def apply( %SapSystem{application: %Application{instances: instances} = application} = sap_system, %ApplicationInstanceHealthChanged{ @@ -457,11 +593,16 @@ defmodule Trento.Domain.SapSystem do %SapSystem{sap_system | application: Map.put(application, :instances, instances)} end - def apply(%SapSystem{} = sap_system, %SapSystemRegistered{sid: sid, health: health}) do + def apply(%SapSystem{} = sap_system, %SapSystemRegistered{ + sid: sid, + health: health, + ensa_version: ensa_version + }) do %SapSystem{ sap_system | sid: sid, - health: health + health: health, + ensa_version: ensa_version } end @@ -472,12 +613,12 @@ defmodule Trento.Domain.SapSystem do } end - def apply(%SapSystem{database: %Database{} = database} = sap_system, %DatabaseHealthChanged{ - health: health + def apply(%SapSystem{} = sap_system, %SapSystemUpdated{ + ensa_version: ensa_version }) do %SapSystem{ sap_system - | database: Map.put(database, :health, health) + | ensa_version: ensa_version } end @@ -493,6 +634,140 @@ defmodule Trento.Domain.SapSystem do snapshot end + def apply( + %SapSystem{database: %Database{instances: instances} = database} = sap_system, + %DatabaseInstanceDeregistered{ + instance_number: instance_number, + host_id: host_id + } + ) do + instances = + Enum.reject(instances, fn + %Instance{instance_number: ^instance_number, host_id: ^host_id} -> + true + + _ -> + false + end) + + %SapSystem{ + sap_system + | database: %Database{ + database + | instances: instances + } + } + end + + def apply( + %SapSystem{application: %Application{instances: instances} = application} = sap_system, + %ApplicationInstanceDeregistered{instance_number: instance_number, host_id: host_id} + ) do + instances = + Enum.reject(instances, fn + %Instance{instance_number: ^instance_number, host_id: ^host_id} -> + true + + _ -> + false + end) + + %SapSystem{ + sap_system + | application: %Application{ + application + | instances: instances + } + } + end + + def apply( + %SapSystem{database: database} = sap_system, + %DatabaseDeregistered{deregistered_at: deregistered_at} + ) do + %SapSystem{ + sap_system + | database: Map.put(database, :deregistered_at, deregistered_at) + } + end + + def apply( + %SapSystem{database: database} = sap_system, + %DatabaseRestored{ + health: health + } + ) do + %SapSystem{ + sap_system + | database: %Database{ + database + | health: health, + deregistered_at: nil + } + } + end + + def apply( + %SapSystem{} = sap_system, + %SapSystemDeregistered{ + deregistered_at: deregistered_at + } + ) do + %SapSystem{ + sap_system + | deregistered_at: deregistered_at + } + end + + def apply(%SapSystem{} = sap_system, %SapSystemRestored{ + health: health + }) do + %SapSystem{ + sap_system + | health: health, + deregistered_at: nil + } + end + + def apply(%SapSystem{} = sap_system, %SapSystemTombstoned{}), do: sap_system + + defp maybe_emit_database_instance_registered_event( + nil, + %RegisterDatabaseInstance{ + sap_system_id: sap_system_id, + sid: sid, + tenant: tenant, + instance_number: instance_number, + instance_hostname: instance_hostname, + features: features, + http_port: http_port, + https_port: https_port, + start_priority: start_priority, + host_id: host_id, + 
system_replication: system_replication, + system_replication_status: system_replication_status, + health: health + } + ) do + %DatabaseInstanceRegistered{ + sap_system_id: sap_system_id, + sid: sid, + tenant: tenant, + instance_number: instance_number, + instance_hostname: instance_hostname, + features: features, + http_port: http_port, + https_port: https_port, + start_priority: start_priority, + host_id: host_id, + system_replication: system_replication, + system_replication_status: system_replication_status, + health: health + } + end + + defp maybe_emit_database_instance_registered_event(_, _), do: nil + defp maybe_emit_database_instance_system_replication_changed_event( %Instance{ system_replication: system_replication, @@ -541,12 +816,30 @@ defmodule Trento.Domain.SapSystem do defp maybe_emit_database_instance_health_changed_event(_, _), do: nil - defp maybe_emit_database_instance_registered_event( - nil, - %RegisterDatabaseInstance{ + # Returns a DatabaseHealthChanged event if the newly computed aggregated health of all the instances + # is different from the previous Database health. + defp maybe_emit_database_health_changed_event(%SapSystem{ + sap_system_id: sap_system_id, + database: %Database{instances: instances, health: health} + }) do + new_health = + instances + |> Enum.map(& &1.health) + |> HealthService.compute_aggregated_health() + + if new_health != health do + %DatabaseHealthChanged{ + sap_system_id: sap_system_id, + health: new_health + } + end + end + + defp maybe_emit_application_instance_registered_or_moved_event( + %SapSystem{application: nil}, + %RegisterApplicationInstance{ sap_system_id: sap_system_id, sid: sid, - tenant: tenant, instance_number: instance_number, instance_hostname: instance_hostname, features: features, @@ -554,15 +847,12 @@ defmodule Trento.Domain.SapSystem do https_port: https_port, start_priority: start_priority, host_id: host_id, - system_replication: system_replication, - system_replication_status: system_replication_status, health: health } ) do - %DatabaseInstanceRegistered{ + %ApplicationInstanceRegistered{ sap_system_id: sap_system_id, sid: sid, - tenant: tenant, instance_number: instance_number, instance_hostname: instance_hostname, features: features, @@ -570,35 +860,150 @@ defmodule Trento.Domain.SapSystem do https_port: https_port, start_priority: start_priority, host_id: host_id, - system_replication: system_replication, - system_replication_status: system_replication_status, health: health } end - defp maybe_emit_database_instance_registered_event(_, _), do: nil + defp maybe_emit_application_instance_registered_or_moved_event( + %SapSystem{application: %Application{instances: instances}}, + %RegisterApplicationInstance{ + sap_system_id: sap_system_id, + sid: sid, + instance_number: instance_number, + instance_hostname: instance_hostname, + features: features, + http_port: http_port, + https_port: https_port, + start_priority: start_priority, + host_id: host_id, + health: health + } + ) do + instance = + Enum.find(instances, fn instance -> instance.instance_number == instance_number end) - # Returns a DatabaseHealthChanged event if the newly computed aggregated health of all the instances - # is different from the previous Database health. 
- defp maybe_emit_database_health_changed_event(%SapSystem{ - sap_system_id: sap_system_id, - database: %Database{instances: instances, health: health} - }) do - new_health = - instances - |> Enum.map(& &1.health) - |> HealthService.compute_aggregated_health() + cond do + is_nil(instance) -> + %ApplicationInstanceRegistered{ + sap_system_id: sap_system_id, + sid: sid, + instance_number: instance_number, + instance_hostname: instance_hostname, + features: features, + http_port: http_port, + https_port: https_port, + start_priority: start_priority, + host_id: host_id, + health: health + } - if new_health != health do - %DatabaseHealthChanged{ + instance.host_id != host_id -> + %ApplicationInstanceMoved{ + sap_system_id: sap_system_id, + instance_number: instance_number, + old_host_id: instance.host_id, + new_host_id: host_id + } + + true -> + nil + end + end + + defp maybe_emit_application_instance_health_changed_event( + %SapSystem{application: %Application{instances: instances}}, + %RegisterApplicationInstance{ + sap_system_id: sap_system_id, + instance_number: instance_number, + host_id: host_id, + health: health + } + ) do + instance = get_instance(instances, host_id, instance_number) + + if instance && instance.health != health do + %ApplicationInstanceHealthChanged{ sap_system_id: sap_system_id, - health: new_health + host_id: host_id, + instance_number: instance_number, + health: health + } + end + end + + defp maybe_emit_sap_system_restored_event( + %SapSystem{application: %Application{instances: instances}}, + %RegisterApplicationInstance{ + sap_system_id: sap_system_id, + tenant: tenant, + db_host: db_host, + health: health + } + ) do + if instances_have_abap?(instances) and instances_have_messageserver?(instances) do + %SapSystemRestored{ + db_host: db_host, + health: health, + sap_system_id: sap_system_id, + tenant: tenant } end end + defp maybe_emit_sap_system_registered_or_updated_event( + %SapSystem{sid: nil, application: %Application{instances: instances}}, + %RegisterApplicationInstance{ + sap_system_id: sap_system_id, + sid: sid, + tenant: tenant, + db_host: db_host, + health: health, + ensa_version: ensa_version + } + ) do + if instances_have_abap?(instances) and instances_have_messageserver?(instances) do + %SapSystemRegistered{ + sap_system_id: sap_system_id, + sid: sid, + tenant: tenant, + db_host: db_host, + health: health, + ensa_version: ensa_version + } + end + end + + # Values didn't update + defp maybe_emit_sap_system_registered_or_updated_event( + %SapSystem{ensa_version: ensa_version}, + %RegisterApplicationInstance{ + ensa_version: ensa_version + } + ), + do: nil + + # Don't update if ensa_version is no_ensa, as this means that the coming app is not + # message or enqueue replicator type + defp maybe_emit_sap_system_registered_or_updated_event( + %SapSystem{}, + %RegisterApplicationInstance{ + ensa_version: :no_ensa + } + ), + do: nil + + defp maybe_emit_sap_system_registered_or_updated_event( + %SapSystem{}, + %RegisterApplicationInstance{ + sap_system_id: sap_system_id, + ensa_version: ensa_version + } + ), + do: %SapSystemUpdated{sap_system_id: sap_system_id, ensa_version: ensa_version} + # Do not emit health changed event as the SAP system is not completely registered yet defp maybe_emit_sap_system_health_changed_event(%SapSystem{application: nil}), do: nil + defp maybe_emit_sap_system_health_changed_event(%SapSystem{sid: nil}), do: nil # Returns a SapSystemHealthChanged event when the aggregated health of the application instances # and database is 
different from the previous SAP system health. @@ -622,6 +1027,106 @@ defmodule Trento.Domain.SapSystem do end end + defp maybe_emit_sap_system_deregistered_event( + %SapSystem{sid: nil}, + _deregistered_at + ), + do: [] + + defp maybe_emit_sap_system_deregistered_event( + %SapSystem{ + sap_system_id: sap_system_id, + deregistered_at: nil, + database: %Database{deregistered_at: database_deregistered_at} + }, + deregistered_at + ) + when not is_nil(database_deregistered_at) do + %SapSystemDeregistered{sap_system_id: sap_system_id, deregistered_at: deregistered_at} + end + + defp maybe_emit_sap_system_deregistered_event( + %SapSystem{ + sap_system_id: sap_system_id, + deregistered_at: nil, + application: %Application{ + instances: instances + } + }, + deregistered_at + ) do + unless instances_have_abap?(instances) and instances_have_messageserver?(instances) do + %SapSystemDeregistered{sap_system_id: sap_system_id, deregistered_at: deregistered_at} + end + end + + defp maybe_emit_sap_system_deregistered_event(_, _), do: nil + + defp maybe_emit_sap_system_tombstoned_event(%SapSystem{ + sap_system_id: sap_system_id, + application: %Application{ + instances: [] + }, + database: %Database{ + instances: [] + } + }) do + %SapSystemTombstoned{sap_system_id: sap_system_id} + end + + defp maybe_emit_sap_system_tombstoned_event(_), do: nil + + defp maybe_emit_database_deregistered_event( + %SapSystem{ + sap_system_id: sap_system_id, + database: %Database{ + deregistered_at: nil, + instances: [] + } + }, + deregistered_at + ) do + %DatabaseDeregistered{sap_system_id: sap_system_id, deregistered_at: deregistered_at} + end + + defp maybe_emit_database_deregistered_event( + %SapSystem{ + sap_system_id: sap_system_id, + database: %Database{ + instances: instances, + deregistered_at: nil + } + }, + deregistered_at + ) do + has_primary? = + Enum.any?(instances, fn %{system_replication: system_replication} -> + system_replication == "Primary" + end) + + has_secondary? = + Enum.any?(instances, fn %{system_replication: system_replication} -> + system_replication == "Secondary" + end) + + if has_secondary? and !has_primary? do + %DatabaseDeregistered{ + sap_system_id: sap_system_id, + deregistered_at: deregistered_at + } + end + end + + defp maybe_emit_database_deregistered_event(_, _), do: nil + + defp instances_have_abap?(instances) do + Enum.any?(instances, fn %{features: features} -> features =~ "ABAP" end) + end + + def instances_have_messageserver?(instances) do + Enum.any?(instances, fn %{features: features} -> features =~ "MESSAGESERVER" end) + end + defp get_instance(instances, host_id, instance_number) do Enum.find(instances, fn %Instance{host_id: ^host_id, instance_number: ^instance_number} -> diff --git a/lib/trento/domain/value_objects/ascs_ers_cluster_details.ex b/lib/trento/domain/value_objects/ascs_ers_cluster_details.ex new file mode 100644 index 0000000000..ac20f642f2 --- /dev/null +++ b/lib/trento/domain/value_objects/ascs_ers_cluster_details.ex @@ -0,0 +1,26 @@ +defmodule Trento.Domain.AscsErsClusterDetails do + @moduledoc """ + Represents the details of a ASCS/ERS cluster. 
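+
+  An illustrative shape of the data this type is expected to hold (all values are made up;
+  `stopped_resources` and `sbd_devices` are omitted for brevity):
+
+      %{
+        fencing_type: "external/sbd",
+        sap_systems: [
+          %{
+            sid: "NWP",
+            filesystem_resource_based: true,
+            distributed: true,
+            nodes: [
+              %{name: "node01", roles: [:ascs], virtual_ips: ["192.168.1.10"],
+                filesystems: ["/usr/sap/NWP/ASCS00"]},
+              %{name: "node02", roles: [:ers], virtual_ips: ["192.168.1.11"],
+                filesystems: ["/usr/sap/NWP/ERS10"]}
+            ]
+          }
+        ]
+      }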
+ """ + + @required_fields [ + :fencing_type, + :sap_systems + ] + + use Trento.Type + + alias Trento.Domain.{ + AscsErsClusterSapSystem, + ClusterResource, + SbdDevice + } + + deftype do + field :fencing_type, :string + + embeds_many :sap_systems, AscsErsClusterSapSystem + embeds_many :stopped_resources, ClusterResource + embeds_many :sbd_devices, SbdDevice + end +end diff --git a/lib/trento/domain/value_objects/ascs_ers_cluster_node.ex b/lib/trento/domain/value_objects/ascs_ers_cluster_node.ex new file mode 100644 index 0000000000..045a446a57 --- /dev/null +++ b/lib/trento/domain/value_objects/ascs_ers_cluster_node.ex @@ -0,0 +1,25 @@ +defmodule Trento.Domain.AscsErsClusterNode do + @moduledoc """ + Represents the node of a ASCS/ERS cluster. + """ + + @required_fields [ + :name + ] + + require Trento.Domain.Enums.AscsErsClusterRole, as: AscsErsClusterRole + + use Trento.Type + + alias Trento.Domain.ClusterResource + + deftype do + field :name, :string + field :roles, {:array, Ecto.Enum}, values: AscsErsClusterRole.values() + field :virtual_ips, {:array, :string} + field :filesystems, {:array, :string} + field :attributes, {:map, :string} + + embeds_many :resources, ClusterResource + end +end diff --git a/lib/trento/domain/value_objects/ascs_ers_cluster_sap_system.ex b/lib/trento/domain/value_objects/ascs_ers_cluster_sap_system.ex new file mode 100644 index 0000000000..e191ba89b3 --- /dev/null +++ b/lib/trento/domain/value_objects/ascs_ers_cluster_sap_system.ex @@ -0,0 +1,24 @@ +defmodule Trento.Domain.AscsErsClusterSapSystem do + @moduledoc """ + Represents ASCS/ERS cluster SAP system. + """ + + @required_fields [ + :sid, + :filesystem_resource_based, + :distributed, + :nodes + ] + + use Trento.Type + + alias Trento.Domain.AscsErsClusterNode + + deftype do + field :sid, :string + field :filesystem_resource_based, :boolean + field :distributed, :boolean + + embeds_many :nodes, AscsErsClusterNode + end +end diff --git a/lib/trento/domain/value_objects/hana_cluster_details.ex b/lib/trento/domain/value_objects/hana_cluster_details.ex index c14e0559ed..3823cc84e9 100644 --- a/lib/trento/domain/value_objects/hana_cluster_details.ex +++ b/lib/trento/domain/value_objects/hana_cluster_details.ex @@ -13,8 +13,8 @@ defmodule Trento.Domain.HanaClusterDetails do use Trento.Type alias Trento.Domain.{ - ClusterNode, ClusterResource, + HanaClusterNode, SbdDevice } @@ -26,7 +26,7 @@ defmodule Trento.Domain.HanaClusterDetails do field :fencing_type, :string embeds_many :stopped_resources, ClusterResource - embeds_many :nodes, ClusterNode + embeds_many :nodes, HanaClusterNode embeds_many :sbd_devices, SbdDevice end end diff --git a/lib/trento/domain/value_objects/cluster_node.ex b/lib/trento/domain/value_objects/hana_cluster_node.ex similarity index 90% rename from lib/trento/domain/value_objects/cluster_node.ex rename to lib/trento/domain/value_objects/hana_cluster_node.ex index 2c20280f5a..afabab50e0 100644 --- a/lib/trento/domain/value_objects/cluster_node.ex +++ b/lib/trento/domain/value_objects/hana_cluster_node.ex @@ -1,4 +1,4 @@ -defmodule Trento.Domain.ClusterNode do +defmodule Trento.Domain.HanaClusterNode do @moduledoc """ Represents the node of a HANA cluster. 
""" diff --git a/lib/trento/infrastructure/process_managers_supervisor.ex b/lib/trento/infrastructure/process_managers_supervisor.ex new file mode 100644 index 0000000000..af4e9b9b73 --- /dev/null +++ b/lib/trento/infrastructure/process_managers_supervisor.ex @@ -0,0 +1,18 @@ +defmodule Trento.ProcessManagersSupervisor do + @moduledoc false + + use Supervisor + + def start_link(init_arg) do + Supervisor.start_link(__MODULE__, init_arg, name: __MODULE__) + end + + @impl true + def init(_init_arg) do + children = [ + Trento.DeregistrationProcessManager + ] + + Supervisor.init(children, strategy: :one_for_one) + end +end diff --git a/lib/trento/infrastructure/router.ex b/lib/trento/infrastructure/router.ex index 94127474b4..8096c27676 100644 --- a/lib/trento/infrastructure/router.ex +++ b/lib/trento/infrastructure/router.ex @@ -11,10 +11,15 @@ defmodule Trento.Router do alias Trento.Domain.Commands.{ CompleteChecksExecution, + DeregisterApplicationInstance, + DeregisterClusterHost, + DeregisterDatabaseInstance, + DeregisterHost, RegisterApplicationInstance, RegisterClusterHost, RegisterDatabaseInstance, RegisterHost, + RequestHostDeregistration, RollUpCluster, RollUpHost, RollUpSapSystem, @@ -34,8 +39,10 @@ defmodule Trento.Router do UpdateHeartbeat, UpdateProvider, UpdateSlesSubscriptions, + SelectHostChecks, RollUpHost, - SelectHostChecks + RequestHostDeregistration, + DeregisterHost ], to: Host, lifespan: Host.Lifespan @@ -44,6 +51,7 @@ defmodule Trento.Router do by: :cluster_id dispatch [ + DeregisterClusterHost, RollUpCluster, RegisterClusterHost, SelectChecks, @@ -54,7 +62,13 @@ defmodule Trento.Router do identify SapSystem, by: :sap_system_id - dispatch [RegisterApplicationInstance, RegisterDatabaseInstance, RollUpSapSystem], - to: SapSystem, - lifespan: SapSystem.Lifespan + dispatch [ + DeregisterApplicationInstance, + DeregisterDatabaseInstance, + RegisterApplicationInstance, + RegisterDatabaseInstance, + RollUpSapSystem + ], + to: SapSystem, + lifespan: SapSystem.Lifespan end diff --git a/lib/trento/support/type.ex b/lib/trento/support/type.ex index 3b6a17335b..76fc3c8769 100644 --- a/lib/trento/support/type.ex +++ b/lib/trento/support/type.ex @@ -19,6 +19,8 @@ defmodule Trento.Type do import Ecto.Changeset + import PolymorphicEmbed, only: [cast_polymorphic_embed: 3] + @type t() :: %__MODULE__{} @primary_key false @@ -48,9 +50,15 @@ defmodule Trento.Type do changes -> {:error, {:validation, - Ecto.Changeset.traverse_errors( - changes, - fn {msg, _} -> msg end + Map.merge( + Ecto.Changeset.traverse_errors( + changes, + fn {msg, _} -> msg end + ), + PolymorphicEmbed.traverse_errors( + changes, + fn {msg, _} -> msg end + ) )}} end end @@ -66,10 +74,6 @@ defmodule Trento.Type do end end - @dialyzer {:no_match, changeset: 2} - # we need to ignore the no_match warning of the ` {_, Ecto.Embedded, _}` case - # since some spec is broken in the Ecto codebase - @doc """ Casts the fields by using Ecto reflection, validates the required ones and returns a changeset. 
@@ -80,8 +84,13 @@ defmodule Trento.Type do |> cast(params, fields()) |> validate_required_fields(@required_fields) - Enum.reduce(embedded_fields(), changeset, fn field, changeset -> - cast_and_validate_required_embed(changeset, field, @required_fields) + changeset = + Enum.reduce(embedded_fields(), changeset, fn field, changeset -> + cast_and_validate_required_embed(changeset, field, @required_fields) + end) + + Enum.reduce(polymorphic_fields(), changeset, fn field, changeset -> + cast_and_validate_required_polymorphic_embed(changeset, field, @required_fields) end) end @@ -112,6 +121,15 @@ defmodule Trento.Type do def cast_and_validate_required_embed(changeset, field, required_fields), do: cast_embed(changeset, field, required: field in required_fields) + def cast_and_validate_required_polymorphic_embed(changeset, field, nil), + do: cast_polymorphic_embed(changeset, field, required: false) + + def cast_and_validate_required_polymorphic_embed(changeset, field, :all), + do: cast_polymorphic_embed(changeset, field, required: true) + + def cast_and_validate_required_polymorphic_embed(changeset, field, required_fields), + do: cast_polymorphic_embed(changeset, field, required: field in required_fields) + defp map_results(%{error: errors}), do: {:error, map_errors(errors)} @@ -121,10 +139,25 @@ defmodule Trento.Type do defp map_errors(errors), do: {:validation, Enum.map(errors, fn {:validation, error} -> error end)} - defp fields, do: __MODULE__.__schema__(:fields) -- __MODULE__.__schema__(:embeds) + defp fields do + (__MODULE__.__schema__(:fields) -- embedded_fields()) -- polymorphic_fields() + end defp embedded_fields, do: __MODULE__.__schema__(:embeds) + @dialyzer {:no_match, polymorphic_fields: 0} + # we need to ignore the no_match warning of the `{:parameterized, PolymorphicEmbed, _}` case + # since some spec is broken in the Ecto codebase + + defp polymorphic_fields, + do: + Enum.filter(__MODULE__.__schema__(:fields), fn field -> + case __MODULE__.__schema__(:type, field) do + {:parameterized, PolymorphicEmbed, _} -> true + _ -> false + end + end) + defoverridable new: 1 defoverridable changeset: 2 end diff --git a/lib/trento_web/controllers/fallback_controller.ex b/lib/trento_web/controllers/fallback_controller.ex index 14bf25e4ac..9b2c546408 100644 --- a/lib/trento_web/controllers/fallback_controller.ex +++ b/lib/trento_web/controllers/fallback_controller.ex @@ -25,7 +25,12 @@ defmodule TrentoWeb.FallbackController do end def call(conn, {:error, reason}) - when reason in [:host_not_registered, :cluster_not_registered, :sap_system_not_registered] do + when reason in [ + :host_not_registered, + :cluster_not_registered, + :sap_system_not_registered, + :database_not_registered + ] do conn |> put_status(:not_found) |> put_view(ErrorView) @@ -53,6 +58,15 @@ defmodule TrentoWeb.FallbackController do |> render(:"422", reason: "Unknown discovery type.") end + def call(conn, {:error, :host_alive}) do + conn + |> put_status(:unprocessable_entity) + |> put_view(ErrorView) + |> render(:"422", reason: "Requested operation not allowed for live hosts.") + end + + def call(conn, {:error, [error | _]}), do: call(conn, {:error, error}) + def call(conn, {:error, _}) do conn |> put_status(:internal_server_error) diff --git a/lib/trento_web/controllers/health_controller.ex b/lib/trento_web/controllers/health_controller.ex index c3f16d6f8a..c4af94f642 100644 --- a/lib/trento_web/controllers/health_controller.ex +++ b/lib/trento_web/controllers/health_controller.ex @@ -4,7 +4,7 @@ defmodule 
TrentoWeb.HealthController do alias Ecto.Adapters.SQL - alias TrentoWeb.OpenApi.Schema.{ + alias TrentoWeb.OpenApi.V1.Schema.{ Health, Ready } diff --git a/lib/trento_web/controllers/page_controller.ex b/lib/trento_web/controllers/page_controller.ex index 87229644f7..61a55b37de 100644 --- a/lib/trento_web/controllers/page_controller.ex +++ b/lib/trento_web/controllers/page_controller.ex @@ -5,9 +5,12 @@ defmodule TrentoWeb.PageController do grafana_public_url = Application.fetch_env!(:trento, :grafana)[:public_url] check_service_base_url = Application.fetch_env!(:trento, :checks_service)[:base_url] + deregistration_debounce = Application.fetch_env!(:trento, :deregistration_debounce) + render(conn, "index.html", grafana_public_url: grafana_public_url, - check_service_base_url: check_service_base_url + check_service_base_url: check_service_base_url, + deregistration_debounce: deregistration_debounce ) end end diff --git a/lib/trento_web/controllers/v1/about_controller.ex b/lib/trento_web/controllers/v1/about_controller.ex index 85d6ee3efd..3d582d2707 100644 --- a/lib/trento_web/controllers/v1/about_controller.ex +++ b/lib/trento_web/controllers/v1/about_controller.ex @@ -4,7 +4,7 @@ defmodule TrentoWeb.V1.AboutController do alias Trento.Hosts - alias TrentoWeb.OpenApi.Schema + alias TrentoWeb.OpenApi.V1.Schema @version Mix.Project.config()[:version] diff --git a/lib/trento_web/controllers/v1/cluster_controller.ex b/lib/trento_web/controllers/v1/cluster_controller.ex index faec6839f1..3d5a2f4597 100644 --- a/lib/trento_web/controllers/v1/cluster_controller.ex +++ b/lib/trento_web/controllers/v1/cluster_controller.ex @@ -4,7 +4,7 @@ defmodule TrentoWeb.V1.ClusterController do alias Trento.Clusters - alias TrentoWeb.OpenApi.Schema + alias TrentoWeb.OpenApi.V1.Schema plug OpenApiSpex.Plug.CastAndValidate, json_render_error_v2: true action_fallback TrentoWeb.FallbackController diff --git a/lib/trento_web/controllers/v1/discovery_controller.ex b/lib/trento_web/controllers/v1/discovery_controller.ex index bbe17c39ab..3e6dd38500 100644 --- a/lib/trento_web/controllers/v1/discovery_controller.ex +++ b/lib/trento_web/controllers/v1/discovery_controller.ex @@ -4,7 +4,7 @@ defmodule TrentoWeb.V1.DiscoveryController do alias Trento.Integration.Discovery - alias TrentoWeb.OpenApi.Schema + alias TrentoWeb.OpenApi.V1.Schema plug OpenApiSpex.Plug.CastAndValidate, json_render_error_v2: true action_fallback TrentoWeb.FallbackController diff --git a/lib/trento_web/controllers/v1/health_overview_controller.ex b/lib/trento_web/controllers/v1/health_overview_controller.ex index 4e16fa730e..7495ea8147 100644 --- a/lib/trento_web/controllers/v1/health_overview_controller.ex +++ b/lib/trento_web/controllers/v1/health_overview_controller.ex @@ -4,7 +4,7 @@ defmodule TrentoWeb.V1.HealthOverviewController do alias Trento.SapSystems.HealthSummaryService - alias TrentoWeb.OpenApi.Schema + alias TrentoWeb.OpenApi.V1.Schema operation(:overview, summary: "Health overview of the discovered SAP Systems", diff --git a/lib/trento_web/controllers/v1/host_controller.ex b/lib/trento_web/controllers/v1/host_controller.ex index 405da2c6e2..df4520d589 100644 --- a/lib/trento_web/controllers/v1/host_controller.ex +++ b/lib/trento_web/controllers/v1/host_controller.ex @@ -2,13 +2,19 @@ defmodule TrentoWeb.V1.HostController do use TrentoWeb, :controller use OpenApiSpex.ControllerSpecs - alias TrentoWeb.OpenApi.Schema - alias Trento.{ Heartbeats, Hosts } + alias TrentoWeb.OpenApi.V1.Schema + + alias TrentoWeb.OpenApi.V1.Schema.{ + 
BadRequest, + NotFound, + UnprocessableEntity + } + plug OpenApiSpex.Plug.CastAndValidate, json_render_error_v2: true action_fallback TrentoWeb.FallbackController @@ -28,6 +34,30 @@ defmodule TrentoWeb.V1.HostController do render(conn, "hosts.json", hosts: hosts) end + operation :delete, + summary: "Deregister a host", + description: "Deregister a host agent from Trento", + parameters: [ + id: [ + in: :path, + required: true, + type: %OpenApiSpex.Schema{type: :string, format: :uuid} + ] + ], + responses: [ + no_content: "The host has been deregistered", + not_found: NotFound.response(), + unprocessable_entity: UnprocessableEntity.response() + ] + + @spec delete(Plug.Conn.t(), map) :: Plug.Conn.t() + def delete(conn, %{id: host_id}) do + case Hosts.deregister_host(host_id) do + :ok -> send_resp(conn, 204, "") + {:error, error} -> {:error, error} + end + end + operation :heartbeat, summary: "Signal that an agent is alive", tags: ["Agent"], @@ -41,8 +71,8 @@ defmodule TrentoWeb.V1.HostController do ], responses: [ no_content: "The heartbeat has been updated", - not_found: Schema.NotFound.response(), - bad_request: Schema.BadRequest.response(), + not_found: NotFound.response(), + bad_request: BadRequest.response(), unprocessable_entity: OpenApiSpex.JsonErrorResponse.response() ] @@ -66,7 +96,7 @@ defmodule TrentoWeb.V1.HostController do request_body: {"Checks Selection", "application/json", Schema.Checks.ChecksSelectionRequest}, responses: [ accepted: "The Selection has been successfully collected", - not_found: Schema.NotFound.response(), + not_found: NotFound.response(), unprocessable_entity: OpenApiSpex.JsonErrorResponse.response() ] diff --git a/lib/trento_web/controllers/v1/prometheus_controller.ex b/lib/trento_web/controllers/v1/prometheus_controller.ex index 90d453eff9..86e7d39fc2 100644 --- a/lib/trento_web/controllers/v1/prometheus_controller.ex +++ b/lib/trento_web/controllers/v1/prometheus_controller.ex @@ -5,7 +5,7 @@ defmodule TrentoWeb.V1.PrometheusController do alias Trento.Integration.Prometheus - alias TrentoWeb.OpenApi.Schema + alias TrentoWeb.OpenApi.V1.Schema require Logger diff --git a/lib/trento_web/controllers/v1/sap_system_controller.ex b/lib/trento_web/controllers/v1/sap_system_controller.ex index 1f8203c358..1e05fe75fb 100644 --- a/lib/trento_web/controllers/v1/sap_system_controller.ex +++ b/lib/trento_web/controllers/v1/sap_system_controller.ex @@ -3,6 +3,8 @@ defmodule TrentoWeb.V1.SapSystemController do alias Trento.SapSystems + alias TrentoWeb.OpenApi.V1.Schema + use OpenApiSpex.ControllerSpecs tags ["Target Infrastructure"] @@ -13,7 +15,7 @@ defmodule TrentoWeb.V1.SapSystemController do responses: [ ok: {"A collection of the discovered SAP Systems", "application/json", - TrentoWeb.OpenApi.Schema.SAPSystem.SAPSystemsCollection} + Schema.SAPSystem.SAPSystemsCollection} ] def list(conn, _) do @@ -28,7 +30,7 @@ defmodule TrentoWeb.V1.SapSystemController do responses: [ ok: {"A collection of the discovered HANA Databases", "application/json", - TrentoWeb.OpenApi.Schema.Database.DatabasesCollection} + Schema.Database.DatabasesCollection} ] def list_databases(conn, _) do diff --git a/lib/trento_web/controllers/v1/settings_controller.ex b/lib/trento_web/controllers/v1/settings_controller.ex index b0c71e8691..2e349a34c9 100644 --- a/lib/trento_web/controllers/v1/settings_controller.ex +++ b/lib/trento_web/controllers/v1/settings_controller.ex @@ -3,7 +3,7 @@ defmodule TrentoWeb.V1.SettingsController do alias Trento.Installation - alias TrentoWeb.OpenApi.Schema + alias 
TrentoWeb.OpenApi.V1.Schema use OpenApiSpex.ControllerSpecs diff --git a/lib/trento_web/controllers/v1/tags_controller.ex b/lib/trento_web/controllers/v1/tags_controller.ex index a3e9b55a70..93bfa8c685 100644 --- a/lib/trento_web/controllers/v1/tags_controller.ex +++ b/lib/trento_web/controllers/v1/tags_controller.ex @@ -4,7 +4,7 @@ defmodule TrentoWeb.V1.TagsController do alias Trento.Tags - alias TrentoWeb.OpenApi.Schema + alias TrentoWeb.OpenApi.V1.Schema action_fallback TrentoWeb.FallbackController plug OpenApiSpex.Plug.CastAndValidate, json_render_error_v2: true @@ -69,7 +69,7 @@ defmodule TrentoWeb.V1.TagsController do ], responses: [ no_content: "The tag has been removed from the resource", - bad_request: TrentoWeb.OpenApi.Schema.BadRequest.response(), + bad_request: Schema.BadRequest.response(), unprocessable_entity: OpenApiSpex.JsonErrorResponse.response(), not_found: OpenApiSpex.JsonErrorResponse.response() ] diff --git a/lib/trento_web/controllers/v2/cluster_controller.ex b/lib/trento_web/controllers/v2/cluster_controller.ex new file mode 100644 index 0000000000..a30d64d457 --- /dev/null +++ b/lib/trento_web/controllers/v2/cluster_controller.ex @@ -0,0 +1,27 @@ +defmodule TrentoWeb.V2.ClusterController do + use TrentoWeb, :controller + use OpenApiSpex.ControllerSpecs + + alias Trento.Clusters + + alias TrentoWeb.OpenApi.V2.Schema + + plug OpenApiSpex.Plug.CastAndValidate, json_render_error_v2: true + action_fallback TrentoWeb.FallbackController + + operation :list, + summary: "List Pacemaker Clusters", + tags: ["Target Infrastructure"], + description: "List all the discovered Pacemaker Clusters on the target infrastructure", + responses: [ + ok: + {"A collection of the discovered Pacemaker Clusters", "application/json", + Schema.Cluster.PacemakerClustersCollection} + ] + + def list(conn, _) do + clusters = Clusters.get_all_clusters() + + render(conn, "clusters.json", clusters: clusters) + end +end diff --git a/lib/trento_web/openapi/api_spec.ex b/lib/trento_web/openapi/api_spec.ex index 5f4aed97c3..8f0800b425 100644 --- a/lib/trento_web/openapi/api_spec.ex +++ b/lib/trento_web/openapi/api_spec.ex @@ -1,64 +1,93 @@ defmodule TrentoWeb.OpenApi.ApiSpec do @moduledoc """ OpenApi specification entry point + + `api_version` must be provided to specify the version of this openapi specification + + Example: + use TrentoWeb.OpenApi.ApiSpec, + api_version: "v1" """ - alias OpenApiSpex.{ - Components, - Info, - OpenApi, - Paths, - SecurityScheme, - Server, - Tag - } + defmacro __using__(opts) do + api_version = + Keyword.get(opts, :api_version) || raise ArgumentError, "expected :api_version option" - alias TrentoWeb.{Endpoint, Router} - @behaviour OpenApi + quote do + alias OpenApiSpex.{ + Components, + Info, + OpenApi, + Paths, + SecurityScheme, + Server, + Tag + } - @impl OpenApi - def spec do - OpenApiSpex.resolve_schema_modules(%OpenApi{ - servers: [ - endpoint() - ], - info: %Info{ - title: "Trento", - description: to_string(Application.spec(:trento, :description)), - version: to_string(Application.spec(:trento, :vsn)) - }, - components: %Components{ - securitySchemes: %{"authorization" => %SecurityScheme{type: "http", scheme: "bearer"}} - }, - security: [%{"authorization" => []}], - # Populate the paths from a phoenix router - paths: Paths.from_router(Router), - tags: [ - %Tag{ - name: "Target Infrastructure", - description: "Providing access to the discovered target infrastructure" - }, - %Tag{ - name: "Checks", - description: "Providing Checks related feature" - }, - %Tag{ - name: 
"Platform", - description: "Providing access to Trento Platform features" - } - ] - }) - end + alias TrentoWeb.{Endpoint, Router} + @behaviour OpenApi + + @impl OpenApi + def spec(router \\ Router) do + OpenApiSpex.resolve_schema_modules(%OpenApi{ + servers: [ + endpoint() + ], + info: %Info{ + title: "Trento", + description: to_string(Application.spec(:trento, :description)), + version: to_string(Application.spec(:trento, :vsn)) + }, + components: %Components{ + securitySchemes: %{"authorization" => %SecurityScheme{type: "http", scheme: "bearer"}} + }, + security: [%{"authorization" => []}], + paths: build_paths_for_version(unquote(api_version), router), + tags: [ + %Tag{ + name: "Target Infrastructure", + description: "Providing access to the discovered target infrastructure" + }, + %Tag{ + name: "Checks", + description: "Providing Checks related feature" + }, + %Tag{ + name: "Platform", + description: "Providing access to Trento Platform features" + } + ] + }) + end + + defp endpoint do + if Process.whereis(Endpoint) do + # Populate the Server info from a phoenix endpoint + Server.from_endpoint(Endpoint) + else + # If the endpoint is not running, use a placeholder + # this happens when generarting openapi.json with --start-app=false + # e.g. mix openapi.spec.json --start-app=false --spec WandaWeb.ApiSpec + %OpenApiSpex.Server{url: "https://demo.trento-project.io"} + end + end + + defp build_paths_for_version(version, router) do + excluded_versions = List.delete(router.available_api_versions(), version) + + router + |> Paths.from_router() + |> Enum.reject(fn {path, _info} -> + current_version = + path + |> String.trim("/") + |> String.split("/") + |> Enum.at(1) - defp endpoint do - if Process.whereis(Endpoint) do - # Populate the Server info from a phoenix endpoint - Server.from_endpoint(Endpoint) - else - # If the endpoint is not running, use a placeholder - # this happens when generarting openapi.json with --start-app=false - # e.g. 
mix openapi.spec.json --start-app=false --spec WandaWeb.ApiSpec - %OpenApiSpex.Server{url: "https://demo.trento-project.io"} + Enum.member?(excluded_versions, current_version) + end) + |> Map.new() + end end end end diff --git a/lib/trento_web/openapi/v1/api_spec.ex b/lib/trento_web/openapi/v1/api_spec.ex new file mode 100644 index 0000000000..1fbe0a02ff --- /dev/null +++ b/lib/trento_web/openapi/v1/api_spec.ex @@ -0,0 +1,8 @@ +defmodule TrentoWeb.OpenApi.V1.ApiSpec do + @moduledoc """ + OpenApi specification entry point for V1 version + """ + + use TrentoWeb.OpenApi.ApiSpec, + api_version: "v1" +end diff --git a/lib/trento_web/openapi/schema/bad_request.ex b/lib/trento_web/openapi/v1/schema/bad_request.ex similarity index 92% rename from lib/trento_web/openapi/schema/bad_request.ex rename to lib/trento_web/openapi/v1/schema/bad_request.ex index 3f84615390..76bac363b8 100644 --- a/lib/trento_web/openapi/schema/bad_request.ex +++ b/lib/trento_web/openapi/v1/schema/bad_request.ex @@ -1,4 +1,4 @@ -defmodule TrentoWeb.OpenApi.Schema.BadRequest do +defmodule TrentoWeb.OpenApi.V1.Schema.BadRequest do @moduledoc """ Bad Request """ diff --git a/lib/trento_web/openapi/schema/checks.ex b/lib/trento_web/openapi/v1/schema/checks.ex similarity index 84% rename from lib/trento_web/openapi/schema/checks.ex rename to lib/trento_web/openapi/v1/schema/checks.ex index 96f163b547..d83b7d741b 100644 --- a/lib/trento_web/openapi/schema/checks.ex +++ b/lib/trento_web/openapi/v1/schema/checks.ex @@ -1,4 +1,4 @@ -defmodule TrentoWeb.OpenApi.Schema.Checks.ChecksSelectionRequest do +defmodule TrentoWeb.OpenApi.V1.Schema.Checks.ChecksSelectionRequest do @moduledoc false require OpenApiSpex alias OpenApiSpex.Schema diff --git a/lib/trento_web/openapi/schema/checks_catalog.ex b/lib/trento_web/openapi/v1/schema/checks_catalog.ex similarity index 98% rename from lib/trento_web/openapi/schema/checks_catalog.ex rename to lib/trento_web/openapi/v1/schema/checks_catalog.ex index 88eb7e73c1..77934366f9 100644 --- a/lib/trento_web/openapi/schema/checks_catalog.ex +++ b/lib/trento_web/openapi/v1/schema/checks_catalog.ex @@ -1,10 +1,10 @@ -defmodule TrentoWeb.OpenApi.Schema.ChecksCatalog do +defmodule TrentoWeb.OpenApi.V1.Schema.ChecksCatalog do @moduledoc false require OpenApiSpex alias OpenApiSpex.Schema - alias TrentoWeb.OpenApi.Schema.Provider + alias TrentoWeb.OpenApi.V1.Schema.Provider defmodule Check do @moduledoc false diff --git a/lib/trento_web/openapi/schema/cluster.ex b/lib/trento_web/openapi/v1/schema/cluster.ex similarity index 81% rename from lib/trento_web/openapi/schema/cluster.ex rename to lib/trento_web/openapi/v1/schema/cluster.ex index b5da212cab..45d9a17993 100644 --- a/lib/trento_web/openapi/schema/cluster.ex +++ b/lib/trento_web/openapi/v1/schema/cluster.ex @@ -1,12 +1,11 @@ -defmodule TrentoWeb.OpenApi.Schema.Cluster do +defmodule TrentoWeb.OpenApi.V1.Schema.Cluster do @moduledoc false require OpenApiSpex - require Trento.Domain.Enums.ClusterType, as: ClusterType alias OpenApiSpex.Schema - alias TrentoWeb.OpenApi.Schema.{Provider, ResourceHealth, Tags} + alias TrentoWeb.OpenApi.V1.Schema.{Provider, ResourceHealth, Tags} defmodule ClusterResource do @moduledoc false @@ -25,26 +24,25 @@ defmodule TrentoWeb.OpenApi.Schema.Cluster do }) end - defmodule ClusterNode do + defmodule HanaClusterNode do @moduledoc false OpenApiSpex.schema(%{ - title: "ClusterNode", - description: "A Cluster Node", + title: "HanaClusterNode", + description: "A HANA Cluster Node", type: :object, properties: %{ name: %Schema{type: 
:string}, site: %Schema{type: :string}, hana_status: %Schema{type: :string}, attributes: %Schema{ - title: "ClusterNodeAttributes", - type: :array, - items: %Schema{type: :string} + type: :object, + description: "Node attributes", + additionalProperties: %Schema{type: :string} }, virtual_ip: %Schema{type: :string}, resources: %Schema{ - title: "ClustrNodeResources", - description: "A list of ClusterNodes", + description: "A list of Cluster resources", type: :array, items: ClusterResource } @@ -57,7 +55,7 @@ defmodule TrentoWeb.OpenApi.Schema.Cluster do OpenApiSpex.schema(%{ title: "SbdDevice", - description: "Ad Sbd Device", + description: "SBD Device", type: :object, properties: %{ device: %Schema{type: :string}, @@ -83,22 +81,20 @@ defmodule TrentoWeb.OpenApi.Schema.Cluster do sr_health_state: %Schema{type: :string, description: "SR health state"}, fencing_type: %Schema{type: :string, description: "Fencing Type"}, stopped_resources: %Schema{ - title: "ClusterResource", description: "A list of the stopped resources on this HANA Cluster", type: :array, items: ClusterResource }, nodes: %Schema{ - title: "HanaClusterNodes", type: :array, - items: ClusterNode + items: HanaClusterNode }, sbd_devices: %Schema{ - title: "SbdDevice", type: :array, items: SbdDevice } - } + }, + required: [:nodes] }) end @@ -106,7 +102,7 @@ defmodule TrentoWeb.OpenApi.Schema.Cluster do @moduledoc false OpenApiSpex.schema(%{ - title: "PacemakerClusterDetail", + title: "PacemakerClusterDetails", description: "Details of the detected PacemakerCluster", nullable: true, oneOf: [ @@ -126,15 +122,20 @@ defmodule TrentoWeb.OpenApi.Schema.Cluster do id: %Schema{type: :string, description: "Cluster ID", format: :uuid}, name: %Schema{type: :string, description: "Cluster name"}, sid: %Schema{type: :string, description: "SID"}, + additional_sids: %Schema{ + type: :array, + items: %Schema{type: :string}, + description: "Additionally discovered SIDs, such as ASCS/ERS cluster SIDs" + }, provider: Provider.SupportedProviders, type: %Schema{ type: :string, description: "Detected type of the cluster", - enum: ClusterType.values() + enum: [:hana_scale_up, :hana_scale_out, :unknown] }, selected_checks: %Schema{ title: "SelectedChecks", - description: "A list ids of the checks selected for execution on this cluster", + description: "A list of check ids selected for an execution on this cluster", type: :array, items: %Schema{type: :string} }, diff --git a/lib/trento_web/openapi/schema/database.ex b/lib/trento_web/openapi/v1/schema/database.ex similarity index 95% rename from lib/trento_web/openapi/schema/database.ex rename to lib/trento_web/openapi/v1/schema/database.ex index 56b91c5170..62dde505a0 100644 --- a/lib/trento_web/openapi/schema/database.ex +++ b/lib/trento_web/openapi/v1/schema/database.ex @@ -1,10 +1,10 @@ -defmodule TrentoWeb.OpenApi.Schema.Database do +defmodule TrentoWeb.OpenApi.V1.Schema.Database do @moduledoc false require OpenApiSpex alias OpenApiSpex.Schema - alias TrentoWeb.OpenApi.Schema.{ResourceHealth, Tags} + alias TrentoWeb.OpenApi.V1.Schema.{ResourceHealth, Tags} defmodule DatabaseInstance do @moduledoc false diff --git a/lib/trento_web/openapi/schema/discovery_event.ex b/lib/trento_web/openapi/v1/schema/discovery_event.ex similarity index 89% rename from lib/trento_web/openapi/schema/discovery_event.ex rename to lib/trento_web/openapi/v1/schema/discovery_event.ex index 64567eb41f..2ff910ac2a 100644 --- a/lib/trento_web/openapi/schema/discovery_event.ex +++ b/lib/trento_web/openapi/v1/schema/discovery_event.ex 
@@ -1,4 +1,4 @@ -defmodule TrentoWeb.OpenApi.Schema.DiscoveryEvent do +defmodule TrentoWeb.OpenApi.V1.Schema.DiscoveryEvent do @moduledoc false alias OpenApiSpex.Schema diff --git a/lib/trento_web/openapi/schema/health.ex b/lib/trento_web/openapi/v1/schema/health.ex similarity index 91% rename from lib/trento_web/openapi/schema/health.ex rename to lib/trento_web/openapi/v1/schema/health.ex index 131a737fb6..a2ebc3012d 100644 --- a/lib/trento_web/openapi/schema/health.ex +++ b/lib/trento_web/openapi/v1/schema/health.ex @@ -1,4 +1,4 @@ -defmodule TrentoWeb.OpenApi.Schema.Health do +defmodule TrentoWeb.OpenApi.V1.Schema.Health do @moduledoc """ Healthcheck """ diff --git a/lib/trento_web/openapi/schema/host.ex b/lib/trento_web/openapi/v1/schema/host.ex similarity index 76% rename from lib/trento_web/openapi/schema/host.ex rename to lib/trento_web/openapi/v1/schema/host.ex index 252d1d293b..b800d6f6c4 100644 --- a/lib/trento_web/openapi/schema/host.ex +++ b/lib/trento_web/openapi/v1/schema/host.ex @@ -1,10 +1,10 @@ -defmodule TrentoWeb.OpenApi.Schema.Host do +defmodule TrentoWeb.OpenApi.V1.Schema.Host do @moduledoc false require OpenApiSpex alias OpenApiSpex.Schema - alias TrentoWeb.OpenApi.Schema.{Provider, SlesSubscription, Tags} + alias TrentoWeb.OpenApi.V1.Schema.{Provider, SlesSubscription, Tags} defmodule IPv4 do @moduledoc false @@ -69,6 +69,20 @@ defmodule TrentoWeb.OpenApi.Schema.Host do description: "A list of the available SLES Subscriptions on a host", type: :array, items: SlesSubscription + }, + deregistered_at: %Schema{ + title: "DeregisteredAt", + description: "Timestamp of the last deregistration of the host", + type: :string, + nullable: true, + format: :"date-time" + }, + last_heartbeat_timestamp: %Schema{ + title: "LastHeartbeatTimestamp", + description: "Timestamp of the last heartbeat received from the host", + type: :string, + nullable: true, + format: :"date-time" } } }) diff --git a/lib/trento_web/openapi/schema/http_std.ex b/lib/trento_web/openapi/v1/schema/http_std.ex similarity index 96% rename from lib/trento_web/openapi/schema/http_std.ex rename to lib/trento_web/openapi/v1/schema/http_std.ex index f0e6cde005..241c69cb10 100644 --- a/lib/trento_web/openapi/schema/http_std.ex +++ b/lib/trento_web/openapi/v1/schema/http_std.ex @@ -1,4 +1,4 @@ -defmodule TrentoWeb.OpenApi.Schema.HttpStd do +defmodule TrentoWeb.OpenApi.V1.Schema.HttpStd do @moduledoc false require OpenApiSpex diff --git a/lib/trento_web/openapi/schema/not_found.ex b/lib/trento_web/openapi/v1/schema/not_found.ex similarity index 92% rename from lib/trento_web/openapi/schema/not_found.ex rename to lib/trento_web/openapi/v1/schema/not_found.ex index 515b0deb38..536b2d4e3d 100644 --- a/lib/trento_web/openapi/schema/not_found.ex +++ b/lib/trento_web/openapi/v1/schema/not_found.ex @@ -1,4 +1,4 @@ -defmodule TrentoWeb.OpenApi.Schema.NotFound do +defmodule TrentoWeb.OpenApi.V1.Schema.NotFound do @moduledoc """ 404 - Not Found """ diff --git a/lib/trento_web/openapi/schema/platform.ex b/lib/trento_web/openapi/v1/schema/platform.ex similarity index 96% rename from lib/trento_web/openapi/schema/platform.ex rename to lib/trento_web/openapi/v1/schema/platform.ex index c55122e376..d4afe8c1e2 100644 --- a/lib/trento_web/openapi/schema/platform.ex +++ b/lib/trento_web/openapi/v1/schema/platform.ex @@ -1,4 +1,4 @@ -defmodule TrentoWeb.OpenApi.Schema.Platform do +defmodule TrentoWeb.OpenApi.V1.Schema.Platform do @moduledoc false require OpenApiSpex diff --git a/lib/trento_web/openapi/schema/prometheus.ex 
b/lib/trento_web/openapi/v1/schema/prometheus.ex similarity index 90% rename from lib/trento_web/openapi/schema/prometheus.ex rename to lib/trento_web/openapi/v1/schema/prometheus.ex index 5e349d3348..cbebbfa06c 100644 --- a/lib/trento_web/openapi/schema/prometheus.ex +++ b/lib/trento_web/openapi/v1/schema/prometheus.ex @@ -1,4 +1,4 @@ -defmodule TrentoWeb.OpenApi.Schema.Prometheus do +defmodule TrentoWeb.OpenApi.V1.Schema.Prometheus do @moduledoc false require OpenApiSpex diff --git a/lib/trento_web/openapi/schema/provider.ex b/lib/trento_web/openapi/v1/schema/provider.ex similarity index 98% rename from lib/trento_web/openapi/schema/provider.ex rename to lib/trento_web/openapi/v1/schema/provider.ex index 9d7a2dffb0..ff95d47389 100644 --- a/lib/trento_web/openapi/schema/provider.ex +++ b/lib/trento_web/openapi/v1/schema/provider.ex @@ -1,4 +1,4 @@ -defmodule TrentoWeb.OpenApi.Schema.Provider do +defmodule TrentoWeb.OpenApi.V1.Schema.Provider do @moduledoc false require OpenApiSpex diff --git a/lib/trento_web/openapi/schema/ready.ex b/lib/trento_web/openapi/v1/schema/ready.ex similarity index 90% rename from lib/trento_web/openapi/schema/ready.ex rename to lib/trento_web/openapi/v1/schema/ready.ex index c8424d45f7..b8e64f08de 100644 --- a/lib/trento_web/openapi/schema/ready.ex +++ b/lib/trento_web/openapi/v1/schema/ready.ex @@ -1,4 +1,4 @@ -defmodule TrentoWeb.OpenApi.Schema.Ready do +defmodule TrentoWeb.OpenApi.V1.Schema.Ready do @moduledoc """ Ready """ diff --git a/lib/trento_web/openapi/schema/resource_health.ex b/lib/trento_web/openapi/v1/schema/resource_health.ex similarity index 82% rename from lib/trento_web/openapi/schema/resource_health.ex rename to lib/trento_web/openapi/v1/schema/resource_health.ex index 79a3e3d299..820b072893 100644 --- a/lib/trento_web/openapi/schema/resource_health.ex +++ b/lib/trento_web/openapi/v1/schema/resource_health.ex @@ -1,4 +1,4 @@ -defmodule TrentoWeb.OpenApi.Schema.ResourceHealth do +defmodule TrentoWeb.OpenApi.V1.Schema.ResourceHealth do @moduledoc false require OpenApiSpex diff --git a/lib/trento_web/openapi/schema/sap_system.ex b/lib/trento_web/openapi/v1/schema/sap_system.ex similarity index 73% rename from lib/trento_web/openapi/schema/sap_system.ex rename to lib/trento_web/openapi/v1/schema/sap_system.ex index eaa99b6479..26f76e79e2 100644 --- a/lib/trento_web/openapi/schema/sap_system.ex +++ b/lib/trento_web/openapi/v1/schema/sap_system.ex @@ -1,10 +1,12 @@ -defmodule TrentoWeb.OpenApi.Schema.SAPSystem do +defmodule TrentoWeb.OpenApi.V1.Schema.SAPSystem do @moduledoc false require OpenApiSpex alias OpenApiSpex.Schema - alias TrentoWeb.OpenApi.Schema.{Database, ResourceHealth, Tags} + require Trento.Domain.Enums.EnsaVersion, as: EnsaVersion + + alias TrentoWeb.OpenApi.V1.Schema.{Database, ResourceHealth, Tags} defmodule ApplicationInstance do @moduledoc false @@ -53,6 +55,11 @@ defmodule TrentoWeb.OpenApi.Schema.SAPSystem do tenant: %Schema{type: :string, description: "Tenant"}, db_host: %Schema{type: :string, description: "Address of the connected Database"}, health: ResourceHealth, + ensa_version: %Schema{ + type: :string, + enum: EnsaVersion.values(), + description: "ENSA version of the SAP system" + }, application_instances: %Schema{ title: "ApplicationInstances", description: "A list of the discovered Application Instances for current SAP Systems", @@ -86,10 +93,35 @@ defmodule TrentoWeb.OpenApi.Schema.SAPSystem do properties: %{ id: %Schema{type: :string, description: "SAP System ID", format: :uuid}, sid: %Schema{type: :string, 
description: "SID"}, + cluster_id: %Schema{ + type: :string, + description: "Cluster ID", + format: :uuid, + deprecated: true + }, + application_cluster_id: %Schema{ + type: :string, + description: "Application cluster ID", + format: :uuid + }, + database_cluster_id: %Schema{ + type: :string, + description: "Database cluster ID", + format: :uuid + }, + database_id: %Schema{type: :string, description: "Database ID", format: :uuid}, sapsystem_health: ResourceHealth, database_health: ResourceHealth, hosts_health: ResourceHealth, - clusters_health: ResourceHealth + clusters_health: %Schema{ + allOf: [ + ResourceHealth, + %Schema{deprecated: true} + ] + }, + application_cluster_health: ResourceHealth, + database_cluster_health: ResourceHealth, + tenant: %Schema{type: :string, description: "Tenant database SID"} } }) end diff --git a/lib/trento_web/openapi/schema/sles_subscription.ex b/lib/trento_web/openapi/v1/schema/sles_subscription.ex similarity index 91% rename from lib/trento_web/openapi/schema/sles_subscription.ex rename to lib/trento_web/openapi/v1/schema/sles_subscription.ex index 18ab77d700..a096ce013c 100644 --- a/lib/trento_web/openapi/schema/sles_subscription.ex +++ b/lib/trento_web/openapi/v1/schema/sles_subscription.ex @@ -1,4 +1,4 @@ -defmodule TrentoWeb.OpenApi.Schema.SlesSubscription do +defmodule TrentoWeb.OpenApi.V1.Schema.SlesSubscription do @moduledoc false require OpenApiSpex diff --git a/lib/trento_web/openapi/schema/tag.ex b/lib/trento_web/openapi/v1/schema/tag.ex similarity index 93% rename from lib/trento_web/openapi/schema/tag.ex rename to lib/trento_web/openapi/v1/schema/tag.ex index 194dac75c6..6f9a63375c 100644 --- a/lib/trento_web/openapi/schema/tag.ex +++ b/lib/trento_web/openapi/v1/schema/tag.ex @@ -1,4 +1,4 @@ -defmodule TrentoWeb.OpenApi.Schema.Tags do +defmodule TrentoWeb.OpenApi.V1.Schema.Tags do @moduledoc false require OpenApiSpex diff --git a/lib/trento_web/openapi/v1/schema/unprocessable_entity.ex b/lib/trento_web/openapi/v1/schema/unprocessable_entity.ex new file mode 100644 index 0000000000..e9cd04121d --- /dev/null +++ b/lib/trento_web/openapi/v1/schema/unprocessable_entity.ex @@ -0,0 +1,35 @@ +defmodule TrentoWeb.OpenApi.V1.Schema.UnprocessableEntity do + @moduledoc """ + 422 - Unprocessable Entity + """ + require OpenApiSpex + + alias OpenApiSpex.Operation + alias OpenApiSpex.Schema + + OpenApiSpex.schema(%{ + type: :object, + properties: %{ + errors: %Schema{ + type: :array, + items: %Schema{ + type: :object, + properties: %{ + title: %Schema{type: :string, example: "Invalid value"}, + detail: %Schema{type: :string, example: "null value where string expected"} + }, + required: [:title, :detail] + } + } + }, + required: [:errors] + }) + + def response do + Operation.response( + "Unprocessable Entity", + "application/json", + __MODULE__ + ) + end +end diff --git a/lib/trento_web/openapi/v2/api_spec.ex b/lib/trento_web/openapi/v2/api_spec.ex new file mode 100644 index 0000000000..fd7908b28f --- /dev/null +++ b/lib/trento_web/openapi/v2/api_spec.ex @@ -0,0 +1,8 @@ +defmodule TrentoWeb.OpenApi.V2.ApiSpec do + @moduledoc """ + OpenApi specification entry point for V2 version + """ + + use TrentoWeb.OpenApi.ApiSpec, + api_version: "v2" +end diff --git a/lib/trento_web/openapi/v2/schema/cluster.ex b/lib/trento_web/openapi/v2/schema/cluster.ex new file mode 100644 index 0000000000..ed5b742d38 --- /dev/null +++ b/lib/trento_web/openapi/v2/schema/cluster.ex @@ -0,0 +1,177 @@ +defmodule TrentoWeb.OpenApi.V2.Schema.Cluster do + @moduledoc false + + require
OpenApiSpex + require Trento.Domain.Enums.ClusterType, as: ClusterType + require Trento.Domain.Enums.AscsErsClusterRole, as: AscsErsClusterRole + + alias OpenApiSpex.Schema + + alias TrentoWeb.OpenApi.V1.Schema.{Cluster, Provider, ResourceHealth, Tags} + + defmodule AscsErsClusterNode do + @moduledoc false + + OpenApiSpex.schema(%{ + title: "AscsErsClusterNode", + description: "ASCS/ERS Cluster Node", + type: :object, + properties: %{ + attributes: %Schema{ + type: :object, + description: "Node attributes", + additionalProperties: %Schema{type: :string} + }, + filesystems: %Schema{ + type: :array, + items: %Schema{type: :string}, + description: "List of filesystems managed in this node" + }, + name: %Schema{ + type: :string, + description: "Node name" + }, + resources: %Schema{ + type: :array, + items: Cluster.ClusterResource, + description: "A list of Cluster resources" + }, + roles: %Schema{ + type: :array, + items: %Schema{type: :string, enum: AscsErsClusterRole.values()}, + description: "List of roles managed in this node" + }, + virtual_ips: %Schema{ + type: :array, + items: %Schema{type: :string}, + description: "List of virtual IPs managed in this node" + } + } + }) + end + + defmodule AscsErsClusterSAPSystem do + @moduledoc false + + OpenApiSpex.schema(%{ + title: "AscsErsClusterSAPSystem", + description: "SAP system managed by a ASCS/ERS cluster", + type: :object, + properties: %{ + distributed: %Schema{ + type: :boolean, + description: "ASCS and ERS instances are distributed and running in different nodes" + }, + filesystem_resource_based: %Schema{ + type: :boolean, + description: + "ASCS and ERS filesystems are handled by the cluster with the Filesystem resource agent" + }, + nodes: %Schema{ + type: :array, + items: AscsErsClusterNode, + description: "List of ASCS/ERS nodes for this SAP system" + } + } + }) + end + + defmodule AscsErsClusterDetails do + @moduledoc false + + OpenApiSpex.schema(%{ + title: "AscsErsClusterDetails", + description: "Details of a ASCS/ERS Pacemaker Cluster", + type: :object, + properties: %{ + fencing_type: %Schema{ + type: :string, + description: "Fencing type" + }, + sap_systems: %Schema{ + type: :array, + items: AscsErsClusterSAPSystem, + description: "List of managed SAP systems in a single or multi SID cluster" + }, + sbd_devices: %Schema{ + type: :array, + items: Cluster.SbdDevice, + description: "List of SBD devices used in the cluster" + }, + stopped_resources: %Schema{ + type: :array, + items: Cluster.ClusterResource, + description: "List of the stopped resources on this HANA Cluster" + } + }, + required: [:sap_systems] + }) + end + + defmodule Details do + @moduledoc false + + OpenApiSpex.schema(%{ + title: "PacemakerClusterDetails", + description: "Details of the detected PacemakerCluster", + nullable: true, + oneOf: [ + AscsErsClusterDetails, + Cluster.HanaClusterDetails + ] + }) + end + + defmodule PacemakerCluster do + @moduledoc false + + OpenApiSpex.schema(%{ + title: "PacemakerCluster", + description: "A discovered Pacemaker Cluster on the target infrastructure", + type: :object, + properties: %{ + id: %Schema{type: :string, description: "Cluster ID", format: :uuid}, + name: %Schema{type: :string, description: "Cluster name"}, + sid: %Schema{type: :string, description: "SID"}, + additional_sids: %Schema{ + type: :array, + items: %Schema{type: :string}, + description: "Additionally discovered SIDs, such as ASCS/ERS cluster SIDs" + }, + provider: Provider.SupportedProviders, + type: %Schema{ + type: :string, + description: "Detected 
type of the cluster", + enum: ClusterType.values() + }, + selected_checks: %Schema{ + title: "SelectedChecks", + description: "A list of check ids selected for an execution on this cluster", + type: :array, + items: %Schema{type: :string} + }, + health: ResourceHealth, + resources_number: %Schema{type: :integer, description: "Resource number", nullable: true}, + hosts_number: %Schema{type: :integer, description: "Hosts number", nullable: true}, + cib_last_written: %Schema{ + type: :string, + description: "CIB last written date", + nullable: true + }, + details: Details, + tags: Tags + } + }) + end + + defmodule PacemakerClustersCollection do + @moduledoc false + + OpenApiSpex.schema(%{ + title: "PacemakerClustersCollection", + description: "A list of the discovered Pacemaker Clusters", + type: :array, + items: PacemakerCluster + }) + end +end diff --git a/lib/trento_web/plugs/api_redirector.ex b/lib/trento_web/plugs/api_redirector.ex index 486a05784a..3046af5027 100644 --- a/lib/trento_web/plugs/api_redirector.ex +++ b/lib/trento_web/plugs/api_redirector.ex @@ -1,14 +1,18 @@ defmodule TrentoWeb.Plugs.ApiRedirector do @moduledoc """ This Plug is responsible for redirecting api requests without a specific version - to the latest version, when the requested path exists + to the latest available version, when the requested path exists For example: - Requesting /api/test, will redirect to /api//test, + only if the /api//test exists, otherwise, it will continue with the next available version. + If the route doesn't match with any of the available versions, it returns a not found error. - router and latest_version options should be provided. + router and available_api_versions options should be provided. - latest_version option should be a string,which will be interpolated with the path. + `available_api_versions` option should be a list with the available version from newest to oldest. 
+ + For example: ["v3", "v2", "v1"] """ @behaviour Plug @@ -19,38 +23,50 @@ defmodule TrentoWeb.Plugs.ApiRedirector do import Plug.Conn @impl true - def init(opts), do: opts + def init(opts) do + available_api_versions = + Keyword.get(opts, :available_api_versions) || + raise ArgumentError, "expected :available_api_versions option" + + if Enum.empty?(available_api_versions), + do: raise(ArgumentError, ":available_api_versions must have 1 element at least") + + Keyword.get(opts, :router) || raise ArgumentError, "expected :router option" + + opts + end @impl true def call(%Plug.Conn{path_info: [_ | path_parts], method: method} = conn, opts) do - latest_version = Keyword.get(opts, :latest_version) router = Keyword.get(opts, :router) + available_api_versions = Keyword.get(opts, :available_api_versions) - unless latest_version do - raise ArgumentError, "expected :latest_version option" - end - - unless router do - raise ArgumentError, "expected :router option" - end + case find_versioned_path(router, available_api_versions, path_parts, method) do + nil -> + conn + |> put_resp_content_type("application/json") + |> resp(:not_found, Jason.encode!(ErrorView.render("404.json", %{detail: "Not found"}))) + |> halt() - redirect_path = build_path(latest_version, path_parts) - - if route_exists?(router, redirect_path, method) do - conn - |> put_status(307) - |> redirect(redirect_path) - |> halt() - else - conn - |> put_resp_content_type("application/json") - |> resp(:not_found, Jason.encode!(ErrorView.render("404.json", %{detail: "Not found"}))) - |> halt() + versioned_path -> + conn + |> put_status(307) + |> redirect(versioned_path) + |> halt() end end - defp build_path(version, path_parts) do - "/api/" <> version <> "/" <> Enum.join(path_parts, "/") + # Find first available versioned path. If none is found nil is returned. 
+ defp find_versioned_path(router, available_api_versions, path_parts, method) do + available_api_versions + |> Enum.map(fn version -> + ["/api", version] + |> Enum.concat(path_parts) + |> Enum.join("/") + end) + |> Enum.find_value(nil, fn path -> + if route_exists?(router, path, method), do: path + end) end defp redirect(conn, to) do diff --git a/lib/trento_web/router.ex b/lib/trento_web/router.ex index 209d6285fc..8febe7e437 100644 --- a/lib/trento_web/router.ex +++ b/lib/trento_web/router.ex @@ -2,7 +2,8 @@ defmodule TrentoWeb.Router do use TrentoWeb, :router use Pow.Phoenix.Router - @latest_api_version "v1" + # From newest to oldest + @available_api_versions ["v2", "v1"] pipeline :browser do plug :accepts, ["html"] @@ -15,10 +16,19 @@ pipeline :api do plug :accepts, ["json"] - plug OpenApiSpex.Plug.PutApiSpec, module: TrentoWeb.OpenApi.ApiSpec plug TrentoWeb.Auth.JWTAuthPlug, otp_app: :trento end + pipeline :api_v1 do + plug :api + plug OpenApiSpex.Plug.PutApiSpec, module: TrentoWeb.OpenApi.V1.ApiSpec + end + + pipeline :api_v2 do + plug :api + plug OpenApiSpex.Plug.PutApiSpec, module: TrentoWeb.OpenApi.V2.ApiSpec + end + pipeline :protected_api do plug Unplug, if: {Unplug.Predicates.AppConfigEquals, {:trento, :jwt_authentication_enabled, true}}, @@ -37,8 +47,11 @@ pipe_through :browser get "/api/doc", OpenApiSpex.Plug.SwaggerUI, - path: "/api/openapi", - urls: [%{url: "/api/openapi", name: "Version 1"}] + path: "/api/v1/openapi", + urls: [ + %{url: "/api/v1/openapi", name: "Version 1"}, + %{url: "/api/v2/openapi", name: "Version 2"} + ] end scope "/api", TrentoWeb do @@ -65,6 +78,8 @@ get "/me", TrentoWeb.SessionController, :show, as: :me scope "/v1", TrentoWeb.V1 do + pipe_through [:api_v1] + get "/about", AboutController, :info get "/installation/api-key", InstallationController, :get_api_key @@ -86,6 +101,8 @@ assigns: %{resource_type: :host}, as: :hosts_tagging + delete "/hosts/:id", HostController, :delete + delete "/hosts/:id/tags/:value", TagsController, :remove_tag, as: :hosts_tagging post "/clusters/:id/tags", TagsController, :add_tag, @@ -111,12 +128,20 @@ get "/hosts/:id/exporters_status", PrometheusController, :exporters_status end + + scope "/v2", TrentoWeb.V2 do + pipe_through [:api_v2] + + get "/clusters", ClusterController, :list + end end scope "/api" do pipe_through [:api, :apikey_authenticated] scope "/v1", TrentoWeb.V1 do + pipe_through [:api_v1] + post "/collect", DiscoveryController, :collect post "/hosts/:id/heartbeat", HostController, :heartbeat end @@ -126,20 +151,31 @@ pipe_through :api scope "/v1", TrentoWeb.V1 do + pipe_through [:api_v1] + get "/prometheus/targets", PrometheusController, :targets end end scope "/api" do pipe_through :api - get "/openapi", OpenApiSpex.Plug.RenderSpec, [] + + scope "/v1" do + pipe_through :api_v1 + get "/openapi", OpenApiSpex.Plug.RenderSpec, [] + end + + scope "/v2" do + pipe_through :api_v2 + get "/openapi", OpenApiSpex.Plug.RenderSpec, [] + end end scope "/api" do pipe_through :api match :*, "/*path/", TrentoWeb.Plugs.ApiRedirector, - latest_version: @latest_api_version, + available_api_versions: @available_api_versions, router: __MODULE__ end @@ -186,4 +222,6 @@ get "/", PageController, :index end + + def available_api_versions, do: @available_api_versions end diff --git a/lib/trento_web/templates/page/index.html.heex
b/lib/trento_web/templates/page/index.html.heex index d595200888..770db5bca1 100644 --- a/lib/trento_web/templates/page/index.html.heex +++ b/lib/trento_web/templates/page/index.html.heex @@ -3,7 +3,8 @@ diff --git a/lib/trento_web/views/v1/cluster_view.ex b/lib/trento_web/views/v1/cluster_view.ex index ced5875afe..856c0d4890 100644 --- a/lib/trento_web/views/v1/cluster_view.ex +++ b/lib/trento_web/views/v1/cluster_view.ex @@ -9,6 +9,7 @@ defmodule TrentoWeb.V1.ClusterView do cluster |> Map.from_struct() |> Map.delete(:__meta__) + |> adapt_v1() end def render("cluster_registered.json", %{cluster: cluster}) do @@ -22,30 +23,12 @@ defmodule TrentoWeb.V1.ClusterView do |> Map.put(:id, data.cluster_id) end - def render("settings.json", %{settings: settings}) do - render_many(settings, __MODULE__, "setting.json", as: :setting) - end + defp adapt_v1(%{type: type} = cluster) when type in [:hana_scale_up, :hana_scale_out, :unknown], + do: cluster - def render("setting.json", %{ - setting: %{ - host_id: host_id, - hostname: hostname, - user: user, - provider_data: provider_data - } - }) do - %{ - host_id: host_id, - hostname: hostname, - user: user, - default_user: determine_default_connection_user(provider_data) - } + defp adapt_v1(cluster) do + cluster + |> Map.replace(:type, :unknown) + |> Map.replace(:details, nil) end - - defp determine_default_connection_user(%{ - "admin_username" => admin_username - }), - do: admin_username - - defp determine_default_connection_user(_), do: "root" end diff --git a/lib/trento_web/views/v1/health_overview_view.ex b/lib/trento_web/views/v1/health_overview_view.ex index 50e5655896..f824f72820 100644 --- a/lib/trento_web/views/v1/health_overview_view.ex +++ b/lib/trento_web/views/v1/health_overview_view.ex @@ -1,7 +1,10 @@ defmodule TrentoWeb.V1.HealthOverviewView do use TrentoWeb, :view - alias Trento.DatabaseInstanceReadModel + alias Trento.{ + ApplicationInstanceReadModel, + DatabaseInstanceReadModel + } def render("overview.json", %{health_infos: health_infos}) do render_many(health_infos, __MODULE__, "health_summary.json", as: :summary) @@ -12,9 +15,11 @@ defmodule TrentoWeb.V1.HealthOverviewView do id: id, sid: sid, sapsystem_health: sapsystem_health, + application_instances: application_instances, database_instances: database_instances, database_health: database_health, - clusters_health: clusters_health, + application_cluster_health: application_cluster_health, + database_cluster_health: database_cluster_health, hosts_health: hosts_health } }) do @@ -23,27 +28,42 @@ defmodule TrentoWeb.V1.HealthOverviewView do sid: sid, sapsystem_health: sapsystem_health, database_health: database_health, - clusters_health: clusters_health, + # deprecated field + clusters_health: database_cluster_health, + application_cluster_health: application_cluster_health, + database_cluster_health: database_cluster_health, hosts_health: hosts_health, + # deprecated field cluster_id: extract_cluster_id(database_instances), + application_cluster_id: extract_cluster_id(application_instances), + database_cluster_id: extract_cluster_id(database_instances), database_id: extract_database_id(database_instances), tenant: extract_tenant(database_instances) } end - @spec extract_database_id([DatabaseInstanceReadModel.t()]) :: String.t() + @spec extract_database_id([DatabaseInstanceReadModel.t()]) :: String.t() | nil defp extract_database_id([]), do: nil defp extract_database_id([%DatabaseInstanceReadModel{sap_system_id: sap_system_id} | _]), do: sap_system_id - @spec 
extract_cluster_id([DatabaseInstanceReadModel.t()]) :: String.t() + @spec extract_cluster_id([ApplicationInstanceReadModel.t()] | [DatabaseInstanceReadModel.t()]) :: + String.t() | nil defp extract_cluster_id([]), do: nil defp extract_cluster_id([%DatabaseInstanceReadModel{host: %{cluster_id: cluster_id}} | _]), do: cluster_id - @spec extract_tenant([DatabaseInstanceReadModel.t()]) :: String.t() + defp extract_cluster_id(application_instances) do + Enum.find_value(application_instances, nil, fn + %{host: %{cluster_id: nil}} -> false + %{host: %{cluster_id: cluster_id}} -> cluster_id + _ -> false + end) + end + + @spec extract_tenant([DatabaseInstanceReadModel.t()]) :: String.t() | nil defp extract_tenant([]), do: nil defp extract_tenant([%DatabaseInstanceReadModel{tenant: tenant} | _]), diff --git a/lib/trento_web/views/v1/sap_system_view.ex b/lib/trento_web/views/v1/sap_system_view.ex index 02f702b7ca..9562af7244 100644 --- a/lib/trento_web/views/v1/sap_system_view.ex +++ b/lib/trento_web/views/v1/sap_system_view.ex @@ -121,8 +121,23 @@ defmodule TrentoWeb.V1.SapSystemView do |> Map.delete(:tags) end + def render("sap_system_updated.json", %{id: id, ensa_version: ensa_version}), + do: %{id: id, ensa_version: ensa_version} + def render("sap_system_health_changed.json", %{health: health}), do: health + def render("sap_system_deregistered.json", %{id: id, sid: sid}), do: %{id: id, sid: sid} + + def render("database_deregistered.json", %{id: id, sid: sid}), do: %{id: id, sid: sid} + + def render("instance_deregistered.json", %{ + sap_system_id: id, + instance_number: instance_number, + host_id: host_id, + sid: sid + }), + do: %{sap_system_id: id, instance_number: instance_number, host_id: host_id, sid: sid} + defp add_system_replication_status_to_secondary_instance( %{database_instances: database_instances} = sap_system ) do diff --git a/lib/trento_web/views/v2/cluster_view.ex b/lib/trento_web/views/v2/cluster_view.ex new file mode 100644 index 0000000000..c4361982d4 --- /dev/null +++ b/lib/trento_web/views/v2/cluster_view.ex @@ -0,0 +1,24 @@ +defmodule TrentoWeb.V2.ClusterView do + use TrentoWeb, :view + + def render("clusters.json", %{clusters: clusters}) do + render_many(clusters, __MODULE__, "cluster.json") + end + + def render("cluster.json", %{cluster: cluster}) do + cluster + |> Map.from_struct() + |> Map.delete(:__meta__) + end + + def render("cluster_registered.json", %{cluster: cluster}) do + Map.delete(render("cluster.json", %{cluster: cluster}), :tags) + end + + def render("cluster_details_updated.json", %{data: data}) do + data + |> Map.from_struct() + |> Map.delete(:cluster_id) + |> Map.put(:id, data.cluster_id) + end +end diff --git a/priv/repo/migrations/20230309094053_add_deregistered_at_to_host_read_model.exs b/priv/repo/migrations/20230309094053_add_deregistered_at_to_host_read_model.exs new file mode 100644 index 0000000000..a46850a063 --- /dev/null +++ b/priv/repo/migrations/20230309094053_add_deregistered_at_to_host_read_model.exs @@ -0,0 +1,9 @@ +defmodule Trento.Repo.Migrations.AddDeregisteredAtToHostReadModel do + use Ecto.Migration + + def change do + alter table(:hosts) do + add :deregistered_at, :utc_datetime_usec + end + end +end diff --git a/priv/repo/migrations/20230323161309_add_deregistered_at_to_cluster_read_model.exs b/priv/repo/migrations/20230323161309_add_deregistered_at_to_cluster_read_model.exs new file mode 100644 index 0000000000..f35faf23e4 --- /dev/null +++ b/priv/repo/migrations/20230323161309_add_deregistered_at_to_cluster_read_model.exs @@ -0,0 
+1,9 @@ +defmodule Trento.Repo.Migrations.AddDeregisteredAtToClusterReadModel do + use Ecto.Migration + + def change do + alter table(:clusters) do + add :deregistered_at, :utc_datetime_usec + end + end +end diff --git a/priv/repo/migrations/20230505124514_add_deregistered_at_to_sap_system_read_model.exs b/priv/repo/migrations/20230505124514_add_deregistered_at_to_sap_system_read_model.exs new file mode 100644 index 0000000000..b33fb7f5ce --- /dev/null +++ b/priv/repo/migrations/20230505124514_add_deregistered_at_to_sap_system_read_model.exs @@ -0,0 +1,9 @@ +defmodule Trento.Repo.Migrations.AddDeregisteredAtToSapSystemReadModel do + use Ecto.Migration + + def change do + alter table(:sap_systems) do + add :deregistered_at, :utc_datetime_usec + end + end +end diff --git a/priv/repo/migrations/20230509114833_add_additional_sids_to_cluster_read_model.exs b/priv/repo/migrations/20230509114833_add_additional_sids_to_cluster_read_model.exs new file mode 100644 index 0000000000..91678f5a6b --- /dev/null +++ b/priv/repo/migrations/20230509114833_add_additional_sids_to_cluster_read_model.exs @@ -0,0 +1,9 @@ +defmodule Trento.Repo.Migrations.AddAdditionalSidsToClusterReadModel do + use Ecto.Migration + + def change do + alter table(:clusters) do + add :additional_sids, {:array, :string}, default: [] + end + end +end diff --git a/priv/repo/migrations/20230608105745_add_deregistered_at_to_database_read_model.exs b/priv/repo/migrations/20230608105745_add_deregistered_at_to_database_read_model.exs new file mode 100644 index 0000000000..c665eb3d48 --- /dev/null +++ b/priv/repo/migrations/20230608105745_add_deregistered_at_to_database_read_model.exs @@ -0,0 +1,9 @@ +defmodule Trento.Repo.Migrations.AddDeregisteredAtToDatabaseReadModel do + use Ecto.Migration + + def change do + alter table(:databases) do + add :deregistered_at, :utc_datetime_usec + end + end +end diff --git a/priv/repo/migrations/20230613101433_add_ensa_version_to_sap_system_read_model.exs b/priv/repo/migrations/20230613101433_add_ensa_version_to_sap_system_read_model.exs new file mode 100644 index 0000000000..c50f1918d2 --- /dev/null +++ b/priv/repo/migrations/20230613101433_add_ensa_version_to_sap_system_read_model.exs @@ -0,0 +1,9 @@ +defmodule Trento.Repo.Migrations.AddEnsaVersionToSapSystemReadModel do + use Ecto.Migration + + def change do + alter table(:sap_systems) do + add :ensa_version, :string + end + end +end diff --git a/priv/repo/migrations/20230613102033_set_ensa_version_initial_value_to_sap_system_read_model.exs b/priv/repo/migrations/20230613102033_set_ensa_version_initial_value_to_sap_system_read_model.exs new file mode 100644 index 0000000000..0d4297290d --- /dev/null +++ b/priv/repo/migrations/20230613102033_set_ensa_version_initial_value_to_sap_system_read_model.exs @@ -0,0 +1,7 @@ +defmodule Trento.Repo.Migrations.SetEnsaVersionInitialValueToSapSystemReadModel do + use Ecto.Migration + + def change do + execute "UPDATE sap_systems SET ensa_version = 'no_ensa'" + end +end diff --git a/test/e2e/cypress/e2e/clusters_overview.cy.js b/test/e2e/cypress/e2e/clusters_overview.cy.js index 93e9b8c013..88f331e358 100644 --- a/test/e2e/cypress/e2e/clusters_overview.cy.js +++ b/test/e2e/cypress/e2e/clusters_overview.cy.js @@ -1,12 +1,13 @@ import { - allClusterNames, - clusterIdByName, + availableClusters, healthyClusterScenario, unhealthyClusterScenario, } from '../fixtures/clusters-overview/available_clusters'; +const clusterIdByName = (clusterName) => + availableClusters.find(({ name }) => name === clusterName).id; + 
context('Clusters Overview', () => { - const availableClusters = allClusterNames(); beforeEach(() => { cy.visit('/clusters'); cy.url().should('include', '/clusters'); @@ -21,14 +22,35 @@ context('Clusters Overview', () => { it('should have 1 pages', () => { cy.get('.tn-page-item').its('length').should('eq', 1); }); - describe('Discovered clusternames are the expected ones', () => { - availableClusters.forEach((clusterName) => { - it(`should have a cluster named ${clusterName}`, () => { - cy.get('.tn-clustername').each(($link) => { - const displayedClusterName = $link.text().trim(); - expect(availableClusters).to.include(displayedClusterName); + it('should show the expected clusters data', () => { + cy.get('.container').eq(0).as('clustersTable'); + availableClusters.forEach((cluster, index) => { + cy.get('@clustersTable') + .find('tr') + .eq(index + 1) + .find('td') + .as('clusterRow'); + + cy.get('@clustersTable') + .contains('th', 'Name') + .invoke('index') + .then((i) => { + cy.get('@clusterRow').eq(i).should('contain', cluster.name); + }); + + cy.get('@clustersTable') + .contains('th', 'SID') + .invoke('index') + .then((i) => { + cy.get('@clusterRow').eq(i).should('contain', cluster.sid); + }); + + cy.get('@clustersTable') + .contains('th', 'Type') + .invoke('index') + .then((i) => { + cy.get('@clusterRow').eq(i).should('contain', cluster.type); }); - }); }); }); describe('Unnamed cluster', () => { @@ -116,6 +138,7 @@ context('Clusters Overview', () => { taggingRules.forEach(([pattern, tag]) => { describe(`Add tag '${tag}' to all clusters with '${pattern}' in the cluster name`, () => { availableClusters + .map(({ name }) => name) .filter(clustersByMatchingPattern(pattern)) .forEach((clusterName) => { it(`should tag cluster '${clusterName}'`, () => { diff --git a/test/e2e/cypress/fixtures/clusters-overview/available_clusters.js b/test/e2e/cypress/fixtures/clusters-overview/available_clusters.js index 831c699317..9f223cdc9d 100644 --- a/test/e2e/cypress/fixtures/clusters-overview/available_clusters.js +++ b/test/e2e/cypress/fixtures/clusters-overview/available_clusters.js @@ -1,24 +1,60 @@ -const availableClusters = [ - ['8a66f8fb-5fe9-51b3-a34c-24321271a4e3', 'drbd_cluster'], - ['6bd7ec60-8cb1-5c6b-a892-29e1fd2f8380', 'drbd_cluster'], - ['c7a1e943-bf46-590b-bd26-bfc7c78def97', 'drbd_cluster'], - ['7965f822-0254-5858-abca-f6e8b4c27714', 'hana_cluster_1'], - ['fa0d74a3-9240-5d9e-99fa-61c4137acf81', 'hana_cluster_2'], - ['469e7be5-4e20-5007-b044-c6f540a87493', 'hana_cluster_3'], - ['5284f376-c1f4-5178-8966-d490df3dab4f', 'netweaver_cluster'], - ['fb861bce-d212-56b5-8786-74afd6eb58cb', 'netweaver_cluster'], - ['0eac831a-aa66-5f45-89a4-007fbd2c5714', 'netweaver_cluster'], +export const availableClusters = [ + { + id: '8a66f8fb-5fe9-51b3-a34c-24321271a4e3', + name: 'drbd_cluster', + sid: '', + type: 'Unknown', + }, + { + id: '6bd7ec60-8cb1-5c6b-a892-29e1fd2f8380', + name: 'drbd_cluster', + sid: '', + type: 'Unknown', + }, + { + id: 'c7a1e943-bf46-590b-bd26-bfc7c78def97', + name: 'drbd_cluster', + sid: '', + type: 'Unknown', + }, + { + id: '7965f822-0254-5858-abca-f6e8b4c27714', + name: 'hana_cluster_1', + sid: 'HDD', + type: 'HANA Scale Up', + }, + { + id: 'fa0d74a3-9240-5d9e-99fa-61c4137acf81', + name: 'hana_cluster_2', + sid: 'HDQ', + type: 'HANA Scale Up', + }, + { + id: '469e7be5-4e20-5007-b044-c6f540a87493', + name: 'hana_cluster_3', + sid: 'HDP', + type: 'HANA Scale Up', + }, + { + id: '0eac831a-aa66-5f45-89a4-007fbd2c5714', + name: 'netweaver_cluster', + sid: 'NWP', + type: 'ASCS/ERS', 
+ }, + { + id: '5284f376-c1f4-5178-8966-d490df3dab4f', + name: 'netweaver_cluster', + sid: 'NWD', + type: 'ASCS/ERS', + }, + { + id: 'fb861bce-d212-56b5-8786-74afd6eb58cb', + name: 'netweaver_cluster', + sid: 'NWQ', + type: 'ASCS/ERS', + }, ]; -export const allClusterNames = () => - availableClusters.map(([_, clusterName]) => clusterName); -export const allClusterIds = () => - availableClusters.map(([clusterId, _]) => clusterId); -export const clusterIdByName = (clusterName) => - availableClusters.find(([, name]) => name === clusterName)[0]; -export const clusterNameById = (clusterId) => - availableClusters.find(([id]) => id === clusterId)[1]; - export const healthyClusterScenario = { clusterName: 'hana_cluster_2', checks: ['156F64'], diff --git a/test/fixtures/discovery/ha_cluster_discovery_ascs_ers.json b/test/fixtures/discovery/ha_cluster_discovery_ascs_ers.json new file mode 100644 index 0000000000..87082f91f0 --- /dev/null +++ b/test/fixtures/discovery/ha_cluster_discovery_ascs_ers.json @@ -0,0 +1,726 @@ +{ + "agent_id": "4b30a6af-4b52-5bda-bccb-f2248a12c992", + "discovery_type": "ha_cluster_discovery", + "payload": { + "DC": false, + "Provider": "azure", + "Id": "8bca366a6cb7816555538092a1ddd5aa", + "Cib": { + "Configuration": { + "Nodes": [ + { + "Id": "1", + "Uname": "vmnwprd01", + "InstanceAttributes": null + }, + { + "Id": "2", + "Uname": "vmnwprd02", + "InstanceAttributes": null + } + ], + "CrmConfig": { + "ClusterProperties": [ + { + "Id": "cib-bootstrap-options-have-watchdog", + "Name": "have-watchdog", + "Value": "true" + }, + { + "Id": "cib-bootstrap-options-dc-version", + "Name": "dc-version", + "Value": "2.0.5+20201202.ba59be712-4.13.1-2.0.5+20201202.ba59be712" + }, + { + "Id": "cib-bootstrap-options-cluster-infrastructure", + "Name": "cluster-infrastructure", + "Value": "corosync" + }, + { + "Id": "cib-bootstrap-options-cluster-name", + "Name": "cluster-name", + "Value": "netweaver_cluster" + }, + { + "Id": "cib-bootstrap-options-stonith-enabled", + "Name": "stonith-enabled", + "Value": "true" + }, + { + "Id": "cib-bootstrap-options-stonith-timeout", + "Name": "stonith-timeout", + "Value": "144s" + }, + { + "Id": "cib-bootstrap-options-maintenance-mode", + "Name": "maintenance-mode", + "Value": "false" + } + ] + }, + "Resources": { + "Clones": null, + "Groups": [ + { + "Id": "grp_NWP_ASCS00", + "Primitives": [ + { + "Id": "rsc_ip_NWP_ASCS00", + "Type": "IPaddr2", + "Class": "ocf", + "Provider": "heartbeat", + "Operations": [ + { + "Id": "rsc_ip_NWP_ASCS00-monitor-10s", + "Name": "monitor", + "Role": "", + "Timeout": "20s", + "Interval": "10s" + } + ], + "MetaAttributes": null, + "InstanceAttributes": [ + { + "Id": "rsc_ip_NWP_ASCS00-instance_attributes-ip", + "Name": "ip", + "Value": "10.80.1.25" + } + ] + }, + { + "Id": "rsc_fs_NWP_ASCS00", + "Type": "Filesystem", + "Class": "ocf", + "Provider": "heartbeat", + "Operations": [ + { + "Id": "rsc_fs_NWP_ASCS00-start-0", + "Name": "start", + "Role": "", + "Timeout": "60s", + "Interval": "0" + }, + { + "Id": "rsc_fs_NWP_ASCS00-stop-0", + "Name": "stop", + "Role": "", + "Timeout": "60s", + "Interval": "0" + }, + { + "Id": "rsc_fs_NWP_ASCS00-monitor-20s", + "Name": "monitor", + "Role": "", + "Timeout": "40s", + "Interval": "20s" + } + ], + "MetaAttributes": null, + "InstanceAttributes": [ + { + "Id": "rsc_fs_NWP_ASCS00-instance_attributes-device", + "Name": "device", + "Value": "10.80.1.33:/NWP/ASCS" + }, + { + "Id": "rsc_fs_NWP_ASCS00-instance_attributes-directory", + "Name": "directory", + "Value": "/usr/sap/NWP/ASCS00" + }, + 
{ + "Id": "rsc_fs_NWP_ASCS00-instance_attributes-fstype", + "Name": "fstype", + "Value": "nfs4" + } + ] + }, + { + "Id": "rsc_sap_NWP_ASCS00", + "Type": "SAPInstance", + "Class": "ocf", + "Provider": "heartbeat", + "Operations": [ + { + "Id": "rsc_sap_NWP_ASCS00-operations-monitor-120", + "Name": "monitor", + "Role": "", + "Timeout": "60", + "Interval": "120" + } + ], + "MetaAttributes": [ + { + "Id": "rsc_sap_NWP_ASCS00-meta_attributes-resource-stickiness", + "Name": "resource-stickiness", + "Value": "5000" + }, + { + "Id": "rsc_sap_NWP_ASCS00-meta_attributes-failure-timeout", + "Name": "failure-timeout", + "Value": "60" + }, + { + "Id": "rsc_sap_NWP_ASCS00-meta_attributes-migration-threshold", + "Name": "migration-threshold", + "Value": "1" + }, + { + "Id": "rsc_sap_NWP_ASCS00-meta_attributes-priority", + "Name": "priority", + "Value": "10" + } + ], + "InstanceAttributes": [ + { + "Id": "rsc_sap_NWP_ASCS00-instance_attributes-InstanceName", + "Name": "InstanceName", + "Value": "NWP_ASCS00_sapnwpas" + }, + { + "Id": "rsc_sap_NWP_ASCS00-instance_attributes-START_PROFILE", + "Name": "START_PROFILE", + "Value": "/sapmnt/NWP/profile/NWP_ASCS00_sapnwpas" + }, + { + "Id": "rsc_sap_NWP_ASCS00-instance_attributes-AUTOMATIC_RECOVER", + "Name": "AUTOMATIC_RECOVER", + "Value": "false" + } + ] + }, + { + "Id": "rsc_socat_NWP_ASCS00", + "Type": "azure-lb", + "Class": "ocf", + "Provider": "heartbeat", + "Operations": [ + { + "Id": "rsc_socat_NWP_ASCS00-monitor-10", + "Name": "monitor", + "Role": "", + "Timeout": "20s", + "Interval": "10" + } + ], + "MetaAttributes": null, + "InstanceAttributes": [ + { + "Id": "rsc_socat_NWP_ASCS00-instance_attributes-port", + "Name": "port", + "Value": "62000" + } + ] + } + ] + }, + { + "Id": "grp_NWP_ERS10", + "Primitives": [ + { + "Id": "rsc_ip_NWP_ERS10", + "Type": "IPaddr2", + "Class": "ocf", + "Provider": "heartbeat", + "Operations": [ + { + "Id": "rsc_ip_NWP_ERS10-monitor-10s", + "Name": "monitor", + "Role": "", + "Timeout": "20s", + "Interval": "10s" + } + ], + "MetaAttributes": null, + "InstanceAttributes": [ + { + "Id": "rsc_ip_NWP_ERS10-instance_attributes-ip", + "Name": "ip", + "Value": "10.80.1.26" + } + ] + }, + { + "Id": "rsc_fs_NWP_ERS10", + "Type": "Filesystem", + "Class": "ocf", + "Provider": "heartbeat", + "Operations": [ + { + "Id": "rsc_fs_NWP_ERS10-start-0", + "Name": "start", + "Role": "", + "Timeout": "60s", + "Interval": "0" + }, + { + "Id": "rsc_fs_NWP_ERS10-stop-0", + "Name": "stop", + "Role": "", + "Timeout": "60s", + "Interval": "0" + }, + { + "Id": "rsc_fs_NWP_ERS10-monitor-20s", + "Name": "monitor", + "Role": "", + "Timeout": "40s", + "Interval": "20s" + } + ], + "MetaAttributes": null, + "InstanceAttributes": [ + { + "Id": "rsc_fs_NWP_ERS10-instance_attributes-device", + "Name": "device", + "Value": "10.80.1.33:/NWP/ERS" + }, + { + "Id": "rsc_fs_NWP_ERS10-instance_attributes-directory", + "Name": "directory", + "Value": "/usr/sap/NWP/ERS10" + }, + { + "Id": "rsc_fs_NWP_ERS10-instance_attributes-fstype", + "Name": "fstype", + "Value": "nfs4" + } + ] + }, + { + "Id": "rsc_sap_NWP_ERS10", + "Type": "SAPInstance", + "Class": "ocf", + "Provider": "heartbeat", + "Operations": [ + { + "Id": "rsc_sap_NWP_ERS10-operations-monitor-120", + "Name": "monitor", + "Role": "", + "Timeout": "60", + "Interval": "120" + } + ], + "MetaAttributes": [ + { + "Id": "rsc_sap_NWP_ERS10-meta_attributes-priority", + "Name": "priority", + "Value": "1000" + } + ], + "InstanceAttributes": [ + { + "Id": "rsc_sap_NWP_ERS10-instance_attributes-InstanceName", + "Name": 
"InstanceName", + "Value": "NWP_ERS10_sapnwper" + }, + { + "Id": "rsc_sap_NWP_ERS10-instance_attributes-START_PROFILE", + "Name": "START_PROFILE", + "Value": "/sapmnt/NWP/profile/NWP_ERS10_sapnwper" + }, + { + "Id": "rsc_sap_NWP_ERS10-instance_attributes-AUTOMATIC_RECOVER", + "Name": "AUTOMATIC_RECOVER", + "Value": "false" + }, + { + "Id": "rsc_sap_NWP_ERS10-instance_attributes-IS_ERS", + "Name": "IS_ERS", + "Value": "true" + } + ] + }, + { + "Id": "rsc_socat_NWP_ERS10", + "Type": "azure-lb", + "Class": "ocf", + "Provider": "heartbeat", + "Operations": [ + { + "Id": "rsc_socat_NWP_ERS10-monitor-10", + "Name": "monitor", + "Role": "", + "Timeout": "20s", + "Interval": "10" + } + ], + "MetaAttributes": null, + "InstanceAttributes": [ + { + "Id": "rsc_socat_NWP_ERS10-instance_attributes-port", + "Name": "port", + "Value": "62110" + } + ] + } + ] + } + ], + "Masters": null, + "Primitives": [ + { + "Id": "stonith-sbd", + "Type": "external/sbd", + "Class": "stonith", + "Provider": "", + "Operations": null, + "MetaAttributes": null, + "InstanceAttributes": [ + { + "Id": "stonith-sbd-instance_attributes-pcmk_delay_max", + "Name": "pcmk_delay_max", + "Value": "30s" + } + ] + } + ] + }, + "Constraints": { + "RscLocations": [ + { + "Id": "loc_sap_NWP_failover_to_ers", + "Node": "", + "Role": "", + "Score": "", + "Resource": "rsc_sap_NWP_ASCS00" + } + ] + } + } + }, + "SBD": { + "Config": { + "SBD_OPTS": "", + "SBD_DEVICE": "/dev/disk/by-id/scsi-SLIO-ORG_IBLOCK_e34218cd-0d9a-4b21-b6d5-a313980baa82", + "SBD_PACEMAKER": "yes", + "SBD_STARTMODE": "always", + "SBD_DELAY_START": "yes", + "SBD_WATCHDOG_DEV": "/dev/watchdog", + "SBD_TIMEOUT_ACTION": "flush,reboot", + "SBD_WATCHDOG_TIMEOUT": "5", + "SBD_MOVE_TO_ROOT_CGROUP": "auto", + "SBD_SYNC_RESOURCE_STARTUP": "no" + }, + "Devices": [ + { + "Dump": { + "Uuid": "e0c97fe2-f63a-4fd1-83df-9a736a03b49b", + "Slots": 255, + "Header": "2.1", + "SectorSize": 512, + "TimeoutLoop": 1, + "TimeoutMsgwait": 10, + "TimeoutAllocate": 2, + "TimeoutWatchdog": 5 + }, + "List": [ + { + "Id": 0, + "Name": "vmnwprd01", + "Status": "clear" + }, + { + "Id": 1, + "Name": "vmnwprd02", + "Status": "clear" + } + ], + "Device": "/dev/disk/by-id/scsi-SLIO-ORG_IBLOCK_e34218cd-0d9a-4b21-b6d5-a313980baa82", + "Status": "healthy" + } + ] + }, + "Name": "netweaver_cluster", + "Crmmon": { + "Nodes": [ + { + "DC": true, + "Id": "1", + "Name": "vmnwprd01", + "Type": "member", + "Online": true, + "Pending": false, + "Standby": false, + "Unclean": false, + "Shutdown": false, + "ExpectedUp": true, + "Maintenance": false, + "StandbyOnFail": false, + "ResourcesRunning": 5 + }, + { + "DC": false, + "Id": "2", + "Name": "vmnwprd02", + "Type": "member", + "Online": true, + "Pending": false, + "Standby": false, + "Unclean": false, + "Shutdown": false, + "ExpectedUp": true, + "Maintenance": false, + "StandbyOnFail": false, + "ResourcesRunning": 4 + } + ], + "Clones": null, + "Groups": [ + { + "Id": "grp_NWP_ASCS00", + "Resources": [ + { + "Id": "rsc_ip_NWP_ASCS00", + "Node": { + "Id": "1", + "Name": "vmnwprd01", + "Cached": true + }, + "Role": "Started", + "Agent": "ocf::heartbeat:IPaddr2", + "Active": true, + "Failed": false, + "Blocked": false, + "Managed": true, + "Orphaned": false, + "FailureIgnored": false, + "NodesRunningOn": 1 + }, + { + "Id": "rsc_fs_NWP_ASCS00", + "Node": { + "Id": "1", + "Name": "vmnwprd01", + "Cached": true + }, + "Role": "Started", + "Agent": "ocf::heartbeat:Filesystem", + "Active": true, + "Failed": false, + "Blocked": false, + "Managed": true, + "Orphaned": false, + 
"FailureIgnored": false, + "NodesRunningOn": 1 + }, + { + "Id": "rsc_sap_NWP_ASCS00", + "Node": { + "Id": "1", + "Name": "vmnwprd01", + "Cached": true + }, + "Role": "Started", + "Agent": "ocf::heartbeat:SAPInstance", + "Active": true, + "Failed": false, + "Blocked": false, + "Managed": true, + "Orphaned": false, + "FailureIgnored": false, + "NodesRunningOn": 1 + }, + { + "Id": "rsc_socat_NWP_ASCS00", + "Node": { + "Id": "1", + "Name": "vmnwprd01", + "Cached": true + }, + "Role": "Started", + "Agent": "ocf::heartbeat:azure-lb", + "Active": true, + "Failed": false, + "Blocked": false, + "Managed": true, + "Orphaned": false, + "FailureIgnored": false, + "NodesRunningOn": 1 + } + ] + }, + { + "Id": "grp_NWP_ERS10", + "Resources": [ + { + "Id": "rsc_ip_NWP_ERS10", + "Node": { + "Id": "2", + "Name": "vmnwprd02", + "Cached": true + }, + "Role": "Started", + "Agent": "ocf::heartbeat:IPaddr2", + "Active": true, + "Failed": false, + "Blocked": false, + "Managed": true, + "Orphaned": false, + "FailureIgnored": false, + "NodesRunningOn": 1 + }, + { + "Id": "rsc_fs_NWP_ERS10", + "Node": { + "Id": "2", + "Name": "vmnwprd02", + "Cached": true + }, + "Role": "Started", + "Agent": "ocf::heartbeat:Filesystem", + "Active": true, + "Failed": false, + "Blocked": false, + "Managed": true, + "Orphaned": false, + "FailureIgnored": false, + "NodesRunningOn": 1 + }, + { + "Id": "rsc_sap_NWP_ERS10", + "Node": { + "Id": "2", + "Name": "vmnwprd02", + "Cached": true + }, + "Role": "Started", + "Agent": "ocf::heartbeat:SAPInstance", + "Active": true, + "Failed": false, + "Blocked": false, + "Managed": true, + "Orphaned": false, + "FailureIgnored": false, + "NodesRunningOn": 1 + }, + { + "Id": "rsc_socat_NWP_ERS10", + "Node": { + "Id": "2", + "Name": "vmnwprd02", + "Cached": true + }, + "Role": "Started", + "Agent": "ocf::heartbeat:azure-lb", + "Active": true, + "Failed": false, + "Blocked": false, + "Managed": true, + "Orphaned": false, + "FailureIgnored": false, + "NodesRunningOn": 1 + } + ] + } + ], + "Summary": { + "Nodes": { + "Number": 2 + }, + "Resources": { + "Number": 9, + "Blocked": 0, + "Disabled": 0 + }, + "LastChange": { + "Time": "Tue Jan 11 13:43:06 2022" + }, + "ClusterOptions": { + "StonithEnabled": true + } + }, + "Version": "2.0.5", + "Resources": [ + { + "Id": "stonith-sbd", + "Node": { + "Id": "1", + "Name": "vmnwprd01", + "Cached": true + }, + "Role": "Started", + "Agent": "stonith:external/sbd", + "Active": true, + "Failed": false, + "Blocked": false, + "Managed": true, + "Orphaned": false, + "FailureIgnored": false, + "NodesRunningOn": 1 + } + ], + "NodeHistory": { + "Nodes": [ + { + "Name": "vmnwprd01", + "ResourceHistory": [ + { + "Name": "rsc_ip_NWP_ASCS00", + "FailCount": 0, + "MigrationThreshold": 3 + }, + { + "Name": "stonith-sbd", + "FailCount": 0, + "MigrationThreshold": 3 + }, + { + "Name": "rsc_socat_NWP_ASCS00", + "FailCount": 0, + "MigrationThreshold": 3 + }, + { + "Name": "rsc_fs_NWP_ASCS00", + "FailCount": 0, + "MigrationThreshold": 3 + }, + { + "Name": "rsc_sap_NWP_ASCS00", + "FailCount": 0, + "MigrationThreshold": 1 + } + ] + }, + { + "Name": "vmnwprd02", + "ResourceHistory": [ + { + "Name": "rsc_ip_NWP_ERS10", + "FailCount": 0, + "MigrationThreshold": 3 + }, + { + "Name": "rsc_fs_NWP_ERS10", + "FailCount": 0, + "MigrationThreshold": 3 + }, + { + "Name": "rsc_sap_NWP_ERS10", + "FailCount": 0, + "MigrationThreshold": 3 + }, + { + "Name": "rsc_socat_NWP_ERS10", + "FailCount": 0, + "MigrationThreshold": 3 + } + ] + } + ] + }, + "NodeAttributes": { + "Nodes": [ + { + "Name": 
"vmnwprd02", + "Attributes": [ + { + "Name": "runs_ers_NWP", + "Value": "1" + } + ] + } + ] + } + } + } +} diff --git a/test/fixtures/discovery/ha_cluster_discovery_ascs_ers_invalid.json b/test/fixtures/discovery/ha_cluster_discovery_ascs_ers_invalid.json new file mode 100644 index 0000000000..a546facdac --- /dev/null +++ b/test/fixtures/discovery/ha_cluster_discovery_ascs_ers_invalid.json @@ -0,0 +1,490 @@ +{ + "agent_id": "4b30a6af-4b52-5bda-bccb-f2248a12c992", + "discovery_type": "ha_cluster_discovery", + "payload": { + "DC": false, + "Provider": "azure", + "Id": "8bca366a6cb7816555538092a1ddd5aa", + "Cib": { + "Configuration": { + "Nodes": [ + { + "Id": "1", + "Uname": "vmnwprd01", + "InstanceAttributes": null + }, + { + "Id": "2", + "Uname": "vmnwprd02", + "InstanceAttributes": null + } + ], + "CrmConfig": { + "ClusterProperties": [ + { + "Id": "cib-bootstrap-options-have-watchdog", + "Name": "have-watchdog", + "Value": "true" + }, + { + "Id": "cib-bootstrap-options-dc-version", + "Name": "dc-version", + "Value": "2.0.5+20201202.ba59be712-4.13.1-2.0.5+20201202.ba59be712" + }, + { + "Id": "cib-bootstrap-options-cluster-infrastructure", + "Name": "cluster-infrastructure", + "Value": "corosync" + }, + { + "Id": "cib-bootstrap-options-cluster-name", + "Name": "cluster-name", + "Value": "netweaver_cluster" + }, + { + "Id": "cib-bootstrap-options-stonith-enabled", + "Name": "stonith-enabled", + "Value": "true" + }, + { + "Id": "cib-bootstrap-options-stonith-timeout", + "Name": "stonith-timeout", + "Value": "144s" + }, + { + "Id": "cib-bootstrap-options-maintenance-mode", + "Name": "maintenance-mode", + "Value": "false" + } + ] + }, + "Resources": { + "Clones": null, + "Groups": [ + { + "Id": "grp_NWP_ASCS00", + "Primitives": [ + { + "Id": "rsc_ip_NWP_ASCS00", + "Type": "IPaddr2", + "Class": "ocf", + "Provider": "heartbeat", + "Operations": [ + { + "Id": "rsc_ip_NWP_ASCS00-monitor-10s", + "Name": "monitor", + "Role": "", + "Timeout": "20s", + "Interval": "10s" + } + ], + "MetaAttributes": null, + "InstanceAttributes": [ + { + "Id": "rsc_ip_NWP_ASCS00-instance_attributes-ip", + "Name": "ip", + "Value": "10.80.1.25" + } + ] + }, + { + "Id": "rsc_fs_NWP_ASCS00", + "Type": "Filesystem", + "Class": "ocf", + "Provider": "heartbeat", + "Operations": [ + { + "Id": "rsc_fs_NWP_ASCS00-start-0", + "Name": "start", + "Role": "", + "Timeout": "60s", + "Interval": "0" + }, + { + "Id": "rsc_fs_NWP_ASCS00-stop-0", + "Name": "stop", + "Role": "", + "Timeout": "60s", + "Interval": "0" + }, + { + "Id": "rsc_fs_NWP_ASCS00-monitor-20s", + "Name": "monitor", + "Role": "", + "Timeout": "40s", + "Interval": "20s" + } + ], + "MetaAttributes": null, + "InstanceAttributes": [ + { + "Id": "rsc_fs_NWP_ASCS00-instance_attributes-device", + "Name": "device", + "Value": "10.80.1.33:/NWP/ASCS" + }, + { + "Id": "rsc_fs_NWP_ASCS00-instance_attributes-directory", + "Name": "directory", + "Value": "/usr/sap/NWP/ASCS00" + }, + { + "Id": "rsc_fs_NWP_ASCS00-instance_attributes-fstype", + "Name": "fstype", + "Value": "nfs4" + } + ] + }, + { + "Id": "rsc_sap_NWP_ASCS00", + "Type": "SAPInstance", + "Class": "ocf", + "Provider": "heartbeat", + "Operations": [ + { + "Id": "rsc_sap_NWP_ASCS00-operations-monitor-120", + "Name": "monitor", + "Role": "", + "Timeout": "60", + "Interval": "120" + } + ], + "MetaAttributes": [ + { + "Id": "rsc_sap_NWP_ASCS00-meta_attributes-resource-stickiness", + "Name": "resource-stickiness", + "Value": "5000" + }, + { + "Id": "rsc_sap_NWP_ASCS00-meta_attributes-failure-timeout", + "Name": "failure-timeout", 
+ "Value": "60" + }, + { + "Id": "rsc_sap_NWP_ASCS00-meta_attributes-migration-threshold", + "Name": "migration-threshold", + "Value": "1" + }, + { + "Id": "rsc_sap_NWP_ASCS00-meta_attributes-priority", + "Name": "priority", + "Value": "10" + } + ], + "InstanceAttributes": [ + { + "Id": "rsc_sap_NWP_ASCS00-instance_attributes-InstanceName", + "Name": "InstanceName", + "Value": "NWP_ASCS00_sapnwpas" + }, + { + "Id": "rsc_sap_NWP_ASCS00-instance_attributes-START_PROFILE", + "Name": "START_PROFILE", + "Value": "/sapmnt/NWP/profile/NWP_ASCS00_sapnwpas" + }, + { + "Id": "rsc_sap_NWP_ASCS00-instance_attributes-AUTOMATIC_RECOVER", + "Name": "AUTOMATIC_RECOVER", + "Value": "false" + } + ] + }, + { + "Id": "rsc_socat_NWP_ASCS00", + "Type": "azure-lb", + "Class": "ocf", + "Provider": "heartbeat", + "Operations": [ + { + "Id": "rsc_socat_NWP_ASCS00-monitor-10", + "Name": "monitor", + "Role": "", + "Timeout": "20s", + "Interval": "10" + } + ], + "MetaAttributes": null, + "InstanceAttributes": [ + { + "Id": "rsc_socat_NWP_ASCS00-instance_attributes-port", + "Name": "port", + "Value": "62000" + } + ] + } + ] + } + ], + "Masters": null, + "Primitives": [ + { + "Id": "stonith-sbd", + "Type": "external/sbd", + "Class": "stonith", + "Provider": "", + "Operations": null, + "MetaAttributes": null, + "InstanceAttributes": [ + { + "Id": "stonith-sbd-instance_attributes-pcmk_delay_max", + "Name": "pcmk_delay_max", + "Value": "30s" + } + ] + } + ] + }, + "Constraints": { + "RscLocations": [ + { + "Id": "loc_sap_NWP_failover_to_ers", + "Node": "", + "Role": "", + "Score": "", + "Resource": "rsc_sap_NWP_ASCS00" + } + ] + } + } + }, + "SBD": { + "Config": { + "SBD_OPTS": "", + "SBD_DEVICE": "/dev/disk/by-id/scsi-SLIO-ORG_IBLOCK_e34218cd-0d9a-4b21-b6d5-a313980baa82", + "SBD_PACEMAKER": "yes", + "SBD_STARTMODE": "always", + "SBD_DELAY_START": "yes", + "SBD_WATCHDOG_DEV": "/dev/watchdog", + "SBD_TIMEOUT_ACTION": "flush,reboot", + "SBD_WATCHDOG_TIMEOUT": "5", + "SBD_MOVE_TO_ROOT_CGROUP": "auto", + "SBD_SYNC_RESOURCE_STARTUP": "no" + }, + "Devices": [ + { + "Dump": { + "Uuid": "e0c97fe2-f63a-4fd1-83df-9a736a03b49b", + "Slots": 255, + "Header": "2.1", + "SectorSize": 512, + "TimeoutLoop": 1, + "TimeoutMsgwait": 10, + "TimeoutAllocate": 2, + "TimeoutWatchdog": 5 + }, + "List": [ + { + "Id": 0, + "Name": "vmnwprd01", + "Status": "clear" + }, + { + "Id": 1, + "Name": "vmnwprd02", + "Status": "clear" + } + ], + "Device": "/dev/disk/by-id/scsi-SLIO-ORG_IBLOCK_e34218cd-0d9a-4b21-b6d5-a313980baa82", + "Status": "healthy" + } + ] + }, + "Name": "netweaver_cluster", + "Crmmon": { + "Nodes": [ + { + "DC": true, + "Id": "1", + "Name": "vmnwprd01", + "Type": "member", + "Online": true, + "Pending": false, + "Standby": false, + "Unclean": false, + "Shutdown": false, + "ExpectedUp": true, + "Maintenance": false, + "StandbyOnFail": false, + "ResourcesRunning": 5 + }, + { + "DC": false, + "Id": "2", + "Name": "vmnwprd02", + "Type": "member", + "Online": true, + "Pending": false, + "Standby": false, + "Unclean": false, + "Shutdown": false, + "ExpectedUp": true, + "Maintenance": false, + "StandbyOnFail": false, + "ResourcesRunning": 4 + } + ], + "Clones": null, + "Groups": [ + { + "Id": "grp_NWP_ASCS00", + "Resources": [ + { + "Id": "rsc_ip_NWP_ASCS00", + "Node": { + "Id": "1", + "Name": "vmnwprd01", + "Cached": true + }, + "Role": "Started", + "Agent": "ocf::heartbeat:IPaddr2", + "Active": true, + "Failed": false, + "Blocked": false, + "Managed": true, + "Orphaned": false, + "FailureIgnored": false, + "NodesRunningOn": 1 + }, + { + "Id": 
"rsc_fs_NWP_ASCS00", + "Node": { + "Id": "1", + "Name": "vmnwprd01", + "Cached": true + }, + "Role": "Started", + "Agent": "ocf::heartbeat:Filesystem", + "Active": true, + "Failed": false, + "Blocked": false, + "Managed": true, + "Orphaned": false, + "FailureIgnored": false, + "NodesRunningOn": 1 + }, + { + "Id": "rsc_sap_NWP_ASCS00", + "Node": { + "Id": "1", + "Name": "vmnwprd01", + "Cached": true + }, + "Role": "Started", + "Agent": "ocf::heartbeat:SAPInstance", + "Active": true, + "Failed": false, + "Blocked": false, + "Managed": true, + "Orphaned": false, + "FailureIgnored": false, + "NodesRunningOn": 1 + }, + { + "Id": "rsc_socat_NWP_ASCS00", + "Node": { + "Id": "1", + "Name": "vmnwprd01", + "Cached": true + }, + "Role": "Started", + "Agent": "ocf::heartbeat:azure-lb", + "Active": true, + "Failed": false, + "Blocked": false, + "Managed": true, + "Orphaned": false, + "FailureIgnored": false, + "NodesRunningOn": 1 + } + ] + } + ], + "Summary": { + "Nodes": { + "Number": 2 + }, + "Resources": { + "Number": 5, + "Blocked": 0, + "Disabled": 0 + }, + "LastChange": { + "Time": "Tue Jan 11 13:43:06 2022" + }, + "ClusterOptions": { + "StonithEnabled": true + } + }, + "Version": "2.0.5", + "Resources": [ + { + "Id": "stonith-sbd", + "Node": { + "Id": "1", + "Name": "vmnwprd01", + "Cached": true + }, + "Role": "Started", + "Agent": "stonith:external/sbd", + "Active": true, + "Failed": false, + "Blocked": false, + "Managed": true, + "Orphaned": false, + "FailureIgnored": false, + "NodesRunningOn": 1 + } + ], + "NodeHistory": { + "Nodes": [ + { + "Name": "vmnwprd01", + "ResourceHistory": [ + { + "Name": "rsc_ip_NWP_ASCS00", + "FailCount": 0, + "MigrationThreshold": 3 + }, + { + "Name": "stonith-sbd", + "FailCount": 0, + "MigrationThreshold": 3 + }, + { + "Name": "rsc_socat_NWP_ASCS00", + "FailCount": 0, + "MigrationThreshold": 3 + }, + { + "Name": "rsc_fs_NWP_ASCS00", + "FailCount": 0, + "MigrationThreshold": 3 + }, + { + "Name": "rsc_sap_NWP_ASCS00", + "FailCount": 0, + "MigrationThreshold": 1 + } + ] + }, + { + "Name": "vmnwprd02", + "ResourceHistory": [] + } + ] + }, + "NodeAttributes": { + "Nodes": [ + { + "Name": "vmnwprd02", + "Attributes": [ + { + "Name": "runs_ers_NWP", + "Value": "1" + } + ] + } + ] + } + } + } +} diff --git a/test/fixtures/discovery/ha_cluster_discovery_ascs_ers_multi_sid.json b/test/fixtures/discovery/ha_cluster_discovery_ascs_ers_multi_sid.json new file mode 100644 index 0000000000..19379a75b3 --- /dev/null +++ b/test/fixtures/discovery/ha_cluster_discovery_ascs_ers_multi_sid.json @@ -0,0 +1,1210 @@ +{ + "agent_id": "4b30a6af-4b52-5bda-bccb-f2248a12c992", + "discovery_type": "ha_cluster_discovery", + "payload": { + "DC": false, + "Provider": "azure", + "Id": "8bca366a6cb7816555538092a1ddd5aa", + "Cib": { + "Configuration": { + "Nodes": [ + { + "Id": "1", + "Uname": "vmnwprd01", + "InstanceAttributes": null + }, + { + "Id": "2", + "Uname": "vmnwprd02", + "InstanceAttributes": null + } + ], + "CrmConfig": { + "ClusterProperties": [ + { + "Id": "cib-bootstrap-options-have-watchdog", + "Name": "have-watchdog", + "Value": "true" + }, + { + "Id": "cib-bootstrap-options-dc-version", + "Name": "dc-version", + "Value": "2.0.5+20201202.ba59be712-4.13.1-2.0.5+20201202.ba59be712" + }, + { + "Id": "cib-bootstrap-options-cluster-infrastructure", + "Name": "cluster-infrastructure", + "Value": "corosync" + }, + { + "Id": "cib-bootstrap-options-cluster-name", + "Name": "cluster-name", + "Value": "netweaver_cluster" + }, + { + "Id": "cib-bootstrap-options-stonith-enabled", + "Name": 
"stonith-enabled", + "Value": "true" + }, + { + "Id": "cib-bootstrap-options-stonith-timeout", + "Name": "stonith-timeout", + "Value": "144s" + }, + { + "Id": "cib-bootstrap-options-maintenance-mode", + "Name": "maintenance-mode", + "Value": "false" + } + ] + }, + "Resources": { + "Clones": null, + "Groups": [ + { + "Id": "grp_NWP_ASCS00", + "Primitives": [ + { + "Id": "rsc_ip_NWP_ASCS00", + "Type": "IPaddr2", + "Class": "ocf", + "Provider": "heartbeat", + "Operations": [ + { + "Id": "rsc_ip_NWP_ASCS00-monitor-10s", + "Name": "monitor", + "Role": "", + "Timeout": "20s", + "Interval": "10s" + } + ], + "MetaAttributes": null, + "InstanceAttributes": [ + { + "Id": "rsc_ip_NWP_ASCS00-instance_attributes-ip", + "Name": "ip", + "Value": "10.80.1.25" + } + ] + }, + { + "Id": "rsc_fs_NWP_ASCS00", + "Type": "Filesystem", + "Class": "ocf", + "Provider": "heartbeat", + "Operations": [ + { + "Id": "rsc_fs_NWP_ASCS00-start-0", + "Name": "start", + "Role": "", + "Timeout": "60s", + "Interval": "0" + }, + { + "Id": "rsc_fs_NWP_ASCS00-stop-0", + "Name": "stop", + "Role": "", + "Timeout": "60s", + "Interval": "0" + }, + { + "Id": "rsc_fs_NWP_ASCS00-monitor-20s", + "Name": "monitor", + "Role": "", + "Timeout": "40s", + "Interval": "20s" + } + ], + "MetaAttributes": null, + "InstanceAttributes": [ + { + "Id": "rsc_fs_NWP_ASCS00-instance_attributes-device", + "Name": "device", + "Value": "10.80.1.33:/NWP/ASCS" + }, + { + "Id": "rsc_fs_NWP_ASCS00-instance_attributes-directory", + "Name": "directory", + "Value": "/usr/sap/NWP/ASCS00" + }, + { + "Id": "rsc_fs_NWP_ASCS00-instance_attributes-fstype", + "Name": "fstype", + "Value": "nfs4" + } + ] + }, + { + "Id": "rsc_sap_NWP_ASCS00", + "Type": "SAPInstance", + "Class": "ocf", + "Provider": "heartbeat", + "Operations": [ + { + "Id": "rsc_sap_NWP_ASCS00-operations-monitor-120", + "Name": "monitor", + "Role": "", + "Timeout": "60", + "Interval": "120" + } + ], + "MetaAttributes": [ + { + "Id": "rsc_sap_NWP_ASCS00-meta_attributes-resource-stickiness", + "Name": "resource-stickiness", + "Value": "5000" + }, + { + "Id": "rsc_sap_NWP_ASCS00-meta_attributes-failure-timeout", + "Name": "failure-timeout", + "Value": "60" + }, + { + "Id": "rsc_sap_NWP_ASCS00-meta_attributes-migration-threshold", + "Name": "migration-threshold", + "Value": "1" + }, + { + "Id": "rsc_sap_NWP_ASCS00-meta_attributes-priority", + "Name": "priority", + "Value": "10" + } + ], + "InstanceAttributes": [ + { + "Id": "rsc_sap_NWP_ASCS00-instance_attributes-InstanceName", + "Name": "InstanceName", + "Value": "NWP_ASCS00_sapnwpas" + }, + { + "Id": "rsc_sap_NWP_ASCS00-instance_attributes-START_PROFILE", + "Name": "START_PROFILE", + "Value": "/sapmnt/NWP/profile/NWP_ASCS00_sapnwpas" + }, + { + "Id": "rsc_sap_NWP_ASCS00-instance_attributes-AUTOMATIC_RECOVER", + "Name": "AUTOMATIC_RECOVER", + "Value": "false" + } + ] + }, + { + "Id": "rsc_socat_NWP_ASCS00", + "Type": "azure-lb", + "Class": "ocf", + "Provider": "heartbeat", + "Operations": [ + { + "Id": "rsc_socat_NWP_ASCS00-monitor-10", + "Name": "monitor", + "Role": "", + "Timeout": "20s", + "Interval": "10" + } + ], + "MetaAttributes": null, + "InstanceAttributes": [ + { + "Id": "rsc_socat_NWP_ASCS00-instance_attributes-port", + "Name": "port", + "Value": "62000" + } + ] + } + ] + }, + { + "Id": "grp_NWP_ERS10", + "Primitives": [ + { + "Id": "rsc_ip_NWP_ERS10", + "Type": "IPaddr2", + "Class": "ocf", + "Provider": "heartbeat", + "Operations": [ + { + "Id": "rsc_ip_NWP_ERS10-monitor-10s", + "Name": "monitor", + "Role": "", + "Timeout": "20s", + "Interval": 
"10s" + } + ], + "MetaAttributes": null, + "InstanceAttributes": [ + { + "Id": "rsc_ip_NWP_ERS10-instance_attributes-ip", + "Name": "ip", + "Value": "10.80.1.26" + } + ] + }, + { + "Id": "rsc_fs_NWP_ERS10", + "Type": "Filesystem", + "Class": "ocf", + "Provider": "heartbeat", + "Operations": [ + { + "Id": "rsc_fs_NWP_ERS10-start-0", + "Name": "start", + "Role": "", + "Timeout": "60s", + "Interval": "0" + }, + { + "Id": "rsc_fs_NWP_ERS10-stop-0", + "Name": "stop", + "Role": "", + "Timeout": "60s", + "Interval": "0" + }, + { + "Id": "rsc_fs_NWP_ERS10-monitor-20s", + "Name": "monitor", + "Role": "", + "Timeout": "40s", + "Interval": "20s" + } + ], + "MetaAttributes": null, + "InstanceAttributes": [ + { + "Id": "rsc_fs_NWP_ERS10-instance_attributes-device", + "Name": "device", + "Value": "10.80.1.33:/NWP/ERS" + }, + { + "Id": "rsc_fs_NWP_ERS10-instance_attributes-directory", + "Name": "directory", + "Value": "/usr/sap/NWP/ERS10" + }, + { + "Id": "rsc_fs_NWP_ERS10-instance_attributes-fstype", + "Name": "fstype", + "Value": "nfs4" + } + ] + }, + { + "Id": "rsc_sap_NWP_ERS10", + "Type": "SAPInstance", + "Class": "ocf", + "Provider": "heartbeat", + "Operations": [ + { + "Id": "rsc_sap_NWP_ERS10-operations-monitor-120", + "Name": "monitor", + "Role": "", + "Timeout": "60", + "Interval": "120" + } + ], + "MetaAttributes": [ + { + "Id": "rsc_sap_NWP_ERS10-meta_attributes-priority", + "Name": "priority", + "Value": "1000" + } + ], + "InstanceAttributes": [ + { + "Id": "rsc_sap_NWP_ERS10-instance_attributes-InstanceName", + "Name": "InstanceName", + "Value": "NWP_ERS10_sapnwper" + }, + { + "Id": "rsc_sap_NWP_ERS10-instance_attributes-START_PROFILE", + "Name": "START_PROFILE", + "Value": "/sapmnt/NWP/profile/NWP_ERS10_sapnwper" + }, + { + "Id": "rsc_sap_NWP_ERS10-instance_attributes-AUTOMATIC_RECOVER", + "Name": "AUTOMATIC_RECOVER", + "Value": "false" + }, + { + "Id": "rsc_sap_NWP_ERS10-instance_attributes-IS_ERS", + "Name": "IS_ERS", + "Value": "true" + } + ] + }, + { + "Id": "rsc_socat_NWP_ERS10", + "Type": "azure-lb", + "Class": "ocf", + "Provider": "heartbeat", + "Operations": [ + { + "Id": "rsc_socat_NWP_ERS10-monitor-10", + "Name": "monitor", + "Role": "", + "Timeout": "20s", + "Interval": "10" + } + ], + "MetaAttributes": null, + "InstanceAttributes": [ + { + "Id": "rsc_socat_NWP_ERS10-instance_attributes-port", + "Name": "port", + "Value": "62110" + } + ] + } + ] + }, + { + "Id": "grp_NWD_ASCS01", + "Primitives": [ + { + "Id": "rsc_ip_NWD_ASCS01", + "Type": "IPaddr2", + "Class": "ocf", + "Provider": "heartbeat", + "Operations": [ + { + "Id": "rsc_ip_NWD_ASCS01-monitor-10s", + "Name": "monitor", + "Role": "", + "Timeout": "20s", + "Interval": "10s" + } + ], + "MetaAttributes": null, + "InstanceAttributes": [ + { + "Id": "rsc_ip_NWD_ASCS01-instance_attributes-ip", + "Name": "ip", + "Value": "10.80.2.25" + } + ] + }, + { + "Id": "rsc_fs_NWD_ASCS01", + "Type": "Filesystem", + "Class": "ocf", + "Provider": "heartbeat", + "Operations": [ + { + "Id": "rsc_fs_NWD_ASCS01-start-0", + "Name": "start", + "Role": "", + "Timeout": "60s", + "Interval": "0" + }, + { + "Id": "rsc_fs_NWD_ASCS01-stop-0", + "Name": "stop", + "Role": "", + "Timeout": "60s", + "Interval": "0" + }, + { + "Id": "rsc_fs_NWD_ASCS01-monitor-20s", + "Name": "monitor", + "Role": "", + "Timeout": "40s", + "Interval": "20s" + } + ], + "MetaAttributes": null, + "InstanceAttributes": [ + { + "Id": "rsc_fs_NWD_ASCS01-instance_attributes-device", + "Name": "device", + "Value": "10.80.2.33:/NWD/ASCS" + }, + { + "Id": 
"rsc_fs_NWD_ASCS01-instance_attributes-directory", + "Name": "directory", + "Value": "/usr/sap/NWD/ASCS01" + }, + { + "Id": "rsc_fs_NWD_ASCS01-instance_attributes-fstype", + "Name": "fstype", + "Value": "nfs4" + } + ] + }, + { + "Id": "rsc_sap_NWD_ASCS01", + "Type": "SAPInstance", + "Class": "ocf", + "Provider": "heartbeat", + "Operations": [ + { + "Id": "rsc_sap_NWD_ASCS01-operations-monitor-120", + "Name": "monitor", + "Role": "", + "Timeout": "60", + "Interval": "120" + } + ], + "MetaAttributes": [ + { + "Id": "rsc_sap_NWD_ASCS01-meta_attributes-resource-stickiness", + "Name": "resource-stickiness", + "Value": "5000" + }, + { + "Id": "rsc_sap_NWD_ASCS01-meta_attributes-failure-timeout", + "Name": "failure-timeout", + "Value": "60" + }, + { + "Id": "rsc_sap_NWD_ASCS01-meta_attributes-migration-threshold", + "Name": "migration-threshold", + "Value": "1" + }, + { + "Id": "rsc_sap_NWD_ASCS01-meta_attributes-priority", + "Name": "priority", + "Value": "10" + } + ], + "InstanceAttributes": [ + { + "Id": "rsc_sap_NWD_ASCS01-instance_attributes-InstanceName", + "Name": "InstanceName", + "Value": "NWD_ASCS01_sapnwpas" + }, + { + "Id": "rsc_sap_NWD_ASCS01-instance_attributes-START_PROFILE", + "Name": "START_PROFILE", + "Value": "/sapmnt/NWD/profile/NWD_ASCS01_sapnwpas" + }, + { + "Id": "rsc_sap_NWD_ASCS01-instance_attributes-AUTOMATIC_RECOVER", + "Name": "AUTOMATIC_RECOVER", + "Value": "false" + } + ] + }, + { + "Id": "rsc_socat_NWD_ASCS01", + "Type": "azure-lb", + "Class": "ocf", + "Provider": "heartbeat", + "Operations": [ + { + "Id": "rsc_socat_NWD_ASCS01-monitor-10", + "Name": "monitor", + "Role": "", + "Timeout": "20s", + "Interval": "10" + } + ], + "MetaAttributes": null, + "InstanceAttributes": [ + { + "Id": "rsc_socat_NWD_ASCS01-instance_attributes-port", + "Name": "port", + "Value": "62001" + } + ] + } + ] + }, + { + "Id": "grp_NWD_ERS11", + "Primitives": [ + { + "Id": "rsc_ip_NWD_ERS11", + "Type": "IPaddr2", + "Class": "ocf", + "Provider": "heartbeat", + "Operations": [ + { + "Id": "rsc_ip_NWD_ERS11-monitor-10s", + "Name": "monitor", + "Role": "", + "Timeout": "20s", + "Interval": "10s" + } + ], + "MetaAttributes": null, + "InstanceAttributes": [ + { + "Id": "rsc_ip_NWD_ERS11-instance_attributes-ip", + "Name": "ip", + "Value": "10.80.2.26" + } + ] + }, + { + "Id": "rsc_fs_NWD_ERS11", + "Type": "Filesystem", + "Class": "ocf", + "Provider": "heartbeat", + "Operations": [ + { + "Id": "rsc_fs_NWD_ERS11-start-0", + "Name": "start", + "Role": "", + "Timeout": "60s", + "Interval": "0" + }, + { + "Id": "rsc_fs_NWD_ERS11-stop-0", + "Name": "stop", + "Role": "", + "Timeout": "60s", + "Interval": "0" + }, + { + "Id": "rsc_fs_NWD_ERS11-monitor-20s", + "Name": "monitor", + "Role": "", + "Timeout": "40s", + "Interval": "20s" + } + ], + "MetaAttributes": null, + "InstanceAttributes": [ + { + "Id": "rsc_fs_NWD_ERS11-instance_attributes-device", + "Name": "device", + "Value": "10.80.2.33:/NWD/ERS" + }, + { + "Id": "rsc_fs_NWD_ERS11-instance_attributes-directory", + "Name": "directory", + "Value": "/usr/sap/NWD/ERS11" + }, + { + "Id": "rsc_fs_NWD_ERS11-instance_attributes-fstype", + "Name": "fstype", + "Value": "nfs4" + } + ] + }, + { + "Id": "rsc_sap_NWD_ERS11", + "Type": "SAPInstance", + "Class": "ocf", + "Provider": "heartbeat", + "Operations": [ + { + "Id": "rsc_sap_NWD_ERS11-operations-monitor-120", + "Name": "monitor", + "Role": "", + "Timeout": "60", + "Interval": "120" + } + ], + "MetaAttributes": [ + { + "Id": "rsc_sap_NWD_ERS11-meta_attributes-priority", + "Name": "priority", + "Value": "1000" 
+ } + ], + "InstanceAttributes": [ + { + "Id": "rsc_sap_NWD_ERS11-instance_attributes-InstanceName", + "Name": "InstanceName", + "Value": "NWD_ERS11_sapnwper" + }, + { + "Id": "rsc_sap_NWD_ERS11-instance_attributes-START_PROFILE", + "Name": "START_PROFILE", + "Value": "/sapmnt/NWD/profile/NWD_ERS11_sapnwper" + }, + { + "Id": "rsc_sap_NWD_ERS11-instance_attributes-AUTOMATIC_RECOVER", + "Name": "AUTOMATIC_RECOVER", + "Value": "false" + }, + { + "Id": "rsc_sap_NWD_ERS11-instance_attributes-IS_ERS", + "Name": "IS_ERS", + "Value": "true" + } + ] + }, + { + "Id": "rsc_socat_NWD_ERS11", + "Type": "azure-lb", + "Class": "ocf", + "Provider": "heartbeat", + "Operations": [ + { + "Id": "rsc_socat_NWD_ERS11-monitor-10", + "Name": "monitor", + "Role": "", + "Timeout": "20s", + "Interval": "10" + } + ], + "MetaAttributes": null, + "InstanceAttributes": [ + { + "Id": "rsc_socat_NWD_ERS11-instance_attributes-port", + "Name": "port", + "Value": "62111" + } + ] + } + ] + } + ], + "Masters": null, + "Primitives": [ + { + "Id": "stonith-sbd", + "Type": "external/sbd", + "Class": "stonith", + "Provider": "", + "Operations": null, + "MetaAttributes": null, + "InstanceAttributes": [ + { + "Id": "stonith-sbd-instance_attributes-pcmk_delay_max", + "Name": "pcmk_delay_max", + "Value": "30s" + } + ] + } + ] + }, + "Constraints": { + "RscLocations": [ + { + "Id": "loc_sap_NWP_failover_to_ers", + "Node": "", + "Role": "", + "Score": "", + "Resource": "rsc_sap_NWP_ASCS00" + } + ] + } + } + }, + "SBD": { + "Config": { + "SBD_OPTS": "", + "SBD_DEVICE": "/dev/disk/by-id/scsi-SLIO-ORG_IBLOCK_e34218cd-0d9a-4b21-b6d5-a313980baa82", + "SBD_PACEMAKER": "yes", + "SBD_STARTMODE": "always", + "SBD_DELAY_START": "yes", + "SBD_WATCHDOG_DEV": "/dev/watchdog", + "SBD_TIMEOUT_ACTION": "flush,reboot", + "SBD_WATCHDOG_TIMEOUT": "5", + "SBD_MOVE_TO_ROOT_CGROUP": "auto", + "SBD_SYNC_RESOURCE_STARTUP": "no" + }, + "Devices": [ + { + "Dump": { + "Uuid": "e0c97fe2-f63a-4fd1-83df-9a736a03b49b", + "Slots": 255, + "Header": "2.1", + "SectorSize": 512, + "TimeoutLoop": 1, + "TimeoutMsgwait": 10, + "TimeoutAllocate": 2, + "TimeoutWatchdog": 5 + }, + "List": [ + { + "Id": 0, + "Name": "vmnwprd01", + "Status": "clear" + }, + { + "Id": 1, + "Name": "vmnwprd02", + "Status": "clear" + } + ], + "Device": "/dev/disk/by-id/scsi-SLIO-ORG_IBLOCK_e34218cd-0d9a-4b21-b6d5-a313980baa82", + "Status": "healthy" + } + ] + }, + "Name": "netweaver_cluster", + "Crmmon": { + "Nodes": [ + { + "DC": true, + "Id": "1", + "Name": "vmnwprd01", + "Type": "member", + "Online": true, + "Pending": false, + "Standby": false, + "Unclean": false, + "Shutdown": false, + "ExpectedUp": true, + "Maintenance": false, + "StandbyOnFail": false, + "ResourcesRunning": 5 + }, + { + "DC": false, + "Id": "2", + "Name": "vmnwprd02", + "Type": "member", + "Online": true, + "Pending": false, + "Standby": false, + "Unclean": false, + "Shutdown": false, + "ExpectedUp": true, + "Maintenance": false, + "StandbyOnFail": false, + "ResourcesRunning": 4 + } + ], + "Clones": null, + "Groups": [ + { + "Id": "grp_NWP_ASCS00", + "Resources": [ + { + "Id": "rsc_ip_NWP_ASCS00", + "Node": { + "Id": "1", + "Name": "vmnwprd01", + "Cached": true + }, + "Role": "Started", + "Agent": "ocf::heartbeat:IPaddr2", + "Active": true, + "Failed": false, + "Blocked": false, + "Managed": true, + "Orphaned": false, + "FailureIgnored": false, + "NodesRunningOn": 1 + }, + { + "Id": "rsc_fs_NWP_ASCS00", + "Node": { + "Id": "1", + "Name": "vmnwprd01", + "Cached": true + }, + "Role": "Started", + "Agent": 
"ocf::heartbeat:Filesystem", + "Active": true, + "Failed": false, + "Blocked": false, + "Managed": true, + "Orphaned": false, + "FailureIgnored": false, + "NodesRunningOn": 1 + }, + { + "Id": "rsc_sap_NWP_ASCS00", + "Node": { + "Id": "1", + "Name": "vmnwprd01", + "Cached": true + }, + "Role": "Started", + "Agent": "ocf::heartbeat:SAPInstance", + "Active": true, + "Failed": false, + "Blocked": false, + "Managed": true, + "Orphaned": false, + "FailureIgnored": false, + "NodesRunningOn": 1 + }, + { + "Id": "rsc_socat_NWP_ASCS00", + "Node": { + "Id": "1", + "Name": "vmnwprd01", + "Cached": true + }, + "Role": "Started", + "Agent": "ocf::heartbeat:azure-lb", + "Active": true, + "Failed": false, + "Blocked": false, + "Managed": true, + "Orphaned": false, + "FailureIgnored": false, + "NodesRunningOn": 1 + } + ] + }, + { + "Id": "grp_NWP_ERS10", + "Resources": [ + { + "Id": "rsc_ip_NWP_ERS10", + "Node": { + "Id": "2", + "Name": "vmnwprd02", + "Cached": true + }, + "Role": "Started", + "Agent": "ocf::heartbeat:IPaddr2", + "Active": true, + "Failed": false, + "Blocked": false, + "Managed": true, + "Orphaned": false, + "FailureIgnored": false, + "NodesRunningOn": 1 + }, + { + "Id": "rsc_fs_NWP_ERS10", + "Node": { + "Id": "2", + "Name": "vmnwprd02", + "Cached": true + }, + "Role": "Started", + "Agent": "ocf::heartbeat:Filesystem", + "Active": true, + "Failed": false, + "Blocked": false, + "Managed": true, + "Orphaned": false, + "FailureIgnored": false, + "NodesRunningOn": 1 + }, + { + "Id": "rsc_sap_NWP_ERS10", + "Node": { + "Id": "2", + "Name": "vmnwprd02", + "Cached": true + }, + "Role": "Started", + "Agent": "ocf::heartbeat:SAPInstance", + "Active": true, + "Failed": false, + "Blocked": false, + "Managed": true, + "Orphaned": false, + "FailureIgnored": false, + "NodesRunningOn": 1 + }, + { + "Id": "rsc_socat_NWP_ERS10", + "Node": { + "Id": "2", + "Name": "vmnwprd02", + "Cached": true + }, + "Role": "Started", + "Agent": "ocf::heartbeat:azure-lb", + "Active": true, + "Failed": false, + "Blocked": false, + "Managed": true, + "Orphaned": false, + "FailureIgnored": false, + "NodesRunningOn": 1 + } + ] + }, + { + "Id": "grp_NWD_ASCS01", + "Resources": [ + { + "Id": "rsc_ip_NWD_ASCS01", + "Node": { + "Id": "1", + "Name": "vmnwprd01", + "Cached": true + }, + "Role": "Started", + "Agent": "ocf::heartbeat:IPaddr2", + "Active": true, + "Failed": false, + "Blocked": false, + "Managed": true, + "Orphaned": false, + "FailureIgnored": false, + "NodesRunningOn": 1 + }, + { + "Id": "rsc_fs_NWD_ASCS01", + "Node": { + "Id": "1", + "Name": "vmnwprd01", + "Cached": true + }, + "Role": "Started", + "Agent": "ocf::heartbeat:Filesystem", + "Active": true, + "Failed": false, + "Blocked": false, + "Managed": true, + "Orphaned": false, + "FailureIgnored": false, + "NodesRunningOn": 1 + }, + { + "Id": "rsc_sap_NWD_ASCS01", + "Node": { + "Id": "1", + "Name": "vmnwprd01", + "Cached": true + }, + "Role": "Started", + "Agent": "ocf::heartbeat:SAPInstance", + "Active": true, + "Failed": false, + "Blocked": false, + "Managed": true, + "Orphaned": false, + "FailureIgnored": false, + "NodesRunningOn": 1 + }, + { + "Id": "rsc_socat_NWD_ASCS01", + "Node": { + "Id": "1", + "Name": "vmnwprd01", + "Cached": true + }, + "Role": "Started", + "Agent": "ocf::heartbeat:azure-lb", + "Active": true, + "Failed": false, + "Blocked": false, + "Managed": true, + "Orphaned": false, + "FailureIgnored": false, + "NodesRunningOn": 1 + } + ] + }, + { + "Id": "grp_NWD_ERS11", + "Resources": [ + { + "Id": "rsc_ip_NWD_ERS11", + "Node": { + "Id": "2", + 
"Name": "vmnwprd02", + "Cached": true + }, + "Role": "Started", + "Agent": "ocf::heartbeat:IPaddr2", + "Active": true, + "Failed": false, + "Blocked": false, + "Managed": true, + "Orphaned": false, + "FailureIgnored": false, + "NodesRunningOn": 1 + }, + { + "Id": "rsc_fs_NWD_ERS11", + "Node": { + "Id": "2", + "Name": "vmnwprd02", + "Cached": true + }, + "Role": "Started", + "Agent": "ocf::heartbeat:Filesystem", + "Active": true, + "Failed": false, + "Blocked": false, + "Managed": true, + "Orphaned": false, + "FailureIgnored": false, + "NodesRunningOn": 1 + }, + { + "Id": "rsc_sap_NWD_ERS11", + "Node": { + "Id": "2", + "Name": "vmnwprd02", + "Cached": true + }, + "Role": "Started", + "Agent": "ocf::heartbeat:SAPInstance", + "Active": true, + "Failed": false, + "Blocked": false, + "Managed": true, + "Orphaned": false, + "FailureIgnored": false, + "NodesRunningOn": 1 + }, + { + "Id": "rsc_socat_NWD_ERS11", + "Node": { + "Id": "2", + "Name": "vmnwprd02", + "Cached": true + }, + "Role": "Started", + "Agent": "ocf::heartbeat:azure-lb", + "Active": true, + "Failed": false, + "Blocked": false, + "Managed": true, + "Orphaned": false, + "FailureIgnored": false, + "NodesRunningOn": 1 + } + ] + } + ], + "Summary": { + "Nodes": { + "Number": 2 + }, + "Resources": { + "Number": 17, + "Blocked": 0, + "Disabled": 0 + }, + "LastChange": { + "Time": "Tue Jan 11 13:43:06 2022" + }, + "ClusterOptions": { + "StonithEnabled": true + } + }, + "Version": "2.0.5", + "Resources": [ + { + "Id": "stonith-sbd", + "Node": { + "Id": "1", + "Name": "vmnwprd01", + "Cached": true + }, + "Role": "Started", + "Agent": "stonith:external/sbd", + "Active": true, + "Failed": false, + "Blocked": false, + "Managed": true, + "Orphaned": false, + "FailureIgnored": false, + "NodesRunningOn": 1 + } + ], + "NodeHistory": { + "Nodes": [ + { + "Name": "vmnwprd01", + "ResourceHistory": [ + { + "Name": "rsc_ip_NWP_ASCS00", + "FailCount": 0, + "MigrationThreshold": 3 + }, + { + "Name": "stonith-sbd", + "FailCount": 0, + "MigrationThreshold": 3 + }, + { + "Name": "rsc_socat_NWP_ASCS00", + "FailCount": 0, + "MigrationThreshold": 3 + }, + { + "Name": "rsc_fs_NWP_ASCS00", + "FailCount": 0, + "MigrationThreshold": 3 + }, + { + "Name": "rsc_sap_NWP_ASCS00", + "FailCount": 0, + "MigrationThreshold": 1 + }, + { + "Name": "rsc_ip_NWD_ASCS01", + "FailCount": 0, + "MigrationThreshold": 3 + }, + { + "Name": "rsc_socat_NWD_ASCS01", + "FailCount": 0, + "MigrationThreshold": 3 + }, + { + "Name": "rsc_fs_NWD_ASCS01", + "FailCount": 0, + "MigrationThreshold": 3 + }, + { + "Name": "rsc_sap_NWD_ASCS01", + "FailCount": 0, + "MigrationThreshold": 1 + } + ] + }, + { + "Name": "vmnwprd02", + "ResourceHistory": [ + { + "Name": "rsc_ip_NWP_ERS10", + "FailCount": 0, + "MigrationThreshold": 3 + }, + { + "Name": "rsc_fs_NWP_ERS10", + "FailCount": 0, + "MigrationThreshold": 3 + }, + { + "Name": "rsc_sap_NWP_ERS10", + "FailCount": 0, + "MigrationThreshold": 3 + }, + { + "Name": "rsc_socat_NWP_ERS10", + "FailCount": 0, + "MigrationThreshold": 3 + }, + { + "Name": "rsc_ip_NWD_ERS11", + "FailCount": 0, + "MigrationThreshold": 3 + }, + { + "Name": "rsc_fs_NWD_ERS11", + "FailCount": 0, + "MigrationThreshold": 3 + }, + { + "Name": "rsc_sap_NWD_ERS11", + "FailCount": 0, + "MigrationThreshold": 3 + }, + { + "Name": "rsc_socat_NWD_ERS11", + "FailCount": 0, + "MigrationThreshold": 3 + } + ] + } + ] + }, + "NodeAttributes": { + "Nodes": [ + { + "Name": "vmnwprd02", + "Attributes": [ + { + "Name": "runs_ers_NWP", + "Value": "1" + }, + { + "Name": "runs_ers_NWD", + "Value": "1" + } 
+ ] + } + ] + } + } + } +} diff --git a/test/fixtures/discovery/ha_cluster_discovery_unclustered.json b/test/fixtures/discovery/ha_cluster_discovery_unclustered.json new file mode 100644 index 0000000000..bc8a4e52bb --- /dev/null +++ b/test/fixtures/discovery/ha_cluster_discovery_unclustered.json @@ -0,0 +1,5 @@ +{ + "agent_id": "779cdd70-e9e2-58ca-b18a-bf3eb3f71244", + "discovery_type": "ha_cluster_discovery", + "payload": null +} diff --git a/test/fixtures/discovery/sap_system_discovery_empty.json b/test/fixtures/discovery/sap_system_discovery_empty.json new file mode 100644 index 0000000000..0c902b8fa4 --- /dev/null +++ b/test/fixtures/discovery/sap_system_discovery_empty.json @@ -0,0 +1,5 @@ +{ + "agent_id": "9cd46919-5f19-59aa-993e-cf3736c71053", + "discovery_type": "sap_system_discovery", + "payload": [] +} diff --git a/test/support/factory.ex b/test/support/factory.ex index ce1786f172..185aef2204 100644 --- a/test/support/factory.ex +++ b/test/support/factory.ex @@ -5,32 +5,51 @@ defmodule Trento.Factory do require Trento.Domain.Enums.Provider, as: Provider require Trento.Domain.Enums.ClusterType, as: ClusterType + require Trento.Domain.Enums.EnsaVersion, as: EnsaVersion require Trento.Domain.Enums.Health, as: Health alias Trento.Domain.{ - ClusterNode, + AscsErsClusterDetails, + AscsErsClusterNode, + AscsErsClusterSapSystem, ClusterResource, HanaClusterDetails, + HanaClusterNode, + SapSystem, SbdDevice, SlesSubscription } alias Trento.Domain.Events.{ + ApplicationInstanceDeregistered, ApplicationInstanceRegistered, + ClusterDeregistered, ClusterRegistered, + ClusterTombstoned, + DatabaseDeregistered, + DatabaseInstanceDeregistered, DatabaseInstanceRegistered, DatabaseRegistered, + DatabaseRestored, HostAddedToCluster, HostDetailsUpdated, HostRegistered, + HostRemovedFromCluster, + HostTombstoned, + SapSystemDeregistered, SapSystemRegistered, + SapSystemTombstoned, SlesSubscriptionsUpdated } alias Trento.Domain.Commands.{ + DeregisterApplicationInstance, + DeregisterDatabaseInstance, RegisterApplicationInstance, RegisterClusterHost, - RegisterDatabaseInstance + RegisterDatabaseInstance, + RegisterHost, + RollUpSapSystem } alias Trento.{ @@ -92,7 +111,8 @@ defmodule Trento.Factory do cluster_id: Faker.UUID.v4(), heartbeat: :unknown, provider: Enum.random(Provider.values()), - provider_data: nil + provider_data: nil, + deregistered_at: nil } end @@ -109,6 +129,7 @@ defmodule Trento.Factory do host_id: Faker.UUID.v4(), name: Faker.StarWars.character(), sid: Faker.StarWars.planet(), + additional_sids: [], provider: Enum.random(Provider.values()), resources_number: 8, hosts_number: 2, @@ -119,11 +140,27 @@ defmodule Trento.Factory do } end + def host_removed_from_cluster_event_factory do + HostRemovedFromCluster.new!(%{ + host_id: Faker.UUID.v4(), + cluster_id: Faker.UUID.v4(), + deregistered_at: DateTime.utc_now() + }) + end + + def cluster_deregistered_event_factory do + ClusterDeregistered.new!(%{ + cluster_id: Faker.UUID.v4(), + deregistered_at: DateTime.utc_now() + }) + end + def cluster_registered_event_factory do %ClusterRegistered{ cluster_id: Faker.UUID.v4(), name: Faker.StarWars.character(), sid: Faker.StarWars.planet(), + additional_sids: [], provider: Enum.random(Provider.values()), resources_number: 8, hosts_number: 2, @@ -173,6 +210,7 @@ defmodule Trento.Factory do id: Faker.UUID.v4(), name: Faker.StarWars.character(), sid: Faker.StarWars.planet(), + additional_sids: [], provider: Enum.random(Provider.values()), type: ClusterType.hana_scale_up(), health: Health.passing(), 
@@ -222,6 +260,38 @@ defmodule Trento.Factory do } end + def database_instance_deregistered_event_factory do + DatabaseInstanceDeregistered.new!(%{ + instance_number: "00", + host_id: Faker.UUID.v4(), + sap_system_id: Faker.UUID.v4(), + deregistered_at: DateTime.utc_now() + }) + end + + def database_restored_event_factory do + DatabaseRestored.new!(%{ + sap_system_id: Faker.UUID.v4(), + health: Health.passing() + }) + end + + def deregister_database_instance_command_factory do + DeregisterDatabaseInstance.new!(%{ + sap_system_id: Faker.UUID.v4(), + deregistered_at: DateTime.utc_now(), + host_id: Faker.UUID.v4(), + instance_number: "00" + }) + end + + def database_deregistered_event_factory do + DatabaseDeregistered.new!(%{ + sap_system_id: Faker.UUID.v4(), + deregistered_at: DateTime.utc_now() + }) + end + def application_instance_registered_event_factory do %ApplicationInstanceRegistered{ sap_system_id: Faker.UUID.v4(), @@ -237,6 +307,24 @@ defmodule Trento.Factory do } end + def application_instance_deregistered_event_factory do + ApplicationInstanceDeregistered.new!(%{ + sap_system_id: Faker.UUID.v4(), + deregistered_at: DateTime.utc_now(), + instance_number: "00", + host_id: Faker.UUID.v4() + }) + end + + def deregister_application_instance_command_factory do + DeregisterApplicationInstance.new!(%{ + sap_system_id: Faker.UUID.v4(), + deregistered_at: DateTime.utc_now(), + instance_number: "00", + host_id: Faker.UUID.v4() + }) + end + def database_registered_event_factory do %DatabaseRegistered{ sap_system_id: Faker.UUID.v4(), @@ -251,15 +339,29 @@ defmodule Trento.Factory do sid: Faker.UUID.v4(), db_host: Faker.Internet.ip_v4_address(), tenant: Faker.Beer.hop(), - health: Health.passing() + health: Health.passing(), + ensa_version: EnsaVersion.ensa1() } end + def sap_system_deregistered_event_factory do + SapSystemDeregistered.new!(%{ + sap_system_id: Faker.UUID.v4(), + deregistered_at: DateTime.utc_now() + }) + end + + def rollup_sap_system_command_factory do + RollUpSapSystem.new!(%{ + sap_system_id: Faker.UUID.v4() + }) + end + def hana_cluster_details_value_object do %HanaClusterDetails{ fencing_type: "external/sbd", nodes: [ - %ClusterNode{ + %HanaClusterNode{ attributes: %{"attribute" => Faker.Beer.name()}, hana_status: "Secondary", name: Faker.StarWars.character(), @@ -297,6 +399,54 @@ defmodule Trento.Factory do } end + def ascs_ers_cluster_node_factory do + %AscsErsClusterNode{ + name: Faker.Pokemon.name(), + roles: [Enum.random(["ascs", "ers"])], + virtual_ips: [Faker.Internet.ip_v4_address()], + filesystems: [Faker.File.file_name()], + attributes: %{ + Faker.Pokemon.name() => Faker.Pokemon.name() + }, + resources: build_list(5, :cluster_resource) + } + end + + def ascs_ers_cluster_sap_system_factory do + %AscsErsClusterSapSystem{ + sid: sequence(:sid, &"PR#{&1}"), + filesystem_resource_based: Enum.random([false, true]), + distributed: Enum.random([false, true]), + nodes: build_list(2, :ascs_ers_cluster_node) + } + end + + def sbd_device_factory do + %SbdDevice{ + device: Faker.File.file_name(), + status: Enum.random(["healthy", "unhealthy"]) + } + end + + def cluster_resource_factory do + %ClusterResource{ + id: Faker.UUID.v4(), + type: Faker.StarWars.planet(), + role: Faker.Beer.hop(), + status: Faker.Pokemon.name(), + fail_count: Enum.random(0..100) + } + end + + def ascs_ers_cluster_details_factory do + %AscsErsClusterDetails{ + fencing_type: Faker.Beer.hop(), + sap_systems: build_list(2, :ascs_ers_cluster_sap_system), + sbd_devices: build_list(2, :sbd_device), + 
stopped_resources: build_list(2, :cluster_resource) + } + end + def database_factory do %DatabaseReadModel{ id: Faker.UUID.v4(), @@ -310,7 +460,9 @@ defmodule Trento.Factory do sid: Faker.StarWars.planet(), tenant: Faker.Beer.hop(), db_host: Faker.Internet.ip_v4_address(), - health: Health.unknown() + health: Health.unknown(), + ensa_version: EnsaVersion.ensa1(), + deregistered_at: nil } end @@ -349,6 +501,16 @@ defmodule Trento.Factory do build(:application_instance_without_host, host_id: host.id, host: host) end + def sap_system_instance_factory do + %SapSystem.Instance{ + sid: Faker.UUID.v4(), + instance_number: String.pad_leading(sequence(:instance_number, &"#{&1}"), 2, "0"), + features: Faker.Pokemon.name(), + host_id: Faker.UUID.v4(), + health: Health.passing() + } + end + def discovery_event_factory do %DiscoveryEvent{ agent_id: Faker.UUID.v4(), @@ -387,7 +549,8 @@ defmodule Trento.Factory do https_port: 8443, start_priority: "0.3", host_id: Faker.UUID.v4(), - health: Health.passing() + health: Health.passing(), + ensa_version: EnsaVersion.ensa1() }) end @@ -408,4 +571,77 @@ defmodule Trento.Factory do health: Health.passing() }) end + + def cib_resource_factory do + %{ + "Id" => Faker.UUID.v4(), + "Type" => Faker.StarWars.planet(), + "Class" => "ocf", + "Provider" => "heartbeat", + "Operations" => [], + "MetaAttributes" => %{}, + "InstanceAttributes" => [] + } + end + + def crm_resource_factory do + %{ + "Id" => Faker.UUID.v4(), + "Node" => %{ + "Id" => "1", + "Name" => Faker.StarWars.planet(), + "Cached" => true + }, + "Role" => "Started", + "Agent" => Faker.Pokemon.name(), + "Active" => true, + "Failed" => false, + "Blocked" => false, + "Managed" => true, + "Orphaned" => false, + "FailureIgnored" => false, + "NodesRunningOn" => 1 + } + end + + def host_tombstoned_event_factory do + HostTombstoned.new!(%{ + host_id: Faker.UUID.v4() + }) + end + + def cluster_tombstoned_event_factory do + ClusterTombstoned.new!(%{ + cluster_id: Faker.UUID.v4() + }) + end + + def sap_system_tombstoned_event_factory do + SapSystemTombstoned.new!(%{ + sap_system_id: Faker.UUID.v4() + }) + end + + def sapcontrol_process_factory do + %{ + "name" => Faker.Pokemon.name(), + "description" => Faker.StarWars.planet(), + "dispstatus" => "SAPControl-GREEN", + "pid" => Enum.random(0..100) + } + end + + def register_host_command_factory do + RegisterHost.new!(%{ + host_id: Faker.UUID.v4(), + hostname: Faker.StarWars.character(), + ip_addresses: [Faker.Internet.ip_v4_address()], + agent_version: Faker.App.semver(), + cpu_count: Enum.random(1..16), + total_memory_mb: Enum.random(1..128), + socket_count: Enum.random(1..16), + os_version: Faker.App.semver(), + installation_source: Enum.random([:community, :suse, :unknown]) + }) + end end diff --git a/test/support/structs/test_data.ex b/test/support/structs/test_data.ex index 9d2bbd2dc0..d816597fa0 100644 --- a/test/support/structs/test_data.ex +++ b/test/support/structs/test_data.ex @@ -9,6 +9,13 @@ defmodule TestData do field :id, Ecto.UUID field :name, :string embeds_one :embedded, EmbeddedTestData + + field :polymorphic, PolymorphicEmbed, + types: [ + address: [module: PolymorphicAddressTestData, identify_by_fields: [:address]], + phone: [module: PolymorphicPhoneTestData, identify_by_fields: [:phone]] + ], + on_replace: :update end end @@ -24,3 +31,29 @@ defmodule EmbeddedTestData do field :name, :string end end + +defmodule PolymorphicAddressTestData do + @moduledoc false + + @required_fields :all + + use Trento.Type + + deftype do + field :id, Ecto.UUID + field 
:address, :string + end +end + +defmodule PolymorphicPhoneTestData do + @moduledoc false + + @required_fields :all + + use Trento.Type + + deftype do + field :id, Ecto.UUID + field :phone, :string + end +end diff --git a/test/trento/application/event_handlers/stream_roll_up_event_handler_test.exs b/test/trento/application/event_handlers/stream_roll_up_event_handler_test.exs index af5506b7a8..e074756990 100644 --- a/test/trento/application/event_handlers/stream_roll_up_event_handler_test.exs +++ b/test/trento/application/event_handlers/stream_roll_up_event_handler_test.exs @@ -112,4 +112,38 @@ defmodule Trento.StreamRollUpEventHandlerTest do assert :ok = StreamRollUpEventHandler.handle(event, %{stream_version: @max_stream_version + 1}) end + + test "should dispatch the host roll-up command when HostTombstoned is received" do + host_id = UUID.uuid4() + event = build(:host_tombstoned_event, host_id: host_id) + + expect(Trento.Commanded.Mock, :dispatch, fn %RollUpHost{host_id: ^host_id}, _ -> + :ok + end) + + assert :ok = StreamRollUpEventHandler.handle(event, %{stream_version: 1}) + end + + test "should dispatch the cluster roll-up command when ClusterTombstoned is received" do + cluster_id = UUID.uuid4() + event = build(:cluster_tombstoned_event, cluster_id: cluster_id) + + expect(Trento.Commanded.Mock, :dispatch, fn %RollUpCluster{cluster_id: ^cluster_id}, _ -> + :ok + end) + + assert :ok = StreamRollUpEventHandler.handle(event, %{stream_version: 1}) + end + + test "should dispatch the SAP system roll-up command when SapSystemTombstoned is received" do + sap_system_id = UUID.uuid4() + event = build(:sap_system_tombstoned_event, sap_system_id: sap_system_id) + + expect(Trento.Commanded.Mock, :dispatch, fn %RollUpSapSystem{sap_system_id: ^sap_system_id}, + _ -> + :ok + end) + + assert :ok = StreamRollUpEventHandler.handle(event, %{stream_version: 1}) + end end diff --git a/test/trento/application/integration/discovery/policies/cluster_policy_test.exs b/test/trento/application/integration/discovery/policies/cluster_policy_test.exs index fbccfca63e..7c2c3f5dd7 100644 --- a/test/trento/application/integration/discovery/policies/cluster_policy_test.exs +++ b/test/trento/application/integration/discovery/policies/cluster_policy_test.exs @@ -4,695 +4,1328 @@ defmodule Trento.Integration.Discovery.ClusterPolicyTest do import Trento.Integration.DiscoveryFixturesHelper + import Trento.Factory + require Trento.Domain.Enums.Provider, as: Provider alias Trento.Integration.Discovery.ClusterPolicy - alias Trento.Domain.Commands.RegisterClusterHost + alias Trento.Domain.Commands.{DeregisterClusterHost, RegisterClusterHost} alias Trento.Domain.{ - ClusterNode, + AscsErsClusterDetails, + AscsErsClusterNode, + AscsErsClusterSapSystem, ClusterResource, HanaClusterDetails, + HanaClusterNode, SbdDevice } test "should return the expected commands when a ha_cluster_discovery payload of type hana_scale_up is handled" do assert {:ok, - %RegisterClusterHost{ - cib_last_written: "Fri Oct 18 11:48:22 2019", - cluster_id: "34a94290-2236-5e4d-8def-05beb32d14d4", - designated_controller: true, - details: %HanaClusterDetails{ - fencing_type: "external/sbd", - nodes: [ - %ClusterNode{ - attributes: %{ - "hana_prd_clone_state" => "PROMOTED", - "hana_prd_op_mode" => "logreplay", - "hana_prd_remoteHost" => "node02", - "hana_prd_roles" => "4:P:master1:master:worker:master", - "hana_prd_site" => "PRIMARY_SITE_NAME", - "hana_prd_srmode" => "sync", - "hana_prd_sync_state" => "PRIM", - "hana_prd_version" => "2.00.040.00.1553674765", 
- "hana_prd_vhost" => "node01", - "lpa_prd_lpt" => "1571392102", - "master-rsc_SAPHana_PRD_HDB00" => "150" - }, - hana_status: "Primary", - name: "node01", - resources: [ - %ClusterResource{ - fail_count: 0, - id: "stonith-sbd", - role: "Started", - status: "Active", - type: "stonith:external/sbd" - }, - %ClusterResource{ - fail_count: 2, - id: "rsc_ip_PRD_HDB00", - role: "Started", - status: "Active", - type: "ocf::heartbeat:IPaddr2" - }, - %ClusterResource{ - fail_count: 1_000_000, - id: "rsc_SAPHana_PRD_HDB00", - role: "Master", - status: "Active", - type: "ocf::suse:SAPHana" - }, - %ClusterResource{ - fail_count: 0, - id: "rsc_SAPHanaTopology_PRD_HDB00", - role: "Started", - status: "Active", - type: "ocf::suse:SAPHanaTopology" - }, - %ClusterResource{ - fail_count: nil, - id: "clusterfs", - role: "Started", - status: "Active", - type: "ocf::heartbeat:Filesystem" - }, - %ClusterResource{ - fail_count: nil, - id: "rsc_ip_HA1_ASCS00", - role: "Started", - status: "Active", - type: "ocf::heartbeat:IPaddr2" + [ + %RegisterClusterHost{ + cib_last_written: "Fri Oct 18 11:48:22 2019", + cluster_id: "34a94290-2236-5e4d-8def-05beb32d14d4", + designated_controller: true, + details: %HanaClusterDetails{ + fencing_type: "external/sbd", + nodes: [ + %HanaClusterNode{ + attributes: %{ + "hana_prd_clone_state" => "PROMOTED", + "hana_prd_op_mode" => "logreplay", + "hana_prd_remoteHost" => "node02", + "hana_prd_roles" => "4:P:master1:master:worker:master", + "hana_prd_site" => "PRIMARY_SITE_NAME", + "hana_prd_srmode" => "sync", + "hana_prd_sync_state" => "PRIM", + "hana_prd_version" => "2.00.040.00.1553674765", + "hana_prd_vhost" => "node01", + "lpa_prd_lpt" => "1571392102", + "master-rsc_SAPHana_PRD_HDB00" => "150" }, - %ClusterResource{ - fail_count: nil, - id: "rsc_fs_HA1_ASCS00", - role: "Started", - status: "Active", - type: "ocf::heartbeat:Filesystem" + hana_status: "Primary", + name: "node01", + resources: [ + %ClusterResource{ + fail_count: 0, + id: "stonith-sbd", + role: "Started", + status: "Active", + type: "stonith:external/sbd" + }, + %ClusterResource{ + fail_count: 2, + id: "rsc_ip_PRD_HDB00", + role: "Started", + status: "Active", + type: "ocf::heartbeat:IPaddr2" + }, + %ClusterResource{ + fail_count: 1_000_000, + id: "rsc_SAPHana_PRD_HDB00", + role: "Master", + status: "Active", + type: "ocf::suse:SAPHana" + }, + %ClusterResource{ + fail_count: 0, + id: "rsc_SAPHanaTopology_PRD_HDB00", + role: "Started", + status: "Active", + type: "ocf::suse:SAPHanaTopology" + }, + %ClusterResource{ + fail_count: nil, + id: "clusterfs", + role: "Started", + status: "Active", + type: "ocf::heartbeat:Filesystem" + }, + %ClusterResource{ + fail_count: nil, + id: "rsc_ip_HA1_ASCS00", + role: "Started", + status: "Active", + type: "ocf::heartbeat:IPaddr2" + }, + %ClusterResource{ + fail_count: nil, + id: "rsc_fs_HA1_ASCS00", + role: "Started", + status: "Active", + type: "ocf::heartbeat:Filesystem" + }, + %ClusterResource{ + fail_count: nil, + id: "rsc_sap_HA1_ASCS00", + role: "Started", + status: "Active", + type: "ocf::heartbeat:SAPInstance" + } + ], + site: "PRIMARY_SITE_NAME", + virtual_ip: "192.168.123.200" + }, + %HanaClusterNode{ + attributes: %{ + "hana_prd_clone_state" => "DEMOTED", + "hana_prd_op_mode" => "logreplay", + "hana_prd_remoteHost" => "node01", + "hana_prd_roles" => "4:S:master1:master:worker:master", + "hana_prd_site" => "SECONDARY_SITE_NAME", + "hana_prd_srmode" => "sync", + "hana_prd_sync_state" => "SOK", + "hana_prd_version" => "2.00.040.00.1553674765", + "hana_prd_vhost" => 
"node02", + "lpa_prd_lpt" => "30", + "master-rsc_SAPHana_PRD_HDB00" => "100" }, - %ClusterResource{ - fail_count: nil, - id: "rsc_sap_HA1_ASCS00", - role: "Started", - status: "Active", - type: "ocf::heartbeat:SAPInstance" + hana_status: "Secondary", + name: "node02", + resources: [ + %ClusterResource{ + fail_count: 0, + id: "test", + role: "Started", + status: "Active", + type: "ocf::heartbeat:Dummy" + }, + %ClusterResource{ + fail_count: 300, + id: "rsc_SAPHana_PRD_HDB00", + role: "Slave", + status: "Active", + type: "ocf::suse:SAPHana" + }, + %ClusterResource{ + fail_count: 0, + id: "rsc_SAPHanaTopology_PRD_HDB00", + role: "Started", + status: "Active", + type: "ocf::suse:SAPHanaTopology" + }, + %ClusterResource{ + fail_count: nil, + id: "clusterfs", + role: "Started", + status: "Active", + type: "ocf::heartbeat:Filesystem" + }, + %ClusterResource{ + fail_count: nil, + id: "rsc_ip_HA1_ERS10", + role: "Started", + status: "Active", + type: "ocf::heartbeat:IPaddr2" + }, + %ClusterResource{ + fail_count: nil, + id: "rsc_fs_HA1_ERS10", + role: "Started", + status: "Active", + type: "ocf::heartbeat:Filesystem" + }, + %ClusterResource{ + fail_count: nil, + id: "rsc_sap_HA1_ERS10", + role: "Started", + status: "Active", + type: "ocf::heartbeat:SAPInstance" + } + ], + site: "SECONDARY_SITE_NAME", + virtual_ip: nil + } + ], + sbd_devices: [ + %SbdDevice{ + device: "/dev/vdc", + status: "healthy" + }, + %SbdDevice{ + device: "/dev/vdb", + status: "healthy" + } + ], + secondary_sync_state: "SOK", + sr_health_state: "4", + stopped_resources: [ + %ClusterResource{ + fail_count: nil, + id: "test-stop", + role: "Stopped", + status: nil, + type: "ocf::heartbeat:Dummy" + }, + %ClusterResource{ + fail_count: nil, + id: "clusterfs", + role: "Stopped", + status: nil, + type: "ocf::heartbeat:Filesystem" + }, + %ClusterResource{ + fail_count: nil, + id: "clusterfs", + role: "Stopped", + status: nil, + type: "ocf::heartbeat:Filesystem" + } + ], + system_replication_mode: "sync", + system_replication_operation_mode: "logreplay" + }, + host_id: "779cdd70-e9e2-58ca-b18a-bf3eb3f71244", + name: "hana_cluster", + sid: "PRD", + additional_sids: [], + type: :hana_scale_up, + hosts_number: 2, + resources_number: 8, + discovered_health: :passing, + provider: Provider.azure() + } + ]} == + "ha_cluster_discovery_hana_scale_up" + |> load_discovery_event_fixture() + |> ClusterPolicy.handle(nil) + end + + test "should return the expected commands when a ha_cluster_discovery payload of type ascs_ers is handled" do + assert {:ok, + [ + %RegisterClusterHost{ + cib_last_written: "Tue Jan 11 13:43:06 2022", + cluster_id: "0eac831a-aa66-5f45-89a4-007fbd2c5714", + designated_controller: false, + details: %AscsErsClusterDetails{ + fencing_type: "external/sbd", + sap_systems: [ + %AscsErsClusterSapSystem{ + sid: "NWP", + filesystem_resource_based: true, + distributed: true, + nodes: [ + %AscsErsClusterNode{ + name: "vmnwprd01", + roles: [:ascs], + virtual_ips: ["10.80.1.25"], + filesystems: ["/usr/sap/NWP/ASCS00"], + attributes: %{}, + resources: [ + %ClusterResource{ + id: "rsc_ip_NWP_ASCS00", + type: "ocf::heartbeat:IPaddr2", + role: "Started", + status: "Active", + fail_count: 0 + }, + %ClusterResource{ + id: "rsc_fs_NWP_ASCS00", + type: "ocf::heartbeat:Filesystem", + role: "Started", + status: "Active", + fail_count: 0 + }, + %ClusterResource{ + id: "rsc_sap_NWP_ASCS00", + type: "ocf::heartbeat:SAPInstance", + role: "Started", + status: "Active", + fail_count: 0 + }, + %ClusterResource{ + id: "rsc_socat_NWP_ASCS00", + type: 
"ocf::heartbeat:azure-lb", + role: "Started", + status: "Active", + fail_count: 0 + } + ] + }, + %AscsErsClusterNode{ + name: "vmnwprd02", + roles: [:ers], + virtual_ips: ["10.80.1.26"], + filesystems: ["/usr/sap/NWP/ERS10"], + attributes: %{"runs_ers_NWP" => "1"}, + resources: [ + %ClusterResource{ + id: "rsc_ip_NWP_ERS10", + type: "ocf::heartbeat:IPaddr2", + role: "Started", + status: "Active", + fail_count: 0 + }, + %ClusterResource{ + id: "rsc_fs_NWP_ERS10", + type: "ocf::heartbeat:Filesystem", + role: "Started", + status: "Active", + fail_count: 0 + }, + %ClusterResource{ + id: "rsc_sap_NWP_ERS10", + type: "ocf::heartbeat:SAPInstance", + role: "Started", + status: "Active", + fail_count: 0 + }, + %ClusterResource{ + id: "rsc_socat_NWP_ERS10", + type: "ocf::heartbeat:azure-lb", + role: "Started", + status: "Active", + fail_count: 0 + } + ] + } + ] + } + ], + stopped_resources: [], + sbd_devices: [ + %SbdDevice{ + device: + "/dev/disk/by-id/scsi-SLIO-ORG_IBLOCK_e34218cd-0d9a-4b21-b6d5-a313980baa82", + status: "healthy" + } + ] + }, + host_id: "4b30a6af-4b52-5bda-bccb-f2248a12c992", + name: "netweaver_cluster", + sid: nil, + additional_sids: ["NWP"], + type: :ascs_ers, + hosts_number: 2, + resources_number: 9, + discovered_health: :passing, + provider: Provider.azure() + } + ]} == + "ha_cluster_discovery_ascs_ers" + |> load_discovery_event_fixture() + |> ClusterPolicy.handle(nil) + end + + test "should return the expected commands when a ha_cluster_discovery payload of type ascs_ers with resources running in the same node is handled" do + assert {:ok, + [ + %RegisterClusterHost{ + details: %AscsErsClusterDetails{ + sap_systems: [ + %AscsErsClusterSapSystem{ + sid: "NWP", + filesystem_resource_based: true, + distributed: false, + nodes: [ + %AscsErsClusterNode{ + name: "vmnwprd01", + roles: [:ascs, :ers], + virtual_ips: ["10.80.1.25", "10.80.1.26"], + filesystems: ["/usr/sap/NWP/ASCS00", "/usr/sap/NWP/ERS10"] + }, + %AscsErsClusterNode{ + name: "vmnwprd02", + roles: [], + virtual_ips: [], + filesystems: [] + } + ] + } + ] + } + } + ]} = + "ha_cluster_discovery_ascs_ers" + |> load_discovery_event_fixture() + |> update_in( + ["payload", "Crmmon", "Groups"], + &Enum.map(&1, fn group -> + update_in( + group, + ["Resources"], + fn resources -> + Enum.map(resources, fn resource -> + put_in(resource, ["Node", "Name"], "vmnwprd01") + end) + end + ) + end) + ) + |> ClusterPolicy.handle(nil) + end + + test "should return the expected commands when a ha_cluster_discovery payload of type ascs_ers with invalid data is handled" do + assert {:ok, + [ + %RegisterClusterHost{ + cib_last_written: "Tue Jan 11 13:43:06 2022", + cluster_id: "0eac831a-aa66-5f45-89a4-007fbd2c5714", + designated_controller: false, + details: nil, + host_id: "4b30a6af-4b52-5bda-bccb-f2248a12c992", + name: "netweaver_cluster", + sid: nil, + additional_sids: [], + type: :unknown, + hosts_number: 2, + resources_number: 5, + discovered_health: :unknown, + provider: Provider.azure() + } + ]} == + "ha_cluster_discovery_ascs_ers_invalid" + |> load_discovery_event_fixture() + |> ClusterPolicy.handle(nil) + end + + test "should return the expected commands when a ha_cluster_discovery payload of type ascs_ers with multi sid setup is handled" do + assert {:ok, + [ + %RegisterClusterHost{ + cib_last_written: "Tue Jan 11 13:43:06 2022", + cluster_id: "0eac831a-aa66-5f45-89a4-007fbd2c5714", + designated_controller: false, + details: %AscsErsClusterDetails{ + fencing_type: "external/sbd", + sap_systems: [ + %AscsErsClusterSapSystem{ + sid: 
"NWP", + filesystem_resource_based: true, + distributed: true, + nodes: [ + %AscsErsClusterNode{ + name: "vmnwprd01", + roles: [:ascs], + virtual_ips: ["10.80.1.25"], + filesystems: ["/usr/sap/NWP/ASCS00"], + attributes: %{}, + resources: [ + %ClusterResource{ + id: "rsc_ip_NWP_ASCS00", + type: "ocf::heartbeat:IPaddr2", + role: "Started", + status: "Active", + fail_count: 0 + }, + %ClusterResource{ + id: "rsc_fs_NWP_ASCS00", + type: "ocf::heartbeat:Filesystem", + role: "Started", + status: "Active", + fail_count: 0 + }, + %ClusterResource{ + id: "rsc_sap_NWP_ASCS00", + type: "ocf::heartbeat:SAPInstance", + role: "Started", + status: "Active", + fail_count: 0 + }, + %ClusterResource{ + id: "rsc_socat_NWP_ASCS00", + type: "ocf::heartbeat:azure-lb", + role: "Started", + status: "Active", + fail_count: 0 + } + ] + }, + %AscsErsClusterNode{ + name: "vmnwprd02", + roles: [:ers], + virtual_ips: ["10.80.1.26"], + filesystems: ["/usr/sap/NWP/ERS10"], + attributes: %{"runs_ers_NWD" => "1", "runs_ers_NWP" => "1"}, + resources: [ + %ClusterResource{ + id: "rsc_ip_NWP_ERS10", + type: "ocf::heartbeat:IPaddr2", + role: "Started", + status: "Active", + fail_count: 0 + }, + %ClusterResource{ + id: "rsc_fs_NWP_ERS10", + type: "ocf::heartbeat:Filesystem", + role: "Started", + status: "Active", + fail_count: 0 + }, + %ClusterResource{ + id: "rsc_sap_NWP_ERS10", + type: "ocf::heartbeat:SAPInstance", + role: "Started", + status: "Active", + fail_count: 0 + }, + %ClusterResource{ + id: "rsc_socat_NWP_ERS10", + type: "ocf::heartbeat:azure-lb", + role: "Started", + status: "Active", + fail_count: 0 + } + ] + } + ] + }, + %AscsErsClusterSapSystem{ + sid: "NWD", + filesystem_resource_based: true, + distributed: true, + nodes: [ + %AscsErsClusterNode{ + name: "vmnwprd01", + roles: [:ascs], + virtual_ips: ["10.80.2.25"], + filesystems: ["/usr/sap/NWD/ASCS01"], + attributes: %{}, + resources: [ + %ClusterResource{ + id: "rsc_ip_NWD_ASCS01", + type: "ocf::heartbeat:IPaddr2", + role: "Started", + status: "Active", + fail_count: 0 + }, + %ClusterResource{ + id: "rsc_fs_NWD_ASCS01", + type: "ocf::heartbeat:Filesystem", + role: "Started", + status: "Active", + fail_count: 0 + }, + %ClusterResource{ + id: "rsc_sap_NWD_ASCS01", + type: "ocf::heartbeat:SAPInstance", + role: "Started", + status: "Active", + fail_count: 0 + }, + %ClusterResource{ + id: "rsc_socat_NWD_ASCS01", + type: "ocf::heartbeat:azure-lb", + role: "Started", + status: "Active", + fail_count: 0 + } + ] + }, + %AscsErsClusterNode{ + name: "vmnwprd02", + roles: [:ers], + virtual_ips: ["10.80.2.26"], + filesystems: ["/usr/sap/NWD/ERS11"], + attributes: %{"runs_ers_NWD" => "1", "runs_ers_NWP" => "1"}, + resources: [ + %ClusterResource{ + id: "rsc_ip_NWD_ERS11", + type: "ocf::heartbeat:IPaddr2", + role: "Started", + status: "Active", + fail_count: 0 + }, + %ClusterResource{ + id: "rsc_fs_NWD_ERS11", + type: "ocf::heartbeat:Filesystem", + role: "Started", + status: "Active", + fail_count: 0 + }, + %ClusterResource{ + id: "rsc_sap_NWD_ERS11", + type: "ocf::heartbeat:SAPInstance", + role: "Started", + status: "Active", + fail_count: 0 + }, + %ClusterResource{ + id: "rsc_socat_NWD_ERS11", + type: "ocf::heartbeat:azure-lb", + role: "Started", + status: "Active", + fail_count: 0 + } + ] + } + ] + } + ], + stopped_resources: [], + sbd_devices: [ + %SbdDevice{ + device: + "/dev/disk/by-id/scsi-SLIO-ORG_IBLOCK_e34218cd-0d9a-4b21-b6d5-a313980baa82", + status: "healthy" + } + ] + }, + host_id: "4b30a6af-4b52-5bda-bccb-f2248a12c992", + name: "netweaver_cluster", + sid: nil, + 
additional_sids: ["NWP", "NWD"], + type: :ascs_ers, + hosts_number: 2, + resources_number: 17, + discovered_health: :passing, + provider: Provider.azure() + } + ]} == + "ha_cluster_discovery_ascs_ers_multi_sid" + |> load_discovery_event_fixture() + |> ClusterPolicy.handle(nil) + end + + test "should set the filesystem_resource_based to false if no Filesystem resources are found" do + group_1 = %{ + "Id" => "Group1", + "Primitives" => [ + build(:cib_resource, %{ + "Id" => "rsc_sap_NWP_ASCS00", + "Type" => "SAPInstance", + "InstanceAttributes" => [ + %{"Id" => "Id1", "Name" => "InstanceName", "Value" => "NWP_ASCS00_sapnwpas"} + ] + }) + ] + } + + group_2 = %{ + "Id" => "Group2", + "Primitives" => [ + build(:cib_resource, %{ + "Id" => "rsc_sap_NWP_ERS10", + "Type" => "SAPInstance", + "InstanceAttributes" => [ + %{"Id" => "Id2", "Name" => "InstanceName", "Value" => "NWP_ERS10_sapnwpas"} + ] + }) + ] + } + + assert {:ok, + [ + %RegisterClusterHost{ + details: %AscsErsClusterDetails{ + sap_systems: [ + %AscsErsClusterSapSystem{ + sid: "NWP", + filesystem_resource_based: false, + distributed: true + } + ] + } + } + ]} = + "ha_cluster_discovery_ascs_ers" + |> load_discovery_event_fixture() + |> put_in(["payload", "Cib", "Configuration", "Resources", "Groups"], [ + group_1, + group_2 + ]) + |> ClusterPolicy.handle(nil) + end + + describe "ascs/ers clusters health" do + test "should set the health to critical when one of the nodes is unclean" do + assert {:ok, + [ + %RegisterClusterHost{ + details: %AscsErsClusterDetails{ + sap_systems: [ + %AscsErsClusterSapSystem{ + sid: "NWP", + distributed: false } - ], - site: "PRIMARY_SITE_NAME", - virtual_ip: "192.168.123.200" + ] }, - %ClusterNode{ - attributes: %{ - "hana_prd_clone_state" => "DEMOTED", - "hana_prd_op_mode" => "logreplay", - "hana_prd_remoteHost" => "node01", - "hana_prd_roles" => "4:S:master1:master:worker:master", - "hana_prd_site" => "SECONDARY_SITE_NAME", - "hana_prd_srmode" => "sync", - "hana_prd_sync_state" => "SOK", - "hana_prd_version" => "2.00.040.00.1553674765", - "hana_prd_vhost" => "node02", - "lpa_prd_lpt" => "30", - "master-rsc_SAPHana_PRD_HDB00" => "100" - }, - hana_status: "Secondary", - name: "node02", - resources: [ - %ClusterResource{ - fail_count: 0, - id: "test", - role: "Started", - status: "Active", - type: "ocf::heartbeat:Dummy" - }, - %ClusterResource{ - fail_count: 300, - id: "rsc_SAPHana_PRD_HDB00", - role: "Slave", - status: "Active", - type: "ocf::suse:SAPHana" - }, - %ClusterResource{ - fail_count: 0, - id: "rsc_SAPHanaTopology_PRD_HDB00", - role: "Started", - status: "Active", - type: "ocf::suse:SAPHanaTopology" - }, - %ClusterResource{ - fail_count: nil, - id: "clusterfs", - role: "Started", - status: "Active", - type: "ocf::heartbeat:Filesystem" - }, - %ClusterResource{ - fail_count: nil, - id: "rsc_ip_HA1_ERS10", - role: "Started", - status: "Active", - type: "ocf::heartbeat:IPaddr2" - }, - %ClusterResource{ - fail_count: nil, - id: "rsc_fs_HA1_ERS10", - role: "Started", - status: "Active", - type: "ocf::heartbeat:Filesystem" - }, - %ClusterResource{ - fail_count: nil, - id: "rsc_sap_HA1_ERS10", - role: "Started", - status: "Active", - type: "ocf::heartbeat:SAPInstance" + discovered_health: :critical + } + ]} = + "ha_cluster_discovery_ascs_ers" + |> load_discovery_event_fixture() + |> put_in(["payload", "Crmmon", "Nodes"], [ + %{"Id" => "1", "Unclean" => true, "Online" => false, "Name" => "vmnwprd01"}, + %{"Id" => "2", "Unclean" => false, "Online" => true, "Name" => "vmnwprd02"} + ]) + |> 
ClusterPolicy.handle(nil) + end + + test "should set the health to critical when the SAPInstance resource is Stopped" do + group_1_resources = + build_list(1, :crm_resource, %{ + "Id" => "rsc_sap_NWP_ASCS00", + "Agent" => "ocf::heartbeat:SAPInstance", + "Role" => "Started", + "Node" => %{"Name" => "vmnwpd01"} + }) + + group_2_resources = + build_list(1, :crm_resource, %{ + "Id" => "rsc_sap_NWP_ERS10", + "Agent" => "ocf::heartbeat:SAPInstance", + "Role" => "Stopped", + "Node" => %{"Name" => "vmnwpd02"} + }) + + assert {:ok, + [ + %RegisterClusterHost{ + details: %AscsErsClusterDetails{ + sap_systems: [ + %AscsErsClusterSapSystem{ + sid: "NWP", + distributed: false } - ], - site: "SECONDARY_SITE_NAME", - virtual_ip: nil - } - ], - sbd_devices: [ - %SbdDevice{ - device: "/dev/vdc", - status: "healthy" + ] }, - %SbdDevice{ - device: "/dev/vdb", - status: "healthy" - } - ], - secondary_sync_state: "SOK", - sr_health_state: "4", - stopped_resources: [ - %ClusterResource{ - fail_count: nil, - id: "test-stop", - role: "Stopped", - status: nil, - type: "ocf::heartbeat:Dummy" + discovered_health: :critical + } + ]} = + "ha_cluster_discovery_ascs_ers" + |> load_discovery_event_fixture() + |> put_in(["payload", "Crmmon", "Groups"], [ + %{"Resources" => group_1_resources}, + %{"Resources" => group_2_resources} + ]) + |> ClusterPolicy.handle(nil) + end + + test "should set the health to critical when the SAPInstance resourece is running the same node" do + group_1_resources = + build_list(1, :crm_resource, %{ + "Id" => "rsc_sap_NWP_ASCS00", + "Agent" => "ocf::heartbeat:SAPInstance", + "Node" => %{"Name" => "vmnwpd01"} + }) + + group_2_resources = + build_list(1, :crm_resource, %{ + "Id" => "rsc_sap_NWP_ERS10", + "Agent" => "ocf::heartbeat:SAPInstance", + "Node" => %{"Name" => "vmnwpd01"} + }) + + assert {:ok, + [ + %RegisterClusterHost{ + details: %AscsErsClusterDetails{ + sap_systems: [ + %AscsErsClusterSapSystem{ + sid: "NWP", + distributed: false + } + ] }, - %ClusterResource{ - fail_count: nil, - id: "clusterfs", - role: "Stopped", - status: nil, - type: "ocf::heartbeat:Filesystem" + discovered_health: :critical + } + ]} = + "ha_cluster_discovery_ascs_ers" + |> load_discovery_event_fixture() + |> put_in(["payload", "Crmmon", "Groups"], [ + %{"Resources" => group_1_resources}, + %{"Resources" => group_2_resources} + ]) + |> ClusterPolicy.handle(nil) + end + + test "should set the health to critical when the SAPInstance is on failed state" do + group_1_resources = + build_list(1, :crm_resource, %{ + "Id" => "rsc_sap_NWP_ASCS00", + "Agent" => "ocf::heartbeat:SAPInstance", + "Failed" => true, + "Node" => %{"Name" => "vmnwpd01"} + }) + + group_2_resources = + build_list(1, :crm_resource, %{ + "Id" => "rsc_sap_NWP_ERS10", + "Agent" => "ocf::heartbeat:SAPInstance", + "Failed" => false, + "Node" => %{"Name" => "vmnwpd01"} + }) + + assert {:ok, + [ + %RegisterClusterHost{ + details: %AscsErsClusterDetails{ + sap_systems: [ + %AscsErsClusterSapSystem{ + sid: "NWP", + distributed: false + } + ] }, - %ClusterResource{ - fail_count: nil, - id: "clusterfs", - role: "Stopped", - status: nil, - type: "ocf::heartbeat:Filesystem" - } - ], - system_replication_mode: "sync", - system_replication_operation_mode: "logreplay" - }, - host_id: "779cdd70-e9e2-58ca-b18a-bf3eb3f71244", - name: "hana_cluster", - sid: "PRD", - type: :hana_scale_up, - hosts_number: 2, - resources_number: 8, - discovered_health: :passing, - provider: Provider.azure() - }} == - "ha_cluster_discovery_hana_scale_up" - |> 
load_discovery_event_fixture() - |> ClusterPolicy.handle() + discovered_health: :critical + } + ]} = + "ha_cluster_discovery_ascs_ers" + |> load_discovery_event_fixture() + |> put_in(["payload", "Crmmon", "Groups"], [ + %{"Resources" => group_1_resources}, + %{"Resources" => group_2_resources} + ]) + |> ClusterPolicy.handle(nil) + end end test "should return the expected commands when a ha_cluster_discovery payload with aws provider" do - assert { - :ok, - %Trento.Domain.Commands.RegisterClusterHost{ - cib_last_written: "Wed Apr 27 07:42:23 2022", - cluster_id: "3e83b9d1-00e8-544d-9e29-7a66d9ed7c1e", - designated_controller: true, - details: %Trento.Domain.HanaClusterDetails{ - fencing_type: "external/ec2", - nodes: [ - %Trento.Domain.ClusterNode{ - attributes: %{ - "hana_prd_clone_state" => "PROMOTED", - "hana_prd_op_mode" => "logreplay", - "hana_prd_remoteHost" => "vmhana02", - "hana_prd_roles" => "4:P:master1:master:worker:master", - "hana_prd_site" => "Site1", - "hana_prd_srmode" => "sync", - "hana_prd_sync_state" => "PRIM", - "hana_prd_version" => "2.00.052.00.1599235305", - "hana_prd_vhost" => "vmhana01", - "lpa_prd_lpt" => "1651045343", - "master-rsc_SAPHana_PRD_HDB00" => "150" - }, - hana_status: "Primary", - name: "vmhana01", - resources: [ - %Trento.Domain.ClusterResource{ - fail_count: 0, - id: "rsc_aws_stonith_PRD_HDB00", - role: "Started", - status: "Active", - type: "stonith:external/ec2" - }, - %Trento.Domain.ClusterResource{ - fail_count: 0, - id: "rsc_ip_PRD_HDB00", - role: "Started", - status: "Active", - type: "ocf::suse:aws-vpc-move-ip" - }, - %Trento.Domain.ClusterResource{ - fail_count: 0, - id: "rsc_exporter_PRD_HDB00", - role: "Started", - status: "Active", - type: "systemd:prometheus-hanadb_exporter@PRD_HDB00" - }, - %Trento.Domain.ClusterResource{ - fail_count: 0, - id: "rsc_SAPHana_PRD_HDB00", - role: "Master", - status: "Active", - type: "ocf::suse:SAPHana" - }, - %Trento.Domain.ClusterResource{ - fail_count: 0, - id: "rsc_SAPHanaTopology_PRD_HDB00", - role: "Started", - status: "Active", - type: "ocf::suse:SAPHanaTopology" - } - ], - site: "Site1", - virtual_ip: "192.168.1.10" - }, - %Trento.Domain.ClusterNode{ - attributes: %{ - "hana_prd_clone_state" => "DEMOTED", - "hana_prd_op_mode" => "logreplay", - "hana_prd_remoteHost" => "vmhana01", - "hana_prd_roles" => "4:S:master1:master:worker:master", - "hana_prd_site" => "Site2", - "hana_prd_srmode" => "sync", - "hana_prd_sync_state" => "SOK", - "hana_prd_version" => "2.00.052.00.1599235305", - "hana_prd_vhost" => "vmhana02", - "lpa_prd_lpt" => "30", - "master-rsc_SAPHana_PRD_HDB00" => "100" - }, - hana_status: "Secondary", - name: "vmhana02", - resources: [ - %Trento.Domain.ClusterResource{ - fail_count: 0, - id: "rsc_SAPHana_PRD_HDB00", - role: "Slave", - status: "Active", - type: "ocf::suse:SAPHana" - }, - %Trento.Domain.ClusterResource{ - fail_count: 0, - id: "rsc_SAPHanaTopology_PRD_HDB00", - role: "Started", - status: "Active", - type: "ocf::suse:SAPHanaTopology" - } - ], - site: "Site2", - virtual_ip: nil - } - ], - sbd_devices: [], - secondary_sync_state: "SOK", - sr_health_state: "4", - stopped_resources: [], - system_replication_mode: "sync", - system_replication_operation_mode: "logreplay" - }, - discovered_health: :passing, - host_id: "a3279fd0-0443-1234-9354-2d7909fd6bc6", - hosts_number: 2, - name: "hana_cluster", - provider: Provider.aws(), - resources_number: 7, - sid: "PRD", - type: :hana_scale_up - } - } == + assert {:ok, + [ + %Trento.Domain.Commands.RegisterClusterHost{ + cib_last_written: "Wed 
Apr 27 07:42:23 2022", + cluster_id: "3e83b9d1-00e8-544d-9e29-7a66d9ed7c1e", + designated_controller: true, + details: %Trento.Domain.HanaClusterDetails{ + fencing_type: "external/ec2", + nodes: [ + %Trento.Domain.HanaClusterNode{ + attributes: %{ + "hana_prd_clone_state" => "PROMOTED", + "hana_prd_op_mode" => "logreplay", + "hana_prd_remoteHost" => "vmhana02", + "hana_prd_roles" => "4:P:master1:master:worker:master", + "hana_prd_site" => "Site1", + "hana_prd_srmode" => "sync", + "hana_prd_sync_state" => "PRIM", + "hana_prd_version" => "2.00.052.00.1599235305", + "hana_prd_vhost" => "vmhana01", + "lpa_prd_lpt" => "1651045343", + "master-rsc_SAPHana_PRD_HDB00" => "150" + }, + hana_status: "Primary", + name: "vmhana01", + resources: [ + %Trento.Domain.ClusterResource{ + fail_count: 0, + id: "rsc_aws_stonith_PRD_HDB00", + role: "Started", + status: "Active", + type: "stonith:external/ec2" + }, + %Trento.Domain.ClusterResource{ + fail_count: 0, + id: "rsc_ip_PRD_HDB00", + role: "Started", + status: "Active", + type: "ocf::suse:aws-vpc-move-ip" + }, + %Trento.Domain.ClusterResource{ + fail_count: 0, + id: "rsc_exporter_PRD_HDB00", + role: "Started", + status: "Active", + type: "systemd:prometheus-hanadb_exporter@PRD_HDB00" + }, + %Trento.Domain.ClusterResource{ + fail_count: 0, + id: "rsc_SAPHana_PRD_HDB00", + role: "Master", + status: "Active", + type: "ocf::suse:SAPHana" + }, + %Trento.Domain.ClusterResource{ + fail_count: 0, + id: "rsc_SAPHanaTopology_PRD_HDB00", + role: "Started", + status: "Active", + type: "ocf::suse:SAPHanaTopology" + } + ], + site: "Site1", + virtual_ip: "192.168.1.10" + }, + %Trento.Domain.HanaClusterNode{ + attributes: %{ + "hana_prd_clone_state" => "DEMOTED", + "hana_prd_op_mode" => "logreplay", + "hana_prd_remoteHost" => "vmhana01", + "hana_prd_roles" => "4:S:master1:master:worker:master", + "hana_prd_site" => "Site2", + "hana_prd_srmode" => "sync", + "hana_prd_sync_state" => "SOK", + "hana_prd_version" => "2.00.052.00.1599235305", + "hana_prd_vhost" => "vmhana02", + "lpa_prd_lpt" => "30", + "master-rsc_SAPHana_PRD_HDB00" => "100" + }, + hana_status: "Secondary", + name: "vmhana02", + resources: [ + %Trento.Domain.ClusterResource{ + fail_count: 0, + id: "rsc_SAPHana_PRD_HDB00", + role: "Slave", + status: "Active", + type: "ocf::suse:SAPHana" + }, + %Trento.Domain.ClusterResource{ + fail_count: 0, + id: "rsc_SAPHanaTopology_PRD_HDB00", + role: "Started", + status: "Active", + type: "ocf::suse:SAPHanaTopology" + } + ], + site: "Site2", + virtual_ip: nil + } + ], + sbd_devices: [], + secondary_sync_state: "SOK", + sr_health_state: "4", + stopped_resources: [], + system_replication_mode: "sync", + system_replication_operation_mode: "logreplay" + }, + discovered_health: :passing, + host_id: "a3279fd0-0443-1234-9354-2d7909fd6bc6", + hosts_number: 2, + name: "hana_cluster", + provider: Provider.aws(), + resources_number: 7, + sid: "PRD", + additional_sids: [], + type: :hana_scale_up + } + ]} == "ha_cluster_discovery_aws" |> load_discovery_event_fixture() - |> ClusterPolicy.handle() + |> ClusterPolicy.handle(nil) end test "should return the expected commands when a ha_cluster_discovery payload with gcp provider" do - assert { - :ok, - %Trento.Domain.Commands.RegisterClusterHost{ - cib_last_written: "Wed Apr 27 07:02:35 2022", - cluster_id: "61b4f40d-5e1e-5b58-bdc1-7b855dd7ede2", - designated_controller: true, - details: %Trento.Domain.HanaClusterDetails{ - fencing_type: "fence_gce", - nodes: [ - %Trento.Domain.ClusterNode{ - attributes: %{ - "hana_prd_clone_state" => 
"UNDEFINED", - "hana_prd_op_mode" => "logreplay", - "hana_prd_remoteHost" => "vmhana02", - "hana_prd_roles" => "1:P:master1::worker:", - "hana_prd_site" => "Site1", - "hana_prd_srmode" => "sync", - "hana_prd_version" => "2.00.057.00.1629894416", - "hana_prd_vhost" => "vmhana01", - "lpa_prd_lpt" => "1650871168", - "master-rsc_SAPHana_PRD_HDB00" => "-9000" - }, - hana_status: "Unknown", - name: "vmhana01", - resources: [ - %Trento.Domain.ClusterResource{ - fail_count: 0, - id: "rsc_SAPHanaTopology_PRD_HDB00", - role: "Started", - status: "Active", - type: "ocf::suse:SAPHanaTopology" - }, - %Trento.Domain.ClusterResource{ - fail_count: 0, - id: "rsc_ip_PRD_HDB00", - role: "Started", - status: "Active", - type: "ocf::heartbeat:IPaddr2" - }, - %Trento.Domain.ClusterResource{ - fail_count: 0, - id: "rsc_socat_PRD_HDB00", - role: "Started", - status: "Active", - type: "ocf::heartbeat:anything" - } - ], - site: "Site1", - virtual_ip: "10.0.0.12" - }, - %Trento.Domain.ClusterNode{ - attributes: %{ - "hana_prd_clone_state" => "DEMOTED", - "hana_prd_op_mode" => "logreplay", - "hana_prd_remoteHost" => "vmhana01", - "hana_prd_roles" => "4:S:master1:master:worker:master", - "hana_prd_site" => "Site2", - "hana_prd_srmode" => "sync", - "hana_prd_version" => "2.00.057.00.1629894416", - "hana_prd_vhost" => "vmhana02", - "lpa_prd_lpt" => "30", - "master-rsc_SAPHana_PRD_HDB00" => "-INFINITY" - }, - hana_status: "Unknown", - name: "vmhana02", - resources: [ - %Trento.Domain.ClusterResource{ - fail_count: 0, - id: "rsc_SAPHana_PRD_HDB00", - role: "Slave", - status: "Active", - type: "ocf::suse:SAPHana" - }, - %Trento.Domain.ClusterResource{ - fail_count: 0, - id: "rsc_SAPHanaTopology_PRD_HDB00", - role: "Started", - status: "Active", - type: "ocf::suse:SAPHanaTopology" - } - ], - site: "Site2", - virtual_ip: nil - } - ], - sbd_devices: [], - secondary_sync_state: "Unknown", - sr_health_state: "Unknown", - stopped_resources: [ - %Trento.Domain.ClusterResource{ - fail_count: nil, - id: "rsc_gcp_stonith_PRD_HDB00_vmhana01", - role: "Stopped", - status: nil, - type: "stonith:fence_gce" - }, - %Trento.Domain.ClusterResource{ - fail_count: nil, - id: "rsc_exporter_PRD_HDB00", - role: "Stopped", - status: nil, - type: "systemd:prometheus-hanadb_exporter@PRD_HDB00" - }, - %Trento.Domain.ClusterResource{ - fail_count: nil, - id: "rsc_gcp_stonith_PRD_HDB00_vmhana02", - role: "Stopped", - status: nil, - type: "stonith:fence_gce" - }, - %Trento.Domain.ClusterResource{ - fail_count: nil, - id: "rsc_SAPHana_PRD_HDB00", - role: "Stopped", - status: nil, - type: "ocf::suse:SAPHana" - } - ], - system_replication_mode: "sync", - system_replication_operation_mode: "logreplay" - }, - discovered_health: :critical, - host_id: "1dc79771-0a96-1234-b5b6-cd4d0aef6acc", - hosts_number: 2, - name: "hana_cluster", - provider: Provider.gcp(), - resources_number: 9, - sid: "PRD", - type: :hana_scale_up - } - } == + assert {:ok, + [ + %Trento.Domain.Commands.RegisterClusterHost{ + cib_last_written: "Wed Apr 27 07:02:35 2022", + cluster_id: "61b4f40d-5e1e-5b58-bdc1-7b855dd7ede2", + designated_controller: true, + details: %Trento.Domain.HanaClusterDetails{ + fencing_type: "fence_gce", + nodes: [ + %Trento.Domain.HanaClusterNode{ + attributes: %{ + "hana_prd_clone_state" => "UNDEFINED", + "hana_prd_op_mode" => "logreplay", + "hana_prd_remoteHost" => "vmhana02", + "hana_prd_roles" => "1:P:master1::worker:", + "hana_prd_site" => "Site1", + "hana_prd_srmode" => "sync", + "hana_prd_version" => "2.00.057.00.1629894416", + "hana_prd_vhost" => 
"vmhana01", + "lpa_prd_lpt" => "1650871168", + "master-rsc_SAPHana_PRD_HDB00" => "-9000" + }, + hana_status: "Unknown", + name: "vmhana01", + resources: [ + %Trento.Domain.ClusterResource{ + fail_count: 0, + id: "rsc_SAPHanaTopology_PRD_HDB00", + role: "Started", + status: "Active", + type: "ocf::suse:SAPHanaTopology" + }, + %Trento.Domain.ClusterResource{ + fail_count: 0, + id: "rsc_ip_PRD_HDB00", + role: "Started", + status: "Active", + type: "ocf::heartbeat:IPaddr2" + }, + %Trento.Domain.ClusterResource{ + fail_count: 0, + id: "rsc_socat_PRD_HDB00", + role: "Started", + status: "Active", + type: "ocf::heartbeat:anything" + } + ], + site: "Site1", + virtual_ip: "10.0.0.12" + }, + %Trento.Domain.HanaClusterNode{ + attributes: %{ + "hana_prd_clone_state" => "DEMOTED", + "hana_prd_op_mode" => "logreplay", + "hana_prd_remoteHost" => "vmhana01", + "hana_prd_roles" => "4:S:master1:master:worker:master", + "hana_prd_site" => "Site2", + "hana_prd_srmode" => "sync", + "hana_prd_version" => "2.00.057.00.1629894416", + "hana_prd_vhost" => "vmhana02", + "lpa_prd_lpt" => "30", + "master-rsc_SAPHana_PRD_HDB00" => "-INFINITY" + }, + hana_status: "Unknown", + name: "vmhana02", + resources: [ + %Trento.Domain.ClusterResource{ + fail_count: 0, + id: "rsc_SAPHana_PRD_HDB00", + role: "Slave", + status: "Active", + type: "ocf::suse:SAPHana" + }, + %Trento.Domain.ClusterResource{ + fail_count: 0, + id: "rsc_SAPHanaTopology_PRD_HDB00", + role: "Started", + status: "Active", + type: "ocf::suse:SAPHanaTopology" + } + ], + site: "Site2", + virtual_ip: nil + } + ], + sbd_devices: [], + secondary_sync_state: "Unknown", + sr_health_state: "Unknown", + stopped_resources: [ + %Trento.Domain.ClusterResource{ + fail_count: nil, + id: "rsc_gcp_stonith_PRD_HDB00_vmhana01", + role: "Stopped", + status: nil, + type: "stonith:fence_gce" + }, + %Trento.Domain.ClusterResource{ + fail_count: nil, + id: "rsc_exporter_PRD_HDB00", + role: "Stopped", + status: nil, + type: "systemd:prometheus-hanadb_exporter@PRD_HDB00" + }, + %Trento.Domain.ClusterResource{ + fail_count: nil, + id: "rsc_gcp_stonith_PRD_HDB00_vmhana02", + role: "Stopped", + status: nil, + type: "stonith:fence_gce" + }, + %Trento.Domain.ClusterResource{ + fail_count: nil, + id: "rsc_SAPHana_PRD_HDB00", + role: "Stopped", + status: nil, + type: "ocf::suse:SAPHana" + } + ], + system_replication_mode: "sync", + system_replication_operation_mode: "logreplay" + }, + discovered_health: :critical, + host_id: "1dc79771-0a96-1234-b5b6-cd4d0aef6acc", + hosts_number: 2, + name: "hana_cluster", + provider: Provider.gcp(), + resources_number: 9, + sid: "PRD", + additional_sids: [], + type: :hana_scale_up + } + ]} == "ha_cluster_discovery_gcp" |> load_discovery_event_fixture() - |> ClusterPolicy.handle() + |> ClusterPolicy.handle(nil) end test "should return the expected commands when a ha_cluster_discovery payload does not have a Name field" do assert {:ok, - %RegisterClusterHost{ - cib_last_written: "Fri Oct 18 11:48:22 2019", - cluster_id: "34a94290-2236-5e4d-8def-05beb32d14d4", - designated_controller: true, - details: %HanaClusterDetails{ - fencing_type: "external/sbd", - nodes: [ - %ClusterNode{ - attributes: %{ - "hana_prd_clone_state" => "PROMOTED", - "hana_prd_op_mode" => "logreplay", - "hana_prd_remoteHost" => "node02", - "hana_prd_roles" => "4:P:master1:master:worker:master", - "hana_prd_site" => "PRIMARY_SITE_NAME", - "hana_prd_srmode" => "sync", - "hana_prd_sync_state" => "PRIM", - "hana_prd_version" => "2.00.040.00.1553674765", - "hana_prd_vhost" => "node01", - 
"lpa_prd_lpt" => "1571392102", - "master-rsc_SAPHana_PRD_HDB00" => "150" - }, - hana_status: "Primary", - name: "node01", - resources: [ - %ClusterResource{ - fail_count: 0, - id: "stonith-sbd", - role: "Started", - status: "Active", - type: "stonith:external/sbd" - }, - %ClusterResource{ - fail_count: 2, - id: "rsc_ip_PRD_HDB00", - role: "Started", - status: "Active", - type: "ocf::heartbeat:IPaddr2" - }, - %ClusterResource{ - fail_count: 1_000_000, - id: "rsc_SAPHana_PRD_HDB00", - role: "Master", - status: "Active", - type: "ocf::suse:SAPHana" - }, - %ClusterResource{ - fail_count: 0, - id: "rsc_SAPHanaTopology_PRD_HDB00", - role: "Started", - status: "Active", - type: "ocf::suse:SAPHanaTopology" + [ + %RegisterClusterHost{ + cib_last_written: "Fri Oct 18 11:48:22 2019", + cluster_id: "34a94290-2236-5e4d-8def-05beb32d14d4", + designated_controller: true, + details: %HanaClusterDetails{ + fencing_type: "external/sbd", + nodes: [ + %HanaClusterNode{ + attributes: %{ + "hana_prd_clone_state" => "PROMOTED", + "hana_prd_op_mode" => "logreplay", + "hana_prd_remoteHost" => "node02", + "hana_prd_roles" => "4:P:master1:master:worker:master", + "hana_prd_site" => "PRIMARY_SITE_NAME", + "hana_prd_srmode" => "sync", + "hana_prd_sync_state" => "PRIM", + "hana_prd_version" => "2.00.040.00.1553674765", + "hana_prd_vhost" => "node01", + "lpa_prd_lpt" => "1571392102", + "master-rsc_SAPHana_PRD_HDB00" => "150" }, - %ClusterResource{ - fail_count: nil, - id: "clusterfs", - role: "Started", - status: "Active", - type: "ocf::heartbeat:Filesystem" - }, - %ClusterResource{ - fail_count: nil, - id: "rsc_ip_HA1_ASCS00", - role: "Started", - status: "Active", - type: "ocf::heartbeat:IPaddr2" - }, - %ClusterResource{ - fail_count: nil, - id: "rsc_fs_HA1_ASCS00", - role: "Started", - status: "Active", - type: "ocf::heartbeat:Filesystem" - }, - %ClusterResource{ - fail_count: nil, - id: "rsc_sap_HA1_ASCS00", - role: "Started", - status: "Active", - type: "ocf::heartbeat:SAPInstance" - } - ], - site: "PRIMARY_SITE_NAME", - virtual_ip: "192.168.123.200" - }, - %ClusterNode{ - attributes: %{ - "hana_prd_clone_state" => "DEMOTED", - "hana_prd_op_mode" => "logreplay", - "hana_prd_remoteHost" => "node01", - "hana_prd_roles" => "4:S:master1:master:worker:master", - "hana_prd_site" => "SECONDARY_SITE_NAME", - "hana_prd_srmode" => "sync", - "hana_prd_sync_state" => "SOK", - "hana_prd_version" => "2.00.040.00.1553674765", - "hana_prd_vhost" => "node02", - "lpa_prd_lpt" => "30", - "master-rsc_SAPHana_PRD_HDB00" => "100" + hana_status: "Primary", + name: "node01", + resources: [ + %ClusterResource{ + fail_count: 0, + id: "stonith-sbd", + role: "Started", + status: "Active", + type: "stonith:external/sbd" + }, + %ClusterResource{ + fail_count: 2, + id: "rsc_ip_PRD_HDB00", + role: "Started", + status: "Active", + type: "ocf::heartbeat:IPaddr2" + }, + %ClusterResource{ + fail_count: 1_000_000, + id: "rsc_SAPHana_PRD_HDB00", + role: "Master", + status: "Active", + type: "ocf::suse:SAPHana" + }, + %ClusterResource{ + fail_count: 0, + id: "rsc_SAPHanaTopology_PRD_HDB00", + role: "Started", + status: "Active", + type: "ocf::suse:SAPHanaTopology" + }, + %ClusterResource{ + fail_count: nil, + id: "clusterfs", + role: "Started", + status: "Active", + type: "ocf::heartbeat:Filesystem" + }, + %ClusterResource{ + fail_count: nil, + id: "rsc_ip_HA1_ASCS00", + role: "Started", + status: "Active", + type: "ocf::heartbeat:IPaddr2" + }, + %ClusterResource{ + fail_count: nil, + id: "rsc_fs_HA1_ASCS00", + role: "Started", + status: "Active", + 
type: "ocf::heartbeat:Filesystem" + }, + %ClusterResource{ + fail_count: nil, + id: "rsc_sap_HA1_ASCS00", + role: "Started", + status: "Active", + type: "ocf::heartbeat:SAPInstance" + } + ], + site: "PRIMARY_SITE_NAME", + virtual_ip: "192.168.123.200" }, - hana_status: "Secondary", - name: "node02", - resources: [ - %ClusterResource{ - fail_count: 0, - id: "test", - role: "Started", - status: "Active", - type: "ocf::heartbeat:Dummy" - }, - %ClusterResource{ - fail_count: 300, - id: "rsc_SAPHana_PRD_HDB00", - role: "Slave", - status: "Active", - type: "ocf::suse:SAPHana" + %HanaClusterNode{ + attributes: %{ + "hana_prd_clone_state" => "DEMOTED", + "hana_prd_op_mode" => "logreplay", + "hana_prd_remoteHost" => "node01", + "hana_prd_roles" => "4:S:master1:master:worker:master", + "hana_prd_site" => "SECONDARY_SITE_NAME", + "hana_prd_srmode" => "sync", + "hana_prd_sync_state" => "SOK", + "hana_prd_version" => "2.00.040.00.1553674765", + "hana_prd_vhost" => "node02", + "lpa_prd_lpt" => "30", + "master-rsc_SAPHana_PRD_HDB00" => "100" }, - %ClusterResource{ - fail_count: 0, - id: "rsc_SAPHanaTopology_PRD_HDB00", - role: "Started", - status: "Active", - type: "ocf::suse:SAPHanaTopology" - }, - %ClusterResource{ - fail_count: nil, - id: "clusterfs", - role: "Started", - status: "Active", - type: "ocf::heartbeat:Filesystem" - }, - %ClusterResource{ - fail_count: nil, - id: "rsc_ip_HA1_ERS10", - role: "Started", - status: "Active", - type: "ocf::heartbeat:IPaddr2" - }, - %ClusterResource{ - fail_count: nil, - id: "rsc_fs_HA1_ERS10", - role: "Started", - status: "Active", - type: "ocf::heartbeat:Filesystem" - }, - %ClusterResource{ - fail_count: nil, - id: "rsc_sap_HA1_ERS10", - role: "Started", - status: "Active", - type: "ocf::heartbeat:SAPInstance" - } - ], - site: "SECONDARY_SITE_NAME", - virtual_ip: nil - } - ], - sbd_devices: [ - %SbdDevice{ - device: "/dev/vdc", - status: "healthy" - }, - %SbdDevice{ - device: "/dev/vdb", - status: "healthy" - } - ], - secondary_sync_state: "SOK", - sr_health_state: "4", - stopped_resources: [ - %ClusterResource{ - fail_count: nil, - id: "test-stop", - role: "Stopped", - status: nil, - type: "ocf::heartbeat:Dummy" - }, - %ClusterResource{ - fail_count: nil, - id: "clusterfs", - role: "Stopped", - status: nil, - type: "ocf::heartbeat:Filesystem" - }, - %ClusterResource{ - fail_count: nil, - id: "clusterfs", - role: "Stopped", - status: nil, - type: "ocf::heartbeat:Filesystem" - } - ], - system_replication_mode: "sync", - system_replication_operation_mode: "logreplay" - }, - host_id: "779cdd70-e9e2-58ca-b18a-bf3eb3f71244", - name: nil, - sid: "PRD", - type: :hana_scale_up, - hosts_number: 2, - resources_number: 8, - discovered_health: :passing, - provider: Provider.azure() - }} == + hana_status: "Secondary", + name: "node02", + resources: [ + %ClusterResource{ + fail_count: 0, + id: "test", + role: "Started", + status: "Active", + type: "ocf::heartbeat:Dummy" + }, + %ClusterResource{ + fail_count: 300, + id: "rsc_SAPHana_PRD_HDB00", + role: "Slave", + status: "Active", + type: "ocf::suse:SAPHana" + }, + %ClusterResource{ + fail_count: 0, + id: "rsc_SAPHanaTopology_PRD_HDB00", + role: "Started", + status: "Active", + type: "ocf::suse:SAPHanaTopology" + }, + %ClusterResource{ + fail_count: nil, + id: "clusterfs", + role: "Started", + status: "Active", + type: "ocf::heartbeat:Filesystem" + }, + %ClusterResource{ + fail_count: nil, + id: "rsc_ip_HA1_ERS10", + role: "Started", + status: "Active", + type: "ocf::heartbeat:IPaddr2" + }, + %ClusterResource{ + fail_count: 
nil, + id: "rsc_fs_HA1_ERS10", + role: "Started", + status: "Active", + type: "ocf::heartbeat:Filesystem" + }, + %ClusterResource{ + fail_count: nil, + id: "rsc_sap_HA1_ERS10", + role: "Started", + status: "Active", + type: "ocf::heartbeat:SAPInstance" + } + ], + site: "SECONDARY_SITE_NAME", + virtual_ip: nil + } + ], + sbd_devices: [ + %SbdDevice{ + device: "/dev/vdc", + status: "healthy" + }, + %SbdDevice{ + device: "/dev/vdb", + status: "healthy" + } + ], + secondary_sync_state: "SOK", + sr_health_state: "4", + stopped_resources: [ + %ClusterResource{ + fail_count: nil, + id: "test-stop", + role: "Stopped", + status: nil, + type: "ocf::heartbeat:Dummy" + }, + %ClusterResource{ + fail_count: nil, + id: "clusterfs", + role: "Stopped", + status: nil, + type: "ocf::heartbeat:Filesystem" + }, + %ClusterResource{ + fail_count: nil, + id: "clusterfs", + role: "Stopped", + status: nil, + type: "ocf::heartbeat:Filesystem" + } + ], + system_replication_mode: "sync", + system_replication_operation_mode: "logreplay" + }, + host_id: "779cdd70-e9e2-58ca-b18a-bf3eb3f71244", + name: nil, + sid: "PRD", + additional_sids: [], + type: :hana_scale_up, + hosts_number: 2, + resources_number: 8, + discovered_health: :passing, + provider: Provider.azure() + } + ]} == "ha_cluster_discovery_unnamed" |> load_discovery_event_fixture() - |> ClusterPolicy.handle() + |> ClusterPolicy.handle(nil) + end + + describe "delta deregistration" do + test "should deregister the host from the current cluster and register to the new one" do + current_cluster_id = UUID.uuid4() + + {:ok, + [ + %DeregisterClusterHost{cluster_id: ^current_cluster_id}, + %RegisterClusterHost{cluster_id: "34a94290-2236-5e4d-8def-05beb32d14d4"} + ]} = + "ha_cluster_discovery_hana_scale_up" + |> load_discovery_event_fixture() + |> ClusterPolicy.handle(current_cluster_id) + end + + test "should deregister the host from the current cluster" do + current_cluster_id = UUID.uuid4() + + {:ok, + [ + %DeregisterClusterHost{cluster_id: ^current_cluster_id} + ]} = + "ha_cluster_discovery_unclustered" + |> load_discovery_event_fixture() + |> ClusterPolicy.handle(current_cluster_id) + end + + test "should not deregister the host if the cluster does not change" do + current_cluster_id = "34a94290-2236-5e4d-8def-05beb32d14d4" + + assert {:ok, + [ + %RegisterClusterHost{cluster_id: ^current_cluster_id} + ]} = + "ha_cluster_discovery_hana_scale_up" + |> load_discovery_event_fixture() + |> ClusterPolicy.handle(current_cluster_id) + end end end diff --git a/test/trento/application/integration/discovery/policies/sap_system_policy_test.exs b/test/trento/application/integration/discovery/policies/sap_system_policy_test.exs new file mode 100644 index 0000000000..e6320fc9a7 --- /dev/null +++ b/test/trento/application/integration/discovery/policies/sap_system_policy_test.exs @@ -0,0 +1,254 @@ +defmodule Trento.Integration.Discovery.SapSystemPolicyTest do + use ExUnit.Case + use Trento.DataCase + + import Trento.Factory + + require Trento.Domain.Enums.EnsaVersion, as: EnsaVersion + + import Trento.Integration.DiscoveryFixturesHelper + + alias Trento.Integration.Discovery.SapSystemPolicy + + alias Trento.Domain.Commands.{ + DeregisterApplicationInstance, + DeregisterDatabaseInstance, + RegisterApplicationInstance, + RegisterDatabaseInstance + } + + test "should return the expected commands when a sap_system payload of type database is handled" do + assert {:ok, + [ + %RegisterDatabaseInstance{ + features: "HDB|HDB_WORKER", + host_id: "779cdd70-e9e2-58ca-b18a-bf3eb3f71244", + 
instance_number: "00", + sap_system_id: "97c4127a-29bc-5315-82bd-8f154bee626f", + sid: "PRD", + tenant: "PRD", + system_replication: "Primary", + system_replication_status: "ERROR", + health: :passing + } + ]} = + "sap_system_discovery_database" + |> load_discovery_event_fixture() + |> SapSystemPolicy.handle([]) + end + + test "should return the expected commands when a sap_system payload of type database is handled in the event of a stopped instance" do + assert {:ok, + [ + %RegisterDatabaseInstance{ + features: "HDB|HDB_WORKER", + host_id: "9cd46919-5f19-59aa-993e-cf3736c71053", + instance_number: "10", + sap_system_id: "6c9208eb-a5bb-57ef-be5c-6422dedab602", + sid: "HDP", + tenant: "HDP", + system_replication: nil, + system_replication_status: nil, + health: :unknown + } + ]} = + "sap_system_discovery_database_stopped_instance" + |> load_discovery_event_fixture() + |> SapSystemPolicy.handle([]) + end + + test "should return the expected commands when a sap_system payload of type application is handled" do + assert {:ok, + [ + %RegisterApplicationInstance{ + db_host: "10.74.1.12", + features: "ABAP|GATEWAY|ICMAN|IGS", + host_id: "779cdd70-e9e2-58ca-b18a-bf3eb3f71244", + instance_number: "02", + sap_system_id: nil, + sid: "HA1", + tenant: "PRD", + health: :passing, + ensa_version: EnsaVersion.no_ensa() + } + ]} = + "sap_system_discovery_application" + |> load_discovery_event_fixture() + |> SapSystemPolicy.handle([]) + end + + test "should return the expected commands when a sap_system payload of type application and diagnostics is handled" do + assert {:ok, + [ + %RegisterApplicationInstance{ + db_host: "10.74.1.12", + features: "ABAP|GATEWAY|ICMAN|IGS", + host_id: "779cdd70-e9e2-58ca-b18a-bf3eb3f71244", + instance_number: "02", + sap_system_id: nil, + sid: "HA1", + tenant: "PRD", + health: :passing + } + ]} = + "sap_system_discovery_application_diagnostics" + |> load_discovery_event_fixture() + |> SapSystemPolicy.handle([]) + end + + test "should return the expected commands when a sap_system payload of type application with ensa version is handled" do + Enum.each( + [ + ["enserver", EnsaVersion.ensa1()], + ["enrepserver", EnsaVersion.ensa1()], + ["enq_server", EnsaVersion.ensa2()], + ["enq_replicator", EnsaVersion.ensa2()] + ], + fn [process_name, expected_ensa_version] -> + assert {:ok, + [ + %RegisterApplicationInstance{ + ensa_version: ^expected_ensa_version + } + ]} = + "sap_system_discovery_application" + |> load_discovery_event_fixture() + |> update_in( + ["payload"], + &Enum.map(&1, fn sap_system -> + update_in( + sap_system, + ["Instances"], + fn instances -> + Enum.map(instances, fn instance -> + put_in( + instance, + ["SAPControl", "Processes"], + [ + build(:sapcontrol_process), + build(:sapcontrol_process, %{"name" => process_name}), + build(:sapcontrol_process) + ] + ) + end) + end + ) + end) + ) + |> SapSystemPolicy.handle([]) + end + ) + end + + test "should return an empty list of commands if an empty payload is received" do + assert {:ok, []} = + "sap_system_discovery_empty" + |> load_discovery_event_fixture() + |> SapSystemPolicy.handle([]) + end + + describe "delta deregistration" do + test "should deregister the old instances and register the new ones" do + database_sap_system_id = UUID.uuid4() + + [ + %{instance_number: database_instance_number_1}, + %{instance_number: database_instance_number_2} + ] = + database_instances = + build_list( + 2, + :database_instance_without_host, + sap_system_id: database_sap_system_id + ) + + [ + %{instance_number: 
application_instance_number_1},
+        %{instance_number: application_instance_number_2}
+      ] =
+        application_instances =
+        build_list(
+          2,
+          :application_instance_without_host
+        )
+
+      assert {:ok,
+              [
+                %DeregisterDatabaseInstance{
+                  sap_system_id: ^database_sap_system_id,
+                  instance_number: ^database_instance_number_1
+                },
+                %DeregisterDatabaseInstance{
+                  sap_system_id: ^database_sap_system_id,
+                  instance_number: ^database_instance_number_2
+                },
+                %DeregisterApplicationInstance{
+                  instance_number: ^application_instance_number_1
+                },
+                %DeregisterApplicationInstance{
+                  instance_number: ^application_instance_number_2
+                },
+                %RegisterDatabaseInstance{
+                  instance_number: "00",
+                  sap_system_id: "97c4127a-29bc-5315-82bd-8f154bee626f",
+                  sid: "PRD"
+                }
+              ]} =
+               "sap_system_discovery_database"
+               |> load_discovery_event_fixture()
+               |> SapSystemPolicy.handle(database_instances ++ application_instances)
+    end
+
+    test "should not deregister any instance if the discovered instances did not change" do
+      application_instance =
+        build(:application_instance_without_host,
+          features: "ABAP|GATEWAY|ICMAN|IGS",
+          host_id: "779cdd70-e9e2-58ca-b18a-bf3eb3f71244",
+          instance_number: "02",
+          sid: "HA1"
+        )
+
+      assert {:ok,
+              [
+                %RegisterApplicationInstance{
+                  db_host: "10.74.1.12",
+                  features: "ABAP|GATEWAY|ICMAN|IGS",
+                  host_id: "779cdd70-e9e2-58ca-b18a-bf3eb3f71244",
+                  instance_number: "02",
+                  sap_system_id: nil,
+                  sid: "HA1",
+                  tenant: "PRD",
+                  health: :passing
+                }
+              ]} =
+               "sap_system_discovery_application"
+               |> load_discovery_event_fixture()
+               |> SapSystemPolicy.handle([application_instance])
+    end
+
+    test "should deregister all instances if the list of discovered instances is empty" do
+      application_instance =
+        build(:application_instance_without_host,
+          instance_number: "02"
+        )
+
+      database_instance =
+        build(:database_instance_without_host,
+          instance_number: "10"
+        )
+
+      assert {:ok,
+              [
+                %DeregisterApplicationInstance{
+                  instance_number: "02"
+                },
+                %DeregisterDatabaseInstance{
+                  instance_number: "10"
+                }
+              ]} =
+               "sap_system_discovery_empty"
+               |> load_discovery_event_fixture()
+               |> SapSystemPolicy.handle([application_instance, database_instance])
+    end
+  end
+end
diff --git a/test/trento/application/integration/discovery/policies/sap_system_test.exs b/test/trento/application/integration/discovery/policies/sap_system_test.exs
deleted file mode 100644
index dd33803b09..0000000000
--- a/test/trento/application/integration/discovery/policies/sap_system_test.exs
+++ /dev/null
@@ -1,91 +0,0 @@
-defmodule Trento.Integration.Discovery.SapSystemPolicyTest do
-  use ExUnit.Case
-  use Trento.DataCase
-
-  import Trento.Integration.DiscoveryFixturesHelper
-
-  alias Trento.Integration.Discovery.SapSystemPolicy
-
-  alias Trento.Domain.Commands.{
-    RegisterApplicationInstance,
-    RegisterDatabaseInstance
-  }
-
-  test "should return the expected commands when a sap_system payload of type database is handled" do
-    assert {:ok,
-            [
-              
%RegisterDatabaseInstance{ - features: "HDB|HDB_WORKER", - host_id: "9cd46919-5f19-59aa-993e-cf3736c71053", - instance_number: "10", - sap_system_id: "6c9208eb-a5bb-57ef-be5c-6422dedab602", - sid: "HDP", - tenant: "HDP", - system_replication: nil, - system_replication_status: nil, - health: :unknown - } - ]} = - "sap_system_discovery_database_stopped_instance" - |> load_discovery_event_fixture() - |> SapSystemPolicy.handle() - end - - test "should return the expected commands when a sap_system payload of type application is handled" do - assert {:ok, - [ - %RegisterApplicationInstance{ - db_host: "10.74.1.12", - features: "ABAP|GATEWAY|ICMAN|IGS", - host_id: "779cdd70-e9e2-58ca-b18a-bf3eb3f71244", - instance_number: "02", - sap_system_id: nil, - sid: "HA1", - tenant: "PRD", - health: :passing - } - ]} = - "sap_system_discovery_application" - |> load_discovery_event_fixture() - |> SapSystemPolicy.handle() - end - - test "should return the expected commands when a sap_system payload of type application and diagnostics is handled" do - assert {:ok, - [ - %RegisterApplicationInstance{ - db_host: "10.74.1.12", - features: "ABAP|GATEWAY|ICMAN|IGS", - host_id: "779cdd70-e9e2-58ca-b18a-bf3eb3f71244", - instance_number: "02", - sap_system_id: nil, - sid: "HA1", - tenant: "PRD", - health: :passing - } - ]} = - "sap_system_discovery_application_diagnostics" - |> load_discovery_event_fixture() - |> SapSystemPolicy.handle() - end -end diff --git a/test/trento/application/integration/discovery/protocol/enrich_register_application_instance_test.exs b/test/trento/application/integration/discovery/protocol/enrich_register_application_instance_test.exs index 1baeeacedd..12714b979a 100644 --- a/test/trento/application/integration/discovery/protocol/enrich_register_application_instance_test.exs +++ b/test/trento/application/integration/discovery/protocol/enrich_register_application_instance_test.exs @@ -31,6 +31,34 @@ defmodule Trento.EnrichRegisterApplicationInstanceTest do Enrichable.enrich(command, %{}) end + test "should return a database not found error if the database instance host has been deregistered" do + deregistered_host = insert(:host, deregistered_at: DateTime.utc_now()) + + %{ + tenant: tenant, + host: %{ip_addresses: [ip]} + } = + insert(:database_instance_without_host, + host_id: deregistered_host.id, + host: deregistered_host + ) + + command = + build( + :register_application_instance_command, + sap_system_id: nil, + sid: Faker.StarWars.planet(), + db_host: ip, + tenant: tenant, + instance_number: "00", + features: Faker.Pokemon.name(), + host_id: Faker.UUID.v4(), + health: :passing + ) + + assert {:error, :database_not_registered} = Enrichable.enrich(command, %{}) + end + test "should return an error if the database was not found" do %{ tenant: tenant @@ -49,6 +77,6 @@ defmodule Trento.EnrichRegisterApplicationInstanceTest do health: :passing ) - assert {:error, :database_not_found} = Enrichable.enrich(command, %{}) + assert {:error, :database_not_registered} = Enrichable.enrich(command, %{}) end end diff --git a/test/trento/application/integration/discovery/protocol/enrich_request_host_deregistration_test.exs b/test/trento/application/integration/discovery/protocol/enrich_request_host_deregistration_test.exs new file mode 100644 index 0000000000..9becd4181d --- /dev/null +++ b/test/trento/application/integration/discovery/protocol/enrich_request_host_deregistration_test.exs @@ -0,0 +1,101 @@ +defmodule Trento.EnrichRequestHostDeregistrationTest do + use ExUnit.Case + use Trento.DataCase + 
+ import Trento.Factory + + alias Trento.Domain.Commands.RequestHostDeregistration + alias Trento.Support.Middleware.Enrichable + + @heartbeat_interval Application.compile_env!(:trento, Trento.Heartbeats)[:interval] + @deregistration_debounce Application.compile_env!( + :trento, + :deregistration_debounce + ) + @total_deregistration_debounce @heartbeat_interval + @deregistration_debounce + + describe "enrich RequestHostDeregistration" do + test "should deregister host if deregistration request is outside debounce period" do + now = DateTime.utc_now() + + %{id: id} = insert(:host) + + insert(:heartbeat, + agent_id: id, + timestamp: + DateTime.add( + DateTime.utc_now(), + -(@total_deregistration_debounce + 10_000), + :millisecond + ) + ) + + command = RequestHostDeregistration.new!(%{host_id: id, requested_at: now}) + + assert {:ok, %RequestHostDeregistration{host_id: id, requested_at: now}} == + Enrichable.enrich(command, %{}) + end + + test "should deregister host if host does not have a heartbeat entry" do + now = DateTime.utc_now() + + %{id: id} = insert(:host) + + command = RequestHostDeregistration.new!(%{host_id: id, requested_at: now}) + + assert {:ok, %RequestHostDeregistration{host_id: id, requested_at: now}} == + Enrichable.enrich(command, %{}) + end + + test "should return an error if deregistration request is within debounce period" do + %{id: id} = insert(:host) + + insert(:heartbeat, + agent_id: id, + timestamp: + DateTime.add( + DateTime.utc_now(), + -(@total_deregistration_debounce - 2_000), + :millisecond + ) + ) + + command = RequestHostDeregistration.new!(%{host_id: id, requested_at: DateTime.utc_now()}) + + assert {:error, :host_alive} == Enrichable.enrich(command, %{}) + end + + test "should return an error when deregistering a host that is already deregistered" do + %{id: id} = insert(:host, deregistered_at: DateTime.utc_now()) + + insert(:heartbeat, + agent_id: id, + timestamp: + DateTime.add( + DateTime.utc_now(), + -(@total_deregistration_debounce + 10_000), + :millisecond + ) + ) + + command = RequestHostDeregistration.new!(%{host_id: id, requested_at: DateTime.utc_now()}) + + assert {:error, :host_not_registered} == Enrichable.enrich(command, %{}) + end + + test "should return an error when deregistering a host that is already deregistered and does not have a heartbeat entry" do + %{id: id} = insert(:host, deregistered_at: DateTime.utc_now()) + + command = RequestHostDeregistration.new!(%{host_id: id, requested_at: DateTime.utc_now()}) + + assert {:error, :host_not_registered} == Enrichable.enrich(command, %{}) + end + + test "should return an error if host does not exist" do + command = + RequestHostDeregistration.new!(%{host_id: UUID.uuid4(), requested_at: DateTime.utc_now()}) + + assert {:error, :host_not_registered} == Enrichable.enrich(command, %{}) + end + end +end diff --git a/test/trento/application/process_managers/deregistration_process_manager_test.exs b/test/trento/application/process_managers/deregistration_process_manager_test.exs new file mode 100644 index 0000000000..5d7047c910 --- /dev/null +++ b/test/trento/application/process_managers/deregistration_process_manager_test.exs @@ -0,0 +1,542 @@ +defmodule Trento.DeregistrationProcessManagerTest do + use ExUnit.Case + + import Trento.Factory + + alias Trento.Domain.Events.{ + ApplicationInstanceDeregistered, + ApplicationInstanceRegistered, + ClusterRolledUp, + DatabaseInstanceDeregistered, + DatabaseInstanceRegistered, + HostAddedToCluster, + HostDeregistered, + HostDeregistrationRequested, + 
HostRegistered, + HostRemovedFromCluster, + HostRolledUp, + SapSystemRolledUp + } + + alias Trento.DeregistrationProcessManager + + alias Trento.DeregistrationProcessManager.Instance + + alias Trento.Domain.{ + Cluster, + SapSystem + } + + alias Trento.Domain.Commands.{ + DeregisterApplicationInstance, + DeregisterClusterHost, + DeregisterDatabaseInstance, + DeregisterHost + } + + describe "events interested" do + test "should start the process manager when HostRegistered event arrives" do + host_id = UUID.uuid4() + + assert {:start, ^host_id} = + DeregistrationProcessManager.interested?(%HostRegistered{host_id: host_id}) + end + + test "should start the process manager when HostRolledUp arrives" do + host_id = UUID.uuid4() + + assert {:start, ^host_id} = + DeregistrationProcessManager.interested?(%HostRolledUp{host_id: host_id}) + end + + test "should start the process manager when HostAddedToCluster arrives" do + host_id = UUID.uuid4() + + assert {:start, ^host_id} = + DeregistrationProcessManager.interested?(%HostAddedToCluster{host_id: host_id}) + end + + test "should start the process manager when ClusterRolledUp arrives" do + cluster_hosts = [UUID.uuid4(), UUID.uuid4()] + + assert {:start, ^cluster_hosts} = + DeregistrationProcessManager.interested?(%ClusterRolledUp{ + snapshot: %Cluster{hosts: cluster_hosts} + }) + end + + test "should start the process manager when DatabaseInstanceRegistered event arrives" do + host_id = UUID.uuid4() + + assert {:start, ^host_id} = + DeregistrationProcessManager.interested?(%DatabaseInstanceRegistered{ + host_id: host_id + }) + end + + test "should start the process manager when ApplicationInstanceRegistered event arrives" do + host_id = UUID.uuid4() + + assert {:start, ^host_id} = + DeregistrationProcessManager.interested?(%ApplicationInstanceRegistered{ + host_id: host_id + }) + end + + test "should start process managers when SapSystemRolledUp arrives" do + [%{host_id: db_host_id_1}, %{host_id: db_host_id_2}] = + database_instances = build_list(2, :sap_system_instance) + + [%{host_id: app_host_id_1}, %{host_id: app_host_id_2}] = + application_instances = build_list(2, :sap_system_instance) + + assert {:start, [^db_host_id_1, ^db_host_id_2, ^app_host_id_1, ^app_host_id_2]} = + DeregistrationProcessManager.interested?(%SapSystemRolledUp{ + snapshot: %SapSystem{ + database: %SapSystem.Database{ + instances: database_instances + }, + application: %SapSystem.Application{ + instances: application_instances + } + } + }) + end + + test "should continue the process manager when HostDeregistrationRequested arrives" do + host_id = UUID.uuid4() + + assert {:continue, ^host_id} = + DeregistrationProcessManager.interested?(%HostDeregistrationRequested{ + host_id: host_id + }) + end + + test "should continue the process manager when HostRemovedFromCluster arrives" do + host_id = UUID.uuid4() + + assert {:continue, ^host_id} = + DeregistrationProcessManager.interested?(%HostRemovedFromCluster{host_id: host_id}) + end + + test "should continue the process manager when DatabaseInstanceDeregistered arrives" do + host_id = UUID.uuid4() + + assert {:continue, ^host_id} = + DeregistrationProcessManager.interested?(%DatabaseInstanceDeregistered{ + host_id: host_id + }) + end + + test "should continue the process manager when ApplicationInstanceDeregistered arrives" do + host_id = UUID.uuid4() + + assert {:continue, ^host_id} = + DeregistrationProcessManager.interested?(%ApplicationInstanceDeregistered{ + host_id: host_id + }) + end + + test "should stop the process 
manager when HostDeregistered arrives" do + host_id = UUID.uuid4() + + assert {:stop, ^host_id} = + DeregistrationProcessManager.interested?(%HostDeregistered{host_id: host_id}) + end + end + + describe "host registration procedure" do + test "should update the state with the proper cluster id when HostAddedToCluster event is emitted" do + initial_state = %DeregistrationProcessManager{} + cluster_id = UUID.uuid4() + host_id = UUID.uuid4() + + events = [%HostAddedToCluster{cluster_id: cluster_id, host_id: host_id}] + + {commands, state} = reduce_events(events, initial_state) + + assert [] == commands + assert %DeregistrationProcessManager{cluster_id: ^cluster_id} = state + end + + test "should add database instance with the proper host id when DatabaseInstanceRegistered event is emitted" do + initial_state = %DeregistrationProcessManager{ + database_instances: [], + application_instances: [] + } + + host_id = UUID.uuid4() + sap_system_id = UUID.uuid4() + instance_number = "01" + + events = [ + %DatabaseInstanceRegistered{ + host_id: host_id, + sap_system_id: sap_system_id, + instance_number: instance_number + } + ] + + {commands, state} = reduce_events(events, initial_state) + + assert [] == commands + + assert %DeregistrationProcessManager{ + database_instances: [ + %Instance{ + sap_system_id: ^sap_system_id, + instance_number: ^instance_number + } + ], + application_instances: [] + } = state + end + + test "should add application instance when ApplicationInstanceRegistered event is emitted" do + sap_system_id_1 = UUID.uuid4() + instance_number_1 = "01" + + initial_state = %DeregistrationProcessManager{ + database_instances: [], + application_instances: [ + %Instance{ + sap_system_id: sap_system_id_1, + instance_number: instance_number_1 + } + ] + } + + host_id = UUID.uuid4() + sap_system_id_2 = UUID.uuid4() + instance_number_2 = "01" + + events = [ + %ApplicationInstanceRegistered{ + host_id: host_id, + sap_system_id: sap_system_id_2, + instance_number: instance_number_2 + } + ] + + {commands, state} = reduce_events(events, initial_state) + + assert [] == commands + + assert %DeregistrationProcessManager{ + database_instances: [], + application_instances: [ + %Instance{ + sap_system_id: ^sap_system_id_2, + instance_number: ^instance_number_2 + }, + %Instance{ + sap_system_id: ^sap_system_id_1, + instance_number: ^instance_number_1 + } + ] + } = state + end + + test "should update the state with the proper cluster id when ClusterRolledUp event is emitted" do + initial_state = %DeregistrationProcessManager{} + cluster_id = UUID.uuid4() + cluster_hosts = [UUID.uuid4(), UUID.uuid4()] + + events = [ + %ClusterRolledUp{cluster_id: cluster_id, snapshot: %Cluster{hosts: cluster_hosts}} + ] + + {commands, state} = reduce_events(events, initial_state) + + assert [] == commands + assert %DeregistrationProcessManager{cluster_id: ^cluster_id} = state + end + + test "should update state when SapSystemRolledUp event received" do + sap_system_id = UUID.uuid4() + instance_number = "00" + database_instance_number = "01" + application_instance_number = "02" + + initial_state = %DeregistrationProcessManager{ + database_instances: [ + %Instance{sap_system_id: sap_system_id, instance_number: instance_number} + ], + application_instances: [] + } + + events = [ + %SapSystemRolledUp{ + sap_system_id: sap_system_id, + snapshot: %SapSystem{ + database: %SapSystem.Database{ + instances: [ + %SapSystem.Instance{ + instance_number: database_instance_number + }, + %SapSystem.Instance{ + instance_number: instance_number 
+ } + ] + }, + application: %SapSystem.Application{ + instances: [ + %SapSystem.Instance{ + instance_number: application_instance_number + } + ] + } + } + } + ] + + {commands, state} = reduce_events(events, initial_state) + + assert [] == commands + + assert %DeregistrationProcessManager{ + database_instances: [ + %Instance{ + sap_system_id: ^sap_system_id, + instance_number: ^database_instance_number + }, + %Instance{ + sap_system_id: ^sap_system_id, + instance_number: ^instance_number + } + ], + application_instances: [ + %Instance{ + sap_system_id: ^sap_system_id, + instance_number: ^application_instance_number + } + ] + } = state + end + end + + describe "host deregistration procedure" do + test "should dispatch DeregisterHost command when HostDeregistrationRequested is emitted" do + host_id = UUID.uuid4() + requested_at = DateTime.utc_now() + initial_state = %DeregistrationProcessManager{} + + events = [%HostDeregistrationRequested{host_id: host_id, requested_at: requested_at}] + + {commands, state} = reduce_events(events, initial_state) + + assert ^initial_state = state + assert %DeregisterHost{host_id: ^host_id, deregistered_at: ^requested_at} = commands + end + + test "should dispatch commands when HostDeregistrationRequested is emitted and the host does not belong to a cluster and has instances associated" do + host_id = UUID.uuid4() + sap_system_id = UUID.uuid4() + db_instance_number = "00" + app_instance_number = "01" + requested_at = DateTime.utc_now() + + initial_state = %DeregistrationProcessManager{ + cluster_id: nil, + database_instances: [ + %Instance{sap_system_id: sap_system_id, instance_number: db_instance_number} + ], + application_instances: [ + %Instance{sap_system_id: sap_system_id, instance_number: app_instance_number} + ] + } + + events = [%HostDeregistrationRequested{host_id: host_id, requested_at: requested_at}] + + {commands, state} = reduce_events(events, initial_state) + + assert ^initial_state = state + + assert [ + %DeregisterDatabaseInstance{ + sap_system_id: ^sap_system_id, + instance_number: ^db_instance_number, + host_id: ^host_id, + deregistered_at: ^requested_at + }, + %DeregisterApplicationInstance{ + sap_system_id: ^sap_system_id, + instance_number: ^app_instance_number, + host_id: ^host_id, + deregistered_at: ^requested_at + }, + %DeregisterHost{host_id: ^host_id, deregistered_at: ^requested_at} + ] = commands + end + + test "should dispatch commands when HostDeregistrationRequested is emitted and the host belongs to a cluster and has no instances associated" do + host_id = UUID.uuid4() + cluster_id = UUID.uuid4() + requested_at = DateTime.utc_now() + + initial_state = %DeregistrationProcessManager{ + cluster_id: cluster_id, + database_instances: [], + application_instances: [] + } + + events = [%HostDeregistrationRequested{host_id: host_id, requested_at: requested_at}] + + {commands, state} = reduce_events(events, initial_state) + + assert ^initial_state = state + + assert [ + %DeregisterClusterHost{ + host_id: ^host_id, + cluster_id: ^cluster_id, + deregistered_at: ^requested_at + }, + %DeregisterHost{host_id: ^host_id, deregistered_at: ^requested_at} + ] = commands + end + + test "should dispatch commands when HostDeregistrationRequested is emitted and the host belongs to a cluster and has instances associated" do + host_id = UUID.uuid4() + cluster_id = UUID.uuid4() + sap_system_id = UUID.uuid4() + db_instance_number = "00" + app_instance_number = "01" + requested_at = DateTime.utc_now() + + initial_state = %DeregistrationProcessManager{ + 
cluster_id: cluster_id, + database_instances: [ + %Instance{sap_system_id: sap_system_id, instance_number: db_instance_number} + ], + application_instances: [ + %Instance{sap_system_id: sap_system_id, instance_number: app_instance_number} + ] + } + + events = [%HostDeregistrationRequested{host_id: host_id, requested_at: requested_at}] + + {commands, state} = reduce_events(events, initial_state) + + assert ^initial_state = state + + assert [ + %DeregisterDatabaseInstance{ + sap_system_id: ^sap_system_id, + instance_number: ^db_instance_number, + host_id: ^host_id, + deregistered_at: ^requested_at + }, + %DeregisterApplicationInstance{ + sap_system_id: ^sap_system_id, + instance_number: ^app_instance_number, + host_id: ^host_id, + deregistered_at: ^requested_at + }, + %DeregisterClusterHost{ + host_id: ^host_id, + cluster_id: ^cluster_id, + deregistered_at: ^requested_at + }, + %DeregisterHost{host_id: ^host_id, deregistered_at: ^requested_at} + ] = commands + end + + test "should update the state and remove the cluster id when HostRemovedFromCluster event is emitted" do + initial_state = %DeregistrationProcessManager{} + cluster_id = UUID.uuid4() + host_id = UUID.uuid4() + + events = [ + %HostAddedToCluster{cluster_id: cluster_id, host_id: host_id}, + %HostRemovedFromCluster{host_id: host_id} + ] + + {commands, state} = reduce_events(events, initial_state) + + assert [] == commands + assert %DeregistrationProcessManager{cluster_id: nil} = state + end + + test "should remove instance from state when DatabaseInstanceDeregistered event received" do + sap_system_id = UUID.uuid4() + instance_number = "00" + + initial_state = %DeregistrationProcessManager{ + database_instances: [ + %Instance{ + sap_system_id: sap_system_id, + instance_number: instance_number + } + ], + application_instances: [] + } + + host_id = UUID.uuid4() + deregistered_at = DateTime.utc_now() + + events = [ + %DatabaseInstanceDeregistered{ + instance_number: instance_number, + host_id: host_id, + sap_system_id: sap_system_id, + deregistered_at: deregistered_at + } + ] + + {commands, state} = reduce_events(events, initial_state) + + assert [] == commands + + assert %DeregistrationProcessManager{ + database_instances: [], + application_instances: [] + } = state + end + + test "should remove instance from state when ApplicationInstanceDeregistered event received" do + sap_system_id = UUID.uuid4() + instance_number = "00" + + initial_state = %DeregistrationProcessManager{ + database_instances: [], + application_instances: [ + %Instance{ + sap_system_id: sap_system_id, + instance_number: instance_number + } + ] + } + + host_id = UUID.uuid4() + deregistered_at = DateTime.utc_now() + + events = [ + %ApplicationInstanceDeregistered{ + instance_number: instance_number, + host_id: host_id, + sap_system_id: sap_system_id, + deregistered_at: deregistered_at + } + ] + + {commands, state} = reduce_events(events, initial_state) + + assert [] == commands + + assert %DeregistrationProcessManager{ + database_instances: [], + application_instances: [] + } = state + end + end + + defp reduce_events(events, initial_state) do + Enum.reduce(events, {[], initial_state}, fn event, {commands, state} -> + new_commands = DeregistrationProcessManager.handle(state, event) + new_state = DeregistrationProcessManager.apply(state, event) + + {commands ++ new_commands, new_state} + end) + end +end diff --git a/test/trento/application/projectors/cluster_projector_test.exs b/test/trento/application/projectors/cluster_projector_test.exs index 
0f6144d807..c3eb4ca575 100644 --- a/test/trento/application/projectors/cluster_projector_test.exs +++ b/test/trento/application/projectors/cluster_projector_test.exs @@ -8,8 +8,10 @@ defmodule Trento.ClusterProjectorTest do import Trento.Factory alias Trento.Domain.Events.{ + ClusterDeregistered, ClusterDetailsUpdated, - ClusterHealthChanged + ClusterHealthChanged, + ClusterRestored } alias Trento.{ @@ -44,6 +46,7 @@ defmodule Trento.ClusterProjectorTest do assert event.cluster_id == cluster_projection.id assert event.name == cluster_projection.name assert event.sid == cluster_projection.sid + assert event.additional_sids == cluster_projection.additional_sids assert event.provider == cluster_projection.provider assert event.type == cluster_projection.type assert event.resources_number == cluster_projection.resources_number @@ -59,7 +62,7 @@ defmodule Trento.ClusterProjectorTest do details: %Trento.Domain.HanaClusterDetails{ fencing_type: "external/sbd", nodes: [ - %Trento.Domain.ClusterNode{ + %Trento.Domain.HanaClusterNode{ attributes: _, hana_status: "Secondary", name: _, @@ -131,6 +134,7 @@ defmodule Trento.ClusterProjectorTest do assert event.cluster_id == cluster_projection.id assert event.name == cluster_projection.name assert event.sid == cluster_projection.sid + assert event.additional_sids == cluster_projection.additional_sids assert event.provider == cluster_projection.provider assert event.type == cluster_projection.type assert event.resources_number == cluster_projection.resources_number @@ -142,7 +146,7 @@ defmodule Trento.ClusterProjectorTest do details: %Trento.Domain.HanaClusterDetails{ fencing_type: "external/sbd", nodes: [ - %Trento.Domain.ClusterNode{ + %Trento.Domain.HanaClusterNode{ attributes: _, hana_status: "Secondary", name: _, @@ -187,6 +191,48 @@ defmodule Trento.ClusterProjectorTest do 1000 end + test "should update the deregistered_at field when ClusterDeregistered is received" do + insert(:cluster, id: cluster_id = Faker.UUID.v4(), name: name = "deregistered_cluster") + deregistered_at = DateTime.utc_now() + + event = ClusterDeregistered.new!(%{cluster_id: cluster_id, deregistered_at: deregistered_at}) + + ProjectorTestHelper.project(ClusterProjector, event, "cluster_projector") + cluster_projection = Repo.get!(ClusterReadModel, event.cluster_id) + + assert event.deregistered_at == cluster_projection.deregistered_at + + assert_broadcast "cluster_deregistered", + %{id: ^cluster_id, name: ^name}, + 1000 + end + + test "should set deregistered_at field to nil when ClusterRestored is received" do + %{id: cluster_id, name: name, type: type} = + insert(:cluster, + id: Faker.UUID.v4(), + name: "deregistered_cluster", + selected_checks: [], + deregistered_at: DateTime.utc_now() + ) + + event = ClusterRestored.new!(%{cluster_id: cluster_id}) + + ProjectorTestHelper.project(ClusterProjector, event, "cluster_projector") + cluster_projection = Repo.get!(ClusterReadModel, event.cluster_id) + + assert nil == cluster_projection.deregistered_at + + assert_broadcast "cluster_registered", + %{ + cib_last_written: nil, + id: ^cluster_id, + name: ^name, + type: ^type + }, + 1000 + end + test "should broadcast cluster_health_changed after the ClusterHealthChanged event" do insert(:cluster, id: cluster_id = Faker.UUID.v4()) diff --git a/test/trento/application/projectors/database_projector_test.exs b/test/trento/application/projectors/database_projector_test.exs index c02079d31f..9f76bb4daa 100644 --- a/test/trento/application/projectors/database_projector_test.exs +++ 
b/test/trento/application/projectors/database_projector_test.exs @@ -14,9 +14,12 @@ defmodule Trento.DatabaseProjectorTest do } alias Trento.Domain.Events.{ + DatabaseDeregistered, DatabaseHealthChanged, + DatabaseInstanceDeregistered, DatabaseInstanceHealthChanged, - DatabaseInstanceSystemReplicationChanged + DatabaseInstanceSystemReplicationChanged, + DatabaseRestored } alias Trento.ProjectorTestHelper @@ -242,4 +245,93 @@ defmodule Trento.DatabaseProjectorTest do }, 1000 end + + test "should update the database read model after a deregistration" do + deregistered_at = DateTime.utc_now() + + insert(:database, id: sap_system_id = Faker.UUID.v4()) + + event = %DatabaseDeregistered{ + sap_system_id: sap_system_id, + deregistered_at: deregistered_at + } + + ProjectorTestHelper.project(DatabaseProjector, event, "database_projector") + + projection = %{sid: sid} = Repo.get(DatabaseReadModel, sap_system_id) + + assert_broadcast "database_deregistered", + %{id: ^sap_system_id, sid: ^sid}, + 1000 + + assert deregistered_at == projection.deregistered_at + end + + test "should remove a database instance from the read model after a deregistration" do + deregistered_at = DateTime.utc_now() + + %{sid: sid} = insert(:database, id: sap_system_id = Faker.UUID.v4()) + insert_list(4, :database_instance) + + %{instance_number: instance_number, host_id: host_id} = + insert(:database_instance, sap_system_id: sap_system_id, sid: sid) + + event = %DatabaseInstanceDeregistered{ + instance_number: instance_number, + host_id: host_id, + sap_system_id: sap_system_id, + deregistered_at: deregistered_at + } + + ProjectorTestHelper.project(DatabaseProjector, event, "database_projector") + + assert nil == + Repo.get_by(DatabaseInstanceReadModel, + sap_system_id: sap_system_id, + instance_number: instance_number, + host_id: host_id + ) + + assert 4 == + DatabaseInstanceReadModel + |> Repo.all() + |> Enum.count() + + assert_broadcast "database_instance_deregistered", + %{ + sap_system_id: ^sap_system_id, + instance_number: ^instance_number, + host_id: ^host_id, + sid: ^sid + }, + 1000 + end + + test "should restore a deregistered database when DatabaseRestored is received" do + insert(:database, + id: sap_system_id = Faker.UUID.v4(), + sid: "NWD", + deregistered_at: DateTime.utc_now(), + health: :critical + ) + + event = %DatabaseRestored{ + sap_system_id: sap_system_id, + health: :passing + } + + ProjectorTestHelper.project(DatabaseProjector, event, "database_projector") + + projection = Repo.get(DatabaseReadModel, sap_system_id) + assert nil == projection.deregistered_at + assert :passing == projection.health + + assert_broadcast "database_registered", + %{ + health: :passing, + id: ^sap_system_id, + sid: "NWD" + }, + 1000 + end end diff --git a/test/trento/application/projectors/host_projector_test.exs b/test/trento/application/projectors/host_projector_test.exs index 3e9f92bec1..bd37eaeb0d 100644 --- a/test/trento/application/projectors/host_projector_test.exs +++ b/test/trento/application/projectors/host_projector_test.exs @@ -25,7 +25,10 @@ defmodule Trento.HostProjectorTest do HeartbeatSucceded, HostAddedToCluster, HostChecksSelected, + HostDeregistered, HostDetailsUpdated, + HostRemovedFromCluster, + HostRestored, ProviderUpdated } @@ -46,9 +49,9 @@ defmodule Trento.HostProjectorTest do end setup do - %HostReadModel{id: host_id} = insert(:host) + %HostReadModel{id: host_id, hostname: hostname} = insert(:host) - %{host_id: host_id} + %{host_id: host_id, hostname: hostname} end test "should project a new host when 
HostRegistered event is received" do @@ -129,6 +132,48 @@ defmodule Trento.HostProjectorTest do refute_broadcast "host_details_updated", %{id: ^host_id, cluster_id: ^cluster_id}, 1000 end + test "should set the cluster_id to nil if a HostRemovedFromCluster event is received and the host is still part of the cluster" do + insert(:cluster, id: cluster_id = Faker.UUID.v4()) + + insert( + :host, + id: host_id = UUID.uuid4(), + hostname: Faker.StarWars.character(), + cluster_id: cluster_id + ) + + event = %HostRemovedFromCluster{ + host_id: host_id, + cluster_id: cluster_id + } + + ProjectorTestHelper.project(HostProjector, event, "host_projector") + projection = Repo.get!(HostReadModel, host_id) + + assert nil == projection.cluster_id + end + + test "should not set the cluster_id to nil if a HostRemovedFromCluster event is received and the host is not part of the cluster anymore" do + insert(:cluster, id: cluster_id = Faker.UUID.v4()) + + insert( + :host, + id: host_id = UUID.uuid4(), + hostname: Faker.StarWars.character(), + cluster_id: cluster_id + ) + + event = %HostRemovedFromCluster{ + host_id: host_id, + cluster_id: UUID.uuid4() + } + + ProjectorTestHelper.project(HostProjector, event, "host_projector") + projection = Repo.get!(HostReadModel, host_id) + + assert cluster_id == projection.cluster_id + end + test "should update an existing host when HostDetailsUpdated event is received", %{ host_id: host_id } do @@ -367,4 +412,67 @@ defmodule Trento.HostProjectorTest do }, 1000 end + + test "should set deregistered_at to nil when HostRestored is received" do + host_id = UUID.uuid4() + insert(:host, id: host_id, deregistered_at: DateTime.utc_now()) + + event = %HostRestored{ + host_id: host_id + } + + ProjectorTestHelper.project(HostProjector, event, "host_projector") + + %{ + agent_version: agent_version, + heartbeat: heartbeat, + hostname: hostname, + id: id, + ip_addresses: ip_addresses, + cluster_id: cluster_id, + provider: provider, + provider_data: provider_data, + deregistered_at: deregistered_at + } = Repo.get!(HostReadModel, host_id) + + assert nil == deregistered_at + + assert_broadcast "host_registered", + %{ + agent_version: ^agent_version, + cluster_id: ^cluster_id, + heartbeat: ^heartbeat, + hostname: ^hostname, + id: ^id, + ip_addresses: ^ip_addresses, + provider: ^provider, + provider_data: ^provider_data + }, + 1000 + end + + test "should update the deregistered_at field when HostDeregistered is received", + %{ + host_id: host_id, + hostname: hostname + } do + timestamp = DateTime.utc_now() + + event = %HostDeregistered{ + host_id: host_id, + deregistered_at: timestamp + } + + ProjectorTestHelper.project(HostProjector, event, "host_projector") + host_projection = Repo.get!(HostReadModel, host_id) + + assert timestamp == host_projection.deregistered_at + + assert_broadcast "host_deregistered", + %{ + id: ^host_id, + hostname: ^hostname + }, + 1000 + end end diff --git a/test/trento/application/projectors/sap_system_projector_test.exs b/test/trento/application/projectors/sap_system_projector_test.exs index 3d670877e1..0cc3f066a5 100644 --- a/test/trento/application/projectors/sap_system_projector_test.exs +++ b/test/trento/application/projectors/sap_system_projector_test.exs @@ -7,6 +7,8 @@ defmodule Trento.SapSystemProjectorTest do import Trento.Factory + require Trento.Domain.Enums.EnsaVersion, as: EnsaVersion + alias Trento.{ ApplicationInstanceReadModel, SapSystemProjector, @@ -14,8 +16,12 @@ defmodule Trento.SapSystemProjectorTest do } alias Trento.Domain.Events.{ + 
ApplicationInstanceDeregistered, ApplicationInstanceHealthChanged, - SapSystemHealthChanged + SapSystemDeregistered, + SapSystemHealthChanged, + SapSystemRestored, + SapSystemUpdated } alias Trento.ProjectorTestHelper @@ -39,13 +45,20 @@ defmodule Trento.SapSystemProjectorTest do ProjectorTestHelper.project(SapSystemProjector, event, "sap_system_projector") - %{db_host: db_host, tenant: tenant, id: id, sid: sid, health: health} = - projection = Repo.get!(SapSystemReadModel, event.sap_system_id) + %{ + db_host: db_host, + tenant: tenant, + id: id, + sid: sid, + health: health, + ensa_version: ensa_version + } = projection = Repo.get!(SapSystemReadModel, event.sap_system_id) assert event.sid == projection.sid assert event.tenant == projection.tenant assert event.db_host == projection.db_host assert event.health == projection.health + assert event.ensa_version == projection.ensa_version assert_broadcast "sap_system_registered", %{ @@ -53,7 +66,8 @@ defmodule Trento.SapSystemProjectorTest do health: ^health, id: ^id, sid: ^sid, - tenant: ^tenant + tenant: ^tenant, + ensa_version: ^ensa_version }, 1000 end @@ -153,4 +167,122 @@ defmodule Trento.SapSystemProjectorTest do }, 1000 end + + test "should update read model after deregistration" do + deregistered_at = DateTime.utc_now() + + %{sid: sid} = insert(:sap_system, id: sap_system_id = Faker.UUID.v4()) + + event = %SapSystemDeregistered{ + sap_system_id: sap_system_id, + deregistered_at: deregistered_at + } + + ProjectorTestHelper.project(SapSystemProjector, event, "sap_system_projector") + + projection = Repo.get(SapSystemReadModel, sap_system_id) + + assert_broadcast "sap_system_deregistered", + %{id: ^sap_system_id, sid: ^sid}, + 1000 + + assert deregistered_at == projection.deregistered_at + end + + test "should restore a SAP system when SapSystemRestored is received" do + %{tenant: tenant, id: sap_system_id, sid: sid} = + insert(:sap_system, deregistered_at: DateTime.utc_now()) + + new_db_host = Faker.Internet.ip_v4_address() + new_health = :passing + + event = %SapSystemRestored{ + sap_system_id: sap_system_id, + tenant: tenant, + db_host: new_db_host, + health: new_health + } + + ProjectorTestHelper.project(SapSystemProjector, event, "sap_system_projector") + + projection = Repo.get(SapSystemReadModel, sap_system_id) + + assert_broadcast "sap_system_registered", + %{ + db_host: ^new_db_host, + health: ^new_health, + id: ^sap_system_id, + sid: ^sid, + tenant: ^tenant + }, + 1000 + + assert nil == projection.deregistered_at + end + + test "should remove an application instance from the read model after a deregistration" do + deregistered_at = DateTime.utc_now() + + %{sid: sid} = insert(:sap_system, id: sap_system_id = Faker.UUID.v4()) + + %{instance_number: instance_number, host_id: host_id} = + insert(:application_instance, sap_system_id: sap_system_id, sid: sid) + + insert_list(4, :application_instance) + + event = %ApplicationInstanceDeregistered{ + instance_number: instance_number, + host_id: host_id, + sap_system_id: sap_system_id, + deregistered_at: deregistered_at + } + + ProjectorTestHelper.project(SapSystemProjector, event, "sap_system_projector") + + assert nil == + Repo.get_by(ApplicationInstanceReadModel, + sap_system_id: sap_system_id, + instance_number: instance_number, + host_id: host_id + ) + + assert 4 == + ApplicationInstanceReadModel + |> Repo.all() + |> Enum.count() + + assert_broadcast "application_instance_deregistered", + %{ + sap_system_id: ^sap_system_id, + instance_number: ^instance_number, + host_id: ^host_id, + 
sid: ^sid + }, + 1000 + end + + test "should update an already existing SAP System when a SapSystemUpdated event is received" do + insert(:sap_system, id: sap_system_id = Faker.UUID.v4(), ensa_version: EnsaVersion.no_ensa()) + + event = %SapSystemUpdated{ + sap_system_id: sap_system_id, + ensa_version: EnsaVersion.ensa1() + } + + ProjectorTestHelper.project(SapSystemProjector, event, "sap_system_projector") + + %{ + id: id, + ensa_version: ensa_version + } = Repo.get!(SapSystemReadModel, event.sap_system_id) + + assert event.ensa_version == ensa_version + + assert_broadcast "sap_system_updated", + %{ + id: ^id, + ensa_version: ^ensa_version + }, + 1000 + end end diff --git a/test/trento/application/usecases/clusters_test.exs b/test/trento/application/usecases/clusters_test.exs index d975761f4d..c2e2e7e687 100644 --- a/test/trento/application/usecases/clusters_test.exs +++ b/test/trento/application/usecases/clusters_test.exs @@ -17,10 +17,12 @@ defmodule Trento.ClustersTest do describe "checks execution with wanda adapter" do test "should start a checks execution on demand if checks are selected" do %{id: cluster_id, provider: provider, type: cluster_type} = insert(:cluster) + insert(:host, deregistered_at: DateTime.utc_now(), cluster_id: cluster_id) insert_list(2, :host, cluster_id: cluster_id) expect(Trento.Infrastructure.Messaging.Adapter.Mock, :publish, fn "executions", message -> assert message.group_id == cluster_id + assert length(message.targets) == 2 assert message.env == %{ "provider" => %{kind: {:string_value, Atom.to_string(provider)}}, @@ -63,6 +65,18 @@ defmodule Trento.ClustersTest do end describe "get clusters" do + test "should not return soft deleted clusters" do + cib_last_written = Date.to_string(Faker.Date.forward(0)) + cluster_id = Faker.UUID.v4() + + insert(:cluster, id: cluster_id) + insert(:cluster, deregistered_at: DateTime.utc_now()) + insert(:cluster_enrichment_data, cluster_id: cluster_id) + + [%ClusterReadModel{id: ^cluster_id, cib_last_written: ^cib_last_written}] = + Clusters.get_all_clusters() + end + test "should return enriched clusters" do cib_last_written = Date.to_string(Faker.Date.forward(0)) cluster_id = Faker.UUID.v4() @@ -75,6 +89,22 @@ defmodule Trento.ClustersTest do end end + describe "get_cluster_id_by_host_id/1" do + test "should return nil if the host is not part of any cluster" do + assert nil == Clusters.get_cluster_id_by_host_id(UUID.uuid4()) + end + + test "should return the cluster_id" do + cluster_id = UUID.uuid4() + host_id = UUID.uuid4() + + insert(:cluster, id: cluster_id) + insert(:host, id: host_id, cluster_id: cluster_id) + + assert cluster_id == Clusters.get_cluster_id_by_host_id(host_id) + end + end + describe "update cib_last_written" do test "should create a new enriched cluster entry" do cib_last_written = Date.to_string(Faker.Date.forward(0)) diff --git a/test/trento/application/usecases/health_summary_service_test.exs b/test/trento/application/usecases/health_summary_service_test.exs index 0a6868558b..ca1ebaa02b 100644 --- a/test/trento/application/usecases/health_summary_service_test.exs +++ b/test/trento/application/usecases/health_summary_service_test.exs @@ -11,6 +11,7 @@ defmodule Trento.HealthSummaryServiceTest do require Trento.Domain.Enums.ClusterType, as: ClusterType alias Trento.{ + ClusterReadModel, HostReadModel, SapSystemReadModel } @@ -31,7 +32,6 @@ defmodule Trento.HealthSummaryServiceTest do insert( :database_instance_without_host, sap_system_id: sap_system_id, - sid: "HDD", host_id: a_host_id ) @@ -48,47 
+48,136 @@ defmodule Trento.HealthSummaryServiceTest do end test "should determine health summary for a SAP System" do - %Trento.ClusterReadModel{id: cluster_id} = + %ClusterReadModel{id: db_cluster_id} = insert(:cluster, type: ClusterType.hana_scale_up(), health: Health.passing()) - %Trento.HostReadModel{id: host_1_id} = - host_one = insert(:host, cluster_id: cluster_id, heartbeat: :unknown) + %ClusterReadModel{id: app_cluster_id} = + insert(:cluster, type: ClusterType.ascs_ers(), health: Health.warning()) - %Trento.SapSystemReadModel{ + %HostReadModel{id: db_host_id} = + db_host = insert(:host, cluster_id: db_cluster_id, heartbeat: Health.unknown()) + + %HostReadModel{id: db_host_id_2} = + db_host_2 = insert(:host, cluster_id: nil, heartbeat: Health.passing()) + + %HostReadModel{id: app_host_id} = + app_host = insert(:host, cluster_id: app_cluster_id, heartbeat: Health.passing()) + + %HostReadModel{id: app_host_id_2} = + app_host_2 = insert(:host, cluster_id: nil, heartbeat: Health.critical()) + + %SapSystemReadModel{ id: sap_system_id, sid: sid } = insert(:sap_system, health: Health.critical()) - database_instance = + insert(:sap_system, deregistered_at: DateTime.utc_now()) + + database_instances = [ + insert( + :database_instance, + sap_system_id: sap_system_id, + instance_number: "00", + host_id: db_host_id, + health: Health.warning(), + host: db_host + ), insert( - :database_instance_without_host, + :database_instance, sap_system_id: sap_system_id, - sid: "HDD", - host_id: host_1_id, - health: Health.warning() + instance_number: "01", + host_id: db_host_id_2, + health: Health.passing(), + host: db_host_2 ) + ] - insert( - :application_instance_without_host, - sap_system_id: sap_system_id, - sid: sid, - host_id: host_1_id, - health: Health.critical() - ) + application_instances = [ + insert( + :application_instance, + sap_system_id: sap_system_id, + instance_number: "10", + sid: sid, + host_id: app_host_id, + health: Health.passing(), + host: app_host + ), + insert( + :application_instance, + sap_system_id: sap_system_id, + instance_number: "11", + sid: sid, + host_id: app_host_id_2, + health: Health.critical(), + host: app_host_2 + ) + ] - expected_db_instance = %{database_instance | host: host_one} + assert [ + %{ + id: sap_system_id, + sid: sid, + sapsystem_health: Health.critical(), + database_health: Health.warning(), + database_cluster_health: Health.passing(), + application_cluster_health: Health.warning(), + hosts_health: Health.unknown(), + database_instances: database_instances, + application_instances: application_instances + } + ] == HealthSummaryService.get_health_summary() + end + + test "should set as unknown the clusters health when they are not available" do + %HostReadModel{id: db_host_id} = + db_host = insert(:host, cluster_id: nil, heartbeat: Health.passing()) + + %HostReadModel{id: app_host_id} = + app_host = insert(:host, cluster_id: nil, heartbeat: Health.passing()) + + %SapSystemReadModel{ + id: sap_system_id, + sid: sid + } = insert(:sap_system, health: Health.critical()) + + insert(:sap_system, deregistered_at: DateTime.utc_now()) + + database_instances = + insert_list( + 1, + :database_instance, + sap_system_id: sap_system_id, + instance_number: "00", + host_id: db_host_id, + health: Health.warning(), + host: db_host + ) + + application_instances = + insert_list( + 1, + :application_instance, + sap_system_id: sap_system_id, + instance_number: "10", + sid: sid, + host_id: app_host_id, + health: Health.passing(), + host: app_host + ) assert [ %{ - id: 
^sap_system_id, - sid: ^sid, - sapsystem_health: :critical, - database_health: :warning, - clusters_health: :passing, - hosts_health: :unknown, - database_instances: [^expected_db_instance] + id: sap_system_id, + sid: sid, + sapsystem_health: Health.critical(), + database_health: Health.warning(), + database_cluster_health: Health.unknown(), + application_cluster_health: Health.unknown(), + hosts_health: Health.passing(), + database_instances: database_instances, + application_instances: application_instances } - ] = HealthSummaryService.get_health_summary() + ] == HealthSummaryService.get_health_summary() end end end diff --git a/test/trento/application/usecases/hosts_test.exs b/test/trento/application/usecases/hosts_test.exs index 0328f30361..d384599bb4 100644 --- a/test/trento/application/usecases/hosts_test.exs +++ b/test/trento/application/usecases/hosts_test.exs @@ -5,6 +5,7 @@ defmodule Trento.HostsTest do import Mox import Trento.Factory + import Mox alias Trento.Hosts alias Trento.Repo @@ -15,6 +16,8 @@ defmodule Trento.HostsTest do @moduletag :integration + setup :verify_on_exit! + describe "SLES Subscriptions" do test "No SLES4SAP Subscriptions detected" do assert 0 = SlesSubscriptionReadModel |> Repo.all() |> length() @@ -30,6 +33,55 @@ defmodule Trento.HostsTest do end end + describe "get_all_hosts/0" do + test "should list all hosts except the deregistered ones" do + registered_hosts = Enum.map(0..9, fn i -> insert(:host, hostname: "hostname_#{i}") end) + + last_heartbeats = + Enum.map(registered_hosts, fn %Trento.HostReadModel{id: id} -> + insert(:heartbeat, agent_id: id) + end) + + deregistered_host = insert(:host, deregistered_at: DateTime.utc_now()) + + hosts = Hosts.get_all_hosts() + hosts_ids = Enum.map(hosts, & &1.id) + + assert Enum.map(registered_hosts, & &1.id) == hosts_ids + + assert Enum.map(hosts, & &1.last_heartbeat_timestamp) == + Enum.map(last_heartbeats, & &1.timestamp) + + refute deregistered_host.id in hosts_ids + end + end + + describe "get_host_by_id/1" do + test "should return host" do + %Trento.HostReadModel{id: id} = insert(:host) + %Trento.Heartbeat{timestamp: timestamp} = insert(:heartbeat, agent_id: id) + + host = Hosts.get_host_by_id(id) + + assert host.id == id + assert host.last_heartbeat_timestamp == timestamp + end + + test "should return nil if host is deregistered" do + %Trento.HostReadModel{id: id} = insert(:host, deregistered_at: DateTime.utc_now()) + + host = Hosts.get_host_by_id(id) + + assert host == nil + end + + test "should return nil if host does not exist" do + host = Hosts.get_host_by_id(UUID.uuid4()) + + assert host == nil + end + end + describe "Check Selection" do test "should dispatch command on Check Selection" do host_id = Faker.UUID.v4() diff --git a/test/trento/application/usecases/sap_systems_test.exs b/test/trento/application/usecases/sap_systems_test.exs index b09aa90279..0d7ece97d6 100644 --- a/test/trento/application/usecases/sap_systems_test.exs +++ b/test/trento/application/usecases/sap_systems_test.exs @@ -14,7 +14,7 @@ defmodule Trento.SapSystemsTest do @moduletag :integration describe "sap_systems" do - test "should retrieve all the existing sap systems and the related instances" do + test "should retrieve all the currently registered existing sap systems and the related instances" do %SapSystemReadModel{ id: sap_system_id, sid: sid, @@ -22,6 +22,8 @@ defmodule Trento.SapSystemsTest do db_host: db_host } = insert(:sap_system) + insert(:sap_system, deregistered_at: DateTime.utc_now()) + application_instances = 
Enum.sort_by(
          insert_list(5, :application_instance_without_host, sap_system_id: sap_system_id),
@@ -46,12 +48,14 @@
       ] = SapSystems.get_all_sap_systems()
     end
-    test "should retrieve all the existing databases and the related instances" do
+    test "should retrieve all the currently registered existing databases and the related instances" do
       %DatabaseReadModel{
         id: sap_system_id,
         sid: sid
       } = insert(:database)
+
+      insert(:database, deregistered_at: DateTime.utc_now())
+
       database_instances =
         Enum.sort_by(
           insert_list(5, :database_instance_without_host, sap_system_id: sap_system_id),
@@ -66,4 +70,38 @@
       ] = SapSystems.get_all_databases()
     end
   end
+
+  describe "get_application_instances_by_host_id/1" do
+    test "should return an empty list if no application instances were found" do
+      assert [] == SapSystems.get_application_instances_by_host_id(UUID.uuid4())
+    end
+
+    test "should return all the instances with the matching host_id" do
+      host_id = UUID.uuid4()
+      insert_list(10, :application_instance_without_host, host_id: host_id)
+      insert_list(10, :application_instance_without_host)
+
+      application_instances = SapSystems.get_application_instances_by_host_id(host_id)
+
+      assert 10 == length(application_instances)
+      assert Enum.all?(application_instances, &(&1.host_id == host_id))
+    end
+  end
+
+  describe "get_database_instances_by_host_id/1" do
+    test "should return an empty list if no database instances were found" do
+      assert [] == SapSystems.get_database_instances_by_host_id(UUID.uuid4())
+    end
+
+    test "should return all the instances with the matching host_id" do
+      host_id = UUID.uuid4()
+      insert_list(10, :database_instance_without_host, host_id: host_id)
+      insert_list(10, :database_instance_without_host)
+
+      database_instances = SapSystems.get_database_instances_by_host_id(host_id)
+
+      assert 10 == length(database_instances)
+      assert Enum.all?(database_instances, &(&1.host_id == host_id))
+    end
+  end
 end
diff --git a/test/trento/domain/cluster/cluster_test.exs b/test/trento/domain/cluster/cluster_test.exs
index 3f7b57db9e..1d23d30797 100644
--- a/test/trento/domain/cluster/cluster_test.exs
+++ b/test/trento/domain/cluster/cluster_test.exs
@@ -7,6 +7,7 @@ defmodule Trento.ClusterTest do
   alias Trento.Domain.Commands.{
     CompleteChecksExecution,
+    DeregisterClusterHost,
     RegisterClusterHost,
     RollUpCluster,
     SelectChecks
@@ -18,14 +19,18 @@
     ChecksExecutionStarted,
     ChecksSelected,
     ClusterChecksHealthChanged,
+    ClusterDeregistered,
     ClusterDetailsUpdated,
     ClusterDiscoveredHealthChanged,
     ClusterHealthChanged,
     ClusterRegistered,
+    ClusterRestored,
     ClusterRolledUp,
     ClusterRollUpRequested,
+    ClusterTombstoned,
     HostAddedToCluster,
-    HostChecksExecutionCompleted
+    HostChecksExecutionCompleted,
+    HostRemovedFromCluster
   }
   alias Trento.Domain.Cluster
@@ -33,12 +38,13 @@
   require Trento.Domain.Enums.Health, as: Health
   describe "cluster registration" do
-    test "should register a cluster and add the node host to the cluster if the node is a DC" do
+    test "should register a cluster with full details and add the node host to the cluster if the node is a DC" do
       cluster_id = Faker.UUID.v4()
       host_id = Faker.UUID.v4()
       name = Faker.StarWars.character()
       type = :hana_scale_up
       sid = Faker.StarWars.planet()
+      additional_sids = ["HA1", "HA2"]
       assert_events_and_state(
         [],
@@ -47,6 +53,7 @@
           host_id: host_id,
           name: name,
           sid: sid,
+          additional_sids: additional_sids,
          provider:
:azure, type: type, details: nil, @@ -58,6 +65,7 @@ defmodule Trento.ClusterTest do cluster_id: cluster_id, name: name, sid: sid, + additional_sids: additional_sids, provider: :azure, type: type, health: :passing, @@ -72,6 +80,7 @@ defmodule Trento.ClusterTest do cluster_id: cluster_id, name: name, sid: sid, + additional_sids: additional_sids, type: type, provider: :azure, hosts: [host_id], @@ -81,7 +90,52 @@ defmodule Trento.ClusterTest do ) end - test "should add a host to the cluster" do + test "should register a cluster with unknown details when the cluster was not registered yet and a message from a non-DC is received" do + cluster_id = Faker.UUID.v4() + host_id = Faker.UUID.v4() + name = Faker.StarWars.character() + + assert_events_and_state( + [], + RegisterClusterHost.new!(%{ + cluster_id: cluster_id, + host_id: host_id, + name: name, + discovered_health: :unknown, + provider: :unknown, + type: :unknown, + designated_controller: false + }), + [ + %ClusterRegistered{ + cluster_id: cluster_id, + name: name, + sid: nil, + additional_sids: [], + provider: :unknown, + type: :unknown, + health: :unknown, + details: nil + }, + %HostAddedToCluster{ + cluster_id: cluster_id, + host_id: host_id + } + ], + %Cluster{ + cluster_id: cluster_id, + name: name, + sid: nil, + type: :unknown, + provider: :unknown, + hosts: [host_id], + discovered_health: :unknown, + health: :unknown + } + ) + end + + test "should add a host to the cluster if the host is not a DC and the cluster is already registered" do cluster_id = Faker.UUID.v4() host_id = Faker.UUID.v4() name = Faker.StarWars.character() @@ -116,20 +170,48 @@ defmodule Trento.ClusterTest do ) end - test "should return an error if the cluster was not registered yet and a command from a non-DC is received" do - assert_error( - [], + test "should add a host to the cluster if the host is a DC and the cluster is already registered" do + cluster_id = Faker.UUID.v4() + host_id = Faker.UUID.v4() + name = Faker.StarWars.character() + sid = Faker.StarWars.planet() + + assert_events_and_state( + [ + build( + :cluster_registered_event, + cluster_id: cluster_id, + provider: :azure, + sid: sid, + name: name, + details: nil + ), + build(:host_added_to_cluster_event, cluster_id: cluster_id) + ], RegisterClusterHost.new!(%{ - cluster_id: Faker.UUID.v4(), - host_id: Faker.UUID.v4(), - name: Faker.StarWars.character(), - sid: Faker.StarWars.planet(), - discovered_health: :unknown, + cluster_id: cluster_id, + host_id: host_id, + name: name, + sid: sid, + additional_sids: [], type: :hana_scale_up, - designated_controller: false, + discovered_health: :passing, + resources_number: 8, + hosts_number: 2, + designated_controller: true, provider: :azure }), - {:error, :cluster_not_found} + [ + %HostAddedToCluster{ + cluster_id: cluster_id, + host_id: host_id + } + ], + fn cluster -> + assert %Cluster{ + hosts: [^host_id | _] + } = cluster + end ) end end @@ -158,6 +240,7 @@ defmodule Trento.ClusterTest do host_id: host_id, name: new_name, sid: new_sid, + additional_sids: [], provider: :gcp, type: :hana_scale_up, resources_number: 2, @@ -170,6 +253,7 @@ defmodule Trento.ClusterTest do cluster_id: cluster_id, name: new_name, sid: new_sid, + additional_sids: [], provider: :gcp, type: :hana_scale_up, resources_number: 2, @@ -181,6 +265,7 @@ defmodule Trento.ClusterTest do cluster_id: ^cluster_id, name: ^new_name, sid: ^new_sid, + additional_sids: [], provider: :gcp, resources_number: 2, hosts_number: 1, @@ -214,6 +299,7 @@ defmodule Trento.ClusterTest do host_id: host_id, name: 
name, sid: sid, + additional_sids: [], provider: :azure, resources_number: 8, hosts_number: 2, @@ -262,6 +348,7 @@ defmodule Trento.ClusterTest do test "should use discovered cluster health when no checks are selected" do cluster_id = Faker.UUID.v4() + host_id = Faker.UUID.v4() name = Faker.StarWars.character() sid = Faker.StarWars.planet() @@ -274,11 +361,17 @@ defmodule Trento.ClusterTest do sid: sid, details: nil, provider: :azure + ), + build( + :host_added_to_cluster_event, + cluster_id: cluster_id, + host_id: host_id ) ], [ build( :register_cluster_host, + host_id: host_id, cluster_id: cluster_id, name: name, sid: sid, @@ -439,6 +532,7 @@ defmodule Trento.ClusterTest do host_id: host_added_to_cluster_event.host_id, name: cluster_registered_event.name, sid: cluster_registered_event.sid, + additional_sids: cluster_registered_event.additional_sids, provider: cluster_registered_event.provider, type: cluster_registered_event.type, resources_number: cluster_registered_event.resources_number, @@ -487,6 +581,7 @@ defmodule Trento.ClusterTest do host_id: host_added_to_cluster_event.host_id, name: cluster_registered_event.name, sid: cluster_registered_event.sid, + additional_sids: cluster_registered_event.additional_sids, type: cluster_registered_event.type, resources_number: cluster_registered_event.resources_number, hosts_number: cluster_registered_event.hosts_number, @@ -533,6 +628,7 @@ defmodule Trento.ClusterTest do host_id: host_added_to_cluster_event.host_id, name: cluster_registered_event.name, sid: cluster_registered_event.sid, + additional_sids: cluster_registered_event.additional_sids, provider: :azure, type: cluster_registered_event.type, resources_number: cluster_registered_event.resources_number, @@ -562,7 +658,7 @@ defmodule Trento.ClusterTest do test "should not accept a rollup command if a cluster was not registered yet" do assert_error( RollUpCluster.new!(%{cluster_id: Faker.UUID.v4()}), - {:error, :cluster_not_found} + {:error, :cluster_not_registered} ) end @@ -580,6 +676,7 @@ defmodule Trento.ClusterTest do name: cluster_registered_event.name, type: cluster_registered_event.type, sid: cluster_registered_event.sid, + additional_sids: cluster_registered_event.additional_sids, provider: cluster_registered_event.provider, resources_number: cluster_registered_event.resources_number, hosts_number: cluster_registered_event.hosts_number, @@ -610,6 +707,7 @@ defmodule Trento.ClusterTest do name: cluster_registered_event.name, type: cluster_registered_event.type, sid: cluster_registered_event.sid, + additional_sids: cluster_registered_event.additional_sids, provider: cluster_registered_event.provider, resources_number: cluster_registered_event.resources_number, hosts_number: cluster_registered_event.hosts_number, @@ -679,6 +777,242 @@ defmodule Trento.ClusterTest do end end + describe "deregistration" do + test "should restore a deregistered cluster when a RegisterClusterHost command from a non DC host is received" do + host_one_id = UUID.uuid4() + host_two_id = UUID.uuid4() + + cluster_id = UUID.uuid4() + deregistered_at = DateTime.utc_now() + + initial_events = [ + build(:cluster_registered_event, cluster_id: cluster_id, hosts_number: 2), + build(:host_added_to_cluster_event, cluster_id: cluster_id, host_id: host_one_id), + build(:host_added_to_cluster_event, cluster_id: cluster_id, host_id: host_two_id), + build(:host_removed_from_cluster_event, cluster_id: cluster_id, host_id: host_one_id), + build(:host_removed_from_cluster_event, cluster_id: cluster_id, host_id: host_two_id), 
+ build(:cluster_deregistered_event, + cluster_id: cluster_id, + deregistered_at: deregistered_at + ) + ] + + new_host_id = UUID.uuid4() + + restoration_command = + build( + :register_cluster_host, + cluster_id: cluster_id, + host_id: new_host_id, + designated_controller: false + ) + + assert_events_and_state( + initial_events, + [restoration_command], + [ + %ClusterRestored{ + cluster_id: cluster_id + }, + %HostAddedToCluster{ + cluster_id: cluster_id, + host_id: new_host_id + } + ], + fn cluster -> + assert nil == cluster.deregistered_at + end + ) + end + + test "should restore a deregistered cluster and perform the cluster update procedure when a RegisterClusterHost command from a DC host is received" do + host_one_id = UUID.uuid4() + host_two_id = UUID.uuid4() + + cluster_id = UUID.uuid4() + deregistered_at = DateTime.utc_now() + + initial_events = [ + build(:cluster_registered_event, cluster_id: cluster_id, hosts_number: 2), + build(:host_added_to_cluster_event, cluster_id: cluster_id, host_id: host_one_id), + build(:host_added_to_cluster_event, cluster_id: cluster_id, host_id: host_two_id), + build(:host_removed_from_cluster_event, cluster_id: cluster_id, host_id: host_one_id), + build(:host_removed_from_cluster_event, cluster_id: cluster_id, host_id: host_two_id), + build(:cluster_deregistered_event, + cluster_id: cluster_id, + deregistered_at: deregistered_at + ) + ] + + new_host_id = UUID.uuid4() + + restoration_command = + build( + :register_cluster_host, + cluster_id: cluster_id, + host_id: new_host_id, + discovered_health: :critical, + designated_controller: true + ) + + assert_events_and_state( + initial_events, + [restoration_command], + [ + %ClusterRestored{ + cluster_id: cluster_id + }, + %HostAddedToCluster{ + cluster_id: cluster_id, + host_id: new_host_id + }, + %ClusterDetailsUpdated{ + cluster_id: cluster_id, + name: restoration_command.name, + type: restoration_command.type, + sid: restoration_command.sid, + additional_sids: restoration_command.additional_sids, + provider: restoration_command.provider, + resources_number: restoration_command.resources_number, + hosts_number: restoration_command.hosts_number, + details: restoration_command.details + }, + %ClusterDiscoveredHealthChanged{ + cluster_id: cluster_id, + discovered_health: :critical + }, + %ClusterHealthChanged{ + cluster_id: cluster_id, + health: :critical + } + ], + fn cluster -> + assert nil == cluster.deregistered_at + end + ) + end + + test "should reject all the commands when the host is deregistered" do + host_one_id = UUID.uuid4() + host_two_id = UUID.uuid4() + + cluster_id = UUID.uuid4() + deregistered_at = DateTime.utc_now() + + initial_events = [ + build(:cluster_registered_event, cluster_id: cluster_id, hosts_number: 2), + build(:host_added_to_cluster_event, cluster_id: cluster_id, host_id: host_one_id), + build(:host_added_to_cluster_event, cluster_id: cluster_id, host_id: host_two_id), + build(:host_removed_from_cluster_event, cluster_id: cluster_id, host_id: host_one_id), + build(:host_removed_from_cluster_event, cluster_id: cluster_id, host_id: host_two_id), + build(:cluster_deregistered_event, + cluster_id: cluster_id, + deregistered_at: deregistered_at + ) + ] + + commands_to_reject = [ + %CompleteChecksExecution{cluster_id: cluster_id}, + %DeregisterClusterHost{cluster_id: cluster_id}, + %SelectChecks{cluster_id: cluster_id} + ] + + for command <- commands_to_reject do + assert match?({:error, :cluster_not_registered}, aggregate_run(initial_events, command)), + "Command #{inspect(command)} 
should be rejected by the aggregate" + end + + commands_to_accept = [ + %RollUpCluster{cluster_id: cluster_id}, + %RegisterClusterHost{cluster_id: cluster_id, designated_controller: true}, + %RegisterClusterHost{cluster_id: cluster_id, designated_controller: false} + ] + + for command <- commands_to_accept do + assert match?({:ok, _, _}, aggregate_run(initial_events, command)), + "Command #{inspect(command)} should be accepted by a deregistered cluster" + end + end + + test "should emit the HostRemovedFromCluster event after a DeregisterClusterHost command and remove the host from the cluster aggregate state" do + cluster_id = Faker.UUID.v4() + dat = DateTime.utc_now() + host_1_added_event = build(:host_added_to_cluster_event, cluster_id: cluster_id) + + host_2_added_event = + %{host_id: host_2_id} = build(:host_added_to_cluster_event, cluster_id: cluster_id) + + assert_events_and_state( + [ + build(:cluster_registered_event, cluster_id: cluster_id, hosts_number: 2), + host_1_added_event, + host_2_added_event + ], + [ + %DeregisterClusterHost{ + host_id: host_1_added_event.host_id, + cluster_id: cluster_id, + deregistered_at: dat + } + ], + [ + %HostRemovedFromCluster{ + host_id: host_1_added_event.host_id, + cluster_id: cluster_id + } + ], + fn cluster -> + assert %Cluster{hosts: [^host_2_id]} = cluster + end + ) + end + + test "should emit the ClusterDeregistered and ClusterTombstoned events when the last ClusterHost is deregistered and set the deregistration date into the state" do + cluster_id = Faker.UUID.v4() + dat = DateTime.utc_now() + host_1_added_event = build(:host_added_to_cluster_event, cluster_id: cluster_id) + host_2_added_event = build(:host_added_to_cluster_event, cluster_id: cluster_id) + + assert_events_and_state( + [ + build(:cluster_registered_event, cluster_id: cluster_id, hosts_number: 2), + host_1_added_event, + host_2_added_event + ], + [ + %DeregisterClusterHost{ + host_id: host_1_added_event.host_id, + cluster_id: cluster_id, + deregistered_at: dat + }, + %DeregisterClusterHost{ + host_id: host_2_added_event.host_id, + cluster_id: cluster_id, + deregistered_at: dat + } + ], + [ + %HostRemovedFromCluster{ + host_id: host_1_added_event.host_id, + cluster_id: cluster_id + }, + %HostRemovedFromCluster{ + host_id: host_2_added_event.host_id, + cluster_id: cluster_id + }, + %ClusterDeregistered{ + cluster_id: cluster_id, + deregistered_at: dat + }, + %ClusterTombstoned{ + cluster_id: cluster_id + } + ], + fn cluster -> assert dat == cluster.deregistered_at end + ) + end + end + describe "legacy events" do test "should ignore legacy events and not update the aggregate" do cluster_id = Faker.UUID.v4() @@ -702,6 +1036,7 @@ defmodule Trento.ClusterTest do assert cluster.name == cluster_registered_event.name assert cluster.type == cluster_registered_event.type assert cluster.sid == cluster_registered_event.sid + assert cluster.additional_sids == cluster_registered_event.additional_sids assert cluster.provider == cluster_registered_event.provider assert cluster.resources_number == cluster_registered_event.resources_number assert cluster.hosts_number == cluster_registered_event.hosts_number diff --git a/test/trento/domain/host/host_test.exs b/test/trento/domain/host/host_test.exs index 30881a301e..4b25dad90e 100644 --- a/test/trento/domain/host/host_test.exs +++ b/test/trento/domain/host/host_test.exs @@ -4,7 +4,9 @@ defmodule Trento.HostTest do import Trento.Factory alias Trento.Domain.Commands.{ + DeregisterHost, RegisterHost, + RequestHostDeregistration, RollUpHost, 
SelectHostChecks, UpdateHeartbeat, @@ -16,10 +18,14 @@ defmodule Trento.HostTest do HeartbeatFailed, HeartbeatSucceded, HostChecksSelected, + HostDeregistered, + HostDeregistrationRequested, HostDetailsUpdated, HostRegistered, + HostRestored, HostRolledUp, HostRollUpRequested, + HostTombstoned, ProviderUpdated, SlesSubscriptionsUpdated } @@ -197,7 +203,7 @@ defmodule Trento.HostTest do end describe "heartbeat" do - test "should emit an HeartbeatSucceded event if the Host never received an heartbeat already" do + test "should emit a HeartbeatSucceded event if the Host never received a heartbeat already" do host_id = Faker.UUID.v4() host_registered_event = build(:host_registered_event, host_id: host_id) @@ -218,7 +224,7 @@ defmodule Trento.HostTest do ) end - test "should emit an HeartbeatSucceded event if the Host is in a critical status" do + test "should emit a HeartbeatSucceded event if the Host is in a critical status" do host_id = Faker.UUID.v4() initial_events = [ @@ -245,7 +251,7 @@ defmodule Trento.HostTest do ) end - test "should not emit an HeartbeatSucceded event if the Host is in a passing status already" do + test "should not emit a HeartbeatSucceded event if the Host is in a passing status already" do host_id = Faker.UUID.v4() initial_events = [ @@ -265,7 +271,7 @@ defmodule Trento.HostTest do ) end - test "should emit an HeartbeatFailed event if the Host has never received an heartbeat" do + test "should emit a HeartbeatFailed event if the Host has never received a heartbeat" do host_id = Faker.UUID.v4() initial_events = [ @@ -292,7 +298,7 @@ defmodule Trento.HostTest do ) end - test "should emit an HeartbeatFailed event if the Host is in a passing status" do + test "should emit a HeartbeatFailed event if the Host is in a passing status" do host_id = Faker.UUID.v4() host_registered_event = build(:host_registered_event, host_id: host_id) @@ -313,7 +319,7 @@ defmodule Trento.HostTest do ) end - test "should not emit an HeartbeatFailed event if the Host is in a critical status already" do + test "should not emit a HeartbeatFailed event if the Host is in a critical status already" do host_id = Faker.UUID.v4() initial_events = [ @@ -686,4 +692,186 @@ defmodule Trento.HostTest do ) end end + + describe "deregistration" do + test "should restore a deregistered host when a RegisterHost command with no new host information is received" do + host_id = Faker.UUID.v4() + + initial_events = [ + host_registered_event = build(:host_registered_event, host_id: host_id), + %HostDeregistered{ + host_id: host_id, + deregistered_at: DateTime.utc_now() + } + ] + + restoration_command = + build( + :register_host_command, + host_id: host_id, + hostname: host_registered_event.hostname, + ip_addresses: host_registered_event.ip_addresses, + agent_version: host_registered_event.agent_version, + cpu_count: host_registered_event.cpu_count, + total_memory_mb: host_registered_event.total_memory_mb, + socket_count: host_registered_event.socket_count, + os_version: host_registered_event.os_version, + installation_source: host_registered_event.installation_source + ) + + assert_events_and_state( + initial_events, + [ + restoration_command + ], + [ + %HostRestored{host_id: host_id} + ], + fn host -> + assert nil == host.deregistered_at + end + ) + end + + test "should restore and update a deregistered host when a RegisterHost command with new host information is received" do + host_id = Faker.UUID.v4() + + initial_events = [ + build(:host_registered_event, host_id: host_id), + %HostDeregistered{ + host_id: 
host_id, + deregistered_at: DateTime.utc_now() + } + ] + + restoration_command = build(:register_host_command, host_id: host_id) + + assert_events_and_state( + initial_events, + [ + restoration_command + ], + [ + %HostRestored{host_id: host_id}, + %HostDetailsUpdated{ + host_id: restoration_command.host_id, + hostname: restoration_command.hostname, + ip_addresses: restoration_command.ip_addresses, + agent_version: restoration_command.agent_version, + cpu_count: restoration_command.cpu_count, + total_memory_mb: restoration_command.total_memory_mb, + socket_count: restoration_command.socket_count, + os_version: restoration_command.os_version, + installation_source: restoration_command.installation_source + } + ], + fn host -> + assert nil == host.deregistered_at + end + ) + end + + test "should reject all the commands except the registration ones when the host is deregistered" do + host_id = Faker.UUID.v4() + dat = DateTime.utc_now() + + initial_events = [ + build(:host_registered_event, host_id: host_id), + %HostDeregistered{ + host_id: host_id, + deregistered_at: dat + } + ] + + commands_to_reject = [ + %DeregisterHost{host_id: host_id}, + %RequestHostDeregistration{host_id: host_id}, + %UpdateHeartbeat{host_id: host_id}, + %UpdateProvider{host_id: host_id}, + %UpdateSlesSubscriptions{host_id: host_id} + ] + + for command <- commands_to_reject do + assert_error(initial_events, command, {:error, :host_not_registered}) + end + + commands_to_accept = [ + %RollUpHost{host_id: host_id}, + %RegisterHost{host_id: host_id} + ] + + for command <- commands_to_accept do + assert match?({:ok, _, _}, aggregate_run(initial_events, command)), + "Command #{inspect(command)} should be accepted by a deregistered host" + end + end + + test "should emit the HostDeregistered and HostTombstoned events" do + host_id = Faker.UUID.v4() + dat = DateTime.utc_now() + + host_registered_event = build(:host_registered_event, host_id: host_id) + + assert_events( + [host_registered_event], + [ + %DeregisterHost{ + host_id: host_id, + deregistered_at: dat + } + ], + [ + %HostDeregistered{ + host_id: host_id, + deregistered_at: dat + }, + %HostTombstoned{ + host_id: host_id + } + ] + ) + end + + test "should emit the HostDeregistrationRequest Event" do + host_id = Faker.UUID.v4() + requested_at = DateTime.utc_now() + + host_registered_event = build(:host_registered_event, host_id: host_id) + + assert_events( + [host_registered_event], + [ + %RequestHostDeregistration{ + host_id: host_id, + requested_at: requested_at + } + ], + %HostDeregistrationRequested{ + host_id: host_id, + requested_at: requested_at + } + ) + end + + test "should apply the HostDeregistered event and set the deregistration date into the state" do + host_id = Faker.UUID.v4() + dat = DateTime.utc_now() + + host_registered_event = build(:host_registered_event, host_id: host_id) + + assert_state( + [ + host_registered_event, + %HostDeregistered{ + host_id: host_id, + deregistered_at: dat + } + ], + [], + fn host -> + assert dat == host.deregistered_at + end + ) + end + end end diff --git a/test/trento/domain/sap_system/sap_system_test.exs b/test/trento/domain/sap_system/sap_system_test.exs index e6bd7c89f2..83025c1b44 100644 --- a/test/trento/domain/sap_system/sap_system_test.exs +++ b/test/trento/domain/sap_system/sap_system_test.exs @@ -3,30 +3,135 @@ defmodule Trento.SapSystemTest do import Trento.Factory + require Trento.Domain.Enums.EnsaVersion, as: EnsaVersion + alias Trento.Domain.Commands.{ + DeregisterApplicationInstance, + 
DeregisterDatabaseInstance, RegisterApplicationInstance, RegisterDatabaseInstance, RollUpSapSystem } alias Trento.Domain.Events.{ + ApplicationInstanceDeregistered, ApplicationInstanceHealthChanged, + ApplicationInstanceMoved, ApplicationInstanceRegistered, + DatabaseDeregistered, DatabaseHealthChanged, + DatabaseInstanceDeregistered, DatabaseInstanceHealthChanged, DatabaseInstanceRegistered, DatabaseInstanceSystemReplicationChanged, DatabaseRegistered, + DatabaseRestored, + SapSystemDeregistered, SapSystemHealthChanged, SapSystemRegistered, + SapSystemRestored, SapSystemRolledUp, - SapSystemRollUpRequested + SapSystemRollUpRequested, + SapSystemTombstoned, + SapSystemUpdated } alias Trento.Domain.SapSystem + alias Trento.Domain.SapSystem.{ + Application, + Database, + Instance + } + describe "SAP System registration" do - test "should create an incomplete SAP system aggregate and register a database instance" do + test "should fail when a SAP system does not exist and the database instance has a Secondary role" do + command = + build(:register_database_instance_command, + system_replication: "Secondary" + ) + + assert_error( + command, + {:error, :sap_system_not_registered} + ) + end + + test "should create an incomplete SAP system aggregate and register a database instance when the system replication is disabled" do + sap_system_id = Faker.UUID.v4() + sid = Faker.StarWars.planet() + tenant = Faker.Beer.style() + instance_number = "00" + instance_hostname = Faker.Airports.iata() + features = Faker.Pokemon.name() + http_port = 80 + https_port = 443 + start_priority = "0.9" + host_id = Faker.UUID.v4() + + assert_events_and_state( + [], + RegisterDatabaseInstance.new!(%{ + sap_system_id: sap_system_id, + sid: sid, + tenant: tenant, + instance_number: instance_number, + instance_hostname: instance_hostname, + features: features, + http_port: http_port, + https_port: https_port, + start_priority: start_priority, + host_id: host_id, + system_replication: nil, + health: :passing + }), + [ + %DatabaseRegistered{ + sap_system_id: sap_system_id, + sid: sid, + health: :passing + }, + %DatabaseInstanceRegistered{ + sap_system_id: sap_system_id, + sid: sid, + tenant: tenant, + instance_number: instance_number, + instance_hostname: instance_hostname, + features: features, + http_port: http_port, + https_port: https_port, + start_priority: start_priority, + host_id: host_id, + system_replication: nil, + system_replication_status: nil, + health: :passing + } + ], + %SapSystem{ + sap_system_id: sap_system_id, + # The SAP System aggregate is not complete yet. + # The sid will be set when the first application instance is registered. 
+ sid: nil, + database: %SapSystem.Database{ + sid: sid, + health: :passing, + instances: [ + %SapSystem.Instance{ + sid: sid, + system_replication: nil, + system_replication_status: nil, + instance_number: instance_number, + features: features, + host_id: host_id, + health: :passing + } + ] + } + } + ) + end + + test "should create an incomplete SAP system aggregate and register a database instance when the system replication is enabled and the database role is primary" do sap_system_id = Faker.UUID.v4() sid = Faker.StarWars.planet() tenant = Faker.Beer.style() @@ -237,17 +342,17 @@ defmodule Trento.SapSystemTest do ) end - test "should register a SAP System and add an application instance" do + test "should register a SAP System and add an application instance when a MESSAGESERVER instance is already present and a new ABAP instance is added" do sap_system_id = Faker.UUID.v4() sid = Faker.StarWars.planet() db_host = Faker.Internet.ip_v4_address() tenant = Faker.Beer.style() instance_hostname = Faker.Airports.iata() - features = Faker.Pokemon.name() http_port = 80 https_port = 443 start_priority = "0.9" host_id = Faker.UUID.v4() + ensa_version = EnsaVersion.ensa1() initial_events = [ build( @@ -260,6 +365,12 @@ defmodule Trento.SapSystemTest do sap_system_id: sap_system_id, sid: sid, tenant: tenant + ), + build(:application_instance_registered_event, + sap_system_id: sap_system_id, + sid: sid, + features: "MESSAGESERVER", + instance_number: "00" ) ] @@ -270,48 +381,54 @@ defmodule Trento.SapSystemTest do sid: sid, db_host: db_host, tenant: tenant, - instance_number: "00", + instance_number: "10", instance_hostname: instance_hostname, - features: features, + features: "ABAP", http_port: http_port, https_port: https_port, start_priority: start_priority, host_id: host_id, - health: :passing + health: :passing, + ensa_version: ensa_version }), [ - %SapSystemRegistered{ - sap_system_id: sap_system_id, - sid: sid, - db_host: db_host, - tenant: tenant, - health: :passing - }, %ApplicationInstanceRegistered{ sap_system_id: sap_system_id, sid: sid, - instance_number: "00", + instance_number: "10", instance_hostname: instance_hostname, - features: features, + features: "ABAP", http_port: http_port, https_port: https_port, start_priority: start_priority, host_id: host_id, health: :passing + }, + %SapSystemRegistered{ + sap_system_id: sap_system_id, + sid: sid, + db_host: db_host, + tenant: tenant, + health: :passing, + ensa_version: ensa_version } ], fn state -> assert %SapSystem{ sid: ^sid, + ensa_version: ^ensa_version, application: %SapSystem.Application{ sid: ^sid, instances: [ %SapSystem.Instance{ sid: ^sid, - instance_number: "00", - features: ^features, + instance_number: "10", + features: "ABAP", host_id: ^host_id, health: :passing + }, + %SapSystem.Instance{ + features: "MESSAGESERVER" } ] } @@ -320,56 +437,89 @@ defmodule Trento.SapSystemTest do ) end - test "should add an application instance to a registered SAP System" do + test "should move an application instance if the host_id changed" do sap_system_id = Faker.UUID.v4() - sid = Faker.StarWars.planet() + sid = fake_sid() + instance_number = "10" + old_host_id = Faker.UUID.v4() + new_host_id = Faker.UUID.v4() + db_host = Faker.Internet.ip_v4_address() + tenant = Faker.Beer.style() + instance_hostname = Faker.Airports.iata() + http_port = 80 + https_port = 443 + start_priority = "0.9" + ensa_version = EnsaVersion.ensa1() initial_events = [ - build(:database_registered_event, sap_system_id: sap_system_id, sid: sid), - 
build(:database_instance_registered_event, sap_system_id: sap_system_id, sid: sid), - build(:sap_system_registered_event, sap_system_id: sap_system_id, sid: sid), - build(:application_instance_registered_event, sap_system_id: sap_system_id, sid: sid) - ] - - new_instance_db_host = Faker.Internet.ip_v4_address() - new_instance_tenant = Faker.Beer.style() - new_instance_number = "10" - new_instance_features = Faker.Pokemon.name() - new_instance_host_id = Faker.UUID.v4() - - assert_events_and_state( - initial_events, build( - :register_application_instance_command, + :database_registered_event, sap_system_id: sap_system_id, - sid: sid, - db_host: new_instance_db_host, - tenant: new_instance_tenant, - instance_number: new_instance_number, - features: new_instance_features, - host_id: new_instance_host_id, - health: :passing + sid: sid ), build( - :application_instance_registered_event, + :database_instance_registered_event, + sap_system_id: sap_system_id, + sid: sid + ), + build(:application_instance_registered_event, sap_system_id: sap_system_id, sid: sid, - instance_number: new_instance_number, - features: new_instance_features, - host_id: new_instance_host_id, - health: :passing + features: "MESSAGESERVER" + ), + build(:application_instance_registered_event, + sap_system_id: sap_system_id, + sid: sid, + features: "ABAP", + instance_number: instance_number, + instance_hostname: instance_hostname, + http_port: http_port, + https_port: https_port, + start_priority: start_priority, + host_id: old_host_id ), + build(:sap_system_registered_event, + sap_system_id: sap_system_id, + sid: sid, + db_host: db_host, + tenant: tenant, + ensa_version: ensa_version + ) + ] + + assert_events_and_state( + initial_events, + RegisterApplicationInstance.new!(%{ + sap_system_id: sap_system_id, + sid: sid, + db_host: db_host, + tenant: tenant, + instance_number: instance_number, + instance_hostname: instance_hostname, + features: "ABAP", + http_port: http_port, + https_port: https_port, + start_priority: start_priority, + host_id: new_host_id, + health: :passing, + ensa_version: ensa_version + }), + [ + %ApplicationInstanceMoved{ + sap_system_id: sap_system_id, + instance_number: instance_number, + old_host_id: old_host_id, + new_host_id: new_host_id + } + ], fn state -> assert %SapSystem{ application: %SapSystem.Application{ - sid: ^sid, instances: [ %SapSystem.Instance{ sid: ^sid, - instance_number: ^new_instance_number, - features: ^new_instance_features, - host_id: ^new_instance_host_id, - health: :passing + instance_number: ^instance_number, + host_id: ^new_host_id } | _ ] @@ -379,281 +529,280 @@ defmodule Trento.SapSystemTest do ) end - test "should not add an application instance if the application instance was already registered" do + test "should update a SAP System ENSA version if it was not set" do sap_system_id = Faker.UUID.v4() - - application_instance_registered_event = - build(:application_instance_registered_event, sap_system_id: sap_system_id) + sid = fake_sid() + ensa_version = EnsaVersion.ensa1() + instance_number = "10" + host_id = Faker.UUID.v4() initial_events = [ - build(:database_registered_event, sap_system_id: sap_system_id), - build(:database_instance_registered_event, sap_system_id: sap_system_id), - build(:sap_system_registered_event, sap_system_id: sap_system_id), - application_instance_registered_event - ] - - assert_events( - initial_events, build( - :register_application_instance_command, - sap_system_id: application_instance_registered_event.sap_system_id, - sid: 
application_instance_registered_event.sid, - db_host: Faker.Internet.ip_v4_address(), - tenant: Faker.Beer.hop(), - instance_number: application_instance_registered_event.instance_number, - features: application_instance_registered_event.features, - host_id: application_instance_registered_event.host_id, - health: :passing + :database_registered_event, + sap_system_id: sap_system_id, + sid: sid ), - [] - ) - end - end - - describe "SAP System health" do - test "should change the health of a Database when a new Database instance is registered" do - sap_system_id = Faker.UUID.v4() - sid = Faker.StarWars.planet() - tenant = Faker.Beer.style() - instance_number = "00" - features = Faker.Pokemon.name() - host_id = Faker.UUID.v4() - - initial_events = [ - build(:database_registered_event, sap_system_id: sap_system_id), - build(:database_instance_registered_event, sap_system_id: sap_system_id) + build( + :database_instance_registered_event, + sap_system_id: sap_system_id, + sid: sid + ), + build(:application_instance_registered_event, + sap_system_id: sap_system_id, + sid: sid, + features: "MESSAGESERVER", + instance_number: instance_number, + host_id: host_id + ), + build(:application_instance_registered_event, + sap_system_id: sap_system_id, + sid: sid, + features: "ABAP" + ), + build( + :sap_system_registered_event, + sap_system_id: sap_system_id, + sid: sid, + ensa_version: EnsaVersion.no_ensa() + ) ] assert_events_and_state( initial_events, - build( - :register_database_instance_command, + build(:register_application_instance_command, sap_system_id: sap_system_id, sid: sid, - tenant: tenant, instance_number: instance_number, - features: features, host_id: host_id, - health: :critical + features: "MESSAGESERVER", + ensa_version: ensa_version ), [ - build( - :database_instance_registered_event, + %SapSystemUpdated{ sap_system_id: sap_system_id, - sid: sid, - tenant: tenant, - instance_number: instance_number, - features: features, - host_id: host_id, - health: :critical - ), - %DatabaseHealthChanged{ - sap_system_id: sap_system_id, - health: :critical + ensa_version: ensa_version } ], fn state -> - %SapSystem{ - database: %SapSystem.Database{ - health: :critical, - instances: [ - %SapSystem.Instance{ - health: :critical - }, - %SapSystem.Instance{ - health: :passing - } - ] - } - } = state + assert %SapSystem{ + sid: ^sid, + ensa_version: ^ensa_version, + application: %SapSystem.Application{ + sid: ^sid + } + } = state end ) end - test "should change the health of a Database when a Database instance has changed the health status" do + test "should not update a SAP System ENSA version if the coming application instance does not have ENSA data" do sap_system_id = Faker.UUID.v4() + sid = fake_sid() + ensa_version = EnsaVersion.ensa1() + instance_number = "10" host_id = Faker.UUID.v4() - instance_number = "00" - database_instance_registered_event = + initial_events = [ + build( + :database_registered_event, + sap_system_id: sap_system_id, + sid: sid + ), build( :database_instance_registered_event, sap_system_id: sap_system_id, - host_id: host_id, - instance_number: instance_number + sid: sid + ), + build(:application_instance_registered_event, + sap_system_id: sap_system_id, + sid: sid, + features: "MESSAGESERVER" + ), + build(:application_instance_registered_event, + sap_system_id: sap_system_id, + sid: sid, + features: "ABAP", + instance_number: instance_number, + host_id: host_id + ), + build( + :sap_system_registered_event, + sap_system_id: sap_system_id, + sid: sid, + ensa_version: 
ensa_version ) - - initial_events = [ - build(:database_registered_event, sap_system_id: sap_system_id), - database_instance_registered_event ] assert_events_and_state( initial_events, - build( - :register_database_instance_command, + build(:register_application_instance_command, sap_system_id: sap_system_id, - sid: database_instance_registered_event.sid, - tenant: database_instance_registered_event.tenant, + sid: sid, instance_number: instance_number, - features: database_instance_registered_event.features, host_id: host_id, - health: :critical + features: "ABAP", + ensa_version: EnsaVersion.no_ensa() ), - [ - %DatabaseInstanceHealthChanged{ - sap_system_id: sap_system_id, - instance_number: instance_number, - host_id: host_id, - health: :critical - }, - %DatabaseHealthChanged{ - sap_system_id: sap_system_id, - health: :critical - } - ], + [], fn state -> assert %SapSystem{ - database: %SapSystem.Database{ - health: :critical, - instances: [ - %SapSystem.Instance{ - instance_number: ^instance_number, - host_id: ^host_id, - health: :critical - } - ] + sid: ^sid, + ensa_version: ^ensa_version, + application: %SapSystem.Application{ + sid: ^sid } } = state end ) end - test "should not change the health of a Database if no instance has changed the health status" do + test "should not update a SAP System if the coming data didn't change the current state" do sap_system_id = Faker.UUID.v4() + sid = fake_sid() + ensa_version = EnsaVersion.ensa1() + instance_number = "10" + host_id = Faker.UUID.v4() - new_instance_number = "20" - new_instance_features = Faker.Pokemon.name() - new_instance_host_id = Faker.UUID.v4() - - database_instance_registered_event = + initial_events = [ + build( + :database_registered_event, + sap_system_id: sap_system_id, + sid: sid + ), build( :database_instance_registered_event, sap_system_id: sap_system_id, - health: :warning + sid: sid + ), + build(:application_instance_registered_event, + sap_system_id: sap_system_id, + sid: sid, + features: "MESSAGESERVER", + instance_number: instance_number, + host_id: host_id + ), + build(:application_instance_registered_event, + sap_system_id: sap_system_id, + sid: sid, + features: "ABAP" + ), + build( + :sap_system_registered_event, + sap_system_id: sap_system_id, + sid: sid, + ensa_version: ensa_version ) - - initial_events = [ - build(:database_registered_event, sap_system_id: sap_system_id, health: :warning), - database_instance_registered_event ] assert_events_and_state( initial_events, - [ - build( - :register_database_instance_command, - sap_system_id: sap_system_id, - sid: database_instance_registered_event.sid, - tenant: database_instance_registered_event.tenant, - instance_number: database_instance_registered_event.instance_number, - features: database_instance_registered_event.features, - host_id: database_instance_registered_event.host_id, - health: :warning - ), - build( - :register_database_instance_command, - sap_system_id: sap_system_id, - sid: database_instance_registered_event.sid, - tenant: database_instance_registered_event.tenant, - instance_number: new_instance_number, - features: new_instance_features, - host_id: new_instance_host_id, - health: :warning - ) - ], - [ - build( - :database_instance_registered_event, - sap_system_id: sap_system_id, - sid: database_instance_registered_event.sid, - tenant: database_instance_registered_event.tenant, - instance_number: new_instance_number, - features: new_instance_features, - host_id: new_instance_host_id, - health: :warning - ) - ], + 
build(:register_application_instance_command, + sap_system_id: sap_system_id, + sid: sid, + instance_number: instance_number, + host_id: host_id, + features: "MESSAGESERVER", + ensa_version: ensa_version + ), + [], fn state -> assert %SapSystem{ - database: %SapSystem.Database{ - health: :warning, - instances: [ - %SapSystem.Instance{ - health: :warning - }, - %SapSystem.Instance{ - health: :warning - } - ] + sid: ^sid, + ensa_version: ^ensa_version, + application: %SapSystem.Application{ + sid: ^sid } } = state end ) end - test "should change the health of a SAP System when a new Application instance is registered" do + test "should register a SAP System and add an application instance when an ABAP instance is already present and a new MESSAGESERVER instance is added" do sap_system_id = Faker.UUID.v4() sid = Faker.StarWars.planet() - tenant = Faker.Beer.style() db_host = Faker.Internet.ip_v4_address() - instance_number = "00" - features = Faker.Pokemon.name() + tenant = Faker.Beer.style() + instance_hostname = Faker.Airports.iata() + http_port = 80 + https_port = 443 + start_priority = "0.9" host_id = Faker.UUID.v4() + ensa_version = EnsaVersion.ensa1() initial_events = [ - build(:database_registered_event, sap_system_id: sap_system_id), - build(:database_instance_registered_event, sap_system_id: sap_system_id) + build( + :database_registered_event, + sap_system_id: sap_system_id, + sid: sid + ), + build( + :database_instance_registered_event, + sap_system_id: sap_system_id, + sid: sid, + tenant: tenant + ), + build(:application_instance_registered_event, + sap_system_id: sap_system_id, + sid: sid, + features: "ABAP", + instance_number: "10" + ) ] assert_events_and_state( initial_events, - build( - :register_application_instance_command, + RegisterApplicationInstance.new!(%{ sap_system_id: sap_system_id, sid: sid, - tenant: tenant, db_host: db_host, - instance_number: instance_number, - features: features, + tenant: tenant, + instance_number: "00", + instance_hostname: instance_hostname, + features: "MESSAGESERVER", + http_port: http_port, + https_port: https_port, + start_priority: start_priority, host_id: host_id, - health: :critical - ), + health: :passing, + ensa_version: ensa_version + }), [ - %SapSystemRegistered{ + %ApplicationInstanceRegistered{ sap_system_id: sap_system_id, sid: sid, - db_host: db_host, - tenant: tenant, - health: :critical + instance_number: "00", + instance_hostname: instance_hostname, + features: "MESSAGESERVER", + http_port: http_port, + https_port: https_port, + start_priority: start_priority, + host_id: host_id, + health: :passing }, - build( - :application_instance_registered_event, + %SapSystemRegistered{ sap_system_id: sap_system_id, sid: sid, - instance_number: instance_number, - features: features, - host_id: host_id, - health: :critical - ) + db_host: db_host, + tenant: tenant, + health: :passing, + ensa_version: ensa_version + } ], fn state -> assert %SapSystem{ - health: :critical, + sid: ^sid, application: %SapSystem.Application{ + sid: ^sid, instances: [ %SapSystem.Instance{ - health: :critical + sid: ^sid, + instance_number: "00", + features: "MESSAGESERVER", + host_id: ^host_id, + health: :passing + }, + %SapSystem.Instance{ + features: "ABAP" } ] } @@ -662,54 +811,75 @@ defmodule Trento.SapSystemTest do ) end - test "should change the health of a SAP System when an Application has changed the health status" do + test "should add an application instance to a non registered SAP system when the instance is ABAP without complete a sap system 
registration" do sap_system_id = Faker.UUID.v4() - - application_instance_registered = - build(:application_instance_registered_event, sap_system_id: sap_system_id) - - sap_system_registered_event = - build(:sap_system_registered_event, sap_system_id: sap_system_id) + sid = Faker.StarWars.planet() + db_host = Faker.Internet.ip_v4_address() + tenant = Faker.Beer.style() + instance_hostname = Faker.Airports.iata() + http_port = 80 + https_port = 443 + start_priority = "0.9" + host_id = Faker.UUID.v4() + ensa_version = EnsaVersion.ensa1() initial_events = [ - build(:database_registered_event, sap_system_id: sap_system_id), - build(:database_instance_registered_event, sap_system_id: sap_system_id), - application_instance_registered, - sap_system_registered_event - ] - - assert_events_and_state( - initial_events, build( - :register_application_instance_command, + :database_registered_event, sap_system_id: sap_system_id, - sid: application_instance_registered.sid, - tenant: sap_system_registered_event.tenant, - db_host: sap_system_registered_event.db_host, - instance_number: application_instance_registered.instance_number, - features: application_instance_registered.features, - host_id: application_instance_registered.host_id, - health: :critical + sid: sid ), + build( + :database_instance_registered_event, + sap_system_id: sap_system_id, + sid: sid, + tenant: tenant + ) + ] + + assert_events_and_state( + initial_events, + RegisterApplicationInstance.new!(%{ + sap_system_id: sap_system_id, + sid: sid, + db_host: db_host, + tenant: tenant, + instance_number: "00", + instance_hostname: instance_hostname, + features: "ABAP", + http_port: http_port, + https_port: https_port, + start_priority: start_priority, + host_id: host_id, + health: :passing, + ensa_version: ensa_version + }), [ - %ApplicationInstanceHealthChanged{ - sap_system_id: sap_system_id, - instance_number: application_instance_registered.instance_number, - host_id: application_instance_registered.host_id, - health: :critical - }, - %SapSystemHealthChanged{ + %ApplicationInstanceRegistered{ sap_system_id: sap_system_id, - health: :critical + sid: sid, + instance_number: "00", + instance_hostname: instance_hostname, + features: "ABAP", + http_port: http_port, + https_port: https_port, + start_priority: start_priority, + host_id: host_id, + health: :passing } ], fn state -> assert %SapSystem{ - health: :critical, + sid: nil, application: %SapSystem.Application{ + sid: ^sid, instances: [ %SapSystem.Instance{ - health: :critical + sid: ^sid, + instance_number: "00", + features: "ABAP", + host_id: ^host_id, + health: :passing } ] } @@ -718,81 +888,75 @@ defmodule Trento.SapSystemTest do ) end - test "should not change the health of a SAP System if no instance has changed the health status" do + test "should add an application instance to a non registered SAP system when the instance is MESSAGESERVER without completing a SAP system registration" do sap_system_id = Faker.UUID.v4() + sid = Faker.StarWars.planet() + db_host = Faker.Internet.ip_v4_address() + tenant = Faker.Beer.style() + instance_hostname = Faker.Airports.iata() + http_port = 80 + https_port = 443 + start_priority = "0.9" + host_id = Faker.UUID.v4() + ensa_version = EnsaVersion.ensa1() - new_instance_number = "20" - new_instance_features = Faker.Pokemon.name() - new_instance_host_id = Faker.UUID.v4() - - application_instance_registered_event = + initial_events = [ build( - :application_instance_registered_event, + :database_registered_event, sap_system_id: sap_system_id, - 
health: :warning - ) - - sap_system_registered_event = - build(:sap_system_registered_event, sap_system_id: sap_system_id, health: :warning) - - initial_events = [ - build(:database_registered_event, sap_system_id: sap_system_id), + sid: sid + ), build( :database_instance_registered_event, sap_system_id: sap_system_id, - health: :warning - ), - sap_system_registered_event, - application_instance_registered_event + sid: sid, + tenant: tenant + ) ] assert_events_and_state( initial_events, + RegisterApplicationInstance.new!(%{ + sap_system_id: sap_system_id, + sid: sid, + db_host: db_host, + tenant: tenant, + instance_number: "00", + instance_hostname: instance_hostname, + features: "MESSAGESERVER", + http_port: http_port, + https_port: https_port, + start_priority: start_priority, + host_id: host_id, + health: :passing, + ensa_version: ensa_version + }), [ - build( - :register_application_instance_command, - sap_system_id: sap_system_id, - sid: application_instance_registered_event.sid, - tenant: sap_system_registered_event.tenant, - db_host: sap_system_registered_event.db_host, - instance_number: application_instance_registered_event.instance_number, - features: application_instance_registered_event.features, - host_id: application_instance_registered_event.host_id, - health: :warning - ), - build( - :register_application_instance_command, - sap_system_id: sap_system_id, - sid: application_instance_registered_event.sid, - tenant: sap_system_registered_event.tenant, - db_host: sap_system_registered_event.db_host, - instance_number: new_instance_number, - features: new_instance_features, - host_id: new_instance_host_id, - health: :warning - ) - ], - [ - build( - :application_instance_registered_event, + %ApplicationInstanceRegistered{ sap_system_id: sap_system_id, - sid: application_instance_registered_event.sid, - instance_number: new_instance_number, - features: new_instance_features, - host_id: new_instance_host_id, - health: :warning - ) + sid: sid, + instance_number: "00", + instance_hostname: instance_hostname, + features: "MESSAGESERVER", + http_port: http_port, + https_port: https_port, + start_priority: start_priority, + host_id: host_id, + health: :passing + } ], fn state -> assert %SapSystem{ - health: :warning, + sid: nil, application: %SapSystem.Application{ + sid: ^sid, instances: [ %SapSystem.Instance{ - health: :warning - }, - %SapSystem.Instance{ - health: :warning + sid: ^sid, + instance_number: "00", + features: "MESSAGESERVER", + host_id: ^host_id, + health: :passing } ] } @@ -801,181 +965,2955 @@ defmodule Trento.SapSystemTest do ) end - test "should change the health of a SAP System when the Database has changed the health status" do + test "should add an application instance to a registered SAP System" do sap_system_id = Faker.UUID.v4() - - new_instance_number = "20" - new_instance_features = Faker.Pokemon.name() - new_instance_host_id = Faker.UUID.v4() + sid = Faker.StarWars.planet() initial_events = [ - build(:database_registered_event, sap_system_id: sap_system_id), - database_instance_registered_event = - build(:database_instance_registered_event, sap_system_id: sap_system_id), - build(:sap_system_registered_event, sap_system_id: sap_system_id), - build(:application_instance_registered_event, sap_system_id: sap_system_id) + build(:database_registered_event, sap_system_id: sap_system_id, sid: sid), + build(:database_instance_registered_event, sap_system_id: sap_system_id, sid: sid), + build(:application_instance_registered_event, sap_system_id: sap_system_id, sid: 
sid), + build(:sap_system_registered_event, sap_system_id: sap_system_id, sid: sid) ] + new_instance_db_host = Faker.Internet.ip_v4_address() + new_instance_tenant = Faker.Beer.style() + new_instance_number = "10" + new_instance_features = Faker.Pokemon.name() + new_instance_host_id = Faker.UUID.v4() + assert_events_and_state( initial_events, build( - :register_database_instance_command, + :register_application_instance_command, sap_system_id: sap_system_id, - sid: database_instance_registered_event.sid, - tenant: database_instance_registered_event.tenant, + sid: sid, + db_host: new_instance_db_host, + tenant: new_instance_tenant, instance_number: new_instance_number, features: new_instance_features, host_id: new_instance_host_id, - health: :warning + health: :passing + ), + build( + :application_instance_registered_event, + sap_system_id: sap_system_id, + sid: sid, + instance_number: new_instance_number, + features: new_instance_features, + host_id: new_instance_host_id, + health: :passing ), - [ - build( - :database_instance_registered_event, - sap_system_id: sap_system_id, - sid: database_instance_registered_event.sid, - tenant: database_instance_registered_event.tenant, - instance_number: new_instance_number, - features: new_instance_features, - host_id: new_instance_host_id, - health: :warning - ), - %DatabaseHealthChanged{ - sap_system_id: sap_system_id, - health: :warning - }, - %SapSystemHealthChanged{ - sap_system_id: sap_system_id, - health: :warning - } - ], fn state -> - assert %SapSystem{health: :warning} = state + assert %SapSystem{ + application: %SapSystem.Application{ + sid: ^sid, + instances: [ + %SapSystem.Instance{ + sid: ^sid, + instance_number: ^new_instance_number, + features: ^new_instance_features, + host_id: ^new_instance_host_id, + health: :passing + } + | _ + ] + } + } = state end ) end - end - - describe "rollup" do - test "should not accept a rollup command if a sap_system was not registered yet" do - assert_error( - RollUpSapSystem.new!(%{sap_system_id: Faker.UUID.v4()}), - {:error, :sap_system_not_registered} - ) - end - test "should change the sap_system state to rolling up" do - sap_system_id = UUID.uuid4() - sid = UUID.uuid4() + test "should not add an application instance if the application instance was already registered" do + sap_system_id = Faker.UUID.v4() - database_instance_registered_event = - build(:database_instance_registered_event, sap_system_id: sap_system_id, sid: sid) + application_instance_registered_event = + build(:application_instance_registered_event, sap_system_id: sap_system_id) initial_events = [ - build(:database_registered_event, sap_system_id: sap_system_id, sid: sid), - database_instance_registered_event, - build(:sap_system_registered_event, sap_system_id: sap_system_id, sid: sid) + build(:database_registered_event, sap_system_id: sap_system_id), + build(:database_instance_registered_event, sap_system_id: sap_system_id), + application_instance_registered_event, + build(:sap_system_registered_event, sap_system_id: sap_system_id) ] - assert_events_and_state( + assert_events( initial_events, - RollUpSapSystem.new!(%{sap_system_id: sap_system_id}), - %SapSystemRollUpRequested{ + build( + :register_application_instance_command, + sap_system_id: application_instance_registered_event.sap_system_id, + sid: application_instance_registered_event.sid, + db_host: Faker.Internet.ip_v4_address(), + tenant: Faker.Beer.hop(), + instance_number: application_instance_registered_event.instance_number, + features: 
application_instance_registered_event.features, + host_id: application_instance_registered_event.host_id, + health: :passing + ), + [] + ) + end + end + + describe "SAP System health" do + test "should change the health of a Database when a new Database instance is registered" do + sap_system_id = Faker.UUID.v4() + sid = Faker.StarWars.planet() + tenant = Faker.Beer.style() + instance_number = "00" + features = Faker.Pokemon.name() + host_id = Faker.UUID.v4() + + initial_events = [ + build(:database_registered_event, sap_system_id: sap_system_id), + build(:database_instance_registered_event, sap_system_id: sap_system_id) + ] + + assert_events_and_state( + initial_events, + build( + :register_database_instance_command, sap_system_id: sap_system_id, - snapshot: %SapSystem{ + sid: sid, + tenant: tenant, + instance_number: instance_number, + features: features, + host_id: host_id, + health: :critical + ), + [ + build( + :database_instance_registered_event, sap_system_id: sap_system_id, sid: sid, - health: :passing, + tenant: tenant, + instance_number: instance_number, + features: features, + host_id: host_id, + health: :critical + ), + %DatabaseHealthChanged{ + sap_system_id: sap_system_id, + health: :critical + } + ], + fn state -> + %SapSystem{ database: %SapSystem.Database{ - sid: sid, - health: :passing, + health: :critical, instances: [ %SapSystem.Instance{ - sid: sid, - instance_number: database_instance_registered_event.instance_number, - health: database_instance_registered_event.health, - features: database_instance_registered_event.features, - host_id: database_instance_registered_event.host_id, - system_replication: database_instance_registered_event.system_replication, - system_replication_status: - database_instance_registered_event.system_replication_status + health: :critical + }, + %SapSystem.Instance{ + health: :passing } ] - }, - rolling_up: false - } - }, - fn %SapSystem{rolling_up: rolling_up} -> - assert rolling_up + } + } = state end ) end - test "should not accept commands if a sap_system is in rolling up state" do - sap_system_id = UUID.uuid4() - sid = UUID.uuid4() + test "should change the health of a Database when a Database instance has changed the health status" do + sap_system_id = Faker.UUID.v4() + host_id = Faker.UUID.v4() + instance_number = "00" - initial_events = [ - build(:database_registered_event, sap_system_id: sap_system_id, sid: sid), - build(:database_instance_registered_event, sap_system_id: sap_system_id, sid: sid), - build(:sap_system_registered_event, sap_system_id: sap_system_id, sid: sid), - %SapSystemRollUpRequested{ + database_instance_registered_event = + build( + :database_instance_registered_event, sap_system_id: sap_system_id, - snapshot: %SapSystem{} - } + host_id: host_id, + instance_number: instance_number + ) + + initial_events = [ + build(:database_registered_event, sap_system_id: sap_system_id), + database_instance_registered_event ] - assert_error( + assert_events_and_state( initial_events, - RegisterDatabaseInstance.new!(%{ + build( + :register_database_instance_command, sap_system_id: sap_system_id, - sid: Faker.StarWars.planet(), - tenant: Faker.UUID.v4(), - host_id: Faker.UUID.v4(), - instance_number: "00", - features: Faker.Pokemon.name(), - http_port: 8080, - https_port: 8443, - health: :passing - }), - {:error, :sap_system_rolling_up} - ) - - assert_error( - initial_events, - RollUpSapSystem.new!(%{ - sap_system_id: sap_system_id - }), - {:error, :sap_system_rolling_up} + sid: database_instance_registered_event.sid, + tenant: 
database_instance_registered_event.tenant, + instance_number: instance_number, + features: database_instance_registered_event.features, + host_id: host_id, + health: :critical + ), + [ + %DatabaseInstanceHealthChanged{ + sap_system_id: sap_system_id, + instance_number: instance_number, + host_id: host_id, + health: :critical + }, + %DatabaseHealthChanged{ + sap_system_id: sap_system_id, + health: :critical + } + ], + fn state -> + assert %SapSystem{ + database: %SapSystem.Database{ + health: :critical, + instances: [ + %SapSystem.Instance{ + instance_number: ^instance_number, + host_id: ^host_id, + health: :critical + } + ] + } + } = state + end ) end - test "should apply the rollup event and rehydrate the aggregate" do - sap_system_id = UUID.uuid4() + test "should not change the health of a Database if no instance has changed the health status" do + sap_system_id = Faker.UUID.v4() - sap_system_registered_event = - build(:sap_system_registered_event, sap_system_id: sap_system_id) + new_instance_number = "20" + new_instance_features = Faker.Pokemon.name() + new_instance_host_id = Faker.UUID.v4() - assert_state( + database_instance_registered_event = + build( + :database_instance_registered_event, + sap_system_id: sap_system_id, + health: :warning + ) + + initial_events = [ + build(:database_registered_event, sap_system_id: sap_system_id, health: :warning), + database_instance_registered_event + ] + + assert_events_and_state( + initial_events, [ - sap_system_registered_event, - %SapSystemRolledUp{ + build( + :register_database_instance_command, sap_system_id: sap_system_id, - snapshot: %SapSystem{ - sap_system_id: sap_system_registered_event.sap_system_id, - sid: sap_system_registered_event.sid, - health: sap_system_registered_event.health, - rolling_up: false - } - } + sid: database_instance_registered_event.sid, + tenant: database_instance_registered_event.tenant, + instance_number: database_instance_registered_event.instance_number, + features: database_instance_registered_event.features, + host_id: database_instance_registered_event.host_id, + health: :warning + ), + build( + :register_database_instance_command, + sap_system_id: sap_system_id, + sid: database_instance_registered_event.sid, + tenant: database_instance_registered_event.tenant, + instance_number: new_instance_number, + features: new_instance_features, + host_id: new_instance_host_id, + health: :warning + ) ], - [], - fn sap_system -> - refute sap_system.rolling_up - assert sap_system.sap_system_id == sap_system_registered_event.sap_system_id - assert sap_system.sid == sap_system_registered_event.sid - assert sap_system.health == sap_system_registered_event.health + [ + build( + :database_instance_registered_event, + sap_system_id: sap_system_id, + sid: database_instance_registered_event.sid, + tenant: database_instance_registered_event.tenant, + instance_number: new_instance_number, + features: new_instance_features, + host_id: new_instance_host_id, + health: :warning + ) + ], + fn state -> + assert %SapSystem{ + database: %SapSystem.Database{ + health: :warning, + instances: [ + %SapSystem.Instance{ + health: :warning + }, + %SapSystem.Instance{ + health: :warning + } + ] + } + } = state end ) end - end + + test "should change the health of a SAP System when a new Application instance is registered" do + sap_system_id = Faker.UUID.v4() + sid = Faker.StarWars.planet() + tenant = Faker.Beer.style() + db_host = Faker.Internet.ip_v4_address() + instance_number = "00" + features = "MESSAGESERVER" + host_id = Faker.UUID.v4() + 
ensa_version = EnsaVersion.ensa1() + + initial_events = [ + build( + :database_registered_event, + sap_system_id: sap_system_id, + sid: sid + ), + build( + :database_instance_registered_event, + sap_system_id: sap_system_id, + sid: sid, + tenant: tenant + ), + build(:application_instance_registered_event, + sap_system_id: sap_system_id, + sid: sid, + features: "ABAP", + instance_number: "10" + ) + ] + + assert_events_and_state( + initial_events, + build( + :register_application_instance_command, + sap_system_id: sap_system_id, + sid: sid, + tenant: tenant, + db_host: db_host, + instance_number: instance_number, + features: features, + host_id: host_id, + health: :critical + ), + [ + build( + :application_instance_registered_event, + sap_system_id: sap_system_id, + sid: sid, + instance_number: instance_number, + features: features, + host_id: host_id, + health: :critical + ), + %SapSystemRegistered{ + sap_system_id: sap_system_id, + sid: sid, + db_host: db_host, + tenant: tenant, + health: :critical, + ensa_version: ensa_version + } + ], + fn state -> + assert %SapSystem{ + health: :critical, + ensa_version: ^ensa_version, + application: %SapSystem.Application{ + instances: [ + %SapSystem.Instance{ + health: :critical + }, + %SapSystem.Instance{ + health: :passing + } + ] + } + } = state + end + ) + end + + test "should change the health of a SAP System when an Application has changed the health status" do + sap_system_id = Faker.UUID.v4() + + application_instance_registered = + build(:application_instance_registered_event, sap_system_id: sap_system_id) + + sap_system_registered_event = + build(:sap_system_registered_event, sap_system_id: sap_system_id) + + initial_events = [ + build(:database_registered_event, sap_system_id: sap_system_id), + build(:database_instance_registered_event, sap_system_id: sap_system_id), + application_instance_registered, + sap_system_registered_event + ] + + assert_events_and_state( + initial_events, + build( + :register_application_instance_command, + sap_system_id: sap_system_id, + sid: application_instance_registered.sid, + tenant: sap_system_registered_event.tenant, + db_host: sap_system_registered_event.db_host, + instance_number: application_instance_registered.instance_number, + features: application_instance_registered.features, + host_id: application_instance_registered.host_id, + health: :critical + ), + [ + %ApplicationInstanceHealthChanged{ + sap_system_id: sap_system_id, + instance_number: application_instance_registered.instance_number, + host_id: application_instance_registered.host_id, + health: :critical + }, + %SapSystemHealthChanged{ + sap_system_id: sap_system_id, + health: :critical + } + ], + fn state -> + assert %SapSystem{ + health: :critical, + application: %SapSystem.Application{ + instances: [ + %SapSystem.Instance{ + health: :critical + } + ] + } + } = state + end + ) + end + + test "should not change the health of a SAP System if no instance has changed the health status" do + sap_system_id = Faker.UUID.v4() + + new_instance_number = "20" + new_instance_features = Faker.Pokemon.name() + new_instance_host_id = Faker.UUID.v4() + + application_instance_registered_event = + build( + :application_instance_registered_event, + sap_system_id: sap_system_id, + health: :warning + ) + + sap_system_registered_event = + build(:sap_system_registered_event, sap_system_id: sap_system_id, health: :warning) + + initial_events = [ + build(:database_registered_event, sap_system_id: sap_system_id), + build( + :database_instance_registered_event, + 
sap_system_id: sap_system_id, + health: :warning + ), + application_instance_registered_event, + sap_system_registered_event + ] + + assert_events_and_state( + initial_events, + [ + build( + :register_application_instance_command, + sap_system_id: sap_system_id, + sid: application_instance_registered_event.sid, + tenant: sap_system_registered_event.tenant, + db_host: sap_system_registered_event.db_host, + instance_number: application_instance_registered_event.instance_number, + features: application_instance_registered_event.features, + host_id: application_instance_registered_event.host_id, + health: :warning + ), + build( + :register_application_instance_command, + sap_system_id: sap_system_id, + sid: application_instance_registered_event.sid, + tenant: sap_system_registered_event.tenant, + db_host: sap_system_registered_event.db_host, + instance_number: new_instance_number, + features: new_instance_features, + host_id: new_instance_host_id, + health: :warning + ) + ], + [ + build( + :application_instance_registered_event, + sap_system_id: sap_system_id, + sid: application_instance_registered_event.sid, + instance_number: new_instance_number, + features: new_instance_features, + host_id: new_instance_host_id, + health: :warning + ) + ], + fn state -> + assert %SapSystem{ + health: :warning, + application: %SapSystem.Application{ + instances: [ + %SapSystem.Instance{ + health: :warning + }, + %SapSystem.Instance{ + health: :warning + } + ] + } + } = state + end + ) + end + + test "should change the health of a SAP System when the Database has changed the health status" do + sap_system_id = Faker.UUID.v4() + + new_instance_number = "20" + new_instance_features = Faker.Pokemon.name() + new_instance_host_id = Faker.UUID.v4() + + initial_events = [ + build(:database_registered_event, sap_system_id: sap_system_id), + database_instance_registered_event = + build(:database_instance_registered_event, sap_system_id: sap_system_id), + build(:application_instance_registered_event, sap_system_id: sap_system_id), + build(:sap_system_registered_event, sap_system_id: sap_system_id) + ] + + assert_events_and_state( + initial_events, + build( + :register_database_instance_command, + sap_system_id: sap_system_id, + sid: database_instance_registered_event.sid, + tenant: database_instance_registered_event.tenant, + instance_number: new_instance_number, + features: new_instance_features, + host_id: new_instance_host_id, + health: :warning + ), + [ + build( + :database_instance_registered_event, + sap_system_id: sap_system_id, + sid: database_instance_registered_event.sid, + tenant: database_instance_registered_event.tenant, + instance_number: new_instance_number, + features: new_instance_features, + host_id: new_instance_host_id, + health: :warning + ), + %DatabaseHealthChanged{ + sap_system_id: sap_system_id, + health: :warning + }, + %SapSystemHealthChanged{ + sap_system_id: sap_system_id, + health: :warning + } + ], + fn state -> + assert %SapSystem{health: :warning} = state + end + ) + end + + test "should update the SAP system if some of the fields have been changed" do + end + end + + describe "rollup" do + test "should not accept a rollup command if a sap_system was not registered yet" do + assert_error( + RollUpSapSystem.new!(%{sap_system_id: Faker.UUID.v4()}), + {:error, :sap_system_not_registered} + ) + end + + test "should change the sap_system state to rolling up" do + sap_system_id = UUID.uuid4() + sid = UUID.uuid4() + + database_instance_registered_event = + 
build(:database_instance_registered_event, sap_system_id: sap_system_id, sid: sid) + + application_instance_registered_event = + build(:application_instance_registered_event, sap_system_id: sap_system_id, sid: sid) + + initial_events = [ + build(:database_registered_event, sap_system_id: sap_system_id, sid: sid), + database_instance_registered_event, + application_instance_registered_event, + %{ensa_version: ensa_version} = + build(:sap_system_registered_event, sap_system_id: sap_system_id, sid: sid) + ] + + assert_events_and_state( + initial_events, + RollUpSapSystem.new!(%{sap_system_id: sap_system_id}), + %SapSystemRollUpRequested{ + sap_system_id: sap_system_id, + snapshot: %SapSystem{ + sap_system_id: sap_system_id, + sid: sid, + health: :passing, + ensa_version: ensa_version, + application: %SapSystem.Application{ + sid: sid, + instances: [ + %SapSystem.Instance{ + sid: sid, + instance_number: application_instance_registered_event.instance_number, + health: application_instance_registered_event.health, + features: application_instance_registered_event.features, + host_id: application_instance_registered_event.host_id, + system_replication: nil, + system_replication_status: nil + } + ] + }, + database: %SapSystem.Database{ + sid: sid, + health: :passing, + instances: [ + %SapSystem.Instance{ + sid: sid, + instance_number: database_instance_registered_event.instance_number, + health: database_instance_registered_event.health, + features: database_instance_registered_event.features, + host_id: database_instance_registered_event.host_id, + system_replication: database_instance_registered_event.system_replication, + system_replication_status: + database_instance_registered_event.system_replication_status + } + ] + }, + rolling_up: false + } + }, + fn %SapSystem{rolling_up: rolling_up} -> + assert rolling_up + end + ) + end + + test "should not accept commands if a sap_system is in rolling up state" do + sap_system_id = UUID.uuid4() + sid = UUID.uuid4() + + initial_events = [ + build(:database_registered_event, sap_system_id: sap_system_id, sid: sid), + build(:database_instance_registered_event, sap_system_id: sap_system_id, sid: sid), + build(:application_instance_registered_event, sap_system_id: sap_system_id, sid: sid), + build(:sap_system_registered_event, sap_system_id: sap_system_id, sid: sid), + %SapSystemRollUpRequested{ + sap_system_id: sap_system_id, + snapshot: %SapSystem{} + } + ] + + assert_error( + initial_events, + RegisterDatabaseInstance.new!(%{ + sap_system_id: sap_system_id, + sid: Faker.StarWars.planet(), + tenant: Faker.UUID.v4(), + host_id: Faker.UUID.v4(), + instance_number: "00", + features: Faker.Pokemon.name(), + http_port: 8080, + https_port: 8443, + health: :passing + }), + {:error, :sap_system_rolling_up} + ) + + assert_error( + initial_events, + RollUpSapSystem.new!(%{ + sap_system_id: sap_system_id + }), + {:error, :sap_system_rolling_up} + ) + end + + test "should apply the rollup event and rehydrate the aggregate" do + sap_system_id = UUID.uuid4() + + sap_system_registered_event = + build(:sap_system_registered_event, sap_system_id: sap_system_id) + + initial_events = [ + build(:application_instance_registered_event, sap_system_id: sap_system_id), + sap_system_registered_event, + %SapSystemRolledUp{ + sap_system_id: sap_system_id, + snapshot: %SapSystem{ + sap_system_id: sap_system_registered_event.sap_system_id, + sid: sap_system_registered_event.sid, + health: sap_system_registered_event.health, + rolling_up: false + } + } + ] + + assert_state( + 
initial_events, + [], + fn sap_system -> + refute sap_system.rolling_up + assert sap_system.sap_system_id == sap_system_registered_event.sap_system_id + assert sap_system.sid == sap_system_registered_event.sid + assert sap_system.health == sap_system_registered_event.health + end + ) + end + end + + describe "tombstoning" do + test "should tombstone a deregistered SAP system when no application and no database instances are left" do + sap_system_id = UUID.uuid4() + host_id = UUID.uuid4() + secondary_database_host_id = UUID.uuid4() + deregistered_at = DateTime.utc_now() + + db_instance_number_1 = "00" + db_instance_number_2 = "01" + + message_server_host_id = UUID.uuid4() + message_server_instance_number = "00" + abap_host_id = UUID.uuid4() + abap_instance_number = "01" + + db_sid = fake_sid() + application_sid = fake_sid() + + initial_events = [ + build( + :database_registered_event, + sap_system_id: sap_system_id, + sid: db_sid + ), + build( + :database_instance_registered_event, + sap_system_id: sap_system_id, + host_id: host_id, + instance_number: db_instance_number_1, + system_replication: "Primary", + sid: db_sid + ), + build( + :database_instance_registered_event, + sap_system_id: sap_system_id, + host_id: secondary_database_host_id, + instance_number: db_instance_number_2, + system_replication: "Secondary", + sid: db_sid + ), + build( + :application_instance_registered_event, + sap_system_id: sap_system_id, + features: "MESSAGESERVER|ENQUE", + host_id: message_server_host_id, + sid: application_sid, + instance_number: message_server_instance_number + ), + build( + :application_instance_registered_event, + sap_system_id: sap_system_id, + features: "ABAP|GATEWAY|ICMAN|IGS", + host_id: abap_host_id, + sid: application_sid, + instance_number: abap_instance_number + ), + build( + :sap_system_registered_event, + sap_system_id: sap_system_id, + sid: application_sid + ) + ] + + assert_events_and_state( + initial_events, + [ + %DeregisterDatabaseInstance{ + sap_system_id: sap_system_id, + host_id: host_id, + instance_number: db_instance_number_1, + deregistered_at: deregistered_at + }, + %DeregisterDatabaseInstance{ + sap_system_id: sap_system_id, + host_id: secondary_database_host_id, + instance_number: db_instance_number_2, + deregistered_at: deregistered_at + }, + %DeregisterApplicationInstance{ + sap_system_id: sap_system_id, + host_id: message_server_host_id, + instance_number: message_server_instance_number, + deregistered_at: deregistered_at + }, + %DeregisterApplicationInstance{ + sap_system_id: sap_system_id, + host_id: abap_host_id, + instance_number: abap_instance_number, + deregistered_at: deregistered_at + } + ], + [ + %DatabaseInstanceDeregistered{ + sap_system_id: sap_system_id, + host_id: host_id, + instance_number: db_instance_number_1, + deregistered_at: deregistered_at + }, + %DatabaseDeregistered{ + sap_system_id: sap_system_id, + deregistered_at: deregistered_at + }, + %SapSystemDeregistered{ + sap_system_id: sap_system_id, + deregistered_at: deregistered_at + }, + %DatabaseInstanceDeregistered{ + sap_system_id: sap_system_id, + host_id: secondary_database_host_id, + instance_number: db_instance_number_2, + deregistered_at: deregistered_at + }, + %ApplicationInstanceDeregistered{ + sap_system_id: sap_system_id, + host_id: message_server_host_id, + instance_number: message_server_instance_number, + deregistered_at: deregistered_at + }, + %ApplicationInstanceDeregistered{ + sap_system_id: sap_system_id, + host_id: abap_host_id, + instance_number: abap_instance_number, + 
deregistered_at: deregistered_at + }, + %SapSystemTombstoned{ + sap_system_id: sap_system_id + } + ], + fn sap_system -> + assert %SapSystem{ + database: %Database{ + deregistered_at: ^deregistered_at, + sid: ^db_sid, + instances: [] + }, + application: %Application{ + instances: [] + }, + deregistered_at: ^deregistered_at, + sid: ^application_sid + } = sap_system + end + ) + end + end + + describe "deregistration" do + test "should not restore a deregistered database when the registering database instance has Secondary role" do + sap_system_id = UUID.uuid4() + + primary_database_host_id = UUID.uuid4() + secondary_database_host_id = UUID.uuid4() + + deregistered_at = DateTime.utc_now() + + db_instance_number_1 = "00" + db_instance_number_2 = "01" + + db_sid = fake_sid() + application_sid = fake_sid() + + message_server_host_id = UUID.uuid4() + message_server_instance_number = "00" + abap_host_id = UUID.uuid4() + abap_instance_number = "01" + + initial_events = [ + build( + :database_registered_event, + sap_system_id: sap_system_id, + sid: db_sid + ), + build( + :database_instance_registered_event, + sap_system_id: sap_system_id, + host_id: primary_database_host_id, + sid: db_sid, + instance_number: db_instance_number_1, + system_replication: "Primary" + ), + build( + :database_instance_registered_event, + sap_system_id: sap_system_id, + host_id: secondary_database_host_id, + instance_number: db_instance_number_2, + system_replication: "Secondary", + sid: db_sid + ), + build( + :application_instance_registered_event, + sap_system_id: sap_system_id, + features: "MESSAGESERVER|ENQUE", + host_id: message_server_host_id, + instance_number: message_server_instance_number, + sid: application_sid + ), + build( + :application_instance_registered_event, + sap_system_id: sap_system_id, + features: "ABAP|GATEWAY|ICMAN|IGS", + host_id: abap_host_id, + instance_number: abap_instance_number, + sid: application_sid + ), + build( + :sap_system_registered_event, + sap_system_id: sap_system_id, + sid: application_sid + ), + build(:database_instance_deregistered_event, + sap_system_id: sap_system_id, + host_id: primary_database_host_id, + instance_number: db_instance_number_1, + deregistered_at: deregistered_at + ), + build(:database_deregistered_event, + sap_system_id: sap_system_id, + deregistered_at: deregistered_at + ), + build(:sap_system_deregistered_event, + sap_system_id: sap_system_id, + deregistered_at: deregistered_at + ) + ] + + command = + build(:register_database_instance_command, + system_replication: "Secondary", + sid: db_sid, + sap_system_id: sap_system_id + ) + + assert_error( + initial_events, + command, + {:error, :sap_system_not_registered} + ) + end + + test "should restore a deregistered database when the registering database instance has system replication disabled, with database instance leftovers" do + sap_system_id = UUID.uuid4() + + primary_database_host_id = UUID.uuid4() + secondary_database_host_id = UUID.uuid4() + + deregistered_at = DateTime.utc_now() + + db_instance_number_1 = "00" + db_instance_number_2 = "01" + + db_sid = fake_sid() + application_sid = fake_sid() + + message_server_host_id = UUID.uuid4() + message_server_instance_number = "00" + abap_host_id = UUID.uuid4() + abap_instance_number = "01" + + initial_events = [ + build( + :database_registered_event, + sap_system_id: sap_system_id, + sid: db_sid + ), + build( + :database_instance_registered_event, + sap_system_id: sap_system_id, + host_id: primary_database_host_id, + sid: db_sid, + instance_number: 
db_instance_number_1, + system_replication: "Primary" + ), + build( + :database_instance_registered_event, + sap_system_id: sap_system_id, + host_id: secondary_database_host_id, + instance_number: db_instance_number_2, + system_replication: "Secondary", + sid: db_sid + ), + build( + :application_instance_registered_event, + sap_system_id: sap_system_id, + features: "MESSAGESERVER|ENQUE", + host_id: message_server_host_id, + instance_number: message_server_instance_number, + sid: application_sid + ), + build( + :application_instance_registered_event, + sap_system_id: sap_system_id, + features: "ABAP|GATEWAY|ICMAN|IGS", + host_id: abap_host_id, + instance_number: abap_instance_number, + sid: application_sid + ), + build( + :sap_system_registered_event, + sap_system_id: sap_system_id, + sid: application_sid + ), + build(:database_instance_deregistered_event, + sap_system_id: sap_system_id, + host_id: primary_database_host_id, + instance_number: db_instance_number_1, + deregistered_at: deregistered_at + ), + build(:database_deregistered_event, + sap_system_id: sap_system_id, + deregistered_at: deregistered_at + ), + build(:sap_system_deregistered_event, + sap_system_id: sap_system_id, + deregistered_at: deregistered_at + ) + ] + + %{features: features, instance_number: instance_number, health: health} = + command = + build(:register_database_instance_command, + system_replication: nil, + sid: db_sid, + sap_system_id: sap_system_id + ) + + assert_events_and_state( + initial_events, + command, + [ + %DatabaseRestored{ + sap_system_id: sap_system_id, + health: command.health + }, + %DatabaseInstanceRegistered{ + sap_system_id: sap_system_id, + sid: db_sid, + tenant: command.tenant, + instance_number: command.instance_number, + instance_hostname: command.instance_hostname, + features: command.features, + http_port: command.http_port, + https_port: command.https_port, + start_priority: command.start_priority, + host_id: command.host_id, + system_replication: command.system_replication, + system_replication_status: command.system_replication_status, + health: command.health + } + ], + fn sap_system -> + assert %SapSystem{ + deregistered_at: ^deregistered_at, + database: %Database{ + deregistered_at: nil, + sid: ^db_sid, + instances: [ + %Instance{ + sid: ^db_sid, + instance_number: ^instance_number, + features: ^features, + health: ^health + }, + %Instance{} + ] + } + } = sap_system + end + ) + end + + test "should restore a deregistered database when the registering database instance has system replication disabled, without database instance leftovers" do + sap_system_id = UUID.uuid4() + + primary_database_host_id = UUID.uuid4() + secondary_database_host_id = UUID.uuid4() + + deregistered_at = DateTime.utc_now() + + db_instance_number_1 = "00" + db_instance_number_2 = "01" + + db_sid = fake_sid() + application_sid = fake_sid() + + message_server_host_id = UUID.uuid4() + message_server_instance_number = "00" + abap_host_id = UUID.uuid4() + abap_instance_number = "01" + + initial_events = [ + build( + :database_registered_event, + sap_system_id: sap_system_id, + sid: db_sid + ), + build( + :database_instance_registered_event, + sap_system_id: sap_system_id, + host_id: primary_database_host_id, + sid: db_sid, + instance_number: db_instance_number_1, + system_replication: "Primary" + ), + build( + :database_instance_registered_event, + sap_system_id: sap_system_id, + host_id: secondary_database_host_id, + instance_number: db_instance_number_2, + system_replication: "Secondary", + sid: db_sid + ), + 
build( + :application_instance_registered_event, + sap_system_id: sap_system_id, + features: "MESSAGESERVER|ENQUE", + host_id: message_server_host_id, + instance_number: message_server_instance_number, + sid: application_sid + ), + build( + :application_instance_registered_event, + sap_system_id: sap_system_id, + features: "ABAP|GATEWAY|ICMAN|IGS", + host_id: abap_host_id, + instance_number: abap_instance_number, + sid: application_sid + ), + build( + :sap_system_registered_event, + sap_system_id: sap_system_id, + sid: application_sid + ), + build(:database_instance_deregistered_event, + sap_system_id: sap_system_id, + host_id: primary_database_host_id, + instance_number: db_instance_number_1, + deregistered_at: deregistered_at + ), + build(:database_instance_deregistered_event, + sap_system_id: sap_system_id, + host_id: secondary_database_host_id, + instance_number: db_instance_number_2, + deregistered_at: deregistered_at + ), + build(:database_deregistered_event, + sap_system_id: sap_system_id, + deregistered_at: deregistered_at + ), + build(:sap_system_deregistered_event, + sap_system_id: sap_system_id, + deregistered_at: deregistered_at + ) + ] + + %{features: features, instance_number: instance_number, health: health} = + command = + build(:register_database_instance_command, + system_replication: nil, + sid: db_sid, + sap_system_id: sap_system_id + ) + + assert_events_and_state( + initial_events, + command, + [ + %DatabaseRestored{ + sap_system_id: sap_system_id, + health: command.health + }, + %DatabaseInstanceRegistered{ + sap_system_id: sap_system_id, + sid: db_sid, + tenant: command.tenant, + instance_number: command.instance_number, + instance_hostname: command.instance_hostname, + features: command.features, + http_port: command.http_port, + https_port: command.https_port, + start_priority: command.start_priority, + host_id: command.host_id, + system_replication: command.system_replication, + system_replication_status: command.system_replication_status, + health: command.health + } + ], + fn sap_system -> + assert Kernel.length(sap_system.database.instances) == 1 + + assert %SapSystem{ + deregistered_at: ^deregistered_at, + database: %Database{ + deregistered_at: nil, + sid: ^db_sid, + instances: [ + %Instance{ + sid: ^db_sid, + instance_number: ^instance_number, + features: ^features, + health: ^health, + system_replication: nil + } + ] + } + } = sap_system + end + ) + end + + test "should restore a deregistered database when the registering database instance is a primary, without database instance leftovers" do + sap_system_id = UUID.uuid4() + + primary_database_host_id = UUID.uuid4() + secondary_database_host_id = UUID.uuid4() + + deregistered_at = DateTime.utc_now() + + db_instance_number_1 = "00" + db_instance_number_2 = "01" + + db_sid = fake_sid() + application_sid = fake_sid() + + message_server_host_id = UUID.uuid4() + message_server_instance_number = "00" + abap_host_id = UUID.uuid4() + abap_instance_number = "01" + + initial_events = [ + build( + :database_registered_event, + sap_system_id: sap_system_id, + sid: db_sid + ), + build( + :database_instance_registered_event, + sap_system_id: sap_system_id, + host_id: primary_database_host_id, + sid: db_sid, + instance_number: db_instance_number_1, + system_replication: "Primary" + ), + build( + :database_instance_registered_event, + sap_system_id: sap_system_id, + host_id: secondary_database_host_id, + instance_number: db_instance_number_2, + system_replication: "Secondary", + sid: db_sid + ), + build( + 
:application_instance_registered_event, + sap_system_id: sap_system_id, + features: "MESSAGESERVER|ENQUE", + host_id: message_server_host_id, + instance_number: message_server_instance_number, + sid: application_sid + ), + build( + :application_instance_registered_event, + sap_system_id: sap_system_id, + features: "ABAP|GATEWAY|ICMAN|IGS", + host_id: abap_host_id, + instance_number: abap_instance_number, + sid: application_sid + ), + build( + :sap_system_registered_event, + sap_system_id: sap_system_id, + sid: application_sid + ), + build(:database_instance_deregistered_event, + sap_system_id: sap_system_id, + host_id: primary_database_host_id, + instance_number: db_instance_number_1, + deregistered_at: deregistered_at + ), + build(:database_instance_deregistered_event, + sap_system_id: sap_system_id, + host_id: secondary_database_host_id, + instance_number: db_instance_number_2, + deregistered_at: deregistered_at + ), + build(:database_deregistered_event, + sap_system_id: sap_system_id, + deregistered_at: deregistered_at + ), + build(:sap_system_deregistered_event, + sap_system_id: sap_system_id, + deregistered_at: deregistered_at + ) + ] + + %{features: features, instance_number: instance_number, health: health} = + command = + build(:register_database_instance_command, + system_replication: "Primary", + sid: db_sid, + sap_system_id: sap_system_id + ) + + assert_events_and_state( + initial_events, + command, + [ + %DatabaseRestored{ + sap_system_id: sap_system_id, + health: command.health + }, + %DatabaseInstanceRegistered{ + sap_system_id: sap_system_id, + sid: db_sid, + tenant: command.tenant, + instance_number: command.instance_number, + instance_hostname: command.instance_hostname, + features: command.features, + http_port: command.http_port, + https_port: command.https_port, + start_priority: command.start_priority, + host_id: command.host_id, + system_replication: command.system_replication, + system_replication_status: command.system_replication_status, + health: command.health + } + ], + fn sap_system -> + assert Kernel.length(sap_system.database.instances) == 1 + + assert %SapSystem{ + deregistered_at: ^deregistered_at, + database: %Database{ + deregistered_at: nil, + sid: ^db_sid, + instances: [ + %Instance{ + sid: ^db_sid, + instance_number: ^instance_number, + features: ^features, + health: ^health, + system_replication: "Primary" + } + ] + } + } = sap_system + end + ) + end + + test "should restore a deregistered database when the registering database instance is a primary, with database instance leftovers" do + sap_system_id = UUID.uuid4() + + primary_database_host_id = UUID.uuid4() + secondary_database_host_id = UUID.uuid4() + + deregistered_at = DateTime.utc_now() + + db_instance_number_1 = "00" + db_instance_number_2 = "01" + + db_sid = fake_sid() + application_sid = fake_sid() + + message_server_host_id = UUID.uuid4() + message_server_instance_number = "00" + abap_host_id = UUID.uuid4() + abap_instance_number = "01" + + initial_events = [ + build( + :database_registered_event, + sap_system_id: sap_system_id, + sid: db_sid + ), + build( + :database_instance_registered_event, + sap_system_id: sap_system_id, + host_id: primary_database_host_id, + sid: db_sid, + instance_number: db_instance_number_1, + system_replication: "Primary" + ), + build( + :database_instance_registered_event, + sap_system_id: sap_system_id, + host_id: secondary_database_host_id, + instance_number: db_instance_number_2, + system_replication: "Secondary", + sid: db_sid + ), + build( + 
:application_instance_registered_event, + sap_system_id: sap_system_id, + features: "MESSAGESERVER|ENQUE", + host_id: message_server_host_id, + instance_number: message_server_instance_number, + sid: application_sid + ), + build( + :application_instance_registered_event, + sap_system_id: sap_system_id, + features: "ABAP|GATEWAY|ICMAN|IGS", + host_id: abap_host_id, + instance_number: abap_instance_number, + sid: application_sid + ), + build( + :sap_system_registered_event, + sap_system_id: sap_system_id, + sid: application_sid + ), + build(:database_instance_deregistered_event, + sap_system_id: sap_system_id, + host_id: primary_database_host_id, + instance_number: db_instance_number_1, + deregistered_at: deregistered_at + ), + build(:database_deregistered_event, + sap_system_id: sap_system_id, + deregistered_at: deregistered_at + ), + build(:sap_system_deregistered_event, + sap_system_id: sap_system_id, + deregistered_at: deregistered_at + ) + ] + + %{features: features, instance_number: instance_number, health: health} = + command = + build(:register_database_instance_command, + system_replication: "Primary", + sid: db_sid, + sap_system_id: sap_system_id + ) + + assert_events_and_state( + initial_events, + command, + [ + %DatabaseRestored{ + sap_system_id: sap_system_id, + health: command.health + }, + %DatabaseInstanceRegistered{ + sap_system_id: sap_system_id, + sid: db_sid, + tenant: command.tenant, + instance_number: command.instance_number, + instance_hostname: command.instance_hostname, + features: command.features, + http_port: command.http_port, + https_port: command.https_port, + start_priority: command.start_priority, + host_id: command.host_id, + system_replication: command.system_replication, + system_replication_status: command.system_replication_status, + health: command.health + } + ], + fn sap_system -> + assert Kernel.length(sap_system.database.instances) == 2 + + assert %SapSystem{ + deregistered_at: ^deregistered_at, + database: %Database{ + deregistered_at: nil, + sid: ^db_sid, + instances: [ + %Instance{ + sid: ^db_sid, + instance_number: ^instance_number, + features: ^features, + health: ^health, + system_replication: "Primary" + }, + %Instance{} + ] + } + } = sap_system + end + ) + end + + test "should not restore a sap system when no abap/messageserver instances are present" do + sap_system_id = UUID.uuid4() + + primary_database_host_id = UUID.uuid4() + secondary_database_host_id = UUID.uuid4() + + deregistered_at = DateTime.utc_now() + + db_instance_number_1 = "00" + db_instance_number_2 = "01" + + db_sid = fake_sid() + application_sid = fake_sid() + + message_server_host_id = UUID.uuid4() + message_server_instance_number = "00" + abap_host_id = UUID.uuid4() + abap_instance_number = "01" + + initial_events = [ + build( + :database_registered_event, + sap_system_id: sap_system_id, + sid: db_sid + ), + build( + :database_instance_registered_event, + sap_system_id: sap_system_id, + host_id: primary_database_host_id, + sid: db_sid, + instance_number: db_instance_number_1, + system_replication: "Primary" + ), + build( + :database_instance_registered_event, + sap_system_id: sap_system_id, + host_id: secondary_database_host_id, + instance_number: db_instance_number_2, + system_replication: "Secondary", + sid: db_sid + ), + build( + :application_instance_registered_event, + sap_system_id: sap_system_id, + features: "MESSAGESERVER|ENQUE", + host_id: message_server_host_id, + instance_number: message_server_instance_number, + sid: application_sid + ), + build( + 
:application_instance_registered_event, + sap_system_id: sap_system_id, + features: "ABAP|GATEWAY|ICMAN|IGS", + host_id: abap_host_id, + instance_number: abap_instance_number, + sid: application_sid + ), + build( + :sap_system_registered_event, + sap_system_id: sap_system_id, + sid: application_sid + ), + build(:database_instance_deregistered_event, + sap_system_id: sap_system_id, + host_id: primary_database_host_id, + instance_number: db_instance_number_1, + deregistered_at: deregistered_at + ), + build(:database_deregistered_event, + sap_system_id: sap_system_id, + deregistered_at: deregistered_at + ), + build(:sap_system_deregistered_event, + sap_system_id: sap_system_id, + deregistered_at: deregistered_at + ), + build(:application_instance_deregistered_event, + sap_system_id: sap_system_id, + deregistered_at: deregistered_at, + instance_number: message_server_instance_number, + host_id: message_server_host_id + ), + build(:database_instance_registered_event, + system_replication: "Primary", + sid: db_sid, + sap_system_id: sap_system_id + ), + build(:database_restored_event, + sap_system_id: sap_system_id + ) + ] + + command = + build( + :register_application_instance_command, + sap_system_id: sap_system_id, + sid: application_sid, + db_host: primary_database_host_id, + features: "IGS" + ) + + assert_events_and_state( + initial_events, + command, + [ + %ApplicationInstanceRegistered{ + sap_system_id: sap_system_id, + sid: application_sid, + host_id: command.host_id, + instance_number: command.instance_number, + instance_hostname: command.instance_hostname, + features: command.features, + http_port: command.http_port, + https_port: command.https_port, + start_priority: command.start_priority, + health: command.health + } + ], + fn sap_system -> + assert %SapSystem{ + deregistered_at: ^deregistered_at, + database: %Database{ + deregistered_at: nil, + sid: ^db_sid + } + } = sap_system + end + ) + end + + test "should restore a sap system when abap/messageserver instances are present" do + sap_system_id = UUID.uuid4() + + primary_database_host_id = UUID.uuid4() + secondary_database_host_id = UUID.uuid4() + + deregistered_at = DateTime.utc_now() + + db_instance_number_1 = "00" + db_instance_number_2 = "01" + + db_sid = fake_sid() + application_sid = fake_sid() + + message_server_host_id = UUID.uuid4() + message_server_instance_number = "00" + abap_host_id = UUID.uuid4() + abap_instance_number = "01" + + initial_events = [ + build( + :database_registered_event, + sap_system_id: sap_system_id, + sid: db_sid + ), + build( + :database_instance_registered_event, + sap_system_id: sap_system_id, + host_id: primary_database_host_id, + sid: db_sid, + instance_number: db_instance_number_1, + system_replication: "Primary" + ), + build( + :database_instance_registered_event, + sap_system_id: sap_system_id, + host_id: secondary_database_host_id, + instance_number: db_instance_number_2, + system_replication: "Secondary", + sid: db_sid + ), + build( + :application_instance_registered_event, + sap_system_id: sap_system_id, + features: "MESSAGESERVER|ENQUE", + host_id: message_server_host_id, + instance_number: message_server_instance_number, + sid: application_sid + ), + build( + :application_instance_registered_event, + sap_system_id: sap_system_id, + features: "ABAP|GATEWAY|ICMAN|IGS", + host_id: abap_host_id, + instance_number: abap_instance_number, + sid: application_sid + ), + build( + :sap_system_registered_event, + sap_system_id: sap_system_id, + sid: application_sid + ), + 
build(:database_instance_deregistered_event, + sap_system_id: sap_system_id, + host_id: primary_database_host_id, + instance_number: db_instance_number_1, + deregistered_at: deregistered_at + ), + build(:database_deregistered_event, + sap_system_id: sap_system_id, + deregistered_at: deregistered_at + ), + build(:sap_system_deregistered_event, + sap_system_id: sap_system_id, + deregistered_at: deregistered_at + ), + build(:application_instance_deregistered_event, + sap_system_id: sap_system_id, + deregistered_at: deregistered_at, + instance_number: message_server_instance_number, + host_id: message_server_host_id + ), + build(:database_instance_registered_event, + system_replication: "Primary", + sid: db_sid, + sap_system_id: sap_system_id + ), + build(:database_restored_event, + sap_system_id: sap_system_id + ) + ] + + command = + build( + :register_application_instance_command, + sap_system_id: sap_system_id, + sid: application_sid, + db_host: primary_database_host_id, + features: "MESSAGESERVER" + ) + + assert_events_and_state( + initial_events, + command, + [ + %ApplicationInstanceRegistered{ + sap_system_id: sap_system_id, + sid: application_sid, + host_id: command.host_id, + instance_number: command.instance_number, + instance_hostname: command.instance_hostname, + features: command.features, + http_port: command.http_port, + https_port: command.https_port, + start_priority: command.start_priority, + health: command.health + }, + %SapSystemRestored{ + sap_system_id: sap_system_id, + tenant: command.tenant, + db_host: command.db_host, + health: command.health + } + ], + fn sap_system -> + assert %SapSystem{ + deregistered_at: nil, + database: %Database{ + deregistered_at: nil, + sid: ^db_sid + } + } = sap_system + end + ) + end + + test "should reject all the commands except for the registration/instance deregistration ones, when the SAP system is deregistered" do + sap_system_id = UUID.uuid4() + + primary_database_host_id = UUID.uuid4() + secondary_database_host_id = UUID.uuid4() + + deregistered_at = DateTime.utc_now() + + db_instance_number_1 = "00" + db_instance_number_2 = "01" + + db_sid = fake_sid() + application_sid = fake_sid() + + message_server_host_id = UUID.uuid4() + message_server_instance_number = "00" + abap_host_id = UUID.uuid4() + abap_instance_number = "01" + + initial_events = [ + build( + :database_registered_event, + sap_system_id: sap_system_id, + sid: db_sid + ), + build( + :database_instance_registered_event, + sap_system_id: sap_system_id, + host_id: primary_database_host_id, + sid: db_sid, + instance_number: db_instance_number_1, + system_replication: "Primary" + ), + build( + :database_instance_registered_event, + sap_system_id: sap_system_id, + host_id: secondary_database_host_id, + instance_number: db_instance_number_2, + system_replication: "Secondary", + sid: db_sid + ), + build( + :application_instance_registered_event, + sap_system_id: sap_system_id, + features: "MESSAGESERVER|ENQUE", + host_id: message_server_host_id, + instance_number: message_server_instance_number, + sid: application_sid + ), + build( + :application_instance_registered_event, + sap_system_id: sap_system_id, + features: "ABAP|GATEWAY|ICMAN|IGS", + host_id: abap_host_id, + instance_number: abap_instance_number, + sid: application_sid + ), + build( + :sap_system_registered_event, + sap_system_id: sap_system_id, + sid: application_sid + ), + build(:database_instance_deregistered_event, + sap_system_id: sap_system_id, + host_id: primary_database_host_id, + instance_number: 
db_instance_number_1, + deregistered_at: deregistered_at + ), + build(:database_deregistered_event, + sap_system_id: sap_system_id, + deregistered_at: deregistered_at + ), + build(:sap_system_deregistered_event, + sap_system_id: sap_system_id, + deregistered_at: deregistered_at + ) + ] + + commands_to_accept = [ + build(:register_database_instance_command), + build(:register_application_instance_command), + build(:deregister_database_instance_command, sap_system_id: sap_system_id), + build(:deregister_application_instance_command, sap_system_id: sap_system_id), + build(:rollup_sap_system_command) + ] + + for command <- commands_to_accept do + assert match?({:ok, _, _}, aggregate_run(initial_events, command)), + "Command #{inspect(command)} should be accepted by a deregistered SAP system" + end + end + + test "should deregister a Database and SAP system when the Primary database instance is removed" do + sap_system_id = UUID.uuid4() + + primary_database_host_id = UUID.uuid4() + secondary_database_host_id = UUID.uuid4() + + deregistered_at = DateTime.utc_now() + + db_instance_number_1 = "00" + db_instance_number_2 = "01" + + db_sid = fake_sid() + application_sid = fake_sid() + + message_server_host_id = UUID.uuid4() + message_server_instance_number = "00" + abap_host_id = UUID.uuid4() + abap_instance_number = "01" + + assert_events_and_state( + [ + build( + :database_registered_event, + sap_system_id: sap_system_id, + sid: db_sid + ), + build( + :database_instance_registered_event, + sap_system_id: sap_system_id, + host_id: primary_database_host_id, + sid: db_sid, + instance_number: db_instance_number_1, + system_replication: "Primary" + ), + build( + :database_instance_registered_event, + sap_system_id: sap_system_id, + host_id: secondary_database_host_id, + instance_number: db_instance_number_2, + system_replication: "Secondary", + sid: db_sid + ), + build( + :application_instance_registered_event, + sap_system_id: sap_system_id, + features: "MESSAGESERVER|ENQUE", + host_id: message_server_host_id, + instance_number: message_server_instance_number, + sid: application_sid + ), + build( + :application_instance_registered_event, + sap_system_id: sap_system_id, + features: "ABAP|GATEWAY|ICMAN|IGS", + host_id: abap_host_id, + instance_number: abap_instance_number, + sid: application_sid + ), + build( + :sap_system_registered_event, + sap_system_id: sap_system_id, + sid: application_sid + ) + ], + %DeregisterDatabaseInstance{ + sap_system_id: sap_system_id, + host_id: primary_database_host_id, + instance_number: db_instance_number_1, + deregistered_at: deregistered_at + }, + [ + %DatabaseInstanceDeregistered{ + sap_system_id: sap_system_id, + host_id: primary_database_host_id, + instance_number: db_instance_number_1, + deregistered_at: deregistered_at + }, + %DatabaseDeregistered{ + sap_system_id: sap_system_id, + deregistered_at: deregistered_at + }, + %SapSystemDeregistered{ + sap_system_id: sap_system_id, + deregistered_at: deregistered_at + } + ], + fn sap_system -> + assert %SapSystem{ + sid: ^application_sid, + database: %Database{ + sid: ^db_sid, + instances: [ + %Instance{ + instance_number: ^db_instance_number_2, + sid: ^db_sid, + host_id: ^secondary_database_host_id + } + ], + health: :passing, + deregistered_at: ^deregistered_at + }, + application: %Application{ + sid: ^application_sid, + instances: [ + %Instance{instance_number: ^abap_instance_number, sid: ^application_sid}, + %Instance{ + instance_number: ^message_server_instance_number, + sid: ^application_sid + } + ] + }, + 
deregistered_at: ^deregistered_at + } = sap_system + end + ) + end + + test "should deregister a secondary DB instance, no SAP system registered." do + sap_system_id = UUID.uuid4() + deregistered_at = DateTime.utc_now() + + instance_number_1 = "00" + instance_number_2 = "01" + + db_sid = fake_sid() + + primary_db_host_id = UUID.uuid4() + secondary_db_host_id = UUID.uuid4() + + assert_events_and_state( + [ + build( + :database_registered_event, + sap_system_id: sap_system_id, + sid: db_sid + ), + build( + :database_instance_registered_event, + sap_system_id: sap_system_id, + system_replication: "Primary", + sid: db_sid, + instance_number: instance_number_1, + host_id: primary_db_host_id + ), + build( + :database_instance_registered_event, + sap_system_id: sap_system_id, + system_replication: "Secondary", + sid: db_sid, + host_id: secondary_db_host_id, + instance_number: instance_number_2 + ) + ], + %DeregisterDatabaseInstance{ + sap_system_id: sap_system_id, + host_id: secondary_db_host_id, + instance_number: instance_number_2, + deregistered_at: deregistered_at + }, + %DatabaseInstanceDeregistered{ + sap_system_id: sap_system_id, + host_id: secondary_db_host_id, + instance_number: instance_number_2, + deregistered_at: deregistered_at + }, + fn sap_system -> + assert %SapSystem{ + sid: nil, + database: %Database{ + sid: ^db_sid, + deregistered_at: nil, + instances: [%Instance{instance_number: ^instance_number_1}] + } + } = sap_system + end + ) + end + + test "should deregister the only database instance and deregister the entire database, no SAP system registered, system replication disabled" do + sap_system_id = UUID.uuid4() + database_host_id = UUID.uuid4() + deregistered_at = DateTime.utc_now() + db_instance_number_1 = "00" + db_sid = fake_sid() + + assert_events_and_state( + [ + build( + :database_registered_event, + sap_system_id: sap_system_id, + sid: db_sid + ), + build( + :database_instance_registered_event, + sap_system_id: sap_system_id, + host_id: database_host_id, + instance_number: db_instance_number_1, + system_replication: nil, + sid: db_sid + ) + ], + %DeregisterDatabaseInstance{ + sap_system_id: sap_system_id, + host_id: database_host_id, + instance_number: db_instance_number_1, + deregistered_at: deregistered_at + }, + [ + %DatabaseInstanceDeregistered{ + sap_system_id: sap_system_id, + host_id: database_host_id, + instance_number: db_instance_number_1, + deregistered_at: deregistered_at + }, + %DatabaseDeregistered{ + sap_system_id: sap_system_id, + deregistered_at: deregistered_at + } + ], + fn sap_system -> + assert %SapSystem{ + sid: nil, + database: %Database{ + sid: ^db_sid, + instances: [], + health: :passing, + deregistered_at: ^deregistered_at + }, + application: nil, + deregistered_at: nil + } = sap_system + end + ) + end + + test "should deregister a single DB instance of two if no SR enabled, SAP system not registered" do + sap_system_id = UUID.uuid4() + database_host_id = UUID.uuid4() + second_database_host_id = UUID.uuid4() + deregistered_at = DateTime.utc_now() + db_instance_number_1 = "00" + db_instance_number_2 = "01" + + db_sid = fake_sid() + + assert_events_and_state( + [ + build( + :database_registered_event, + sap_system_id: sap_system_id, + sid: db_sid + ), + build( + :database_instance_registered_event, + sap_system_id: sap_system_id, + host_id: database_host_id, + instance_number: db_instance_number_1, + system_replication: nil, + sid: db_sid + ), + build( + :database_instance_registered_event, + sap_system_id: sap_system_id, + host_id: 
second_database_host_id, + instance_number: db_instance_number_2, + system_replication: nil, + sid: db_sid + ) + ], + %DeregisterDatabaseInstance{ + sap_system_id: sap_system_id, + host_id: database_host_id, + instance_number: db_instance_number_1, + deregistered_at: deregistered_at + }, + %DatabaseInstanceDeregistered{ + sap_system_id: sap_system_id, + host_id: database_host_id, + instance_number: db_instance_number_1, + deregistered_at: deregistered_at + }, + fn sap_system -> + assert %SapSystem{ + database: %Database{ + sid: ^db_sid, + deregistered_at: nil, + instances: [%Instance{instance_number: ^db_instance_number_2}] + } + } = sap_system + end + ) + end + + test "should deregister the primary instance, the entire database and then the secondary instance, SAP system not registered" do + sap_system_id = UUID.uuid4() + primary_database_host_id = UUID.uuid4() + secondary_database_host_id = UUID.uuid4() + deregistered_at = DateTime.utc_now() + db_instance_number_1 = "00" + db_instance_number_2 = "01" + + db_sid = fake_sid() + + initial_events = [ + build( + :database_registered_event, + sap_system_id: sap_system_id, + sid: db_sid + ), + build( + :database_instance_registered_event, + sap_system_id: sap_system_id, + host_id: primary_database_host_id, + sid: db_sid, + instance_number: db_instance_number_1, + system_replication: "Primary" + ), + build( + :database_instance_registered_event, + sap_system_id: sap_system_id, + host_id: secondary_database_host_id, + instance_number: db_instance_number_2, + system_replication: "Secondary" + ) + ] + + assert_events( + initial_events, + [ + %DeregisterDatabaseInstance{ + sap_system_id: sap_system_id, + host_id: primary_database_host_id, + instance_number: db_instance_number_1, + deregistered_at: deregistered_at + }, + %DeregisterDatabaseInstance{ + sap_system_id: sap_system_id, + host_id: secondary_database_host_id, + instance_number: db_instance_number_2, + deregistered_at: deregistered_at + } + ], + [ + %DatabaseInstanceDeregistered{ + sap_system_id: sap_system_id, + host_id: primary_database_host_id, + instance_number: db_instance_number_1, + deregistered_at: deregistered_at + }, + %DatabaseDeregistered{ + sap_system_id: sap_system_id, + deregistered_at: deregistered_at + }, + %DatabaseInstanceDeregistered{ + sap_system_id: sap_system_id, + host_id: secondary_database_host_id, + instance_number: db_instance_number_2, + deregistered_at: deregistered_at + } + ] + ) + end + + test "should correctly deregister the database in a scale out scenario, with two primary and two secondary, no SAP system registered" do + sap_system_id = UUID.uuid4() + first_primary_database_host_id = UUID.uuid4() + other_primary_database_host_id = UUID.uuid4() + + secondary_database_host_id = UUID.uuid4() + other_secondary_database_host_id = UUID.uuid4() + + deregistered_at = DateTime.utc_now() + + db_instance_number_1 = "00" + db_instance_number_2 = "01" + db_instance_number_3 = "02" + db_instance_number_4 = "03" + + db_sid = fake_sid() + + initial_events = [ + build( + :database_registered_event, + sap_system_id: sap_system_id, + sid: db_sid + ), + build( + :database_instance_registered_event, + sap_system_id: sap_system_id, + host_id: first_primary_database_host_id, + sid: db_sid, + instance_number: db_instance_number_1, + system_replication: "Primary" + ), + build( + :database_instance_registered_event, + sap_system_id: sap_system_id, + host_id: other_primary_database_host_id, + sid: db_sid, + instance_number: db_instance_number_2, + system_replication: "Primary" + ), 
+ build( + :database_instance_registered_event, + sap_system_id: sap_system_id, + host_id: secondary_database_host_id, + instance_number: db_instance_number_3, + system_replication: "Secondary" + ), + build( + :database_instance_registered_event, + sap_system_id: sap_system_id, + host_id: other_secondary_database_host_id, + instance_number: db_instance_number_4, + system_replication: "Secondary" + ) + ] + + assert_events( + initial_events, + [ + %DeregisterDatabaseInstance{ + sap_system_id: sap_system_id, + host_id: first_primary_database_host_id, + instance_number: db_instance_number_1, + deregistered_at: deregistered_at + }, + %DeregisterDatabaseInstance{ + sap_system_id: sap_system_id, + host_id: other_primary_database_host_id, + instance_number: db_instance_number_2, + deregistered_at: deregistered_at + }, + %DeregisterDatabaseInstance{ + sap_system_id: sap_system_id, + host_id: secondary_database_host_id, + instance_number: db_instance_number_3, + deregistered_at: deregistered_at + }, + %DeregisterDatabaseInstance{ + sap_system_id: sap_system_id, + host_id: other_secondary_database_host_id, + instance_number: db_instance_number_4, + deregistered_at: deregistered_at + } + ], + [ + %DatabaseInstanceDeregistered{ + sap_system_id: sap_system_id, + host_id: first_primary_database_host_id, + instance_number: db_instance_number_1, + deregistered_at: deregistered_at + }, + %DatabaseInstanceDeregistered{ + sap_system_id: sap_system_id, + host_id: other_primary_database_host_id, + instance_number: db_instance_number_2, + deregistered_at: deregistered_at + }, + %DatabaseDeregistered{ + sap_system_id: sap_system_id, + deregistered_at: deregistered_at + }, + %DatabaseInstanceDeregistered{ + sap_system_id: sap_system_id, + host_id: secondary_database_host_id, + instance_number: db_instance_number_3, + deregistered_at: deregistered_at + }, + %DatabaseInstanceDeregistered{ + sap_system_id: sap_system_id, + host_id: other_secondary_database_host_id, + instance_number: db_instance_number_4, + deregistered_at: deregistered_at + } + ] + ) + end + + test "should deregister an ENQREP Application Instance, SAP system registered" do + sap_system_id = UUID.uuid4() + deregistered_at = DateTime.utc_now() + + database_host_id = UUID.uuid4() + message_server_host_id = UUID.uuid4() + abap_host_id = UUID.uuid4() + enqrep_host_id = UUID.uuid4() + + database_instance_number = "00" + message_server_instance_number = "01" + abap_instance_number = "02" + enqrep_server_instance_number = "03" + + db_sid = fake_sid() + application_sid = fake_sid() + + assert_events_and_state( + [ + build( + :database_registered_event, + sap_system_id: sap_system_id, + sid: db_sid + ), + build( + :database_instance_registered_event, + sap_system_id: sap_system_id, + host_id: database_host_id, + sid: db_sid, + instance_number: database_instance_number, + system_replication: nil + ), + build( + :application_instance_registered_event, + sap_system_id: sap_system_id, + features: "MESSAGESERVER|ENQUE", + sid: application_sid, + host_id: message_server_host_id, + instance_number: message_server_instance_number + ), + build( + :application_instance_registered_event, + sap_system_id: sap_system_id, + features: "ABAP|GATEWAY|ICMAN|IGS", + sid: application_sid, + host_id: abap_host_id, + instance_number: abap_instance_number + ), + build( + :sap_system_registered_event, + sap_system_id: sap_system_id, + sid: application_sid + ), + build( + :application_instance_registered_event, + sap_system_id: sap_system_id, + host_id: enqrep_host_id, + 
instance_number: enqrep_server_instance_number, + sid: application_sid, + features: "ENQREP" + ) + ], + %DeregisterApplicationInstance{ + sap_system_id: sap_system_id, + host_id: enqrep_host_id, + instance_number: enqrep_server_instance_number, + deregistered_at: deregistered_at + }, + [ + %ApplicationInstanceDeregistered{ + sap_system_id: sap_system_id, + host_id: enqrep_host_id, + instance_number: enqrep_server_instance_number, + deregistered_at: deregistered_at + } + ], + fn sap_system -> + assert %SapSystem{ + sid: ^application_sid, + deregistered_at: nil, + database: %Database{ + sid: ^db_sid, + deregistered_at: nil, + instances: [ + %Instance{ + instance_number: ^database_instance_number, + host_id: ^database_host_id + } + ] + }, + application: %Application{ + sid: ^application_sid, + instances: [ + %Instance{ + host_id: ^abap_host_id, + instance_number: ^abap_instance_number + }, + %Instance{ + host_id: ^message_server_host_id, + instance_number: ^message_server_instance_number + } + ] + } + } = sap_system + end + ) + end + + test "should deregister an ABAP Application Instance without deregistering the SAP system" do + sap_system_id = UUID.uuid4() + deregistered_at = DateTime.utc_now() + + application_sid = fake_sid() + db_sid = fake_sid() + + database_host_id = UUID.uuid4() + message_server_host_id = UUID.uuid4() + abap_host_id = UUID.uuid4() + abap_2_host_id = UUID.uuid4() + enqrep_host_id = UUID.uuid4() + + database_instance_number = "00" + message_server_instance_number = "01" + abap_instance_number = "02" + abap_2_instance_number = "03" + enqrep_server_instance_number = "04" + + assert_events_and_state( + [ + build( + :database_registered_event, + sap_system_id: sap_system_id, + sid: db_sid + ), + build( + :database_instance_registered_event, + sap_system_id: sap_system_id, + host_id: database_host_id, + sid: db_sid, + instance_number: database_instance_number, + system_replication: nil + ), + build( + :application_instance_registered_event, + sap_system_id: sap_system_id, + sid: application_sid, + features: "MESSAGESERVER|ENQUE", + host_id: message_server_host_id, + instance_number: message_server_instance_number + ), + build( + :application_instance_registered_event, + sap_system_id: sap_system_id, + features: "ABAP|GATEWAY|ICMAN|IGS", + host_id: abap_host_id, + sid: application_sid, + instance_number: abap_instance_number + ), + build( + :application_instance_registered_event, + sap_system_id: sap_system_id, + sid: application_sid, + features: "ABAP|GATEWAY|ICMAN|IGS", + host_id: abap_2_host_id, + instance_number: abap_2_instance_number + ), + build( + :sap_system_registered_event, + sap_system_id: sap_system_id, + sid: application_sid + ), + build( + :application_instance_registered_event, + sap_system_id: sap_system_id, + sid: application_sid, + host_id: enqrep_host_id, + instance_number: enqrep_server_instance_number, + features: "ENQREP" + ) + ], + %DeregisterApplicationInstance{ + sap_system_id: sap_system_id, + host_id: abap_2_host_id, + instance_number: abap_2_instance_number, + deregistered_at: deregistered_at + }, + [ + %ApplicationInstanceDeregistered{ + sap_system_id: sap_system_id, + host_id: abap_2_host_id, + instance_number: abap_2_instance_number, + deregistered_at: deregistered_at + } + ], + fn sap_system -> + assert %SapSystem{ + sid: ^application_sid, + deregistered_at: nil, + database: %Database{ + sid: ^db_sid, + deregistered_at: nil, + instances: [ + %Instance{ + instance_number: ^database_instance_number, + host_id: ^database_host_id + } + ] + }, + 
application: %Application{ + sid: ^application_sid, + instances: [ + %Instance{ + host_id: ^enqrep_host_id, + instance_number: ^enqrep_server_instance_number + }, + %Instance{ + host_id: ^abap_host_id, + instance_number: ^abap_instance_number + }, + %Instance{ + host_id: ^message_server_host_id, + instance_number: ^message_server_instance_number + } + ] + } + } = sap_system + end + ) + end + + test "should deregister last ABAP Application Instance and deregister SAP System" do + sap_system_id = UUID.uuid4() + deregistered_at = DateTime.utc_now() + db_sid = fake_sid() + application_sid = fake_sid() + + database_host_id = UUID.uuid4() + message_server_host_id = UUID.uuid4() + abap_host_id = UUID.uuid4() + enqrep_host_id = UUID.uuid4() + + database_instance_number = "00" + message_server_instance_number = "01" + abap_instance_number = "02" + enqrep_server_instance_number = "03" + + assert_events_and_state( + [ + build( + :database_registered_event, + sap_system_id: sap_system_id, + sid: db_sid + ), + build( + :database_instance_registered_event, + sap_system_id: sap_system_id, + host_id: database_host_id, + sid: db_sid, + instance_number: database_instance_number + ), + build( + :application_instance_registered_event, + sap_system_id: sap_system_id, + features: "MESSAGESERVER|ENQUE", + host_id: message_server_host_id, + sid: application_sid, + instance_number: message_server_instance_number + ), + build( + :application_instance_registered_event, + sap_system_id: sap_system_id, + features: "ABAP|GATEWAY|ICMAN|IGS", + host_id: abap_host_id, + sid: application_sid, + instance_number: abap_instance_number + ), + build( + :sap_system_registered_event, + sap_system_id: sap_system_id, + sid: application_sid + ), + build( + :application_instance_registered_event, + sap_system_id: sap_system_id, + sid: application_sid, + host_id: enqrep_host_id, + instance_number: enqrep_server_instance_number, + features: "ENQREP" + ) + ], + %DeregisterApplicationInstance{ + sap_system_id: sap_system_id, + host_id: abap_host_id, + instance_number: abap_instance_number, + deregistered_at: deregistered_at + }, + [ + %ApplicationInstanceDeregistered{ + sap_system_id: sap_system_id, + host_id: abap_host_id, + instance_number: abap_instance_number, + deregistered_at: deregistered_at + }, + %SapSystemDeregistered{ + sap_system_id: sap_system_id, + deregistered_at: deregistered_at + } + ], + fn sap_system -> + assert %SapSystem{ + sid: ^application_sid, + database: %Database{ + deregistered_at: nil, + sid: ^db_sid, + instances: [ + %Instance{ + instance_number: ^database_instance_number, + host_id: ^database_host_id + } + ] + }, + application: %Application{ + instances: [ + %Instance{ + instance_number: ^enqrep_server_instance_number, + host_id: ^enqrep_host_id + }, + %Instance{ + instance_number: ^message_server_instance_number, + host_id: ^message_server_host_id + } + ] + }, + deregistered_at: ^deregistered_at + } = sap_system + end + ) + end + + test "should only deregister a Message Server from a not-fully-registered SAP system" do + sap_system_id = UUID.uuid4() + deregistered_at = DateTime.utc_now() + + database_host_id = UUID.uuid4() + message_server_host_id = UUID.uuid4() + + database_instance_number = "00" + message_server_instance_number = "01" + + application_sid = fake_sid() + database_sid = fake_sid() + + assert_events_and_state( + [ + build( + :database_registered_event, + sid: database_sid, + sap_system_id: sap_system_id + ), + build( + :database_instance_registered_event, + sap_system_id: sap_system_id, + 
sid: database_sid, + host_id: database_host_id, + instance_number: database_instance_number + ), + build( + :application_instance_registered_event, + sap_system_id: sap_system_id, + sid: application_sid, + features: "MESSAGESERVER|ENQUE", + host_id: message_server_host_id, + instance_number: message_server_instance_number + ) + ], + %DeregisterApplicationInstance{ + sap_system_id: sap_system_id, + host_id: message_server_host_id, + instance_number: message_server_instance_number, + deregistered_at: deregistered_at + }, + %ApplicationInstanceDeregistered{ + sap_system_id: sap_system_id, + host_id: message_server_host_id, + instance_number: message_server_instance_number, + deregistered_at: deregistered_at + }, + fn sap_system -> + assert %SapSystem{ + sid: nil, + database: %Database{ + sid: ^database_sid, + deregistered_at: nil, + instances: [ + %Instance{ + instance_number: ^database_instance_number, + host_id: ^database_host_id + } + ] + }, + application: %Application{ + instances: [] + }, + deregistered_at: nil + } = sap_system + end + ) + end + + test "should deregister Message Server and deregister SAP System" do + sap_system_id = UUID.uuid4() + deregistered_at = DateTime.utc_now() + + database_host_id = UUID.uuid4() + message_server_host_id = UUID.uuid4() + abap_host_id = UUID.uuid4() + enqrep_host_id = UUID.uuid4() + + database_instance_number = "00" + message_server_instance_number = "01" + abap_instance_number = "02" + enqrep_server_instance_number = "03" + + db_sid = fake_sid() + application_sid = fake_sid() + + assert_events_and_state( + [ + build( + :database_registered_event, + sap_system_id: sap_system_id, + sid: db_sid + ), + build( + :database_instance_registered_event, + sap_system_id: sap_system_id, + host_id: database_host_id, + instance_number: database_instance_number, + sid: db_sid + ), + build( + :application_instance_registered_event, + sap_system_id: sap_system_id, + features: "MESSAGESERVER|ENQUE", + host_id: message_server_host_id, + instance_number: message_server_instance_number, + sid: application_sid + ), + build( + :application_instance_registered_event, + sap_system_id: sap_system_id, + features: "ABAP|GATEWAY|ICMAN|IGS", + host_id: abap_host_id, + instance_number: abap_instance_number, + sid: application_sid + ), + build( + :sap_system_registered_event, + sap_system_id: sap_system_id, + sid: application_sid + ), + build( + :application_instance_registered_event, + sap_system_id: sap_system_id, + host_id: enqrep_host_id, + instance_number: enqrep_server_instance_number, + features: "ENQREP", + sid: application_sid + ) + ], + %DeregisterApplicationInstance{ + sap_system_id: sap_system_id, + host_id: message_server_host_id, + instance_number: message_server_instance_number, + deregistered_at: deregistered_at + }, + [ + %ApplicationInstanceDeregistered{ + sap_system_id: sap_system_id, + host_id: message_server_host_id, + instance_number: message_server_instance_number, + deregistered_at: deregistered_at + }, + %SapSystemDeregistered{ + sap_system_id: sap_system_id, + deregistered_at: deregistered_at + } + ], + fn sap_system -> + assert %SapSystem{ + database: %Database{ + sid: ^db_sid, + instances: [ + %Instance{ + instance_number: ^database_instance_number, + host_id: ^database_host_id + } + ] + }, + application: %Application{ + instances: [ + %Instance{ + host_id: ^enqrep_host_id, + instance_number: ^enqrep_server_instance_number + }, + %Instance{ + host_id: ^abap_host_id, + instance_number: ^abap_instance_number + } + ] + }, + deregistered_at: 
^deregistered_at, + sid: ^application_sid + } = sap_system + end + ) + end + + test "should deregister the primary instance of database, the SAP system and deregister the ABAP application instance" do + sap_system_id = UUID.uuid4() + deregistered_at = DateTime.utc_now() + + primary_database_host_id = UUID.uuid4() + secondary_database_host_id = UUID.uuid4() + + message_server_host_id = UUID.uuid4() + abap_host_id = UUID.uuid4() + + database_instance_number_1 = "00" + database_instance_number_2 = "00" + + message_server_instance_number = "01" + abap_instance_number = "02" + + db_sid = fake_sid() + application_sid = fake_sid() + + initial_events = [ + build( + :database_registered_event, + sap_system_id: sap_system_id, + sid: db_sid + ), + build( + :database_instance_registered_event, + sap_system_id: sap_system_id, + host_id: primary_database_host_id, + sid: db_sid, + instance_number: database_instance_number_1, + system_replication: "Primary" + ), + build( + :database_instance_registered_event, + sap_system_id: sap_system_id, + host_id: secondary_database_host_id, + instance_number: database_instance_number_2, + system_replication: "Secondary" + ), + build( + :application_instance_registered_event, + sap_system_id: sap_system_id, + features: "MESSAGESERVER|ENQUE", + sid: application_sid, + host_id: message_server_host_id, + instance_number: message_server_instance_number + ), + build( + :application_instance_registered_event, + sap_system_id: sap_system_id, + features: "ABAP|GATEWAY|ICMAN|IGS", + sid: application_sid, + host_id: abap_host_id, + instance_number: abap_instance_number + ), + build( + :sap_system_registered_event, + sap_system_id: sap_system_id, + sid: application_sid + ) + ] + + assert_events( + initial_events, + [ + %DeregisterDatabaseInstance{ + sap_system_id: sap_system_id, + host_id: primary_database_host_id, + instance_number: database_instance_number_1, + deregistered_at: deregistered_at + }, + %DeregisterApplicationInstance{ + sap_system_id: sap_system_id, + host_id: message_server_host_id, + instance_number: message_server_instance_number, + deregistered_at: deregistered_at + } + ], + [ + %DatabaseInstanceDeregistered{ + sap_system_id: sap_system_id, + host_id: primary_database_host_id, + instance_number: database_instance_number_1, + deregistered_at: deregistered_at + }, + %DatabaseDeregistered{ + sap_system_id: sap_system_id, + deregistered_at: deregistered_at + }, + %SapSystemDeregistered{ + sap_system_id: sap_system_id, + deregistered_at: deregistered_at + }, + %ApplicationInstanceDeregistered{ + sap_system_id: sap_system_id, + host_id: message_server_host_id, + instance_number: message_server_instance_number, + deregistered_at: deregistered_at + } + ] + ) + end + end + + defp fake_sid, + do: Enum.join([Faker.Util.letter(), Faker.Util.letter(), Faker.Util.letter()]) end diff --git a/test/trento/support/type_test.exs b/test/trento/support/type_test.exs index 957dfac0be..58e4de1cb9 100644 --- a/test/trento/support/type_test.exs +++ b/test/trento/support/type_test.exs @@ -2,33 +2,46 @@ defmodule Trento.TypeTest do use ExUnit.Case describe "build a struct" do - test "should return errors if the parameters of an embedded field could not be validated" do - assert {:error, {:validation, %{embedded: %{id: ["is invalid"]}}}} = + test "should return errors if the parameters of an embedded and polymorphic fields could not be validated" do + assert {:error, + {:validation, + %{embedded: %{id: ["is invalid"]}, polymorphic: %{id: ["is invalid"]}}}} = TestData.new(%{ id: 
Faker.UUID.v4(), name: "a", embedded: %{ id: 1, name: Faker.StarWars.planet() + }, + polymorphic: %{ + id: 2, + address: Faker.StarWars.planet() } }) end - test "should raise a RuntimeError if the parameters of an embedded field could not be validated" do - assert_raise RuntimeError, "%{embedded: %{id: [\"is invalid\"]}}", fn -> - TestData.new!(%{ - id: Faker.UUID.v4(), - name: "a", - embedded: %{ - id: 2, - name: Faker.StarWars.planet() - } - }) - end + test "should raise a RuntimeError if the parameters of an embedded and polymorphic fields could not be validated" do + assert_raise RuntimeError, + "%{embedded: %{id: [\"is invalid\"]}, polymorphic: %{id: [\"is invalid\"]}}", + fn -> + TestData.new!(%{ + id: Faker.UUID.v4(), + name: "a", + embedded: %{ + id: 2, + name: Faker.StarWars.planet() + }, + polymorphic: %{ + id: 2, + address: Faker.StarWars.planet() + } + }) + end end - test "should validate the presence of a required embedded field" do - assert {:error, {:validation, %{embedded: ["can't be blank"]}}} == + test "should validate the presence of a required embedded and polymorphic fields" do + assert {:error, + {:validation, %{embedded: ["can't be blank"], polymorphic: ["can't be blank"]}}} == TestData.new(%{ id: Faker.UUID.v4(), name: "a" @@ -37,23 +50,32 @@ defmodule Trento.TypeTest do end describe "build a list of structs" do - test "should validate the presence of a required embedded field" do + test "should validate the presence of a required embedded and polymorphic fields" do assert {:error, - {:validation, [%{embedded: ["can't be blank"]}, %{embedded: ["can't be blank"]}]}} == + {:validation, + [ + %{embedded: ["can't be blank"], polymorphic: ["can't be blank"]}, + %{embedded: ["can't be blank"], polymorphic: ["can't be blank"]} + ]}} == TestData.new([ %{id: Faker.UUID.v4(), name: "carbonara"}, %{id: Faker.UUID.v4(), name: "amatriciana"} ]) assert {:error, - {:validation, [%{embedded: ["can't be blank"]}, %{embedded: ["can't be blank"]}]}} == + {:validation, + [ + %{embedded: ["can't be blank"], polymorphic: ["can't be blank"]}, + %{embedded: ["can't be blank"], polymorphic: ["can't be blank"]} + ]}} == TestData.new([ %{id: Faker.UUID.v4(), name: "carbonara"}, %{id: Faker.UUID.v4(), name: "amatriciana"}, %{ id: Faker.UUID.v4(), name: "cacio_pepe", - embedded: %{id: Faker.UUID.v4(), name: "yay"} + embedded: %{id: Faker.UUID.v4(), name: "yay"}, + polymorphic: %{id: Faker.UUID.v4(), phone: "123456789"} } ]) end @@ -65,51 +87,37 @@ defmodule Trento.TypeTest do end test "should return a list of structs" do - { - :ok, - [ - %TestData{ - embedded: %EmbeddedTestData{id: _id1, name: "yay"}, - id: _id2, - name: "cacio_pepe" - }, - %TestData{ - embedded: %EmbeddedTestData{ - id: _id3, - name: "wow" - }, - id: _id4, - name: "lasagne" - } - ] - } = - TestData.new([ - %{ - id: Faker.UUID.v4(), - name: "cacio_pepe", - embedded: %{id: Faker.UUID.v4(), name: "yay"} - }, - %{ - id: Faker.UUID.v4(), - name: "lasagne", - embedded: %{id: Faker.UUID.v4(), name: "wow"} - } - ]) - end - - test "should raise a RuntimeError if the parameters of an embedded field could not be validated" do - assert_raise RuntimeError, "[%{embedded: %{id: [\"is invalid\"]}}]", fn -> - TestData.new!([ - %{ - id: Faker.UUID.v4(), - name: "a", - embedded: %{ - id: 2, - name: Faker.StarWars.planet() - } - } - ]) - end + assert { + :ok, + [ + %TestData{ + embedded: %EmbeddedTestData{id: _id1, name: "yay"}, + polymorphic: %PolymorphicPhoneTestData{id: _, phone: "123456789"}, + id: _id2, + name: "cacio_pepe" + }, + %TestData{ 
+ embedded: %EmbeddedTestData{id: _id3, name: "wow"}, + polymorphic: %PolymorphicAddressTestData{id: _, address: "rome"}, + id: _id4, + name: "lasagne" + } + ] + } = + TestData.new([ + %{ + id: Faker.UUID.v4(), + name: "cacio_pepe", + embedded: %{id: Faker.UUID.v4(), name: "yay"}, + polymorphic: %{id: Faker.UUID.v4(), phone: "123456789"} + }, + %{ + id: Faker.UUID.v4(), + name: "lasagne", + embedded: %{id: Faker.UUID.v4(), name: "wow"}, + polymorphic: %{id: Faker.UUID.v4(), address: "rome"} + } + ]) end end end diff --git a/test/trento_web/controllers/health_controller_test.exs b/test/trento_web/controllers/health_controller_test.exs index a5e3d99013..e3fd66eab9 100644 --- a/test/trento_web/controllers/health_controller_test.exs +++ b/test/trento_web/controllers/health_controller_test.exs @@ -5,7 +5,7 @@ defmodule Trento.HealthControllerTest do import OpenApiSpex.TestAssertions - alias TrentoWeb.OpenApi.ApiSpec + alias TrentoWeb.OpenApi.V1.ApiSpec setup do %{api_spec: ApiSpec.spec()} diff --git a/test/trento_web/controllers/session_controller_test.exs b/test/trento_web/controllers/session_controller_test.exs index ad931dcff7..6fc73a7f59 100644 --- a/test/trento_web/controllers/session_controller_test.exs +++ b/test/trento_web/controllers/session_controller_test.exs @@ -6,7 +6,7 @@ defmodule TrentoWeb.SessionControllerTest do import Mox import OpenApiSpex.TestAssertions - alias TrentoWeb.OpenApi.ApiSpec + alias TrentoWeb.OpenApi.V1.ApiSpec setup [:set_mox_from_context, :verify_on_exit!] diff --git a/test/trento_web/controllers/v1/cluster_controller_test.exs b/test/trento_web/controllers/v1/cluster_controller_test.exs index bf87d54d90..50110091a9 100644 --- a/test/trento_web/controllers/v1/cluster_controller_test.exs +++ b/test/trento_web/controllers/v1/cluster_controller_test.exs @@ -5,7 +5,7 @@ defmodule TrentoWeb.V1.ClusterControllerTest do import Mox import Trento.Factory - alias TrentoWeb.OpenApi.ApiSpec + alias TrentoWeb.OpenApi.V1.ApiSpec setup [:set_mox_from_context, :verify_on_exit!] diff --git a/test/trento_web/controllers/v1/health_overview_controller_test.exs b/test/trento_web/controllers/v1/health_overview_controller_test.exs index 6811e84ee5..e9d751b337 100644 --- a/test/trento_web/controllers/v1/health_overview_controller_test.exs +++ b/test/trento_web/controllers/v1/health_overview_controller_test.exs @@ -4,7 +4,7 @@ defmodule TrentoWeb.V1.HealthOverviewControllerTest do import OpenApiSpex.TestAssertions import Trento.Factory - alias TrentoWeb.OpenApi.ApiSpec + alias TrentoWeb.OpenApi.V1.ApiSpec require Trento.Domain.Enums.Health, as: Health require Trento.Domain.Enums.ClusterType, as: ClusterType diff --git a/test/trento_web/controllers/v1/host_controller_test.exs b/test/trento_web/controllers/v1/host_controller_test.exs index 9982b97515..d9c7fc2bb2 100644 --- a/test/trento_web/controllers/v1/host_controller_test.exs +++ b/test/trento_web/controllers/v1/host_controller_test.exs @@ -3,7 +3,7 @@ defmodule TrentoWeb.V1.HostControllerTest do import OpenApiSpex.TestAssertions - alias TrentoWeb.OpenApi.ApiSpec + alias TrentoWeb.OpenApi.V1.ApiSpec import Trento.Factory @@ -11,6 +11,10 @@ defmodule TrentoWeb.V1.HostControllerTest do setup [:set_mox_from_context, :verify_on_exit!] 
+  setup do
+    %{api_spec: ApiSpec.spec()}
+  end
+
   describe "list" do
     test "should list all hosts", %{conn: conn} do
       %{id: host_id} = insert(:host)
@@ -180,4 +184,56 @@ defmodule TrentoWeb.V1.HostControllerTest do
              } == resp
     end
   end
+
+  describe "delete" do
+    test "should send 204 response when successful host deletion", %{conn: conn} do
+      %{id: host_id} = insert(:host)
+
+      expect(
+        Trento.Commanded.Mock,
+        :dispatch,
+        fn %Trento.Domain.Commands.RequestHostDeregistration{host_id: ^host_id} ->
+          :ok
+        end
+      )
+
+      conn
+      |> delete("/api/v1/hosts/#{host_id}")
+      |> response(204)
+    end
+
+    test "should send 422 response if the host is still alive", %{conn: conn, api_spec: api_spec} do
+      %{id: host_id} = insert(:host)
+
+      expect(
+        Trento.Commanded.Mock,
+        :dispatch,
+        fn %Trento.Domain.Commands.RequestHostDeregistration{host_id: ^host_id} ->
+          {:error, :host_alive}
+        end
+      )
+
+      conn
+      |> delete("/api/v1/hosts/#{host_id}")
+      |> json_response(422)
+      |> assert_schema("UnprocessableEntity", api_spec)
+    end
+
+    test "should return 404 if the host was not found", %{conn: conn, api_spec: api_spec} do
+      %{id: host_id} = insert(:host)
+
+      expect(
+        Trento.Commanded.Mock,
+        :dispatch,
+        fn %Trento.Domain.Commands.RequestHostDeregistration{host_id: ^host_id} ->
+          {:error, :host_not_registered}
+        end
+      )
+
+      conn
+      |> delete("/api/v1/hosts/#{host_id}")
+      |> json_response(404)
+      |> assert_schema("NotFound", api_spec)
+    end
+  end
 end
diff --git a/test/trento_web/controllers/v1/installation_controller_test.exs b/test/trento_web/controllers/v1/installation_controller_test.exs
index 9d30aa80c2..dcb8685f61 100644
--- a/test/trento_web/controllers/v1/installation_controller_test.exs
+++ b/test/trento_web/controllers/v1/installation_controller_test.exs
@@ -3,7 +3,7 @@ defmodule TrentoWeb.V1.InstallationControllerTest do
 
   import OpenApiSpex.TestAssertions
 
-  alias TrentoWeb.OpenApi.ApiSpec
+  alias TrentoWeb.OpenApi.V1.ApiSpec
 
   test "should return the api key", %{conn: conn} do
     api_spec = ApiSpec.spec()
diff --git a/test/trento_web/controllers/v1/prometheus_controller_test.exs b/test/trento_web/controllers/v1/prometheus_controller_test.exs
index 5f1acf7f29..91cf642bf0 100644
--- a/test/trento_web/controllers/v1/prometheus_controller_test.exs
+++ b/test/trento_web/controllers/v1/prometheus_controller_test.exs
@@ -1,20 +1,27 @@
 defmodule TrentoWeb.V1.PrometheusControllerTest do
   use TrentoWeb.ConnCase, async: true
 
-  alias TrentoWeb.OpenApi.ApiSpec
+  alias TrentoWeb.OpenApi.V1.ApiSpec
 
   import OpenApiSpex.TestAssertions
   import Mox
   import Trento.Factory
 
   test "should return the expected targets", %{conn: conn} do
+    insert(:host, deregistered_at: DateTime.utc_now())
     insert_list(2, :host)
+
     api_spec = ApiSpec.spec()
 
-    conn
-    |> get("/api/v1/prometheus/targets")
-    |> json_response(200)
-    |> assert_schema("HttpSTDTargetList", api_spec)
+    response =
+      conn
+      |> get("/api/v1/prometheus/targets")
+      |> json_response(200)
+
+    targets_ids = Enum.map(response, &Map.get(&1, "labels")["agentID"])
+
+    assert length(targets_ids) == 2
+    assert_schema(response, "HttpSTDTargetList", api_spec)
   end
 
   test "should return the expected targets when some host does not have any IP address", %{
diff --git a/test/trento_web/controllers/v1/sap_system_controller_test.exs b/test/trento_web/controllers/v1/sap_system_controller_test.exs
index 96f230b368..920051c195 100644
--- a/test/trento_web/controllers/v1/sap_system_controller_test.exs
+++ b/test/trento_web/controllers/v1/sap_system_controller_test.exs
@@ -5,7 +5,7 @@ defmodule TrentoWeb.V1.SapSystemControllerTest do
 
   import OpenApiSpex.TestAssertions
 
-  alias TrentoWeb.OpenApi.ApiSpec
+  alias TrentoWeb.OpenApi.V1.ApiSpec
 
   describe "list" do
     test "should list all sap_systems", %{conn: conn} do
diff --git a/test/trento_web/controllers/v1/settings_controller_test.exs b/test/trento_web/controllers/v1/settings_controller_test.exs
index 330334c627..39aaf507ca 100644
--- a/test/trento_web/controllers/v1/settings_controller_test.exs
+++ b/test/trento_web/controllers/v1/settings_controller_test.exs
@@ -3,7 +3,7 @@ defmodule TrentoWeb.V1.SettingsControllerTest do
 
   import OpenApiSpex.TestAssertions
 
-  alias TrentoWeb.OpenApi.ApiSpec
+  alias TrentoWeb.OpenApi.V1.ApiSpec
 
   test "should return the settings according to the schema", %{conn: conn} do
     api_spec = ApiSpec.spec()
diff --git a/test/trento_web/controllers/v2/cluster_controller_test.exs b/test/trento_web/controllers/v2/cluster_controller_test.exs
new file mode 100644
index 0000000000..99d7fead9e
--- /dev/null
+++ b/test/trento_web/controllers/v2/cluster_controller_test.exs
@@ -0,0 +1,24 @@
+defmodule TrentoWeb.V2.ClusterControllerTest do
+  use TrentoWeb.ConnCase, async: true
+
+  import OpenApiSpex.TestAssertions
+  import Mox
+  import Trento.Factory
+
+  alias TrentoWeb.OpenApi.V2.ApiSpec
+
+  setup [:set_mox_from_context, :verify_on_exit!]
+
+  describe "list" do
+    test "should be compliant with ASCS/ERS clusters schema", %{conn: conn} do
+      insert(:cluster, details: build(:ascs_ers_cluster_details))
+
+      api_spec = ApiSpec.spec()
+
+      conn
+      |> get("/api/v2/clusters")
+      |> json_response(200)
+      |> assert_schema("PacemakerClustersCollection", api_spec)
+    end
+  end
+end
diff --git a/test/trento_web/openapi/api_spec_test.exs b/test/trento_web/openapi/api_spec_test.exs
new file mode 100644
index 0000000000..e6ce2a5082
--- /dev/null
+++ b/test/trento_web/openapi/api_spec_test.exs
@@ -0,0 +1,51 @@
+defmodule TrentoWeb.OpenApi.ApiSpecTest do
+  use ExUnit.Case
+
+  alias TrentoWeb.OpenApi.ApiSpec
+
+  defmodule TestController do
+    use Phoenix.Controller
+    use OpenApiSpex.ControllerSpecs
+
+    operation :show,
+      summary: "Dummy show"
+
+    def show(_, _), do: nil
+  end
+
+  defmodule TestRouter do
+    use Phoenix.Router
+
+    scope "/api" do
+      get "/not_versioned", TestController, :show
+      get "/v1/route", TestController, :show
+      get "/v2/route", TestController, :show
+    end
+
+    def available_api_versions, do: ["v2", "v1"]
+  end
+
+  defmodule V1 do
+    use ApiSpec,
+      api_version: "v1"
+  end
+
+  defmodule V2 do
+    use ApiSpec,
+      api_version: "v2"
+  end
+
+  describe "ApiSpec" do
+    test "should render only the v1 version routes" do
+      assert %OpenApiSpex.OpenApi{
+               paths: %{"/api/not_versioned" => _, "/api/v1/route" => _}
+             } = V1.spec(TestRouter)
+    end
+
+    test "should render only the v2 version routes" do
+      assert %OpenApiSpex.OpenApi{
+               paths: %{"/api/not_versioned" => _, "/api/v2/route" => _}
+             } = V2.spec(TestRouter)
+    end
+  end
+end
diff --git a/test/trento_web/plugs/api_redirector_test.exs b/test/trento_web/plugs/api_redirector_test.exs
index 9e2e6ae783..fe51acb583 100644
--- a/test/trento_web/plugs/api_redirector_test.exs
+++ b/test/trento_web/plugs/api_redirector_test.exs
@@ -10,23 +10,27 @@ defmodule TrentoWeb.Plugs.ApiRedirectorTest do
     end
   end
 
-  describe "call/2" do
-    test "should raise ArgumentError when :latest_version option is missing", %{conn: conn} do
-      assert_raise ArgumentError, "expected :latest_version option", fn ->
-        conn = %{conn | path_info: ["api", "hosts"]}
+  describe "init/1" do
+    test "should raise ArgumentError when :available_api_versions option is missing" do
+      assert_raise ArgumentError, "expected :available_api_versions option", fn ->
+        ApiRedirector.init([])
+      end
+    end
 
-        ApiRedirector.call(conn, [])
+    test "should raise ArgumentError when :available_api_versions is an empty list" do
+      assert_raise ArgumentError, ":available_api_versions must have 1 element at least", fn ->
+        ApiRedirector.init(available_api_versions: [])
       end
     end
 
-    test "should raise ArgumentError when :router option is missing", %{conn: conn} do
+    test "should raise ArgumentError when :router option is missing" do
       assert_raise ArgumentError, "expected :router option", fn ->
-        conn = %{conn | path_info: ["api", "hosts"]}
-
-        ApiRedirector.call(conn, latest_version: "v1")
+        ApiRedirector.init(available_api_versions: ["v2", "v1"])
       end
     end
+  end
 
+  describe "call/2" do
     test "should return 404 with the error view when the path is not recognized by the router", %{
       conn: conn
     } do
@@ -39,7 +43,7 @@ defmodule TrentoWeb.Plugs.ApiRedirectorTest do
       resp =
         conn
         |> Map.put(:path_info, ["api", "hosts"])
-        |> ApiRedirector.call(latest_version: "v1", router: ErrorNotFoundRouter)
+        |> ApiRedirector.call(available_api_versions: ["v2", "v1"], router: ErrorNotFoundRouter)
         |> json_response(404)
 
       assert %{
@@ -62,7 +66,7 @@
       resp =
         conn
         |> Map.put(:path_info, ["api", "hosts"])
-        |> ApiRedirector.call(latest_version: "v1", router: NotFoundRouter)
+        |> ApiRedirector.call(available_api_versions: ["v2", "v1"], router: NotFoundRouter)
         |> json_response(404)
 
       assert %{
@@ -72,12 +76,36 @@
              } == resp
     end
 
-    test "should redirect to the correct path when the route is recognized with the latest version",
+    test "should redirect to the newest version path when this version is available",
          %{conn: conn} do
+      conn =
+        conn
+        |> Map.put(:path_info, ["api", "test"])
+        |> ApiRedirector.call(available_api_versions: ["v2", "v1"], router: FoundRouter)
+
+      assert 307 == conn.status
+
+      location_header = get_resp_header(conn, "location")
+
+      assert ["/api/v2/test"] == location_header
+    end
+
+    test "should redirect to the next available version path if the newest version is not available",
+         %{conn: conn} do
+      defmodule V1FoundRouter do
+        def __match_route__(_, ["api", "v1", "test"], _) do
+          {%{}, %{}, %{}, {%{}, %{}}}
+        end
+
+        def __match_route__(_, _, _) do
+          :error
+        end
+      end
+
       conn =
         conn
         |> Map.put(:path_info, ["api", "test"])
-        |> ApiRedirector.call(latest_version: "v1", router: FoundRouter)
+        |> ApiRedirector.call(available_api_versions: ["v2", "v1"], router: V1FoundRouter)
 
       assert 307 == conn.status
 
@@ -86,18 +114,18 @@
       location_header = get_resp_header(conn, "location")
 
       assert ["/api/v1/test"] == location_header
     end
 
-    test "should redirect to the correct path with a subroute path when the route is recognized with the latest version",
+    test "should redirect to the correct path with a subroute path when the route is recognized in the available versions list",
          %{conn: conn} do
       conn =
         conn
         |> Map.put(:path_info, ["api", "some-resource", "12345"])
-        |> ApiRedirector.call(latest_version: "v1", router: FoundRouter)
+        |> ApiRedirector.call(available_api_versions: ["v2", "v1"], router: FoundRouter)
 
       assert 307 == conn.status
 
       location_header = get_resp_header(conn, "location")
 
-      assert ["/api/v1/some-resource/12345"] == location_header
+      assert ["/api/v2/some-resource/12345"] == location_header
     end
   end
 end
diff --git a/test/trento_web/views/error_view_test.exs b/test/trento_web/views/error_view_test.exs
index 924190d57d..2afa182b68 100644
--- a/test/trento_web/views/error_view_test.exs
+++ b/test/trento_web/views/error_view_test.exs
@@ -64,7 +64,12 @@ defmodule TrentoWeb.ErrorViewTest do
                  title: "Invalid value"
                },
                %{detail: "can't be blank", source: %{pointer: "/id"}, title: "Invalid value"},
-               %{detail: "can't be blank", source: %{pointer: "/name"}, title: "Invalid value"}
+               %{detail: "can't be blank", source: %{pointer: "/name"}, title: "Invalid value"},
+               %{
+                 detail: "can't be blank",
+                 source: %{pointer: "/polymorphic"},
+                 title: "Invalid value"
+               }
              ]
            } == render(TrentoWeb.ErrorView, "422.json", reason: validation_error)
   end
@@ -85,7 +90,12 @@
                  title: "Invalid value"
                },
                %{detail: "can't be blank", source: %{pointer: "/id"}, title: "Invalid value"},
-               %{detail: "can't be blank", source: %{pointer: "/name"}, title: "Invalid value"}
+               %{detail: "can't be blank", source: %{pointer: "/name"}, title: "Invalid value"},
+               %{
+                 detail: "can't be blank",
+                 source: %{pointer: "/polymorphic"},
+                 title: "Invalid value"
+               }
              ]
            } == render(TrentoWeb.ErrorView, "422.json", changeset: changeset)
   end
diff --git a/test/trento_web/views/v1/cluster_view_test.exs b/test/trento_web/views/v1/cluster_view_test.exs
new file mode 100644
index 0000000000..9241a31951
--- /dev/null
+++ b/test/trento_web/views/v1/cluster_view_test.exs
@@ -0,0 +1,15 @@
+defmodule TrentoWeb.V1.ClusterViewTest do
+  use TrentoWeb.ConnCase, async: true
+
+  import Phoenix.View
+  import Trento.Factory
+
+  alias TrentoWeb.V1.ClusterView
+
+  test "should adapt the cluster view to V1 version" do
+    cluster = build(:cluster, type: :ascs_ers, details: build(:ascs_ers_cluster_details))
+
+    assert %{type: :unknown, details: nil} =
+             render(ClusterView, "cluster.json", %{cluster: cluster})
+  end
+end
diff --git a/test/trento_web/views/v1/health_overview_view_test.exs b/test/trento_web/views/v1/health_overview_view_test.exs
index e958d6e658..356c59b267 100644
--- a/test/trento_web/views/v1/health_overview_view_test.exs
+++ b/test/trento_web/views/v1/health_overview_view_test.exs
@@ -1,50 +1,120 @@
 defmodule TrentoWeb.V1.HealthOverviewViewTest do
   use TrentoWeb.ConnCase, async: true
 
-  alias Trento.{
-    DatabaseInstanceReadModel,
-    HostReadModel
-  }
+  import Trento.Factory
 
   import Phoenix.View
 
-  test "renders overview.json" do
-    sap_system_id = UUID.uuid4()
-    sid = UUID.uuid4()
-    tenant = UUID.uuid4()
-    cluster_id = UUID.uuid4()
-
-    assert [
-             %{
-               cluster_id: ^cluster_id,
-               clusters_health: :critical,
-               database_health: :passing,
-               database_id: ^sap_system_id,
-               hosts_health: :warning,
-               id: ^sap_system_id,
-               sapsystem_health: :passing,
-               sid: ^sid,
-               tenant: ^tenant
-             }
-           ] =
-             render(TrentoWeb.V1.HealthOverviewView, "overview.json", %{
-               health_infos: [
-                 %{
-                   id: sap_system_id,
-                   sid: sid,
-                   sapsystem_health: :passing,
-                   database_instances: [
-                     %DatabaseInstanceReadModel{
-                       host: %HostReadModel{cluster_id: cluster_id},
-                       sap_system_id: sap_system_id,
-                       tenant: tenant
-                     }
-                   ],
-                   database_health: :passing,
-                   clusters_health: :critical,
-                   hosts_health: :warning
-                 }
-               ]
-             })
+  require Trento.Domain.Enums.Health, as: Health
+
+  describe "renders overview.json" do
+    test "should render all the fields" do
+      sap_system_id = UUID.uuid4()
+      sid = UUID.uuid4()
+      tenant = UUID.uuid4()
+      app_cluster_id = UUID.uuid4()
+      db_cluster_id = UUID.uuid4()
+
+      application_instances =
+        build_list(1, :application_instance_without_host,
+          sap_system_id: sap_system_id,
+          host: build(:host, cluster_id: nil)
+        ) ++
+          build_list(
+            2,
+            :application_instance_without_host,
+            sap_system_id: sap_system_id,
+            host: build(:host, cluster_id: app_cluster_id)
+          )
+
+      database_instances =
+        build_list(
+          2,
+          :database_instance_without_host,
+          sap_system_id: sap_system_id,
+          host: build(:host, cluster_id: db_cluster_id),
+          tenant: tenant
+        )
+
+      assert [
+               %{
+                 cluster_id: db_cluster_id,
+                 clusters_health: Health.warning(),
+                 application_cluster_id: app_cluster_id,
+                 database_cluster_id: db_cluster_id,
+                 application_cluster_health: Health.critical(),
+                 database_cluster_health: Health.warning(),
+                 database_health: Health.passing(),
+                 database_id: sap_system_id,
+                 hosts_health: Health.warning(),
+                 id: sap_system_id,
+                 sapsystem_health: Health.passing(),
+                 sid: sid,
+                 tenant: tenant
+               }
+             ] ==
+               render(TrentoWeb.V1.HealthOverviewView, "overview.json", %{
+                 health_infos: [
+                   %{
+                     id: sap_system_id,
+                     sid: sid,
+                     sapsystem_health: Health.passing(),
+                     database_health: Health.passing(),
+                     application_cluster_health: Health.critical(),
+                     database_cluster_health: Health.warning(),
+                     hosts_health: Health.warning(),
+                     application_instances: application_instances,
+                     database_instances: database_instances
+                   }
+                 ]
+               })
+    end
+
+    test "should send empty cluster ids" do
+      sap_system_id = UUID.uuid4()
+
+      application_instances =
+        build_list(
+          2,
+          :application_instance_without_host,
+          sap_system_id: sap_system_id,
+          host: build(:host, cluster_id: nil)
+        )
+
+      database_instances =
+        build_list(
+          2,
+          :database_instance_without_host,
+          sap_system_id: sap_system_id,
+          host: build(:host, cluster_id: nil),
+          tenant: UUID.uuid4()
+        )
+
+      assert [
+               %{
+                 cluster_id: nil,
+                 clusters_health: Health.unknown(),
+                 application_cluster_id: nil,
+                 database_cluster_id: nil,
+                 application_cluster_health: Health.unknown(),
+                 database_cluster_health: Health.unknown()
+               }
+             ] =
+               render(TrentoWeb.V1.HealthOverviewView, "overview.json", %{
+                 health_infos: [
+                   %{
+                     id: sap_system_id,
+                     sid: UUID.uuid4(),
+                     sapsystem_health: Health.passing(),
+                     database_health: Health.passing(),
+                     application_cluster_health: Health.unknown(),
+                     database_cluster_health: Health.unknown(),
+                     hosts_health: Health.warning(),
+                     application_instances: application_instances,
+                     database_instances: database_instances
+                   }
+                 ]
+               })
+    end
   end
 end