diff --git a/.env b/.env
index ef3897b9..ebf3cda2 100644
--- a/.env
+++ b/.env
@@ -18,6 +18,8 @@ APP_VERSION=V.0.1
APP_DEBUG=1
# What is the environment type you want to use for local production? (choose between dev, stag, prod, acce or test)
APP_ENV=dev
+# We use a build to tag images; this is switched to the version on master and to the environment on other branches
+APP_BUILD=dev
# The description for this api
APP_DESCRIPTION='Naast deze JSON rest API is er ook een [graphql](/graphql) interface beschikbaar.'
diff --git a/.github/workflows/dockerimage.yml b/.github/workflows/dockerimage.yml
index ded88c92..5907d2d5 100644
--- a/.github/workflows/dockerimage.yml
+++ b/.github/workflows/dockerimage.yml
@@ -21,24 +21,38 @@ jobs:
- uses: actions/checkout@v1
- name: Pulling old images, if any
run: docker-compose pull --ignore-pull-failures
+ - name: Setting APP_NAME
+ run: |
+ export NAME=$(grep APP_NAME= .env | cut -d '=' -f2)
+ echo ::set-env name=APP_NAME::$NAME
+ - name: Print app name
+ run: echo "APP_NAME = $APP_NAME"
- name: Setting APP_ENV to dev
run: |
echo ::set-env name=APP_ENV::dev
+ echo ::set-env name=APP_BUILD::dev
echo "set APP_ENV to $APP_ENV"
- name: Setting APP_ENV to prod
if: contains( github.ref, 'master' ) || contains( github.base_ref, 'master' )
run: |
echo ::set-env name=APP_ENV::prod
echo "set APP_ENV to $APP_ENV"
+ - name: Set APP_BUILD to APP_VERSION
+ if: contains( github.ref, 'master' )
+ run: |
+ export VERSION=$(grep APP_VERSION= .env | cut -d '=' -f2)
+ echo ::set-env name=APP_BUILD::$VERSION
+ echo "set APP_BUILD to $APP_BUILD"
- name: Setting APP_ENV to stag
if: contains( github.ref, 'staging' ) || contains( github.base_ref, 'staging' )
run: |
echo ::set-env name=APP_ENV::stag
+ echo ::set-env name=APP_BUILD::stag
echo "set APP_ENV to $APP_ENV"
- name: Print definitive APP_ENV
run: echo "APP_ENV is now $APP_ENV"
- name: Build the Docker image
- run: docker-compose build --pull --build-arg APP_ENV=$APP_ENV
+ run: docker-compose build --pull --build-arg APP_ENV=$APP_ENV --build-arg APP_BUILD=$APP_BUILD
- name: Run the docker image
run: docker-compose up -d
- name: Taking some sleep
@@ -79,13 +93,24 @@ jobs:
- name: Deploy through helm
id: helm-install
if: (contains( github.ref, 'master' ) || contains( github.ref, 'staging' ) || contains( github.ref, 'development' )) && steps.kubeconfig.outputs.success == 'true'
- run: helm upgrade ${{ secrets.APP_NAME }}-$APP_ENV ./api/helm --kubeconfig="kubeconfig.yaml" --namespace=$APP_ENV --set settings.env=$APP_ENV,settings.debug=1
+ run: helm upgrade $APP_NAME-$APP_ENV ./api/helm --kubeconfig="kubeconfig.yaml" --namespace=$APP_ENV --set settings.env=$APP_ENV,settings.debug=1
- name: Install through helm
if: failure()
- run: helm install --name ${{ secrets.APP_NAME }}-$APP_ENV ./api/helm --kubeconfig="kubeconfig.yaml" --namespace=$APP_ENV --set settings.env=$APP_ENV,settings.debug=1
+ run: helm install --name $APP_NAME-$APP_ENV ./api/helm --kubeconfig="kubeconfig.yaml" --namespace=$APP_ENV --set settings.env=$APP_ENV,settings.debug=1
- name: Rollout new containers
- if: (contains( github.ref, 'master' ) || contains( github.ref, 'staging' ) || contains( github.ref, 'development' )) && steps.kubeconfig.outputs.success == 'true'
+ if: (contains( github.ref, 'master' ) || contains( github.ref, 'staging' ) || contains( github.ref, 'development' )) && steps.kubeconfig.outputs.success == 'true' && steps.helm-install.outcome == 'success' && success()
run: |
- kubectl rollout restart deployment/${{ secrets.APP_NAME }}-php --kubeconfig="kubeconfig.yaml" --namespace=$APP_ENV
- kubectl rollout restart deployment/${{ secrets.APP_NAME }}-nginx --kubeconfig="kubeconfig.yaml" --namespace=$APP_ENV
- kubectl rollout restart deployment/${{ secrets.APP_NAME }}-varnish --kubeconfig="kubeconfig.yaml" --namespace=$APP_ENV
+ kubectl rollout restart deployment/$APP_NAME-php --kubeconfig="kubeconfig.yaml" --namespace=$APP_ENV
+ kubectl rollout restart deployment/$APP_NAME-nginx --kubeconfig="kubeconfig.yaml" --namespace=$APP_ENV
+ kubectl rollout restart deployment/$APP_NAME-varnish --kubeconfig="kubeconfig.yaml" --namespace=$APP_ENV
+ - name: Create Release
+ if: contains( github.ref, 'master' ) && steps.kubeconfig.outputs.success == 'true' && ( success() || failure() )
+ id: create_release
+ uses: actions/create-release@v1
+ env:
+ GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} # This token is provided by Actions, you do not need to create your own token
+ with:
+ tag_name: ${{ env.APP_BUILD }}
+ release_name: ${{ env.APP_BUILD }}
+ draft: false
+ prerelease: false
diff --git a/DESIGN.md b/DESIGN.md
index 34ff5696..cffb8595 100644
--- a/DESIGN.md
+++ b/DESIGN.md
@@ -14,10 +14,10 @@ Welcome, you are currently viewing the design decisions for the proto component.
- [Search](#search)
- [Queries](#queries)
- [Extending](#extending)
-- [Timetravel](#timetravel)
+- [Time travel](#timetravel)
- [Archivation](#archivation)
-- [Audittrail](#audittrail)
-- [Healthchecks](#healthchecks)
+- [Audit trail](#audittrail)
+- [Health checks](#healthchecks)
- [Notifications](#notifications)
- [Authentication](#authentication)
- [Authorization](#authorization)
@@ -29,14 +29,14 @@ Welcome, you are currently viewing the design decisions for the proto component.
*Implementation choices*
- [Api Versioning](#api-versioning)
-- [Environments and namespacing](#environments-and-namespacing)
+- [Environments and namespacing](#environments-and-namespacing)
- [Domain Build-up and routing](#domain-build-up-and-routing)
- [Container Setup](#container-setup)
The European factor
-------
-The proto-component isn't just a Dutch Component, it is in essence a Dutch translation of European components, nowhere is this more obvious than in the core code. Our component is based on [API Platform](https://api-platform.com/) an API specific version of the symfony framework. This framework is build by the lovely people of []() and is build with support of the European Commission trough the [EU-FOSSA Hackathon](https://ec.europa.eu/info/news/first-eu-fossa-hackathon-it-happened-2019-may-03_en) and Digital Ocean trough [Hacktoberfest](https://hacktoberfest.digitalocean.com/).
+The proto-component isn't just a Dutch component; it is in essence a Dutch translation of European components, and nowhere is this more obvious than in the core code. Our component is based on [API Platform](https://api-platform.com/), an API-specific version of the Symfony framework. This framework is built by the lovely people of [Les Tilleuls](https://les-tilleuls.coop/en) and is built with support of the European Commission through the [EU-FOSSA Hackathon](https://ec.europa.eu/info/news/first-eu-fossa-hackathon-it-happened-2019-may-03_en) and Digital Ocean through [Hacktoberfest](https://hacktoberfest.digitalocean.com/).
But it doesn't just end there. The [varnish container](https://hub.docker.com/r/eeacms/varnish/) that we use to speed up the API response is build and maintained by [EEA]() (The European Environment Agency) and the development team at conduction itself is attached to the [Odyssey program](https://www.odyssey.org/) and originated from the [startupinresidence](https://startupinresidence.com/) program.
@@ -46,10 +46,12 @@ On standards and standardization
-------
The specific goal of the proto component (which this current code base is a version of) is to provide a common architecture for common ground components. As such the common ground principles are leading in design choices, and within those principles international compliancy and technological invocation is deemed most important. **We do not want to make concessions to the current infrastructure.** As such the component might differ on [NL API Strategie](https://docs.geostandaarden.nl/api/API-Strategie), [NORA](https://www.noraonline.nl/wiki/Standaarden), [vng.cloud](https://zaakgerichtwerken.vng.cloud/themas/index) and or other standards if they are deemed incompatible or out of line with (inter)national standards and or good practices.
-Unfortunatly (inter)national standards standards can be conflicting. We therefore prioritize standards on two grounds
+Unfortunately (inter)national standards can be conflicting. We therefore prioritize standards on several grounds:
-- International standards are put before local standards
-- Standards caried by a standard organisation (like ISO, W3C etc) at put before floating standards (like RFC's) wichs are put before industraty standards, good practices and so on.
+- International is put before local
+- Standards carried by a standards organization (like ISO, W3C, etc.) are put before floating standards (like RFCs), which are put before industry standards, good practices and so on.
+
+So if, for instance, a **local** standard is out of line with an **international** good practice, we follow the international good practice.
### Commonground specific standards
@@ -57,30 +59,30 @@ This component was designed in line with the [NL API Strategie](https://docs.geo
## NL API Strategie
-The [NL API Strategie](https://docs.geostandaarden.nl/api/API-Strategie) takes a special place in this component, it is designed as a set of guidelines for API's for the dutch landscape. As such we follow it as close as posible. It dos however contains inconsistenies with both international standards and good practices. On those items we do not follow the norm but consider it our duty to try to change the norm.
+The [NL API Strategie](https://docs.geostandaarden.nl/api/API-Strategie) takes a special place in this component; it is designed as a set of guidelines for APIs for the Dutch landscape, and as such we follow it as closely as possible. It does however contain inconsistencies with both international standards and good practices. On those items we do not follow the norm, but consider it our duty to try to change the norm.
** We implement **
api-01, api-02, api-03, api-05, api-06, api-10, api-11, api-12, api-13,api-14, api-16, api-18, api-19, api-20, api-21, api-22, api-23, api-24, api-25, api-26, api-27, api-28, api-29, api-30, api-33, api-34, api-35, api-42
** We want to implement **
-- [api-14](https://docs.geostandaarden.nl/api/API-Strategie/#api-14) Use OAuth 2.0 for authorisation
+- [api-14](https://docs.geostandaarden.nl/api/API-Strategie/#api-14) Use OAuth 2.0 for authorization
** We do not implement **
- [api-04](https://docs.geostandaarden.nl/api/API-Strategie/#api-04) Define interfaces in Dutch unless there is an official English glossary (see [english](#english))
-- [api-09](https://docs.geostandaarden.nl/api/API-Strategie/#api-09) Implement custom representation if supported see [fields](#fields))
+- [api-09](https://docs.geostandaarden.nl/api/API-Strategie/#api-09) Implement custom representation if supported (see [fields](#fields))
- [api-17](https://docs.geostandaarden.nl/api/API-Strategie/#api-17) Publish documentation in Dutch unless there is existing documentation in English or there is an official English glossary (see [english](#english))
- [api-31](https://docs.geostandaarden.nl/api/API-Strategie/#api-31) Use the query parameter sorteer to sort (see [ordering](#ordering))
- [api-32](https://docs.geostandaarden.nl/api/API-Strategie/#api-32) Use the query parameter zoek for full-text search (see [search](#search))
- [api-36](https://docs.geostandaarden.nl/api/API-Strategie/#api-36) Provide a POST endpoint for GEO queries (see [queries](#queries))
- [api-37](https://docs.geostandaarden.nl/api/API-Strategie/#api-37) Support mixed queries at POST endpoints available (see [queries](#queries))
-*[api-38](https://docs.geostandaarden.nl/api/API-Strategie/#api-38) Put results of a global spatial query in the relevant geometric context (see [queries](#queries))
--
+- [api-38](https://docs.geostandaarden.nl/api/API-Strategie/#api-38) Put results of a global spatial query in the relevant geometric context (see [queries](#queries))
+
-** We doubt or havn't made a choice yet about**
+** We doubt or haven’t made a choice yet about **
-- [api-15](https://docs.geostandaarden.nl/api/API-Strategie/#api-15) Use PKIoverheid certificates for access-restricted or purpose-limited API authentication
+- [api-15](https://docs.geostandaarden.nl/api/API-Strategie/#api-15) Use PKIoverheid certificates for access-restricted or purpose-limited API authentication
- [api-39](https://docs.geostandaarden.nl/api/API-Strategie/#api-39) Use ETRS89 as the preferred coordinate reference system (CRS)
- [api-40](https://docs.geostandaarden.nl/api/API-Strategie/#api-40) Pass the coordinate reference system (CRS) of the request and the response in the headers
- [api-41](https://docs.geostandaarden.nl/api/API-Strategie/#api-41) Use content negotiation to serve different CRS
@@ -88,19 +90,19 @@ api-01, api-02, api-03, api-05, api-06, api-10, api-11, api-12, api-13,api-14, a
NLX
-------
We implement the [NLX system](https://docs.nlx.io/understanding-the-basics/introduction/) as part of the basic commonground infrastructure, as such nlx headers are used in the internal logging.
-The following X-NLX headers have been implemented for that reason `X-NLX-Logrecord-ID`,`X-NLX-Request-Process-Id`,`X-NLX-Request-Data-Elements` and `X-NLX-Request-Data-Subject`, these are tied to the internal audit trail (see audit trail for more information), and `X-Audit-Toelichting` (from the ZGW APIs) is implemented as `X-Audit-Clarification`
-
-We do not use other NLX headers since they (conform to the [NLX schema](https://docs.nlx.io/further-reading/transaction-logs/))wil not reach the provider. Please note that the use of nlx is optional. The component can be used without NLX. In that case the `X-NLX` header should be set to false, the `X-NLX-Logrecord-ID` should be provided with an log record designd by the client application to be retracable to a unique user and action. Other headers still aplly.
+The following X-NLX headers have been implemented for that reason: `X-NLX-Logrecord-ID`, `X-NLX-Request-Process-Id`, `X-NLX-Request-Data-Elements` and `X-NLX-Request-Data-Subject`. These are tied to the internal audit trail (see audit trail for more information), and `X-Audit-Toelichting` (from the ZGW APIs) is implemented as `X-Audit-Clarification`. We do not use other NLX headers since they (conforming to the [NLX schema](https://docs.nlx.io/reference-information/transaction-log-headers/)) will not reach the provider.
We strongly discourage the use of the `X-NLX-Request-Data-Subject` header as it might allow private data (such as BSNs) to show up in logging.
+Please note that the use of NLX is optional. The component can be used without NLX. In that case set `X-NLX-Logrecord-ID` to false and provide the (normally ignored) fields `X-NLX-Requester-User-Id`, `X-NLX-Request-Application-Id`, `X-NLX-Request-Subject-Identifier`, `X-NLX-Requester-Claims` and `X-NLX-Request-User` as if you were making an NLX call. This provides the API with enough credentials to create a complete audit trail. It also provides an easy implementation route to NLX, since the only thing that would need to be changed at a later time is making your call to an NLX outway instead of to the API directly.
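+
+A minimal sketch of what such a non-NLX call could look like (all header values below are purely illustrative):
+
+```
+GET /zaken HTTP/1.1
+X-NLX-Logrecord-ID: false
+X-NLX-Requester-User-Id: user-4223
+X-NLX-Request-Application-Id: my-client-application
+X-NLX-Request-Subject-Identifier: case-handling
+X-NLX-Requester-Claims: zaken.read
+X-NLX-Request-User: j.doe@example.org
+```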
+
English
-------
The [NL API Standard](https://geonovum.github.io/KP-APIs/#api-04-define-interfaces-in-dutch-unless-there-is-an-official-english-glossary) describes that there is a preference for Dutch in API documentation.
> Define resources and the underlying entities, fields and so on (the information model ad the external interface) in Dutch. English is allowed in case there is an official English glossary.
-We view this as a breach with good coding practice and international coding standards, all documentation and code is therefore supplied in English. We do however provide transaltion (or i18n) support.
+We view this as a breach of good coding practice and international coding standards; all documentation and code is therefore supplied in English. We do however provide translation (or i18n) support.
Fields
-------
@@ -108,49 +110,24 @@ A part of the [haal centraal](https://raw.githubusercontent.com/VNG-Realisatie/H
Search
-------
-As part of [api-32](https://docs.geostandaarden.nl/api/API-Strategie/#api-32) a `zoeken` query has been itroduced that can handle wildcards. This breaks best practice, first of allest practice is a `search` query parameter (see also the nodes on [English](#english)). Secondly wildcards are a sql concept, not a webconcept, they are also a rather old concept severly limiting the search options provided. Instead the [regeular expresion standard](https://en.wikipedia.org/wiki/Regular_expression) should be used.
+As part of [api-32](https://docs.geostandaarden.nl/api/API-Strategie/#api-32) a `zoeken` query has been introduced that can handle wildcards. This breaks best practice: first of all, best practice is a `search` query parameter (see also the notes on [English](#english)). Secondly, wildcards are an SQL concept, not a web concept; they are also a rather old concept, severely limiting the search options provided. Instead the [regular expression standard](https://en.wikipedia.org/wiki/Regular_expression) should be used.
__solution__
We implement a `search` query parameter on resource collections, that filters with regex.
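+
+For example, retrieving all resources whose name starts with "Amsterdam" could look like this (the endpoint is illustrative):
+
+```
+GET /organisations?search=^Amsterdam.*
+```
+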
Queries
-------
-In several examples of the nl apistrategie we see query parameters being atached to post requests. This is unusual in the sence that sending query strings allong with a post is ocnsiderd bad practice (becouse query parameters end up as part of an url and are therfore logged by servers). But is is technically posile folowing RFC 3986. The real pain is that in the NL api-stratgie the POST requests seems to be used to search, ot in other words GET data. This is where compliance with HTTP (1.1) breaks.
+In several examples of the NL API strategie we see query parameters being attached to POST requests. This is unusual in the sense that sending query strings along with a POST is considered bad practice (because query parameters end up as part of a URL and are therefore logged by servers), but it is technically possible following RFC 3986. The real pain is that in the NL API-strategie the POST requests seem to be used to search, or in other words to GET data. This is where compliance with HTTP (1.1) breaks.
__solution__
We do not implement a query endpoint on post requests.
-
-Domain Build-up and routing
--------
-By convention the component assumes that you follow the common ground domain name build up, meaning {environment}.{component}.{rest of domain}. That means that only the first two url parts are used for routing. It is also assumed that when no environment is supplied the production environment should be offered E.g. a proper domain for the production API of the verzoeken registratie component would be prod.vrc.zaakonline.nl but it should also be reachable under vrc.zaakonline.nl. The proper location for the development environment should always be dev.vrc.zaakonlin.nl
-
-Environments and namespacing
--------
-We assume that for that you want to run several environments for development purposes. We identify the following namespaces for support.
-- prod (Production)
-- acce (Acceptation)
-- stag (Staging)
-- test (Testing)
-- dev (Development)
-
-Because we base the common ground infrastructure on kubernetes, and we want to keep a hard separation between environment we also assume that you are using your environment as a namespace
-
-Symfony library management gives us the option to define the libraries on a per environment base, you can find that definition in the [bundle config](api/config/bundles.php)
-
-Besides the API environments the component also ships with additional tools/environments but those are not meant to be deployed
-- client (An react client frontend)
-- admin (An read admin interface)
-
-On the local development docker deploy the client environment is used as default instead of the production version of the api.
-
Api Versioning
-------
-As per [landelijke API-strategie.](https://geonovum.github.io/KP-APIs/#versioning) major versions in endpoint minor versions in header, for this the `API-Version` is used (instead of the `api-version` header used in haal centraal)
-
+As per the [landelijke API-strategie](https://geonovum.github.io/KP-APIs/#versioning) we provide/ask major versions in the endpoint and minor versions in the header; for this the `API-Version` header is used (instead of the `api-version` header used in haal centraal).
__solution__
-The fields parameter and functionality has been implemented as an array, and should be used that way. We do howver support an comma separted value list.
+We implement both endpoint and header versioning.
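+
+A sketch of how that could look (version numbers are illustrative): the major version lives in the path, while the minor version travels in the `API-Version` header.
+
+```
+GET /v1/zaken HTTP/1.1
+API-Version: 1.0.2
+```
+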
Extending
-------
@@ -161,7 +138,7 @@ The extend parameter has been implemented as an array
Archivation
-------
-There is a need (by law) for archivation, meaning that we should only keep resources for a fixed amount of time and delete them there afther. In line with the extending and fields principle whereby we only want resource properties that we need when we needid, it is deemded good practice make a sub resource of the archivation properties. For the archivation proterties the [zgw](https://zaken-api.vng.cloud/api/v1/schema/#operation/zaak_list) is followed and translated to englisch.
+There is a need (by law) for archivation, meaning that we should only keep resources for a fixed amount of time and delete them thereafter. In line with the extending and fields principle, whereby we only want the resource properties that we need when we need them, it is deemed good practice to make a sub-resource of the archivation properties. For the archivation properties the [zgw](https://zaken-api.vng.cloud/api/v1/schema/#operation/zaak_list) is followed and translated to English.
```json
@@ -173,15 +150,15 @@ There is a need (by law) for archivation, meaning that we should only keep resou
}
```
-This gives us an intresting thought, acording to [NL API Strategie](https://docs.geostandaarden.nl/api/API-Strategie/#api-10-implement-operations-that-do-not-fit-the-crud-model-as-sub-resources) subresources should have there own endpoint. Therefore we could use a archive sub of a difrend object for archivation rules e.g. /zaken/{uuid}/archivation for a verzoek. This in itself leads credence to the thought that archivation should have its own central crud api.
+This gives us an interesting thought: according to the [NL API Strategie](https://docs.geostandaarden.nl/api/API-Strategie/#api-10-implement-operations-that-do-not-fit-the-crud-model-as-sub-resources) sub-resources should have their own endpoint. Therefore we could use an archive sub-resource of a different resource for archivation rules, e.g. /zaken/{uuid}/archivation for a verzoek. This in itself lends credence to the thought that archivation should have its own central CRUD API.
Audittrail
-------
For audittrail we use the base mechanism as provided by [vng.cloud](https://zaakgerichtwerken.vng.cloud/themas/achtergronddocumentatie/audit-trail), we do however differ on some key points,
-- Personal data schould never be part of a log, therefore only the user id with the client should be logged (insted of the name)
+- Personal data should never be part of a log, therefore only the user id with the client should be logged (instead of the name)
- Besides an endpoint per resource there should be a general endpoint to search all audit trails of a component
-- [Timetravel](#timetravel) in combinaition with objects versioning makes the return of complete objects unnecesary. But an auditrail endpoint should support the [extend](#extending) functionalitiy to provide the option of obtaining complete objects.
+- [Time travel](#timetravel) in combination with object versioning makes the return of complete objects unnecessary. But an audit trail endpoint should support the [extend](#extending) functionality to provide the option of obtaining complete objects.
__solution__
@@ -191,17 +168,18 @@ Healthchecks
-------
From [issue 154](https://github.com/VNG-Realisatie/huwelijksplanner/issues/154)
-For healthc
+For healthchecks we use the health-json principle (or json-health to stay in line with json-ld and json-hal). This means that any endpoint `should` be capable of providing health information concerning that endpoint and the services behind it.
__solution__
+Requesting an endpoint with a `Content-Type: application/health+json` header returns a health JSON schema.
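+
+A minimal sketch of such a health response (field names follow the draft health+json format; values are illustrative):
+
+```json
+{
+    "status": "pass",
+    "version": "1",
+    "releaseId": "1.2.2",
+    "description": "health of this endpoint and the services behind it"
+}
+```
+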
Notifications
-------
-For notifications we do not YET use the current [ZGW standard](https://zaakgerichtwerken.vng.cloud/themas/achtergronddocumentatie/notificaties) since there is an [dicusion](https://github.com/VNG-Realisatie/gemma-zaken/issues/1427#issuecomment-549272696) about the posible insecurity of sending properties or data objects along with a notification. It also dosn't follow the [web standard](https://www.w3.org/TR/websub/). We wait for the conclusion of that discusion before making an implementation.
+For notifications we do not YET use the current [ZGW standard](https://zaakgerichtwerken.vng.cloud/themas/achtergronddocumentatie/notificaties) since there is a [discussion](https://github.com/VNG-Realisatie/gemma-zaken/issues/1427#issuecomment-549272696) about the possible insecurity of sending properties or data objects along with a notification. It also doesn’t follow the [web standard](https://www.w3.org/TR/websub/). We wait for the conclusion of that discussion before making an implementation.
__solution__
-In compliance with [w3.org](https://www.w3.org/TR/websub/) each endpoint returns an header containing an subscribtion url. That can be used in acordanse with the application to subscribe to both individual objects as collections. whereby collections serve as 'kanalen'. We aim to implement the ZGW notificatie component, but feel that further features on that component would be required to make to be fully suported. We will suply feature requests per issue to support this effort.
+In compliance with [w3.org](https://www.w3.org/TR/websub/) each endpoint `should` return a header containing a subscription URL that can be used, in accordance with the application, to subscribe to both individual objects and collections, whereby collections serve as 'kanalen'. We aim to implement the ZGW notificatie component, but feel that further features on that component would be required for it to be fully supported. We will supply feature requests per issue to support this effort.
Authentication
-------
@@ -210,17 +188,18 @@ __solution__
Authorization
-------
-We implement user scopes as per [vng.cloud](https://zaakgerichtwerken.vng.cloud/themas/achtergronddocumentatie/autorisatie-scopes) standard. But see problems with how the scopes are defined and named, and consider the general setup to be to focused on ZGW (including Dutch naming, zgw specific fields like maxVertrouwlijkheid and a lack of CRUD thinking). There is a further document concerning [Authentication and Authorization](https://zaakgerichtwerken.vng.cloud/themas/achtergronddocumentatie/authenticatie-autorisatie) that details how we should authenticate users and give them scopes. We agree with the principles of the document on application based authorization and the use of JWT tokens. But disagree on some key technical aspect. Most important being that the architecture doesn't take into consideration the use of one component by several organizations at once. Or scopese per property.
+We implement user scopes as per the [vng.cloud](https://zaakgerichtwerken.vng.cloud/themas/achtergronddocumentatie/autorisatie-scopes) standard, but see problems with how the scopes are defined and named, and consider the general setup to be too focused on ZGW (including Dutch naming, ZGW-specific fields like maxVertrouwlijkheid and a lack of CRUD thinking). There is a further document concerning [Authentication and Authorization](https://zaakgerichtwerken.vng.cloud/themas/achtergronddocumentatie/authenticatie-autorisatie) that details how we should authenticate users and give them scopes. We agree with the principles of the document on application-based authorization and the use of JWT tokens, but disagree on some key technical aspects. The most important being that the architecture doesn't take into consideration the use of one component by several organizations at once, or scopes per property.
__solution__
-No solution as of yet, so there is no implementation of Authorization or Scopes. We aim to implement the ZGW authorisatie component, but feel that further features on that component would be required to make to be fully suported. We will suply feature requests per issue to support this effort.
+No solution as of yet, so there is no implementation of Authorization or Scopes. We aim to implement the ZGW autorisatie component, but feel that further features on that component would be required for it to be fully supported. We will supply feature requests per issue to support this effort.
Timetravel
-------
A part of the [haal centraal](https://raw.githubusercontent.com/VNG-Realisatie/Haal-Centraal-BRP-bevragen/master/api-specificatie/Bevraging-Ingeschreven-Persoon/openapi.yaml) the concept of timetravel has been introduced, as in getting the version of an object as it was on a given date. For this the `geldigop` [see the docs](file:///C:/Users/ruben/Desktop/doc_gba_historie.html#operation/getBewoningen) header is used. In addition the `geldigvan` and `geldigtot` are introduced as collection filters.
-The commonground proto componant natively supports time traveling on all entities that are annotaded with the @Gedmo\Loggable, this is done by adding the ?validOn=[date] query to a request, date can either be a datetime or datedatime string. Any value supported by php's [strtotime()](https://www.php.net/manual/en/function.strtotime.php) is supported. Keep in mind that this returns the entity a as it was valid on that time or better put, the last changed version BEFORE that moment. To get a complete list of all changes on a item the /audittrail endpoint can be used.
+The commonground proto component natively supports time traveling on all resources that are annotated with the @Gedmo\Loggable annotation. This is done by adding the ?validOn=[date] query to a request; the date can be either a date or a datetime string, and any value supported by php's [strtotime()](https://www.php.net/manual/en/function.strtotime.php) is supported. Keep in mind that this returns the entity as it was valid at that time, or better put, the last changed version BEFORE that moment. To get a complete list of all changes on an item the [/audittrail](#audittrail) endpoint can be used.
__solution__
In compliance with [schema.org](https://schema.org/validFrom) `geldigop`, `geldigvan` and `geldigtot` are implemented as `validOn`, `validFrom` and `validUntil`, and can be used as query parameters on collection operations.
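+
+For example, fetching a collection as it was valid on a given date could look like this (dates are illustrative; any strtotime()-compatible value works):
+
+```
+GET /zaken?validOn=2019-12-01
+GET /zaken?validFrom=2019-01-01&validUntil=2019-12-31
+```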
@@ -234,7 +213,7 @@ In the [zaak-api](https://zaken-api.vng.cloud/api/v1/schema/#operation/zaak_list
Translations
-------
-We support translations trough the `Accept-Language` header (read the [docs](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Accept-Language)), the fallback langouge for all messages is englisch
+We support translations through the `Accept-Language` header (read the [docs](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Accept-Language)); the fallback language for all messages is English.
Errors
-------
@@ -256,7 +235,7 @@ We support both comma and bracket notation on array's, but only document bracket
Container Setup
-------
- https://medium.com/shiphp/building-a-custom-nginx-docker-image-with-environment-variables-in-the-config-4a0c36c4a617
+https://medium.com/shiphp/building-a-custom-nginx-docker-image-with-environment-variables-in-the-config-4a0c36c4a617
Filtering
@@ -268,7 +247,7 @@ __Regex Exact__
__Regex Contains__
__Like__
-The like filters is used to search for enities with the traditional sql LIKE operator. If pattern does not contain percent signs or underscores, then the pattern only represents the string itself; in that case LIKE acts like the equals operator. An underscore (_) in pattern stands for (matches) any single character; a percent sign (%) matches any sequence of zero or more characters.
+The like filter is used to search for resources with the traditional SQL LIKE operator. If the pattern does not contain percent signs or underscores, then the pattern only represents the string itself; in that case LIKE acts like the equals operator. An underscore (_) in the pattern stands for (matches) any single character; a percent sign (%) matches any sequence of zero or more characters.
Some examples:
@@ -276,6 +255,7 @@ Some examples:
'abc' LIKE 'a%' true
'abc' LIKE '_b_' true
'abc' LIKE 'c' false
+
LIKE pattern matching always covers the entire string. Therefore, if it's desired to match a sequence anywhere within a string, the pattern must start and end with a percent sign.
To match a literal underscore or percent sign without matching other characters, the respective character in pattern must be preceded by a backslash.
@@ -283,10 +263,32 @@ To match a literal underscore or percent sign without matching other characters,
## Kubernetes
### Loadbalancers
-We no longer provide a load balancer per component, since this would require a ip per component. Draining ip's on mult component kubernetes clusters. In stead we make componentes available as an interner service
+We no longer provide a load balancer per component, since this would require an IP address per component (and IPv4 addresses are in short supply). Instead we make components available as internal services. A central load balancer can then be used to expose several APIs through a single entry point.
### Server naming
-A component is (speaking in kubernetes terms) a service that is available at
+A component is (speaking in Kubernetes terms) a service that is available at a name corresponding to its designation.
+
+### Domain Build-up and routing
+By convention the component assumes that you follow the common ground domain name build-up, meaning {environment}.{component}.{rest of domain}. That means that only the first two URL parts are used for routing. It is also assumed that when no environment is supplied the production environment should be offered, e.g. a proper domain for the production API of the verzoeken registratie component would be prod.vrc.zaakonline.nl, but it should also be reachable under vrc.zaakonline.nl. The proper location for the development environment should always be dev.vrc.zaakonline.nl.
+
+### Environments and namespacing
+We assume that you want to run several environments for development purposes. We identify the following namespaces for support:
+- prod (Production)
+- acce (Acceptation)
+- stag (Staging)
+- test (Testing)
+- dev (Development)
+
+Because we base the common ground infrastructure on Kubernetes, and we want to keep a hard separation between environments, we also assume that you are using your environment as a namespace.
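+
+For example, creating the matching namespaces on a cluster could look like this (a sketch, assuming kubectl access to your cluster):
+
+```CLI
+$ kubectl create namespace dev
+$ kubectl create namespace stag
+$ kubectl create namespace prod
+```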
+
+Symfony library management gives us the option to define the libraries on a per-environment basis; you can find that definition in the [bundle config](api/config/bundles.php).
+
+Besides the API environments the component also ships with additional tools/environments, but those are not meant to be deployed:
+- client (A React client frontend)
+- admin (A React admin interface)
+
+On the local development Docker deployment the client environment is used as the default instead of the production version of the API.
+
## Data types
@@ -325,3 +327,4 @@ A component is (speaking in kubernetes terms) a service that is available at
| string | iban | | | | |
| | | | | | |
+
diff --git a/TUTORIAL.md b/TUTORIAL.md
index 74aad7ba..ed149d7b 100644
--- a/TUTORIAL.md
+++ b/TUTORIAL.md
@@ -8,7 +8,7 @@ What do you need for this tutorial?
* Docker for desktop
## Before you begin
-For the steps considering the generation of entities an example entity a availale, feel free to [take a look](https://github.com/ConductionNL/Proto-component-commonground/blob/master/api/src/Entity/ExampleEntity.php) at it if you have trouble figuring out the code.
+For the steps concerning the generation of resources (or entities, as Symfony calls them) an example resource is available; feel free to [take a look](https://github.com/ConductionNL/Proto-component-commonground/blob/master/api/src/Entity/ExampleEntity.php) at it if you have trouble figuring out the code.
## Setting up your environment
@@ -57,10 +57,10 @@ $ docker volume prune
**What are we looking at?**
The Common Ground base component provides a bit more than just a development interface, it also includes an example application and a backend that automatically hooks into your api. For now we're just going to focus on our api, but it is good to read up on all the features of the Common Ground base component here.
-## Adding your own objects
-You can now access your api at http://localhost:8080/, as you can see it's pre-loaded with some example objects. Let's replace them with your own objects!
+## Adding your own resources
+You can now access your api at http://localhost:8080/; as you can see, it's pre-loaded with some example resources. Let's replace them with your own resources!
-First let's remove the objects currently in the api, we can do that by just removing the entities form our code base, navigate to the folder where you stored your code and open the folder api/src/Entity , you can find the example entities (our name for objects) there. Just delete all the php files in that folder.
+First let's remove the resources currently in the api. We can do that by just removing the resources from our code base: navigate to the folder where you stored your code and open the folder api/src/Entity; you can find the example entities (the Symfony name for resources) there. Just delete all the PHP files in that folder.
Next let's add our own entities. We can do this in two ways: we can do old-fashioned coding, but we can also use the built-in maker bundle of the proto component to quickly generate our entities for us (without the fuss of actual coding).
@@ -69,7 +69,7 @@ Let's open a new command line window and navigate to our root folder, exactly li
```CLI
$ docker-compose exec php bin/console make:entity
```
-We should now see a wizard that allows us to either make new entities, or add parameters to existing entities (by supplying the name of an existing entity).
+We should now see a wizard that allows us to either make new entities, or add parameters to existing entities (by supplying the name of an existing resource).
## Keeping your repository up to date with the Conduction Common Ground component
@@ -107,7 +107,7 @@ git merge upstream --allow-unrelated-histories
Keep in mind that you wil need to make sure to stay up to date about changes on the Common Ground component repository.
## Renaming your component
-Right now the name of your component is 'commonground' that's that's fine while running it locally or in its own kubernetes cluster but wil get you in when running it with other components when it without using a name space. So its good practice to name your component distinctly. But besides al of these practical reasons its of course also just cool to name your child before you unleash it on the unsuspecting common ground community.
+Right now the name of your component is `commonground component` and its unique id `cg`. That's fine while running it locally or in its own kubernetes cluster, but will get you in trouble when running it alongside other components without using a namespace. So it's good practice to name your component distinctly. But besides all of these practical reasons it's of course also just cool to name your child before you unleash it on the unsuspecting common ground community.
Oke, so before we can name the component we need to come up with a name. There are a couple of conventions here. First of all the name should tell us what the component does, or is supposed to do, in one or two words. So we would normally call a component about dogs the DogComponent and one about cats the CatComponent. The second convention is that we don't usually actually name our component 'component' but indicate its position in the common ground architecture. For that we have the following options:
* Catalogus
@@ -116,34 +116,11 @@ Oke, so before we can nae the component we need to come up with a name. There ar
* Application
* Tool
-The we need to touch te following files
+The actual name change is rather simple though: just head over to the .env file that contains all our config and change the appropriate variables
* .env
-* dockercompose.yaml
-* api/.env
-* api/helm/values.yaml
-* api/docker/nginx/
-## Adding more openapi documantation
-
-```php
-//...
- /**
- * @ApiProperty(
- * attributes={
- * "openapi_context"={
- * "description" = "The name of a organisation",
- * "type"="string",
- * "format"="string",
- * "example"="My Organisation"
- * }
- * }
- * )
- */
- private $name;
-//...
-```
-
-## Setting up security and access (also helps with serialization)
+## Setting up security and access
+We want to secure our resources in such a way that only users or applications with the proper rights can access and update properties.
```PHP
// src/Entity/Organisation.php
@@ -169,7 +146,7 @@ class Organisation
```
## Using validation
-Right now we are just accepting data and passing them on to the database, and in a mock or poc context this is fine. Most of the calls will end up being get requests anyway. But in case that we actually want our clients to make post to the api it would be wise to add some validation to the fields we are recieving. Luckely for us the component comes pre packed with a valdiation tool that we can configure from our entity through annotion. If we for example want to make a field required we could do so as follows:
+Right now we are just accepting data and passing it on to the database, and in a mock or poc context this is fine. Most of the calls will end up being GET requests anyway. But in case we actually want our clients to make POST requests to the api it would be wise to add some validation to the fields we are receiving. Luckily for us the component comes pre-packed with a validation tool that we can configure from our resources through annotations. If we for example want to make a field required we could do so as follows:
```PHP
// src/Entity/Organisation.php
@@ -196,7 +173,7 @@ Keep in mind that we need to add the assert annotation to our class dependencies
More information on using validation can be found at the [symfony website](https://symfony.com/doc/current/validation.html), but it is also worth noting that this component comes pre-packed with some typical NL validators like BSN. You can find those [here]().
## Using UUID
-As default doctrine uses auto increment integers as identifiers (1,2, etc). For modern web applications we however prefer the use of UUID's. (e.g. e2984465-190a-4562-829e-a8cca81aa35d). Why? Wel for one it is more secure integer id's are easily guessable and make it possible to "ask" endpoint about objects that you should not know about. But UUID's also have a benefit in future proofing the application. If we in the future want to merge a table with another table (for example because two organisations using a component perform a merger) then we would have to reassign al id's and relations if we where using int based id's (both tables would have a row 1,2 etc) with UUID's however the change of doubles range somewhere in the billions. Meaning that it is likely that we only need to either reidentify only a handful of rows or more likely none at al! Turning our entire migration into a copy paste action.
+By default doctrine uses auto-increment integers as identifiers (1, 2, etc.). For modern web applications we however prefer the use of UUID's (e.g. e2984465-190a-4562-829e-a8cca81aa35d). Why? Well, for one it is more secure: integer id's are easily guessable and make it possible to "ask" an endpoint about resources that you should not know about. But UUID's also have a benefit in future-proofing the application. If we in the future want to merge a table with another table (for example because two organisations using a component perform a merger) then we would have to reassign all id's and relations if we were using int-based id's (both tables would have a row 1, 2, etc.); with UUID's, however, the chance of doubles ranges somewhere in the billions, meaning that we would likely need to re-identify only a handful of rows, or more likely none at all! Turning our entire migration into a copy-paste action.
The proto component supports Ramsey's uuid objects strategy out of the box, so to use UUID's as identifiers we simply need to add the ApiProperty as a dependency
@@ -229,7 +206,7 @@ with
* identifier=true,
* attributes={
* "openapi_context"={
- * "description" = "The UUID identifier of this object",
+ * "description" = "The UUID identifier of this resource",
* "type"="string",
* "format"="uuid",
* "example"="e2984465-190a-4562-829e-a8cca81aa35d"
@@ -247,7 +224,7 @@ with
//..
```
-and remove the integer on the getter turning this:
+and remove the `: ?int` return type on the getter, turning this:
```PHP
//...
@@ -272,7 +249,7 @@ into this
and you're all done
### Trouble shooting
-If you have already spun your component including your new entity your going to run into some trouble because doctrine is going to try changing your primary key column (id) from an integer to string (tables tend not to like that). In that case its best to just drop your database and reinstall it using the following commands:
+If you have already spun up your component including your new resource, you're going to run into some trouble because doctrine is going to try changing your primary key column (id) from an integer to a string (tables tend not to like that). In that case it's best to just drop your database and reinstall it using the following commands:
```CLI
$ bin/console doctrine:schema:drop
@@ -281,9 +258,9 @@ $ bin/console doctrine:schema:update --force
## Advanced data sets
-Oke lets make it complex, until now we have just added some simple entities to our component, but what if we want to attaches one entity to another? Fortunately our build in database engine support rather complex scenarios called associations. So let [take a look](https://www.doctrine-project.org/projects/doctrine-orm/en/2.6/reference/association-mapping.html) at that.
+Oke, let's make it complex: until now we have just added some simple entities to our component, but what if we want to attach one resource to another? Fortunately our built-in database engine supports rather complex scenarios called associations. So let's [take a look](https://www.doctrine-project.org/projects/doctrine-orm/en/2.6/reference/association-mapping.html) at that.
-Baffled? Wel its rather complex. But remember that Make:entity command that we used earlier? That actually accepts relations as a data type. Or to but it simply instead of using the default 'string' we could just type "ManyToOne" and it will just fire up some questions that will help it determine how you want your relations to be.
+Baffled? Well, it's rather complex. But remember that make:entity command that we used earlier? That actually accepts relations as a data type. Or to put it simply: instead of using the default 'string' we could just type "ManyToOne" and it will fire up some questions that will help it determine how you want your relations to be, as sketched below.
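+
+A rough sketch of that wizard interaction (output abbreviated; entity and property names are illustrative):
+
+```CLI
+$ docker-compose exec php bin/console make:entity
+ Class name of the entity to create or update:
+ > Organisation
+ New property name (press <return> to stop adding fields):
+ > stuffs
+ Field type (enter ? to see all types) [string]:
+ > OneToMany
+```
+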
### Trouble shooting
@@ -315,7 +292,7 @@ We can now prevent circular references by setting a max depth on the properties
```PHP
//...
/**
- * @var ArrayCollection $stuffs Some stuff that is attached to this example object
+ * @var ArrayCollection $stuffs Some stuff that is attached to this example resource
*
* @MaxDepth(1)
* @Groups({"read","write"})
@@ -326,7 +303,7 @@ We can now prevent circular references by setting a max depth on the properties
```
## Data fixtures
-For testing cases it can be useful to use data fixtures a predefined set of data that fills the database of your component at startup. Since we use php classes to describe our objects creating fixtures is easy (you can find an example in your project folder at api/src/DataFixtures). We simply go trough some classes assign values and persist them to the database. Once we have written our fixtures we can use a single command to load them
+For testing cases it can be useful to use data fixtures: a predefined set of data that fills the database of your component at startup. Since we use PHP classes to describe our resources, creating fixtures is easy (you can find an example in your project folder at api/src/DataFixtures). We simply go through some classes, assign values and persist them to the database. Once we have written our fixtures we can use a single command to load them:
```CLI
$ bin/console doctrine:fixtures:load --env=dev
@@ -350,42 +327,39 @@ When using Github. To set up a webhook, go to the settings page of your reposito
Now every time you update your repository the commonground dev page will be alerted, rescan your repository and do all the appropriate platform actions. It's just as easy as that.
-Automated Testing and Deployment (continues integration)
+Continuous integration
-------
-The following bit of the tutorial requires two additional accounts
-- [https://hub.docker.com/](https://hub.docker.com/) (You might already have this for docker for desktop)
-- [https://travis-ci.org](https://travis-ci.org) (You can use you github account)
-
-The proto component ships with a pre-fab continues integration script based on travis. What does this mean you ask? Continuous integration (or CI for short) is an optimized and automated way for your code to become part of your projects. In the case of your commonground component that means that we will automatically validate new code commits or pushes and (if everything checks out) build that code and deploy the containers thereof to docker hub. Making is possible to update al the environments that use those components.
+> The following bit of the tutorial requires an additional account
+> - [https://hub.docker.com/](https://hub.docker.com/) (You might already have this for docker for desktop)
-Okay, that's nice, but how do we do that? Actually it is very simple. First of all make sure you have a docker account, log into [docker hub](https://hub.docker.com/) and have a look around. We don't need to create anything just yet, but it is nice to get a feeling of the place. As you can see docker hub also uses repositories etc. So that recognizable.
+The proto component ships with a pre-fab continuous integration script based on GitHub Actions (there is also a travis script in here if you want it). What does this mean, you ask? Continuous integration (or CI for short) is an optimized and automated way for your code to become part of your projects. In the case of your commonground component that means that we will automatically validate new code commits or pushes and (if everything checks out) build that code and deploy the containers thereof to docker hub, making it possible to update all the environments that use those components. What's even better is that we check your code for known security issues, so whenever a dependency or library has a security issue you will be notified to take action.
-Next we need to prepare our github repository that holds our code. For the travis script to work as intended we need to create a couple of branches(if we don't have those already) open up your git interface and create a branch called 'development' and a branch called 'staging'. Don't forget to push the branches so that they are present on github (and not just on your local machine).
+Okay, that's nice, but how do we do that? Actually it is very simple. You do nothing. The scripts are already enabled by default. Just go to the actions tab of your github repository to see the results whenever you push code.
-Oke just one more place to go and that is travis, head over to [https://travis-ci.org](https://travis-ci.org) and login with your github account. If everything is alright you should see your repository there. Activate it by pressing 'activate repository' and then go to 'More options' -> 'Settings' and scroll down to environment variables. Here we can present travis wit the variables that it need to execute our build script. Lets first set the common variables that we need for all our branches: `DOCKER_PASSWORD` your docker password,`DOCKER_REGISTRY` docker.io/[your username] ,`DOCKER_USERNAME` your docker user name. This will be used by travis to push the completed containers into docker hub. Next we need to specify a couple of variables that are branch specific. Or to be more exact, set the same variable `APP_ENV` with different values for different branches. It needs to be 'staging'->stag,'master'->prod,'development'->dev.
+There is however a bit extra that you can do, and that is to insert your docker hub credentials into the repository. You can do that under the settings->secrets tab of your repository by setting a `DOCKERHUB_USERNAME` and `DOCKERHUB_PASSWORD` secret containing (you might have guessed it) your dockerhub username and password. And all done! Head back over to the code on your computer and make a small change. Then commit and push that change to github. Wait for the action to complete and head over to your docker hub repository page. You should find your built containers ready for you.
-And all done! Head over back to the code on your computer and make a small change. Then commit push that change into github. Travis should automatically pick op your change and start a build.
-
-
-### Unit / Behat
+Continuous deployment
+-------
+> The following bit of the tutorial requires an additional account
+> - [https://www.digitalocean.com/](https://www.digitalocean.com/)
-TODO
+Actually the repository goes a bit further than just getting your containers ready to deploy, it can actually deploy them for you! Again all the code is already there. The only thing that you need to do is add a kubeconfig file. You can get a kubeconfig file from a running kubernetes cluster; it provides your repository with both the credentials and endpoints it needs to deploy the application. How you get a kubeconfig file differs a bit from provider to provider, but you can get more information on that here:
-### Postman
-TODO
+- [Digitalocean](https://www.digitalocean.com/docs/kubernetes/how-to/connect-to-cluster/)
+- [Google Cloud](https://cloud.google.com/sdk/gcloud/reference/container/clusters/get-credentials)
+- [Amazon AWS](https://docs.aws.amazon.com/eks/latest/userguide/create-kubeconfig.html)
-### Trouble shooting
-Please make sure that your github repository is set to public, and keep in mind that a complex travis build (and certainly one that includes a pushing of containers can take up to 20 minutes).
+After you have obtained a kubeconfig you need to save it to your repository as a secret (NEVER COMMIT A KUBECONFIG FILE); use the secret name `KUBECONFIG` to save your kubeconfig file. Now simply commit and push your code to your repository and presto! You have a working common-ground component online.
Documentation and docblocks
-------
-TODO
+You want both your redoc documentation and your code to be readable and reusable for other developers. To this effect we use docblock annotations. You can read more about that [here](https://docs.phpdoc.org/references/phpdoc/basic-syntax.html), but the basics are this: we supply each class and property with a docblock contained within /\* \*/ characters. At the very least we want to describe our properties, the expected results and example data (see the example under [audittrail](#audittrail)).
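+
+A minimal sketch of such a docblock (the property and example value are illustrative):
+
+```PHP
+//...
+    /**
+     * @var string The name of this organisation
+     *
+     * @example My Organisation
+     */
+    private $name;
+//...
+```
+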
Audittrail
-------
As you might expect the proto-component ships with a neat function for generating audit trails, which basically consists of three parts.
-First we need to activate logging on the entities that we want logged (for obvious security reasons we don't log entity changes by default) to do that by adding the `@Gedmo\Loggable` annotation to our php class, which should then look something like:
+First we need to activate logging on the entities that we want logged (for obvious security reasons we don't log resource changes by default). We do that by adding the `@Gedmo\Loggable` annotation to our php class, which should then look something like:
```PHP
//...
@@ -440,17 +414,3 @@ class ExampleEntity
```
And now we have a fully nl api strategy integrated audit trail!
-
-
-Setting up automated deployment (continues delivery)
--------
-TODO
-
-## Commonground specific data types
-TODO
-
-### incompleteDate
-
-
-### underInvestigation
-
diff --git a/api/src/Swagger/SwaggerDecorator.php b/api/src/Swagger/SwaggerDecorator.php
index 02e872d2..28b4d3e5 100644
--- a/api/src/Swagger/SwaggerDecorator.php
+++ b/api/src/Swagger/SwaggerDecorator.php
@@ -127,6 +127,12 @@ public function normalize($object, $format = null, array $context = [])
// Okay, this is really butt-ugly
$schemas = (array) $docs['definitions'];
foreach ($schemas as $schemaName => $schema) {
+
+ // We can only merge if we actually have additional docs for this schema
+ if (!array_key_exists($schemaName, $additionalDocs)) {
+ continue;
+ }
+
$additionalDocs[$schemaName] = array_merge((array) $schema, $additionalDocs[$schemaName]);
$properties = (array) $schema['properties'];