diff --git a/.drone.yml b/.drone.yml
index 978ba5be..38613f9a 100644
--- a/.drone.yml
+++ b/.drone.yml
@@ -1,9 +1,10 @@
-# Copyright (c) 2022 SIGHUP s.r.l All rights reserved.
+# Copyright (c) 2017-present SIGHUP s.r.l All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
-kind: pipeline
name: license
+kind: pipeline
+type: docker
steps:
- name: check
@@ -11,10 +12,11 @@ steps:
pull: always
commands:
- go get -u github.com/google/addlicense
- - addlicense -c "SIGHUP s.r.l" -v -l bsd --check .
+ - addlicense -c "SIGHUP s.r.l" -v -l bsd -y "2017-present" --check .
---
-kind: pipeline
name: policeman
+kind: pipeline
+type: docker
depends_on:
- license
@@ -39,7 +41,7 @@ steps:
- clone
- name: render
- image: quay.io/sighup/e2e-testing:1.1.0_0.2.2_2.16.1_1.9.4_1.20.7_3.8.7_2.4.1
+ image: quay.io/sighup/e2e-testing:1.1.0_0.7.0_3.1.1_1.9.4_1.24.1_3.8.7_4.21.1
pull: always
depends_on:
- clone
@@ -56,8 +58,9 @@ steps:
- /conftest test -p /policies distribution.yml
---
-kind: pipeline
name: e2e-kubernetes-1.22
+kind: pipeline
+type: docker
node:
runner: internal
@@ -74,20 +77,22 @@ trigger:
include:
- refs/tags/**
- refs/heads/master
+ - refs/heads/main
+ - refs/heads/release-v**
steps:
- name: init
- image: quay.io/sighup/e2e-testing-drone-plugin:v0.13.0
+ image: quay.io/sighup/e2e-testing-drone-plugin:v1.24.0
pull: always
volumes:
- - name: shared
- path: /shared
+ - name: shared
+ path: /shared
depends_on: [ clone ]
settings:
- action: custom-cluster-122
- pipeline_id: cluster-122
+ action: custom-cluster-124
+ pipeline_id: cluster-124
local_kind_config_path: tests/config/kind-config-custom
- cluster_version: '1.22.0'
+ cluster_version: '1.24.0'
instance_path: /shared
instance_size: 2-extra-large
aws_default_region:
@@ -110,11 +115,11 @@ steps:
from_secret: dockerhub_password
- name: test
- image: quay.io/sighup/e2e-testing:1.1.0_0.2.2_2.16.1_1.9.4_1.22.0_3.8.7_2.4.1
+ image: quay.io/sighup/e2e-testing:1.1.0_0.7.0_3.1.1_1.9.4_1.24.1_3.8.7_4.21.1
pull: always
volumes:
- - name: shared
- path: /shared
+ - name: shared
+ path: /shared
depends_on: [ init ]
commands:
- export KUBECONFIG=/shared/kube/kubeconfig-122
@@ -127,7 +132,7 @@ steps:
- bats -t tests/opa.sh
- name: destroy
- image: quay.io/sighup/e2e-testing-drone-plugin:v0.13.0
+ image: quay.io/sighup/e2e-testing-drone-plugin:v1.24.0
depends_on: [ test ]
settings:
action: destroy
@@ -153,16 +158,17 @@ steps:
from_secret: dockerhub_password
when:
status:
- - success
- - failure
+ - success
+ - failure
volumes:
- - name: shared
- temp: {}
+- name: shared
+ temp: {}
---
-kind: pipeline
name: release
+kind: pipeline
+type: docker
depends_on:
- e2e-kubernetes-1.22
@@ -189,7 +195,7 @@ steps:
- refs/tags/**
- name: prepare-release-manifests
- image: quay.io/sighup/e2e-testing:1.1.0_0.2.2_2.16.1_1.9.4_1.18.19_3.8.7_2.4.1
+ image: quay.io/sighup/e2e-testing:1.1.0_0.7.0_3.1.1_1.9.4_1.24.1_3.8.7_4.21.1
pull: always
depends_on: [ clone ]
environment:
diff --git a/Furyfile.yml b/Furyfile.yml
index 781e98a7..7cea38b0 100644
--- a/Furyfile.yml
+++ b/Furyfile.yml
@@ -3,12 +3,13 @@
# license that can be found in the LICENSE file.
versions:
- networking: v1.9.0
- monitoring: v1.14.2
- logging: v1.10.3
- ingress: v1.12.2
- dr: v1.9.2
- opa: v1.6.2
+ networking: v1.10.0
+ monitoring: v2.0.1
+ logging: v3.0.1
+ ingress: v1.13.1
+ dr: v1.10.1
+ opa: v1.7.3
+ auth: v0.0.2
bases:
- name: networking
@@ -17,3 +18,8 @@ bases:
- name: ingress
- name: dr
- name: opa
+ - name: auth
+
+modules:
+ - name: ingress
+ - name: dr
diff --git a/README.md b/README.md
index 204e9d15..003de328 100644
--- a/README.md
+++ b/README.md
@@ -7,16 +7,16 @@
Kubernetes Fury Distribution (KFD) is a certified battle-tested Kubernetes distribution based purely on upstream Kubernetes.
-[](http://ci.sighup.io/sighupio/fury-distribution)
-[](https://github.com/sighupio/fury-distribution/releases/latest)
+[](http://ci.sighup.io/sighupio/fury-distribution)
+[](https://github.com/sighupio/fury-distribution/releases/latest)
[](https://kubernetes.slack.com/archives/C0154HYTAQH)
-[](https://github.com/sighupio/fury-distribution/blob/master/LICENSE)
+[](https://github.com/sighupio/fury-distribution/blob/main/LICENSE)
## Overview
Kubernetes Fury Distribution (KFD) is a [CNCF certified](https://landscape.cncf.io/?selected=fury-distribution) battle-tested Kubernetes distribution based purely on upstream Kubernetes.
-It is developed and maintained by [SIGHUP](https://sighup.io/) and the community, and it is fully open source.
+It is developed and maintained by [SIGHUP][sighup-site] and the community, and it is fully open source.
> đ¯ The goal of Fury is to turn any standard Kubernetes cluster into a fully-configured production-grade cluster.
@@ -36,45 +36,47 @@ Kubernetes Fury Distribution is structured on modules, and each module has a set
- A package is a single unit of functionality.
- A module groups packages that are functionally related together.
-> All modules are open source, widely used, easily customizable, and pre-configured with sane defaults.
+> All modules are open source, widely used, easily customizable, and pre-configured with sane defaults and tested to work well together.
The standard way to deploy KFD is to:
-- Deploy all the [Core Modules](#core-modules) of the distribution
-- Deploy (if needed) any of the [Addon modules](#addon-modules)
+- Deploy all the [Core Modules](#core-modules-) of the distribution
+- Deploy (if needed) any of the [Addon modules](#add-on-modules-)
### Core Modules đĻ
-Core modules provides essential functionality to the distribution.
+Core modules provide essential functionality to the distribution for production-grade clusters.
-
+
-| Module | Included Release | Description |
+| Module | Included Release | Description |
| ------------------------------- | ------------------------------ | ----------------------------------------------------------------------------------------- |
| [Networking][networking-module] | ![Version][networking-version] | Networking functionality via Calico CNI |
| [Ingress][ingress-module] | ![Version][ingress-version] | Fast and reliable Ingress Controller and TLS certificate management |
-| [Logging][logging-module] | ![Version][logging-version] | A centralized logging solution based on the EFK stack (Elastic, Fluentd and Kibana) |
+| [Logging][logging-module] | ![Version][logging-version] | A centralized logging solution based on the EFK stack (Elastic, Fluentd and Kibana) |
| [Monitoring][monitoring-module] | ![Version][monitoring-version] | Monitoring and alerting functionality based on Prometheus, AlertManager and Grafana |
| [Disaster Recovery][dr-module] | ![Version][dr-version] | Backup and disaster recovery solution using Velero |
| [OPA][opa-module] | ![Version][opa-version] | Policy and Governance for your cluster using OPA Gatekeeper and Gatekeeper Policy Manager |
+| [Auth][auth-module] | ![Version][auth-version] | Improved auth for your Kubernetes Cluster and its applications |
### Add-on Modules đĻ
Add-on modules provide additional functionality to the distribution.
-| Module | Latest Release | Description |
+| Module | Latest Release | Description |
| ----------------------------------- | -------------------------------- | ---------------------------------------------------------------------------- |
| [Kong][kong-module] | ![Version][kong-version] | Add Kong API Gateway for Kubernetes applications via Kong Ingress Controller |
| [Service Mesh][service-mesh-module] | ![Version][service-mesh-version] | Deploy a service mesh on top of KFD |
| [Registry][registry-module] | ![Version][registry-version] | Integrate a Container Registry solution |
+| [Storage][storage-module] | ![Version][storage-version] | Rook (Ceph Operator) based Storage solution on Kubernetes |
## Get started with KFD đ
-To get started with KFD, please head to the [documentation site](https://docs.kubernetesfury.com/docs/distribution/#%EF%B8%8F-how-do-i-get-started)
+To get started with KFD, please head to the [quickstart guides on the documentation site](https://docs.kubernetesfury.com/docs/distribution/#%EF%B8%8F-how-do-i-get-started).
## Issues đ
@@ -86,41 +88,46 @@ If the problem is related to a specific module, open the issue in the module rep
If you are looking to run KFD in production and would like to learn more, SIGHUP (the company behind the Fury ecosystem) can help. Feel free to [email us](mailto:sales@sighup.io) or check out [our website](https://sighup.io).
-## Compatibility
+## Support & Compatibility đĒĸ
-| Kubernetes Version | Compatibility | Notes |
-|--------------------|:------------------:|-----------------------------------------------------|
-| `1.22.x` | :white_check_mark: | No known issues |
+Current supported versions of KFD are:
-Check the [compatibility matrix][compatibility-matrix] for additional information about previous releases of the modules.
+| KFD Version | Kubernetes Version |
+| :----------------------------------------------------------------------------: | :----------------: |
+| in development | `1.25.x` |
+| [`1.24.0`](https://github.com/sighupio/fury-distribution/releases/tag/v1.24.0) | `1.24.x` |
+| [`1.23.3`](https://github.com/sighupio/fury-distribution/releases/tag/v1.23.3) | `1.23.x` |
+| [`1.22.1`](https://github.com/sighupio/fury-distribution/releases/tag/v1.22.1) | `1.22.x` |
-Also, check the [versioning documentation file][versioning] to know more about the versioning scheme of the distribution and the upgrade path.
+| Installer / KFD Version | 1.24.0 | 1.23.3 | 1.22.1 |
+| ---------------------------------------------------------------------- | :---------------------------------------------------: | :--------------------------------: | :--------------------------------: |
+| [on-premises](https://github.com/sighupio/fury-kubernetes-on-premises) | :white_check_mark: | :white_check_mark: | :white_check_mark: |
+| [EKS](https://github.com/sighupio/fury-eks-installer) | :white_check_mark: | :white_check_mark: | :white_check_mark: |
+| [GKE](https://github.com/sighupio/fury-gke-installer) | :white_check_mark: | :white_check_mark: | :white_check_mark: |
+| [AKS](https://github.com/sighupio/fury-aks-installer) | :white_check_mark: | :white_check_mark: | :white_check_mark: |
-## Contributing đ¤
+Check the [compatibility matrix][compatibility-matrix] for additional information about previous releases of the Distribution.
-If you wish to contribute please read the [Contributing Guidelines](docs/CONTRIBUTING.md).
+Also, check the [versioning documentation file][versioning] to know more about the versioning scheme of the distribution and the upgrade path.
## CNCF Certified đ
-Kubernetes Fury Distribution has been certified by the [CNCF] *(Cloud Native Computing foundation)* as a *Certified Kubernetes Distribution*. Certified solutions are validated to ensure a set of guarantees as consistency, timely updates and confirmability.
+Kubernetes Fury Distribution has been certified by the [CNCF] (Cloud Native Computing Foundation) as a *Certified Kubernetes Distribution* for all Kubernetes versions since [Kubernetes 1.12](https://github.com/cncf/k8s-conformance/pull/619).
+
+Certified solutions are validated to ensure a set of guarantees such as consistency, timely updates and confirmability.
-
-
-
-
-
-
-
-
-
-
-
-
+
+
+
-
+
+## Contributing đ¤
+
+If you wish to contribute please read the [Contributing Guidelines](docs/CONTRIBUTING.md).
+
## License
KFD is open-source software and it's released under the following [LICENSE](LICENSE)
@@ -132,25 +139,30 @@ KFD is open-source software and it's released under the following [LICENSE](LICE
[monitoring-module]: https://github.com/sighupio/fury-kubernetes-monitoring
[dr-module]: https://github.com/sighupio/fury-kubernetes-dr
[opa-module]: https://github.com/sighupio/fury-kubernetes-opa
+[auth-module]: https://github.com/sighupio/fury-kubernetes-auth
+
+[networking-version]: https://img.shields.io/badge/release-v1.10.0-blue
+[ingress-version]: https://img.shields.io/badge/release-v1.13.1-blue
+[logging-version]: https://img.shields.io/badge/release-v3.0.1-blue
+[monitoring-version]: https://img.shields.io/badge/release-v2.0.1-blue
+[dr-version]: https://img.shields.io/badge/release-v1.10.1-blue
+[opa-version]: https://img.shields.io/badge/release-v1.7.3-blue
+[auth-version]: https://img.shields.io/badge/release-v0.0.2-blue
-[networking-version]: https://img.shields.io/badge/release-v1.9.0-blue
-[ingress-version]: https://img.shields.io/badge/release-v1.12.2-blue
-[logging-version]: https://img.shields.io/badge/release-v1.10.3-blue
-[monitoring-version]: https://img.shields.io/badge/release-v1.14.2-blue
-[dr-version]: https://img.shields.io/badge/release-v1.9.2-blue
-[opa-version]: https://img.shields.io/badge/release-v1.6.2-blue
-[compatibility-matrix]: https://github.com/sighupio/fury-distribution/blob/master/docs/COMPATIBILITY_MATRIX.md
-[versioning]: https://github.com/sighupio/fury-distribution/blob/master/docs/VERSIONING.md
+[compatibility-matrix]: https://github.com/sighupio/fury-distribution/blob/main/docs/COMPATIBILITY_MATRIX.md
+[versioning]: https://github.com/sighupio/fury-distribution/blob/main/docs/VERSIONING.md
[kong-module]: https://github.com/sighupio/fury-kubernetes-kong
[service-mesh-module]: https://github.com/sighupio/fury-kubernetes-service-mesh
[registry-module]: https://github.com/sighupio/fury-kubernetes-registry
+[storage-module]: https://github.com/sighupio/fury-kubernetes-storage
[kong-version]: https://img.shields.io/github/v/release/sighupio/fury-kubernetes-kong
[service-mesh-version]: https://img.shields.io/github/v/release/sighupio/fury-kubernetes-service-mesh
[registry-version]: https://img.shields.io/github/v/release/sighupio/fury-kubernetes-registry
+[storage-version]: https://img.shields.io/github/v/release/sighupio/fury-kubernetes-storage
-[sighup-site]: https:sighup.io
+[sighup-site]: https://sighup.io
[CNCF]: https://landscape.cncf.io/card-mode?category=certified-kubernetes-distribution&grouping=category&organization=sighup
diff --git a/docs/COMPATIBILITY_MATRIX.md b/docs/COMPATIBILITY_MATRIX.md
index b5908041..fd0a595f 100644
--- a/docs/COMPATIBILITY_MATRIX.md
+++ b/docs/COMPATIBILITY_MATRIX.md
@@ -1,21 +1,31 @@
# Compatibility Matrix
-| KFD / Kubernetes Version | 1.14.X | 1.15.X | 1.16.X | 1.17.X | 1.18.X | 1.19.X | 1.20.X | 1.21.X | 1.22.X |
-|--------------------------|:------------------:|:------------------:|:------------------:|:------------------:|:------------------:|:------------------:|:------------------:|:------------------:|:------------------:|
-| v1.1.0 | :white_check_mark: | :white_check_mark: | :white_check_mark: | | | | | | |
-| v1.2.0 | :white_check_mark: | :white_check_mark: | :white_check_mark: | | | | | | |
-| v1.3.0 | | | :white_check_mark: | :white_check_mark: | :white_check_mark: | | | | |
-| v1.4.0 | | | :white_check_mark: | :white_check_mark: | :white_check_mark: | :warning: | | | |
-| v1.5.0 | | | | :warning: | :warning: | :warning: | :warning: | | |
-| v1.5.1 | | | | :white_check_mark: | :white_check_mark: | :white_check_mark: | :warning: | | |
-| v1.6.0 | | | | | :white_check_mark: | :white_check_mark: | :white_check_mark: | :warning: | |
-| v1.7.0 | | | | | | :white_check_mark: | :white_check_mark: | :white_check_mark: | |
-| v1.7.1 | | | | | | :white_check_mark: | :white_check_mark: | :white_check_mark: | |
-| v1.21.0 | | | | | | | :x: | :white_check_mark: | |
-| v1.22.0 | | | | | | | | | :white_check_mark: |
+| KFD / Kubernetes Version | 1.14.X | 1.15.X | 1.16.X | 1.17.X | 1.18.X | 1.19.X | 1.20.X | 1.21.X | 1.22.X | 1.23.X | 1.24.X |
+| ------------------------ | :----------------: | :----------------: | :----------------: | :----------------: | :----------------: | :----------------: | :----------------: | :----------------: | :----------------: | :----------------: | :----------------: |
+| v1.1.0 | :white_check_mark: | :white_check_mark: | :white_check_mark: | | | | | | | | |
+| v1.2.0 | :white_check_mark: | :white_check_mark: | :white_check_mark: | | | | | | | | |
+| v1.3.0 | | | :white_check_mark: | :white_check_mark: | :white_check_mark: | | | | | | |
+| v1.4.0 | | | :white_check_mark: | :white_check_mark: | :white_check_mark: | :warning: | | | | | |
+| v1.5.0 | | | | :warning: | :warning: | :warning: | :warning: | | | | |
+| v1.5.1 | | | | :white_check_mark: | :white_check_mark: | :white_check_mark: | :warning: | | | | |
+| v1.6.0 | | | | | :white_check_mark: | :white_check_mark: | :white_check_mark: | :warning: | | | |
+| v1.7.0 | | | | | | :white_check_mark: | :white_check_mark: | :white_check_mark: | | | |
+| v1.7.1 | | | | | | :white_check_mark: | :white_check_mark: | :white_check_mark: | | | |
+| v1.21.0 | | | | | | | | :white_check_mark: | | | |
+| v1.22.0 | | | | | | | | :white_check_mark: | :white_check_mark: | | |
+| v1.22.1 | | | | | | | | | :white_check_mark: | :white_check_mark: | |
+| v1.23.0 | | | | | | | :x: | :x: | :x: | :warning: | |
+| v1.23.1 | | | | | | | :white_check_mark: | :white_check_mark: | :white_check_mark: | :warning: | |
+| v1.23.2 | | | | | | | | | | :white_check_mark: | |
+| v1.23.3 | | | | | | | | | | :white_check_mark: | :white_check_mark: |
+| v1.24.0 | | | | | | | | | | | :white_check_mark: |
:white_check_mark: Compatible
:warning: Has issues
:x: Incompatible
+
+## Warning
+
+- :x: version `v1.23.0` has a known bug breaking upgrades. Please do not use.
diff --git a/docs/VERSIONING.md b/docs/VERSIONING.md
index 4a6b0a05..ce96fc95 100644
--- a/docs/VERSIONING.md
+++ b/docs/VERSIONING.md
@@ -19,3 +19,5 @@ For example:
## Upgrades
Each KFD version will come with comprehensive documentation on all supported upgrade paths (es. tutorials to upgrade minor to minor, including patches if present).
+
+See the [upgrade path](upgrades/UPGRADE_PATH.md) document for more details.
diff --git a/docs/assets/fury-core-modules.png b/docs/assets/fury-core-modules.png
index ad138e54..53237b47 100644
Binary files a/docs/assets/fury-core-modules.png and b/docs/assets/fury-core-modules.png differ
diff --git a/docs/releases/v1.22.0.md b/docs/releases/v1.22.0.md
index 0eafd630..98b5f6e3 100644
--- a/docs/releases/v1.22.0.md
+++ b/docs/releases/v1.22.0.md
@@ -1,8 +1,9 @@
# Kubernetes Fury Distribution Release v1.22.0
-Welcome to the KFD release `v1.22.0`. From this release on, Fury follows a
-different versioning schema. KFD version now will closely follow the version of
-the latest `Kubernetes` release that is supported by Fury. This release supports `kubernetes` runtime `v1.22.x`.
+Welcome to the KFD release `v1.22.0`.
+From this release on, Fury follows a different versioning schema.
+KFD version now will closely follow the version of the latest `Kubernetes` release that is supported by Fury.
+This release supports `kubernetes` runtime `v1.22.x`.
This distribution is maintained with â¤ī¸ by the team [SIGHUP](https://sighup.io/),
and is battle tested in production environments.
@@ -32,21 +33,18 @@ Fury with enhancements and bug fixes.
## Upgrade path
-From this version, we are introducing the new versioning system, see the [versioning documentation file][versioning] to know more about
-the new versioning scheme of the distribution and the upgrade path.
+From this version, we are introducing the new versioning system, see the [versioning documentation file][versioning] to know more about the new versioning scheme of the distribution and the upgrade path.
### Katalog Procedure
-To upgrade the distribution from `v1.21.x` to `v1.22.0`, you need to download this new version, vendor the dependencies,
-finally applying the `kustomize` project.
+To upgrade the distribution from `v1.21.x` to `v1.22.0`, you need to download this new version, vendor the dependencies, and finally apply the `kustomize` project.
```bash
furyctl vendor -H
kustomize build . | kubectl apply -f -
```
-> **NOTE**: *The upgrade takes some minutes (depends on the cluster size), and you should expect some downtime during
-the upgrade process.*
+> **NOTE**: *The upgrade takes some minutes (depends on the cluster size), and you should expect some downtime during the upgrade process.*
[versioning]: https://github.com/sighupio/fury-distribution/blob/master/docs/VERSIONING.md
diff --git a/docs/releases/v1.22.1.md b/docs/releases/v1.22.1.md
new file mode 100644
index 00000000..7dea19fc
--- /dev/null
+++ b/docs/releases/v1.22.1.md
@@ -0,0 +1,118 @@
+# Kubernetes Fury Distribution Release v1.22.1
+
+Welcome to KFD release `v1.22.1`.
+
+The distribution is maintained with â¤ī¸ by the team [SIGHUP](https://sighup.io/), and is battle tested in production environments.
+
+This release adds a bunch of new features and improvements to the core modules, adds a new core module `auth` and some package replacement/removals.
+
+## New Features since `v1.22.0`
+
+### Core Module Updates
+
+- [networking](https://github.com/sighupio/fury-kubernetes-networking) đĻ core module: v1.9.0 -> [**v1.10.0**](https://github.com/sighupio/fury-kubernetes-networking/releases/tag/v1.10.0)
+ - Updated calico from `3.23.2` to `3.24.1`.
+ - Updated ip-masq-agent from `2.5.0` to `2.8.0`.
+ - Added Tigera operator package.
+- [monitoring](https://github.com/sighupio/fury-kubernetes-monitoring) đĻ core module: v1.14.2 -> [**v2.0.1**](https://github.com/sighupio/fury-kubernetes-monitoring/releases/tag/v2.0.1)
+ - Updated alertmanager from `0.23.0` to `0.24.0`.
+ - Updated grafana from `8.3.3` to `8.5.5`.
+ - Updated kube-rbac-proxy from `0.11.0` to `0.12.0`.
+ - Updated kube-state-metrics from `2.3.0` to `2.5.0`.
+ - Updated prometheus-operator from `0.53.1` to `0.57.0`.
+ - Updated prometheus from `2.32.1` to `2.36.1`.
+ - Updated x509-exporter from `2.12.1` to `3.2.0`.
+ - Removed goldpinger package.
+ - Removed metrics-server package.
+ - Added blackbox-exporter package `0.21.0`.
+ - Added prometheus-adapter package `0.9.1`.
+- [logging](https://github.com/sighupio/fury-kubernetes-logging) đĻ core module: v1.10.3 -> [**v3.0.1**](https://github.com/sighupio/fury-kubernetes-logging/releases/tag/v3.0.1)
+ - Removed elasticsearch package.
+ - Removed kibana package.
+ - Removed fluentd package.
+ - Removed curator package.
+ - Added opensearch package `2.0.0`.
+ - Added opensearch-dashboards package `2.0.0`.
+ - Added logging-operator package `3.17.7`.
+ - Added loki-stack as tech preview package `2.4.2`.
+- [ingress](https://github.com/sighupio/fury-kubernetes-ingress) đĻ core module: v1.12.2 -> [**v1.13.1**](https://github.com/sighupio/fury-kubernetes-ingress/releases/tag/v1.13.1)
+ - Updated cert-manager from `1.6.1` to `1.10.0`.
+ - Updated forecastle from `1.0.75` to `1.0.103`.
+ - Removed nginx-ldap-auth package.
+ - Removed nginx-ovh package.
+ - Removed nginx-gke package.
+ - Removed pomerium package.
+ - Added external-dns package `0.10.2`.
+ - Added aws-cert-manager terraform module.
+ - Added aws-external-dns terraform module.
+- [dr](https://github.com/sighupio/fury-kubernetes-dr) đĻ core module: v1.9.2 -> [**v1.10.1**](https://github.com/sighupio/fury-kubernetes-dr/releases/tag/v1.10.1)
+ - Updated velero from `1.7.1` to `1.9.2`.
+ - Updated velero-plugin-for-aws from `1.3.0` to `1.5.1`.
+ - Updated velero-plugin-for-microsoft-azure from `1.3.1` to `1.5.1`.
+ - Updated velero-plugin-for-gcp from `1.3.0` to `1.5.1`.
+ - Updated velero-plugin-for-csi from `0.2.0` to `0.3.1`.
+- [OPA](https://github.com/sighupio/fury-kubernetes-opa) đĻ core module: v1.6.2 -> [**v1.7.3**](https://github.com/sighupio/fury-kubernetes-opa/releases/tag/v1.7.3)
+ - Updated gatekeeper from `3.7.0` to `3.9.2`.
+ - Updated gatekeeper-policy-manager from `0.5.1` to `1.0.2`.
+- [auth](https://github.com/sighupio/fury-kubernetes-auth) đĻ core module: [**v0.0.2**](https://github.com/sighupio/fury-kubernetes-auth/releases/tag/v0.0.2)
+ - Added pomerium package `0.15.8`.
+ - Added dex package `2.35.3`.
+ - Added gangway package `3.2.0`.
+
+> Please refer to the individual release notes for detailed information.
+
+## Upgrade procedure
+
+Check the [v1.22.0-to-v1.22.1 upgrade guide](../upgrades/v1.22.0-to-v1.22.1.md) for the detailed procedure.
+
+## Breaking changes
+
+The following is a summary of all the breaking changes introduced in this release; check each module for detailed information.
+
+- Monitoring:
+ - Removed goldpinger package, no replacement available.
+ - Replaced metrics-server with prometheus-adapter.
+ - `kubectl --server-side` apply is now required.
+- Logging:
+ - Removed elasticsearch and kibana packages, replaced by opensearch and opensearch-dashboards packages.
+ - Removed self managed fluentd/fluentbit stack in favour of logging-operator.
+- Ingress:
+ - Removed support for annotations on NGINX ingress controller side.
+ - Removed nginx-ldap-auth with no replacement available.
+ - Moved pomerium to the new auth module.
+ - Removed nginx-ovh and nginx-gke packages. The only thing they did was to patch the svc from the NGINX package to type `LoadBalancer`.
+- Disaster Recovery:
+ - Removed deprecated `eks-velero` in favour of `velero-plugin-for-aws`
+
+## New features đ
+
+This release adds new features across all the core modules. Following is a list of the most interesting ones for each module.
+
+- Networking
+ - Added Tigera operator package, can be used to manage Calico (instead of installing it directly with calico package) or to enforce Network Policies on EKS-based clusters.
+
+- Monitoring
+ - Added blackbox-exporter package to monitor services external to the cluster.
+
+- Logging
+ - Introduced logging operator to manage all the logging stack and logging configs in a dynamic way.
+ - Added Grafana Loki stack as tech preview.
+  - Switched from ElasticSearch to OpenSearch.
+
+- Ingress
+ - Added ExternalDNS package to manage DNS records for services exposed through Ingress.
+ - Added Terraform modules to manage IAM roles on AWS for cert-manager and external-dns.
+ - Added Validating Webhook on NGINX ingress controller to validate the Ingress resources and prevent invalid configurations.
+ - Forecastle now is Fury branded.
+
+- Disaster Recovery
+ - Added a Prometheus alert for when there are no successful backups in the last 24 hours for the included schedules.
+
+- OPA
+ - Major overhaul for Gatekeeper Policy Manager that is now released as a v1.0.2.
+ - A set of custom Prometheus alerts that get triggered when the Gatekeeper webhooks are misbehaving for more than 5 min has been added.
+ - Gatekeeper now has mutating capabilities in addition to just validating.
+
+- Auth
+ - Module was introduced as core module.
+ - Pomerium, Dex and Gangway packages are available.
diff --git a/docs/releases/v1.23.0.md b/docs/releases/v1.23.0.md
new file mode 100644
index 00000000..e668113d
--- /dev/null
+++ b/docs/releases/v1.23.0.md
@@ -0,0 +1,121 @@
+:x: This release contains issues, please use version v1.23.1 instead :x:
+
+# Kubernetes Fury Distribution Release v1.23.0
+
+Welcome to the KFD release `v1.23.0`. From this release on, Fury follows a
+different versioning schema. KFD version now will closely follow the version of
+the latest `Kubernetes` release that is supported by Fury. So this is the first
+release that supports `kubernetes` runtime `v1.23.0`.
+
+This distribution is maintained with â¤ī¸ by the team [SIGHUP](https://sighup.io/),
+and is battle tested in production environments.
+
+This new release of KFD features a bundle of all the core modules supported by
+Fury with enhancements and bug fixes. Alongside, Fury gets a new face with this
+release. We would like to introduce our brand new
+[documentation site](https://docs.kubernetesfury.com/) with this release.
+
+## New Features
+
+### Core Module Updates
+
+- Kubernetes `1.23` Tech preview on all modules
+- All the core modules have added `kubernetes` labels to easily identify it to
+ be a part of `KFD`
+- Updated documentation for every core module
+- Provision for automated canonical definition generation
+
+- [networking](https://github.com/sighupio/fury-kubernetes-networking) đĻ core module: v1.7.0 -> [**v1.8.0**](https://github.com/sighupio/fury-kubernetes-networking/releases/tag/v1.8.0)
+ - Update [Calico] from version `3.19.2` to `3.21.3` and improvement to support
+ the new version
+- [monitoring](https://github.com/sighupio/fury-kubernetes-monitoring) đĻ core module: v1.13.0 -> [**v1.14.0**](https://github.com/sighupio/fury-kubernetes-monitoring/releases/tag/v1.14.0)
+ - Update [Prometheus Operator] from version `0.50.0` to `0.53.1`.
+ - Update [Prometheus] from version `2.29.1` to `2.32.1`.
+ - Update [Grafana] from version `8.1.2` to `8.3.3`.
+ - Update [x509-exporter] from version `2.11.0` to `2.12.1`.
+ - Update [thanos] from version `0.22.0` to `0.24.0`.
+ - Update [node-exporter] from version `1.2.2` to `1.3.1`.
+ - Updates to various prometheus monitoring rules synching with kube-prometheus.
+- [logging](https://github.com/sighupio/fury-kubernetes-logging) đĻ core module: v1.9.1 -> [**v1.10.0**](https://github.com/sighupio/fury-kubernetes-logging/releases/tag/v1.10.0)
+ - Update [fluentd] from version `1.14.0` to `1.14.2`.
+ - Update [fluent-bit] from version `1.8.2` to `1.8.10`.
+ - Update [elasticsearch] from version `7.13.3` to `7.16.2`.
+ - Update [kibana] from version `7.13.3` to `7.16.2`.
+ - Improve kibana index pattern injection via sidecar, remove readinessProbe
+ - several bug fixes on fluentd
+- [ingress](https://github.com/sighupio/fury-kubernetes-ingress) đĻ core module: v1.11.2 -> [**v1.12.0**](https://github.com/sighupio/fury-kubernetes-ingress/releases/tag/v1.12.1)
+ - Update cert-manager CRDs as per upstream, dropping support for versions `< v1`
+ - Update [forecastle] from version `1.0.66` to `1.0.73`.
+ - Update [nginx] ingress controller from version `1.0.0` to `1.1.0`.
+ - Update [cert-manager] from version `1.5.3` to `1.6.1`.
+- [dr](https://github.com/sighupio/fury-kubernetes-dr) đĻ core module: v1.8.0 -> [**v1.9.0**](https://github.com/sighupio/fury-kubernetes-dr/releases/tag/v1.9.0)
+ - Deprecation to `eks-velero` module in favor of `aws-velero`
+ - Update [Velero] from version `1.6.3` to `1.7.1`.
+ - Upgrade velero-plugin-for-aws from `1.2.1` to `1.3.0`
+ - Upgrade velero-plugin-for-microsoft-azure from `1.2.1` to `1.3.1`
+ - Upgrade velero-plugin-for-gcp from `1.2.1` to `1.3.0`
+ - Upgrade velero-plugin-for-csi from `0.1.2` to `0.2.0`
+- [OPA](https://github.com/sighupio/fury-kubernetes-opa) đĻ core module: v1.5.0 -> [**v1.6.0**](https://github.com/sighupio/fury-kubernetes-opa/releases/tag/v1.6.0)
+ - Add optional policy to protect accidental namespace deletion
+ - Update [Gatekeeper] from version `v3.6.0` to `v3.7.0`.
+ - Update [Gatekeeper Policy Manager]. Version `v0.5.1`.
+
+> Please refer to the individual release notes for detailed information
+
+## Upgrade path
+
+### Katalog Procedure
+
+To upgrade this distribution from `v1.7.x` to `v1.23.0`, you need to download this new version, vendor the dependencies,
+and finally apply the `kustomize` project.
+
+```bash
+furyctl vendor -H
+kustomize build . | kubectl apply -f -
+```
+
+> **NOTE**: *The upgrade takes some minutes (depends on the cluster size), and you should expect some downtime during
+the upgrade process.*
+
+### Terraform Procedure
+
+## Test it
+
+If you want to test the distribution in a test environment, spin up a
+[`kind`](https://github.com/kubernetes-sigs/kind/releases/tag/v0.11.0) cluster, then deploy all rendered manifests.
+
+```bash
+$ kind version
+kind v0.11.0 go1.16.4 darwin/amd64
+$ curl -Ls https://github.com/sighupio/fury-distribution/releases/download/v1.23.0/katalog/tests/config/kind-config | kind create cluster --image registry.sighup.io/fury/kindest/node:v1.23.0 --config -
+Creating cluster "kind" ...
+ â Ensuring node image (kindest/node:v1.20.1) đŧ
+ â Preparing nodes đĻ đĻ
+ â Writing configuration đ
+ â Starting control-plane đšī¸
+ â Installing StorageClass đž
+ â Joining worker nodes đ
+Set kubectl context to "kind-kind"
+You can now use your cluster with:
+
+kubectl cluster-info --context kind-kind
+
+Have a question, bug, or feature request? Let us know! https://kind.sigs.k8s.io/#community đ
+$ kubectl apply -f https://github.com/sighupio/fury-distribution/releases/download/v1.23.0/fury-distribution-v1.23.0.yml
+namespace/cert-manager created
+namespace/gatekeeper-system created
+namespace/ingress-nginx created
+namespace/logging created
+namespace/monitoring created
+customresourcedefinition.apiextensions.k8s.io/alertmanagers.monitoring.coreos.com created
+customresourcedefinition.apiextensions.k8s.io/bgpconfigurations.crd.projectcalico.org created
+customresourcedefinition.apiextensions.k8s.io/bgppeers.crd.projectcalico.org created
+customresourcedefinition.apiextensions.k8s.io/blockaffinities.crd.projectcalico.org created
+customresourcedefinition.apiextensions.k8s.io/certificaterequests.cert-manager.io created
+customresourcedefinition.apiextensions.k8s.io/certificates.cert-manager.io created
+customresourcedefinition.apiextensions.k8s.io/challenges.acme.cert-manager.io created
+customresourcedefinition.apiextensions.k8s.io/clusterinformations.crd.projectcalico.org created
+
+```
+
+> **NOTE**: *Run `kubectl apply` multiple times until you see no errors in the console*
diff --git a/docs/releases/v1.23.1.md b/docs/releases/v1.23.1.md
new file mode 100644
index 00000000..b9c863a1
--- /dev/null
+++ b/docs/releases/v1.23.1.md
@@ -0,0 +1,99 @@
+# Kubernetes Fury Distribution Release v1.23.1
+
+Welcome to the KFD release `v1.23.1`. This is a patch release
+fixing bugs in all the core modules.
+
+The team has been working to make the release upgrade as simple as possible, so read carefully the upgrade path of each
+core module listed below along with the upgrade path of the distribution.
+
+⚠️ If upgrading from `v1.23.0`, you **must** delete all the objects (StatefulSet, Deployment, DaemonSet, etc.) as specified in the release notes of the modules
+**before** upgrading to `v1.23.1`.
+
+This distribution is maintained with ❤️ by the team [SIGHUP](https://sighup.io/),
+and is battle tested in production environments.
+
+## New Features
+
+### Core Module Updates
+
+- Removed `commonLabels` from all the `kustomize` katalogs
+
+- [networking](https://github.com/sighupio/fury-kubernetes-networking) 📦 core module: v1.8.0 -> [**v1.8.2**](https://github.com/sighupio/fury-kubernetes-networking/releases/tag/v1.8.2)
+ - No updates on the components of the module
+ - `commonLabels` bugfix
+- [monitoring](https://github.com/sighupio/fury-kubernetes-monitoring) 📦 core module: v1.14.0 -> [**v1.14.1**](https://github.com/sighupio/fury-kubernetes-monitoring/releases/tag/v1.14.1)
+ - No updates on the components of the module
+ - `commonLabels` bugfix
+- [logging](https://github.com/sighupio/fury-kubernetes-logging) 📦 core module: v1.10.0 -> [**v1.10.2**](https://github.com/sighupio/fury-kubernetes-logging/releases/tag/v1.10.2)
+ - No updates on the components of the module
+ - `commonLabels` bugfix
+- [ingress](https://github.com/sighupio/fury-kubernetes-ingress) 📦 core module: v1.12.0 -> [**v1.12.2**](https://github.com/sighupio/fury-kubernetes-ingress/releases/tag/v1.12.2)
+ - Update [forecastle] from version `1.0.73` to `1.0.75`.
+ - `commonLabels` bugfix
+- [dr](https://github.com/sighupio/fury-kubernetes-dr) 📦 core module: v1.9.0 -> [**v1.9.2**](https://github.com/sighupio/fury-kubernetes-dr/releases/tag/v1.9.2)
+ - No updates on the components of the module
+ - `commonLabels` bugfix
+- [OPA](https://github.com/sighupio/fury-kubernetes-opa) 📦 core module: v1.6.0 -> [**v1.6.2**](https://github.com/sighupio/fury-kubernetes-opa/releases/tag/v1.6.2)
+ - Fixed an issue present only in `v1.6.0` with a missing volume mount that broke the audit process (policy enforcement was unaffected)
+ - `commonLabels` bugfix
+
+> Please refer to the individual release notes for detailed information
+
+## Upgrade path
+
+### Katalog Procedure
+To upgrade the distribution from `v1.23.0` to `v1.23.1` please follow the instructions written in the release notes of each core module.
+
+To upgrade this distribution from `v1.7.x` to `v1.23.1`, you need to download this new version, vendor the dependencies,
+and finally apply the `kustomize` project.
+
+```bash
+furyctl vendor -H
+kustomize build . | kubectl apply -f -
+```
+
+> **NOTE**: *The upgrade takes some minutes (depends on the cluster size), and you should expect some downtime during
+the upgrade process.*
+
+### Terraform Procedure
+
+## Test it
+
+If you want to test the distribution in a test environment, spin up a
+[`kind`](https://github.com/kubernetes-sigs/kind/releases/tag/v0.11.0) cluster, then deploy all rendered manifests.
+
+```bash
+$ kind version
+kind v0.11.0 go1.16.4 darwin/amd64
+$ curl -Ls https://github.com/sighupio/fury-distribution/releases/download/v1.23.1/katalog/tests/config/kind-config | kind create cluster --image registry.sighup.io/fury/kindest/node:v1.23.1 --config -
+Creating cluster "kind" ...
+ ✓ Ensuring node image (registry.sighup.io/fury/kindest/node:v1.23.1) 🖼
+ ✓ Preparing nodes 📦 📦
+ ✓ Writing configuration 📜
+ ✓ Starting control-plane 🕹️
+ ✓ Installing StorageClass 💾
+ ✓ Joining worker nodes 🚜
+Set kubectl context to "kind-kind"
+You can now use your cluster with:
+
+kubectl cluster-info --context kind-kind
+
+Have a question, bug, or feature request? Let us know! https://kind.sigs.k8s.io/#community 🙂
+$ kubectl apply -f https://github.com/sighupio/fury-distribution/releases/download/v1.23.1/fury-distribution-v1.23.1.yml
+namespace/cert-manager created
+namespace/gatekeeper-system created
+namespace/ingress-nginx created
+namespace/logging created
+namespace/monitoring created
+customresourcedefinition.apiextensions.k8s.io/alertmanagers.monitoring.coreos.com created
+customresourcedefinition.apiextensions.k8s.io/bgpconfigurations.crd.projectcalico.org created
+customresourcedefinition.apiextensions.k8s.io/bgppeers.crd.projectcalico.org created
+customresourcedefinition.apiextensions.k8s.io/blockaffinities.crd.projectcalico.org created
+customresourcedefinition.apiextensions.k8s.io/certificaterequests.cert-manager.io created
+customresourcedefinition.apiextensions.k8s.io/certificates.cert-manager.io created
+customresourcedefinition.apiextensions.k8s.io/challenges.acme.cert-manager.io created
+customresourcedefinition.apiextensions.k8s.io/clusterinformations.crd.projectcalico.org created
+
+```
+
+> **NOTE**: *Run `kubectl apply` multiple times until you see no errors in the console*
diff --git a/docs/releases/v1.23.2.md b/docs/releases/v1.23.2.md
new file mode 100644
index 00000000..be311d52
--- /dev/null
+++ b/docs/releases/v1.23.2.md
@@ -0,0 +1,45 @@
+# Kubernetes Fury Distribution Release v1.23.2
+
+Welcome to the KFD release `v1.23.2`. This is a patch release fixing bugs in all the core modules.
+
+The team has been working to make the release upgrade as simple as possible, so read carefully the upgrade path of each core module listed below along with the upgrade path of the distribution.
+
+This distribution is maintained with ❤️ by the team [SIGHUP](https://sighup.io/), and is battle tested in production environments.
+
+## New Features since `v1.23.1`
+
+### Core Module Updates
+
+- [networking](https://github.com/sighupio/fury-kubernetes-networking) 📦 core module: v1.8.2 -> [**v1.9.0**](https://github.com/sighupio/fury-kubernetes-networking/releases/tag/v1.9.0)
+ - Update [Calico] from version `3.21.3` to `3.23.2`.
+- [monitoring](https://github.com/sighupio/fury-kubernetes-monitoring) 📦 core module: v1.14.1 -> [**v1.14.2**](https://github.com/sighupio/fury-kubernetes-monitoring/releases/tag/v1.14.2)
+ - Bugfix on [x509-exporter]
+- [logging](https://github.com/sighupio/fury-kubernetes-logging) 📦 core module: v1.10.2 -> [**v1.10.3**](https://github.com/sighupio/fury-kubernetes-logging/releases/tag/v1.10.3)
+ - Update [fluent-bit] from version `1.8.10` to `1.9.5`.
+- [ingress](https://github.com/sighupio/fury-kubernetes-ingress) 📦 core module: v1.12.2 -> [**v1.12.2**](https://github.com/sighupio/fury-kubernetes-ingress/releases/tag/v1.12.2)
+ - No updates
+- [dr](https://github.com/sighupio/fury-kubernetes-dr) 📦 core module: v1.9.2 -> [**v1.9.2**](https://github.com/sighupio/fury-kubernetes-dr/releases/tag/v1.9.2)
+ - No updates
+- [OPA](https://github.com/sighupio/fury-kubernetes-opa) 📦 core module: v1.6.2 -> [**v1.6.2**](https://github.com/sighupio/fury-kubernetes-opa/releases/tag/v1.6.2)
+ - No updates
+
+> Please refer to the individual release notes for detailed information
+
+## Upgrade path
+
+From this version, we are introducing the new versioning system, see the [versioning documentation file][versioning] to know more about the new versioning scheme of the distribution and the upgrade path.
+
+### Katalog Procedure
+
+To upgrade the distribution from `v1.22.x` to `v1.23.2`, you need to download this new version, vendor the dependencies, and finally apply the `kustomize` project.
+
+```bash
+furyctl vendor -H
+kustomize build . | kubectl apply -f -
+```
+
+> **NOTE**: *The upgrade takes some minutes (depends on the cluster size), and you should expect some downtime during the upgrade process.*
+
+
+[versioning]: https://github.com/sighupio/fury-distribution/blob/master/docs/VERSIONING.md
+
diff --git a/docs/releases/v1.23.3.md b/docs/releases/v1.23.3.md
new file mode 100644
index 00000000..84d668a8
--- /dev/null
+++ b/docs/releases/v1.23.3.md
@@ -0,0 +1,118 @@
+# Kubernetes Fury Distribution Release v1.23.3
+
+Welcome to KFD release `v1.23.3`.
+
+The distribution is maintained with ❤️ by the team [SIGHUP](https://sighup.io/), and is battle tested in production environments.
+
+This release adds a bunch of new features and improvements to the core modules, adds a new core module `auth` and some package replacement/removals.
+
+## New Features since `v1.23.2`
+
+### Core Module Updates
+
+- [networking](https://github.com/sighupio/fury-kubernetes-networking) 📦 core module: v1.9.0 -> [**v1.10.0**](https://github.com/sighupio/fury-kubernetes-networking/releases/tag/v1.10.0)
+ - Updated calico from `3.23.2` to `3.24.1`.
+ - Updated ip-masq-agent from `2.5.0` to `2.8.0`.
+ - Added Tigera operator package.
+- [monitoring](https://github.com/sighupio/fury-kubernetes-monitoring) 📦 core module: v1.14.2 -> [**v2.0.1**](https://github.com/sighupio/fury-kubernetes-monitoring/releases/tag/v2.0.1)
+ - Updated alertmanager from `0.23.0` to `0.24.0`.
+ - Updated grafana from `8.3.3` to `8.5.5`.
+ - Updated kube-rbac-proxy from `0.11.0` to `0.12.0`.
+ - Updated kube-state-metrics from `2.3.0` to `2.5.0`.
+ - Updated prometheus-operator from `0.53.1` to `0.57.0`.
+ - Updated prometheus from `2.32.1` to `2.36.1`.
+ - Updated x509-exporter from `2.12.1` to `3.2.0`.
+ - Removed goldpinger package.
+ - Removed metrics-server package.
+ - Added blackbox-exporter package `0.21.0`.
+ - Added prometheus-adapter package `0.9.1`.
+- [logging](https://github.com/sighupio/fury-kubernetes-logging) 📦 core module: v1.10.3 -> [**v3.0.1**](https://github.com/sighupio/fury-kubernetes-logging/releases/tag/v3.0.1)
+ - Removed elasticsearch package.
+ - Removed kibana package.
+ - Removed fluentd package.
+ - Removed curator package.
+ - Added opensearch package `2.0.0`.
+ - Added opensearch-dashboards package `2.0.0`.
+ - Added logging-operator package `3.17.7`.
+ - Added loki-stack as tech preview package `2.4.2`.
+- [ingress](https://github.com/sighupio/fury-kubernetes-ingress) 📦 core module: v1.12.2 -> [**v1.13.1**](https://github.com/sighupio/fury-kubernetes-ingress/releases/tag/v1.13.1)
+ - Updated cert-manager from `1.6.1` to `1.10.0`.
+ - Updated forecastle from `1.0.75` to `1.0.103`.
+ - Removed nginx-ldap-auth package.
+ - Removed nginx-ovh package.
+ - Removed nginx-gke package.
+ - Removed pomerium package.
+ - Added external-dns package `0.10.2`.
+ - Added aws-cert-manager terraform module.
+ - Added aws-external-dns terraform module.
+- [dr](https://github.com/sighupio/fury-kubernetes-dr) 📦 core module: v1.9.2 -> [**v1.10.1**](https://github.com/sighupio/fury-kubernetes-dr/releases/tag/v1.10.1)
+ - Updated velero from `1.7.1` to `1.9.2`.
+ - Updated velero-plugin-for-aws from `1.3.0` to `1.5.1`.
+ - Updated velero-plugin-for-microsoft-azure from `1.3.1` to `1.5.1`.
+ - Updated velero-plugin-for-gcp from `1.3.0` to `1.5.1`.
+ - Updated velero-plugin-for-csi from `0.2.0` to `0.3.1`.
+- [OPA](https://github.com/sighupio/fury-kubernetes-opa) 📦 core module: v1.6.2 -> [**v1.7.3**](https://github.com/sighupio/fury-kubernetes-opa/releases/tag/v1.7.3)
+ - Updated gatekeeper from `3.7.0` to `3.9.2`.
+ - Updated gatekeeper-policy-manager from `0.5.1` to `1.0.2`.
+- [auth](https://github.com/sighupio/fury-kubernetes-auth) 📦 core module: [**v0.0.2**](https://github.com/sighupio/fury-kubernetes-auth/releases/tag/v0.0.2)
+ - Added pomerium package `0.15.8`.
+ - Added dex package `2.35.3`.
+ - Added gangway package `3.2.0`.
+
+> Please refer to the individual release notes for detailed information.
+
+## Upgrade procedure
+
+Check the [v1.23.2-to-v1.23.3 upgrade guide](../upgrades/v1.23.2-to-v1.23.3.md) for the detailed procedure.
+
+## Breaking changes
+
+Follow a summary of all the breaking changes introduced in this release, check each module for detailed information.
+
+- Monitoring:
+ - Removed goldpinger package, no replacement available.
+ - Replaced metrics-server with prometheus-adapter.
+ - `kubectl --server-side` apply is now required.
+- Logging:
+ - Removed elasticsearch and kibana packages, replaced by opensearch and opensearch-dashboards packages.
+ - Removed self managed fluentd/fluentbit stack in favour of logging-operator.
+- Ingress:
+ - Removed support for annotations on NGINX ingress controller side.
+ - Removed nginx-ldap-auth with no replacement available.
+ - Moved pomerium to the new auth module.
+ - Removed nginx-ovh and nginx-gke packages. The only thing they did was to patch the svc from the NGINX package to type `LoadBalancer`.
+- Disaster Recovery:
+ - Removed deprecated `eks-velero` in favour of `velero-plugin-for-aws`
+
+## New features 🚀
+
+This release adds new features across all the core modules. Following is a list of the most interesting ones for each module.
+
+- Networking
+ - Added Tigera operator package, can be used to manage Calico (instead of installing it directly with calico package) or to enforce Network Policies on EKS-based clusters.
+
+- Monitoring
+ - Added blackbox-exporter package to monitor services external to the cluster.
+
+- Logging
+ - Introduced logging operator to manage all the logging stack and logging configs in a dynamic way.
+ - Added Grafana Loki stack as tech preview.
+ - Switched from ElasticSearch to OpenSearch.
+
+- Ingress
+ - Added ExternalDNS package to manage DNS records for services exposed through Ingress.
+ - Added Terraform modules to manage IAM roles on AWS for cert-manager and external-dns.
+ - Added Validating Webhook on NGINX ingress controller to validate the Ingress resources and prevent invalid configurations.
+ - Forecastle now is Fury branded.
+
+- Disaster Recovery
+ - Added a Prometheus alert for when there are no successful backups in the last 24 hours for the included schedules.
+
+- OPA
+ - Major overhaul for Gatekeeper Policy Manager that is now released as a v1.0.0.
+ - A set of custom Prometheus alerts that get triggered when the Gatekeeper webhooks are misbehaving for more than 5 min has been added.
+ - Gatekeeper now has mutating capabilities in addition to just validating.
+
+- Auth
+ - Module was introduced as core module.
+ - Pomerium, Dex and Gangway packages are available.
diff --git a/docs/releases/v1.24.0.md b/docs/releases/v1.24.0.md
new file mode 100644
index 00000000..a2acff5f
--- /dev/null
+++ b/docs/releases/v1.24.0.md
@@ -0,0 +1,25 @@
+# Kubernetes Fury Distribution Release v1.24.0
+
+Welcome to KFD release `v1.24.0`. This release supports Kubernetes runtime `v1.24.x`.
+
+The distribution is maintained with ❤️ by the team [SIGHUP](https://sighup.io/), and is battle tested in production environments.
+
+This release adds a bunch of new features and improvements to the core modules, adds a new core module `auth` and some package replacement/removals.
+
+## New Features since `v1.23.3`
+
+### Core Module Updates
+
+There are no updates from v1.23.3.
+
+## Upgrade procedure
+
+Check the [v1.23.3-to-v1.24.0 upgrade guide](../upgrades/v1.23.3-to-v1.24.0.md) for the detailed procedure.
+
+## Breaking changes
+
+There are no breaking changes when upgrading from v1.23.3.
+
+## New features 🚀
+
+No new features since v1.23.3.
diff --git a/docs/upgrades/UPGRADE_PATH.md b/docs/upgrades/UPGRADE_PATH.md
new file mode 100644
index 00000000..f9f8249d
--- /dev/null
+++ b/docs/upgrades/UPGRADE_PATH.md
@@ -0,0 +1,37 @@
+# Kubernetes Fury Distribution Upgrade Path
+
+In this document you will find information on what path to follow to upgrade the Kubernetes Fury Distribution (KFD) between versions.
+
+> 💡 refer to [the versioning document](../VERSIONING.md) to learn more on the versioning policy for KFD and the [compatibility matrix](../COMPATIBILITY_MATRIX.md) for KFD vs Kubernetes versions support.
+
+Next is the current supported and recommended upgrade path for most recent versions of KFD (from newest to oldest).
+
+Once you have identified the path you want to follow, refer to the [relevant upgrade guide](./) for the required versions or to the release notes in case the upgrade guide is missing (for older versions of KFD).
+
+## From 1.23.x to 1.24.0
+
+1.23.3 -> 1.24.0
+
+1.23.2 -> 1.23.3 -> 1.24.0
+
+1.23.1 -> 1.23.2 -> 1.23.3 -> 1.24.0
+
+## From 1.22.x to 1.24.0
+
+1.22.1 -> 1.23.3 -> 1.24.0
+
+1.22.0 -> 1.22.1 -> 1.23.3 -> 1.24.0
+
+## From 1.21.x to 1.24.0
+
+1.21.0 -> 1.22.0 -> 1.22.1 -> 1.23.3 -> 1.24.0
+
+## From 1.7.x to 1.24.0
+
+1.7.1 -> 1.21.0 -> 1.22.0 -> 1.22.1 -> 1.23.3 -> 1.24.0
+
+1.7.0 -> 1.7.1 -> 1.21.0 -> 1.22.0 -> 1.22.1 -> 1.23.3 -> 1.24.0
+
+## From 1.6.x to 1.24.0
+
+1.6.0 -> 1.7.0 -> 1.7.1 -> 1.21.0 -> 1.22.0 -> 1.22.1 -> 1.23.3 -> 1.24.0
diff --git a/docs/upgrades/v1.22.0-to-v1.22.1.md b/docs/upgrades/v1.22.0-to-v1.22.1.md
new file mode 100644
index 00000000..db7cb25a
--- /dev/null
+++ b/docs/upgrades/v1.22.0-to-v1.22.1.md
@@ -0,0 +1,391 @@
+# Kubernetes Fury Distribution v1.22.0 to 1.22.1 Upgrade Guide
+
+This guide describes the steps to follow to upgrade the Kubernetes Fury Distribution (KFD) from v1.22.0 to v1.22.1
+
+If you are running a custom set of modules, or different versions than the ones included with each release of KFD, please refer to each module's release notes.
+
+Notice that the guide will not cover changes related to the cloud provider, ingresses or pod placement changes. Only changes related to KFD and its modules.
+
+> ℹ️ **INFO**
+> starting from 1.22.1, 1.23.3 and 1.24.0, due to the size of some resources, you will need to use the `--server-side` flag when performing `kubectl apply`. Server side apply behaves slightly differently than client-side, please read [the official documentation first](https://kubernetes.io/docs/reference/using-api/server-side-apply).
+
+> ❗️ **IMPORTANT**
+> we strongly recommend reading the whole guide before starting the upgrade process to identify possible blockers.
+
+> ⚠️ **WARNING**
+> the upgrade process involves downtime of some components.
+
+## Upgrade procedure
+
+As a high-level overview, the upgrade procedure consists of:
+
+1. Upgrading KFD (all the core modules).
+2. Upgrading the Kubernetes cluster itself.
+
+### 1. Upgrade KFD
+
+The suggested approach to upgrade the distribution is to do it one module at a time, to reduce the risk of errors and to make the process more manageable.
+
+#### Networking module upgrade
+
+To upgrade the Networking module to the new version, update the version on the `Furyfile.yml` file to the new version:
+
+```yaml
+versions:
+ networking: v1.10.0
+...
+```
+
+Then, download the new modules with `furyctl` with the following command:
+
+```bash
+furyctl vendor -H
+```
+
+Apply your Kustomize project that uses Networking module packages as bases with:
+
+> ❗️ **IMPORTANT** you may want to limit the scope of the command to only the networking module; otherwise, the first time you apply with `--server-side` other pods may also be restarted.
+>
+> The same applies to the rest of the modules, we will not include this warning in every step for simplicity.
+
+```bash
+kustomize build | kubectl apply -f - --server-side --force-conflicts
+```
+
+Wait until all Calico pods are restarted and running. You can check Calico's Grafana dashboard "General / Felix Dashboard (Calico)" and the "Networking / *" dashboards to make sure everything is working as expected.
+
+#### Monitoring module upgrade
+
+> ⚠️ **WARNING** downtime for the Monitoring stack is expected during this process.
+
+To upgrade the Monitoring module to the new version, update the version on the `Furyfile.yml` file to the new version:
+
+```yaml
+versions:
+...
+ monitoring: v2.0.1
+...
+```
+
+Then, download the new modules with `furyctl` with the following command:
+
+```bash
+furyctl vendor -H
+```
+
+This time, before applying the project, you need to do some manual steps on the existing resources:
+
+Since the new release ships changes to some immutable fields, the upgrade process will involve the deletion and recreation of some resources.
+
+```bash
+# Prometheus Operator
+kubectl delete deployments.apps prometheus-operator -n monitoring
+
+# Prometheus Operated
+kubectl delete poddisruptionbudgets.policy prometheus-k8s -n monitoring
+kubectl delete clusterrolebinding.rbac.authorization.k8s.io prometheus-k8s-scrape
+kubectl delete clusterroles.rbac.authorization.k8s.io prometheus-k8s-scrape
+kubectl delete prometheusrules.monitoring.coreos.com prometheus-k8s-rules -n monitoring
+
+# Alertmanager Operated
+kubectl delete poddisruptionbudget.policy alertmanager-main -n monitoring
+
+# Remove Goldpinger (deprecated)
+kubectl delete servicemonitor.monitoring.coreos.com goldpinger -n monitoring
+kubectl delete service goldpinger -n monitoring
+kubectl delete daemonset.apps goldpinger -n monitoring
+kubectl delete clusterrole.rbac.authorization.k8s.io goldpinger
+kubectl delete serviceaccount goldpinger -n monitoring
+kubectl delete rolebinding.rbac.authorization.k8s.io goldpinger:cluster:view -n monitoring
+kubectl delete -n monitoring configmaps goldpinger-grafana-dashboard
+
+# Grafana
+kubectl delete deployments.apps grafana -n monitoring
+
+# Kube Proxy Metrics
+kubectl delete deployments.apps kube-state-metrics -n monitoring
+
+# Remove Metrics Server (deprecated)
+kubectl delete apiservice.apiregistration.k8s.io v1beta1.metrics.k8s.io
+kubectl delete service metrics-server -n kube-system
+kubectl delete deployment.apps metrics-server -n kube-system
+kubectl delete clusterrolebinding.rbac.authorization.k8s.io metrics-server:system:auth-delegator
+kubectl delete clusterrolebinding.rbac.authorization.k8s.io system:metrics-server
+kubectl delete clusterrole.rbac.authorization.k8s.io system:aggregated-metrics-reader
+kubectl delete clusterrole.rbac.authorization.k8s.io system:metrics-server
+kubectl delete rolebinding.rbac.authorization.k8s.io metrics-server-auth-reader -n kube-system
+kubectl delete serviceaccount metrics-server -n kube-system
+kubectl delete certificate.cert-manager.io metrics-server-tls -n kube-system
+kubectl delete certificate.cert-manager.io metrics-server-ca -n kube-system
+kubectl delete issuer.cert-manager.io metrics-server-ca -n kube-system
+kubectl delete issuer.cert-manager.io metrics-server-selfsign -n kube-system
+kubectl delete secret metrics-server-ca metrics-server-tls -n kube-system
+# Node Exporter
+kubectl delete daemonsets.apps node-exporter -n monitoring
+
+# x509 Exporter
+kubectl delete serviceaccount x509-certificate-exporter-node -n monitoring
+kubectl delete clusterrole.rbac.authorization.k8s.io x509-certificate-exporter-node
+kubectl delete clusterrolebinding.rbac.authorization.k8s.io x509-certificate-exporter-node
+kubectl delete daemonset.apps x509-certificate-exporter-nodes -n monitoring
+```
+
+Replace `metrics-server` with `prometheus-adapter` package as a base in your project, to replace the functionalities provided by `metrics-server`.
+
+Delete `goldpinger` from your Kustomize resources.
+
+Add `blackbox-exporter` to your Kustomize base.
+
+Alertmanager configuration now expects 3 new secrets `infra-slack-webhook`, `k8s-slack-webhook` and `healthchecks-webhook` in the `monitoring` namespace with the endpoints where to send the alerts in the `url` key. We recommend you add them to your Kustomize base.
+
+Example commands to create the secrets:
+
+```shell
+$ kubectl create secret generic infra-slack-webhook -n monitoring --from-literal url=""
+secret/infra-slack-webhook created
+
+$ kubectl create secret generic healthchecks-webhook -n monitoring --from-literal url=""
+secret/healthchecks-webhook created
+
+$ kubectl create secret generic k8s-slack-webhook -n monitoring --from-literal url=""
+secret/k8s-slack-webhook created
+```
+
+Then apply your Kustomize project that uses Monitoring module packages as bases with:
+
+```bash
+kustomize build | kubectl apply -f - --server-side --force-conflicts
+```
+
+Wait a minute and check that you can see metrics in Grafana, both old and new, check that all Prometheus Targets are up and that Alertmanager is working as expected.
+
+#### Logging module upgrade
+
+> ℹ️ **INFO** the Logging module has undergone a big refactor, the ElasticSearch stack has been replaced with OpenSearch. Read carefully the instructions.
+
+> ⚠️ **WARNING** downtime of the Logging stack is expected during this process.
+
+To upgrade the Logging module to the new version, update the version on the `Furyfile.yml` file to the new version:
+
+```yaml
+versions:
+...
+ logging: v3.0.1
+...
+```
+
+Then, download the new modules with `furyctl` with the following command:
+
+```bash
+furyctl vendor -H
+```
+
+Since this upgrade changes the major version, there are some manual steps involving breaking changes that you need to do before applying the project:
+
+Remove the old `fluentd` and `fluentbit` stack:
+
+```bash
+kubectl delete ds fluentbit -n logging
+kubectl delete sts fluentd -n logging
+```
+
+Remove `fluentd`, `elasticsearch-single` (or `elasticsearch-triple`), `kibana` and `curator` from your Kustomize project and replace them with the `logging-operator`, `logging-operated`, `opensearch-single` or `opensearch-triple`, `opensearch-dashboards`, `configs` bases on your Kustomize project.
+
+Apply your Kustomize project that uses Logging module packages as bases with:
+
+```bash
+kustomize build | kubectl apply -f - --server-side --force-conflicts
+```
+
+> 💡 **TIP** you may need to apply twice or thrice because the new CRDs need some time to be available.
+
+> ℹ️ **INFO** index patterns may take a while to be created in OpenSearch Dashboards. There's a cronjob that runs every hour that creates them.
+
+All the logs will now flow to the new OpenSearch stack.
+
+> 💡 **TIP** don't forget to create the ingress for OpenSearch Dashboards (Kibana replacement).
+>
+> By default the service is called `opensearch-dashboards` in the `logging` namespace, and the web interface listens on the port `5601`.
+
+You can leave the old Elasticsearch/Kibana stack running and remove it after you've verified that everything is working as expected and you don't need the data stored in ElasticSearch anymore. To do so, run the following commands:
+
+```bash
+kubectl delete statefulset elasticsearch -n logging
+kubectl delete service elasticsearch -n logging
+kubectl delete prometheusrule es-rules -n logging
+kubectl delete servicemonitor elasticsearch -n logging
+kubectl delete deployment kibana -n logging
+kubectl delete service kibana -n logging
+kubectl delete cronjob curator -n logging
+```
+
+> ℹ️ **INFO** you may need to delete additional resources created in your Kustomize base, `Ingress` objects for example.
+
+> 💡 **TIP** we recommend leaving the ElasticSearch/Kibana stack up for a brief period (like 30 days) and then proceed to delete it.
+>
+> Beware that you'll need the necessary resources to have both solutions running simultaneously though.
+
+#### Ingress module upgrade
+
+> ⚠️ **WARNING** some downtime of the NGINX Ingress Controller is expected during the upgrade process.
+
+To upgrade the Ingress module to the new version, update the version on the `Furyfile.yml` file to the new version:
+
+```yaml
+versions:
+...
+ ingress: v1.13.1
+...
+```
+
+> 💡 **TIP1** `external-dns` [is now part of the Ingress module](https://github.com/sighupio/fury-kubernetes-ingress/tree/main/katalog/external-dns), you may want to switch to it if you were already using it.
+
+> 💡 **TIP2** if you are on AWS, we have added [2 new modules](https://github.com/sighupio/fury-kubernetes-ingress/tree/main/modules) to the Ingress modules to manage IAM permissions for cert-manager and external-dns.
+
+Then, download the new modules with `furyctl` with the following command:
+
+```bash
+furyctl vendor -H
+```
+
+> ❗️ **IMPORTANT** if you are using the **dual** NGINX Ingress Controller package, make sure that all your ingresses have the `.spec.ingressClassName` field set and that they **don't have** the `kubernetes.io/ingress.class` annotation before proceeding.
+
+cert-manager has been bumped several versions, please check the upgrade guides in the official documentation. In particular, the update from v1.7 to v1.8 includes some changes to the `spec.privateKey.rotationPolicy` field, read carefully if you were using it or you had the `--feature-gates=ServerSideApply=true` flag in the cert-manager controller.
+
+Here you can find the relevant upgrade docs:
+
+- <https://cert-manager.io/docs/releases/upgrading/upgrading-1.6-1.7/>
+- <https://cert-manager.io/docs/releases/upgrading/upgrading-1.7-1.8/>
+- <https://cert-manager.io/docs/releases/upgrading/upgrading-1.8-1.9/>
+- <https://cert-manager.io/docs/releases/upgrading/upgrading-1.9-1.10/>
+
+Apply your Kustomize project that uses Ingress module packages as bases with:
+
+```bash
+# For NGINX Ingress Controller SINGLE
+kubectl delete ingressclass nginx -n ingress-nginx
+# For NGINX Ingress Controller DUAL
+kubectl delete ingressclass external internal -n ingress-nginx
+# Delete cert-manager deployments to update labels
+kubectl delete -n cert-manager deployments.apps cert-manager cert-manager-webhook cert-manager-cainjector
+# finally
+kustomize build | kubectl apply -f - --server-side --force-conflicts
+```
+
+> ℹ️ **INFO** you may need to apply twice or thrice because a new Validating webhook is added with this release and it needs some time to come up.
+
+#### Disaster Recovery module upgrade
+
+To upgrade the Disaster Recovery module to the new version, update the version on the `Furyfile.yml` file to the new version:
+
+```yaml
+versions:
+...
+ dr: v1.10.1
+...
+```
+
+Then, download the new modules with `furyctl` with the following command:
+
+```bash
+furyctl vendor -H
+```
+
+Apply your Kustomize project that uses Ingress module packages as bases with:
+
+```bash
+kustomize build | kubectl apply -f - --server-side --force-conflicts
+```
+
+> ℹ️ **INFO** `velero-eks` has been deprecated, please use the new `aws-velero` terraform module instead in case you haven't migrated yet.
+
+Check that all velero's pods are up and running, you may want to manually trigger a backup to test that everything is working as expected. For example:
+
+```shell
+# create a backup
+velero backup create --from-schedule manifests test-upgrade -n kube-system
+# ... wait a moment
+# check that Phase is completed
+velero get backup -n kube-system test-upgrade
+# you may want to see some details
+velero backup describe test-upgrade -n kube-system
+```
+
+> 💡 **TIP** you can port-forward MinIO's UI and log in to check that the backups are there.
+
+#### OPA module upgrade
+
+To upgrade the OPA module to the new version, update the version on the `Furyfile.yml` file to the new version:
+
+> ⚠️ **WARNING** the `http.send` OPA built-in is disabled. Check if there are custom rules using the built-in before proceeding. [Read here for more details](https://open-policy-agent.github.io/gatekeeper/website/docs/externaldata#motivation).
+
+```yaml
+versions:
+...
+ opa: v1.7.3
+...
+```
+
+Then, download the new modules with `furyctl` with the following command:
+
+```bash
+furyctl vendor -H
+```
+
+Apply your Kustomize project that uses OPA module packages as bases with:
+
+```bash
+kustomize build | kubectl apply -f - --server-side --force-conflicts
+```
+
+You can try to deploy a pod that is not compliant with the rules deployed in the cluster and also check in Gatekeeper Policy Manager for new violations of the constraints.
+
+> ℹ️ **INFO** seeing errors like `http: TLS handshake error from 172.16.0.3:42672: EOF` in Gatekeeper Controller logs is normal. The error is considered harmless. See [Gatekeeper's issue #2142 for reference](https://github.com/open-policy-agent/gatekeeper/issues/2142).
+
+#### Auth module upgrade
+
+The Auth module is a new addition to KFD, there is no previous version to upgrade from, but, you could have been using Pomerium, Dex and Gangway which were previously included in the Ingress and on-premises modules respectively.
+
+> ℹ️ **INFO** Pomerium's version has not changed and Dex has been updated for compatibility with Kubernetes 1.24.x, there are no breaking changes.
+
+If you were using these components, adjust your Kustomize project to use the new `auth` module as a base:
+
+```yaml
+versions:
+...
+ auth: v0.0.2
+...
+```
+
+Then, download the new modules with `furyctl` with the following command:
+
+```bash
+furyctl vendor -H
+```
+
+> 💡 **TIP** be sure to enable the `customHTMLTemplatesDir: /custom-templates` config option in Gangway's configuration to use the Fury branded templates.
+> See the [example configuration file](https://github.com/sighupio/fury-kubernetes-auth/blob/33ac4818232a155ee3920cfabf1b3eb2a9720e7f/katalog/gangway/example/gangway.yml#L73).
+
+Apply your Kustomize project that uses Auth module packages as bases with:
+
+```bash
+kustomize build | kubectl apply -f - --server-side --force-conflicts
+```
+
+🎉 **CONGRATULATIONS** you have now successfully updated all the core modules to KFD v1.22.1
+
+### 2. Upgrade Kubernetes
+
+Being that the underlying Kubernetes cluster could have been created in several different ways, the upgrade of Kubernetes itself is considered out of the scope of this guide.
+
+Please refer to the corresponding documentation for upgrade instructions.
+
+For clusters created with Furyctl:
+
+- [EKS Installer](https://github.com/sighupio/fury-eks-installer)
+- [GKE Installer](https://github.com/sighupio/fury-gke-installer)
+- [AKS Installer](https://github.com/sighupio/fury-aks-installer)
+
+For clusters created with Fury on-premises:
+
+- [KFD on-premises](https://github.com/sighupio/fury-kubernetes-on-premises/tree/main/examples/playbooks#upgrade-cluster)
diff --git a/docs/upgrades/v1.22.1-to-v1.23.3.md b/docs/upgrades/v1.22.1-to-v1.23.3.md
new file mode 100644
index 00000000..274e0c22
--- /dev/null
+++ b/docs/upgrades/v1.22.1-to-v1.23.3.md
@@ -0,0 +1,36 @@
+# Kubernetes Fury Distribution v1.22.1 to 1.23.3 Upgrade Guide
+
+This guide describes the steps to follow to upgrade the Kubernetes Fury Distribution (KFD) from v1.22.1 to v1.23.3.
+
+If you are running a custom set of modules, or different versions than the ones included with each release of KFD, please refer to each module's release notes.
+
+Notice that the guide will not cover changes related to the cloud provider, ingresses or pod placement changes. Only changes related to KFD and its modules.
+
+## Upgrade procedure
+
+As a high-level overview, the upgrade procedure consists of:
+
+1. Upgrading KFD (all the core modules).
+2. Upgrading the Kubernetes cluster itself.
+
+### 1. Upgrade KFD
+
+KFD v1.22.1 and v1.23.3 run the same version of modules and the modules are compatible with both versions of Kubernetes (`1.22.x` and `1.23.y`).
+
+Upgrade is straightforward, proceed to upgrade Kubernetes.
+
+### 2. Upgrade Kubernetes
+
+Being that the underlying Kubernetes cluster could have been created in several different ways, the upgrade of Kubernetes itself is considered out of the scope of this guide.
+
+Please refer to the corresponding documentation for upgrade instructions.
+
+For clusters created with Furyctl:
+
+- [EKS Installer](https://github.com/sighupio/fury-eks-installer)
+- [GKE Installer](https://github.com/sighupio/fury-gke-installer)
+- [AKS Installer](https://github.com/sighupio/fury-aks-installer)
+
+For clusters created with Fury on-premises:
+
+- [KFD on-premises](https://github.com/sighupio/fury-kubernetes-on-premises/tree/main/examples/playbooks#upgrade-cluster)
diff --git a/docs/upgrades/v1.23.2-to-v1.23.3.md b/docs/upgrades/v1.23.2-to-v1.23.3.md
new file mode 100644
index 00000000..d6e6dd55
--- /dev/null
+++ b/docs/upgrades/v1.23.2-to-v1.23.3.md
@@ -0,0 +1,391 @@
+# Kubernetes Fury Distribution v1.23.2 to 1.23.3 Upgrade Guide
+
+This guide describes the steps to follow to upgrade the Kubernetes Fury Distribution (KFD) from v1.23.2 to v1.23.3.
+
+If you are running a custom set of modules, or different versions than the ones included with each release of KFD, please refer to each module's release notes.
+
+Notice that the guide will not cover changes related to the cloud provider, ingresses or pod placement changes. Only changes related to KFD and its modules.
+
+> âšī¸ **INFO**
+> starting from 1.22.1, 1.23.3 and 1.24.0, due to the size of some resources, you will need to use the `--server-side` flag when performing `kubectl apply`. Server-side apply behaves slightly differently than client-side, please read [the official documentation first](https://kubernetes.io/docs/reference/using-api/server-side-apply).
+
+> âī¸ **IMPORTANT**
+> we strongly recommend reading the whole guide before starting the upgrade process to identify possible blockers.
+
+> â ī¸ **WARNING**
+> the upgrade process involves downtime of some components.
+
+## Upgrade procedure
+
+As a high-level overview, the upgrade procedure consists of:
+
+1. Upgrading KFD (all the core modules).
+2. Upgrading the Kubernetes cluster itself.
+
+### 1. Upgrade KFD
+
+The suggested approach to upgrade the distribution is to do it one module at a time, to reduce the risk of errors and to make the process more manageable.
+
+#### Networking module upgrade
+
+To upgrade the Networking module to the new version, update the version on the `Furyfile.yml` file to the new version:
+
+```yaml
+versions:
+ networking: v1.10.0
+...
+```
+
+Then, download the new modules with `furyctl` with the following command:
+
+```bash
+furyctl vendor -H
+```
+
+Apply your Kustomize project that uses Networking module packages as bases with:
+
+> âī¸ **IMPORTANT** you may want to limit the scope of the command to only the networking module, otherwise, the first time you apply with `--server-side` other pods may also be restarted.
+>
+> The same applies to the rest of the modules, we will not include this warning in every step for simplicity.
+
+```bash
+kustomize build | kubectl apply -f - --server-side --force-conflicts
+```
+
+Wait until all Calico pods are restarted and running. You can check Calico's Grafana dashboard "General / Felix Dashboard (Calico)" and the "Networking / *" dashboards to make sure everything is working as expected.
+
+#### Monitoring module upgrade
+
+> â ī¸ **WARNING** downtime for the Monitoring stack is expected during this process.
+
+To upgrade the Monitoring module to the new version, update the version on the `Furyfile.yml` file to the new version:
+
+```yaml
+versions:
+...
+ monitoring: v2.0.1
+...
+```
+
+Then, download the new modules with `furyctl` with the following command:
+
+```bash
+furyctl vendor -H
+```
+
+This time, before applying the project, you need to do some manual steps on the existing resources:
+
+Since the new release ships changes to some immutable fields, the upgrade process will involve the deletion and recreation of some resources.
+
+```bash
+# Prometheus Operator
+kubectl delete deployments.apps prometheus-operator -n monitoring
+
+# Prometheus Operated
+kubectl delete poddisruptionbudgets.policy prometheus-k8s -n monitoring
+kubectl delete clusterrolebinding.rbac.authorization.k8s.io prometheus-k8s-scrape
+kubectl delete clusterroles.rbac.authorization.k8s.io prometheus-k8s-scrape
+kubectl delete prometheusrules.monitoring.coreos.com prometheus-k8s-rules -n monitoring
+
+# Alertmanager Operated
+kubectl delete poddisruptionbudget.policy alertmanager-main -n monitoring
+
+# Remove Goldpinger (deprecated)
+kubectl delete servicemonitor.monitoring.coreos.com goldpinger -n monitoring
+kubectl delete service goldpinger -n monitoring
+kubectl delete daemonset.apps goldpinger -n monitoring
+kubectl delete clusterrole.rbac.authorization.k8s.io goldpinger
+kubectl delete serviceaccount goldpinger -n monitoring
+kubectl delete rolebinding.rbac.authorization.k8s.io goldpinger:cluster:view -n monitoring
+kubectl delete -n monitoring configmaps goldpinger-grafana-dashboard
+
+# Grafana
+kubectl delete deployments.apps grafana -n monitoring
+
+# Kube Proxy Metrics
+kubectl delete deployments.apps kube-state-metrics -n monitoring
+
+# Remove Metrics Server (deprecated)
+kubectl delete apiservice.apiregistration.k8s.io v1beta1.metrics.k8s.io
+kubectl delete service metrics-server -n kube-system
+kubectl delete deployment.apps metrics-server -n kube-system
+kubectl delete clusterrolebinding.rbac.authorization.k8s.io metrics-server:system:auth-delegator
+kubectl delete clusterrolebinding.rbac.authorization.k8s.io system:metrics-server
+kubectl delete clusterrole.rbac.authorization.k8s.io system:aggregated-metrics-reader
+kubectl delete clusterrole.rbac.authorization.k8s.io system:metrics-server
+kubectl delete rolebinding.rbac.authorization.k8s.io metrics-server-auth-reader -n kube-system
+kubectl delete serviceaccount metrics-server -n kube-system
+kubectl delete certificate.cert-manager.io metrics-server-tls -n kube-system
+kubectl delete certificate.cert-manager.io metrics-server-ca -n kube-system
+kubectl delete issuer.cert-manager.io metrics-server-ca -n kube-system
+kubectl delete issuer.cert-manager.io metrics-server-selfsign -n kube-system
+kubectl delete secret metrics-server-ca metrics-server-tls -n kube-system
+# Node Exporter
+kubectl delete daemonsets.apps node-exporter -n monitoring
+
+# x509 Exporter
+kubectl delete serviceaccount x509-certificate-exporter-node -n monitoring
+kubectl delete clusterrole.rbac.authorization.k8s.io x509-certificate-exporter-node
+kubectl delete clusterrolebinding.rbac.authorization.k8s.io x509-certificate-exporter-node
+kubectl delete daemonset.apps x509-certificate-exporter-nodes -n monitoring
+```
+
+Replace `metrics-server` with `prometheus-adapter` package as a base in your project, to replace the functionalities provided by `metrics-server`.
+
+Delete `goldpinger` from your Kustomize resources.
+
+Add `blackbox-exporter` to your Kustomize base.
+
+Alertmanager configuration now expects 3 new secrets `infra-slack-webhook`, `k8s-slack-webhook` and `healthchecks-webhook` in the `monitoring` namespace with the endpoints where to send the alerts in the `url` key. We recommend you add them to your Kustomize base.
+
+Example commands to create the secrets:
+
+```shell
+$ kubectl create secret generic infra-slack-webhook -n monitoring --from-literal url=""
+secret/infra-slack-webhook created
+
+$ kubectl create secret generic healthchecks-webhook -n monitoring --from-literal url=""
+secret/healthchecks-webhook created
+
+$ kubectl create secret generic k8s-slack-webhook -n monitoring --from-literal url=""
+secret/k8s-slack-webhook created
+```
+
+Then apply your Kustomize project that uses Monitoring module packages as bases with:
+
+```bash
+kustomize build | kubectl apply -f - --server-side --force-conflicts
+```
+
+Wait a minute and check that you can see metrics in Grafana, both old and new, check that all Prometheus Targets are up and that Alertmanager is working as expected.
+
+#### Logging module upgrade
+
+> âšī¸ **INFO** the Logging module has gone under a big refactor, the ElasticSearch stack has been replaced with OpenSearch. Read carefully the instructions.
+
+> â ī¸ **WARNING** downtime of the Logging stack is expected during this process.
+
+To upgrade the Logging module to the new version, update the version on the `Furyfile.yml` file to the new version:
+
+```yaml
+versions:
+...
+ logging: v3.0.1
+...
+```
+
+Then, download the new modules with `furyctl` with the following command:
+
+```bash
+furyctl vendor -H
+```
+
+Since this upgrade changes the major version, there are some manual steps involving breaking changes that you need to do before applying the project:
+
+Remove the old `fluentd` and `fluentbit` stack:
+
+```bash
+kubectl delete ds fluentbit -n logging
+kubectl delete sts fluentd -n logging
+```
+
+Remove `fluentd`, `elasticsearch-single` (or `elasticsearch-triple`), `kibana` and `curator` from your Kustomize project and replace them with the `logging-operator`, `logging-operated`, `opensearch-single` or `opensearch-triple`, `opensearch-dashboards`, `configs` bases on your Kustomize project.
+
+Apply your Kustomize project that uses Logging module packages as bases with:
+
+```bash
+kustomize build | kubectl apply -f - --server-side --force-conflicts
+```
+
+> đĄ **TIP** you may need to apply twice or thrice because the new CRDs need some time to be available.
+
+> âšī¸ **INFO** index patterns may take a while to be created in OpenSearch Dashboards. There's a cronjob that runs every hour that creates them.
+
+All the logs will now flow to the new OpenSearch stack.
+
+> đĄ **TIP** don't forget to create the ingress for OpenSearch Dashboards (Kibana replacement).
+>
+> By default the service is called `opensearch-dashboards` in the `logging` namespace, and the web interface listens on the port `5601`.
+
+You can leave the old Elasticsearch/Kibana stack running and remove it after you've verified that everything is working as expected and you don't need the data stored in ElasticSearch anymore. To do so, run the following commands:
+
+```bash
+kubectl delete statefulset elasticsearch -n logging
+kubectl delete service elasticsearch -n logging
+kubectl delete prometheusrule es-rules -n logging
+kubectl delete servicemonitor elasticsearch -n logging
+kubectl delete deployment kibana -n logging
+kubectl delete service kibana -n logging
+kubectl delete cronjob curator -n logging
+```
+
+> âšī¸ **INFO** you may need to delete additional resources created in your Kustomize base, `Ingress` objects for example.
+
+> đĄ **TIP** we recommend leaving the ElasticSearch/Kibana stack up for a brief period (like 30 days) and then proceed to delete it.
+>
+> Beware that you'll need the necessary resources to have both solutions running simultaneously though.
+
+#### Ingress module upgrade
+
+> â ī¸ **WARNING** some downtime of the NGINX Ingress Controller is expected during the upgrade process.
+
+To upgrade the Ingress module to the new version, update the version on the `Furyfile.yml` file to the new version:
+
+```yaml
+versions:
+...
+ ingress: v1.13.1
+...
+```
+
+> đĄ **TIP1** `external-dns` [is now part of the Ingress module](https://github.com/sighupio/fury-kubernetes-ingress/tree/main/katalog/external-dns), you may want to switch to it if you were already using it.
+
+> đĄ **TIP2** if you are on AWS, we have added [2 new modules](https://github.com/sighupio/fury-kubernetes-ingress/tree/main/modules) to the Ingress modules to manage IAM permissions for cert-manager and external-dns.
+
+Then, download the new modules with `furyctl` with the following command:
+
+```bash
+furyctl vendor -H
+```
+
+> âī¸ **IMPORTANT** if you are using the **dual** NGINX Ingress Controller package, make sure that all your ingresses have the `.spec.ingressClass` field set and that they **don't have** the `kubernetes.io/ingress.class` annotation before proceeding.
+
+cert-manager has been bumped several versions, please check the upgrade guides in the official documentation. In particular, the update from v1.7 to v1.8 includes some changes to the `spec.privateKey.rotationPolicy` field, read carefully if you were using it or you had the `--feature-gates=ServerSideApply=true` flag in the cert-manager controller.
+
+Here you can find the relevant upgrade docs:
+
+- [Upgrading cert-manager from v1.5 to v1.6](https://cert-manager.io/docs/installation/upgrading/upgrading-1.5-1.6/)
+- [Upgrading cert-manager from v1.6 to v1.7](https://cert-manager.io/docs/installation/upgrading/upgrading-1.6-1.7/)
+- [Upgrading cert-manager from v1.7 to v1.8](https://cert-manager.io/docs/installation/upgrading/upgrading-1.7-1.8/)
+- [cert-manager upgrading overview](https://cert-manager.io/docs/installation/upgrading/)
+
+Apply your Kustomize project that uses Ingress module packages as bases with:
+
+```bash
+# For NGINX Ingress Controller SINGLE
+kubectl delete ingressclass nginx -n ingress-nginx
+# For NGINX Ingress Controller DUAL
+kubectl delete ingressclass external internal -n ingress-nginx
+# Delete cert-manager deployments to update labels
+kubectl delete -n cert-manager deployments.apps cert-manager cert-manager-webhook cert-manager-cainjector
+# finally
+kustomize build | kubectl apply -f - --server-side --force-conflicts
+```
+
+> âšī¸ **INFO** you may need to apply twice or thrice because a new Validating webhook is added with this release and it needs some time to come up.
+
+#### Disaster Recovery module upgrade
+
+To upgrade the Disaster Recovery module to the new version, update the version on the `Furyfile.yml` file to the new version:
+
+```yaml
+versions:
+...
+ dr: v1.10.0
+...
+```
+
+Then, download the new modules with `furyctl` with the following command:
+
+```bash
+furyctl vendor -H
+```
+
+Apply your Kustomize project that uses Disaster Recovery module packages as bases with:
+
+```bash
+kustomize build | kubectl apply -f - --server-side --force-conflicts
+```
+
+> âšī¸ **INFO** `velero-eks` has been deprecated, please use the new `aws-velero` terraform module instead in case you haven't migrated yet.
+
+Check that all velero's pods are up and running, you may want to manually trigger a backup to test that everything is working as expected. For example:
+
+```shell
+# create a backup
+velero backup create --from-schedule manifests test-upgrade -n kube-system
+# ... wait a moment
+# check that Phase is completed
+velero get backup -n kube-system test-upgrade
+# you may want to see some details
+velero backup describe test-upgrade -n kube-system
+```
+
+> đĄ **TIP** you can port-forward Minio's UI and log in to check that the backups are there.
+
+#### OPA module upgrade
+
+To upgrade the OPA module to the new version, update the version on the `Furyfile.yml` file to the new version:
+
+> â ī¸ **WARNING** the `http.send` OPA built-in is disabled. Check if there are custom rules using the built-in before proceeding. [Read here for more details](https://open-policy-agent.github.io/gatekeeper/website/docs/externaldata#motivation).
+
+```yaml
+versions:
+...
+ opa: v1.7.3
+...
+```
+
+Then, download the new modules with `furyctl` with the following command:
+
+```bash
+furyctl vendor -H
+```
+
+Apply your Kustomize project that uses OPA module packages as bases with:
+
+```bash
+kustomize build | kubectl apply -f - --server-side --force-conflicts
+```
+
+You can try to deploy a pod that is not compliant with the rules deployed in the cluster and also check in Gatekeeper Policy Manager for new violations of the constraints.
+
+> âšī¸ **INFO** seeing errors like `http: TLS handshake error from 172.16.0.3:42672: EOF` in Gatekeeper Controller logs is normal. The error is considered harmless. See [Gatekeeper's issue #2142 for reference](https://github.com/open-policy-agent/gatekeeper/issues/2142).
+
+#### Auth module upgrade
+
+The Auth module is a new addition to KFD, there is no previous version to upgrade from, but, you could have been using Pomerium, Dex and Gangway which were previously included in the Ingress and on-premises modules respectively.
+
+> âšī¸ **INFO** Pomerium's version has not changed and Dex has been updated for compatibility with Kubernetes 1.24.x, there are no breaking changes.
+
+If you were using these components, adjust your Kustomize project to use the new `auth` module as a base:
+
+```yaml
+versions:
+...
+ auth: v0.0.2
+...
+```
+
+Then, download the new modules with `furyctl` with the following command:
+
+```bash
+furyctl vendor -H
+```
+
+> đĄ **TIP** be sure to enable the `customHTMLTemplatesDir: /custom-templates` config option in Gangway's configuration to use the Fury branded templates.
+> See the [example configuration file](https://github.com/sighupio/fury-kubernetes-auth/blob/33ac4818232a155ee3920cfabf1b3eb2a9720e7f/katalog/gangway/example/gangway.yml#L73).
+
+Apply your Kustomize project that uses Auth module packages as bases with:
+
+```bash
+kustomize build | kubectl apply -f - --server-side --force-conflicts
+```
+
+đ **CONGRATULATIONS** you have now successfully updated all the core modules to KFD 1.23.3
+
+### 2. Upgrade Kubernetes
+
+Being that the underlying Kubernetes cluster could have been created in several different ways, the upgrade of Kubernetes itself is considered out of the scope of this guide.
+
+Please refer to the corresponding documentation for upgrade instructions.
+
+For clusters created with Furyctl:
+
+- [EKS Installer](https://github.com/sighupio/fury-eks-installer)
+- [GKE Installer](https://github.com/sighupio/fury-gke-installer)
+- [AKS Installer](https://github.com/sighupio/fury-aks-installer)
+
+For clusters created with Fury on-premises:
+
+- [KFD on-premises](https://github.com/sighupio/fury-kubernetes-on-premises/tree/main/examples/playbooks#upgrade-cluster)
diff --git a/docs/upgrades/v1.23.3-to-v1.24.0.md b/docs/upgrades/v1.23.3-to-v1.24.0.md
new file mode 100644
index 00000000..ec0d6395
--- /dev/null
+++ b/docs/upgrades/v1.23.3-to-v1.24.0.md
@@ -0,0 +1,36 @@
+# Kubernetes Fury Distribution v1.23.3 to 1.24.0 Upgrade Guide
+
+This guide describes the steps to follow to upgrade the Kubernetes Fury Distribution (KFD) from v1.23.3 to v1.24.0.
+
+If you are running a custom set of modules, or different versions than the ones included with each release of KFD, please refer to each module's release notes.
+
+Notice that the guide will not cover changes related to the cloud provider, ingresses or pod placement changes. Only changes related to KFD and its modules.
+
+## Upgrade procedure
+
+As a high-level overview, the upgrade procedure consists of:
+
+1. Upgrading KFD (all the core modules).
+2. Upgrading the Kubernetes cluster itself.
+
+### 1. Upgrade KFD
+
+KFD v1.23.3 and v1.24.0 run the same version of modules and the modules are compatible with both versions of Kubernetes (`1.23.x` and `1.24.y`).
+
+Upgrade is straightforward, proceed to upgrade Kubernetes.
+
+### 2. Upgrade Kubernetes
+
+Being that the underlying Kubernetes cluster could have been created in several different ways, the upgrade of Kubernetes itself is considered out of the scope of this guide.
+
+Please refer to the corresponding documentation for upgrade instructions.
+
+For clusters created with Furyctl:
+
+- [EKS Installer](https://github.com/sighupio/fury-eks-installer)
+- [GKE Installer](https://github.com/sighupio/fury-gke-installer)
+- [AKS Installer](https://github.com/sighupio/fury-aks-installer)
+
+For clusters created with Fury on-premises:
+
+- [KFD on-premises](https://github.com/sighupio/fury-kubernetes-on-premises/tree/main/examples/playbooks#upgrade-cluster)
diff --git a/kustomization.yaml b/kustomization.yaml
index c6529ba9..4ccb22f8 100644
--- a/kustomization.yaml
+++ b/kustomization.yaml
@@ -6,6 +6,8 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
+# NB: This is a starting point for a kustomization.yaml file. It is not meant to be used in production as is.
+
resources:
# Networking
- ./vendor/katalog/networking/calico
@@ -17,19 +19,19 @@ resources:
- ./vendor/katalog/monitoring/prometheus-operator
- ./vendor/katalog/monitoring/prometheus-operated
- ./vendor/katalog/monitoring/grafana
- - ./vendor/katalog/monitoring/goldpinger
- ./vendor/katalog/monitoring/kubeadm-sm
- ./vendor/katalog/monitoring/kube-proxy-metrics
- ./vendor/katalog/monitoring/kube-state-metrics
- ./vendor/katalog/monitoring/node-exporter
- - ./vendor/katalog/monitoring/metrics-server
+ - ./vendor/katalog/monitoring/prometheus-adapter
- ./vendor/katalog/monitoring/alertmanager-operated
# Logging
- - ./vendor/katalog/logging/elasticsearch-single
+ - ./vendor/katalog/logging/opensearch-single
+ - ./vendor/katalog/logging/opensearch-dashboards
- ./vendor/katalog/logging/cerebro
- - ./vendor/katalog/logging/curator
- - ./vendor/katalog/logging/fluentd
- - ./vendor/katalog/logging/kibana
+ - ./vendor/katalog/logging/logging-operator
+ - ./vendor/katalog/logging/logging-operated
+ - ./vendor/katalog/logging/configs
# Ingress
- ./vendor/katalog/ingress/cert-manager
- ./vendor/katalog/ingress/nginx
diff --git a/releases/v1.22.1/Furyfile.yml b/releases/v1.22.1/Furyfile.yml
new file mode 100644
index 00000000..7cea38b0
--- /dev/null
+++ b/releases/v1.22.1/Furyfile.yml
@@ -0,0 +1,25 @@
+# Copyright (c) 2022 SIGHUP s.r.l All rights reserved.
+# Use of this source code is governed by a BSD-style
+# license that can be found in the LICENSE file.
+
+versions:
+ networking: v1.10.0
+ monitoring: v2.0.1
+ logging: v3.0.1
+ ingress: v1.13.1
+ dr: v1.10.1
+ opa: v1.7.3
+ auth: v0.0.2
+
+bases:
+ - name: networking
+ - name: monitoring
+ - name: logging
+ - name: ingress
+ - name: dr
+ - name: opa
+ - name: auth
+
+modules:
+ - name: ingress
+ - name: dr
diff --git a/releases/v1.22.1/kustomization.yaml b/releases/v1.22.1/kustomization.yaml
new file mode 100644
index 00000000..4ccb22f8
--- /dev/null
+++ b/releases/v1.22.1/kustomization.yaml
@@ -0,0 +1,42 @@
+# Copyright (c) 2022 SIGHUP s.r.l All rights reserved.
+# Use of this source code is governed by a BSD-style
+# license that can be found in the LICENSE file.
+
+---
+apiVersion: kustomize.config.k8s.io/v1beta1
+kind: Kustomization
+
+# NB: This is a starting point for a kustomization.yaml file. It is not meant to be used in production as is.
+
+resources:
+ # Networking
+ - ./vendor/katalog/networking/calico
+ # OPA
+ - ./vendor/katalog/opa/gatekeeper/core
+ - ./vendor/katalog/opa/gatekeeper/rules/templates
+ - ./vendor/katalog/opa/gatekeeper/gpm
+ # Monitoring
+ - ./vendor/katalog/monitoring/prometheus-operator
+ - ./vendor/katalog/monitoring/prometheus-operated
+ - ./vendor/katalog/monitoring/grafana
+ - ./vendor/katalog/monitoring/kubeadm-sm
+ - ./vendor/katalog/monitoring/kube-proxy-metrics
+ - ./vendor/katalog/monitoring/kube-state-metrics
+ - ./vendor/katalog/monitoring/node-exporter
+ - ./vendor/katalog/monitoring/prometheus-adapter
+ - ./vendor/katalog/monitoring/alertmanager-operated
+ # Logging
+ - ./vendor/katalog/logging/opensearch-single
+ - ./vendor/katalog/logging/opensearch-dashboards
+ - ./vendor/katalog/logging/cerebro
+ - ./vendor/katalog/logging/logging-operator
+ - ./vendor/katalog/logging/logging-operated
+ - ./vendor/katalog/logging/configs
+ # Ingress
+ - ./vendor/katalog/ingress/cert-manager
+ - ./vendor/katalog/ingress/nginx
+ - ./vendor/katalog/ingress/forecastle
+ # DR
+ - ./vendor/katalog/dr/velero/velero-on-prem
+ - ./vendor/katalog/dr/velero/velero-schedules
+ - ./vendor/katalog/dr/velero/velero-restic
diff --git a/releases/v1.23.0/Furyfile.yml b/releases/v1.23.0/Furyfile.yml
new file mode 100644
index 00000000..055f9a22
--- /dev/null
+++ b/releases/v1.23.0/Furyfile.yml
@@ -0,0 +1,35 @@
+# Copyright (c) 2020 SIGHUP s.r.l All rights reserved.
+# Use of this source code is governed by a BSD-style
+# license that can be found in the LICENSE file.
+
+versions:
+ networking: v1.8.1
+ monitoring: v1.14.0
+ logging: v1.10.1
+ ingress: v1.12.1
+ dr: v1.9.1
+ opa: v1.6.1
+
+bases:
+ - name: networking/calico
+ - name: monitoring/prometheus-operator
+ - name: monitoring/prometheus-operated
+ - name: monitoring/grafana
+ - name: monitoring/goldpinger
+ - name: monitoring/configs
+ - name: monitoring/kubeadm-sm
+ - name: monitoring/kube-proxy-metrics
+ - name: monitoring/kube-state-metrics
+ - name: monitoring/node-exporter
+ - name: monitoring/metrics-server
+ - name: monitoring/alertmanager-operated
+ - name: logging/elasticsearch-single
+ - name: logging/cerebro
+ - name: logging/curator
+ - name: logging/fluentd
+ - name: logging/kibana
+ - name: ingress/cert-manager
+ - name: ingress/nginx
+ - name: ingress/forecastle
+ - name: dr/velero
+ - name: opa/gatekeeper
diff --git a/releases/v1.23.0/kustomization.yaml b/releases/v1.23.0/kustomization.yaml
new file mode 100644
index 00000000..d921e785
--- /dev/null
+++ b/releases/v1.23.0/kustomization.yaml
@@ -0,0 +1,36 @@
+# Copyright (c) 2020 SIGHUP s.r.l All rights reserved.
+# Use of this source code is governed by a BSD-style
+# license that can be found in the LICENSE file.
+
+bases:
+ # Networking
+ - ./vendor/katalog/networking/calico
+ # OPA
+ - ./vendor/katalog/opa/gatekeeper/core
+ - ./vendor/katalog/opa/gatekeeper/rules/templates
+ - ./vendor/katalog/opa/gatekeeper/gpm
+ # Monitoring
+ - ./vendor/katalog/monitoring/prometheus-operator
+ - ./vendor/katalog/monitoring/prometheus-operated
+ - ./vendor/katalog/monitoring/grafana
+ - ./vendor/katalog/monitoring/goldpinger
+ - ./vendor/katalog/monitoring/kubeadm-sm
+ - ./vendor/katalog/monitoring/kube-proxy-metrics
+ - ./vendor/katalog/monitoring/kube-state-metrics
+ - ./vendor/katalog/monitoring/node-exporter
+ - ./vendor/katalog/monitoring/metrics-server
+ - ./vendor/katalog/monitoring/alertmanager-operated
+ # Logging
+ - ./vendor/katalog/logging/elasticsearch-single
+ - ./vendor/katalog/logging/cerebro
+ - ./vendor/katalog/logging/curator
+ - ./vendor/katalog/logging/fluentd
+ - ./vendor/katalog/logging/kibana
+ # Ingress
+ - ./vendor/katalog/ingress/cert-manager
+ - ./vendor/katalog/ingress/nginx
+ - ./vendor/katalog/ingress/forecastle
+ # DR
+ - ./vendor/katalog/dr/velero/velero-on-prem
+ - ./vendor/katalog/dr/velero/velero-schedules
+ - ./vendor/katalog/dr/velero/velero-restic
diff --git a/releases/v1.23.1/Furyfile.yml b/releases/v1.23.1/Furyfile.yml
new file mode 100644
index 00000000..e3ee4fa8
--- /dev/null
+++ b/releases/v1.23.1/Furyfile.yml
@@ -0,0 +1,35 @@
+# Copyright (c) 2022 SIGHUP s.r.l All rights reserved.
+# Use of this source code is governed by a BSD-style
+# license that can be found in the LICENSE file.
+
+versions:
+ networking: v1.8.2
+ monitoring: v1.14.1
+ logging: v1.10.2
+ ingress: v1.12.2
+ dr: v1.9.2
+ opa: v1.6.2
+
+bases:
+ - name: networking/calico
+ - name: monitoring/prometheus-operator
+ - name: monitoring/prometheus-operated
+ - name: monitoring/grafana
+ - name: monitoring/goldpinger
+ - name: monitoring/configs
+ - name: monitoring/kubeadm-sm
+ - name: monitoring/kube-proxy-metrics
+ - name: monitoring/kube-state-metrics
+ - name: monitoring/node-exporter
+ - name: monitoring/metrics-server
+ - name: monitoring/alertmanager-operated
+ - name: logging/elasticsearch-single
+ - name: logging/cerebro
+ - name: logging/curator
+ - name: logging/fluentd
+ - name: logging/kibana
+ - name: ingress/cert-manager
+ - name: ingress/nginx
+ - name: ingress/forecastle
+ - name: dr/velero
+ - name: opa/gatekeeper
diff --git a/releases/v1.23.1/kustomization.yaml b/releases/v1.23.1/kustomization.yaml
new file mode 100644
index 00000000..c6529ba9
--- /dev/null
+++ b/releases/v1.23.1/kustomization.yaml
@@ -0,0 +1,40 @@
+# Copyright (c) 2022 SIGHUP s.r.l All rights reserved.
+# Use of this source code is governed by a BSD-style
+# license that can be found in the LICENSE file.
+
+---
+apiVersion: kustomize.config.k8s.io/v1beta1
+kind: Kustomization
+
+resources:
+ # Networking
+ - ./vendor/katalog/networking/calico
+ # OPA
+ - ./vendor/katalog/opa/gatekeeper/core
+ - ./vendor/katalog/opa/gatekeeper/rules/templates
+ - ./vendor/katalog/opa/gatekeeper/gpm
+ # Monitoring
+ - ./vendor/katalog/monitoring/prometheus-operator
+ - ./vendor/katalog/monitoring/prometheus-operated
+ - ./vendor/katalog/monitoring/grafana
+ - ./vendor/katalog/monitoring/goldpinger
+ - ./vendor/katalog/monitoring/kubeadm-sm
+ - ./vendor/katalog/monitoring/kube-proxy-metrics
+ - ./vendor/katalog/monitoring/kube-state-metrics
+ - ./vendor/katalog/monitoring/node-exporter
+ - ./vendor/katalog/monitoring/metrics-server
+ - ./vendor/katalog/monitoring/alertmanager-operated
+ # Logging
+ - ./vendor/katalog/logging/elasticsearch-single
+ - ./vendor/katalog/logging/cerebro
+ - ./vendor/katalog/logging/curator
+ - ./vendor/katalog/logging/fluentd
+ - ./vendor/katalog/logging/kibana
+ # Ingress
+ - ./vendor/katalog/ingress/cert-manager
+ - ./vendor/katalog/ingress/nginx
+ - ./vendor/katalog/ingress/forecastle
+ # DR
+ - ./vendor/katalog/dr/velero/velero-on-prem
+ - ./vendor/katalog/dr/velero/velero-schedules
+ - ./vendor/katalog/dr/velero/velero-restic
diff --git a/releases/v1.23.2/Furyfile.yml b/releases/v1.23.2/Furyfile.yml
new file mode 100644
index 00000000..781e98a7
--- /dev/null
+++ b/releases/v1.23.2/Furyfile.yml
@@ -0,0 +1,19 @@
+# Copyright (c) 2022 SIGHUP s.r.l All rights reserved.
+# Use of this source code is governed by a BSD-style
+# license that can be found in the LICENSE file.
+
+versions:
+ networking: v1.9.0
+ monitoring: v1.14.2
+ logging: v1.10.3
+ ingress: v1.12.2
+ dr: v1.9.2
+ opa: v1.6.2
+
+bases:
+ - name: networking
+ - name: monitoring
+ - name: logging
+ - name: ingress
+ - name: dr
+ - name: opa
diff --git a/releases/v1.23.2/kustomization.yaml b/releases/v1.23.2/kustomization.yaml
new file mode 100644
index 00000000..c6529ba9
--- /dev/null
+++ b/releases/v1.23.2/kustomization.yaml
@@ -0,0 +1,40 @@
+# Copyright (c) 2022 SIGHUP s.r.l All rights reserved.
+# Use of this source code is governed by a BSD-style
+# license that can be found in the LICENSE file.
+
+---
+apiVersion: kustomize.config.k8s.io/v1beta1
+kind: Kustomization
+
+resources:
+ # Networking
+ - ./vendor/katalog/networking/calico
+ # OPA
+ - ./vendor/katalog/opa/gatekeeper/core
+ - ./vendor/katalog/opa/gatekeeper/rules/templates
+ - ./vendor/katalog/opa/gatekeeper/gpm
+ # Monitoring
+ - ./vendor/katalog/monitoring/prometheus-operator
+ - ./vendor/katalog/monitoring/prometheus-operated
+ - ./vendor/katalog/monitoring/grafana
+ - ./vendor/katalog/monitoring/goldpinger
+ - ./vendor/katalog/monitoring/kubeadm-sm
+ - ./vendor/katalog/monitoring/kube-proxy-metrics
+ - ./vendor/katalog/monitoring/kube-state-metrics
+ - ./vendor/katalog/monitoring/node-exporter
+ - ./vendor/katalog/monitoring/metrics-server
+ - ./vendor/katalog/monitoring/alertmanager-operated
+ # Logging
+ - ./vendor/katalog/logging/elasticsearch-single
+ - ./vendor/katalog/logging/cerebro
+ - ./vendor/katalog/logging/curator
+ - ./vendor/katalog/logging/fluentd
+ - ./vendor/katalog/logging/kibana
+ # Ingress
+ - ./vendor/katalog/ingress/cert-manager
+ - ./vendor/katalog/ingress/nginx
+ - ./vendor/katalog/ingress/forecastle
+ # DR
+ - ./vendor/katalog/dr/velero/velero-on-prem
+ - ./vendor/katalog/dr/velero/velero-schedules
+ - ./vendor/katalog/dr/velero/velero-restic
diff --git a/releases/v1.23.3/Furyfile.yml b/releases/v1.23.3/Furyfile.yml
new file mode 100644
index 00000000..7cea38b0
--- /dev/null
+++ b/releases/v1.23.3/Furyfile.yml
@@ -0,0 +1,25 @@
+# Copyright (c) 2017-present SIGHUP s.r.l All rights reserved.
+# Use of this source code is governed by a BSD-style
+# license that can be found in the LICENSE file.
+
+versions:
+ networking: v1.10.0
+ monitoring: v2.0.1
+ logging: v3.0.1
+ ingress: v1.13.1
+ dr: v1.10.1
+ opa: v1.7.3
+ auth: v0.0.2
+
+bases:
+ - name: networking
+ - name: monitoring
+ - name: logging
+ - name: ingress
+ - name: dr
+ - name: opa
+ - name: auth
+
+modules:
+ - name: ingress
+ - name: dr
diff --git a/releases/v1.23.3/kustomization.yaml b/releases/v1.23.3/kustomization.yaml
new file mode 100644
index 00000000..4ccb22f8
--- /dev/null
+++ b/releases/v1.23.3/kustomization.yaml
@@ -0,0 +1,42 @@
+# Copyright (c) 2017-present SIGHUP s.r.l All rights reserved.
+# Use of this source code is governed by a BSD-style
+# license that can be found in the LICENSE file.
+
+---
+apiVersion: kustomize.config.k8s.io/v1beta1
+kind: Kustomization
+
+# NB: This is a starting point for a kustomization.yaml file. It is not meant to be used in production as is.
+
+resources:
+ # Networking
+ - ./vendor/katalog/networking/calico
+ # OPA
+ - ./vendor/katalog/opa/gatekeeper/core
+ - ./vendor/katalog/opa/gatekeeper/rules/templates
+ - ./vendor/katalog/opa/gatekeeper/gpm
+ # Monitoring
+ - ./vendor/katalog/monitoring/prometheus-operator
+ - ./vendor/katalog/monitoring/prometheus-operated
+ - ./vendor/katalog/monitoring/grafana
+ - ./vendor/katalog/monitoring/kubeadm-sm
+ - ./vendor/katalog/monitoring/kube-proxy-metrics
+ - ./vendor/katalog/monitoring/kube-state-metrics
+ - ./vendor/katalog/monitoring/node-exporter
+ - ./vendor/katalog/monitoring/prometheus-adapter
+ - ./vendor/katalog/monitoring/alertmanager-operated
+ # Logging
+ - ./vendor/katalog/logging/opensearch-single
+ - ./vendor/katalog/logging/opensearch-dashboards
+ - ./vendor/katalog/logging/cerebro
+ - ./vendor/katalog/logging/logging-operator
+ - ./vendor/katalog/logging/logging-operated
+ - ./vendor/katalog/logging/configs
+ # Ingress
+ - ./vendor/katalog/ingress/cert-manager
+ - ./vendor/katalog/ingress/nginx
+ - ./vendor/katalog/ingress/forecastle
+ # DR
+ - ./vendor/katalog/dr/velero/velero-on-prem
+ - ./vendor/katalog/dr/velero/velero-schedules
+ - ./vendor/katalog/dr/velero/velero-restic
diff --git a/releases/v1.24.0/Furyfile.yml b/releases/v1.24.0/Furyfile.yml
new file mode 100644
index 00000000..7cea38b0
--- /dev/null
+++ b/releases/v1.24.0/Furyfile.yml
@@ -0,0 +1,25 @@
+# Copyright (c) 2017-present SIGHUP s.r.l All rights reserved.
+# Use of this source code is governed by a BSD-style
+# license that can be found in the LICENSE file.
+
+versions:
+ networking: v1.10.0
+ monitoring: v2.0.1
+ logging: v3.0.1
+ ingress: v1.13.1
+ dr: v1.10.1
+ opa: v1.7.3
+ auth: v0.0.2
+
+bases:
+ - name: networking
+ - name: monitoring
+ - name: logging
+ - name: ingress
+ - name: dr
+ - name: opa
+ - name: auth
+
+modules:
+ - name: ingress
+ - name: dr
diff --git a/releases/v1.24.0/kustomization.yaml b/releases/v1.24.0/kustomization.yaml
new file mode 100644
index 00000000..f7927393
--- /dev/null
+++ b/releases/v1.24.0/kustomization.yaml
@@ -0,0 +1,43 @@
+# Copyright (c) 2017-present SIGHUP s.r.l All rights reserved.
+# Use of this source code is governed by a BSD-style
+# license that can be found in the LICENSE file.
+
+---
+apiVersion: kustomize.config.k8s.io/v1beta1
+kind: Kustomization
+
+# NB: This is a starting point for a kustomization.yaml file. It is not meant to be used in production as is.
+
+resources:
+ # Networking
+ - ./vendor/katalog/networking/calico
+ # OPA
+ - ./vendor/katalog/opa/gatekeeper/core
+ - ./vendor/katalog/opa/gatekeeper/rules/templates
+ - ./vendor/katalog/opa/gatekeeper/gpm
+ # Monitoring
+ - ./vendor/katalog/monitoring/prometheus-operator
+ - ./vendor/katalog/monitoring/prometheus-operated
+ - ./vendor/katalog/monitoring/grafana
+ - ./vendor/katalog/monitoring/kubeadm-sm
+ - ./vendor/katalog/monitoring/kube-proxy-metrics
+ - ./vendor/katalog/monitoring/kube-state-metrics
+ - ./vendor/katalog/monitoring/node-exporter
+ - ./vendor/katalog/monitoring/prometheus-adapter
+ - ./vendor/katalog/monitoring/alertmanager-operated
+ - ./vendor/katalog/monitoring/blackbox-exporter
+ # Logging
+ - ./vendor/katalog/logging/opensearch-single
+ - ./vendor/katalog/logging/opensearch-dashboards
+ - ./vendor/katalog/logging/cerebro
+ - ./vendor/katalog/logging/logging-operator
+ - ./vendor/katalog/logging/logging-operated
+ - ./vendor/katalog/logging/configs
+ # Ingress
+ - ./vendor/katalog/ingress/cert-manager
+ - ./vendor/katalog/ingress/nginx
+ - ./vendor/katalog/ingress/forecastle
+ # DR
+ - ./vendor/katalog/dr/velero/velero-on-prem
+ - ./vendor/katalog/dr/velero/velero-schedules
+ - ./vendor/katalog/dr/velero/velero-restic
diff --git a/tests/helper.bash b/tests/helper.bash
index 43f59487..63715563 100644
--- a/tests/helper.bash
+++ b/tests/helper.bash
@@ -3,7 +3,7 @@
apply (){
kustomize build $1 >&2
- kustomize build $1 | kubectl apply -f - 2>&3
+ kustomize build $1 | kubectl apply -f - --server-side 2>&3
}
delete (){
@@ -24,7 +24,12 @@ loop_it(){
loop_it_result=${ko}
while [[ ko -ne 0 ]]
do
- if [ $retry_counter -ge $max_retry ]; then echo "Timeout waiting a condition"; return 1; fi
+ if [ $retry_counter -ge $max_retry ]; then
+      echo "Timeout waiting for the command to succeed."
+ echo "Last command output was:"
+ echo "${output}"
+ return 1
+ fi
sleep ${wait_time} && echo "# waiting..." $retry_counter >&3
run ${1}
ko=${status}
diff --git a/tests/logging.sh b/tests/logging.sh
index b6015fcb..f976cab6 100644
--- a/tests/logging.sh
+++ b/tests/logging.sh
@@ -7,10 +7,10 @@
load ./helper
-@test "Elasticsearch is Running" {
+@test "OpenSearch is Running" {
info
test() {
- kubectl get pods -l app=elasticsearch -o json -n logging |jq '.items[].status.containerStatuses[].ready' | uniq | grep -q true
+ kubectl get pods -l app.kubernetes.io/name=opensearch -o json -n logging |jq '.items[].status.containerStatuses[].ready' | uniq | grep -q true
}
loop_it test 60 10
status=${loop_it_result}
@@ -30,17 +30,17 @@ load ./helper
@test "Fluentd is Running" {
info
test() {
- kubectl get pods -l app=fluentd -o json -n logging |jq '.items[].status.containerStatuses[].ready' | uniq | grep -q true
+ kubectl get pods -l app.kubernetes.io/name=fluentd -o json -n logging |jq '.items[].status.containerStatuses[].ready' | uniq | grep -q true
}
loop_it test 60 10
status=${loop_it_result}
[ "$status" -eq 0 ]
}
-@test "Kibana is Running" {
+@test "OpenSearch Dashboards is Running" {
info
test() {
- kubectl get pods -l app=kibana -o json -n logging |jq '.items[].status.containerStatuses[].ready' | uniq | grep -q true
+ kubectl get pods -l app=opensearch-dashboards -o json -n logging |jq '.items[].status.containerStatuses[].ready' | uniq | grep -q true
}
loop_it test 60 10
status=${loop_it_result}
diff --git a/tests/monitoring.sh b/tests/monitoring.sh
index 878367f9..5677a9a3 100644
--- a/tests/monitoring.sh
+++ b/tests/monitoring.sh
@@ -7,20 +7,10 @@
load ./helper
-@test "Goldpinger is Running" {
- info
- test() {
- kubectl get pods -l k8s-app=goldpinger -o json -n monitoring |jq '.items[].status.containerStatuses[].ready' | uniq | grep -q true
- }
- loop_it test 60 10
- status=${loop_it_result}
- [ "$status" -eq 0 ]
-}
-
@test "Grafana is Running" {
info
test() {
- kubectl get pods -l app=grafana -o json -n monitoring |jq '.items[].status.containerStatuses[].ready' | uniq | grep -q true
+ kubectl get pods -l app.kubernetes.io/name=grafana -o json -n monitoring |jq '.items[].status.containerStatuses[].ready' | uniq | grep -q true
}
loop_it test 60 10
status=${loop_it_result}
@@ -30,7 +20,7 @@ load ./helper
@test "Prometheus Operator is Running" {
info
test() {
- kubectl get pods -l k8s-app=prometheus-operator -o json -n monitoring |jq '.items[].status.containerStatuses[].ready' | uniq | grep -q true
+ kubectl get pods -l app.kubernetes.io/name=prometheus-operator -o json -n monitoring |jq '.items[].status.containerStatuses[].ready' | uniq | grep -q true
}
loop_it test 60 10
status=${loop_it_result}
@@ -40,7 +30,7 @@ load ./helper
@test "Kube State Metrics is Running" {
info
test() {
- kubectl get pods -l app=kube-state-metrics -o json -n monitoring |jq '.items[].status.containerStatuses[].ready' | uniq | grep -q true
+ kubectl get pods -l app.kubernetes.io/name=kube-state-metrics -o json -n monitoring |jq '.items[].status.containerStatuses[].ready' | uniq | grep -q true
}
loop_it test 60 10
status=${loop_it_result}
@@ -50,7 +40,7 @@ load ./helper
@test "Node Exporter is Running" {
info
test() {
- kubectl get pods -l app=node-exporter -o json -n monitoring |jq '.items[].status.containerStatuses[].ready' | uniq | grep -q true
+ kubectl get pods -l app.kubernetes.io/name=node-exporter -o json -n monitoring |jq '.items[].status.containerStatuses[].ready' | uniq | grep -q true
}
loop_it test 60 10
status=${loop_it_result}
@@ -68,16 +58,6 @@ load ./helper
[ "$status" -eq 0 ]
}
-@test "metrics-server is Running" {
- info
- test() {
- kubectl get pods -l app=metrics-server -o json -n kube-system |jq '.items[].status.containerStatuses[].ready' | uniq | grep -q true
- }
- loop_it test 60 10
- status=${loop_it_result}
- [ "$status" -eq 0 ]
-}
-
@test "kube-proxy-metrics is Running" {
info
test() {