From 00d5edca06c67177362aec91a4c0439f8c05a388 Mon Sep 17 00:00:00 2001 From: aptalca <541623+aptalca@users.noreply.github.com> Date: Wed, 24 Jan 2024 18:20:05 -0500 Subject: [PATCH 1/5] add multi-arch support to template --- .github/workflows/BuildImage.yml | 4 ++++ Dockerfile.complex | 16 ++++++++++++---- README.md | 2 +- 3 files changed, 17 insertions(+), 5 deletions(-) diff --git a/.github/workflows/BuildImage.yml b/.github/workflows/BuildImage.yml index 39a64e9b..8213c2f0 100644 --- a/.github/workflows/BuildImage.yml +++ b/.github/workflows/BuildImage.yml @@ -7,6 +7,7 @@ env: ENDPOINT: "linuxserver/mods" #don't modify BASEIMAGE: "replace_baseimage" #replace MODNAME: "replace_modname" #replace + MULTI_ARCH: "false" #set to true if needed jobs: set-vars: @@ -19,6 +20,7 @@ jobs: echo "ENDPOINT=${{ env.ENDPOINT }}" >> $GITHUB_OUTPUT echo "BASEIMAGE=${{ env.BASEIMAGE }}" >> $GITHUB_OUTPUT echo "MODNAME=${{ env.MODNAME }}" >> $GITHUB_OUTPUT + echo "MULTI_ARCH=${{ env.MULTI_ARCH }}" >> $GITHUB_OUTPUT # **** If the mod needs to be versioned, set the versioning logic below. Otherwise leave as is. **** MOD_VERSION="" echo "MOD_VERSION=${MOD_VERSION}" >> $GITHUB_OUTPUT @@ -27,6 +29,7 @@ jobs: ENDPOINT: ${{ steps.outputs.outputs.ENDPOINT }} BASEIMAGE: ${{ steps.outputs.outputs.BASEIMAGE }} MODNAME: ${{ steps.outputs.outputs.MODNAME }} + MULTI_ARCH: ${{ steps.outputs.outputs.MULTI_ARCH }} MOD_VERSION: ${{ steps.outputs.outputs.MOD_VERSION }} build: @@ -42,4 +45,5 @@ jobs: ENDPOINT: ${{ needs.set-vars.outputs.ENDPOINT }} BASEIMAGE: ${{ needs.set-vars.outputs.BASEIMAGE }} MODNAME: ${{ needs.set-vars.outputs.MODNAME }} + MULTI_ARCH: ${{ needs.set-vars.outputs.MULTI_ARCH }} MOD_VERSION: ${{ needs.set-vars.outputs.MOD_VERSION }} diff --git a/Dockerfile.complex b/Dockerfile.complex index c0b5cf72..e7a234d2 100644 --- a/Dockerfile.complex +++ b/Dockerfile.complex @@ -1,7 +1,7 @@ # syntax=docker/dockerfile:1 ## Buildstage ## -FROM ghcr.io/linuxserver/baseimage-alpine:3.17 as buildstage +FROM ghcr.io/linuxserver/baseimage-alpine:3.19 as buildstage RUN \ echo "**** install packages ****" && \ @@ -9,9 +9,17 @@ RUN \ curl && \ echo "**** grab rclone ****" && \ mkdir -p /root-layer && \ - curl -o \ - /root-layer/rclone.deb -L \ - "https://downloads.rclone.org/v1.47.0/rclone-v1.47.0-linux-amd64.deb" + if [[ $(uname -m) == "x86_64" ]]; then \ + echo "Downloading x86_64 tarball" && \ + curl -o \ + /root-layer/rclone.deb -L \ + "https://downloads.rclone.org/v1.47.0/rclone-v1.47.0-linux-amd64.deb"; \ + elif [[ $(uname -m) == "aarch64" ]]; then \ + echo "Downloading aarch64 tarball" && \ + curl -o \ + /root-layer/rclone.deb -L \ + "https://downloads.rclone.org/v1.47.0/rclone-v1.47.0-linux-arm64.deb"; \ + fi && \ # copy local files COPY root/ /root-layer/ diff --git a/README.md b/README.md index c8bfc60a..17006683 100644 --- a/README.md +++ b/README.md @@ -13,7 +13,7 @@ If adding multiple mods, enter them in an array separated by `|`, such as `DOCKE * Inspect the `root` folder contents. Edit, add and remove as necessary. * After all init scripts and services are created, run `find ./ -path "./.git" -prune -o \( -name "run" -o -name "finish" -o -name "check" \) -not -perm -u=x,g=x,o=x -print -exec chmod +x {} +` to fix permissions. * Edit this readme with pertinent info, delete these instructions. -* Finally edit the `.github/workflows/BuildImage.yml`. Customize the vars for `BASEIMAGE` and `MODNAME`. Set the versioning logic if needed. +* Finally edit the `.github/workflows/BuildImage.yml`. 
Customize the vars for `BASEIMAGE` and `MODNAME`. Set the versioning logic and `MULTI_ARCH` if needed. * Ask the team to create a new branch named `-`. Baseimage should be the name of the image the mod will be applied to. The new branch will be based on the `template` branch. * Submit PR against the branch created by the team. From c3f81ed55abd76c824b61de90eb0833f3d8af5cb Mon Sep 17 00:00:00 2001 From: aptalca <541623+aptalca@users.noreply.github.com> Date: Wed, 14 Feb 2024 12:09:55 -0500 Subject: [PATCH 2/5] use sh syntax --- Dockerfile.complex | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Dockerfile.complex b/Dockerfile.complex index e7a234d2..eb80898f 100644 --- a/Dockerfile.complex +++ b/Dockerfile.complex @@ -9,12 +9,12 @@ RUN \ curl && \ echo "**** grab rclone ****" && \ mkdir -p /root-layer && \ - if [[ $(uname -m) == "x86_64" ]]; then \ + if [ $(uname -m) = "x86_64" ]; then \ echo "Downloading x86_64 tarball" && \ curl -o \ /root-layer/rclone.deb -L \ "https://downloads.rclone.org/v1.47.0/rclone-v1.47.0-linux-amd64.deb"; \ - elif [[ $(uname -m) == "aarch64" ]]; then \ + elif [ $(uname -m) = "aarch64" ]; then \ echo "Downloading aarch64 tarball" && \ curl -o \ /root-layer/rclone.deb -L \ From e0970214f26f80601468406d8e1869b5e66cae99 Mon Sep 17 00:00:00 2001 From: aptalca <541623+aptalca@users.noreply.github.com> Date: Fri, 8 Mar 2024 09:56:47 -0500 Subject: [PATCH 3/5] Default MULTI_ARCH to true --- .github/workflows/BuildImage.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/BuildImage.yml b/.github/workflows/BuildImage.yml index 8213c2f0..6008e850 100644 --- a/.github/workflows/BuildImage.yml +++ b/.github/workflows/BuildImage.yml @@ -7,7 +7,7 @@ env: ENDPOINT: "linuxserver/mods" #don't modify BASEIMAGE: "replace_baseimage" #replace MODNAME: "replace_modname" #replace - MULTI_ARCH: "false" #set to true if needed + MULTI_ARCH: "true" #set to false if not needed jobs: set-vars: From c25d25227ff912f11b5240c3973891b94b3a49f2 Mon Sep 17 00:00:00 2001 From: labmonkey Date: Sun, 10 Mar 2024 15:16:47 +0100 Subject: [PATCH 4/5] - Refactored whole code so that its not such a mess - Updated documentation so that it explains better how the mod works, requirements, setting up labels and notifications - Reduced number of API calls when swag container restarts - Added better support for setting up notifications and fixed issue with default notifications not being applied - Fixed an issue with mod crashing when there were Manually added monitors in Uptime Kuma - Fixed an issue with labels that container numeric values - Added basic support for Monitor Groups - More log messages so that its easier to grasp what is happening - Fixed a crash when mod was executed while uptime kuma was not running (mod just gently stops now) --- .github/workflows/BuildImage.yml | 4 +- .gitignore | 2 + Dockerfile.complex | 33 -- README.md | 62 +++- root/app/auto-uptime-kuma.py | 195 +++++++---- root/app/auto_uptime_kuma/__init__.py | 0 root/app/auto_uptime_kuma/config_service.py | 84 +++++ .../docker_service.py} | 35 +- root/app/auto_uptime_kuma/log.py | 10 + .../auto_uptime_kuma/uptime_kuma_service.py | 320 ++++++++++++++++++ root/app/helpers.py | 20 -- root/app/swagUptimeKuma.py | 185 ---------- 12 files changed, 625 insertions(+), 325 deletions(-) delete mode 100644 Dockerfile.complex create mode 100644 root/app/auto_uptime_kuma/__init__.py create mode 100644 root/app/auto_uptime_kuma/config_service.py rename root/app/{swagDocker.py => 
auto_uptime_kuma/docker_service.py} (56%) create mode 100644 root/app/auto_uptime_kuma/log.py create mode 100644 root/app/auto_uptime_kuma/uptime_kuma_service.py delete mode 100644 root/app/helpers.py delete mode 100644 root/app/swagUptimeKuma.py diff --git a/.github/workflows/BuildImage.yml b/.github/workflows/BuildImage.yml index 6008e850..7638ca63 100644 --- a/.github/workflows/BuildImage.yml +++ b/.github/workflows/BuildImage.yml @@ -5,8 +5,8 @@ on: [push, pull_request_target, workflow_dispatch] env: GITHUB_REPO: "linuxserver/docker-mods" #don't modify ENDPOINT: "linuxserver/mods" #don't modify - BASEIMAGE: "replace_baseimage" #replace - MODNAME: "replace_modname" #replace + BASEIMAGE: "swag" #replace + MODNAME: "auto-uptime-kuma" #replace MULTI_ARCH: "true" #set to false if not needed jobs: diff --git a/.gitignore b/.gitignore index 96374c4e..7bd333f5 100644 --- a/.gitignore +++ b/.gitignore @@ -41,3 +41,5 @@ $RECYCLE.BIN/ Network Trash Folder Temporary Items .apdisk + +__pycache__ \ No newline at end of file diff --git a/Dockerfile.complex b/Dockerfile.complex deleted file mode 100644 index eb80898f..00000000 --- a/Dockerfile.complex +++ /dev/null @@ -1,33 +0,0 @@ -# syntax=docker/dockerfile:1 - -## Buildstage ## -FROM ghcr.io/linuxserver/baseimage-alpine:3.19 as buildstage - -RUN \ - echo "**** install packages ****" && \ - apk add --no-cache \ - curl && \ - echo "**** grab rclone ****" && \ - mkdir -p /root-layer && \ - if [ $(uname -m) = "x86_64" ]; then \ - echo "Downloading x86_64 tarball" && \ - curl -o \ - /root-layer/rclone.deb -L \ - "https://downloads.rclone.org/v1.47.0/rclone-v1.47.0-linux-amd64.deb"; \ - elif [ $(uname -m) = "aarch64" ]; then \ - echo "Downloading aarch64 tarball" && \ - curl -o \ - /root-layer/rclone.deb -L \ - "https://downloads.rclone.org/v1.47.0/rclone-v1.47.0-linux-arm64.deb"; \ - fi && \ - -# copy local files -COPY root/ /root-layer/ - -## Single layer deployed image ## -FROM scratch - -LABEL maintainer="username" - -# Add files from buildstage -COPY --from=buildstage /root-layer/ / diff --git a/README.md b/README.md index ed8dcbc8..b5d006f1 100644 --- a/README.md +++ b/README.md @@ -4,11 +4,13 @@ This mod gives SWAG the ability to automatically add Uptime Kuma "Monitors" for ## Requirements -Running [Uptime Kuma](https://github.com/louislam/uptime-kuma) instance with `username` and `password` configured. The container should be in the same [user defined bridge network](https://docs.linuxserver.io/general/swag#docker-networking) as SWAG. +- This mod needs the [universal-docker mod](https://github.com/linuxserver/docker-mods/tree/universal-docker) installed and set up with either mapping docker.sock or setting the environment variable `DOCKER_HOST=remoteaddress`. +- Other containers to be auto-detected and reverse proxied should be in the same [user defined bridge network](https://docs.linuxserver.io/general/swag#docker-networking) as SWAG. +- A running [Uptime Kuma](https://github.com/louislam/uptime-kuma) instance (at least version `1.21.3`) with `username` and `password` configured. Also in the same network as mentioned above. ## Installation -In SWAG docker arguments, set an environment variable `DOCKER_MODS=linuxserver/mods:swag-auto-uptime-kuma`. +In SWAG docker arguments, set an environment variable `DOCKER_MODS=linuxserver/mods:universal-docker|linuxserver/mods:swag-auto-uptime-kuma`. 
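The environment variables listed below are consumed by the mod's Python entrypoint. As a quick orientation, here is a condensed, non-authoritative sketch of how `root/app/auto-uptime-kuma.py` (shown in full later in this patch series) wires them together; the example Uptime Kuma address is an assumption, not part of the mod:

```
import os
import sys

from auto_uptime_kuma.config_service import ConfigService
from auto_uptime_kuma.docker_service import DockerService
from auto_uptime_kuma.uptime_kuma_service import UptimeKumaService

# All four variables are read straight from the container environment.
url = os.environ["UPTIME_KUMA_URL"]  # e.g. http://uptime-kuma:3001 (example value)
username = os.environ["UPTIME_KUMA_USERNAME"]
password = os.environ["UPTIME_KUMA_PASSWORD"]
domain_name = os.environ["URL"]  # already required by SWAG itself

config_service = ConfigService(domain_name)
uptime_kuma_service = UptimeKumaService(config_service)
docker_service = DockerService("swag.uptime-kuma")

# If Uptime Kuma is unreachable, the mod stops gently instead of crashing.
if not uptime_kuma_service.connect(url, username, password):
    sys.exit()
```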
Add additional environment variables to the SWAG docker image:

@@ -20,6 +22,8 @@ Add additional environment variables to the SWAG docker image:

Unfortunately, Uptime Kuma does not provide API keys for its Socket.io API at the moment, so a username and password have to be used.

+This mod additionally reads the `URL` environment variable, which is part of the SWAG configuration itself.
+
Finally, add the `swag.uptime-kuma.enabled=true` label, at minimum, to each of the containers that you wish to monitor. More types of labels are listed in the next section.

## Labels

@@ -33,11 +37,59 @@ This mod is utilizing the wonderful [Uptime Kuma API](https://github.com/lucashe

| `swag.uptime-kuma.monitor.url` | `https://{containerName}.{domainName}` | `https://radarr.domain.com/`<br>
`https://pihole.domain.com/admin/` | By default, the URL of each container is built based on the actual container name (`{containerName}`) defined in docker and the value of the `URL` environment variable (`{domainName}`) defined in SWAG (as required by SWAG itself). |
| `swag.uptime-kuma.monitor.type` | http | http | While it is technically possible to override the monitor type, the purpose of this mod is to monitor HTTP endpoints. |
| `swag.uptime-kuma.monitor.description` | Automatically generated by SWAG auto-uptime-kuma | My own description | The description is only for informational purposes and can be freely changed. |
| `swag.uptime-kuma.monitor.parent` | | `"Media Servers"`, `"Tools"`, `"2137"` | A "special" label that can be used to create Monitor Groups. The value can be the name of a group, which will then be dynamically created if it does not exist. A group name has to be unique (different from any of your container names). Alternatively, the ID of a group can be used (it can be found in the URL when editing the group in Uptime Kuma). Please note that this mod can only define the name of a group. If you want to edit additional parameters of the group, it is best to create it manually and use its ID as the value here. |
| `swag.uptime-kuma.monitor.*` | | `swag.uptime-kuma.monitor.maxretries=5`<br>
`swag.uptime-kuma.monitor.accepted_statuscodes=200-299,404,501` | There are many more properties to configure. The fact that anything can be changed does not mean that it should be. Some properties or combinations may not work and should only be changed if you know what you are doing. Please check the [Uptime Kuma API](https://uptime-kuma-api.readthedocs.io/en/latest/api.html#uptime_kuma_api.UptimeKumaApi.add_monitor) for more examples. Properties that are expected to be lists should be separated by a comma (`,`). |

### Setting default values for all containers

This mod does not have the ability to set global default values for your Monitors. If you would like some label values to be the same for all of the monitored containers, you have a few options:

- If you are using docker-compose, there are many ways of setting defaults, such as [Extensions](https://docs.docker.com/compose/multiple-compose-files/extends/), [Fragments](https://docs.docker.com/compose/compose-file/10-fragments/) or [Extends](https://docs.docker.com/compose/multiple-compose-files/extends/).

  Here is how I am using `extends` myself:

  `docker-compose.template.yml`
  ```
  services:
    monitored:
      labels:
        swag.uptime-kuma.enabled: true
        swag.uptime-kuma.monitor.interval: 69
        swag.uptime-kuma.monitor.retryInterval: 300
        swag.uptime-kuma.monitor.maxretries: 10
  ```
  `docker-compose.yml`
  ```
  services:
    bitwarden:
      extends:
        file: docker-compose.template.yml
        service: monitored
      # ... some other stuff
      labels:
        swag: enable
        whatever.else: hello
        swag.uptime-kuma.monitor.interval: 123 # label specific to this container
  ```
  If you define it as above, the labels will be merged and/or overridden, resulting in:
  ```
  ...
  labels:
    swag: enable
    whatever.else: hello
    swag.uptime-kuma.enabled: true
    swag.uptime-kuma.monitor.interval: 123 # overridden
    swag.uptime-kuma.monitor.retryInterval: 300
    swag.uptime-kuma.monitor.maxretries: 10
  ```

- If you are using the docker cli, you can either define your labels with a common variable or use a common label file for the monitored containers ([more info here](https://docs.docker.com/reference/cli/docker/container/run/#label)).

- If you are deploying your containers in any other way, please look into your tool's documentation for its templating features.

## Notifications

While ultimately this mod makes it easier to set up notifications for your docker containers, it does not configure anything beyond Uptime Kuma Monitors. In order to receive Notifications, you should configure them manually and then either enable one type as the default for all your Monitors or specify the Notifications by using the `swag.uptime-kuma.monitor.notificationIDList` label. Please note that if you define one or more notifications in Uptime Kuma as default (enabled by default for new monitors), the default notifications will always be appended to the list, even if you specify a custom `notificationIDList` via labels. 
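The append behaviour boils down to a set union. Here is a minimal sketch (with a hypothetical standalone helper name) of what `build_monitor_data()` in `root/app/auto_uptime_kuma/uptime_kuma_service.py` does with the notification IDs:

```
def merge_notification_ids(label_ids, default_ids):
    # Default notifications are always appended; duplicates are dropped.
    # The order of the resulting list is not significant for the API.
    return list(set(label_ids + default_ids))

# A container labelled swag.uptime-kuma.monitor.notificationIDList=3,5
# while notifications 1 and 3 are marked as default in Uptime Kuma:
print(sorted(merge_notification_ids([3, 5], [1, 3])))  # [1, 3, 5]
```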
## Known Limitations

@@ -47,6 +99,8 @@ While ultimately this mod makes it easier to setup notifications for your docker

- Due to limitations of the Uptime Kuma API, whenever you change a container or its labels after a Monitor has already been set up, the **Update** action will be performed by running **Delete** followed by **Add**. This means that every change results in a new Monitor for the same container, which loses the heartbeat history and all manual changes, and gets a new 'id' number.

-## Purge data
+## Command Line mode

For the purpose of development, or simply if you want to purge all the Monitors and files created by this mod, you can run the following command via `ssh`: `docker exec swag python3 /app/auto-uptime-kuma.py -purge` (where `swag` is the container name of your SWAG instance).
+
+It is also possible to fetch and print the raw API data of a Monitor from the Uptime Kuma API via `ssh`: `docker exec swag python3 /app/auto-uptime-kuma.py -monitor container_name` (where `container_name` is the name of the container that the Monitor belongs to).
\ No newline at end of file
diff --git a/root/app/auto-uptime-kuma.py b/root/app/auto-uptime-kuma.py
index 628da0d9..3feec1a8 100644
--- a/root/app/auto-uptime-kuma.py
+++ b/root/app/auto-uptime-kuma.py
@@ -1,68 +1,135 @@
-from swagDocker import SwagDocker
-from swagUptimeKuma import SwagUptimeKuma
-import sys
-import argparse
-import os
-
-
-def parseCommandLine():
-    """
-    Different application behavior if executed from CLI
-    """
-    parser = argparse.ArgumentParser()
-    parser.add_argument('-purge', action='store_true')
-    args = parser.parse_args()
-
-    if (args.purge == True):
-        swagUptimeKuma.purgeData()
-        swagUptimeKuma.disconnect()
-        sys.exit(0)
-
-
-def addOrUpdateMonitors(domainName, swagContainers):
-    for swagContainer in swagContainers:
-        containerConfig = swagDocker.parseContainerLabels(
-            swagContainer.labels, ".monitor.")
-        containerName = swagContainer.name
-        monitorData = swagUptimeKuma.parseMonitorData(
-            containerName, domainName, containerConfig)
-
-        if (not swagUptimeKuma.monitorExists(containerName)):
-            swagUptimeKuma.addMonitor(containerName, domainName, monitorData)
+from auto_uptime_kuma.config_service import ConfigService
+from auto_uptime_kuma.uptime_kuma_service import UptimeKumaService
+from auto_uptime_kuma.docker_service import DockerService
+from auto_uptime_kuma.log import Log
+import sys, os
+
+
+def add_or_update_monitors(
+    docker_service: DockerService,
+    config_service: ConfigService,
+    uptime_kuma_service: UptimeKumaService,
+):
+    for container in docker_service.get_swag_containers():
+        container_config = docker_service.parse_container_labels(
+            container.labels, ".monitor."
+ ) + container_name = container.name + monitor_data = uptime_kuma_service.build_monitor_data( + container_name, container_config + ) + + if not uptime_kuma_service.monitor_exists(container_name): + uptime_kuma_service.create_monitor(container_name, container_config) else: - swagUptimeKuma.updateMonitor( - containerName, domainName, monitorData) - - -def getMonitorsToBeRemoved(swagContainers, apiMonitors): - # Monitors to be removed are those that no longer have an existing container - # Monitor <-> Container link is done by comparing the container name with the monitor swag tag value - existingMonitorNames = [swagUptimeKuma.getMonitorSwagTagValue( - monitor) for monitor in apiMonitors] - existingContainerNames = [container.name for container in swagContainers] - - monitorsToBeRemoved = [ - containerName for containerName in existingMonitorNames if containerName not in existingContainerNames] - return monitorsToBeRemoved + if not config_service.config_exists(container_name): + Log.info( + f"Monitor '{monitor_data['name']}' for container '{container_name}'" + " exists but no preset config found, generating from scratch" + ) + config_service.create_config(container_name, monitor_data) + uptime_kuma_service.edit_monitor(container_name, monitor_data) + + +def delete_removed_monitors( + docker_service: DockerService, uptime_kuma_service: UptimeKumaService +): + Log.info("Searching for Monitors that should be deleted") + # Monitors to be deleted are those that no longer have an existing container + # Monitor <-> Container link is done by comparing the container name + # with the monitor swag tag value + existing_monitor_names = [ + uptime_kuma_service.get_monitor_swag_tag_value(monitor) + for monitor in uptime_kuma_service.monitors + ] + existing_container_names = [ + container.name for container in docker_service.get_swag_containers() + ] + + monitors_to_be_deleted = [ + containerName + for containerName in existing_monitor_names + if containerName not in existing_container_names + ] + + monitors_to_be_deleted = list(filter(None, monitors_to_be_deleted)) + + uptime_kuma_service.delete_monitors(monitors_to_be_deleted) + + +def delete_removed_groups(uptime_kuma_service: UptimeKumaService): + Log.info("Searching for Groups that should be deleted") + # Groups to be deleted are those that no longer have any child Monitors + existing_monitor_group_ids = [ + monitor["parent"] for monitor in uptime_kuma_service.monitors + ] + + # remove empty values + existing_monitor_group_ids = list(filter(None, existing_monitor_group_ids)) + # get unique values + existing_monitor_group_ids = list(set(existing_monitor_group_ids)) + + groups_to_be_deleted = [] + + for group in uptime_kuma_service.groups: + if group["id"] not in existing_monitor_group_ids: + groups_to_be_deleted.append(group["name"]) + + uptime_kuma_service.delete_groups(groups_to_be_deleted) + + +def execute_cli_mode( + config_service: ConfigService, uptime_kuma_service: UptimeKumaService +): + Log.info("Mod was executed from CLI. 
Running manual tasks.") + args = config_service.get_cli_args() + if args.purge: + uptime_kuma_service.purge_data() + + config_service.purge_data() + if args.monitor: + Log.info(f"Requesting data for Monitor '{args.monitor}'") + print(uptime_kuma_service.get_monitor(args.monitor)) + + uptime_kuma_service.disconnect() if __name__ == "__main__": - url = os.environ['UPTIME_KUMA_URL'] - username = os.environ['UPTIME_KUMA_USERNAME'] - password = os.environ['UPTIME_KUMA_PASSWORD'] - domainName = os.environ['URL'] - - swagDocker = SwagDocker("swag.uptime-kuma") - swagUptimeKuma = SwagUptimeKuma(url, username, password) - - parseCommandLine() - - swagContainers = swagDocker.getSwagContainers() - - addOrUpdateMonitors(domainName, swagContainers) - - monitorsToBeRemoved = getMonitorsToBeRemoved( - swagContainers, swagUptimeKuma.apiMonitors) - swagUptimeKuma.deleteMonitors(monitorsToBeRemoved) - - swagUptimeKuma.disconnect() + Log.init("mod-auto-uptime-kuma") + + url = os.environ["UPTIME_KUMA_URL"] + username = os.environ["UPTIME_KUMA_USERNAME"] + password = os.environ["UPTIME_KUMA_PASSWORD"] + domainName = os.environ["URL"] + + configService = ConfigService(domainName) + uptimeKumaService = UptimeKumaService(configService) + dockerService = DockerService("swag.uptime-kuma") + is_connected = uptimeKumaService.connect(url, username, password) + + if not is_connected: + sys.exit() + + uptimeKumaService.load_data() + if uptimeKumaService.default_notifications: + notification_names = [ + f"{notification['id']}:{notification['name']}" + for notification in uptimeKumaService.default_notifications + ] + Log.info( + f"The following notifications are enabled by default: {notification_names}" + ) + + if configService.is_cli_mode(): + execute_cli_mode(configService, uptimeKumaService) + sys.exit() + + add_or_update_monitors(dockerService, configService, uptimeKumaService) + + # reload data after the sync above + uptimeKumaService.load_data() + # cleanup + delete_removed_monitors(dockerService, uptimeKumaService) + delete_removed_groups(uptimeKumaService) + + uptimeKumaService.disconnect() diff --git a/root/app/auto_uptime_kuma/__init__.py b/root/app/auto_uptime_kuma/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/root/app/auto_uptime_kuma/config_service.py b/root/app/auto_uptime_kuma/config_service.py new file mode 100644 index 00000000..af558f5f --- /dev/null +++ b/root/app/auto_uptime_kuma/config_service.py @@ -0,0 +1,84 @@ +import os +import sys +import argparse +from auto_uptime_kuma.log import Log +from uptime_kuma_api.api import MonitorType + + +class ConfigService: + config_dir = "/auto-uptime-kuma" + domain_name: str + + def __init__(self, domain_name): + self.domain_name = domain_name + if not os.path.exists(self.config_dir): + Log.info(f"Creating config directory '{self.config_dir}'") + os.makedirs(self.config_dir) + + def is_cli_mode(self): + """ + Different application behavior if executed from CLI + """ + return len(sys.argv) > 1 + + def get_cli_args(self): + parser = argparse.ArgumentParser() + parser.add_argument("-purge", action="store_true") + parser.add_argument("-monitor", type=str) + return parser.parse_args() + + def merge_dicts(self, *dict_args): + result = {} + for dictionary in dict_args: + result.update(dictionary) + return result + + def create_config(self, container_name, monitor_data): + content = self.build_config_content(monitor_data) + self.write_config_content(container_name, content) + + def config_exists(self, container_name): + return 
os.path.exists(f"{self.config_dir}/{container_name.lower()}.conf") + + def build_config_content(self, monitor_data): + """ + In order to compare if container labels were changed the contents + are stored in config files for each container. + """ + content = "" + for key, value in monitor_data.items(): + content += f"{key}={value}\n" + return content.strip() + + def read_config_content(self, container_name): + if not self.config_exists(container_name): + return "" + + file_name = f"{self.config_dir}/{container_name.lower()}.conf" + with open(file_name, "r") as file: + return file.read().strip() + + def write_config_content(self, container_name, content): + with open(f"{self.config_dir}/{container_name.lower()}.conf", "w+") as file: + file.write(content) + + def purge_data(self): + """ + Deletes all of the files created with this script + """ + Log.info("Purging all Docker container configuration added by this mod") + + if os.path.exists(self.config_dir): + Log.info(f"Purging config directory '{self.config_dir}' and its content") + file_list = os.listdir(self.config_dir) + + for filename in file_list: + file_path = os.path.join(self.config_dir, filename) + if os.path.isfile(file_path): + os.remove(file_path) + Log.info(f"Removed '{file_path}' file") + + os.rmdir(self.config_dir) + Log.info(f"Removed '{self.config_dir}' directory") + + Log.info("Config purging finished") diff --git a/root/app/swagDocker.py b/root/app/auto_uptime_kuma/docker_service.py similarity index 56% rename from root/app/swagDocker.py rename to root/app/auto_uptime_kuma/docker_service.py index dd12ab59..ac142b21 100644 --- a/root/app/swagDocker.py +++ b/root/app/auto_uptime_kuma/docker_service.py @@ -1,20 +1,20 @@ import docker -class SwagDocker: +class DockerService: """ A service class for interacting with Docker containers that are used by SWAG mods. 
""" client = None _containers = None - _labelPrefix = None + label_prefix = None - def __init__(self, labelPrefix: str): - self._labelPrefix = labelPrefix + def __init__(self, label_prefix: str): + self.label_prefix = label_prefix self.client = docker.from_env() - def getSwagContainers(self): + def get_swag_containers(self): """ Retrieve Docker containers filtered by "swag.my_mod.enabled=true": >>> swag = SwagDocker("swag.my_mod") @@ -22,15 +22,16 @@ def getSwagContainers(self): """ if self._containers is None: self._containers = self.client.containers.list( - filters={"label": [f"{self._labelPrefix}.enabled=true"]}) + filters={"label": [f"{self.label_prefix}.enabled=true"]} + ) return self._containers - def parseContainerLabels(self, containerLabels, extraPrefix=""): + def parse_container_labels(self, container_labels, extra_prefix=""): """ Having following example container labels: swag.my_mod.enabled: true - swag.my_mod.config.apple: "123" - swag.my_mod.config.orange: "456" + swag.my_mod.config.apple: "123" + swag.my_mod.config.orange: "456" >>> for container in containers: >>> containerConfigA = swagDocker.parseContainerLabels(container.labels) @@ -38,13 +39,13 @@ def parseContainerLabels(self, containerLabels, extraPrefix=""): >>> containerConfigB = swagDocker.parseContainerLabels(container.labels, ".config.") # Above will return {"apple": "123", "orange": "456"} """ - filteredContainerLabels = {} - fullPrefix = f"{self._labelPrefix}{extraPrefix}" - prefix_length = len(fullPrefix) + filtered_container_labels = {} + full_prefix = f"{self.label_prefix}{extra_prefix}" + prefix_length = len(full_prefix) - for label, value in containerLabels.items(): - if label.startswith(fullPrefix): - parsedLabel = label[prefix_length:] - filteredContainerLabels[parsedLabel] = value + for label, value in container_labels.items(): + if label.startswith(full_prefix): + parsed_label = label[prefix_length:] + filtered_container_labels[parsed_label] = value - return filteredContainerLabels + return filtered_container_labels diff --git a/root/app/auto_uptime_kuma/log.py b/root/app/auto_uptime_kuma/log.py new file mode 100644 index 00000000..0408f489 --- /dev/null +++ b/root/app/auto_uptime_kuma/log.py @@ -0,0 +1,10 @@ +class Log: + prefix: str + + @staticmethod + def init(prefix): + Log.prefix = prefix + + @staticmethod + def info(message): + print(f"[{Log.prefix}] {message}") diff --git a/root/app/auto_uptime_kuma/uptime_kuma_service.py b/root/app/auto_uptime_kuma/uptime_kuma_service.py new file mode 100644 index 00000000..ff45813f --- /dev/null +++ b/root/app/auto_uptime_kuma/uptime_kuma_service.py @@ -0,0 +1,320 @@ +import requests +from uptime_kuma_api.api import UptimeKumaApi, MonitorType +from auto_uptime_kuma.log import Log +from auto_uptime_kuma.config_service import ConfigService + + +class UptimeKumaService: + + api: UptimeKumaApi + swag_tag_name = "swag" + swag_tag_color = "#ff4f97" + swag_tag = None + monitors: list + groups: list + default_notifications: list + + config_service: ConfigService + + default_monitor_data = { + "type": MonitorType.HTTP, + "description": "Automatically generated by SWAG auto-uptime-kuma", + } + + def __init__(self, config_service): + self.config_service = config_service + + def connect(self, url, username, password): + response = requests.get(url, allow_redirects=True, timeout=5) + if response.status_code != 200: + Log.info( + f"Unable to connect to UptimeKuma at '{url}' (Status code: {response.status_code})." + " Please check if the host is running." 
+ ) + return False + + self.api = UptimeKumaApi(url) + self.api.login(username, password) + + return True + + def disconnect(self): + """ + API has to be disconnected at the end as the connection is blocking + """ + self.api.disconnect() + + def load_data(self): + monitors = self.api.get_monitors() + self.get_swag_tag() + + self.default_notifications = self.get_default_notifications() + + self.monitors = [ + monitor + for monitor in monitors + if monitor["type"] != MonitorType.GROUP + and self.get_monitor_swag_tag_value(monitor) is not None + ] + self.groups = [ + group + for group in monitors + if group["type"] == MonitorType.GROUP + and self.get_monitor_swag_tag_value(group) is not None + ] + + def build_monitor_data(self, container_name, configured_monitor_data): + """ + Some of the container label values might have to be converted before sending to API. + Additionally merge default config with label config. + """ + result_data = dict(self.default_monitor_data) + result_data.update( + { + "name": container_name.title(), + "url": f"https://{container_name}.{self.config_service.domain_name}", + } + ) + result_data.update(configured_monitor_data) + + # Convert strings that are lists in API + for key in ["accepted_statuscodes", "notificationIDList"]: + if key in result_data and isinstance(result_data[key], str): + result_data[key] = result_data[key].split(",") + + # If Monitor Groups are used then create them if needed and switch into ID as value + if "parent" in result_data and not str(result_data["parent"]).isdigit(): + if self.group_exists(result_data["parent"]): + group_data = self.get_group(result_data["parent"]) + else: + group_data = self.create_group(result_data["parent"]) + result_data["parent"] = group_data["id"] + + if self.default_notifications: + default_notification_ids = [ + notification["id"] for notification in self.default_notifications + ] + if "notificationIDList" in result_data: + result_data["notificationIDList"] += default_notification_ids + result_data["notificationIDList"] = list( + set(result_data["notificationIDList"]) + ) + else: + result_data["notificationIDList"] = default_notification_ids + + # All numeric values sent to API have to be of type int + for key, value in result_data.items(): + if str(value).isdigit(): + result_data[key] = int(value) + + return result_data + + def get_monitor(self, container_name): + for monitor in self.monitors: + swag_tag = self.get_monitor_swag_tag_value(monitor) + if swag_tag is not None and swag_tag == container_name.lower(): + return monitor + return None + + def monitor_exists(self, container_name): + return self.get_monitor(container_name) is not None + + def create_monitor(self, container_name, monitor_data): + monitor_data = self.build_monitor_data(container_name, monitor_data) + if self.monitor_exists(container_name): + Log.info( + f"Uptime Kuma already contains Monitor '{monitor_data['name']}'" + f" for container '{container_name}', skipping..." 
+ ) + return None + + Log.info( + f"Adding Monitor '{monitor_data['name']}' for container '{container_name}'" + ) + + monitor = self.api.add_monitor(**monitor_data) + + self.api.add_monitor_tag( + tag_id=self.get_swag_tag()["id"], + monitor_id=monitor["monitorID"], + value=container_name.lower(), + ) + + self.config_service.create_config(container_name, monitor_data) + + monitor = self.api.get_monitor(monitor["monitorID"]) + self.monitors.append(monitor) + + return monitor + + def edit_monitor(self, container_name, monitor_data): + """ + Please note that due to API limitations the "update" action + is actually "delete" followed by "add" + so that in the end the monitors are actually recreated + """ + new_monitor_data = self.build_monitor_data(container_name, monitor_data) + existing_monitor_data = self.get_monitor(container_name) + old_content = self.config_service.read_config_content(container_name) + new_content = self.config_service.build_config_content(new_monitor_data) + + if not old_content == new_content: + Log.info( + "Updating (Delete and Add) Monitor" + f" {existing_monitor_data['id']}:{existing_monitor_data['name']}" + ) + self.delete_monitor(container_name) + self.create_monitor(container_name, new_monitor_data) + else: + Log.info( + f"Monitor {existing_monitor_data['id']}:{existing_monitor_data['name']}" + " has no changes, skipping..." + ) + + def delete_monitor(self, container_name: str): + monitor_data = self.get_monitor(container_name) + if monitor_data is not None: + Log.info(f"Deleting Monitor {monitor_data['id']}:{monitor_data['name']}") + self.api.delete_monitor(monitor_data["id"]) + + for i, monitor in enumerate(self.monitors): + if monitor["id"] == monitor_data["id"]: + del self.monitors[i] + break + + def delete_monitors(self, container_names: list[str]): + if container_names: + Log.info( + f"Deleting Monitors for the following containers: {container_names}" + ) + for container_name in container_names: + self.delete_monitor(container_name) + else: + Log.info("No Monitors to delete") + + def get_group(self, group_name): + for group in self.groups: + swag_tag = self.get_monitor_swag_tag_value(group) + if swag_tag is not None and swag_tag == group_name.lower(): + return group + return None + + def group_exists(self, container_name): + return self.get_group(container_name) is not None + + def create_group(self, group_name): + monitor_data = { + "type": MonitorType.GROUP, + "name": group_name, + "description": "Automatically generated by SWAG auto-uptime-kuma", + } + if self.group_exists(group_name): + Log.info( + f"Uptime Kuma already contains Group '{monitor_data['name']}', skipping..." + ) + return + + Log.info(f"Adding Group '{monitor_data['name']}'") + + group = self.api.add_monitor(**monitor_data) + + self.api.add_monitor_tag( + tag_id=self.get_swag_tag()["id"], + monitor_id=group["monitorID"], + value=group_name.lower(), + ) + + group = self.api.get_monitor(group["monitorID"]) + self.groups.append(group) + + return group + + ## This intentionally does not exist. Groups management by this mod is made + ## simple so that they can be only added or deleted. 
+ # def edit_group(self, group_name): + + def delete_group(self, group_name): + group_data = self.get_group(group_name) + if group_data is not None: + Log.info(f"Deleting Group {group_data['id']}:{group_data['name']}") + self.api.delete_monitor(group_data["id"]) + + for i, group in enumerate(self.groups): + if group["id"] == group_data["id"]: + del self.groups[i] + break + + def delete_groups(self, group_names: list[str]): + if group_names: + Log.info(f"Deleting Groups with following names: {group_names}") + for group_name in group_names: + self.delete_group(group_name) + else: + Log.info("No Groups to delete") + + def get_swag_tag(self): + """ + The "swag" tag is used to detect in API which monitors were created using this script. + """ + # If the tag was not fetched yet + if self.swag_tag is None: + for tag in self.api.get_tags(): + if tag["name"] == self.swag_tag_name: + self.swag_tag = tag + break + + # If the tag was not in API then it has to be created + if self.swag_tag is None: + self.swag_tag = self.create_swag_tag() + + return self.swag_tag + + def swag_tag_exists(self): + return self.get_swag_tag() is not None + + def create_swag_tag(self): + self.swag_tag = self.api.add_tag( + name=self.swag_tag_name, color=self.swag_tag_color + ) + return self.swag_tag + + def delete_swag_tag(self): + swag_tag = self.get_swag_tag() + if swag_tag is not None: + self.api.delete_tag(self.swag_tag["id"]) + swag_tag = None + + def get_monitor_swag_tag_value(self, monitor_data): + """ + This value is container name itself. Used to link containers with monitors. + """ + for tag in monitor_data.get("tags"): + if "name" in tag and tag["name"] == self.swag_tag_name: + return tag["value"] + return None + + def get_default_notifications(self): + default_notifications = [ + notification + for notification in self.api.get_notifications() + if notification["isDefault"] is True + ] + + return default_notifications + + def purge_data(self): + """ + Deletes all of the monitors created with this script + """ + Log.info("Purging all Uptime Kuma Monitors added by this mod") + + monitor_names = [ + self.get_monitor_swag_tag_value(monitor) for monitor in self.monitors + ] + self.delete_monitors(monitor_names) + Log.info("Monitor purging finished") + group_names = [self.get_monitor_swag_tag_value(group) for group in self.groups] + self.delete_groups(group_names) + Log.info("Group purging finished") + Log.info("Deleting the Swag tag") + self.delete_swag_tag() diff --git a/root/app/helpers.py b/root/app/helpers.py deleted file mode 100644 index 1efe43be..00000000 --- a/root/app/helpers.py +++ /dev/null @@ -1,20 +0,0 @@ -def has_key_with_value(dictionary, key, value): - return key in dictionary and dictionary[key] == value - - -def merge_dicts(*dict_args): - result = {} - for dictionary in dict_args: - result.update(dictionary) - return result - - -def write_file(filename, content): - with open(filename, 'w+') as file: - file.write(content) - - -def read_file(filename): - with open(filename, 'r') as file: - content = file.read() - return content diff --git a/root/app/swagUptimeKuma.py b/root/app/swagUptimeKuma.py deleted file mode 100644 index f1a77bf3..00000000 --- a/root/app/swagUptimeKuma.py +++ /dev/null @@ -1,185 +0,0 @@ -from uptime_kuma_api.api import UptimeKumaApi, MonitorType -from helpers import * -import os - -logPrefix = "[mod-auto-uptime-kuma]" - - -class SwagUptimeKuma: - swagTagName = "swag" - swagUptimeKumaConfigDir = "/auto-uptime-kuma" - - _api = None - _apiSwagTag = None - apiMonitors = None - - 
defaultMonitorConfig = dict( - type=MonitorType.HTTP, - description="Automatically generated by SWAG auto-uptime-kuma" - ) - - def __init__(self, url, username, password): - self._api = UptimeKumaApi(url) - self._api.login(username, password) - self.apiMonitors = self._api.get_monitors() - - if not os.path.exists(self.swagUptimeKumaConfigDir): - print( - f"{logPrefix} Creating config directory '{self.swagUptimeKumaConfigDir}'") - os.makedirs(self.swagUptimeKumaConfigDir) - - def disconnect(self): - """ - API has to be disconnected at the end as the connection is blocking - """ - self._api.disconnect() - - def getSwagTag(self): - """ - The "swag" tag is used to detect in API which monitors were created using this script. - """ - # If the tag was not fetched yet - if (self._apiSwagTag == None): - for tag in self._api.get_tags(): - if (tag['name'] == self.swagTagName): - self._apiSwagTag = tag - break - - # If the tag was not in API then it has to be created - if (self._apiSwagTag == None): - self._apiSwagTag = self._api.add_tag( - name=self.swagTagName, color="#ff4f97") - - return self._apiSwagTag - - def parseMonitorData(self, containerName, domainName, monitorData): - """ - Some of the container label values might have to be converted before sending to API. - Additionally merge default config with label config. - """ - # Convert strings that are lists in API - for key in ["accepted_statuscodes", "notificationIDList"]: - if (key in monitorData and type(monitorData[key]) is str): - monitorData[key] = monitorData[key].split(",") - - dynamicMonitorConfig = { - "name": containerName.title(), - "url": f"https://{containerName}.{domainName}" - } - - return merge_dicts(self.defaultMonitorConfig, dynamicMonitorConfig, monitorData) - - def addMonitor(self, containerName, domainName, monitorData): - monitorData = self.parseMonitorData( - containerName, domainName, monitorData) - if (has_key_with_value(self.apiMonitors, "name", monitorData['name'])): - print( - f"{logPrefix} Uptime Kuma already contains '{monitorData['name']}' monitor, skipping...") - return - - print( - f"{logPrefix} Adding monitor '{monitorData['name']}'") - - monitor = self._api.add_monitor(**monitorData) - - self._api.add_monitor_tag( - tag_id=self.getSwagTag()['id'], - monitor_id=monitor['monitorID'], - value=containerName - ) - - content = self.buildContainerConfigContent(monitorData) - write_file( - f"{self.swagUptimeKumaConfigDir}/{containerName}.conf", content) - - def deleteMonitor(self, containerName): - monitorData = self.getMonitor(containerName) - print( - f"{logPrefix} Deleting monitor {monitorData['id']}:{monitorData['name']}") - self._api.delete_monitor(monitorData['id']) - - def deleteMonitors(self, containerNames): - print(f"{logPrefix} Deleting all monitors that had their containers removed or were disabled") - if (containerNames): - for containerName in containerNames: - self.deleteMonitor(containerName) - else: - print(f"{logPrefix} Nothing to remove") - - def updateMonitor(self, containerName, domainName, monitorData): - """ - Please not that due to API limitations the "update" action is actually "delete" followed by "add" - so that in the end the monitors are actually recreated - """ - newContent = self.buildContainerConfigContent(monitorData) - oldContent = self.readContainerConfigContent(containerName) - existingMonitorData = self.getMonitor(containerName) - - if (not oldContent == newContent): - print( - f"{logPrefix} Updating (Delete and Add) monitor 
{existingMonitorData['id']}:{existingMonitorData['name']}") - self.deleteMonitor(containerName) - self.addMonitor(containerName, domainName, monitorData) - else: - print( - f"{logPrefix} Monitor {existingMonitorData['id']}:{existingMonitorData['name']} is unchanged, skipping...") - - def buildContainerConfigContent(self, monitorData): - """ - In order to compare if container labels were changed the contents are stored in config files for each container. - """ - content = "" - for key, value in monitorData.items(): - content += f'{key}={value}\n' - return content.strip() - - def readContainerConfigContent(self, containerName): - fileName = f"{self.swagUptimeKumaConfigDir}/{containerName}.conf" - if (not os.path.exists(fileName)): - return "" - - return read_file(fileName).strip() - - def getMonitor(self, containerName): - for monitor in self.apiMonitors: - swagTagValue = self.getMonitorSwagTagValue(monitor) - if (swagTagValue != None and swagTagValue == containerName): - return monitor - return None - - def monitorExists(self, containerName): - return True if self.getMonitor(containerName) else False - - def getMonitorSwagTagValue(self, monitorData): - """ - This value is container name itself. Used to link containers with monitors. - """ - for tag in monitorData.get('tags'): - if (has_key_with_value(tag, "name", self.swagTagName)): - return tag['value'] - return None - - def purgeData(self): - """ - Removes all of the monitors and files created with this script - """ - print(f"{logPrefix} Purging all monitors added by swag") - - for monitor in self.apiMonitors: - containerName = self.getMonitorSwagTagValue(monitor) - if (containerName != None): - self.deleteMonitor(containerName) - - if os.path.exists(self.swagUptimeKumaConfigDir): - print( - f"{logPrefix} Purging config directory '{self.swagUptimeKumaConfigDir}'") - file_list = os.listdir(self.swagUptimeKumaConfigDir) - - for filename in file_list: - file_path = os.path.join( - self.swagUptimeKumaConfigDir, filename) - if os.path.isfile(file_path): - os.remove(file_path) - print(f"{logPrefix} Removed '{file_path}' file") - - print(f"{logPrefix} Purging finished") From 65bb87bc064c69401b17bac60cafc052e1ed37e8 Mon Sep 17 00:00:00 2001 From: labmonkey Date: Sun, 21 Apr 2024 13:46:35 +0200 Subject: [PATCH 5/5] changed MULTI_ARCH to false --- .github/workflows/BuildImage.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/BuildImage.yml b/.github/workflows/BuildImage.yml index 7638ca63..37850f08 100644 --- a/.github/workflows/BuildImage.yml +++ b/.github/workflows/BuildImage.yml @@ -7,7 +7,7 @@ env: ENDPOINT: "linuxserver/mods" #don't modify BASEIMAGE: "swag" #replace MODNAME: "auto-uptime-kuma" #replace - MULTI_ARCH: "true" #set to false if not needed + MULTI_ARCH: "false" #set to false if not needed jobs: set-vars:
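As a closing note on the refactor in PATCH 4/5: changes to a container's labels can be detected cheaply across SWAG restarts because each Monitor's settings are serialized to a per-container `.conf` file and compared as plain text. Below is a minimal sketch of that change detection, a hypothetical standalone form of `ConfigService.build_config_content()` and the comparison in `edit_monitor()`, not the mod's exact code:

```
def build_config_content(monitor_data):
    # Serialize the monitor settings as simple key=value lines.
    return "\n".join(f"{key}={value}" for key, value in monitor_data.items())

old_content = build_config_content({"name": "Radarr", "interval": 60})
new_content = build_config_content({"name": "Radarr", "interval": 123})

if old_content != new_content:
    # Due to Uptime Kuma API limitations, "update" is really Delete + Add,
    # so the recreated Monitor gets a new id and loses its heartbeat history.
    print("labels changed, recreating monitor")
```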