diff --git a/CHANGELOG.md b/CHANGELOG.md index 45cf8c2..dca5057 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,10 @@ +## 3.8.0 2023-03-27 + + ### Added + - Introduce DB_DUMP_TARGET_ARCHIVE which works with DB_ARCHIVE_TIME to move backups older than (x) minutes from DB_DUMP_TARGET to DB_DUMP_TARGET_ARCHIVE for use with external backup systems and custom exclude rules + - Introduce CREATE_LATEST_SYMLINK which creates a symbolic link in DB_DUMP_TARGET of `latest-(DB_TYPE)-(DB_NAME)-(DB_HOST)` + + ## 3.7.7 2023-03-20 ### Changed diff --git a/README.md b/README.md index 01c1eec..e21b06d 100644 --- a/README.md +++ b/README.md @@ -82,11 +82,11 @@ Clone this repository and build the image with `docker build (imagen ### Prebuilt Images Builds of the image are available on [Docker Hub](https://hub.docker.com/r/tiredofit/db-backup) -Builds of the image are also available on the [Github Container Registry](https://github.com/tiredofit/docker-db-backup/pkgs/container/docker-db-backup) - +Builds of the image are also available on the [Github Container Registry](https://github.com/tiredofit/docker-db-backup/pkgs/container/docker-db-backup) + ``` docker pull ghcr.io/tiredofit/docker-db-backup:(imagetag) -``` +``` The following image tags are available along with their tagged release based on what's written in the [Changelog](CHANGELOG.md): @@ -123,7 +123,7 @@ The following directories are used for configuration and can be mapped for persi #### Base Images used -This image relies on an [Alpine Linux](https://hub.docker.com/r/tiredofit/alpine) or [Debian Linux](https://hub.docker.com/r/tiredofit/debian) base image that relies on an [init system](https://github.com/just-containers/s6-overlay) for added capabilities. Outgoing SMTP capabilities are handlded via `msmtp`. Individual container performance monitoring is performed by [zabbix-agent](https://zabbix.org). Additional tools include: `bash`,`curl`,`less`,`logrotate`, `nano`. 
+This image relies on an [Alpine Linux](https://hub.docker.com/r/tiredofit/alpine) base image that relies on an [init system](https://github.com/just-containers/s6-overlay) for added capabilities. Outgoing SMTP capabilities are handlded via `msmtp`. Individual container performance monitoring is performed by [zabbix-agent](https://zabbix.org). Additional tools include: `bash`,`curl`,`less`,`logrotate`, `nano`. Be sure to view the following repositories to understand all the customizable options: @@ -133,16 +133,17 @@ Be sure to view the following repositories to understand all the customizable op #### Container Options -| Parameter | Description | Default | -| -------------------- | -------------------------------------------------------------------------------------------------------------------------------- | --------------- | -| `BACKUP_LOCATION` | Backup to `FILESYSTEM` or `S3` compatible services like S3, Minio, Wasabi | `FILESYSTEM` | -| `MODE` | `AUTO` mode to use internal scheduling routines or `MANUAL` to simply use this as manual backups only executed by your own means | `AUTO` | -| `MANUAL_RUN_FOREVER` | `TRUE` or `FALSE` if you wish to try to make the container exit after the backup | `TRUE` | -| `TEMP_LOCATION` | Perform Backups and Compression in this temporary directory | `/tmp/backups/` | -| `DEBUG_MODE` | If set to `true`, print copious shell script messages to the container log. Otherwise only basic messages are printed. | `FALSE` | -| `PRE_SCRIPT` | Fill this variable in with a command to execute pre backing up | | -| `POST_SCRIPT` | Fill this variable in with a command to execute post backing up | | -| `SPLIT_DB` | For each backup, create a new archive. 
`TRUE` or `FALSE` (MySQL and Postgresql Only) | `TRUE` | +| Parameter | Description | Default | +| ----------------------- | -------------------------------------------------------------------------------------------------------------------------------- | --------------- | +| `BACKUP_LOCATION` | Backup to `FILESYSTEM` or `S3` compatible services like S3, Minio, Wasabi | `FILESYSTEM` | +| `MODE` | `AUTO` mode to use internal scheduling routines or `MANUAL` to simply use this as manual backups only executed by your own means | `AUTO` | +| `MANUAL_RUN_FOREVER` | `TRUE` or `FALSE` if you wish to try to make the container exit after the backup | `TRUE` | +| `TEMP_LOCATION` | Perform Backups and Compression in this temporary directory | `/tmp/backups/` | +| `DEBUG_MODE` | If set to `true`, print copious shell script messages to the container log. Otherwise only basic messages are printed. | `FALSE` | +| `CREATE_LATEST_SYMLINK` | Create a symbolic link pointing to last backup in this format: `latest-(DB_TYPE)-(DB_NAME)-(DB_HOST) | `TRUE` | +| `PRE_SCRIPT` | Fill this variable in with a command to execute pre backing up | | +| `POST_SCRIPT` | Fill this variable in with a command to execute post backing up | | +| `SPLIT_DB` | For each backup, create a new archive. `TRUE` or `FALSE` (MySQL and Postgresql Only) | `TRUE` | ### Database Specific Options | Parameter | Description | Default | @@ -163,38 +164,39 @@ Be sure to view the following repositories to understand all the customizable op Your Organization will be mapped to `DB_USER` and your root token will need to be mapped to `DB_PASS`. You may use `DB_NAME=ALL` to backup the entire set of databases. 
For `DB_HOST` use syntax of `http(s)://db-name` ### Scheduling Options -| Parameter | Description | Default | -| ----------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------- | -| `DB_DUMP_FREQ` | How often to do a dump, in minutes after the first backup. Defaults to 1440 minutes, or once per day. | `1440` | -| `DB_DUMP_BEGIN` | What time to do the first dump. Defaults to immediate. Must be in one of two formats | | -| | Absolute HHMM, e.g. `2330` or `0415` | | -| | Relative +MM, i.e. how many minutes after starting the container, e.g. `+0` (immediate), `+10` (in 10 minutes), or `+90` in an hour and a half | | -| `DB_DUMP_TARGET` | Directory where the database dumps are kept. | `/backup` | -| `DB_CLEANUP_TIME` | Value in minutes to delete old backups (only fired when dump freqency fires). 1440 would delete anything above 1 day old. You don't need to set this variable if you want to hold onto everything. | `FALSE` | - +| Parameter | Description | Default | +| ------------------------ | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------- | +| `DB_DUMP_FREQ` | How often to do a dump, in minutes after the first backup. Defaults to 1440 minutes, or once per day. | `1440` | +| `DB_DUMP_BEGIN` | What time to do the first dump. Defaults to immediate. Must be in one of two formats | | +| | Absolute HHMM, e.g. `2330` or `0415` | | +| | Relative +MM, i.e. how many minutes after starting the container, e.g. `+0` (immediate), `+10` (in 10 minutes), or `+90` in an hour and a half | | +| `DB_DUMP_TARGET` | Directory where the database dumps are kept. 
| `${DB_DUMP_TARGET}/archive/` | +| `DB_DUMP_TARGET_ARCHIVE` | Optional Directory where the database dumps archivs are kept. | +| `DB_CLEANUP_TIME` | Value in minutes to delete old backups (only fired when dump freqency fires). 1440 would delete anything above 1 day old. You don't need to set this variable if you want to hold onto everything. | `FALSE` | +| `DB_ARCHIVE_TIME` | Value in minutes to move all files from `DB_DUMP_TARGET` to `DB_DUMP_TARGET_ARCHIVE` - which is useful when pairing against an external backup system. | - You may need to wrap your `DB_DUMP_BEGIN` value in quotes for it to properly parse. There have been reports of backups that start with a `0` get converted into a different format which will not allow the timer to start at the correct time. ### Backup Options -| Parameter | Description | Default | -| ------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------- | ------------------------- | -| `COMPRESSION` | Use either Gzip `GZ`, Bzip2 `BZ`, XZip `XZ`, ZSTD `ZSTD` or none `NONE` | `ZSTD` | -| `COMPRESSION_LEVEL` | Numberical value of what level of compression to use, most allow `1` to `9` except for `ZSTD` which allows for `1` to `19` - | `3` | -| `ENABLE_PARALLEL_COMPRESSION` | Use multiple cores when compressing backups `TRUE` or `FALSE` | `TRUE` | -| `PARALLEL_COMPRESSION_THREADS` | Maximum amount of threads to use when compressing - Integer value e.g. `8` | `autodetected` | -| `GZ_RSYNCABLE` | Use `--rsyncable` (gzip only) for faster rsync transfers and incremental backup deduplication. e.g. `TRUE` | `FALSE` | -| `ENABLE_CHECKSUM` | Generate either a MD5 or SHA1 in Directory, `TRUE` or `FALSE` | `TRUE` | -| `CHECKSUM` | Either `MD5` or `SHA1` | `MD5` | -| `EXTRA_OPTS` | If you need to pass extra arguments to the backup command, add them here e.g. 
`--extra-command` | | -| `MYSQL_MAX_ALLOWED_PACKET` | Max allowed packet if backing up MySQL / MariaDB | `512M` | -| `MYSQL_SINGLE_TRANSACTION` | Backup in a single transaction with MySQL / MariaDB | `TRUE` | -| `MYSQL_STORED_PROCEDURES` | Backup stored procedures with MySQL / MariaDB | `TRUE` | -| `MYSQL_ENABLE_TLS` | Enable TLS functionality for MySQL client | `FALSE` | -| `MYSQL_TLS_VERIFY` | (optional) If using TLS (by means of MYSQL_TLS_* variables) verify remote host | `FALSE` | -| `MYSQL_TLS_VERSION` | What TLS `v1.1` `v1.2` `v1.3` version to utilize | `TLSv1.1,TLSv1.2,TLSv1.3` | -| `MYSQL_TLS_CA_FILE` | Filename to load custom CA certificate for connecting via TLS | `/etc/ssl/cert.pem` | -| `MYSQL_TLS_CERT_FILE` | Filename to load client certificate for connecting via TLS | | -| `MYSQL_TLS_KEY_FILE` | Filename to load client key for connecting via TLS | | +| Parameter | Description | Default | +| ------------------------------ | ---------------------------------------------------------------------------------------------------------------------------- | ------------------------- | +| `COMPRESSION` | Use either Gzip `GZ`, Bzip2 `BZ`, XZip `XZ`, ZSTD `ZSTD` or none `NONE` | `ZSTD` | +| `COMPRESSION_LEVEL` | Numberical value of what level of compression to use, most allow `1` to `9` except for `ZSTD` which allows for `1` to `19` - | `3` | +| `ENABLE_PARALLEL_COMPRESSION` | Use multiple cores when compressing backups `TRUE` or `FALSE` | `TRUE` | +| `PARALLEL_COMPRESSION_THREADS` | Maximum amount of threads to use when compressing - Integer value e.g. `8` | `autodetected` | +| `GZ_RSYNCABLE` | Use `--rsyncable` (gzip only) for faster rsync transfers and incremental backup deduplication. e.g. `TRUE` | `FALSE` | +| `ENABLE_CHECKSUM` | Generate either a MD5 or SHA1 in Directory, `TRUE` or `FALSE` | `TRUE` | +| `CHECKSUM` | Either `MD5` or `SHA1` | `MD5` | +| `EXTRA_OPTS` | If you need to pass extra arguments to the backup command, add them here e.g. 
`--extra-command` | | +| `MYSQL_MAX_ALLOWED_PACKET` | Max allowed packet if backing up MySQL / MariaDB | `512M` | +| `MYSQL_SINGLE_TRANSACTION` | Backup in a single transaction with MySQL / MariaDB | `TRUE` | +| `MYSQL_STORED_PROCEDURES` | Backup stored procedures with MySQL / MariaDB | `TRUE` | +| `MYSQL_ENABLE_TLS` | Enable TLS functionality for MySQL client | `FALSE` | +| `MYSQL_TLS_VERIFY` | (optional) If using TLS (by means of MYSQL_TLS_* variables) verify remote host | `FALSE` | +| `MYSQL_TLS_VERSION` | What TLS `v1.1` `v1.2` `v1.3` version to utilize | `TLSv1.1,TLSv1.2,TLSv1.3` | +| `MYSQL_TLS_CA_FILE` | Filename to load custom CA certificate for connecting via TLS | `/etc/ssl/cert.pem` | +| `MYSQL_TLS_CERT_FILE` | Filename to load client certificate for connecting via TLS | | +| `MYSQL_TLS_KEY_FILE` | Filename to load client key for connecting via TLS | | - When using compression with MongoDB, only `GZ` compression is possible. diff --git a/install/assets/defaults/10-db-backup b/install/assets/defaults/10-db-backup index 07aca2d..a491ddd 100644 --- a/install/assets/defaults/10-db-backup +++ b/install/assets/defaults/10-db-backup @@ -8,6 +8,7 @@ COMPRESSION_LEVEL=${COMPRESSION_LEVEL:-"3"} DB_DUMP_BEGIN=${DB_DUMP_BEGIN:-+0} DB_DUMP_FREQ=${DB_DUMP_FREQ:-1440} DB_DUMP_TARGET=${DB_DUMP_TARGET:-"/backup"} +DB_DUMP_TARGET_ARCHIVE=${DB_DUMP_TARGET_ARCHIVE:-"${DB_DUMP_TARGET}/archive/"} ENABLE_CHECKSUM=${ENABLE_CHECKSUM:-"TRUE"} ENABLE_PARALLEL_COMPRESSION=${ENABLE_PARALLEL_COMPRESSION:-"TRUE"} MANUAL_RUN_FOREVER=${MANUAL_RUN_FOREVER:-"TRUE"} @@ -27,4 +28,5 @@ SCRIPT_LOCATION_POST=${SCRIPT_LOCATION_POST:-"/assets/scripts/post/"} SIZE_VALUE=${SIZE_VALUE:-"bytes"} SKIP_AVAILABILITY_CHECK=${SKIP_AVAILABILITY_CHECK:-"FALSE"} SPLIT_DB=${SPLIT_DB:-"TRUE"} -TEMP_LOCATION=${TEMP_LOCATION:-"/tmp/backups"} \ No newline at end of file +TEMP_LOCATION=${TEMP_LOCATION:-"/tmp/backups"} +CREATE_LATEST_SYMLINK=${CREATE_LATEST_SYMLINK:-"TRUE"} \ No newline at end of file diff --git 
a/install/assets/functions/10-db-backup b/install/assets/functions/10-db-backup index 22cdaba..12207ae 100644 --- a/install/assets/functions/10-db-backup +++ b/install/assets/functions/10-db-backup @@ -114,6 +114,7 @@ bootstrap_variables() { backup_couch() { prepare_dbbackup target=couch_${DB_NAME}_${DB_HOST#*//}_${now}.txt + ltarget=couch_${DB_NAME}_${DB_HOST#*//} compression pre_dbbackup ${DB_NAME} print_notice "Dumping CouchDB database: '${DB_NAME}' ${compression_string}" @@ -139,6 +140,7 @@ backup_influx() { prepare_dbbackup if [ "${db}" != "justbackupeverything" ] ; then bucket="-db ${db}" ; else db=all ; fi target=influx_${db}_${DB_HOST#*//}_${now} + ltarget=influx_${db}_${DB_HOST#*//} compression pre_dbbackup $db print_notice "Dumping Influx database: '${db}'" @@ -148,6 +150,7 @@ backup_influx() { print_notice "Creating archive file of '${target_dir}' with tar ${compression_string}" tar cf - "${TEMP_LOCATION}"/"${target_dir}" | $dir_compress_cmd > "${TEMP_LOCATION}"/"${target_dir}".tar"${extension}" target=influx_${db}_${DB_HOST#*//}_${now}.tar${extension} + ltarget=influx_${db}_${DB_HOST#*//} generate_checksum move_dbbackup post_dbbackup $db @@ -158,6 +161,7 @@ backup_influx() { prepare_dbbackup if [ "${db}" != "justbackupeverything" ] ; then bucket="--bucket $db" ; else db=all ; fi target=influx2_${db}_${DB_HOST#*//}_${now} + ltarget=influx2_${db}_${DB_HOST#*//} compression pre_dbbackup $db print_notice "Dumping Influx2 database: '${db}'" @@ -178,8 +182,10 @@ backup_mongo() { prepare_dbbackup if [ "${ENABLE_COMPRESSION,,}" = "none" ] || [ "${ENABLE_COMPRESSION,,}" = "false" ] ; then target=${dbtype}_${DB_NAME,,}_${DB_HOST,,}_${now}.archive + ltarget=${dbtype}_${DB_NAME,,}_${DB_HOST,,} else target=${dbtype}_${DB_NAME,,}_${DB_HOST,,}_${now}.archive.gz + ltarget=${dbtype}_${DB_NAME,,}_${DB_HOST,,} mongo_compression="--gzip" compression_string="and compressing with gzip" fi @@ -201,6 +207,7 @@ backup_mongo() { backup_mssql() { prepare_dbbackup
target=mssql_${DB_NAME,,}_${DB_HOST,,}_${now}.bak + ltarget=mssql_${DB_NAME,,}_${DB_HOST,,} compression pre_dbbackup "${DB_NAME}" print_notice "Dumping MSSQL database: '${DB_NAME}'" @@ -240,6 +247,7 @@ backup_mysql() { for db in ${db_names} ; do prepare_dbbackup target=mysql_${db}_${DB_HOST,,}_${now}.sql + ltarget=mysql_${db}_${DB_HOST,,} compression pre_dbbackup $db print_notice "Dumping MySQL/MariaDB database: '${db}' ${compression_string}" @@ -254,6 +262,7 @@ print_debug "Not splitting database dumps into their own files" prepare_dbbackup target=mysql_all_${DB_HOST,,}_${now}.sql + ltarget=mysql_all_${DB_HOST,,} compression pre_dbbackup all print_notice "Dumping all MySQL / MariaDB databases: '$(echo ${db_names} | xargs | tr ' ' ',')' ${compression_string}" @@ -289,6 +298,7 @@ backup_pgsql() { for db in ${db_names} ; do prepare_dbbackup target=pgsql_${db}_${DB_HOST,,}_${now}.sql + ltarget=pgsql_${db}_${DB_HOST,,} compression pre_dbbackup $db print_notice "Dumping PostgresSQL database: '${db}' ${compression_string}" @@ -303,6 +313,7 @@ print_debug "Not splitting database dumps into their own files" prepare_dbbackup target=pgsql_all_${DB_HOST,,}_${now}.sql + ltarget=pgsql_all_${DB_HOST,,} compression pre_dbbackup all print_notice "Dumping all PostgreSQL databases: '$(echo ${db_names} | xargs | tr ' ' ',')' ${compression_string}" @@ -327,6 +338,7 @@ backup_redis() { prepare_dbbackup print_notice "Dumping Redis - Flushing Redis Cache First" target=redis_all_${DB_HOST,,}_${now}.rdb + ltarget=redis_${DB_HOST,,} echo bgsave | silent redis-cli -h ${DB_HOST} -p ${DB_PORT} ${REDIS_PASS_STR} --rdb ${TEMP_LOCATION}/${target} ${EXTRA_OPTS} sleep 10 try=5 @@ -357,6 +369,7 @@ backup_sqlite3() { db=$(basename "${DB_HOST}") db="${db%.*}" target=sqlite3_${db}_${now}.sqlite3 + ltarget=sqlite3_${db}.sqlite3 compression pre_dbbackup $db print_notice "Dumping sqlite3 database: '${DB_HOST}' ${compression_string}" @@ -653,6 +666,14 @@ move_dbbackup() { 
mkdir -p "${DB_DUMP_TARGET}" mv "${TEMP_LOCATION}"/*."${checksum_extension}" "${DB_DUMP_TARGET}"/ mv "${TEMP_LOCATION}"/"${target}" "${DB_DUMP_TARGET}"/"${target}" + if var_true "${CREATE_LATEST_SYMLINK}" ; then + ln -sf "${DB_DUMP_TARGET}"/"${target}" "${DB_DUMP_TARGET}"/latest-"${ltarget}" + fi + if [ -n "${DB_ARCHIVE_TIME}" ] ; then + mkdir -p "${DB_DUMP_TARGET_ARCHIVE}" + find "${DB_DUMP_TARGET}"/ -maxdepth 1 -mmin +"${DB_ARCHIVE_TIME}" -iname "*" -exec mv {} "${DB_DUMP_TARGET_ARCHIVE}" \; + if [ -n "${DB_CLEANUP_TIME}" ] && [ "${DB_CLEANUP_TIME,,}" != "false" ] ; then find "${DB_DUMP_TARGET_ARCHIVE}"/ -maxdepth 1 -mmin +"${DB_CLEANUP_TIME}" -iname "*" -exec rm {} \; ; fi + fi ;; "s3" | "minio" ) print_debug "Moving backup to S3 Bucket" @@ -707,6 +728,7 @@ prepare_dbbackup() { now=$(date +"%Y%m%d-%H%M%S") now_time=$(date +"%H:%M:%S") now_date=$(date +"%Y-%m-%d") + ltarget=${dbtype}_${DB_NAME,,}_${DB_HOST,,} target=${dbtype}_${DB_NAME,,}_${DB_HOST,,}_${now}.sql } @@ -836,8 +858,7 @@ setup_mode() { mkdir -p /etc/services.d/99-run_forever cat < /etc/services.d/99-run_forever/run #!/bin/bash -while true -do +while true; do sleep 86400 done EOF